/*
** 2001 September 15
**
** The author disclaims copyright to this source code. In place of
** a legal notice, here is a blessing:
**
** May you do good and not evil.
** May you find forgiveness for yourself and forgive others.
** May you share freely, never taking more than you give.
**
*************************************************************************
** A tokenizer for SQL
**
** This file contains C code that splits an SQL input string up into
** individual tokens and sends those tokens one-by-one over to the
** parser for analysis.
*/
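
/*
** For illustration, a query such as
**
**     SELECT * FROM Property WHERE Value = 1
**
** is expected to come back from sqliteGetToken() below, one call at a
** time, as TK_SELECT, TK_STAR, TK_FROM, TK_ID, TK_WHERE, TK_ID, TK_EQ
** and TK_INTEGER, with a TK_SPACE token reported for each run of
** whitespace in between.
*/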

#include <ctype.h>
#include <stdarg.h>
#include <stdlib.h>

#include "windef.h"
#include "winbase.h"
#include "query.h"
#include "sql.tab.h"

/*
** All the keywords of the SQL language are stored in a sorted
** table composed of instances of the following structure.
*/
typedef struct Keyword Keyword;
struct Keyword {
  const WCHAR *name;  /* The keyword name */
  unsigned int len;
  int tokenType;      /* The token value for this keyword */
};

#define MAX_TOKEN_LEN 11

/*
** These are the keywords
** They MUST be in alphabetical order
*/
#define X(str) str, ARRAY_SIZE(str) - 1
static const Keyword aKeywordTable[] = {
  { X(L"ADD"), TK_ADD },
  { X(L"ALTER"), TK_ALTER },
  { X(L"AND"), TK_AND },
  { X(L"BY"), TK_BY },
  { X(L"CHAR"), TK_CHAR },
  { X(L"CHARACTER"), TK_CHAR },
  { X(L"CREATE"), TK_CREATE },
  { X(L"DELETE"), TK_DELETE },
  { X(L"DISTINCT"), TK_DISTINCT },
  { X(L"DROP"), TK_DROP },
  { X(L"FREE"), TK_FREE },
  { X(L"FROM"), TK_FROM },
  { X(L"HOLD"), TK_HOLD },
  { X(L"INSERT"), TK_INSERT },
  { X(L"INT"), TK_INT },
  { X(L"INTEGER"), TK_INT },
  { X(L"INTO"), TK_INTO },
  { X(L"IS"), TK_IS },
  { X(L"KEY"), TK_KEY },
  { X(L"LIKE"), TK_LIKE },
  { X(L"LOCALIZABLE"), TK_LOCALIZABLE },
  { X(L"LONG"), TK_LONG },
  { X(L"LONGCHAR"), TK_LONGCHAR },
  { X(L"NOT"), TK_NOT },
  { X(L"NULL"), TK_NULL },
  { X(L"OBJECT"), TK_OBJECT },
  { X(L"OR"), TK_OR },
  { X(L"ORDER"), TK_ORDER },
  { X(L"PRIMARY"), TK_PRIMARY },
  { X(L"SELECT"), TK_SELECT },
  { X(L"SET"), TK_SET },
  { X(L"SHORT"), TK_SHORT },
  { X(L"TABLE"), TK_TABLE },
  { X(L"TEMPORARY"), TK_TEMPORARY },
  { X(L"UPDATE"), TK_UPDATE },
  { X(L"VALUES"), TK_VALUES },
  { X(L"WHERE"), TK_WHERE },
};
#undef X

/*
** Comparison function for binary search.
*/
static int __cdecl compKeyword(const void *m1, const void *m2){
    const Keyword *k1 = m1, *k2 = m2;
    int ret, len = min( k1->len, k2->len );

    if ((ret = wcsnicmp( k1->name, k2->name, len ))) return ret;
    if (k1->len < k2->len) return -1;
    else if (k1->len > k2->len) return 1;
    return 0;
}
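
/*
** For example, when compKeyword() compares the entries for "INT" and
** "INTEGER", wcsnicmp() over the first min(3,7) characters returns 0,
** and the shorter name "INT" then sorts first, which matches the
** relative order of the two keywords in aKeywordTable above and is
** exactly the ordering that bsearch() relies on.
*/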

/*
** This function looks up an identifier to determine if it is a
** keyword. If it is a keyword, the token code of that keyword is
** returned. If the input is not a keyword, TK_ID is returned.
*/
static int sqliteKeywordCode(const WCHAR *z, int n){
  Keyword key, *r;

  if( n>MAX_TOKEN_LEN )
    return TK_ID;

  key.tokenType = 0;
  key.name = z;
  key.len = n;
  r = bsearch( &key, aKeywordTable, ARRAY_SIZE(aKeywordTable), sizeof(Keyword), compKeyword );
  if( r )
    return r->tokenType;
  return TK_ID;
}
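
/*
** For example, sqliteKeywordCode(L"Select", 6) returns TK_SELECT (the
** lookup is case-insensitive because compKeyword() uses wcsnicmp()),
** while sqliteKeywordCode(L"Filename", 8) finds no match and returns
** TK_ID. Anything longer than MAX_TOKEN_LEN characters (11, the
** length of "LOCALIZABLE", the longest keyword) is reported as TK_ID
** without searching the table at all.
*/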

/*
** If X is a character that can be used in an identifier then
** isIdChar[X] will be 1. Otherwise isIdChar[X] will be 0.
**
** In this implementation, an identifier can be a string of
** alphabetic characters, digits, and "_" plus any character
** with the high-order bit set. The latter rule means that
** any sequence of UTF-8 characters or characters taken from
** an extended ISO8859 character set can form an identifier.
** The table below additionally accepts '-' and the character
** 0x05 as identifier characters.
*/
static const char isIdChar[] = {
/* x0 x1 x2 x3 x4 x5 x6 x7 x8 x9 xA xB xC xD xE xF */
    0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,  /* 0x */
    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,  /* 1x */
    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0,  /* 2x */
    1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0,  /* 3x */
    0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,  /* 4x */
    1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1,  /* 5x */
    0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,  /* 6x */
    1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0,  /* 7x */
    1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,  /* 8x */
    1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,  /* 9x */
    1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,  /* Ax */
    1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,  /* Bx */
    1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,  /* Cx */
    1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,  /* Dx */
    1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,  /* Ex */
    1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,  /* Fx */
};
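
/*
** For example, isIdChar['A'], isIdChar['7'], isIdChar['_'] and
** isIdChar['-'] are all 1, while isIdChar[' '] and isIdChar['='] are 0,
** so an input such as "Component_" is consumed as a single TK_ID token
** by the default case of sqliteGetToken() below.
*/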

/*
** WCHAR safe version of isdigit()
*/
static inline int isDigit(WCHAR c)
{
    return c >= '0' && c <= '9';
}

/*
** WCHAR safe version of isspace(), except '\r'
*/
static inline int isSpace(WCHAR c)
{
    return c == ' ' || c == '\t' || c == '\n' || c == '\f';
}

/*
** Return the length of the token that begins at z[0]. Return
** -1 if the token is (or might be) incomplete. Store the token
** type in *tokenType before returning.
*/
int sqliteGetToken(const WCHAR *z, int *tokenType, int *skip){
  int i;

  *skip = 0;
  switch( *z ){
    case ' ': case '\t': case '\n': case '\f':
      for(i=1; isSpace(z[i]); i++){}
      *tokenType = TK_SPACE;
      return i;
    case '-':
      if( z[1]==0 ) return -1;
      *tokenType = TK_MINUS;
      return 1;
    case '(':
      *tokenType = TK_LP;
      return 1;
    case ')':
      *tokenType = TK_RP;
      return 1;
    case '*':
      *tokenType = TK_STAR;
      return 1;
    case '=':
      *tokenType = TK_EQ;
      return 1;
    case '<':
      if( z[1]=='=' ){
        *tokenType = TK_LE;
        return 2;
      }else if( z[1]=='>' ){
        *tokenType = TK_NE;
        return 2;
      }else{
        *tokenType = TK_LT;
        return 1;
      }
    case '>':
      if( z[1]=='=' ){
        *tokenType = TK_GE;
        return 2;
      }else{
        *tokenType = TK_GT;
        return 1;
      }
    case '!':
      if( z[1]!='=' ){
        *tokenType = TK_ILLEGAL;
        return 2;
      }else{
        *tokenType = TK_NE;
        return 2;
      }
    case '?':
      *tokenType = TK_WILDCARD;
      return 1;
    case ',':
      *tokenType = TK_COMMA;
      return 1;
    case '`': case '\'': {
      int delim = z[0];
      for(i=1; z[i]; i++){
        if( z[i]==delim )
          break;
      }
      if( z[i] ) i++;
      if( delim == '`' )
        *tokenType = TK_ID;
      else
        *tokenType = TK_STRING;
      return i;
    }
    case '.':
      if( !isDigit(z[1]) ){
        *tokenType = TK_DOT;
        return 1;
      }
      /* Fall through */
    case '0': case '1': case '2': case '3': case '4':
    case '5': case '6': case '7': case '8': case '9':
      *tokenType = TK_INTEGER;
      for(i=1; isDigit(z[i]); i++){}
      return i;
    case '[':
      for(i=1; z[i] && z[i-1]!=']'; i++){}
      *tokenType = TK_ID;
      return i;
    default:
      if( !isIdChar[*z] ){
        break;
      }
      for(i=1; isIdChar[z[i]]; i++){}
      *tokenType = sqliteKeywordCode(z, i);
      if( *tokenType == TK_ID && z[i] == '`' ) *skip = 1;
      return i;
  }
  *tokenType = TK_ILLEGAL;
  return 1;
}
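
/*
** A minimal sketch of how a caller might drive this tokenizer. The
** real caller lives in the SQL parser; the names below (query,
** handle_token) are illustrative only and not part of this file's
** interface:
**
**     const WCHAR *p = query;
**     int type, skip, len;
**
**     while( *p ){
**         len = sqliteGetToken( p, &type, &skip );
**         if( len < 0 ) break;              /* token incomplete */
**         if( type != TK_SPACE )
**             handle_token( type, p, len ); /* hypothetical helper */
**         p += len + skip;
**     }
**
** The skip count lets the caller step over a stray '`' that directly
** follows an unquoted identifier, as set in the default case above.
*/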