author     Rob Hudson  2011-08-09 10:26:42 -0700
committer  Rob Hudson  2011-08-09 10:26:42 -0700
commit     6e3d9da54cffb41461c6778c55b0461410a9655c (patch)
tree       1def7369528baac4757493eefb846319c0364323
parent     fee623f56f1429888b1b31ff56889debeda8edfb (diff)
download   django-debug-toolbar-6e3d9da54cffb41461c6778c55b0461410a9655c.tar.bz2
Updated sqlparse to v0.1.3
-rw-r--r--  debug_toolbar/utils/sqlparse/__init__.py         |    6
-rw-r--r--  debug_toolbar/utils/sqlparse/engine/__init__.py  |   20
-rw-r--r--  debug_toolbar/utils/sqlparse/engine/filter.py    |   15
-rw-r--r--  debug_toolbar/utils/sqlparse/engine/grouping.py  |  170
-rw-r--r--  debug_toolbar/utils/sqlparse/filters.py          |  174
-rw-r--r--  debug_toolbar/utils/sqlparse/formatter.py        |    6
-rw-r--r--  debug_toolbar/utils/sqlparse/keywords.py         | 1141
-rw-r--r--  debug_toolbar/utils/sqlparse/lexer.py            |  106
-rw-r--r--  debug_toolbar/utils/sqlparse/sql.py              |  136
-rw-r--r--  debug_toolbar/utils/sqlparse/tokens.py           |   84
10 files changed, 960 insertions(+), 898 deletions(-)
diff --git a/debug_toolbar/utils/sqlparse/__init__.py b/debug_toolbar/utils/sqlparse/__init__.py
index 69873ca..99db30e 100644
--- a/debug_toolbar/utils/sqlparse/__init__.py
+++ b/debug_toolbar/utils/sqlparse/__init__.py
@@ -6,10 +6,7 @@
"""Parse SQL statements."""
-__version__ = '0.1.1'
-
-
-import os
+__version__ = '0.1.3'
class SQLParseError(Exception):
@@ -56,4 +53,3 @@ def split(sql):
stack = engine.FilterStack()
stack.split_statements = True
return [unicode(stmt) for stmt in stack.run(sql)]
-
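
Note: in use, the module-level helpers touched by this hunk look roughly like the following. This is a hedged sketch; the bundled copy is imported as debug_toolbar.utils.sqlparse, while the standalone package is plain sqlparse.

    import sqlparse

    raw = "select * from foo; select * from bar;"

    # split() returns each statement as a unicode string, matching the
    # "return [unicode(stmt) for stmt in stack.run(sql)]" line above.
    statements = sqlparse.split(raw)

    # format() drives a FilterStack built from validated options.
    print(sqlparse.format(raw, reindent=True, keyword_case='upper'))
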
diff --git a/debug_toolbar/utils/sqlparse/engine/__init__.py b/debug_toolbar/utils/sqlparse/engine/__init__.py
index cae0793..e838a3e 100644
--- a/debug_toolbar/utils/sqlparse/engine/__init__.py
+++ b/debug_toolbar/utils/sqlparse/engine/__init__.py
@@ -5,9 +5,7 @@
"""filter"""
-import re
-
-from debug_toolbar.utils.sqlparse import lexer, SQLParseError
+from debug_toolbar.utils.sqlparse import lexer
from debug_toolbar.utils.sqlparse.engine import grouping
from debug_toolbar.utils.sqlparse.engine.filter import StatementFilter
@@ -42,8 +40,8 @@ class FilterStack(object):
stream = lexer.tokenize(sql)
# Process token stream
if self.preprocess:
- for filter_ in self.preprocess:
- stream = filter_.process(self, stream)
+ for filter_ in self.preprocess:
+ stream = filter_.process(self, stream)
if (self.stmtprocess or self.postprocess or self.split_statements
or self._grouping):
@@ -51,6 +49,7 @@ class FilterStack(object):
stream = splitter.process(self, stream)
if self._grouping:
+
def _group(stream):
for stmt in stream:
grouping.group(stmt)
@@ -58,23 +57,24 @@ class FilterStack(object):
stream = _group(stream)
if self.stmtprocess:
- def _run(stream):
+
+ def _run1(stream):
ret = []
for stmt in stream:
for filter_ in self.stmtprocess:
filter_.process(self, stmt)
ret.append(stmt)
return ret
- stream = _run(stream)
+ stream = _run1(stream)
if self.postprocess:
- def _run(stream):
+
+ def _run2(stream):
for stmt in stream:
stmt.tokens = list(self._flatten(stmt.tokens))
for filter_ in self.postprocess:
stmt = filter_.process(self, stmt)
yield stmt
- stream = _run(stream)
+ stream = _run2(stream)
return stream
-
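
Note: driving the stack directly only needs the attributes visible in this hunk (split_statements, the preprocess/stmtprocess/postprocess lists, run()). A hedged sketch; most callers should go through sqlparse.parse() or sqlparse.format() instead.

    from debug_toolbar.utils.sqlparse import engine

    stack = engine.FilterStack()
    stack.split_statements = True
    for stmt in stack.run("select 1; select 2;"):
        # run() yields Statement objects once splitting is enabled.
        print(unicode(stmt))
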
diff --git a/debug_toolbar/utils/sqlparse/engine/filter.py b/debug_toolbar/utils/sqlparse/engine/filter.py
index 8d1c7b2..c1c0d6a 100644
--- a/debug_toolbar/utils/sqlparse/engine/filter.py
+++ b/debug_toolbar/utils/sqlparse/engine/filter.py
@@ -1,7 +1,7 @@
# -*- coding: utf-8 -*-
+from debug_toolbar.utils.sqlparse.sql import Statement, Token
from debug_toolbar.utils.sqlparse import tokens as T
-from debug_toolbar.utils.sqlparse.engine.grouping import Statement, Token
class TokenFilter(object):
@@ -21,11 +21,13 @@ class StatementFilter(TokenFilter):
self._in_declare = False
self._in_dbldollar = False
self._is_create = False
+ self._begin_depth = 0
def _reset(self):
self._in_declare = False
self._in_dbldollar = False
self._is_create = False
+ self._begin_depth = 0
def _change_splitlevel(self, ttype, value):
# PostgreSQL
@@ -41,29 +43,32 @@ class StatementFilter(TokenFilter):
return 0
# ANSI
- if ttype is not T.Keyword:
+ if ttype not in T.Keyword:
return 0
unified = value.upper()
- if unified == 'DECLARE':
+ if unified == 'DECLARE' and self._is_create:
self._in_declare = True
return 1
if unified == 'BEGIN':
- if self._in_declare:
+ self._begin_depth += 1
+ if self._in_declare: # FIXME(andi): This makes no sense.
return 0
return 0
if unified == 'END':
# Should this respect a preceeding BEGIN?
# In CASE ... WHEN ... END this results in a split level -1.
+ self._begin_depth = max(0, self._begin_depth-1)
return -1
if ttype is T.Keyword.DDL and unified.startswith('CREATE'):
self._is_create = True
+ return 0
- if unified in ('IF', 'FOR') and self._is_create:
+ if unified in ('IF', 'FOR') and self._is_create and self._begin_depth > 0:
return 1
# Default
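
Note: the new _begin_depth bookkeeping means DECLARE only raises the split level inside a CREATE, and IF/FOR only do so once a BEGIN block has been entered. A hedged illustration of the intended effect:

    import sqlparse

    # Previously the bare IF after CREATE raised the split level, so the two
    # statements could stay glued together; with the depth check they are
    # expected to split cleanly.
    sql = "CREATE TABLE IF NOT EXISTS foo (id integer); SELECT * FROM foo;"
    for stmt in sqlparse.split(sql):
        print(stmt)
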
diff --git a/debug_toolbar/utils/sqlparse/engine/grouping.py b/debug_toolbar/utils/sqlparse/engine/grouping.py
index 532ccec..4e50c7b 100644
--- a/debug_toolbar/utils/sqlparse/engine/grouping.py
+++ b/debug_toolbar/utils/sqlparse/engine/grouping.py
@@ -1,16 +1,19 @@
# -*- coding: utf-8 -*-
import itertools
-import re
-import types
+from debug_toolbar.utils.sqlparse import sql
from debug_toolbar.utils.sqlparse import tokens as T
-from debug_toolbar.utils.sqlparse.sql import *
+try:
+ next
+except NameError: # Python < 2.6
+ next = lambda i: i.next()
def _group_left_right(tlist, ttype, value, cls,
check_right=lambda t: True,
+ check_left=lambda t: True,
include_semicolon=False):
[_group_left_right(sgroup, ttype, value, cls, check_right,
include_semicolon) for sgroup in tlist.get_sublists()
@@ -20,14 +23,20 @@ def _group_left_right(tlist, ttype, value, cls,
while token:
right = tlist.token_next(tlist.token_index(token))
left = tlist.token_prev(tlist.token_index(token))
- if (right is None or not check_right(right)
- or left is None):
- token = tlist.token_next_match(tlist.token_index(token)+1,
+ if right is None or not check_right(right):
+ token = tlist.token_next_match(tlist.token_index(token) + 1,
+ ttype, value)
+ elif left is None or not check_right(left):
+ token = tlist.token_next_match(tlist.token_index(token) + 1,
ttype, value)
else:
if include_semicolon:
- right = tlist.token_next_match(tlist.token_index(right),
- T.Punctuation, ';')
+ sright = tlist.token_next_match(tlist.token_index(right),
+ T.Punctuation, ';')
+ if sright is not None:
+ # only overwrite "right" if a semicolon is actually
+ # present.
+ right = sright
tokens = tlist.tokens_between(left, right)[1:]
if not isinstance(left, cls):
new = cls([left])
@@ -38,9 +47,10 @@ def _group_left_right(tlist, ttype, value, cls,
left.tokens.extend(tokens)
for t in tokens:
tlist.tokens.remove(t)
- token = tlist.token_next_match(tlist.token_index(left)+1,
+ token = tlist.token_next_match(tlist.token_index(left) + 1,
ttype, value)
+
def _group_matching(tlist, start_ttype, start_value, end_ttype, end_value,
cls, include_semicolon=False, recurse=False):
def _find_matching(i, tl, stt, sva, ett, eva):
@@ -66,7 +76,7 @@ def _group_matching(tlist, start_ttype, start_value, end_ttype, end_value,
end = _find_matching(tidx, tlist, start_ttype, start_value,
end_ttype, end_value)
if end is None:
- idx = tidx+1
+ idx = tidx + 1
else:
if include_semicolon:
next_ = tlist.token_next(tlist.token_index(end))
@@ -75,71 +85,102 @@ def _group_matching(tlist, start_ttype, start_value, end_ttype, end_value,
group = tlist.group_tokens(cls, tlist.tokens_between(token, end))
_group_matching(group, start_ttype, start_value,
end_ttype, end_value, cls, include_semicolon)
- idx = tlist.token_index(group)+1
+ idx = tlist.token_index(group) + 1
token = tlist.token_next_match(idx, start_ttype, start_value)
+
def group_if(tlist):
- _group_matching(tlist, T.Keyword, 'IF', T.Keyword, 'END IF', If, True)
+ _group_matching(tlist, T.Keyword, 'IF', T.Keyword, 'END IF', sql.If, True)
+
def group_for(tlist):
- _group_matching(tlist, T.Keyword, 'FOR', T.Keyword, 'END LOOP', For, True)
+ _group_matching(tlist, T.Keyword, 'FOR', T.Keyword, 'END LOOP',
+ sql.For, True)
+
def group_as(tlist):
- _group_left_right(tlist, T.Keyword, 'AS', Identifier)
+
+ def _right_valid(token):
+ # Currently limited to DML/DDL. Maybe additional more non SQL reserved
+ # keywords should appear here (see issue8).
+ return not token.ttype in (T.DML, T.DDL)
+ _group_left_right(tlist, T.Keyword, 'AS', sql.Identifier,
+ check_right=_right_valid)
+
def group_assignment(tlist):
- _group_left_right(tlist, T.Assignment, ':=', Assignment,
+ _group_left_right(tlist, T.Assignment, ':=', sql.Assignment,
include_semicolon=True)
-def group_comparsion(tlist):
- _group_left_right(tlist, T.Operator, None, Comparsion)
+
+def group_comparison(tlist):
+
+ def _parts_valid(token):
+ return (token.ttype in (T.String.Symbol, T.Name, T.Number,
+ T.Number.Integer, T.Literal,
+ T.Literal.Number.Integer)
+ or isinstance(token, (sql.Identifier,)))
+ _group_left_right(tlist, T.Operator.Comparison, None, sql.Comparison,
+ check_left=_parts_valid, check_right=_parts_valid)
def group_case(tlist):
- _group_matching(tlist, T.Keyword, 'CASE', T.Keyword, 'END', Case,
+ _group_matching(tlist, T.Keyword, 'CASE', T.Keyword, 'END', sql.Case,
include_semicolon=True, recurse=True)
def group_identifier(tlist):
def _consume_cycle(tl, i):
- x = itertools.cycle((lambda y: y.match(T.Punctuation, '.'),
- lambda y: y.ttype in (T.String.Symbol,
- T.Name,
- T.Wildcard)))
+ x = itertools.cycle((
+ lambda y: (y.match(T.Punctuation, '.')
+ or y.ttype is T.Operator),
+ lambda y: (y.ttype in (T.String.Symbol,
+ T.Name,
+ T.Wildcard,
+ T.Literal.Number.Integer))))
for t in tl.tokens[i:]:
- if x.next()(t):
+ if next(x)(t):
yield t
else:
raise StopIteration
# bottom up approach: group subgroups first
[group_identifier(sgroup) for sgroup in tlist.get_sublists()
- if not isinstance(sgroup, Identifier)]
+ if not isinstance(sgroup, sql.Identifier)]
# real processing
idx = 0
- token = tlist.token_next_by_type(idx, (T.String.Symbol, T.Name))
+ token = tlist.token_next_by_instance(idx, sql.Function)
+ if token is None:
+ token = tlist.token_next_by_type(idx, (T.String.Symbol, T.Name))
while token:
- identifier_tokens = [token]+list(
+ identifier_tokens = [token] + list(
_consume_cycle(tlist,
- tlist.token_index(token)+1))
- group = tlist.group_tokens(Identifier, identifier_tokens)
- idx = tlist.token_index(group)+1
- token = tlist.token_next_by_type(idx, (T.String.Symbol, T.Name))
+ tlist.token_index(token) + 1))
+ if not (len(identifier_tokens) == 1
+ and isinstance(identifier_tokens[0], sql.Function)):
+ group = tlist.group_tokens(sql.Identifier, identifier_tokens)
+ idx = tlist.token_index(group) + 1
+ else:
+ idx += 1
+ token = tlist.token_next_by_instance(idx, sql.Function)
+ if token is None:
+ token = tlist.token_next_by_type(idx, (T.String.Symbol, T.Name))
def group_identifier_list(tlist):
[group_identifier_list(sgroup) for sgroup in tlist.get_sublists()
- if not isinstance(sgroup, (Identifier, IdentifierList))]
+ if not isinstance(sgroup, sql.IdentifierList)]
idx = 0
# Allowed list items
- fend1_funcs = [lambda t: isinstance(t, Identifier),
+ fend1_funcs = [lambda t: isinstance(t, (sql.Identifier, sql.Function)),
lambda t: t.is_whitespace(),
+ lambda t: t.ttype == T.Name,
lambda t: t.ttype == T.Wildcard,
lambda t: t.match(T.Keyword, 'null'),
lambda t: t.ttype == T.Number.Integer,
lambda t: t.ttype == T.String.Single,
- lambda t: isinstance(t, Comparsion),
+ lambda t: isinstance(t, sql.Comparison),
]
tcomma = tlist.token_next_match(idx, T.Punctuation, ',')
start = None
@@ -156,7 +197,7 @@ def group_identifier_list(tlist):
if not bpassed or not apassed:
# Something's wrong here, skip ahead to next ","
start = None
- tcomma = tlist.token_next_match(tlist.token_index(tcomma)+1,
+ tcomma = tlist.token_next_match(tlist.token_index(tcomma) + 1,
T.Punctuation, ',')
else:
if start is None:
@@ -165,25 +206,27 @@ def group_identifier_list(tlist):
if next_ is None or not next_.match(T.Punctuation, ','):
# Reached the end of the list
tokens = tlist.tokens_between(start, after)
- group = tlist.group_tokens(IdentifierList, tokens)
+ group = tlist.group_tokens(sql.IdentifierList, tokens)
start = None
- tcomma = tlist.token_next_match(tlist.token_index(group)+1,
+ tcomma = tlist.token_next_match(tlist.token_index(group) + 1,
T.Punctuation, ',')
else:
tcomma = next_
def group_parenthesis(tlist):
- _group_matching(tlist, T.Punctuation, '(', T.Punctuation, ')', Parenthesis)
+ _group_matching(tlist, T.Punctuation, '(', T.Punctuation, ')',
+ sql.Parenthesis)
+
def group_comments(tlist):
[group_comments(sgroup) for sgroup in tlist.get_sublists()
- if not isinstance(sgroup, Comment)]
+ if not isinstance(sgroup, sql.Comment)]
idx = 0
token = tlist.token_next_by_type(idx, T.Comment)
while token:
tidx = tlist.token_index(token)
- end = tlist.token_not_matching(tidx+1,
+ end = tlist.token_not_matching(tidx + 1,
[lambda t: t.ttype in T.Comment,
lambda t: t.is_whitespace()])
if end is None:
@@ -192,49 +235,70 @@ def group_comments(tlist):
eidx = tlist.token_index(end)
grp_tokens = tlist.tokens_between(token,
tlist.token_prev(eidx, False))
- group = tlist.group_tokens(Comment, grp_tokens)
+ group = tlist.group_tokens(sql.Comment, grp_tokens)
idx = tlist.token_index(group)
token = tlist.token_next_by_type(idx, T.Comment)
+
def group_where(tlist):
[group_where(sgroup) for sgroup in tlist.get_sublists()
- if not isinstance(sgroup, Where)]
+ if not isinstance(sgroup, sql.Where)]
idx = 0
token = tlist.token_next_match(idx, T.Keyword, 'WHERE')
stopwords = ('ORDER', 'GROUP', 'LIMIT', 'UNION')
while token:
tidx = tlist.token_index(token)
- end = tlist.token_next_match(tidx+1, T.Keyword, stopwords)
+ end = tlist.token_next_match(tidx + 1, T.Keyword, stopwords)
if end is None:
- end = tlist.tokens[-1]
+ end = tlist._groupable_tokens[-1]
else:
- end = tlist.tokens[tlist.token_index(end)-1]
- group = tlist.group_tokens(Where, tlist.tokens_between(token, end))
+ end = tlist.tokens[tlist.token_index(end) - 1]
+ group = tlist.group_tokens(sql.Where,
+ tlist.tokens_between(token, end),
+ ignore_ws=True)
idx = tlist.token_index(group)
token = tlist.token_next_match(idx, T.Keyword, 'WHERE')
+
def group_aliased(tlist):
[group_aliased(sgroup) for sgroup in tlist.get_sublists()
- if not isinstance(sgroup, Identifier)]
+ if not isinstance(sgroup, (sql.Identifier, sql.Function))]
idx = 0
- token = tlist.token_next_by_instance(idx, Identifier)
+ token = tlist.token_next_by_instance(idx, (sql.Identifier, sql.Function))
while token:
next_ = tlist.token_next(tlist.token_index(token))
- if next_ is not None and isinstance(next_, Identifier):
+ if next_ is not None and isinstance(next_, (sql.Identifier, sql.Function)):
grp = tlist.tokens_between(token, next_)[1:]
token.tokens.extend(grp)
for t in grp:
tlist.tokens.remove(t)
- idx = tlist.token_index(token)+1
- token = tlist.token_next_by_instance(idx, Identifier)
+ idx = tlist.token_index(token) + 1
+ token = tlist.token_next_by_instance(idx, (sql.Identifier, sql.Function))
def group_typecasts(tlist):
- _group_left_right(tlist, T.Punctuation, '::', Identifier)
+ _group_left_right(tlist, T.Punctuation, '::', sql.Identifier)
+
+
+def group_functions(tlist):
+ [group_functions(sgroup) for sgroup in tlist.get_sublists()
+ if not isinstance(sgroup, sql.Function)]
+ idx = 0
+ token = tlist.token_next_by_type(idx, T.Name)
+ while token:
+ next_ = tlist.token_next(token)
+ if not isinstance(next_, sql.Parenthesis):
+ idx = tlist.token_index(token) + 1
+ else:
+ func = tlist.group_tokens(sql.Function,
+ tlist.tokens_between(token, next_))
+ idx = tlist.token_index(func) + 1
+ token = tlist.token_next_by_type(idx, T.Name)
def group(tlist):
for func in [group_parenthesis,
+ group_functions,
group_comments,
group_where,
group_case,
@@ -243,8 +307,8 @@ def group(tlist):
group_as,
group_aliased,
group_assignment,
- group_comparsion,
+ group_comparison,
group_identifier_list,
group_if,
- group_for,]:
+ group_for]:
func(tlist)
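
Note: a rough sketch of what the reworked grouping passes (group_functions, group_comparison, and the sql.* classes) produce on a parsed statement; the exact tree shape varies by input.

    import sqlparse
    from sqlparse import sql

    stmt = sqlparse.parse("SELECT count(*) FROM foo WHERE bar = 1")[0]
    for tok in stmt.tokens:
        print("%s %r" % (type(tok).__name__, tok.to_unicode()))
    # count(*) comes back wrapped in a sql.Function node, and "bar = 1" is
    # grouped into a sql.Comparison inside the sql.Where clause.
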
diff --git a/debug_toolbar/utils/sqlparse/filters.py b/debug_toolbar/utils/sqlparse/filters.py
index 3c92791..2d247e7 100644
--- a/debug_toolbar/utils/sqlparse/filters.py
+++ b/debug_toolbar/utils/sqlparse/filters.py
@@ -2,7 +2,6 @@
import re
-from debug_toolbar.utils.sqlparse.engine import grouping
from debug_toolbar.utils.sqlparse import tokens as T
from debug_toolbar.utils.sqlparse import sql
@@ -19,34 +18,6 @@ class TokenFilter(Filter):
raise NotImplementedError
-# FIXME: Should be removed
-def rstrip(stream):
- buff = []
- for token in stream:
- if token.is_whitespace() and '\n' in token.value:
- # assuming there's only one \n in value
- before, rest = token.value.split('\n', 1)
- token.value = '\n%s' % rest
- buff = []
- yield token
- elif token.is_whitespace():
- buff.append(token)
- elif token.is_group():
- token.tokens = list(rstrip(token.tokens))
- # process group and look if it starts with a nl
- if token.tokens and token.tokens[0].is_whitespace():
- before, rest = token.tokens[0].value.split('\n', 1)
- token.tokens[0].value = '\n%s' % rest
- buff = []
- while buff:
- yield buff.pop(0)
- yield token
- else:
- while buff:
- yield buff.pop(0)
- yield token
-
-
# --------------------------
# token process
@@ -74,17 +45,28 @@ class KeywordCaseFilter(_CaseFilter):
class IdentifierCaseFilter(_CaseFilter):
ttype = (T.Name, T.String.Symbol)
+ def process(self, stack, stream):
+ for ttype, value in stream:
+ if ttype in self.ttype and not value.strip()[0] == '"':
+ value = self.convert(value)
+ yield ttype, value
+
# ----------------------
# statement process
class StripCommentsFilter(Filter):
+ def _get_next_comment(self, tlist):
+ # TODO(andi) Comment types should be unified, see related issue38
+ token = tlist.token_next_by_instance(0, sql.Comment)
+ if token is None:
+ token = tlist.token_next_by_type(0, T.Comment)
+ return token
+
def _process(self, tlist):
- idx = 0
- clss = set([x.__class__ for x in tlist.tokens])
- while grouping.Comment in clss:
- token = tlist.token_next_by_instance(0, grouping.Comment)
+ token = self._get_next_comment(tlist)
+ while token:
tidx = tlist.token_index(token)
prev = tlist.token_prev(tidx, False)
next_ = tlist.token_next(tidx, False)
@@ -94,10 +76,10 @@ class StripCommentsFilter(Filter):
and not prev.is_whitespace() and not next_.is_whitespace()
and not (prev.match(T.Punctuation, '(')
or next_.match(T.Punctuation, ')'))):
- tlist.tokens[tidx] = grouping.Token(T.Whitespace, ' ')
+ tlist.tokens[tidx] = sql.Token(T.Whitespace, ' ')
else:
tlist.tokens.pop(tidx)
- clss = set([x.__class__ for x in tlist.tokens])
+ token = self._get_next_comment(tlist)
def process(self, stack, stmt):
[self.process(stack, sgroup) for sgroup in stmt.get_sublists()]
@@ -149,24 +131,32 @@ class ReindentFilter(Filter):
def _get_offset(self, token):
all_ = list(self._curr_stmt.flatten())
idx = all_.index(token)
- raw = ''.join(unicode(x) for x in all_[:idx+1])
+ raw = ''.join(unicode(x) for x in all_[:idx + 1])
line = raw.splitlines()[-1]
# Now take current offset into account and return relative offset.
- full_offset = len(line)-(len(self.char*(self.width*self.indent)))
+ full_offset = len(line) - len(self.char * (self.width * self.indent))
return full_offset - self.offset
def nl(self):
# TODO: newline character should be configurable
- ws = '\n'+(self.char*((self.indent*self.width)+self.offset))
- return grouping.Token(T.Whitespace, ws)
+ ws = '\n' + (self.char * ((self.indent * self.width) + self.offset))
+ return sql.Token(T.Whitespace, ws)
def _split_kwds(self, tlist):
split_words = ('FROM', 'JOIN$', 'AND', 'OR',
'GROUP', 'ORDER', 'UNION', 'VALUES',
- 'SET')
- idx = 0
- token = tlist.token_next_match(idx, T.Keyword, split_words,
+ 'SET', 'BETWEEN')
+ def _next_token(i):
+ t = tlist.token_next_match(i, T.Keyword, split_words,
regex=True)
+ if t and t.value.upper() == 'BETWEEN':
+ t = _next_token(tlist.token_index(t)+1)
+ if t and t.value.upper() == 'AND':
+ t = _next_token(tlist.token_index(t)+1)
+ return t
+
+ idx = 0
+ token = _next_token(idx)
while token:
prev = tlist.token_prev(tlist.token_index(token), False)
offset = 1
@@ -181,8 +171,7 @@ class ReindentFilter(Filter):
else:
nl = self.nl()
tlist.insert_before(token, nl)
- token = tlist.token_next_match(tlist.token_index(nl)+offset,
- T.Keyword, split_words, regex=True)
+ token = _next_token(tlist.token_index(nl) + offset)
def _split_statements(self, tlist):
idx = 0
@@ -195,7 +184,7 @@ class ReindentFilter(Filter):
if prev:
nl = self.nl()
tlist.insert_before(token, nl)
- token = tlist.token_next_by_type(tlist.token_index(token)+1,
+ token = tlist.token_next_by_type(tlist.token_index(token) + 1,
(T.Keyword.DDL, T.Keyword.DML))
def _process(self, tlist):
@@ -227,9 +216,9 @@ class ReindentFilter(Filter):
def _process_identifierlist(self, tlist):
identifiers = tlist.get_identifiers()
- if len(identifiers) > 1:
+ if len(identifiers) > 1 and not tlist.within(sql.Function):
first = list(identifiers[0].flatten())[0]
- num_offset = self._get_offset(first)-len(first.value)
+ num_offset = self._get_offset(first) - len(first.value)
self.offset += num_offset
for token in identifiers[1:]:
tlist.insert_before(token, self.nl())
@@ -237,16 +226,16 @@ class ReindentFilter(Filter):
self._process_default(tlist)
def _process_case(self, tlist):
- cases = tlist.get_cases()
is_first = True
num_offset = None
case = tlist.tokens[0]
- outer_offset = self._get_offset(case)-len(case.value)
+ outer_offset = self._get_offset(case) - len(case.value)
self.offset += outer_offset
for cond, value in tlist.get_cases():
if is_first:
+ tcond = list(cond[0].flatten())[0]
is_first = False
- num_offset = self._get_offset(cond[0])-len(cond[0].value)
+ num_offset = self._get_offset(tcond) - len(tcond.value)
self.offset += num_offset
continue
if cond is None:
@@ -273,17 +262,17 @@ class ReindentFilter(Filter):
[self._process(sgroup) for sgroup in tlist.get_sublists()]
def process(self, stack, stmt):
- if isinstance(stmt, grouping.Statement):
+ if isinstance(stmt, sql.Statement):
self._curr_stmt = stmt
self._process(stmt)
- if isinstance(stmt, grouping.Statement):
+ if isinstance(stmt, sql.Statement):
if self._last_stmt is not None:
if self._last_stmt.to_unicode().endswith('\n'):
nl = '\n'
else:
nl = '\n\n'
stmt.tokens.insert(0,
- grouping.Token(T.Whitespace, nl))
+ sql.Token(T.Whitespace, nl))
if self._last_stmt != stmt:
self._last_stmt = stmt
@@ -292,7 +281,7 @@ class ReindentFilter(Filter):
class RightMarginFilter(Filter):
keep_together = (
-# grouping.TypeCast, grouping.Identifier, grouping.Alias,
+# sql.TypeCast, sql.Identifier, sql.Alias,
)
def __init__(self, width=79):
@@ -317,7 +306,7 @@ class RightMarginFilter(Filter):
indent = match.group()
else:
indent = ''
- yield grouping.Token(T.Whitespace, '\n%s' % indent)
+ yield sql.Token(T.Whitespace, '\n%s' % indent)
self.line = indent
self.line += val
yield token
@@ -349,14 +338,14 @@ class OutputPythonFilter(Filter):
def _process(self, stream, varname, count, has_nl):
if count > 1:
- yield grouping.Token(T.Whitespace, '\n')
- yield grouping.Token(T.Name, varname)
- yield grouping.Token(T.Whitespace, ' ')
- yield grouping.Token(T.Operator, '=')
- yield grouping.Token(T.Whitespace, ' ')
+ yield sql.Token(T.Whitespace, '\n')
+ yield sql.Token(T.Name, varname)
+ yield sql.Token(T.Whitespace, ' ')
+ yield sql.Token(T.Operator, '=')
+ yield sql.Token(T.Whitespace, ' ')
if has_nl:
- yield grouping.Token(T.Operator, '(')
- yield grouping.Token(T.Text, "'")
+ yield sql.Token(T.Operator, '(')
+ yield sql.Token(T.Text, "'")
cnt = 0
for token in stream:
cnt += 1
@@ -364,20 +353,20 @@ class OutputPythonFilter(Filter):
if cnt == 1:
continue
after_lb = token.value.split('\n', 1)[1]
- yield grouping.Token(T.Text, " '")
- yield grouping.Token(T.Whitespace, '\n')
- for i in range(len(varname)+4):
- yield grouping.Token(T.Whitespace, ' ')
- yield grouping.Token(T.Text, "'")
+ yield sql.Token(T.Text, " '")
+ yield sql.Token(T.Whitespace, '\n')
+ for i in range(len(varname) + 4):
+ yield sql.Token(T.Whitespace, ' ')
+ yield sql.Token(T.Text, "'")
if after_lb: # it's the indendation
- yield grouping.Token(T.Whitespace, after_lb)
+ yield sql.Token(T.Whitespace, after_lb)
continue
elif token.value and "'" in token.value:
token.value = token.value.replace("'", "\\'")
- yield grouping.Token(T.Text, token.value or '')
- yield grouping.Token(T.Text, "'")
+ yield sql.Token(T.Text, token.value or '')
+ yield sql.Token(T.Text, "'")
if has_nl:
- yield grouping.Token(T.Operator, ')')
+ yield sql.Token(T.Operator, ')')
def process(self, stack, stmt):
self.cnt += 1
@@ -398,36 +387,32 @@ class OutputPHPFilter(Filter):
def _process(self, stream, varname):
if self.count > 1:
- yield grouping.Token(T.Whitespace, '\n')
- yield grouping.Token(T.Name, varname)
- yield grouping.Token(T.Whitespace, ' ')
- yield grouping.Token(T.Operator, '=')
- yield grouping.Token(T.Whitespace, ' ')
- yield grouping.Token(T.Text, '"')
- cnt = 0
+ yield sql.Token(T.Whitespace, '\n')
+ yield sql.Token(T.Name, varname)
+ yield sql.Token(T.Whitespace, ' ')
+ yield sql.Token(T.Operator, '=')
+ yield sql.Token(T.Whitespace, ' ')
+ yield sql.Token(T.Text, '"')
for token in stream:
if token.is_whitespace() and '\n' in token.value:
-# cnt += 1
-# if cnt == 1:
-# continue
after_lb = token.value.split('\n', 1)[1]
- yield grouping.Token(T.Text, ' "')
- yield grouping.Token(T.Operator, ';')
- yield grouping.Token(T.Whitespace, '\n')
- yield grouping.Token(T.Name, varname)
- yield grouping.Token(T.Whitespace, ' ')
- yield grouping.Token(T.Punctuation, '.')
- yield grouping.Token(T.Operator, '=')
- yield grouping.Token(T.Whitespace, ' ')
- yield grouping.Token(T.Text, '"')
+ yield sql.Token(T.Text, ' "')
+ yield sql.Token(T.Operator, ';')
+ yield sql.Token(T.Whitespace, '\n')
+ yield sql.Token(T.Name, varname)
+ yield sql.Token(T.Whitespace, ' ')
+ yield sql.Token(T.Punctuation, '.')
+ yield sql.Token(T.Operator, '=')
+ yield sql.Token(T.Whitespace, ' ')
+ yield sql.Token(T.Text, '"')
if after_lb:
- yield grouping.Token(T.Text, after_lb)
+ yield sql.Token(T.Text, after_lb)
continue
elif '"' in token.value:
token.value = token.value.replace('"', '\\"')
- yield grouping.Token(T.Text, token.value)
- yield grouping.Token(T.Text, '"')
- yield grouping.Token(T.Punctuation, ';')
+ yield sql.Token(T.Text, token.value)
+ yield sql.Token(T.Text, '"')
+ yield sql.Token(T.Punctuation, ';')
def process(self, stack, stmt):
self.count += 1
@@ -437,4 +422,3 @@ class OutputPHPFilter(Filter):
varname = self.varname
stmt.tokens = tuple(self._process(stmt.tokens, varname))
return stmt
-
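
Note: the statement filters touched here are normally reached through sqlparse.format(); a hedged usage sketch:

    import sqlparse

    sql = "select id, name from users -- trailing comment\nwhere id = 1"
    # StripCommentsFilter and ReindentFilter:
    print(sqlparse.format(sql, strip_comments=True, reindent=True))
    # OutputPythonFilter (and OutputPHPFilter via output_format='php'):
    print(sqlparse.format("select * from foo;", output_format='python'))
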
diff --git a/debug_toolbar/utils/sqlparse/formatter.py b/debug_toolbar/utils/sqlparse/formatter.py
index 34e9fe0..3acece9 100644
--- a/debug_toolbar/utils/sqlparse/formatter.py
+++ b/debug_toolbar/utils/sqlparse/formatter.py
@@ -76,11 +76,11 @@ def build_filter_stack(stack, options):
options: Dictionary with options validated by validate_options.
"""
# Token filter
- if 'keyword_case' in options:
+ if options.get('keyword_case', None):
stack.preprocess.append(
filters.KeywordCaseFilter(options['keyword_case']))
- if 'identifier_case' in options:
+ if options.get('identifier_case', None):
stack.preprocess.append(
filters.IdentifierCaseFilter(options['identifier_case']))
@@ -118,5 +118,3 @@ def build_filter_stack(stack, options):
stack.postprocess.append(fltr)
return stack
-
-
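
Note: the options.get() change above treats a falsy option value like an absent one; a hedged sketch of the difference:

    import sqlparse

    # keyword_case=None previously still appended KeywordCaseFilter because
    # the key was present in options; with options.get() it is skipped.
    print(sqlparse.format("select * from foo", keyword_case=None))
    print(sqlparse.format("select * from foo", keyword_case='upper'))
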
diff --git a/debug_toolbar/utils/sqlparse/keywords.py b/debug_toolbar/utils/sqlparse/keywords.py
index cada139..4782cfe 100644
--- a/debug_toolbar/utils/sqlparse/keywords.py
+++ b/debug_toolbar/utils/sqlparse/keywords.py
@@ -1,590 +1,565 @@
-from debug_toolbar.utils.sqlparse.tokens import *
+from debug_toolbar.utils.sqlparse import tokens
KEYWORDS = {
- 'ABORT': Keyword,
- 'ABS': Keyword,
- 'ABSOLUTE': Keyword,
- 'ACCESS': Keyword,
- 'ADA': Keyword,
- 'ADD': Keyword,
- 'ADMIN': Keyword,
- 'AFTER': Keyword,
- 'AGGREGATE': Keyword,
- 'ALIAS': Keyword,
- 'ALL': Keyword,
- 'ALLOCATE': Keyword,
- 'ANALYSE': Keyword,
- 'ANALYZE': Keyword,
- 'AND': Keyword,
- 'ANY': Keyword,
- 'ARE': Keyword,
- 'AS': Keyword,
- 'ASC': Keyword,
- 'ASENSITIVE': Keyword,
- 'ASSERTION': Keyword,
- 'ASSIGNMENT': Keyword,
- 'ASYMMETRIC': Keyword,
- 'AT': Keyword,
- 'ATOMIC': Keyword,
- 'AUTHORIZATION': Keyword,
- 'AVG': Keyword,
-
- 'BACKWARD': Keyword,
- 'BEFORE': Keyword,
- 'BEGIN': Keyword,
- 'BETWEEN': Keyword,
- 'BITVAR': Keyword,
- 'BIT_LENGTH': Keyword,
- 'BOTH': Keyword,
- 'BREADTH': Keyword,
- 'BY': Keyword,
-
-# 'C': Keyword, # most likely this is an alias
- 'CACHE': Keyword,
- 'CALL': Keyword,
- 'CALLED': Keyword,
- 'CARDINALITY': Keyword,
- 'CASCADE': Keyword,
- 'CASCADED': Keyword,
- 'CASE': Keyword,
- 'CAST': Keyword,
- 'CATALOG': Keyword,
- 'CATALOG_NAME': Keyword,
- 'CHAIN': Keyword,
- 'CHARACTERISTICS': Keyword,
- 'CHARACTER_LENGTH': Keyword,
- 'CHARACTER_SET_CATALOG': Keyword,
- 'CHARACTER_SET_NAME': Keyword,
- 'CHARACTER_SET_SCHEMA': Keyword,
- 'CHAR_LENGTH': Keyword,
- 'CHECK': Keyword,
- 'CHECKED': Keyword,
- 'CHECKPOINT': Keyword,
- 'CLASS': Keyword,
- 'CLASS_ORIGIN': Keyword,
- 'CLOB': Keyword,
- 'CLOSE': Keyword,
- 'CLUSTER': Keyword,
- 'COALSECE': Keyword,
- 'COBOL': Keyword,
- 'COLLATE': Keyword,
- 'COLLATION': Keyword,
- 'COLLATION_CATALOG': Keyword,
- 'COLLATION_NAME': Keyword,
- 'COLLATION_SCHEMA': Keyword,
- 'COLUMN': Keyword,
- 'COLUMN_NAME': Keyword,
- 'COMMAND_FUNCTION': Keyword,
- 'COMMAND_FUNCTION_CODE': Keyword,
- 'COMMENT': Keyword,
- 'COMMIT': Keyword,
- 'COMMITTED': Keyword,
- 'COMPLETION': Keyword,
- 'CONDITION_NUMBER': Keyword,
- 'CONNECT': Keyword,
- 'CONNECTION': Keyword,
- 'CONNECTION_NAME': Keyword,
- 'CONSTRAINT': Keyword,
- 'CONSTRAINTS': Keyword,
- 'CONSTRAINT_CATALOG': Keyword,
- 'CONSTRAINT_NAME': Keyword,
- 'CONSTRAINT_SCHEMA': Keyword,
- 'CONSTRUCTOR': Keyword,
- 'CONTAINS': Keyword,
- 'CONTINUE': Keyword,
- 'CONVERSION': Keyword,
- 'CONVERT': Keyword,
- 'COPY': Keyword,
- 'CORRESPONTING': Keyword,
- 'COUNT': Keyword,
- 'CREATEDB': Keyword,
- 'CREATEUSER': Keyword,
- 'CROSS': Keyword,
- 'CUBE': Keyword,
- 'CURRENT': Keyword,
- 'CURRENT_DATE': Keyword,
- 'CURRENT_PATH': Keyword,
- 'CURRENT_ROLE': Keyword,
- 'CURRENT_TIME': Keyword,
- 'CURRENT_TIMESTAMP': Keyword,
- 'CURRENT_USER': Keyword,
- 'CURSOR': Keyword,
- 'CURSOR_NAME': Keyword,
- 'CYCLE': Keyword,
-
- 'DATA': Keyword,
- 'DATABASE': Keyword,
- 'DATETIME_INTERVAL_CODE': Keyword,
- 'DATETIME_INTERVAL_PRECISION': Keyword,
- 'DAY': Keyword,
- 'DEALLOCATE': Keyword,
- 'DECLARE': Keyword,
- 'DEFAULT': Keyword,
- 'DEFAULTS': Keyword,
- 'DEFERRABLE': Keyword,
- 'DEFERRED': Keyword,
- 'DEFINED': Keyword,
- 'DEFINER': Keyword,
- 'DELIMITER': Keyword,
- 'DELIMITERS': Keyword,
- 'DEREF': Keyword,
- 'DESC': Keyword,
- 'DESCRIBE': Keyword,
- 'DESCRIPTOR': Keyword,
- 'DESTROY': Keyword,
- 'DESTRUCTOR': Keyword,
- 'DETERMINISTIC': Keyword,
- 'DIAGNOSTICS': Keyword,
- 'DICTIONARY': Keyword,
- 'DISCONNECT': Keyword,
- 'DISPATCH': Keyword,
- 'DISTINCT': Keyword,
- 'DO': Keyword,
- 'DOMAIN': Keyword,
- 'DYNAMIC': Keyword,
- 'DYNAMIC_FUNCTION': Keyword,
- 'DYNAMIC_FUNCTION_CODE': Keyword,
-
- 'EACH': Keyword,
- 'ELSE': Keyword,
- 'ENCODING': Keyword,
- 'ENCRYPTED': Keyword,
- 'END': Keyword,
- 'END-EXEC': Keyword,
- 'EQUALS': Keyword,
- 'ESCAPE': Keyword,
- 'EVERY': Keyword,
- 'EXCEPT': Keyword,
- 'ESCEPTION': Keyword,
- 'EXCLUDING': Keyword,
- 'EXCLUSIVE': Keyword,
- 'EXEC': Keyword,
- 'EXECUTE': Keyword,
- 'EXISTING': Keyword,
- 'EXISTS': Keyword,
- 'EXTERNAL': Keyword,
- 'EXTRACT': Keyword,
-
- 'FALSE': Keyword,
- 'FETCH': Keyword,
- 'FINAL': Keyword,
- 'FIRST': Keyword,
- 'FOR': Keyword,
- 'FORCE': Keyword,
- 'FOREIGN': Keyword,
- 'FORTRAN': Keyword,
- 'FORWARD': Keyword,
- 'FOUND': Keyword,
- 'FREE': Keyword,
- 'FREEZE': Keyword,
- 'FROM': Keyword,
- 'FULL': Keyword,
- 'FUNCTION': Keyword,
-
- 'G': Keyword,
- 'GENERAL': Keyword,
- 'GENERATED': Keyword,
- 'GET': Keyword,
- 'GLOBAL': Keyword,
- 'GO': Keyword,
- 'GOTO': Keyword,
- 'GRANT': Keyword,
- 'GRANTED': Keyword,
- 'GROUP': Keyword,
- 'GROUPING': Keyword,
-
- 'HANDLER': Keyword,
- 'HAVING': Keyword,
- 'HIERARCHY': Keyword,
- 'HOLD': Keyword,
- 'HOST': Keyword,
-
- 'IDENTITY': Keyword,
- 'IF': Keyword,
- 'IGNORE': Keyword,
- 'ILIKE': Keyword,
- 'IMMEDIATE': Keyword,
- 'IMMUTABLE': Keyword,
-
- 'IMPLEMENTATION': Keyword,
- 'IMPLICIT': Keyword,
- 'IN': Keyword,
- 'INCLUDING': Keyword,
- 'INCREMENT': Keyword,
- 'INDEX': Keyword,
-
- 'INDITCATOR': Keyword,
- 'INFIX': Keyword,
- 'INHERITS': Keyword,
- 'INITIALIZE': Keyword,
- 'INITIALLY': Keyword,
- 'INNER': Keyword,
- 'INOUT': Keyword,
- 'INPUT': Keyword,
- 'INSENSITIVE': Keyword,
- 'INSTANTIABLE': Keyword,
- 'INSTEAD': Keyword,
- 'INTERSECT': Keyword,
- 'INTO': Keyword,
- 'INVOKER': Keyword,
- 'IS': Keyword,
- 'ISNULL': Keyword,
- 'ISOLATION': Keyword,
- 'ITERATE': Keyword,
-
- 'JOIN': Keyword,
-
- 'K': Keyword,
- 'KEY': Keyword,
- 'KEY_MEMBER': Keyword,
- 'KEY_TYPE': Keyword,
-
- 'LANCOMPILER': Keyword,
- 'LANGUAGE': Keyword,
- 'LARGE': Keyword,
- 'LAST': Keyword,
- 'LATERAL': Keyword,
- 'LEADING': Keyword,
- 'LEFT': Keyword,
- 'LENGTH': Keyword,
- 'LESS': Keyword,
- 'LEVEL': Keyword,
- 'LIKE': Keyword,
- 'LIMIT': Keyword,
- 'LISTEN': Keyword,
- 'LOAD': Keyword,
- 'LOCAL': Keyword,
- 'LOCALTIME': Keyword,
- 'LOCALTIMESTAMP': Keyword,
- 'LOCATION': Keyword,
- 'LOCATOR': Keyword,
- 'LOCK': Keyword,
- 'LOWER': Keyword,
-
- 'M': Keyword,
- 'MAP': Keyword,
- 'MATCH': Keyword,
- 'MAX': Keyword,
- 'MAXVALUE': Keyword,
- 'MESSAGE_LENGTH': Keyword,
- 'MESSAGE_OCTET_LENGTH': Keyword,
- 'MESSAGE_TEXT': Keyword,
- 'METHOD': Keyword,
- 'MIN': Keyword,
- 'MINUTE': Keyword,
- 'MINVALUE': Keyword,
- 'MOD': Keyword,
- 'MODE': Keyword,
- 'MODIFIES': Keyword,
- 'MODIFY': Keyword,
- 'MONTH': Keyword,
- 'MORE': Keyword,
- 'MOVE': Keyword,
- 'MUMPS': Keyword,
-
- 'NAMES': Keyword,
- 'NATIONAL': Keyword,
- 'NATURAL': Keyword,
- 'NCHAR': Keyword,
- 'NCLOB': Keyword,
- 'NEW': Keyword,
- 'NEXT': Keyword,
- 'NO': Keyword,
- 'NOCREATEDB': Keyword,
- 'NOCREATEUSER': Keyword,
- 'NONE': Keyword,
- 'NOT': Keyword,
- 'NOTHING': Keyword,
- 'NOTIFY': Keyword,
- 'NOTNULL': Keyword,
- 'NULL': Keyword,
- 'NULLABLE': Keyword,
- 'NULLIF': Keyword,
-
- 'OBJECT': Keyword,
- 'OCTET_LENGTH': Keyword,
- 'OF': Keyword,
- 'OFF': Keyword,
- 'OFFSET': Keyword,
- 'OIDS': Keyword,
- 'OLD': Keyword,
- 'ON': Keyword,
- 'ONLY': Keyword,
- 'OPEN': Keyword,
- 'OPERATION': Keyword,
- 'OPERATOR': Keyword,
- 'OPTION': Keyword,
- 'OPTIONS': Keyword,
- 'OR': Keyword,
- 'ORDER': Keyword,
- 'ORDINALITY': Keyword,
- 'OUT': Keyword,
- 'OUTER': Keyword,
- 'OUTPUT': Keyword,
- 'OVERLAPS': Keyword,
- 'OVERLAY': Keyword,
- 'OVERRIDING': Keyword,
- 'OWNER': Keyword,
-
- 'PAD': Keyword,
- 'PARAMETER': Keyword,
- 'PARAMETERS': Keyword,
- 'PARAMETER_MODE': Keyword,
- 'PARAMATER_NAME': Keyword,
- 'PARAMATER_ORDINAL_POSITION': Keyword,
- 'PARAMETER_SPECIFIC_CATALOG': Keyword,
- 'PARAMETER_SPECIFIC_NAME': Keyword,
- 'PARAMATER_SPECIFIC_SCHEMA': Keyword,
- 'PARTIAL': Keyword,
- 'PASCAL': Keyword,
- 'PENDANT': Keyword,
- 'PLACING': Keyword,
- 'PLI': Keyword,
- 'POSITION': Keyword,
- 'POSTFIX': Keyword,
- 'PRECISION': Keyword,
- 'PREFIX': Keyword,
- 'PREORDER': Keyword,
- 'PREPARE': Keyword,
- 'PRESERVE': Keyword,
- 'PRIMARY': Keyword,
- 'PRIOR': Keyword,
- 'PRIVILEGES': Keyword,
- 'PROCEDURAL': Keyword,
- 'PROCEDURE': Keyword,
- 'PUBLIC': Keyword,
-
- 'RAISE': Keyword,
- 'READ': Keyword,
- 'READS': Keyword,
- 'RECHECK': Keyword,
- 'RECURSIVE': Keyword,
- 'REF': Keyword,
- 'REFERENCES': Keyword,
- 'REFERENCING': Keyword,
- 'REINDEX': Keyword,
- 'RELATIVE': Keyword,
- 'RENAME': Keyword,
- 'REPEATABLE': Keyword,
- 'REPLACE': Keyword,
- 'RESET': Keyword,
- 'RESTART': Keyword,
- 'RESTRICT': Keyword,
- 'RESULT': Keyword,
- 'RETURN': Keyword,
- 'RETURNED_LENGTH': Keyword,
- 'RETURNED_OCTET_LENGTH': Keyword,
- 'RETURNED_SQLSTATE': Keyword,
- 'RETURNS': Keyword,
- 'REVOKE': Keyword,
- 'RIGHT': Keyword,
- 'ROLE': Keyword,
- 'ROLLBACK': Keyword,
- 'ROLLUP': Keyword,
- 'ROUTINE': Keyword,
- 'ROUTINE_CATALOG': Keyword,
- 'ROUTINE_NAME': Keyword,
- 'ROUTINE_SCHEMA': Keyword,
- 'ROW': Keyword,
- 'ROWS': Keyword,
- 'ROW_COUNT': Keyword,
- 'RULE': Keyword,
-
- 'SAVE_POINT': Keyword,
- 'SCALE': Keyword,
- 'SCHEMA': Keyword,
- 'SCHEMA_NAME': Keyword,
- 'SCOPE': Keyword,
- 'SCROLL': Keyword,
- 'SEARCH': Keyword,
- 'SECOND': Keyword,
- 'SECURITY': Keyword,
- 'SELF': Keyword,
- 'SENSITIVE': Keyword,
- 'SERIALIZABLE': Keyword,
- 'SERVER_NAME': Keyword,
- 'SESSION': Keyword,
- 'SESSION_USER': Keyword,
- 'SETOF': Keyword,
- 'SETS': Keyword,
- 'SHARE': Keyword,
- 'SHOW': Keyword,
- 'SIMILAR': Keyword,
- 'SIMPLE': Keyword,
- 'SIZE': Keyword,
- 'SOME': Keyword,
- 'SOURCE': Keyword,
- 'SPACE': Keyword,
- 'SPECIFIC': Keyword,
- 'SPECIFICTYPE': Keyword,
- 'SPECIFIC_NAME': Keyword,
- 'SQL': Keyword,
- 'SQLCODE': Keyword,
- 'SQLERROR': Keyword,
- 'SQLEXCEPTION': Keyword,
- 'SQLSTATE': Keyword,
- 'SQLWARNINIG': Keyword,
- 'STABLE': Keyword,
- 'START': Keyword,
- 'STATE': Keyword,
- 'STATEMENT': Keyword,
- 'STATIC': Keyword,
- 'STATISTICS': Keyword,
- 'STDIN': Keyword,
- 'STDOUT': Keyword,
- 'STORAGE': Keyword,
- 'STRICT': Keyword,
- 'STRUCTURE': Keyword,
- 'STYPE': Keyword,
- 'SUBCLASS_ORIGIN': Keyword,
- 'SUBLIST': Keyword,
- 'SUBSTRING': Keyword,
- 'SUM': Keyword,
- 'SYMMETRIC': Keyword,
- 'SYSID': Keyword,
- 'SYSTEM': Keyword,
- 'SYSTEM_USER': Keyword,
-
- 'TABLE': Keyword,
- 'TABLE_NAME': Keyword,
- ' TEMP': Keyword,
- 'TEMPLATE': Keyword,
- 'TEMPORARY': Keyword,
- 'TERMINATE': Keyword,
- 'THAN': Keyword,
- 'THEN': Keyword,
- 'TIMESTAMP': Keyword,
- 'TIMEZONE_HOUR': Keyword,
- 'TIMEZONE_MINUTE': Keyword,
- 'TO': Keyword,
- 'TOAST': Keyword,
- 'TRAILING': Keyword,
- 'TRANSATION': Keyword,
- 'TRANSACTIONS_COMMITTED': Keyword,
- 'TRANSACTIONS_ROLLED_BACK': Keyword,
- 'TRANSATION_ACTIVE': Keyword,
- 'TRANSFORM': Keyword,
- 'TRANSFORMS': Keyword,
- 'TRANSLATE': Keyword,
- 'TRANSLATION': Keyword,
- 'TREAT': Keyword,
- 'TRIGGER': Keyword,
- 'TRIGGER_CATALOG': Keyword,
- 'TRIGGER_NAME': Keyword,
- 'TRIGGER_SCHEMA': Keyword,
- 'TRIM': Keyword,
- 'TRUE': Keyword,
- 'TRUNCATE': Keyword,
- 'TRUSTED': Keyword,
- 'TYPE': Keyword,
-
- 'UNCOMMITTED': Keyword,
- 'UNDER': Keyword,
- 'UNENCRYPTED': Keyword,
- 'UNION': Keyword,
- 'UNIQUE': Keyword,
- 'UNKNOWN': Keyword,
- 'UNLISTEN': Keyword,
- 'UNNAMED': Keyword,
- 'UNNEST': Keyword,
- 'UNTIL': Keyword,
- 'UPPER': Keyword,
- 'USAGE': Keyword,
- 'USER': Keyword,
- 'USER_DEFINED_TYPE_CATALOG': Keyword,
- 'USER_DEFINED_TYPE_NAME': Keyword,
- 'USER_DEFINED_TYPE_SCHEMA': Keyword,
- 'USING': Keyword,
-
- 'VACUUM': Keyword,
- 'VALID': Keyword,
- 'VALIDATOR': Keyword,
- 'VALUES': Keyword,
- 'VARIABLE': Keyword,
- 'VERBOSE': Keyword,
- 'VERSION': Keyword,
- 'VIEW': Keyword,
- 'VOLATILE': Keyword,
-
- 'WHEN': Keyword,
- 'WHENEVER': Keyword,
- 'WHERE': Keyword,
- 'WITH': Keyword,
- 'WITHOUT': Keyword,
- 'WORK': Keyword,
- 'WRITE': Keyword,
-
- 'YEAR': Keyword,
-
- 'ZONE': Keyword,
-
-
- 'ARRAY': Name.Builtin,
- 'BIGINT': Name.Builtin,
- 'BINARY': Name.Builtin,
- 'BIT': Name.Builtin,
- 'BLOB': Name.Builtin,
- 'BOOLEAN': Name.Builtin,
- 'CHAR': Name.Builtin,
- 'CHARACTER': Name.Builtin,
- 'DATE': Name.Builtin,
- 'DEC': Name.Builtin,
- 'DECIMAL': Name.Builtin,
- 'FLOAT': Name.Builtin,
- 'INT': Name.Builtin,
- 'INTEGER': Name.Builtin,
- 'INTERVAL': Name.Builtin,
- 'NUMBER': Name.Builtin,
- 'NUMERIC': Name.Builtin,
- 'REAL': Name.Builtin,
- 'SERIAL': Name.Builtin,
- 'SMALLINT': Name.Builtin,
- 'VARCHAR': Name.Builtin,
- 'VARYING': Name.Builtin,
- 'INT8': Name.Builtin,
- 'SERIAL8': Name.Builtin,
- 'TEXT': Name.Builtin,
+ 'ABORT': tokens.Keyword,
+ 'ABS': tokens.Keyword,
+ 'ABSOLUTE': tokens.Keyword,
+ 'ACCESS': tokens.Keyword,
+ 'ADA': tokens.Keyword,
+ 'ADD': tokens.Keyword,
+ 'ADMIN': tokens.Keyword,
+ 'AFTER': tokens.Keyword,
+ 'AGGREGATE': tokens.Keyword,
+ 'ALIAS': tokens.Keyword,
+ 'ALL': tokens.Keyword,
+ 'ALLOCATE': tokens.Keyword,
+ 'ANALYSE': tokens.Keyword,
+ 'ANALYZE': tokens.Keyword,
+ 'ANY': tokens.Keyword,
+ 'ARE': tokens.Keyword,
+ 'ASC': tokens.Keyword,
+ 'ASENSITIVE': tokens.Keyword,
+ 'ASSERTION': tokens.Keyword,
+ 'ASSIGNMENT': tokens.Keyword,
+ 'ASYMMETRIC': tokens.Keyword,
+ 'AT': tokens.Keyword,
+ 'ATOMIC': tokens.Keyword,
+ 'AUTHORIZATION': tokens.Keyword,
+ 'AVG': tokens.Keyword,
+
+ 'BACKWARD': tokens.Keyword,
+ 'BEFORE': tokens.Keyword,
+ 'BEGIN': tokens.Keyword,
+ 'BETWEEN': tokens.Keyword,
+ 'BITVAR': tokens.Keyword,
+ 'BIT_LENGTH': tokens.Keyword,
+ 'BOTH': tokens.Keyword,
+ 'BREADTH': tokens.Keyword,
+
+# 'C': tokens.Keyword, # most likely this is an alias
+ 'CACHE': tokens.Keyword,
+ 'CALL': tokens.Keyword,
+ 'CALLED': tokens.Keyword,
+ 'CARDINALITY': tokens.Keyword,
+ 'CASCADE': tokens.Keyword,
+ 'CASCADED': tokens.Keyword,
+ 'CAST': tokens.Keyword,
+ 'CATALOG': tokens.Keyword,
+ 'CATALOG_NAME': tokens.Keyword,
+ 'CHAIN': tokens.Keyword,
+ 'CHARACTERISTICS': tokens.Keyword,
+ 'CHARACTER_LENGTH': tokens.Keyword,
+ 'CHARACTER_SET_CATALOG': tokens.Keyword,
+ 'CHARACTER_SET_NAME': tokens.Keyword,
+ 'CHARACTER_SET_SCHEMA': tokens.Keyword,
+ 'CHAR_LENGTH': tokens.Keyword,
+ 'CHECK': tokens.Keyword,
+ 'CHECKED': tokens.Keyword,
+ 'CHECKPOINT': tokens.Keyword,
+ 'CLASS': tokens.Keyword,
+ 'CLASS_ORIGIN': tokens.Keyword,
+ 'CLOB': tokens.Keyword,
+ 'CLOSE': tokens.Keyword,
+ 'CLUSTER': tokens.Keyword,
+ 'COALSECE': tokens.Keyword,
+ 'COBOL': tokens.Keyword,
+ 'COLLATE': tokens.Keyword,
+ 'COLLATION': tokens.Keyword,
+ 'COLLATION_CATALOG': tokens.Keyword,
+ 'COLLATION_NAME': tokens.Keyword,
+ 'COLLATION_SCHEMA': tokens.Keyword,
+ 'COLUMN': tokens.Keyword,
+ 'COLUMN_NAME': tokens.Keyword,
+ 'COMMAND_FUNCTION': tokens.Keyword,
+ 'COMMAND_FUNCTION_CODE': tokens.Keyword,
+ 'COMMENT': tokens.Keyword,
+ 'COMMIT': tokens.Keyword,
+ 'COMMITTED': tokens.Keyword,
+ 'COMPLETION': tokens.Keyword,
+ 'CONDITION_NUMBER': tokens.Keyword,
+ 'CONNECT': tokens.Keyword,
+ 'CONNECTION': tokens.Keyword,
+ 'CONNECTION_NAME': tokens.Keyword,
+ 'CONSTRAINT': tokens.Keyword,
+ 'CONSTRAINTS': tokens.Keyword,
+ 'CONSTRAINT_CATALOG': tokens.Keyword,
+ 'CONSTRAINT_NAME': tokens.Keyword,
+ 'CONSTRAINT_SCHEMA': tokens.Keyword,
+ 'CONSTRUCTOR': tokens.Keyword,
+ 'CONTAINS': tokens.Keyword,
+ 'CONTINUE': tokens.Keyword,
+ 'CONVERSION': tokens.Keyword,
+ 'CONVERT': tokens.Keyword,
+ 'COPY': tokens.Keyword,
+ 'CORRESPONTING': tokens.Keyword,
+ 'COUNT': tokens.Keyword,
+ 'CREATEDB': tokens.Keyword,
+ 'CREATEUSER': tokens.Keyword,
+ 'CROSS': tokens.Keyword,
+ 'CUBE': tokens.Keyword,
+ 'CURRENT': tokens.Keyword,
+ 'CURRENT_DATE': tokens.Keyword,
+ 'CURRENT_PATH': tokens.Keyword,
+ 'CURRENT_ROLE': tokens.Keyword,
+ 'CURRENT_TIME': tokens.Keyword,
+ 'CURRENT_TIMESTAMP': tokens.Keyword,
+ 'CURRENT_USER': tokens.Keyword,
+ 'CURSOR': tokens.Keyword,
+ 'CURSOR_NAME': tokens.Keyword,
+ 'CYCLE': tokens.Keyword,
+
+ 'DATA': tokens.Keyword,
+ 'DATABASE': tokens.Keyword,
+ 'DATETIME_INTERVAL_CODE': tokens.Keyword,
+ 'DATETIME_INTERVAL_PRECISION': tokens.Keyword,
+ 'DAY': tokens.Keyword,
+ 'DEALLOCATE': tokens.Keyword,
+ 'DECLARE': tokens.Keyword,
+ 'DEFAULT': tokens.Keyword,
+ 'DEFAULTS': tokens.Keyword,
+ 'DEFERRABLE': tokens.Keyword,
+ 'DEFERRED': tokens.Keyword,
+ 'DEFINED': tokens.Keyword,
+ 'DEFINER': tokens.Keyword,
+ 'DELIMITER': tokens.Keyword,
+ 'DELIMITERS': tokens.Keyword,
+ 'DEREF': tokens.Keyword,
+ 'DESC': tokens.Keyword,
+ 'DESCRIBE': tokens.Keyword,
+ 'DESCRIPTOR': tokens.Keyword,
+ 'DESTROY': tokens.Keyword,
+ 'DESTRUCTOR': tokens.Keyword,
+ 'DETERMINISTIC': tokens.Keyword,
+ 'DIAGNOSTICS': tokens.Keyword,
+ 'DICTIONARY': tokens.Keyword,
+ 'DISCONNECT': tokens.Keyword,
+ 'DISPATCH': tokens.Keyword,
+ 'DO': tokens.Keyword,
+ 'DOMAIN': tokens.Keyword,
+ 'DYNAMIC': tokens.Keyword,
+ 'DYNAMIC_FUNCTION': tokens.Keyword,
+ 'DYNAMIC_FUNCTION_CODE': tokens.Keyword,
+
+ 'EACH': tokens.Keyword,
+ 'ENCODING': tokens.Keyword,
+ 'ENCRYPTED': tokens.Keyword,
+ 'END-EXEC': tokens.Keyword,
+ 'EQUALS': tokens.Keyword,
+ 'ESCAPE': tokens.Keyword,
+ 'EVERY': tokens.Keyword,
+ 'EXCEPT': tokens.Keyword,
+ 'ESCEPTION': tokens.Keyword,
+ 'EXCLUDING': tokens.Keyword,
+ 'EXCLUSIVE': tokens.Keyword,
+ 'EXEC': tokens.Keyword,
+ 'EXECUTE': tokens.Keyword,
+ 'EXISTING': tokens.Keyword,
+ 'EXISTS': tokens.Keyword,
+ 'EXTERNAL': tokens.Keyword,
+ 'EXTRACT': tokens.Keyword,
+
+ 'FALSE': tokens.Keyword,
+ 'FETCH': tokens.Keyword,
+ 'FINAL': tokens.Keyword,
+ 'FIRST': tokens.Keyword,
+ 'FORCE': tokens.Keyword,
+ 'FOREIGN': tokens.Keyword,
+ 'FORTRAN': tokens.Keyword,
+ 'FORWARD': tokens.Keyword,
+ 'FOUND': tokens.Keyword,
+ 'FREE': tokens.Keyword,
+ 'FREEZE': tokens.Keyword,
+ 'FULL': tokens.Keyword,
+ 'FUNCTION': tokens.Keyword,
+
+# 'G': tokens.Keyword,
+ 'GENERAL': tokens.Keyword,
+ 'GENERATED': tokens.Keyword,
+ 'GET': tokens.Keyword,
+ 'GLOBAL': tokens.Keyword,
+ 'GO': tokens.Keyword,
+ 'GOTO': tokens.Keyword,
+ 'GRANT': tokens.Keyword,
+ 'GRANTED': tokens.Keyword,
+ 'GROUPING': tokens.Keyword,
+
+ 'HANDLER': tokens.Keyword,
+ 'HAVING': tokens.Keyword,
+ 'HIERARCHY': tokens.Keyword,
+ 'HOLD': tokens.Keyword,
+ 'HOST': tokens.Keyword,
+
+ 'IDENTITY': tokens.Keyword,
+ 'IGNORE': tokens.Keyword,
+ 'ILIKE': tokens.Keyword,
+ 'IMMEDIATE': tokens.Keyword,
+ 'IMMUTABLE': tokens.Keyword,
+
+ 'IMPLEMENTATION': tokens.Keyword,
+ 'IMPLICIT': tokens.Keyword,
+ 'INCLUDING': tokens.Keyword,
+ 'INCREMENT': tokens.Keyword,
+ 'INDEX': tokens.Keyword,
+
+ 'INDITCATOR': tokens.Keyword,
+ 'INFIX': tokens.Keyword,
+ 'INHERITS': tokens.Keyword,
+ 'INITIALIZE': tokens.Keyword,
+ 'INITIALLY': tokens.Keyword,
+ 'INOUT': tokens.Keyword,
+ 'INPUT': tokens.Keyword,
+ 'INSENSITIVE': tokens.Keyword,
+ 'INSTANTIABLE': tokens.Keyword,
+ 'INSTEAD': tokens.Keyword,
+ 'INTERSECT': tokens.Keyword,
+ 'INTO': tokens.Keyword,
+ 'INVOKER': tokens.Keyword,
+ 'IS': tokens.Keyword,
+ 'ISNULL': tokens.Keyword,
+ 'ISOLATION': tokens.Keyword,
+ 'ITERATE': tokens.Keyword,
+
+# 'K': tokens.Keyword,
+ 'KEY': tokens.Keyword,
+ 'KEY_MEMBER': tokens.Keyword,
+ 'KEY_TYPE': tokens.Keyword,
+
+ 'LANCOMPILER': tokens.Keyword,
+ 'LANGUAGE': tokens.Keyword,
+ 'LARGE': tokens.Keyword,
+ 'LAST': tokens.Keyword,
+ 'LATERAL': tokens.Keyword,
+ 'LEADING': tokens.Keyword,
+ 'LENGTH': tokens.Keyword,
+ 'LESS': tokens.Keyword,
+ 'LEVEL': tokens.Keyword,
+ 'LIMIT': tokens.Keyword,
+ 'LISTEN': tokens.Keyword,
+ 'LOAD': tokens.Keyword,
+ 'LOCAL': tokens.Keyword,
+ 'LOCALTIME': tokens.Keyword,
+ 'LOCALTIMESTAMP': tokens.Keyword,
+ 'LOCATION': tokens.Keyword,
+ 'LOCATOR': tokens.Keyword,
+ 'LOCK': tokens.Keyword,
+ 'LOWER': tokens.Keyword,
+
+# 'M': tokens.Keyword,
+ 'MAP': tokens.Keyword,
+ 'MATCH': tokens.Keyword,
+ 'MAXVALUE': tokens.Keyword,
+ 'MESSAGE_LENGTH': tokens.Keyword,
+ 'MESSAGE_OCTET_LENGTH': tokens.Keyword,
+ 'MESSAGE_TEXT': tokens.Keyword,
+ 'METHOD': tokens.Keyword,
+ 'MINUTE': tokens.Keyword,
+ 'MINVALUE': tokens.Keyword,
+ 'MOD': tokens.Keyword,
+ 'MODE': tokens.Keyword,
+ 'MODIFIES': tokens.Keyword,
+ 'MODIFY': tokens.Keyword,
+ 'MONTH': tokens.Keyword,
+ 'MORE': tokens.Keyword,
+ 'MOVE': tokens.Keyword,
+ 'MUMPS': tokens.Keyword,
+
+ 'NAMES': tokens.Keyword,
+ 'NATIONAL': tokens.Keyword,
+ 'NATURAL': tokens.Keyword,
+ 'NCHAR': tokens.Keyword,
+ 'NCLOB': tokens.Keyword,
+ 'NEW': tokens.Keyword,
+ 'NEXT': tokens.Keyword,
+ 'NO': tokens.Keyword,
+ 'NOCREATEDB': tokens.Keyword,
+ 'NOCREATEUSER': tokens.Keyword,
+ 'NONE': tokens.Keyword,
+ 'NOT': tokens.Keyword,
+ 'NOTHING': tokens.Keyword,
+ 'NOTIFY': tokens.Keyword,
+ 'NOTNULL': tokens.Keyword,
+ 'NULL': tokens.Keyword,
+ 'NULLABLE': tokens.Keyword,
+ 'NULLIF': tokens.Keyword,
+
+ 'OBJECT': tokens.Keyword,
+ 'OCTET_LENGTH': tokens.Keyword,
+ 'OF': tokens.Keyword,
+ 'OFF': tokens.Keyword,
+ 'OFFSET': tokens.Keyword,
+ 'OIDS': tokens.Keyword,
+ 'OLD': tokens.Keyword,
+ 'ONLY': tokens.Keyword,
+ 'OPEN': tokens.Keyword,
+ 'OPERATION': tokens.Keyword,
+ 'OPERATOR': tokens.Keyword,
+ 'OPTION': tokens.Keyword,
+ 'OPTIONS': tokens.Keyword,
+ 'ORDINALITY': tokens.Keyword,
+ 'OUT': tokens.Keyword,
+ 'OUTPUT': tokens.Keyword,
+ 'OVERLAPS': tokens.Keyword,
+ 'OVERLAY': tokens.Keyword,
+ 'OVERRIDING': tokens.Keyword,
+ 'OWNER': tokens.Keyword,
+
+ 'PAD': tokens.Keyword,
+ 'PARAMETER': tokens.Keyword,
+ 'PARAMETERS': tokens.Keyword,
+ 'PARAMETER_MODE': tokens.Keyword,
+ 'PARAMATER_NAME': tokens.Keyword,
+ 'PARAMATER_ORDINAL_POSITION': tokens.Keyword,
+ 'PARAMETER_SPECIFIC_CATALOG': tokens.Keyword,
+ 'PARAMETER_SPECIFIC_NAME': tokens.Keyword,
+ 'PARAMATER_SPECIFIC_SCHEMA': tokens.Keyword,
+ 'PARTIAL': tokens.Keyword,
+ 'PASCAL': tokens.Keyword,
+ 'PENDANT': tokens.Keyword,
+ 'PLACING': tokens.Keyword,
+ 'PLI': tokens.Keyword,
+ 'POSITION': tokens.Keyword,
+ 'POSTFIX': tokens.Keyword,
+ 'PRECISION': tokens.Keyword,
+ 'PREFIX': tokens.Keyword,
+ 'PREORDER': tokens.Keyword,
+ 'PREPARE': tokens.Keyword,
+ 'PRESERVE': tokens.Keyword,
+ 'PRIMARY': tokens.Keyword,
+ 'PRIOR': tokens.Keyword,
+ 'PRIVILEGES': tokens.Keyword,
+ 'PROCEDURAL': tokens.Keyword,
+ 'PROCEDURE': tokens.Keyword,
+ 'PUBLIC': tokens.Keyword,
+
+ 'RAISE': tokens.Keyword,
+ 'READ': tokens.Keyword,
+ 'READS': tokens.Keyword,
+ 'RECHECK': tokens.Keyword,
+ 'RECURSIVE': tokens.Keyword,
+ 'REF': tokens.Keyword,
+ 'REFERENCES': tokens.Keyword,
+ 'REFERENCING': tokens.Keyword,
+ 'REINDEX': tokens.Keyword,
+ 'RELATIVE': tokens.Keyword,
+ 'RENAME': tokens.Keyword,
+ 'REPEATABLE': tokens.Keyword,
+ 'RESET': tokens.Keyword,
+ 'RESTART': tokens.Keyword,
+ 'RESTRICT': tokens.Keyword,
+ 'RESULT': tokens.Keyword,
+ 'RETURN': tokens.Keyword,
+ 'RETURNED_LENGTH': tokens.Keyword,
+ 'RETURNED_OCTET_LENGTH': tokens.Keyword,
+ 'RETURNED_SQLSTATE': tokens.Keyword,
+ 'RETURNS': tokens.Keyword,
+ 'REVOKE': tokens.Keyword,
+ 'RIGHT': tokens.Keyword,
+ 'ROLE': tokens.Keyword,
+ 'ROLLBACK': tokens.Keyword,
+ 'ROLLUP': tokens.Keyword,
+ 'ROUTINE': tokens.Keyword,
+ 'ROUTINE_CATALOG': tokens.Keyword,
+ 'ROUTINE_NAME': tokens.Keyword,
+ 'ROUTINE_SCHEMA': tokens.Keyword,
+ 'ROW': tokens.Keyword,
+ 'ROWS': tokens.Keyword,
+ 'ROW_COUNT': tokens.Keyword,
+ 'RULE': tokens.Keyword,
+
+ 'SAVE_POINT': tokens.Keyword,
+ 'SCALE': tokens.Keyword,
+ 'SCHEMA': tokens.Keyword,
+ 'SCHEMA_NAME': tokens.Keyword,
+ 'SCOPE': tokens.Keyword,
+ 'SCROLL': tokens.Keyword,
+ 'SEARCH': tokens.Keyword,
+ 'SECOND': tokens.Keyword,
+ 'SECURITY': tokens.Keyword,
+ 'SELF': tokens.Keyword,
+ 'SENSITIVE': tokens.Keyword,
+ 'SERIALIZABLE': tokens.Keyword,
+ 'SERVER_NAME': tokens.Keyword,
+ 'SESSION': tokens.Keyword,
+ 'SESSION_USER': tokens.Keyword,
+ 'SETOF': tokens.Keyword,
+ 'SETS': tokens.Keyword,
+ 'SHARE': tokens.Keyword,
+ 'SHOW': tokens.Keyword,
+ 'SIMILAR': tokens.Keyword,
+ 'SIMPLE': tokens.Keyword,
+ 'SIZE': tokens.Keyword,
+ 'SOME': tokens.Keyword,
+ 'SOURCE': tokens.Keyword,
+ 'SPACE': tokens.Keyword,
+ 'SPECIFIC': tokens.Keyword,
+ 'SPECIFICTYPE': tokens.Keyword,
+ 'SPECIFIC_NAME': tokens.Keyword,
+ 'SQL': tokens.Keyword,
+ 'SQLCODE': tokens.Keyword,
+ 'SQLERROR': tokens.Keyword,
+ 'SQLEXCEPTION': tokens.Keyword,
+ 'SQLSTATE': tokens.Keyword,
+ 'SQLWARNING': tokens.Keyword,
+ 'STABLE': tokens.Keyword,
+ 'START': tokens.Keyword,
+ 'STATE': tokens.Keyword,
+ 'STATEMENT': tokens.Keyword,
+ 'STATIC': tokens.Keyword,
+ 'STATISTICS': tokens.Keyword,
+ 'STDIN': tokens.Keyword,
+ 'STDOUT': tokens.Keyword,
+ 'STORAGE': tokens.Keyword,
+ 'STRICT': tokens.Keyword,
+ 'STRUCTURE': tokens.Keyword,
+ 'STYPE': tokens.Keyword,
+ 'SUBCLASS_ORIGIN': tokens.Keyword,
+ 'SUBLIST': tokens.Keyword,
+ 'SUBSTRING': tokens.Keyword,
+ 'SUM': tokens.Keyword,
+ 'SYMMETRIC': tokens.Keyword,
+ 'SYSID': tokens.Keyword,
+ 'SYSTEM': tokens.Keyword,
+ 'SYSTEM_USER': tokens.Keyword,
+
+ 'TABLE': tokens.Keyword,
+ 'TABLE_NAME': tokens.Keyword,
+ ' TEMP': tokens.Keyword,
+ 'TEMPLATE': tokens.Keyword,
+ 'TEMPORARY': tokens.Keyword,
+ 'TERMINATE': tokens.Keyword,
+ 'THAN': tokens.Keyword,
+ 'TIMESTAMP': tokens.Keyword,
+ 'TIMEZONE_HOUR': tokens.Keyword,
+ 'TIMEZONE_MINUTE': tokens.Keyword,
+ 'TO': tokens.Keyword,
+ 'TOAST': tokens.Keyword,
+ 'TRAILING': tokens.Keyword,
+ 'TRANSATION': tokens.Keyword,
+ 'TRANSACTIONS_COMMITTED': tokens.Keyword,
+ 'TRANSACTIONS_ROLLED_BACK': tokens.Keyword,
+ 'TRANSATION_ACTIVE': tokens.Keyword,
+ 'TRANSFORM': tokens.Keyword,
+ 'TRANSFORMS': tokens.Keyword,
+ 'TRANSLATE': tokens.Keyword,
+ 'TRANSLATION': tokens.Keyword,
+ 'TREAT': tokens.Keyword,
+ 'TRIGGER': tokens.Keyword,
+ 'TRIGGER_CATALOG': tokens.Keyword,
+ 'TRIGGER_NAME': tokens.Keyword,
+ 'TRIGGER_SCHEMA': tokens.Keyword,
+ 'TRIM': tokens.Keyword,
+ 'TRUE': tokens.Keyword,
+ 'TRUNCATE': tokens.Keyword,
+ 'TRUSTED': tokens.Keyword,
+ 'TYPE': tokens.Keyword,
+
+ 'UNCOMMITTED': tokens.Keyword,
+ 'UNDER': tokens.Keyword,
+ 'UNENCRYPTED': tokens.Keyword,
+ 'UNION': tokens.Keyword,
+ 'UNIQUE': tokens.Keyword,
+ 'UNKNOWN': tokens.Keyword,
+ 'UNLISTEN': tokens.Keyword,
+ 'UNNAMED': tokens.Keyword,
+ 'UNNEST': tokens.Keyword,
+ 'UNTIL': tokens.Keyword,
+ 'UPPER': tokens.Keyword,
+ 'USAGE': tokens.Keyword,
+ 'USER': tokens.Keyword,
+ 'USER_DEFINED_TYPE_CATALOG': tokens.Keyword,
+ 'USER_DEFINED_TYPE_NAME': tokens.Keyword,
+ 'USER_DEFINED_TYPE_SCHEMA': tokens.Keyword,
+ 'USING': tokens.Keyword,
+
+ 'VACUUM': tokens.Keyword,
+ 'VALID': tokens.Keyword,
+ 'VALIDATOR': tokens.Keyword,
+ 'VALUES': tokens.Keyword,
+ 'VARIABLE': tokens.Keyword,
+ 'VERBOSE': tokens.Keyword,
+ 'VERSION': tokens.Keyword,
+ 'VIEW': tokens.Keyword,
+ 'VOLATILE': tokens.Keyword,
+
+ 'WHENEVER': tokens.Keyword,
+ 'WITH': tokens.Keyword,
+ 'WITHOUT': tokens.Keyword,
+ 'WORK': tokens.Keyword,
+ 'WRITE': tokens.Keyword,
+
+ 'YEAR': tokens.Keyword,
+
+ 'ZONE': tokens.Keyword,
+
+
+ 'ARRAY': tokens.Name.Builtin,
+ 'BIGINT': tokens.Name.Builtin,
+ 'BINARY': tokens.Name.Builtin,
+ 'BIT': tokens.Name.Builtin,
+ 'BLOB': tokens.Name.Builtin,
+ 'BOOLEAN': tokens.Name.Builtin,
+ 'CHAR': tokens.Name.Builtin,
+ 'CHARACTER': tokens.Name.Builtin,
+ 'DATE': tokens.Name.Builtin,
+ 'DEC': tokens.Name.Builtin,
+ 'DECIMAL': tokens.Name.Builtin,
+ 'FLOAT': tokens.Name.Builtin,
+ 'INT': tokens.Name.Builtin,
+ 'INTEGER': tokens.Name.Builtin,
+ 'INTERVAL': tokens.Name.Builtin,
+ 'LONG': tokens.Name.Builtin,
+ 'NUMBER': tokens.Name.Builtin,
+ 'NUMERIC': tokens.Name.Builtin,
+ 'REAL': tokens.Name.Builtin,
+ 'SERIAL': tokens.Name.Builtin,
+ 'SMALLINT': tokens.Name.Builtin,
+ 'VARCHAR': tokens.Name.Builtin,
+ 'VARCHAR2': tokens.Name.Builtin,
+ 'VARYING': tokens.Name.Builtin,
+ 'INT8': tokens.Name.Builtin,
+ 'SERIAL8': tokens.Name.Builtin,
+ 'TEXT': tokens.Name.Builtin,
}
KEYWORDS_COMMON = {
- 'SELECT': Keyword.DML,
- 'INSERT': Keyword.DML,
- 'DELETE': Keyword.DML,
- 'UPDATE': Keyword.DML,
- 'DROP': Keyword.DDL,
- 'CREATE': Keyword.DDL,
- 'ALTER': Keyword.DDL,
-
- 'WHERE': Keyword,
- 'FROM': Keyword,
- 'INNER': Keyword,
- 'JOIN': Keyword,
- 'AND': Keyword,
- 'OR': Keyword,
- 'LIKE': Keyword,
- 'ON': Keyword,
- 'IN': Keyword,
- 'SET': Keyword,
-
- 'BY': Keyword,
- 'GROUP': Keyword,
- 'ORDER': Keyword,
- 'LEFT': Keyword,
- 'OUTER': Keyword,
-
- 'IF': Keyword,
- 'END': Keyword,
- 'THEN': Keyword,
- 'LOOP': Keyword,
- 'AS': Keyword,
- 'ELSE': Keyword,
- 'FOR': Keyword,
-
- 'CASE': Keyword,
- 'WHEN': Keyword,
- 'MIN': Keyword,
- 'MAX': Keyword,
- 'DISTINCT': Keyword,
-
+ 'SELECT': tokens.Keyword.DML,
+ 'INSERT': tokens.Keyword.DML,
+ 'DELETE': tokens.Keyword.DML,
+ 'UPDATE': tokens.Keyword.DML,
+ 'REPLACE': tokens.Keyword.DML,
+ 'DROP': tokens.Keyword.DDL,
+ 'CREATE': tokens.Keyword.DDL,
+ 'ALTER': tokens.Keyword.DDL,
+
+ 'WHERE': tokens.Keyword,
+ 'FROM': tokens.Keyword,
+ 'INNER': tokens.Keyword,
+ 'JOIN': tokens.Keyword,
+ 'AND': tokens.Keyword,
+ 'OR': tokens.Keyword,
+ 'LIKE': tokens.Keyword,
+ 'ON': tokens.Keyword,
+ 'IN': tokens.Keyword,
+ 'SET': tokens.Keyword,
+
+ 'BY': tokens.Keyword,
+ 'GROUP': tokens.Keyword,
+ 'ORDER': tokens.Keyword,
+ 'LEFT': tokens.Keyword,
+ 'OUTER': tokens.Keyword,
+
+ 'IF': tokens.Keyword,
+ 'END': tokens.Keyword,
+ 'THEN': tokens.Keyword,
+ 'LOOP': tokens.Keyword,
+ 'AS': tokens.Keyword,
+ 'ELSE': tokens.Keyword,
+ 'FOR': tokens.Keyword,
+
+ 'CASE': tokens.Keyword,
+ 'WHEN': tokens.Keyword,
+ 'MIN': tokens.Keyword,
+ 'MAX': tokens.Keyword,
+ 'DISTINCT': tokens.Keyword,
}
diff --git a/debug_toolbar/utils/sqlparse/lexer.py b/debug_toolbar/utils/sqlparse/lexer.py
index 727a4ff..ae3fc2e 100644
--- a/debug_toolbar/utils/sqlparse/lexer.py
+++ b/debug_toolbar/utils/sqlparse/lexer.py
@@ -14,14 +14,14 @@
import re
+from debug_toolbar.utils.sqlparse import tokens
from debug_toolbar.utils.sqlparse.keywords import KEYWORDS, KEYWORDS_COMMON
-from debug_toolbar.utils.sqlparse.tokens import *
-from debug_toolbar.utils.sqlparse.tokens import _TokenType
class include(str):
pass
+
class combined(tuple):
"""Indicates a state combined from multiple states."""
@@ -32,9 +32,10 @@ class combined(tuple):
# tuple.__init__ doesn't do anything
pass
+
def is_keyword(value):
test = value.upper()
- return KEYWORDS_COMMON.get(test, KEYWORDS.get(test, Name)), value
+ return KEYWORDS_COMMON.get(test, KEYWORDS.get(test, tokens.Name)), value
def apply_filters(stream, filters, lexer=None):
@@ -43,9 +44,11 @@ def apply_filters(stream, filters, lexer=None):
a stream. If lexer is given it's forwarded to the
filter, otherwise the filter receives `None`.
"""
+
def _apply(filter_, stream):
for token in filter_.filter(lexer, stream):
yield token
+
for filter_ in filters:
stream = _apply(filter_, stream)
return stream
@@ -62,13 +65,14 @@ class LexerMeta(type):
assert state[0] != '#', "invalid state name %r" % state
if state in processed:
return processed[state]
- tokens = processed[state] = []
+ tokenlist = processed[state] = []
rflags = cls.flags
for tdef in unprocessed[state]:
if isinstance(tdef, include):
# it's a state reference
assert tdef != state, "circular state reference %r" % state
- tokens.extend(cls._process_state(unprocessed, processed, str(tdef)))
+ tokenlist.extend(cls._process_state(
+ unprocessed, processed, str(tdef)))
continue
assert type(tdef) is tuple, "wrong rule def %r" % tdef
@@ -76,11 +80,13 @@ class LexerMeta(type):
try:
rex = re.compile(tdef[0], rflags).match
except Exception, err:
- raise ValueError("uncompilable regex %r in state %r of %r: %s" %
- (tdef[0], state, cls, err))
+ raise ValueError(("uncompilable regex %r in state"
+ " %r of %r: %s"
+ % (tdef[0], state, cls, err)))
- assert type(tdef[1]) is _TokenType or callable(tdef[1]), \
- 'token type must be simple type or callable, not %r' % (tdef[1],)
+ assert type(tdef[1]) is tokens._TokenType or callable(tdef[1]), \
+ ('token type must be simple type or callable, not %r'
+ % (tdef[1],))
if len(tdef) == 2:
new_state = None
@@ -104,7 +110,8 @@ class LexerMeta(type):
cls._tmpname += 1
itokens = []
for istate in tdef2:
- assert istate != state, 'circular state ref %r' % istate
+ assert istate != state, \
+ 'circular state ref %r' % istate
itokens.extend(cls._process_state(unprocessed,
processed, istate))
processed[new_state] = itokens
@@ -118,8 +125,8 @@ class LexerMeta(type):
new_state = tdef2
else:
assert False, 'unknown new state def %r' % tdef2
- tokens.append((rex, tdef[1], new_state))
- return tokens
+ tokenlist.append((rex, tdef[1], new_state))
+ return tokenlist
def process_tokendef(cls):
cls._all_tokens = {}
@@ -143,9 +150,7 @@ class LexerMeta(type):
return type.__call__(cls, *args, **kwds)
-
-
-class Lexer:
+class Lexer(object):
__metaclass__ = LexerMeta
@@ -157,41 +162,53 @@ class Lexer:
tokens = {
'root': [
- (r'--.*?(\r|\n|\r\n)', Comment.Single),
- (r'(\r|\n|\r\n)', Newline),
- (r'\s+', Whitespace),
- (r'/\*', Comment.Multiline, 'multiline-comments'),
- (r':=', Assignment),
- (r'::', Punctuation),
- (r'[*]', Wildcard),
- (r"`(``|[^`])*`", Name),
- (r"´(´´|[^´])*´", Name),
- (r'@[a-zA-Z_][a-zA-Z0-9_]+', Name),
- (r'[+/<>=~!@#%^&|`?^-]', Operator),
- (r'[0-9]+', Number.Integer),
+ (r'--.*?(\r\n|\r|\n)', tokens.Comment.Single),
+ # $ matches *before* newline, therefore we have two patterns
+ # to match Comment.Single
+ (r'--.*?$', tokens.Comment.Single),
+ (r'(\r|\n|\r\n)', tokens.Newline),
+ (r'\s+', tokens.Whitespace),
+ (r'/\*', tokens.Comment.Multiline, 'multiline-comments'),
+ (r':=', tokens.Assignment),
+ (r'::', tokens.Punctuation),
+ (r'[*]', tokens.Wildcard),
+ (r'CASE\b', tokens.Keyword), # extended CASE(foo)
+ (r"`(``|[^`])*`", tokens.Name),
+ (r"´(´´|[^´])*´", tokens.Name),
+ (r'\$([a-zA-Z_][a-zA-Z0-9_]*)?\$', tokens.Name.Builtin),
+ (r'\?{1}', tokens.Name.Placeholder),
+ (r'[$:?%][a-zA-Z0-9_]+[^$:?%]?', tokens.Name.Placeholder),
+ (r'@[a-zA-Z_][a-zA-Z0-9_]+', tokens.Name),
+ (r'[a-zA-Z_][a-zA-Z0-9_]*(?=[.(])', tokens.Name), # see issue39
+ (r'[<>=~!]+', tokens.Operator.Comparison),
+ (r'[+/@#%^&|`?^-]+', tokens.Operator),
+ (r'0x[0-9a-fA-F]+', tokens.Number.Hexadecimal),
+ (r'[0-9]*\.[0-9]+', tokens.Number.Float),
+ (r'[0-9]+', tokens.Number.Integer),
# TODO: Backslash escapes?
- (r"'(''|[^'])*'", String.Single),
- (r'"(""|[^"])*"', String.Symbol), # not a real string literal in ANSI SQL
- (r'(LEFT |RIGHT )?(INNER |OUTER )?JOIN', Keyword),
- (r'END( IF| LOOP)?', Keyword),
- (r'CREATE( OR REPLACE)?', Keyword.DDL),
+ (r"(''|'.*?[^\\]')", tokens.String.Single),
+ # not a real string literal in ANSI SQL:
+ (r'(""|".*?[^\\]")', tokens.String.Symbol),
+ (r'(\[.*[^\]]\])', tokens.Name),
+ (r'(LEFT |RIGHT )?(INNER |OUTER )?JOIN\b', tokens.Keyword),
+ (r'END( IF| LOOP)?\b', tokens.Keyword),
+ (r'NOT NULL\b', tokens.Keyword),
+ (r'CREATE( OR REPLACE)?\b', tokens.Keyword.DDL),
(r'[a-zA-Z_][a-zA-Z0-9_]*', is_keyword),
- (r'\$([a-zA-Z_][a-zA-Z0-9_]*)?\$', Name.Builtin),
- (r'[;:()\[\],\.]', Punctuation),
+ (r'[;:()\[\],\.]', tokens.Punctuation),
],
'multiline-comments': [
- (r'/\*', Comment.Multiline, 'multiline-comments'),
- (r'\*/', Comment.Multiline, '#pop'),
- (r'[^/\*]+', Comment.Multiline),
- (r'[/*]', Comment.Multiline)
- ]
- }
+ (r'/\*', tokens.Comment.Multiline, 'multiline-comments'),
+ (r'\*/', tokens.Comment.Multiline, '#pop'),
+ (r'[^/\*]+', tokens.Comment.Multiline),
+ (r'[/*]', tokens.Comment.Multiline)
+ ]}
def __init__(self):
self.filters = []
def add_filter(self, filter_, **options):
- from sqlparse.filters import Filter
+ from debug_toolbar.utils.sqlparse.filters import Filter
if not isinstance(filter_, Filter):
filter_ = filter_(**options)
self.filters.append(filter_)
@@ -241,7 +258,6 @@ class Lexer:
stream = apply_filters(stream, self.filters, self)
return stream
-
def get_tokens_unprocessed(self, text, stack=('root',)):
"""
Split ``text`` into (tokentype, text) pairs.
@@ -261,7 +277,7 @@ class Lexer:
value = m.group()
if value in known_names:
yield pos, known_names[value], value
- elif type(action) is _TokenType:
+ elif type(action) is tokens._TokenType:
yield pos, action, value
elif hasattr(action, '__call__'):
ttype, value = action(value)
@@ -297,9 +313,9 @@ class Lexer:
pos += 1
statestack = ['root']
statetokens = tokendefs['root']
- yield pos, Text, u'\n'
+ yield pos, tokens.Text, u'\n'
continue
- yield pos, Error, text[pos]
+ yield pos, tokens.Error, text[pos]
pos += 1
except IndexError:
break
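
Note: a quick way to see what the reworked lexer rules emit is the module-level tokenize() helper, assumed here to wrap Lexer().get_tokens() as the filter stack does. Sketch only; the statement is illustrative:

    # Sketch only: dump the (tokentype, value) pairs for a small statement.
    from debug_toolbar.utils.sqlparse import lexer

    for ttype, value in lexer.tokenize(u"SELECT name FROM users WHERE id = ?"):
        print ttype, repr(value)

    # With the new rules, "?" should come back as Token.Name.Placeholder and
    # "=" as Token.Operator.Comparison rather than the generic Operator type.
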
diff --git a/debug_toolbar/utils/sqlparse/sql.py b/debug_toolbar/utils/sqlparse/sql.py
index 5bbb977..55bf804 100644
--- a/debug_toolbar/utils/sqlparse/sql.py
+++ b/debug_toolbar/utils/sqlparse/sql.py
@@ -3,7 +3,6 @@
"""This module contains classes representing syntactical elements of SQL."""
import re
-import types
from debug_toolbar.utils.sqlparse import tokens as T
@@ -16,14 +15,15 @@ class Token(object):
the type of the token.
"""
- __slots__ = ('value', 'ttype',)
+ __slots__ = ('value', 'ttype', 'parent')
def __init__(self, ttype, value):
self.value = value
self.ttype = ttype
+ self.parent = None
def __str__(self):
- return unicode(self).encode('latin-1')
+ return unicode(self).encode('utf-8')
def __repr__(self):
short = self._get_repr_value()
@@ -43,7 +43,7 @@ class Token(object):
def _get_repr_value(self):
raw = unicode(self)
if len(raw) > 7:
- short = raw[:6]+u'...'
+ short = raw[:6] + u'...'
else:
short = raw
return re.sub('\s+', ' ', short)
@@ -59,12 +59,12 @@ class Token(object):
type.
*values* is a list of possible values for this token. The values
are OR'ed together so if only one of the values matches ``True``
- is returned. Except for keyword tokens the comparsion is
+ is returned. Except for keyword tokens the comparison is
case-sensitive. For convenience it's ok to pass in a single string.
If *regex* is ``True`` (default is ``False``) the given values are
treated as regular expressions.
"""
- type_matched = self.ttype in ttype
+ type_matched = self.ttype is ttype
if not type_matched or values is None:
return type_matched
if isinstance(values, basestring):
@@ -79,7 +79,7 @@ class Token(object):
return True
return False
else:
- if self.ttype is T.Keyword:
+ if self.ttype in T.Keyword:
values = set([v.upper() for v in values])
return self.value.upper() in values
else:
@@ -93,6 +93,32 @@ class Token(object):
"""Return ``True`` if this token is a whitespace token."""
return self.ttype and self.ttype in T.Whitespace
+ def within(self, group_cls):
+ """Returns ``True`` if this token is within *group_cls*.
+
+ Use this method for example to check if an identifier is within
+ a function: ``t.within(sql.Function)``.
+ """
+ parent = self.parent
+ while parent:
+ if isinstance(parent, group_cls):
+ return True
+ parent = parent.parent
+ return False
+
+ def is_child_of(self, other):
+ """Returns ``True`` if this token is a direct child of *other*."""
+ return self.parent == other
+
+ def has_ancestor(self, other):
+ """Returns ``True`` if *other* is in this tokens ancestry."""
+        """Returns ``True`` if *other* is in this token's ancestry."""
+ while parent:
+ if parent == other:
+ return True
+ parent = parent.parent
+ return False
+
class TokenList(Token):
"""A group of tokens.
@@ -113,24 +139,24 @@ class TokenList(Token):
return ''.join(unicode(x) for x in self.flatten())
def __str__(self):
- return unicode(self).encode('latin-1')
+ return unicode(self).encode('utf-8')
def _get_repr_name(self):
return self.__class__.__name__
- ## def _pprint_tree(self, max_depth=None, depth=0):
- ## """Pretty-print the object tree."""
- ## indent = ' '*(depth*2)
- ## for token in self.tokens:
- ## if token.is_group():
- ## pre = ' | '
- ## else:
- ## pre = ' | '
- ## print '%s%s%s \'%s\'' % (indent, pre, token._get_repr_name(),
- ## token._get_repr_value())
- ## if (token.is_group() and max_depth is not None
- ## and depth < max_depth):
- ## token._pprint_tree(max_depth, depth+1)
+ def _pprint_tree(self, max_depth=None, depth=0):
+ """Pretty-print the object tree."""
+ indent = ' '*(depth*2)
+ for idx, token in enumerate(self.tokens):
+ if token.is_group():
+ pre = ' +-'
+ else:
+ pre = ' | '
+ print '%s%s%d %s \'%s\'' % (indent, pre, idx,
+ token._get_repr_name(),
+ token._get_repr_value())
+ if (token.is_group() and (max_depth is None or depth < max_depth)):
+ token._pprint_tree(max_depth, depth+1)
def flatten(self):
"""Generator yielding ungrouped tokens.
@@ -150,6 +176,10 @@ class TokenList(Token):
def get_sublists(self):
return [x for x in self.tokens if isinstance(x, TokenList)]
+ @property
+ def _groupable_tokens(self):
+ return self.tokens
+
def token_first(self, ignore_whitespace=True):
"""Returns the first child token.
@@ -190,7 +220,7 @@ class TokenList(Token):
def token_next_match(self, idx, ttype, value, regex=False):
"""Returns next token where it's ``match`` method returns ``True``."""
- if type(idx) != types.IntType:
+ if not isinstance(idx, int):
idx = self.token_index(idx)
for token in self.tokens[idx:]:
if token.match(ttype, value, regex):
@@ -202,8 +232,8 @@ class TokenList(Token):
passed = False
for func in funcs:
if func(token):
- passed = True
- break
+ passed = True
+ break
if not passed:
return token
return None
@@ -241,7 +271,7 @@ class TokenList(Token):
return None
if not isinstance(idx, int):
idx = self.token_index(idx)
- while idx < len(self.tokens)-1:
+ while idx < len(self.tokens) - 1:
idx += 1
if self.tokens[idx].is_whitespace() and skip_ws:
continue
@@ -257,18 +287,27 @@ class TokenList(Token):
If *exclude_end* is ``True`` (default is ``False``) the end token
is included too.
"""
+        # FIXME(andi): rename exclude_end to include_end
if exclude_end:
offset = 0
else:
offset = 1
- return self.tokens[self.token_index(start):self.token_index(end)+offset]
+ end_idx = self.token_index(end) + offset
+ start_idx = self.token_index(start)
+ return self.tokens[start_idx:end_idx]
- def group_tokens(self, grp_cls, tokens):
+ def group_tokens(self, grp_cls, tokens, ignore_ws=False):
"""Replace tokens by an instance of *grp_cls*."""
idx = self.token_index(tokens[0])
+ if ignore_ws:
+ while tokens and tokens[-1].is_whitespace():
+ tokens = tokens[:-1]
for t in tokens:
self.tokens.remove(t)
grp = grp_cls(tokens)
+ for token in tokens:
+ token.parent = grp
+ grp.parent = self
self.tokens.insert(idx, grp)
return grp
@@ -290,7 +329,11 @@ class Statement(TokenList):
isn't a DML or DDL keyword "UNKNOWN" is returned.
"""
first_token = self.token_first()
- if first_token.ttype in (T.Keyword.DML, T.Keyword.DDL):
+ if first_token is None:
+            # An "empty" statement that either has no tokens at all
+ # or only whitespace tokens.
+ return 'UNKNOWN'
+ elif first_token.ttype in (T.Keyword.DML, T.Keyword.DDL):
return first_token.value.upper()
else:
return 'UNKNOWN'
@@ -397,27 +440,36 @@ class Parenthesis(TokenList):
"""Tokens between parenthesis."""
__slots__ = ('value', 'ttype', 'tokens')
+ @property
+ def _groupable_tokens(self):
+ return self.tokens[1:-1]
+
class Assignment(TokenList):
"""An assignment like 'var := val;'"""
__slots__ = ('value', 'ttype', 'tokens')
+
class If(TokenList):
"""An 'if' clause with possible 'else if' or 'else' parts."""
__slots__ = ('value', 'ttype', 'tokens')
+
class For(TokenList):
"""A 'FOR' loop."""
__slots__ = ('value', 'ttype', 'tokens')
-class Comparsion(TokenList):
- """A comparsion used for example in WHERE clauses."""
+
+class Comparison(TokenList):
+ """A comparison used for example in WHERE clauses."""
__slots__ = ('value', 'ttype', 'tokens')
+
class Comment(TokenList):
"""A comment."""
__slots__ = ('value', 'ttype', 'tokens')
+
class Where(TokenList):
"""A WHERE clause."""
__slots__ = ('value', 'ttype', 'tokens')
@@ -434,9 +486,12 @@ class Case(TokenList):
If an ELSE exists condition is None.
"""
ret = []
- in_condition = in_value = False
+ in_value = False
+ in_condition = True
for token in self.tokens:
- if token.match(T.Keyword, 'WHEN'):
+ if token.match(T.Keyword, 'CASE'):
+ continue
+ elif token.match(T.Keyword, 'WHEN'):
ret.append(([], []))
in_condition = True
in_value = False
@@ -450,8 +505,25 @@ class Case(TokenList):
elif token.match(T.Keyword, 'END'):
in_condition = False
in_value = False
+ if (in_condition or in_value) and not ret:
+            # First condition without a preceding WHEN
+ ret.append(([], []))
if in_condition:
ret[-1][0].append(token)
elif in_value:
ret[-1][1].append(token)
return ret
+
+
+class Function(TokenList):
+ """A function or procedure call."""
+
+ __slots__ = ('value', 'ttype', 'tokens')
+
+ def get_parameters(self):
+ """Return a list of parameters."""
+ parenthesis = self.tokens[-1]
+ for t in parenthesis.tokens:
+ if isinstance(t, IdentifierList):
+ return t.get_identifiers()
+ return []
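
Note: the new parent links and helper classes are easiest to exercise through the package-level parse() entry point. A hedged usage sketch (the statement and identifier names are illustrative only, not part of the patch):

    # Sketch only: a few of the new Statement/TokenList helpers in use.
    from debug_toolbar.utils.sqlparse import parse, sql

    stmt = parse('SELECT MAX(a, b) FROM foo')[0]
    print stmt.get_type()   # 'SELECT'

    # A statement holding only whitespace should now report 'UNKNOWN'
    # instead of failing when token_first() returns None.
    print parse('   ')[0].get_type()

    # within() walks the parent links that group_tokens() now sets up.
    for token in stmt.flatten():
        if token.within(sql.Function):
            print 'inside a function call:', repr(token.value)
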
diff --git a/debug_toolbar/utils/sqlparse/tokens.py b/debug_toolbar/utils/sqlparse/tokens.py
index 2c63c41..01a9b89 100644
--- a/debug_toolbar/utils/sqlparse/tokens.py
+++ b/debug_toolbar/utils/sqlparse/tokens.py
@@ -9,11 +9,6 @@
"""Tokens"""
-try:
- set
-except NameError:
- from sets import Set as set
-
class _TokenType(tuple):
parent = None
@@ -27,22 +22,14 @@ class _TokenType(tuple):
buf.reverse()
return buf
- def __init__(self, *args):
- # no need to call super.__init__
- self.subtypes = set()
-
def __contains__(self, val):
- return self is val or (
- type(val) is self.__class__ and
- val[:len(self)] == self
- )
+ return val is not None and (self is val or val[:len(self)] == self)
def __getattr__(self, val):
if not val or not val[0].isupper():
return tuple.__getattribute__(self, val)
new = _TokenType(self + (val,))
setattr(self, val, new)
- self.subtypes.add(new)
new.parent = self
return new
@@ -53,30 +40,31 @@ class _TokenType(tuple):
return 'Token' + (self and '.' or '') + '.'.join(self)
-Token = _TokenType()
+Token = _TokenType()
# Special token types
-Text = Token.Text
-Whitespace = Text.Whitespace
-Newline = Whitespace.Newline
-Error = Token.Error
+Text = Token.Text
+Whitespace = Text.Whitespace
+Newline = Whitespace.Newline
+Error = Token.Error
# Text that doesn't belong to this lexer (e.g. HTML in PHP)
-Other = Token.Other
+Other = Token.Other
# Common token types for source code
-Keyword = Token.Keyword
-Name = Token.Name
-Literal = Token.Literal
-String = Literal.String
-Number = Literal.Number
+Keyword = Token.Keyword
+Name = Token.Name
+Literal = Token.Literal
+String = Literal.String
+Number = Literal.Number
Punctuation = Token.Punctuation
-Operator = Token.Operator
-Wildcard = Token.Wildcard
-Comment = Token.Comment
-Assignment = Token.Assignement
+Operator = Token.Operator
+Comparison = Operator.Comparison
+Wildcard = Token.Wildcard
+Comment = Token.Comment
+Assignment = Token.Assignement
# Generic types for non-source code
-Generic = Token.Generic
+Generic = Token.Generic
 # String and some others are not direct children of Token.
# alias them:
@@ -93,39 +81,3 @@ Group = Token.Group
Group.Parenthesis = Token.Group.Parenthesis
Group.Comment = Token.Group.Comment
Group.Where = Token.Group.Where
-
-
-def is_token_subtype(ttype, other):
- """
- Return True if ``ttype`` is a subtype of ``other``.
-
- exists for backwards compatibility. use ``ttype in other`` now.
- """
- return ttype in other
-
-
-def string_to_tokentype(s):
- """
- Convert a string into a token type::
-
- >>> string_to_token('String.Double')
- Token.Literal.String.Double
- >>> string_to_token('Token.Literal.Number')
- Token.Literal.Number
- >>> string_to_token('')
- Token
-
- Tokens that are already tokens are returned unchanged:
-
- >>> string_to_token(String)
- Token.Literal.String
- """
- if isinstance(s, _TokenType):
- return s
- if not s:
- return Token
- node = Token
- for item in s.split('.'):
- node = getattr(node, item)
- return node
-
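
Note: with the simplified __contains__ above, a token type contains itself and all of its deeper subtypes, and the explicit None guard keeps stray comparisons from raising. A short sketch:

    # Sketch only: subtype containment after the change.
    from debug_toolbar.utils.sqlparse import tokens as T

    print T.Keyword.DML in T.Keyword   # True  -- DML is a subtype of Keyword
    print T.Keyword in T.Keyword.DML   # False -- containment is one-way
    print T.Keyword in T.Keyword       # True  -- a type contains itself
    print None in T.Keyword            # False -- guarded by the None check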