@@ -43,6 +43,10 @@ class Parser(BisonParser):
     docstrings. Scanner rules are in the 'lexscript' attribute.
     """
 
+    # Words to be ignored by preprocessor
+    words = zip(*filter(lambda (s, op): TOKEN_MAP[op] == 'FUNCTION', \
+                OP_MAP.iteritems()))[0] + ('raise', 'graph')
+
     # Output directory of generated pybison files, including a trailing slash.
     buildDirectory = PYBISON_BUILD + '/'
 
@@ -51,8 +55,9 @@ class Parser(BisonParser):
     # ----------------------------------------------------------------
     # TODO: add a runtime check to verify that this token list match the list
     # of tokens of the lex script.
-    tokens = ['NUMBER', 'IDENTIFIER', 'NEWLINE', 'QUIT', 'RAISE', 'GRAPH', \
-              'LPAREN', 'RPAREN'] + TOKEN_MAP.values()
+    tokens = ['NUMBER', 'IDENTIFIER', 'NEWLINE', 'QUIT', 'RAISE', 'GRAPH',
+              'LPAREN', 'RPAREN', 'FUNCTION'] \
+            + filter(lambda t: t != 'FUNCTION', TOKEN_MAP.values())
 
     # ------------------------------
     # precedences
@@ -63,8 +68,9 @@ class Parser(BisonParser):
         ('left', ('TIMES', 'DIVIDE')),
         ('left', ('EQ', )),
         ('left', ('NEG', )),
-        ('right', ('SIN', 'COS', 'TAN', 'SOLVE', 'INT', 'SQRT')),
         ('right', ('POW', )),
+        ('right', ('FUNCTION', )),
+        #('right', ('SIN', 'COS', 'TAN', 'SOLVE', 'INT', 'SQRT')),
     )
 
     interactive = 0
@@ -156,7 +162,7 @@ class Parser(BisonParser):
         left, right = filter(None, match.groups())
 
         # Filter words (otherwise they will be preprocessed as well)
-        if (left + right).upper() in self.tokens:
+        if left + right in Parser.words:
             return left + right
 
         # If all characters on the right are numbers. e.g. "a4", the
@@ -336,12 +342,7 @@ class Parser(BisonParser):
     def on_unary(self, target, option, names, values):
         """
         unary : MINUS exp %prec NEG
-              | SIN exp
-              | COS exp
-              | TAN exp
-              | INT exp
-              | SOLVE exp
-              | SQRT exp
+              | FUNCTION exp
         """
 
         if option == 0:  # rule: NEG exp
@@ -354,9 +355,10 @@ class Parser(BisonParser):
 
             return values[1]
 
-        if option < 7:  # rule: SIN exp | COS exp | TAN exp | INT exp
-            if values[1].type == TYPE_OPERATOR and values[1].op == OP_COMMA:
+        if option == 1:  # rule: FUNCTION exp
+            if values[1].is_op(OP_COMMA):
                 return Node(values[0], *values[1])
+
             return Node(*values)
 
         raise BisonSyntaxError('Unsupported option %d in target "%s".'
@@ -408,10 +410,19 @@ class Parser(BisonParser):
     # operator tokens
     # -----------------------------------------
     operators = ''
+    functions = []
 
     for op_str, op in OP_MAP.iteritems():
-        operators += '"%s"%s{ returntoken(%s); }\n' \
-                % (op_str, ' ' * (8 - len(op_str)), TOKEN_MAP[op])
+        if TOKEN_MAP[op] == 'FUNCTION':
+            functions.append(op_str)
+        else:
+            operators += '"%s"%s{ returntoken(%s); }\n' \
+                    % (op_str, ' ' * (8 - len(op_str)), TOKEN_MAP[op])
+
+    # Put all functions in a single regex
+    if functions:
+        operators += '("%s") { returntoken(FUNCTION); }\n' \
+                % '"|"'.join(functions)
 
     # -----------------------------------------
     # raw lex script, verbatim here
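
For reference, here is a small self-contained sketch of what the new scanner-rule loop emits. The OP_MAP/TOKEN_MAP entries below are made-up stand-ins (the real tables are defined elsewhere in this module); only the loop and the final alternation are taken from the last hunk. It is Python 2, like the rest of the file, so it uses iteritems() and the print statement.

# Hypothetical stand-ins for the real OP_MAP/TOKEN_MAP (illustration only).
OP_MAP = {'sin': 'OP_SIN', 'cos': 'OP_COS', '+': 'OP_ADD'}
TOKEN_MAP = {'OP_SIN': 'FUNCTION', 'OP_COS': 'FUNCTION', 'OP_ADD': 'PLUS'}

operators = ''
functions = []

# As in the hunk above: function-like operators are collected in a list
# instead of each getting a dedicated scanner rule.
for op_str, op in OP_MAP.iteritems():
    if TOKEN_MAP[op] == 'FUNCTION':
        functions.append(op_str)
    else:
        operators += '"%s"%s{ returntoken(%s); }\n' \
                % (op_str, ' ' * (8 - len(op_str)), TOKEN_MAP[op])

# All collected function names end up in a single alternation that
# returns one FUNCTION token.
if functions:
    operators += '("%s") { returntoken(FUNCTION); }\n' % '"|"'.join(functions)

print operators
# "+"       { returntoken(PLUS); }
# ("sin"|"cos") { returntoken(FUNCTION); }
# (rule order and the sin/cos order depend on dict iteration order)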