Commit 8c9a6c6e authored by Taddeus Kroes's avatar Taddeus Kroes

Introduces FUNCTION token instead of separate tokens for each function.

parent 6139bd82
...@@ -79,12 +79,18 @@ TOKEN_MAP = { ...@@ -79,12 +79,18 @@ TOKEN_MAP = {
OP_MUL: 'TIMES', OP_MUL: 'TIMES',
OP_DIV: 'DIVIDE', OP_DIV: 'DIVIDE',
OP_POW: 'POW', OP_POW: 'POW',
OP_SQRT: 'SQRT', OP_SQRT: 'FUNCTION',
OP_SIN: 'SIN', OP_SIN: 'FUNCTION',
OP_COS: 'COS', OP_COS: 'FUNCTION',
OP_TAN: 'TAN', OP_TAN: 'FUNCTION',
OP_INT: 'INT', OP_INT: 'FUNCTION',
OP_SOLVE: 'SOLVE', OP_SOLVE: 'FUNCTION',
#OP_SQRT: 'SQRT',
#OP_SIN: 'SIN',
#OP_COS: 'COS',
#OP_TAN: 'TAN',
#OP_INT: 'INT',
#OP_SOLVE: 'SOLVE',
OP_EQ: 'EQ', OP_EQ: 'EQ',
OP_POSSIBILITIES: 'POSSIBILITIES', OP_POSSIBILITIES: 'POSSIBILITIES',
OP_HINT: 'HINT', OP_HINT: 'HINT',
......
...@@ -51,8 +51,9 @@ class Parser(BisonParser): ...@@ -51,8 +51,9 @@ class Parser(BisonParser):
# ---------------------------------------------------------------- # ----------------------------------------------------------------
# TODO: add a runtime check to verify that this token list match the list # TODO: add a runtime check to verify that this token list match the list
# of tokens of the lex script. # of tokens of the lex script.
tokens = ['NUMBER', 'IDENTIFIER', 'NEWLINE', 'QUIT', 'RAISE', 'GRAPH', \ tokens = ['NUMBER', 'IDENTIFIER', 'NEWLINE', 'QUIT', 'RAISE', 'GRAPH',
'LPAREN', 'RPAREN'] + TOKEN_MAP.values() 'LPAREN', 'RPAREN', 'FUNCTION'] \
+ filter(lambda t: t != 'FUNCTION', TOKEN_MAP.values())
# ------------------------------ # ------------------------------
# precedences # precedences
...@@ -64,7 +65,8 @@ class Parser(BisonParser): ...@@ -64,7 +65,8 @@ class Parser(BisonParser):
('left', ('EQ', )), ('left', ('EQ', )),
('left', ('NEG', )), ('left', ('NEG', )),
('right', ('POW', )), ('right', ('POW', )),
('right', ('SIN', 'COS', 'TAN', 'SOLVE', 'INT', 'SQRT')), ('right', ('FUNCTION', )),
#('right', ('SIN', 'COS', 'TAN', 'SOLVE', 'INT', 'SQRT')),
) )
interactive = 0 interactive = 0
...@@ -152,11 +154,14 @@ class Parser(BisonParser): ...@@ -152,11 +154,14 @@ class Parser(BisonParser):
+ '|([a-z])\s*([0-9])' # match: a4 result: a ^ 4 + '|([a-z])\s*([0-9])' # match: a4 result: a ^ 4
+ '|([0-9])\s+([0-9]))') # match: 4 4 result: 4 * 4 + '|([0-9])\s+([0-9]))') # match: 4 4 result: 4 * 4
words = zip(*filter(lambda (s, op): TOKEN_MAP[op] == 'FUNCTION', \
OP_MAP.iteritems()))[0] + ('raise', 'graph')
def preprocess_data(match): def preprocess_data(match):
left, right = filter(None, match.groups()) left, right = filter(None, match.groups())
# Filter words (otherwise they will be preprocessed as well) # Filter words (otherwise they will be preprocessed as well)
if (left + right).upper() in self.tokens: if left + right in words:
return left + right return left + right
# If all characters on the right are numbers. e.g. "a4", the # If all characters on the right are numbers. e.g. "a4", the
...@@ -336,12 +341,7 @@ class Parser(BisonParser): ...@@ -336,12 +341,7 @@ class Parser(BisonParser):
def on_unary(self, target, option, names, values): def on_unary(self, target, option, names, values):
""" """
unary : MINUS exp %prec NEG unary : MINUS exp %prec NEG
| SIN exp | FUNCTION exp
| COS exp
| TAN exp
| INT exp
| SOLVE exp
| SQRT exp
""" """
if option == 0: # rule: NEG exp if option == 0: # rule: NEG exp
...@@ -354,9 +354,10 @@ class Parser(BisonParser): ...@@ -354,9 +354,10 @@ class Parser(BisonParser):
return values[1] return values[1]
if option < 7: # rule: SIN exp | COS exp | TAN exp | INT exp if option == 1: # rule: FUNCTION exp
if values[1].type == TYPE_OPERATOR and values[1].op == OP_COMMA: if values[1].is_op(OP_COMMA):
return Node(values[0], *values[1]) return Node(values[0], *values[1])
return Node(*values) return Node(*values)
raise BisonSyntaxError('Unsupported option %d in target "%s".' raise BisonSyntaxError('Unsupported option %d in target "%s".'
...@@ -408,11 +409,20 @@ class Parser(BisonParser): ...@@ -408,11 +409,20 @@ class Parser(BisonParser):
# operator tokens # operator tokens
# ----------------------------------------- # -----------------------------------------
operators = '' operators = ''
functions = []
for op_str, op in OP_MAP.iteritems(): for op_str, op in OP_MAP.iteritems():
if TOKEN_MAP[op] == 'FUNCTION':
functions.append(op_str)
else:
operators += '"%s"%s{ returntoken(%s); }\n' \ operators += '"%s"%s{ returntoken(%s); }\n' \
% (op_str, ' ' * (8 - len(op_str)), TOKEN_MAP[op]) % (op_str, ' ' * (8 - len(op_str)), TOKEN_MAP[op])
# Put all functions in a single regex
if functions:
operators += '("%s") { returntoken(FUNCTION); }\n' \
% '"|"'.join(functions)
# ----------------------------------------- # -----------------------------------------
# raw lex script, verbatim here # raw lex script, verbatim here
# ----------------------------------------- # -----------------------------------------
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment