
Introduces a FUNCTION token instead of separate tokens for each function.

Taddeus Kroes 14 years ago
parent
commit
8c9a6c6ed8
2 changed files with 36 additions and 20 deletions
  1. src/node.py    +12 -6
  2. src/parser.py  +24 -14
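In outline: the lexer keeps matching each function name individually, but every match now yields the same FUNCTION token, so the grammar needs one unary rule and one precedence entry instead of six. A minimal sketch of the mapping, with hypothetical OP_* constants standing in for the real ones in src/node.py:

    # Hypothetical stand-ins; the real OP_* constants live in src/node.py.
    OP_SIN, OP_COS, OP_TAN, OP_INT, OP_SOLVE, OP_SQRT = range(6)

    # Before this commit: one token name per function.
    # TOKEN_MAP = {OP_SIN: 'SIN', OP_COS: 'COS', ...}

    # After: all six operators share a single token name; the parser tells
    # them apart by the matched lexeme ("sin", "cos", ...) instead.
    TOKEN_MAP = dict((op, 'FUNCTION') for op in
                     (OP_SIN, OP_COS, OP_TAN, OP_INT, OP_SOLVE, OP_SQRT))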

src/node.py  +12 -6

@@ -79,12 +79,18 @@ TOKEN_MAP = {
         OP_MUL: 'TIMES',
         OP_DIV: 'DIVIDE',
         OP_POW: 'POW',
-        OP_SQRT: 'SQRT',
-        OP_SIN: 'SIN',
-        OP_COS: 'COS',
-        OP_TAN: 'TAN',
-        OP_INT: 'INT',
-        OP_SOLVE: 'SOLVE',
+        OP_SQRT: 'FUNCTION',
+        OP_SIN: 'FUNCTION',
+        OP_COS: 'FUNCTION',
+        OP_TAN: 'FUNCTION',
+        OP_INT: 'FUNCTION',
+        OP_SOLVE: 'FUNCTION',
+        #OP_SQRT: 'SQRT',
+        #OP_SIN: 'SIN',
+        #OP_COS: 'COS',
+        #OP_TAN: 'TAN',
+        #OP_INT: 'INT',
+        #OP_SOLVE: 'SOLVE',
         OP_EQ: 'EQ',
         OP_POSSIBILITIES: 'POSSIBILITIES',
         OP_HINT: 'HINT',

src/parser.py  +24 -14

@@ -51,8 +51,9 @@ class Parser(BisonParser):
     # ----------------------------------------------------------------
     # TODO: add a runtime check to verify that this token list match the list
     # of tokens of the lex script.
-    tokens = ['NUMBER', 'IDENTIFIER', 'NEWLINE', 'QUIT', 'RAISE', 'GRAPH', \
-              'LPAREN', 'RPAREN'] + TOKEN_MAP.values()
+    tokens = ['NUMBER', 'IDENTIFIER', 'NEWLINE', 'QUIT', 'RAISE', 'GRAPH',
+              'LPAREN', 'RPAREN', 'FUNCTION'] \
+             + filter(lambda t: t != 'FUNCTION', TOKEN_MAP.values())
 
     # ------------------------------
     # precedences
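The filter call above is what keeps the token list duplicate-free: six operators now map to 'FUNCTION', so TOKEN_MAP.values() contains that name six times, and token names passed to bison must be unique. A minimal sketch of the effect (Python 2, where filter returns a list, matching the parser's idiom; map contents assumed):

    TOKEN_MAP = {0: 'FUNCTION', 1: 'FUNCTION', 2: 'EQ', 3: 'POW'}  # assumed shape

    rest = filter(lambda t: t != 'FUNCTION', TOKEN_MAP.values())  # ['EQ', 'POW']
    tokens = ['NUMBER', 'LPAREN', 'RPAREN', 'FUNCTION'] + rest

    assert tokens.count('FUNCTION') == 1  # listed exactly once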
@@ -64,7 +65,8 @@ class Parser(BisonParser):
         ('left', ('EQ', )),
         ('left', ('NEG', )),
         ('right', ('POW', )),
-        ('right', ('SIN', 'COS', 'TAN', 'SOLVE', 'INT', 'SQRT')),
+        ('right', ('FUNCTION', )),
+        #('right', ('SIN', 'COS', 'TAN', 'SOLVE', 'INT', 'SQRT')),
         )
 
     interactive = 0
@@ -152,11 +154,14 @@ class Parser(BisonParser):
                 + '|([a-z])\s*([0-9])'    # match: a4  result: a ^ 4
                 + '|([0-9])\s+([0-9]))')  # match: 4 4 result: 4 * 4
 
+        words = zip(*filter(lambda (s, op): TOKEN_MAP[op] == 'FUNCTION', \
+                            OP_MAP.iteritems()))[0] + ('raise', 'graph')
+
         def preprocess_data(match):
             left, right = filter(None, match.groups())
 
             # Filter words (otherwise they will be preprocessed as well)
-            if (left + right).upper() in self.tokens:
+            if left + right in words:
                 return left + right
 
             # If all characters on the right are numbers. e.g. "a4", the
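The new words expression is dense: filter keeps the (string, operator) pairs whose token is FUNCTION, zip(*...) transposes those pairs into parallel tuples, and [0] picks the tuple of function names, to which 'raise' and 'graph' are appended. A minimal sketch with assumed map contents (Python 2 only; lambda tuple unpacking and iteritems do not exist in Python 3):

    # Assumed shapes; the real OP_MAP/TOKEN_MAP live in src/node.py.
    OP_SIN, OP_EQ = 0, 1
    OP_MAP = {'sin': OP_SIN, '=': OP_EQ}
    TOKEN_MAP = {OP_SIN: 'FUNCTION', OP_EQ: 'EQ'}

    pairs = filter(lambda (s, op): TOKEN_MAP[op] == 'FUNCTION',
                   OP_MAP.iteritems())           # [('sin', OP_SIN)]
    words = zip(*pairs)[0] + ('raise', 'graph')  # ('sin', 'raise', 'graph')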
@@ -336,12 +341,7 @@ class Parser(BisonParser):
     def on_unary(self, target, option, names, values):
         """
         unary : MINUS exp %prec NEG
-              | SIN exp
-              | COS exp
-              | TAN exp
-              | INT exp
-              | SOLVE exp
-              | SQRT exp
+              | FUNCTION exp
         """
 
         if option == 0:  # rule: NEG exp
@@ -354,9 +354,10 @@ class Parser(BisonParser):
 
             return values[1]
 
-        if option < 7:  # rule: SIN exp | COS exp | TAN exp | INT exp
-            if values[1].type == TYPE_OPERATOR and values[1].op == OP_COMMA:
+        if option == 1:  # rule: FUNCTION exp
+            if values[1].is_op(OP_COMMA):
                 return Node(values[0], *values[1])
+
             return Node(*values)
 
         raise BisonSyntaxError('Unsupported option %d in target "%s".'
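The comma branch flattens argument lists: for input like solve(a, b) the exp reduces to a single OP_COMMA node, and spreading it with * makes a and b direct children of the function node instead of leaving the comma node nested. A rough sketch of the two shapes, with a hypothetical stand-in for src/node.py's Node (the real class is presumably iterable over its children, which is what *values[1] relies on):

    class Node(object):  # hypothetical stand-in for src/node.py's Node
        def __init__(self, op, *children):
            self.op, self.children = op, list(children)
        def __iter__(self):
            return iter(self.children)

    comma = Node(',', Node('a'), Node('b'))  # exp parsed from "a, b"

    flat = Node('solve', *comma)    # children: [a, b]  (what the parser builds)
    nested = Node('solve', comma)   # children: [comma] (what it avoids)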
@@ -408,10 +409,19 @@ class Parser(BisonParser):
     # operator tokens
     # -----------------------------------------
     operators = ''
+    functions = []
 
     for op_str, op in OP_MAP.iteritems():
-        operators += '"%s"%s{ returntoken(%s); }\n' \
-                     % (op_str, ' ' * (8 - len(op_str)), TOKEN_MAP[op])
+        if TOKEN_MAP[op] == 'FUNCTION':
+            functions.append(op_str)
+        else:
+            operators += '"%s"%s{ returntoken(%s); }\n' \
+                         % (op_str, ' ' * (8 - len(op_str)), TOKEN_MAP[op])
+
+    # Put all functions in a single regex
+    if functions:
+        operators += '("%s") { returntoken(FUNCTION); }\n' \
+                     % '"|"'.join(functions)
 
     # -----------------------------------------
     # raw lex script, verbatim here
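The '"|"'.join stitches the function names into one quoted flex alternation, so a single lexer rule returns FUNCTION for every function name. A sketch of the string it produces, assuming the six names from this commit (dict iteration order in Python 2 is arbitrary, so the order may differ):

    functions = ['sin', 'cos', 'tan', 'int', 'solve', 'sqrt']
    rule = '("%s") { returntoken(FUNCTION); }\n' % '"|"'.join(functions)
    # rule == '("sin"|"cos"|"tan"|"int"|"solve"|"sqrt") { returntoken(FUNCTION); }\n'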