diff --git a/acid/README.md b/acid/README.md index a2fd38f..a67c8f1 100644 --- a/acid/README.md +++ b/acid/README.md @@ -1,29 +1,6 @@ acid ==== -Ce module est la racine de notre compilateur. Il définit des opérations et des -types pour les différentes étapes du processus: - -- *Tokenizing*: il s'agit du découpage du code en une liste de lexèmes. -- *Parsing*: cela consiste à regrouper nos lexèmes en un AST. - -Rappel: - -**Un lexème** est un bout de chaine de caractère auquel on associe un type. Ici -Je représente mes lexèmes par la classe `Token`, et leur types par l'énumération -`TokenType`. - -**Un AST** (de l'anglais *Abstract Syntax Tree*, arbre de syntaxe abstraite) -est la représentation du programme avant sa compilation ou son exécution. -On appelle ça un arbre car on représente souvent le résultat sous cette forme -(troisième étape): - -``` - parenthèse, plus, + - ┌───────┐ parenthèse, étoile, ┌────────┐ / \ -(+ (* 3 2) 7) ───┤ lexer ├─▶ nombre(3), nombre(2), ───┤ parser ├──▶ * 7 - └───────┘ parenthèse, nombre(7), └────────┘ / \ - parenthèse 3 2 - - CODE LEXEMES AST -``` +`acid` est le module racine de notre projet. Il est composé de deux sous-modules +`parser` et `compiler`. Chacun de ces sous-modules est chargé d'une étape +spécifique, de la lecture de notre code Acid brut à son exécution. 
diff --git a/acid/__init__.py b/acid/__init__.py index a1e5c4e..d7eb3d3 100644 --- a/acid/__init__.py +++ b/acid/__init__.py @@ -2,6 +2,5 @@ # coding: utf-8 from acid.parser import * -from acid.lexer import * +from acid.compiler import * from acid.exception import * -from acid.types import * diff --git a/acid/__main__.py b/acid/__main__.py index 008a2b5..7aaed35 100644 --- a/acid/__main__.py +++ b/acid/__main__.py @@ -10,7 +10,7 @@ import os import argparse -from acid import tokenize, parse +from acid import * arg_parser = argparse.ArgumentParser( @@ -22,29 +22,32 @@ help='tokenize the given input file') arg_parser.add_argument('--ast', '-a', dest='ast', action='store_true', help='parse the given input file') -arg_parser.add_argument('--compile', '-c', dest='compile', action='store_true', - help='compile the given input file') +arg_parser.add_argument('--compile', '-c', dest='comp', action='store_true', + help='compile the given input file') -def main(path, lex=False, ast=False, compile=False): - with open(path) as file: - code = file.read() +def main(path, lex=False, ast=False, comp=False): + path = os.path.abspath(path) - if lex: - for token in tokenize(code): - print(token) + if lex: + for token in tokenize(open(path).read()): + print(token) - elif ast: - tree = parse(code, os.path.abspath(path)) - print(tree) + elif ast: + parser = Parser.from_file(path) + tree = parser.run() + print(tree) - elif compile: - raise NotImplementedError('Compiling is not implemented yet') + elif comp: + compiler = Compiler.from_file(path) + compiler.dump() + else: + if path.endswith('.acidc'): + Compiler.execute_compiled_file(path) else: - raise NotImplementedError('The interpreter is not implemented yet') - # when the interpreter will be implemented - # execute(code) + compiler = Compiler.from_file(path) + compiler.execute() if __name__ == '__main__': args = arg_parser.parse_args() diff --git a/acid/compiler/README.md b/acid/compiler/README.md new file mode 100644 index 0000000..267856a --- 
/dev/null +++ b/acid/compiler/README.md @@ -0,0 +1,17 @@ +acid.compiler +============= + +Ce module s'occupe de traduire notre AST Acid en AST Python grâce au module `ast` +de la librairie standard de Python. Ainsi, grâce à notre AST Python obtenu, nous +pouvons "compiler" notre code Acid en objet compréhensible par l'interpréteur +Python via la fonction *built-in* `compile`. + +Notre classe `Compiler` est chargée de compiler notre code Acid brut en un objet +de code Python, et éventuellement de le stocker dans un fichier binaire en ROM. + +Ainsi, nous pouvons exécuter plus tard un code Acid sans passer par les étapes +du *lexing*, du *parsing*, et de la traduction en AST Python. + +Note: ce n'est pas de la compilation en code machine, mais plutôt en *bytecode* +de la machine virtuelle de Python. Nous ne pouvons pas transformer notre code +en exécutable (ni sous Windows, ni sous Linux/OS X) avec cette technique. diff --git a/acid/compiler/__init__.py b/acid/compiler/__init__.py new file mode 100644 index 0000000..e2dfa4b --- /dev/null +++ b/acid/compiler/__init__.py @@ -0,0 +1,5 @@ +#!/usr/bin/env python3.4 +# coding: utf-8 + +from acid.compiler.compiler import * +from acid.compiler.translations import * diff --git a/acid/compiler/compiler.py b/acid/compiler/compiler.py new file mode 100644 index 0000000..fe0ac22 --- /dev/null +++ b/acid/compiler/compiler.py @@ -0,0 +1,103 @@ +#!/usr/bin/env python3.4 +# coding: utf-8 + +""" +This module defines a compiler class which can compile and dump an Acid code +to a Python code object. + +Contributors: myrma +""" + +import os +import ast +import marshal + +from acid.parser import Parser +from acid.prelude import default_env + + +class Compiler: + """ + Compiles an Acid AST to a Python AST. + """ + + translations = {} + + def __init__(self, ast, path=None): + self.path = path + self.ast = ast + + @classmethod + def from_file(cls, path): + """ + Loads the Acid AST from a given path. 
+ """ + + parser = Parser.from_file(path) + ast = parser.run() + return cls(ast, path) + + @classmethod + def execute_compiled_file(cls, path, prelude=default_env): + """ + Executes a Python code object stored in a file. + """ + + with open(path, 'rb') as compiled_file: + code = marshal.load(compiled_file) + + exec(code, prelude) + + @classmethod + def register(cls, *node_types): + """ + Registers a translation from an Acid AST node to a Python AST node. + """ + + def _decorator_wrapper(translation): + for node_type in node_types: + cls.translations[node_type] = translation + + return translation + + return _decorator_wrapper + + def translate(self, node): + """ + Translates an Acid AST node into a Python AST node. + """ + + py_ast = self.translations[type(node)](self, node) + return ast.fix_missing_locations(py_ast) + + def compile(self): + """ + Compiles the Acid AST to a Python code object. + """ + + py_ast = self.translate(self.ast) + + code = compile(py_ast, self.path or '', mode='exec') + return code + + def dump(self, target=None): + """ + Dumps the Python code object to a given path. + """ + + if target is None and self.path is None: + raise ValueError('Unspecified target path') + + code = self.compile() + target = target or os.path.basename(self.path).split('.')[0] + '.acidc' + + with open(target, 'wb') as dump_file: + marshal.dump(code, dump_file) + + def execute(self, prelude=default_env): + """ + Executes the resulting Python code object. + """ + + code = self.compile() + exec(code, prelude) diff --git a/acid/compiler/translations.py b/acid/compiler/translations.py new file mode 100644 index 0000000..7dc0d4f --- /dev/null +++ b/acid/compiler/translations.py @@ -0,0 +1,69 @@ +#!/usr/bin/env python3.4 +# coding: utf-8 + +""" +This module defines some conversions between Acid's and Python's AST nodes. 
+ +Contributors: myrma +""" + +import ast as python_ast + +from acid.compiler.compiler import Compiler +from acid.parser.ast import * + + +@Compiler.register(Program) +def translate_program(compiler, program): + instrs = map(compiler.translate, program.instructions) + module = python_ast.Module(body=list(instrs)) + return module + + +@Compiler.register(Declaration) +def translate_declaration(compiler, declaration): + assign = python_ast.Assign() + assign.targets = [ + python_ast.Name(id=declaration.name, ctx=python_ast.Store()) + ] + assign.value = compiler.translate(declaration.value) + return assign + + +@Compiler.register(TopLevelExpr) +def translate_toplevel_expr(compiler, expr): + return python_ast.Expr(compiler.translate(expr.expr)) + + +@Compiler.register(Call) +def translate_call(compiler, call): + return python_ast.Call( + func=compiler.translate(call.func), + args=list(map(compiler.translate, call.args)), + keywords=[] + ) + + +@Compiler.register(Lambda) +def translate_lambda(compiler, lambda_): + return python_ast.Lambda( + args=python_ast.arguments( + args=list(map(lambda n: python_ast.arg(arg=n, annotation=None), lambda_.params)), + vararg=None, + kwonlyargs=[], + kw_defaults=[], + kwarg=None, + defaults=[] + ), + body=compiler.translate(lambda_.body) + ) + + +@Compiler.register(Variable) +def translate_variable(compiler, var): + return python_ast.Name(var.name, python_ast.Load()) + + +@Compiler.register(IntLiteral, FloatLiteral) +def translate_num(compiler, num): + return python_ast.Num(num.value) diff --git a/acid/parser/README.md b/acid/parser/README.md new file mode 100644 index 0000000..7673268 --- /dev/null +++ b/acid/parser/README.md @@ -0,0 +1,29 @@ +acid.parser +=========== + +Ce module est la racine de notre parser. Il définit des opérations et des +types pour les différentes étapes du processus: + +- *Tokenizing*: il s'agit du découpage du code en une liste de lexèmes. +- *Parsing*: cela consiste à regrouper nos lexèmes en un AST. 
+ +Rappel: + +**Un lexème** est un bout de chaine de caractère auquel on associe un type. Ici +Je représente mes lexèmes par la classe `Token`, et leur types par l'énumération +`TokenType`. + +**Un AST** (de l'anglais *Abstract Syntax Tree*, arbre de syntaxe abstraite) +est la représentation du programme avant sa compilation ou son exécution. +On appelle ça un arbre car on représente souvent le résultat sous cette forme +(troisième étape): + +``` + parenthèse, plus, + + ┌───────┐ parenthèse, étoile, ┌────────┐ / \ +(+ (* 3 2) 7) ───┤ lexer ├─▶ nombre(3), nombre(2), ───┤ parser ├──▶ * 7 + └───────┘ parenthèse, nombre(7), └────────┘ / \ + parenthèse 3 2 + + CODE LEXEMES AST +``` diff --git a/acid/parser/__init__.py b/acid/parser/__init__.py new file mode 100644 index 0000000..f609c3b --- /dev/null +++ b/acid/parser/__init__.py @@ -0,0 +1,8 @@ +#!/usr/bin/env python3.4 +# coding: utf-8 + +from acid.parser.parser import * +from acid.parser.ast import * +from acid.parser.rules import * +from acid.parser.lexer import * +from acid.parser.types import * diff --git a/acid/parser/ast.py b/acid/parser/ast.py new file mode 100644 index 0000000..55d13e9 --- /dev/null +++ b/acid/parser/ast.py @@ -0,0 +1,136 @@ +#!/usr/bin/env python3.4 +# coding: utf-8 + +""" +Defines the AST structure of programs and expressions. + +Contributors: myrma +""" + +__all__ = [ + 'Program', # program AST + 'Stmt', 'Expr', 'Literal', # abstract AST nodes + 'Declaration', 'TopLevelExpr', # top-level statement + 'Call', 'Lambda', # calls + 'Variable', 'IntLiteral', 'FloatLiteral', # atoms +] + + +class Program: + """ + Represents a sequence of instructions. + """ + + def __init__(self, instructions, path=None): + self.path = path + self.instructions = instructions + + def __repr__(self): + fmt = 'Program(path={0.path!r}, instructions={0.instructions})' + return fmt.format(self) + + +class Stmt: + """ + Abstract AST element representing a top-level statement. 
+ """ + + +class Declaration(Stmt): + """ + Declaring a name. + ex: `(define pi 3.14)` + """ + + def __init__(self, name, value): + self.name = name + self.value = value + + def __repr__(self): + return 'Declaration(name={0.name!r}, value={0.value!r})'.format(self) + + +class TopLevelExpr(Stmt): + """ + Regular expression at top-level. + ex: `(+ 1 2)` + """ + + def __init__(self, expr): + self.expr = expr + + def __repr__(self): + return 'TopLevelExpr(expr={0.expr!r})'.format(self) + + +class Expr: + """ + Abstract AST element representing an expression node. + """ + + +class Call(Expr): + """ + Function call. + ex: `(func x y z)` + """ + + def __init__(self, func, args): + self.func = func + self.args = args + + def __repr__(self): + return 'Call(func={0.func!r}, args={0.args})'.format(self) + + +class Lambda(Expr): + """ + Lambda function definition. + ex: `(lambda (x y) (+ x y))` + """ + + def __init__(self, params, body): + self.params = params + self.body = body + + def __repr__(self): + return 'Lambda(params={0.params!r}, body={0.body!r})'.format(self) + + +class Variable(Expr): + """ + Variable name. + ex: `pi` + """ + + def __init__(self, name): + self.name = name + + def __repr__(self): + return 'Variable(name={0.name!r})'.format(self) + + +class Literal(Expr): + """ + Abstract literal expression. + """ + + def __init__(self, value): + self.value = value + + def __repr__(self): + return '{0.__class__.__name__}(value={0.value!r})'.format(self) + + +class IntLiteral(Literal): + """ + Integer literal expression. + ex: `42` + """ + + +class FloatLiteral(Literal): + """ + Floating point number literal expression. + ex: `3.14` + """ diff --git a/acid/parser/lexer.py b/acid/parser/lexer.py new file mode 100644 index 0000000..5920ed0 --- /dev/null +++ b/acid/parser/lexer.py @@ -0,0 +1,119 @@ +#!/usr/bin/env python3.4 +# coding: utf-8 + +""" +Defines some types and functions for tokenizing a given code string. 
+ +Contributors: myrma +""" + +__all__ = ['TokenType', 'Token', 'tokenize'] + + +import re +from enum import Enum +from itertools import dropwhile + +from acid.parser.types import SourcePos, SourceSpan +from acid.exception import ParseError + + +# derive from Enum to allow iteration through the token types +class TokenType(Enum): + """ + Lists every token type and stores their regular expression pattern. + """ + + def __init__(self, pattern): + self.regex = re.compile(pattern) + + DEFINE = r'define' + LAMBDA = r'lambda' + LINE_COMMENT = r'//' + COMMENT_START, COMMENT_END = r'/\*', r'\*/' + LPAREN, RPAREN = r'\(', r'\)' + FLOAT_LITERAL = r'\d+\.\d+' + INT_LITERAL = r'\d+' + ATOM = r"[\w+\-'*/:,$<>=~#&|@ç^_%!?.]+" + WHITESPACE = r'\s+' + + +class Token: + """ + Concrete lexeme type. + """ + + def __init__(self, type, value, span): + self.type = type + self.value = value + self.span = span + + def __repr__(self): + fmt = 'Token(type={tok.type}, value={tok.value!r}, span={tok.span!r})' + return fmt.format(tok=self) + + @property + def pos(self): + return self.span.start + + +def tokenize(code): + """ + Chop the given string in Token instances. 
+ """ + + cursor = SourcePos(line=1, column=1) + + while code: + # iterates over all TokenType instances in order + for token_type in TokenType: + match = token_type.regex.match(code) + + if match is not None: + # source position before the code is consumed + startpos = cursor.copy() + + # pop the matched string + code = code[match.end():] + + # value is assigned to the entire match string + value = match.group(0) + + # update cursor position (line and column index) + cursor.feed(value) + + if token_type == TokenType.LINE_COMMENT: + # drop every character until newline + code = ''.join(dropwhile(lambda c: c != '\n', code)) + + elif token_type == TokenType.COMMENT_START: + # test if the code matches a comment ending token + m = TokenType.COMMENT_END.regex.match(code) + + # while the comment block is not ended + while m is None: + # feed a character from the comment string + cursor.feed(code[0]) + + # pop a single character + code = code[1:] + + # retest if the code matches a comment ending token + m = TokenType.COMMENT_END.regex.match(code) + + # pop the matched string + code = code[m.end():] + + # skipping whitespace + elif token_type is not TokenType.WHITESPACE: + # copy the cursor to avoid unwanted reference + endpos = cursor.copy() + + span = SourceSpan(startpos, endpos) + + yield Token(token_type, value, span) + + break + else: + # when every token type has been tried + raise ParseError(pos, "Failed to tokenize code") diff --git a/acid/parser/parser.py b/acid/parser/parser.py new file mode 100644 index 0000000..3d60b81 --- /dev/null +++ b/acid/parser/parser.py @@ -0,0 +1,189 @@ +#!/usr/bin/env python3.4 +# coding: utf-8 + +""" +Declares the Parser class, which can transform a code string into an AST. 
+ +Contributors: myrma +""" + +import functools + +from acid.parser.ast import * +from acid.parser.lexer import TokenType, tokenize +from acid.exception import ParseError + + +# todo: refactor consume_stmt and consume_expr, register_stmt and register_expr + +class Parser: + """ + Registers some consumers to parse the AST. + """ + + expr_consumers = [] + stmt_consumers = [] + + def __init__(self, code, path=None): + self.path = path + self.code = code + self.error = None + + @classmethod + def from_file(cls, path): + with open(path) as file: + code = file.read() + return cls(code, path) + + @classmethod + def register_expr(cls, priority=1): + """ + Registers a given consumer function with a priority. `priority` is an + integer defining the order in which expression types try to parse from + the token queue. The closest this number if from 1, the highest will be + its priority. + + `priority` must be greater than one (not strictly). + """ + + def _decorator_wrapper(consumer): + @functools.wraps(consumer) + def _consumer_wrapper(self, token_queue): + # copies the token list + tmp_queue = token_queue[:] + + try: + node = consumer(self, tmp_queue) + except ParseError: + raise + except IndexError: + # when the user tries to call token_queue.pop(0) + raise ParseError(token_queue[0].pos, 'Unexpected EOF') + else: + # assign tmp_queue to reference token_queue + token_queue[:] = tmp_queue + return node + + cls.expr_consumers.insert(priority - 1, _consumer_wrapper) + + return _decorator_wrapper + + @classmethod + def register_stmt(cls, priority=1): + """ + Registers a given consumer function with a priority. `priority` is an + integer defining the order in which expression types try to parse from + the token queue. The closest this number if from 1, the highest will be + its priority. + + `priority` must be greater than one (not strictly). 
+ """ + + def _decorator_wrapper(consumer): + @functools.wraps(consumer) + def _consumer_wrapper(self, token_queue): + # copies the token list + tmp_queue = token_queue[:] + + try: + node = consumer(self, tmp_queue) + except ParseError: + raise + except IndexError: + # when the user tries to call token_queue.pop(0) + raise ParseError(token_queue[0].pos, 'Unexpected EOF') + else: + # assign tmp_queue to reference token_queue + token_queue[:] = tmp_queue + return node + + cls.stmt_consumers.insert(priority - 1, _consumer_wrapper) + + return _decorator_wrapper + + def consume_expr(self, token_queue): + """ + Tries to parse an Expr node from a token list. + This does not affect the list if the function failed to parse. + """ + + # tries every concrete Expr node + for consumer in self.expr_consumers: + try: + node = consumer(self, token_queue) + except ParseError as e: + error = e + continue + else: + return node + else: + # when every expr node has been tried, but none succeeded to parse + raise error + + def consume_stmt(self, token_queue): + """ + Tries to parse an Stmt node from a token list. + This does not affect the list if the function failed to parse. + """ + + # tries every concrete Expr node + for consumer in self.stmt_consumers: + try: + node = consumer(self, token_queue) + except ParseError as e: + error = e + continue + else: + return node + else: + # when every stmt node has been tried, but none succeeded to parse + raise error + + + def run(self): + """ + Parses a given string into a Program object. + """ + + token_queue = list(tokenize(self.code)) # the tokenized string + instrs = [] # the instructions of the program + + while token_queue: + try: + # tries to parse an expression from the token queue + instr = self.consume_stmt(token_queue) + except ParseError: + raise # when no expression could be parsed + else: + # append the instruction to the program + instrs.append(instr) + + # returns the resulting Program object. 
+ return Program(instrs, self.path) + + +def expect(token_type, token_queue): + """ + Tries to consume a single token from the token queue. + Returns the token if the next token is of the given type, raises a + ParseError otherwise. + """ + + token = token_queue.pop(0) + + # if the next token is not of the expected type + if token.type != token_type: + msg = 'Expected {}, got {}'.format(token_type.name, token.type.name) + raise ParseError(token.pos, msg) + + return token + + +def parse(code, path=None): + """ + Parses the given code, without needing to instantiate a Parser object. + """ + + parser = Parser(code, path) + ast = parser.run() + return ast diff --git a/acid/parser/rules.py b/acid/parser/rules.py new file mode 100644 index 0000000..dbfe828 --- /dev/null +++ b/acid/parser/rules.py @@ -0,0 +1,97 @@ +#!/usr/bin/env python3.4 +# coding: utf-8 + +""" +This module defines the parser rules. To define a custom parser rule, use this +snippet as a template: + + @Parser.register(priority=[n]) + def [rule_name](self, token_queue): + # To consume a token of a given type: + expected_token = expect([token type], token_queue) + + ... 
# Processing tokens + + return [AST node] + +Contributors: myrma +""" + +from acid.parser.parser import Parser, expect +from acid.parser.lexer import * +from acid.parser.ast import * +from acid.exception import * + + +@Parser.register_stmt(priority=1) +def consume_declaration(self, token_queue): + expect(TokenType.LPAREN, token_queue) + expect(TokenType.DEFINE, token_queue) + + atom = expect(TokenType.ATOM, token_queue) + name = atom.value + + value = self.consume_expr(token_queue) + expect(TokenType.RPAREN, token_queue) + return Declaration(name, value) + + +@Parser.register_stmt(priority=2) +def consume_toplevel_expr(self, token_queue): + expr = self.consume_expr(token_queue) + return TopLevelExpr(expr) + + +@Parser.register_expr(priority=2) +def consume_call(self, token_queue): + expect(TokenType.LPAREN, token_queue) + func = self.consume_expr(token_queue) + + args = [] + # consumes expressions as long as it parses. + while True: + try: + arg = self.consume_expr(token_queue) + except ParseError: + break + else: + args.append(arg) + + expect(TokenType.RPAREN, token_queue) + + return Call(func, args) + + +@Parser.register_expr(priority=1) +def consume_lambda(self, token_queue): + expect(TokenType.LPAREN, token_queue) + expect(TokenType.LAMBDA, token_queue) + expect(TokenType.LPAREN, token_queue) + + params = [] + while token_queue[0].type == TokenType.ATOM: + token = token_queue.pop(0) + params.append(token.value) + + expect(TokenType.RPAREN, token_queue) + body = self.consume_expr(token_queue) + expect(TokenType.RPAREN, token_queue) + return Lambda(params, body) + + +@Parser.register_expr(priority=1) +def consume_variable(self, token_queue): + atom = expect(TokenType.ATOM, token_queue) + return Variable(atom.value) + + +@Parser.register_expr(priority=1) +def consume_int_literal(self, token_queue): + token = expect(TokenType.INT_LITERAL, token_queue) + return IntLiteral(int(token.value)) + + +@Parser.register_expr(priority=1) +def consume_float_literal(self, 
token_queue): + token = expect(TokenType.FLOAT_LITERAL, token_queue) + return FloatLiteral(float(token.value)) diff --git a/acid/parser/types.py b/acid/parser/types.py new file mode 100644 index 0000000..8372fd1 --- /dev/null +++ b/acid/parser/types.py @@ -0,0 +1,67 @@ +#!/usr/bin/env python3.4 +# coding: utf-8 + +""" +Defines some types that are used by several modules in the package. + +Contributors: myrma +""" + + +class SourcePos: + """ + Represents a position in a file. + """ + + def __init__(self, line, column): + self.line, self.column = line, column + + def __repr__(self): + return 'SourcePos(line={pos.line}, col={pos.column})'.format(pos=self) + + def __str__(self): + return 'line {pos.line}, column {pos.column}'.format(pos=self) + + def feed(self, string): + """ + Updates the position from a given string. + + Note: This code assumes that the string contains UNIX line terminators. + """ + + for char in string: + if char == '\n': + self.line += 1 # increment line + self.column = 1 # reset column index + else: + self.column += 1 + + def copy(self): + """ + Copies the instance to avoid unwanted references. + """ + + return SourcePos(line=self.line, column=self.column) + + +class SourceSpan: + """ + Represents a span between two positions in a file. + """ + + def __init__(self, start, end): + self.start = start + self.end = end + + def __repr__(self): + return 'SourceSpan(start={0.start!r}, end={0.end!r})'.format(self) + + def __str__(self): + if self.start.line == self.end.line: + fmt = 'line {line} from column {colstart} to column {colend}' + return fmt.format(line=self.start.line, + colstart=self.start.column, + colend=self.end.column) + + else: + return '{0.start!s} to {0.end!s}'.format(self) diff --git a/acid/prelude.py b/acid/prelude.py new file mode 100644 index 0000000..c6a0196 --- /dev/null +++ b/acid/prelude.py @@ -0,0 +1,40 @@ +#!/usr/bin/env python3.4 +# coding: utf-8 + +""" +This module defines the builtin values of Acid. 
+ +Contributors: myrma +""" + +import operator as op +from functools import reduce + + +default_env = { + '+': lambda *xs: reduce(op.add, xs), # or just `sum` + '-': op.sub, + '*': lambda *xs: reduce(op.mul, xs), + '/': op.truediv, + '**': op.pow, + 'div': op.floordiv, + 'mod': op.mod, + '==': op.eq, + '!=': op.ne, + '<': op.lt, + '<=': op.le, + '>': op.gt, + '>=': op.ge, + 'and': op.and_, + 'xor': op.xor, + 'or': op.or_, + 'not': op.not_, + '<<': op.lshift, + '>>': op.rshift, + '~': op.invert, + '#': op.getitem, + '#=': op.setitem, + '#~': op.delitem, + 'negate': op.neg, + 'print': print, +}