Added compound assignment operators and the while loop shorthand #112

Open
wants to merge 3 commits into base: main
1 change: 1 addition & 0 deletions .gitignore
@@ -15,3 +15,4 @@ docs/_build/
.pytest_cache/

.DS_Store
build/
Empty file added __init__.py
25 changes: 15 additions & 10 deletions pico8/lua/lexer.py
@@ -4,7 +4,6 @@

from .. import util


__all__ = [
'LexerError',
'Token',
@@ -200,15 +199,15 @@ def value(self):
if b'.' in self._data:
integer, frac = self._data.split(b'.')
return (
float(int(integer, 16)) +
float(int(frac, 16))/(16**len(frac)))
float(int(integer, 16)) +
float(int(frac, 16)) / (16 ** len(frac)))
return float(int(self._data, 16))
if b'b' in self._data:
if b'.' in self._data:
integer, frac = self._data.split(b'.')
return (
float(int(integer, 2)) +
float(int(frac, 2))/(2**len(frac)))
float(int(integer, 2)) +
float(int(frac, 2)) / (2 ** len(frac)))
return float(int(self._data, 2))
return float(self._data)
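
For reference, the fraction handling above computes the integer part plus the fractional digits scaled by the base. A standalone sketch of that arithmetic (parse_fixed is an illustrative helper, not part of picotool):

    def parse_fixed(data: bytes, base: int) -> float:
        # Value = integer part + fractional digits / base**len(frac).
        if b'.' in data:
            integer, frac = data.split(b'.')
            return (float(int(integer, base)) +
                    float(int(frac, base)) / (base ** len(frac)))
        return float(int(data, base))

    assert parse_fixed(b'1.8', 16) == 1.5    # 0x1.8 -> 1 + 8/16
    assert parse_fixed(b'10.1', 2) == 2.5    # 0b10.1 -> 2 + 1/2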

@@ -271,15 +270,22 @@ class TokSymbol(Token):
(re.compile(br'::[a-zA-Z_\x80-\xff][a-zA-Z0-9_\x80-\xff]*::'), TokLabel),
])
_TOKEN_MATCHERS.extend([
(re.compile(br'\b'+keyword+br'\b'), TokKeyword)
(re.compile(br'\b' + keyword + br'\b'), TokKeyword)
for keyword in LUA_KEYWORDS])
# REMINDER: token patterns are ordered! The lexer stops at the first matching
# pattern. This is especially tricky for the symbols because you have to make
# sure symbols like >>> appear before >>. (If >> appeared first in this list,
# >>> would never match.)
_TOKEN_MATCHERS.extend([
(re.compile(symbol), TokSymbol) for symbol in [

]])
_TOKEN_MATCHERS.extend([
(re.compile(symbol), TokSymbol) for symbol in [
br'\\=',
br'\+=', b'-=', br'\*=', b'/=', b'%=', br'\.\.=',
br'\^=', br'\|=', b'&=', br'\^\^=', b'<<=',
b'>>=', b'>>>=', b'<<>=', b'>><=',
b'==', b'~=', b'!=', b'<=', b'>=',
b'&', br'\|', br'\^\^', b'~', b'<<>', b'>>>', b'>><', b'<<', b'>>',
br'\\',
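
The ordering warning in the comment above is easy to demonstrate: a first-match scan over the patterns behaves like the lexer loop. A trimmed illustration, not code from this PR:

    import re

    def first_match(patterns, s):
        # The lexer stops at the first pattern that matches, so order matters.
        for pat in patterns:
            if re.match(pat, s):
                return pat
        return None

    ordered = [br'>>>=', br'>>=', br'>>>', br'>>']
    assert first_match(ordered, b'>>>=1') == br'>>>='
    # With >> listed first, the longer operators could never match:
    assert first_match([br'>>', br'>>>='], b'>>>=1') == br'>>'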
@@ -311,7 +317,6 @@ def __init__(self, version):
self._tokens = []
self._cur_lineno = 0
self._cur_charno = 0

# If inside a string literal (else None):
# * the pos of the start of the string
self._in_string_lineno = None
@@ -360,7 +365,7 @@ def _process_token(self, s):
if self._in_string is not None:
# Continue string literal.
while i < len(s):
c = s[i:i+1]
c = s[i:i + 1]

if c == self._in_string_delim:
# End string literal.
@@ -378,12 +383,12 @@

if c == b'\\':
# Escape character.
num_m = re.match(br'\d{1,3}', s[i+1:])
num_m = re.match(br'\d{1,3}', s[i + 1:])
if num_m:
c = bytes([int(num_m.group(0))])
i += len(num_m.group(0))
else:
next_c = s[i+1:i+2]
next_c = s[i + 1:i + 2]
if next_c in _STRING_ESCAPES:
c = _STRING_ESCAPES[next_c]
i += 1
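
The decimal-escape branch above turns a backslash followed by one to three digits into a single byte. A self-contained sketch of that step, using the same regex (decode_decimal_escape is an illustrative name, not picotool API):

    import re

    def decode_decimal_escape(s: bytes, i: int):
        # s[i] is the backslash; return (decoded byte, characters consumed).
        num_m = re.match(br'\d{1,3}', s[i + 1:])
        if num_m is None:
            raise ValueError('not a decimal escape')
        return bytes([int(num_m.group(0))]), 1 + len(num_m.group(0))

    assert decode_decimal_escape(b'\\65rest', 0) == (b'A', 3)  # \65 -> 'A'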
55 changes: 40 additions & 15 deletions pico8/lua/parser.py
@@ -9,7 +9,6 @@
from .. import util
from . import lexer


__all__ = [ # noqa: F822
'Parser',
'ParserError',
@@ -47,8 +46,8 @@
'Function',
'FunctionBody',
'TableConstructor',
'FieldOtherThing',
'FieldNamed',
'FieldExpKey',
'FieldNamedKey',
'FieldExp',
]

@@ -96,7 +95,7 @@ def _add_token_group(self, fieldname, fieldvalue, tokenlist, pos):
pos = self._add_token_group(
(fieldname, inner_i), inner, tokenlist, pos)
else:
self._token_groups.append(tokenlist[pos:pos+1])
self._token_groups.append(tokenlist[pos:pos + 1])
pos += 1
return pos

Expand Down Expand Up @@ -226,12 +225,12 @@ def node_init(self, *args, **kwargs):
for k in kwargs:
setattr(self, k, kwargs[k])


cls = type(name, (Node,), {'__init__': node_init,
'_name': name, '_fields': fields,
'_children': None})
globals()[name] = cls


# (!= is PICO-8 specific.)
BINOP_PATS = (tuple([lexer.TokSymbol(sym) for sym in [
b'&', b'|', b'^^', b'<<', b'>>', b'>>>', b'<<>', b'>><', b'\\',
@@ -305,15 +304,15 @@ def _accept(self, tok_pattern):
while True:
cur_tok = self._peek()
if (cur_tok is None or
cur_tok.matches(tok_pattern) or
(not isinstance(cur_tok, lexer.TokSpace) and
not isinstance(cur_tok, lexer.TokNewline) and
not isinstance(cur_tok, lexer.TokComment))):
cur_tok.matches(tok_pattern) or
(not isinstance(cur_tok, lexer.TokSpace) and
not isinstance(cur_tok, lexer.TokNewline) and
not isinstance(cur_tok, lexer.TokComment))):
break
self._pos += 1

if (cur_tok is not None and
cur_tok.matches(tok_pattern) and
cur_tok.matches(tok_pattern) and
(self._max_pos is None or self._pos < self._max_pos)):
self._pos += 1
return cur_tok
@@ -435,8 +434,18 @@ def _stat(self):
self._accept(lexer.TokSymbol(b'-=')) or
self._accept(lexer.TokSymbol(b'*=')) or
self._accept(lexer.TokSymbol(b'/=')) or
self._accept(lexer.TokSymbol(b'\\=')) or
self._accept(lexer.TokSymbol(b'%=')) or
self._accept(lexer.TokSymbol(b'..=')))
self._accept(lexer.TokSymbol(b'^=')) or
self._accept(lexer.TokSymbol(b'..=')) or
self._accept(lexer.TokSymbol(b'|=')) or
self._accept(lexer.TokSymbol(b'&=')) or
self._accept(lexer.TokSymbol(b'^^=')) or
self._accept(lexer.TokSymbol(b'<<=')) or
self._accept(lexer.TokSymbol(b'>>=')) or
self._accept(lexer.TokSymbol(b'>>>=')) or
self._accept(lexer.TokSymbol(b'<<>=')) or
self._accept(lexer.TokSymbol(b'>><=')))
if assign_op is not None:
explist = self._assert(self._explist(),
'Expected expression in assignment')
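
All of these operators desugar to a plain assignment (x += 1 behaves as x = x + 1, s ..= t as s = s .. t). An illustrative mapping of the operator list accepted above, assuming that desugaring; this helper is a sketch, not code from this PR:

    # Keys mirror the compound operators accepted above.
    COMPOUND_OPS = {
        b'+=': b'+', b'-=': b'-', b'*=': b'*', b'/=': b'/', b'\\=': b'\\',
        b'%=': b'%', b'^=': b'^', b'..=': b'..', b'|=': b'|', b'&=': b'&',
        b'^^=': b'^^', b'<<=': b'<<', b'>>=': b'>>', b'>>>=': b'>>>',
        b'<<>=': b'<<>', b'>><=': b'>><',
    }

    def desugar(target: bytes, op: bytes, exp: bytes) -> bytes:
        # x += 1  ->  x = x + (1); parentheses preserve precedence.
        return (target + b' = ' + target + b' ' +
                COMPOUND_OPS[op] + b' (' + exp + b')')

    assert desugar(b'x', b'+=', b'1') == b'x = x + (1)'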
@@ -456,6 +465,22 @@ def _stat(self):

if self._accept(lexer.TokKeyword(b'while')) is not None:
exp = self._assert(self._exp(), 'exp in while')
do_pos = self._pos
if (self._accept(lexer.TokKeyword(b'do')) is None and
(self._tokens[exp._end_token_pos - 1] == lexer.TokSymbol(b')'))):
# Check for PICO-8 short form.
do_end_pos = exp._end_token_pos
while (do_end_pos < len(self._tokens) and
not self._tokens[do_end_pos].matches(lexer.TokNewline)):
do_end_pos += 1
try:
self._max_pos = do_end_pos
block = self._assert(self._chunk(),
'valid chunk in short-while')
finally:
self._max_pos = None
return StatWhile(exp, block, start=pos, end=self._pos, short_while=True)
self._pos = do_pos
self._expect(lexer.TokKeyword(b'do'))
block = self._assert(self._chunk(), 'block in while')
self._expect(lexer.TokKeyword(b'end'))
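
PICO-8's short form (while (cond) stmt, with the body ending at the newline) is what the do_end_pos scan above implements. A minimal sketch of that scan over a simplified token stream (tuples stand in for picotool's Token classes):

    # Simplified (kind, text) tokens for: while (x < 10) x += 1 \n y
    tokens = [('keyword', b'while'), ('symbol', b'('), ('name', b'x'),
              ('symbol', b'<'), ('number', b'10'), ('symbol', b')'),
              ('name', b'x'), ('symbol', b'+='), ('number', b'1'),
              ('newline', b'\n'), ('name', b'y')]

    def short_while_body(tokens, exp_end):
        # Scan from the end of the condition to the newline, as above.
        end = exp_end
        while end < len(tokens) and tokens[end][0] != 'newline':
            end += 1
        return tokens[exp_end:end]

    # Condition ends after ')' at index 6; the body is the rest of the line.
    assert short_while_body(tokens, 6) == [
        ('name', b'x'), ('symbol', b'+='), ('number', b'1')]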
@@ -475,7 +500,7 @@ def _stat(self):

then_pos = self._pos
if (self._accept(lexer.TokKeyword(b'then')) is None and
self._accept(lexer.TokKeyword(b'do')) is None and
self._accept(lexer.TokKeyword(b'do')) is None and
(self._tokens[exp._end_token_pos - 1] == lexer.TokSymbol(b')'))):
# Check for PICO-8 short form.

@@ -668,7 +693,7 @@ def _var(self):
"""
exp_prefix = self._prefixexp()
if (isinstance(exp_prefix, VarName) or
isinstance(exp_prefix, VarAttribute) or
isinstance(exp_prefix, VarAttribute) or
isinstance(exp_prefix, VarIndex)):
return exp_prefix
return None
@@ -920,8 +945,8 @@ def _functioncall(self):

full_exp = self._prefixexp()
if (full_exp is None or
(not isinstance(full_exp, FunctionCall) and
not isinstance(full_exp, FunctionCallMethod))):
(not isinstance(full_exp, FunctionCall) and
not isinstance(full_exp, FunctionCallMethod))):
self._pos = pos
return None
return full_exp