diff --git a/src/parse.py b/src/parse.py
index 3725c05..ace60f4 100644
--- a/src/parse.py
+++ b/src/parse.py
@@ -51,7 +51,7 @@
 def import_tokens(tokens):
     output = []
     for t in tokens:
-        output.append(Syntax(t, t.location, SyntaxType.TOKEN))
+        output.append(Syntax(t.value, t.location, SyntaxType.TOKEN))
     return output


@@ -59,7 +59,7 @@
 def strip_whitespace(syntax):
     output = []
     for s in syntax:
-        if s.type != SyntaxType.TOKEN or s.value.value not in ["\n", " "]:
+        if s.type != SyntaxType.TOKEN or s.value not in ["\n", " "]:
             output.append(s)
     return output

@@ -74,7 +74,7 @@
         return None
     elif s.type != SyntaxType.TOKEN:
         return None
-    elif s.value.value != "StartText":
+    elif s.value != "StartText":
         return None
     location = s.location
     # Parse following tokens
@@ -87,13 +87,13 @@
         elif s.type != SyntaxType.TOKEN:
             return None
         # Don't allow StartText in text
-        elif s.value.value in ["StartText"]:
+        elif s.value in ["StartText"]:
             return None
         # EndText found, end things
-        elif s.value.value == "EndText":
+        elif s.value == "EndText":
             break
         else:
-            buffer += s.value.value
+            buffer += s.value
     value = buffer.strip("\n\t ")
     type = SyntaxType.TEXT
     return Syntax(value, location, type)
diff --git a/tests/test_parse.py b/tests/test_parse.py
index 01ffa17..0e90a28 100644
--- a/tests/test_parse.py
+++ b/tests/test_parse.py
@@ -34,7 +34,7 @@
     value = draw(test_tokenize.draw_token_classified())
     location = draw(draw_syntax_location())
     type = parse.SyntaxType.TOKEN
-    return parse.Syntax(value, location, type)
+    return parse.Syntax(value.value, location, type)


 # Draws a text syntax value
@@ -99,7 +99,7 @@
     input = draw(lists(test_tokenize.draw_token_random()))
     tokens = []
     for t in input:
-        tokens.append(parse.Syntax(t, t.location, parse.SyntaxType.TOKEN))
+        tokens.append(parse.Syntax(t.value, t.location, parse.SyntaxType.TOKEN))
     return (input, tokens)


@@ -121,7 +121,7 @@
     input = draw(lists(draw_syntax_random()))
     syntax = []
     for s in input:
-        if s.type != parse.SyntaxType.TOKEN or s.value.value not in ["\n", " "]:
+        if s.type != parse.SyntaxType.TOKEN or s.value not in ["\n", " "]:
             syntax.append(s)
     return (input, syntax)

@@ -140,7 +140,7 @@
 @composite
 def draw_text_value_token(draw):
     token = draw(draw_syntax_token())
-    assume(token.value.value not in ["StartText", "EndText"])
+    assume(token.value not in ["StartText", "EndText"])
     return token


@@ -150,14 +150,14 @@
     tokens = draw(lists(draw_text_value_token()))
     value = ""
     for token in tokens:
-        value += token.value.value
+        value += token.value
     s_value = draw(test_tokenize.draw_token_keyword())
-    s_value.value = "StartText"
+    s_value = "StartText"
     s_location = draw(draw_syntax_location())
     s_type = parse.SyntaxType.TOKEN
     start = parse.Syntax(s_value, s_location, s_type)
     e_value = draw(test_tokenize.draw_token_keyword())
-    e_value.value = "EndText"
+    e_value = "EndText"
     e_location = draw(draw_syntax_location())
     e_type = parse.SyntaxType.TOKEN
     end = parse.Syntax(e_value, e_location, e_type)
@@ -197,10 +197,7 @@
     if draw(booleans()):
         token = draw(draw_syntax_random())
         assume(
-            not (
-                token.type == parse.SyntaxType.TOKEN
-                and token.value.value == "StartText"
-            )
+            not (token.type == parse.SyntaxType.TOKEN and token.value == "StartText")
         )
         new_tokens = [token] + tokens[1:0]
         return new_tokens
@@ -228,7 +225,7 @@
 def draw_syntax_text_invalid_extrastarttext(draw):
     (tokens, _) = draw(draw_syntax_text_valid())
     s_value = draw(test_tokenize.draw_token_keyword())
-    s_value.value = "StartText"
+    s_value = "StartText"
     s_location = draw(draw_syntax_location())
     s_type = parse.SyntaxType.TOKEN
     start = parse.Syntax(s_value, s_location, s_type)
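For review context, a minimal runnable sketch of the data-shape change this patch makes. The Token and Syntax definitions below are simplified stand-ins for illustration only, not the project's actual classes from src/tokenize.py and src/parse.py: after the patch, Syntax.value holds the token's string directly, so consumers compare s.value instead of the old double indirection s.value.value.

    # Hypothetical simplified stand-ins for the real Token and Syntax types.
    from dataclasses import dataclass
    from enum import Enum, auto


    class SyntaxType(Enum):
        TOKEN = auto()
        TEXT = auto()


    @dataclass
    class Token:
        value: str       # the token's text, e.g. "StartText"
        location: tuple  # e.g. (file, line, column)


    @dataclass
    class Syntax:
        value: str       # after this patch: the token's string, not the Token object
        location: tuple
        type: SyntaxType


    def import_tokens(tokens):
        # Post-patch behaviour: unwrap each token to its string value up front,
        # so later passes compare s.value rather than s.value.value.
        return [Syntax(t.value, t.location, SyntaxType.TOKEN) for t in tokens]


    tokens = [Token("StartText", ("f", 1, 1)), Token("\n", ("f", 1, 10))]
    syntax = import_tokens(tokens)
    assert syntax[0].value == "StartText"  # pre-patch code needed syntax[0].value.value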