diff --git a/src/parse.py b/src/parse.py
index 93236ab..fd3efbf 100644
--- a/src/parse.py
+++ b/src/parse.py
@@ -40,16 +40,19 @@
     return output
 
 
-# Removes whitespace tokens
-def strip_whitespace(tokens):
+# Removes whitespace syntax tokens
+def strip_whitespace(syntax):
     output = []
-    for t in tokens:
-        if t.type not in [tokenize.TokenType.SPACE, tokenize.TokenType.NEWLINE]:
-            output.append(t)
+    for s in syntax:
+        if s.type != SyntaxType.TOKEN:
+            output.append(s)
+        elif s.value.type not in [tokenize.TokenType.SPACE, tokenize.TokenType.NEWLINE]:
+            output.append(s)
     return output
 
 
 # Parses tokens
 def parse(tokens):
-    stripped = strip_whitespace(tokens)
+    converted = import_tokens(tokens)
+    stripped = strip_whitespace(converted)
     return stripped
diff --git a/tests/test_parse.py b/tests/test_parse.py
index 236fafc..fa25f77 100644
--- a/tests/test_parse.py
+++ b/tests/test_parse.py
@@ -79,25 +79,27 @@
     assert parse.import_tokens(input) == syntax
 
 
-# Draws classified tokens and a list without whitespace in them
+# Draws syntax and a syntax without whitespace in it
 @composite
-def draw_tokens_whitespace(draw):
-    input = draw(lists(test_tokenize.draw_token_classified()))
-    tokens = []
-    for t in input:
-        if t.type not in [tokenize.TokenType.SPACE, tokenize.TokenType.NEWLINE]:
-            tokens.append(t)
-    return (input, tokens)
+def draw_syntax_whitespace(draw):
+    input = draw(lists(draw_syntax_random()))
+    syntax = []
+    for s in input:
+        if s.type != parse.SyntaxType.TOKEN:
+            syntax.append(s)
+        elif s.value.type not in [tokenize.TokenType.SPACE, tokenize.TokenType.NEWLINE]:
+            syntax.append(s)
+    return (input, syntax)
 
 
 # Tests strip_whitespace works correctly
 # We expect the following behaviour:
-# - No tokens are modified
+# - No syntax is modified
 # - Tokens of type SPACE or NEWLINE are removed from the output
-@given(draw_tokens_whitespace())
+@given(draw_syntax_whitespace())
 def test_parse_strip_whitespace(test_data):
-    (input, tokens) = test_data
-    assert parse.strip_whitespace(input) == tokens
+    (input, syntax) = test_data
+    assert parse.strip_whitespace(input) == syntax
 
 
 # Tests the parser wrapper works correctly
@@ -105,6 +107,7 @@
 # - Whitespace tokens are stripped
 @given(lists(test_tokenize.draw_token_classified()))
 def test_parse_fuzz(tokens):
-    stripped = parse.strip_whitespace(tokens)
+    converted = parse.import_tokens(tokens)
+    stripped = parse.strip_whitespace(converted)
     parsed = parse.parse(tokens)
     assert stripped == parsed