diff --git a/src/parse.py b/src/parse.py
index 1a6f83c..29039ee 100644
--- a/src/parse.py
+++ b/src/parse.py
@@ -73,6 +73,7 @@
     buffer = ""
     # Parse the starting tokens
     s = stream.pop()
+    # Error if there's not a valid StartText token
     if s is None:
         return None
     elif s.type != SyntaxType.TOKEN:
@@ -85,8 +86,10 @@
     # Parse following tokens
     while True:
         s = stream.pop()
+        # Error if there's no EndText
        if s is None:
             return None
+        # Error if any of the text isn't a token
        elif s.type != SyntaxType.TOKEN:
             return None
         # Don't allow StartText in text
diff --git a/tests/test_parse.py b/tests/test_parse.py
index 187bd2a..88c991d 100644
--- a/tests/test_parse.py
+++ b/tests/test_parse.py
@@ -152,7 +152,7 @@

 # Tests parse_text works correctly
 # We expect the following behaviour:
-# - Only the test expression is parsed
+# - Only the text expression is parsed
 # - The resulting text is the value of tokens between StartText and EndText
 # - The resulting text has its surrounding whitespace stripped
 # - The Syntax's value is the resulting text
@@ -170,15 +170,22 @@


 # Generate text without StartText
+# We expect the following behaviour:
+# - Error if there is no StartText node at all
+# - Error if StartText is not a SyntaxType.TOKEN
+# - Error if StartText's token type is not a SyntaxType.KEYWORD
+# - Error if StartText's token value is not "StartText"
 @composite
 def draw_syntax_text_invalid_nostarttext(draw):
     (tokens, _) = draw(draw_syntax_text_valid())
     if draw(booleans()):
         token = draw(draw_syntax_random())
         assume(
-            token.type == parse.SyntaxType.TOKEN
-            and token.value.type == tokenize.TokenType.KEYWORD
-            and token.value.value != "StartText"
+            not (
+                token.type == parse.SyntaxType.TOKEN
+                and token.value.type == tokenize.TokenType.KEYWORD
+                and token.value.value == "StartText"
+            )
         )
         new_tokens = [token] + tokens[1:0]
         return new_tokens
@@ -187,22 +194,15 @@


 # Generate text without EndText
+# We expect the following behaviour:
+# - Error if there is no EndText node at all
 @composite
 def draw_syntax_text_invalid_noendtext(draw):
     (tokens, _) = draw(draw_syntax_text_valid())
-    if draw(booleans()):
-        token = draw(draw_syntax_random())
-        assume(
-            token.type == parse.SyntaxType.TOKEN
-            and token.value.type == tokenize.TokenType.KEYWORD
-            and token.value.value != "EndText"
-        )
-        new_tokens = tokens[0:-1] + [token]
-        return new_tokens
-    else:
-        return tokens[0:-1]
+    return tokens[0:-1]


+# Generate an invalid text case
 @composite
 def draw_syntax_text_invalid(draw):
     strategies = [
@@ -212,6 +212,7 @@
     return draw(one_of(strategies))


+# Test that parse_text errors in invalid cases
 @given(one_of(draw_syntax_text_invalid()))
 def test_parse_text_invalid(test_data):
     tokens = test_data