diff --git a/tests/test_parse.py b/tests/test_parse.py
index 19c48ff..4791883 100644
--- a/tests/test_parse.py
+++ b/tests/test_parse.py
@@ -306,6 +306,40 @@
     return (error_token, soup)
 
 
+# Generates a soup in which one Text has an extra EndText inserted
+# If the soup already contains a Text it is modified; otherwise one is inserted
+@composite
+def draw_error_endtext_extra(draw):
+    tokens = draw(draw_tokens_valid())
+    text_index = None
+    for i in range(0, len(tokens)):
+        if tokens[i].type == "text":
+            # Find a Text
+            text_index = i
+    if text_index is None:
+        token = draw(draw_token_text())
+        min = 0
+        max = len(tokens)
+        # Don't put a text before a shebang
+        if max > 0 and tokens[0].code[0:2] == "#!":
+            min = 1
+        text_index = draw(integers(min_value=min, max_value=max))
+        tokens.insert(text_index, token)
+    text_token = tokens[text_index]
+    lex_tokens = split_tokens(text_token.code, lexer_whitespace)
+    non_whitespace_count = (len(lex_tokens) + 1) // 2
+    rand_index = draw(integers(min_value=0, max_value=non_whitespace_count))
+    rand_index *= 2  # Skip non-whitespace
+    space = draw(draw_whitespace())
+    lex_tokens.insert(rand_index, space)
+    lex_tokens.insert(rand_index, "EndText")
+    new_code = "".join(lex_tokens)
+    error_token = SampleToken(new_code, None, None)
+    tokens[text_index] = error_token
+    soup = draw(draw_token_soup(tokens))
+    return (error_token, soup)
+
+
 # Test that we can lex tokens correctly
 @given(draw_soup_valid())
 def test_lexer_valid(soup):
@@ -362,6 +396,25 @@
         # e.error will be random
 
 
+# Test that adding an extra EndText to any Text pair causes
+# some error, either from the text data being read as code or
+# from the original EndText being a stray
+@given(draw_error_endtext_extra())
+def test_lexer_error_endtext_extra(error_soup):
+    (error_token, soup) = error_soup
+    try:
+        tokenizer = parse.Tokenizer(soup.code, soup.filename)
+        tokens = tokenizer.tokenize()
+        assert False  # Success?
+    except parse.ParseError as e:
+        location = error_token.location
+        assert e.context.parent == None
+        # e.context.context will be random
+        assert e.context.location.line >= location.line
+        if e.context.location.line == location.line:
+            assert e.context.location.column >= location.column
+        # e.error will be random
+
 # General fuzz test, make sure the parser doesn't fall apart and spew
 # uncontrolled errors.
 @given(text(), text())
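
The index arithmetic in draw_error_endtext_extra assumes that split_tokens(code, lexer_whitespace) returns an alternating list of non-whitespace and whitespace chunks whose concatenation reproduces the original code, so that (len + 1) // 2 counts the non-whitespace chunks and every even insertion index falls on a chunk boundary. The standalone sketch below illustrates that assumption; split_tokens_stub, its whitespace pattern, and the "Text ... EndText" sample are hypothetical stand-ins, not the suite's real helpers or syntax.

import re

def split_tokens_stub(code, whitespace=r"[ \t\n]+"):
    # Split on whitespace but keep the separators, so "".join(parts) == code.
    return [part for part in re.split("(" + whitespace + ")", code) if part != ""]

lex_tokens = split_tokens_stub("Text hello world EndText")
# ['Text', ' ', 'hello', ' ', 'world', ' ', 'EndText']
non_whitespace_count = (len(lex_tokens) + 1) // 2  # 4 non-whitespace chunks

# Any even index in [0, 2 * non_whitespace_count] is a chunk boundary, so the
# stray EndText lands between chunks instead of splitting one apart.
rand_index = 2 * 2  # as if integers(0, non_whitespace_count) drew 2, then doubled
lex_tokens[rand_index:rand_index] = ["EndText", " "]
print("".join(lex_tokens))  # Text hello EndText world EndText

The strategy only relies on the alternation and on the join round-tripping; whether the real split_tokens behaves exactly like this stub is an assumption.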