diff --git a/tests/test_parse.py b/tests/test_parse.py
index 4791883..bb982dd 100644
--- a/tests/test_parse.py
+++ b/tests/test_parse.py
@@ -41,25 +41,6 @@
 ]
 
 
-# Splits a string in to tokens and their separators
-def split_tokens(text, split_by):
-    output = []
-    curr_token = ""
-    if text == "":
-        return output
-    curr_state = text[0] in split_by
-    for c in text:
-        new_state = c in split_by
-        if curr_state != new_state:
-            curr_state = new_state
-            output.append(curr_token)
-            curr_token = ""
-        curr_token += c
-    if curr_token != "":
-        output.append(curr_token)
-    return output
-
-
 # A sample token containing code to create a lexer token, and
 # the resulting lexer type and value
 # An type and value of 'None' is used for lexer code that
@@ -265,81 +246,6 @@
     return soup
 
 
-# Generates a soup with a stray EndText
-@composite
-def draw_error_endtext_stray(draw):
-    tokens = draw(draw_tokens_valid())
-    bad_token = SampleToken("EndText", None, None)
-    index = draw(integers(min_value=0, max_value=len(tokens)))
-    tokens.insert(index, bad_token)
-    soup = draw(draw_token_soup(tokens))
-    error_token = soup.tokens[index]
-    return (error_token, soup)
-
-
-# Generates a soup with a modified Text without EndText
-# Only the latest Text in the soup is modified
-@composite
-def draw_error_endtext_remove(draw):
-    tokens = draw(draw_tokens_valid())
-    text_index = None
-    for i in range(0, len(tokens)):
-        if tokens[i].type == "text":
-            # Find the last Text so when we remove
-            # and EndText the next Text won't close it
-            text_index = i
-    if text_index is None:
-        token = draw(draw_token_text())
-        min = 0
-        max = len(tokens)
-        # Don't put a text before a shebang
-        if max > 0 and tokens[0].code[0:2] == "#!":
-            min = 1
-        text_index = draw(integers(min_value=min, max_value=max))
-        tokens.insert(text_index, token)
-    text_token = tokens[text_index]
-    lex_tokens = split_tokens(text_token.code, lexer_whitespace)
-    new_code = "".join(lex_tokens[:-1])
-    error_token = SampleToken(new_code, None, None)
-    tokens[text_index] = error_token
-    soup = draw(draw_token_soup(tokens))
-    return (error_token, soup)
-
-
-# Generates a soup with a modified Text with an extra EndText inserted
-# Any Text in the soup is modified
-@composite
-def draw_error_endtext_extra(draw):
-    tokens = draw(draw_tokens_valid())
-    text_index = None
-    for i in range(0, len(tokens)):
-        if tokens[i].type == "text":
-            # Find a Text
-            text_index = i
-    if text_index is None:
-        token = draw(draw_token_text())
-        min = 0
-        max = len(tokens)
-        # Don't put a text before a shebang
-        if max > 0 and tokens[0].code[0:2] == "#!":
-            min = 1
-        text_index = draw(integers(min_value=min, max_value=max))
-        tokens.insert(text_index, token)
-    text_token = tokens[text_index]
-    lex_tokens = split_tokens(text_token.code, lexer_whitespace)
-    non_whitespace_count = (len(lex_tokens) + 1) / 2
-    rand_index = draw(integers(min_value=0, max_value=non_whitespace_count))
-    rand_index *= 2  # Skip non-whitespace
-    space = draw(draw_whitespace())
-    lex_tokens.insert(rand_index, space)
-    lex_tokens.insert(rand_index, "EndText")
-    new_code = "".join(lex_tokens)
-    error_token = SampleToken(new_code, None, None)
-    tokens[text_index] = error_token
-    soup = draw(draw_token_soup(tokens))
-    return (error_token, soup)
-
-
 # Test that we can lex tokens correctly
 @given(draw_soup_valid())
 def test_lexer_valid(soup):
@@ -360,61 +266,6 @@
     assert out_pos == len(tokens)
 
 
-# Test that we can catch a stray EndText
-@given(draw_error_endtext_stray())
-def test_lexer_error_endtext_stray(error_soup):
-    (error_token, soup) = error_soup
-    try:
-        tokenizer = parse.Tokenizer(soup.code, soup.filename)
-        tokens = tokenizer.tokenize()
-        assert False  # Success?
-    except parse.ParseError as e:
-        location = error_token.location
-        assert e.context.parent == None
-        assert e.context.context == "reading word"
-        assert e.context.location == location
-        assert e.error == "Found stray EndText"
-
-
-# Test that removing an EndText from the last available text
-# pair causes some error, either from the text data being read as
-# code or from the text being unterminated
-@given(draw_error_endtext_remove())
-def test_lexer_error_endtext_remove(error_soup):
-    (error_token, soup) = error_soup
-    try:
-        tokenizer = parse.Tokenizer(soup.code, soup.filename)
-        tokens = tokenizer.tokenize()
-        assert False  # Success?
-    except parse.ParseError as e:
-        location = error_token.location
-        assert e.context.parent == None
-        # e.context.context will be random
-        assert e.context.location.line >= location.line
-        if e.context.location.line == location.line:
-            assert e.context.location.column >= location.column
-        # e.error will be random
-
-
-# Test that adding an extra EndText to any Text pair causes
-# some error, either from the text data being read as code or
-# from the original EndText being a stray
-@given(draw_error_endtext_extra())
-def test_lexer_error_endtext_extra(error_soup):
-    (error_token, soup) = error_soup
-    try:
-        tokenizer = parse.Tokenizer(soup.code, soup.filename)
-        tokens = tokenizer.tokenize()
-        assert False  # Success?
-    except parse.ParseError as e:
-        location = error_token.location
-        assert e.context.parent == None
-        # e.context.context will be random
-        assert e.context.location.line >= location.line
-        if e.context.location.line == location.line:
-            assert e.context.location.column >= location.column
-        # e.error will be random
-
 # General fuzz test, make sure the parser doesn't fall apart and spew
 # uncontrolled errors.
 @given(text(), text())