diff --git a/tests/test_parse.py b/tests/test_parse.py
index c8db01b..af9bba9 100644
--- a/tests/test_parse.py
+++ b/tests/test_parse.py
@@ -195,9 +195,9 @@
     return SampleToken("", None, None)
 
 
-# Generates a soup of tokens
+# Generates a set of valid tokens
 @composite
-def draw_token_soup(draw):
+def draw_tokens_valid(draw):
     strategies = [
         draw_token_text(),
         draw_token_bool(),
@@ -206,10 +206,17 @@
         draw_token_note(),
         draw_token_empty(),
     ]
-    filename = draw(text())
     shebang = draw(lists(draw_token_shebang(), max_size=1))
     tokens = draw(lists(one_of(strategies)))
     all_tokens = shebang + tokens
+    return all_tokens
+
+
+# Generates a soup of tokens using a given strategy
+@composite
+def draw_token_soup(draw, strategy):
+    filename = draw(text())
+    all_tokens = draw(strategy)
     code = ""
     curr_line = 1
     curr_column = 1
@@ -241,8 +248,8 @@
 
 
 # Test that we can lex tokens correctly
-@given(draw_token_soup())
-def test_lexer_soup(soup):
+@given(draw_token_soup(draw_tokens_valid()))
+def test_lexer_valid(soup):
     tokens = safe_tokenize(soup.code, soup.filename)
     assert len(tokens) <= len(soup.tokens)
     in_pos = 0