diff --git a/tests/test_parse.py b/tests/test_parse.py
index af661d6..c5710c6 100644
--- a/tests/test_parse.py
+++ b/tests/test_parse.py
@@ -119,14 +119,20 @@
     return value
 
 
+# Draws whitespace ignored by the lexer
+@composite
+def draw_whitespace(draw):
+    return "".join(draw(lists(sampled_from(lexer_whitespace), min_size=1)))
+
+
 # Generates a Text token
 @composite
 def draw_token_text(draw):
     value = draw(text())
     text_tokens = split_by(value, lexer_whitespace)
     assume("StartText" not in text_tokens and "EndText" not in text_tokens)
-    space1 = draw(sampled_from(lexer_whitespace))
-    space2 = draw(sampled_from(lexer_whitespace))
+    space1 = draw(draw_whitespace())
+    space2 = draw(draw_whitespace())
     code = "StartText" + space1 + value + space2 + "EndText"
     return SampleToken(code, "text", value.strip(lexer_whitespace))
 
@@ -162,8 +168,8 @@
     value = draw(text())
     note_tokens = split_by(value, lexer_whitespace)
     assume("StartNote" not in note_tokens and "EndNote" not in note_tokens)
-    space1 = draw(sampled_from(lexer_whitespace))
-    space2 = draw(sampled_from(lexer_whitespace))
+    space1 = draw(draw_whitespace())
+    space2 = draw(draw_whitespace())
     code = "StartNote" + space1 + value + space2 + "EndNote"
     return SampleToken(code, None, None)
 
@@ -198,7 +204,7 @@
     all_tokens = shebang + tokens
     code = ""
     for token in all_tokens:
-        space = "".join(draw(lists(sampled_from(lexer_whitespace), min_size=1)))
+        space = draw(draw_whitespace())
        code += token.code + space
     return SampleSoup(all_tokens, code)
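
The new draw_whitespace composite strategy replaces the single-character sampled_from draws so that tokens can be separated by runs of one or more whitespace characters. A minimal standalone sketch of that pattern is below, assuming lexer_whitespace is the string of characters the lexer skips (the " \t\n" value here is a placeholder, not the project's actual definition), with a throwaway property that only checks the strategy's own guarantees:

    # Standalone sketch of the draw_whitespace strategy (placeholder lexer_whitespace).
    from hypothesis import given
    from hypothesis.strategies import composite, lists, sampled_from

    lexer_whitespace = " \t\n"  # assumption: stand-in for the real lexer's whitespace set

    @composite
    def draw_whitespace(draw):
        # Join one or more whitespace characters into a single run.
        return "".join(draw(lists(sampled_from(lexer_whitespace), min_size=1)))

    @given(draw_whitespace())
    def test_whitespace_run_is_nonempty_and_whitespace_only(ws):
        assert ws  # min_size=1 guarantees at least one character
        assert all(c in lexer_whitespace for c in ws)

    test_whitespace_run_is_nonempty_and_whitespace_only()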