diff --git a/tests/test_parse.py b/tests/test_parse.py
index 9518eea..28e3d06 100644
--- a/tests/test_parse.py
+++ b/tests/test_parse.py
@@ -3,6 +3,8 @@
 from src import parse
 
+lexer_whitespace = "\n\t "
+
 
 # Test with no data at all
 def test_parser_empty():
     tokenizer = parse.Tokenizer("", "")
@@ -44,9 +46,9 @@
 
 
 # Test that we can make string literals using the BeginText and EndText syntax.
-@given(text(), sampled_from("\n\t "), sampled_from("\n\t "))
+@given(text(), sampled_from(lexer_whitespace), sampled_from(lexer_whitespace))
 def test_lexer_text(text, space1, space2):
-    text_tokens = split_by(text, "\n\t ")
+    text_tokens = split_by(text, lexer_whitespace)
     assume("BeginText" not in text_tokens and "EndText" not in text_tokens)
     code = "BeginText" + space1 + text + space2 + "EndText"
     tokenizer = parse.Tokenizer(code, "")
@@ -60,9 +62,9 @@
 
 
 # Test that we can make notes using BeginNote and EndNote syntax.
-@given(text(), sampled_from("\n\t "), sampled_from("\n\t "))
+@given(text(), sampled_from(lexer_whitespace), sampled_from(lexer_whitespace))
 def test_lexer_note(text, space1, space2):
-    text_tokens = split_by(text, "\n\t ")
+    text_tokens = split_by(text, lexer_whitespace)
     assume("BeginNote" not in text_tokens and "EndNote" not in text_tokens)
     code = "BeginNote" + space1 + text + space2 + "EndNote"
     tokenizer = parse.Tokenizer(code, "")
@@ -118,7 +120,7 @@
 reserved_words = keywords + ["StartText", "EndText", "StartNote", "EndNote"]
 
 # Test that we can make symbols
-@given(text(alphabet=characters(blacklist_characters="\n\t "), min_size=1))
+@given(text(alphabet=characters(blacklist_characters=lexer_whitespace), min_size=1))
 def test_lexer_symbols(symbol):
     assume(symbol not in reserved_words)  # Reserved words aren't symbols
     assume(not symbol.startswith("#!"))  # Shebangs aren't symbols
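
Note: Hypothesis's sampled_from treats a string as a sequence, so sampled_from(lexer_whitespace) still draws a single whitespace character exactly as sampled_from("\n\t ") did, and characters(blacklist_characters=...) likewise accepts a string of characters, so the change above is a pure rename. The split_by helper the tests call is defined elsewhere in tests/test_parse.py and is not shown in this diff; as a rough, hypothetical sketch of what such a helper presumably does (split text on any of the given separator characters):

import re

# Hypothetical sketch only: the real split_by lives elsewhere in
# tests/test_parse.py and may differ. This version splits `text` on
# any character in `separators` and drops the empty strings produced
# by consecutive separators.
def split_by(text, separators):
    return [tok for tok in re.split("[" + re.escape(separators) + "]", text) if tok]

# Example: split_by("a b\tc", "\n\t ") -> ["a", "b", "c"]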