diff --git a/tests/test_parse.py b/tests/test_parse.py
index 04c0567..95a5e4d 100644
--- a/tests/test_parse.py
+++ b/tests/test_parse.py
@@ -1,4 +1,4 @@
-from hypothesis import given
+from hypothesis import given, assume
 from hypothesis.strategies import text, booleans, sampled_from, characters
 from src import parse
 
@@ -84,9 +84,14 @@
     assert tokens[1].type == "EOF"
 
+# List of words the lexer understands
+reserved_words = keywords + ["StartText", "EndText", "StartNote", "EndNote"]
+
 
 # Test that we can make symbols
 @given(text(alphabet=characters(blacklist_characters="\n\t "), min_size=1))
 def test_lexer_symbols(symbol):
+    assume(symbol not in reserved_words)  # Reserved words aren't symbols
+    assume(not symbol.startswith("#!"))  # Shebangs aren't symbols
     code = symbol
     tokenizer = parse.Tokenizer(code, "")
     tokens = tokenizer.tokenize()