diff --git a/tests/test_parse.py b/tests/test_parse.py
index 28e3d06..77971d2 100644
--- a/tests/test_parse.py
+++ b/tests/test_parse.py
@@ -3,8 +3,26 @@
 from src import parse
 
+# Whitespace that separates lexer words
 lexer_whitespace = "\n\t "
 
+# List of keywords the lexer understands
+keywords = [
+    "NewLang",
+    "Done",
+    "Set",
+    "To",
+    "EndSet",
+    "If",
+    "Then",
+    "Else",
+    "EndIf",
+]
+
+
+# List of words the lexer understands
+reserved_words = keywords + ["StartText", "EndText", "StartNote", "EndNote"]
+
 
 # Test with no data at all
 def test_parser_empty():
     tokenizer = parse.Tokenizer("", "")
@@ -15,19 +33,6 @@
     assert tokens[0].location.file == ""
 
 
-# General fuzz test, make sure the parser doesn't fall apart and spew
-# uncontrolled errors.
-@given(text(), text())
-def test_parser_fuzz(code, filename):
-    try:
-        tokenizer = parse.Tokenizer(code, filename)
-        tokens = tokenizer.tokenize()
-        parser = parse.Parser(tokens)
-        parser.parse_file()
-    except parse.ParseError:
-        pass
-
-
 # Quick function to split a string by different characters, used for checking
 # if generated text includes tokens
 def split_by(string, characters):
@@ -45,6 +50,19 @@
     return tokens
 
 
+# General fuzz test, make sure the parser doesn't fall apart and spew
+# uncontrolled errors.
+@given(text(), text())
+def test_parser_fuzz(code, filename):
+    try:
+        tokenizer = parse.Tokenizer(code, filename)
+        tokens = tokenizer.tokenize()
+        parser = parse.Parser(tokens)
+        parser.parse_file()
+    except parse.ParseError:
+        pass
+
+
 # Test that we can make string literals using the BeginText and EndText syntax.
 @given(text(), sampled_from(lexer_whitespace), sampled_from(lexer_whitespace))
 def test_lexer_text(text, space1, space2):
@@ -89,19 +107,6 @@
     assert tokens[1].type == "EOF"
 
 
-# List of keywords the lexer understands
-keywords = [
-    "NewLang",
-    "Done",
-    "Set",
-    "To",
-    "EndSet",
-    "If",
-    "Then",
-    "Else",
-    "EndIf",
-]
-
 # Test that we can read keywords properly
 @given(sampled_from(keywords))
 def test_lexer_boolean(keyword):
@@ -116,9 +121,6 @@
     assert tokens[1].type == "EOF"
 
 
-# List of words the lexer understands
-reserved_words = keywords + ["StartText", "EndText", "StartNote", "EndNote"]
-
 # Test that we can make symbols
 @given(text(alphabet=characters(blacklist_characters=lexer_whitespace), min_size=1))
 def test_lexer_symbols(symbol):