diff --git a/tests/test_parse.py b/tests/test_parse.py
index 189ebab..8383a42 100644
--- a/tests/test_parse.py
+++ b/tests/test_parse.py
@@ -31,15 +31,6 @@
 # List of words the lexer understands
 reserved_words = keywords + ["StartText", "EndText", "StartNote", "EndNote"]
 
-# Test with no data at all
-def test_parser_empty():
-    tokenizer = parse.Tokenizer("", "")
-    tokens = tokenizer.tokenize()
-    assert tokens[0].type == "EOF"
-    assert tokens[0].location.line == 1
-    assert tokens[0].location.column == 0
-    assert tokens[0].location.file == ""
-
 
 # Quick function to split a string by different characters, used for checking
 # if generated text includes tokens
@@ -58,30 +49,6 @@
     return tokens
 
 
-# General fuzz test, make sure the parser doesn't fall apart and spew
-# uncontrolled errors.
-@given(text(), text())
-def test_parser_fuzz(code, filename):
-    try:
-        tokenizer = parse.Tokenizer(code, filename)
-        tokens = tokenizer.tokenize()
-        parser = parse.Parser(tokens)
-        parser.parse_file()
-    except parse.ParseError:
-        pass
-
-
-# Test that we can make notes using BeginNote and EndNote syntax.
-@given(text(), sampled_from(lexer_whitespace), sampled_from(lexer_whitespace))
-def test_lexer_note(text, space1, space2):
-    text_tokens = split_by(text, lexer_whitespace)
-    assume("BeginNote" not in text_tokens and "EndNote" not in text_tokens)
-    code = "BeginNote" + space1 + text + space2 + "EndNote"
-    tokenizer = parse.Tokenizer(code, "")
-    tokens = tokenizer.tokenize()
-    assert tokens[0].type == "EOF"
-
-
 class SampleToken:
     def __init__(self, code, type, value):
         self.code = code
@@ -166,6 +133,17 @@
     assert tokens[1].type == "EOF"
 
 
+# Test that we can make notes using BeginNote and EndNote syntax.
+@given(text(), sampled_from(lexer_whitespace), sampled_from(lexer_whitespace))
+def test_lexer_note(text, space1, space2):
+    text_tokens = split_by(text, lexer_whitespace)
+    assume("BeginNote" not in text_tokens and "EndNote" not in text_tokens)
+    code = "BeginNote" + space1 + text + space2 + "EndNote"
+    tokenizer = parse.Tokenizer(code, "")
+    tokens = tokenizer.tokenize()
+    assert tokens[0].type == "EOF"
+
+
 # Test that reserved words aren't read without whitespace
 @given(lists(sampled_from(reserved_words), min_size=2))
 def test_lexer_conjoined_words(words):
@@ -190,3 +168,26 @@
     assert tokens[0].location.line == 2
     assert tokens[0].location.column == 0
     assert tokens[0].location.file == ""
+
+
+# Test with no data at all
+def test_parser_empty():
+    tokenizer = parse.Tokenizer("", "")
+    tokens = tokenizer.tokenize()
+    assert tokens[0].type == "EOF"
+    assert tokens[0].location.line == 1
+    assert tokens[0].location.column == 0
+    assert tokens[0].location.file == ""
+
+
+# General fuzz test, make sure the parser doesn't fall apart and spew
+# uncontrolled errors.
+@given(text(), text())
+def test_parser_fuzz(code, filename):
+    try:
+        tokenizer = parse.Tokenizer(code, filename)
+        tokens = tokenizer.tokenize()
+        parser = parse.Parser(tokens)
+        parser.parse_file()
+    except parse.ParseError:
+        pass