diff --git a/tests/parse/templates.py b/tests/parse/templates.py
index 231fdc6..7e2695a 100644
--- a/tests/parse/templates.py
+++ b/tests/parse/templates.py
@@ -50,6 +50,14 @@
 # - Parsing causes an error
 # - The parse error is as expected
 def template_parse_invalid(parser):
+    # Wrapper to add parse_context to our test_data
+    @composite
+    def wrapper(draw, func):
+        context = draw(draw_parse_context())
+        (tokens, error) = draw(composite(func)(context))
+        return (tokens, error, context)
+
+    # test_data is the output of wrapper
     def do(test_data):
         (tokens, error, context) = test_data
         stream = SyntaxStream(tokens)
@@ -59,9 +67,4 @@
         except ParseErrorException as e:
             assert e == error
 
-    def wrapper(draw, func):
-        context = draw(draw_parse_context())
-        (tokens, error) = draw(composite(func)(context))
-        return (tokens, error, context)
-
-    return lambda func: given(composite(wrapper)(func))(do)
+    return lambda func: given(wrapper(func))(do)
diff --git a/tests/parse/test_parse.py b/tests/parse/test_parse.py
index 6dee641..d559b5a 100644
--- a/tests/parse/test_parse.py
+++ b/tests/parse/test_parse.py
@@ -84,6 +84,7 @@
 @given(lists(draw_token_classified()), draw_parse_context())
 def test_parse_fuzz(tokens, context):
     result = None
+    parsed = None
     try:
         stream = SyntaxStream(tokens.copy())
         result = NoteSkipper().clear_notes(stream, context)
@@ -91,6 +92,6 @@
         result = e
     try:
         parsed = parse(tokens, context)
-        assert parsed == result
     except ParseErrorException as e:
-        assert e == result
+        parsed = e
+    assert parsed == result
diff --git a/tests/parse/test_text.py b/tests/parse/test_text.py
index 315ba6f..516b552 100644
--- a/tests/parse/test_text.py
+++ b/tests/parse/test_text.py
@@ -93,7 +93,6 @@
 # - Error if there is no StartText token at all
 @template_parse_invalid(Parser().parse_text)
 def test_parse_text_invalid_empty(draw, parent_context):
-    (tokens, _) = draw(draw_syntax_text_valid())
     context = ParseContext(ParseTask.PARSE_TEXT, None, parent_context)
     error = ParseErrorException(ParseError.NO_TOKEN, None, None, context)
     return ([], error)
diff --git a/tests/test_syntax.py b/tests/test_syntax.py
index d6f2ca4..b24aa29 100644
--- a/tests/test_syntax.py
+++ b/tests/test_syntax.py
@@ -32,6 +32,12 @@
     "EndText",
 ]
 
+# Literals recognized by the language
+literals = [
+    "True",
+    "False",
+]
+
 
 # Draws a random syntax location
 @composite
@@ -115,49 +121,49 @@
 @composite
 def draw_token_unknown(draw):
     reserved = valid_spaces + single_newlines
-    token = draw(draw_token_random())
+    location = draw(draw_syntax_location())
     chars = characters(blacklist_characters=reserved)
     value = draw(text(alphabet=chars, min_size=1))
     for v in multi_newlines:
         assume(v not in value)
-    assume(value not in ["True", "False"])
+    assume(value not in literals)
     assume(value not in keywords)
-    return Syntax(value, token.location, SyntaxType.TOKEN)
+    return Syntax(value, location, SyntaxType.TOKEN)
 
 
 # Draws a space token
 @composite
 def draw_token_space(draw):
-    token = draw(draw_token_random())
+    location = draw(draw_syntax_location())
     value = draw(sampled_from(valid_spaces))
-    return Syntax(value, token.location, SyntaxType.TOKEN)
+    return Syntax(value, location, SyntaxType.TOKEN)
 
 
 # Draws a new line token
 @composite
 def draw_token_newline(draw):
-    token = draw(draw_token_random())
+    location = draw(draw_syntax_location())
     value = draw(sampled_from(valid_newlines))
-    return Syntax(value, token.location, SyntaxType.TOKEN)
+    return Syntax(value, location, SyntaxType.TOKEN)
 
 
 # Draws a bool token
 @composite
 def draw_token_bool(draw):
-    token = draw(draw_token_random())
+    location = draw(draw_syntax_location())
     if draw(booleans()):
         value = "True"
     else:
         value = "False"
-    return Syntax(value, token.location, SyntaxType.TOKEN)
+    return Syntax(value, location, SyntaxType.TOKEN)
 
 
 # Draws a keyword token
 @composite
 def draw_token_keyword(draw):
-    token = draw(draw_token_random())
+    location = draw(draw_syntax_location())
     value = draw(sampled_from(keywords))
-    return Syntax(value, token.location, SyntaxType.TOKEN)
+    return Syntax(value, location, SyntaxType.TOKEN)
 
 
 # Draws a classified token
@@ -242,7 +248,7 @@
         real_times = times
         expected = None
     else:
-        real_times = times % len(stream.nodes)
+        real_times = times % node_count
         expected = nodes[0]
     for _ in range(0, real_times):
         node = stream.peek()
@@ -257,13 +263,11 @@
 def test_syntax_syntax_stream_mixed(nodes):
     stream = SyntaxStream(nodes.copy())
     read = []
-    node1 = stream.peek()
-    node2 = stream.pop()
-    assert node1 == node2
-    while node2 is not None:
-        read.append(node2)
-        node1 = stream.peek()
-        node2 = stream.pop()
-        assert node1 == node2
-    assert read == nodes
+    node = True
+    while node is not None:
+        peeked = stream.peek()
+        node = stream.pop()
+        read.append(node)
+        assert peeked == node
+    assert read[:-1] == nodes  # Skip None at end
     assert stream.pop() is None
diff --git a/tests/test_tokenize.py b/tests/test_tokenize.py
index ec1d2aa..824a9eb 100644
--- a/tests/test_tokenize.py
+++ b/tests/test_tokenize.py
@@ -148,13 +148,12 @@
 @composite
 def draw_tokens_whitespace(draw):
     input = draw(lists(draw_token_classified()))
-    syntax = []
+    stripped = []
    for s in input:
-        if s.type != SyntaxType.TOKEN or (
-            s.value not in valid_spaces and s.value not in valid_newlines
-        ):
-            syntax.append(s)
-    return (input, syntax)
+        is_whitespace = s.value in valid_spaces or s.value in valid_newlines
+        if s.type != SyntaxType.TOKEN or not is_whitespace:
+            stripped.append(s)
+    return (input, stripped)
 
 
 # Test that the tokenizer can strip whitespace correctly