diff --git a/tests/parse/test_clear_notes.py b/tests/parse/test_clear_notes.py
index 2552108..c4cac61 100644
--- a/tests/parse/test_clear_notes.py
+++ b/tests/parse/test_clear_notes.py
@@ -11,9 +11,9 @@
     ParseErrorException,
     ParseTask,
 )
-from src.syntax import SyntaxStream
+from src.syntax import SyntaxStream, SyntaxType
 from tests.parse.templates import template_parse_invalid
-from tests.test_syntax import draw_token_by_value, draw_syntax_token
+from tests.test_syntax import draw_token_by_value, draw_syntax_random
 
 
 # Dummy parse_note implementation for testing note clearing
@@ -49,7 +49,7 @@
 # Draws a random syntax suitable for note clearing testing
 @composite
-def draw_clear_notes_value_token(draw):
-    token = draw(draw_syntax_token())
+def draw_clear_notes_value(draw):
+    token = draw(draw_syntax_random())
     assume(token.value not in ["EndNote"])
     return token
 
@@ -57,7 +57,7 @@
 # Draws syntax to make a valid soup to clear notes
 @composite
 def draw_syntax_clear_notes_valid(draw):
-    tokens = draw(lists(draw_clear_notes_value_token()))
+    tokens = draw(lists(draw_clear_notes_value()))
     output = []
     for token in tokens:
         if token.value != "StartNote":
@@ -80,7 +80,7 @@
 # Error when finding a specific token in a stream of notes
 # Used for testing clear_notes otherwise
 def error_on_token(draw, parent_context, value, error):
-    tokens = draw(lists(draw_clear_notes_value_token()))
+    tokens = draw(lists(draw_clear_notes_value()))
     # Ensure we have a value somewhere
     start = draw(draw_token_by_value(value))
     new_tokens = tokens + [start]
diff --git a/tests/parse/test_note.py b/tests/parse/test_note.py
index b4090f6..d97d12e 100644
--- a/tests/parse/test_note.py
+++ b/tests/parse/test_note.py
@@ -22,7 +22,7 @@
 )
 from tests.test_syntax import (
     draw_token_by_value,
-    draw_syntax_random,
+    draw_syntax_not_token,
     draw_syntax_token,
 )
 
@@ -38,7 +38,7 @@
 # Draws a random syntax that isn't a StartNote token
 @composite
 def draw_syntax_not_startnote(draw):
-    token = draw(draw_syntax_random())
+    token = draw(draw_syntax_token())
     assume(not (token.type == SyntaxType.TOKEN and token.value == "StartNote"))
     return token
 
diff --git a/tests/parse/test_parse.py b/tests/parse/test_parse.py
index d559b5a..02a0217 100644
--- a/tests/parse/test_parse.py
+++ b/tests/parse/test_parse.py
@@ -19,7 +19,7 @@
 )
 from src.syntax import SyntaxStream
 from tests.templates import template_test_structure
-from tests.test_syntax import draw_token_classified, draw_syntax_random
+from tests.test_syntax import draw_syntax_token, draw_syntax_random
 
 
 # Draws a random parse task
@@ -28,13 +28,12 @@
     return draw(sampled_from(list(ParseTask)))
 
 
-# Draws a random parse context with a dummy parent
+# Draws a random parse context without a parent
 @composite
 def draw_parse_context(draw):
     task = draw(draw_parse_task())
     syntax = draw(draw_syntax_random())
-    parent = draw(text())
-    return ParseContext(task, syntax, parent)
+    return ParseContext(task, syntax, None)
 
 
 # Test parse context structure
@@ -81,7 +80,7 @@
 # Tests the parser wrapper works correctly
 # We expect the following behaviour:
 # - Notes to be removed from the tokens
-@given(lists(draw_token_classified()), draw_parse_context())
+@given(lists(draw_syntax_token()), draw_parse_context())
 def test_parse_fuzz(tokens, context):
     result = None
     parsed = None
diff --git a/tests/test_syntax.py b/tests/test_syntax.py
index b24aa29..aaee057 100644
--- a/tests/test_syntax.py
+++ b/tests/test_syntax.py
@@ -75,14 +75,6 @@
     return Syntax(value, location, type)
 
 
-# Draws a random token
-@composite
-def draw_token_random(draw):
-    value = draw(text())
-    location = draw(draw_syntax_location())
-    return Syntax(value, location, SyntaxType.TOKEN)
-
-
 # Draws a token with a specific value but random location
 @composite
 def draw_token_by_value(draw, value):
@@ -166,9 +158,9 @@
     return Syntax(value, location, SyntaxType.TOKEN)
 
 
-# Draws a classified token
+# Draws a syntax token
 @composite
-def draw_token_classified(draw):
+def draw_syntax_token(draw):
     strategies = [
         draw_token_unknown(),
         draw_token_space(),
@@ -180,13 +172,12 @@
     return token
 
 
-# Draws a token syntax value
+# Draws a text syntax with a token value
 @composite
-def draw_syntax_token(draw):
-    value = draw(draw_token_classified())
-    location = draw(draw_syntax_location())
-    type = SyntaxType.TOKEN
-    return Syntax(value.value, location, type)
+def draw_syntax_texttoken(draw):
+    token = draw(draw_syntax_token())
+    type = SyntaxType.TEXT
+    return Syntax(token.value, token.location, type)
 
 
 # Draws a random syntax
@@ -195,6 +186,7 @@
     strategies = [
         draw_syntax_token(),
         draw_syntax_text(),
+        draw_syntax_texttoken(),
     ]
     return draw(one_of(strategies))
 
@@ -204,6 +196,7 @@
 def draw_syntax_not_token(draw):
     strategies = [
         draw_syntax_text(),
+        draw_syntax_texttoken(),
     ]
     return draw(one_of(strategies))
 
diff --git a/tests/test_tokenize.py b/tests/test_tokenize.py
index 824a9eb..7c89f6d 100644
--- a/tests/test_tokenize.py
+++ b/tests/test_tokenize.py
@@ -14,7 +14,7 @@
 from src import tokenize
 from src.syntax import Syntax, SyntaxLocation, SyntaxType
 from tests.test_syntax import (
-    draw_token_classified,
+    draw_syntax_token,
     draw_token_newline,
     draw_token_space,
     draw_token_unknown,
@@ -102,7 +102,7 @@
 # Generates a list of tokens with correct locations
 @composite
 def draw_tokens_locations(draw):
-    tokens = draw(lists(draw_token_classified()))
+    tokens = draw(lists(draw_syntax_token()))
     filename = draw(text())
     located = []
     line = 1
@@ -147,7 +147,7 @@
 # Generates two list of tokens: One with whitespace and one without
 @composite
 def draw_tokens_whitespace(draw):
-    input = draw(lists(draw_token_classified()))
+    input = draw(lists(draw_syntax_token()))
     stripped = []
     for s in input:
         is_whitespace = s.value in valid_spaces or s.value in valid_newlines
@@ -179,8 +179,8 @@
 # Draws a token and possibly add garbage
 # This is to ensure that tokens must completely match a value
 @composite
-def draw_token_classified_garbled(draw):
-    token = draw(draw_token_classified())
+def draw_syntax_token_garbled(draw):
+    token = draw(draw_syntax_token())
     value = token.value
     if draw(booleans()):
         value = draw(text(min_size=1)) + value
@@ -192,7 +192,7 @@
 # Draw a random string made of token values
 @composite
 def draw_source_fuzz(draw):
-    tokens = draw(lists(draw_token_classified()))
+    tokens = draw(lists(draw_syntax_token()))
     input = ""
     for t in tokens:
         input += t.value
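Note on the pattern used throughout this diff: every draw_* helper is a hypothesis @composite strategy that draws from sub-strategies and returns a Syntax value, and tests consume them via @given. Below is a minimal self-contained sketch of that pattern. The Syntax, SyntaxType, and SyntaxLocation definitions here are simplified stand-ins (assumptions for illustration), not the real ones from src/syntax.py.

# Minimal sketch of the @composite strategy pattern; the Syntax types
# below are simplified stand-ins, not the definitions from src/syntax.py.
from collections import namedtuple
from enum import Enum, auto

from hypothesis import given
from hypothesis.strategies import composite, integers, lists, one_of, text


class SyntaxType(Enum):
    TOKEN = auto()
    TEXT = auto()


SyntaxLocation = namedtuple("SyntaxLocation", ["line", "column", "file"])
Syntax = namedtuple("Syntax", ["value", "location", "type"])


# Draws a random location, in the style of draw_syntax_location
@composite
def draw_syntax_location(draw):
    line = draw(integers(min_value=1))
    column = draw(integers(min_value=0))
    file = draw(text())
    return SyntaxLocation(line, column, file)


# Draws a token syntax, in the style of the renamed draw_syntax_token
@composite
def draw_syntax_token(draw):
    value = draw(text(min_size=1))
    location = draw(draw_syntax_location())
    return Syntax(value, location, SyntaxType.TOKEN)


# Draws a text syntax carrying a token's value, in the style of the
# new draw_syntax_texttoken
@composite
def draw_syntax_texttoken(draw):
    token = draw(draw_syntax_token())
    return Syntax(token.value, token.location, SyntaxType.TEXT)


# Tests consume strategies through @given, as test_parse_fuzz does
@given(lists(one_of(draw_syntax_token(), draw_syntax_texttoken())))
def test_syntax_types(syntax_list):
    for syntax in syntax_list:
        assert syntax.type in (SyntaxType.TOKEN, SyntaxType.TEXT)

Calling a @composite-decorated function returns a strategy object rather than a value, which is why the diff passes draw_syntax_token() (with parentheses) into lists() and one_of().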