diff --git a/tests/parse/templates.py b/tests/parse/templates.py index 4f82928..231fdc6 100644 --- a/tests/parse/templates.py +++ b/tests/parse/templates.py @@ -18,6 +18,8 @@ # Tests that something parses correctly # We expect the following behaviour: +# - The decoration supplies a generator for test data and expected output +# - The decorated function is unused # - Only the supplied tokens are parsed # - The supplied tokens parse to the expected value # - The Syntax's value is the expected value @@ -41,6 +43,12 @@ # Test that something parses incorrectly +# We expect the following behaviour: +# - The decoration supplies a parser function +# - The decorated function takes a generated parse context +# - The decorated function generates input tokens and an error +# - Parsing causes an error +# - The parse error is as expected def template_parse_invalid(parser): def do(test_data): (tokens, error, context) = test_data diff --git a/tests/parse/test_bool.py b/tests/parse/test_bool.py index 75dac9d..53e204f 100644 --- a/tests/parse/test_bool.py +++ b/tests/parse/test_bool.py @@ -38,7 +38,7 @@ pass -# Generate an invalid boolean +# Tests parsing of invalid booleans # We expect the following behaviour: # - Error if the token is not a SyntaxType.TOKEN # - Error if the token is not True or False @@ -53,7 +53,7 @@ return ([token], error) -# Generate no boolean +# Tests parsing of empty tokens # We expect the following behaviour: # - Error if there isn't a token @template_parse_invalid(Parser().parse_bool) diff --git a/tests/parse/test_clear_notes.py b/tests/parse/test_clear_notes.py index 009a445..2552108 100644 --- a/tests/parse/test_clear_notes.py +++ b/tests/parse/test_clear_notes.py @@ -18,6 +18,8 @@ # Dummy parse_note implementation for testing note clearing # This redefines skip_note to skip the StartNote and not do anything else +# Effectively this turns notes in to a single token for our tests, not needing +# a terminating EndNote def 
clear_notes_skip_note_valid(stream, parent_context): stream.pop() return None @@ -44,7 +46,7 @@ return skipper -# Draws a random token suitable for note clearing testing +# Draws a random syntax suitable for note clearing testing @composite def draw_clear_notes_value_token(draw): token = draw(draw_syntax_token()) @@ -52,7 +54,7 @@ return token -# Draws tokens to make a valid soup to clear notes +# Draws syntax to make a valid soup to clear notes @composite def draw_syntax_clear_notes_valid(draw): tokens = draw(lists(draw_clear_notes_value_token())) @@ -66,7 +68,7 @@ # Tests clear_notes works correctly # We expect the following behaviour: # - When StartNote is encountered skip_note is called to skip the note -# - Other tokens are passed through +# - Other syntax is passed through @given(draw_syntax_clear_notes_valid()) def test_parse_clear_notes_valid(test_data): (tokens, result) = test_data @@ -76,6 +78,7 @@ # Error when finding a specific token in a stream of notes +# Used for testing clear_notes otherwise def error_on_token(draw, parent_context, value, error): tokens = draw(lists(draw_clear_notes_value_token())) # Ensure we have a value somewhere @@ -90,7 +93,7 @@ # Tests clear_notes passes through skip_note errors # We expect the following behaviour: -# - When StartNote is encountered skip_note is called to skip the note +# - When a StartNote token is encountered skip_note is called to skip the note # - Any error skip_note gives is propagated through clear_notes @template_parse_invalid(dummy_skipper_invalid().clear_notes) def test_parse_clear_notes_startnote_propagation(draw, parent_context): @@ -99,7 +102,7 @@ # Tests clear_notes errors when finding an EndNote # We expect the following behaviour: -# - When EndNote is encountered a FOUND_ENDNOTE error is raised +# - When an EndNote token is encountered a FOUND_ENDNOTE error is raised @template_parse_invalid(dummy_skipper_valid().clear_notes) def test_parse_clear_notes_invalid_endnote(draw, parent_context): 
return error_on_token(draw, parent_context, "EndNote", ParseError.FOUND_ENDNOTE) diff --git a/tests/parse/test_note.py b/tests/parse/test_note.py index 9fb720a..b4090f6 100644 --- a/tests/parse/test_note.py +++ b/tests/parse/test_note.py @@ -62,12 +62,12 @@ pass -# Generate note without StartNote +# Tests parsing notes without StartNote # We expect the following behaviour: # - Error if StartNote is not a SyntaxType.TOKEN # - Error if StartNote's token value is not "StartNote" @template_parse_invalid(NoteSkipper().skip_note) -def test_parse_note_invalid_incorrect(draw, parent_context): +def test_parse_note_invalid_nostartnote(draw, parent_context): (tokens, _) = draw(draw_syntax_note_valid()) token = draw(draw_syntax_not_startnote()) new_tokens = [token] + tokens[1:0] @@ -79,9 +79,9 @@ return (new_tokens, error) -# Generate no note +# Tests parsing empty notes # We expect the following behaviour: -# - Error if there is no StartNote node at all +# - Error if there is no StartNote token at all @template_parse_invalid(NoteSkipper().skip_note) def test_parse_note_invalid_empty(draw, parent_context): context = ParseContext(ParseTask.PARSE_NOTE, None, parent_context) @@ -89,7 +89,7 @@ return ([], error) -# Generate note with a StartNote token in it +# Tests parsing a note with a StartNote token in it # We expect the following behaviour: # - Error if a StartNote token is in the note content @template_parse_invalid(NoteSkipper().skip_note) @@ -102,9 +102,9 @@ return (new_tokens, error) -# Generate note without EndNote +# Tests parsing a note without an EndNote token # We expect the following behaviour: -# - Error if there is no EndNote node at all +# - Error if there is no EndNote token at all @template_parse_invalid(NoteSkipper().skip_note) def test_parse_note_invalid_noendnote(draw, parent_context): (tokens, _) = draw(draw_syntax_note_valid()) diff --git a/tests/parse/test_text.py b/tests/parse/test_text.py index 3404c86..315ba6f 100644 --- a/tests/parse/test_text.py +++ 
b/tests/parse/test_text.py @@ -71,12 +71,12 @@ pass -# Generate text without StartText +# Tests parsing text without StartText # We expect the following behaviour: # - Error if StartText is not a SyntaxType.TOKEN # - Error if StartText's token value is not "StartText" @template_parse_invalid(Parser().parse_text) -def test_parse_text_invalid_incorrect(draw, parent_context): +def test_parse_text_invalid_nostarttext(draw, parent_context): (tokens, _) = draw(draw_syntax_text_valid()) token = draw(draw_syntax_not_starttext()) new_tokens = [token] + tokens[1:0] @@ -88,9 +88,9 @@ return (new_tokens, error) -# Generate no text +# Tests parsing empty text # We expect the following behaviour: -# - Error if there is no StartText node at all +# - Error if there is no StartText token at all @template_parse_invalid(Parser().parse_text) def test_parse_text_invalid_empty(draw, parent_context): (tokens, _) = draw(draw_syntax_text_valid()) @@ -99,7 +99,7 @@ return ([], error) -# Generate text with invalid content tokens +# Tests parsing text with invalid content tokens # We expect the following behaviour: # - Error if a content token is not a SyntaxType.TOKEN @template_parse_invalid(Parser().parse_text) @@ -112,7 +112,7 @@ return (new_tokens, error) -# Generate text with a StartText token in it +# Tests parsing text with a StartText token in it # We expect the following behaviour: # - Error if a StartText token is in the text content @template_parse_invalid(Parser().parse_text) @@ -125,9 +125,9 @@ return (new_tokens, error) -# Generate text without EndText +# Tests parsing text without an EndText token # We expect the following behaviour: -# - Error if there is no EndText node at all +# - Error if there is no EndText token at all @template_parse_invalid(Parser().parse_text) def test_parse_text_invalid_noendtext(draw, parent_context): (tokens, _) = draw(draw_syntax_text_valid()) diff --git a/tests/test_tokenize.py b/tests/test_tokenize.py index f169fdf..ec1d2aa 100644 --- 
a/tests/test_tokenize.py +++ b/tests/test_tokenize.py @@ -39,10 +39,11 @@ merged = [] for curr in tokens[1:]: if prev.value == "\r" and curr.value == "\n": - # Don't append the \r - # Set prev to \r\n instead of \n + # Previous token is \r, don't append it + # Instead promote this \n token to \r\n prev = Syntax("\r\n", prev.location, SyntaxType.TOKEN) else: + # Append the previous token merged.append(prev) prev = curr merged.append(prev) @@ -55,7 +56,7 @@ def draw_tokens_to_split(draw): source = "" tokens = [] - elements = draw(lists(just(True))) + elements = draw(lists(just(True))) # Dummy list for sizing drawing_whitespace = draw(booleans()) for _ in elements: if drawing_whitespace: @@ -88,8 +89,8 @@ # U+2028 LINE SEPARATOR # U+2029 PARAGRAPH SEPARATOR # - Non-whitespace is anything else -# - Whitespace and non-whitespace are separated in to tokens -# - Whitespace sequences are split in to multiple tokens +# - Whitespace and non-whitespace are separated in to separate tokens +# - Whitespace sequences are split in to multiple adjacent tokens # - Non-whitespace code points are combined in to a single token # - Each token location is line 1 offset 1 of file "" @given(draw_tokens_to_split()) @@ -98,7 +99,7 @@ assert tokenize.split_tokens(source) == tokens -# Generates a list of tokens and their corrected locations +# Generates a list of tokens with correct locations @composite def draw_tokens_locations(draw): tokens = draw(lists(draw_token_classified())) @@ -143,7 +144,7 @@ assert tokenize.locate_tokens(input, filename) == located -# Generates a list of tokens with and without whitespace +# Generates two lists of tokens: One with whitespace and one without @composite def draw_tokens_whitespace(draw): input = draw(lists(draw_token_classified()))