diff --git a/tests/test_parse.py b/tests/test_parse.py index 48d2b60..10fe87a 100644 --- a/tests/test_parse.py +++ b/tests/test_parse.py @@ -67,6 +67,10 @@ return tokens +# A sample token containing code to create a lexer token, and +# the resulting lexer type and value +# A type and value of 'None' is used for lexer code that +# should get ignored, such as shebangs and notes class SampleToken: def __init__(self, code, type, value): self.code = code @@ -81,6 +85,7 @@ ) +# A soup of sample tokens class SampleSoup: def __init__(self, tokens, code): self.tokens = tokens @@ -143,7 +148,7 @@ space1 = draw(sampled_from(lexer_whitespace)) space2 = draw(sampled_from(lexer_whitespace)) code = "StartNote" + space1 + value + space2 + "EndNote" - return SampleToken(code, "note", None) + return SampleToken(code, None, None) # Generates a shebang token @@ -151,13 +156,13 @@ def draw_token_shebang(draw): shebang = draw(text(alphabet=characters(blacklist_characters="\n"))) code = "#!" + shebang + "\n" - return SampleToken(code, "shebang", None) + return SampleToken(code, None, None) # Generates an empty token @composite def draw_token_empty(draw): - return SampleToken("", "empty", None) + return SampleToken("", None, None) # Generates a soup of tokens @@ -188,9 +193,8 @@ EOF = len(tokens) - 1 in_pos = 0 out_pos = 0 - ignore_types = ["note", "shebang", "empty"] while out_pos < EOF and in_pos < len(soup.tokens): - if soup.tokens[in_pos].type in ignore_types: + if not soup.tokens[in_pos].type: in_pos += 1 else: assert tokens[out_pos].type == soup.tokens[in_pos].type