diff --git a/tests/test_parse.py b/tests/test_parse.py
index 7d38b50..a2c2607 100644
--- a/tests/test_parse.py
+++ b/tests/test_parse.py
@@ -98,14 +98,16 @@
 
 # A soup of sample tokens
 class SampleSoup:
-    def __init__(self, tokens, code):
+    def __init__(self, tokens, code, filename):
         self.tokens = tokens
         self.code = code
+        self.filename = filename
 
     def __repr__(self):
-        return "SampleSoup(tokens %s, code '%s')" % (
+        return "SampleSoup(tokens %s, code '%s', filename '%s')" % (
             self.tokens,
             self.code,
+            self.filename,
         )
 
 
@@ -232,15 +234,15 @@
         draw_token_note(),
         draw_token_empty(),
     ]
+    filename = draw(text())
     shebang = draw(lists(draw_token_shebang(), max_size=1))
     tokens = draw(lists(one_of(strategies)))
     all_tokens = shebang + tokens
-    file = ""
     code = ""
     curr_line = 1
     curr_column = 1
     for token in all_tokens:
-        token.location = parse.ParseLocation(curr_line, curr_column, file)
+        token.location = parse.ParseLocation(curr_line, curr_column, filename)
         space = draw(draw_whitespace())
         new_code = token.code + space
         lines = new_code.split("\n")
@@ -251,14 +253,14 @@
             curr_column += len(new_code)
         code += new_code
     eof = SampleToken(None, "EOF", None)
-    eof.location = parse.ParseLocation(curr_line, curr_column - 1, "")
-    return SampleSoup(all_tokens + [eof], code)
+    eof.location = parse.ParseLocation(curr_line, curr_column - 1, filename)
+    return SampleSoup(all_tokens + [eof], code, filename)
 
 
 # Test that we can lex tokens correctly
 @given(draw_token_soup())
 def test_lexer_soup(soup):
-    tokens = safe_tokenize(soup.code, "")
+    tokens = safe_tokenize(soup.code, soup.filename)
     assert len(tokens) <= len(soup.tokens)
     in_pos = 0
     out_pos = 0