diff --git a/src/parse.py b/src/parse.py
index 4903352..92e9278 100644
--- a/src/parse.py
+++ b/src/parse.py
@@ -18,6 +18,21 @@
             self.file,
         )
+
+    def __eq__(self, other):
+        """Two locations are equal when line, column, and file all match."""
+        if not isinstance(other, type(self)):
+            return NotImplemented
+        return (
+            self.line == other.line
+            and self.column == other.column
+            and self.file == other.file
+        )
+
+    def __hash__(self):
+        # Defining __eq__ sets __hash__ to None; restore hashability with a
+        # hash that is consistent with equality.
+        return hash((self.line, self.column, self.file))
 
 
 class ParseContext:
     def __init__(self, parent, context, location):
diff --git a/tests/test_parse.py b/tests/test_parse.py
index f3b5105..1477f2c 100644
--- a/tests/test_parse.py
+++ b/tests/test_parse.py
@@ -255,7 +255,7 @@
     tokens.insert(index, bad_token)
     soup = draw(draw_token_soup(tokens))
     error_token = soup.tokens[index]
-    return soup
+    return (error_token, soup)
 
 
 # Test that we can lex tokens correctly
@@ -280,13 +280,18 @@
 
 # Test that we can catch a stray EndText
 @given(draw_error_endtext())
-def test_lexer_error_endtext(soup):
+def test_lexer_error_endtext(error_soup):
+    error_token, soup = error_soup
     try:
         tokenizer = parse.Tokenizer(soup.code, soup.filename)
         tokens = tokenizer.tokenize()
         assert False # Success?
     except parse.ParseError as e:
-        tokens = None
+        location = error_token.location
+        assert e.context.parent is None
+        assert e.context.context == "reading word"
+        assert e.context.location == location
+        assert e.error == "Found stray EndText"
 
 
 # General fuzz test, make sure the parser doesn't fall apart and spew