diff --git a/tests/test_parse.py b/tests/test_parse.py
index a2c2607..f53e38c 100644
--- a/tests/test_parse.py
+++ b/tests/test_parse.py
@@ -40,34 +40,6 @@
 ]
 
 
-# Quick function to split a string by different characters, used for checking
-# if generated text includes tokens
-def split_by(string, characters):
-    tokens = []
-    curr_token = ""
-    for c in string:
-        if c in characters:
-            if curr_token != "":
-                tokens.append(curr_token)
-                curr_token = ""
-        else:
-            curr_token += c
-    if curr_token != "":
-        tokens.append(curr_token)
-    return tokens
-
-
-# Wrapper to report ParseError nicely to Hypothesis
-def safe_tokenize(code, filename):
-    tokens = []
-    try:
-        tokenizer = parse.Tokenizer(code, filename)
-        tokens = tokenizer.tokenize()
-    except parse.ParseError as e:
-        raise AssertionError("ParseError thrown: %s" % (e))
-    return tokens
-
-
 # A sample token containing code to create a lexer token, and
 # the resulting lexer type and value
 # An type and value of 'None' is used for lexer code that
@@ -257,6 +229,17 @@
     return SampleSoup(all_tokens + [eof], code, filename)
 
 
+# Wrapper to report ParseError nicely to Hypothesis
+def safe_tokenize(code, filename):
+    tokens = []
+    try:
+        tokenizer = parse.Tokenizer(code, filename)
+        tokens = tokenizer.tokenize()
+    except parse.ParseError as e:
+        raise AssertionError("ParseError thrown: %s" % (e))
+    return tokens
+
+
 # Test that we can lex tokens correctly
 @given(draw_token_soup())
 def test_lexer_soup(soup):