# SPDX-License-Identifier: LGPL-2.1-only
# Copyright 2022 Jookia <contact@jookia.org>

from hypothesis import given
from hypothesis.strategies import composite, lists, text, sampled_from

from src import tokenize
from src import parse
from tests import test_tokenize


# Draws a random syntax location
@composite
def draw_syntax_location(draw):
    return draw(test_tokenize.draw_token_location())


# Draws a random syntax type
@composite
def draw_syntax_type(draw):
    return draw(sampled_from(list(parse.SyntaxType)))


# Draws a syntax node wrapping a random token
@composite
def draw_syntax_token(draw):
    value = draw(test_tokenize.draw_token_random())
    location = draw(draw_syntax_location())
    type = parse.SyntaxType.TOKEN
    return parse.Syntax(value, location, type)


# Draws a random syntax node
@composite
def draw_syntax_random(draw):
    return draw(draw_syntax_token())


# Tests syntax getters work correctly
@given(text(), draw_syntax_location(), draw_syntax_type())
def test_parse_syntax_getters(value, location, type):
    # Use text as a somewhat random value
    test = parse.Syntax(value, location, type)
    assert test.value == value
    assert test.location == location
    assert test.type == type


# Tests syntax equality works correctly
# Two Syntax nodes are equal exactly when their value, location and type match
@given(draw_syntax_random(), draw_syntax_random())
def test_parse_syntax_equality(syntax1, syntax2):
    equals = (
        syntax1.value == syntax2.value
        and syntax1.location == syntax2.location
        and syntax1.type == syntax2.type
    )
    assert (syntax1 == syntax2) == equals


# Draws random tokens and the syntax nodes expected from importing them
@composite
def draw_syntax_imported(draw):
    input = draw(lists(test_tokenize.draw_token_random()))
    tokens = []
    for t in input:
        tokens.append(parse.Syntax(t, t.location, parse.SyntaxType.TOKEN))
    return (input, tokens)


# Tests importing tokens works correctly
# We expect the following behaviour:
# - Each token is converted to a Syntax
# - The Syntax's value is the token
# - The Syntax's location is the token's location
# - The Syntax's type is SyntaxType.TOKEN
@given(draw_syntax_imported())
def test_parse_import_tokens(test_data):
    (input, syntax) = test_data
    assert parse.import_tokens(input) == syntax


# Draws classified tokens and the same list with whitespace tokens removed
@composite
def draw_tokens_whitespace(draw):
    input = draw(lists(test_tokenize.draw_token_classified()))
    tokens = []
    for t in input:
        if t.type not in [tokenize.TokenType.SPACE, tokenize.TokenType.NEWLINE]:
            tokens.append(t)
    return (input, tokens)


# Tests strip_whitespace works correctly
# We expect the following behaviour:
# - No tokens are modified
# - Tokens of type SPACE or NEWLINE are removed from the output
@given(draw_tokens_whitespace())
def test_parse_strip_whitespace(test_data):
    (input, tokens) = test_data
    assert parse.strip_whitespace(input) == tokens


# Tests the parser wrapper works correctly
# We expect the following behaviour:
# - Whitespace tokens are stripped
@given(lists(test_tokenize.draw_token_classified()))
def test_parse_fuzz(tokens):
    stripped = parse.strip_whitespace(tokens)
    parsed = parse.parse(tokens)
    assert stripped == parsed
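
# For orientation, a minimal sketch of the src/parse.py API these tests
# exercise. This is a hypothetical reconstruction inferred only from the
# assertions above, not the real implementation (which is imported at the
# top of this file), so it is kept commented out:
#
#     import enum
#
#     class SyntaxType(enum.Enum):
#         TOKEN = enum.auto()
#
#     class Syntax:
#         def __init__(self, value, location, type):
#             self.value = value
#             self.location = location
#             self.type = type
#
#         def __eq__(self, other):
#             return (
#                 self.value == other.value
#                 and self.location == other.location
#                 and self.type == other.type
#             )
#
#     def import_tokens(tokens):
#         # Wrap each token in a Syntax node carrying its location
#         return [Syntax(t, t.location, SyntaxType.TOKEN) for t in tokens]
#
#     def strip_whitespace(tokens):
#         # Drop SPACE and NEWLINE tokens, leaving the rest untouched
#         whitespace = [tokenize.TokenType.SPACE, tokenize.TokenType.NEWLINE]
#         return [t for t in tokens if t.type not in whitespace]
#
#     def parse(tokens):
#         # At this stage the parser only strips whitespace
#         return strip_whitespace(tokens)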