# NewLang/tests/test_parse.py
from hypothesis import given
from hypothesis.strategies import text, booleans, sampled_from, characters

from src import parse


# General fuzz test: whatever the input, the parser must not fall apart with
# an uncontrolled exception; ParseError is the only error it may raise.
@given(text(), text())
def test_parser_fuzz(code, filename):
    try:
        tokenizer = parse.Tokenizer(code, filename)
        tokens = tokenizer.tokenize()
        parser = parse.Parser(tokens)
        parser.parse_file()
    except parse.ParseError:
        pass


# Test that we can make string literals using the BeginText and EndText syntax.
@given(text())
def test_lexer_text(content):
    code = "BeginText " + content + " EndText"
    tokenizer = parse.Tokenizer(code, "")
    tokens = tokenizer.tokenize()
    assert tokens[0].type == "text"
    assert tokens[0].value == content
    assert tokens[1].type == "EOF"


# Test that we can make notes using BeginNote and EndNote syntax.
@given(text())
def test_lexer_note(content):
    code = "BeginNote " + content + " EndNote"
    tokenizer = parse.Tokenizer(code, "")
    tokens = tokenizer.tokenize()
    assert tokens[0].type == "EOF"


# Test that we can make booleans using True and False
@given(booleans())
def test_lexer_boolean(value):
    code = "True" if value else "False"
    tokenizer = parse.Tokenizer(code, "")
    tokens = tokenizer.tokenize()
    assert tokens[0].type == "bool"
    assert tokens[0].value == value
    assert tokens[1].type == "EOF"
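

# Sketch of a location check, assuming (as the shebang test below suggests)
# that lines are 1-based, columns are 0-based, and the first token of a
# plain one-line program starts at line 1, column 0.
def test_lexer_location():
    tokenizer = parse.Tokenizer("True", "")
    tokens = tokenizer.tokenize()
    assert tokens[0].location.line == 1
    assert tokens[0].location.column == 0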


# List of keywords the lexer understands
keywords = [
    "NewLang",
    "Done",
    "Set",
    "To",
    "EndSet",
    "If",
    "Then",
    "Else",
    "EndIf",
]


# Test that we can read keywords properly
@given(sampled_from(keywords))
def test_lexer_keyword(keyword):
    code = keyword
    tokenizer = parse.Tokenizer(code, "")
    tokens = tokenizer.tokenize()
    assert tokens[0].type == "keyword"
    assert tokens[0].value == keyword
    assert tokens[1].type == "EOF"
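

# Sketch of a multi-token check, assuming whitespace-separated keywords
# tokenize independently: two keywords in a row should yield two keyword
# tokens followed by EOF.
@given(sampled_from(keywords), sampled_from(keywords))
def test_lexer_keyword_pair(first, second):
    code = first + " " + second
    tokenizer = parse.Tokenizer(code, "")
    tokens = tokenizer.tokenize()
    assert tokens[0].type == "keyword"
    assert tokens[0].value == first
    assert tokens[1].type == "keyword"
    assert tokens[1].value == second
    assert tokens[2].type == "EOF"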


# Test that we can make symbols
@given(text(alphabet=characters(blacklist_characters="\n\t "), min_size=1))
def test_lexer_symbols(symbol):
    code = symbol
    tokenizer = parse.Tokenizer(code, "")
    tokens = tokenizer.tokenize()
    assert tokens[0].type == "symbol"
    assert tokens[0].value == symbol
    assert tokens[1].type == "EOF"


# Test that shebangs are skipped
@given(text(alphabet=characters(blacklist_characters="\n")))
def test_lexer_shebang(shebang):
    code = "#!" + shebang + "\n"
    tokenizer = parse.Tokenizer(code, "")
    tokens = tokenizer.tokenize()
    assert tokens[0].type == "EOF"
    assert tokens[0].location.line == 2
    assert tokens[0].location.column == 0
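

# Sketch of the contrasting case, assuming empty input tokenizes to a single
# EOF token and that locations stay at line 1, column 0 when there is no
# shebang line to skip.
def test_lexer_empty_input():
    tokenizer = parse.Tokenizer("", "")
    tokens = tokenizer.tokenize()
    assert tokens[0].type == "EOF"
    assert tokens[0].location.line == 1
    assert tokens[0].location.column == 0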