-
Notifications
You must be signed in to change notification settings - Fork 0
/
tetris_lexer_notParsingArrays.py
142 lines (113 loc) · 3.06 KB
/
tetris_lexer_notParsingArrays.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
'''
NOTE: PLEASE DOWNLOAD PLY, THE PYTHON LEX-YACC FROM HERE: https://www.dabeaz.com/ply/
IT IS NOT AVAILABLE FOR DOWNLOAD THROUGH PIP/CONDA
'''
# In this tetris_lexer.py, we do not try to parse Arrays/Matrices; instead we directly return a stream of individual tokens
import ply.lex as lex
import sys
# Reserved keywords of the Tetris game-configuration language, mapped to the
# token names the lexer will emit for them.
reserved = {
    'level': 'LEVEL',
    'setlevelspeed': 'SETLEVELSPEED',
    'passlevelscore': 'PASSLEVELSCORE',
    'board': 'BOARD',
    'piece': 'PIECE',
    'speed': 'SPEED',
    'piececolor': 'PIECECOLOR',
    'bonus': 'BONUS',
    'sequence': 'SEQUENCE',
    'random': 'RANDOM',
    'startgame': 'STARTGAME',
    'scoring': 'SCORING',
    'skipblock': 'SKIPBLOCK',
    'moveconfig': 'MOVECONFIG',
    # NOTE(review): token name is misspelled ("SIMULATANEOUS") — kept unchanged
    # in case a companion parser already references this exact name; confirm
    # before renaming.
    'simultaneous': 'SIMULATANEOUS',
    'WASD': 'WASD',    # WASD movement key config
    'ARROW': 'ARROW',  # Arrow-keys movement key config
}

tokens = [
    'RES',         # Reserved game words
    'IDENTIFIER',  # Variables for pieces and sequences
    # Types
    'INT',
    'COLORNAME',   # Color names for pieces
    'HEXCOLOR',
    'MATRIX',      # A matrix is used to specify a piece structure
    'ARRAY',       # An array is used to specify sequences of pieces, and also the points scheme
    # Punctuations/Operators
    'EQUALS',
    'LEFT_BRT',
    'RIGHT_BRT',
    'LEFT_CURLY',
    'RIGHT_CURLY',
    'RIGHT_SQR',
    'LEFT_SQR',
    'SEMICOLON',
    'COMMA',
    'MULTIPLY',
    # Comments and blank/whitespace:
    'COMMENT',
    'BLANKS',
] + list(reserved.values())

# Single-character tokens (PLY treats `t_<NAME>` strings as regexes).
t_EQUALS = r'='
t_LEFT_BRT = r'\('
t_RIGHT_BRT = r'\)'
t_LEFT_CURLY = r'\{'
t_RIGHT_CURLY = r'\}'
# BUG FIX: the square-bracket patterns were swapped — LEFT_SQR now matches '['
# and RIGHT_SQR matches ']'.
t_LEFT_SQR = r'\['
t_RIGHT_SQR = r'\]'
t_SEMICOLON = r'\;'
t_COMMA = r'\,'
t_MULTIPLY = r'\*'
# t_ignore is a plain string of characters to skip, not a regex.
t_ignore = ' '  # Ignore whitespaces
# More complicated tokens, such as tokens that are more than 1 character in length are defined using functions.
# Although blankspace is detected by t_ignore, we have to also detect tokens like \t, \n, \r
def t_BLANKS(t):
    r'[\t\r\n]+'
    # Discard tabs/CRs/newlines, but keep the lexer's line counter accurate so
    # later error reports can name a line.  (The old `t.lexer.skip(0)` was a
    # no-op: PLY already advanced past the match; returning nothing is what
    # discards the token.)
    t.lexer.lineno += t.value.count('\n')
def t_COMMENT(t):
    r'\/\*.*?\*\/'
    # A C-style /* ... */ comment on a single line.
    # BUG FIX: the greedy `.*` merged two comments on one line ("/* a */ x /* b */")
    # into a single token; non-greedy `.*?` stops at the first `*/`.
    # PLY derives t.type ('COMMENT') from the rule name, so no manual assignment
    # is needed.
    return t
def t_RES(t):
    r'[a-zA-Z_][a-zA-Z_0-9]*'
    # Keywords listed in `reserved` get their dedicated token type; every
    # other word is lexed as a plain IDENTIFIER.
    t.type = reserved[t.value] if t.value in reserved else 'IDENTIFIER'
    return t
def t_INT(t):
    r'[+-]?[0-9]+'
    # Replace the raw lexeme with its (optionally signed) base-10 integer value.
    t.value = int(t.value, 10)
    return t
def t_COLORNAME(t):
    r"\'[a-z]+\'"
    # A lowercase color name wrapped in single quotes, e.g. 'red'.
    # PLY already sets t.type to 'COLORNAME' from the rule name before calling
    # this function, so the former manual `t.type = 'COLORNAME'` was redundant.
    return t
def t_HEXCOLOR(t):
    r'\#([A-Fa-f0-9]{6}|[A-Fa-f0-9]{3})'
    # A CSS-style hex color: '#' followed by 6 or 3 hex digits.
    # BUG FIX: the alternation was ungrouped (r'\#...{6}|...{3}'), so the old
    # pattern also matched three bare hex digits WITHOUT the leading '#'.
    # Grouping makes the '#' prefix mandatory for both forms.
    return t
def t_IDENTIFIER(t):
    r'[a-zA-Z_][a-zA-Z_0-9]*'
    # NOTE(review): this rule is unreachable — t_RES above has the identical
    # pattern and PLY tries function rules in definition order, so t_RES
    # (which already assigns 'IDENTIFIER' to non-keywords) always wins.
    # Kept to document intent; consider removing.
    return t
def t_error(t):
    # Report the offending character and its line, then advance past it.
    # BUG FIX: the old handler never called skip(), so the lexer stayed stuck
    # on the same illegal character forever; it also printed no detail.
    print("Compilation Error! - illegal character %r at line %d"
          % (t.value[0], t.lexer.lineno))
    t.lexer.skip(1)
    # Still return the token (type 'error') so the driver loop can detect it.
    return t
if __name__ == "__main__":
    # Build the lexer from the rules defined in this module.
    lexer = lex.lex()
    # Read the name of the source file from stdin, then lex its contents.
    filename = input()
    with open(filename, 'r') as file:
        inp = file.read()
    lexer.input(inp)
    fail = 0
    while True:
        tok = lexer.token()
        if not tok:
            break  # end of input
        # BUG FIX: the old code compared the LexToken object itself to the
        # string 'COMPILE_ERROR', which can never be true.  Tokens produced by
        # t_error carry the type 'error'.
        if tok.type == 'error':
            print("Compilation Error\n")
            fail = 1
            break
        print(tok)
    if fail == 0:
        # NOTE(review): "Succesfully" is misspelled; kept byte-for-byte in case
        # anything greps this output.
        print("Compiled Succesfully!\n")