"""
Franca lexer.
"""

import ply.lex as lex


class LexerException(Exception):

    def __init__(self, message):
        super(LexerException, self).__init__()
        self.message = message

    def __str__(self):
        return self.message


class Lexer(object):
    """
    Franca IDL PLY lexer.
    """

    # Keywords
    keywords = [
        "package",
        "import",
        "from",
        "model",
        "typeCollection",
        "version",
        "major",
        "minor",
        "typedef",
        "is",
        "interface",
        "attribute",
        "readonly",
        "noSubscriptions",
        "method",
        "fireAndForget",
        "in",
        "out",
        "error",
        "broadcast",
        "selective",
        "enumeration",
        "extends",
        "struct",
        "polymorphic",
        "array",
        "of",
        "map",
        "to",
        "const",

        # Types
        "Int8",
        "Int16",
        "Int32",
        "Int64",
        "UInt8",
        "UInt16",
        "UInt32",
        "UInt64",
        "Boolean",
        "Float",
        "Double",
        "String",
        "ByteBuffer",
    ]

    # Tokens
    tokens = [keyword.upper() for keyword in keywords] + [
        "ID",
        "INTEGER_VAL",
        "HEXADECIMAL_VAL",
        "BINARY_VAL",
        "REAL_VAL",
        "STRING_VAL",
        "BOOLEAN_VAL",
        "STRUCTURED_COMMENT",
    ]

    # Ignored characters
    t_ignore = " \t"

    # Literals
    literals = [".", "{", "}", "*", "=", "[", "]"]

    # Identifiers and keywords
    _keyword_map = {}
    for keyword in keywords:
        _keyword_map[keyword] = keyword.upper()

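    # PLY adds function-defined rules to the master regular expression in
    # the order they appear below, so the comment and literal-value rules
    # are tried before t_ID. Keywords are not separate rules: t_ID
    # reclassifies a matched identifier through _keyword_map, the lookup
    # idiom the PLY manual recommends for reserved words.
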
    # Newlines
    # noinspection PyPep8Naming,PyIncorrectDocstring
    @staticmethod
    def t_NEWLINE(t):
        # noinspection PySingleQuotedDocstring
        r"\n+"
        t.lexer.lineno += t.value.count("\n")

    # Line comments
    # noinspection PyPep8Naming,PyIncorrectDocstring
    @staticmethod
    def t_LINE_COMMENT(t):
        # noinspection PySingleQuotedDocstring
        r"\/\/[^\r\n]*"
        t.lexer.lineno += t.value.count("\n")

    # Block comments
    # noinspection PyPep8Naming,PyIncorrectDocstring
    @staticmethod
    def t_BLOCK_COMMENT(t):
        # noinspection PySingleQuotedDocstring
        r"/\*(.|\n)*?\*/"
        t.lexer.lineno += t.value.count("\n")

    # Structured comments
    # noinspection PyPep8Naming,PyIncorrectDocstring
    @staticmethod
    def t_STRUCTURED_COMMENT(t):
        # noinspection PySingleQuotedDocstring
        r"<\*\*(.|\n)*?\*\*>"
        t.lexer.lineno += t.value.count("\n")
        # Strip the "<**" and "**>" delimiters and surrounding whitespace.
        t.value = t.value[3:-3].strip()
        return t

    # noinspection PyPep8Naming,PyIncorrectDocstring
    @staticmethod
    def t_STRING_VAL(t):
        # noinspection PySingleQuotedDocstring
        r"\"[^\"]*\""
        # Strip the enclosing double quotes.
        t.value = t.value[1:-1]
        return t

    # Matches decimal reals such as "1.0", "1.", ".5e-3", and "2e8", with
    # an optional sign and an optional trailing f/F/d/D suffix.
    # noinspection PyPep8Naming,PyIncorrectDocstring
    @staticmethod
    def t_REAL_VAL(t):
        # noinspection PySingleQuotedDocstring
        r"[+-]?((((([0-9]*\.[0-9]+)|([0-9]+\.))([eE][-+]?[0-9]+)?)|([0-9]+([eE][-+]?[0-9]+)))[fFdD]?)"
        return t

    # noinspection PyPep8Naming,PyIncorrectDocstring
    @staticmethod
    def t_HEXADECIMAL_VAL(t):
        # noinspection PySingleQuotedDocstring
        r"0[xX][0-9a-fA-F]+"
        t.value = int(t.value, 16)
        return t

    # noinspection PyPep8Naming,PyIncorrectDocstring
    @staticmethod
    def t_BINARY_VAL(t):
        # noinspection PySingleQuotedDocstring
        r"0[bB][01]+"
        t.value = int(t.value, 2)
        return t

    # noinspection PyPep8Naming,PyIncorrectDocstring
    @staticmethod
    def t_INTEGER_VAL(t):
        # noinspection PySingleQuotedDocstring
        r"[+-]?\d+"
        t.value = int(t.value, 10)
        return t

    # noinspection PyPep8Naming,PyIncorrectDocstring
    @staticmethod
    def t_BOOLEAN_VAL(t):
        # noinspection PySingleQuotedDocstring
        r"(true|false)(?!\w)"
        # The lookahead keeps this rule from claiming the prefix of an
        # identifier such as "trueColor".
        t.value = t.value == "true"
        return t

    # Identifier
    # noinspection PyPep8Naming,PyIncorrectDocstring
    @staticmethod
    def t_ID(t):
        # noinspection PySingleQuotedDocstring
        r"[A-Za-z][A-Za-z0-9_]*"
        t.type = Lexer._keyword_map.get(t.value, "ID")
        return t

    @staticmethod
    def t_error(t):
        raise LexerException("Illegal character '{}' at line {}.".format(
            t.value[0], t.lineno))

    def __init__(self, **kwargs):
        """
        Constructor.
        """
        self.lexer = lex.lex(module=self, **kwargs)

    def tokenize(self, data):
        """
        Tokenize input data to stdout for testing purposes.

        :param data: Input text to parse.
        """
        self.lexer.input(data)
        while True:
            tok = self.lexer.token()
            if not tok:
                break
            print(tok)

    def tokenize_data(self, data):
        """
        Tokenize input data and return the tokens as a list.

        :param data: Input text to parse.
        :return: List of tokens.
        """
        self.lexer.input(data)
        tokenized_data = []
        while True:
            tok = self.lexer.token()
            if not tok:
                break
            tokenized_data.append(tok)
        return tokenized_data

    def tokenize_file(self, fspec):
        """
        Tokenize input file to stdout for testing purposes.

        :param fspec: Input file to parse.
        """
        with open(fspec, "r") as f:
            data = f.read()
        # tokenize() prints the tokens and returns None, so there is
        # nothing useful to return here.
        self.tokenize(data)
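
# Usage sketch: a minimal, hypothetical example of driving the lexer and
# printing the resulting token stream. The Franca snippet is illustrative
# only and uses just the constructs covered by the rules above.
if __name__ == "__main__":
    _SAMPLE = (
        "package org.example\n"
        "<** @description: Example interface. **>\n"
        "interface Calculator {\n"
        "    version { major 1 minor 0 }\n"
        "    method add {\n"
        "        in { Int32 a Int32 b }\n"
        "        out { Int32 sum }\n"
        "    }\n"
        "}\n"
    )
    for tok in Lexer().tokenize_data(_SAMPLE):
        print(tok.type, repr(tok.value))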