1
|
|
|
package parser |
2
|
|
|
|
3
|
|
|
import ( |
4
|
|
|
"errors" |
5
|
|
|
"fmt" |
6
|
|
|
"strings" |
7
|
|
|
|
8
|
|
|
"github.com/Permify/permify/pkg/dsl/ast" |
9
|
|
|
"github.com/Permify/permify/pkg/dsl/lexer" |
10
|
|
|
"github.com/Permify/permify/pkg/dsl/token" |
11
|
|
|
"github.com/Permify/permify/pkg/dsl/utils" |
12
|
|
|
) |
13
|
|
|
|
14
|
|
|
const (
	// The blank identifier consumes iota's initial value (0) so that the
	// named precedence levels below start at 1.
	_ int = iota

	// LOWEST is the default (weakest) binding precedence used to start expression parsing.
	LOWEST
	// AND_OR_NOT is the precedence level shared by the logical operators (AND, OR, NOT).
	AND_OR_NOT
)
23
|
|
|
|
24
|
|
|
// precedences maps each infix operator token type to its binding precedence.
// Token types absent from this map are treated as LOWEST (see peekPrecedence/currentPrecedence).
var precedences = map[token.Type]int{
	token.AND: AND_OR_NOT,
	token.OR:  AND_OR_NOT,
	token.NOT: AND_OR_NOT,
}
29
|
|
|
|
30
|
|
|
// Parser is a struct that contains information and functions related to parsing.
// Errors are accumulated in the errors slice and surfaced through the Error method.
type Parser struct {
	// a pointer to a Lexer object that will provide tokens for parsing
	l *lexer.Lexer
	// the current token being processed
	currentToken token.Token
	// the token before currentToken
	previousToken token.Token
	// the next token after currentToken
	peekToken token.Token
	// a slice of error messages that are generated during parsing
	errors []string
	// a map that associates prefix parsing functions with token types
	prefixParseFns map[token.Type]prefixParseFn
	// a map that associates infix parsing functions with token types
	infixParseFunc map[token.Type]infixParseFn
	// references to entities, rules, relations, attributes, and permissions
	// collected while parsing; attached to the schema at the end of Parse
	references *ast.References
}
49
|
|
|
|
50
|
|
|
type (
	// prefixParseFn parses an expression that begins at the current token
	// (e.g. an identifier or a call) and returns the resulting ast.Expression.
	prefixParseFn func() (ast.Expression, error)

	// infixParseFn parses an infix expression, taking the already-parsed
	// left-hand operand and returning the combined ast.Expression.
	infixParseFn func(ast.Expression) (ast.Expression, error)
)
57
|
|
|
|
58
|
|
|
// NewParser creates a new Parser object with the given input string
func NewParser(str string) (p *Parser) {
	// initialize a new Parser object with the given input string and default values for other fields
	p = &Parser{
		l:          lexer.NewLexer(str), // create a new Lexer object with the input string
		errors:     []string{},          // initialize an empty slice of error messages
		references: ast.NewReferences(), // initialize an empty reference store
	}

	// register prefix parsing functions for token type IDENT
	p.prefixParseFns = make(map[token.Type]prefixParseFn)  // initialize an empty map for prefix parsing functions
	p.registerPrefix(token.IDENT, p.parseIdentifierOrCall) // associate the parseIdentifierOrCall function with the IDENT token type

	// register infix parsing functions for token types AND, OR, NOT
	p.infixParseFunc = make(map[token.Type]infixParseFn) // initialize an empty map for infix parsing functions
	p.registerInfix(token.AND, p.parseInfixExpression)   // associate the parseInfixExpression function with the AND token type
	p.registerInfix(token.OR, p.parseInfixExpression)    // associate the parseInfixExpression function with the OR token type
	p.registerInfix(token.NOT, p.parseInfixExpression)   // associate the parseInfixExpression function with the NOT token type

	// return the newly created Parser object
	return p
}
79
|
|
|
|
80
|
|
|
// next retrieves the next non-ignored token from the Parser's lexer and updates the Parser's currentToken and peekToken fields |
81
|
|
|
func (p *Parser) next() { |
82
|
|
|
for { |
83
|
|
|
// retrieve the next token from the lexer |
84
|
|
|
peek := p.l.NextToken() |
85
|
|
|
// if the token is not an ignored token (e.g. whitespace or comments), update the currentToken and peekToken fields and exit the loop |
86
|
|
|
if !token.IsIgnores(peek.Type) { |
87
|
|
|
// set the previousToken before changing currentToken |
88
|
|
|
p.previousToken = p.currentToken |
89
|
|
|
// set the currentToken field to the previous peekToken value |
90
|
|
|
p.currentToken = p.peekToken |
91
|
|
|
// set the peekToken field to the new peek value |
92
|
|
|
p.peekToken = peek |
93
|
|
|
// exit the loop |
94
|
|
|
break |
95
|
|
|
} |
96
|
|
|
} |
97
|
|
|
} |
98
|
|
|
|
99
|
|
|
// nextWithIgnores advances the parser's token stream by one position. |
100
|
|
|
// It updates the currentToken and peekToken of the Parser. |
101
|
|
|
func (p *Parser) nextWithIgnores() { |
102
|
|
|
// Get the next token in the lexers token stream and store it in the variable peek. |
103
|
|
|
peek := p.l.NextToken() |
104
|
|
|
|
105
|
|
|
// Update the currentToken with the value of peekToken. |
106
|
|
|
p.currentToken = p.peekToken |
107
|
|
|
|
108
|
|
|
// Update the peekToken with the value of peek (the new next token in the lexers stream). |
109
|
|
|
p.peekToken = peek |
110
|
|
|
} |
111
|
|
|
|
112
|
|
|
// currentTokenIs checks if the Parser's currentToken is any of the given token types |
113
|
|
|
func (p *Parser) currentTokenIs(tokens ...token.Type) bool { |
114
|
|
|
// iterate through the given token types and check if any of them match the currentToken's type |
115
|
|
|
for _, t := range tokens { |
116
|
|
|
if p.currentToken.Type == t { |
117
|
|
|
// if a match is found, return true |
118
|
|
|
return true |
119
|
|
|
} |
120
|
|
|
} |
121
|
|
|
// if no match is found, return false |
122
|
|
|
return false |
123
|
|
|
} |
124
|
|
|
|
125
|
|
|
// previousTokenIs checks if the Parser's previousToken type is any of the given types |
126
|
|
|
func (p *Parser) previousTokenIs(tokens ...token.Type) bool { |
127
|
|
|
for _, t := range tokens { |
128
|
|
|
if p.previousToken.Type == t { |
129
|
|
|
// if a match is found, return true |
130
|
|
|
return true |
131
|
|
|
} |
132
|
|
|
} |
133
|
|
|
// if no match is found, return false |
134
|
|
|
return false |
135
|
|
|
} |
136
|
|
|
|
137
|
|
|
// peekTokenIs checks if the Parser's peekToken is any of the given token types |
138
|
|
|
func (p *Parser) peekTokenIs(tokens ...token.Type) bool { |
139
|
|
|
// iterate through the given token types and check if any of them match the peekToken's type |
140
|
|
|
for _, t := range tokens { |
141
|
|
|
if p.peekToken.Type == t { |
142
|
|
|
// if a match is found, return true |
143
|
|
|
return true |
144
|
|
|
} |
145
|
|
|
} |
146
|
|
|
// if no match is found, return false |
147
|
|
|
return false |
148
|
|
|
} |
149
|
|
|
|
150
|
|
|
// Error returns an error if there are any errors in the Parser's errors slice |
151
|
|
|
func (p *Parser) Error() error { |
152
|
|
|
// if there are no errors, return nil |
153
|
|
|
if len(p.errors) == 0 { |
154
|
|
|
return nil |
155
|
|
|
} |
156
|
|
|
// if there are errors, return the first error message in the errors slice as an error type |
157
|
|
|
return errors.New(p.errors[0]) |
158
|
|
|
} |
159
|
|
|
|
160
|
|
|
// Parse reads and parses the input string and returns an AST representation of the schema,
// along with any errors encountered during parsing. Top-level statements are entity and
// rule declarations; the collected references are attached to the returned schema.
func (p *Parser) Parse() (*ast.Schema, error) {
	// create a new Schema object to store the parsed statements
	schema := ast.NewSchema()
	schema.Statements = []ast.Statement{}

	// loop through the input string until the end is reached
	for !p.currentTokenIs(token.EOF) {
		// parse the next statement in the input string
		stmt, err := p.parseStatement()
		if err != nil {
			// on failure, surface the first accumulated parser error
			return nil, p.Error()
		}
		if stmt != nil {
			// add the parsed statement to the schema's Statements field if it is not nil
			// (parseStatement returns nil for tokens that do not start a statement)
			schema.Statements = append(schema.Statements, stmt)
		}

		// move to the next token in the input string
		p.next()
	}

	// attach the entity/rule/relation/attribute/permission references gathered during parsing
	schema.SetReferences(p.references)

	// return the parsed schema object and nil to indicate that there were no errors
	return schema, nil
}
188
|
|
|
|
189
|
|
|
// ParsePartial scans the input for the first attribute, relation, or permission
// statement belonging to the given entity and returns it. It returns an error
// if EOF is reached before any such statement is parsed.
func (p *Parser) ParsePartial(entityName string) (ast.Statement, error) {
	for !p.currentTokenIs(token.EOF) {
		// parse the next statement in the input string
		stmt, err := p.parsePartialStatement(entityName)
		if err != nil {
			return nil, p.Error()
		}
		if stmt != nil {
			// return the first successfully parsed statement
			return stmt, nil
		}
		// skip tokens that do not start a statement (e.g. newlines, braces)
		p.next()
	}
	// reached EOF without finding a parsable statement
	return nil, errors.New("no valid statement found")
}
203
|
|
|
|
204
|
|
|
// parsePartialStatement dispatches on the current token type and parses a single
// attribute, relation, or permission statement for the given entity.
// Unrecognized tokens yield (nil, nil) so the caller can skip them.
func (p *Parser) parsePartialStatement(entityName string) (ast.Statement, error) {
	switch p.currentToken.Type {
	case token.ATTRIBUTE:
		return p.parseAttributeStatement(entityName)
	case token.RELATION:
		return p.parseRelationStatement(entityName)
	case token.PERMISSION:
		return p.parsePermissionStatement(entityName)
	default:
		return nil, nil
	}
}
216
|
|
|
|
217
|
|
|
// parseStatement parses the top-level statement starting at the current token.
// Only entity and rule declarations are recognized at the top level; any other
// token yields (nil, nil) so the caller can skip it.
func (p *Parser) parseStatement() (ast.Statement, error) {
	// switch on the currentToken's type to determine which type of statement to parse
	switch p.currentToken.Type {
	case token.ENTITY:
		// if the currentToken is ENTITY, parse an EntityStatement
		return p.parseEntityStatement()
	case token.RULE:
		// if the currentToken is RULE, parse a RuleStatement
		return p.parseRuleStatement()
	default:
		return nil, nil
	}
}
231
|
|
|
|
232
|
|
|
// parseEntityStatement parses an ENTITY statement of the form
//
//	entity name { <relation | attribute | permission statements> }
//
// and returns the resulting EntityStatement AST node. The entity name is
// registered in the parser's references; a duplicate name is an error.
func (p *Parser) parseEntityStatement() (*ast.EntityStatement, error) {
	// create a new EntityStatement object and set its Entity field to the currentToken
	stmt := &ast.EntityStatement{Entity: p.currentToken}
	// expect the next token to be an identifier token, and set the EntityStatement's Name field to the identifier's value
	if !p.expectAndNext(token.IDENT) {
		return nil, p.Error()
	}
	stmt.Name = p.currentToken

	// register the entity name in the parser's references
	err := p.references.AddEntityReference(stmt.Name.Literal)
	if err != nil {
		p.duplicationError(stmt.Name.Literal) // Generate an error message indicating a duplication error
		return nil, p.Error()
	}

	// expect the next token to be a left brace token, indicating the start of the entity's body
	if !p.expectAndNext(token.LCB) {
		return nil, p.Error()
	}

	// loop through the entity's body until a right brace token is encountered
	for !p.currentTokenIs(token.RCB) {
		// reaching EOF here means the entity body was never closed
		if p.currentTokenIs(token.EOF) {
			p.currentError(token.RCB)
			return nil, p.Error()
		}
		// based on the currentToken's type, parse a RelationStatement, AttributeStatement,
		// or PermissionStatement and add it to the EntityStatement's corresponding field
		switch p.currentToken.Type {
		case token.RELATION:
			relation, err := p.parseRelationStatement(stmt.Name.Literal)
			if err != nil {
				return nil, p.Error()
			}
			stmt.RelationStatements = append(stmt.RelationStatements, relation)
		case token.ATTRIBUTE:
			attribute, err := p.parseAttributeStatement(stmt.Name.Literal)
			if err != nil {
				return nil, p.Error()
			}
			stmt.AttributeStatements = append(stmt.AttributeStatements, attribute)
		case token.PERMISSION:
			action, err := p.parsePermissionStatement(stmt.Name.Literal)
			if err != nil {
				return nil, p.Error()
			}
			stmt.PermissionStatements = append(stmt.PermissionStatements, action)
		default:
			// structural tokens (newline, braces) are skipped silently
			if !p.currentTokenIs(token.NEWLINE) && !p.currentTokenIs(token.LCB) && !p.currentTokenIs(token.RCB) {
				// anything else inside an entity body is unexpected: report which
				// statement keywords would have been valid here
				p.currentError(token.RELATION, token.PERMISSION, token.ATTRIBUTE)
				return nil, p.Error()
			}
		}
		// move to the next token in the input string
		p.next()
	}

	// return the parsed EntityStatement and nil for the error value
	return stmt, nil
}
296
|
|
|
|
297
|
|
|
// parseRuleStatement is responsible for parsing a rule statement in the form:
//
//	rule name(typ1 string, typ2 boolean) {
//		EXPRESSION
//	}
//
// Parameter types may carry a "[]" suffix to denote arrays. The rule body is
// captured verbatim (as a concatenated token string) into stmt.Expression,
// and the rule is registered in the parser's references.
//
// This method assumes the current token points to the 'rule' token when it is called.
func (p *Parser) parseRuleStatement() (*ast.RuleStatement, error) {
	// Create a new RuleStatement
	stmt := &ast.RuleStatement{Rule: p.currentToken}

	// Expect the next token to be an identifier (the name of the rule).
	// If it's not an identifier, return an error.
	if !p.expectAndNext(token.IDENT) {
		return nil, p.Error()
	}
	stmt.Name = p.currentToken

	// Expect the next token to be a left parenthesis '(' starting the argument list.
	if !p.expectAndNext(token.LP) {
		return nil, p.Error()
	}

	// arguments keeps the full token/type information for the AST;
	// args keeps a plain string form used when registering the rule reference.
	arguments := map[token.Token]ast.AttributeTypeStatement{}
	args := map[string]string{}

	// Loop over the tokens until a right parenthesis ')' is encountered.
	// In each iteration, two tokens are processed: an identifier (arg name) and its type.
	for !p.peekTokenIs(token.RP) {
		// Expect the first token to be the parameter's identifier.
		if !p.expectAndNext(token.IDENT) {
			return nil, p.Error()
		}
		argument := p.currentToken
		arg := p.currentToken.Literal

		// Expect the second token to be the parameter's type.
		if !p.expectAndNext(token.IDENT) {
			return nil, p.Error()
		}

		if p.peekTokenIs(token.LSB) { // Check if the next token is '['
			arguments[argument] = ast.AttributeTypeStatement{
				Type:    p.currentToken,
				IsArray: true, // Marking the type as an array
			}
			args[arg] = p.currentToken.Literal + "[]" // Store the argument type as string with "[]" suffix
			p.next()                                  // Move to the '[' token
			if !p.expectAndNext(token.RSB) {          // Expect and move to the ']' token
				return nil, p.Error()
			}
		} else {
			arguments[argument] = ast.AttributeTypeStatement{
				Type:    p.currentToken,
				IsArray: false, // Marking the type as not an array
			}
			args[arg] = p.currentToken.Literal // Store the regular argument type
		}

		// If the next token is a comma, there are more parameters to parse.
		// Continue to the next iteration.
		if p.peekTokenIs(token.COMMA) {
			p.next()
			continue
		} else if !p.peekTokenIs(token.RP) {
			// If the next token is not a comma, it must be a closing parenthesis.
			// If it's not, return an error.
			p.peekError(token.RP)
			return nil, p.Error()
		}
	}

	// Save parsed arguments to the statement
	stmt.Arguments = arguments

	// Consume the right parenthesis.
	p.next()

	// Expect the next token to be a left curly bracket '{' starting the body.
	if !p.expectAndNext(token.LCB) {
		return nil, p.Error()
	}

	p.next()

	// Collect tokens for the body until a closing curly bracket '}' is encountered.
	// nextWithIgnores is used so whitespace/comment tokens inside the body are kept.
	var bodyTokens []token.Token
	for !p.peekTokenIs(token.RCB) {
		// If there's no closing bracket, return an error.
		if p.peekTokenIs(token.EOF) {
			p.peekError(token.RCB)
			return nil, p.Error()
		}

		bodyTokens = append(bodyTokens, p.currentToken)
		p.nextWithIgnores()
	}

	// Combine all the body tokens into a single string
	var bodyStr strings.Builder
	for _, t := range bodyTokens {
		bodyStr.WriteString(t.Literal)
	}
	stmt.Expression = bodyStr.String()

	// Expect and consume the closing curly bracket '}'.
	if !p.expectAndNext(token.RCB) {
		return nil, p.Error()
	}

	// Register the parsed rule in the parser's references.
	err := p.references.AddRuleReference(stmt.Name.Literal, args)
	if err != nil {
		// If there's an error (e.g., a duplicate rule), return an error.
		p.duplicationError(stmt.Name.Literal)
		return nil, p.Error()
	}

	// Return the successfully parsed RuleStatement.
	return stmt, nil
}
418
|
|
|
|
419
|
|
|
// parseAttributeStatement parses an ATTRIBUTE statement of the form
//
//	attribute name type      (scalar)
//	attribute name type[]    (array)
//
// and returns the resulting AttributeStatement AST node. The attribute is
// registered in the parser's references under the key built from entityName
// and the attribute name; a duplicate key is an error.
func (p *Parser) parseAttributeStatement(entityName string) (*ast.AttributeStatement, error) {
	// create a new AttributeStatement object and set its Attribute field to the currentToken
	stmt := &ast.AttributeStatement{Attribute: p.currentToken}

	// expect the next token to be an identifier token, and set the AttributeStatement's Name field to the identifier's value
	if !p.expectAndNext(token.IDENT) {
		return nil, p.Error()
	}
	stmt.Name = p.currentToken

	// expect the next token to be the attribute's type identifier
	if !p.expectAndNext(token.IDENT) {
		return nil, p.Error()
	}

	atstmt := ast.AttributeTypeStatement{Type: p.currentToken}
	atstmt.IsArray = false

	// a '[' after the type marks an array type; it must be closed by ']'
	if p.peekTokenIs(token.LSB) {
		p.next()
		if !p.expectAndNext(token.RSB) {
			return nil, p.Error()
		}
		atstmt.IsArray = true
	}

	stmt.AttributeType = atstmt

	key := utils.Key(entityName, stmt.Name.Literal)
	// register the attribute reference in the parser's references
	err := p.references.AddAttributeReferences(key, atstmt)
	if err != nil {
		p.duplicationError(key) // Generate an error message indicating a duplication error
		return nil, p.Error()
	}

	// return the parsed AttributeStatement and nil for the error value
	return stmt, nil
}
458
|
|
|
|
459
|
|
|
// parseRelationStatement parses a RELATION statement of the form
//
//	relation name @type @type#relation ...
//
// and returns the resulting RelationStatement AST node. The relation and its
// types are registered in the parser's references under the key built from
// entityName and the relation name; a duplicate key is an error.
func (p *Parser) parseRelationStatement(entityName string) (*ast.RelationStatement, error) {
	// create a new RelationStatement object and set its Relation field to the currentToken
	stmt := &ast.RelationStatement{Relation: p.currentToken}

	// expect the next token to be an identifier token, and set the RelationStatement's Name field to the identifier's value
	if !p.expectAndNext(token.IDENT) {
		return nil, p.Error()
	}
	stmt.Name = p.currentToken
	relationName := stmt.Name.Literal

	// require at least one SIGN token, indicating the start of the relation type(s)
	// (expect only checks the lookahead; the loop below consumes the signs)
	if !p.expect(token.SIGN) {
		return nil, p.Error()
	}

	// loop through the relation types until no more SIGN tokens are encountered
	for p.peekTokenIs(token.SIGN) {
		// parse a RelationTypeStatement and append it to the RelationStatement's RelationTypes field
		relationStatement, err := p.parseRelationTypeStatement()
		if err != nil {
			return nil, p.Error()
		}
		stmt.RelationTypes = append(stmt.RelationTypes, *relationStatement)
	}

	key := utils.Key(entityName, relationName)

	// register the relation and its types in the parser's references
	err := p.references.AddRelationReferences(key, stmt.RelationTypes)
	if err != nil {
		p.duplicationError(key) // Generate an error message indicating a duplication error
		return nil, p.Error()
	}

	// return the parsed RelationStatement and nil for the error value
	return stmt, nil
}
498
|
|
|
|
499
|
|
|
// parseRelationTypeStatement parses a single relation type within a RELATION
// statement ("@type" or "@type#relation") and returns a RelationTypeStatement
// AST node.
func (p *Parser) parseRelationTypeStatement() (*ast.RelationTypeStatement, error) {
	// expect the next token to be a SIGN token ('@'), indicating the start of the relation type
	if !p.expectAndNext(token.SIGN) {
		return nil, p.Error()
	}
	// create a new RelationTypeStatement object and set its Sign field to the SIGN token
	stmt := &ast.RelationTypeStatement{Sign: p.currentToken}

	// expect the next token to be an identifier token, and set the RelationTypeStatement's Type field to the identifier's value
	if !p.expectAndNext(token.IDENT) {
		return nil, p.Error()
	}
	stmt.Type = p.currentToken

	// an optional '#' references a specific relation within the type
	// (e.g. "@organization#member"); set the Relation field to that identifier
	if p.peekTokenIs(token.HASH) {
		p.next()
		if !p.expectAndNext(token.IDENT) {
			return nil, p.Error()
		}
		stmt.Relation = p.currentToken
	}

	// return the parsed RelationTypeStatement and nil for the error value
	return stmt, nil
}
526
|
|
|
|
527
|
|
|
// parsePermissionStatement parses a PERMISSION statement of the form
//
//	permission name = <expression>
//
// and returns the resulting PermissionStatement AST node. The permission is
// registered in the parser's references under the key built from entityName
// and the permission name; a duplicate key is an error.
func (p *Parser) parsePermissionStatement(entityName string) (ast.Statement, error) {
	// create a new PermissionStatement object and set its Permission field to the currentToken
	stmt := &ast.PermissionStatement{Permission: p.currentToken}

	// expect the next token to be an identifier token, and set the PermissionStatement's Name field to the identifier's value
	if !p.expectAndNext(token.IDENT) {
		return nil, p.Error()
	}
	stmt.Name = p.currentToken

	key := utils.Key(entityName, stmt.Name.Literal)
	// register the permission reference in the parser's references
	err := p.references.AddPermissionReference(key)
	if err != nil {
		p.duplicationError(key) // Generate an error message indicating a duplication error
		return nil, p.Error()
	}

	// expect the next token to be an ASSIGN token ('='), indicating the start of the expression to be assigned to the permission
	if !p.expectAndNext(token.ASSIGN) {
		return nil, p.Error()
	}

	// advance past '=' to the first token of the expression
	p.next()

	// parse the expression statement and set it as the PermissionStatement's ExpressionStatement field
	ex, err := p.parseExpressionStatement()
	if err != nil {
		return nil, p.Error()
	}
	stmt.ExpressionStatement = ex

	// return the parsed PermissionStatement and nil for the error value
	return stmt, nil
}
563
|
|
|
|
564
|
|
|
// parseExpressionStatement method parses an expression statement and returns an ExpressionStatement AST node |
565
|
|
|
func (p *Parser) parseExpressionStatement() (*ast.ExpressionStatement, error) { |
566
|
|
|
// create a new ExpressionStatement object |
567
|
|
|
stmt := &ast.ExpressionStatement{} |
568
|
|
|
var err error |
569
|
|
|
// parse the expression using the lowest precedence value as the initial precedence level |
570
|
|
|
stmt.Expression, err = p.parseExpression(LOWEST) |
571
|
|
|
if err != nil { |
572
|
|
|
return nil, p.Error() |
573
|
|
|
} |
574
|
|
|
|
575
|
|
|
// return the parsed ExpressionStatement and nil for the error value |
576
|
|
|
return stmt, nil |
577
|
|
|
} |
578
|
|
|
|
579
|
|
|
// expectAndNext method checks if the next token is of the expected type and advances the lexer to the next token if it is. It returns true if the next token is of the expected type, and false otherwise. |
580
|
|
|
func (p *Parser) expectAndNext(t token.Type) bool { |
581
|
|
|
// if the next token is of the expected type, advance the lexer to the next token and return true |
582
|
|
|
if p.peekTokenIs(t) { |
583
|
|
|
p.next() |
584
|
|
|
return true |
585
|
|
|
} |
586
|
|
|
// otherwise, generate an error message indicating that the expected token type was not found and return false |
587
|
|
|
p.peekError(t) |
588
|
|
|
return false |
589
|
|
|
} |
590
|
|
|
|
591
|
|
|
// expect method checks if the next token is of the expected type, without advancing the lexer. It returns true if the next token is of the expected type, and false otherwise. |
592
|
|
|
func (p *Parser) expect(t token.Type) bool { |
593
|
|
|
// if the next token is of the expected type, return true |
594
|
|
|
if p.peekTokenIs(t) { |
595
|
|
|
return true |
596
|
|
|
} |
597
|
|
|
// otherwise, generate an error message indicating that the expected token type was not found and return false |
598
|
|
|
p.peekError(t) |
599
|
|
|
return false |
600
|
|
|
} |
601
|
|
|
|
602
|
|
|
// parseExpression parses an expression at the given precedence level and
// returns the resulting AST node. It implements Pratt-style parsing: a prefix
// parse function (or a parenthesized sub-expression) produces the left
// operand, then infix parse functions fold in operators while the lookahead
// operator binds more tightly than `precedence`.
func (p *Parser) parseExpression(precedence int) (ast.Expression, error) {
	var exp ast.Expression
	var err error

	// a newline directly after '(', a logical operator, or '=' does not
	// terminate the expression — skip it
	if p.currentTokenIs(token.NEWLINE) && p.previousTokenIs(token.LP, token.AND, token.OR, token.NOT, token.ASSIGN) {
		// advance to the next token
		p.next()
	}

	if p.currentTokenIs(token.LP) {
		p.next() // Consume the left parenthesis.
		// parse the parenthesized sub-expression from the lowest precedence
		exp, err = p.parseExpression(LOWEST)
		if err != nil {
			return nil, err
		}

		if !p.expect(token.RP) {
			return nil, p.Error()
		}
		p.next() // Consume the right parenthesis.
	} else {
		// get the prefix parsing function for the current token type
		prefix := p.prefixParseFns[p.currentToken.Type]
		if prefix == nil {
			p.noPrefixParseFnError(p.currentToken.Type)
			return nil, p.Error()
		}

		// parse the prefix expression
		exp, err = prefix()
		if err != nil {
			return nil, p.Error()
		}
	}

	// continue parsing the expression while the next token has a higher precedence level than the current precedence level
	for !p.peekTokenIs(token.NEWLINE) && precedence < p.peekPrecedence() {
		// get the infix parsing function for the next token type
		infix := p.infixParseFunc[p.peekToken.Type]
		if infix == nil {
			// no infix handler registered: the expression ends here
			return exp, nil
		}
		p.next()
		// parse the infix expression with the current expression as its left-hand side
		exp, err = infix(exp)
		if err != nil {
			return nil, p.Error()
		}
	}

	// return the parsed expression and nil for the error value
	return exp, nil
}
656
|
|
|
|
657
|
|
|
// parseInfixExpression parses an infix expression that has a left operand and an operator followed by
// a right operand, such as "a or b" or "x and y".
// It takes the left operand as an argument, constructs an InfixExpression with the current operator
// and left operand, and parses the right operand with a higher precedence to construct the final
// expression tree.
// It returns the resulting InfixExpression and any error encountered.
func (p *Parser) parseInfixExpression(left ast.Expression) (ast.Expression, error) {
	// Ensure the current token is a valid infix operator (AND, OR, NOT) before proceeding.
	if !p.isInfixOperator(p.currentToken.Type) {
		p.currentError(token.AND, token.OR, token.NOT) // report the operator tokens that would have been valid here
		return nil, p.Error()
	}

	// Create a new InfixExpression with the left operand and the current operator.
	expression := &ast.InfixExpression{
		Op:       p.currentToken,
		Left:     left,
		Operator: ast.Operator(p.currentToken.Literal),
	}

	// Get the precedence of the current operator and consume the operator token.
	precedence := p.currentPrecedence()
	p.next()

	// Parse the right operand with a higher precedence to construct the final expression tree.
	right, err := p.parseExpression(precedence)
	if err != nil {
		return nil, err
	}

	// Ensure the right operand is not nil (a missing operand after the operator).
	if right == nil {
		p.currentError(token.IDENT, token.LP) // an identifier or parenthesized expression was expected
		return nil, p.Error()
	}

	// Set the right operand of the InfixExpression and return it.
	expression.Right = right
	return expression, nil
}
697
|
|
|
|
698
|
|
|
// parseIntegerLiteral parses an integer literal and returns the resulting IntegerLiteral expression. |
699
|
|
|
func (p *Parser) isInfixOperator(tokenType token.Type) bool { |
700
|
|
|
return tokenType == token.AND || tokenType == token.OR || tokenType == token.NOT |
701
|
|
|
} |
702
|
|
|
|
703
|
|
|
// peekPrecedence returns the precedence of the next token in the input, if it is a known |
704
|
|
|
// operator, or the lowest precedence otherwise. |
705
|
|
|
func (p *Parser) peekPrecedence() int { |
706
|
|
|
if pr, ok := precedences[p.peekToken.Type]; ok { |
707
|
|
|
return pr |
708
|
|
|
} |
709
|
|
|
return LOWEST |
710
|
|
|
} |
711
|
|
|
|
712
|
|
|
// currentPrecedence returns the precedence of the current token in the input, if it is a known |
713
|
|
|
// operator, or the lowest precedence otherwise. |
714
|
|
|
func (p *Parser) currentPrecedence() int { |
715
|
|
|
if pr, ok := precedences[p.currentToken.Type]; ok { |
716
|
|
|
return pr |
717
|
|
|
} |
718
|
|
|
return LOWEST |
719
|
|
|
} |
720
|
|
|
|
721
|
|
|
func (p *Parser) parseIdentifierOrCall() (ast.Expression, error) { |
722
|
|
|
// Ensure the current token is a valid identifier before proceeding. |
723
|
|
|
if !p.currentTokenIs(token.IDENT) { |
724
|
|
|
return nil, fmt.Errorf("unexpected token type for identifier expression: %s", p.currentToken.Type) |
725
|
|
|
} |
726
|
|
|
|
727
|
|
|
if p.peekTokenIs(token.LP) { |
728
|
|
|
return p.parseCallExpression() |
729
|
|
|
} |
730
|
|
|
|
731
|
|
|
return p.parseIdentifierExpression() |
732
|
|
|
} |
733
|
|
|
|
734
|
|
|
// parseIdentifier parses an identifier expression that may consist of one or more dot-separated |
735
|
|
|
// identifiers, such as "x", "foo.bar", or "a.b.c.d". |
736
|
|
|
// It constructs a new Identifier expression with the first token as the prefix and subsequent |
737
|
|
|
// tokens as identifiers, and returns the resulting expression and any error encountered. |
738
|
|
|
func (p *Parser) parseIdentifierExpression() (ast.Expression, error) { |
739
|
|
|
// Ensure the current token is a valid identifier before proceeding. |
740
|
|
|
if !p.currentTokenIs(token.IDENT) { |
741
|
|
|
p.currentError(token.IDENT) |
742
|
|
|
return nil, p.Error() |
743
|
|
|
} |
744
|
|
|
|
745
|
|
|
// Create a new Identifier expression with the first token as the prefix. |
746
|
|
|
ident := &ast.Identifier{Idents: []token.Token{p.currentToken}} |
747
|
|
|
|
748
|
|
|
// If the next token is a dot, consume it and continue parsing the next identifier. |
749
|
|
|
for p.peekTokenIs(token.DOT) { |
750
|
|
|
p.next() // Consume the dot token |
751
|
|
|
|
752
|
|
|
// Check if the next token after the dot is a valid identifier |
753
|
|
|
if !p.expectAndNext(token.IDENT) { |
754
|
|
|
return nil, p.Error() |
755
|
|
|
} |
756
|
|
|
|
757
|
|
|
ident.Idents = append(ident.Idents, p.currentToken) |
758
|
|
|
} |
759
|
|
|
|
760
|
|
|
// Return the resulting Identifier expression. |
761
|
|
|
return ident, nil |
762
|
|
|
} |
763
|
|
|
|
764
|
|
|
// parseCallExpression parses a function-call expression of the form
// call_func(variable1, variable2), where each argument is itself a
// (possibly dot-separated) identifier expression.
// It returns the resulting *ast.Call and any error encountered.
func (p *Parser) parseCallExpression() (ast.Expression, error) {
	// Ensure the current token is a valid identifier before proceeding.
	if !p.currentTokenIs(token.IDENT) {
		p.currentError(token.IDENT)
		return nil, p.Error()
	}

	// Create a new Call expression named after the current identifier token.
	call := &ast.Call{Name: p.currentToken}

	// The identifier must be followed by an opening parenthesis.
	if !p.expectAndNext(token.LP) {
		return nil, p.Error()
	}

	// Empty argument list: "name()" — consume the RP and return early.
	if p.peekTokenIs(token.RP) {
		p.next() // consume the RP token
		return call, nil
	}

	// Advance onto the first argument's token.
	p.next()

	// Parse the first argument.
	ident, err := p.parseIdentifierExpression()
	if err != nil {
		return nil, err
	}

	// Arguments are stored as ast.Identifier values; anything else is an error.
	i, ok := ident.(*ast.Identifier)
	if !ok {
		return nil, fmt.Errorf("expected identifier, got %T", ident)
	}
	call.Arguments = append(call.Arguments, *i)

	// Parse remaining comma-separated arguments.
	for p.peekTokenIs(token.COMMA) {
		p.next() // step onto the comma

		// The comma must be followed by another identifier.
		if !p.expectAndNext(token.IDENT) {
			return nil, p.Error()
		}

		ident, err = p.parseIdentifierExpression()
		if err != nil {
			return nil, err
		}

		i, ok = ident.(*ast.Identifier)
		if !ok {
			return nil, fmt.Errorf("expected identifier, got %T", ident)
		}
		call.Arguments = append(call.Arguments, *i)
	}

	// The argument list must be closed by a right parenthesis.
	if !p.expectAndNext(token.RP) {
		return nil, p.Error()
	}

	// Return the resulting Call expression.
	return call, nil
}
826
|
|
|
|
827
|
|
|
// registerPrefix safely registers a parsing function for a prefix token type in the parser's prefixParseFns map. |
828
|
|
|
// It takes a token type and a prefix parsing function as arguments, and stores the function in the map |
829
|
|
|
// under the given token type key. |
830
|
|
|
func (p *Parser) registerPrefix(tokenType token.Type, fn prefixParseFn) { |
831
|
|
|
if fn == nil { |
832
|
|
|
p.duplicationError(fmt.Sprintf("registerPrefix: nil function for token type %s", tokenType)) |
833
|
|
|
return |
834
|
|
|
} |
835
|
|
|
|
836
|
|
|
if _, exists := p.prefixParseFns[tokenType]; exists { |
837
|
|
|
p.duplicationError(fmt.Sprintf("registerPrefix: token type %s already registered", tokenType)) |
838
|
|
|
return |
839
|
|
|
} |
840
|
|
|
|
841
|
|
|
p.prefixParseFns[tokenType] = fn |
842
|
|
|
} |
843
|
|
|
|
844
|
|
|
// registerInfix safely registers a parsing function for an infix token type in the parser's infixParseFunc map. |
845
|
|
|
// It takes a token type and an infix parsing function as arguments, and stores the function in the map |
846
|
|
|
// under the given token type key. |
847
|
|
|
func (p *Parser) registerInfix(tokenType token.Type, fn infixParseFn) { |
848
|
|
|
if fn == nil { |
849
|
|
|
p.duplicationError(fmt.Sprintf("registerInfix: nil function for token type %s", tokenType)) |
850
|
|
|
return |
851
|
|
|
} |
852
|
|
|
|
853
|
|
|
if _, exists := p.infixParseFunc[tokenType]; exists { |
854
|
|
|
p.duplicationError(fmt.Sprintf("registerInfix: token type %s already registered", tokenType)) |
855
|
|
|
return |
856
|
|
|
} |
857
|
|
|
|
858
|
|
|
p.infixParseFunc[tokenType] = fn |
859
|
|
|
} |
860
|
|
|
|
861
|
|
|
// duplicationError adds an error message to the parser's error list indicating that a duplication was found. |
862
|
|
|
// It takes a key string as an argument that is used to identify the source of the duplication in the input. |
863
|
|
|
func (p *Parser) duplicationError(key string) { |
864
|
|
|
msg := fmt.Sprintf("%v:%v:duplication found for %s", p.l.GetLinePosition(), p.l.GetColumnPosition(), key) |
865
|
|
|
p.errors = append(p.errors, msg) |
866
|
|
|
} |
867
|
|
|
|
868
|
|
|
// noPrefixParseFnError adds an error message to the parser's error list indicating that no prefix parsing |
869
|
|
|
// function was found for a given token type. |
870
|
|
|
// It takes a token type as an argument that indicates the type of the token for which a parsing function is missing. |
871
|
|
|
func (p *Parser) noPrefixParseFnError(t token.Type) { |
872
|
|
|
msg := fmt.Sprintf("%v:%v:no prefix parse function for %s found", p.l.GetLinePosition(), p.l.GetColumnPosition(), t) |
873
|
|
|
p.errors = append(p.errors, msg) |
874
|
|
|
} |
875
|
|
|
|
876
|
|
|
// peekError adds an error message to the parser's error list indicating that the next token in the input |
877
|
|
|
// did not match the expected type(s). |
878
|
|
|
// It takes one or more token types as arguments that indicate the expected types. |
879
|
|
|
func (p *Parser) peekError(t ...token.Type) { |
880
|
|
|
expected := strings.Join(tokenTypesToStrings(t), ", ") |
881
|
|
|
msg := fmt.Sprintf("%v:%v:expected next token to be %s, got %s instead", p.l.GetLinePosition(), p.l.GetColumnPosition(), expected, p.peekToken.Type) |
882
|
|
|
p.errors = append(p.errors, msg) |
883
|
|
|
} |
884
|
|
|
|
885
|
|
|
// currentError adds an error message to the parser's error list indicating that the current token in the input |
886
|
|
|
// did not match the expected type(s). |
887
|
|
|
// It takes one or more token types as arguments that indicate the expected types. |
888
|
|
|
func (p *Parser) currentError(t ...token.Type) { |
889
|
|
|
expected := strings.Join(tokenTypesToStrings(t), ", ") |
890
|
|
|
msg := fmt.Sprintf("%v:%v:expected token to be %s, got %s instead", p.l.GetLinePosition(), |
891
|
|
|
p.l.GetColumnPosition(), expected, p.currentToken.Type) |
892
|
|
|
p.errors = append(p.errors, msg) |
893
|
|
|
} |
894
|
|
|
|
895
|
|
|
// tokenTypesToStrings converts a slice of token types to a slice of their string representations. |
896
|
|
|
func tokenTypesToStrings(types []token.Type) []string { |
897
|
|
|
strs := make([]string, len(types)) |
898
|
|
|
for i, t := range types { |
899
|
|
|
strs[i] = t.String() |
900
|
|
|
} |
901
|
|
|
return strs |
902
|
|
|
} |
903
|
|
|
|