| Metric | Value |
| --- | --- |
| Conditions | 41 |
| Total Lines | 113 |
| Code Lines | 103 |
| Lines | 0 |
| Ratio | 0 % |
| Changes | 0 |
Small methods make your code easier to understand, especially when combined with a good name. Moreover, if a method is small, finding a good name for it is usually much easier.
For example, if you find yourself adding comments to a method's body, that is usually a sign that the commented part should be extracted into a new method, with the comment serving as a starting point for naming it.
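As an illustration (the names below are purely hypothetical and not taken from the lexer discussed in this section), the comment "strip blank lines and trailing whitespace" can become the extracted method's name almost verbatim:

```go
package example

import "strings"

// Before the extraction, report contained an inline block preceded by the
// comment "// strip blank lines and trailing whitespace". After Extract
// Method, that comment has effectively become the new method's name.
func report(lines []string) string {
	return strings.Join(stripBlankLinesAndTrailingWhitespace(lines), "\n")
}

// stripBlankLinesAndTrailingWhitespace drops empty lines and trims trailing
// spaces and tabs from the remaining ones.
func stripBlankLinesAndTrailingWhitespace(lines []string) []string {
	cleaned := make([]string, 0, len(lines))
	for _, line := range lines {
		trimmed := strings.TrimRight(line, " \t")
		if trimmed != "" {
			cleaned = append(cleaned, trimmed)
		}
	}
	return cleaned
}
```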
Commonly applied refactorings include Extract Method and, if many parameters or temporary variables are present, Replace Method with Method Object.
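A minimal, hypothetical Go sketch of Replace Method with Method Object (none of these names come from the project under discussion): the parameters and temporaries become fields of a small struct, so the body can be split into short, named methods that share state without long parameter lists.

```go
package example

// Hypothetical "before": a function whose parameters and temporaries make
// plain Extract Method awkward, e.g.
//
//   func totalPrice(base float64, quantity int, discount, shipping float64) float64
//
// Replace Method with Method Object turns it into a small struct.
type priceCalculation struct {
	base     float64
	quantity int
	discount float64
	shipping float64

	subtotal float64 // former temporary variable
}

// compute runs the whole calculation as a sequence of small, named steps.
func (p *priceCalculation) compute() float64 {
	p.computeSubtotal()
	return p.subtotal*(1-p.discount) + p.shipping
}

// computeSubtotal is one extracted step; it reads and writes the shared fields
// directly instead of taking them as parameters.
func (p *priceCalculation) computeSubtotal() {
	p.subtotal = p.base * float64(p.quantity)
}
```

Callers construct the struct once and call compute, which keeps the public entry point small while the individual steps stay easy to name and test.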
Complex methods like lexer.*Lexer.NextToken often do a lot of different things. To break the enclosing class down, we need to identify a cohesive component within it. A common approach to finding such a component is to look for fields and methods that share the same prefixes or suffixes.

Once you have determined which fields belong together, you can apply the Extract Class refactoring. If the component makes sense as a subclass, Extract Subclass is also a candidate, and is often faster to apply. The listing below shows the method in question; a sketch of the refactoring follows it.
```go
package lexer

// ...

func (l *Lexer) NextToken() (tok token.Token) {
	// switch statement to determine the type of token based on the current character
	switch l.ch {
	case '\t':
		tok = token.New(positionInfo(l.linePosition, l.columnPosition), token.TAB, l.ch)
	case ' ':
		tok = token.New(positionInfo(l.linePosition, l.columnPosition), token.SPACE, l.ch)
	case '\n':
		l.newLine()
		tok = token.New(positionInfo(l.linePosition, l.columnPosition), token.NEWLINE, l.ch)
	case '\r':
		l.newLine()
		tok = token.New(positionInfo(l.linePosition, l.columnPosition), token.NEWLINE, l.ch)
	case ';':
		tok = token.New(positionInfo(l.linePosition, l.columnPosition), token.NEWLINE, l.ch)
	case ':':
		tok = token.New(positionInfo(l.linePosition, l.columnPosition), token.COLON, l.ch)
	case '=':
		tok = token.New(positionInfo(l.linePosition, l.columnPosition), token.ASSIGN, l.ch)
	case '@':
		tok = token.New(positionInfo(l.linePosition, l.columnPosition), token.SIGN, l.ch)
	case '(':
		tok = token.New(positionInfo(l.linePosition, l.columnPosition), token.LP, l.ch)
	case ')':
		tok = token.New(positionInfo(l.linePosition, l.columnPosition), token.RP, l.ch)
	case '{':
		tok = token.New(positionInfo(l.linePosition, l.columnPosition), token.LCB, l.ch)
	case '}':
		tok = token.New(positionInfo(l.linePosition, l.columnPosition), token.RCB, l.ch)
	case '[':
		tok = token.New(positionInfo(l.linePosition, l.columnPosition), token.LSB, l.ch)
	case ']':
		tok = token.New(positionInfo(l.linePosition, l.columnPosition), token.RSB, l.ch)
	case '+':
		tok = token.New(positionInfo(l.linePosition, l.columnPosition), token.PLUS, l.ch)
	case '-':
		tok = token.New(positionInfo(l.linePosition, l.columnPosition), token.MINUS, l.ch)
	case '*':
		tok = token.New(positionInfo(l.linePosition, l.columnPosition), token.TIMES, l.ch)
	case '%':
		tok = token.New(positionInfo(l.linePosition, l.columnPosition), token.MOD, l.ch)
	case '^':
		tok = token.New(positionInfo(l.linePosition, l.columnPosition), token.POW, l.ch)
	case '>':
		tok = token.New(positionInfo(l.linePosition, l.columnPosition), token.GT, l.ch)
	case '<':
		tok = token.New(positionInfo(l.linePosition, l.columnPosition), token.LT, l.ch)
	case '!':
		tok = token.New(positionInfo(l.linePosition, l.columnPosition), token.EXCL, l.ch)
	case '?':
		tok = token.New(positionInfo(l.linePosition, l.columnPosition), token.QM, l.ch)
	case ',':
		tok = token.New(positionInfo(l.linePosition, l.columnPosition), token.COMMA, l.ch)
	case '#':
		tok = token.New(positionInfo(l.linePosition, l.columnPosition), token.HASH, l.ch)
	case '.':
		tok = token.New(positionInfo(l.linePosition, l.columnPosition), token.DOT, l.ch)
	case '\'':
		tok = token.New(positionInfo(l.linePosition, l.columnPosition), token.APOS, l.ch)
	case '&':
		tok = token.New(positionInfo(l.linePosition, l.columnPosition), token.AMPERSAND, l.ch)
	case 0:
		tok = token.Token{PositionInfo: positionInfo(l.linePosition, l.columnPosition), Type: token.EOF, Literal: ""}
	case '/':
		switch l.peekChar() { // check the next character after the slash
		case '/': // single-line comment
			tok.PositionInfo = positionInfo(l.linePosition, l.columnPosition)
			tok.Literal = l.lexSingleLineComment()
			tok.Type = token.SINGLE_LINE_COMMENT
			return tok
		case '*': // multi-line comment
			tok.PositionInfo = positionInfo(l.linePosition, l.columnPosition)
			tok.Literal = l.lexMultiLineComment()
			tok.Type = token.MULTI_LINE_COMMENT
			return tok
		default: // division operator
			tok = token.New(positionInfo(l.linePosition, l.columnPosition), token.DIVIDE, l.ch)
		}
	case '"':
		// a double quote indicates the start of a string
		tok.PositionInfo = positionInfo(l.linePosition, l.columnPosition)
		tok.Literal = l.lexString()
		tok.Type = token.STRING
		return tok
	default:
		// check if the character is a letter, and if so, lex the identifier and look up the keyword
		if isLetter(l.ch) {
			tok.PositionInfo = positionInfo(l.linePosition, l.columnPosition)
			tok.Literal = l.lexIdent()
			if tok.Literal == "true" || tok.Literal == "false" {
				tok.Type = token.BOOLEAN
				return tok
			}
			tok.Type = token.LookupKeywords(tok.Literal)
			return tok
		} else if isDigit(l.ch) {
			var isDouble bool
			tok.PositionInfo = positionInfo(l.linePosition, l.columnPosition)
			tok.Literal, isDouble = l.lexNumber()
			if isDouble {
				tok.Type = token.DOUBLE
			} else {
				tok.Type = token.INTEGER
			}
			return tok
		} else {
			// if none of the above cases match, create an illegal token with the current character
			tok = token.New(positionInfo(l.linePosition, l.columnPosition), token.ILLEGAL, l.ch)
		}
	}
	// read the next character and return the token
	l.readChar()
	return tok
}
```
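To make the Extract Class suggestion concrete for this code: the fields linePosition and columnPosition share the suffix "Position" and are always updated together (see the newLine calls above), which makes position tracking a natural component to extract. A minimal sketch, assuming those fields and the newLine helper behave as the listing suggests (the new names are illustrative, not taken from the original code):

```go
package lexer

// position tracks where the lexer currently is in the input. It gathers the
// former linePosition/columnPosition fields and the line-advancing logic into
// one cohesive component.
type position struct {
	line   int
	column int
}

// advance moves one character to the right on the current line.
func (p *position) advance() { p.column++ }

// newLine starts a new line and resets the column, replacing the logic that
// previously lived in Lexer.newLine.
func (p *position) newLine() {
	p.line++
	p.column = 0
}

// The Lexer would then hold a single position field and delegate to it, e.g.:
//
//   type Lexer struct {
//       pos position
//       // ...
//   }
//
//   tok = token.New(positionInfo(l.pos.line, l.pos.column), token.TAB, l.ch)
```

Independently of Extract Class, the long run of near-identical single-character cases in NextToken could also be collapsed into a table-driven lookup (a map from character to token type), which would shrink the switch considerably.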