| Conditions | 17 | 
| Total Lines | 95 | 
| Code Lines | 50 | 
| Lines | 0 | 
| Ratio | 0 % | 
| Changes | 0 |
Small methods make your code easier to understand, in particular if combined with a good name. Besides, if your method is small, finding a good name is usually much easier.
For example, if you find yourself adding comments to a method's body, this is usually a good sign to extract the commented part to a new method, and use the comment as a starting point when coming up with a good name for this new method.
Commonly applied refactorings include Extract Method. If many parameters or temporary variables are present, consider Replace Temp with Query or Replace Method with Method Object instead.
Complex classes like engines.*BulkChecker.ExecuteRequests often do a lot of different things. To break such a class down, we need to identify a cohesive component within that class. A common approach to find such a component is to look for fields/methods that share the same prefixes, or suffixes.
Once you have determined the fields that belong together, you can apply the Extract Class refactoring. If the component makes sense as a sub-class, Extract Subclass is also a candidate, and is often faster.
| 1 | package engines  | 
            ||
// ExecuteRequests runs the collected bulk check requests concurrently and
// emits results, in sorted request order, through the checker's callback
// until `size` ALLOWED results have been reported. It returns the first
// error produced by any check goroutine, or nil on success.
//
// Concurrency model: each request is checked in its own errgroup goroutine,
// bounded by a weighted semaphore of c.concurrencyLimit. Completed results
// land in a pre-allocated slice indexed by request position; a local mutex
// serializes the in-order drain of that slice so callbacks fire in order
// even though checks complete out of order.
func (c *BulkChecker) ExecuteRequests(size uint32) error {
	// Stop collecting new requests and close the RequestChan so no more
	// requests can be added while we execute.
	c.StopCollectingRequests()

	// Wait for the request-collection goroutine(s) to finish draining
	// before we snapshot the request list.
	c.wg.Wait()

	// Number of ALLOWED results reported so far. Shared across check
	// goroutines, hence accessed atomically.
	successCount := int64(0)
	// Bounds the number of permission checks running concurrently.
	sem := semaphore.NewWeighted(int64(c.concurrencyLimit))
	// Guards `results` and `processedIndex` below (distinct from c.mu,
	// which guards the checker's own request list).
	var mu sync.Mutex

	// Snapshot the request list under c.mu so concurrent mutation cannot
	// race with sorting/copying.
	c.mu.Lock()
	c.sortRequests() // Sort requests based on id
	listCopy := append([]BulkCheckerRequest{}, c.list...) // Copy so processing never touches the live list
	c.mu.Unlock()

	// One result slot per request. NOTE(review): the zero value
	// CHECK_RESULT_UNSPECIFIED doubles as the "not yet computed" sentinel;
	// if a check could legitimately yield UNSPECIFIED, the in-order drain
	// below would stall at that slot — confirm upstream never returns it.
	results := make([]base.CheckResult, len(listCopy))
	// Index of the next request whose result must be emitted; advanced only
	// while holding mu, which is what keeps callbacks in request order.
	processedIndex := 0

	// Launch one bounded goroutine per request.
	for i, currentRequest := range listCopy {
		// Best-effort early exit: once the success limit is reached there
		// is no point launching further checks. Already-launched goroutines
		// may still run; the `<= size` guard below caps actual callbacks.
		if atomic.LoadInt64(&successCount) >= int64(size) {
			break
		}

		// Capture per-iteration copies for the closure (required under
		// pre-Go-1.22 loop-variable semantics).
		index := i
		req := currentRequest

		// errgroup collects the first error and synchronizes completion.
		c.g.Go(func() error {
			// Respect the concurrency limit; Acquire fails if c.ctx is
			// cancelled while waiting for a slot.
			if err := sem.Acquire(c.ctx, 1); err != nil {
				return err
			}
			defer sem.Release(1) // Always free the slot, even on error paths below

			var result base.CheckResult
			if req.Result == base.CheckResult_CHECK_RESULT_UNSPECIFIED {
				// No precomputed result — perform the actual permission check.
				cr, err := c.checker.Check(c.ctx, req.Request)
				if err != nil {
					return err
				}
				result = cr.GetCan()
			} else {
				// Result was already determined upstream; reuse it.
				result = req.Result
			}

			// Publish this result, then drain every contiguous completed
			// result starting at processedIndex so callbacks fire in order.
			mu.Lock()
			results[index] = result

			for processedIndex < len(listCopy) && results[processedIndex] != base.CheckResult_CHECK_RESULT_UNSPECIFIED {
				// Only ALLOWED results count toward the page and trigger
				// the callback.
				if results[processedIndex] == base.CheckResult_CHECK_RESULT_ALLOWED {
					// Count the success and emit only while still within
					// the requested page size.
					if atomic.AddInt64(&successCount, 1) <= int64(size) {
						ct := ""
						if processedIndex+1 < len(listCopy) {
							// Encode the NEXT item's id as the continuation
							// token so a follow-up request resumes after
							// this result.
							if c.typ == BULK_ENTITY {
								ct = utils.NewContinuousToken(listCopy[processedIndex+1].Request.GetEntity().GetId()).Encode().String()
							} else if c.typ == BULK_SUBJECT {
								ct = utils.NewContinuousToken(listCopy[processedIndex+1].Request.GetSubject().GetId()).Encode().String()
							}
						}
						// Emit the entity or subject id depending on the
						// bulk-check mode.
						if c.typ == BULK_ENTITY {
							c.callback(listCopy[processedIndex].Request.GetEntity().GetId(), ct)
						} else if c.typ == BULK_SUBJECT {
							c.callback(listCopy[processedIndex].Request.GetSubject().GetId(), ct)
						}
					}
				}
				processedIndex++ // Move to the next slot awaiting a result
			}
			mu.Unlock()

			return nil
		})
	}

	// Propagate the first error from any check goroutine.
	if err := c.g.Wait(); err != nil {
		return err
	}

	return nil
}
            ||
| 310 |