| Metric | Value |
| --- | --- |
| Conditions | 51 |
| Total Lines | 400 |
| Code Lines | 246 |
| Lines | 0 |
| Ratio | 0 % |
| Changes | 0 |
Small methods make your code easier to understand, especially when combined with a good name. And if a method is small, finding a good name is usually much easier.
For example, if you find yourself adding comments to a method's body, that is usually a sign that the commented part should be extracted into a new method, with the comment serving as a starting point for the new method's name.
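A minimal before/after sketch of this Extract Method step (names here are illustrative, not from this codebase):

```go
package example

import (
	"errors"
	"strings"
)

// Before: a comment marks a distinct step inside a longer method.
func registerBefore(email string) error {
	// validate the user's email address
	if !strings.Contains(email, "@") {
		return errors.New("invalid email address")
	}
	// ... remainder of registration ...
	return nil
}

// After: the commented block becomes its own method, and the comment becomes the name.
func validateEmailAddress(email string) error {
	if !strings.Contains(email, "@") {
		return errors.New("invalid email address")
	}
	return nil
}

func registerAfter(email string) error {
	if err := validateEmailAddress(email); err != nil {
		return err
	}
	// ... remainder of registration ...
	return nil
}
```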
Commonly applied refactorings include Extract Method. If many parameters or temporary variables stand in the way of extracting, refactorings such as Replace Temp with Query, Introduce Parameter Object, or Preserve Whole Object can be applied first; a sketch of Introduce Parameter Object follows.
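A hedged sketch of Introduce Parameter Object, again with illustrative names rather than anything from this codebase:

```go
package example

import "time"

// Before: loose parameters travel together through every call,
// making each extracted helper's signature long and repetitive.
func newExporterBefore(endpoint string, insecure bool, urlPath, protocol string, timeout time.Duration) error {
	// ... dial the endpoint ...
	return nil
}

// After: an introduced parameter object names the group and shortens every signature.
type ExporterOptions struct {
	Endpoint string
	Insecure bool
	URLPath  string
	Protocol string
	Timeout  time.Duration
}

func newExporterAfter(opts ExporterOptions) error {
	// ... dial opts.Endpoint ...
	return nil
}
```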
Complex classes like cmd.serve often do a lot of different things. To break such a class down, we need to identify a cohesive component within it. A common way to find such a component is to look for fields and methods that share the same prefixes or suffixes.
Once you have determined the fields that belong together, you can apply the Extract Class refactoring. If the component makes sense as a subclass, Extract Subclass is also a candidate, and is often faster.
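In the listing below, the `cfg.Log`, `cfg.Tracer`, and `cfg.Meter` groups each carry the same Exporter/Endpoint/Insecure/URLPath/Headers/Protocol fields, which is exactly the prefix signal described above. A hedged sketch of the extracted component (hypothetical names; the real config package may be organised differently):

```go
package example

// TelemetryOptions captures the shape shared by the Log, Tracer, and Meter groups.
type TelemetryOptions struct {
	Enabled  bool
	Exporter string
	Endpoint string
	Insecure bool
	URLPath  string
	Headers  []string
	Protocol string
}

// The owning config then reuses one type instead of repeating the fields per prefix.
type Config struct {
	Log    TelemetryOptions
	Tracer TelemetryOptions
	Meter  TelemetryOptions
}
```

The original `cmd.serve` implementation that produced this finding follows.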
```go
package cmd

// ... (lines 2-152 elided) ...

func serve() func(cmd *cobra.Command, args []string) error {
	return func(cmd *cobra.Command, args []string) error {
		var cfg *config.Config
		var err error
		cfgFile := viper.GetString("config.file")
		if cfgFile != "" {
			cfg, err = config.NewConfigWithFile(cfgFile)
			if err != nil {
				return fmt.Errorf("failed to create new config: %w", err)
			}

			if err = viper.Unmarshal(cfg); err != nil {
				return fmt.Errorf("failed to unmarshal config: %w", err)
			}
		} else {
			// Load configuration
			cfg, err = config.NewConfig()
			if err != nil {
				return fmt.Errorf("failed to create new config: %w", err)
			}

			if err = viper.Unmarshal(cfg); err != nil {
				return fmt.Errorf("failed to unmarshal config: %w", err)
			}
		}

		// Print banner and initialize logger
		internal.PrintBanner()

		// Set up context and signal handling
		ctx, stop := signal.NotifyContext(context.Background(), syscall.SIGINT, syscall.SIGTERM)
		defer stop()

		internal.Identifier = cfg.AccountID

		var logger *slog.Logger
		var handler slog.Handler

		switch cfg.Log.Output {
		case "json":
			handler = slog.NewJSONHandler(os.Stdout, &slog.HandlerOptions{
				Level: getLogLevel(cfg.Log.Level),
			})
		case "text":
			handler = slog.NewTextHandler(os.Stdout, &slog.HandlerOptions{
				Level: getLogLevel(cfg.Log.Level),
			})
		default:
			handler = slog.NewTextHandler(os.Stdout, &slog.HandlerOptions{
				Level: getLogLevel(cfg.Log.Level),
			})
		}

		if cfg.Log.Enabled {
			headers := map[string]string{}
			for _, header := range cfg.Log.Headers {
				h := strings.Split(header, ":")
				if len(h) != 2 {
					return errors.New("invalid header format; expected 'key:value'")
				}
				headers[h[0]] = h[1]
			}

			customHandler, err := HandlerFactory(
				cfg.Log.Exporter,
				cfg.Log.Endpoint,
				cfg.Log.Insecure,
				cfg.Log.URLPath,
				headers,
				cfg.Log.Protocol,
				getLogLevel(cfg.Log.Level),
			)

			if err != nil {
				slog.Error("invalid logger exporter", slog.Any("error", err))
				logger = slog.New(handler)
			} else {
				logger = slog.New(
					slogmulti.Fanout(
						customHandler,
						handler,
					),
				)
			}
		} else {
			logger = slog.New(handler)
		}

		slog.SetDefault(logger)

		if internal.Identifier == "" {
			message := "Account ID is not set. Please fill in the Account ID for better support. Get your Account ID from https://permify.co/account"
			slog.Error(message)

			ticker := time.NewTicker(24 * time.Hour)
			defer ticker.Stop()

			go func() {
				for range ticker.C {
					slog.Error(message)
				}
			}()
		}

		slog.Info("🚀 starting permify service...")

		// Run database migration if enabled
		if cfg.Database.AutoMigrate {
			err = storage.Migrate(cfg.Database)
			if err != nil {
				slog.Error("failed to migrate database", slog.Any("error", err))
				return err
			}
		}

		// Initialize database
		db, err := factories.DatabaseFactory(cfg.Database)
		if err != nil {
			slog.Error("failed to initialize database", slog.Any("error", err))
			return err
		}
		defer func() {
			if err = db.Close(); err != nil {
				slog.Error("failed to close database", slog.Any("error", err))
			}
		}()

		// Tracing
		if cfg.Tracer.Enabled {
			headers := map[string]string{}
			for _, header := range cfg.Tracer.Headers {
				h := strings.Split(header, ":")
				if len(h) != 2 {
					return errors.New("invalid header format; expected 'key:value'")
				}
				headers[h[0]] = h[1]
			}

			var exporter trace.SpanExporter
			exporter, err = tracerexporters.ExporterFactory(
				cfg.Tracer.Exporter,
				cfg.Tracer.Endpoint,
				cfg.Tracer.Insecure,
				cfg.Tracer.URLPath,
				headers,
				cfg.Tracer.Protocol,
			)
			if err != nil {
				slog.Error(err.Error())
			}

			shutdown := telemetry.NewTracer(exporter)

			defer func() {
				if err = shutdown(ctx); err != nil {
					slog.Error(err.Error())
				}
			}()
		}

		// Garbage collection
		if cfg.Database.GarbageCollection.Timeout > 0 && cfg.Database.GarbageCollection.Enabled && cfg.Database.Engine != "memory" {
			slog.Info("🗑️ starting database garbage collection...")

			garbageCollector := gc.NewGC(
				db.(*PQDatabase.Postgres),
				gc.Interval(cfg.Database.GarbageCollection.Interval),
				gc.Window(cfg.Database.GarbageCollection.Window),
				gc.Timeout(cfg.Database.GarbageCollection.Timeout),
			)

			go func() {
				err = garbageCollector.Start(ctx)
				if err != nil {
					slog.Error(err.Error())
				}
			}()
		}

		// Meter
		if cfg.Meter.Enabled {
			headers := map[string]string{}
			for _, header := range cfg.Meter.Headers {
				h := strings.Split(header, ":")
				if len(h) != 2 {
					return errors.New("invalid header format; expected 'key:value'")
				}
				headers[h[0]] = h[1]
			}

			var exporter metric.Exporter
			exporter, err = meterexporters.ExporterFactory(
				cfg.Meter.Exporter,
				cfg.Meter.Endpoint,
				cfg.Meter.Insecure,
				cfg.Meter.URLPath,
				headers,
				cfg.Meter.Protocol,
			)

			if err != nil {
				slog.Error(err.Error())
			}

			shutdown := telemetry.NewMeter(exporter, time.Duration(cfg.Meter.Interval)*time.Second)

			defer func() {
				if err = shutdown(ctx); err != nil {
					slog.Error(err.Error())
				}
			}()
		}

		// schema cache
		var schemaCache pkgcache.Cache
		schemaCache, err = ristretto.New(ristretto.NumberOfCounters(cfg.Service.Schema.Cache.NumberOfCounters), ristretto.MaxCost(cfg.Service.Schema.Cache.MaxCost))
		if err != nil {
			slog.Error(err.Error())
			return err
		}

		// engines cache cache
		var engineKeyCache pkgcache.Cache
		engineKeyCache, err = ristretto.New(ristretto.NumberOfCounters(cfg.Service.Permission.Cache.NumberOfCounters), ristretto.MaxCost(cfg.Service.Permission.Cache.MaxCost))
		if err != nil {
			slog.Error(err.Error())
			return err
		}

		watcher := storage.NewNoopWatcher()
		if cfg.Service.Watch.Enabled {
			watcher = factories.WatcherFactory(db)
		}

		// Initialize the storage with factory methods
		dataReader := factories.DataReaderFactory(db)
		dataWriter := factories.DataWriterFactory(db)
		bundleReader := factories.BundleReaderFactory(db)
		bundleWriter := factories.BundleWriterFactory(db)
		schemaReader := factories.SchemaReaderFactory(db)
		schemaWriter := factories.SchemaWriterFactory(db)
		tenantReader := factories.TenantReaderFactory(db)
		tenantWriter := factories.TenantWriterFactory(db)

		// Add caching to the schema reader using a decorator
		schemaReader = cacheDecorator.NewSchemaReader(schemaReader, schemaCache)

		dataReader = sfDecorator.NewDataReader(dataReader)
		schemaReader = sfDecorator.NewSchemaReader(schemaReader)

		// Check if circuit breaker should be enabled for services
		if cfg.Service.CircuitBreaker {
			var cb *gobreaker.CircuitBreaker
			var st gobreaker.Settings
			st.Name = "storage"
			st.ReadyToTrip = func(counts gobreaker.Counts) bool {
				failureRatio := float64(counts.TotalFailures) / float64(counts.Requests)
				return counts.Requests >= 10 && failureRatio >= 0.6
			}

			cb = gobreaker.NewCircuitBreaker(st)

			// Add circuit breaker to the relationship reader using decorator
			dataReader = cbDecorator.NewDataReader(dataReader, cb)

			// Add circuit breaker to the bundle reader using decorators
			bundleReader = cbDecorator.NewBundleReader(bundleReader, cb)

			// Add circuit breaker to the schema reader using decorator
			schemaReader = cbDecorator.NewSchemaReader(schemaReader, cb)

			// Add circuit breaker to the tenant reader using decorator
			tenantReader = cbDecorator.NewTenantReader(tenantReader, cb)
		}

		// Initialize the engines using the key manager, schema reader, and relationship reader
		checkEngine := engines.NewCheckEngine(schemaReader, dataReader, engines.CheckConcurrencyLimit(cfg.Service.Permission.ConcurrencyLimit))
		expandEngine := engines.NewExpandEngine(schemaReader, dataReader)

		// Declare a variable `checker` of type `invoke.Check`.
		var checker invoke.Check

		// Create the checker either with load balancing or caching capabilities.
		if cfg.Distributed.Enabled {

			if cfg.Authn.Enabled && cfg.Authn.Method == "oidc" {
				return errors.New("OIDC authentication method cannot be used in distributed mode. Please check your configuration")
			}

			checker, err = balancer.NewCheckEngineWithBalancer(
				ctx,
				checkEngine,
				schemaReader,
				&cfg.Distributed,
				&cfg.Server.GRPC,
				&cfg.Authn,
			)
			// Handle potential error during checker creation.
			if err != nil {
				return err
			}
			checker = cache.NewCheckEngineWithCache(
				checker,
				schemaReader,
				engineKeyCache,
			)
		} else {
			checker = cache.NewCheckEngineWithCache(
				checkEngine,
				schemaReader,
				engineKeyCache,
			)
		}

		// Create a localChecker which directly checks without considering distributed setup.
		// This also includes caching capabilities.
		localChecker := cache.NewCheckEngineWithCache(
			checkEngine,
			schemaReader,
			engineKeyCache,
		)

		// Initialize the lookupEngine, which is responsible for looking up certain entities or values.
		lookupEngine := engines.NewLookupEngine(
			checker,
			schemaReader,
			dataReader,
			// Set concurrency limit based on the configuration.
			engines.LookupConcurrencyLimit(cfg.Service.Permission.BulkLimit),
		)

		// Initialize the subjectPermissionEngine, responsible for handling subject permissions.
		subjectPermissionEngine := engines.NewSubjectPermission(
			checker,
			schemaReader,
			// Set concurrency limit for the subject permission checks.
			engines.SubjectPermissionConcurrencyLimit(cfg.Service.Permission.ConcurrencyLimit),
		)

		// Create a new invoker that is used to directly call various functions or engines.
		// It encompasses the schema, data, checker, and other engines.
		invoker := invoke.NewDirectInvoker(
			schemaReader,
			dataReader,
			checker,
			expandEngine,
			lookupEngine,
			subjectPermissionEngine,
		)

		// Associate the invoker with the checkEngine.
		checkEngine.SetInvoker(invoker)

		// Create a local invoker for local operations.
		localInvoker := invoke.NewDirectInvoker(
			schemaReader,
			dataReader,
			localChecker,
			expandEngine,
			lookupEngine,
			subjectPermissionEngine,
		)

		// Initialize the container which brings together multiple components such as the invoker, data readers/writers, and schema handlers.
		container := servers.NewContainer(
			invoker,
			dataReader,
			dataWriter,
			bundleReader,
			bundleWriter,
			schemaReader,
			schemaWriter,
			tenantReader,
			tenantWriter,
			watcher,
		)

		// Create an error group with the provided context
		var g *errgroup.Group
		g, ctx = errgroup.WithContext(ctx)

		// Add the container.Run function to the error group
		g.Go(func() error {
			return container.Run(
				ctx,
				&cfg.Server,
				logger,
				&cfg.Distributed,
				&cfg.Authn,
				&cfg.Profiler,
				localInvoker,
			)
		})

		// Wait for the error group to finish and log any errors
		if err = g.Wait(); err != nil {
			slog.Error(err.Error())
		}

		return nil
	}
}
```
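The listing repeats the same `key:value` header-parsing loop for the log, tracer, and meter sections, which makes that duplication a natural first Extract Method candidate. A minimal sketch, assuming a hypothetical `parseHeaders` helper added to the same `cmd` package (not part of the current code):

```go
package cmd

import (
	"errors"
	"strings"
)

// parseHeaders converts "key:value" strings into a map and reports malformed
// entries. It would replace the three identical parsing loops in serve().
func parseHeaders(raw []string) (map[string]string, error) {
	headers := map[string]string{}
	for _, header := range raw {
		h := strings.Split(header, ":")
		if len(h) != 2 {
			return nil, errors.New("invalid header format; expected 'key:value'")
		}
		headers[h[0]] = h[1]
	}
	return headers, nil
}
```

Each telemetry branch in serve() would then shrink to a single call such as `headers, err := parseHeaders(cfg.Tracer.Headers)`, and the same extraction approach could be applied to the larger logger, tracer, and meter setup blocks to bring the method down to a readable size.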