| Metric | Value |
| --- | --- |
| Conditions | 51 |
| Total Lines | 394 |
| Code Lines | 242 |
| Lines | 0 |
| Ratio | 0 % |
| Changes | 0 |
Small methods make your code easier to understand, especially when combined with a good name; and if a method is small, finding a good name is usually much easier.
For example, if you find yourself adding comments inside a method's body, that is usually a sign that the commented part should be extracted into a new method, with the comment serving as a starting point for its name.
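In the serve() listing below, several comments already name such concepts. For instance, the configuration-loading block could become a helper named straight from its `// Load configuration` comment. The sketch below is purely illustrative (the helper does not exist in the project) and assumes the same imports as the original file:

```go
// loadConfig is a hypothetical helper extracted from serve(); the old
// "// Load configuration" comment becomes the method name.
func loadConfig() (*config.Config, error) {
    var cfg *config.Config
    var err error
    if cfgFile := viper.GetString("config.file"); cfgFile != "" {
        cfg, err = config.NewConfigWithFile(cfgFile)
    } else {
        cfg, err = config.NewConfig()
    }
    if err != nil {
        return nil, fmt.Errorf("failed to create new config: %w", err)
    }
    if err = viper.Unmarshal(cfg); err != nil {
        return nil, fmt.Errorf("failed to unmarshal config: %w", err)
    }
    return cfg, nil
}
```

As a side effect, the extraction also removes the duplicated error handling between the file-based and default configuration branches.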
Commonly applied refactorings include Extract Method. If many parameters or temporary variables get in the way of extraction, Replace Temp with Query, Introduce Parameter Object, or Replace Method with Method Object can help.
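The serve() listing below is a good candidate for the first of these: the same "key:value" header-parsing loop appears three times, once each for the log, tracer, and meter settings. A minimal sketch of the extracted helper, assuming it lives in the same cmd package (the helper itself is hypothetical):

```go
// parseHeaders is a hypothetical Extract Method target: the identical
// parsing loop appears three times in serve().
func parseHeaders(raw []string) (map[string]string, error) {
    headers := map[string]string{}
    for _, header := range raw {
        h := strings.Split(header, ":")
        if len(h) != 2 {
            return nil, errors.New("invalid header format; expected 'key:value'")
        }
        headers[h[0]] = h[1]
    }
    return headers, nil
}
```

Each of the three blocks then shrinks to `headers, err := parseHeaders(cfg.Log.Headers)` (and likewise for cfg.Tracer.Headers and cfg.Meter.Headers) followed by a single error check.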
Complex functions like cmd.serve often do a lot of different things. To break such a unit down, we need to identify a cohesive component within it. A common approach is to look for fields, parameters, or methods that share the same prefixes or suffixes.
Once you have determined which pieces belong together, you can apply the Extract Class refactoring. If the component makes sense as a subclass, Extract Subclass is also a candidate and is often faster to apply.
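In serve(), the configuration sections cfg.Log, cfg.Tracer, and cfg.Meter show exactly this prefix pattern: each one hands Exporter, Endpoint, Insecure, Urlpath, Headers, and Protocol to an exporter factory. One way to act on that signal is a small parameter object; the type below is a hypothetical sketch, and the field types are assumptions based on how serve() uses them:

```go
// telemetryExporterConfig is a hypothetical parameter object grouping the
// fields that the Log, Tracer, and Meter config sections all pass to their
// exporter factories in serve(). Field types are assumed from usage.
type telemetryExporterConfig struct {
    Exporter string
    Endpoint string
    Insecure bool
    Urlpath  string
    Headers  []string // raw "key:value" pairs
    Protocol string
}
```

With such a type in place, the three near-identical setup blocks can delegate to one helper (for example a hypothetical setupExporter(ctx, telemetryExporterConfig)), which is the Extract Class move in miniature.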
```go
package cmd

// ... (imports and preceding code omitted in the report; the listing resumes at the flagged function)

func serve() func(cmd *cobra.Command, args []string) error {
    return func(cmd *cobra.Command, args []string) error {
        var cfg *config.Config
        var err error
        cfgFile := viper.GetString("config.file")
        if cfgFile != "" {
            cfg, err = config.NewConfigWithFile(cfgFile)
            if err != nil {
                return fmt.Errorf("failed to create new config: %w", err)
            }

            if err = viper.Unmarshal(cfg); err != nil {
                return fmt.Errorf("failed to unmarshal config: %w", err)
            }
        } else {
            // Load configuration
            cfg, err = config.NewConfig()
            if err != nil {
                return fmt.Errorf("failed to create new config: %w", err)
            }

            if err = viper.Unmarshal(cfg); err != nil {
                return fmt.Errorf("failed to unmarshal config: %w", err)
            }
        }

        // Print banner and initialize logger
        internal.PrintBanner()

        // Set up context and signal handling
        ctx, stop := signal.NotifyContext(context.Background(), syscall.SIGINT, syscall.SIGTERM)
        defer stop()

        internal.Identifier = cfg.AccountID

        var logger *slog.Logger
        var handler slog.Handler

        switch cfg.Log.Output {
        case "json":
            handler = slog.NewJSONHandler(os.Stdout, &slog.HandlerOptions{
                Level: getLogLevel(cfg.Log.Level),
            })
        case "text":
            handler = slog.NewTextHandler(os.Stdout, &slog.HandlerOptions{
                Level: getLogLevel(cfg.Log.Level),
            })
        default:
            handler = slog.NewTextHandler(os.Stdout, &slog.HandlerOptions{
                Level: getLogLevel(cfg.Log.Level),
            })
        }

        if cfg.Log.Enabled {
            headers := map[string]string{}
            for _, header := range cfg.Log.Headers {
                h := strings.Split(header, ":")
                if len(h) != 2 {
                    return errors.New("invalid header format; expected 'key:value'")
                }
                headers[h[0]] = h[1]
            }

            customHandler, err := telemetry.HandlerFactory(
                cfg.Log.Exporter,
                cfg.Log.Endpoint,
                cfg.Log.Insecure,
                cfg.Log.Urlpath,
                headers,
                cfg.Log.Protocol,
                getLogLevel(cfg.Log.Level),
            )

            if err != nil {
                slog.Error("invalid logger exporter", slog.Any("error", err))
                logger = slog.New(handler)
            } else {
                logger = slog.New(
                    slogmulti.Fanout(
                        customHandler,
                        handler,
                    ),
                )
            }
        } else {
            logger = slog.New(handler)
        }

        slog.SetDefault(logger)

        if internal.Identifier == "" {
            message := "Account ID is not set. Please fill in the Account ID for better support. Get your Account ID from https://permify.co/account"
            slog.Error(message)

            ticker := time.NewTicker(24 * time.Hour)
            defer ticker.Stop()

            go func() {
                for range ticker.C {
                    slog.Error(message)
                }
            }()
        }

        slog.Info("🚀 starting permify service...")

        // Run database migration if enabled
        if cfg.Database.AutoMigrate {
            err = storage.Migrate(cfg.Database)
            if err != nil {
                slog.Error("failed to migrate database", slog.Any("error", err))
                return err
            }
        }

        // Initialize database
        db, err := factories.DatabaseFactory(cfg.Database)
        if err != nil {
            slog.Error("failed to initialize database", slog.Any("error", err))
            return err
        }
        defer func() {
            if err = db.Close(); err != nil {
                slog.Error("failed to close database", slog.Any("error", err))
            }
        }()

        // Tracing
        if cfg.Tracer.Enabled {
            headers := map[string]string{}
            for _, header := range cfg.Tracer.Headers {
                h := strings.Split(header, ":")
                if len(h) != 2 {
                    return errors.New("invalid header format; expected 'key:value'")
                }
                headers[h[0]] = h[1]
            }

            var exporter trace.SpanExporter
            exporter, err = tracerexporters.ExporterFactory(
                cfg.Tracer.Exporter,
                cfg.Tracer.Endpoint,
                cfg.Tracer.Insecure,
                cfg.Tracer.Urlpath,
                headers,
                cfg.Tracer.Protocol,
            )
            if err != nil {
                slog.Error(err.Error())
            }

            shutdown := telemetry.NewTracer(exporter)

            defer func() {
                if err = shutdown(ctx); err != nil {
                    slog.Error(err.Error())
                }
            }()
        }

        // Garbage collection
        if cfg.Database.GarbageCollection.Timeout > 0 && cfg.Database.GarbageCollection.Enabled && cfg.Database.Engine != "memory" {
            slog.Info("🗑️ starting database garbage collection...")

            garbageCollector := gc.NewGC(
                db.(*PQDatabase.Postgres),
                gc.Interval(cfg.Database.GarbageCollection.Interval),
                gc.Window(cfg.Database.GarbageCollection.Window),
                gc.Timeout(cfg.Database.GarbageCollection.Timeout),
            )

            go func() {
                err = garbageCollector.Start(ctx)
                if err != nil {
                    slog.Error(err.Error())
                }
            }()
        }

        // Meter
        if cfg.Meter.Enabled {
            headers := map[string]string{}
            for _, header := range cfg.Meter.Headers {
                h := strings.Split(header, ":")
                if len(h) != 2 {
                    return errors.New("invalid header format; expected 'key:value'")
                }
                headers[h[0]] = h[1]
            }

            var exporter metric.Exporter
            exporter, err = meterexporters.ExporterFactory(
                cfg.Meter.Exporter,
                cfg.Meter.Endpoint,
                cfg.Meter.Insecure,
                cfg.Meter.Urlpath,
                headers,
                cfg.Meter.Protocol,
            )
            if err != nil {
                slog.Error(err.Error())
            }

            shutdown := telemetry.NewMeter(exporter, time.Duration(cfg.Meter.Interval)*time.Second)

            defer func() {
                if err = shutdown(ctx); err != nil {
                    slog.Error(err.Error())
                }
            }()
        }

        // schema cache
        var schemaCache pkgcache.Cache
        schemaCache, err = ristretto.New(ristretto.NumberOfCounters(cfg.Service.Schema.Cache.NumberOfCounters), ristretto.MaxCost(cfg.Service.Schema.Cache.MaxCost))
        if err != nil {
            slog.Error(err.Error())
            return err
        }

        // engines cache
        var engineKeyCache pkgcache.Cache
        engineKeyCache, err = ristretto.New(ristretto.NumberOfCounters(cfg.Service.Permission.Cache.NumberOfCounters), ristretto.MaxCost(cfg.Service.Permission.Cache.MaxCost))
        if err != nil {
            slog.Error(err.Error())
            return err
        }

        watcher := storage.NewNoopWatcher()
        if cfg.Service.Watch.Enabled {
            watcher = factories.WatcherFactory(db)
        }

        // Initialize the storage with factory methods
        dataReader := factories.DataReaderFactory(db)
        dataWriter := factories.DataWriterFactory(db)
        bundleReader := factories.BundleReaderFactory(db)
        bundleWriter := factories.BundleWriterFactory(db)
        schemaReader := factories.SchemaReaderFactory(db)
        schemaWriter := factories.SchemaWriterFactory(db)
        tenantReader := factories.TenantReaderFactory(db)
        tenantWriter := factories.TenantWriterFactory(db)

        // Add caching to the schema reader using a decorator
        schemaReader = cacheDecorator.NewSchemaReader(schemaReader, schemaCache)

        dataReader = sfDecorator.NewDataReader(dataReader)
        schemaReader = sfDecorator.NewSchemaReader(schemaReader)

        // Check if circuit breaker should be enabled for services
        if cfg.Service.CircuitBreaker {
            var cb *gobreaker.CircuitBreaker
            var st gobreaker.Settings
            st.Name = "storage"
            st.ReadyToTrip = func(counts gobreaker.Counts) bool {
                failureRatio := float64(counts.TotalFailures) / float64(counts.Requests)
                return counts.Requests >= 10 && failureRatio >= 0.6
            }

            cb = gobreaker.NewCircuitBreaker(st)

            // Add circuit breaker to the relationship reader using decorator
            dataReader = cbDecorator.NewDataReader(dataReader, cb)

            // Add circuit breaker to the bundle reader using decorator
            bundleReader = cbDecorator.NewBundleReader(bundleReader, cb)

            // Add circuit breaker to the schema reader using decorator
            schemaReader = cbDecorator.NewSchemaReader(schemaReader, cb)

            // Add circuit breaker to the tenant reader using decorator
            tenantReader = cbDecorator.NewTenantReader(tenantReader, cb)
        }

        // Initialize the engines using the key manager, schema reader, and relationship reader
        checkEngine := engines.NewCheckEngine(schemaReader, dataReader, engines.CheckConcurrencyLimit(cfg.Service.Permission.ConcurrencyLimit))
        expandEngine := engines.NewExpandEngine(schemaReader, dataReader)

        // Declare a variable `checker` of type `invoke.Check`.
        var checker invoke.Check

        checker = cache.NewCheckEngineWithCache(
            checkEngine,
            schemaReader,
            engineKeyCache,
        )

        // Create the checker either with load balancing or caching capabilities.
        if cfg.Distributed.Enabled {
            if cfg.Authn.Enabled && cfg.Authn.Method == "oidc" {
                return errors.New("OIDC authentication method cannot be used in distributed mode. Please check your configuration")
            }

            checker, err = balancer.NewCheckEngineWithBalancer(
                ctx,
                checker,
                schemaReader,
                cfg.NameOverride,
                &cfg.Distributed,
                &cfg.Server.GRPC,
                &cfg.Authn,
            )
            // Handle potential error during checker creation.
            if err != nil {
                return err
            }
        }

        // Create a localChecker which directly checks without considering distributed setup.
        // This also includes caching capabilities.
        localChecker := cache.NewCheckEngineWithCache(
            checkEngine,
            schemaReader,
            engineKeyCache,
        )

        // Initialize the lookupEngine, which is responsible for looking up certain entities or values.
        lookupEngine := engines.NewLookupEngine(
            checker,
            schemaReader,
            dataReader,
            // Set concurrency limit based on the configuration.
            engines.LookupConcurrencyLimit(cfg.Service.Permission.BulkLimit),
        )

        // Initialize the subjectPermissionEngine, responsible for handling subject permissions.
        subjectPermissionEngine := engines.NewSubjectPermission(
            checker,
            schemaReader,
            // Set concurrency limit for the subject permission checks.
            engines.SubjectPermissionConcurrencyLimit(cfg.Service.Permission.ConcurrencyLimit),
        )

        // Create a new invoker that is used to directly call various functions or engines.
        // It encompasses the schema, data, checker, and other engines.
        invoker := invoke.NewDirectInvoker(
            schemaReader,
            dataReader,
            checker,
            expandEngine,
            lookupEngine,
            subjectPermissionEngine,
        )

        // Associate the invoker with the checkEngine.
        checkEngine.SetInvoker(invoker)

        // Create a local invoker for local operations.
        localInvoker := invoke.NewDirectInvoker(
            schemaReader,
            dataReader,
            localChecker,
            expandEngine,
            lookupEngine,
            subjectPermissionEngine,
        )

        // Initialize the container which brings together multiple components such as the invoker, data readers/writers, and schema handlers.
        container := servers.NewContainer(
            invoker,
            dataReader,
            dataWriter,
            bundleReader,
            bundleWriter,
            schemaReader,
            schemaWriter,
            tenantReader,
            tenantWriter,
            watcher,
        )

        // Create an error group with the provided context
        var g *errgroup.Group
        g, ctx = errgroup.WithContext(ctx)

        // Add the container.Run function to the error group
        g.Go(func() error {
            return container.Run(
                ctx,
                &cfg.Server,
                logger,
                &cfg.Distributed,
                &cfg.Authn,
                &cfg.Profiler,
                localInvoker,
            )
        })

        // Wait for the error group to finish and log any errors
        if err = g.Wait(); err != nil {
            slog.Error(err.Error())
        }

        return nil
    }
}
```
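Taken together, these refactorings would let serve() read as a short outline of named steps. The sketch below is purely illustrative: every helper in it (loadConfig, setupLogger, setupStorage, buildEngines) is hypothetical and stands in for one of the blocks in the listing above.

```go
// Hypothetical shape of serve() after extraction. None of these helpers
// exist in the project; each name stands for one block of the original body.
func serve() func(cmd *cobra.Command, args []string) error {
    return func(cmd *cobra.Command, args []string) error {
        cfg, err := loadConfig() // config-file vs. default branch, viper.Unmarshal
        if err != nil {
            return err
        }

        ctx, stop := signal.NotifyContext(context.Background(), syscall.SIGINT, syscall.SIGTERM)
        defer stop()

        logger, err := setupLogger(cfg) // handler selection plus optional telemetry fan-out
        if err != nil {
            return err
        }
        slog.SetDefault(logger)

        db, closeDB, err := setupStorage(ctx, cfg) // migration, database factory, garbage collection
        if err != nil {
            return err
        }
        defer closeDB()

        container, localInvoker, err := buildEngines(ctx, cfg, db) // caches, decorators, engines, invokers
        if err != nil {
            return err
        }

        g, gctx := errgroup.WithContext(ctx)
        g.Go(func() error {
            return container.Run(gctx, &cfg.Server, logger, &cfg.Distributed, &cfg.Authn, &cfg.Profiler, localInvoker)
        })
        if err := g.Wait(); err != nil {
            slog.Error(err.Error())
        }
        return nil
    }
}
```

The tracer and meter setup would collapse into a similar hypothetical setupTelemetry helper whose returned shutdown functions are deferred in the same way, leaving each extracted piece small enough to name and test on its own.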