Passed
Push — master ( e29be5...bace01 )
by Tolga
01:31 queued 31s
created

cmd.serve   F

Complexity

Conditions 46

Size

Total Lines 363
Code Lines 223

Duplication

Lines 0
Ratio 0 %

Importance

Changes 0
Metric Value
cc 46
eloc 223
nop 0
dl 0
loc 363
rs 0
c 0
b 0
f 0

How to fix   Long Method    Complexity   

Long Method

Small methods make your code easier to understand, in particular if combined with a good name. Besides, if your method is small, finding a good name is usually much easier.

For example, if you find yourself adding comments to a method's body, this is usually a good sign to extract the commented part to a new method, and use the comment as a starting point when coming up with a good name for this new method.

Commonly applied refactorings include Extract Method — moving a cohesive part of the body into a well-named helper.

Complexity

Complex classes like cmd.serve often do a lot of different things. To break such a class down, we need to identify a cohesive component within that class. A common approach to find such a component is to look for fields/methods that share the same prefixes, or suffixes.

Once you have determined the fields that belong together, you can apply the Extract Class refactoring. If the component makes sense as a sub-class, Extract Subclass is also a candidate, and is often faster.

1
package cmd
2
3
import (
4
	"context"
5
	"errors"
6
	"fmt"
7
	"log/slog"
8
	"os"
9
	"os/signal"
10
	"strings"
11
	"syscall"
12
	"time"
13
14
	"github.com/sony/gobreaker"
15
	"github.com/spf13/cobra"
16
	"github.com/spf13/viper"
17
	"go.opentelemetry.io/otel/sdk/metric"
18
19
	"github.com/Permify/permify/internal/engines/balancer"
20
	"github.com/Permify/permify/internal/engines/cache"
21
	"github.com/Permify/permify/internal/invoke"
22
	cacheDecorator "github.com/Permify/permify/internal/storage/decorators/cache"
23
	cbDecorator "github.com/Permify/permify/internal/storage/decorators/circuitBreaker"
24
	sfDecorator "github.com/Permify/permify/internal/storage/decorators/singleflight"
25
	"github.com/Permify/permify/internal/storage/postgres/gc"
26
	"github.com/Permify/permify/pkg/cmd/flags"
27
	PQDatabase "github.com/Permify/permify/pkg/database/postgres"
28
29
	"go.opentelemetry.io/otel/sdk/trace"
30
	"golang.org/x/sync/errgroup"
31
32
	"github.com/Permify/permify/internal"
33
	"github.com/Permify/permify/internal/config"
34
	"github.com/Permify/permify/internal/engines"
35
	"github.com/Permify/permify/internal/factories"
36
	"github.com/Permify/permify/internal/servers"
37
	"github.com/Permify/permify/internal/storage"
38
	pkgcache "github.com/Permify/permify/pkg/cache"
39
	"github.com/Permify/permify/pkg/cache/ristretto"
40
	"github.com/Permify/permify/pkg/telemetry"
41
	"github.com/Permify/permify/pkg/telemetry/meterexporters"
42
	"github.com/Permify/permify/pkg/telemetry/tracerexporters"
43
)
44
45
// NewServeCommand returns a new Cobra command that can be used to run the "permify serve" command.
46
// The command takes no arguments and runs the serve() function to start the Permify service.
47
// The command has a short description of what it does.
48
func NewServeCommand() *cobra.Command {
49
	command := &cobra.Command{
50
		Use:   "serve",
51
		Short: "serve the Permify server",
52
		RunE:  serve(),
53
		Args:  cobra.NoArgs,
54
	}
55
56
	conf := config.DefaultConfig()
57
	f := command.Flags()
58
	f.StringP("config", "c", "", "config file (default is $HOME/.permify.yaml)")
59
	f.Bool("http-enabled", conf.Server.HTTP.Enabled, "switch option for HTTP server")
60
	f.String("account-id", conf.AccountID, "account id")
61
	f.Int64("server-rate-limit", conf.Server.RateLimit, "the maximum number of requests the server should handle per second")
62
	f.String("grpc-port", conf.Server.GRPC.Port, "port that GRPC server run on")
63
	f.Bool("grpc-tls-enabled", conf.Server.GRPC.TLSConfig.Enabled, "switch option for GRPC tls server")
64
	f.String("grpc-tls-key-path", conf.Server.GRPC.TLSConfig.KeyPath, "GRPC tls key path")
65
	f.String("grpc-tls-cert-path", conf.Server.GRPC.TLSConfig.CertPath, "GRPC tls certificate path")
66
	f.String("http-port", conf.Server.HTTP.Port, "HTTP port address")
67
	f.Bool("http-tls-enabled", conf.Server.HTTP.TLSConfig.Enabled, "switch option for HTTP tls server")
68
	f.String("http-tls-key-path", conf.Server.HTTP.TLSConfig.KeyPath, "HTTP tls key path")
69
	f.String("http-tls-cert-path", conf.Server.HTTP.TLSConfig.CertPath, "HTTP tls certificate path")
70
	f.StringSlice("http-cors-allowed-origins", conf.Server.HTTP.CORSAllowedOrigins, "CORS allowed origins for http gateway")
71
	f.StringSlice("http-cors-allowed-headers", conf.Server.HTTP.CORSAllowedHeaders, "CORS allowed headers for http gateway")
72
	f.Bool("profiler-enabled", conf.Profiler.Enabled, "switch option for profiler")
73
	f.String("profiler-port", conf.Profiler.Port, "profiler port address")
74
	f.String("log-level", conf.Log.Level, "set log verbosity ('info', 'debug', 'error', 'warning')")
75
	f.String("log-output", conf.Log.Output, "logger output valid values json, text")
76
	f.Bool("authn-enabled", conf.Authn.Enabled, "enable server authentication")
77
	f.String("authn-method", conf.Authn.Method, "server authentication method")
78
	f.StringSlice("authn-preshared-keys", conf.Authn.Preshared.Keys, "preshared key/keys for server authentication")
79
	f.String("authn-oidc-issuer", conf.Authn.Oidc.Issuer, "issuer identifier of the OpenID Connect Provider")
80
	f.String("authn-oidc-audience", conf.Authn.Oidc.Audience, "intended audience of the OpenID Connect token")
81
	f.Duration("authn-oidc-refresh-interval", conf.Authn.Oidc.RefreshInterval, "refresh interval for the OpenID Connect configuration")
82
	f.StringSlice("authn-oidc-valid-methods", conf.Authn.Oidc.ValidMethods, "list of valid JWT signing methods for OpenID Connect")
83
	f.Bool("tracer-enabled", conf.Tracer.Enabled, "switch option for tracing")
84
	f.String("tracer-exporter", conf.Tracer.Exporter, "can be; jaeger, signoz, zipkin or otlp. (integrated tracing tools)")
85
	f.String("tracer-endpoint", conf.Tracer.Endpoint, "export uri for tracing data")
86
	f.Bool("tracer-insecure", conf.Tracer.Insecure, "use https or http for tracer data, only used for otlp exporter or signoz")
87
	f.String("tracer-urlpath", conf.Tracer.URLPath, "allow to set url path for otlp exporter")
88
	f.StringSlice("tracer-headers", conf.Tracer.Headers, "allows setting custom headers for the tracer exporter in key-value pairs")
89
	f.Bool("meter-enabled", conf.Meter.Enabled, "switch option for metric")
90
	f.String("meter-exporter", conf.Meter.Exporter, "can be; otlp. (integrated metric tools)")
91
	f.String("meter-endpoint", conf.Meter.Endpoint, "export uri for metric data")
92
	f.Bool("meter-insecure", conf.Meter.Insecure, "use https or http for metric data")
93
	f.String("meter-urlpath", conf.Meter.URLPath, "allow to set url path for otlp exporter")
94
	f.StringSlice("meter-headers", conf.Meter.Headers, "allows setting custom headers for the metric exporter in key-value pairs")
95
	f.Bool("service-circuit-breaker", conf.Service.CircuitBreaker, "switch option for service circuit breaker")
96
	f.Bool("service-watch-enabled", conf.Service.Watch.Enabled, "switch option for watch service")
97
	f.Int64("service-schema-cache-number-of-counters", conf.Service.Schema.Cache.NumberOfCounters, "schema service cache number of counters")
98
	f.String("service-schema-cache-max-cost", conf.Service.Schema.Cache.MaxCost, "schema service cache max cost")
99
	f.Int("service-permission-bulk-limit", conf.Service.Permission.BulkLimit, "bulk operations limit")
100
	f.Int("service-permission-concurrency-limit", conf.Service.Permission.ConcurrencyLimit, "concurrency limit")
101
	f.Int64("service-permission-cache-number-of-counters", conf.Service.Permission.Cache.NumberOfCounters, "permission service cache number of counters")
102
	f.String("service-permission-cache-max-cost", conf.Service.Permission.Cache.MaxCost, "permission service cache max cost")
103
	f.String("database-engine", conf.Database.Engine, "data source. e.g. postgres, memory")
104
	f.String("database-uri", conf.Database.URI, "uri of your data source to store relation tuples and schema")
105
	f.Bool("database-auto-migrate", conf.Database.AutoMigrate, "auto migrate database tables")
106
	f.Int("database-max-open-connections", conf.Database.MaxOpenConnections, "maximum number of parallel connections that can be made to the database at any time")
107
	f.Int("database-max-idle-connections", conf.Database.MaxIdleConnections, "maximum number of idle connections that can be made to the database at any time")
108
	f.Duration("database-max-connection-lifetime", conf.Database.MaxConnectionLifetime, "maximum amount of time a connection may be reused")
109
	f.Duration("database-max-connection-idle-time", conf.Database.MaxConnectionIdleTime, "maximum amount of time a connection may be idle")
110
	f.Int("database-max-data-per-write", conf.Database.MaxDataPerWrite, "sets the maximum amount of data per write operation to the database")
111
	f.Int("database-max-retries", conf.Database.MaxRetries, "defines the maximum number of retries for database operations in case of failure")
112
	f.Int("database-watch-buffer-size", conf.Database.WatchBufferSize, "specifies the buffer size for database watch operations, impacting how many changes can be queued")
113
	f.Bool("database-garbage-collection-enabled", conf.Database.GarbageCollection.Enabled, "use database garbage collection for expired relationships and attributes")
114
	f.Duration("database-garbage-collection-interval", conf.Database.GarbageCollection.Interval, "interval for database garbage collection")
115
	f.Duration("database-garbage-collection-timeout", conf.Database.GarbageCollection.Timeout, "timeout for database garbage collection")
116
	f.Duration("database-garbage-collection-window", conf.Database.GarbageCollection.Window, "window for database garbage collection")
117
	f.Bool("distributed-enabled", conf.Distributed.Enabled, "enable distributed")
118
	f.String("distributed-address", conf.Distributed.Address, "distributed address")
119
	f.String("distributed-port", conf.Distributed.Port, "distributed port")
120
121
	// SilenceUsage is set to true to suppress usage when an error occurs
122
	command.SilenceUsage = true
123
124
	command.PreRun = func(cmd *cobra.Command, args []string) {
125
		flags.RegisterServeFlags(f)
126
	}
127
128
	return command
129
}
130
131
// serve is the main function for the "permify serve" command. It starts the Permify service by configuring and starting the necessary components.
132
// It initializes the configuration, logger, database, tracing and metering components, and creates instances of the necessary engines, services, and decorators.
133
// It then creates a ServiceContainer and runs it with the given configuration.
134
// The function uses errgroup to manage the goroutines and gracefully shuts down the service upon receiving a termination signal.
135
// It returns an error if there is an issue with any of the components or if any goroutine fails.
136
func serve() func(cmd *cobra.Command, args []string) error {
137
	return func(cmd *cobra.Command, args []string) error {
138
		var cfg *config.Config
139
		var err error
140
		cfgFile := viper.GetString("config.file")
141
		if cfgFile != "" {
142
			cfg, err = config.NewConfigWithFile(cfgFile)
143
			if err != nil {
144
				return fmt.Errorf("failed to create new config: %w", err)
0 ignored issues
show
introduced by
unrecognized printf verb 'w'
Loading history...
145
			}
146
147
			if err = viper.Unmarshal(cfg); err != nil {
148
				return fmt.Errorf("failed to unmarshal config: %w", err)
0 ignored issues
show
introduced by
unrecognized printf verb 'w'
Loading history...
149
			}
150
		} else {
151
			// Load configuration
152
			cfg, err = config.NewConfig()
153
			if err != nil {
154
				return fmt.Errorf("failed to create new config: %w", err)
0 ignored issues
show
introduced by
unrecognized printf verb 'w'
Loading history...
155
			}
156
157
			if err = viper.Unmarshal(cfg); err != nil {
158
				return fmt.Errorf("failed to unmarshal config: %w", err)
0 ignored issues
show
introduced by
unrecognized printf verb 'w'
Loading history...
159
			}
160
		}
161
162
		// Print banner and initialize logger
163
		internal.PrintBanner()
164
165
		var handler slog.Handler
166
		switch cfg.Log.Output {
167
		case "json":
168
			handler = slog.NewJSONHandler(os.Stdout, &slog.HandlerOptions{
169
				Level: getLogLevel(cfg.Log.Level),
170
			})
171
		case "text":
172
			handler = slog.NewTextHandler(os.Stdout, &slog.HandlerOptions{
173
				Level: getLogLevel(cfg.Log.Level),
174
			})
175
		default:
176
			handler = slog.NewTextHandler(os.Stdout, &slog.HandlerOptions{
177
				Level: getLogLevel(cfg.Log.Level),
178
			})
179
		}
180
181
		logger := slog.New(handler)
182
183
		slog.SetDefault(logger)
184
185
		slog.Info("🚀 starting permify service...")
186
187
		internal.Identifier = cfg.AccountID
188
		if internal.Identifier == "" {
189
			message := "Account ID is not set. Please fill in the Account ID for better support. Get your Account ID from https://permify.co/account"
190
			slog.Error(message)
191
192
			ticker := time.NewTicker(24 * time.Hour)
193
			defer ticker.Stop()
194
195
			go func() {
196
				for range ticker.C {
197
					slog.Error(message)
198
				}
199
			}()
200
		}
201
202
		// Set up context and signal handling
203
		ctx, stop := signal.NotifyContext(context.Background(), syscall.SIGINT, syscall.SIGTERM)
204
		defer stop()
205
206
		// Run database migration if enabled
207
		if cfg.Database.AutoMigrate {
208
			err = storage.Migrate(cfg.Database)
209
			if err != nil {
210
				slog.Error("failed to migrate database", slog.Any("error", err))
211
			}
212
		}
213
214
		// Initialize database
215
		db, err := factories.DatabaseFactory(cfg.Database)
216
		if err != nil {
217
			slog.Error("failed to initialize database", slog.Any("error", err))
218
			return err
219
		}
220
		defer func() {
221
			if err = db.Close(); err != nil {
222
				slog.Error("failed to close database", slog.Any("error", err))
223
			}
224
		}()
225
226
		// Tracing
227
		if cfg.Tracer.Enabled {
228
			headers := map[string]string{}
229
			for _, header := range cfg.Tracer.Headers {
230
				h := strings.Split(header, ":")
231
				if len(h) != 2 {
232
					return errors.New("invalid header format; expected 'key:value'")
233
				}
234
				headers[h[0]] = h[1]
235
			}
236
237
			var exporter trace.SpanExporter
238
			exporter, err = tracerexporters.ExporterFactory(
239
				cfg.Tracer.Exporter,
240
				cfg.Tracer.Endpoint,
241
				cfg.Tracer.Insecure,
242
				cfg.Tracer.URLPath,
243
				headers,
244
			)
245
			if err != nil {
246
				slog.Error(err.Error())
247
			}
248
249
			shutdown := telemetry.NewTracer(exporter)
250
251
			defer func() {
252
				if err = shutdown(context.Background()); err != nil {
253
					slog.Error(err.Error())
254
				}
255
			}()
256
		}
257
258
		// Garbage collection
259
		if cfg.Database.GarbageCollection.Timeout > 0 && cfg.Database.GarbageCollection.Enabled && cfg.Database.Engine != "memory" {
260
			slog.Info("🗑️ starting database garbage collection...")
261
262
			garbageCollector := gc.NewGC(
263
				db.(*PQDatabase.Postgres),
264
				gc.Interval(cfg.Database.GarbageCollection.Interval),
265
				gc.Window(cfg.Database.GarbageCollection.Window),
266
				gc.Timeout(cfg.Database.GarbageCollection.Timeout),
267
			)
268
269
			go func() {
270
				err = garbageCollector.Start(ctx)
271
				if err != nil {
272
					slog.Error(err.Error())
273
				}
274
			}()
275
		}
276
277
		// Meter
278
		meter := telemetry.NewNoopMeter()
279
		if cfg.Meter.Enabled {
280
			headers := map[string]string{}
281
			for _, header := range cfg.Meter.Headers {
282
				h := strings.Split(header, ":")
283
				if len(h) != 2 {
284
					return errors.New("invalid header format; expected 'key:value'")
285
				}
286
				headers[h[0]] = h[1]
287
			}
288
289
			var exporter metric.Exporter
290
			exporter, err = meterexporters.ExporterFactory(
291
				cfg.Meter.Exporter,
292
				cfg.Meter.Endpoint,
293
				cfg.Meter.Insecure,
294
				cfg.Meter.URLPath,
295
				headers,
296
			)
297
			if err != nil {
298
				slog.Error(err.Error())
299
			}
300
301
			meter, err = telemetry.NewMeter(exporter)
302
			if err != nil {
303
				slog.Error(err.Error())
304
			}
305
		}
306
307
		// schema cache
308
		var schemaCache pkgcache.Cache
309
		schemaCache, err = ristretto.New(ristretto.NumberOfCounters(cfg.Service.Schema.Cache.NumberOfCounters), ristretto.MaxCost(cfg.Service.Schema.Cache.MaxCost))
310
		if err != nil {
311
			slog.Error(err.Error())
312
			return err
313
		}
314
315
		// engines cache cache
316
		var engineKeyCache pkgcache.Cache
317
		engineKeyCache, err = ristretto.New(ristretto.NumberOfCounters(cfg.Service.Permission.Cache.NumberOfCounters), ristretto.MaxCost(cfg.Service.Permission.Cache.MaxCost))
318
		if err != nil {
319
			slog.Error(err.Error())
320
			return err
321
		}
322
323
		watcher := storage.NewNoopWatcher()
324
		if cfg.Service.Watch.Enabled {
325
			watcher = factories.WatcherFactory(db)
326
		}
327
328
		// Initialize the storage with factory methods
329
		dataReader := factories.DataReaderFactory(db)
330
		dataWriter := factories.DataWriterFactory(db)
331
		bundleReader := factories.BundleReaderFactory(db)
332
		bundleWriter := factories.BundleWriterFactory(db)
333
		schemaReader := factories.SchemaReaderFactory(db)
334
		schemaWriter := factories.SchemaWriterFactory(db)
335
		tenantReader := factories.TenantReaderFactory(db)
336
		tenantWriter := factories.TenantWriterFactory(db)
337
338
		// Add caching to the schema reader using a decorator
339
		schemaReader = cacheDecorator.NewSchemaReader(schemaReader, schemaCache)
340
341
		dataReader = sfDecorator.NewDataReader(dataReader)
342
		schemaReader = sfDecorator.NewSchemaReader(schemaReader)
343
344
		// Check if circuit breaker should be enabled for services
345
		if cfg.Service.CircuitBreaker {
346
			var cb *gobreaker.CircuitBreaker
347
			var st gobreaker.Settings
348
			st.Name = "storage"
349
			st.ReadyToTrip = func(counts gobreaker.Counts) bool {
350
				failureRatio := float64(counts.TotalFailures) / float64(counts.Requests)
351
				return counts.Requests >= 10 && failureRatio >= 0.6
352
			}
353
354
			cb = gobreaker.NewCircuitBreaker(st)
355
356
			// Add circuit breaker to the relationship reader using decorator
357
			dataReader = cbDecorator.NewDataReader(dataReader, cb)
358
359
			// Add circuit breaker to the bundle reader using decorators
360
			bundleReader = cbDecorator.NewBundleReader(bundleReader, cb)
361
362
			// Add circuit breaker to the schema reader using decorator
363
			schemaReader = cbDecorator.NewSchemaReader(schemaReader, cb)
364
365
			// Add circuit breaker to the tenant reader using decorator
366
			tenantReader = cbDecorator.NewTenantReader(tenantReader, cb)
367
		}
368
369
		// Initialize the engines using the key manager, schema reader, and relationship reader
370
		checkEngine := engines.NewCheckEngine(schemaReader, dataReader, engines.CheckConcurrencyLimit(cfg.Service.Permission.ConcurrencyLimit))
371
		expandEngine := engines.NewExpandEngine(schemaReader, dataReader)
372
373
		// Declare a variable `checker` of type `invoke.Check`.
374
		var checker invoke.Check
375
376
		// Create the checker either with load balancing or caching capabilities.
377
		if cfg.Distributed.Enabled {
378
379
			if cfg.Authn.Enabled && cfg.Authn.Method == "oidc" {
380
				return errors.New("OIDC authentication method cannot be used in distributed mode. Please check your configuration")
381
			}
382
383
			checker, err = balancer.NewCheckEngineWithBalancer(
384
				context.Background(),
385
				checkEngine,
386
				schemaReader,
387
				&cfg.Distributed,
388
				&cfg.Server.GRPC,
389
				&cfg.Authn,
390
			)
391
			// Handle potential error during checker creation.
392
			if err != nil {
393
				return err
394
			}
395
			checker = cache.NewCheckEngineWithCache(
396
				checker,
397
				schemaReader,
398
				engineKeyCache,
399
				meter,
400
			)
401
		} else {
402
			checker = cache.NewCheckEngineWithCache(
403
				checkEngine,
404
				schemaReader,
405
				engineKeyCache,
406
				meter,
407
			)
408
		}
409
410
		// Create a localChecker which directly checks without considering distributed setup.
411
		// This also includes caching capabilities.
412
		localChecker := cache.NewCheckEngineWithCache(
413
			checkEngine,
414
			schemaReader,
415
			engineKeyCache,
416
			meter,
417
		)
418
419
		// Initialize the lookupEngine, which is responsible for looking up certain entities or values.
420
		lookupEngine := engines.NewLookupEngine(
421
			checker,
422
			schemaReader,
423
			dataReader,
424
			// Set concurrency limit based on the configuration.
425
			engines.LookupConcurrencyLimit(cfg.Service.Permission.BulkLimit),
426
		)
427
428
		// Initialize the subjectPermissionEngine, responsible for handling subject permissions.
429
		subjectPermissionEngine := engines.NewSubjectPermission(
430
			checker,
431
			schemaReader,
432
			// Set concurrency limit for the subject permission checks.
433
			engines.SubjectPermissionConcurrencyLimit(cfg.Service.Permission.ConcurrencyLimit),
434
		)
435
436
		// Create a new invoker that is used to directly call various functions or engines.
437
		// It encompasses the schema, data, checker, and other engines.
438
		invoker := invoke.NewDirectInvoker(
439
			schemaReader,
440
			dataReader,
441
			checker,
442
			expandEngine,
443
			lookupEngine,
444
			subjectPermissionEngine,
445
			meter,
446
		)
447
448
		// Associate the invoker with the checkEngine.
449
		checkEngine.SetInvoker(invoker)
450
451
		// Create a local invoker for local operations.
452
		localInvoker := invoke.NewDirectInvoker(
453
			schemaReader,
454
			dataReader,
455
			localChecker,
456
			expandEngine,
457
			lookupEngine,
458
			subjectPermissionEngine,
459
			meter,
460
		)
461
462
		// Initialize the container which brings together multiple components such as the invoker, data readers/writers, and schema handlers.
463
		container := servers.NewContainer(
464
			invoker,
465
			dataReader,
466
			dataWriter,
467
			bundleReader,
468
			bundleWriter,
469
			schemaReader,
470
			schemaWriter,
471
			tenantReader,
472
			tenantWriter,
473
			watcher,
474
		)
475
476
		// Create an error group with the provided context
477
		var g *errgroup.Group
478
		g, ctx = errgroup.WithContext(ctx)
479
480
		// Add the container.Run function to the error group
481
		g.Go(func() error {
482
			return container.Run(
483
				ctx,
484
				&cfg.Server,
485
				logger,
486
				&cfg.Distributed,
487
				&cfg.Authn,
488
				&cfg.Profiler,
489
				localInvoker,
490
			)
491
		})
492
493
		// Wait for the error group to finish and log any errors
494
		if err = g.Wait(); err != nil {
495
			slog.Error(err.Error())
496
		}
497
498
		return nil
499
	}
500
}
501
502
// getLogLevel converts a string representation of log level to its corresponding slog.Level value.
503
func getLogLevel(level string) slog.Level {
504
	switch level {
505
	case "info":
506
		return slog.LevelInfo // Return Info level
507
	case "warn":
508
		return slog.LevelWarn // Return Warning level
509
	case "error":
510
		return slog.LevelError // Return Error level
511
	case "debug":
512
		return slog.LevelDebug // Return Debug level
513
	default:
514
		return slog.LevelInfo // Default to Info level if unrecognized
515
	}
516
}
517