diff --git a/cmd/config/configuration.go b/cmd/config/configuration.go index eb4bbc4..f60a1ae 100644 --- a/cmd/config/configuration.go +++ b/cmd/config/configuration.go @@ -44,7 +44,7 @@ const ( type ( // Configuration used to configure seelf commands. Configuration interface { - serve.Options // The configuration should provide every settings needed by the seelf server + serve.Options Initialize(log.ConfigurableLogger, string) error // Initialize the configuration by loading it (from config file, env vars, etc.) } diff --git a/cmd/serve/server.go b/cmd/serve/server.go index 1e8a312..d1b18f6 100644 --- a/cmd/serve/server.go +++ b/cmd/serve/server.go @@ -63,14 +63,14 @@ func newHttpServer(options ServerOptions, root startup.ServerRoot) *server { _ = s.router.SetTrustedProxies(nil) // Configure the session store - store := cookie.NewStore(s.options.Secret()) + store := cookie.NewStore(options.Secret()) store.Options(sessions.Options{ - Secure: s.options.IsSecure(), + Secure: options.IsSecure(), HttpOnly: true, SameSite: http.SameSiteStrictMode, }) - if s.options.IsDebug() { + if options.IsDebug() { s.router.Use(s.requestLogger) } diff --git a/cmd/startup/server.go b/cmd/startup/server.go index bbf7fe2..723ce15 100644 --- a/cmd/startup/server.go +++ b/cmd/startup/server.go @@ -47,7 +47,6 @@ type ( } serverRoot struct { - options ServerOptions bus bus.Bus logger log.Logger db *sqlite.Database @@ -61,13 +60,15 @@ type ( // needed by the server. 
func Server(options ServerOptions, logger log.Logger) (ServerRoot, error) { s := &serverRoot{ - options: options, - logger: logger, + logger: logger, } + // embedded.NewBus() + // embedded.NewScheduler() + s.bus = memory.NewBus() - db, err := sqlite.Open(s.options.ConnectionString(), s.logger, s.bus) + db, err := sqlite.Open(options.ConnectionString(), s.logger, s.bus) if err != nil { return nil, err @@ -81,13 +82,13 @@ func Server(options ServerOptions, logger log.Logger) (ServerRoot, error) { return nil, err } - s.scheduler = bus.NewScheduler(s.schedulerStore, s.logger, s.bus, s.options.RunnersPollInterval(), + s.scheduler = bus.NewScheduler(s.schedulerStore, s.logger, s.bus, options.RunnersPollInterval(), bus.WorkerGroup{ - Size: s.options.RunnersDeploymentCount(), + Size: options.RunnersDeploymentCount(), Messages: []string{deploy.Command{}.Name_()}, }, bus.WorkerGroup{ - Size: s.options.RunnersCleanupCount(), + Size: options.RunnersCleanupCount(), Messages: []string{ cleanup_app.Command{}.Name_(), delete_app.Command{}.Name_(), @@ -105,7 +106,7 @@ func Server(options ServerOptions, logger log.Logger) (ServerRoot, error) { // Setups deployment infrastructure if err = deploymentinfra.Setup( - s.options, + options, s.logger, s.db, s.bus, @@ -125,7 +126,7 @@ func Server(options ServerOptions, logger log.Logger) (ServerRoot, error) { } // Create the target needed to expose seelf itself and manage certificates if needed - if exposedUrl, isSet := s.options.AppExposedUrl().TryGet(); isSet { + if exposedUrl, isSet := options.AppExposedUrl().TryGet(); isSet { container := exposedUrl.User().Get("") s.logger.Infow("exposing seelf container using the local target, creating it if needed, the container may restart once done", diff --git a/internal/deployment/infra/artifact/local_artifact_manager.go b/internal/deployment/infra/artifact/local_artifact_manager.go index 5a2f150..a5711d3 100644 --- a/internal/deployment/infra/artifact/local_artifact_manager.go +++ 
b/internal/deployment/infra/artifact/local_artifact_manager.go @@ -121,15 +121,15 @@ func (a *localArtifactManager) appPath(appID domain.AppID) string { return filepath.Join(a.appsDirectory, string(appID)) } -func (a *localArtifactManager) deploymentPath(depl domain.Deployment) (string, error) { +func (a *localArtifactManager) deploymentPath(deployment domain.Deployment) (string, error) { var w strings.Builder if err := a.options.DeploymentDirTemplate().Execute(&w, deploymentTemplateData{ - Number: depl.ID().DeploymentNumber(), - Environment: depl.Config().Environment(), + Number: deployment.ID().DeploymentNumber(), + Environment: deployment.Config().Environment(), }); err != nil { return "", err } - return filepath.Join(a.appPath(depl.ID().AppID()), w.String()), nil + return filepath.Join(a.appPath(deployment.ID().AppID()), w.String()), nil } diff --git a/pkg/bus/dispatcher.go b/pkg/bus/dispatcher.go index ed1d876..4cf2d7f 100644 --- a/pkg/bus/dispatcher.go +++ b/pkg/bus/dispatcher.go @@ -5,6 +5,7 @@ import ( "errors" "github.com/YuukanOO/seelf/pkg/storage" + "github.com/YuukanOO/seelf/pkg/types" ) var ErrNoHandlerRegistered = errors.New("no_handler_registered") @@ -30,7 +31,10 @@ type ( } ) -// Register an handler for a specific request on the provided bus. +// Register a handler for a specific request on the provided bus. You should always +// prefer this registration method. +// If the provided message is an async one, it will be automatically registered on +// the Marshallable mapper to make things easier. func Register[TResult any, TMsg TypedRequest[TResult]](bus Bus, handler RequestHandler[TResult, TMsg]) { var ( msg TMsg @@ -42,8 +46,12 @@ func Register[TResult any, TMsg TypedRequest[TResult]](bus Bus, handler RequestH bus.Register(msg, h) // If the message is schedulable, register the unmarshaller automatically.
- if _, isSchedulable := any(msg).(Schedulable); isSchedulable { - Marshallable.Register(msg, func(s string) (Request, error) { return storage.UnmarshalJSON[TMsg](s) }) + // This is done here because of the known type TMsg but maybe I should try to + // move it to bus/memory in the future. + if types.Is[Schedulable](msg) { + Marshallable.Register(msg, func(s string) (Request, error) { + return storage.UnmarshalJSON[TMsg](s) + }) } } diff --git a/pkg/bus/memory/dispatcher.go b/pkg/bus/memory/dispatcher.go index a4beac7..64c5819 100644 --- a/pkg/bus/memory/dispatcher.go +++ b/pkg/bus/memory/dispatcher.go @@ -4,6 +4,7 @@ import ( "context" "github.com/YuukanOO/seelf/pkg/bus" + "github.com/YuukanOO/seelf/pkg/types" ) type ( @@ -28,12 +29,12 @@ func (b *dispatcher) Register(msg bus.Message, handler bus.NextFunc) { name := msg.Name_() _, exists := b.handlers[name] - // Apply middlewares to avoid doing it at runtime + // Apply middlewares here to avoid doing it at runtime for i := len(b.middlewares) - 1; i >= 0; i-- { handler = b.middlewares[i](handler) } - if msg.Kind_() == bus.MessageKindNotification { + if types.Is[bus.Signal](msg) { if !exists { b.handlers[name] = []bus.NextFunc{handler} } else { @@ -61,15 +62,15 @@ func (b *dispatcher) Send(ctx context.Context, msg bus.Request) (any, error) { func (b *dispatcher) Notify(ctx context.Context, msgs ...bus.Signal) error { for _, msg := range msgs { - handlers := b.handlers[msg.Name_()] + value := b.handlers[msg.Name_()] - if handlers == nil { + if value == nil { continue } - hdls := handlers.([]bus.NextFunc) + handlers := value.([]bus.NextFunc) - for _, h := range hdls { + for _, h := range handlers { _, err := h(ctx, msg) if err != nil { diff --git a/pkg/bus/scheduler.go b/pkg/bus/scheduler.go index 954b135..f37d318 100644 --- a/pkg/bus/scheduler.go +++ b/pkg/bus/scheduler.go @@ -10,8 +10,6 @@ import ( "github.com/YuukanOO/seelf/pkg/storage" ) -var _ Scheduler = (*defaultScheduler)(nil) // Validate interface 
implementation - const ( JobPolicyRetryPreserveOrder JobPolicy = 1 << iota // Retry the job but preserve the order among the group JobPolicyWaitForOthersResourceID // Wait for other jobs on the same resource id to finish before processing diff --git a/pkg/storage/discriminated.go b/pkg/storage/discriminated.go index e782f1c..8c87197 100644 --- a/pkg/storage/discriminated.go +++ b/pkg/storage/discriminated.go @@ -29,6 +29,8 @@ func NewDiscriminatedMapper[T any]( } // Register a new concrete type available to the mapper. +// It will panic if a type is already registered with the same discriminator since it +// should never happen. func (m *DiscriminatedMapper[T]) Register(concreteType T, mapper DiscriminatedMapperFunc[T]) { discriminator := m.extractor(concreteType)