diff --git a/cmd/serve/jobs.go b/cmd/serve/jobs.go index 45ce879d..61aa4dfd 100644 --- a/cmd/serve/jobs.go +++ b/cmd/serve/jobs.go @@ -2,6 +2,7 @@ package serve import ( "github.com/YuukanOO/seelf/pkg/bus" + "github.com/YuukanOO/seelf/pkg/bus/embedded/get_jobs" "github.com/YuukanOO/seelf/pkg/http" "github.com/gin-gonic/gin" ) @@ -12,13 +13,13 @@ type listJobsFilters struct { func (s *server) listJobsHandler() gin.HandlerFunc { return http.Bind(s, func(ctx *gin.Context, request listJobsFilters) error { - var filters bus.GetJobsFilters + var filters get_jobs.Query if request.Page != 0 { filters.Page.Set(request.Page) } - jobs, err := s.scheduledJobsStore.GetAllJobs(ctx.Request.Context(), filters) + jobs, err := bus.Send(s.bus, ctx.Request.Context(), filters) if err != nil { return err @@ -30,11 +31,11 @@ func (s *server) listJobsHandler() gin.HandlerFunc { func (s *server) deleteJobsHandler() gin.HandlerFunc { return http.Send(s, func(ctx *gin.Context) error { - err := s.scheduledJobsStore.Delete(ctx.Request.Context(), ctx.Param("id")) + // err := s.scheduledJobsStore.Delete(ctx.Request.Context(), ctx.Param("id")) - if err != nil { - return err - } + // if err != nil { + // return err + // } return http.NoContent(ctx) }) diff --git a/cmd/serve/middlewares.go b/cmd/serve/middlewares.go index 3e0b4a48..50625909 100644 --- a/cmd/serve/middlewares.go +++ b/cmd/serve/middlewares.go @@ -7,7 +7,9 @@ import ( "strings" "time" + "github.com/YuukanOO/seelf/internal/auth/app/api_login" "github.com/YuukanOO/seelf/internal/auth/domain" + "github.com/YuukanOO/seelf/pkg/bus" httputils "github.com/YuukanOO/seelf/pkg/http" "github.com/gin-contrib/sessions" "github.com/gin-gonic/gin" @@ -25,8 +27,8 @@ var errUnauthorized = errors.New("unauthorized") func (s *server) authenticate(withApiAccess bool) gin.HandlerFunc { return func(ctx *gin.Context) { // First, try to find a user id in the encrypted session cookie - sess := sessions.Default(ctx) - uid, ok := sess.Get(userSessionKey).(string) + userSession := sessions.Default(ctx) + uid, ok := userSession.Get(userSessionKey).(string) failed := !ok || uid == "" // If it failed and api access is not allowed, return early @@ -50,15 +52,17 @@ func (s *server) authenticate(withApiAccess bool) gin.HandlerFunc { return } - id, err := s.usersReader.GetIDFromAPIKey(ctx.Request.Context(), domain.APIKey(authHeader[apiAuthPrefixLength:])) + id, err := bus.Send(s.bus, ctx.Request.Context(), api_login.Query{ + Key: authHeader[apiAuthPrefixLength:], + }) if err != nil { _ = ctx.AbortWithError(http.StatusUnauthorized, errUnauthorized) return } - // Attach the user id to the context passed down in every usecases. - ctx.Request = ctx.Request.WithContext(domain.WithUserID(ctx.Request.Context(), id)) + // Attach the user id to the context passed down in every use cases. 
+ ctx.Request = ctx.Request.WithContext(domain.WithUserID(ctx.Request.Context(), domain.UserID(id))) ctx.Next() } diff --git a/cmd/serve/server.go b/cmd/serve/server.go index d1b18f63..2b0fbf4a 100644 --- a/cmd/serve/server.go +++ b/cmd/serve/server.go @@ -13,7 +13,6 @@ import ( "time" "github.com/YuukanOO/seelf/cmd/startup" - "github.com/YuukanOO/seelf/internal/auth/domain" "github.com/YuukanOO/seelf/pkg/bus" "github.com/YuukanOO/seelf/pkg/log" "github.com/gin-contrib/sessions" @@ -39,12 +38,10 @@ type ( } server struct { - options ServerOptions - router *gin.Engine - bus bus.Dispatcher - logger log.Logger - usersReader domain.UsersReader - scheduledJobsStore bus.ScheduledJobsStore + options ServerOptions + router *gin.Engine + bus bus.Dispatcher + logger log.Logger } ) @@ -52,12 +49,10 @@ func newHttpServer(options ServerOptions, root startup.ServerRoot) *server { gin.SetMode(gin.ReleaseMode) s := &server{ - options: options, - router: gin.New(), - usersReader: root.UsersReader(), - scheduledJobsStore: root.ScheduledJobsStore(), - bus: root.Bus(), - logger: root.Logger(), + options: options, + router: gin.New(), + bus: root.Bus(), + logger: root.Logger(), } _ = s.router.SetTrustedProxies(nil) diff --git a/cmd/startup/server.go b/cmd/startup/server.go index 723ce15b..8054f252 100644 --- a/cmd/startup/server.go +++ b/cmd/startup/server.go @@ -10,14 +10,12 @@ import ( "github.com/YuukanOO/seelf/internal/deployment/app/cleanup_app" "github.com/YuukanOO/seelf/internal/deployment/app/cleanup_target" "github.com/YuukanOO/seelf/internal/deployment/app/configure_target" - "github.com/YuukanOO/seelf/internal/deployment/app/delete_app" - "github.com/YuukanOO/seelf/internal/deployment/app/delete_target" "github.com/YuukanOO/seelf/internal/deployment/app/deploy" "github.com/YuukanOO/seelf/internal/deployment/app/expose_seelf_container" deploymentdomain "github.com/YuukanOO/seelf/internal/deployment/domain" deploymentinfra "github.com/YuukanOO/seelf/internal/deployment/infra" "github.com/YuukanOO/seelf/pkg/bus" - "github.com/YuukanOO/seelf/pkg/bus/memory" + "github.com/YuukanOO/seelf/pkg/bus/embedded" bussqlite "github.com/YuukanOO/seelf/pkg/bus/sqlite" "github.com/YuukanOO/seelf/pkg/log" "github.com/YuukanOO/seelf/pkg/monad" @@ -30,8 +28,6 @@ type ( Cleanup() error Bus() bus.Dispatcher Logger() log.Logger - UsersReader() domain.UsersReader - ScheduledJobsStore() bus.ScheduledJobsStore } ServerOptions interface { @@ -47,12 +43,10 @@ type ( } serverRoot struct { - bus bus.Bus - logger log.Logger - db *sqlite.Database - usersReader domain.UsersReader - schedulerStore bus.ScheduledJobsStore - scheduler bus.RunnableScheduler + bus bus.Bus + logger log.Logger + db *sqlite.Database + scheduler *embedded.Runner } ) @@ -63,10 +57,7 @@ func Server(options ServerOptions, logger log.Logger) (ServerRoot, error) { logger: logger, } - // embedded.NewBus() - // embedded.NewScheduler() - - s.bus = memory.NewBus() + s.bus = embedded.NewBus() db, err := sqlite.Open(options.ConnectionString(), s.logger, s.bus) @@ -76,31 +67,29 @@ func Server(options ServerOptions, logger log.Logger) (ServerRoot, error) { s.db = db - s.schedulerStore = bussqlite.NewScheduledJobsStore(s.db) + jobsStore, err := bussqlite.Setup(s.bus, s.db) - if err = s.schedulerStore.Setup(); err != nil { + if err != nil { return nil, err } - s.scheduler = bus.NewScheduler(s.schedulerStore, s.logger, s.bus, options.RunnersPollInterval(), - bus.WorkerGroup{ + s.scheduler = embedded.NewRunner(jobsStore, s.logger, s.bus, options.RunnersPollInterval(), + 
embedded.WorkerGroup{ Size: options.RunnersDeploymentCount(), - Messages: []string{deploy.Command{}.Name_()}, + Requests: []bus.AsyncRequest{deploy.Command{}}, }, - bus.WorkerGroup{ + embedded.WorkerGroup{ Size: options.RunnersCleanupCount(), - Messages: []string{ - cleanup_app.Command{}.Name_(), - delete_app.Command{}.Name_(), - configure_target.Command{}.Name_(), - cleanup_target.Command{}.Name_(), - delete_target.Command{}.Name_(), + Requests: []bus.AsyncRequest{ + cleanup_app.Command{}, + cleanup_target.Command{}, + configure_target.Command{}, }, }, ) // Setup auth infrastructure - if s.usersReader, err = authinfra.Setup(s.logger, s.db, s.bus); err != nil { + if err = authinfra.Setup(s.logger, s.db, s.bus); err != nil { return nil, err } @@ -110,7 +99,7 @@ func Server(options ServerOptions, logger log.Logger) (ServerRoot, error) { s.logger, s.db, s.bus, - s.scheduler, + jobsStore, ); err != nil { return nil, err } @@ -153,7 +142,5 @@ func (s *serverRoot) Cleanup() error { return s.db.Close() } -func (s *serverRoot) Bus() bus.Dispatcher { return s.bus } -func (s *serverRoot) Logger() log.Logger { return s.logger } -func (s *serverRoot) UsersReader() domain.UsersReader { return s.usersReader } -func (s *serverRoot) ScheduledJobsStore() bus.ScheduledJobsStore { return s.schedulerStore } +func (s *serverRoot) Bus() bus.Dispatcher { return s.bus } +func (s *serverRoot) Logger() log.Logger { return s.logger } diff --git a/internal/auth/app/api_login/api_login.go b/internal/auth/app/api_login/api_login.go new file mode 100644 index 00000000..dea99346 --- /dev/null +++ b/internal/auth/app/api_login/api_login.go @@ -0,0 +1,13 @@ +package api_login + +import "github.com/YuukanOO/seelf/pkg/bus" + +type Query struct { + bus.Query[string] + + Key string +} + +func (Query) Name_() string { return "auth.command.api_login" } + +// Implemented directly by the gateway for now diff --git a/internal/auth/domain/user.go b/internal/auth/domain/user.go index f1c7dd7b..7ede4378 100644 --- a/internal/auth/domain/user.go +++ b/internal/auth/domain/user.go @@ -42,7 +42,6 @@ type ( UsersReader interface { GetAdminUser(context.Context) (User, error) - GetIDFromAPIKey(context.Context, APIKey) (UserID, error) CheckEmailAvailability(context.Context, Email, ...UserID) (EmailRequirement, error) GetByEmail(context.Context, Email) (User, error) GetByID(context.Context, UserID) (User, error) diff --git a/internal/auth/infra/mod.go b/internal/auth/infra/mod.go index 64ef3186..4ce062bb 100644 --- a/internal/auth/infra/mod.go +++ b/internal/auth/infra/mod.go @@ -9,7 +9,6 @@ import ( "github.com/YuukanOO/seelf/internal/auth/app/login" "github.com/YuukanOO/seelf/internal/auth/app/refresh_api_key" "github.com/YuukanOO/seelf/internal/auth/app/update_user" - "github.com/YuukanOO/seelf/internal/auth/domain" "github.com/YuukanOO/seelf/internal/auth/infra/crypto" authsqlite "github.com/YuukanOO/seelf/internal/auth/infra/sqlite" ) @@ -19,9 +18,9 @@ func Setup( logger log.Logger, db *sqlite.Database, b bus.Bus, -) (domain.UsersReader, error) { +) error { usersStore := authsqlite.NewUsersStore(db) - authQueryHandler := authsqlite.NewGateway(db) + gateway := authsqlite.NewGateway(db) passwordHasher := crypto.NewBCryptHasher() keyGenerator := crypto.NewKeyGenerator() @@ -30,7 +29,8 @@ func Setup( bus.Register(b, create_first_account.Handler(usersStore, usersStore, passwordHasher, keyGenerator)) bus.Register(b, update_user.Handler(usersStore, usersStore, passwordHasher)) bus.Register(b, refresh_api_key.Handler(usersStore, usersStore, 
keyGenerator)) - bus.Register(b, authQueryHandler.GetProfile) + bus.Register(b, gateway.GetIDFromAPIKey) + bus.Register(b, gateway.GetProfile) - return usersStore, db.Migrate(authsqlite.Migrations) + return db.Migrate(authsqlite.Migrations) } diff --git a/internal/auth/infra/sqlite/gateway.go b/internal/auth/infra/sqlite/gateway.go index 180f26b4..ca70c42f 100644 --- a/internal/auth/infra/sqlite/gateway.go +++ b/internal/auth/infra/sqlite/gateway.go @@ -3,21 +3,22 @@ package sqlite import ( "context" + "github.com/YuukanOO/seelf/internal/auth/app/api_login" "github.com/YuukanOO/seelf/internal/auth/app/get_profile" "github.com/YuukanOO/seelf/pkg/storage" "github.com/YuukanOO/seelf/pkg/storage/sqlite" "github.com/YuukanOO/seelf/pkg/storage/sqlite/builder" ) -type gateway struct { +type Gateway struct { db *sqlite.Database } -func NewGateway(db *sqlite.Database) *gateway { - return &gateway{db} +func NewGateway(db *sqlite.Database) *Gateway { + return &Gateway{db} } -func (s *gateway) GetProfile(ctx context.Context, q get_profile.Query) (get_profile.Profile, error) { +func (s *Gateway) GetProfile(ctx context.Context, q get_profile.Query) (get_profile.Profile, error) { return builder. Query[get_profile.Profile](` SELECT @@ -30,6 +31,12 @@ func (s *gateway) GetProfile(ctx context.Context, q get_profile.Query) (get_prof One(s.db, ctx, profileMapper) } +func (s *Gateway) GetIDFromAPIKey(ctx context.Context, c api_login.Query) (string, error) { + return builder. + Query[string]("SELECT id FROM users WHERE api_key = ?", c.Key). + Extract(s.db, ctx) +} + func profileMapper(row storage.Scanner) (p get_profile.Profile, err error) { err = row.Scan( &p.ID, diff --git a/internal/auth/infra/sqlite/users.go b/internal/auth/infra/sqlite/users.go index 0149f604..b2d00ecc 100644 --- a/internal/auth/infra/sqlite/users.go +++ b/internal/auth/infra/sqlite/users.go @@ -77,14 +77,8 @@ func (s *usersStore) GetByEmail(ctx context.Context, email domain.Email) (u doma One(s.db, ctx, domain.UserFrom) } -func (s *usersStore) GetIDFromAPIKey(ctx context.Context, key domain.APIKey) (domain.UserID, error) { - return builder. - Query[domain.UserID]("SELECT id FROM users WHERE api_key = ?", key). - Extract(s.db, ctx) -} - func (s *usersStore) Write(c context.Context, users ...*domain.User) error { - return sqlite.WriteAndDispatch(s.db, c, users, func(ctx context.Context, e event.Event) error { + return sqlite.WriteEvents(s.db, c, users, func(ctx context.Context, e event.Event) error { switch evt := e.(type) { case domain.UserRegistered: return builder. diff --git a/internal/deployment/app/cleanup_app/cleanup_app.go b/internal/deployment/app/cleanup_app/cleanup_app.go index 31c6b763..6ef6aed7 100644 --- a/internal/deployment/app/cleanup_app/cleanup_app.go +++ b/internal/deployment/app/cleanup_app/cleanup_app.go @@ -15,7 +15,7 @@ import ( // It will be skipped if the target is being deleted or if no successful deployment has been made // in the interval represented by the `From` and `To` parameters. 
type Command struct { - bus.Command[bus.UnitType] + bus.AsyncCommand AppID string `json:"app_id"` TargetID string `json:"target_id"` @@ -26,21 +26,24 @@ type Command struct { func (Command) Name_() string { return "deployment.command.cleanup_app" } func (c Command) ResourceID() string { return c.AppID } +func (c Command) Group() string { return bus.Group(c.AppID, c.Environment, c.TargetID) } func Handler( reader domain.TargetsReader, deploymentsReader domain.DeploymentsReader, + appsReader domain.AppsReader, + appsWriter domain.AppsWriter, provider domain.Provider, -) bus.RequestHandler[bus.UnitType, Command] { - return func(ctx context.Context, cmd Command) (bus.UnitType, error) { - target, err := reader.GetByID(ctx, domain.TargetID(cmd.TargetID)) +) bus.RequestHandler[bus.AsyncResult, Command] { + return func(ctx context.Context, cmd Command) (result bus.AsyncResult, finalErr error) { + target, finalErr := reader.GetByID(ctx, domain.TargetID(cmd.TargetID)) - if err != nil { - if errors.Is(err, apperr.ErrNotFound) { - return bus.Unit, nil + if finalErr != nil { + if errors.Is(finalErr, apperr.ErrNotFound) { + finalErr = nil } - return bus.Unit, err + return } var ( @@ -51,20 +54,49 @@ func Handler( successful domain.HasSuccessfulDeploymentsOnAppTargetEnv ) - if interval, err = shared.NewTimeInterval(cmd.From, cmd.To); err != nil { - return bus.Unit, err + if interval, finalErr = shared.NewTimeInterval(cmd.From, cmd.To); finalErr != nil { + return } - if runningOrPending, successful, err = deploymentsReader.HasDeploymentsOnAppTargetEnv(ctx, appid, target.ID(), env, interval); err != nil { - return bus.Unit, err + if runningOrPending, successful, finalErr = deploymentsReader.HasDeploymentsOnAppTargetEnv(ctx, appid, target.ID(), env, interval); finalErr != nil { + return } - strategy, err := target.AppCleanupStrategy(runningOrPending, successful) + strategy, finalErr := target.CanAppBeCleaned(runningOrPending, successful) - if err != nil { - return bus.Unit, err + if finalErr != nil { + if errors.Is(finalErr, domain.ErrTargetConfigurationInProgress) || + errors.Is(finalErr, domain.ErrRunningOrPendingDeployments) { + finalErr = nil + result = bus.AsyncResultDelay + } + + return } - return bus.Unit, provider.Cleanup(ctx, appid, target, env, strategy) + defer func() { + if finalErr != nil { + return + } + + app, err := appsReader.GetByID(ctx, appid) + + if err != nil { + // Application does not exist anymore, nothing specific to do + if errors.Is(err, apperr.ErrNotFound) { + return + } + + finalErr = err + return + } + + app.CleanedUp(env, target.ID()) + + finalErr = appsWriter.Write(ctx, &app) + }() + + finalErr = provider.Cleanup(ctx, appid, target, env, strategy) + return } } diff --git a/internal/deployment/app/cleanup_app/cleanup_app_test.go b/internal/deployment/app/cleanup_app/cleanup_app_test.go index 263a8b43..4568a29b 100644 --- a/internal/deployment/app/cleanup_app/cleanup_app_test.go +++ b/internal/deployment/app/cleanup_app/cleanup_app_test.go @@ -17,11 +17,11 @@ import ( func Test_CleanupApp(t *testing.T) { arrange := func(tb testing.TB, provider domain.Provider, seed ...fixture.SeedBuilder) ( - bus.RequestHandler[bus.UnitType, cleanup_app.Command], + bus.RequestHandler[bus.AsyncResult, cleanup_app.Command], context.Context, ) { context := fixture.PrepareDatabase(tb, seed...) 
- return cleanup_app.Handler(context.TargetsStore, context.DeploymentsStore, provider), context.Context + return cleanup_app.Handler(context.TargetsStore, context.DeploymentsStore, context.AppsStore, context.AppsStore, provider), context.Context } t.Run("should fail silently if the target does not exist anymore", func(t *testing.T) { @@ -31,11 +31,11 @@ func Test_CleanupApp(t *testing.T) { r, err := handler(ctx, cleanup_app.Command{}) assert.Nil(t, err) - assert.Equal(t, bus.Unit, r) + assert.Equal(t, bus.AsyncResultProcessed, r) assert.False(t, provider.called) }) - t.Run("should fail if at least one deployment is running", func(t *testing.T) { + t.Run("should delay if at least one deployment is running", func(t *testing.T) { var provider mockProvider user := authfixture.User() target := fixture.Target(fixture.WithTargetCreatedBy(user.ID())) @@ -56,7 +56,7 @@ func Test_CleanupApp(t *testing.T) { fixture.WithDeployments(&deployment), ) - _, err := handler(ctx, cleanup_app.Command{ + result, err := handler(ctx, cleanup_app.Command{ TargetID: string(target.ID()), AppID: string(app.ID()), Environment: string(domain.Production), @@ -64,11 +64,12 @@ func Test_CleanupApp(t *testing.T) { To: deployment.Requested().At().Add(1 * time.Hour), }) - assert.ErrorIs(t, domain.ErrRunningOrPendingDeployments, err) + assert.Equal(t, bus.AsyncResultDelay, result) + assert.Nil(t, err) assert.False(t, provider.called) }) - t.Run("should fail if the target is configuring and at least one successful deployment has been made", func(t *testing.T) { + t.Run("should delay if the target is configuring and at least one successful deployment has been made", func(t *testing.T) { var provider mockProvider user := authfixture.User() target := fixture.Target(fixture.WithTargetCreatedBy(user.ID())) @@ -90,7 +91,7 @@ func Test_CleanupApp(t *testing.T) { fixture.WithDeployments(&deployment), ) - _, err := handler(ctx, cleanup_app.Command{ + result, err := handler(ctx, cleanup_app.Command{ TargetID: string(target.ID()), AppID: string(app.ID()), Environment: string(domain.Production), @@ -98,7 +99,8 @@ func Test_CleanupApp(t *testing.T) { To: deployment.Requested().At().Add(1 * time.Hour), }) - assert.ErrorIs(t, domain.ErrTargetConfigurationInProgress, err) + assert.Equal(t, bus.AsyncResultDelay, result) + assert.Nil(t, err) assert.False(t, provider.called) }) @@ -107,7 +109,7 @@ func Test_CleanupApp(t *testing.T) { user := authfixture.User() target := fixture.Target(fixture.WithTargetCreatedBy(user.ID())) target.Configured(target.CurrentVersion(), nil, nil) - assert.Nil(t, target.RequestCleanup(false, "uid")) + assert.Nil(t, target.RequestDelete(false, "uid")) handler, ctx := arrange(t, &provider, fixture.WithUsers(&user), diff --git a/internal/deployment/app/cleanup_app/on_app_cleanup_requested.go b/internal/deployment/app/cleanup_app/on_app_cleanup_requested.go index 2ba7aee5..726a091f 100644 --- a/internal/deployment/app/cleanup_app/on_app_cleanup_requested.go +++ b/internal/deployment/app/cleanup_app/on_app_cleanup_requested.go @@ -19,7 +19,7 @@ func OnAppCleanupRequestedHandler(scheduler bus.Scheduler) bus.SignalHandler[dom TargetID: string(evt.ProductionConfig.Target()), From: evt.ProductionConfig.Version(), To: now, - }, bus.WithPolicy(bus.JobPolicyCancellable)); err != nil { + }); err != nil { return err } @@ -29,6 +29,6 @@ func OnAppCleanupRequestedHandler(scheduler bus.Scheduler) bus.SignalHandler[dom TargetID: string(evt.StagingConfig.Target()), From: evt.StagingConfig.Version(), To: now, - }, 
bus.WithPolicy(bus.JobPolicyCancellable)) + }) } } diff --git a/internal/deployment/app/cleanup_app/on_app_deleted.go b/internal/deployment/app/cleanup_app/on_app_deleted.go new file mode 100644 index 00000000..6a63f128 --- /dev/null +++ b/internal/deployment/app/cleanup_app/on_app_deleted.go @@ -0,0 +1,14 @@ +package cleanup_app + +import ( + "context" + + "github.com/YuukanOO/seelf/internal/deployment/domain" + "github.com/YuukanOO/seelf/pkg/bus" +) + +func OnAppDeletedHandler(artifactManager domain.ArtifactManager) bus.SignalHandler[domain.AppDeleted] { + return func(ctx context.Context, evt domain.AppDeleted) error { + return artifactManager.Cleanup(ctx, evt.ID) + } +} diff --git a/internal/deployment/app/cleanup_app/on_app_env_changed.go b/internal/deployment/app/cleanup_app/on_app_env_changed.go index 3b141380..a3eb8433 100644 --- a/internal/deployment/app/cleanup_app/on_app_env_changed.go +++ b/internal/deployment/app/cleanup_app/on_app_env_changed.go @@ -20,6 +20,6 @@ func OnAppEnvChangedHandler(scheduler bus.Scheduler) bus.SignalHandler[domain.Ap Environment: string(evt.Environment), From: evt.OldConfig.Version(), To: time.Now().UTC(), - }, bus.WithPolicy(bus.JobPolicyCancellable)) + }) } } diff --git a/internal/deployment/app/cleanup_app/on_job_dismissed.go b/internal/deployment/app/cleanup_app/on_job_dismissed.go new file mode 100644 index 00000000..e3fcc4a4 --- /dev/null +++ b/internal/deployment/app/cleanup_app/on_job_dismissed.go @@ -0,0 +1,39 @@ +package cleanup_app + +import ( + "context" + "errors" + + "github.com/YuukanOO/seelf/internal/deployment/domain" + "github.com/YuukanOO/seelf/pkg/apperr" + "github.com/YuukanOO/seelf/pkg/bus" + "github.com/YuukanOO/seelf/pkg/bus/embedded" +) + +func OnJobDismissedHandler( + reader domain.AppsReader, + writer domain.AppsWriter, +) bus.SignalHandler[embedded.JobDismissed] { + return func(ctx context.Context, evt embedded.JobDismissed) error { + cmd, isCleanupJob := evt.Command.(Command) + + if !isCleanupJob { + return nil + } + + app, err := reader.GetByID(ctx, domain.AppID(cmd.AppID)) + + if err != nil { + // App deleted, no need to go further + if errors.Is(err, apperr.ErrNotFound) { + return nil + } + + return err + } + + app.CleanedUp(domain.Environment(cmd.Environment), domain.TargetID(cmd.TargetID)) + + return writer.Write(ctx, &app) + } +} diff --git a/internal/deployment/app/cleanup_target/cleanup_target.go b/internal/deployment/app/cleanup_target/cleanup_target.go index c90411ac..34cc5ec6 100644 --- a/internal/deployment/app/cleanup_target/cleanup_target.go +++ b/internal/deployment/app/cleanup_target/cleanup_target.go @@ -11,43 +11,71 @@ import ( // Cleanup a target and all its associated resources. 
type Command struct { - bus.Command[bus.UnitType] + bus.AsyncCommand ID string `json:"id"` } func (Command) Name_() string { return "deployment.command.cleanup_target" } func (c Command) ResourceID() string { return c.ID } +func (c Command) Group() string { return c.ID } func Handler( reader domain.TargetsReader, + writer domain.TargetsWriter, deploymentsReader domain.DeploymentsReader, provider domain.Provider, -) bus.RequestHandler[bus.UnitType, Command] { - return func(ctx context.Context, cmd Command) (bus.UnitType, error) { +) bus.RequestHandler[bus.AsyncResult, Command] { + return func(ctx context.Context, cmd Command) (result bus.AsyncResult, finalErr error) { target, err := reader.GetByID(ctx, domain.TargetID(cmd.ID)) if err != nil { // If the target doesn't exist anymore, may be it has been processed by another job in rare case, so just returns if errors.Is(err, apperr.ErrNotFound) { - return bus.Unit, nil + return result, nil } - return bus.Unit, err + return result, err } ongoing, err := deploymentsReader.HasRunningOrPendingDeploymentsOnTarget(ctx, target.ID()) if err != nil { - return bus.Unit, err + return result, err } - strategy, err := target.CleanupStrategy(ongoing) + strategy, err := target.CanBeCleaned(ongoing) if err != nil { - return bus.Unit, err + if errors.Is(err, domain.ErrRunningOrPendingDeployments) { + return bus.AsyncResultDelay, nil + } + + return result, err } - return bus.Unit, provider.CleanupTarget(ctx, target, strategy) + defer func() { + if finalErr != nil { + return + } + + if target, err = reader.GetByID(ctx, target.ID()); err != nil { + if errors.Is(err, apperr.ErrNotFound) { + return + } + + finalErr = err + return + } + + if finalErr = target.CleanedUp(); finalErr != nil { + return + } + + finalErr = writer.Write(ctx, &target) + }() + + finalErr = provider.CleanupTarget(ctx, target, strategy) + return } } diff --git a/internal/deployment/app/cleanup_target/cleanup_target_test.go b/internal/deployment/app/cleanup_target/cleanup_target_test.go index a0af545e..7f2a12d4 100644 --- a/internal/deployment/app/cleanup_target/cleanup_target_test.go +++ b/internal/deployment/app/cleanup_target/cleanup_target_test.go @@ -11,108 +11,97 @@ import ( "github.com/YuukanOO/seelf/internal/deployment/fixture" "github.com/YuukanOO/seelf/pkg/assert" "github.com/YuukanOO/seelf/pkg/bus" + "github.com/YuukanOO/seelf/pkg/bus/spy" ) func Test_CleanupTarget(t *testing.T) { arrange := func(tb testing.TB, provider domain.Provider, seed ...fixture.SeedBuilder) ( - bus.RequestHandler[bus.UnitType, cleanup_target.Command], + bus.RequestHandler[bus.AsyncResult, cleanup_target.Command], context.Context, + spy.Dispatcher, ) { context := fixture.PrepareDatabase(tb, seed...) 
- return cleanup_target.Handler(context.TargetsStore, context.DeploymentsStore, provider), context.Context + return cleanup_target.Handler(context.TargetsStore, context.TargetsStore, context.DeploymentsStore, provider), context.Context, context.Dispatcher } t.Run("should silently fail if the target does not exist anymore", func(t *testing.T) { var provider dummyProvider - handler, ctx := arrange(t, &provider) + handler, ctx, _ := arrange(t, &provider) - _, err := handler(ctx, cleanup_target.Command{}) + r, err := handler(ctx, cleanup_target.Command{}) assert.Nil(t, err) + assert.Equal(t, bus.AsyncResultProcessed, r) assert.False(t, provider.called) }) - t.Run("should skip the cleanup if the target has never been configured correctly", func(t *testing.T) { - var provider dummyProvider - user := authfixture.User() - target := fixture.Target(fixture.WithTargetCreatedBy(user.ID())) - target.Configured(target.CurrentVersion(), nil, errors.New("configuration_failed")) - handler, ctx := arrange(t, &provider, - fixture.WithUsers(&user), - fixture.WithTargets(&target), - ) - - _, err := handler(ctx, cleanup_target.Command{ - ID: string(target.ID()), - }) - - assert.Nil(t, err) - assert.False(t, provider.called) - }) - - t.Run("should fail if a deployment is running on this target", func(t *testing.T) { + t.Run("should fail if the target has not been requested for cleanup", func(t *testing.T) { var provider dummyProvider user := authfixture.User() target := fixture.Target(fixture.WithTargetCreatedBy(user.ID())) - app := fixture.App(fixture.WithAppCreatedBy(user.ID()), - fixture.WithEnvironmentConfig( - domain.NewEnvironmentConfig(target.ID()), - domain.NewEnvironmentConfig(target.ID()), - )) - deployment := fixture.Deployment(fixture.FromApp(app), - fixture.ForEnvironment(domain.Production), - fixture.WithDeploymentRequestedBy(user.ID())) - assert.Nil(t, deployment.HasStarted()) - handler, ctx := arrange(t, &provider, + handler, ctx, _ := arrange(t, &provider, fixture.WithUsers(&user), fixture.WithTargets(&target), - fixture.WithApps(&app), - fixture.WithDeployments(&deployment), ) - _, err := handler(ctx, cleanup_target.Command{ + r, err := handler(ctx, cleanup_target.Command{ ID: string(target.ID()), }) - assert.ErrorIs(t, domain.ErrRunningOrPendingDeployments, err) + assert.ErrorIs(t, domain.ErrTargetCleanupNeeded, err) + assert.Equal(t, bus.AsyncResultProcessed, r) assert.False(t, provider.called) }) - t.Run("should fail if being configured", func(t *testing.T) { + t.Run("should skip the cleanup if the target has never been configured correctly", func(t *testing.T) { var provider dummyProvider user := authfixture.User() target := fixture.Target(fixture.WithTargetCreatedBy(user.ID())) - handler, ctx := arrange(t, &provider, + assert.Nil(t, target.Configured(target.CurrentVersion(), nil, errors.New("configuration_failed"))) + assert.Nil(t, target.RequestDelete(false, user.ID())) + handler, ctx, _ := arrange(t, &provider, fixture.WithUsers(&user), fixture.WithTargets(&target), ) - _, err := handler(ctx, cleanup_target.Command{ + r, err := handler(ctx, cleanup_target.Command{ ID: string(target.ID()), }) - assert.ErrorIs(t, domain.ErrTargetConfigurationInProgress, err) + assert.Nil(t, err) + assert.Equal(t, bus.AsyncResultProcessed, r) assert.False(t, provider.called) }) - t.Run("should fail if has been configured in the past but is now unreachable", func(t *testing.T) { + t.Run("should be delayed if a deployment is running on this target", func(t *testing.T) { var provider dummyProvider user := 
authfixture.User() target := fixture.Target(fixture.WithTargetCreatedBy(user.ID())) - target.Configured(target.CurrentVersion(), nil, nil) - assert.Nil(t, target.Reconfigure()) - target.Configured(target.CurrentVersion(), nil, errors.New("configuration_failed")) - handler, ctx := arrange(t, &provider, + assert.Nil(t, target.Configured(target.CurrentVersion(), nil, nil)) + assert.Nil(t, target.RequestDelete(false, user.ID())) + app := fixture.App(fixture.WithAppCreatedBy(user.ID()), + fixture.WithEnvironmentConfig( + domain.NewEnvironmentConfig(target.ID()), + domain.NewEnvironmentConfig(target.ID()), + )) + deployment := fixture.Deployment(fixture.FromApp(app), + fixture.ForEnvironment(domain.Production), + fixture.WithDeploymentRequestedBy(user.ID())) + assert.Nil(t, deployment.HasStarted()) + handler, ctx, _ := arrange(t, &provider, fixture.WithUsers(&user), fixture.WithTargets(&target), + fixture.WithApps(&app), + fixture.WithDeployments(&deployment), ) - _, err := handler(ctx, cleanup_target.Command{ + r, err := handler(ctx, cleanup_target.Command{ ID: string(target.ID()), }) - assert.ErrorIs(t, domain.ErrTargetConfigurationFailed, err) + assert.Nil(t, err) + assert.Equal(t, bus.AsyncResultDelay, r) assert.False(t, provider.called) }) @@ -120,8 +109,9 @@ func Test_CleanupTarget(t *testing.T) { var provider dummyProvider user := authfixture.User() target := fixture.Target(fixture.WithTargetCreatedBy(user.ID())) - target.Configured(target.CurrentVersion(), nil, nil) - handler, ctx := arrange(t, &provider, + assert.Nil(t, target.Configured(target.CurrentVersion(), nil, nil)) + assert.Nil(t, target.RequestDelete(false, user.ID())) + handler, ctx, dispatcher := arrange(t, &provider, fixture.WithUsers(&user), fixture.WithTargets(&target), ) @@ -132,6 +122,11 @@ func Test_CleanupTarget(t *testing.T) { assert.Nil(t, err) assert.True(t, provider.called) + assert.HasLength(t, 1, dispatcher.Signals()) + deleted := assert.Is[domain.TargetDeleted](t, dispatcher.Signals()[0]) + assert.Equal(t, domain.TargetDeleted{ + ID: target.ID(), + }, deleted) }) } diff --git a/internal/deployment/app/cleanup_target/on_job_dismissed.go b/internal/deployment/app/cleanup_target/on_job_dismissed.go new file mode 100644 index 00000000..01eb9f0c --- /dev/null +++ b/internal/deployment/app/cleanup_target/on_job_dismissed.go @@ -0,0 +1,39 @@ +package cleanup_target + +import ( + "context" + "errors" + + "github.com/YuukanOO/seelf/internal/deployment/domain" + "github.com/YuukanOO/seelf/pkg/apperr" + "github.com/YuukanOO/seelf/pkg/bus" + "github.com/YuukanOO/seelf/pkg/bus/embedded" +) + +func OnJobDismissedHandler( + reader domain.TargetsReader, + writer domain.TargetsWriter, +) bus.SignalHandler[embedded.JobDismissed] { + return func(ctx context.Context, evt embedded.JobDismissed) error { + if _, isCleanupJob := evt.Command.(Command); !isCleanupJob { + return nil + } + + target, err := reader.GetByID(ctx, domain.TargetID(evt.Command.ResourceID())) + + if err != nil { + // Target deleted, no need to go further + if errors.Is(err, apperr.ErrNotFound) { + return nil + } + + return err + } + + if err = target.CleanedUp(); err != nil { + return err + } + + return writer.Write(ctx, &target) + } +} diff --git a/internal/deployment/app/cleanup_target/on_target_cleanup_requested.go b/internal/deployment/app/cleanup_target/on_target_cleanup_requested.go index 95689ffc..069bd4d1 100644 --- a/internal/deployment/app/cleanup_target/on_target_cleanup_requested.go +++ 
b/internal/deployment/app/cleanup_target/on_target_cleanup_requested.go @@ -11,6 +11,6 @@ func OnTargetCleanupRequestedHandler(scheduler bus.Scheduler) bus.SignalHandler[ return func(ctx context.Context, evt domain.TargetCleanupRequested) error { return scheduler.Queue(ctx, Command{ ID: string(evt.ID), - }, bus.WithPolicy(bus.JobPolicyCancellable)) + }) } } diff --git a/internal/deployment/app/cleanup_target/on_target_deleted.go b/internal/deployment/app/cleanup_target/on_target_deleted.go new file mode 100644 index 00000000..2de2ed88 --- /dev/null +++ b/internal/deployment/app/cleanup_target/on_target_deleted.go @@ -0,0 +1,14 @@ +package cleanup_target + +import ( + "context" + + "github.com/YuukanOO/seelf/internal/deployment/domain" + "github.com/YuukanOO/seelf/pkg/bus" +) + +func OnTargetDeletedHandler(provider domain.Provider) bus.SignalHandler[domain.TargetDeleted] { + return func(ctx context.Context, evt domain.TargetDeleted) error { + return provider.RemoveConfiguration(ctx, evt.ID) + } +} diff --git a/internal/deployment/app/configure_target/configure_target.go b/internal/deployment/app/configure_target/configure_target.go index 71a209cd..feffd270 100644 --- a/internal/deployment/app/configure_target/configure_target.go +++ b/internal/deployment/app/configure_target/configure_target.go @@ -11,7 +11,7 @@ import ( ) type Command struct { - bus.Command[bus.UnitType] + bus.AsyncCommand ID string `json:"id"` Version time.Time `json:"version"` @@ -19,26 +19,27 @@ type Command struct { func (Command) Name_() string { return "deployment.command.configure_target" } func (c Command) ResourceID() string { return c.ID } +func (c Command) Group() string { return c.ID } func Handler( reader domain.TargetsReader, writer domain.TargetsWriter, provider domain.Provider, -) bus.RequestHandler[bus.UnitType, Command] { - return func(ctx context.Context, cmd Command) (result bus.UnitType, finalErr error) { - target, err := reader.GetByID(ctx, domain.TargetID(cmd.ID)) +) bus.RequestHandler[bus.AsyncResult, Command] { + return func(ctx context.Context, cmd Command) (result bus.AsyncResult, finalErr error) { + target, finalErr := reader.GetByID(ctx, domain.TargetID(cmd.ID)) - if err != nil { + if finalErr != nil { // Target not found, already deleted - if errors.Is(err, apperr.ErrNotFound) { - return bus.Unit, nil + if errors.Is(finalErr, apperr.ErrNotFound) { + finalErr = nil } - return bus.Unit, err + return } if target.IsOutdated(cmd.Version) { - return bus.Unit, nil + return } var assigned domain.TargetEntrypointsAssigned @@ -46,9 +47,9 @@ func Handler( // Same as for the deployment, since the configuration can take some time, retrieve the latest // target version before updating its state. 
defer func() { - target, err = reader.GetByID(ctx, domain.TargetID(cmd.ID)) + var err error - if err != nil { + if target, err = reader.GetByID(ctx, target.ID()); err != nil { // Target not found, already deleted if errors.Is(err, apperr.ErrNotFound) { err = nil @@ -58,13 +59,16 @@ func Handler( return } - target.Configured(cmd.Version, assigned, finalErr) + if err = target.Configured(cmd.Version, assigned, finalErr); err != nil && + !errors.Is(err, domain.ErrTargetConfigurationOutdated) { + finalErr = err + return + } finalErr = writer.Write(ctx, &target) }() assigned, finalErr = provider.Setup(ctx, target) - return } } diff --git a/internal/deployment/app/configure_target/configure_target_test.go b/internal/deployment/app/configure_target/configure_target_test.go index c7b85db0..1bb9dd00 100644 --- a/internal/deployment/app/configure_target/configure_target_test.go +++ b/internal/deployment/app/configure_target/configure_target_test.go @@ -18,7 +18,7 @@ import ( func Test_ConfigureTarget(t *testing.T) { arrange := func(tb testing.TB, provider domain.Provider, seed ...fixture.SeedBuilder) ( - bus.RequestHandler[bus.UnitType, configure_target.Command], + bus.RequestHandler[bus.AsyncResult, configure_target.Command], spy.Dispatcher, ) { context := fixture.PrepareDatabase(tb, seed...) diff --git a/internal/deployment/app/configure_target/on_target_created.go b/internal/deployment/app/configure_target/on_target_created.go index c7d46500..ae980cc6 100644 --- a/internal/deployment/app/configure_target/on_target_created.go +++ b/internal/deployment/app/configure_target/on_target_created.go @@ -3,7 +3,6 @@ package configure_target import ( "context" - "github.com/YuukanOO/seelf/internal/deployment/app" "github.com/YuukanOO/seelf/internal/deployment/domain" "github.com/YuukanOO/seelf/pkg/bus" ) @@ -13,6 +12,6 @@ func OnTargetCreatedHandler(scheduler bus.Scheduler) bus.SignalHandler[domain.Ta return scheduler.Queue(ctx, Command{ ID: string(evt.ID), Version: evt.State.Version(), - }, bus.WithGroup(app.TargetConfigurationGroup(evt.ID)), bus.WithPolicy(bus.JobPolicyMerge)) + }) } } diff --git a/internal/deployment/app/configure_target/on_target_state_changed.go b/internal/deployment/app/configure_target/on_target_state_changed.go index 3c2a55e7..ad29507a 100644 --- a/internal/deployment/app/configure_target/on_target_state_changed.go +++ b/internal/deployment/app/configure_target/on_target_state_changed.go @@ -3,7 +3,6 @@ package configure_target import ( "context" - "github.com/YuukanOO/seelf/internal/deployment/app" "github.com/YuukanOO/seelf/internal/deployment/domain" "github.com/YuukanOO/seelf/pkg/bus" ) @@ -17,6 +16,6 @@ func OnTargetStateChangedHandler(scheduler bus.Scheduler) bus.SignalHandler[doma return scheduler.Queue(ctx, Command{ ID: string(evt.ID), Version: evt.State.Version(), - }, bus.WithGroup(app.TargetConfigurationGroup(evt.ID)), bus.WithPolicy(bus.JobPolicyMerge)) + }) } } diff --git a/internal/deployment/app/create_app/create_app_test.go b/internal/deployment/app/create_app/create_app_test.go index c63c4aee..2c3c0ce5 100644 --- a/internal/deployment/app/create_app/create_app_test.go +++ b/internal/deployment/app/create_app/create_app_test.go @@ -128,6 +128,10 @@ func Test_CreateApp(t *testing.T) { Production: created.Production, Staging: created.Staging, Created: shared.ActionFrom(user.ID(), assert.NotZero(t, created.Created.At())), + History: domain.AppTargetHistory{ + domain.Production: []domain.TargetID{created.Production.Target()}, + domain.Staging: 
[]domain.TargetID{created.Staging.Target()}, + }, }, created) assert.Equal(t, target.ID(), created.Production.Target()) assert.Equal(t, target.ID(), created.Staging.Target()) diff --git a/internal/deployment/app/delete_app/delete_app.go b/internal/deployment/app/delete_app/delete_app.go deleted file mode 100644 index f6420102..00000000 --- a/internal/deployment/app/delete_app/delete_app.go +++ /dev/null @@ -1,49 +0,0 @@ -package delete_app - -import ( - "context" - "errors" - - "github.com/YuukanOO/seelf/internal/deployment/domain" - "github.com/YuukanOO/seelf/pkg/apperr" - "github.com/YuukanOO/seelf/pkg/bus" -) - -// Cleanup an application artifacts, images, networks, volumes and so on... -type Command struct { - bus.Command[bus.UnitType] - - ID string `json:"id"` -} - -func (Command) Name_() string { return "deployment.command.delete_app" } -func (c Command) ResourceID() string { return c.ID } - -func Handler( - reader domain.AppsReader, - writer domain.AppsWriter, - artifactManager domain.ArtifactManager, -) bus.RequestHandler[bus.UnitType, Command] { - return func(ctx context.Context, cmd Command) (bus.UnitType, error) { - app, err := reader.GetByID(ctx, domain.AppID(cmd.ID)) - - if err != nil { - if errors.Is(err, apperr.ErrNotFound) { - return bus.Unit, nil - } - - return bus.Unit, err - } - - // Resources have been cleaned up here thanks to the scheduler policy - if err = app.Delete(true); err != nil { - return bus.Unit, err - } - - if err = artifactManager.Cleanup(ctx, app.ID()); err != nil { - return bus.Unit, err - } - - return bus.Unit, writer.Write(ctx, &app) - } -} diff --git a/internal/deployment/app/delete_app/delete_app_test.go b/internal/deployment/app/delete_app/delete_app_test.go deleted file mode 100644 index e5adab8b..00000000 --- a/internal/deployment/app/delete_app/delete_app_test.go +++ /dev/null @@ -1,96 +0,0 @@ -package delete_app_test - -import ( - "context" - "testing" - - authfixture "github.com/YuukanOO/seelf/internal/auth/fixture" - "github.com/YuukanOO/seelf/internal/deployment/app/delete_app" - "github.com/YuukanOO/seelf/internal/deployment/domain" - "github.com/YuukanOO/seelf/internal/deployment/fixture" - "github.com/YuukanOO/seelf/internal/deployment/infra/artifact" - "github.com/YuukanOO/seelf/pkg/assert" - "github.com/YuukanOO/seelf/pkg/bus" - "github.com/YuukanOO/seelf/pkg/bus/spy" - "github.com/YuukanOO/seelf/pkg/log" -) - -func Test_DeleteApp(t *testing.T) { - - arrange := func(tb testing.TB, seed ...fixture.SeedBuilder) ( - bus.RequestHandler[bus.UnitType, delete_app.Command], - spy.Dispatcher, - ) { - context := fixture.PrepareDatabase(tb, seed...) 
- logger, _ := log.NewLogger() - artifactManager := artifact.NewLocal(context.Config, logger) - return delete_app.Handler(context.AppsStore, context.AppsStore, artifactManager), context.Dispatcher - } - - t.Run("should fail silently if the application does not exist anymore", func(t *testing.T) { - handler, dispatcher := arrange(t) - - r, err := handler(context.Background(), delete_app.Command{ - ID: "some-id", - }) - - assert.Nil(t, err) - assert.Equal(t, bus.Unit, r) - assert.HasLength(t, 0, dispatcher.Signals()) - }) - - t.Run("should fail if the application cleanup has not been requested first", func(t *testing.T) { - user := authfixture.User() - target := fixture.Target(fixture.WithTargetCreatedBy(user.ID())) - app := fixture.App( - fixture.WithAppCreatedBy(user.ID()), - fixture.WithEnvironmentConfig( - domain.NewEnvironmentConfig(target.ID()), - domain.NewEnvironmentConfig(target.ID()), - ), - ) - handler, _ := arrange(t, - fixture.WithUsers(&user), - fixture.WithTargets(&target), - fixture.WithApps(&app), - ) - - r, err := handler(context.Background(), delete_app.Command{ - ID: string(app.ID()), - }) - - assert.ErrorIs(t, domain.ErrAppCleanupNeeded, err) - assert.Equal(t, bus.Unit, r) - }) - - t.Run("should succeed if everything is good", func(t *testing.T) { - user := authfixture.User() - target := fixture.Target(fixture.WithTargetCreatedBy(user.ID())) - app := fixture.App( - fixture.WithAppCreatedBy(user.ID()), - fixture.WithEnvironmentConfig( - domain.NewEnvironmentConfig(target.ID()), - domain.NewEnvironmentConfig(target.ID()), - ), - ) - app.RequestCleanup(user.ID()) - handler, dispatcher := arrange(t, - fixture.WithUsers(&user), - fixture.WithTargets(&target), - fixture.WithApps(&app), - ) - - r, err := handler(context.Background(), delete_app.Command{ - ID: string(app.ID()), - }) - - assert.Nil(t, err) - assert.Equal(t, bus.Unit, r) - assert.HasLength(t, 1, dispatcher.Signals()) - - deleted := assert.Is[domain.AppDeleted](t, dispatcher.Signals()[0]) - assert.Equal(t, domain.AppDeleted{ - ID: app.ID(), - }, deleted) - }) -} diff --git a/internal/deployment/app/delete_app/on_app_cleanup_requested.go b/internal/deployment/app/delete_app/on_app_cleanup_requested.go deleted file mode 100644 index 00b2596a..00000000 --- a/internal/deployment/app/delete_app/on_app_cleanup_requested.go +++ /dev/null @@ -1,17 +0,0 @@ -package delete_app - -import ( - "context" - - "github.com/YuukanOO/seelf/internal/deployment/domain" - "github.com/YuukanOO/seelf/pkg/bus" -) - -// Upon receiving a cleanup request, queue a job to remove everything related to the application. 
-func OnAppCleanupRequestedHandler(scheduler bus.Scheduler) bus.SignalHandler[domain.AppCleanupRequested] { - return func(ctx context.Context, evt domain.AppCleanupRequested) error { - return scheduler.Queue(ctx, Command{ - ID: string(evt.ID), - }, bus.WithPolicy(bus.JobPolicyWaitForOthersResourceID)) - } -} diff --git a/internal/deployment/app/delete_target/delete_target.go b/internal/deployment/app/delete_target/delete_target.go deleted file mode 100644 index acb6fa8f..00000000 --- a/internal/deployment/app/delete_target/delete_target.go +++ /dev/null @@ -1,49 +0,0 @@ -package delete_target - -import ( - "context" - "errors" - - "github.com/YuukanOO/seelf/internal/deployment/domain" - "github.com/YuukanOO/seelf/pkg/apperr" - "github.com/YuukanOO/seelf/pkg/bus" -) - -type Command struct { - bus.Command[bus.UnitType] - - ID string `json:"id"` -} - -func (Command) Name_() string { return "deployment.command.delete_target" } -func (c Command) ResourceID() string { return c.ID } - -func Handler( - reader domain.TargetsReader, - writer domain.TargetsWriter, - provider domain.Provider, -) bus.RequestHandler[bus.UnitType, Command] { - return func(ctx context.Context, cmd Command) (bus.UnitType, error) { - target, err := reader.GetByID(ctx, domain.TargetID(cmd.ID)) - - if err != nil { - if errors.Is(err, apperr.ErrNotFound) { - return bus.Unit, nil - } - - return bus.Unit, err - } - - // Resources have been cleaned up here thanks to the scheduler policy - if err = target.Delete(true); err != nil { - return bus.Unit, err - } - - // Either way, remove eventual configuration tied to the target - if err = provider.RemoveConfiguration(ctx, target); err != nil { - return bus.Unit, err - } - - return bus.Unit, writer.Write(ctx, &target) - } -} diff --git a/internal/deployment/app/delete_target/delete_target_test.go b/internal/deployment/app/delete_target/delete_target_test.go deleted file mode 100644 index ce0f4c32..00000000 --- a/internal/deployment/app/delete_target/delete_target_test.go +++ /dev/null @@ -1,83 +0,0 @@ -package delete_target_test - -import ( - "context" - "testing" - - authfixture "github.com/YuukanOO/seelf/internal/auth/fixture" - "github.com/YuukanOO/seelf/internal/deployment/app/delete_target" - "github.com/YuukanOO/seelf/internal/deployment/domain" - "github.com/YuukanOO/seelf/internal/deployment/fixture" - "github.com/YuukanOO/seelf/pkg/assert" - "github.com/YuukanOO/seelf/pkg/bus" - "github.com/YuukanOO/seelf/pkg/bus/spy" -) - -func Test_DeleteTarget(t *testing.T) { - - arrange := func(tb testing.TB, provider domain.Provider, seed ...fixture.SeedBuilder) ( - bus.RequestHandler[bus.UnitType, delete_target.Command], - spy.Dispatcher, - ) { - context := fixture.PrepareDatabase(tb, seed...) 
- return delete_target.Handler(context.TargetsStore, context.TargetsStore, provider), context.Dispatcher - } - - t.Run("should fail silently if the target does not exist anymore", func(t *testing.T) { - var provider dummyProvider - handler, dispatcher := arrange(t, &provider) - - _, err := handler(context.Background(), delete_target.Command{}) - - assert.Nil(t, err) - assert.False(t, provider.called) - assert.HasLength(t, 0, dispatcher.Signals()) - }) - - t.Run("should fail if the target has not been requested for cleanup", func(t *testing.T) { - var provider dummyProvider - user := authfixture.User() - target := fixture.Target(fixture.WithTargetCreatedBy(user.ID())) - handler, dispatcher := arrange(t, &provider, fixture.WithUsers(&user), fixture.WithTargets(&target)) - - _, err := handler(context.Background(), delete_target.Command{ - ID: string(target.ID()), - }) - - assert.ErrorIs(t, domain.ErrTargetCleanupNeeded, err) - assert.False(t, provider.called) - assert.HasLength(t, 0, dispatcher.Signals()) - }) - - t.Run("should succeed if everything is good", func(t *testing.T) { - var provider dummyProvider - user := authfixture.User() - target := fixture.Target(fixture.WithTargetCreatedBy(user.ID())) - target.Configured(target.CurrentVersion(), nil, nil) - assert.Nil(t, target.RequestCleanup(false, user.ID())) - handler, dispatcher := arrange(t, &provider, fixture.WithUsers(&user), fixture.WithTargets(&target)) - - _, err := handler(context.Background(), delete_target.Command{ - ID: string(target.ID()), - }) - - assert.Nil(t, err) - assert.True(t, provider.called) - assert.HasLength(t, 1, dispatcher.Signals()) - - deleted := assert.Is[domain.TargetDeleted](t, dispatcher.Signals()[0]) - assert.Equal(t, domain.TargetDeleted{ - ID: target.ID(), - }, deleted) - }) -} - -type dummyProvider struct { - domain.Provider - called bool -} - -func (d *dummyProvider) RemoveConfiguration(ctx context.Context, target domain.Target) error { - d.called = true - return nil -} diff --git a/internal/deployment/app/delete_target/on_target_cleanup_requested.go b/internal/deployment/app/delete_target/on_target_cleanup_requested.go deleted file mode 100644 index d5b3a44e..00000000 --- a/internal/deployment/app/delete_target/on_target_cleanup_requested.go +++ /dev/null @@ -1,17 +0,0 @@ -package delete_target - -import ( - "context" - - "github.com/YuukanOO/seelf/internal/deployment/domain" - "github.com/YuukanOO/seelf/pkg/bus" -) - -// Upon receiving a cleanup request, queue a job to delete the target record when every other tasks are done. -func OnTargetCleanupRequestedHandler(scheduler bus.Scheduler) bus.SignalHandler[domain.TargetCleanupRequested] { - return func(ctx context.Context, evt domain.TargetCleanupRequested) error { - return scheduler.Queue(ctx, Command{ - ID: string(evt.ID), - }, bus.WithPolicy(bus.JobPolicyWaitForOthersResourceID)) - } -} diff --git a/internal/deployment/app/deploy/deploy.go b/internal/deployment/app/deploy/deploy.go index 8a23c2e8..16296c57 100644 --- a/internal/deployment/app/deploy/deploy.go +++ b/internal/deployment/app/deploy/deploy.go @@ -12,14 +12,17 @@ import ( // Process a deployment, this is where the magic happen! 
type Command struct { - bus.Command[bus.UnitType] + bus.AsyncCommand AppID string `json:"app_id"` DeploymentNumber int `json:"deployment_number"` + Environment string `json:"environment"` + TargetID string `json:"target_id"` } func (Command) Name_() string { return "deployment.command.deploy" } func (c Command) ResourceID() string { return c.AppID + "-" + strconv.Itoa(c.DeploymentNumber) } +func (c Command) Group() string { return bus.Group(c.AppID, c.Environment, c.TargetID) } // Handle the deployment process. // If an unexpected error occurs during this process, it uses the bus.PreserveOrder function @@ -32,11 +35,9 @@ func Handler( provider domain.Provider, targetsReader domain.TargetsReader, registriesReader domain.RegistriesReader, -) bus.RequestHandler[bus.UnitType, Command] { - return func(ctx context.Context, cmd Command) (result bus.UnitType, finalErr error) { - result = bus.Unit - - depl, err := reader.GetByID(ctx, domain.DeploymentIDFrom( +) bus.RequestHandler[bus.AsyncResult, Command] { + return func(ctx context.Context, cmd Command) (result bus.AsyncResult, finalErr error) { + deployment, err := reader.GetByID(ctx, domain.DeploymentIDFrom( domain.AppID(cmd.AppID), domain.DeploymentNumber(cmd.DeploymentNumber), )) @@ -52,13 +53,13 @@ func Handler( // If the target does not exist, fail the deployment // If the target is not ready, returns early without starting the deployment - target, targetErr := targetsReader.GetByID(ctx, depl.Config().Target()) + target, targetErr := targetsReader.GetByID(ctx, deployment.Config().Target()) if targetErr != nil && !errors.Is(targetErr, apperr.ErrNotFound) { return result, targetErr } - if err = depl.HasStarted(); err != nil { + if err = deployment.HasStarted(); err != nil { // If the deployment could not be started, it probably means the // application has been requested for cleanup and the deployment has been // cancelled, so the deploy job will never succeed. 
@@ -73,11 +74,11 @@ func Handler( // Target configuration is in progress, just retry the job later without writing // the deployment, keep it in pending state if errors.Is(targetAvailabilityErr, domain.ErrTargetConfigurationInProgress) { - return result, targetAvailabilityErr + return bus.AsyncResultDelay, nil } } - if err = writer.Write(ctx, &depl); err != nil { + if err = writer.Write(ctx, &deployment); err != nil { return result, err } @@ -94,50 +95,43 @@ func Handler( defer func() { // Since the deployment process could take some time, retrieve a fresh version of the // deployment right now - if depl, err = reader.GetByID(ctx, depl.ID()); err != nil { + if deployment, err = reader.GetByID(ctx, deployment.ID()); err != nil { if errors.Is(err, apperr.ErrNotFound) { - finalErr = nil - } else { - finalErr = err + err = nil } + + finalErr = err return } // An error means it has already been handled - if err = depl.HasEnded(services, finalErr); err != nil { + if err = deployment.HasEnded(services, finalErr); err != nil { finalErr = nil return } - if err = writer.Write(ctx, &depl); err != nil { - finalErr = err - return - } - - finalErr = nil + finalErr = writer.Write(ctx, &deployment) }() // Prepare the build directory - if deploymentCtx, finalErr = artifactManager.PrepareBuild(ctx, depl); finalErr != nil { + if deploymentCtx, finalErr = artifactManager.PrepareBuild(ctx, deployment); finalErr != nil { return } defer deploymentCtx.Logger().Close() // If the target does not exist, let's fail the deployment correctly - if targetErr != nil { - finalErr = targetErr + if finalErr = targetErr; finalErr != nil { return } // Target not available, fail the deployment - if targetAvailabilityErr != nil { - finalErr = targetAvailabilityErr + if finalErr = targetAvailabilityErr; finalErr != nil { return } // Fetch deployment files - if finalErr = source.Fetch(ctx, deploymentCtx, depl); finalErr != nil { + if finalErr = source.Fetch(ctx, deploymentCtx, deployment); finalErr != nil { return } @@ -147,7 +141,7 @@ func Handler( } // Ask the provider to actually deploy the app - if services, finalErr = provider.Deploy(ctx, deploymentCtx, depl, target, registries); finalErr != nil { + if services, finalErr = provider.Deploy(ctx, deploymentCtx, deployment, target, registries); finalErr != nil { return } diff --git a/internal/deployment/app/deploy/deploy_test.go b/internal/deployment/app/deploy/deploy_test.go index 37ef978b..82d421d7 100644 --- a/internal/deployment/app/deploy/deploy_test.go +++ b/internal/deployment/app/deploy/deploy_test.go @@ -25,7 +25,7 @@ func Test_Deploy(t *testing.T) { provider domain.Provider, seed ...fixture.SeedBuilder, ) ( - bus.RequestHandler[bus.UnitType, deploy.Command], + bus.RequestHandler[bus.AsyncResult, deploy.Command], context.Context, spy.Dispatcher, ) { @@ -41,10 +41,10 @@ func Test_Deploy(t *testing.T) { r, err := handler(ctx, deploy.Command{}) assert.Nil(t, err) - assert.Equal(t, bus.Unit, r) + assert.Equal(t, bus.AsyncResultProcessed, r) }) - t.Run("should mark the deployment has failed if the target is configuring", func(t *testing.T) { + t.Run("should delay the deployment if the target is configuring", func(t *testing.T) { user := authfixture.User() target := fixture.Target(fixture.WithTargetCreatedBy(user.ID())) app := fixture.App( @@ -65,19 +65,20 @@ func Test_Deploy(t *testing.T) { fixture.WithDeployments(&deployment), ) - _, err := handler(ctx, deploy.Command{ + r, err := handler(ctx, deploy.Command{ AppID: string(deployment.ID().AppID()), DeploymentNumber: 
int(deployment.ID().DeploymentNumber()), }) - assert.ErrorIs(t, domain.ErrTargetConfigurationInProgress, err) + assert.Nil(t, err) + assert.Equal(t, bus.AsyncResultDelay, r) assert.HasLength(t, 0, dispatcher.Signals()) }) t.Run("should mark the deployment has failed if source does not succeed", func(t *testing.T) { user := authfixture.User() target := fixture.Target(fixture.WithTargetCreatedBy(user.ID())) - target.Configured(target.CurrentVersion(), nil, nil) + assert.Nil(t, target.Configured(target.CurrentVersion(), nil, nil)) app := fixture.App( fixture.WithAppCreatedBy(user.ID()), fixture.WithEnvironmentConfig( @@ -103,7 +104,7 @@ func Test_Deploy(t *testing.T) { }) assert.Nil(t, err) - assert.Equal(t, bus.Unit, r) + assert.Equal(t, bus.AsyncResultProcessed, r) changed := assert.Is[domain.DeploymentStateChanged](t, dispatcher.Signals()[0]) assert.Equal(t, domain.DeploymentStatusRunning, changed.State.Status()) @@ -113,10 +114,10 @@ func Test_Deploy(t *testing.T) { assert.Equal(t, sourceErr.Error(), changed.State.ErrCode().MustGet()) }) - t.Run("should mark the deployment has failed in the target is not correctly configured", func(t *testing.T) { + t.Run("should mark the deployment has failed if the target is not correctly configured", func(t *testing.T) { user := authfixture.User() target := fixture.Target(fixture.WithTargetCreatedBy(user.ID())) - target.Configured(target.CurrentVersion(), nil, errors.New("target_failed")) + assert.Nil(t, target.Configured(target.CurrentVersion(), nil, errors.New("target_failed"))) app := fixture.App( fixture.WithAppCreatedBy(user.ID()), fixture.WithEnvironmentConfig( @@ -141,7 +142,7 @@ func Test_Deploy(t *testing.T) { }) assert.Nil(t, err) - assert.Equal(t, bus.Unit, r) + assert.Equal(t, bus.AsyncResultProcessed, r) changed := assert.Is[domain.DeploymentStateChanged](t, dispatcher.Signals()[0]) assert.Equal(t, domain.DeploymentStatusRunning, changed.State.Status()) @@ -154,7 +155,7 @@ func Test_Deploy(t *testing.T) { t.Run("should mark the deployment has failed if provider does not run the deployment successfully", func(t *testing.T) { user := authfixture.User() target := fixture.Target(fixture.WithTargetCreatedBy(user.ID())) - target.Configured(target.CurrentVersion(), nil, nil) + assert.Nil(t, target.Configured(target.CurrentVersion(), nil, nil)) app := fixture.App( fixture.WithAppCreatedBy(user.ID()), fixture.WithEnvironmentConfig( @@ -180,7 +181,7 @@ func Test_Deploy(t *testing.T) { }) assert.Nil(t, err) - assert.Equal(t, bus.Unit, r) + assert.Equal(t, bus.AsyncResultProcessed, r) changed := assert.Is[domain.DeploymentStateChanged](t, dispatcher.Signals()[0]) assert.Equal(t, domain.DeploymentStatusRunning, changed.State.Status()) @@ -193,7 +194,7 @@ func Test_Deploy(t *testing.T) { t.Run("should mark the deployment has succeeded if all is good", func(t *testing.T) { user := authfixture.User() target := fixture.Target(fixture.WithTargetCreatedBy(user.ID())) - target.Configured(target.CurrentVersion(), nil, nil) + assert.Nil(t, target.Configured(target.CurrentVersion(), nil, nil)) app := fixture.App( fixture.WithAppCreatedBy(user.ID()), fixture.WithEnvironmentConfig( @@ -218,7 +219,7 @@ func Test_Deploy(t *testing.T) { }) assert.Nil(t, err) - assert.Equal(t, bus.Unit, r) + assert.Equal(t, bus.AsyncResultProcessed, r) changed := assert.Is[domain.DeploymentStateChanged](t, dispatcher.Signals()[0]) assert.Equal(t, domain.DeploymentStatusRunning, changed.State.Status()) diff --git a/internal/deployment/app/deploy/on_deployment_created.go 
b/internal/deployment/app/deploy/on_deployment_created.go index 9b0186e2..e0dd3e8e 100644 --- a/internal/deployment/app/deploy/on_deployment_created.go +++ b/internal/deployment/app/deploy/on_deployment_created.go @@ -3,7 +3,6 @@ package deploy import ( "context" - "github.com/YuukanOO/seelf/internal/deployment/app" "github.com/YuukanOO/seelf/internal/deployment/domain" "github.com/YuukanOO/seelf/pkg/bus" ) @@ -14,6 +13,8 @@ func OnDeploymentCreatedHandler(scheduler bus.Scheduler) bus.SignalHandler[domai return scheduler.Queue(ctx, Command{ AppID: string(evt.ID.AppID()), DeploymentNumber: int(evt.ID.DeploymentNumber()), - }, bus.WithGroup(app.DeploymentGroup(evt.Config)), bus.WithPolicy(bus.JobPolicyRetryPreserveOrder)) + Environment: string(evt.Config.Environment()), + TargetID: string(evt.Config.Target()), + }) } } diff --git a/internal/deployment/app/fail_pending_deployments/on_target_delete_requested.go b/internal/deployment/app/fail_pending_deployments/on_target_cleanup_requested.go similarity index 79% rename from internal/deployment/app/fail_pending_deployments/on_target_delete_requested.go rename to internal/deployment/app/fail_pending_deployments/on_target_cleanup_requested.go index 1e04ac5d..ad9a5814 100644 --- a/internal/deployment/app/fail_pending_deployments/on_target_delete_requested.go +++ b/internal/deployment/app/fail_pending_deployments/on_target_cleanup_requested.go @@ -8,7 +8,7 @@ import ( "github.com/YuukanOO/seelf/pkg/monad" ) -func OnTargetDeleteRequestedHandler(writer domain.DeploymentsWriter) bus.SignalHandler[domain.TargetCleanupRequested] { +func OnTargetCleanupRequestedHandler(writer domain.DeploymentsWriter) bus.SignalHandler[domain.TargetCleanupRequested] { return func(ctx context.Context, evt domain.TargetCleanupRequested) error { return writer.FailDeployments(ctx, domain.ErrTargetCleanupRequested, domain.FailCriteria{ Status: monad.Value(domain.DeploymentStatusPending), diff --git a/internal/deployment/app/group.go b/internal/deployment/app/group.go deleted file mode 100644 index 76fc27c6..00000000 --- a/internal/deployment/app/group.go +++ /dev/null @@ -1,16 +0,0 @@ -package app - -import ( - "github.com/YuukanOO/seelf/internal/deployment/domain" -) - -// Group for deployment to prevent multiple deployment at the same time on the same -// environment. -func DeploymentGroup(config domain.ConfigSnapshot) string { - return "deployment.deployment.deploy." + config.ProjectName() -} - -// Group for target operation to prevent multiple target configuration at the same time. -func TargetConfigurationGroup(id domain.TargetID) string { - return "deployment.target.configure." 
+ string(id) -} diff --git a/internal/deployment/app/reconfigure_target/reconfigure_target_test.go b/internal/deployment/app/reconfigure_target/reconfigure_target_test.go index 0fb97d01..1006c658 100644 --- a/internal/deployment/app/reconfigure_target/reconfigure_target_test.go +++ b/internal/deployment/app/reconfigure_target/reconfigure_target_test.go @@ -48,7 +48,7 @@ func Test_ReconfigureTarget(t *testing.T) { user := authfixture.User() target := fixture.Target(fixture.WithTargetCreatedBy(user.ID())) target.Configured(target.CurrentVersion(), nil, nil) - assert.Nil(t, target.RequestCleanup(false, user.ID())) + assert.Nil(t, target.RequestDelete(false, user.ID())) handler, _ := arrange(t, fixture.WithUsers(&user), fixture.WithTargets(&target)) _, err := handler(context.Background(), reconfigure_target.Command{ diff --git a/internal/deployment/app/request_app_cleanup/request_app_cleanup.go b/internal/deployment/app/request_app_cleanup/request_app_cleanup.go index 9671ed3b..f6f4cffe 100644 --- a/internal/deployment/app/request_app_cleanup/request_app_cleanup.go +++ b/internal/deployment/app/request_app_cleanup/request_app_cleanup.go @@ -28,7 +28,7 @@ func Handler( return bus.Unit, err } - app.RequestCleanup(auth.CurrentUser(ctx).MustGet()) + app.RequestDelete(auth.CurrentUser(ctx).MustGet()) return bus.Unit, writer.Write(ctx, &app) } diff --git a/internal/deployment/app/request_target_cleanup/request_target_cleanup.go b/internal/deployment/app/request_target_cleanup/request_target_cleanup.go index 8983c5cc..55ac30ca 100644 --- a/internal/deployment/app/request_target_cleanup/request_target_cleanup.go +++ b/internal/deployment/app/request_target_cleanup/request_target_cleanup.go @@ -34,7 +34,7 @@ func Handler( return bus.Unit, err } - if err = target.RequestCleanup(apps, auth.CurrentUser(ctx).MustGet()); err != nil { + if err = target.RequestDelete(apps, auth.CurrentUser(ctx).MustGet()); err != nil { return bus.Unit, err } diff --git a/internal/deployment/app/update_app/update_app_test.go b/internal/deployment/app/update_app/update_app_test.go index 576ce916..ffe92410 100644 --- a/internal/deployment/app/update_app/update_app_test.go +++ b/internal/deployment/app/update_app/update_app_test.go @@ -137,17 +137,45 @@ func Test_UpdateApp(t *testing.T) { assert.Nil(t, err) assert.Equal(t, string(app.ID()), id) - assert.HasLength(t, 2, dispatcher.Signals()) + assert.HasLength(t, 4, dispatcher.Signals()) changed := assert.Is[domain.AppEnvChanged](t, dispatcher.Signals()[0]) assert.Equal(t, domain.Production, changed.Environment) assert.Equal(t, otherTarget.ID(), changed.Config.Target()) assert.False(t, changed.Config.Vars().HasValue()) - - changed = assert.Is[domain.AppEnvChanged](t, dispatcher.Signals()[1]) + historyChanged := assert.Is[domain.AppHistoryChanged](t, dispatcher.Signals()[1]) + assert.DeepEqual(t, domain.AppHistoryChanged{ + ID: app.ID(), + History: domain.AppTargetHistory{ + domain.Production: []domain.TargetID{ + target.ID(), + otherTarget.ID(), + }, + domain.Staging: []domain.TargetID{ + target.ID(), + otherTarget.ID(), + }, + }, + }, historyChanged) + + changed = assert.Is[domain.AppEnvChanged](t, dispatcher.Signals()[2]) assert.Equal(t, domain.Staging, changed.Environment) assert.Equal(t, otherTarget.ID(), changed.Config.Target()) assert.False(t, changed.Config.Vars().HasValue()) + historyChanged = assert.Is[domain.AppHistoryChanged](t, dispatcher.Signals()[3]) + assert.DeepEqual(t, domain.AppHistoryChanged{ + ID: app.ID(), + History: domain.AppTargetHistory{ + 
domain.Production: []domain.TargetID{ + target.ID(), + otherTarget.ID(), + }, + domain.Staging: []domain.TargetID{ + target.ID(), + otherTarget.ID(), + }, + }, + }, historyChanged) }) t.Run("should update an application env variables", func(t *testing.T) { @@ -243,7 +271,7 @@ func Test_UpdateApp(t *testing.T) { domain.NewEnvironmentConfig(target.ID()), ), ) - app.RequestCleanup(user.ID()) + assert.Nil(t, app.RequestDelete(user.ID())) handler, ctx, _ := arrange(t, fixture.WithUsers(&user), fixture.WithTargets(&target), diff --git a/internal/deployment/domain/app.go b/internal/deployment/domain/app.go index 5413bf35..501c1362 100644 --- a/internal/deployment/domain/app.go +++ b/internal/deployment/domain/app.go @@ -2,6 +2,7 @@ package domain import ( "context" + "database/sql/driver" "time" "github.com/YuukanOO/seelf/internal/auth/domain" @@ -32,6 +33,7 @@ type ( id AppID name AppName versionControl monad.Maybe[VersionControl] + history AppTargetHistory production EnvironmentConfig staging EnvironmentConfig cleanupRequested monad.Maybe[shared.Action[domain.UserID]] @@ -69,6 +71,7 @@ type ( Name AppName Production EnvironmentConfig Staging EnvironmentConfig + History AppTargetHistory Created shared.Action[domain.UserID] } @@ -103,6 +106,13 @@ type ( Requested shared.Action[domain.UserID] } + AppHistoryChanged struct { + bus.Notification + + ID AppID + History AppTargetHistory + } + AppDeleted struct { bus.Notification @@ -117,6 +127,7 @@ func (AppVersionControlConfigured) Name_() string { } func (AppVersionControlRemoved) Name_() string { return "deployment.event.app_version_control_removed" } func (AppCleanupRequested) Name_() string { return "deployment.event.app_cleanup_requested" } +func (AppHistoryChanged) Name_() string { return "deployment.event.app_history_changed" } func (AppDeleted) Name_() string { return "deployment.event.app_deleted" } func (e AppEnvChanged) TargetHasChanged() bool { return e.Config.target != e.OldConfig.target } @@ -145,7 +156,11 @@ func NewApp( Name: name, Production: production, Staging: staging, - Created: shared.NewAction(createdBy), + History: AppTargetHistory{ + Production: []TargetID{production.target}, + Staging: []TargetID{staging.target}, + }, + Created: shared.NewAction(createdBy), }) return app, nil @@ -175,6 +190,7 @@ func AppFrom(scanner storage.Scanner) (a App, err error) { &a.staging.vars, &cleanupRequestedAt, &cleanupRequestedBy, + &a.history, &createdAt, &createdBy, ) @@ -246,10 +262,11 @@ func (a *App) HasStagingConfig(configRequirement EnvironmentConfigRequirement) e return a.tryUpdateEnvironmentConfig(Staging, a.staging, configRequirement) } -// Request cleaning for this application. This marks the application for deletion. -func (a *App) RequestCleanup(requestedBy domain.UserID) { +// Request application deletion meaning the application resources should be removed +// and the application deleted when every resources are freed. +func (a *App) RequestDelete(requestedBy domain.UserID) error { if a.cleanupRequested.HasValue() { - return + return ErrAppCleanupRequested } a.apply(AppCleanupRequested{ @@ -258,19 +275,23 @@ func (a *App) RequestCleanup(requestedBy domain.UserID) { StagingConfig: a.staging, Requested: shared.NewAction(requestedBy), }) + + return nil } -// Delete the application. -func (a *App) Delete(cleanedUp bool) error { - if !a.cleanupRequested.HasValue() || !cleanedUp { - return ErrAppCleanupNeeded +// Marks the application has being cleaned for a specific environment and a specific target. 
+func (a *App) CleanedUp(environment Environment, target TargetID) { + if a.history.remove(environment, target) && a.cleanupRequested.HasValue() { + a.apply(AppDeleted{ + ID: a.id, + }) + return } - a.apply(AppDeleted{ - ID: a.id, + a.apply(AppHistoryChanged{ + ID: a.id, + History: a.history, }) - - return nil } func (a *App) ID() AppID { return a.id } @@ -296,7 +317,7 @@ func (a *App) tryUpdateEnvironmentConfig( return nil } - updatedConfig.consolidate(existingConfig) + targetChanged := updatedConfig.consolidate(existingConfig) a.apply(AppEnvChanged{ ID: a.id, @@ -305,6 +326,15 @@ func (a *App) tryUpdateEnvironmentConfig( OldConfig: existingConfig, }) + if targetChanged { + a.history.push(env, updatedConfig.target) + + a.apply(AppHistoryChanged{ + ID: a.id, + History: a.history, + }) + } + return nil } @@ -316,6 +346,7 @@ func (a *App) apply(e event.Event) { a.production = evt.Production a.staging = evt.Staging a.created = evt.Created + a.history = evt.History case AppEnvChanged: switch evt.Environment { case Production: @@ -329,7 +360,51 @@ func (a *App) apply(e event.Event) { a.versionControl.Unset() case AppCleanupRequested: a.cleanupRequested.Set(evt.Requested) + case AppHistoryChanged: + a.history = evt.History } event.Store(a, e) } + +// Represents the list of targets per env where the application could have been deployed +// and where resources should be cleaned up. +// You should never update it directly and maybe I should embed the map in a struct +// to make it more explicit. +type AppTargetHistory map[Environment][]TargetID + +func (a AppTargetHistory) push(environment Environment, target TargetID) { + targets, exists := a[environment] + + if !exists { + a[environment] = []TargetID{target} + return + } + + a[environment] = append(targets, target) +} + +// Remove the given target and environment history and returns true if the history is empty. +func (a AppTargetHistory) remove(environment Environment, target TargetID) bool { + targets, exists := a[environment] + + if !exists { + return a.isEmpty() + } + + for i, existingTarget := range targets { + if existingTarget == target { + a[environment] = append(targets[:i], targets[i+1:]...) 
+ if len(a[environment]) == 0 { + delete(a, environment) + } + break + } + } + + return a.isEmpty() +} +func (a AppTargetHistory) isEmpty() bool { return len(a) == 0 } + +func (a AppTargetHistory) Value() (driver.Value, error) { return storage.ValueJSON(a) } +func (a *AppTargetHistory) Scan(value any) error { return storage.ScanJSON(value, a) } diff --git a/internal/deployment/domain/app_test.go b/internal/deployment/domain/app_test.go index f43ce746..73e0cba9 100644 --- a/internal/deployment/domain/app_test.go +++ b/internal/deployment/domain/app_test.go @@ -81,6 +81,10 @@ func Test_App(t *testing.T) { Created: shared.ActionFrom(uid, assert.NotZero(t, evt.Created.At())), Production: production, Staging: staging, + History: domain.AppTargetHistory{ + domain.Production: []domain.TargetID{production.Target()}, + domain.Staging: []domain.TargetID{staging.Target()}, + }, }, evt) }) @@ -139,7 +143,7 @@ func Test_App(t *testing.T) { t.Run("does not allow to modify the vcs config if the app is marked for deletion", func(t *testing.T) { app := fixture.App() - app.RequestCleanup("uid") + assert.Nil(t, app.RequestDelete("uid")) assert.ErrorIs(t, domain.ErrAppCleanupRequested, app.UseVersionControl( domain.NewVersionControl(must.Panic(domain.UrlFrom("http://somewhere.com"))))) @@ -195,84 +199,140 @@ func Test_App(t *testing.T) { assert.Nil(t, app.HasProductionConfig(domain.NewEnvironmentConfigRequirement(newConfig, true, true))) assert.Nil(t, app.HasStagingConfig(domain.NewEnvironmentConfigRequirement(newConfig, true, true))) - assert.HasNEvents(t, 3, &app, "new configs should trigger new events") + assert.HasNEvents(t, 5, &app, "new configs should trigger new events") changed := assert.EventIs[domain.AppEnvChanged](t, &app, 1) - assert.DeepEqual(t, domain.AppEnvChanged{ ID: app.ID(), Environment: domain.Production, Config: newConfig, OldConfig: production, }, changed) + historyChanged := assert.EventIs[domain.AppHistoryChanged](t, &app, 2) + assert.DeepEqual(t, domain.AppHistoryChanged{ + ID: app.ID(), + History: domain.AppTargetHistory{ + domain.Production: []domain.TargetID{ + production.Target(), + newConfig.Target(), + }, + domain.Staging: []domain.TargetID{ + staging.Target(), + newConfig.Target(), + }, + }, + }, historyChanged) - changed = assert.EventIs[domain.AppEnvChanged](t, &app, 2) - + changed = assert.EventIs[domain.AppEnvChanged](t, &app, 3) assert.DeepEqual(t, domain.AppEnvChanged{ ID: app.ID(), Environment: domain.Staging, Config: newConfig, OldConfig: staging, }, changed) + historyChanged = assert.EventIs[domain.AppHistoryChanged](t, &app, 4) + assert.DeepEqual(t, domain.AppHistoryChanged{ + ID: app.ID(), + History: domain.AppTargetHistory{ + domain.Production: []domain.TargetID{ + production.Target(), + newConfig.Target(), + }, + domain.Staging: []domain.TargetID{ + staging.Target(), + newConfig.Target(), + }, + }, + }, historyChanged) }) t.Run("does not allow to modify the environment config if the app is marked for deletion", func(t *testing.T) { app := fixture.App() - app.RequestCleanup("uid") + assert.Nil(t, app.RequestDelete("uid")) assert.ErrorIs(t, domain.ErrAppCleanupRequested, app.HasProductionConfig(domain.NewEnvironmentConfigRequirement(domain.NewEnvironmentConfig("another-target"), true, true))) assert.ErrorIs(t, domain.ErrAppCleanupRequested, app.HasStagingConfig(domain.NewEnvironmentConfigRequirement(domain.NewEnvironmentConfig("another-target"), true, true))) }) - t.Run("could be marked for deletion only if not already the case", func(t *testing.T) { + t.Run("could 
be marked has deleting", func(t *testing.T) { production := domain.NewEnvironmentConfig("production-target") staging := domain.NewEnvironmentConfig("staging-target") app := fixture.App(fixture.WithEnvironmentConfig(production, staging)) - app.RequestCleanup("uid") - app.RequestCleanup("uid") - - assert.HasNEvents(t, 2, &app, "should raise the event once") - evt := assert.EventIs[domain.AppCleanupRequested](t, &app, 1) + err := app.RequestDelete("uid") + assert.Nil(t, err) + assert.HasNEvents(t, 2, &app) + requested := assert.EventIs[domain.AppCleanupRequested](t, &app, 1) assert.DeepEqual(t, domain.AppCleanupRequested{ ID: app.ID(), ProductionConfig: production, StagingConfig: staging, - Requested: shared.ActionFrom[auth.UserID]("uid", evt.Requested.At()), - }, evt) - }) - - t.Run("should not allow a deletion if app resources have not been cleaned up", func(t *testing.T) { - app := fixture.App() - app.RequestCleanup("uid") - - err := app.Delete(false) - - assert.ErrorIs(t, domain.ErrAppCleanupNeeded, err) - assert.HasNEvents(t, 2, &app) + Requested: shared.ActionFrom[auth.UserID]("uid", requested.Requested.At()), + }, requested) }) - t.Run("raise an error if delete is called for a non cleaned up app", func(t *testing.T) { - app := fixture.App() + t.Run("should returns an error if already marked as being deleted", func(t *testing.T) { + production := domain.NewEnvironmentConfig("production-target") + staging := domain.NewEnvironmentConfig("staging-target") + app := fixture.App(fixture.WithEnvironmentConfig(production, staging)) + assert.Nil(t, app.RequestDelete("uid")) - err := app.Delete(false) + assert.ErrorIs(t, domain.ErrAppCleanupRequested, app.RequestDelete("uid")) - assert.ErrorIs(t, domain.ErrAppCleanupNeeded, err) + assert.HasNEvents(t, 2, &app, "should raise the event once") + requested := assert.EventIs[domain.AppCleanupRequested](t, &app, 1) + assert.DeepEqual(t, domain.AppCleanupRequested{ + ID: app.ID(), + ProductionConfig: production, + StagingConfig: staging, + Requested: shared.ActionFrom[auth.UserID]("uid", requested.Requested.At()), + }, requested) }) - t.Run("could be deleted", func(t *testing.T) { - app := fixture.App() - app.RequestCleanup("uid") - - err := app.Delete(true) - - assert.Nil(t, err) - assert.HasNEvents(t, 3, &app) - evt := assert.EventIs[domain.AppDeleted](t, &app, 2) + t.Run("should be able to mark resources as being cleanup on specific env and target", func(t *testing.T) { + t.Run("should not raise the app delete if the application is not being deleted", func(t *testing.T) { + production := domain.NewEnvironmentConfig("production-target") + staging := domain.NewEnvironmentConfig("staging-target") + app := fixture.App(fixture.WithEnvironmentConfig(production, staging)) + + app.CleanedUp(domain.Production, production.Target()) + app.CleanedUp(domain.Staging, staging.Target()) + + assert.HasNEvents(t, 3, &app) + historyChanged := assert.EventIs[domain.AppHistoryChanged](t, &app, 1) + assert.DeepEqual(t, domain.AppHistoryChanged{ + ID: app.ID(), + History: domain.AppTargetHistory{}, + }, historyChanged) + historyChanged = assert.EventIs[domain.AppHistoryChanged](t, &app, 2) + assert.DeepEqual(t, domain.AppHistoryChanged{ + ID: app.ID(), + History: domain.AppTargetHistory{}, + }, historyChanged) + }) - assert.Equal(t, domain.AppDeleted{ - ID: app.ID(), - }, evt) + t.Run("should delete the app if being requested and all resources cleaned up", func(t *testing.T) { + production := domain.NewEnvironmentConfig("production-target") + staging := 
domain.NewEnvironmentConfig("staging-target") + app := fixture.App(fixture.WithEnvironmentConfig(production, staging)) + assert.Nil(t, app.HasProductionConfig(domain.NewEnvironmentConfigRequirement(domain.NewEnvironmentConfig("another-target"), true, true))) + assert.Nil(t, app.RequestDelete("uid")) + + app.CleanedUp(domain.Production, production.Target()) + app.CleanedUp(domain.Staging, staging.Target()) + app.CleanedUp(domain.Production, "another-target") + + assert.HasNEvents(t, 7, &app) + historyChanged := assert.EventIs[domain.AppHistoryChanged](t, &app, 4) + assert.DeepEqual(t, domain.AppHistoryChanged{ + ID: app.ID(), + History: domain.AppTargetHistory{}, + }, historyChanged) + deleted := assert.EventIs[domain.AppDeleted](t, &app, 6) + assert.DeepEqual(t, domain.AppDeleted{ + ID: app.ID(), + }, deleted) + }) }) } diff --git a/internal/deployment/domain/deployment.go b/internal/deployment/domain/deployment.go index 9dbdcf28..dc873f9c 100644 --- a/internal/deployment/domain/deployment.go +++ b/internal/deployment/domain/deployment.go @@ -17,15 +17,6 @@ var ( ErrCouldNotPromoteProductionDeployment = apperr.New("could_not_promote_production_deployment") ErrRunningOrPendingDeployments = apperr.New("running_or_pending_deployments") ErrInvalidSourceDeployment = apperr.New("invalid_source_deployment") - ErrNotInPendingState = apperr.New("not_in_pending_state") - ErrNotInRunningState = apperr.New("not_in_running_state") -) - -const ( - DeploymentStatusPending DeploymentStatus = iota - DeploymentStatusRunning - DeploymentStatusFailed - DeploymentStatusSucceeded ) type ( @@ -241,6 +232,18 @@ func (d *Deployment) apply(e event.Event) { event.Store(d, e) } +var ( + ErrNotInPendingState = apperr.New("not_in_pending_state") + ErrNotInRunningState = apperr.New("not_in_running_state") +) + +const ( + DeploymentStatusPending DeploymentStatus = iota + DeploymentStatusRunning + DeploymentStatusFailed + DeploymentStatusSucceeded +) + type ( DeploymentStatus uint8 diff --git a/internal/deployment/domain/deployment_test.go b/internal/deployment/domain/deployment_test.go index 14f5801f..ff80a491 100644 --- a/internal/deployment/domain/deployment_test.go +++ b/internal/deployment/domain/deployment_test.go @@ -22,7 +22,7 @@ func Test_Deployment(t *testing.T) { t.Run("should require an app without cleanup requested", func(t *testing.T) { app := fixture.App() - app.RequestCleanup("uid") + assert.Nil(t, app.RequestDelete("uid")) _, err := app.NewDeployment(1, fixture.SourceData(), domain.Production, "uid") diff --git a/internal/deployment/domain/environment.go b/internal/deployment/domain/environment.go index 88d1380d..48cb502c 100644 --- a/internal/deployment/domain/environment.go +++ b/internal/deployment/domain/environment.go @@ -73,12 +73,15 @@ func (e EnvironmentConfig) Target() TargetID { return e.target } func (e EnvironmentConfig) Version() time.Time { return e.version } func (e EnvironmentConfig) Vars() monad.Maybe[ServicesEnv] { return e.vars } -func (e *EnvironmentConfig) consolidate(other EnvironmentConfig) { +// Update the config version with the old one if target has not changed. +// Returns wether or not target has changed. +func (e *EnvironmentConfig) consolidate(other EnvironmentConfig) bool { if e.target != other.target { - return + return true } e.version = other.version + return false } // Builds the map of services variables from a raw value. 
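Note on the `consolidate` change just above: returning a boolean is what lets `App.tryUpdateEnvironmentConfig` decide whether to push the new target into `AppTargetHistory` and raise `AppHistoryChanged`. A minimal standalone sketch of that contract, using a simplified stand-in type rather than the real `domain.EnvironmentConfig`:

// Sketch only: simplified stand-in for domain.EnvironmentConfig to illustrate
// the new consolidate contract. When the target is unchanged, the previous
// version is kept (no reconfiguration needed) and false is returned; when it
// changed, true tells the caller to record the new target in the app history.
package main

import (
	"fmt"
	"time"
)

type envConfig struct {
	target  string
	version time.Time
}

func (e *envConfig) consolidate(old envConfig) bool {
	if e.target != old.target {
		return true // target changed: caller must update the target history
	}
	e.version = old.version // same target: keep the existing config version
	return false
}

func main() {
	old := envConfig{target: "docker-1", version: time.Now().Add(-time.Hour)}

	same := envConfig{target: "docker-1", version: time.Now()}
	fmt.Println(same.consolidate(old), same.version.Equal(old.version)) // false true

	moved := envConfig{target: "docker-2", version: time.Now()}
	fmt.Println(moved.consolidate(old)) // true
}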
diff --git a/internal/deployment/domain/provider.go b/internal/deployment/domain/provider.go index a68cc395..b6b44358 100644 --- a/internal/deployment/domain/provider.go +++ b/internal/deployment/domain/provider.go @@ -32,7 +32,7 @@ type ( // Setup a target by deploying the needed stuff to actually serve deployments. Setup(context.Context, Target) (TargetEntrypointsAssigned, error) // Remove target related configuration. - RemoveConfiguration(context.Context, Target) error + RemoveConfiguration(context.Context, TargetID) error // Cleanup a target, removing every resources managed by seelf on it. CleanupTarget(context.Context, Target, CleanupStrategy) error // Cleanup an application on the specified target and environment, which means removing every possible stuff related to it diff --git a/internal/deployment/domain/target.go b/internal/deployment/domain/target.go index c09af041..6ac71a82 100644 --- a/internal/deployment/domain/target.go +++ b/internal/deployment/domain/target.go @@ -31,12 +31,6 @@ const ( CleanupStrategySkip // Skip the cleanup because no resource has been deployed or we can't remove them anymore ) -const ( - TargetStatusConfiguring TargetStatus = iota - TargetStatusFailed - TargetStatusReady -) - type ( TargetID string CleanupStrategy uint8 // Strategy to use when deleting a target (on the provider side) based on wether it has been successfully configured or not @@ -190,7 +184,7 @@ func TargetFrom(scanner storage.Scanner) (t Target, err error) { &providerData, &t.state.status, &t.state.version, - &t.state.errcode, + &t.state.errCode, &t.state.lastReadyVersion, &t.customEntrypoints, &deleteRequestedAt, @@ -317,19 +311,18 @@ func (t *Target) HasProvider(providerRequirement ProviderConfigRequirement) erro // Check the target availability and returns an appropriate error. func (t *Target) CheckAvailability() error { - if t.state.status == TargetStatusConfiguring { - return ErrTargetConfigurationInProgress - } - if t.cleanupRequested.HasValue() { return ErrTargetCleanupRequested } - if t.state.status != TargetStatusReady { + switch t.state.status { + case TargetStatusConfiguring: + return ErrTargetConfigurationInProgress + case TargetStatusReady: + return nil + default: return ErrTargetConfigurationFailed } - - return nil } // Force the target reconfiguration. @@ -349,9 +342,9 @@ func (t *Target) Reconfigure() error { // Mark the target (in the given version) has configured (by an external system). // If the given version does not match the current one, nothing will be done. -func (t *Target) Configured(version time.Time, assigned TargetEntrypointsAssigned, err error) { - if !t.state.configured(version, err) { - return +func (t *Target) Configured(version time.Time, assigned TargetEntrypointsAssigned, err error) error { + if stateErr := t.state.configured(version, err); stateErr != nil { + return stateErr } if err == nil && t.customEntrypoints.assign(assigned) { @@ -365,6 +358,8 @@ func (t *Target) Configured(version time.Time, assigned TargetEntrypointsAssigne ID: t.id, State: t.state, }) + + return nil } // Inform the target that it should exposes entrypoints inside the services array @@ -399,10 +394,11 @@ func (t *Target) UnExposeEntrypoints(app AppID, envs ...Environment) { t.raiseEntrypointsChangedAndReconfigure() } -// Request the target cleanup, meaning it will be deleted with all its related data. 
-func (t *Target) RequestCleanup(apps HasAppsOnTarget, by auth.UserID) error { +// Request the target deletion, meaning every resources should be removed and the +// target deleted when its done. +func (t *Target) RequestDelete(apps HasAppsOnTarget, by auth.UserID) error { if t.cleanupRequested.HasValue() { - return nil + return ErrTargetCleanupRequested } if apps { @@ -422,7 +418,11 @@ func (t *Target) RequestCleanup(apps HasAppsOnTarget, by auth.UserID) error { } // Check the target cleanup strategy to determine how the target resources should be handled. -func (t *Target) CleanupStrategy(deployments HasRunningOrPendingDeploymentsOnTarget) (CleanupStrategy, error) { +func (t *Target) CanBeCleaned(deployments HasRunningOrPendingDeploymentsOnTarget) (CleanupStrategy, error) { + if !t.cleanupRequested.HasValue() { + return CleanupStrategyDefault, ErrTargetCleanupNeeded + } + if deployments { return CleanupStrategyDefault, ErrRunningOrPendingDeployments } @@ -430,21 +430,16 @@ func (t *Target) CleanupStrategy(deployments HasRunningOrPendingDeploymentsOnTar switch t.state.status { case TargetStatusConfiguring: return CleanupStrategyDefault, ErrTargetConfigurationInProgress - case TargetStatusReady: - return CleanupStrategyDefault, nil + case TargetStatusFailed: + return CleanupStrategySkip, nil default: - // Never reachable or target has been marked for deletion, no way to update it anymore, just skip the cleanup - if !t.state.lastReadyVersion.HasValue() || t.cleanupRequested.HasValue() { - return CleanupStrategySkip, nil - } - - return CleanupStrategyDefault, ErrTargetConfigurationFailed + return CleanupStrategyDefault, nil } } // Check the cleanup strategy for a specific application to determine how related resources // should be handled. -func (t *Target) AppCleanupStrategy( +func (t *Target) CanAppBeCleaned( ongoing HasRunningOrPendingDeploymentsOnAppTargetEnv, successful HasSuccessfulDeploymentsOnAppTargetEnv, ) (CleanupStrategy, error) { @@ -473,9 +468,9 @@ func (t *Target) AppCleanupStrategy( } } -// Deletes the target. -func (t *Target) Delete(cleanedUp bool) error { - if !t.cleanupRequested.HasValue() || !cleanedUp { +// Mark the target has being cleaned up, making it safe to be deleted. +func (t *Target) CleanedUp() error { + if !t.cleanupRequested.HasValue() { return ErrTargetCleanupNeeded } @@ -548,13 +543,21 @@ func (t *Target) apply(e event.Event) { event.Store(t, e) } +const ( + TargetStatusConfiguring TargetStatus = iota + TargetStatusFailed + TargetStatusReady +) + +var ErrTargetConfigurationOutdated = apperr.New("target_configuration_outdated") + type ( TargetStatus uint8 TargetState struct { status TargetStatus version time.Time - errcode monad.Maybe[string] + errCode monad.Maybe[string] lastReadyVersion monad.Maybe[time.Time] // Hold down the last time the target was marked as ready } ) @@ -568,33 +571,32 @@ func newTargetState() (t TargetState) { func (t *TargetState) reconfigure() { t.status = TargetStatusConfiguring t.version = time.Now().UTC() - t.errcode.Unset() + t.errCode.Unset() } -// Update the state based on wether or not an error is given and returns a boolean indicating -// if the state has changed. +// Update the state based on wether or not an error is given. // // If there is no error, the target will be considered ready. // If an error is given, the target will be marked as failed. // // In either case, if the state has changed since it has been processed (the version param), // it will return without doing anything because the result is outdated. 
-func (t *TargetState) configured(version time.Time, err error) bool { +func (t *TargetState) configured(version time.Time, err error) error { if t.isOutdated(version) { - return false + return ErrTargetConfigurationOutdated } if err != nil { t.status = TargetStatusFailed - t.errcode.Set(err.Error()) - return true + t.errCode.Set(err.Error()) + return nil } t.status = TargetStatusReady t.lastReadyVersion.Set(version) - t.errcode.Unset() + t.errCode.Unset() - return true + return nil } // Returns true if the given version is different from the current one or if the one @@ -604,7 +606,7 @@ func (t TargetState) isOutdated(version time.Time) bool { } func (t TargetState) Status() TargetStatus { return t.status } -func (t TargetState) ErrCode() monad.Maybe[string] { return t.errcode } +func (t TargetState) ErrCode() monad.Maybe[string] { return t.errCode } func (t TargetState) Version() time.Time { return t.version } func (t TargetState) LastReadyVersion() monad.Maybe[time.Time] { return t.lastReadyVersion } diff --git a/internal/deployment/domain/target_test.go b/internal/deployment/domain/target_test.go index 1fcf3269..2ae958ae 100644 --- a/internal/deployment/domain/target_test.go +++ b/internal/deployment/domain/target_test.go @@ -70,7 +70,7 @@ func Test_Target(t *testing.T) { t.Run("should return true if the target is not in a configuring state", func(t *testing.T) { target := fixture.Target() - target.Configured(target.CurrentVersion(), nil, nil) + assert.Nil(t, target.Configured(target.CurrentVersion(), nil, nil)) assert.True(t, target.IsOutdated(target.CurrentVersion())) }) @@ -102,10 +102,10 @@ func Test_Target(t *testing.T) { }, renamed) }) - t.Run("should returns an error if the target cleanup has been requested", func(t *testing.T) { + t.Run("should returns an error if the target is being deleted", func(t *testing.T) { target := fixture.Target(fixture.WithTargetName("old-name")) - target.Configured(target.CurrentVersion(), nil, nil) - assert.Nil(t, target.RequestCleanup(false, "uid")) + assert.Nil(t, target.Configured(target.CurrentVersion(), nil, nil)) + assert.Nil(t, target.RequestDelete(false, "uid")) assert.ErrorIs(t, domain.ErrTargetCleanupRequested, target.Rename("new-name")) }) @@ -145,10 +145,10 @@ func Test_Target(t *testing.T) { assert.HasNEvents(t, 3, &target) }) - t.Run("should returns an error if the target cleanup has been requested", func(t *testing.T) { + t.Run("should returns an error if the target is being deleted", func(t *testing.T) { target := fixture.Target() - target.Configured(target.CurrentVersion(), nil, nil) - assert.Nil(t, target.RequestCleanup(false, "uid")) + assert.Nil(t, target.Configured(target.CurrentVersion(), nil, nil)) + assert.Nil(t, target.RequestDelete(false, "uid")) assert.ErrorIs(t, domain.ErrTargetCleanupRequested, target.ExposeServicesAutomatically( domain.NewTargetUrlRequirement(must.Panic(domain.UrlFrom("http://example.com")), true), @@ -156,7 +156,7 @@ func Test_Target(t *testing.T) { }) }) - t.Run("could be configured as exposing services manually without url", func(t *testing.T) { + t.Run("could be configured as exposing services manually without an url", func(t *testing.T) { t.Run("should raise the event if the target had previously an url", func(t *testing.T) { target := fixture.Target() assert.Nil(t, target.ExposeServicesAutomatically(domain.NewTargetUrlRequirement(must.Panic(domain.UrlFrom("http://example.com")), true))) @@ -179,10 +179,10 @@ func Test_Target(t *testing.T) { assert.HasNEvents(t, 1, &target) }) - t.Run("should 
returns an error if the target cleanup has been requested", func(t *testing.T) { + t.Run("should returns an error if the target is being deleted", func(t *testing.T) { target := fixture.Target() - target.Configured(target.CurrentVersion(), nil, nil) - assert.Nil(t, target.RequestCleanup(false, "uid")) + assert.Nil(t, target.Configured(target.CurrentVersion(), nil, nil)) + assert.Nil(t, target.RequestDelete(false, "uid")) assert.ErrorIs(t, domain.ErrTargetCleanupRequested, target.ExposeServicesManually()) }) @@ -245,10 +245,10 @@ func Test_Target(t *testing.T) { assert.HasNEvents(t, 1, &target) }) - t.Run("should returns an error if the target cleanup has been requested", func(t *testing.T) { + t.Run("should returns an error if the target is being deleted", func(t *testing.T) { target := fixture.Target() - target.Configured(target.CurrentVersion(), nil, nil) - assert.Nil(t, target.RequestCleanup(false, "uid")) + assert.Nil(t, target.Configured(target.CurrentVersion(), nil, nil)) + assert.Nil(t, target.RequestDelete(false, "uid")) assert.ErrorIs(t, domain.ErrTargetCleanupRequested, target.HasProvider(domain.NewProviderConfigRequirement(fixture.ProviderConfig(), true))) }) @@ -369,10 +369,10 @@ func Test_Target(t *testing.T) { assert.HasNEvents(t, 2, &target) }) - t.Run("should be ignored if the target is being configured", func(t *testing.T) { + t.Run("should be ignored if the target is being deleted", func(t *testing.T) { target := fixture.Target() - target.Configured(target.CurrentVersion(), nil, nil) - assert.Nil(t, target.RequestCleanup(false, "uid")) + assert.Nil(t, target.Configured(target.CurrentVersion(), nil, nil)) + assert.Nil(t, target.RequestDelete(false, "uid")) target.ExposeEntrypoints(deployment.Config().AppID(), domain.Production, domain.Services{app.Service, db.Service}) @@ -381,11 +381,11 @@ func Test_Target(t *testing.T) { }) t.Run("could be marked as configured", func(t *testing.T) { - t.Run("should do nothing if the version does not match", func(t *testing.T) { + t.Run("should returns an error if the version does not match", func(t *testing.T) { target := fixture.Target() target.ExposeEntrypoints(deployment.Config().AppID(), domain.Production, domain.Services{app.Service, db.Service}) - target.Configured(target.CurrentVersion().Add(-1*time.Second), domain.TargetEntrypointsAssigned{ + err := target.Configured(target.CurrentVersion().Add(-1*time.Second), domain.TargetEntrypointsAssigned{ deployment.Config().AppID(): { domain.Production: { http.Name(): 3000, @@ -394,15 +394,17 @@ func Test_Target(t *testing.T) { }, }, nil) + assert.ErrorIs(t, domain.ErrTargetConfigurationOutdated, err) assert.HasNEvents(t, 2, &target) }) - t.Run("should do nothing if the version has already been configured", func(t *testing.T) { + t.Run("should returns an error if the version has already been configured", func(t *testing.T) { target := fixture.Target() - target.Configured(target.CurrentVersion(), nil, nil) + assert.Nil(t, target.Configured(target.CurrentVersion(), nil, nil)) - target.Configured(target.CurrentVersion(), nil, nil) + err := target.Configured(target.CurrentVersion(), nil, nil) + assert.ErrorIs(t, domain.ErrTargetConfigurationOutdated, err) assert.HasNEvents(t, 2, &target) stateChanged := assert.EventIs[domain.TargetStateChanged](t, &target, 1) assert.Equal(t, domain.TargetStatusReady, stateChanged.State.Status()) @@ -411,21 +413,22 @@ func Test_Target(t *testing.T) { t.Run("should be marked as failed if an error is given", func(t *testing.T) { target := fixture.Target() 
target.ExposeEntrypoints(deployment.Config().AppID(), domain.Production, domain.Services{app.Service, db.Service}) - err := errors.New("an error") + expectedErr := errors.New("an error") - target.Configured(target.CurrentVersion(), domain.TargetEntrypointsAssigned{ + err := target.Configured(target.CurrentVersion(), domain.TargetEntrypointsAssigned{ deployment.Config().AppID(): { domain.Production: { http.Name(): 3000, tcp.Name(): 3001, }, }, - }, err) + }, expectedErr) + assert.Nil(t, err) assert.HasNEvents(t, 3, &target) stateChanged := assert.EventIs[domain.TargetStateChanged](t, &target, 2) assert.Equal(t, domain.TargetStatusFailed, stateChanged.State.Status()) - assert.Equal(t, err.Error(), stateChanged.State.ErrCode().Get("")) + assert.Equal(t, expectedErr.Error(), stateChanged.State.ErrCode().Get("")) assert.Zero(t, stateChanged.State.LastReadyVersion()) }) @@ -434,7 +437,7 @@ func Test_Target(t *testing.T) { target.ExposeEntrypoints(deployment.Config().AppID(), domain.Production, domain.Services{app.Service, db.Service}) target.ExposeEntrypoints(deployment.Config().AppID(), domain.Staging, domain.Services{app.Service, db.Service}) - target.Configured(target.CurrentVersion(), domain.TargetEntrypointsAssigned{ + err := target.Configured(target.CurrentVersion(), domain.TargetEntrypointsAssigned{ "another-app": { domain.Production: { "some-entrypoint": 5000, @@ -448,6 +451,7 @@ func Test_Target(t *testing.T) { }, }, nil) + assert.Nil(t, err) assert.HasNEvents(t, 5, &target) entrypointsChanged := assert.EventIs[domain.TargetEntrypointsChanged](t, &target, 3) assert.DeepEqual(t, domain.TargetEntrypointsChanged{ @@ -486,7 +490,7 @@ func Test_Target(t *testing.T) { target.ExposeEntrypoints("app", domain.Production, domain.Services{app.Service, db.Service}) target.ExposeEntrypoints(deployment.Config().AppID(), domain.Production, domain.Services{app.Service, db.Service}) target.ExposeEntrypoints(deployment.Config().AppID(), domain.Staging, domain.Services{app.Service, db.Service}) - target.Configured(target.CurrentVersion(), domain.TargetEntrypointsAssigned{ + assert.Nil(t, target.Configured(target.CurrentVersion(), domain.TargetEntrypointsAssigned{ deployment.Config().AppID(): { domain.Production: { http.Name(): 3000, @@ -497,7 +501,7 @@ func Test_Target(t *testing.T) { tcp.Name(): 3003, }, }, - }, nil) + }, nil)) target.UnExposeEntrypoints(deployment.Config().AppID()) @@ -521,7 +525,7 @@ func Test_Target(t *testing.T) { target := fixture.Target() target.ExposeEntrypoints(deployment.Config().AppID(), domain.Production, domain.Services{app.Service, db.Service}) target.ExposeEntrypoints(deployment.Config().AppID(), domain.Staging, domain.Services{app.Service, db.Service}) - target.Configured(target.CurrentVersion(), domain.TargetEntrypointsAssigned{ + assert.Nil(t, target.Configured(target.CurrentVersion(), domain.TargetEntrypointsAssigned{ deployment.Config().AppID(): { domain.Production: { http.Name(): 3000, @@ -532,7 +536,7 @@ func Test_Target(t *testing.T) { tcp.Name(): 3003, }, }, - }, nil) + }, nil)) target.UnExposeEntrypoints(deployment.Config().AppID(), domain.Production) @@ -556,7 +560,7 @@ func Test_Target(t *testing.T) { assert.Nil(t, target.ExposeServicesAutomatically(domain.NewTargetUrlRequirement(must.Panic(domain.UrlFrom("https://example.com")), true))) target.ExposeEntrypoints(deployment.Config().AppID(), domain.Production, domain.Services{app.Service, db.Service}) target.ExposeEntrypoints(deployment.Config().AppID(), domain.Staging, domain.Services{app.Service, db.Service}) - 
target.Configured(target.CurrentVersion(), domain.TargetEntrypointsAssigned{ + assert.Nil(t, target.Configured(target.CurrentVersion(), domain.TargetEntrypointsAssigned{ deployment.Config().AppID(): { domain.Production: { http.Name(): 3000, @@ -567,7 +571,7 @@ func Test_Target(t *testing.T) { tcp.Name(): 3003, }, }, - }, nil) + }, nil)) target.UnExposeEntrypoints(deployment.Config().AppID(), domain.Production) @@ -589,18 +593,18 @@ func Test_Target(t *testing.T) { }) }) - t.Run("should be ignored if the target cleanup has been requested", func(t *testing.T) { + t.Run("should be ignored if the target is being deleted", func(t *testing.T) { target := fixture.Target() target.ExposeEntrypoints(deployment.Config().AppID(), domain.Production, domain.Services{app.Service, db.Service}) - target.Configured(target.CurrentVersion(), domain.TargetEntrypointsAssigned{ + assert.Nil(t, target.Configured(target.CurrentVersion(), domain.TargetEntrypointsAssigned{ deployment.Config().AppID(): { domain.Production: { http.Name(): 3000, tcp.Name(): 3001, }, }, - }, nil) - assert.Nil(t, target.RequestCleanup(false, "uid")) + }, nil)) + assert.Nil(t, target.RequestDelete(false, "uid")) target.UnExposeEntrypoints(deployment.Config().AppID(), domain.Production) @@ -617,22 +621,22 @@ func Test_Target(t *testing.T) { t.Run("when configuration failed", func(t *testing.T) { target := fixture.Target() - target.Configured(target.CurrentVersion(), nil, errors.New("configuration failed")) + assert.Nil(t, target.Configured(target.CurrentVersion(), nil, errors.New("configuration failed"))) assert.ErrorIs(t, domain.ErrTargetConfigurationFailed, target.CheckAvailability()) }) t.Run("when ready", func(t *testing.T) { target := fixture.Target() - target.Configured(target.CurrentVersion(), nil, nil) + assert.Nil(t, target.Configured(target.CurrentVersion(), nil, nil)) assert.Nil(t, target.CheckAvailability()) }) - t.Run("when cleanup requested", func(t *testing.T) { + t.Run("when deleting", func(t *testing.T) { target := fixture.Target() - target.Configured(target.CurrentVersion(), nil, nil) - assert.Nil(t, target.RequestCleanup(false, "uid")) + assert.Nil(t, target.Configured(target.CurrentVersion(), nil, nil)) + assert.Nil(t, target.RequestDelete(false, "uid")) assert.ErrorIs(t, domain.ErrTargetCleanupRequested, target.CheckAvailability()) }) @@ -645,97 +649,91 @@ func Test_Target(t *testing.T) { assert.ErrorIs(t, domain.ErrTargetConfigurationInProgress, target.Reconfigure()) }) - t.Run("should fail if cleanup requested", func(t *testing.T) { + t.Run("should fail if deleting", func(t *testing.T) { target := fixture.Target() - target.Configured(target.CurrentVersion(), nil, nil) - assert.Nil(t, target.RequestCleanup(false, "uid")) + assert.Nil(t, target.Configured(target.CurrentVersion(), nil, nil)) + assert.Nil(t, target.RequestDelete(false, "uid")) assert.ErrorIs(t, domain.ErrTargetCleanupRequested, target.Reconfigure()) }) t.Run("should succeed otherwise", func(t *testing.T) { target := fixture.Target() - target.Configured(target.CurrentVersion(), nil, nil) + firstVersion := target.CurrentVersion() + assert.Nil(t, target.Configured(firstVersion, nil, nil)) assert.Nil(t, target.Reconfigure()) assert.HasNEvents(t, 3, &target) stateChanged := assert.EventIs[domain.TargetStateChanged](t, &target, 2) assert.Equal(t, domain.TargetStatusConfiguring, stateChanged.State.Status()) + assert.Equal(t, firstVersion, stateChanged.State.LastReadyVersion().Get(time.Time{})) }) }) - t.Run("could be marked for cleanup", func(t *testing.T) { + 
t.Run("could be marked for deletion", func(t *testing.T) { t.Run("should returns an err if some applications are using it", func(t *testing.T) { target := fixture.Target() - assert.ErrorIs(t, domain.ErrTargetInUse, target.RequestCleanup(true, "uid")) + assert.ErrorIs(t, domain.ErrTargetInUse, target.RequestDelete(true, "uid")) }) t.Run("should returns an err if configuring", func(t *testing.T) { target := fixture.Target() - assert.ErrorIs(t, domain.ErrTargetConfigurationInProgress, target.RequestCleanup(false, "uid")) + assert.ErrorIs(t, domain.ErrTargetConfigurationInProgress, target.RequestDelete(false, "uid")) }) - t.Run("should succeed otherwise", func(t *testing.T) { + t.Run("should returns an error if already requested", func(t *testing.T) { target := fixture.Target() - target.Configured(target.CurrentVersion(), nil, nil) + assert.Nil(t, target.Configured(target.CurrentVersion(), nil, nil)) + assert.Nil(t, target.RequestDelete(false, "uid")) - assert.Nil(t, target.RequestCleanup(false, "uid")) + assert.ErrorIs(t, domain.ErrTargetCleanupRequested, target.RequestDelete(false, "uid")) assert.HasNEvents(t, 3, &target) - requested := assert.EventIs[domain.TargetCleanupRequested](t, &target, 2) - assert.Equal(t, domain.TargetCleanupRequested{ - ID: target.ID(), - Requested: shared.ActionFrom[auth.UserID]("uid", assert.NotZero(t, requested.Requested.At())), - }, requested) }) - t.Run("should do nothing if already being cleaned up", func(t *testing.T) { + t.Run("should succeed otherwise", func(t *testing.T) { target := fixture.Target() - target.Configured(target.CurrentVersion(), nil, nil) - assert.Nil(t, target.RequestCleanup(false, "uid")) + assert.Nil(t, target.Configured(target.CurrentVersion(), nil, nil)) - assert.Nil(t, target.RequestCleanup(false, "uid")) + assert.Nil(t, target.RequestDelete(false, "uid")) assert.HasNEvents(t, 3, &target) + requested := assert.EventIs[domain.TargetCleanupRequested](t, &target, 2) + assert.Equal(t, domain.TargetCleanupRequested{ + ID: target.ID(), + Requested: shared.ActionFrom(auth.UserID("uid"), assert.NotZero(t, requested.Requested.At())), + }, requested) }) }) t.Run("should expose a cleanup strategy to determine how the target resources should be handled", func(t *testing.T) { - t.Run("should returns an error if there are running or pending deployments on the target", func(t *testing.T) { + t.Run("should returns an error if the target is not being deleted", func(t *testing.T) { target := fixture.Target() - _, err := target.CleanupStrategy(true) + _, err := target.CanBeCleaned(false) - assert.ErrorIs(t, domain.ErrRunningOrPendingDeployments, err) + assert.ErrorIs(t, domain.ErrTargetCleanupNeeded, err) }) - t.Run("should returns an error if the target is being configured", func(t *testing.T) { - target := fixture.Target() - - _, err := target.CleanupStrategy(false) - - assert.ErrorIs(t, domain.ErrTargetConfigurationInProgress, err) - }) - - t.Run("should returns an error if the target configuration has failed and it has been at least ready once", func(t *testing.T) { + t.Run("should returns an error if there are running or pending deployments on the target", func(t *testing.T) { target := fixture.Target() - target.Configured(target.CurrentVersion(), nil, nil) - assert.Nil(t, target.Reconfigure()) - target.Configured(target.CurrentVersion(), nil, errors.New("failed")) + assert.Nil(t, target.Configured(target.CurrentVersion(), nil, nil)) + assert.Nil(t, target.RequestDelete(false, "uid")) - _, err := target.CleanupStrategy(false) + _, err := 
target.CanBeCleaned(true) - assert.ErrorIs(t, domain.ErrTargetConfigurationFailed, err) + assert.ErrorIs(t, domain.ErrRunningOrPendingDeployments, err) }) - t.Run("should returns the skip strategy if the target has never been correctly configured and is currently failing", func(t *testing.T) { + t.Run("should returns the skip strategy if the target is currently failing", func(t *testing.T) { target := fixture.Target() - target.Configured(target.CurrentVersion(), nil, errors.New("failed")) + assert.Nil(t, target.Configured(target.CurrentVersion(), nil, errors.New("failed"))) + assert.Nil(t, target.RequestDelete(false, "uid")) - strategy, err := target.CleanupStrategy(false) + strategy, err := target.CanBeCleaned(false) assert.Nil(t, err) assert.Equal(t, domain.CleanupStrategySkip, strategy) @@ -743,9 +741,10 @@ func Test_Target(t *testing.T) { t.Run("should returns the default strategy if the target is ready", func(t *testing.T) { target := fixture.Target() - target.Configured(target.CurrentVersion(), nil, nil) + assert.Nil(t, target.Configured(target.CurrentVersion(), nil, nil)) + assert.Nil(t, target.RequestDelete(false, "uid")) - strategy, err := target.CleanupStrategy(false) + strategy, err := target.CanBeCleaned(false) assert.Nil(t, err) assert.Equal(t, domain.CleanupStrategyDefault, strategy) @@ -755,10 +754,10 @@ func Test_Target(t *testing.T) { t.Run("should expose an application cleanup strategy to determine how application resources should be handled", func(t *testing.T) { t.Run("should returns the skip strategy if the target is being cleaned up", func(t *testing.T) { target := fixture.Target() - target.Configured(target.CurrentVersion(), nil, nil) - assert.Nil(t, target.RequestCleanup(false, "uid")) + assert.Nil(t, target.Configured(target.CurrentVersion(), nil, nil)) + assert.Nil(t, target.RequestDelete(false, "uid")) - strategy, err := target.AppCleanupStrategy(false, true) + strategy, err := target.CanAppBeCleaned(false, true) assert.Nil(t, err) assert.Equal(t, domain.CleanupStrategySkip, strategy) @@ -767,7 +766,7 @@ func Test_Target(t *testing.T) { t.Run("should returns an error if there are still running deployments on the target for this application", func(t *testing.T) { target := fixture.Target() - _, err := target.AppCleanupStrategy(true, true) + _, err := target.CanAppBeCleaned(true, true) assert.ErrorIs(t, domain.ErrRunningOrPendingDeployments, err) }) @@ -775,7 +774,7 @@ func Test_Target(t *testing.T) { t.Run("should returns the skip strategy if no successful deployment has been made and no one is running", func(t *testing.T) { target := fixture.Target() - strategy, err := target.AppCleanupStrategy(false, false) + strategy, err := target.CanAppBeCleaned(false, false) assert.Nil(t, err) assert.Equal(t, domain.CleanupStrategySkip, strategy) @@ -784,50 +783,46 @@ func Test_Target(t *testing.T) { t.Run("should returns an error if the target is being configured", func(t *testing.T) { target := fixture.Target() - _, err := target.AppCleanupStrategy(false, true) + _, err := target.CanAppBeCleaned(false, true) assert.ErrorIs(t, domain.ErrTargetConfigurationInProgress, err) }) t.Run("should returns an error if the target configuration has failed", func(t *testing.T) { target := fixture.Target() - target.Configured(target.CurrentVersion(), nil, errors.New("failed")) + assert.Nil(t, target.Configured(target.CurrentVersion(), nil, errors.New("failed"))) - _, err := target.AppCleanupStrategy(false, true) + _, err := target.CanAppBeCleaned(false, true) assert.ErrorIs(t, 
domain.ErrTargetConfigurationFailed, err) }) t.Run("should returns the default strategy if the target is ready", func(t *testing.T) { target := fixture.Target() - target.Configured(target.CurrentVersion(), nil, nil) + assert.Nil(t, target.Configured(target.CurrentVersion(), nil, nil)) - strategy, err := target.AppCleanupStrategy(false, true) + strategy, err := target.CanAppBeCleaned(false, true) assert.Nil(t, err) assert.Equal(t, domain.CleanupStrategyDefault, strategy) }) }) - t.Run("could be deleted", func(t *testing.T) { + t.Run("could be mark as cleaned up", func(t *testing.T) { t.Run("should returns an error if the target has not been mark for cleanup", func(t *testing.T) { target := fixture.Target() - assert.ErrorIs(t, domain.ErrTargetCleanupNeeded, target.Delete(true)) - }) - - t.Run("should returns an error if the target resources has not been cleaned up", func(t *testing.T) { - target := fixture.Target() - - assert.ErrorIs(t, domain.ErrTargetCleanupNeeded, target.Delete(false)) + assert.ErrorIs(t, domain.ErrTargetCleanupNeeded, target.CleanedUp()) }) t.Run("should succeed otherwise", func(t *testing.T) { target := fixture.Target() - target.Configured(target.CurrentVersion(), nil, nil) - assert.Nil(t, target.RequestCleanup(false, "uid")) + assert.Nil(t, target.Configured(target.CurrentVersion(), nil, nil)) + assert.Nil(t, target.RequestDelete(false, "uid")) - assert.Nil(t, target.Delete(true)) + err := target.CleanedUp() + + assert.Nil(t, err) assert.HasNEvents(t, 4, &target) deleted := assert.EventIs[domain.TargetDeleted](t, &target, 3) assert.Equal(t, domain.TargetDeleted{ @@ -838,10 +833,10 @@ func Test_Target(t *testing.T) { } func Test_TargetEvents(t *testing.T) { - t.Run("should provide a function to check for configuration changes", func(t *testing.T) { + t.Run("should provide a function to check if the target went to the configuring state", func(t *testing.T) { t.Run("should return false if the state is not configuring", func(t *testing.T) { target := fixture.Target() - target.Configured(target.CurrentVersion(), nil, nil) + assert.Nil(t, target.Configured(target.CurrentVersion(), nil, nil)) evt := assert.EventIs[domain.TargetStateChanged](t, &target, 1) assert.False(t, evt.WentToConfiguringState()) @@ -849,7 +844,7 @@ func Test_TargetEvents(t *testing.T) { t.Run("should return true if going to the configuring state", func(t *testing.T) { target := fixture.Target() - target.Configured(target.CurrentVersion(), nil, nil) + assert.Nil(t, target.Configured(target.CurrentVersion(), nil, nil)) assert.Nil(t, target.Reconfigure()) evt := assert.EventIs[domain.TargetStateChanged](t, &target, 2) diff --git a/internal/deployment/infra/mod.go b/internal/deployment/infra/mod.go index 568ec385..03fb101f 100644 --- a/internal/deployment/infra/mod.go +++ b/internal/deployment/infra/mod.go @@ -10,9 +10,7 @@ import ( "github.com/YuukanOO/seelf/internal/deployment/app/create_app" "github.com/YuukanOO/seelf/internal/deployment/app/create_registry" "github.com/YuukanOO/seelf/internal/deployment/app/create_target" - "github.com/YuukanOO/seelf/internal/deployment/app/delete_app" "github.com/YuukanOO/seelf/internal/deployment/app/delete_registry" - "github.com/YuukanOO/seelf/internal/deployment/app/delete_target" "github.com/YuukanOO/seelf/internal/deployment/app/deploy" "github.com/YuukanOO/seelf/internal/deployment/app/expose_seelf_container" "github.com/YuukanOO/seelf/internal/deployment/app/fail_pending_deployments" @@ -58,7 +56,7 @@ func Setup( deploymentsStore := 
deploymentsqlite.NewDeploymentsStore(db) targetsStore := deploymentsqlite.NewTargetsStore(db) registriesStore := deploymentsqlite.NewRegistriesStore(db) - deploymentQueryHandler := deploymentsqlite.NewGateway(db) + gateway := deploymentsqlite.NewGateway(db) artifactManager := artifact.NewLocal(opts, logger) @@ -79,8 +77,7 @@ func Setup( bus.Register(b, queue_deployment.Handler(appsStore, deploymentsStore, deploymentsStore, sourceFacade)) bus.Register(b, deploy.Handler(deploymentsStore, deploymentsStore, artifactManager, sourceFacade, providerFacade, targetsStore, registriesStore)) bus.Register(b, request_app_cleanup.Handler(appsStore, appsStore)) - bus.Register(b, delete_app.Handler(appsStore, appsStore, artifactManager)) - bus.Register(b, cleanup_app.Handler(targetsStore, deploymentsStore, providerFacade)) + bus.Register(b, cleanup_app.Handler(targetsStore, deploymentsStore, appsStore, appsStore, providerFacade)) bus.Register(b, get_deployment_log.Handler(deploymentsStore, artifactManager)) bus.Register(b, redeploy.Handler(appsStore, deploymentsStore, deploymentsStore)) bus.Register(b, promote.Handler(appsStore, deploymentsStore, deploymentsStore)) @@ -89,35 +86,36 @@ func Setup( bus.Register(b, reconfigure_target.Handler(targetsStore, targetsStore)) bus.Register(b, update_target.Handler(targetsStore, targetsStore, providerFacade)) bus.Register(b, request_target_cleanup.Handler(targetsStore, targetsStore, appsStore)) - bus.Register(b, cleanup_target.Handler(targetsStore, deploymentsStore, providerFacade)) - bus.Register(b, delete_target.Handler(targetsStore, targetsStore, providerFacade)) + bus.Register(b, cleanup_target.Handler(targetsStore, targetsStore, deploymentsStore, providerFacade)) bus.Register(b, create_registry.Handler(registriesStore, registriesStore)) bus.Register(b, update_registry.Handler(registriesStore, registriesStore)) bus.Register(b, delete_registry.Handler(registriesStore, registriesStore)) - bus.Register(b, deploymentQueryHandler.GetAllApps) - bus.Register(b, deploymentQueryHandler.GetAppByID) - bus.Register(b, deploymentQueryHandler.GetAllDeploymentsByApp) - bus.Register(b, deploymentQueryHandler.GetDeploymentByID) - bus.Register(b, deploymentQueryHandler.GetAllTargets) - bus.Register(b, deploymentQueryHandler.GetTargetByID) - bus.Register(b, deploymentQueryHandler.GetRegistries) - bus.Register(b, deploymentQueryHandler.GetRegistryByID) + bus.Register(b, gateway.GetAllApps) + bus.Register(b, gateway.GetAppByID) + bus.Register(b, gateway.GetAllDeploymentsByApp) + bus.Register(b, gateway.GetDeploymentByID) + bus.Register(b, gateway.GetAllTargets) + bus.Register(b, gateway.GetTargetByID) + bus.Register(b, gateway.GetRegistries) + bus.Register(b, gateway.GetRegistryByID) bus.On(b, deploy.OnDeploymentCreatedHandler(scheduler)) bus.On(b, redeploy.OnAppEnvChangedHandler(appsStore, deploymentsStore, deploymentsStore)) - bus.On(b, delete_app.OnAppCleanupRequestedHandler(scheduler)) + bus.On(b, cleanup_app.OnAppDeletedHandler(artifactManager)) bus.On(b, cleanup_app.OnAppEnvChangedHandler(scheduler)) bus.On(b, cleanup_app.OnAppCleanupRequestedHandler(scheduler)) - bus.On(b, fail_pending_deployments.OnTargetDeleteRequestedHandler(deploymentsStore)) + bus.On(b, cleanup_app.OnJobDismissedHandler(appsStore, appsStore)) + bus.On(b, fail_pending_deployments.OnTargetCleanupRequestedHandler(deploymentsStore)) bus.On(b, fail_pending_deployments.OnAppCleanupRequestedHandler(deploymentsStore)) bus.On(b, fail_pending_deployments.OnAppEnvChangedHandler(deploymentsStore)) bus.On(b, 
cleanup_target.OnTargetCleanupRequestedHandler(scheduler)) + bus.On(b, cleanup_target.OnTargetDeletedHandler((providerFacade))) + bus.On(b, cleanup_target.OnJobDismissedHandler(targetsStore, targetsStore)) bus.On(b, configure_target.OnTargetCreatedHandler(scheduler)) bus.On(b, configure_target.OnTargetStateChangedHandler(scheduler)) bus.On(b, configure_target.OnDeploymentStateChangedHandler(targetsStore, targetsStore)) bus.On(b, configure_target.OnAppEnvChangedHandler(targetsStore, targetsStore)) bus.On(b, configure_target.OnAppCleanupRequestedHandler(targetsStore, targetsStore)) - bus.On(b, delete_target.OnTargetCleanupRequestedHandler(scheduler)) if err := db.Migrate(deploymentsqlite.Migrations); err != nil { return err diff --git a/internal/deployment/infra/provider/docker/provider.go b/internal/deployment/infra/provider/docker/provider.go index aeffbd82..cf16f4f7 100644 --- a/internal/deployment/infra/provider/docker/provider.go +++ b/internal/deployment/infra/provider/docker/provider.go @@ -198,8 +198,8 @@ func (d *docker) Setup(ctx context.Context, target domain.Target) (domain.Target }) } -func (d *docker) RemoveConfiguration(_ context.Context, target domain.Target) error { - return d.sshConfig.Remove(string(target.ID())) +func (d *docker) RemoveConfiguration(_ context.Context, target domain.TargetID) error { + return d.sshConfig.Remove(string(target)) } func (d *docker) PrepareLocal(context.Context) (domain.ProviderConfig, error) { diff --git a/internal/deployment/infra/provider/docker/provider_test.go b/internal/deployment/infra/provider/docker/provider_test.go index e632f88f..bf945815 100644 --- a/internal/deployment/infra/provider/docker/provider_test.go +++ b/internal/deployment/infra/provider/docker/provider_test.go @@ -431,14 +431,14 @@ wSD0v0RcmkITP1ZR0AAAAYcHF1ZXJuYUBMdWNreUh5ZHJvLmxvY2FsAQID tcp := service.AddTCPEntrypoint(5432, true) udp := service.AddUDPEntrypoint(5433, true) target.ExposeEntrypoints(deployment.ID().AppID(), deployment.Config().Environment(), builder.Services()) - target.Configured(target.CurrentVersion(), domain.TargetEntrypointsAssigned{ + assert.Nil(t, target.Configured(target.CurrentVersion(), domain.TargetEntrypointsAssigned{ deployment.ID().AppID(): { deployment.Config().Environment(): { tcp.Name(): 5432, udp.Name(): 5433, }, }, - }, nil) + }, nil)) newTcp := service.AddTCPEntrypoint(5434, true) newUdp := service.AddUDPEntrypoint(5435, true) target.ExposeEntrypoints(deployment.ID().AppID(), deployment.Config().Environment(), builder.Services()) diff --git a/internal/deployment/infra/provider/facade.go b/internal/deployment/infra/provider/facade.go index 8df1501b..52238da5 100644 --- a/internal/deployment/infra/provider/facade.go +++ b/internal/deployment/infra/provider/facade.go @@ -53,14 +53,14 @@ func (f *facade) Setup(ctx context.Context, target domain.Target) (domain.Target return provider.Setup(ctx, target) } -func (f *facade) RemoveConfiguration(ctx context.Context, target domain.Target) error { - provider, err := f.providerForTarget(target) - - if err != nil { - return err +func (f *facade) RemoveConfiguration(ctx context.Context, target domain.TargetID) error { + for _, p := range f.providers { + if err := p.RemoveConfiguration(ctx, target); err != nil { + return err + } } - return provider.RemoveConfiguration(ctx, target) + return nil } func (f *facade) CleanupTarget(ctx context.Context, target domain.Target, strategy domain.CleanupStrategy) error { diff --git a/internal/deployment/infra/provider/facade_test.go 
b/internal/deployment/infra/provider/facade_test.go index 014bc92e..43220241 100644 --- a/internal/deployment/infra/provider/facade_test.go +++ b/internal/deployment/infra/provider/facade_test.go @@ -38,13 +38,13 @@ func Test_Facade(t *testing.T) { assert.ErrorIs(t, domain.ErrNoValidProviderFound, err) }) - t.Run("should return an error if no provider can unconfigure the target", func(t *testing.T) { + t.Run("should return nil if no provider can unconfigure the target", func(t *testing.T) { sut := provider.NewFacade() target := fixture.Target() - err := sut.RemoveConfiguration(context.Background(), target) + err := sut.RemoveConfiguration(context.Background(), target.ID()) - assert.ErrorIs(t, domain.ErrNoValidProviderFound, err) + assert.Nil(t, err) }) t.Run("should return an error if no provider can cleanup the target", func(t *testing.T) { diff --git a/internal/deployment/infra/sqlite/apps.go b/internal/deployment/infra/sqlite/apps.go index db5a2b07..bd842d56 100644 --- a/internal/deployment/infra/sqlite/apps.go +++ b/internal/deployment/infra/sqlite/apps.go @@ -143,6 +143,7 @@ func (s *appsStore) GetByID(ctx context.Context, id domain.AppID) (domain.App, e ,staging_vars ,cleanup_requested_at ,cleanup_requested_by + ,history ,created_at ,created_by FROM apps @@ -151,7 +152,7 @@ func (s *appsStore) GetByID(ctx context.Context, id domain.AppID) (domain.App, e } func (s *appsStore) Write(c context.Context, apps ...*domain.App) error { - return sqlite.WriteAndDispatch(s.db, c, apps, func(ctx context.Context, e event.Event) error { + return sqlite.WriteEvents(s.db, c, apps, func(ctx context.Context, e event.Event) error { switch evt := e.(type) { case domain.AppCreated: return builder. @@ -164,10 +165,18 @@ func (s *appsStore) Write(c context.Context, apps ...*domain.App) error { "staging_target": evt.Staging.Target(), "staging_version": evt.Staging.Version(), "staging_vars": evt.Staging.Vars(), + "history": evt.History, "created_at": evt.Created.At(), "created_by": evt.Created.By(), }). Exec(s.db, ctx) + case domain.AppHistoryChanged: + return builder. + Update("apps", builder.Values{ + "history": evt.History, + }). + F("WHERE id = ?", evt.ID). + Exec(s.db, ctx) case domain.AppEnvChanged: // This is safe to interpolate the column name here since events are raised by our // own code. diff --git a/internal/deployment/infra/sqlite/deployments.go b/internal/deployment/infra/sqlite/deployments.go index db61b94d..be083468 100644 --- a/internal/deployment/infra/sqlite/deployments.go +++ b/internal/deployment/infra/sqlite/deployments.go @@ -131,7 +131,7 @@ func (s *deploymentsStore) HasDeploymentsOnAppTargetEnv(ctx context.Context, app domain.HasSuccessfulDeploymentsOnAppTargetEnv(c.successful), err } -func (s *deploymentsStore) FailDeployments(ctx context.Context, reason error, criterias domain.FailCriteria) error { +func (s *deploymentsStore) FailDeployments(ctx context.Context, reason error, criteria domain.FailCriteria) error { now := time.Now().UTC() return builder.Update("deployments", builder.Values{ @@ -142,16 +142,16 @@ func (s *deploymentsStore) FailDeployments(ctx context.Context, reason error, cr }). F("WHERE TRUE"). 
S( - builder.MaybeValue(criterias.App, "AND app_id = ?"), - builder.MaybeValue(criterias.Target, "AND config_target = ?"), - builder.MaybeValue(criterias.Status, "AND state_status = ?"), - builder.MaybeValue(criterias.Environment, "AND config_environment = ?"), + builder.MaybeValue(criteria.App, "AND app_id = ?"), + builder.MaybeValue(criteria.Target, "AND config_target = ?"), + builder.MaybeValue(criteria.Status, "AND state_status = ?"), + builder.MaybeValue(criteria.Environment, "AND config_environment = ?"), ). Exec(s.db, ctx) } func (s *deploymentsStore) Write(c context.Context, deployments ...*domain.Deployment) error { - return sqlite.WriteAndDispatch(s.db, c, deployments, func(ctx context.Context, e event.Event) error { + return sqlite.WriteEvents(s.db, c, deployments, func(ctx context.Context, e event.Event) error { switch evt := e.(type) { case domain.DeploymentCreated: return builder. diff --git a/internal/deployment/infra/sqlite/gateway.go b/internal/deployment/infra/sqlite/gateway.go index 6db4bfda..16bb78c9 100644 --- a/internal/deployment/infra/sqlite/gateway.go +++ b/internal/deployment/infra/sqlite/gateway.go @@ -19,15 +19,15 @@ import ( "github.com/YuukanOO/seelf/pkg/storage/sqlite/builder" ) -type gateway struct { +type Gateway struct { db *sqlite.Database } -func NewGateway(db *sqlite.Database) *gateway { - return &gateway{db} +func NewGateway(db *sqlite.Database) *Gateway { + return &Gateway{db} } -func (s *gateway) GetAllApps(ctx context.Context, cmd get_apps.Query) ([]get_apps.App, error) { +func (s *Gateway) GetAllApps(ctx context.Context, cmd get_apps.Query) ([]get_apps.App, error) { return builder. Query[get_apps.App](` SELECT @@ -53,7 +53,7 @@ func (s *gateway) GetAllApps(ctx context.Context, cmd get_apps.Query) ([]get_app All(s.db, ctx, appDataMapper, getDeploymentDataloader) } -func (s *gateway) GetAppByID(ctx context.Context, cmd get_app_detail.Query) (get_app_detail.App, error) { +func (s *Gateway) GetAppByID(ctx context.Context, cmd get_app_detail.Query) (get_app_detail.App, error) { return builder. Query[get_app_detail.App](` SELECT @@ -84,7 +84,7 @@ func (s *gateway) GetAppByID(ctx context.Context, cmd get_app_detail.Query) (get One(s.db, ctx, appDetailDataMapper, getDeploymentDetailDataloader) } -func (s *gateway) GetAllDeploymentsByApp(ctx context.Context, cmd get_app_deployments.Query) (storage.Paginated[get_app_deployments.Deployment], error) { +func (s *Gateway) GetAllDeploymentsByApp(ctx context.Context, cmd get_app_deployments.Query) (storage.Paginated[get_app_deployments.Deployment], error) { return builder. Select[get_app_deployments.Deployment](` deployments.app_id @@ -114,7 +114,7 @@ func (s *gateway) GetAllDeploymentsByApp(ctx context.Context, cmd get_app_deploy Paginate(s.db, ctx, deploymentMapper(nil), cmd.Page.Get(1), 5) } -func (s *gateway) GetDeploymentByID(ctx context.Context, cmd get_deployment.Query) (get_deployment.Deployment, error) { +func (s *Gateway) GetDeploymentByID(ctx context.Context, cmd get_deployment.Query) (get_deployment.Deployment, error) { return builder. Query[get_deployment.Deployment](` SELECT @@ -144,7 +144,7 @@ func (s *gateway) GetDeploymentByID(ctx context.Context, cmd get_deployment.Quer One(s.db, ctx, deploymentDetailMapper(nil)) } -func (s *gateway) GetAllTargets(ctx context.Context, cmd get_targets.Query) ([]get_target.Target, error) { +func (s *Gateway) GetAllTargets(ctx context.Context, cmd get_targets.Query) ([]get_target.Target, error) { return builder. 
Query[get_target.Target](` SELECT @@ -171,7 +171,7 @@ func (s *gateway) GetAllTargets(ctx context.Context, cmd get_targets.Query) ([]g All(s.db, ctx, targetMapper) } -func (s *gateway) GetTargetByID(ctx context.Context, cmd get_target.Query) (get_target.Target, error) { +func (s *Gateway) GetTargetByID(ctx context.Context, cmd get_target.Query) (get_target.Target, error) { return builder. Query[get_target.Target](` SELECT @@ -196,7 +196,7 @@ func (s *gateway) GetTargetByID(ctx context.Context, cmd get_target.Query) (get_ One(s.db, ctx, targetMapper) } -func (s *gateway) GetRegistries(ctx context.Context, cmd get_registries.Query) ([]get_registry.Registry, error) { +func (s *Gateway) GetRegistries(ctx context.Context, cmd get_registries.Query) ([]get_registry.Registry, error) { return builder. Query[get_registry.Registry](` SELECT @@ -213,7 +213,7 @@ func (s *gateway) GetRegistries(ctx context.Context, cmd get_registries.Query) ( All(s.db, ctx, registryMapper) } -func (s *gateway) GetRegistryByID(ctx context.Context, cmd get_registry.Query) (get_registry.Registry, error) { +func (s *Gateway) GetRegistryByID(ctx context.Context, cmd get_registry.Query) (get_registry.Registry, error) { return builder. Query[get_registry.Registry](` SELECT diff --git a/internal/deployment/infra/sqlite/migrations/1728345059_add_app_cleanup_needed_on.up.sql b/internal/deployment/infra/sqlite/migrations/1728345059_add_app_cleanup_needed_on.up.sql new file mode 100644 index 00000000..ec868546 --- /dev/null +++ b/internal/deployment/infra/sqlite/migrations/1728345059_add_app_cleanup_needed_on.up.sql @@ -0,0 +1,32 @@ +-- Since deployment jobs group has changed, just mark all pending deployments as failed +UPDATE deployments +SET + state_status = 2, + state_errcode = 'seelf_incompatible_upgrade', + state_started_at = datetime('now'), + state_finished_at = datetime('now') +WHERE state_status = 0; -- Running jobs will be failed with the server_reset error so no need to handle them here + +ALTER TABLE apps ADD history TEXT NOT NULL DEFAULT '{}'; +UPDATE apps SET history = '{"production": ["' || production_target || '"], "staging": ["'|| staging_target ||'"]}'; + +-- Since the group has changed for configure and cleanup, just update those jobs. +UPDATE scheduled_jobs +SET + [group] = resource_id +WHERE message_name IN ('deployment.command.configure_target', 'deployment.command.cleanup_target'); + +-- Since those messages no longer exists. +DELETE FROM scheduled_jobs WHERE message_name IN ('deployment.command.delete_target', 'deployment.command.delete_app'); + +-- When a target is configured, no need to keep old configure jobs since they are outdated. 
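+-- The trigger fires BEFORE INSERT of a new configure_target job and only
+-- removes jobs for the same resource that have not been retrieved yet
+-- (retrieved = 0), so jobs currently being processed are left untouched.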
+CREATE TRIGGER IF NOT EXISTS on_target_configure_remove_outdated_jobs +BEFORE INSERT ON scheduled_jobs +WHEN NEW.message_name = 'deployment.command.configure_target' +BEGIN + DELETE FROM scheduled_jobs + WHERE + resource_id = NEW.resource_id + AND message_name = 'deployment.command.configure_target' + AND retrieved = 0; +END \ No newline at end of file diff --git a/internal/deployment/infra/sqlite/registries.go b/internal/deployment/infra/sqlite/registries.go index 5b0d859d..a0170694 100644 --- a/internal/deployment/infra/sqlite/registries.go +++ b/internal/deployment/infra/sqlite/registries.go @@ -66,7 +66,7 @@ func (s *registriesStore) GetAll(ctx context.Context) ([]domain.Registry, error) } func (s *registriesStore) Write(ctx context.Context, registries ...*domain.Registry) error { - return sqlite.WriteAndDispatch(s.db, ctx, registries, func(ctx context.Context, e event.Event) error { + return sqlite.WriteEvents(s.db, ctx, registries, func(ctx context.Context, e event.Event) error { switch evt := e.(type) { case domain.RegistryCreated: return builder. diff --git a/internal/deployment/infra/sqlite/targets.go b/internal/deployment/infra/sqlite/targets.go index 5eee1ae2..ed99dac3 100644 --- a/internal/deployment/infra/sqlite/targets.go +++ b/internal/deployment/infra/sqlite/targets.go @@ -92,7 +92,7 @@ func (s *targetsStore) GetByID(ctx context.Context, id domain.TargetID) (domain. } func (s *targetsStore) Write(c context.Context, targets ...*domain.Target) error { - return sqlite.WriteAndDispatch(s.db, c, targets, func(ctx context.Context, e event.Event) error { + return sqlite.WriteEvents(s.db, c, targets, func(ctx context.Context, e event.Event) error { switch evt := e.(type) { case domain.TargetCreated: return builder. diff --git a/pkg/bus/dispatcher.go b/pkg/bus/dispatcher.go index 4cf2d7fb..aad4881e 100644 --- a/pkg/bus/dispatcher.go +++ b/pkg/bus/dispatcher.go @@ -5,7 +5,6 @@ import ( "errors" "github.com/YuukanOO/seelf/pkg/storage" - "github.com/YuukanOO/seelf/pkg/types" ) var ErrNoHandlerRegistered = errors.New("no_handler_registered") @@ -13,8 +12,10 @@ var ErrNoHandlerRegistered = errors.New("no_handler_registered") type ( // Handler for a specific message. RequestHandler[TResult any, TMsg TypedRequest[TResult]] func(context.Context, TMsg) (TResult, error) - // Handler for signal. + + // Handler for a signal. SignalHandler[TSignal Signal] func(context.Context, TSignal) error + // Generic handler (as seen by middlewares). NextFunc func(context.Context, Message) (any, error) @@ -45,12 +46,18 @@ func Register[TResult any, TMsg TypedRequest[TResult]](bus Bus, handler RequestH bus.Register(msg, h) - // If the message is schedulable, register the unmarshaller automatically. + // If the message is an AsyncRequest, register the unmarshaller automatically. // This is done here because of the known type TMsg but maybe I should try to // move it to bus/memory in the future. 
- if types.Is[Schedulable](msg) { - Marshallable.Register(msg, func(s string) (Request, error) { - return storage.UnmarshalJSON[TMsg](s) + if asyncRequest, ok := any(msg).(AsyncRequest); ok { + Marshallable.Register(asyncRequest, func(s string) (AsyncRequest, error) { + m, err := storage.UnmarshalJSON[TMsg](s) + + if err != nil { + return nil, err + } + + return any(m).(AsyncRequest), nil }) } } diff --git a/pkg/bus/embedded/dismiss_job/dismiss_job.go b/pkg/bus/embedded/dismiss_job/dismiss_job.go new file mode 100644 index 00000000..2516ec33 --- /dev/null +++ b/pkg/bus/embedded/dismiss_job/dismiss_job.go @@ -0,0 +1,11 @@ +package dismiss_job + +import "github.com/YuukanOO/seelf/pkg/bus" + +type Command struct { + bus.Command[bus.UnitType] + + ID string `json:"id"` +} + +func (Command) Name_() string { return "bus.command.dismiss_job" } diff --git a/pkg/bus/memory/dispatcher.go b/pkg/bus/embedded/dispatcher.go similarity index 99% rename from pkg/bus/memory/dispatcher.go rename to pkg/bus/embedded/dispatcher.go index 64c58197..b2911324 100644 --- a/pkg/bus/memory/dispatcher.go +++ b/pkg/bus/embedded/dispatcher.go @@ -1,4 +1,4 @@ -package memory +package embedded import ( "context" diff --git a/pkg/bus/memory/dispatcher_test.go b/pkg/bus/embedded/dispatcher_test.go similarity index 92% rename from pkg/bus/memory/dispatcher_test.go rename to pkg/bus/embedded/dispatcher_test.go index b7bbb02b..fdc0b643 100644 --- a/pkg/bus/memory/dispatcher_test.go +++ b/pkg/bus/embedded/dispatcher_test.go @@ -1,4 +1,4 @@ -package memory_test +package embedded_test import ( "context" @@ -7,12 +7,12 @@ import ( "github.com/YuukanOO/seelf/pkg/assert" "github.com/YuukanOO/seelf/pkg/bus" - "github.com/YuukanOO/seelf/pkg/bus/memory" + "github.com/YuukanOO/seelf/pkg/bus/embedded" ) -func TestBus(t *testing.T) { +func Test_Bus(t *testing.T) { t.Run("should accepts registration of all message kind", func(t *testing.T) { - local := memory.NewBus() + local := embedded.NewBus() bus.Register(local, addCommandHandler) bus.Register(local, getQueryHandler) @@ -27,14 +27,14 @@ func TestBus(t *testing.T) { } }() - local := memory.NewBus() + local := embedded.NewBus() bus.Register(local, addCommandHandler) bus.Register(local, addCommandHandler) }) t.Run("should returns an error if no handler is registered for a given request", func(t *testing.T) { - local := memory.NewBus() + local := embedded.NewBus() _, err := bus.Send(local, context.Background(), &addCommand{}) @@ -42,7 +42,7 @@ func TestBus(t *testing.T) { }) t.Run("should returns the request handler error back if any", func(t *testing.T) { - local := memory.NewBus() + local := embedded.NewBus() expectedErr := errors.New("handler error") bus.Register(local, func(ctx context.Context, cmd addCommand) (int, error) { @@ -55,7 +55,7 @@ func TestBus(t *testing.T) { }) t.Run("should call the appropriate request handler and returns the result", func(t *testing.T) { - local := memory.NewBus() + local := embedded.NewBus() bus.Register(local, addCommandHandler) bus.Register(local, getQueryHandler) @@ -74,7 +74,7 @@ func TestBus(t *testing.T) { }) t.Run("should do nothing if no signal handler is registered for a given signal", func(t *testing.T) { - local := memory.NewBus() + local := embedded.NewBus() err := local.Notify(context.Background(), registeredNotification{}) @@ -82,7 +82,7 @@ func TestBus(t *testing.T) { }) t.Run("should returns a signal handler error back if any", func(t *testing.T) { - local := memory.NewBus() + local := embedded.NewBus() expectedErr := errors.New("handler 
error") bus.On(local, func(ctx context.Context, notif registeredNotification) error { @@ -100,7 +100,7 @@ func TestBus(t *testing.T) { t.Run("should call every signal handlers registered for the given signal", func(t *testing.T) { var ( - local = memory.NewBus() + local = embedded.NewBus() firstOneCalled = false secondOneCalled = false ) @@ -124,7 +124,7 @@ func TestBus(t *testing.T) { t.Run("should call every middlewares registered", func(t *testing.T) { calls := make([]int, 0) - local := memory.NewBus( + local := embedded.NewBus( func(next bus.NextFunc) bus.NextFunc { return func(ctx context.Context, m bus.Message) (any, error) { calls = append(calls, 1) diff --git a/pkg/bus/embedded/get_jobs/get_jobs.go b/pkg/bus/embedded/get_jobs/get_jobs.go new file mode 100644 index 00000000..28a8c2b1 --- /dev/null +++ b/pkg/bus/embedded/get_jobs/get_jobs.go @@ -0,0 +1,31 @@ +package get_jobs + +import ( + "time" + + "github.com/YuukanOO/seelf/pkg/bus" + "github.com/YuukanOO/seelf/pkg/monad" + "github.com/YuukanOO/seelf/pkg/storage" +) + +type ( + Job struct { + ID string `json:"id"` + ResourceID string `json:"resource_id"` + Group string `json:"group"` + MessageName string `json:"message_name"` + MessageData string `json:"message_data"` + QueuedAt time.Time `json:"queued_at"` + NotBefore time.Time `json:"not_before"` + ErrorCode monad.Maybe[string] `json:"error_code"` + Retrieved bool `json:"retrieved"` + } + + Query struct { + bus.Query[storage.Paginated[Job]] + + Page monad.Maybe[int] `form:"page"` + } +) + +func (q Query) Name_() string { return "bus.query.get_jobs" } diff --git a/pkg/bus/embedded/retry_job/retry_job.go b/pkg/bus/embedded/retry_job/retry_job.go new file mode 100644 index 00000000..467c2b97 --- /dev/null +++ b/pkg/bus/embedded/retry_job/retry_job.go @@ -0,0 +1,11 @@ +package retry_job + +import "github.com/YuukanOO/seelf/pkg/bus" + +type Command struct { + bus.Command[bus.UnitType] + + ID string `json:"id"` +} + +func (Command) Name_() string { return "bus.command.retry_job" } diff --git a/pkg/bus/embedded/runner.go b/pkg/bus/embedded/runner.go new file mode 100644 index 00000000..6bc60f87 --- /dev/null +++ b/pkg/bus/embedded/runner.go @@ -0,0 +1,226 @@ +package embedded + +import ( + "context" + "sync" + "time" + + "github.com/YuukanOO/seelf/pkg/bus" + "github.com/YuukanOO/seelf/pkg/log" +) + +type ( + Job interface { + ID() string + Command() bus.AsyncRequest + } + + // Should be raised by the store when a job has been dismissed by a user. + JobDismissed struct { + bus.Notification + + ID string + Command bus.AsyncRequest + } + + JobsStore interface { + GetNextPendingJobs(context.Context) ([]Job, error) + Failed(context.Context, Job, error) error + Delay(context.Context, Job) error + Done(context.Context, Job) error + } + + Runner struct { + dispatcher bus.Dispatcher + pollInterval time.Duration + started bool + store JobsStore + logger log.Logger + done []chan bool + exitGroup sync.WaitGroup + groups []*workerGroup + messageNameToWorkerIdx map[string]int + } + + workerGroup struct { + jobs chan Job + size int + } + + // Represents a worker group configuration used by a scheduler to spawn the appropriate + // workers. + WorkerGroup struct { + Size int // Number of workers to start + Requests []bus.AsyncRequest // List of message types to handle + } +) + +func (JobDismissed) Name_() string { return "bus.event.job_dismissed" } + +// In-process runner which process commands in specific worker groups using +// goroutines. 
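+// Each WorkerGroup gets a dedicated channel and Size goroutines. Pending jobs
+// are routed to a group based on the name of their AsyncRequest; jobs whose
+// message name is not handled by any group are marked as failed with
+// bus.ErrNoHandlerRegistered.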
+func NewRunner( + store JobsStore, + logger log.Logger, + dispatcher bus.Dispatcher, + pollInterval time.Duration, + groups ...WorkerGroup, +) *Runner { + s := &Runner{ + dispatcher: dispatcher, + pollInterval: pollInterval, + store: store, + logger: logger, + groups: make([]*workerGroup, len(groups)), + messageNameToWorkerIdx: make(map[string]int), + } + + for i, g := range groups { + // Should always have at least one worker + if g.Size < 1 { + g.Size = 1 + } + + s.groups[i] = &workerGroup{ + jobs: make(chan Job), + size: g.Size, + } + + for _, msg := range g.Requests { + s.messageNameToWorkerIdx[msg.Name_()] = i + } + } + + return s +} + +func (s *Runner) Start() { + if s.started { + return + } + + s.started = true + + s.startGroupRunners() + s.startPolling() +} + +func (s *Runner) Stop() { + if !s.started { + return + } + + s.logger.Info("waiting for current jobs to finish") + + for _, done := range s.done { + done <- true + } + + s.exitGroup.Wait() +} + +// Tiny helper to run a function in a goroutine and keep track of done channels. +func (s *Runner) run(fn func(<-chan bool)) { + done := make(chan bool, 1) + s.done = append(s.done, done) + + s.exitGroup.Add(1) + go func(d <-chan bool) { + defer s.exitGroup.Done() + fn(d) + }(done) +} + +func (s *Runner) startPolling() { + s.run(func(done <-chan bool) { + var ( + delay time.Duration + lastRun time.Time = time.Now() + ) + + for { + delay = s.pollInterval - time.Since(lastRun) + + select { + case <-done: + return + case <-time.After(delay): + } + + lastRun = time.Now() + + jobs, err := s.store.GetNextPendingJobs(context.Background()) + + if err != nil { + s.logger.Errorw("error while retrieving pending jobs", + "error", err) + continue + } + + for _, job := range jobs { + idx, handled := s.messageNameToWorkerIdx[job.Command().Name_()] + + if !handled { + s.handleJobReturn(context.Background(), job, bus.AsyncResultProcessed, bus.ErrNoHandlerRegistered) + continue + } + + s.groups[idx].jobs <- job + } + } + }) +} + +func (s *Runner) startGroupRunners() { + for _, g := range s.groups { + group := g + for i := 0; i < group.size; i++ { + s.run(func(done <-chan bool) { + for { + select { + case <-done: + return + case job := <-group.jobs: + ctx := context.Background() + result, err := bus.Send(s.dispatcher, ctx, job.Command()) + + s.handleJobReturn(ctx, job, result, err) + } + } + }) + } + } +} + +func (s *Runner) handleJobReturn(ctx context.Context, job Job, result bus.AsyncResult, err error) { + var storeErr error + + defer func() { + if storeErr == nil { + return + } + + s.logger.Errorw("error while updating job status", + "job", job.ID(), + "name", job.Command().Name_(), + "error", storeErr, + ) + }() + + if err != nil { + storeErr = s.store.Failed(ctx, job, err) + s.logger.Errorw("error while processing job", + "job", job.ID(), + "name", job.Command().Name_(), + "error", err, + ) + return + } + + if result == bus.AsyncResultDelay { + storeErr = s.store.Delay(ctx, job) + return + } + + storeErr = s.store.Done(ctx, job) +} diff --git a/pkg/bus/embedded/runner_test.go b/pkg/bus/embedded/runner_test.go new file mode 100644 index 00000000..6156effe --- /dev/null +++ b/pkg/bus/embedded/runner_test.go @@ -0,0 +1,203 @@ +package embedded_test + +import ( + "context" + "errors" + "sync" + "testing" + + "github.com/YuukanOO/seelf/pkg/assert" + "github.com/YuukanOO/seelf/pkg/bus" + "github.com/YuukanOO/seelf/pkg/bus/embedded" + "github.com/YuukanOO/seelf/pkg/id" + "github.com/YuukanOO/seelf/pkg/log" + "github.com/YuukanOO/seelf/pkg/must" +) + 
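+// The tests below drive the Runner with the in-memory adapter declared at the
+// end of this file, which records failed, delayed and done jobs so every
+// outcome can be asserted on.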
+func Test_Runner(t *testing.T) { + logger := must.Panic(log.NewLogger()) + b := embedded.NewBus() + // Register an handler which will just return the inner cmd error to test how the scheduler behave. + bus.Register(b, func(_ context.Context, cmd returnCommand) (bus.AsyncResult, error) { + return cmd.result, cmd.err + }) + + t.Run("should fail the job if no handler is registered", func(t *testing.T) { + var ( + adapter adapter + cmd bus.AsyncRequest = unhandledCommand{} + ) + + runner := embedded.NewRunner(&adapter, logger, b, 0, embedded.WorkerGroup{ + Requests: []bus.AsyncRequest{returnCommand{}}, + }) + + runner.Start() + defer runner.Stop() + + assert.Nil(t, adapter.Queue(context.Background(), cmd)) + + adapter.wait() + + assert.HasLength(t, 0, adapter.done) + assert.HasLength(t, 0, adapter.delayed) + assert.HasLength(t, 1, adapter.failed) + assert.Equal(t, cmd, adapter.failed[0].Command()) + assert.Equal(t, bus.ErrNoHandlerRegistered, adapter.failed[0].err) + }) + + t.Run("should fail the job if there was an error", func(t *testing.T) { + var ( + adapter adapter + jobErr = errors.New("some error") + cmd bus.AsyncRequest = returnCommand{err: jobErr} + ) + + runner := embedded.NewRunner(&adapter, logger, b, 0, embedded.WorkerGroup{ + Requests: []bus.AsyncRequest{returnCommand{}}, + }) + + runner.Start() + defer runner.Stop() + + assert.Nil(t, adapter.Queue(context.Background(), cmd)) + + adapter.wait() + + assert.HasLength(t, 0, adapter.done) + assert.HasLength(t, 0, adapter.delayed) + assert.HasLength(t, 1, adapter.failed) + assert.Equal(t, cmd, adapter.failed[0].Command()) + assert.Equal(t, jobErr, adapter.failed[0].err) + }) + + t.Run("should delay the job if the handler returns an AsyncResultDelay", func(t *testing.T) { + var ( + adapter adapter + cmd bus.AsyncRequest = returnCommand{result: bus.AsyncResultDelay} + ) + + runner := embedded.NewRunner(&adapter, logger, b, 0, embedded.WorkerGroup{ + Requests: []bus.AsyncRequest{returnCommand{}}, + }) + + runner.Start() + defer runner.Stop() + + assert.Nil(t, adapter.Queue(context.Background(), cmd)) + + adapter.wait() + + assert.HasLength(t, 0, adapter.done) + assert.HasLength(t, 1, adapter.delayed) + assert.HasLength(t, 0, adapter.failed) + assert.Equal(t, cmd, adapter.delayed[0].Command()) + assert.Nil(t, adapter.delayed[0].err) + }) + + t.Run("should mark the job as done if there is no error", func(t *testing.T) { + var ( + adapter adapter + cmd bus.AsyncRequest = returnCommand{} + ) + + runner := embedded.NewRunner(&adapter, logger, b, 0, embedded.WorkerGroup{ + Requests: []bus.AsyncRequest{returnCommand{}}, + }) + + runner.Start() + defer runner.Stop() + + assert.Nil(t, adapter.Queue(context.Background(), cmd)) + + adapter.wait() + + assert.HasLength(t, 1, adapter.done) + assert.HasLength(t, 0, adapter.delayed) + assert.HasLength(t, 0, adapter.failed) + assert.Equal(t, cmd, adapter.done[0].Command()) + }) +} + +type returnCommand struct { + bus.AsyncCommand + + result bus.AsyncResult + err error +} + +func (r returnCommand) Name_() string { return "returnCommand" } +func (r returnCommand) ResourceID() string { return "" } +func (r returnCommand) Group() string { return "" } + +type unhandledCommand struct { + bus.AsyncCommand +} + +func (u unhandledCommand) Name_() string { return "unhandledCommand" } +func (u unhandledCommand) ResourceID() string { return "" } +func (u unhandledCommand) Group() string { return "" } + +var ( + _ embedded.JobsStore = (*adapter)(nil) + _ bus.Scheduler = (*adapter)(nil) +) + +type job struct { + id 
string + msg bus.AsyncRequest + err error +} + +func (j *job) ID() string { return j.id } +func (j *job) Command() bus.AsyncRequest { return j.msg } + +type adapter struct { + wg sync.WaitGroup + jobs []*job + failed []*job + delayed []*job + done []*job +} + +func (a *adapter) Queue(ctx context.Context, msg bus.AsyncRequest) error { + a.wg.Add(1) + a.jobs = append(a.jobs, &job{id: id.New[string](), msg: msg}) + return nil +} + +func (a *adapter) Delay(ctx context.Context, j embedded.Job) error { + defer a.wg.Done() + a.delayed = append(a.delayed, j.(*job)) + return nil +} + +func (a *adapter) Done(ctx context.Context, j embedded.Job) error { + defer a.wg.Done() + a.done = append(a.done, j.(*job)) + return nil +} + +func (a *adapter) Failed(ctx context.Context, j embedded.Job, jobErr error) error { + defer a.wg.Done() + jo := j.(*job) + jo.err = jobErr + a.failed = append(a.failed, jo) + return nil +} + +func (a *adapter) GetNextPendingJobs(context.Context) ([]embedded.Job, error) { + j := make([]embedded.Job, len(a.jobs)) + + for i, job := range a.jobs { + j[i] = job + } + + a.jobs = nil + + return j, nil +} + +func (a *adapter) wait() { + a.wg.Wait() +} diff --git a/pkg/bus/group.go b/pkg/bus/group.go new file mode 100644 index 00000000..c00290b2 --- /dev/null +++ b/pkg/bus/group.go @@ -0,0 +1,14 @@ +package bus + +import ( + "slices" + "strings" +) + +const groupSeparator = "." + +// Creates a group identifier from an array of strings representing subjects. +func Group(parts ...string) string { + slices.Sort(parts) + return strings.Join(parts, groupSeparator) +} diff --git a/pkg/bus/group_test.go b/pkg/bus/group_test.go new file mode 100644 index 00000000..c74bbd6e --- /dev/null +++ b/pkg/bus/group_test.go @@ -0,0 +1,15 @@ +package bus_test + +import ( + "testing" + + "github.com/YuukanOO/seelf/pkg/assert" + "github.com/YuukanOO/seelf/pkg/bus" +) + +func Test_Group(t *testing.T) { + t.Run("should returns a new group identifier with parts sorted", func(t *testing.T) { + assert.Equal(t, "bar.foo", bus.Group("bar", "foo")) + assert.Equal(t, "bar.foo", bus.Group("foo", "bar")) + }) +} diff --git a/pkg/bus/message.go b/pkg/bus/message.go index a9cbc118..b5e7b2f7 100644 --- a/pkg/bus/message.go +++ b/pkg/bus/message.go @@ -6,36 +6,33 @@ // and a way to add middlewares to your handlers. package bus -import "github.com/YuukanOO/seelf/pkg/storage" - -const ( - MessageKindNotification MessageKind = iota - MessageKindCommand - MessageKindQuery +import ( + "github.com/YuukanOO/seelf/pkg/storage" ) // Constant unit value to return when a request does not need a specific result set. // In my mind, it should avoid the cost of allocating something not needed. const Unit UnitType = iota +const ( + AsyncResultProcessed AsyncResult = iota // Async job has been handled correctly (could have failed but in this case the error will not be nil) + AsyncResultDelay // Async job could not be processed right now and should be retried later +) + // Contains message which can be unmarshalled from a raw string (= those used in the scheduler). -var Marshallable = storage.NewDiscriminatedMapper(func(r Request) string { return r.Name_() }) +var Marshallable = storage.NewDiscriminatedMapper(func(r AsyncRequest) string { return r.Name_() }) type ( // Sometimes, you may not need a result type on a request but the RequestHandler expect one, just // use this type as the result type and the bus.Unit as the return value. UnitType uint8 - // Represent the kind of a message being dispatched. 
This is especially useful for middlewares - // to adapt their behavior depending on the message kind. - // - // For example, a command may need a transaction whereas a query may not. - MessageKind uint8 + // Async result of an async request, one processed by the scheduler. + AsyncResult uint8 // Message which can be sent in the bus and handled by a registered handler. Message interface { - Name_() string // Unique name of the message (here to not require reflection) - Kind_() MessageKind // Type of the message to be able to customize middlewares + Name_() string // Unique name of the message (here to not require reflection) } // Signal which do not need a result. @@ -56,23 +53,51 @@ type ( isTypedRequest() T // Marker method. Without it, the compiler will not be able to infer the T. } + // Request to mutate the system. + MutateRequest interface { + Request + isMutateRequest() + } + + // Async request which extend the MutateRequest with additional information. + AsyncRequest interface { + MutateRequest + TypedRequest[AsyncResult] + ResourceID() string // ID of the main resource processed by the request + Group() string // Work group for this request, at most one request per group is processed at any given time + } + + // Request to query the system. + QueryRequest interface { + Request + isQueryRequest() + } + // Message without result implementing the Signal interface. Notification struct{} - // Request to mutate the system. Implements the TypedRequest interface. + // Request to mutate the system. Implements the TypedRequest and MutateRequest interface. + // The Name_() method is not implemented by this to make sure you do not forget to declare + // it. Command[T any] struct{} - // Request to query the system. Implements the TypedRequest interface. + // Async command for stuff that should be processed in the background by the Scheduler. + AsyncCommand struct { + Command[AsyncResult] + } + + // Request to query the system. Implements the TypedRequest and QueryRequest interface. + // The Name_() method is not implemented by this to make sure you do not forget to declare + // it. 
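+	// A concrete query embeds it and declares its own Name_(), as getQuery does
+	// in message_test.go:
+	//
+	//	type getQuery struct {
+	//		bus.Query[int]
+	//	}
+	//
+	//	func (getQuery) Name_() string { return "GetQuery" }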
Query[T any] struct{} ) -func (Notification) Kind_() MessageKind { return MessageKindNotification } -func (Notification) isSignal() {} +func (Notification) isSignal() {} -func (Command[T]) Kind_() MessageKind { return MessageKindCommand } func (Command[T]) isRequest() {} +func (Command[T]) isMutateRequest() {} func (Command[T]) isTypedRequest() (t T) { return t } -func (Query[T]) Kind_() MessageKind { return MessageKindQuery } func (Query[T]) isRequest() {} +func (Query[T]) isQueryRequest() {} func (Query[T]) isTypedRequest() (t T) { return t } diff --git a/pkg/bus/message_test.go b/pkg/bus/message_test.go index a445d079..d8512884 100644 --- a/pkg/bus/message_test.go +++ b/pkg/bus/message_test.go @@ -1,26 +1,171 @@ package bus_test import ( + "context" "testing" "github.com/YuukanOO/seelf/pkg/assert" "github.com/YuukanOO/seelf/pkg/bus" + "github.com/YuukanOO/seelf/pkg/storage" ) -func TestMessage(t *testing.T) { - t.Run("should have the appropriate kind", func(t *testing.T) { - var ( - command addCommand - query getQuery - notif registeredNotification - ) - - assert.Equal(t, bus.MessageKindCommand, command.Kind_()) - assert.Equal(t, bus.MessageKindQuery, query.Kind_()) - assert.Equal(t, bus.MessageKindNotification, notif.Kind_()) +func Test_Message(t *testing.T) { + t.Run("Command should implements correct interfaces", func(t *testing.T) { + t.Run("should implements bus.Message, bus.Request, bus.TypedRequest and bus.MutateRequest interfaces", func(t *testing.T) { + var cmd addCommand + + _, ok := any(cmd).(bus.MutateRequest) + assert.True(t, ok) + + _, ok = any(cmd).(bus.Request) + assert.True(t, ok) + + _, ok = any(cmd).(bus.TypedRequest[int]) + assert.True(t, ok) + + _, ok = any(cmd).(bus.Message) + assert.True(t, ok) + }) + + t.Run("should not implements bus.Signal, bus.AsyncRequest and bus.QueryRequest interface", func(t *testing.T) { + var cmd addCommand + + _, ok := any(cmd).(bus.Signal) + assert.False(t, ok) + + _, ok = any(cmd).(bus.QueryRequest) + assert.False(t, ok) + + _, ok = any(cmd).(bus.AsyncRequest) + assert.False(t, ok) + }) + }) + + t.Run("Query should implements correct interfaces", func(t *testing.T) { + t.Run("should implements bus.Message, bus.Request, bus.TypedRequest and bus.QueryRequest interface", func(t *testing.T) { + var cmd getQuery + + _, ok := any(cmd).(bus.Message) + assert.True(t, ok) + + _, ok = any(cmd).(bus.Request) + assert.True(t, ok) + + _, ok = any(cmd).(bus.TypedRequest[int]) + assert.True(t, ok) + + _, ok = any(cmd).(bus.QueryRequest) + assert.True(t, ok) + }) + + t.Run("should not implements bus.Signal, bus.AsyncRequest and bus.MutateRequest interface", func(t *testing.T) { + var cmd getQuery + + _, ok := any(cmd).(bus.Signal) + assert.False(t, ok) + + _, ok = any(cmd).(bus.AsyncRequest) + assert.False(t, ok) + + _, ok = any(cmd).(bus.MutateRequest) + assert.False(t, ok) + }) + }) + + t.Run("AsyncCommand should implements correct interfaces", func(t *testing.T) { + t.Run("should implements bus.Message, bus.Request, bus.TypedRequest, bus.MutateRequest and bus.AsyncRequest interface", func(t *testing.T) { + var cmd asyncCommand + + _, ok := any(cmd).(bus.MutateRequest) + assert.True(t, ok) + + _, ok = any(cmd).(bus.Request) + assert.True(t, ok) + + _, ok = any(cmd).(bus.TypedRequest[bus.AsyncResult]) + assert.True(t, ok) + + _, ok = any(cmd).(bus.Message) + assert.True(t, ok) + + _, ok = any(cmd).(bus.AsyncRequest) + assert.True(t, ok) + }) + + t.Run("should not implements bus.Signal and bus.QueryRequest interface", func(t *testing.T) { + var cmd 
asyncCommand + + _, ok := any(cmd).(bus.Signal) + assert.False(t, ok) + + _, ok = any(cmd).(bus.QueryRequest) + assert.False(t, ok) + }) + }) + + t.Run("Notification should implements correct interfaces", func(t *testing.T) { + t.Run("should implements bus.Message and bus.Signal interface", func(t *testing.T) { + var evt somethingCreated + + _, ok := any(evt).(bus.Message) + assert.True(t, ok) + + _, ok = any(evt).(bus.Signal) + assert.True(t, ok) + }) + + t.Run("should not implements bus.Request, bus.TypedRequest, bus.AsyncRequest, bus.MutateRequest and bus.QueryRequest interface", func(t *testing.T) { + var evt somethingCreated + + _, ok := any(evt).(bus.Request) + assert.False(t, ok) + + _, ok = any(evt).(bus.TypedRequest[int]) + assert.False(t, ok) + + _, ok = any(evt).(bus.AsyncRequest) + assert.False(t, ok) + + _, ok = any(evt).(bus.MutateRequest) + assert.False(t, ok) + + _, ok = any(evt).(bus.QueryRequest) + assert.False(t, ok) + }) + }) + + t.Run("should be registerable on a bus", func(t *testing.T) { + var b dummyBus + + bus.Register(b, func(context.Context, addCommand) (int, error) { return 0, nil }) + bus.Register(b, func(context.Context, getQuery) (int, error) { return 0, nil }) + bus.On(b, func(context.Context, somethingCreated) error { return nil }) + + _, _ = bus.Send(b, context.Background(), addCommand{A: 1, B: 2}) + + _, err := bus.Marshallable.From(addCommand{}.Name_(), "") + assert.ErrorIs(t, storage.ErrCouldNotUnmarshalGivenType, err, "should not have been registered on the discriminated union mapper") + }) + + t.Run("should automatically register a mapper for async request", func(t *testing.T) { + var b dummyBus + + bus.Register(b, func(context.Context, asyncCommand) (bus.AsyncResult, error) { return 0, nil }) + + r, err := bus.Marshallable.From(asyncCommand{}.Name_(), `{"SomeValue":42}`) + assert.Nil(t, err) + assert.Equal(t, asyncCommand{ + SomeValue: 42, + }, r.(asyncCommand)) }) } +type dummyBus struct{} + +func (dummyBus) Register(bus.Message, bus.NextFunc) {} +func (dummyBus) Send(context.Context, bus.Request) (any, error) { return 0, nil } +func (dummyBus) Notify(context.Context, ...bus.Signal) error { return nil } + type addCommand struct { bus.Command[int] @@ -28,8 +173,17 @@ type addCommand struct { B int } -func (addCommand) Name_() string { return "AddCommand" } -func (addCommand) ResourceID() string { return "" } +func (addCommand) Name_() string { return "AddCommand" } + +type asyncCommand struct { + bus.AsyncCommand + + SomeValue int +} + +func (asyncCommand) Name_() string { return "AsyncCommand" } +func (asyncCommand) ResourceID() string { return "" } +func (asyncCommand) Group() string { return "" } type getQuery struct { bus.Query[int] @@ -37,10 +191,10 @@ type getQuery struct { func (getQuery) Name_() string { return "GetQuery" } -type registeredNotification struct { +type somethingCreated struct { bus.Notification - Id int + ID int } -func (registeredNotification) Name_() string { return "RegisteredNotification" } +func (somethingCreated) Name_() string { return "SomethingCreated" } diff --git a/pkg/bus/scheduler.go b/pkg/bus/scheduler.go index f37d3180..4a1e60c9 100644 --- a/pkg/bus/scheduler.go +++ b/pkg/bus/scheduler.go @@ -1,279 +1,9 @@ package bus -import ( - "context" - "sync" - "time" +import "context" - "github.com/YuukanOO/seelf/pkg/log" - "github.com/YuukanOO/seelf/pkg/monad" - "github.com/YuukanOO/seelf/pkg/storage" -) - -const ( - JobPolicyRetryPreserveOrder JobPolicy = 1 << iota // Retry the job but preserve the order among the group 
- JobPolicyWaitForOthersResourceID // Wait for other jobs on the same resource id to finish before processing - JobPolicyCancellable // The job can be cancellable by a user - JobPolicyMerge // If another job for the same resource and the same message name exists and is pending, replace it's payload -) - -type ( - JobPolicy uint8 - - // Represents a schedulable request, one that can be queued for later dispatching. - Schedulable interface { - Request - ResourceID() string // Resource id for which the task will be created - } - - // Enable scheduled dispatching of a message. - Scheduler interface { - // Queue a request to be dispatched asynchronously at a later time. - Queue(context.Context, Schedulable, ...JobOptions) error - } - - // Job option passed down to adapter. - CreateOptions struct { - Group monad.Maybe[string] - Policy JobPolicy - } - - JobOptions func(*CreateOptions) - - // Represents a scheduler that can be started and stopped. - RunnableScheduler interface { - Scheduler - Start() - Stop() - } - - // Represents a request that has been queued for dispatching. - ScheduledJob interface { - ID() string - Message() Request - Policy() JobPolicy - } - - GetJobsFilters struct { - Page monad.Maybe[int] `form:"page"` - } - - // Adapter used to store scheduled jobs. Could be anything from a database to a file or - // an in-memory store. - ScheduledJobsStore interface { - Setup() error // Setup the store - Create(context.Context, Schedulable, CreateOptions) error // Create a new scheduled job - Delete(context.Context, string) error // Try to delete a job from the store - GetAllJobs(context.Context, GetJobsFilters) (storage.Paginated[ScheduledJob], error) // Retrieve all jobs from the store - GetNextPendingJobs(context.Context) ([]ScheduledJob, error) // Get the next pending jobs to be dispatched - Retry(context.Context, ScheduledJob, error) error // Retry the given job with the given reason - Done(context.Context, ScheduledJob) error // Mark the given job as done - } - - defaultScheduler struct { - bus Dispatcher - pollInterval time.Duration - logger log.Logger - store ScheduledJobsStore - started bool - done []chan bool - exitGroup sync.WaitGroup - groups []*workerGroup - messageNameToWorkerIdx map[string]int - } - - // Represents a worker group configuration used by a scheduler to spawn the appropriate - // workers. - WorkerGroup struct { - Size int // Number of workers to start - Messages []string // List of message names to handle, mandatory - } - - workerGroup struct { - jobs chan ScheduledJob - size int - } -) - -// Builds up a new scheduler used to queue messages for later dispatching using the -// provided adapter. 
-func NewScheduler(adapter ScheduledJobsStore, log log.Logger, bus Dispatcher, pollInterval time.Duration, groups ...WorkerGroup) RunnableScheduler { - s := &defaultScheduler{ - bus: bus, - pollInterval: pollInterval, - logger: log, - store: adapter, - groups: make([]*workerGroup, len(groups)), - messageNameToWorkerIdx: make(map[string]int), - } - - for i, g := range groups { - // Should always have at least one worker - if g.Size < 1 { - g.Size = 1 - } - - s.groups[i] = &workerGroup{ - jobs: make(chan ScheduledJob), - size: g.Size, - } - - for _, msg := range g.Messages { - s.messageNameToWorkerIdx[msg] = i - } - } - - return s -} - -func (s *defaultScheduler) Queue( - ctx context.Context, - msg Schedulable, - options ...JobOptions, -) error { - var opts CreateOptions - - for _, opt := range options { - opt(&opts) - } - - return s.store.Create(ctx, msg, opts) -} - -func (s *defaultScheduler) Start() { - if s.started { - return - } - - s.started = true - - s.startGroupRunners() - s.startPolling() -} - -func (s *defaultScheduler) Stop() { - if !s.started { - return - } - - s.logger.Info("waiting for current jobs to finish") - - for _, done := range s.done { - done <- true - } - - s.exitGroup.Wait() -} - -// Tiny helper to run a function in a goroutine and keep track of done channels. -func (s *defaultScheduler) run(fn func(<-chan bool)) { - done := make(chan bool, 1) - s.done = append(s.done, done) - - s.exitGroup.Add(1) - go func(d <-chan bool) { - defer s.exitGroup.Done() - fn(d) - }(done) -} - -func (s *defaultScheduler) startPolling() { - s.run(func(done <-chan bool) { - var ( - delay time.Duration - lastRun time.Time = time.Now() - ) - - for { - delay = s.pollInterval - time.Since(lastRun) - - select { - case <-done: - return - case <-time.After(delay): - } - - lastRun = time.Now() - - jobs, err := s.store.GetNextPendingJobs(context.Background()) - - if err != nil { - s.logger.Errorw("error while retrieving pending jobs", - "error", err) - continue - } - - for _, job := range jobs { - idx, handled := s.messageNameToWorkerIdx[job.Message().Name_()] - - if !handled { - s.handleJobReturn(context.Background(), job, ErrNoHandlerRegistered) - continue - } - - s.groups[idx].jobs <- job - } - } - }) -} - -func (s *defaultScheduler) handleJobReturn(ctx context.Context, job ScheduledJob, err error) { - if err == nil { - if err = s.store.Done(ctx, job); err != nil { - s.logger.Errorw("error while marking job as done", - "job", job.ID(), - "name", job.Message().Name_(), - "error", err) - } - return - } - - s.logger.Warnw("error while processing job, it will be retried later", - "job", job.ID(), - "name", job.Message().Name_(), - "error", err) - - if err = s.store.Retry(ctx, job, err); err != nil { - s.logger.Errorw("error while retrying job", - "job", job.ID(), - "name", job.Message().Name_(), - "error", err) - } -} - -func (s *defaultScheduler) startGroupRunners() { - for _, g := range s.groups { - group := g - for i := 0; i < group.size; i++ { - s.run(func(done <-chan bool) { - for { - select { - case <-done: - return - case job := <-group.jobs: - ctx := context.Background() - _, err := s.bus.Send(ctx, job.Message()) - - s.handleJobReturn(ctx, job, err) - } - } - }) - } - } -} - -// Attach the job being queued to a specific group meaning only one job of a group -// can be processed at a time. -func WithGroup(name string) JobOptions { - return func(o *CreateOptions) { - o.Group.Set(name) - } -} - -// Attach given policies to the job being queued. It will determine how the job -// will be handled. 
-func WithPolicy(policy JobPolicy) JobOptions { - return func(o *CreateOptions) { - o.Policy = policy - } +// Enable scheduled dispatching of a message. +type Scheduler interface { + // Queue a request to be dispatched asynchronously at a later time. + Queue(context.Context, AsyncRequest) error } diff --git a/pkg/bus/scheduler_test.go b/pkg/bus/scheduler_test.go deleted file mode 100644 index ec5ed897..00000000 --- a/pkg/bus/scheduler_test.go +++ /dev/null @@ -1,157 +0,0 @@ -package bus_test - -import ( - "context" - "errors" - "slices" - "strconv" - "sync" - "testing" - - "github.com/YuukanOO/seelf/pkg/assert" - "github.com/YuukanOO/seelf/pkg/bus" - "github.com/YuukanOO/seelf/pkg/bus/memory" - "github.com/YuukanOO/seelf/pkg/flag" - "github.com/YuukanOO/seelf/pkg/log" - "github.com/YuukanOO/seelf/pkg/must" - "github.com/YuukanOO/seelf/pkg/storage" -) - -func TestScheduler(t *testing.T) { - logger := must.Panic(log.NewLogger()) - b := memory.NewBus() - // Register an handler which will just return the inner cmd error to test how the scheduler behave. - bus.Register(b, func(_ context.Context, cmd returnCommand) (bus.UnitType, error) { - return bus.Unit, cmd.err - }) - - t.Run("should queue and handle the job return appropriately", func(t *testing.T) { - adapter := &adapter{} - scheduler := bus.NewScheduler(adapter, logger, b, 0, bus.WorkerGroup{ - Size: 4, - Messages: []string{returnCommand{}.Name_()}, - }) - - scheduler.Start() - defer scheduler.Stop() - - innerErr := errors.New("some error") - - withoutErr := returnCommand{} - withUnwrapedErr := returnCommand{err: innerErr} - withPreservedOrderErr := returnCommand{err: innerErr} - - assert.Nil(t, scheduler.Queue(context.Background(), withoutErr)) - assert.Nil(t, scheduler.Queue(context.Background(), withUnwrapedErr)) - assert.Nil(t, scheduler.Queue(context.Background(), withPreservedOrderErr, bus.WithPolicy(bus.JobPolicyRetryPreserveOrder))) - assert.Nil(t, scheduler.Queue(context.Background(), addCommand{})) - - adapter.wait() - - assert.HasLength(t, 1, adapter.done) - slices.SortFunc(adapter.done, func(a, b *job) int { - return a.id - b.id - }) - - assert.Equal(t, 0, adapter.done[0].id) - - assert.HasLength(t, 3, adapter.retried) - slices.SortFunc(adapter.retried, func(a, b *job) int { - return a.id - b.id - }) - - assert.Equal(t, 1, adapter.retried[0].id) - assert.ErrorIs(t, innerErr, adapter.retried[0].err) - assert.False(t, adapter.retried[0].preserveOrder) - - assert.Equal(t, 2, adapter.retried[1].id) - assert.ErrorIs(t, innerErr, adapter.retried[1].err) - assert.True(t, adapter.retried[1].preserveOrder) - - assert.Equal(t, 3, adapter.retried[2].id) - assert.ErrorIs(t, bus.ErrNoHandlerRegistered, adapter.retried[2].err) - }) -} - -var ( - _ bus.ScheduledJob = (*job)(nil) - _ bus.ScheduledJobsStore = (*adapter)(nil) - _ bus.Request = (*returnCommand)(nil) -) - -type ( - job struct { - id int - msg bus.Request - policy bus.JobPolicy - err error - preserveOrder bool - } - - adapter struct { - wg sync.WaitGroup - jobs []*job - done []*job - retried []*job - } - - returnCommand struct { - bus.Command[bus.UnitType] - - err error - } -) - -func (r returnCommand) Name_() string { return "returnCommand" } -func (r returnCommand) ResourceID() string { return "" } - -func (j *job) ID() string { return strconv.Itoa(j.id) } -func (j *job) Message() bus.Request { return j.msg } -func (j *job) Policy() bus.JobPolicy { return j.policy } - -func (a *adapter) Setup() error { return nil } - -func (a *adapter) GetAllJobs(context.Context, 
bus.GetJobsFilters) (storage.Paginated[bus.ScheduledJob], error) { - return storage.Paginated[bus.ScheduledJob]{}, nil -} - -func (a *adapter) Create(_ context.Context, msg bus.Schedulable, opts bus.CreateOptions) error { - a.wg.Add(1) - a.jobs = append(a.jobs, &job{id: len(a.jobs), msg: msg, policy: opts.Policy}) - return nil -} - -func (a *adapter) Delete(context.Context, string) error { return nil } - -func (a *adapter) wait() { - a.wg.Wait() -} - -func (a *adapter) GetNextPendingJobs(context.Context) ([]bus.ScheduledJob, error) { - j := make([]bus.ScheduledJob, len(a.jobs)) - - for i, job := range a.jobs { - j[i] = job - } - - a.jobs = nil - - return j, nil -} - -func (a *adapter) Retry(_ context.Context, j bus.ScheduledJob, jobErr error) error { - defer a.wg.Done() - jo := j.(*job) - jo.err = jobErr - jo.preserveOrder = flag.IsSet(j.Policy(), bus.JobPolicyRetryPreserveOrder) - - a.retried = append(a.retried, jo) - return nil - -} - -func (a *adapter) Done(_ context.Context, j bus.ScheduledJob) error { - defer a.wg.Done() - a.done = append(a.done, j.(*job)) - return nil -} diff --git a/pkg/bus/spy/dispatcher_test.go b/pkg/bus/spy/dispatcher_test.go new file mode 100644 index 00000000..2dacb315 --- /dev/null +++ b/pkg/bus/spy/dispatcher_test.go @@ -0,0 +1,58 @@ +package spy_test + +import ( + "context" + "testing" + + "github.com/YuukanOO/seelf/pkg/assert" + "github.com/YuukanOO/seelf/pkg/bus" + "github.com/YuukanOO/seelf/pkg/bus/spy" +) + +func Test_Dispatcher(t *testing.T) { + t.Run("should store dispatched requests", func(t *testing.T) { + dispatcher := spy.NewDispatcher() + + var req request + + _, err := dispatcher.Send(context.Background(), req) + + assert.Nil(t, err) + assert.HasLength(t, 1, dispatcher.Requests()) + assert.Equal(t, req, dispatcher.Requests()[0].(request)) + }) + + t.Run("should store dispatched signals", func(t *testing.T) { + dispatcher := spy.NewDispatcher() + + var sig signal + + err := dispatcher.Notify(context.Background(), sig) + + assert.Nil(t, err) + assert.HasLength(t, 1, dispatcher.Signals()) + assert.Equal(t, sig, dispatcher.Signals()[0].(signal)) + }) + + t.Run("could reset dispatched requests and signals", func(t *testing.T) { + dispatcher := spy.NewDispatcher() + + _, _ = dispatcher.Send(context.Background(), request{}) + _ = dispatcher.Notify(context.Background(), signal{}) + + dispatcher.Reset() + + assert.HasLength(t, 0, dispatcher.Requests()) + assert.HasLength(t, 0, dispatcher.Signals()) + }) +} + +type request struct { + bus.Command[int] +} + +func (request) Name_() string { return "request" } + +type signal struct { + bus.Signal +} diff --git a/pkg/bus/sqlite/migrations/1706004450_add_resource_id.up.sql b/pkg/bus/sqlite/migrations/1706004450_add_resource_id.up.sql index c80fc20a..04b7b4cb 100644 --- a/pkg/bus/sqlite/migrations/1706004450_add_resource_id.up.sql +++ b/pkg/bus/sqlite/migrations/1706004450_add_resource_id.up.sql @@ -1,4 +1,4 @@ -ALTER TABLE scheduled_jobs RENAME TO tmp_scheduled_jobs; +ALTER TABLE scheduled_jobs RENAME TO tmp_scheduled_jobs; -- should've used a temporary table DROP INDEX IF EXISTS idx_scheduled_jobs_dedupe_name; CREATE TABLE scheduled_jobs diff --git a/pkg/bus/sqlite/migrations/1727345059_nullable_policy.up.sql b/pkg/bus/sqlite/migrations/1727345059_nullable_policy.up.sql new file mode 100644 index 00000000..77f94a27 --- /dev/null +++ b/pkg/bus/sqlite/migrations/1727345059_nullable_policy.up.sql @@ -0,0 +1,7 @@ +ALTER TABLE scheduled_jobs DROP COLUMN policy; + +-- Make the policy column nullable since it's not 
used anymore. +-- Since migrations are executed by domain orders (scheduler, auth, deployment) and +-- I have failed by making deployment rely on the scheduled_jobs table, I must keep +-- it or else I have to update the migration (which I think is worse). +ALTER TABLE scheduled_jobs ADD policy INTEGER NULL; diff --git a/pkg/bus/sqlite/mod.go b/pkg/bus/sqlite/mod.go new file mode 100644 index 00000000..a8b5a3c6 --- /dev/null +++ b/pkg/bus/sqlite/mod.go @@ -0,0 +1,28 @@ +package sqlite + +import ( + "context" + + "github.com/YuukanOO/seelf/pkg/bus" + "github.com/YuukanOO/seelf/pkg/storage/sqlite" +) + +// Setup the needed infrastructure for the scheduler. +func Setup( + b bus.Bus, + db *sqlite.Database, +) (*JobsStore, error) { + jobsStore := NewJobsStore(db, b) + + // Register some handlers to operate on jobs if needed. + bus.Register(b, jobsStore.GetAllJobs) + bus.Register(b, jobsStore.RetryJob) + bus.Register(b, jobsStore.DismissJob) + + if err := db.Migrate(Migrations); err != nil { + return nil, err + } + + // And reset retrieved jobs to make sure they can be retried + return jobsStore, jobsStore.ResetRetrievedJobs(context.Background()) +} diff --git a/pkg/bus/sqlite/store.go b/pkg/bus/sqlite/store.go index d55c21ba..6912537f 100644 --- a/pkg/bus/sqlite/store.go +++ b/pkg/bus/sqlite/store.go @@ -7,9 +7,11 @@ import ( "github.com/YuukanOO/seelf/pkg/apperr" "github.com/YuukanOO/seelf/pkg/bus" - "github.com/YuukanOO/seelf/pkg/flag" + "github.com/YuukanOO/seelf/pkg/bus/embedded" + "github.com/YuukanOO/seelf/pkg/bus/embedded/dismiss_job" + "github.com/YuukanOO/seelf/pkg/bus/embedded/get_jobs" + "github.com/YuukanOO/seelf/pkg/bus/embedded/retry_job" "github.com/YuukanOO/seelf/pkg/id" - "github.com/YuukanOO/seelf/pkg/monad" "github.com/YuukanOO/seelf/pkg/storage" "github.com/YuukanOO/seelf/pkg/storage/sqlite" "github.com/YuukanOO/seelf/pkg/storage/sqlite/builder" @@ -20,55 +22,36 @@ var ( migrations embed.FS Migrations = sqlite.NewMigrationsModule("scheduler", "migrations", migrations) -) - -type ( - job struct { - id string - msg bus.Request - policy bus.JobPolicy - } - jobQuery struct { - JobID string `json:"id"` - ResourceID string `json:"resource_id"` - Group string `json:"group"` - MessageName string `json:"message_name"` - MessageData string `json:"message_data"` - QueuedAt time.Time `json:"queued_at"` - NotBefore time.Time `json:"not_before"` - ErrorCode monad.Maybe[string] `json:"error_code"` - JobPolicy bus.JobPolicy `json:"policy"` - Retrieved bool `json:"retrieved"` - } - - store struct { - db *sqlite.Database - } + _ embedded.Job = (*job)(nil) + _ embedded.JobsStore = (*JobsStore)(nil) + _ bus.Scheduler = (*JobsStore)(nil) ) -func (j *job) ID() string { return j.id } -func (j *job) Message() bus.Request { return j.msg } -func (j *job) Policy() bus.JobPolicy { return j.policy } +// Inner job representation satisfying the embedded bus.Job interface +type job struct { + id string + msg bus.AsyncRequest +} -func (j *jobQuery) ID() string { return j.JobID } -func (j *jobQuery) Message() bus.Request { panic("not implemented") } // Should never happen because this is a query only job -func (j *jobQuery) Policy() bus.JobPolicy { return j.JobPolicy } +func (j *job) ID() string { return j.id } +func (j *job) Command() bus.AsyncRequest { return j.msg } -// Builds a new adapter persisting jobs in the given sqlite database. -// For it to work, commands must be (de)serializable using the bus.Marshallable mapper. 
diff --git a/pkg/bus/sqlite/store.go b/pkg/bus/sqlite/store.go
index d55c21ba..6912537f 100644
--- a/pkg/bus/sqlite/store.go
+++ b/pkg/bus/sqlite/store.go
@@ -7,9 +7,11 @@ import (
 	"github.com/YuukanOO/seelf/pkg/apperr"
 	"github.com/YuukanOO/seelf/pkg/bus"
-	"github.com/YuukanOO/seelf/pkg/flag"
+	"github.com/YuukanOO/seelf/pkg/bus/embedded"
+	"github.com/YuukanOO/seelf/pkg/bus/embedded/dismiss_job"
+	"github.com/YuukanOO/seelf/pkg/bus/embedded/get_jobs"
+	"github.com/YuukanOO/seelf/pkg/bus/embedded/retry_job"
 	"github.com/YuukanOO/seelf/pkg/id"
-	"github.com/YuukanOO/seelf/pkg/monad"
 	"github.com/YuukanOO/seelf/pkg/storage"
 	"github.com/YuukanOO/seelf/pkg/storage/sqlite"
 	"github.com/YuukanOO/seelf/pkg/storage/sqlite/builder"
@@ -20,55 +22,36 @@ var (
 	migrations embed.FS

 	Migrations = sqlite.NewMigrationsModule("scheduler", "migrations", migrations)
-)
-
-type (
-	job struct {
-		id     string
-		msg    bus.Request
-		policy bus.JobPolicy
-	}
-
-	jobQuery struct {
-		JobID       string              `json:"id"`
-		ResourceID  string              `json:"resource_id"`
-		Group       string              `json:"group"`
-		MessageName string              `json:"message_name"`
-		MessageData string              `json:"message_data"`
-		QueuedAt    time.Time           `json:"queued_at"`
-		NotBefore   time.Time           `json:"not_before"`
-		ErrorCode   monad.Maybe[string] `json:"error_code"`
-		JobPolicy   bus.JobPolicy       `json:"policy"`
-		Retrieved   bool                `json:"retrieved"`
-	}
-
-	store struct {
-		db *sqlite.Database
-	}
+
+	_ embedded.Job       = (*job)(nil)
+	_ embedded.JobsStore = (*JobsStore)(nil)
+	_ bus.Scheduler      = (*JobsStore)(nil)
 )

-func (j *job) ID() string            { return j.id }
-func (j *job) Message() bus.Request  { return j.msg }
-func (j *job) Policy() bus.JobPolicy { return j.policy }
+// Inner job representation satisfying the embedded bus.Job interface
+type job struct {
+	id  string
+	msg bus.AsyncRequest
+}

-func (j *jobQuery) ID() string            { return j.JobID }
-func (j *jobQuery) Message() bus.Request  { panic("not implemented") } // Should never happen because this is a query only job
-func (j *jobQuery) Policy() bus.JobPolicy { return j.JobPolicy }
+func (j *job) ID() string                { return j.id }
+func (j *job) Command() bus.AsyncRequest { return j.msg }

-// Builds a new adapter persisting jobs in the given sqlite database.
-// For it to work, commands must be (de)serializable using the bus.Marshallable mapper.
-func NewScheduledJobsStore(db *sqlite.Database) bus.ScheduledJobsStore {
-	return &store{db}
+type JobsStore struct {
+	db         *sqlite.Database
+	dispatcher bus.Dispatcher
 }

-// Setup the scheduler adapter, migrate the database and reset running jobs by marking
-// them as not retrieved so they will be picked up next time GetNextPendingJobs is called.
-// You MUST call this method at the application startup.
-func (s *store) Setup() error {
-	if err := s.db.Migrate(Migrations); err != nil {
-		return err
+// Builds a new adapter persisting jobs in the given sqlite database.
+// For it to work, commands must be (de)serializable using the bus.Marshallable mapper.
+func NewJobsStore(db *sqlite.Database, dispatcher bus.Dispatcher) *JobsStore {
+	return &JobsStore{
+		db:         db,
+		dispatcher: dispatcher,
 	}
+}

+func (s *JobsStore) ResetRetrievedJobs(ctx context.Context) error {
 	_, err := s.db.ExecContext(context.Background(), `
 		UPDATE scheduled_jobs
 		SET retrieved = false
@@ -77,158 +60,151 @@ func (s *store) Setup() error {
 	return err
 }

-func (s *store) Create(
-	ctx context.Context,
-	msg bus.Schedulable,
-	options bus.CreateOptions,
-) error {
-	jobId := id.New[string]()
-	now := time.Now().UTC()
-	msgValue, err := storage.ValueJSON(msg)
+func (s *JobsStore) GetAllJobs(ctx context.Context, query get_jobs.Query) (storage.Paginated[get_jobs.Job], error) {
+	return builder.
+		Select[get_jobs.Job](`
+			id
+			,resource_id
+			,[group]
+			,message_name
+			,message_data
+			,queued_at
+			,not_before
+			,errcode
+			,retrieved
+		`).
+		F("FROM scheduled_jobs ORDER BY queued_at").
+		Paginate(s.db, ctx, jobQueryMapper, query.Page.Get(1), 10)
+}
+
+func (s *JobsStore) RetryJob(ctx context.Context, cmd retry_job.Command) (bus.UnitType, error) {
+	result, err := s.db.ExecContext(ctx, `
+		UPDATE scheduled_jobs
+		SET errcode = NULL
+		WHERE id = ? AND errcode IS NOT NULL AND retrieved = false
+	`, cmd.ID)

 	if err != nil {
-		return err
+		return bus.Unit, err
 	}

-	var (
-		msgName    = msg.Name_()
-		resourceId = msg.ResourceID()
-	)
+	affected, err := result.RowsAffected()

-	// Could not use the ON CONFLICT here :'(
-	if flag.IsSet(options.Policy, bus.JobPolicyMerge) {
-		result, err := s.db.ExecContext(ctx, `
-		UPDATE scheduled_jobs
-		SET message_data = ?
-		WHERE id = (
-			SELECT id
-			FROM scheduled_jobs
-			WHERE resource_id = ? AND message_name = ? AND retrieved = false
-		)`, msgValue, resourceId, msgName)
-
-		if affected, _ := result.RowsAffected(); affected > 0 {
-			return err
-		}
+	if err != nil {
+		return bus.Unit, err
 	}

-	return builder.
-		Insert("scheduled_jobs", builder.Values{
-			"id":           jobId,
-			"resource_id":  resourceId,
-			"[group]":      options.Group.Get(jobId), // Default to the job id if no group set
-			"message_name": msgName,
-			"message_data": msgValue,
-			"queued_at":    now,
-			"not_before":   now,
-			"policy":       options.Policy,
-			"retrieved":    false,
-		}).
-		Exec(s.db, ctx)
+	if affected == 0 {
+		return bus.Unit, apperr.ErrNotFound
+	}
+
+	return bus.Unit, err
 }

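`GetAllJobs` above returns `storage.Paginated[get_jobs.Job]`, but the `get_jobs` package itself is not part of this diff. Based on the selected columns, the `jobQueryMapper` scan order further down and the removed `jobQuery` struct whose fields and types it mirrors, its read model plausibly looks like this (struct tags and the exact field types are assumptions):

```go
package get_jobs

import (
	"time"

	"github.com/YuukanOO/seelf/pkg/monad"
)

// Job is the read model returned when listing scheduled jobs. Field names follow
// the columns selected by GetAllJobs; types mirror the removed jobQuery struct.
type Job struct {
	ID          string              `json:"id"`
	ResourceID  string              `json:"resource_id"`
	Group       string              `json:"group"`
	MessageName string              `json:"message_name"`
	MessageData string              `json:"message_data"`
	QueuedAt    time.Time           `json:"queued_at"`
	NotBefore   time.Time           `json:"not_before"`
	ErrorCode   monad.Maybe[string] `json:"error_code"`
	Retrieved   bool                `json:"retrieved"`
}
```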
-func (s *store) Delete(ctx context.Context, id string) error {
-	r, err := s.db.ExecContext(ctx, "DELETE FROM scheduled_jobs WHERE id = ? AND (policy & ?) != 0",
-		id, bus.JobPolicyCancellable)
+func (s *JobsStore) DismissJob(ctx context.Context, cmd dismiss_job.Command) (bus.UnitType, error) {
+	job, err := builder.
+		Query[embedded.Job](`
+			SELECT
+				id, message_name, message_data
+			FROM scheduled_jobs
+			WHERE id = ? AND errcode IS NOT NULL AND retrieved = false
+		`, cmd.ID).
+		One(s.db, ctx, jobMapper)

 	if err != nil {
-		return err
+		return bus.Unit, err
 	}

-	affected, err := r.RowsAffected()
+	if err = s.Done(ctx, job); err != nil {
+		return bus.Unit, err
+	}
+
+	return bus.Unit, s.dispatcher.Notify(ctx, embedded.JobDismissed{
+		ID:      job.ID(),
+		Command: job.Command(),
+	})
+}
+
+func (s *JobsStore) Queue(
+	ctx context.Context,
+	msg bus.AsyncRequest,
+) error {
+	now := time.Now().UTC()
+	msgData, err := storage.ValueJSON(msg)

 	if err != nil {
 		return err
 	}

-	if affected == 0 {
-		return apperr.ErrNotFound
-	}
-
-	return nil
-}
-
-func (s *store) GetAllJobs(ctx context.Context, filters bus.GetJobsFilters) (storage.Paginated[bus.ScheduledJob], error) {
 	return builder.
-		Select[bus.ScheduledJob](`
-			id
-			,resource_id
-			,[group]
-			,message_name
-			,message_data
-			,queued_at
-			,not_before
-			,errcode
-			,policy
-			,retrieved
-		`).
-		F("FROM scheduled_jobs ORDER BY queued_at").
-		Paginate(s.db, ctx, jobQueryMapper, filters.Page.Get(1), 10)
+		Insert("scheduled_jobs", builder.Values{
+			"id":           id.New[string](),
+			"resource_id":  msg.ResourceID(),
+			"[group]":      msg.Group(),
+			"message_name": msg.Name_(),
+			"message_data": msgData,
+			"queued_at":    now,
+			"not_before":   now,
+			"retrieved":    false,
+		}).
+		Exec(s.db, ctx)
 }

-func (s *store) GetNextPendingJobs(ctx context.Context) ([]bus.ScheduledJob, error) {
+func (s *JobsStore) GetNextPendingJobs(ctx context.Context) ([]embedded.Job, error) {
 	// This query will lock the database to make sure we can't retrieve the same job twice.
 	return builder.
-		Query[bus.ScheduledJob](`
+		Query[embedded.Job](`
		UPDATE scheduled_jobs
		SET retrieved = true
		WHERE id IN (SELECT id FROM (
			SELECT id, MIN(not_before)
			FROM scheduled_jobs sj
			WHERE sj.retrieved = false
+				AND sj.errcode IS NULL
				AND sj.not_before <= DATETIME('now')
				AND sj.[group] NOT IN (SELECT DISTINCT [group] FROM scheduled_jobs WHERE retrieved = true)
-				AND (sj.policy & ? = 0 OR (SELECT COUNT(resource_id) FROM scheduled_jobs WHERE resource_id = sj.resource_id) <= 1)
			GROUP BY sj.[group]
			)
		)
-		RETURNING id, message_name, message_data, policy`, bus.JobPolicyWaitForOthersResourceID).
+		RETURNING id, message_name, message_data`).
 		All(s.db, ctx, jobMapper)
 }

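`GetNextPendingJobs`, together with `Done`, `Failed` and `Delay` below, forms the contract the embedded runner polls against. The actual runner lives in `pkg/bus/embedded` and is not shown here; the sketch below is only an assumption about how such a loop could consume this store (the callback, the error handling and the Delay/Failed combination are all hypothetical):

```go
// poll fetches pending jobs at a fixed interval, hands them to a processing callback
// and acknowledges them: Done on success, Delay + Failed on error so the rest of the
// group keeps its ordering while the failed job carries the error code.
func poll(ctx context.Context, store *JobsStore, process func(context.Context, embedded.Job) error, interval time.Duration) error {
	ticker := time.NewTicker(interval)
	defer ticker.Stop()

	for {
		select {
		case <-ctx.Done():
			return ctx.Err()
		case <-ticker.C:
		}

		jobs, err := store.GetNextPendingJobs(ctx)
		if err != nil {
			return err
		}

		for _, job := range jobs {
			if err := process(ctx, job); err != nil {
				_ = store.Delay(ctx, job)       // postpone the rest of the group
				_ = store.Failed(ctx, job, err) // flag this job with the error
				continue
			}

			_ = store.Done(ctx, job)
		}
	}
}
```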
-func (s *store) Retry(ctx context.Context, j bus.ScheduledJob, jobErr error) error {
-	// If we don't need to preserve the order of related tasks, we simply update the job to queue it again
-	// in the future.
-	if !flag.IsSet(j.Policy(), bus.JobPolicyRetryPreserveOrder) {
-		if _, err := s.db.ExecContext(ctx, `
+func (s *JobsStore) Failed(ctx context.Context, job embedded.Job, jobErr error) error {
+	_, err := s.db.ExecContext(ctx, `
		UPDATE scheduled_jobs
		SET errcode = ?
-			,not_before = DATETIME('now', '+15 seconds')
			,retrieved = false
-		WHERE id = ?`, jobErr.Error(), j.ID(),
-		); err != nil {
-			return err
-		}
-	}
+		WHERE id = ?`, jobErr.Error(), job.ID(),
+	)
+	return err
+}

-	// If instead, we want all jobs sharing the same dedupe_name to be updated all at once,
-	// we should make sure to set all of them in the future by a specific amount to preserve
-	// the job order.
+func (s *JobsStore) Delay(ctx context.Context, job embedded.Job) error {
+	// To preserve jobs order inside the same group, every job will be postponed by an amount relative to its position in the queue (ordered by queued_at)
 	_, err := s.db.ExecContext(ctx, `
		UPDATE scheduled_jobs
		SET
-			errcode = v.errcode
-			,not_before = v.updated_date
+			not_before = v.updated_date
			,retrieved = false
		FROM (
			SELECT
				id
-				,CASE WHEN id = ? THEN ? ELSE errcode END AS errcode
-				,DATETIME('now', '+' || CAST(14 + 1 * ROW_NUMBER() OVER (ORDER BY not_before) AS TEXT) || ' seconds') AS updated_date
+				,DATETIME('now', '+' || CAST(14 + 1 * ROW_NUMBER() OVER (ORDER BY queued_at) AS TEXT) || ' seconds') AS updated_date
			FROM scheduled_jobs
-			WHERE [group] = (SELECT [group] FROM scheduled_jobs WHERE id = ?)
+			WHERE errcode IS NULL AND [group] = ?
		) v
-		WHERE scheduled_jobs.id = v.id`, j.ID(), jobErr.Error(), j.ID())
-
+		WHERE scheduled_jobs.id = v.id`, job.Command().Group())
 	return err
 }

-func (s *store) Done(ctx context.Context, j bus.ScheduledJob) error {
-	_, err := s.db.ExecContext(ctx, "DELETE FROM scheduled_jobs WHERE id = ?", j.ID())
+func (s *JobsStore) Done(ctx context.Context, job embedded.Job) error {
+	_, err := s.db.ExecContext(ctx, "DELETE FROM scheduled_jobs WHERE id = ?", job.ID())
 	return err
 }

-func jobMapper(scanner storage.Scanner) (bus.ScheduledJob, error) {
+func jobMapper(scanner storage.Scanner) (embedded.Job, error) {
 	var (
 		j       job
 		msgName string
@@ -239,7 +215,6 @@ func jobMapper(scanner storage.Scanner) (bus.ScheduledJob, error) {
 		&j.id,
 		&msgName,
 		&msgData,
-		&j.policy,
 	)

 	if err != nil {
@@ -251,11 +226,9 @@ func jobMapper(scanner storage.Scanner) (bus.ScheduledJob, error) {
 	return &j, err
 }

-func jobQueryMapper(scanner storage.Scanner) (bus.ScheduledJob, error) {
-	var j jobQuery
-
-	err := scanner.Scan(
-		&j.JobID,
+func jobQueryMapper(scanner storage.Scanner) (j get_jobs.Job, err error) {
+	err = scanner.Scan(
+		&j.ID,
 		&j.ResourceID,
 		&j.Group,
 		&j.MessageName,
@@ -263,9 +236,8 @@ func jobQueryMapper(scanner storage.Scanner) (bus.ScheduledJob, error) {
 		&j.MessageData,
 		&j.QueuedAt,
 		&j.NotBefore,
 		&j.ErrorCode,
-		&j.JobPolicy,
 		&j.Retrieved,
 	)

-	return &j, err
+	return j, err
 }
diff --git a/pkg/storage/sqlite/database.go b/pkg/storage/sqlite/database.go
index f5d84555..1260c468 100644
--- a/pkg/storage/sqlite/database.go
+++ b/pkg/storage/sqlite/database.go
@@ -8,6 +8,7 @@ import (
 	"github.com/YuukanOO/seelf/pkg/bus"
 	"github.com/YuukanOO/seelf/pkg/event"
 	"github.com/YuukanOO/seelf/pkg/log"
+	"github.com/YuukanOO/seelf/pkg/storage"
 	"github.com/YuukanOO/seelf/pkg/storage/sqlite/builder"
 	"github.com/golang-migrate/migrate/v4"
 	"github.com/golang-migrate/migrate/v4/database/sqlite3"
@@ -21,7 +22,10 @@ const (
 	transactionContextKey contextKey = "sqlitetx"
 )

-var _ builder.Executor = (*Database)(nil) // Ensure Database implements the Executor interface
+var (
+	_ builder.Executor          = (*Database)(nil) // Ensure Database implements the Executor interface
+	_ storage.UnitOfWorkFactory = (*Database)(nil)
+)

 type (
 	// Represents a single module for database migrations.
@@ -61,6 +65,38 @@ func (db *Database) Close() error {
 	return db.conn.Close()
 }

+// Execute the given function in a transaction managing the commit and rollback
+// based on the returned error if any.
+func (db *Database) Create(ctx context.Context, fn func(context.Context) error) (finalErr error) {
+	var (
+		tx      *sql.Tx
+		created bool
+	)
+
+	ctx, tx, created = db.WithTransaction(ctx)
+
+	defer func() {
+		if !created {
+			return
+		}
+
+		var err error
+
+		if finalErr != nil {
+			err = tx.Rollback()
+		} else {
+			err = tx.Commit()
+		}
+
+		if err != nil {
+			finalErr = err
+		}
+	}()
+
+	finalErr = fn(ctx)
+	return
+}
+
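The new `Create` method centralizes transaction handling: the callback runs with a transactional context and the commit or rollback is decided by the error it returns. A hypothetical caller could rely on it like this (the helper, the ids and the queries are made up for the example, only `Create` and `ExecContext` come from the patch):

```go
// execBoth runs two statements that must either commit together or roll back together.
func execBoth(ctx context.Context, db *sqlite.Database) error {
	return db.Create(ctx, func(ctx context.Context) error {
		// The transaction opened by Create travels inside ctx, so nested calls going
		// through the same database reuse it instead of opening a new one.
		if _, err := db.ExecContext(ctx, "UPDATE scheduled_jobs SET retrieved = false WHERE id = ?", "some-id"); err != nil {
			return err // non-nil error triggers a rollback
		}

		_, err := db.ExecContext(ctx, "DELETE FROM scheduled_jobs WHERE id = ?", "another-id")
		return err // nil commits, non-nil rolls back
	})
}
```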
 // Migrates the opened database to the latest version.
 func (db *Database) Migrate(modules ...MigrationsModule) error {
 	for _, module := range modules {
@@ -171,53 +207,34 @@ func (db *Database) tryGetTransaction(ctx context.Context) builder.Executor {
 // There's no way to add this method to the DB without type conversion so this is the easiest way
 // for now. Without the generics, I will always have to convert an array of entities to []event.Source
 // which is not very convenient.
-func WriteAndDispatch[T event.Source](
+func WriteEvents[T event.Source](
 	db *Database,
 	ctx context.Context,
 	entities []T,
 	switcher func(context.Context, event.Event) error,
-) (finalErr error) {
-	var (
-		tx      *sql.Tx
-		created bool
-	)
-
-	ctx, tx, created = db.WithTransaction(ctx)
-
-	defer func() {
-		if !created {
-			return
-		}
-
-		if finalErr != nil {
-			if err := tx.Rollback(); err != nil {
-				finalErr = err
+) error {
+	return db.Create(ctx, func(ctx context.Context) error {
+		for _, ent := range entities {
+			events := event.Unwrap(ent)
+			notifications := make([]bus.Signal, len(events)) // It's a shame Go cannot treat a slice of events as a slice of signals since an Event is effectively a Signal
+
+			for i, evt := range events {
+				if err := switcher(ctx, evt); err != nil {
+					return err
+				}
+
+				notifications[i] = evt
 			}
-		} else {
-			finalErr = tx.Commit()
-		}
-	}()
-
-	for _, ent := range entities {
-		events := event.Unwrap(ent)
-		notifs := make([]bus.Signal, len(events)) // It's a shame Go could not accept an array of events as a slice of signals since Event are effectively Signal

-		for i, evt := range events {
-			if finalErr = switcher(ctx, evt); finalErr != nil {
-				return
+			if err := db.bus.Notify(ctx, notifications...); err != nil {
+				return err
 			}

-			notifs[i] = evt
-		}
-
-		if finalErr = db.bus.Notify(ctx, notifs...); finalErr != nil {
-			return
+			// TODO: clear entities events (see #71)
 		}

-		// TODO: clear entities events (see #71)
-	}
-
-	return nil
+		return nil
+	})
 }

 // Builds a new migrations module with the given module name (used as a migrations history table name prefix)
diff --git a/pkg/storage/unit_of_work.go b/pkg/storage/unit_of_work.go
new file mode 100644
index 00000000..ad230535
--- /dev/null
+++ b/pkg/storage/unit_of_work.go
@@ -0,0 +1,8 @@
+package storage
+
+import "context"
+
+// Factory to retrieve a transactional context to make sure everything is atomic.
+type UnitOfWorkFactory interface {
+	Create(context.Context, func(context.Context) error) error
+}
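With `*sqlite.Database` now satisfying `storage.UnitOfWorkFactory`, calling code can make several writes atomic without knowing anything about SQL transactions. A hypothetical handler illustrating the intent (the use case, its repositories and method names are made up; only the `UnitOfWorkFactory` interface comes from the patch):

```go
package example

import (
	"context"

	"github.com/YuukanOO/seelf/pkg/storage"
)

// moveApp is a fictional use case with two writes that must stay consistent.
// It depends only on storage.UnitOfWorkFactory, not on *sqlite.Database or *sql.Tx.
type moveApp struct {
	uow     storage.UnitOfWorkFactory
	apps    interface{ Rename(context.Context, string, string) error }
	targets interface{ Reassign(context.Context, string, string) error }
}

func (h *moveApp) Handle(ctx context.Context, appID, targetID string) error {
	return h.uow.Create(ctx, func(ctx context.Context) error {
		if err := h.apps.Rename(ctx, appID, "new-name"); err != nil {
			return err // rolls the whole unit of work back
		}

		// Committed only if this second write succeeds as well.
		return h.targets.Reassign(ctx, appID, targetID)
	})
}
```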