diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 112431bc0..ab11f8c0c 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -41,7 +41,7 @@ jobs: - uses: golangci/golangci-lint-action@v3 with: - version: v1.50.1 + version: v1.53 args: --timeout 3m --verbose start: diff --git a/README.md b/README.md index 7387f17be..7b340c4dd 100644 --- a/README.md +++ b/README.md @@ -154,7 +154,7 @@ The CLI is a WIP and we're still exploring the design, so expect a lot of breaki To run from source: ```sh -# Go >= 1.18 +# Go >= 1.20 go run . help ``` diff --git a/go.mod b/go.mod index e6854df7a..a24fa88ff 100644 --- a/go.mod +++ b/go.mod @@ -1,6 +1,6 @@ module github.com/supabase/cli -go 1.18 +go 1.20 require ( github.com/BurntSushi/toml v1.3.2 diff --git a/internal/db/reset/reset.go b/internal/db/reset/reset.go index c3f3d4d17..9f4723fff 100644 --- a/internal/db/reset/reset.go +++ b/internal/db/reset/reset.go @@ -12,6 +12,7 @@ import ( "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/container" + "github.com/docker/docker/errdefs" "github.com/docker/docker/pkg/stdcopy" "github.com/jackc/pgconn" "github.com/jackc/pgerrcode" @@ -71,6 +72,11 @@ func resetDatabase(ctx context.Context, fsys afero.Fs, options ...func(*pgx.Conn if err := initDatabase(ctx, options...); err != nil { return err } + if utils.Config.Db.MajorVersion > 14 { + if err := InitSchema15(ctx, utils.DbId); err != nil { + return err + } + } if err := RestartDatabase(ctx, os.Stderr); err != nil { return err } @@ -154,24 +160,16 @@ func RestartDatabase(ctx context.Context, w io.Writer) error { if !WaitForHealthyService(ctx, utils.DbId, healthTimeout) { return ErrDatabase } - // TODO: update storage-api to handle postgres restarts - if err := utils.Docker.ContainerRestart(ctx, utils.StorageId, container.StopOptions{}); err != nil { - return fmt.Errorf("failed to restart storage-api: %w", err) - } - // Reload PostgREST schema cache. - if err := utils.Docker.ContainerKill(ctx, utils.RestId, "SIGUSR1"); err != nil { - return fmt.Errorf("failed to reload PostgREST schema cache: %w", err) - } - // TODO: update gotrue to handle postgres restarts - if err := utils.Docker.ContainerRestart(ctx, utils.GotrueId, container.StopOptions{}); err != nil { - return fmt.Errorf("failed to restart gotrue: %w", err) - } - // TODO: update realtime to handle postgres restarts - if err := utils.Docker.ContainerRestart(ctx, utils.RealtimeId, container.StopOptions{}); err != nil { - return fmt.Errorf("failed to restart realtime: %w", err) - } - // Wait for services with internal schema migrations - return WaitForServiceReady(ctx, []string{utils.StorageId, utils.GotrueId}) + // No need to restart PostgREST because it automatically reconnects and listens for schema changes + services := []string{utils.StorageId, utils.GotrueId, utils.RealtimeId} + result := utils.WaitAll(services, func(id string) error { + if err := utils.Docker.ContainerRestart(ctx, id, container.StopOptions{}); err != nil && !errdefs.IsNotFound(err) { + return fmt.Errorf("Failed to restart %s: %w", id, err) + } + return nil + }) + // Do not wait for service healthy as those services may be excluded from starting + return errors.Join(result...) 
} func RetryEverySecond(ctx context.Context, callback func() bool, timeout time.Duration) bool { @@ -278,3 +276,28 @@ func likeEscapeSchema(schemas []string) (result []string) { } return result } + +func InitSchema15(ctx context.Context, host string) error { + // Apply service migrations + if err := utils.DockerRunOnceWithStream(ctx, utils.StorageImage, []string{ + "ANON_KEY=" + utils.Config.Auth.AnonKey, + "SERVICE_KEY=" + utils.Config.Auth.ServiceRoleKey, + "PGRST_JWT_SECRET=" + utils.Config.Auth.JwtSecret, + fmt.Sprintf("DATABASE_URL=postgresql://supabase_storage_admin:%s@%s:5432/postgres", utils.Config.Db.Password, host), + fmt.Sprintf("FILE_SIZE_LIMIT=%v", utils.Config.Storage.FileSizeLimit), + "STORAGE_BACKEND=file", + "TENANT_ID=stub", + // TODO: https://github.com/supabase/storage-api/issues/55 + "REGION=stub", + "GLOBAL_S3_BUCKET=stub", + }, []string{"node", "dist/scripts/migrate-call.js"}, io.Discard, os.Stderr); err != nil { + return err + } + return utils.DockerRunOnceWithStream(ctx, utils.GotrueImage, []string{ + "GOTRUE_LOG_LEVEL=error", + "GOTRUE_DB_DRIVER=postgres", + fmt.Sprintf("GOTRUE_DB_DATABASE_URL=postgresql://supabase_auth_admin:%s@%s:5432/postgres", utils.Config.Db.Password, host), + "GOTRUE_SITE_URL=" + utils.Config.Auth.SiteUrl, + "GOTRUE_JWT_SECRET=" + utils.Config.Auth.JwtSecret, + }, []string{"gotrue", "migrate"}, io.Discard, os.Stderr) +} diff --git a/internal/db/reset/reset_test.go b/internal/db/reset/reset_test.go index fe9afafc5..069db9b11 100644 --- a/internal/db/reset/reset_test.go +++ b/internal/db/reset/reset_test.go @@ -25,6 +25,15 @@ import ( ) func TestResetCommand(t *testing.T) { + t.Run("throws error on connect failure", func(t *testing.T) { + // Setup in-memory fs + fsys := afero.NewMemMapFs() + // Run test + err := Run(context.Background(), pgconn.Config{Password: "postgres"}, fsys) + // Check error + assert.ErrorContains(t, err, "invalid port (outside range)") + }) + t.Run("throws error on missing config", func(t *testing.T) { err := Run(context.Background(), pgconn.Config{}, afero.NewMemMapFs()) assert.ErrorIs(t, err, os.ErrNotExist) @@ -253,46 +262,59 @@ func TestRestartDatabase(t *testing.T) { Health: &types.Health{Status: "healthy"}, }, }}) - // Restarts postgREST - utils.RestId = "test-rest" - gock.New(utils.Docker.DaemonHost()). - Post("/v" + utils.Docker.ClientVersion() + "/containers/" + utils.RestId + "/kill"). - Reply(http.StatusOK) - // Restarts storage-api + // Restarts services utils.StorageId = "test-storage" - gock.New(utils.Docker.DaemonHost()). - Post("/v" + utils.Docker.ClientVersion() + "/containers/" + utils.StorageId + "/restart"). - Reply(http.StatusOK) - // Restarts gotrue utils.GotrueId = "test-auth" - gock.New(utils.Docker.DaemonHost()). - Post("/v" + utils.Docker.ClientVersion() + "/containers/" + utils.GotrueId + "/restart"). - Reply(http.StatusOK) - // Restarts realtime utils.RealtimeId = "test-realtime" + for _, container := range []string{utils.StorageId, utils.GotrueId, utils.RealtimeId} { + gock.New(utils.Docker.DaemonHost()). + Post("/v" + utils.Docker.ClientVersion() + "/containers/" + container + "/restart"). 
+ Reply(http.StatusOK) + } + // Run test + err := RestartDatabase(context.Background(), io.Discard) + // Check error + assert.NoError(t, err) + assert.Empty(t, apitest.ListUnmatchedRequests()) + }) + + t.Run("throws error on service restart failure", func(t *testing.T) { + utils.DbId = "test-reset" + // Setup mock docker + require.NoError(t, apitest.MockDocker(utils.Docker)) + defer gock.OffAll() + // Restarts postgres gock.New(utils.Docker.DaemonHost()). - Post("/v" + utils.Docker.ClientVersion() + "/containers/" + utils.RealtimeId + "/restart"). + Post("/v" + utils.Docker.ClientVersion() + "/containers/" + utils.DbId + "/restart"). Reply(http.StatusOK) - // Wait for services ready - for _, container := range []string{utils.StorageId, utils.GotrueId} { + gock.New(utils.Docker.DaemonHost()). + Get("/v" + utils.Docker.ClientVersion() + "/containers/" + utils.DbId + "/json"). + Reply(http.StatusOK). + JSON(types.ContainerJSON{ContainerJSONBase: &types.ContainerJSONBase{ + State: &types.ContainerState{ + Running: true, + Health: &types.Health{Status: "healthy"}, + }, + }}) + // Restarts services + utils.StorageId = "test-storage" + utils.GotrueId = "test-auth" + utils.RealtimeId = "test-realtime" + for _, container := range []string{utils.StorageId, utils.GotrueId, utils.RealtimeId} { gock.New(utils.Docker.DaemonHost()). - Get("/v" + utils.Docker.ClientVersion() + "/containers/" + container + "/json"). - Reply(http.StatusOK). - JSON(types.ContainerJSON{ContainerJSONBase: &types.ContainerJSONBase{ - State: &types.ContainerState{ - Running: true, - Health: &types.Health{Status: "healthy"}, - }, - }}) + Post("/v" + utils.Docker.ClientVersion() + "/containers/" + container + "/restart"). + Reply(http.StatusServiceUnavailable) } // Run test err := RestartDatabase(context.Background(), io.Discard) // Check error - assert.NoError(t, err) + assert.ErrorContains(t, err, "Failed to restart "+utils.StorageId) + assert.ErrorContains(t, err, "Failed to restart "+utils.GotrueId) + assert.ErrorContains(t, err, "Failed to restart "+utils.RealtimeId) assert.Empty(t, apitest.ListUnmatchedRequests()) }) - t.Run("logs error on restart failure", func(t *testing.T) { + t.Run("throws error on db restart failure", func(t *testing.T) { utils.DbId = "test-db" // Setup mock docker require.NoError(t, apitest.MockDocker(utils.Docker)) @@ -308,7 +330,7 @@ func TestRestartDatabase(t *testing.T) { assert.Empty(t, apitest.ListUnmatchedRequests()) }) - t.Run("timeout health check", func(t *testing.T) { + t.Run("throws error on health check timeout", func(t *testing.T) { utils.DbId = "test-reset" healthTimeout = 0 * time.Second // Setup mock docker diff --git a/internal/db/start/start.go b/internal/db/start/start.go index 01aa66dba..0f9aed1be 100644 --- a/internal/db/start/start.go +++ b/internal/db/start/start.go @@ -140,7 +140,7 @@ func initSchema(ctx context.Context, conn *pgx.Conn, host string, w io.Writer) e if utils.Config.Db.MajorVersion <= 14 { return initSchema14(ctx, conn) } - return initSchema15(ctx, host) + return reset.InitSchema15(ctx, host) } func initSchema14(ctx context.Context, conn *pgx.Conn) error { @@ -150,31 +150,6 @@ func initSchema14(ctx context.Context, conn *pgx.Conn) error { return apply.BatchExecDDL(ctx, conn, strings.NewReader(utils.InitialSchemaSql)) } -func initSchema15(ctx context.Context, host string) error { - // Apply service migrations - if err := utils.DockerRunOnceWithStream(ctx, utils.StorageImage, []string{ - "ANON_KEY=" + utils.Config.Auth.AnonKey, - "SERVICE_KEY=" + 
utils.Config.Auth.ServiceRoleKey, - "PGRST_JWT_SECRET=" + utils.Config.Auth.JwtSecret, - fmt.Sprintf("DATABASE_URL=postgresql://supabase_storage_admin:%s@%s:5432/postgres", utils.Config.Db.Password, host), - fmt.Sprintf("FILE_SIZE_LIMIT=%v", utils.Config.Storage.FileSizeLimit), - "STORAGE_BACKEND=file", - "TENANT_ID=stub", - // TODO: https://github.com/supabase/storage-api/issues/55 - "REGION=stub", - "GLOBAL_S3_BUCKET=stub", - }, []string{"node", "dist/scripts/migrate-call.js"}, io.Discard, os.Stderr); err != nil { - return err - } - return utils.DockerRunOnceWithStream(ctx, utils.GotrueImage, []string{ - "GOTRUE_LOG_LEVEL=error", - "GOTRUE_DB_DRIVER=postgres", - fmt.Sprintf("GOTRUE_DB_DATABASE_URL=postgresql://supabase_auth_admin:%s@%s:5432/postgres", utils.Config.Db.Password, host), - "GOTRUE_SITE_URL=" + utils.Config.Auth.SiteUrl, - "GOTRUE_JWT_SECRET=" + utils.Config.Auth.JwtSecret, - }, []string{"gotrue", "migrate"}, io.Discard, os.Stderr) -} - func setupDatabase(ctx context.Context, fsys afero.Fs, w io.Writer, options ...func(*pgx.ConnConfig)) error { conn, err := utils.ConnectLocalPostgres(ctx, pgconn.Config{}, options...) if err != nil { diff --git a/internal/stop/stop.go b/internal/stop/stop.go index a3604db65..decedc986 100644 --- a/internal/stop/stop.go +++ b/internal/stop/stop.go @@ -3,12 +3,15 @@ package stop import ( "context" _ "embed" + "errors" "fmt" "io" "os" "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/container" "github.com/docker/docker/api/types/filters" + "github.com/docker/docker/errdefs" "github.com/spf13/afero" "github.com/supabase/cli/internal/utils" ) @@ -50,7 +53,12 @@ func stop(ctx context.Context, backup bool, w io.Writer) error { } } fmt.Fprintln(w, "Stopping containers...") - utils.WaitAll(ids, utils.DockerStop) + result := utils.WaitAll(ids, func(id string) error { + return utils.Docker.ContainerStop(ctx, id, container.StopOptions{}) + }) + if err := errors.Join(result...); err != nil { + return err + } if _, err := utils.Docker.ContainersPrune(ctx, args); err != nil { return err } @@ -62,11 +70,15 @@ func stop(ctx context.Context, backup bool, w io.Writer) error { } else { // TODO: label named volumes to use VolumesPrune for branch support volumes := []string{utils.ConfigId, utils.DbId, utils.StorageId} - utils.WaitAll(volumes, func(name string) { - if err := utils.Docker.VolumeRemove(ctx, name, true); err != nil { - fmt.Fprintln(os.Stderr, "failed to remove volume:", name, err) + result = utils.WaitAll(volumes, func(name string) error { + if err := utils.Docker.VolumeRemove(ctx, name, true); err != nil && !errdefs.IsNotFound(err) { + return fmt.Errorf("Failed to remove volume %s: %w", name, err) } + return nil }) + if err := errors.Join(result...); err != nil { + return err + } } // Remove networks. 
_, err = utils.Docker.NetworksPrune(ctx, args) diff --git a/internal/utils/connect_test.go b/internal/utils/connect_test.go index 74cd05596..a154d6b35 100644 --- a/internal/utils/connect_test.go +++ b/internal/utils/connect_test.go @@ -100,6 +100,6 @@ func TestConnectLocal(t *testing.T) { t.Run("connects with debug log", func(t *testing.T) { viper.Set("DEBUG", true) _, err := ConnectLocalPostgres(context.Background(), pgconn.Config{Host: "0"}) - assert.ErrorContains(t, err, "connect: connection refused") + assert.Error(t, err) }) } diff --git a/internal/utils/docker.go b/internal/utils/docker.go index d642a1b09..418f72a3a 100644 --- a/internal/utils/docker.go +++ b/internal/utils/docker.go @@ -99,27 +99,29 @@ var ( Volumes []string ) -func WaitAll(containers []string, exec func(container string)) { +func WaitAll(containers []string, exec func(container string) error) []error { var wg sync.WaitGroup - for _, container := range containers { + result := make([]error, len(containers)) + for i, container := range containers { wg.Add(1) - go func(container string) { + go func(i int, container string) { defer wg.Done() - exec(container) - }(container) + result[i] = exec(container) + }(i, container) } wg.Wait() + return result } func DockerRemoveAll(ctx context.Context) { - WaitAll(Containers, func(container string) { - _ = Docker.ContainerRemove(ctx, container, types.ContainerRemoveOptions{ + _ = WaitAll(Containers, func(container string) error { + return Docker.ContainerRemove(ctx, container, types.ContainerRemoveOptions{ RemoveVolumes: true, Force: true, }) }) - WaitAll(Volumes, func(name string) { - _ = Docker.VolumeRemove(ctx, name, true) + _ = WaitAll(Volumes, func(name string) error { + return Docker.VolumeRemove(ctx, name, true) }) _ = Docker.NetworkRemove(ctx, NetId) } @@ -242,12 +244,6 @@ func DockerPullImageIfNotCached(ctx context.Context, imageName string) error { return DockerImagePullWithRetry(ctx, imageUrl, 2) } -func DockerStop(containerID string) { - if err := Docker.ContainerStop(context.Background(), containerID, container.StopOptions{}); err != nil { - fmt.Fprintln(os.Stderr, "Failed to stop container:", containerID, err) - } -} - func DockerStart(ctx context.Context, config container.Config, hostConfig container.HostConfig, containerName string) (string, error) { // Pull container image if err := DockerPullImageIfNotCached(ctx, config.Image); err != nil {
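
Note on the restart/stop refactor above: it hinges on generalizing `utils.WaitAll` to return a `[]error` (one slot per container) and folding the results with `errors.Join`, which is what forces the `go.mod` bump to Go 1.20. The sketch below is a standalone illustration of that fan-out/error-collection pattern, not the CLI's actual code: `waitAll` copies the signature and body introduced in `internal/utils/docker.go`, while the stubbed restart callback and the service IDs are placeholders standing in for `utils.Docker.ContainerRestart` and the real container IDs.

```go
// Minimal sketch, assuming only the standard library: run one goroutine per
// item, record each item's error in its own slice slot, then fold the slice
// with errors.Join (available since Go 1.20).
package main

import (
	"errors"
	"fmt"
	"sync"
)

// waitAll mirrors the generalized utils.WaitAll from the diff: it fans out
// exec over all items concurrently and returns the per-item errors in order.
func waitAll(items []string, exec func(item string) error) []error {
	var wg sync.WaitGroup
	result := make([]error, len(items))
	for i, item := range items {
		wg.Add(1)
		go func(i int, item string) {
			defer wg.Done()
			result[i] = exec(item)
		}(i, item)
	}
	wg.Wait()
	return result
}

func main() {
	services := []string{"test-storage", "test-auth", "test-realtime"}
	result := waitAll(services, func(id string) error {
		// Stand-in for a container restart call; fail one service to show
		// how errors.Join surfaces every failure in a single error value.
		if id == "test-auth" {
			return fmt.Errorf("Failed to restart %s: %w", id, errors.New("service unavailable"))
		}
		return nil
	})
	if err := errors.Join(result...); err != nil {
		fmt.Println(err) // prints only the failed service's error
	}
}
```

Joining the per-goroutine errors this way lets one failed restart surface without masking failures from the other services, which is the behavior the new "throws error on service restart failure" test asserts by checking the combined error for all three container IDs.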