From 9788132d68ad46eac5b30f38259e418848b6e1b6 Mon Sep 17 00:00:00 2001
From: Rafał Cieślak
Date: Mon, 10 Jun 2024 18:00:57 +0200
Subject: [PATCH] Fix listing db users for leaf databases obtained through access requests (#42679)

---
 integration/teleterm_test.go               | 219 +++++++++++++++++++++
 lib/teleterm/clusters/cluster_databases.go |   2 +-
 2 files changed, 220 insertions(+), 1 deletion(-)

diff --git a/integration/teleterm_test.go b/integration/teleterm_test.go
index d68c151205b31..a9a39c3de84ce 100644
--- a/integration/teleterm_test.go
+++ b/integration/teleterm_test.go
@@ -51,6 +51,7 @@ import (
 	wancli "github.com/gravitational/teleport/lib/auth/webauthncli"
 	wantypes "github.com/gravitational/teleport/lib/auth/webauthntypes"
 	"github.com/gravitational/teleport/lib/client"
+	"github.com/gravitational/teleport/lib/modules"
 	"github.com/gravitational/teleport/lib/service"
 	"github.com/gravitational/teleport/lib/service/servicecfg"
 	"github.com/gravitational/teleport/lib/services"
@@ -58,6 +59,7 @@ import (
 	"github.com/gravitational/teleport/lib/teleterm/apiserver/handler"
 	"github.com/gravitational/teleport/lib/teleterm/clusters"
 	"github.com/gravitational/teleport/lib/teleterm/daemon"
+	"github.com/gravitational/teleport/lib/tlsca"
 	libutils "github.com/gravitational/teleport/lib/utils"
 )
 
@@ -129,6 +131,15 @@ func TestTeleterm(t *testing.T) {
 		testClientCache(t, pack, creds)
 	})
 
+	t.Run("ListDatabaseUsers", func(t *testing.T) {
+		// ListDatabaseUsers cannot be run in parallel as it modifies the default roles of users set up
+		// through the test pack.
+		// TODO(ravicious): After some optimizations, those tests could run in parallel. Instead of
+		// modifying existing roles, they could create new users with new roles and then update the role
+		// mapping between the root and the leaf cluster through authServer.UpdateUserCARoleMap.
+		testListDatabaseUsers(t, pack)
+	})
+
 	t.Run("with MFA", func(t *testing.T) {
 		authServer := pack.Root.Cluster.Process.GetAuthServer()
 		rpID, _, err := net.SplitHostPort(pack.Root.Cluster.Web)
@@ -1054,6 +1065,214 @@ func testDeleteConnectMyComputerNode(t *testing.T, pack *dbhelpers.DatabasePack)
 	}, time.Minute, time.Second, "waiting for node to be deleted")
 }
 
+// testListDatabaseUsers adds a unique string under spec.allow.db_users of the role automatically
+// given to a user by [dbhelpers.DatabasePack] and then checks if that string is returned when
+// calling [handler.Handler.ListDatabaseUsers].
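+//
+// For illustration, the call under test has roughly this shape (the URI here is
+// a hypothetical example of a teleterm database URI):
+//
+//	res, err := handler.ListDatabaseUsers(ctx, &api.ListDatabaseUsersRequest{
+//		DbUri: "/clusters/root/leaves/leaf/dbs/postgres",
+//	})
+//	// res.Users is then expected to contain the added db user.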
+func testListDatabaseUsers(t *testing.T, pack *dbhelpers.DatabasePack) { + ctx := context.Background() + + mustAddDBUserToUserRole := func(ctx context.Context, t *testing.T, cluster *helpers.TeleInstance, user, dbUser string) { + t.Helper() + authServer := cluster.Process.GetAuthServer() + roleName := services.RoleNameForUser(user) + role, err := authServer.GetRole(ctx, roleName) + require.NoError(t, err) + + dbUsers := role.GetDatabaseUsers(types.Allow) + dbUsers = append(dbUsers, dbUser) + role.SetDatabaseUsers(types.Allow, dbUsers) + _, err = authServer.UpdateRole(ctx, role) + require.NoError(t, err) + + require.EventuallyWithT(t, func(collect *assert.CollectT) { + role, err := authServer.GetRole(ctx, roleName) + if assert.NoError(collect, err) { + assert.Equal(collect, dbUsers, role.GetDatabaseUsers(types.Allow)) + } + }, 10*time.Second, 100*time.Millisecond) + } + + mustUpdateUserRoles := func(ctx context.Context, t *testing.T, cluster *helpers.TeleInstance, userName string, roles []string) { + t.Helper() + authServer := cluster.Process.GetAuthServer() + user, err := authServer.GetUser(ctx, userName, false /* withSecrets */) + require.NoError(t, err) + + user.SetRoles(roles) + _, err = authServer.UpdateUser(ctx, user) + require.NoError(t, err) + + require.EventuallyWithT(t, func(collect *assert.CollectT) { + user, err := authServer.GetUser(ctx, userName, false /* withSecrets */) + if assert.NoError(collect, err) { + assert.Equal(collect, roles, user.GetRoles()) + } + }, 10*time.Second, 100*time.Millisecond) + } + + // Allow resource access requests to be created. + currentModules := modules.GetModules() + t.Cleanup(func() { modules.SetModules(currentModules) }) + modules.SetModules(&modules.TestModules{TestBuildType: modules.BuildEnterprise}) + + rootClusterName, _, err := net.SplitHostPort(pack.Root.Cluster.Web) + require.NoError(t, err) + rootDatabaseURI := uri.NewClusterURI(rootClusterName).AppendDB(pack.Root.PostgresService.Name) + leafDatabaseURI := uri.NewClusterURI(rootClusterName).AppendLeafCluster(pack.Leaf.Cluster.Secrets.SiteName).AppendDB(pack.Leaf.PostgresService.Name) + + rootDBUser := fmt.Sprintf("root-db-user-%s", uuid.NewString()) + leafDBUser := fmt.Sprintf("leaf-db-user-%s", uuid.NewString()) + leafDBUserWithAccessRequest := fmt.Sprintf("leaf-db-user-with-access-request-%s", uuid.NewString()) + + rootUserName := pack.Root.User.GetName() + leafUserName := pack.Leaf.User.GetName() + rootRoleName := services.RoleNameForUser(rootUserName) + + tests := []struct { + name string + dbURI uri.ResourceURI + wantDBUser string + prepareRole func(ctx context.Context, t *testing.T) + createAccessRequest func(ctx context.Context, t *testing.T) string + }{ + { + name: "root cluster", + dbURI: rootDatabaseURI, + wantDBUser: rootDBUser, + prepareRole: func(ctx context.Context, t *testing.T) { + mustAddDBUserToUserRole(ctx, t, pack.Root.Cluster, rootUserName, rootDBUser) + }, + }, + { + name: "leaf cluster", + dbURI: leafDatabaseURI, + wantDBUser: leafDBUser, + prepareRole: func(ctx context.Context, t *testing.T) { + mustAddDBUserToUserRole(ctx, t, pack.Leaf.Cluster, leafUserName, leafDBUser) + }, + }, + { + name: "leaf cluster with resource access request", + dbURI: leafDatabaseURI, + wantDBUser: leafDBUserWithAccessRequest, + // Remove role from root-user and move it to search_as_roles. + // + // root-user has access to leafDatabaseURI through the user:root-user role which gets mapped + // to a corresponding leaf cluster role. 
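+			// (As a hypothetical illustration, such a mapping in a trusted
+			// cluster resource could be expressed as:
+			//
+			//	types.RoleMapping{
+			//		Remote: "user:root-user",
+			//		Local:  []string{"user:root-user"},
+			//	}
+			//
+			// though the test helpers have already set this up.)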
+			// We want to create a resource access request for that database. To do this, we need to
+			// create a new role which lets root-user request the database.
+			prepareRole: func(ctx context.Context, t *testing.T) {
+				mustAddDBUserToUserRole(ctx, t, pack.Leaf.Cluster, leafUserName, leafDBUserWithAccessRequest)
+
+				authServer := pack.Root.Cluster.Process.GetAuthServer()
+
+				// Create new role that lets root-user request the database.
+				requesterRole, err := types.NewRole(fmt.Sprintf("requester-%s", uuid.NewString()), types.RoleSpecV6{
+					Allow: types.RoleConditions{
+						Request: &types.AccessRequestConditions{
+							SearchAsRoles: []string{rootRoleName},
+						},
+					},
+				})
+				require.NoError(t, err)
+				requesterRole, err = authServer.CreateRole(ctx, requesterRole)
+				require.NoError(t, err)
+
+				user, err := authServer.GetUser(ctx, rootUserName, false /* withSecrets */)
+				require.NoError(t, err)
+
+				// Delete rootRoleName from roles, add requester role. Restore original role set after test
+				// is done.
+				currentRoles := user.GetRoles()
+				t.Cleanup(func() { mustUpdateUserRoles(ctx, t, pack.Root.Cluster, rootUserName, currentRoles) })
+				mustUpdateUserRoles(ctx, t, pack.Root.Cluster, rootUserName, []string{requesterRole.GetName()})
+			},
+			createAccessRequest: func(ctx context.Context, t *testing.T) string {
+				req, err := services.NewAccessRequestWithResources(rootUserName, []string{rootRoleName}, []types.ResourceID{
+					{
+						ClusterName: pack.Leaf.Cluster.Secrets.SiteName,
+						Kind:        types.KindDatabase,
+						Name:        pack.Leaf.PostgresService.Name,
+					},
+				})
+				require.NoError(t, err)
+
+				authServer := pack.Root.Cluster.Process.GetAuthServer()
+				req, err = authServer.CreateAccessRequestV2(ctx, req, tlsca.Identity{})
+				require.NoError(t, err)
+
+				err = authServer.SetAccessRequestState(ctx, types.AccessRequestUpdate{
+					RequestID: req.GetName(),
+					State:     types.RequestState_APPROVED,
+				})
+				require.NoError(t, err)
+
+				return req.GetName()
+			},
+		},
+	}
+
+	for _, test := range tests {
+		t.Run(test.name, func(t *testing.T) {
+			test.prepareRole(ctx, t)
+			var accessRequestID string
+			if test.createAccessRequest != nil {
+				accessRequestID = test.createAccessRequest(ctx, t)
+
+				if accessRequestID == "" {
+					require.FailNow(t, "createAccessRequest returned empty access request ID")
+				}
+			}
+
+			creds, err := helpers.GenerateUserCreds(helpers.UserCredsRequest{
+				Process:  pack.Root.Cluster.Process,
+				Username: rootUserName,
+			})
+			require.NoError(t, err)
+
+			tc := mustLogin(t, rootUserName, pack, creds)
+
+			storage, err := clusters.NewStorage(clusters.Config{
+				Dir:                tc.KeysDir,
+				InsecureSkipVerify: tc.InsecureSkipVerify,
+			})
+			require.NoError(t, err)
+
+			daemonService, err := daemon.New(daemon.Config{
+				Storage:        storage,
+				KubeconfigsDir: t.TempDir(),
+				AgentsDir:      t.TempDir(),
+			})
+			require.NoError(t, err)
+			t.Cleanup(func() {
+				daemonService.Stop()
+			})
+
+			handler, err := handler.New(
+				handler.Config{
+					DaemonService: daemonService,
+				},
+			)
+			require.NoError(t, err)
+
+			if accessRequestID != "" {
+				_, err := handler.AssumeRole(ctx, &api.AssumeRoleRequest{
+					RootClusterUri:   test.dbURI.GetRootClusterURI().String(),
+					AccessRequestIds: []string{accessRequestID},
+				})
+				require.NoError(t, err)
+			}
+
+			res, err := handler.ListDatabaseUsers(ctx, &api.ListDatabaseUsersRequest{
+				DbUri: test.dbURI.String(),
+			})
+			require.NoError(t, err)
+			require.Contains(t, res.Users, test.wantDBUser)
+		})
+	}
+}
+
 // mustLogin logs in as the given user by completely skipping the actual login flow and saving valid
 // certs to disk. clusters.Storage can then be pointed to tc.KeysDir and daemon.Service can act as
 // if the user was successfully logged in.
diff --git a/lib/teleterm/clusters/cluster_databases.go b/lib/teleterm/clusters/cluster_databases.go
index 608a2d9f2a782..e6e6a31d59984 100644
--- a/lib/teleterm/clusters/cluster_databases.go
+++ b/lib/teleterm/clusters/cluster_databases.go
@@ -164,7 +164,7 @@ func (c *Cluster) GetAllowedDatabaseUsers(ctx context.Context, authClient authcl
 		return nil, trace.Wrap(err)
 	}
 
-	accessChecker, err := services.NewAccessCheckerForRemoteCluster(ctx, c.status.AccessInfo(), c.status.Cluster, authClient)
+	accessChecker, err := services.NewAccessCheckerForRemoteCluster(ctx, c.status.AccessInfo(), c.clusterClient.SiteName, authClient)
 	if err != nil {
 		return nil, trace.Wrap(err)
 	}
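
For context, a minimal sketch of the distinction behind the one-line fix above. The types below are hypothetical stand-ins for the fields referenced in the diff, not Teleport's actual definitions: the profile status names the cluster from the tsh profile, while the cluster client names the cluster the request is actually routed to, so the two diverge for leaf databases.

	package main

	import "fmt"

	// Hypothetical stand-ins for c.status and c.clusterClient from the diff above.
	type profileStatus struct {
		Cluster string // cluster name taken from the tsh profile, i.e. the root cluster
	}

	type clusterClient struct {
		SiteName string // routing target; the leaf cluster name for leaf resources
	}

	func main() {
		status := profileStatus{Cluster: "root.example.com"}
		client := clusterClient{SiteName: "leaf.example.com"}

		// Before the fix: the profile's cluster name was passed to
		// NewAccessCheckerForRemoteCluster, which, per the patch subject,
		// broke listing db users for leaf databases obtained through
		// access requests.
		fmt.Println("before:", status.Cluster) // before: root.example.com

		// After the fix: the leaf cluster name is used, so the remote
		// access checker resolves roles for the correct cluster.
		fmt.Println("after:", client.SiteName) // after: leaf.example.com
	}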