Skip to content

Commit

Permalink
Fix listing db users for leaf databases obtained through access requests (#42681)
Browse files Browse the repository at this point in the history
  • Loading branch information
ravicious authored Jun 10, 2024
1 parent d7491dc commit 291ba34
Show file tree
Hide file tree
Showing 2 changed files with 220 additions and 1 deletion.
219 changes: 219 additions & 0 deletions integration/teleterm_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -40,13 +40,15 @@ import (
"github.com/gravitational/teleport/integration/helpers"
"github.com/gravitational/teleport/lib/auth"
"github.com/gravitational/teleport/lib/client"
"github.com/gravitational/teleport/lib/modules"
"github.com/gravitational/teleport/lib/service"
"github.com/gravitational/teleport/lib/service/servicecfg"
"github.com/gravitational/teleport/lib/services"
"github.com/gravitational/teleport/lib/teleterm/api/uri"
"github.com/gravitational/teleport/lib/teleterm/apiserver/handler"
"github.com/gravitational/teleport/lib/teleterm/clusters"
"github.com/gravitational/teleport/lib/teleterm/daemon"
"github.com/gravitational/teleport/lib/tlsca"
libutils "github.com/gravitational/teleport/lib/utils"
)

Expand Down Expand Up @@ -111,6 +113,15 @@ func TestTeleterm(t *testing.T) {
t.Parallel()
testDeleteConnectMyComputerNode(t, pack)
})

t.Run("ListDatabaseUsers", func(t *testing.T) {
// ListDatabaseUsers cannot be run in parallel as it modifies the default roles of users set up
// through the test pack.
// TODO(ravicious): After some optimizations, those tests could run in parallel. Instead of
// modifying existing roles, they could create new users with new roles and then update the role
// mapping between the root the leaf cluster through authServer.UpdateUserCARoleMap.
testListDatabaseUsers(t, pack)
})
}

func testAddingRootCluster(t *testing.T, pack *dbhelpers.DatabasePack, creds *helpers.UserCreds) {
Expand Down Expand Up @@ -832,6 +843,214 @@ func testDeleteConnectMyComputerNode(t *testing.T, pack *dbhelpers.DatabasePack)
}, time.Minute, time.Second, "waiting for node to be deleted")
}

// testListDatabaseUsers adds a unique string under spec.allow.db_users of the role automatically
// given to a user by [dbhelpers.DatabasePack] and then checks if that string is returned when
// calling [handler.Handler.ListDatabaseUsers].
func testListDatabaseUsers(t *testing.T, pack *dbhelpers.DatabasePack) {
	ctx := context.Background()

	// mustAddDBUserToUserRole appends dbUser to spec.allow.db_users of the role automatically
	// created for user, then blocks until the change is observable through the auth server,
	// so that subsequent reads in the test don't race the role update.
	mustAddDBUserToUserRole := func(ctx context.Context, t *testing.T, cluster *helpers.TeleInstance, user, dbUser string) {
		t.Helper()
		authServer := cluster.Process.GetAuthServer()
		roleName := services.RoleNameForUser(user)
		role, err := authServer.GetRole(ctx, roleName)
		require.NoError(t, err)

		dbUsers := role.GetDatabaseUsers(types.Allow)
		dbUsers = append(dbUsers, dbUser)
		role.SetDatabaseUsers(types.Allow, dbUsers)
		err = authServer.UpsertRole(ctx, role)
		require.NoError(t, err)

		// Wait until the upserted role is visible on read before proceeding.
		require.EventuallyWithT(t, func(collect *assert.CollectT) {
			role, err := authServer.GetRole(ctx, roleName)
			if assert.NoError(collect, err) {
				assert.Equal(collect, dbUsers, role.GetDatabaseUsers(types.Allow))
			}
		}, 10*time.Second, 100*time.Millisecond)
	}

	// mustUpdateUserRoles replaces the role set of userName and blocks until the
	// change is observable through the auth server.
	mustUpdateUserRoles := func(ctx context.Context, t *testing.T, cluster *helpers.TeleInstance, userName string, roles []string) {
		t.Helper()
		authServer := cluster.Process.GetAuthServer()
		user, err := authServer.GetUser(userName, false /* withSecrets */)
		require.NoError(t, err)

		user.SetRoles(roles)
		err = authServer.UpdateUser(ctx, user)
		require.NoError(t, err)

		// Wait until the updated role set is visible on read before proceeding.
		require.EventuallyWithT(t, func(collect *assert.CollectT) {
			user, err := authServer.GetUser(userName, false /* withSecrets */)
			if assert.NoError(collect, err) {
				assert.Equal(collect, roles, user.GetRoles())
			}
		}, 10*time.Second, 100*time.Millisecond)
	}

	// Allow resource access requests to be created.
	currentModules := modules.GetModules()
	t.Cleanup(func() { modules.SetModules(currentModules) })
	modules.SetModules(&modules.TestModules{TestBuildType: modules.BuildEnterprise})

	rootClusterName, _, err := net.SplitHostPort(pack.Root.Cluster.Web)
	require.NoError(t, err)
	rootDatabaseURI := uri.NewClusterURI(rootClusterName).AppendDB(pack.Root.PostgresService.Name)
	// Leaf resources are addressed through the root cluster URI with the leaf cluster appended.
	leafDatabaseURI := uri.NewClusterURI(rootClusterName).AppendLeafCluster(pack.Leaf.Cluster.Secrets.SiteName).AppendDB(pack.Leaf.PostgresService.Name)

	// Unique per-run db users so parallel or repeated runs can't collide on role contents.
	rootDBUser := fmt.Sprintf("root-db-user-%s", uuid.NewString())
	leafDBUser := fmt.Sprintf("leaf-db-user-%s", uuid.NewString())
	leafDBUserWithAccessRequest := fmt.Sprintf("leaf-db-user-with-access-request-%s", uuid.NewString())

	rootUserName := pack.Root.User.GetName()
	leafUserName := pack.Leaf.User.GetName()
	rootRoleName := services.RoleNameForUser(rootUserName)

	tests := []struct {
		name       string
		dbURI      uri.ResourceURI
		wantDBUser string
		// prepareRole mutates roles so that wantDBUser shows up in the allow list.
		prepareRole func(ctx context.Context, t *testing.T)
		// createAccessRequest, when set, creates and approves a resource access
		// request and returns its ID for the handler to assume.
		createAccessRequest func(ctx context.Context, t *testing.T) string
	}{
		{
			name:       "root cluster",
			dbURI:      rootDatabaseURI,
			wantDBUser: rootDBUser,
			prepareRole: func(ctx context.Context, t *testing.T) {
				mustAddDBUserToUserRole(ctx, t, pack.Root.Cluster, rootUserName, rootDBUser)
			},
		},
		{
			name:       "leaf cluster",
			dbURI:      leafDatabaseURI,
			wantDBUser: leafDBUser,
			prepareRole: func(ctx context.Context, t *testing.T) {
				mustAddDBUserToUserRole(ctx, t, pack.Leaf.Cluster, leafUserName, leafDBUser)
			},
		},
		{
			name:       "leaf cluster with resource access request",
			dbURI:      leafDatabaseURI,
			wantDBUser: leafDBUserWithAccessRequest,
			// Remove role from root-user and move it to search_as_roles.
			//
			// root-user has access to leafDatabaseURI through the user:root-user role which gets mapped
			// to a corresponding leaf cluster role.
			// We want to create a resource access request for that database. To do this, we need to
			// create a new role which lets root-user request the database.
			prepareRole: func(ctx context.Context, t *testing.T) {
				mustAddDBUserToUserRole(ctx, t, pack.Leaf.Cluster, leafUserName, leafDBUserWithAccessRequest)

				authServer := pack.Root.Cluster.Process.GetAuthServer()

				// Create new role that lets root-user request the database.
				requesterRole, err := types.NewRole(fmt.Sprintf("requester-%s", uuid.NewString()), types.RoleSpecV6{
					Allow: types.RoleConditions{
						Request: &types.AccessRequestConditions{
							SearchAsRoles: []string{rootRoleName},
						},
					},
				})
				require.NoError(t, err)
				err = authServer.CreateRole(ctx, requesterRole)
				require.NoError(t, err)

				user, err := authServer.GetUser(rootUserName, false /* withSecrets */)
				require.NoError(t, err)

				// Delete rootRoleName from roles, add requester role. Restore original role set after test
				// is done.
				currentRoles := user.GetRoles()
				t.Cleanup(func() { mustUpdateUserRoles(ctx, t, pack.Root.Cluster, rootUserName, currentRoles) })
				mustUpdateUserRoles(ctx, t, pack.Root.Cluster, rootUserName, []string{requesterRole.GetName()})
			},
			createAccessRequest: func(ctx context.Context, t *testing.T) string {
				req, err := services.NewAccessRequestWithResources(rootUserName, []string{rootRoleName}, []types.ResourceID{
					// gofmt -s: element type is implied by the slice literal.
					{
						ClusterName: pack.Leaf.Cluster.Secrets.SiteName,
						Kind:        types.KindDatabase,
						Name:        pack.Leaf.PostgresService.Name,
					},
				})
				require.NoError(t, err)

				authServer := pack.Root.Cluster.Process.GetAuthServer()
				req, err = authServer.CreateAccessRequestV2(ctx, req, tlsca.Identity{})
				require.NoError(t, err)

				// Auto-approve the request so the test can assume it immediately.
				err = authServer.SetAccessRequestState(ctx, types.AccessRequestUpdate{
					RequestID: req.GetName(),
					State:     types.RequestState_APPROVED,
				})
				require.NoError(t, err)

				return req.GetName()
			},
		},
	}

	for _, test := range tests {
		t.Run(test.name, func(t *testing.T) {
			test.prepareRole(ctx, t)
			var accessRequestID string
			if test.createAccessRequest != nil {
				accessRequestID = test.createAccessRequest(ctx, t)

				// Guard against a createAccessRequest impl that silently returns nothing.
				if accessRequestID == "" {
					require.FailNow(t, "createAccessRequest returned empty access request ID")
				}
			}

			creds, err := helpers.GenerateUserCreds(helpers.UserCredsRequest{
				Process:  pack.Root.Cluster.Process,
				Username: rootUserName,
			})
			require.NoError(t, err)

			// Skip the real login flow; write valid certs to disk directly.
			tc := mustLogin(t, rootUserName, pack, creds)

			storage, err := clusters.NewStorage(clusters.Config{
				Dir:                tc.KeysDir,
				InsecureSkipVerify: tc.InsecureSkipVerify,
			})
			require.NoError(t, err)

			daemonService, err := daemon.New(daemon.Config{
				Storage:        storage,
				KubeconfigsDir: t.TempDir(),
				AgentsDir:      t.TempDir(),
			})
			require.NoError(t, err)
			t.Cleanup(func() {
				daemonService.Stop()
			})

			handler, err := handler.New(
				handler.Config{
					DaemonService: daemonService,
				},
			)
			require.NoError(t, err)

			// Assume the approved access request before listing, so the leaf db
			// is reachable through the elevated certs.
			if accessRequestID != "" {
				_, err := handler.AssumeRole(ctx, &api.AssumeRoleRequest{
					RootClusterUri:   test.dbURI.GetRootClusterURI().String(),
					AccessRequestIds: []string{accessRequestID},
				})
				require.NoError(t, err)
			}

			res, err := handler.ListDatabaseUsers(ctx, &api.ListDatabaseUsersRequest{
				DbUri: test.dbURI.String(),
			})
			require.NoError(t, err)
			require.Contains(t, res.Users, test.wantDBUser)
		})
	}
}

// mustLogin logs in as the given user by completely skipping the actual login flow and saving valid
// certs to disk. clusters.Storage can then be pointed to tc.KeysDir and daemon.Service can act as
// if the user was successfully logged in.
Expand Down
2 changes: 1 addition & 1 deletion lib/teleterm/clusters/cluster_databases.go
Original file line number Diff line number Diff line change
Expand Up @@ -228,7 +228,7 @@ func (c *Cluster) GetAllowedDatabaseUsers(ctx context.Context, dbURI string) ([]
}
defer authClient.Close()

accessChecker, err := services.NewAccessCheckerForRemoteCluster(ctx, c.status.AccessInfo(), c.status.Cluster, authClient)
accessChecker, err := services.NewAccessCheckerForRemoteCluster(ctx, c.status.AccessInfo(), c.clusterClient.SiteName, authClient)
if err != nil {
return nil, trace.Wrap(err)
}
Expand Down

0 comments on commit 291ba34

Please sign in to comment.