From d9fd03544738c0b716364eb3c0b9c9337e50a3fc Mon Sep 17 00:00:00 2001
From: df-wg
Date: Wed, 5 Feb 2025 15:00:08 +0200
Subject: [PATCH] fix(router): enable redis to work with auth (#1563)

---
 .github/workflows/router-ci.yaml              |  22 +++-
 docker-compose.yml                            |  21 ++--
 docker/redis/redis-cluster-create.sh          |  14 ++-
 docker/redis/redis-cluster.conf               |   5 +-
 .../automatic_persisted_queries_test.go       |   8 +-
 router-tests/ratelimit_test.go                | 106 +++++++++++++++++-
 router-tests/testenv/testenv.go               |   9 ++
 .../operationstorage/redis/rdcloser.go        |  87 ++++++++++----
 .../operationstorage/redis/rdcloser_test.go   |  22 +++-
 router/pkg/config/config.schema.json          |   2 +-
 10 files changed, 248 insertions(+), 48 deletions(-)

diff --git a/.github/workflows/router-ci.yaml b/.github/workflows/router-ci.yaml
index 113a5cfdaf..7db9b9c7b5 100644
--- a/.github/workflows/router-ci.yaml
+++ b/.github/workflows/router-ci.yaml
@@ -159,13 +159,23 @@ jobs:
       - name: Setup Redis Cluster (for Cluster tests)
         uses: vishnudxb/redis-cluster@1.0.9
         with:
-          master1-port: 7000
-          master2-port: 7001
-          master3-port: 7002
-          slave1-port: 7003
-          slave2-port: 7004
-          slave3-port: 7005
+          master1-port: 7001
+          master2-port: 7002
+          master3-port: 7003
+          slave1-port: 7004
+          slave2-port: 7005
+          slave3-port: 7006
           sleep-duration: 5
+      - name: Configure Redis Authentication & ACL
+        run: |
+          sudo apt-get install -y redis-tools
+          docker ps -a
+          # Set a password for each master node
+          for port in 7001 7002 7003; do
+            redis-cli -h 127.0.0.1 -p $port ACL SETUSER cosmo on ">test" "~*" "+@all"
+            redis-cli -u "redis://cosmo:test@127.0.0.1:$port" ping
+            echo "ACL user 'cosmo' created with full access on port $port"
+          done
       - uses: nick-fields/retry@v3
         with:
           timeout_minutes: 30
diff --git a/docker-compose.yml b/docker-compose.yml
index fb4435c4ed..890604acb6 100644
--- a/docker-compose.yml
+++ b/docker-compose.yml
@@ -243,38 +243,41 @@ services:
     profiles:
       - dev
 
-# 3 node minimum for a cluster, per redis documentation
-  redis-cluster-node-0:
+  # 3 node minimum for a cluster, per redis documentation
+  redis-cluster-node-1:
     image: redis:${DC_REDIS_VERSION:-7.2.4}-alpine
     command: redis-server /usr/local/etc/redis/redis.conf
     networks:
       - primary
     ports:
-      - "7000:6379"
+      - "7001:6379"
+      - "16371:16379"
     volumes:
       - ./docker/redis/redis-cluster.conf:/usr/local/etc/redis/redis.conf
     profiles:
       - dev
 
-  redis-cluster-node-1:
+  redis-cluster-node-2:
     image: redis:${DC_REDIS_VERSION:-7.2.4}-alpine
     command: redis-server /usr/local/etc/redis/redis.conf
     networks:
       - primary
     ports:
-      - "7001:6379"
+      - "7002:6379"
+      - "16372:16379"
     volumes:
      - ./docker/redis/redis-cluster.conf:/usr/local/etc/redis/redis.conf
     profiles:
       - dev
 
-  redis-cluster-node-2:
+  redis-cluster-node-3:
     image: redis:${DC_REDIS_VERSION:-7.2.4}-alpine
     command: redis-server /usr/local/etc/redis/redis.conf
     networks:
       - primary
     ports:
-      - "7002:6379"
+      - "7003:6379"
+      - "16373:16379"
     volumes:
       - ./docker/redis/redis-cluster.conf:/usr/local/etc/redis/redis.conf
     profiles:
@@ -286,9 +289,9 @@ services:
     networks:
       - primary
     depends_on:
-      - redis-cluster-node-0
       - redis-cluster-node-1
       - redis-cluster-node-2
+      - redis-cluster-node-3
     volumes:
       - ./docker/redis/:/usr/local/etc/redis/
     profiles:
@@ -331,6 +334,6 @@ volumes:
   redis:
   redis-slave:
   redis-cluster-configure:
-  redis-cluster-node-0:
   redis-cluster-node-1:
   redis-cluster-node-2:
+  redis-cluster-node-3:
diff --git a/docker/redis/redis-cluster-create.sh b/docker/redis/redis-cluster-create.sh
index f9ca4fda67..418a55df70 100755
--- a/docker/redis/redis-cluster-create.sh
+++ b/docker/redis/redis-cluster-create.sh
@@ -1,13 +1,21 @@
 # wait for the docker-compose depends_on to spin up the redis nodes usually takes this long
 sleep 10
 
-node_0_ip=$(getent hosts redis-cluster-node-0 | awk '{ print $1 }')
 node_1_ip=$(getent hosts redis-cluster-node-1 | awk '{ print $1 }')
 node_2_ip=$(getent hosts redis-cluster-node-2 | awk '{ print $1 }')
+node_3_ip=$(getent hosts redis-cluster-node-3 | awk '{ print $1 }')
 
+# Set cluster properties dynamically for each node
+for ip in $node_1_ip $node_2_ip $node_3_ip; do
+  echo "Configuring Redis node at $ip"
+  redis-cli -h $ip -p 6379 CONFIG SET cluster-announce-ip "$ip"
+done
 
+# Create the cluster
 redis-cli --cluster create \
-  $node_0_ip:6379 \
   $node_1_ip:6379 \
   $node_2_ip:6379 \
-  --cluster-replicas 0 --cluster-yes
\ No newline at end of file
+  $node_3_ip:6379 \
+  --cluster-replicas 0 --cluster-yes
+
+echo "Redis Cluster setup complete!"
\ No newline at end of file
diff --git a/docker/redis/redis-cluster.conf b/docker/redis/redis-cluster.conf
index 53d443eb7c..96d74f0626 100644
--- a/docker/redis/redis-cluster.conf
+++ b/docker/redis/redis-cluster.conf
@@ -6,4 +6,7 @@ protected-mode no
 maxmemory 100mb
 maxmemory-policy noeviction
 save 60 1
-appendonly no
\ No newline at end of file
+appendonly no
+cluster-announce-port 6379
+cluster-announce-bus-port 16379
+user cosmo on >test ~* &* +@all
diff --git a/router-tests/automatic_persisted_queries_test.go b/router-tests/automatic_persisted_queries_test.go
index 0f7717e65e..32ea032594 100644
--- a/router-tests/automatic_persisted_queries_test.go
+++ b/router-tests/automatic_persisted_queries_test.go
@@ -163,6 +163,7 @@ func TestAutomaticPersistedQueries(t *testing.T) {
     var (
         redisLocalUrl = "localhost:6379"
         redisUrl      = fmt.Sprintf("redis://%s", redisLocalUrl)
+        redisUser     = "cosmo"
         redisPassword = "test"
         client        = redis.NewClient(&redis.Options{Addr: redisLocalUrl, Password: redisPassword})
     )
@@ -366,10 +367,11 @@ func TestAutomaticPersistedQueries(t *testing.T) {
 
     t.Run("works with cluster mode", func(t *testing.T) {
         t.Parallel()
-
-        clusterUrls := []string{"localhost:7000", "localhost:7001"}
+        clusterUrls := []string{"redis://cosmo:test@localhost:7002", "redis://cosmo:test@localhost:7001"}
+        noSchemeClusterUrls := []string{"localhost:7002", "localhost:7001"}
         clusterClient := redis.NewClusterClient(&redis.ClusterOptions{
-            Addrs:    clusterUrls,
+            Addrs:    noSchemeClusterUrls,
+            Username: redisUser,
             Password: redisPassword,
         })
 
diff --git a/router-tests/ratelimit_test.go b/router-tests/ratelimit_test.go
index 2abaa2860a..46883386cf 100644
--- a/router-tests/ratelimit_test.go
+++ b/router-tests/ratelimit_test.go
@@ -675,16 +675,112 @@ func TestRateLimit(t *testing.T) {
     })
 
     t.Run("Cluster Mode", func(t *testing.T) {
         var (
-            clusterUrlSlice = []string{"localhost:7000", "localhost:7001", "localhost:7002"}
-            password        = "test"
+            clusterUrlSlice     = []string{"redis://cosmo:test@localhost:7001", "redis://cosmo:test@localhost:7002", "redis://cosmo:test@localhost:7003"}
+            noSchemeClusterUrls = []string{"localhost:7001", "localhost:7002", "localhost:7003"}
+            user                = "cosmo"
+            password            = "test"
         )
 
+        t.Run("correctly parses url options and authentication", func(t *testing.T) {
+            t.Parallel()
+
+            tests := []struct {
+                name            string
+                clusterUrlSlice []string
+            }{
+                {
+                    name:            "should successfully use auth from first url",
+                    clusterUrlSlice: []string{"redis://cosmo:test@localhost:7003", "redis://cosmo1:test1@localhost:7001", "redis://cosmo2:test2@localhost:7002"},
+                },
+                {
+                    name:            "should successfully use auth from later url if no auth in first urls",
+                    clusterUrlSlice: []string{"redis://localhost:7003", "rediss://localhost:7001", "rediss://cosmo:test@localhost:7002"},
+                },
+                {
+                    name:            "should successfully work with two urls",
+                    clusterUrlSlice: []string{"redis://cosmo:test@localhost:7002", "rediss://localhost:7001"},
+                },
+            }
+
+            for _, tt := range tests {
+                t.Run(tt.name, func(t *testing.T) {
+                    t.Parallel()
+                    key := uuid.New().String()
+                    t.Cleanup(func() {
+                        client := redis.NewClusterClient(&redis.ClusterOptions{Addrs: noSchemeClusterUrls, Username: user, Password: password})
+                        del := client.Del(context.Background(), key)
+                        require.NoError(t, del.Err())
+                    })
+
+                    testenv.Run(t, &testenv.Config{
+                        RouterOptions: []core.Option{
+                            core.WithRateLimitConfig(&config.RateLimitConfiguration{
+                                Enabled:  true,
+                                Strategy: "simple",
+                                SimpleStrategy: config.RateLimitSimpleStrategy{
+                                    Rate:                    1,
+                                    Burst:                   1,
+                                    Period:                  time.Second * 2,
+                                    RejectExceedingRequests: false,
+                                },
+                                Storage: config.RedisConfiguration{
+                                    ClusterEnabled: true,
+                                    URLs:           tt.clusterUrlSlice,
+                                    KeyPrefix:      key,
+                                },
+                                Debug: true,
+                            }),
+                        },
+                    }, func(t *testing.T, xEnv *testenv.Environment) {
+                        res := xEnv.MakeGraphQLRequestOK(testenv.GraphQLRequest{
+                            Query:     `query ($n:Int!) { employee(id:$n) { id details { forename surname } } }`,
+                            Variables: json.RawMessage(`{"n":1}`),
+                        })
+                        require.Equal(t, fmt.Sprintf(`{"data":{"employee":{"id":1,"details":{"forename":"Jens","surname":"Neuse"}}},"extensions":{"rateLimit":{"key":"%s","requestRate":1,"remaining":0,"retryAfterMs":1234,"resetAfterMs":1234}}}`, key), res.Body)
+                    })
+                })
+            }
+
+            t.Run("should fail with bad auth", func(t *testing.T) {
+                t.Parallel()
+                clusterUrlSlice = []string{"redis://cosmo1:test1@localhost:7001", "redis://cosmo:test@localhost:7003", "redis://cosmo2:test2@localhost:7002"}
+
+                key := uuid.New().String()
+                t.Cleanup(func() {
+                    client := redis.NewClusterClient(&redis.ClusterOptions{Addrs: noSchemeClusterUrls, Username: user, Password: password})
+                    del := client.Del(context.Background(), key)
+                    require.NoError(t, del.Err())
+                })
+                testenv.FailsOnStartup(t, &testenv.Config{
+                    RouterOptions: []core.Option{
+                        core.WithRateLimitConfig(&config.RateLimitConfiguration{
+                            Enabled:  true,
+                            Strategy: "simple",
+                            SimpleStrategy: config.RateLimitSimpleStrategy{
+                                Rate:                    1,
+                                Burst:                   1,
+                                Period:                  time.Second * 2,
+                                RejectExceedingRequests: false,
+                            },
+                            Storage: config.RedisConfiguration{
+                                ClusterEnabled: true,
+                                URLs:           clusterUrlSlice,
+                                KeyPrefix:      key,
+                            },
+                            Debug: true,
+                        }),
+                    },
+                }, func(t *testing.T, err error) {
+                    require.Contains(t, err.Error(), "failed to create a functioning redis client")
+                })
+            })
+        })
         t.Run("enabled - below limit", func(t *testing.T) {
             t.Parallel()
             key := uuid.New().String()
             t.Cleanup(func() {
-                client := redis.NewClusterClient(&redis.ClusterOptions{Addrs: clusterUrlSlice, Password: password})
+                client := redis.NewClusterClient(&redis.ClusterOptions{Addrs: noSchemeClusterUrls, Username: user, Password: password})
                 del := client.Del(context.Background(), key)
                 require.NoError(t, del.Err())
             })
@@ -720,7 +816,7 @@ func TestRateLimit(t *testing.T) {
 
             key := uuid.New().String()
             t.Cleanup(func() {
-                client := redis.NewClusterClient(&redis.ClusterOptions{Addrs: clusterUrlSlice, Password: password})
+                client := redis.NewClusterClient(&redis.ClusterOptions{Addrs: noSchemeClusterUrls, Username: user, Password: password})
                 del := client.Del(context.Background(), fmt.Sprintf("%s:localhost", key))
                 require.NoError(t, del.Err())
             })
@@ -760,7 +856,7 @@ func TestRateLimit(t *testing.T) {
 
             key := uuid.New().String()
             t.Cleanup(func() {
-                client := redis.NewClusterClient(&redis.ClusterOptions{Addrs: clusterUrlSlice, Password: password})
+                client := redis.NewClusterClient(&redis.ClusterOptions{Addrs: noSchemeClusterUrls, Username: user, Password: password})
                 del := client.Del(context.Background(), key)
                 require.NoError(t, del.Err())
             })
diff --git a/router-tests/testenv/testenv.go b/router-tests/testenv/testenv.go
index d689907d5f..26ad12010b 100644
--- a/router-tests/testenv/testenv.go
+++ b/router-tests/testenv/testenv.go
@@ -106,6 +106,15 @@ func Run(t *testing.T, cfg *Config, f func(t *testing.T, xEnv *Environment)) {
     }
 }
 
+// FailsOnStartup runs the test and ensures that the router fails during bootstrapping
+func FailsOnStartup(t *testing.T, cfg *Config, f func(t *testing.T, err error)) {
+    t.Helper()
+    env, err := createTestEnv(t, cfg)
+    require.Error(t, err)
+    require.Nil(t, env)
+    f(t, err)
+}
+
 // RunWithError runs the test but returns an error instead of failing the test
 // Useful when you want to assert errors during router bootstrapping
 func RunWithError(t *testing.T, cfg *Config, f func(t *testing.T, xEnv *Environment)) error {
diff --git a/router/internal/persistedoperation/operationstorage/redis/rdcloser.go b/router/internal/persistedoperation/operationstorage/redis/rdcloser.go
index e7029d6e61..46fba0f7c0 100644
--- a/router/internal/persistedoperation/operationstorage/redis/rdcloser.go
+++ b/router/internal/persistedoperation/operationstorage/redis/rdcloser.go
@@ -5,15 +5,14 @@ import (
     "fmt"
     "github.com/redis/go-redis/v9"
     "go.uber.org/zap"
-    "io"
+    "net/url"
     "strings"
 )
 
 // RDCloser is an interface that combines the redis.Cmdable and io.Closer interfaces, ensuring that we can close the
 // client connection.
 type RDCloser interface {
-    redis.Cmdable
-    io.Closer
+    redis.UniversalClient
 }
 
 type RedisCloserOptions struct {
@@ -29,44 +28,94 @@ func NewRedisCloser(opts *RedisCloserOptions) (RDCloser, error) {
     }
 
     var rdb RDCloser
-    // If provided, prefer cluster URLs to single URL
+    // If provided, we create a cluster client
     if opts.ClusterEnabled {
         opts.Logger.Info("Detected that redis is running in cluster mode.")
-        strippedUrls := []string{}
-        for _, url := range opts.URLs {
-            strippedUrls = append(strippedUrls, strings.ReplaceAll(url, "redis://", ""))
+
+        // Parse the first URL to get the cluster options. We assume that the first URL provided is the primary URL
+        // and append further URLs as secondary addr params to the URL, as required by the go-redis library.
+        // e.g. redis://user:password@localhost:6789?dial_timeout=3&read_timeout=6s&addr=localhost:6790&addr=localhost:6791
+        parsedUrl, err := url.Parse(opts.URLs[0])
+        if err != nil {
+            return nil, fmt.Errorf("failed to parse the redis connection url: %w", err)
         }
-        rdb = redis.NewClusterClient(&redis.ClusterOptions{
-            Addrs:    strippedUrls,
-            Password: opts.Password,
-        })
+
+        // This operates on the URL query, and if there are more urls, appends them to the addr param
+        addClusterUrlsToQuery(opts, parsedUrl)
+        // Parse the cluster URL using the library method, to pick up all of the options encoded
+        clusterOps, err := redis.ParseClusterURL(parsedUrl.String())
+
+        if err != nil {
+            return nil, fmt.Errorf("failed to parse the redis connection url into ops: %w", err)
+        }
+        if opts.Password != "" {
+            // If they explicitly provide a password, assume that it's overwriting the URL password or that none was
+            // provided in the URL
+            clusterOps.Password = opts.Password
+        }
+
+        rdb = redis.NewClusterClient(clusterOps)
     } else {
-        options, err := redis.ParseURL(opts.URLs[0])
+        urlEncodedOpts, err := redis.ParseURL(opts.URLs[0])
         if err != nil {
             return nil, fmt.Errorf("failed to parse the redis connection url: %w", err)
         }
-        options.Password = opts.Password
-        rdb = redis.NewClient(options)
+        if opts.Password != "" {
+            // If they explicitly provide a password, assume that it's overwriting the URL password or that none was
+            // provided in the URL
+            urlEncodedOpts.Password = opts.Password
+        }
+        rdb = redis.NewClient(urlEncodedOpts)
 
         if isClusterClient(rdb) {
             opts.Logger.Warn("Detected that redis is running in cluster mode. You may encounter issues as a result")
         }
     }
 
-    if !IsFunctioningClient(rdb) {
-        return rdb, fmt.Errorf("failed to create a functioning redis client")
+    if isFunctioning, err := IsFunctioningClient(rdb); !isFunctioning {
+        return rdb, fmt.Errorf("failed to create a functioning redis client with the provided URLs: %w", err)
     }
 
     return rdb, nil
 }
 
-func IsFunctioningClient(rdb RDCloser) bool {
+// addClusterUrlsToQuery iterates over all the provided URLs, after the first one, and ensures that they are
+// appended to the URL as required by the go-redis library.
+// e.g. redis://user:password@localhost:6789?dial_timeout=3&read_timeout=6s&addr=localhost:6790&addr=localhost:6791
+func addClusterUrlsToQuery(opts *RedisCloserOptions, parsedUrl *url.URL) {
+    if len(opts.URLs) <= 1 {
+        return
+    }
+
+    queryVals := parsedUrl.Query()
+    for _, rawURL := range opts.URLs[1:] {
+        secondaryURL, parseErr := url.Parse(rawURL)
+        if parseErr != nil {
+            opts.Logger.Warn(fmt.Sprintf("Skipping invalid Redis URL %q: %v", rawURL, parseErr))
+            continue
+        }
+
+        // Strip schema, username, and password
+        addr := secondaryURL.Host
+        if secondaryURL.User != nil && parsedUrl.User != nil && parsedUrl.User.String() != "" && secondaryURL.User.Username() != parsedUrl.User.Username() {
+            opts.Logger.Warn("Stripping credentials from secondary Redis address", zap.String("address", addr))
+        }
+        if secondaryURL.Scheme != parsedUrl.Scheme {
+            opts.Logger.Warn("Mismatched Redis schemes provided", zap.String("firstScheme", parsedUrl.Scheme), zap.String("secondScheme", secondaryURL.Scheme))
+        }
+
+        queryVals.Add("addr", addr)
+    }
+    parsedUrl.RawQuery = queryVals.Encode()
+}
+
+func IsFunctioningClient(rdb RDCloser) (bool, error) {
     if rdb == nil {
-        return false
+        return false, nil
     }
 
     res, err := rdb.Ping(context.Background()).Result()
-    return err == nil && res == "PONG"
+    return err == nil && res == "PONG", err
 }
 
 func isClusterClient(rdb RDCloser) bool {
diff --git a/router/internal/persistedoperation/operationstorage/redis/rdcloser_test.go b/router/internal/persistedoperation/operationstorage/redis/rdcloser_test.go
index 3390f5ae5d..6bd4cbd807 100644
--- a/router/internal/persistedoperation/operationstorage/redis/rdcloser_test.go
+++ b/router/internal/persistedoperation/operationstorage/redis/rdcloser_test.go
@@ -30,7 +30,27 @@ func TestRedisCloser(t *testing.T) {
 
         require.NoError(t, err)
         require.NotNil(t, cl)
-        require.True(t, IsFunctioningClient(cl))
+        isFunctioning, err := IsFunctioningClient(cl)
+        require.True(t, isFunctioning)
+        require.NoError(t, err)
+        require.False(t, isClusterClient(cl))
+    })
+
+    t.Run("Works with auth", func(t *testing.T) {
+        mr := miniredis.RunT(t)
+        mr.RequireUserAuth("user", "pass")
+
+        authUrl := fmt.Sprintf("redis://user:pass@%s", mr.Addr())
+        cl, err := NewRedisCloser(&RedisCloserOptions{
+            Logger: zaptest.NewLogger(t),
+            URLs:   []string{authUrl},
+        })
+
+        require.NoError(t, err)
+        require.NotNil(t, cl)
+        isFunctioning, err := IsFunctioningClient(cl)
+        require.True(t, isFunctioning)
+        require.NoError(t, err)
         require.False(t, isClusterClient(cl))
     })
 
diff --git a/router/pkg/config/config.schema.json b/router/pkg/config/config.schema.json
index fe965241c9..d9763c301c 100644
--- a/router/pkg/config/config.schema.json
+++ b/router/pkg/config/config.schema.json
@@ -1546,7 +1546,7 @@
         },
         "urls": {
           "type": "array",
-          "description": "The Redis connection URLs. The values are specified as a string with the format 'scheme://host:port'. If cluster is enabled, will use them to instantiate a cluster connection. f",
+          "description": "The Redis connection URLs. The values are specified as a string with the format 'scheme://host:port', with optional auth and options added in to the URL. If cluster is enabled, will use them to instantiate a cluster connection.",
           "default": [],
           "items": {
             "type": "string"
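Not part of the diff above, but useful context for the rdcloser.go change: go-redis v9's redis.ParseClusterURL builds ClusterOptions from a single URL, with additional nodes passed as repeated "addr" query parameters — which is exactly the URL shape addClusterUrlsToQuery assembles from the configured URL list. The sketch below shows that flow end to end; the cosmo/test credentials and ports 7001-7003 are just the fixtures from the test setup, not requirements.

package main

import (
    "context"
    "fmt"
    "log"

    "github.com/redis/go-redis/v9"
)

func main() {
    // The first URL carries the scheme, credentials, and any client options; the remaining
    // nodes are folded into repeated "addr" query parameters, mirroring the URL that
    // addClusterUrlsToQuery produces before handing it to ParseClusterURL.
    clusterURL := "redis://cosmo:test@localhost:7001?addr=localhost:7002&addr=localhost:7003"

    opts, err := redis.ParseClusterURL(clusterURL)
    if err != nil {
        log.Fatalf("failed to parse cluster url: %v", err)
    }

    client := redis.NewClusterClient(opts)
    defer client.Close()

    // Same health check the router runs after constructing the client.
    if res, err := client.Ping(context.Background()).Result(); err != nil || res != "PONG" {
        log.Fatalf("redis cluster not reachable: %v", err)
    }
    fmt.Println("connected to cluster as user:", opts.Username)
}

Note that, per the addClusterUrlsToQuery code in the patch, only host:port survives from the second and later URLs; credentials and options are taken from the first URL (or from the explicitly configured password, which overrides the URL value).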
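The non-cluster branch follows the same idea through redis.ParseURL. A minimal sketch of that path is below — again not from the patch; the URL is a placeholder whose user/password pair happens to match the ACL entry added in docker/redis/redis-cluster.conf ("user cosmo on >test ~* &* +@all").

package main

import (
    "context"
    "fmt"
    "log"

    "github.com/redis/go-redis/v9"
)

func main() {
    // Credentials ride along in the URL and are picked up by ParseURL.
    opts, err := redis.ParseURL("redis://cosmo:test@localhost:6379")
    if err != nil {
        log.Fatalf("failed to parse redis url: %v", err)
    }

    // Mirrors the router's override rule: an explicitly configured password, if set,
    // replaces whatever password the URL carried.
    configuredPassword := "" // e.g. taken from the storage configuration
    if configuredPassword != "" {
        opts.Password = configuredPassword
    }

    client := redis.NewClient(opts)
    defer client.Close()

    if err := client.Ping(context.Background()).Err(); err != nil {
        log.Fatalf("redis ping/auth failed: %v", err)
    }
    fmt.Println("authenticated as user:", opts.Username)
}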