diff --git a/.golangci.yaml b/.golangci.yaml
index 4f25a5020c..487e0c8dfc 100644
--- a/.golangci.yaml
+++ b/.golangci.yaml
@@ -7,6 +7,7 @@ linters:
   enable:
   - errcheck
   - forbidigo
+  - unused
 
 issues:
   exclude-rules:
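Reviewer context, not part of the patch: `unused` is the staticcheck-backed golangci-lint check that reports unexported identifiers that are never referenced; enabling it is what drives every deletion in this change. A minimal sketch of the kind of code it flags (all names here are invented):

    package example

    import "fmt"

    // deadHelper is never called anywhere in the package, so the
    // `unused` check reports it (roughly: "func deadHelper is unused").
    func deadHelper() string { return "dead" }

    type server struct {
        addr string // read in Run below, so it is kept
        note string // never read or written: reported as an unused field
    }

    // Run references addr, which keeps that field alive for the linter.
    func (s *server) Run() { fmt.Println("listening on", s.addr) }

Exported identifiers are treated as public API and are not reported, which is why the deletions below are all unexported fields, functions, variables, and constants.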
diff --git a/cmd/404-server-with-metrics/server-with-metrics.go b/cmd/404-server-with-metrics/server-with-metrics.go
index f2f94507de..e9206ff1d5 100644
--- a/cmd/404-server-with-metrics/server-with-metrics.go
+++ b/cmd/404-server-with-metrics/server-with-metrics.go
@@ -39,7 +39,6 @@ import (
 var (
 	port              = flag.Int("port", 8080, "Port number to serve default backend 404 page.")
 	metricsPort       = flag.Int("metricsPort", 8081, "Port number to serve metrics for the default backend 404 page.")
-	serverTimeout     = flag.Duration("timeout", 5*time.Second, "Time in seconds to wait before forcefully terminating the server.")
 	readTimeout       = flag.Duration("read_timeout", 10*time.Second, "Time in seconds to read the entire request before timing out.")
 	readHeaderTimeout = flag.Duration("read_header_timeout", 10*time.Second, "Time in seconds to read the request header before timing out.")
 	writeTimeout      = flag.Duration("write_timeout", 10*time.Second, "Time in seconds to write response before timing out.")
diff --git a/cmd/check-gke-ingress/app/ingress/ingress.go b/cmd/check-gke-ingress/app/ingress/ingress.go
index 6f1d77ec2a..763be17dd8 100644
--- a/cmd/check-gke-ingress/app/ingress/ingress.go
+++ b/cmd/check-gke-ingress/app/ingress/ingress.go
@@ -20,8 +20,6 @@ import (
 	"context"
 	"fmt"
 	"os"
-	"reflect"
-	"runtime"
 
 	networkingv1 "k8s.io/api/networking/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -84,7 +82,6 @@ func RunChecks(ingresses []networkingv1.Ingress, client kubernetes.Interface, be
 			Checks: []*report.Check{},
 		}
 		ingressChecker := &IngressChecker{
-			client:  client,
 			ingress: &ingress,
 		}
@@ -177,7 +174,3 @@ func addCheckResult(ingressRes *report.Resource, checkName, msg, res string) {
 		Result: res,
 	})
 }
-
-func getCheckName(check func()) string {
-	return runtime.FuncForPC(reflect.ValueOf(check).Pointer()).Name()
-}
diff --git a/cmd/check-gke-ingress/app/ingress/rule.go b/cmd/check-gke-ingress/app/ingress/rule.go
index 792a767778..74a48b3ce4 100644
--- a/cmd/check-gke-ingress/app/ingress/rule.go
+++ b/cmd/check-gke-ingress/app/ingress/rule.go
@@ -29,7 +29,6 @@ import (
 	"k8s.io/ingress-gce/cmd/check-gke-ingress/app/report"
 	"k8s.io/ingress-gce/pkg/annotations"
 	beconfigv1 "k8s.io/ingress-gce/pkg/apis/backendconfig/v1"
-	feconfigv1beta1 "k8s.io/ingress-gce/pkg/apis/frontendconfig/v1beta1"
 	beconfigclient "k8s.io/ingress-gce/pkg/backendconfig/client/clientset/versioned"
 	feconfigclient "k8s.io/ingress-gce/pkg/frontendconfig/client/clientset/versioned"
 )
@@ -48,8 +47,6 @@ const (
 )
 
 type IngressChecker struct {
-	// Kubernetes client
-	client clientset.Interface
 	// Ingress object to be checked
 	ingress *networkingv1.Ingress
 }
@@ -89,8 +86,6 @@ type FrontendConfigChecker struct {
 	namespace string
 	// Name of the frontendConfig
 	name string
-	// FrontendConfig object to be checked
-	feConfig *feconfigv1beta1.FrontendConfig
 }
 
 type ingressCheckFunc func(c *IngressChecker) (string, string, string)
@@ -250,14 +245,13 @@ func CheckHealthCheckTimeout(c *BackendConfigChecker) (string, string, string) {
 
 // CheckFrontendConfigExistence checks whether a FrontendConfig exists.
 func CheckFrontendConfigExistence(c *FrontendConfigChecker) (string, string, string) {
-	feConfig, err := c.client.NetworkingV1beta1().FrontendConfigs(c.namespace).Get(context.TODO(), c.name, metav1.GetOptions{})
+	_, err := c.client.NetworkingV1beta1().FrontendConfigs(c.namespace).Get(context.TODO(), c.name, metav1.GetOptions{})
 	if err != nil {
 		if apierrors.IsNotFound(err) {
 			return FrontendConfigExistenceCheck, report.Failed, fmt.Sprintf("FrontendConfig %s/%s does not exist", c.namespace, c.name)
 		}
 		return FrontendConfigExistenceCheck, report.Failed, fmt.Sprintf("Failed to get frontendConfig %s/%s", c.namespace, c.name)
 	}
-	c.feConfig = feConfig
 	return FrontendConfigExistenceCheck, report.Passed, fmt.Sprintf("FrontendConfig %s/%s found", c.namespace, c.name)
 }
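The `feConfig, err :=` to `_, err :=` rewrite above is the usual companion fix: the Get call must still run, but its result is no longer stored anywhere, so the value is discarded into the blank identifier and only the error is kept. A generic sketch of the same pattern, against a hypothetical `getter` interface rather than the real FrontendConfig client:

    package example

    import (
        "fmt"

        apierrors "k8s.io/apimachinery/pkg/api/errors"
    )

    type getter interface {
        Get(name string) (interface{}, error)
    }

    // exists discards the fetched object via the blank identifier; only the
    // error determines the outcome, as in CheckFrontendConfigExistence above.
    func exists(g getter, name string) (bool, error) {
        _, err := g.Get(name)
        if err != nil {
            if apierrors.IsNotFound(err) {
                return false, nil
            }
            return false, fmt.Errorf("failed to get %s: %w", name, err)
        }
        return true, nil
    }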
diff --git a/cmd/e2e-test/upgrade/psc.go b/cmd/e2e-test/upgrade/psc.go
index 7f02271c5f..cd4a4e6e51 100644
--- a/cmd/e2e-test/upgrade/psc.go
+++ b/cmd/e2e-test/upgrade/psc.go
@@ -34,7 +34,6 @@ type PSC struct {
 	t         *testing.T
 	s         *e2e.Sandbox
 	framework *e2e.Framework
-	gceSAURL  string
 }
 
 func NewPSCUpgradeTest() e2e.UpgradeTest {
diff --git a/cmd/fuzzer/app/validate.go b/cmd/fuzzer/app/validate.go
index 9910f2e1e6..2ca468f517 100644
--- a/cmd/fuzzer/app/validate.go
+++ b/cmd/fuzzer/app/validate.go
@@ -29,7 +29,6 @@ import (
 	"k8s.io/client-go/kubernetes"
 	"k8s.io/client-go/rest"
 	"k8s.io/client-go/tools/clientcmd"
-	backendconfig "k8s.io/ingress-gce/pkg/backendconfig/client/clientset/versioned"
 	"k8s.io/ingress-gce/pkg/e2e"
 	"k8s.io/ingress-gce/pkg/fuzz"
 	"k8s.io/ingress-gce/pkg/fuzz/features"
@@ -177,11 +176,3 @@ func k8sClientSet(config *rest.Config) *kubernetes.Clientset {
 	}
 	return clientset
 }
-
-func backendConfigClientset(config *rest.Config) *backendconfig.Clientset {
-	clientset, err := backendconfig.NewForConfig(config)
-	if err != nil {
-		panic(err.Error())
-	}
-	return clientset
-}
diff --git a/pkg/backends/integration_test.go b/pkg/backends/integration_test.go
index 61c772c557..13639d4ef8 100644
--- a/pkg/backends/integration_test.go
+++ b/pkg/backends/integration_test.go
@@ -41,7 +41,6 @@ type Jig struct {
 	fakeInstancePool instancegroups.Manager
 	linker           Linker
 	syncer           Syncer
-	pool             Pool
 }
 
 func newTestJig(fakeGCE *gce.Cloud) *Jig {
@@ -78,7 +77,6 @@ func newTestJig(fakeGCE *gce.Cloud) *Jig {
 		fakeInstancePool: fakeInstancePool,
 		linker:           NewInstanceGroupLinker(fakeInstancePool, fakeBackendPool, klog.TODO()),
 		syncer:           NewBackendSyncer(fakeBackendPool, fakeHealthChecks, fakeGCE),
-		pool:             fakeBackendPool,
 	}
 }
diff --git a/pkg/composite/metrics/metrics.go b/pkg/composite/metrics/metrics.go
index aa80469a3d..e213cdaec3 100644
--- a/pkg/composite/metrics/metrics.go
+++ b/pkg/composite/metrics/metrics.go
@@ -22,13 +22,6 @@ import (
 	"github.com/prometheus/client_golang/prometheus"
 )
 
-const (
-	// Version strings for recording metrics.
-	computeV1Version    = "v1"
-	computeAlphaVersion = "alpha"
-	computeBetaVersion  = "beta"
-)
-
 type apiCallMetrics struct {
 	latency *prometheus.HistogramVec
 	errors  *prometheus.CounterVec
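For readers skimming the metrics hunk: the surviving `apiCallMetrics` struct pairs a `HistogramVec` (latency) with a `CounterVec` (errors), both label-partitioned metric families from prometheus/client_golang. A self-contained sketch of that pattern; the metric names are invented, not the ones this package registers:

    package example

    import "github.com/prometheus/client_golang/prometheus"

    var (
        callLatency = prometheus.NewHistogramVec(
            prometheus.HistogramOpts{
                Name: "example_api_call_duration_seconds",
                Help: "Latency of API calls, partitioned by method.",
            },
            []string{"method"},
        )
        callErrors = prometheus.NewCounterVec(
            prometheus.CounterOpts{
                Name: "example_api_call_errors_total",
                Help: "Count of failed API calls, partitioned by method.",
            },
            []string{"method"},
        )
    )

    func init() {
        prometheus.MustRegister(callLatency, callErrors)
    }

    // record observes one call outcome under the given method label.
    func record(method string, seconds float64, err error) {
        callLatency.WithLabelValues(method).Observe(seconds)
        if err != nil {
            callErrors.WithLabelValues(method).Inc()
        }
    }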
diff --git a/pkg/controller/controller.go b/pkg/controller/controller.go
index 2b8052ec2e..0e97608c37 100644
--- a/pkg/controller/controller.go
+++ b/pkg/controller/controller.go
@@ -63,8 +63,6 @@ import (
 type LoadBalancerController struct {
 	ctx *context.ControllerContext
 
-	nodeLister cache.Indexer
-
 	// TODO: Watch secrets
 	ingQueue   utils.TaskQueue
 	Translator *legacytranslator.Translator
@@ -101,9 +99,6 @@ type LoadBalancerController struct {
 	// Ingress usage metrics.
 	metrics metrics.IngressMetricsCollector
 
-	ingClassLister  cache.Indexer
-	ingParamsLister cache.Indexer
-
 	ZoneGetter *zonegetter.ZoneGetter
 
 	logger klog.Logger
@@ -132,7 +127,6 @@ func NewLoadBalancerController(
 	lbc := LoadBalancerController{
 		ctx:        ctx,
-		nodeLister: ctx.NodeInformer.GetIndexer(),
 		Translator: ctx.Translator,
 		stopCh:     stopCh,
 		hasSynced:  ctx.HasSynced,
@@ -146,11 +140,6 @@ func NewLoadBalancerController(
 		logger:       logger,
 	}
 
-	if ctx.IngClassInformer != nil {
-		lbc.ingClassLister = ctx.IngClassInformer.GetIndexer()
-		lbc.ingParamsLister = ctx.IngParamsInformer.GetIndexer()
-	}
-
 	lbc.ingSyncer = ingsync.NewIngressSyncer(&lbc, logger)
 	lbc.ingQueue = utils.NewPeriodicTaskQueueWithMultipleWorkers("ingress", "ingresses", flags.F.NumIngressWorkers, lbc.sync, logger)
 	lbc.backendSyncer.Init(lbc.Translator)
diff --git a/pkg/controller/translator/translator.go b/pkg/controller/translator/translator.go
index a8ece8b6ce..f07e79b828 100644
--- a/pkg/controller/translator/translator.go
+++ b/pkg/controller/translator/translator.go
@@ -86,7 +86,6 @@ func NewTranslator(serviceInformer cache.SharedIndexInformer,
 		EndpointSliceInformer: endpointSliceInformer,
 		KubeClient:            kubeClient,
 		enableTHC:             enableTHC,
-		recorderGetter:        recorderGetter,
 		enableL7XLBRegional:   enableL7XLBRegional,
 		logger:                logger.WithName("Translator"),
 	}
@@ -100,7 +99,6 @@ type Translator struct {
 	PodInformer           cache.SharedIndexInformer
 	EndpointSliceInformer cache.SharedIndexInformer
 	KubeClient            kubernetes.Interface
-	recorderGetter        healthchecks.RecorderGetter
 	enableTHC             bool
 	enableL7XLBRegional   bool
diff --git a/pkg/controller/translator/translator_test.go b/pkg/controller/translator/translator_test.go
index 179027cb4f..8c8235353c 100644
--- a/pkg/controller/translator/translator_test.go
+++ b/pkg/controller/translator/translator_test.go
@@ -19,12 +19,13 @@ package translator
 import (
 	"encoding/json"
 	"fmt"
-	"k8s.io/klog/v2"
 	"os"
 	"reflect"
 	"testing"
 	"time"
 
+	"k8s.io/klog/v2"
+
 	"github.com/google/go-cmp/cmp"
 	"github.com/google/go-cmp/cmp/cmpopts"
 	"github.com/kr/pretty"
@@ -1136,14 +1137,6 @@ func gceURLMapFromFile(t *testing.T, filename string) *utils.GCEURLMap {
 	return v
 }
 
-func int64ToMap(l []int64) map[int64]bool {
-	ret := map[int64]bool{}
-	for _, i := range l {
-		ret[i] = true
-	}
-	return ret
-}
-
 func TestSetTrafficScaling(t *testing.T) {
 	// No t.Parallel()
@@ -1383,7 +1376,6 @@ func TestSetThcOptInOnSvc(t *testing.T) {
 	for _, tc := range testCases {
 		t.Run(tc.name, func(t *testing.T) {
 			translator := fakeTranslator()
-			translator.recorderGetter = healthchecks.NewFakeRecorderGetter(0)
 			translator.enableTHC = tc.enableTHC
 
 			sp := *tc.sp
diff --git a/pkg/controller/utils_test.go b/pkg/controller/utils_test.go
index cea39dfa29..9885ae2e14 100644
--- a/pkg/controller/utils_test.go
+++ b/pkg/controller/utils_test.go
@@ -19,11 +19,9 @@ package controller
 import (
 	"reflect"
 	"testing"
-	"time"
 
 	"google.golang.org/api/compute/v1"
 
-	api_v1 "k8s.io/api/core/v1"
 	networkingv1 "k8s.io/api/networking/v1"
 	"k8s.io/apimachinery/pkg/types"
 	"k8s.io/klog/v2"
@@ -35,10 +33,6 @@ import (
 	"k8s.io/ingress-gce/pkg/utils/zonegetter"
 )
 
-// Pods created in loops start from this time, for routines that
-// sort on timestamp.
-var firstPodCreationTime = time.Date(2006, 01, 02, 15, 04, 05, 0, time.UTC)
-
 func TestZoneListing(t *testing.T) {
 	lbc := newLoadBalancerController()
 	zoneToNode := map[string][]string{
@@ -65,41 +59,6 @@ func TestZoneListing(t *testing.T) {
 	}
 }
 
-/*
-* TODO(rramkumar): Move to pkg/instances in another PR
-func TestInstancesAddedToZones(t *testing.T) {
-	lbc := newLoadBalancerController()
-	zoneToNode := map[string][]string{
-		"zone-1": {"n1", "n2"},
-		"zone-2": {"n3"},
-	}
-	addNodes(lbc, zoneToNode)
-
-	// Create 2 igs, one per zone.
-	testIG := "test-ig"
-	lbc.instancePool.EnsureInstanceGroupsAndPorts(testIG, []int64{int64(3001)})
-
-	// node pool syncs kube-nodes, this will add them to both igs.
-	lbc.instancePool.Sync([]string{"n1", "n2", "n3"})
-	gotZonesToNode := lbc.instancePool.GetInstancesByZone()
-
-	for z, nodeNames := range zoneToNode {
-		if ig, err := lbc.instancePool.GetInstanceGroup(testIG, z); err != nil {
-			t.Errorf("Failed to find ig %v in zone %v, found %+v: %v", testIG, z, ig, err)
-		}
-		expNodes := sets.NewString(nodeNames...)
-		gotNodes := sets.NewString(gotZonesToNode[z]...)
-		if !gotNodes.Equal(expNodes) {
-			t.Errorf("Nodes not added to zones, expected %+v got %+v", expNodes, gotNodes)
-		}
-	}
-}
-*/
-
-func getProbePath(p *api_v1.Probe) string {
-	return p.ProbeHandler.HTTPGet.Path
-}
-
 func TestAddInstanceGroupsAnnotation(t *testing.T) {
 	testCases := []struct {
 		Igs []*compute.InstanceGroup
diff --git a/pkg/crd/meta.go b/pkg/crd/meta.go
index 15bef72937..97a8de9706 100644
--- a/pkg/crd/meta.go
+++ b/pkg/crd/meta.go
@@ -31,8 +31,6 @@ type CRDMeta struct {
 	singular   string
 	plural     string
 	shortNames []string
-	typeSource string
-	fn         common.GetOpenAPIDefinitions
 }
 
 // NewCRDMeta creates a CRDMeta type which can be passed to a CRDHandler in
diff --git a/pkg/experimental/workload/daemon/provider/gce/vm.go b/pkg/experimental/workload/daemon/provider/gce/vm.go
index d4a787d9bb..8d3dbd554d 100644
--- a/pkg/experimental/workload/daemon/provider/gce/vm.go
+++ b/pkg/experimental/workload/daemon/provider/gce/vm.go
@@ -42,7 +42,6 @@ type VM struct {
 	instanceName string
 	hostname     string
 	internalIP   string
-	externalIP   string
 	projectID    string
 	region       string
 	zone         string
@@ -256,7 +255,6 @@ func NewVM(logger klog.Logger) (vm *VM, err error) {
 		instanceName: getAttrOrPanic(metadata.InstanceName, "InstanceName", logger),
 		hostname:     getAttrOrPanic(metadata.Hostname, "Hostname", logger),
 		internalIP:   getAttrOrPanic(metadata.InternalIP, "InternalIP", logger),
-		externalIP:   getAttrOrPanic(metadata.ExternalIP, "ExternalIP", logger),
 		projectID:    getAttrOrPanic(metadata.ProjectID, "ProjectID", logger),
 		zone:         getAttrOrPanic(metadata.Zone, "Zone", logger),
 		// Fetch the cluster name and zone
diff --git a/pkg/experimental/workload/daemon/utils/template.go b/pkg/experimental/workload/daemon/utils/template.go
index 66f3b65479..8b9ba20a00 100644
--- a/pkg/experimental/workload/daemon/utils/template.go
+++ b/pkg/experimental/workload/daemon/utils/template.go
@@ -41,23 +41,3 @@ users:
       expiry-key: '{.token_expiry}'
       token-key: '{.access_token}'
     name: {{.authProvider}}`
-
-const kubeConfigKsaTemp = `
-apiVersion: v1
-clusters:
-- cluster:
-    certificate-authority-data: {{.clusterCa}}
-    server: https://{{.clusterIP}}
-  name: {{.clusterName}}
-contexts:
-- context:
-    cluster: {{.clusterName}}
-    user: {{.saName}}
-  name: {{.clusterName}}
-current-context: {{.clusterName}}
-kind: Config
-preferences: {}
-users:
-- name: {{.saName}}
-  user:
-    token: {{.accessToken}}`
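The deleted `kubeConfigKsaTemp` above was a text/template string like the surviving template that ends at `{{.authProvider}}`. For context, this is how such templates are rendered; the template body and field names below are a made-up miniature, not the daemon's actual kubeconfig:

    package example

    import (
        "os"
        "text/template"
    )

    // A tiny stand-in for the daemon's kubeconfig templates.
    const kubeConfigSketch = "apiVersion: v1\nkind: Config\ncurrent-context: {{.clusterName}}\n"

    // render fills the {{.field}} placeholders from a map, the same
    // mechanism the daemon uses for its kubeconfig templates.
    func render() error {
        t := template.Must(template.New("kubeconfig").Parse(kubeConfigSketch))
        return t.Execute(os.Stdout, map[string]string{"clusterName": "my-cluster"})
    }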
diff --git a/pkg/firewalls/firewalls_l7_cr.go b/pkg/firewalls/firewalls_l7_cr.go
index 09f20fdf12..e986064989 100644
--- a/pkg/firewalls/firewalls_l7_cr.go
+++ b/pkg/firewalls/firewalls_l7_cr.go
@@ -35,7 +35,6 @@ import (
 
 // FirewallRules manages firewall rules.
 type FirewallCR struct {
-	cloud     Firewall
 	namer     *namer_util.Namer
 	srcRanges []string
 	// TODO(rramkumar): Eliminate this variable. We should just pass in
@@ -56,7 +55,6 @@ func NewFirewallCRPool(client firewallclient.Interface, cloud Firewall, namer *n
 		klog.Fatalf("Could not parse L7 src ranges %v for firewall rule: %v", l7SrcRanges, err)
 	}
 	return &FirewallCR{
-		cloud:          cloud,
 		namer:          namer,
 		srcRanges:      l7SrcRanges,
 		nodePortRanges: nodePortRanges,
diff --git a/pkg/fuzz/features/ilb.go b/pkg/fuzz/features/ilb.go
index 7ad8c0c3a1..14376bb55a 100644
--- a/pkg/fuzz/features/ilb.go
+++ b/pkg/fuzz/features/ilb.go
@@ -42,9 +42,6 @@ func (*ILBFeature) Name() string {
 // ILBValidator is an example validator.
 type ILBValidator struct {
 	fuzz.NullValidator
-
-	ing *v1.Ingress
-	env fuzz.ValidatorEnv
 }
 
 // Name implements fuzz.FeatureValidator.
@@ -54,9 +51,6 @@ func (*ILBValidator) Name() string {
 
 // ConfigureAttributes implements fuzz.FeatureValidator.
 func (v *ILBValidator) ConfigureAttributes(env fuzz.ValidatorEnv, ing *v1.Ingress, a *fuzz.IngressValidatorAttributes) error {
-	// Capture the env for use later in CheckResponse.
-	v.ing = ing
-	v.env = env
 	return nil
 }
diff --git a/pkg/healthchecks/healthchecks_test.go b/pkg/healthchecks/healthchecks_test.go
index 5e755fa866..3b995f33b7 100644
--- a/pkg/healthchecks/healthchecks_test.go
+++ b/pkg/healthchecks/healthchecks_test.go
@@ -1203,11 +1203,7 @@ func hcFromBC(bchcc *backendconfigv1.HealthCheckConfig, neg bool, json bool) *co
 func (f *syncSPFixture) hcs() *compute.HealthCheck  { return f.toS(f.hc()) }
 func (f *syncSPFixture) hc2() *compute.HealthCheck  { return f.to2(f.hc()) }
 func (f *syncSPFixture) negs() *compute.HealthCheck { return f.toS(f.neg()) }
-func (f *syncSPFixture) neg2() *compute.HealthCheck { return f.to2(f.neg()) }
 func (f *syncSPFixture) ilbs() *compute.HealthCheck { return f.toS(f.ilb()) }
-func (f *syncSPFixture) ilb2() *compute.HealthCheck { return f.to2(f.ilb()) }
-func (f *syncSPFixture) thcs() *compute.HealthCheck { panic("no such thing exists") }
-func (f *syncSPFixture) thc2() *compute.HealthCheck { panic("no such thing exists") }
 
 func (f *syncSPFixture) toS(h *compute.HealthCheck) *compute.HealthCheck {
 	h.Type = "HTTPS"
diff --git a/pkg/instancegroups/controller.go b/pkg/instancegroups/controller.go
index d3c591a6cf..77b3fc7ff0 100644
--- a/pkg/instancegroups/controller.go
+++ b/pkg/instancegroups/controller.go
@@ -30,8 +30,6 @@ import (
 // Controller synchronizes the state of the nodes to the unmanaged instance
 // groups.
 type Controller struct {
-	// lister is a cache of the k8s Node resources.
-	lister cache.Indexer
 	// queue is the TaskQueue used to manage the node worker updates.
 	queue utils.TaskQueue
 	// igManager is an interface to manage instance groups.
@@ -67,7 +65,6 @@ var defaultNodeObj = &apiv1.Node{
 func NewController(config *ControllerConfig, logger klog.Logger) *Controller {
 	logger = logger.WithName("InstanceGroupsController")
 	c := &Controller{
-		lister:     config.NodeInformer.GetIndexer(),
 		zoneGetter: config.ZoneGetter,
 		igManager:  config.IGManager,
 		hasSynced:  config.HasSynced,
diff --git a/pkg/instancegroups/fakes.go b/pkg/instancegroups/fakes.go
index 156ae7a2b7..5801756484 100644
--- a/pkg/instancegroups/fakes.go
+++ b/pkg/instancegroups/fakes.go
@@ -47,7 +47,6 @@ type IGsToInstances map[*compute.InstanceGroup]sets.String
 
 // FakeInstanceGroups fakes out the instance groups api.
 type FakeInstanceGroups struct {
-	getResult             *compute.InstanceGroup
 	calls                 []int
 	zonesToIGsToInstances map[string]IGsToInstances
 	maxIGSize             int
diff --git a/pkg/l4lb/l4controller.go b/pkg/l4lb/l4controller.go
index 00ec01f343..13557250a0 100644
--- a/pkg/l4lb/l4controller.go
+++ b/pkg/l4lb/l4controller.go
@@ -21,7 +21,6 @@ import (
 	"math/rand"
 	"reflect"
 	"strings"
-	"sync"
 	"time"
 
 	"github.com/GoogleCloudPlatform/k8s-cloud-provider/pkg/cloud"
@@ -61,7 +60,6 @@ type L4Controller struct {
 	client                   kubernetes.Interface
 	svcQueue                 utils.TaskQueue
 	numWorkers               int
-	serviceLister            cache.Indexer
 	networkLister            cache.Indexer
 	gkeNetworkParamSetLister cache.Indexer
 	networkResolver          network.Resolver
@@ -75,10 +73,9 @@ type L4Controller struct {
 	// enqueueTracker tracks the latest time an update was enqueued
 	enqueueTracker utils.TimeTracker
 	// syncTracker tracks the latest time an enqueued service was synced
-	syncTracker         utils.TimeTracker
-	forwardingRules     ForwardingRulesGetter
-	sharedResourcesLock sync.Mutex
-	enableDualStack     bool
+	syncTracker     utils.TimeTracker
+	forwardingRules ForwardingRulesGetter
+	enableDualStack bool
 
 	hasSynced func() bool
@@ -97,7 +94,6 @@ func NewILBController(ctx *context.ControllerContext, stopCh <-chan struct{}, lo
 	l4c := &L4Controller{
 		ctx:           ctx,
 		client:        ctx.KubeClient,
-		serviceLister: ctx.ServiceInformer.GetIndexer(),
 		stopCh:        stopCh,
 		numWorkers:    ctx.NumL4Workers,
 		namer:         ctx.L4Namer,
diff --git a/pkg/l4lb/l4netlbcontroller.go b/pkg/l4lb/l4netlbcontroller.go
index 362d85b9f7..22a300e05a 100644
--- a/pkg/l4lb/l4netlbcontroller.go
+++ b/pkg/l4lb/l4netlbcontroller.go
@@ -60,7 +60,6 @@ type backendLinkType int64
 type L4NetLBController struct {
 	ctx             *context.ControllerContext
 	svcQueue        utils.TaskQueue
-	serviceLister   cache.Indexer
 	networkResolver network.Resolver
 
 	stopCh <-chan struct{}
@@ -99,7 +98,6 @@ func NewL4NetLBController(
 	backendPool := backends.NewPoolWithConnectionTrackingPolicy(ctx.Cloud, ctx.L4Namer, ctx.EnableL4StrongSessionAffinity)
 	l4netLBc := &L4NetLBController{
 		ctx:           ctx,
-		serviceLister: ctx.ServiceInformer.GetIndexer(),
 		stopCh:        stopCh,
 		zoneGetter:    ctx.ZoneGetter,
 		backendPool:   backendPool,
diff --git a/pkg/l4lb/l4netlbcontroller_test.go b/pkg/l4lb/l4netlbcontroller_test.go
index 95f30d4c8a..2618f3bc99 100644
--- a/pkg/l4lb/l4netlbcontroller_test.go
+++ b/pkg/l4lb/l4netlbcontroller_test.go
@@ -1748,13 +1748,11 @@ func TestProcessDualStackNetLBServiceOnUserError(t *testing.T) {
 type fakeNEGLinker struct {
 	called bool
 	sp     utils.ServicePort
-	groups []backends.GroupKey
 }
 
 func (l *fakeNEGLinker) Link(sp utils.ServicePort, groups []backends.GroupKey) error {
 	l.called = true
 	l.sp = sp
-	l.groups = groups
 	return nil
 }
"github.com/GoogleCloudPlatform/k8s-cloud-provider/pkg/cloud" "github.com/GoogleCloudPlatform/k8s-cloud-provider/pkg/cloud/meta" @@ -27,17 +26,6 @@ import ( const FakeCertQuota = 15 -var testIPManager = testIP{} - -type testIP struct { - start int -} - -func (t *testIP) ip() string { - t.start++ - return fmt.Sprintf("0.0.0.%v", t.start) -} - func InsertGlobalForwardingRuleHook(ctx context.Context, key *meta.Key, obj *compute.ForwardingRule, m *cloud.MockGlobalForwardingRules, options ...cloud.Option) (b bool, e error) { if obj.IPAddress == "" { obj.IPAddress = "0.0.0.1" diff --git a/pkg/loadbalancers/l4_test.go b/pkg/loadbalancers/l4_test.go index 2019ccfc94..18c4f41b56 100644 --- a/pkg/loadbalancers/l4_test.go +++ b/pkg/loadbalancers/l4_test.go @@ -906,22 +906,15 @@ func TestEnsureInternalLoadBalancerWithSpecialHealthCheck(t *testing.T) { } type EnsureILBParams struct { - clusterName string - clusterID string service *v1.Service - existingFwdRule *composite.ForwardingRule networkResolver network.Resolver } // newEnsureILBParams is the constructor of EnsureILBParams. func newEnsureILBParams() *EnsureILBParams { - vals := gce.DefaultTestClusterValues() return &EnsureILBParams{ - vals.ClusterName, - vals.ClusterID, test.NewL4ILBService(false, 8080), nil, - nil, } } @@ -962,9 +955,6 @@ func TestEnsureInternalLoadBalancerErrors(t *testing.T) { }, }, "Delete region forwarding rule failed": { - adjustParams: func(params *EnsureILBParams) { - params.existingFwdRule = &composite.ForwardingRule{BackendService: "badBackendService"} - }, injectMock: func(c *cloud.MockGCE) { c.MockForwardingRules.DeleteHook = mock.DeleteForwardingRuleErrHook }, diff --git a/pkg/loadbalancers/loadbalancers_test.go b/pkg/loadbalancers/loadbalancers_test.go index 8d88dbbfb3..d00e0d129a 100644 --- a/pkg/loadbalancers/loadbalancers_test.go +++ b/pkg/loadbalancers/loadbalancers_test.go @@ -61,9 +61,7 @@ type testJig struct { fakeGCE *gce.Cloud mock *cloud.MockGCE namer *namer_util.Namer - ing *networkingv1.Ingress feNamer namer_util.IngressFrontendNamer - t *testing.T } func newTestJig(t *testing.T) *testJig { @@ -132,9 +130,7 @@ func newTestJig(t *testing.T) *testJig { fakeGCE: fakeGCE, mock: mockGCE, namer: namer, - ing: ing, feNamer: feNamer, - t: t, } } @@ -1443,16 +1439,6 @@ func createCert(key string, contents string, name string) *translator.TLSCerts { return &translator.TLSCerts{Key: key, Cert: contents, Name: name, CertHash: translator.GetCertHash(contents)} } -func syncPool(j *testJig, t *testing.T, lbInfo *L7RuntimeInfo) { - if _, err := j.pool.Ensure(lbInfo); err != nil { - t.Fatalf("j.pool.Ensure() = err %v", err) - } - l7, err := j.pool.Ensure(lbInfo) - if err != nil || l7 == nil { - t.Fatalf("Expected l7 not created") - } -} - func TestList(t *testing.T) { j := newTestJig(t) diff --git a/pkg/neg/controller.go b/pkg/neg/controller.go index 2ed0e36f09..0ed8f086ee 100644 --- a/pkg/neg/controller.go +++ b/pkg/neg/controller.go @@ -64,7 +64,6 @@ func init() { // It determines whether NEG for a service port is needed, then signals NegSyncerManager to sync it. 
diff --git a/pkg/loadbalancers/loadbalancers_test.go b/pkg/loadbalancers/loadbalancers_test.go
index 8d88dbbfb3..d00e0d129a 100644
--- a/pkg/loadbalancers/loadbalancers_test.go
+++ b/pkg/loadbalancers/loadbalancers_test.go
@@ -61,9 +61,7 @@ type testJig struct {
 	fakeGCE *gce.Cloud
 	mock    *cloud.MockGCE
 	namer   *namer_util.Namer
-	ing     *networkingv1.Ingress
 	feNamer namer_util.IngressFrontendNamer
-	t       *testing.T
 }
 
 func newTestJig(t *testing.T) *testJig {
@@ -132,9 +130,7 @@ func newTestJig(t *testing.T) *testJig {
 		fakeGCE: fakeGCE,
 		mock:    mockGCE,
 		namer:   namer,
-		ing:     ing,
 		feNamer: feNamer,
-		t:       t,
 	}
 }
@@ -1443,16 +1439,6 @@ func createCert(key string, contents string, name string) *translator.TLSCerts {
 	return &translator.TLSCerts{Key: key, Cert: contents, Name: name, CertHash: translator.GetCertHash(contents)}
 }
 
-func syncPool(j *testJig, t *testing.T, lbInfo *L7RuntimeInfo) {
-	if _, err := j.pool.Ensure(lbInfo); err != nil {
-		t.Fatalf("j.pool.Ensure() = err %v", err)
-	}
-	l7, err := j.pool.Ensure(lbInfo)
-	if err != nil || l7 == nil {
-		t.Fatalf("Expected l7 not created")
-	}
-}
-
 func TestList(t *testing.T) {
 	j := newTestJig(t)
diff --git a/pkg/neg/controller.go b/pkg/neg/controller.go
index 2ed0e36f09..0ed8f086ee 100644
--- a/pkg/neg/controller.go
+++ b/pkg/neg/controller.go
@@ -64,7 +64,6 @@ func init() {
 // It determines whether NEG for a service port is needed, then signals NegSyncerManager to sync it.
 type Controller struct {
 	manager      negtypes.NegSyncerManager
-	resyncPeriod time.Duration
 	gcPeriod     time.Duration
 	recorder     record.EventRecorder
 	namer        negtypes.NetworkEndpointGroupNamer
@@ -216,7 +215,6 @@ func NewController(
 	negController := &Controller{
 		client:       kubeClient,
 		manager:      manager,
-		resyncPeriod: resyncPeriod,
 		gcPeriod:     gcPeriod,
 		recorder:     recorder,
 		zoneGetter:   zoneGetter,
diff --git a/pkg/neg/controller_test.go b/pkg/neg/controller_test.go
index fe9d235eca..eeb7c7fe25 100644
--- a/pkg/neg/controller_test.go
+++ b/pkg/neg/controller_test.go
@@ -60,8 +60,7 @@ const (
 )
 
 var (
-	metricsInterval = 10 * time.Minute
-	defaultBackend  = utils.ServicePort{
+	defaultBackend = utils.ServicePort{
 		ID: utils.ServicePortID{
 			Service: types.NamespacedName{
 				Name:      "default-http-backend",
diff --git a/pkg/neg/metrics/metrics.go b/pkg/neg/metrics/metrics.go
index 70bcb83a6a..a2de2b5500 100644
--- a/pkg/neg/metrics/metrics.go
+++ b/pkg/neg/metrics/metrics.go
@@ -56,8 +56,6 @@ const (
 	ListNEHealthRequest = "ListNEHealth"
 )
 
-type syncType string
-
 var (
 	NegOperationLatency = prometheus.NewHistogramVec(
 		prometheus.HistogramOpts{
diff --git a/pkg/neg/readiness/reflector.go b/pkg/neg/readiness/reflector.go
index 9a13259566..2b052418b9 100644
--- a/pkg/neg/readiness/reflector.go
+++ b/pkg/neg/readiness/reflector.go
@@ -70,8 +70,7 @@ type readinessReflector struct {
 	podLister cache.Indexer
 	lookup    NegLookup
 
-	eventBroadcaster record.EventBroadcaster
-	eventRecorder    record.EventRecorder
+	eventRecorder record.EventRecorder
 
 	queue workqueue.RateLimitingInterface
@@ -97,7 +96,6 @@ func NewReadinessReflector(kubeClient, eventRecorderClient kubernetes.Interface,
 		podLister:        podLister,
 		clock:            clock.RealClock{},
 		lookup:           lookup,
-		eventBroadcaster: broadcaster,
 		eventRecorder:    recorder,
 		queue:            workqueue.NewRateLimitingQueue(workqueue.DefaultControllerRateLimiter()),
 		zoneGetter:       zoneGetter,
diff --git a/pkg/neg/syncers/endpoints_calculator.go b/pkg/neg/syncers/endpoints_calculator.go
index 07cf392476..be730a8dc0 100644
--- a/pkg/neg/syncers/endpoints_calculator.go
+++ b/pkg/neg/syncers/endpoints_calculator.go
@@ -138,8 +138,6 @@ func (l *LocalL4ILBEndpointsCalculator) ValidateEndpoints(endpointData []types.E
 // In this mode, the endpoints of the NEG are calculated by selecting nodes at random. Up to 25(subset size limit in this
 // mode) are selected.
 type ClusterL4ILBEndpointsCalculator struct {
-	// nodeLister is used for listing all the nodes in the cluster when calculating the subset.
-	nodeLister listers.NodeLister
 	// zoneGetter looks up the zone for a given node when calculating subsets.
 	zoneGetter *zonegetter.ZoneGetter
 	// subsetSizeLimit is the max value of the subset size in this mode.
@@ -153,7 +151,6 @@ type ClusterL4ILBEndpointsCalculator struct {
 func NewClusterL4ILBEndpointsCalculator(nodeLister listers.NodeLister, zoneGetter *zonegetter.ZoneGetter, svcId string, logger klog.Logger, networkInfo *network.NetworkInfo) *ClusterL4ILBEndpointsCalculator {
 	return &ClusterL4ILBEndpointsCalculator{
-		nodeLister:      nodeLister,
 		zoneGetter:      zoneGetter,
 		subsetSizeLimit: maxSubsetSizeDefault,
 		svcId:           svcId,
diff --git a/pkg/neg/syncers/endpoints_calculator_test.go b/pkg/neg/syncers/endpoints_calculator_test.go
index 304a211146..98125e055c 100644
--- a/pkg/neg/syncers/endpoints_calculator_test.go
+++ b/pkg/neg/syncers/endpoints_calculator_test.go
@@ -801,17 +801,3 @@ func updateNodes(t *testing.T, nodeNames []string, nodeLabels map[string]map[str
 		}
 	}
 }
-
-func deleteNodes(t *testing.T, nodeNames []string, nodeIndexer cache.Indexer) {
-	t.Helper()
-	for _, nodeName := range nodeNames {
-		node, exists, err := nodeIndexer.GetByKey(nodeName)
-		if err != nil || !exists {
-			t.Errorf("Could not lookup node %q, err - %v", nodeName, err)
-			continue
-		}
-		if err := nodeIndexer.Delete(node); err != nil {
-			t.Errorf("Failed to delete node %q, err - %v", nodeName, err)
-		}
-	}
-}
diff --git a/pkg/neg/syncers/syncer_test.go b/pkg/neg/syncers/syncer_test.go
index c17e0ed867..9188faf745 100644
--- a/pkg/neg/syncers/syncer_test.go
+++ b/pkg/neg/syncers/syncer_test.go
@@ -22,14 +22,10 @@ import (
 	"testing"
 	"time"
 
-	v1 "k8s.io/api/networking/v1"
-	"k8s.io/apimachinery/pkg/types"
-	"k8s.io/apimachinery/pkg/util/intstr"
 	"k8s.io/apimachinery/pkg/util/wait"
 	"k8s.io/client-go/tools/record"
 	"k8s.io/ingress-gce/pkg/backoff"
 	negtypes "k8s.io/ingress-gce/pkg/neg/types"
-	"k8s.io/ingress-gce/pkg/utils"
 	"k8s.io/klog/v2"
 )
@@ -43,19 +39,6 @@ const (
 	kubeSystemUID = "kube-system-id"
 )
 
-var (
-	defaultBackend = utils.ServicePort{
-		ID: utils.ServicePortID{
-			Service: types.NamespacedName{
-				Name:      "default-http-backend",
-				Namespace: "kube-system",
-			},
-			Port: v1.ServiceBackendPort{Name: "http"},
-		},
-		TargetPort: intstr.FromInt(9376),
-	}
-)
-
 type syncerTester struct {
 	syncer negtypes.NegSyncer
 	// keep track of the number of syncs
diff --git a/pkg/neg/syncers/transaction_test.go b/pkg/neg/syncers/transaction_test.go
index 95053404eb..cd521cf17e 100644
--- a/pkg/neg/syncers/transaction_test.go
+++ b/pkg/neg/syncers/transaction_test.go
@@ -2272,12 +2272,6 @@ func TestCollectLabelStats(t *testing.T) {
 	}
 }
 
-func newL4ILBTestTransactionSyncer(fakeGCE negtypes.NetworkEndpointGroupCloud, mode negtypes.EndpointsCalculatorMode) (negtypes.NegSyncer, *transactionSyncer) {
-	negsyncer, ts := newTestTransactionSyncer(fakeGCE, negtypes.VmIpEndpointType, false)
-	ts.endpointsCalculator = GetEndpointsCalculator(ts.podLister, ts.nodeLister, ts.serviceLister, ts.zoneGetter, ts.NegSyncerKey, mode, klog.TODO(), false, nil, &network.NetworkInfo{IsDefault: true})
-	return negsyncer, ts
-}
-
 func newTestTransactionSyncer(fakeGCE negtypes.NetworkEndpointGroupCloud, negType negtypes.NetworkEndpointType, customName bool) (negtypes.NegSyncer, *transactionSyncer) {
 	testContext := negtypes.NewTestContext()
 	svcPort := negtypes.NegSyncerKey{
@@ -2336,14 +2330,6 @@ func newTestTransactionSyncer(fakeGCE negtypes.NetworkEndpointGroupCloud, negTyp
 	return negsyncer, transactionSyncer
 }
 
-func copyMap(endpointMap map[string]negtypes.NetworkEndpointSet) map[string]negtypes.NetworkEndpointSet {
-	ret := map[string]negtypes.NetworkEndpointSet{}
-	for k, v := range endpointMap {
-		ret[k] = negtypes.NewNetworkEndpointSet(v.List()...)
-	}
-	return ret
-}
-
 func generateTransaction(table networkEndpointTransactionTable, entry transactionEntry, initialIp net.IP, num int, instance string, targetPort string) {
 	endpointSet := generateEndpointSet(initialIp, num, instance, targetPort)
 	for _, encodedEndpoint := range endpointSet.List() {
diff --git a/pkg/neg/types/cloudprovideradapter.go b/pkg/neg/types/cloudprovideradapter.go
index ee00978b20..5ad8fbf418 100644
--- a/pkg/neg/types/cloudprovideradapter.go
+++ b/pkg/neg/types/cloudprovideradapter.go
@@ -32,14 +32,10 @@ import (
 )
 
 const (
-	// aggregatedListZonalKeyPrefix is the prefix for the zonal key from AggregatedList
-	aggregatedListZonalKeyPrefix = "zones"
-	// aggregatedListGlobalKey is the global key from AggregatedList
-	aggregatedListGlobalKey      = "global"
-	negServiceName               = "NetworkEndpointGroups"
-	listNetworkEndpoints         = "ListNetworkEndpoints"
-	attachNetworkEndpoints       = "AttachNetworkEndpoints"
-	detachNetworkEndpoints       = "DetachNetworkEndpoints"
+	negServiceName         = "NetworkEndpointGroups"
+	listNetworkEndpoints   = "ListNetworkEndpoints"
+	attachNetworkEndpoints = "AttachNetworkEndpoints"
+	detachNetworkEndpoints = "DetachNetworkEndpoints"
 )
 
 // NewAdapter takes a Cloud and returns a NetworkEndpointGroupCloud.
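A formatting note on the const hunk just above: the four surviving constants are re-emitted as -/+ pairs with new spacing even though their values never changed, because gofmt vertically aligns the `=` signs of a run of single-line declarations and `aggregatedListZonalKeyPrefix` was the longest name in the run. The same effect produces the re-indented fields in the l4controller.go and neg/controller_test.go hunks. Roughly:

    package example

    // While a long name is in the run, gofmt pads every "=" out to it:
    const (
        aVeryLongConstantName = "long"
        short                 = "short"
    )

    // Delete the long name and gofmt tightens the column, so otherwise
    // untouched lines show up in the diff:
    const (
        tight = "short"
        other = "other"
    )

So cleanups driven by `unused` routinely touch neighboring lines whose content is unchanged.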
diff --git a/pkg/psc/controller.go b/pkg/psc/controller.go
index 89f174afdc..5f30b8a421 100644
--- a/pkg/psc/controller.go
+++ b/pkg/psc/controller.go
@@ -33,7 +33,6 @@ import (
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/types"
 	"k8s.io/apimachinery/pkg/util/wait"
-	"k8s.io/client-go/kubernetes"
 	"k8s.io/client-go/tools/cache"
 	"k8s.io/client-go/tools/record"
 	"k8s.io/client-go/util/workqueue"
@@ -85,8 +84,6 @@ var (
 // It watches ServiceAttachment resources and creates, deletes, and manages
 // corresponding GCE Service Attachment resources
 type Controller struct {
-	client kubernetes.Interface
-
 	cloud    *gce.Cloud
 	saClient serviceattachmentclient.Interface
 
 	svcAttachmentQueue workqueue.RateLimitingInterface
@@ -118,7 +115,6 @@ func NewController(ctx *context.ControllerContext, stopCh <-chan struct{}, logge
 	logger = logger.WithName("PSCController")
 	saNamer := namer.NewServiceAttachmentNamer(ctx.ClusterNamer, string(ctx.KubeSystemUID))
 	controller := &Controller{
-		client:   ctx.KubeClient,
 		cloud:    ctx.Cloud,
 		saClient: ctx.SAClient,
 		saNamer:  saNamer,
diff --git a/pkg/ratelimit/ratelimit.go b/pkg/ratelimit/ratelimit.go
index e3b34e02df..a0f4238faa 100644
--- a/pkg/ratelimit/ratelimit.go
+++ b/pkg/ratelimit/ratelimit.go
@@ -43,8 +43,6 @@ type GCERateLimiter struct {
 	// Minimum polling interval for getting operations. Underlying operations rate limiter
 	// may increase the time.
 	operationPollInterval time.Duration
-
-	logger klog.Logger
 }
 
 // strategyRateLimiter implements cloud.RateLimiter and uses underlying throttling.Strategy
@@ -127,7 +125,6 @@ func NewGCERateLimiter(specs []string, operationPollInterval time.Duration, logg
 		rateLimitImpls:        rateLimitImpls,
 		strategyRLs:           strategyRLs,
 		operationPollInterval: operationPollInterval,
-		logger:                logger,
 	}, nil
 }
diff --git a/pkg/utils/namer/frontendnamer.go b/pkg/utils/namer/frontendnamer.go
index e1b9e764cd..d388562c07 100644
--- a/pkg/utils/namer/frontendnamer.go
+++ b/pkg/utils/namer/frontendnamer.go
@@ -59,7 +59,6 @@ type Scheme string
 
 // V1IngressFrontendNamer implements IngressFrontendNamer. This is a wrapper on top of namer.Namer.
 type V1IngressFrontendNamer struct {
-	ing    *v1.Ingress
 	namer  *Namer
 	lbName LoadBalancerName
 }
@@ -67,7 +66,7 @@ type V1IngressFrontendNamer struct {
 // newV1IngressFrontendNamer returns v1 frontend namer for given ingress.
 func newV1IngressFrontendNamer(ing *v1.Ingress, namer *Namer, logger klog.Logger) IngressFrontendNamer {
 	lbName := namer.LoadBalancer(common.IngressKeyFunc(ing, logger))
-	return &V1IngressFrontendNamer{ing: ing, namer: namer, lbName: lbName}
+	return &V1IngressFrontendNamer{namer: namer, lbName: lbName}
 }
 
 // newV1IngressFrontendNamerForLoadBalancer returns v1 frontend namer for load balancer.
@@ -123,7 +122,6 @@ func (ln *V1IngressFrontendNamer) IsValidLoadBalancer() bool {
 
 // V2IngressFrontendNamer implements IngressFrontendNamer.
 type V2IngressFrontendNamer struct {
-	ing *v1.Ingress
 	// prefix for all resource names (ex.: "k8s").
 	prefix string
 	// Load balancer name to be included in resource name.
@@ -147,7 +145,7 @@ type V2IngressFrontendNamer struct {
 // SSL Certificate : k8s2-cr-uid01234--
 func newV2IngressFrontendNamer(ing *v1.Ingress, kubeSystemUID string, prefix string) IngressFrontendNamer {
 	clusterUID := common.ContentHash(kubeSystemUID, clusterUIDLength)
-	namer := &V2IngressFrontendNamer{ing: ing, prefix: prefix, clusterUID: clusterUID}
+	namer := &V2IngressFrontendNamer{prefix: prefix, clusterUID: clusterUID}
 	// Initialize lbName.
 	truncFields := TrimFieldsEvenly(maximumAllowedCombinedLength, ing.Namespace, ing.Name)
 	truncNamespace := truncFields[0]
diff --git a/pkg/utils/namer/serviceattachmentnamer.go b/pkg/utils/namer/serviceattachmentnamer.go
index 1d77235b5d..78037ba82f 100644
--- a/pkg/utils/namer/serviceattachmentnamer.go
+++ b/pkg/utils/namer/serviceattachmentnamer.go
@@ -30,7 +30,6 @@ const (
 
 // V1ServiceAttachment implements ServiceAttachmentNamer. This is a wrapper on top of namer.Namer.
 type V1ServiceAttachmentNamer struct {
-	namer         *Namer
 	kubeSystemUID string
 	prefix        string
 
@@ -43,7 +42,6 @@ type V1ServiceAttachmentNamer struct {
 // NewServiceAttachmentNamer returns a v1 namer for Service Attachments
 func NewServiceAttachmentNamer(namer *Namer, kubeSystemUID string) ServiceAttachmentNamer {
 	return &V1ServiceAttachmentNamer{
-		namer:               namer,
 		kubeSystemUID:       kubeSystemUID,
 		prefix:              namer.prefix,
 		maxDescriptiveLabel: maxSADescriptiveLabel - len(namer.prefix),
diff --git a/pkg/utils/utils.go b/pkg/utils/utils.go
index 250c1a6a99..c7f28bd8a9 100644
--- a/pkg/utils/utils.go
+++ b/pkg/utils/utils.go
@@ -494,54 +494,6 @@ func NodeIsReady(node *api_v1.Node) bool {
 	return false
 }
 
-func nodePredicateInternal(node *api_v1.Node, includeUnreadyNodes, excludeUpgradingNodes bool, logger klog.Logger) bool {
-	// Get all nodes that have a taint with NoSchedule effect
-	for _, taint := range node.Spec.Taints {
-		if taint.Key == ToBeDeletedTaint {
-			return false
-		}
-	}
-
-	// As of 1.6, we will taint the master, but not necessarily mark it unschedulable.
-	// Recognize nodes labeled as master, and filter them also, as we were doing previously.
-	if _, hasMasterRoleLabel := node.Labels[LabelNodeRoleMaster]; hasMasterRoleLabel {
-		return false
-	}
-
-	// Will be removed in 1.18
-	if _, hasExcludeBalancerLabel := node.Labels[LabelAlphaNodeRoleExcludeBalancer]; hasExcludeBalancerLabel {
-		return false
-	}
-
-	if _, hasExcludeBalancerLabel := node.Labels[LabelNodeRoleExcludeBalancer]; hasExcludeBalancerLabel {
-		return false
-	}
-
-	if excludeUpgradingNodes {
-		// This node is about to be upgraded or deleted as part of resize.
-		if operation, _ := node.Labels[GKECurrentOperationLabel]; operation == NodeDrain {
-			return false
-		}
-	}
-
-	// If we have no info, don't accept
-	if len(node.Status.Conditions) == 0 {
-		return false
-	}
-
-	if includeUnreadyNodes {
-		return true
-	}
-
-	for _, cond := range node.Status.Conditions {
-		// We consider the node for load balancing only when its NodeReady condition status
-		// is ConditionTrue
-		if cond.Type == api_v1.NodeReady && cond.Status != api_v1.ConditionTrue {
-			logger.V(4).Info("Ignoring node", "nodeName", node.Name, "conditionType", cond.Type, "conditionStatus", cond.Status)
-			return false
-		}
-	}
-
-	return true
-
-}
-
 // GetNodePrimaryIP returns a primary internal IP address of the node.
 func GetNodePrimaryIP(inputNode *api_v1.Node, logger klog.Logger) string {
 	ip, err := getPreferredNodeAddress(inputNode, []api_v1.NodeAddressType{api_v1.NodeInternalIP})