From ab512a46e7302085d7fb64ebdae8b8e3af63f061 Mon Sep 17 00:00:00 2001 From: Shaun Crampton Date: Fri, 30 Jun 2017 14:32:27 +0100 Subject: [PATCH 01/11] Sort selectors in KDD conversion functions. Fixes #465 --- lib/backend/k8s/conversion.go | 9 ++++++++- lib/backend/k8s/conversion_test.go | 14 ++++++++++---- 2 files changed, 18 insertions(+), 5 deletions(-) diff --git a/lib/backend/k8s/conversion.go b/lib/backend/k8s/conversion.go index 7838f9289..1c7c213c1 100644 --- a/lib/backend/k8s/conversion.go +++ b/lib/backend/k8s/conversion.go @@ -19,6 +19,7 @@ import ( "encoding/hex" goerrors "errors" "fmt" + "sort" "strings" log "github.com/Sirupsen/logrus" @@ -240,7 +241,13 @@ func (c converter) k8sSelectorToCalico(s *metav1.LabelSelector, ns *string) stri // matchLabels is a map key => value, it means match if (label[key] == // value) for all keys. - for k, v := range s.MatchLabels { + keys := make([]string, 0, len(s.MatchLabels)) + for k := range s.MatchLabels { + keys = append(keys, k) + } + sort.Strings(keys) + for _, k := range keys { + v := s.MatchLabels[k] selectors = append(selectors, fmt.Sprintf("%s%s == '%s'", prefix, k, v)) } diff --git a/lib/backend/k8s/conversion_test.go b/lib/backend/k8s/conversion_test.go index 415ea0d37..b3bc32b8e 100644 --- a/lib/backend/k8s/conversion_test.go +++ b/lib/backend/k8s/conversion_test.go @@ -214,7 +214,10 @@ var _ = Describe("Test NetworkPolicy conversion", func() { }, Spec: extensions.NetworkPolicySpec{ PodSelector: metav1.LabelSelector{ - MatchLabels: map[string]string{"label": "value"}, + MatchLabels: map[string]string{ + "label": "value", + "label2": "value2", + }, }, Ingress: []extensions.NetworkPolicyIngressRule{ { @@ -225,7 +228,8 @@ var _ = Describe("Test NetworkPolicy conversion", func() { { PodSelector: &metav1.LabelSelector{ MatchLabels: map[string]string{ - "k": "v", + "k": "v", + "k2": "v2", }, }, }, @@ -244,12 +248,14 @@ var _ = Describe("Test NetworkPolicy conversion", func() { // Assert value fields are correct. Expect(int(*pol.Value.(*model.Policy).Order)).To(Equal(1000)) - Expect(pol.Value.(*model.Policy).Selector).To(Equal("calico/k8s_ns == 'default' && label == 'value'")) + // Check the selector is correct, and that the matches are sorted. + Expect(pol.Value.(*model.Policy).Selector).To(Equal( + "calico/k8s_ns == 'default' && label == 'value' && label2 == 'value2'")) protoTCP := numorstring.ProtocolFromString("tcp") Expect(pol.Value.(*model.Policy).InboundRules).To(ConsistOf(model.Rule{ Action: "allow", Protocol: &protoTCP, // Defaulted to TCP. 
- SrcSelector: "calico/k8s_ns == 'default' && k == 'v'", + SrcSelector: "calico/k8s_ns == 'default' && k == 'v' && k2 == 'v2'", DstPorts: []numorstring.Port{numorstring.SinglePort(80)}, })) From c69d55c2366107fab5d7ec3ac177aea276ee3bb9 Mon Sep 17 00:00:00 2001 From: matt Date: Fri, 30 Jun 2017 13:44:36 -0700 Subject: [PATCH 02/11] Fixed bug preventing specific profiles from being accessed from namespaces --- lib/backend/k8s/conversion.go | 13 +++++++------ lib/backend/k8s/conversion_test.go | 11 +++++++++-- lib/backend/k8s/k8s.go | 1 + lib/backend/k8s/k8s_fv_test.go | 4 ++-- 4 files changed, 19 insertions(+), 10 deletions(-) diff --git a/lib/backend/k8s/conversion.go b/lib/backend/k8s/conversion.go index 7838f9289..ab9270d0b 100644 --- a/lib/backend/k8s/conversion.go +++ b/lib/backend/k8s/conversion.go @@ -17,7 +17,6 @@ package k8s import ( "crypto/sha1" "encoding/hex" - goerrors "errors" "fmt" "strings" @@ -68,7 +67,7 @@ func (c converter) parsePolicyNameNamespace(name string) (string, error) { // parsePolicyNameNetworkPolicy extracts the Kubernetes Namespace and NetworkPolicy that backs the given Policy. func (c converter) parsePolicyNameNetworkPolicy(name string) (string, string, error) { - // Policies backed by NetworkPolicies have form "np.projectcalico.org/. + // Policies backed by NetworkPolicies have form "np.projectcalico.org/." if !strings.HasPrefix(name, "np.projectcalico.org/") { // This is not backed by a Kubernetes NetworkPolicy. return "", "", fmt.Errorf("Policy %s not backed by a NetworkPolicy", name) @@ -84,11 +83,13 @@ func (c converter) parsePolicyNameNetworkPolicy(name string) (string, string, er // parseProfileName extracts the Namespace name from the given Profile name. func (c converter) parseProfileName(profileName string) (string, error) { - splits := strings.SplitN(profileName, ".", 2) - if len(splits) != 2 { - return "", goerrors.New(fmt.Sprintf("Invalid profile name: %s", profileName)) + // Profile objects backed by Namespaces have form "ns.projectcalico.org/" + if !strings.HasPrefix(profileName, "ns.projectcalico.org/") { + // This is not backed by a Kubernetes Namespace. + return "", fmt.Errorf("Profile %s not backed by a Namespace", profileName) } - return splits[1], nil + + return strings.TrimPrefix(profileName, "ns.projectcalico.org/"), nil } // namespaceToProfile converts a Namespace to a Calico Profile. 
The Profile stores diff --git a/lib/backend/k8s/conversion_test.go b/lib/backend/k8s/conversion_test.go index 415ea0d37..19ce9009b 100644 --- a/lib/backend/k8s/conversion_test.go +++ b/lib/backend/k8s/conversion_test.go @@ -73,12 +73,19 @@ var _ = Describe("Test parsing strings", func() { Expect(ns).To(Equal("")) }) - It("should parse profile names", func() { - name := "k8s_ns.default" + It("should parse valid profile names", func() { + name := "ns.projectcalico.org/default" ns, err := c.parseProfileName(name) Expect(ns).To(Equal("default")) Expect(err).NotTo(HaveOccurred()) }) + + It("should not parse invalid profile names", func() { + name := "k8s_ns.default" + ns, err := c.parseProfileName(name) + Expect(err).To(HaveOccurred()) + Expect(ns).To(Equal("")) + }) }) var _ = Describe("Test Pod conversion", func() { diff --git a/lib/backend/k8s/k8s.go b/lib/backend/k8s/k8s.go index 6de2999b2..3c655466d 100644 --- a/lib/backend/k8s/k8s.go +++ b/lib/backend/k8s/k8s.go @@ -471,6 +471,7 @@ func (c *KubeClient) listProfiles(l model.ProfileListOptions) ([]*model.KVPair, if l.Name != "" { kvp, err := c.getProfile(model.ProfileKey{Name: l.Name}) if err != nil { + log.WithError(err).Debug("Error retrieving profile") return []*model.KVPair{}, nil } return []*model.KVPair{kvp}, nil diff --git a/lib/backend/k8s/k8s_fv_test.go b/lib/backend/k8s/k8s_fv_test.go index b922723a3..06069958d 100644 --- a/lib/backend/k8s/k8s_fv_test.go +++ b/lib/backend/k8s/k8s_fv_test.go @@ -315,7 +315,7 @@ var _ = Describe("Test Syncer API for Kubernetes backend", func() { }) By("Performing a Get on the Profile and ensure no error in the Calico API", func() { - _, err := c.Get(model.ProfileKey{Name: fmt.Sprintf("default.%s", ns.ObjectMeta.Name)}) + _, err := c.Get(model.ProfileKey{Name: fmt.Sprintf("ns.projectcalico.org/%s", ns.ObjectMeta.Name)}) Expect(err).NotTo(HaveOccurred()) }) @@ -372,7 +372,7 @@ var _ = Describe("Test Syncer API for Kubernetes backend", func() { // Perform a Get and ensure no error in the Calico API. 
By("getting a Profile", func() { - _, err := c.Get(model.ProfileKey{Name: fmt.Sprintf("default.%s", ns.ObjectMeta.Name)}) + _, err := c.Get(model.ProfileKey{Name: fmt.Sprintf("ns.projectcalico.org/%s", ns.ObjectMeta.Name)}) Expect(err).NotTo(HaveOccurred()) }) From 29df6fda0782066e6a247b7d40076f6a5f02f8c3 Mon Sep 17 00:00:00 2001 From: Rob Brockbank Date: Tue, 27 Jun 2017 15:50:55 -0700 Subject: [PATCH 03/11] Add per-node config infrastructure and add per-node BGP peers --- lib/backend/k8s/k8s.go | 15 + lib/backend/k8s/k8s_fv_test.go | 264 +++++++++-- .../k8s/resources/customnoderesource.go | 435 ++++++++++++++++++ lib/backend/k8s/resources/customresource.go | 1 - lib/backend/k8s/resources/errors.go | 6 + lib/backend/k8s/resources/globalbgppeer.go | 2 +- .../k8s/resources/globalbgppeer_test.go | 2 +- .../k8s/resources/globalconfig_test.go | 4 +- lib/backend/k8s/resources/ippool.go | 8 +- lib/backend/k8s/resources/ippool_test.go | 5 +- lib/backend/k8s/resources/names_test.go | 46 +- lib/backend/k8s/resources/node.go | 16 +- lib/backend/k8s/resources/nodebgppeer.go | 63 +++ lib/backend/k8s/resources/nodebgppeer_test.go | 131 ++++++ .../k8s/resources/resources_suite_test.go | 27 ++ lib/backend/k8s/resources/retrywrapper.go | 163 +++++++ lib/backend/model/bgppeer.go | 18 +- lib/client/bgppeer.go | 2 +- lib/client/bgppeer_e2e_test.go | 123 +---- lib/errors/errors.go | 1 + 20 files changed, 1134 insertions(+), 198 deletions(-) create mode 100644 lib/backend/k8s/resources/customnoderesource.go create mode 100644 lib/backend/k8s/resources/nodebgppeer.go create mode 100644 lib/backend/k8s/resources/nodebgppeer_test.go create mode 100644 lib/backend/k8s/resources/resources_suite_test.go create mode 100644 lib/backend/k8s/resources/retrywrapper.go diff --git a/lib/backend/k8s/k8s.go b/lib/backend/k8s/k8s.go index 3c655466d..c739a0157 100644 --- a/lib/backend/k8s/k8s.go +++ b/lib/backend/k8s/k8s.go @@ -59,6 +59,7 @@ type KubeClient struct { // Clients for interacting with Calico resources. 
globalBgpClient resources.K8sResourceClient + nodeBgpClient resources.K8sResourceClient globalConfigClient resources.K8sResourceClient ipPoolClient resources.K8sResourceClient snpClient resources.K8sResourceClient @@ -133,6 +134,7 @@ func NewKubeClient(kc *capi.KubeConfig) (*KubeClient, error) { kubeClient.nodeClient = resources.NewNodeClient(cs, tprClientV1) kubeClient.snpClient = resources.NewSystemNetworkPolicyClient(cs, tprClientV1alpha) kubeClient.globalBgpClient = resources.NewGlobalBGPPeerClient(cs, tprClientV1) + kubeClient.nodeBgpClient = resources.NewNodeBGPPeerClient(cs) kubeClient.globalConfigClient = resources.NewGlobalConfigClient(cs, tprClientV1) return kubeClient, nil @@ -323,6 +325,8 @@ func (c *KubeClient) Create(d *model.KVPair) (*model.KVPair, error) { return c.nodeClient.Create(d) case model.GlobalBGPPeerKey: return c.globalBgpClient.Create(d) + case model.NodeBGPPeerKey: + return c.nodeBgpClient.Create(d) default: log.Warn("Attempt to 'Create' using kubernetes backend is not supported.") return nil, errors.ErrorOperationNotSupported{ @@ -345,6 +349,8 @@ func (c *KubeClient) Update(d *model.KVPair) (*model.KVPair, error) { return c.nodeClient.Update(d) case model.GlobalBGPPeerKey: return c.globalBgpClient.Update(d) + case model.NodeBGPPeerKey: + return c.nodeBgpClient.Update(d) default: log.Warn("Attempt to 'Update' using kubernetes backend is not supported.") return nil, errors.ErrorOperationNotSupported{ @@ -369,6 +375,8 @@ func (c *KubeClient) Apply(d *model.KVPair) (*model.KVPair, error) { return c.nodeClient.Apply(d) case model.GlobalBGPPeerKey: return c.globalBgpClient.Apply(d) + case model.NodeBGPPeerKey: + return c.nodeBgpClient.Apply(d) case model.ActiveStatusReportKey, model.LastStatusReportKey, model.HostEndpointStatusKey, model.WorkloadEndpointStatusKey: // Felix periodically reports status to the datastore. This isn't supported @@ -396,6 +404,8 @@ func (c *KubeClient) Delete(d *model.KVPair) error { return c.nodeClient.Delete(d) case model.GlobalBGPPeerKey: return c.globalBgpClient.Delete(d) + case model.NodeBGPPeerKey: + return c.nodeBgpClient.Delete(d) default: log.Warn("Attempt to 'Delete' using kubernetes backend is not supported.") return errors.ErrorOperationNotSupported{ @@ -427,6 +437,8 @@ func (c *KubeClient) Get(k model.Key) (*model.KVPair, error) { return c.nodeClient.Get(k.(model.NodeKey)) case model.GlobalBGPPeerKey: return c.globalBgpClient.Get(k) + case model.NodeBGPPeerKey: + return c.nodeBgpClient.Get(k) default: return nil, errors.ErrorOperationNotSupported{ Identifier: k, @@ -457,6 +469,9 @@ func (c *KubeClient) List(l model.ListInterface) ([]*model.KVPair, error) { case model.GlobalBGPPeerListOptions: k, _, err := c.globalBgpClient.List(l) return k, err + case model.NodeBGPPeerListOptions: + k, _, err := c.nodeBgpClient.List(l) + return k, err case model.GlobalConfigListOptions: k, _, err := c.globalConfigClient.List(l) return k, err diff --git a/lib/backend/k8s/k8s_fv_test.go b/lib/backend/k8s/k8s_fv_test.go index 06069958d..d71109a71 100644 --- a/lib/backend/k8s/k8s_fv_test.go +++ b/lib/backend/k8s/k8s_fv_test.go @@ -495,20 +495,20 @@ var _ = Describe("Test Syncer API for Kubernetes backend", func() { // to fan-out Policy CRUD operations to the appropriate KDD client // based on the prefix). 
kvp1Name := "snp.projectcalico.org/my-test-snp" - kvp1 := &model.KVPair{ + kvp1a := &model.KVPair{ Key: model.PolicyKey{Name: kvp1Name}, Value: &calicoAllowPolicyModel, } - kvp1_2 := &model.KVPair{ + kvp1b := &model.KVPair{ Key: model.PolicyKey{Name: kvp1Name}, Value: &calicoDisallowPolicyModel, } kvp2Name := "snp.projectcalico.org/my-test-snp2" - kvp2 := &model.KVPair{ + kvp2a := &model.KVPair{ Key: model.PolicyKey{Name: kvp2Name}, Value: &calicoAllowPolicyModel, } - kvp2_2 := &model.KVPair{ + kvp2b := &model.KVPair{ Key: model.PolicyKey{Name: kvp2Name}, Value: &calicoDisallowPolicyModel, } @@ -516,71 +516,71 @@ var _ = Describe("Test Syncer API for Kubernetes backend", func() { // Make sure we clean up after ourselves. We allow this to fail because // part of our explicit testing below is to delete the resource. defer func() { - c.snpClient.Delete(kvp1) - c.snpClient.Delete(kvp2) + c.snpClient.Delete(kvp1a) + c.snpClient.Delete(kvp2a) }() // Check our syncer has the correct SNP entries for the two // System Network Protocols that this test manipulates. Neither // have been created yet. By("Checking cache does not have System Network Policy entries", func() { - Eventually(cb.GetSyncerValuePresentFunc(kvp1.Key)).Should(BeFalse()) - Eventually(cb.GetSyncerValuePresentFunc(kvp2.Key)).Should(BeFalse()) + Eventually(cb.GetSyncerValuePresentFunc(kvp1a.Key)).Should(BeFalse()) + Eventually(cb.GetSyncerValuePresentFunc(kvp2a.Key)).Should(BeFalse()) }) By("Creating a System Network Policy", func() { - _, err := c.snpClient.Create(kvp1) + _, err := c.snpClient.Create(kvp1a) Expect(err).NotTo(HaveOccurred()) }) By("Checking cache has correct System Network Policy entries", func() { - Eventually(cb.GetSyncerValueFunc(kvp1.Key)).Should(Equal(kvp1.Value)) - Eventually(cb.GetSyncerValuePresentFunc(kvp2.Key)).Should(BeFalse()) + Eventually(cb.GetSyncerValueFunc(kvp1a.Key)).Should(Equal(kvp1a.Value)) + Eventually(cb.GetSyncerValuePresentFunc(kvp2a.Key)).Should(BeFalse()) }) By("Attempting to recreate an existing System Network Policy", func() { - _, err := c.snpClient.Create(kvp1) + _, err := c.snpClient.Create(kvp1a) Expect(err).To(HaveOccurred()) }) By("Updating an existing System Network Policy", func() { - _, err := c.snpClient.Update(kvp1_2) + _, err := c.snpClient.Update(kvp1b) Expect(err).NotTo(HaveOccurred()) }) By("Checking cache has correct System Network Policy entries", func() { - Eventually(cb.GetSyncerValueFunc(kvp1.Key)).Should(Equal(kvp1_2.Value)) - Eventually(cb.GetSyncerValuePresentFunc(kvp2.Key)).Should(BeFalse()) + Eventually(cb.GetSyncerValueFunc(kvp1a.Key)).Should(Equal(kvp1b.Value)) + Eventually(cb.GetSyncerValuePresentFunc(kvp2a.Key)).Should(BeFalse()) }) By("Applying a non-existent System Network Policy", func() { - _, err := c.snpClient.Apply(kvp2) + _, err := c.snpClient.Apply(kvp2a) Expect(err).NotTo(HaveOccurred()) }) By("Checking cache has correct System Network Policy entries", func() { - Eventually(cb.GetSyncerValueFunc(kvp1.Key)).Should(Equal(kvp1_2.Value)) - Eventually(cb.GetSyncerValueFunc(kvp2.Key)).Should(Equal(kvp2.Value)) + Eventually(cb.GetSyncerValueFunc(kvp1a.Key)).Should(Equal(kvp1b.Value)) + Eventually(cb.GetSyncerValueFunc(kvp2a.Key)).Should(Equal(kvp2a.Value)) }) By("Updating the System Network Policy created by Apply", func() { - _, err := c.snpClient.Apply(kvp2_2) + _, err := c.snpClient.Apply(kvp2b) Expect(err).NotTo(HaveOccurred()) }) By("Checking cache has correct System Network Policy entries", func() { - 
Eventually(cb.GetSyncerValueFunc(kvp1.Key)).Should(Equal(kvp1_2.Value)) - Eventually(cb.GetSyncerValueFunc(kvp2.Key)).Should(Equal(kvp2_2.Value)) + Eventually(cb.GetSyncerValueFunc(kvp1a.Key)).Should(Equal(kvp1b.Value)) + Eventually(cb.GetSyncerValueFunc(kvp2a.Key)).Should(Equal(kvp2b.Value)) }) By("Deleted the System Network Policy created by Apply", func() { - err := c.snpClient.Delete(kvp2) + err := c.snpClient.Delete(kvp2a) Expect(err).NotTo(HaveOccurred()) }) By("Checking cache has correct System Network Policy entries", func() { - Eventually(cb.GetSyncerValueFunc(kvp1.Key)).Should(Equal(kvp1_2.Value)) - Eventually(cb.GetSyncerValuePresentFunc(kvp2.Key)).Should(BeFalse()) + Eventually(cb.GetSyncerValueFunc(kvp1a.Key)).Should(Equal(kvp1b.Value)) + Eventually(cb.GetSyncerValuePresentFunc(kvp2a.Key)).Should(BeFalse()) }) // Perform Get operations directly on the main client - this @@ -601,7 +601,7 @@ var _ = Describe("Test Syncer API for Kubernetes backend", func() { kvp, err := c.Get(model.PolicyKey{Name: "snp.projectcalico.org/my-test-snp"}) Expect(err).ToNot(HaveOccurred()) Expect(kvp.Key.(model.PolicyKey).Name).To(Equal("snp.projectcalico.org/my-test-snp")) - Expect(kvp.Value.(*model.Policy)).To(Equal(kvp1_2.Value)) + Expect(kvp.Value.(*model.Policy)).To(Equal(kvp1b.Value)) }) By("Listing all policies (including a System Network Policy)", func() { @@ -611,22 +611,22 @@ var _ = Describe("Test Syncer API for Kubernetes backend", func() { Expect(err).ToNot(HaveOccurred()) Expect(kvps).To(HaveLen(1)) Expect(kvps[len(kvps)-1].Key.(model.PolicyKey).Name).To(Equal("snp.projectcalico.org/my-test-snp")) - Expect(kvps[len(kvps)-1].Value.(*model.Policy)).To(Equal(kvp1_2.Value)) + Expect(kvps[len(kvps)-1].Value.(*model.Policy)).To(Equal(kvp1b.Value)) }) By("Deleting an existing System Network Policy", func() { - err := c.snpClient.Delete(kvp1) + err := c.snpClient.Delete(kvp1a) Expect(err).NotTo(HaveOccurred()) }) By("Checking cache has no System Network Policy entries", func() { - Eventually(cb.GetSyncerValuePresentFunc(kvp1.Key)).Should(BeFalse()) - Eventually(cb.GetSyncerValuePresentFunc(kvp2.Key)).Should(BeFalse()) + Eventually(cb.GetSyncerValuePresentFunc(kvp1a.Key)).Should(BeFalse()) + Eventually(cb.GetSyncerValuePresentFunc(kvp2a.Key)).Should(BeFalse()) }) }) It("should handle a CRUD of Global BGP Peer", func() { - kvp1 := &model.KVPair{ + kvp1a := &model.KVPair{ Key: model.GlobalBGPPeerKey{ PeerIP: cnet.MustParseIP("10.0.0.1"), }, @@ -635,7 +635,7 @@ var _ = Describe("Test Syncer API for Kubernetes backend", func() { ASNum: numorstring.ASNumber(6512), }, } - kvp1_2 := &model.KVPair{ + kvp1b := &model.KVPair{ Key: model.GlobalBGPPeerKey{ PeerIP: cnet.MustParseIP("10.0.0.1"), }, @@ -644,7 +644,7 @@ var _ = Describe("Test Syncer API for Kubernetes backend", func() { ASNum: numorstring.ASNumber(6513), }, } - kvp2 := &model.KVPair{ + kvp2a := &model.KVPair{ Key: model.GlobalBGPPeerKey{ PeerIP: cnet.MustParseIP("aa:bb::cc"), }, @@ -653,7 +653,7 @@ var _ = Describe("Test Syncer API for Kubernetes backend", func() { ASNum: numorstring.ASNumber(6514), }, } - kvp2_2 := &model.KVPair{ + kvp2b := &model.KVPair{ Key: model.GlobalBGPPeerKey{ PeerIP: cnet.MustParseIP("aa:bb::cc"), }, @@ -665,32 +665,32 @@ var _ = Describe("Test Syncer API for Kubernetes backend", func() { // Make sure we clean up after ourselves. We allow this to fail because // part of our explicit testing below is to delete the resource. 
defer func() { - c.Delete(kvp1) - c.Delete(kvp2) + c.Delete(kvp1a) + c.Delete(kvp2a) }() By("Creating a Global BGP Peer", func() { - _, err := c.Create(kvp1) + _, err := c.Create(kvp1a) Expect(err).NotTo(HaveOccurred()) }) By("Attempting to recreate an existing Global BGP Peer", func() { - _, err := c.Create(kvp1) + _, err := c.Create(kvp1a) Expect(err).To(HaveOccurred()) }) By("Updating an existing Global BGP Peer", func() { - _, err := c.Update(kvp1_2) + _, err := c.Update(kvp1b) Expect(err).NotTo(HaveOccurred()) }) By("Applying a non-existent Global BGP Peer", func() { - _, err := c.Apply(kvp2) + _, err := c.Apply(kvp2a) Expect(err).NotTo(HaveOccurred()) }) By("Updating the Global BGP Peer created by Apply", func() { - _, err := c.Apply(kvp2_2) + _, err := c.Apply(kvp2b) Expect(err).NotTo(HaveOccurred()) }) @@ -709,22 +709,22 @@ var _ = Describe("Test Syncer API for Kubernetes backend", func() { kvps, err := c.List(model.GlobalBGPPeerListOptions{PeerIP: cnet.MustParseIP("10.0.0.1")}) Expect(err).ToNot(HaveOccurred()) Expect(kvps).To(HaveLen(1)) - Expect(kvps[0].Key).To(Equal(kvp1_2.Key)) - Expect(kvps[0].Value).To(Equal(kvp1_2.Value)) + Expect(kvps[0].Key).To(Equal(kvp1b.Key)) + Expect(kvps[0].Value).To(Equal(kvp1b.Value)) }) By("Listing all Global BGP Peers (should be 2)", func() { kvps, err := c.List(model.GlobalBGPPeerListOptions{}) Expect(err).ToNot(HaveOccurred()) Expect(kvps).To(HaveLen(2)) - Expect(kvps[0].Key).To(Equal(kvp1_2.Key)) - Expect(kvps[0].Value).To(Equal(kvp1_2.Value)) - Expect(kvps[1].Key).To(Equal(kvp2_2.Key)) - Expect(kvps[01].Value).To(Equal(kvp2_2.Value)) + Expect(kvps[0].Key).To(Equal(kvp1b.Key)) + Expect(kvps[0].Value).To(Equal(kvp1b.Value)) + Expect(kvps[1].Key).To(Equal(kvp2b.Key)) + Expect(kvps[01].Value).To(Equal(kvp2b.Value)) }) By("Deleting the Global BGP Peer created by Apply", func() { - err := c.Delete(kvp2) + err := c.Delete(kvp2a) Expect(err).NotTo(HaveOccurred()) }) @@ -732,16 +732,178 @@ var _ = Describe("Test Syncer API for Kubernetes backend", func() { kvps, err := c.List(model.GlobalBGPPeerListOptions{}) Expect(err).ToNot(HaveOccurred()) Expect(kvps).To(HaveLen(1)) - Expect(kvps[0].Key).To(Equal(kvp1_2.Key)) - Expect(kvps[0].Value).To(Equal(kvp1_2.Value)) + Expect(kvps[0].Key).To(Equal(kvp1b.Key)) + Expect(kvps[0].Value).To(Equal(kvp1b.Value)) }) By("Deleting an existing Global BGP Peer", func() { - err := c.Delete(kvp1) + err := c.Delete(kvp1a) Expect(err).NotTo(HaveOccurred()) }) }) + It("should handle a CRUD of Node BGP Peer", func() { + var kvp1a, kvp1b, kvp2a, kvp2b *model.KVPair + var nodename string + + // Make sure we clean up after ourselves. We allow this to fail because + // part of our explicit testing below is to delete the resource. 
+ defer func() { + log.Debug("Deleting Node BGP Peers") + if peers, err := c.List(model.NodeBGPPeerListOptions{}); err == nil { + log.WithField("Peers", peers).Debug("Deleting resources") + for _, peer := range peers { + log.WithField("Key", peer.Key).Debug("Deleting resource") + peer.Revision = nil + _ = c.Delete(peer) + } + } + }() + + By("Listing all Nodes to find a suitable Node name", func() { + nodes, err := c.List(model.NodeListOptions{}) + Expect(err).NotTo(HaveOccurred()) + // Get the hostname so we can make a Get call + kvp := *nodes[0] + nodename = kvp.Key.(model.NodeKey).Hostname + kvp1a = &model.KVPair{ + Key: model.NodeBGPPeerKey{ + PeerIP: cnet.MustParseIP("10.0.0.1"), + Nodename: nodename, + }, + Value: &model.BGPPeer{ + PeerIP: cnet.MustParseIP("10.0.0.1"), + ASNum: numorstring.ASNumber(6512), + }, + } + kvp1b = &model.KVPair{ + Key: model.NodeBGPPeerKey{ + PeerIP: cnet.MustParseIP("10.0.0.1"), + Nodename: nodename, + }, + Value: &model.BGPPeer{ + PeerIP: cnet.MustParseIP("10.0.0.1"), + ASNum: numorstring.ASNumber(6513), + }, + } + kvp2a = &model.KVPair{ + Key: model.NodeBGPPeerKey{ + PeerIP: cnet.MustParseIP("aa:bb::cc"), + Nodename: nodename, + }, + Value: &model.BGPPeer{ + PeerIP: cnet.MustParseIP("aa:bb::cc"), + ASNum: numorstring.ASNumber(6514), + }, + } + kvp2b = &model.KVPair{ + Key: model.NodeBGPPeerKey{ + PeerIP: cnet.MustParseIP("aa:bb::cc"), + Nodename: nodename, + }, + Value: &model.BGPPeer{ + PeerIP: cnet.MustParseIP("aa:bb::cc"), + }, + } + }) + + By("Creating a Node BGP Peer", func() { + _, err := c.Create(kvp1a) + Expect(err).NotTo(HaveOccurred()) + }) + + By("Attempting to recreate an existing Node BGP Peer", func() { + _, err := c.Create(kvp1a) + Expect(err).To(HaveOccurred()) + }) + + By("Updating an existing Node BGP Peer", func() { + _, err := c.Update(kvp1b) + Expect(err).NotTo(HaveOccurred()) + }) + + By("Applying a non-existent Node BGP Peer", func() { + _, err := c.Apply(kvp2a) + Expect(err).NotTo(HaveOccurred()) + }) + + By("Updating the Node BGP Peer created by Apply", func() { + _, err := c.Apply(kvp2b) + Expect(err).NotTo(HaveOccurred()) + }) + + By("Getting a missing Node BGP Peer (wrong IP)", func() { + _, err := c.Get(model.NodeBGPPeerKey{Nodename: nodename, PeerIP: cnet.MustParseIP("1.1.1.1")}) + Expect(err).To(HaveOccurred()) + }) + + By("Getting a missing Node BGP Peer (wrong nodename)", func() { + _, err := c.Get(model.NodeBGPPeerKey{Nodename: "foobarbaz", PeerIP: cnet.MustParseIP("10.0.0.1")}) + Expect(err).To(HaveOccurred()) + }) + + By("Listing a missing Node BGP Peer (wrong IP)", func() { + kvps, err := c.List(model.NodeBGPPeerListOptions{PeerIP: cnet.MustParseIP("aa:bb:cc:dd::ee")}) + Expect(err).ToNot(HaveOccurred()) + Expect(kvps).To(HaveLen(0)) + }) + + By("Listing a missing Node BGP Peer (wrong nodename)", func() { + kvps, err := c.List(model.NodeBGPPeerListOptions{Nodename: "foobarbaz"}) + Expect(err).ToNot(HaveOccurred()) + Expect(kvps).To(HaveLen(0)) + }) + + By("Listing an explicit Node BGP Peer (IP specific, Node is missing)", func() { + kvps, err := c.List(model.NodeBGPPeerListOptions{PeerIP: cnet.MustParseIP("10.0.0.1")}) + Expect(err).ToNot(HaveOccurred()) + Expect(kvps).To(HaveLen(1)) + Expect(kvps[0].Key).To(Equal(kvp1b.Key)) + Expect(kvps[0].Value).To(Equal(kvp1b.Value)) + }) + + By("Listing an explicit Node BGP Peer (IP and Node are specified)", func() { + kvps, err := c.List(model.NodeBGPPeerListOptions{Nodename: nodename, PeerIP: cnet.MustParseIP("10.0.0.1")}) + Expect(err).ToNot(HaveOccurred()) + 
Expect(kvps).To(HaveLen(1)) + Expect(kvps[0].Key).To(Equal(kvp1b.Key)) + Expect(kvps[0].Value).To(Equal(kvp1b.Value)) + }) + + By("Listing all Node BGP Peers (should be 2)", func() { + kvps, err := c.List(model.NodeBGPPeerListOptions{}) + Expect(err).ToNot(HaveOccurred()) + Expect(kvps).To(HaveLen(2)) + Expect(kvps[0].Key).To(Equal(kvp1b.Key)) + Expect(kvps[0].Value).To(Equal(kvp1b.Value)) + Expect(kvps[1].Key).To(Equal(kvp2b.Key)) + Expect(kvps[1].Value).To(Equal(kvp2b.Value)) + }) + + By("Deleting the Node BGP Peer created by Apply", func() { + err := c.Delete(kvp2a) + Expect(err).NotTo(HaveOccurred()) + }) + + By("Listing all Node BGP Peers (should now be 1)", func() { + kvps, err := c.List(model.NodeBGPPeerListOptions{}) + Expect(err).ToNot(HaveOccurred()) + Expect(kvps).To(HaveLen(1)) + Expect(kvps[0].Key).To(Equal(kvp1b.Key)) + Expect(kvps[0].Value).To(Equal(kvp1b.Value)) + }) + + By("Deleting an existing Node BGP Peer", func() { + err := c.Delete(kvp1a) + Expect(err).NotTo(HaveOccurred()) + }) + + By("Deleting a non-existent Node BGP Peer", func() { + err := c.Delete(kvp1a) + Expect(err).To(HaveOccurred()) + }) + }) + It("should handle a basic Pod", func() { pod := k8sapi.Pod{ ObjectMeta: metav1.ObjectMeta{ @@ -864,7 +1026,7 @@ var _ = Describe("Test Syncer API for Kubernetes backend", func() { }() It("should not error on unsupported List() calls", func() { - objs, err := c.List(model.NodeBGPPeerListOptions{}) + objs, err := c.List(model.BlockAffinityListOptions{}) Expect(err).NotTo(HaveOccurred()) Expect(len(objs)).To(Equal(0)) }) diff --git a/lib/backend/k8s/resources/customnoderesource.go b/lib/backend/k8s/resources/customnoderesource.go new file mode 100644 index 000000000..5b1799992 --- /dev/null +++ b/lib/backend/k8s/resources/customnoderesource.go @@ -0,0 +1,435 @@ +// Copyright (c) 2017 Tigera, Inc. All rights reserved. + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package resources + +import ( + "sort" + "strings" + + "github.com/projectcalico/libcalico-go/lib/backend/model" + "github.com/projectcalico/libcalico-go/lib/errors" + + log "github.com/Sirupsen/logrus" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/kubernetes" + apiv1 "k8s.io/client-go/pkg/api/v1" +) + +// Action strings - used for context logging. +type action string + +const ( + actApply action = "Apply" + actCreate action = "Create" + actUpdate action = "Update" +) + +// CustomK8sNodeResourceConverter defines an interface to map between the model and the +// annotation representation of a resource., +type CustomK8sNodeResourceConverter interface { + // ListInterfaceToNodeAndName converts the ListInterface to the node name + // and resource name. + ListInterfaceToNodeAndName(model.ListInterface) (string, string, error) + + // KeyToNodeAndName converts the Key to the node name and resource name. + KeyToNodeAndName(model.Key) (string, string, error) + + // NodeAndNameToKey converts the Node name and resource name to a Key. 
+ NodeAndNameToKey(string, string) (model.Key, error) +} + +// CustomK8sNodeResourceClientConfig is the config required for initializing a new +// per-node K8sResourceClient +type CustomK8sNodeResourceClientConfig struct { + ClientSet *kubernetes.Clientset + ResourceType string + Converter CustomK8sNodeResourceConverter + Namespace string +} + +// NewCustomK8sNodeResourceClient creates a new per-node K8sResourceClient. +func NewCustomK8sNodeResourceClient(config CustomK8sNodeResourceClientConfig) K8sResourceClient { + return &retryWrapper{ + client: &customK8sNodeResourceClient{ + CustomK8sNodeResourceClientConfig: config, + annotationKeyPrefix: config.Namespace + "/", + }, + } +} + +// customK8sNodeResourceClient implements the K8sResourceClient interface. It +// should only be created using NewCustomK8sNodeResourceClient since that +// ensures it is wrapped with a retryWrapper. +type customK8sNodeResourceClient struct { + CustomK8sNodeResourceClientConfig + annotationKeyPrefix string +} + +func (c *customK8sNodeResourceClient) Create(kvp *model.KVPair) (*model.KVPair, error) { + logContext := log.WithFields(log.Fields{ + "Key": kvp.Key, + "Value": kvp.Value, + "Resource": c.ResourceType, + "Namespace": c.Namespace, + }) + logContext.Debug("Create per-Node resource") + + // Get the resource name and the latest Node settings associated with the Key. + resName, node, err := c.getNameAndNodeFromKey(kvp.Key) + if err != nil { + logContext.WithError(err).Error("Failed to create resource") + return nil, err + } + + // There should be no existing entry for a Create. + if _, ok := node.Annotations[c.nameToAnnotationKey(resName)]; ok { + logContext.Warning("Failed to create resource: resource already exists") + return nil, errors.ErrorResourceAlreadyExists{Identifier: kvp.Key} + } + + return c.applyResourceToAnnotation(node, resName, kvp, actCreate) +} + +func (c *customK8sNodeResourceClient) Update(kvp *model.KVPair) (*model.KVPair, error) { + logContext := log.WithFields(log.Fields{ + "Key": kvp.Key, + "Value": kvp.Value, + "Resource": c.ResourceType, + "Namespace": c.Namespace, + }) + logContext.Debug("Update per-Node resource") + + // Get the resource name and the latest Node resource associated with the Key. + resName, node, err := c.getNameAndNodeFromKey(kvp.Key) + if err != nil { + logContext.WithError(err).Error("Failed to update resource") + return nil, err + } + + // There should be an existing entry for an Update. + if _, ok := node.Annotations[c.nameToAnnotationKey(resName)]; !ok { + logContext.Warning("Failed to update resource: resource does not exist") + return nil, errors.ErrorResourceDoesNotExist{Identifier: kvp.Key} + } + + return c.applyResourceToAnnotation(node, resName, kvp, actUpdate) +} + +func (c *customK8sNodeResourceClient) Apply(kvp *model.KVPair) (*model.KVPair, error) { + logContext := log.WithFields(log.Fields{ + "Key": kvp.Key, + "Value": kvp.Value, + "Resource": c.ResourceType, + "Namespace": c.Namespace, + }) + logContext.Debug("Apply per-Node resource") + + // Get the names and the latest Node settings associated with the Key. + // We don't need to extract the current settings because we don't need to check + // for presence of the resource entry.
+ resName, node, err := c.getNameAndNodeFromKey(kvp.Key) + if err != nil { + logContext.WithError(err).Error("Failed to apply resource") + return nil, err + } + + return c.applyResourceToAnnotation(node, resName, kvp, actApply) +} + +func (c *customK8sNodeResourceClient) Delete(kvp *model.KVPair) error { + logContext := log.WithFields(log.Fields{ + "Key": kvp.Key, + "Resource": c.ResourceType, + "Namespace": c.Namespace, + }) + logContext.Debug("Delete per-Node resource") + + // Get the name and the latest Node settings associated with the Key, and + // convert the name to the annotation key. + resName, node, err := c.getNameAndNodeFromKey(kvp.Key) + if err != nil { + logContext.WithError(err).Error("Failed to delete resource") + return err + } + ak := c.nameToAnnotationKey(resName) + + // There should be an existing entry for a Delete. + if _, ok := node.Annotations[ak]; !ok { + logContext.Warning("Failed to delete resource: resource does not exist") + return errors.ErrorResourceDoesNotExist{Identifier: kvp.Key} + } + + // Delete the entry from the annotations and update the Node resource. + logContext.Debug("Removing value from annotation") + delete(node.Annotations, ak) + node, err = c.ClientSet.Nodes().Update(node) + if err != nil { + // Failed to update the Node. Just log info and perform a retry. The retryWrapper will + // log Error if this continues to fail. + logContext.WithError(err).Warning("Error updating Kubernetes Node") + err = K8sErrorToCalico(err, kvp.Key) + + // If this is an update conflict, indicate to the retryWrapper + // that we can retry the action. + if _, ok := err.(errors.ErrorResourceUpdateConflict); ok { + err = retryError{err: err} + } + return err + } + + return nil +} + +func (c *customK8sNodeResourceClient) Get(key model.Key) (*model.KVPair, error) { + logContext := log.WithFields(log.Fields{ + "Key": key, + "Resource": c.ResourceType, + "Namespace": c.Namespace, + }) + logContext.Debug("Get per-Node resource") + + // Get the names and the latest Node settings associated with the Key. + name, node, err := c.getNameAndNodeFromKey(key) + if err != nil { + logContext.WithError(err).Info("Failed to get resource") + return nil, err + } + + // Extract the resource from the annotations. It should exist. + kvps, err := c.extractResourcesFromAnnotation(node, name) + if err != nil { + logContext.WithError(err).Error("Failed to get resource: error in data") + return nil, err + } + if len(kvps) != 1 { + logContext.Warning("Failed to get resource: resource does not exist") + return nil, errors.ErrorResourceDoesNotExist{Identifier: key} + } + + return kvps[0], nil +} + +func (c *customK8sNodeResourceClient) List(list model.ListInterface) ([]*model.KVPair, string, error) { + logContext := log.WithFields(log.Fields{ + "ListInterface": list, + "Resource": c.ResourceType, + "Namespace": c.Namespace, + }) + logContext.Debug("List per-Node Resources") + kvps := []*model.KVPair{} + + // Extract the Node and Name from the ListInterface. + nodeName, resName, err := c.Converter.ListInterfaceToNodeAndName(list) + if err != nil { + logContext.WithError(err).Info("Failed to list resources: error in list interface conversion") + return nil, "", err + } + + // Get a list of the required nodes - which will either be all of them + // or a specific node.
+ var nodes []apiv1.Node + var rev string + if nodeName != "" { + newLogContext := logContext.WithField("NodeName", nodeName) + node, err := c.ClientSet.Nodes().Get(nodeName, metav1.GetOptions{}) + if err != nil { + err = K8sErrorToCalico(err, nodeName) + if _, ok := err.(errors.ErrorResourceDoesNotExist); !ok { + newLogContext.WithError(err).Error("Failed to list resources: unable to query node") + return nil, "", err + } + newLogContext.WithError(err).Warning("Return no results for resource list: node does not exist") + return kvps, "", nil + } + nodes = append(nodes, *node) + rev = node.GetResourceVersion() + } else { + nodeList, err := c.ClientSet.Nodes().List(metav1.ListOptions{}) + if err != nil { + logContext.WithError(err).Info("Failed to list resources: unable to list Nodes") + return nil, "", K8sErrorToCalico(err, nodeName) + } + nodes = nodeList.Items + rev = nodeList.GetResourceVersion() + } + + // Loop through each of the nodes and extract the required data. + for _, node := range nodes { + nodeKVPs, err := c.extractResourcesFromAnnotation(&node, resName) + if err != nil { + logContext.WithField("NodeName", node.GetName()).WithError(err).Error("Error listing resources: error in data") + } + kvps = append(kvps, nodeKVPs...) + } + + return kvps, rev, nil +} + +func (c *customK8sNodeResourceClient) EnsureInitialized() error { + return nil +} + +// getNameAndNodeFromKey extracts the resource name from the key +// and gets the Node resource from the Kubernetes API. +// Returns: the resource name, the Node resource. +func (c *customK8sNodeResourceClient) getNameAndNodeFromKey(key model.Key) (string, *apiv1.Node, error) { + logContext := log.WithFields(log.Fields{ + "Key": key, + "Resource": c.ResourceType, + "Namespace": c.Namespace, + }) + logContext.Debug("Extract node and resource info from Key") + + // Get the node and resource name. + nodeName, resName, err := c.Converter.KeyToNodeAndName(key) + if err != nil { + logContext.WithError(err).Info("Error converting Key to Node and Resource name.") + return "", nil, err + } + + // Get the current node settings. + node, err := c.ClientSet.Nodes().Get(nodeName, metav1.GetOptions{}) + if err != nil { + logContext.WithError(err).Info("Error getting Node configuration") + return "", nil, K8sErrorToCalico(err, key) + } + + return resName, node, nil +} + +// nameToAnnotationKey converts the resource name to the annotations key. +func (c *customK8sNodeResourceClient) nameToAnnotationKey(name string) string { + return c.annotationKeyPrefix + name +} + +// annotationKeyToName converts the annotations key to a resource name, or returns +// and empty string if the annotation key does not represent a resource. +func (c *customK8sNodeResourceClient) annotationKeyToName(key string) string { + if strings.HasPrefix(key, c.annotationKeyPrefix) { + return key[len(c.annotationKeyPrefix):] + } + return "" +} + +// applyResourceToAnnotation applies the per-Node resource to the Node annotation. 
+func (c *customK8sNodeResourceClient) applyResourceToAnnotation(node *apiv1.Node, resName string, kvp *model.KVPair, action action) (*model.KVPair, error) { + logContext := log.WithFields(log.Fields{ + "Value": kvp.Value, + "Resource": c.ResourceType, + "Action": action, + "Namespace": c.Namespace, + }) + + logContext.Debug("Updating value in annotation") + data, err := model.SerializeValue(kvp) + if err != nil { + logContext.Error("Unable to convert value for annotation") + return nil, err + } + node.Annotations[c.nameToAnnotationKey(resName)] = string(data) + + // Update the Node resource. + node, err = c.ClientSet.Nodes().Update(node) + if err != nil { + // Failed to update the Node. Just log info and perform a retry. The retryWrapper will + // log Error if this continues to fail. + logContext.WithError(err).Warning("Error updating Kubernetes Node") + err = K8sErrorToCalico(err, kvp.Key) + + // If this is an update conflict, indicate to the retryWrapper + // that we can retry the action. + if _, ok := err.(errors.ErrorResourceUpdateConflict); ok { + err = retryError{err: err} + } + return nil, err + } + + // Return the Key and Value with updated Revision information. + return &model.KVPair{ + Key: kvp.Key, + Value: kvp.Value, + Revision: node.GetObjectMeta().GetResourceVersion(), + }, nil +} + +// extractResourcesFromAnnotation queries the current Kubernetes Node resource +// and parses the per-node resource entries configured in the annotations. +// Returns the Node resource configuration and the slice of parsed resources. +func (c *customK8sNodeResourceClient) extractResourcesFromAnnotation(node *apiv1.Node, name string) ([]*model.KVPair, error) { + logContext := log.WithFields(log.Fields{ + "ResourceType": name, + "Resource": c.ResourceType, + "Namespace": c.Namespace, + }) + logContext.Debug("Extract node and resource info from Key") + + // Extract the resource entries from the annotation. We do this either by + // extracting the requested entry if it exists, or by iterating through each + // annotation and checking if it corresponds to a resource. + resStrings := make(map[string]string, 0) + resNames := []string{} + if name != "" { + ak := c.nameToAnnotationKey(name) + if v, ok := node.Annotations[ak]; ok { + resStrings[name] = v + resNames = append(resNames, name) + } + } else { + for ak, v := range node.Annotations { + if n := c.annotationKeyToName(ak); n != "" { + resStrings[n] = v + resNames = append(resNames, n) + } + } + } + + // Sort the resource names to ensure the KVPairs are ordered. + sort.Strings(resNames) + + // Process each entry in name order and add to the return set of KVPairs. + // Use the node revision as the revision of each entry. If we hit an error + // unmarshalling then return the error if we are querying an exact entry, but + // swallow the error if we are listing multiple (otherwise a single bad entry + // would prevent all resources being listed). 
+ kvps := []*model.KVPair{} + for _, resName := range resNames { + key, err := c.Converter.NodeAndNameToKey(node.GetName(), resName) + if err != nil { + logContext.WithField("ResourceType", resName).WithError(err).Error("Error unmarshalling key") + if name != "" { + return nil, err + } + continue + } + + value, err := model.ParseValue(key, []byte(resStrings[resName])) + if err != nil { + logContext.WithField("ResourceType", resName).WithError(err).Error("Error unmarshalling value") + if name != "" { + return nil, err + } + continue + } + kvp := &model.KVPair{ + Key: key, + Value: value, + Revision: node.GetResourceVersion(), + } + kvps = append(kvps, kvp) + } + + return kvps, nil +} diff --git a/lib/backend/k8s/resources/customresource.go b/lib/backend/k8s/resources/customresource.go index b8695d26d..aef3db5ca 100644 --- a/lib/backend/k8s/resources/customresource.go +++ b/lib/backend/k8s/resources/customresource.go @@ -142,7 +142,6 @@ func (c *customK8sResourceClient) Update(kvp *model.KVPair) (*model.KVPair, erro // Update the revision information from the response. kvp.Revision = resOut.GetObjectMeta().GetResourceVersion() - return kvp, nil } diff --git a/lib/backend/k8s/resources/errors.go b/lib/backend/k8s/resources/errors.go index 3bb3acf02..8792eafaa 100644 --- a/lib/backend/k8s/resources/errors.go +++ b/lib/backend/k8s/resources/errors.go @@ -44,6 +44,12 @@ func K8sErrorToCalico(ke error, id interface{}) error { Err: ke, } } + if kerrors.IsConflict(ke) { + return errors.ErrorResourceUpdateConflict{ + Err: ke, + Identifier: id, + } + } return errors.ErrorDatastoreError{ Err: ke, Identifier: id, diff --git a/lib/backend/k8s/resources/globalbgppeer.go b/lib/backend/k8s/resources/globalbgppeer.go index 02f55d2b1..090e2aef3 100644 --- a/lib/backend/k8s/resources/globalbgppeer.go +++ b/lib/backend/k8s/resources/globalbgppeer.go @@ -46,7 +46,7 @@ func NewGlobalBGPPeerClient(c *kubernetes.Clientset, r *rest.RESTClient) K8sReso } } -// GlobalBGPPeerConverter implements the K8sResourceConverter interface. +// GlobalBGPPeerConverter implements the CustomK8sResourceConverter interface. type GlobalBGPPeerConverter struct { // Since the Spec is identical to the Calico API Spec, we use the // API converter to convert to and from the model representation. 
diff --git a/lib/backend/k8s/resources/globalbgppeer_test.go b/lib/backend/k8s/resources/globalbgppeer_test.go index 8a736beb0..321e86d33 100644 --- a/lib/backend/k8s/resources/globalbgppeer_test.go +++ b/lib/backend/k8s/resources/globalbgppeer_test.go @@ -103,7 +103,7 @@ var _ = Describe("Global BGP conversion methods", func() { It("should fail to convert an invalid resource name to the equivalent Key", func() { k, err := converter.NameToKey(nameInvalid) Expect(err).To(HaveOccurred()) - Expect(k).To(Equal(nil)) + Expect(k).To(BeNil()) }) It("should convert between a KVPair and the equivalent Kubernetes resource", func() { diff --git a/lib/backend/k8s/resources/globalconfig_test.go b/lib/backend/k8s/resources/globalconfig_test.go index 0332c1e2b..1ed528168 100644 --- a/lib/backend/k8s/resources/globalconfig_test.go +++ b/lib/backend/k8s/resources/globalconfig_test.go @@ -45,7 +45,7 @@ var _ = Describe("Global Felix config conversion methods", func() { value1 := "test" kvp1 := &model.KVPair{ Key: key1, - Value: &value1, + Value: value1, Revision: "rv", } res1 := &thirdparty.GlobalConfig{ @@ -92,7 +92,7 @@ var _ = Describe("Global Felix config conversion methods", func() { Expect(err).NotTo(HaveOccurred()) Expect(kvp.Key).To(Equal(kvp1.Key)) Expect(kvp.Revision).To(Equal(kvp1.Revision)) - Expect(kvp.Value).To(BeAssignableToTypeOf(&value1)) + Expect(kvp.Value).To(BeAssignableToTypeOf(value1)) Expect(kvp.Value).To(Equal(kvp1.Value)) }) }) diff --git a/lib/backend/k8s/resources/ippool.go b/lib/backend/k8s/resources/ippool.go index 27fdeb096..0c4759dd3 100644 --- a/lib/backend/k8s/resources/ippool.go +++ b/lib/backend/k8s/resources/ippool.go @@ -71,7 +71,13 @@ func (_ IPPoolConverter) NameToKey(name string) (model.Key, error) { func (_ IPPoolConverter) ToKVPair(r CustomK8sResource) (*model.KVPair, error) { t := r.(*thirdparty.IpPool) v := model.IPPool{} - err := json.Unmarshal([]byte(t.Spec.Value), &v) + + _, err := ResourceNameToIPNet(t.Metadata.Name) + if err != nil { + return nil, err + } + + err = json.Unmarshal([]byte(t.Spec.Value), &v) if err != nil { return nil, err } diff --git a/lib/backend/k8s/resources/ippool_test.go b/lib/backend/k8s/resources/ippool_test.go index c877bd9da..57ef309c4 100644 --- a/lib/backend/k8s/resources/ippool_test.go +++ b/lib/backend/k8s/resources/ippool_test.go @@ -66,7 +66,7 @@ var _ = Describe("IP Pool conversion methods", func() { ResourceVersion: "rv", }, Spec: thirdparty.IpPoolSpec{ - Value: "{}", + Value: "{\"cidr\":\"11:22::/120\",\"ipip\":\"tunl0\",\"ipip_mode\":\"cross-subnet\",\"masquerade\":true,\"ipam\":false,\"disabled\":false}", }, } @@ -115,7 +115,7 @@ var _ = Describe("IP Pool conversion methods", func() { It("should fail to convert an invalid resource name to the equivalent Key", func() { k, err := converter.NameToKey(nameInvalid) Expect(err).To(HaveOccurred()) - Expect(k).To(Equal(nil)) + Expect(k).To(BeNil()) }) It("should convert between a KVPair and the equivalent Kubernetes resource", func() { @@ -124,7 +124,6 @@ var _ = Describe("IP Pool conversion methods", func() { Expect(r.GetObjectMeta().GetName()).To(Equal(res1.Metadata.Name)) Expect(r.GetObjectMeta().GetResourceVersion()).To(Equal(res1.Metadata.ResourceVersion)) Expect(r).To(BeAssignableToTypeOf(&thirdparty.IpPool{})) - Expect(r.(*thirdparty.IpPool).Spec).To(Equal(res1.Spec)) }) It("should convert between a Kuberenetes resource and the equivalent KVPair", func() { diff --git a/lib/backend/k8s/resources/names_test.go b/lib/backend/k8s/resources/names_test.go index 
620fc99e2..d58535946 100644 --- a/lib/backend/k8s/resources/names_test.go +++ b/lib/backend/k8s/resources/names_test.go @@ -24,46 +24,68 @@ import ( var _ = Describe("Name conversion methods", func() { It("should convert an IPv4 address to a resource compatible name", func() { - Expect(resources.IPToResourceName(net.MustParseIP("11.223.3.41"))).To(Equal("11-223-3-441")) + Expect(resources.IPToResourceName(net.MustParseIP("11.223.3.41"))).To(Equal("11-223-3-41")) }) It("should convert an IPv6 address to a resource compatible name", func() { Expect(resources.IPToResourceName(net.MustParseIP("AA:1234::BBee:CC"))).To(Equal("aa-1234--bbee-cc")) }) It("should convert an IPv4 Network to a resource compatible name", func() { - Expect(resources.IPNetToResourceName(net.MustParseNetwork("11.223.3.41/43"))).To(Equal("11-223-3-41-43")) + Expect(resources.IPNetToResourceName(net.MustParseNetwork("11.223.3.0/24"))).To(Equal("11-223-3-0-24")) }) It("should convert an IPv4 Network to a resource compatible name", func() { - Expect(resources.IPNetToResourceName(net.MustParseNetwork("11.223.3.41"))).To(Equal("11-223-3-41-32")) + Expect(resources.IPNetToResourceName(net.MustParseNetwork("11.223.3.41/32"))).To(Equal("11-223-3-41-32")) }) It("should convert an IPv6 Network to a resource compatible name", func() { - Expect(resources.IPNetToResourceName(net.MustParseNetwork("AA:1234::BBee:CC/2"))).To(Equal("aa-1234--bbee-cc-2")) + Expect(resources.IPNetToResourceName(net.MustParseNetwork("AA:1234::BBee:CC00/120"))).To(Equal("aa-1234--bbee-cc00-120")) }) It("should convert an IPv6 Network to a resource compatible name", func() { Expect(resources.IPNetToResourceName(net.MustParseNetwork("AA:1234:BBee::/120"))).To(Equal("aa-1234-bbee---120")) }) It("should convert an IPv6 Network to a resource compatible name", func() { - Expect(resources.IPNetToResourceName(net.MustParseNetwork("AA:1234:BBee::0000"))).To(Equal("aa-1234-bbee--0000-128")) + Expect(resources.IPNetToResourceName(net.MustParseNetwork("aa:1234:bbee::/128"))).To(Equal("aa-1234-bbee---128")) }) It("should convert a resource name to the equivalent IPv4 address", func() { - Expect(resources.ResourceNameToIP("11-223-3-441")).To(Equal(net.MustParseIP("11.223.3.41"))) + i, err := resources.ResourceNameToIP("11-223-3-41") + Expect(err).NotTo(HaveOccurred()) + Expect(*i).To(Equal(net.MustParseIP("11.223.3.41"))) }) It("should convert a resource name to the equivalent IPv6 address", func() { - Expect(resources.ResourceNameToIP("aa-1234--bbee-cc")).To(Equal(net.MustParseIP("AA:1234::BBee:CC"))) + i, err := resources.ResourceNameToIP("aa-1234--bbee-cc") + Expect(err).NotTo(HaveOccurred()) + Expect(*i).To(Equal(net.MustParseIP("AA:1234::BBee:CC"))) + }) + It("should not convert an invalid resource name to an IP address", func() { + _, err := resources.ResourceNameToIP("11-223-3-4a") + Expect(err).To(HaveOccurred()) }) It("should convert a resource name to the equivalent IPv4 Network", func() { - Expect(resources.ResourceNameToIPNet("11-223-3-41-43")).To(Equal(net.MustParseNetwork("11.223.3.41/43"))) + n, err := resources.ResourceNameToIPNet("11-223-3-128-25") + Expect(err).NotTo(HaveOccurred()) + Expect(*n).To(Equal(net.MustParseNetwork("11.223.3.128/25"))) }) It("should convert a resource name to the equivalent IPv4 Network", func() { - Expect(resources.ResourceNameToIPNet("11-223-3-41-32")).To(Equal(net.MustParseNetwork("11.223.3.41"))) + n, err := resources.ResourceNameToIPNet("11-223-3-41-32") + Expect(err).NotTo(HaveOccurred()) + 
Expect(*n).To(Equal(net.MustParseNetwork("11.223.3.41/32"))) }) It("should convert a resource name to the equivalent IPv6 Network", func() { - Expect(resources.ResourceNameToIPNet("aa-1234--bbee-cc-2")).To(Equal(net.MustParseNetwork("AA:1234::BBee:CC/2"))) + n, err := resources.ResourceNameToIPNet("aa-1234--bbee-cc-2") + Expect(err).NotTo(HaveOccurred()) + Expect(*n).To(Equal(net.MustParseNetwork("AA:1234::BBee:CC/2"))) }) It("should convert a resource name to the equivalent IPv6 Network", func() { - Expect(resources.ResourceNameToIPNet("aa-1234-bbee---120")).To(Equal(net.MustParseNetwork("AA:1234:BBee::/120"))) + n, err := resources.ResourceNameToIPNet("aa-1234-bbee---120") + Expect(err).NotTo(HaveOccurred()) + Expect(*n).To(Equal(net.MustParseNetwork("AA:1234:BBee::/120"))) }) It("should convert a resource name to the equivalent IPv6 Network", func() { - Expect(resources.ResourceNameToIPNet("aa-1234-bbee--0000-128")).To(Equal(net.MustParseNetwork("AA:1234:BBee::0000"))) + n, err := resources.ResourceNameToIPNet("aa-1234-bbee---128") + Expect(err).NotTo(HaveOccurred()) + Expect(*n).To(Equal(net.MustParseNetwork("AA:1234:BBee::/128"))) + }) + It("should not convert an invalid resource name to an IP network", func() { + _, err := resources.ResourceNameToIPNet("11--223--3-41") + Expect(err).To(HaveOccurred()) }) }) diff --git a/lib/backend/k8s/resources/node.go b/lib/backend/k8s/resources/node.go index 61c27dc86..da5be96da 100644 --- a/lib/backend/k8s/resources/node.go +++ b/lib/backend/k8s/resources/node.go @@ -26,8 +26,10 @@ import ( ) func NewNodeClient(c *kubernetes.Clientset, r *rest.RESTClient) K8sResourceClient { - return &nodeClient{ - clientSet: c, + return &retryWrapper{ + client: &nodeClient{ + clientSet: c, + }, } } @@ -58,7 +60,15 @@ func (c *nodeClient) Update(kvp *model.KVPair) (*model.KVPair, error) { newNode, err := c.clientSet.Nodes().Update(node) if err != nil { - return nil, K8sErrorToCalico(err, kvp.Key) + log.WithError(err).Info("Error updating Node resource") + err = K8sErrorToCalico(err, kvp.Key) + + // If this is an update conflict and we didn't specify a revision in the + // request, indicate to the retryWrapper that we can retry the action. + if _, ok := err.(errors.ErrorResourceUpdateConflict); ok && kvp.Revision == nil { + err = retryError{err: err} + } + return nil, err } newCalicoNode, err := K8sNodeToCalico(newNode) diff --git a/lib/backend/k8s/resources/nodebgppeer.go b/lib/backend/k8s/resources/nodebgppeer.go new file mode 100644 index 000000000..d31d124bc --- /dev/null +++ b/lib/backend/k8s/resources/nodebgppeer.go @@ -0,0 +1,63 @@ +// Copyright (c) 2017 Tigera, Inc. All rights reserved. + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package resources + +import ( + "github.com/projectcalico/libcalico-go/lib/backend/model" + + "k8s.io/client-go/kubernetes" +) + +const ( + perNodeBgpPeerAnnotationNamespace = "bgppeer.projectcalico.org" +) + +func NewNodeBGPPeerClient(c *kubernetes.Clientset) K8sResourceClient { + return NewCustomK8sNodeResourceClient(CustomK8sNodeResourceClientConfig{ + ClientSet: c, + ResourceType: "NodeBGPPeer", + Converter: NodeBGPPeerConverter{}, + Namespace: perNodeBgpPeerAnnotationNamespace, + }) +} + +// NodeBGPPeerConverter implements the CustomK8sNodeResourceConverter interface. +type NodeBGPPeerConverter struct{} + +func (_ NodeBGPPeerConverter) ListInterfaceToNodeAndName(l model.ListInterface) (string, string, error) { + pl := l.(model.NodeBGPPeerListOptions) + if pl.PeerIP.IP == nil { + return pl.Nodename, "", nil + } else { + return pl.Nodename, IPToResourceName(pl.PeerIP), nil + } +} + +func (_ NodeBGPPeerConverter) KeyToNodeAndName(k model.Key) (string, string, error) { + pk := k.(model.NodeBGPPeerKey) + return pk.Nodename, IPToResourceName(pk.PeerIP), nil +} + +func (_ NodeBGPPeerConverter) NodeAndNameToKey(node, name string) (model.Key, error) { + ip, err := ResourceNameToIP(name) + if err != nil { + return nil, err + } + + return model.NodeBGPPeerKey{ + Nodename: node, + PeerIP: *ip, + }, nil +} diff --git a/lib/backend/k8s/resources/nodebgppeer_test.go b/lib/backend/k8s/resources/nodebgppeer_test.go new file mode 100644 index 000000000..eedb7ef75 --- /dev/null +++ b/lib/backend/k8s/resources/nodebgppeer_test.go @@ -0,0 +1,131 @@ +// Copyright (c) 2017 Tigera, Inc. All rights reserved. + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package resources_test + +import ( + "github.com/projectcalico/libcalico-go/lib/backend/k8s/resources" + "github.com/projectcalico/libcalico-go/lib/backend/model" + "github.com/projectcalico/libcalico-go/lib/net" + + . "github.com/onsi/ginkgo" + . 
"github.com/onsi/gomega" +) + +var _ = Describe("Node BGP conversion methods", func() { + + converter := resources.NodeBGPPeerConverter{} + + It("should convert an empty ListInterface", func() { + node, name, err := converter.ListInterfaceToNodeAndName( + model.NodeBGPPeerListOptions{}, + ) + Expect(err).To(BeNil()) + Expect(node).To(Equal("")) + Expect(name).To(Equal("")) + }) + + It("should convert a List interface with a Node name only", func() { + node, name, err := converter.ListInterfaceToNodeAndName( + model.NodeBGPPeerListOptions{ + Nodename: "node", + }, + ) + Expect(err).To(BeNil()) + Expect(node).To(Equal("node")) + Expect(name).To(Equal("")) + }) + + It("should convert a List interface with a PeerIP only", func() { + node, name, err := converter.ListInterfaceToNodeAndName( + model.NodeBGPPeerListOptions{ + PeerIP: net.MustParseIP("1.2.3.4"), + }, + ) + Expect(err).To(BeNil()) + Expect(node).To(Equal("")) + Expect(name).To(Equal("1-2-3-4")) + }) + + It("should convert a List interface with node and PeerIP (IPv4)", func() { + node, name, err := converter.ListInterfaceToNodeAndName( + model.NodeBGPPeerListOptions{ + Nodename: "nodeX", + PeerIP: net.MustParseIP("1.2.3.40"), + }, + ) + Expect(err).To(BeNil()) + Expect(node).To(Equal("nodeX")) + Expect(name).To(Equal("1-2-3-40")) + }) + + It("should convert a List interface with node and PeerIP (IPv6)", func() { + node, name, err := converter.ListInterfaceToNodeAndName( + model.NodeBGPPeerListOptions{ + Nodename: "nodeX", + PeerIP: net.MustParseIP("1::2:3:4"), + }, + ) + Expect(err).To(BeNil()) + Expect(node).To(Equal("nodeX")) + Expect(name).To(Equal("1--2-3-4")) + }) + + It("should convert a Key with node and PeerIP (IPv4)", func() { + node, name, err := converter.KeyToNodeAndName( + model.NodeBGPPeerKey{ + Nodename: "nodeY", + PeerIP: net.MustParseIP("1.2.3.50"), + }, + ) + Expect(err).To(BeNil()) + Expect(node).To(Equal("nodeY")) + Expect(name).To(Equal("1-2-3-50")) + }) + + It("should convert a Key with node and PeerIP (IPv6)", func() { + node, name, err := converter.KeyToNodeAndName( + model.NodeBGPPeerKey{ + Nodename: "nodeY", + PeerIP: net.MustParseIP("aa:ff::12"), + }, + ) + Expect(err).To(BeNil()) + Expect(node).To(Equal("nodeY")) + Expect(name).To(Equal("aa-ff--12")) + }) + + It("should convert a valid node name and resource name to a Key (IPv4)", func() { + key, err := converter.NodeAndNameToKey("nodeA", "1-2-3-4") + Expect(err).To(BeNil()) + Expect(key).To(Equal(model.NodeBGPPeerKey{ + Nodename: "nodeA", + PeerIP: net.MustParseIP("1.2.3.4"), + })) + }) + + It("should convert a valid node name and resource name to a Key (IPv6)", func() { + key, err := converter.NodeAndNameToKey("nodeB", "abcd-2000--30-40") + Expect(err).To(BeNil()) + Expect(key).To(Equal(model.NodeBGPPeerKey{ + Nodename: "nodeB", + PeerIP: net.MustParseIP("abcd:2000::30:40"), + })) + }) + + It("should fail to convert a valid node name and invalid resource name to a Key", func() { + _, err := converter.NodeAndNameToKey("nodeB", "foobarbaz") + Expect(err).ToNot(BeNil()) + }) +}) diff --git a/lib/backend/k8s/resources/resources_suite_test.go b/lib/backend/k8s/resources/resources_suite_test.go new file mode 100644 index 000000000..9af922005 --- /dev/null +++ b/lib/backend/k8s/resources/resources_suite_test.go @@ -0,0 +1,27 @@ +// Copyright (c) 2016 Tigera, Inc. All rights reserved. + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package resources_test
+
+import (
+ . "github.com/onsi/ginkgo"
+ . "github.com/onsi/gomega"
+
+ "testing"
+)
+
+func TestModel(t *testing.T) {
+ RegisterFailHandler(Fail)
+ RunSpecs(t, "K8s resources Suite")
+}
diff --git a/lib/backend/k8s/resources/retrywrapper.go b/lib/backend/k8s/resources/retrywrapper.go
new file mode 100644
index 000000000..c3b8dcf8d
--- /dev/null
+++ b/lib/backend/k8s/resources/retrywrapper.go
@@ -0,0 +1,163 @@
+// Copyright (c) 2017 Tigera, Inc. All rights reserved.
+
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package resources
+
+import (
+ "github.com/projectcalico/libcalico-go/lib/backend/model"
+
+ log "github.com/Sirupsen/logrus"
+)
+
+const (
+ maxActionRetries = 5
+)
+
+// retryWrapper implements the K8sResourceClient interface and is used to wrap
+// another K8sResourceClient to provide retry functionality when the failure
+// case is of type retryError.
+type retryWrapper struct {
+ client K8sResourceClient
+}
+
+// retryError is an error type used to indicate to the retryWrapper to retry
+// a specific action.
+//
+// If the action is retried the max number of times, then the retryWrapper will
+// return the underlying error.
+type retryError struct {
+ err error
+}
+
+func (r retryError) Error() string {
+ return r.err.Error()
+}
+
+func (r *retryWrapper) Create(object *model.KVPair) (*model.KVPair, error) {
+ var kvp *model.KVPair
+ var err error
+ for i := 0; i < maxActionRetries; i++ {
+ if kvp, err = r.client.Create(object); err == nil {
+ // No error, exit returning the KVPair.
+ return kvp, nil
+ } else if _, ok := err.(retryError); !ok {
+ return nil, err
+ }
+ }
+
+ // Excessive retries. Return the last error.
+ log.WithField("Key", object.Key).Warning("Failed to create object: too many retries")
+ return nil, err.(retryError).err
+}
+
+func (r *retryWrapper) Update(object *model.KVPair) (*model.KVPair, error) {
+ var kvp *model.KVPair
+ var err error
+ for i := 0; i < maxActionRetries; i++ {
+ if kvp, err = r.client.Update(object); err == nil {
+ // No error, exit returning the KVPair.
+ return kvp, nil
+ } else if _, ok := err.(retryError); !ok {
+ return nil, err
+ }
+ }
+
+ // Excessive retries. Return the last error.
+ log.WithField("Key", object.Key).Error("Failed to update object: too many retries")
+ return nil, err.(retryError).err
+}
+
+func (r *retryWrapper) Apply(object *model.KVPair) (*model.KVPair, error) {
+ var kvp *model.KVPair
+ var err error
+ for i := 0; i < maxActionRetries; i++ {
+ if kvp, err = r.client.Apply(object); err == nil {
+ // No error, exit returning the KVPair.
+ return kvp, nil
+ } else if _, ok := err.(retryError); !ok {
+ return nil, err
+ }
+ }
+
+ // Excessive retries. Return the last error.
+ log.WithField("Key", object.Key).Error("Failed to apply object: too many retries")
+ return nil, err.(retryError).err
+}
+
+func (r *retryWrapper) Delete(object *model.KVPair) error {
+ var err error
+ for i := 0; i < maxActionRetries; i++ {
+ if err = r.client.Delete(object); err == nil {
+ // No error, exit.
+ return nil
+ } else if _, ok := err.(retryError); !ok {
+ return err
+ }
+ }
+
+ // Excessive retries. Return the last error.
+ log.WithField("Key", object.Key).Error("Failed to delete object: too many retries")
+ return err.(retryError).err
+}
+
+func (r *retryWrapper) Get(key model.Key) (*model.KVPair, error) {
+ var kvp *model.KVPair
+ var err error
+ for i := 0; i < maxActionRetries; i++ {
+ if kvp, err = r.client.Get(key); err == nil {
+ // No error, exit returning the KVPair.
+ return kvp, nil
+ } else if _, ok := err.(retryError); !ok {
+ return nil, err
+ }
+ }
+
+ // Excessive retries. Return the last error.
+ log.WithField("Key", key).Error("Failed to get object: too many retries")
+ return nil, err.(retryError).err
+}
+
+func (r *retryWrapper) List(list model.ListInterface) ([]*model.KVPair, string, error) {
+ var rev string
+ var kvps []*model.KVPair
+ var err error
+ for i := 0; i < maxActionRetries; i++ {
+ if kvps, rev, err = r.client.List(list); err == nil {
+ // No error, exit returning the KVPairs and revision.
+ return kvps, rev, nil
+ } else if _, ok := err.(retryError); !ok {
+ return nil, "", err
+ }
+ }
+
+ // Excessive retries. Return the last error.
+ log.WithField("List", list).Error("Failed to list object: too many retries")
+ return nil, "", err.(retryError).err
+}
+
+func (r *retryWrapper) EnsureInitialized() error {
+ var err error
+ for i := 0; i < maxActionRetries; i++ {
+ if err = r.client.EnsureInitialized(); err == nil {
+ // No error, exit.
+ return nil
+ } else if _, ok := err.(retryError); !ok {
+ return err
+ }
+ }
+
+ // Excessive retries. Return the last error.
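+ // Note that only retryError failures can reach this point; any other error is returned immediately inside the loop above.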
+ log.Error("Failed to initialize: too many retries") + return err.(retryError).err +} diff --git a/lib/backend/model/bgppeer.go b/lib/backend/model/bgppeer.go index 0774fec46..9aa835679 100644 --- a/lib/backend/model/bgppeer.go +++ b/lib/backend/model/bgppeer.go @@ -66,30 +66,30 @@ func (key NodeBGPPeerKey) String() string { } type NodeBGPPeerListOptions struct { - Hostname string + Nodename string PeerIP net.IP } func (options NodeBGPPeerListOptions) defaultPathRoot() string { - if options.Hostname == "" { + if options.Nodename == "" { return "/calico/bgp/v1/host" } else if options.PeerIP.IP == nil { return fmt.Sprintf("/calico/bgp/v1/host/%s", - options.Hostname) + options.Nodename) } else { return fmt.Sprintf("/calico/bgp/v1/host/%s/peer_v%d/%s", - options.Hostname, options.PeerIP.Version(), options.PeerIP) + options.Nodename, options.PeerIP.Version(), options.PeerIP) } } func (options NodeBGPPeerListOptions) KeyFromDefaultPath(path string) Key { log.Debugf("Get BGPPeer key from %s", path) - hostname := "" + nodename := "" peerIP := net.IP{} ekeyb := []byte(path) if r := matchHostBGPPeer.FindAllSubmatch(ekeyb, -1); len(r) == 1 { - hostname = string(r[0][1]) + nodename = string(r[0][1]) if err := peerIP.UnmarshalText(r[0][2]); err != nil { log.WithError(err).WithField("PeerIP", r[0][2]).Error("Error unmarshalling GlobalBGPPeer IP address") return nil @@ -103,11 +103,11 @@ func (options NodeBGPPeerListOptions) KeyFromDefaultPath(path string) Key { log.Debugf("Didn't match peerIP %s != %s", options.PeerIP.String(), peerIP.String()) return nil } - if options.Hostname != "" && hostname != options.Hostname { - log.Debugf("Didn't match hostname %s != %s", options.Hostname, hostname) + if options.Nodename != "" && nodename != options.Nodename { + log.Debugf("Didn't match hostname %s != %s", options.Nodename, nodename) return nil } - return NodeBGPPeerKey{PeerIP: peerIP, Nodename: hostname} + return NodeBGPPeerKey{PeerIP: peerIP, Nodename: nodename} } type GlobalBGPPeerKey struct { diff --git a/lib/client/bgppeer.go b/lib/client/bgppeer.go index 1e8807687..5e3c96e2e 100644 --- a/lib/client/bgppeer.go +++ b/lib/client/bgppeer.go @@ -111,7 +111,7 @@ func (h *bgpPeers) convertMetadataToListInterface(m unversioned.ResourceMetadata } else { return model.NodeBGPPeerListOptions{ PeerIP: pm.PeerIP, - Hostname: pm.Node, + Nodename: pm.Node, }, nil } } diff --git a/lib/client/bgppeer_e2e_test.go b/lib/client/bgppeer_e2e_test.go index a6f88d0c1..a190dc5d8 100644 --- a/lib/client/bgppeer_e2e_test.go +++ b/lib/client/bgppeer_e2e_test.go @@ -31,14 +31,16 @@ import ( ) // Perform CRUD operations on Global and Node-specific BGP Peer Resources. 
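+// Note: these tests now run against all supported datastore drivers (DatastoreAll) rather than just etcdv2.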
-var _ = testutils.E2eDatastoreDescribe("BGPPeer tests", testutils.DatastoreEtcdV2, func(config api.CalicoAPIConfig) { +var _ = testutils.E2eDatastoreDescribe("BGPPeer tests", testutils.DatastoreAll, func(config api.CalicoAPIConfig) { DescribeTable("BGPPeer e2e tests", func(meta1, meta2 api.BGPPeerMetadata, spec1, spec2 api.BGPPeerSpec) { - c := testutils.CreateCleanClient(config) + c := testutils.CreateClient(config) + testutils.CleanBGPPeers(c) + By("Updating the BGPPeer before it is created") _, outError := c.BGPPeers().Update(&api.BGPPeer{Metadata: meta1, Spec: spec1}) - Expect(outError.Error()).To(Equal(errors.New("resource does not exist: BGPPeer(node=node1, ip=10.0.0.1)").Error())) + Expect(outError.Error()).To(Equal(errors.New("resource does not exist: BGPPeer(node=127.0.0.1, ip=10.0.0.1)").Error())) By("Creating a new BGPPeer with meta/spec1") _, outError = c.BGPPeers().Create(&api.BGPPeer{Metadata: meta1, Spec: spec1}) @@ -88,7 +90,7 @@ var _ = testutils.E2eDatastoreDescribe("BGPPeer tests", testutils.DatastoreEtcdV By("Getting BGPPeer (meta1) and checking for error") _, outError = c.BGPPeers().Get(meta1) - Expect(outError.Error()).To(Equal(errors.New("resource does not exist: BGPPeer(node=node1, ip=10.0.0.1)").Error())) + Expect(outError.Error()).To(Equal(errors.New("resource does not exist: BGPPeer(node=127.0.0.1, ip=10.0.0.1)").Error())) By("Deleting BGPPeer (meta2)") outError1 = c.BGPPeers().Delete(meta2) @@ -104,7 +106,7 @@ var _ = testutils.E2eDatastoreDescribe("BGPPeer tests", testutils.DatastoreEtcdV Entry("Two fully populated BGPPeerSpecs", api.BGPPeerMetadata{ Scope: scope.Scope("node"), - Node: "node1", + Node: "127.0.0.1", PeerIP: net.MustParseIP("10.0.0.1"), }, api.BGPPeerMetadata{ @@ -122,7 +124,7 @@ var _ = testutils.E2eDatastoreDescribe("BGPPeer tests", testutils.DatastoreEtcdV Entry("One fully populated BGPPeerSpec and another empty BGPPeerSpec", api.BGPPeerMetadata{ Scope: scope.Scope("node"), - Node: "node1", + Node: "127.0.0.1", PeerIP: net.MustParseIP("10.0.0.1"), }, api.BGPPeerMetadata{ @@ -136,7 +138,8 @@ var _ = testutils.E2eDatastoreDescribe("BGPPeer tests", testutils.DatastoreEtcdV ) Describe("Checking operations perform data validation", func() { - c := testutils.CreateCleanClient(config) + c := testutils.CreateClient(config) + testutils.CleanBGPPeers(c) var err error valErrorType := reflect.TypeOf(cerrors.ErrorValidation{}) @@ -186,109 +189,3 @@ var _ = testutils.E2eDatastoreDescribe("BGPPeer tests", testutils.DatastoreEtcdV }) }) }) - -// Perform CRUD operations on Global BGP Peer Resources -var _ = testutils.E2eDatastoreDescribe("Global BGPPeer tests", testutils.DatastoreAll, func(config api.CalicoAPIConfig) { - - DescribeTable("Global BGPPeer e2e tests", - func(meta1, meta2 api.BGPPeerMetadata, spec1, spec2 api.BGPPeerSpec) { - c := testutils.CreateClient(config) - testutils.CleanBGPPeers(c) - - By("Updating the BGPPeer before it is created") - _, outError := c.BGPPeers().Update(&api.BGPPeer{Metadata: meta1, Spec: spec1}) - Expect(outError.Error()).To(Equal(errors.New("resource does not exist: BGPPeer(global, ip=10.0.0.1)").Error())) - - By("Creating a new BGPPeer with meta/spec1") - _, outError = c.BGPPeers().Create(&api.BGPPeer{Metadata: meta1, Spec: spec1}) - Expect(outError).NotTo(HaveOccurred()) - - By("Applying a new BGPPeer with meta/spec1") - _, outError = c.BGPPeers().Apply(&api.BGPPeer{Metadata: meta2, Spec: spec2}) - Expect(outError).NotTo(HaveOccurred()) - - By("Getting BGPPeer (meta1) and comparing the output against spec1") - 
outBGPPeer1, outError1 := c.BGPPeers().Get(meta1) - Expect(outError1).NotTo(HaveOccurred()) - Expect(outBGPPeer1.Spec).To(Equal(spec1)) - - By("Getting BGPPeer (meta2) and comparing the output against spec2") - outBGPPeer2, outError2 := c.BGPPeers().Get(meta2) - Expect(outError2).NotTo(HaveOccurred()) - Expect(outBGPPeer2.Spec).To(Equal(spec2)) - - By("Updating BGPPeer (meta1) with spec2") - _, outError = c.BGPPeers().Update(&api.BGPPeer{Metadata: meta1, Spec: spec2}) - Expect(outError).NotTo(HaveOccurred()) - - By("Getting BGPPeer (meta1) with spec2") - outBGPPeer1, outError1 = c.BGPPeers().Get(meta1) - Expect(outError1).NotTo(HaveOccurred()) - Expect(outBGPPeer1.Spec).To(Equal(spec2)) - - By("Listing all the BGPPeers") - BGPPeerList, outError := c.BGPPeers().List(api.BGPPeerMetadata{}) - Expect(outError).NotTo(HaveOccurred()) - - By("Getting both BGPPeers (meta1 and meta2) and checking they match list entries") - bp1, _ := c.BGPPeers().Get(meta1) - bp2, _ := c.BGPPeers().Get(meta2) - Expect(BGPPeerList.Items).To(ContainElement(*bp1)) - Expect(BGPPeerList.Items).To(ContainElement(*bp2)) - - By("List BGPPeer (meta1) and compare") - BGPPeerList, outError = c.BGPPeers().List(meta1) - Expect(outError).NotTo(HaveOccurred()) - Expect(BGPPeerList.Items[0]).To(Equal(*bp1)) - - By("Deleting BGPPeer (meta1)") - outError1 = c.BGPPeers().Delete(meta1) - Expect(outError1).NotTo(HaveOccurred()) - - By("Getting BGPPeer (meta1) and checking for error") - _, outError = c.BGPPeers().Get(meta1) - Expect(outError.Error()).To(Equal(errors.New("resource does not exist: BGPPeer(global, ip=10.0.0.1)").Error())) - - By("Deleting BGPPeer (meta2)") - outError1 = c.BGPPeers().Delete(meta2) - Expect(outError1).NotTo(HaveOccurred()) - - By("Listing all Peers and checking for zero entries") - BGPPeerList, outError = c.BGPPeers().List(api.BGPPeerMetadata{}) - Expect(outError).NotTo(HaveOccurred()) - Expect(BGPPeerList.Items).To(HaveLen(0)) - }, - - // Test 1: Pass two fully populated BGPPeerSpecs and expect the series of operations to succeed. - Entry("Two fully populated BGPPeerSpecs", - api.BGPPeerMetadata{ - Scope: scope.Scope("global"), - PeerIP: net.MustParseIP("10.0.0.1"), - }, - api.BGPPeerMetadata{ - Scope: scope.Scope("global"), - PeerIP: net.MustParseIP("20.0.0.1"), - }, - api.BGPPeerSpec{ - ASNumber: numorstring.ASNumber(6512), - }, - api.BGPPeerSpec{ - ASNumber: numorstring.ASNumber(6511), - }), - - // Test 2: Pass one fully populated BGPPeerSpec and another empty BGPPeerSpec and expect the series of operations to succeed. - Entry("One fully populated BGPPeerSpec and another empty BGPPeerSpec", - api.BGPPeerMetadata{ - Scope: scope.Scope("global"), - PeerIP: net.MustParseIP("10.0.0.1"), - }, - api.BGPPeerMetadata{ - Scope: scope.Scope("global"), - PeerIP: net.MustParseIP("20.0.0.1"), - }, - api.BGPPeerSpec{ - ASNumber: numorstring.ASNumber(6512), - }, - api.BGPPeerSpec{}), - ) -}) diff --git a/lib/errors/errors.go b/lib/errors/errors.go index 1ab9e7ebd..59ec4f33d 100644 --- a/lib/errors/errors.go +++ b/lib/errors/errors.go @@ -119,6 +119,7 @@ func (e ErrorInsufficientIdentifiers) Error() string { // Error indicating an atomic update attempt that failed due to a update conflict. 
type ErrorResourceUpdateConflict struct { + Err error Identifier interface{} } From ee3ca3a6d52422feb33c9c8994fc9e713ad06f19 Mon Sep 17 00:00:00 2001 From: Casey Davenport Date: Thu, 6 Jul 2017 16:06:41 -0700 Subject: [PATCH 04/11] Support annotations on Policy objects --- lib/api/policy.go | 3 +++ lib/backend/model/policy.go | 11 ++++++----- lib/client/policy_e2e_test.go | 5 ++++- lib/converter/policy.go | 2 ++ 4 files changed, 15 insertions(+), 6 deletions(-) diff --git a/lib/api/policy.go b/lib/api/policy.go index 54a92efc3..2ebaa7e84 100644 --- a/lib/api/policy.go +++ b/lib/api/policy.go @@ -48,6 +48,9 @@ type PolicyMetadata struct { // The name of the selector-based security policy. Name string `json:"name,omitempty" validate:"omitempty,namespacedname"` + + // Arbitrary key-value information to be used by clients. + Annotations map[string]string `json:"annotations,omitempty" validate:"omitempty"` } // PolicySpec contains the specification for a selector-based security Policy resource. diff --git a/lib/backend/model/policy.go b/lib/backend/model/policy.go index bdfda3a81..46a5fc7e7 100644 --- a/lib/backend/model/policy.go +++ b/lib/backend/model/policy.go @@ -89,11 +89,12 @@ func (options PolicyListOptions) KeyFromDefaultPath(path string) Key { } type Policy struct { - Order *float64 `json:"order,omitempty" validate:"omitempty"` - InboundRules []Rule `json:"inbound_rules,omitempty" validate:"omitempty,dive"` - OutboundRules []Rule `json:"outbound_rules,omitempty" validate:"omitempty,dive"` - Selector string `json:"selector" validate:"selector"` - DoNotTrack bool `json:"untracked,omitempty"` + Order *float64 `json:"order,omitempty" validate:"omitempty"` + InboundRules []Rule `json:"inbound_rules,omitempty" validate:"omitempty,dive"` + OutboundRules []Rule `json:"outbound_rules,omitempty" validate:"omitempty,dive"` + Selector string `json:"selector" validate:"selector"` + DoNotTrack bool `json:"untracked,omitempty"` + Annotations map[string]string `json:"annotations,omitempty"` } func (p Policy) String() string { diff --git a/lib/client/policy_e2e_test.go b/lib/client/policy_e2e_test.go index 77a12b645..15986a1e4 100644 --- a/lib/client/policy_e2e_test.go +++ b/lib/client/policy_e2e_test.go @@ -111,6 +111,9 @@ var _ = testutils.E2eDatastoreDescribe("Policy tests", testutils.DatastoreEtcdV2 Expect(outError1).NotTo(HaveOccurred()) Expect(outPolicy1.Spec).To(Equal(spec2)) + // Assert the Metadata for policy with meta1 matches meta1. + Expect(outPolicy1.Metadata).To(Equal(meta1)) + By("List all the policies and compare") // Get a list of policiess. @@ -179,7 +182,7 @@ var _ = testutils.E2eDatastoreDescribe("Policy tests", testutils.DatastoreEtcdV2 // Test 1: Pass two fully populated PolicySpecs and expect the series of operations to succeed. 
Entry("Two fully populated PolicySpecs", - api.PolicyMetadata{Name: "policy-1/with.foo"}, + api.PolicyMetadata{Name: "policy-1/with.foo", Annotations: map[string]string{"key": "value"}}, api.PolicyMetadata{Name: "policy.1"}, policySpec1, policySpec2, diff --git a/lib/converter/policy.go b/lib/converter/policy.go index 02da8fb53..8cdcd659a 100644 --- a/lib/converter/policy.go +++ b/lib/converter/policy.go @@ -50,6 +50,7 @@ func (p PolicyConverter) ConvertAPIToKVPair(a unversioned.Resource) (*model.KVPa OutboundRules: RulesAPIToBackend(ap.Spec.EgressRules), Selector: ap.Spec.Selector, DoNotTrack: ap.Spec.DoNotTrack, + Annotations: ap.Metadata.Annotations, }, } @@ -64,6 +65,7 @@ func (p PolicyConverter) ConvertKVPairToAPI(d *model.KVPair) (unversioned.Resour ap := api.NewPolicy() ap.Metadata.Name = bk.Name + ap.Metadata.Annotations = bp.Annotations ap.Spec.Order = bp.Order ap.Spec.IngressRules = RulesBackendToAPI(bp.InboundRules) ap.Spec.EgressRules = RulesBackendToAPI(bp.OutboundRules) From b26b26e85a1105a0fe35fe9dd03812f746a1ce49 Mon Sep 17 00:00:00 2001 From: Rob Brockbank Date: Sat, 1 Jul 2017 16:46:18 -0700 Subject: [PATCH 05/11] Add remaining BGP configuration types --- lib/backend/compat/compat.go | 210 +++++++++++++++--- lib/backend/k8s/k8s.go | 78 +++++-- lib/backend/k8s/resources/globalbgpconfig.go | 96 ++++++++ .../k8s/resources/globalbgpconfig_test.go | 98 ++++++++ lib/backend/k8s/resources/nodebgpconfig.go | 58 +++++ .../k8s/resources/nodebgpconfig_test.go | 92 ++++++++ lib/backend/k8s/resources/nodebgppeer.go | 2 +- lib/backend/k8s/thirdparty/globalbgpconfig.go | 90 ++++++++ lib/backend/model/bgpconfig.go | 97 ++++++-- lib/backend/model/felixconfig.go | 1 + lib/backend/model/keys.go | 2 +- lib/client/bgppeer_e2e_test.go | 6 +- lib/client/config.go | 30 +-- lib/client/config_e2e_test.go | 165 +++++++------- lib/client/doc.go | 10 +- lib/client/ippool_e2e_test.go | 6 +- lib/errors/errors.go | 28 +++ lib/testutils/client.go | 44 ++-- 18 files changed, 914 insertions(+), 199 deletions(-) create mode 100644 lib/backend/k8s/resources/globalbgpconfig.go create mode 100644 lib/backend/k8s/resources/globalbgpconfig_test.go create mode 100644 lib/backend/k8s/resources/nodebgpconfig.go create mode 100644 lib/backend/k8s/resources/nodebgpconfig_test.go create mode 100644 lib/backend/k8s/thirdparty/globalbgpconfig.go diff --git a/lib/backend/compat/compat.go b/lib/backend/compat/compat.go index d7b767a02..0f50504e1 100644 --- a/lib/backend/compat/compat.go +++ b/lib/backend/compat/compat.go @@ -15,9 +15,11 @@ package compat import ( + "encoding/json" goerrors "errors" log "github.com/Sirupsen/logrus" + "github.com/projectcalico/libcalico-go/lib/backend/api" "github.com/projectcalico/libcalico-go/lib/backend/model" "github.com/projectcalico/libcalico-go/lib/errors" @@ -46,7 +48,7 @@ func (c *ModelAdaptor) EnsureCalicoNodeInitialized(node string) error { // Create an entry in the datastore. This errors if the entry already exists. 
func (c *ModelAdaptor) Create(d *model.KVPair) (*model.KVPair, error) { var err error - switch d.Key.(type) { + switch k := d.Key.(type) { case model.ProfileKey: t, l, r := ToTagsLabelsRules(d) if t, err = c.client.Create(t); err != nil { @@ -79,6 +81,14 @@ func (c *ModelAdaptor) Create(d *model.KVPair) (*model.KVPair, error) { } d.Revision = b.Revision return d, nil + case model.GlobalBGPConfigKey: + nd := toDatastoreGlobalBGPConfig(*d) + b, err := c.client.Create(nd) + if err != nil { + return nil, errors.UpdateErrorIdentifier(err, k) + } + d.Revision = b.Revision + return d, nil default: return c.client.Create(d) } @@ -121,6 +131,14 @@ func (c *ModelAdaptor) Update(d *model.KVPair) (*model.KVPair, error) { } d.Revision = b.Revision return d, nil + case model.GlobalBGPConfigKey: + nd := toDatastoreGlobalBGPConfig(*d) + b, err := c.client.Update(nd) + if err != nil { + return nil, errors.UpdateErrorIdentifier(err, d.Key) + } + d.Revision = b.Revision + return d, nil default: return c.client.Update(d) } @@ -138,7 +156,7 @@ func (c *ModelAdaptor) Apply(d *model.KVPair) (*model.KVPair, error) { } else if _, err := c.client.Apply(l); err != nil { return nil, err } else if _, err := c.client.Apply(r); err != nil { - return nil, err + return nil, errors.UpdateErrorIdentifier(err, d.Key) } else { d.Revision = t.Revision return d, nil @@ -163,6 +181,14 @@ func (c *ModelAdaptor) Apply(d *model.KVPair) (*model.KVPair, error) { } d.Revision = b.Revision return d, nil + case model.GlobalBGPConfigKey: + nd := toDatastoreGlobalBGPConfig(*d) + b, err := c.client.Apply(nd) + if err != nil { + return nil, errors.UpdateErrorIdentifier(err, d.Key) + } + d.Revision = b.Revision + return d, nil default: return c.client.Apply(d) } @@ -181,6 +207,10 @@ func (c *ModelAdaptor) Delete(d *model.KVPair) error { return err } return nil + case model.GlobalBGPConfigKey: + nd := toDatastoreGlobalBGPConfig(*d) + err := c.client.Delete(nd) + return errors.UpdateErrorIdentifier(err, d.Key) default: return c.client.Delete(d) } @@ -195,6 +225,13 @@ func (c *ModelAdaptor) Get(k model.Key) (*model.KVPair, error) { return c.getNode(kt) case model.BlockKey: return c.getBlock(k) + case model.GlobalBGPConfigKey: + nk := toDatastoreGlobalBGPConfigKey(kt) + if kvp, err := c.client.Get(nk); err != nil { + return nil, errors.UpdateErrorIdentifier(err, k) + } else { + return fromDatastoreGlobalBGPConfig(*kvp), nil + } default: return c.client.Get(k) } @@ -208,6 +245,16 @@ func (c *ModelAdaptor) List(l model.ListInterface) ([]*model.KVPair, error) { return c.listNodes(lt) case model.BlockListOptions: return c.listBlock(lt) + case model.GlobalBGPConfigListOptions: + nl := toDatastoreGlobalBGPConfigList(lt) + if kvps, err := c.client.List(nl); err != nil { + return nil, errors.UpdateErrorIdentifier(err, l) + } else { + for i, kvp := range kvps { + kvps[i] = fromDatastoreGlobalBGPConfig(*kvp) + } + return kvps, nil + } default: return c.client.List(l) } @@ -371,7 +418,7 @@ func (c *ModelAdaptor) getNodeSubcomponents(nk model.NodeKey, nv *model.Node) er var strval string // Fill in the Metadata specific part of the node configuration. 
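+ // Per-node BGP settings are addressed using NodeBGPConfigKey, which is keyed on Nodename rather than Hostname.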
- if component, err = c.client.Get(model.HostBGPConfigKey{Hostname: nk.Hostname, Name: "ip_addr_v4"}); err == nil { + if component, err = c.client.Get(model.NodeBGPConfigKey{Nodename: nk.Hostname, Name: "ip_addr_v4"}); err == nil { strval = component.Value.(string) if strval != "" { nv.BGPIPv4Addr = &net.IP{} @@ -385,7 +432,7 @@ func (c *ModelAdaptor) getNodeSubcomponents(nk model.NodeKey, nv *model.Node) er return err } - if component, err = c.client.Get(model.HostBGPConfigKey{Hostname: nk.Hostname, Name: "network_v4"}); err == nil { + if component, err = c.client.Get(model.NodeBGPConfigKey{Nodename: nk.Hostname, Name: "network_v4"}); err == nil { strval = component.Value.(string) if strval != "" { _, nv.BGPIPv4Net, err = net.ParseCIDR(strval) @@ -398,7 +445,7 @@ func (c *ModelAdaptor) getNodeSubcomponents(nk model.NodeKey, nv *model.Node) er return err } - if component, err = c.client.Get(model.HostBGPConfigKey{Hostname: nk.Hostname, Name: "ip_addr_v6"}); err == nil { + if component, err = c.client.Get(model.NodeBGPConfigKey{Nodename: nk.Hostname, Name: "ip_addr_v6"}); err == nil { strval = component.Value.(string) if strval != "" { nv.BGPIPv6Addr = &net.IP{} @@ -412,7 +459,7 @@ func (c *ModelAdaptor) getNodeSubcomponents(nk model.NodeKey, nv *model.Node) er return err } - if component, err = c.client.Get(model.HostBGPConfigKey{Hostname: nk.Hostname, Name: "network_v6"}); err == nil { + if component, err = c.client.Get(model.NodeBGPConfigKey{Nodename: nk.Hostname, Name: "network_v6"}); err == nil { strval = component.Value.(string) if strval != "" { _, nv.BGPIPv6Net, err = net.ParseCIDR(strval) @@ -425,7 +472,7 @@ func (c *ModelAdaptor) getNodeSubcomponents(nk model.NodeKey, nv *model.Node) er return err } - if component, err = c.client.Get(model.HostBGPConfigKey{Hostname: nk.Hostname, Name: "as_num"}); err == nil { + if component, err = c.client.Get(model.NodeBGPConfigKey{Nodename: nk.Hostname, Name: "as_num"}); err == nil { strval = component.Value.(string) if strval != "" { asn, err := numorstring.ASNumberFromString(strval) @@ -521,15 +568,15 @@ func toNodeComponents(d *model.KVPair) (primary *model.KVPair, optional []*model // Add the BGP IPv4 and IPv6 values - these are always present. 
optional = []*model.KVPair{ &model.KVPair{ - Key: model.HostBGPConfigKey{ - Hostname: nk.Hostname, + Key: model.NodeBGPConfigKey{ + Nodename: nk.Hostname, Name: "ip_addr_v4", }, Value: ipv4Str, }, &model.KVPair{ - Key: model.HostBGPConfigKey{ - Hostname: nk.Hostname, + Key: model.NodeBGPConfigKey{ + Nodename: nk.Hostname, Name: "ip_addr_v6", }, Value: ipv6Str, @@ -553,48 +600,48 @@ func toNodeComponents(d *model.KVPair) (primary *model.KVPair, optional []*model if n.BGPASNumber != nil { optional = append(optional, &model.KVPair{ - Key: model.HostBGPConfigKey{ - Hostname: nk.Hostname, + Key: model.NodeBGPConfigKey{ + Nodename: nk.Hostname, Name: "as_num", }, Value: n.BGPASNumber.String(), }) } else { optional = append(optional, &model.KVPair{ - Key: model.HostBGPConfigKey{ - Hostname: nk.Hostname, + Key: model.NodeBGPConfigKey{ + Nodename: nk.Hostname, Name: "as_num", }, }) } if n.BGPIPv4Net != nil { optional = append(optional, &model.KVPair{ - Key: model.HostBGPConfigKey{ - Hostname: nk.Hostname, + Key: model.NodeBGPConfigKey{ + Nodename: nk.Hostname, Name: "network_v4", }, Value: n.BGPIPv4Net.String(), }) } else { optional = append(optional, &model.KVPair{ - Key: model.HostBGPConfigKey{ - Hostname: nk.Hostname, + Key: model.NodeBGPConfigKey{ + Nodename: nk.Hostname, Name: "network_v4", }, }) } if n.BGPIPv6Net != nil { optional = append(optional, &model.KVPair{ - Key: model.HostBGPConfigKey{ - Hostname: nk.Hostname, + Key: model.NodeBGPConfigKey{ + Nodename: nk.Hostname, Name: "network_v6", }, Value: n.BGPIPv6Net.String(), }) } else { optional = append(optional, &model.KVPair{ - Key: model.HostBGPConfigKey{ - Hostname: nk.Hostname, + Key: model.NodeBGPConfigKey{ + Nodename: nk.Hostname, Name: "network_v6", }, }) @@ -617,32 +664,32 @@ func toNodeDeleteComponents(d *model.KVPair) (primary *model.KVPair, optional [] Key: model.HostIPKey{nk.Hostname}, }, &model.KVPair{ - Key: model.HostBGPConfigKey{ - Hostname: nk.Hostname, + Key: model.NodeBGPConfigKey{ + Nodename: nk.Hostname, Name: "ip_addr_v4", }, }, &model.KVPair{ - Key: model.HostBGPConfigKey{ - Hostname: nk.Hostname, + Key: model.NodeBGPConfigKey{ + Nodename: nk.Hostname, Name: "ip_addr_v6", }, }, &model.KVPair{ - Key: model.HostBGPConfigKey{ - Hostname: nk.Hostname, + Key: model.NodeBGPConfigKey{ + Nodename: nk.Hostname, Name: "as_num", }, }, &model.KVPair{ - Key: model.HostBGPConfigKey{ - Hostname: nk.Hostname, + Key: model.NodeBGPConfigKey{ + Nodename: nk.Hostname, Name: "network_v4", }, }, &model.KVPair{ - Key: model.HostBGPConfigKey{ - Hostname: nk.Hostname, + Key: model.NodeBGPConfigKey{ + Nodename: nk.Hostname, Name: "network_v6", }, }, @@ -650,3 +697,100 @@ func toNodeDeleteComponents(d *model.KVPair) (primary *model.KVPair, optional [] return primary, optional } + +// toDatastoreGlobalBGPConfigKey modifies the Global BGP Config key to the one required by +// the datastore (for back-compatibility). +func toDatastoreGlobalBGPConfigKey(key model.GlobalBGPConfigKey) model.GlobalBGPConfigKey { + switch key.Name { + case "AsNumber": + key = model.GlobalBGPConfigKey{Name: "as_num"} + case "LogLevel": + key = model.GlobalBGPConfigKey{Name: "loglevel"} + case "NodeMeshEnabled": + key = model.GlobalBGPConfigKey{Name: "node_mesh"} + } + return key +} + +// toDatastoreGlobalBGPConfigList modifies the Global BGP Config List interface to the one required by +// the datastore (for back-compatibility with what is expected in teh etcdv2 datastore driver). 
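+// For example, a list over the name "AsNumber" is converted to a list over the legacy "as_num" name.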
+func toDatastoreGlobalBGPConfigList(l model.GlobalBGPConfigListOptions) model.GlobalBGPConfigListOptions { + switch l.Name { + case "AsNumber": + l = model.GlobalBGPConfigListOptions{Name: "as_num"} + case "LogLevel": + l = model.GlobalBGPConfigListOptions{Name: "loglevel"} + case "NodeMeshEnabled": + l = model.GlobalBGPConfigListOptions{Name: "node_mesh"} + } + return l +} + +// fromDatastoreGlobalBGPKey modifies the Global BGP Config key from the one required by +// the datastore (for back-compatibility with what is expected in teh etcdv2 datastore driver). +func fromDatastoreGlobalBGPKey(key model.GlobalBGPConfigKey) model.GlobalBGPConfigKey { + switch key.Name { + case "as_num": + key = model.GlobalBGPConfigKey{Name: "AsNumber"} + case "loglevel": + key = model.GlobalBGPConfigKey{Name: "LogLevel"} + case "node_mesh": + key = model.GlobalBGPConfigKey{Name: "NodeMeshEnabled"} + } + return key +} + +// toDatastoreGlobalBGPConfig modifies the Global BGP Config KVPair to the format required in the +// datastore (for back-compatibility with what is expected in teh etcdv2 datastore driver). +func toDatastoreGlobalBGPConfig(d model.KVPair) *model.KVPair { + // Copy the KVPair, so we aren't modifying the original. + modifiedKey := toDatastoreGlobalBGPConfigKey(d.Key.(model.GlobalBGPConfigKey)) + d.Key = modifiedKey + + switch modifiedKey.Name { + case "node_mesh": + // In the datastore the node_mesh parm is expected to be a JSON object with an + // enabled field, but the new value just uses a boolean string. + if d.Value != nil { + enabled := d.Value.(string) == "true" + v, _ := json.Marshal(nodeToNodeMesh{Enabled: enabled}) + d.Value = string(v) + } + } + + return &d +} + + +// fromDatastoreGlobalBGPConfig modifies the Global BGP Config KVPair from the format required in the +// datastore (for back-compatibility with what is expected in teh etcdv2 datastore driver). +func fromDatastoreGlobalBGPConfig(d model.KVPair) *model.KVPair { + modifiedKey := fromDatastoreGlobalBGPKey(d.Key.(model.GlobalBGPConfigKey)) + d.Key = modifiedKey + + switch modifiedKey.Name { + case "NodeMeshEnabled": + // In the datastore the node_mesh parm is expected to be a JSON object with an + // enabled field, but the new value just uses a boolean string. + if d.Value != nil { + var n nodeToNodeMesh + if err := json.Unmarshal([]byte(d.Value.(string)), &n); err != nil { + log.Info("Error parsing node to node mesh") + v, _ := json.Marshal(false) + d.Value = string(v) + } else { + log.Info("Returning configured node to node mesh") + v, _ := json.Marshal(n.Enabled) + d.Value = string(v) + } + } + } + + return &d +} + +// nodeToNodeMesh is a struct containing whether node-to-node mesh is enabled. It can be +// JSON marshalled into the correct structure that is understood by the Calico BGP component. +type nodeToNodeMesh struct { + Enabled bool `json:"enabled"` +} diff --git a/lib/backend/k8s/k8s.go b/lib/backend/k8s/k8s.go index c739a0157..47f87fb8d 100644 --- a/lib/backend/k8s/k8s.go +++ b/lib/backend/k8s/k8s.go @@ -58,12 +58,15 @@ type KubeClient struct { converter converter // Clients for interacting with Calico resources. 
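+ // Global and per-node BGP peers and BGP configuration are handled by separate clients.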
- globalBgpClient resources.K8sResourceClient - nodeBgpClient resources.K8sResourceClient - globalConfigClient resources.K8sResourceClient - ipPoolClient resources.K8sResourceClient - snpClient resources.K8sResourceClient - nodeClient resources.K8sResourceClient + globalBgpPeerClient resources.K8sResourceClient + nodeBgpPeerClient resources.K8sResourceClient + globalBgpConfigClient resources.K8sResourceClient + nodeBgpConfigClient resources.K8sResourceClient + globalConfigClient resources.K8sResourceClient + nodeConfigClient resources.K8sResourceClient + ipPoolClient resources.K8sResourceClient + snpClient resources.K8sResourceClient + nodeClient resources.K8sResourceClient } func NewKubeClient(kc *capi.KubeConfig) (*KubeClient, error) { @@ -133,8 +136,10 @@ func NewKubeClient(kc *capi.KubeConfig) (*KubeClient, error) { kubeClient.ipPoolClient = resources.NewIPPoolClient(cs, tprClientV1) kubeClient.nodeClient = resources.NewNodeClient(cs, tprClientV1) kubeClient.snpClient = resources.NewSystemNetworkPolicyClient(cs, tprClientV1alpha) - kubeClient.globalBgpClient = resources.NewGlobalBGPPeerClient(cs, tprClientV1) - kubeClient.nodeBgpClient = resources.NewNodeBGPPeerClient(cs) + kubeClient.globalBgpPeerClient = resources.NewGlobalBGPPeerClient(cs, tprClientV1) + kubeClient.nodeBgpPeerClient = resources.NewNodeBGPPeerClient(cs) + kubeClient.globalBgpConfigClient = resources.NewGlobalBGPConfigClient(cs, tprClientV1) + kubeClient.nodeBgpConfigClient = resources.NewNodeBGPConfigClient(cs) kubeClient.globalConfigClient = resources.NewGlobalConfigClient(cs, tprClientV1) return kubeClient, nil @@ -183,13 +188,14 @@ func (c *KubeClient) createThirdPartyResources() error { done := make(chan error) go func() { done <- c.ipPoolClient.EnsureInitialized() }() go func() { done <- c.snpClient.EnsureInitialized() }() - go func() { done <- c.globalBgpClient.EnsureInitialized() }() + go func() { done <- c.globalBgpPeerClient.EnsureInitialized() }() go func() { done <- c.globalConfigClient.EnsureInitialized() }() + go func() { done <- c.globalBgpConfigClient.EnsureInitialized() }() - // Wait for all 4 registrations to complete and keep track of the last + // Wait for all registrations to complete and keep track of the last // error to return. 
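+ // Note: the loop count below must match the number of EnsureInitialized goroutines started above.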
var lastErr error - for i := 0; i < 4; i++ { + for i := 0; i < 5; i++ { if err := <-done; err != nil { log.WithError(err).Error("Hit error initializing TPR") lastErr = err @@ -324,9 +330,13 @@ func (c *KubeClient) Create(d *model.KVPair) (*model.KVPair, error) { case model.NodeKey: return c.nodeClient.Create(d) case model.GlobalBGPPeerKey: - return c.globalBgpClient.Create(d) + return c.globalBgpPeerClient.Create(d) case model.NodeBGPPeerKey: - return c.nodeBgpClient.Create(d) + return c.nodeBgpPeerClient.Create(d) + case model.GlobalBGPConfigKey: + return c.globalBgpConfigClient.Create(d) + case model.NodeBGPConfigKey: + return c.nodeBgpConfigClient.Create(d) default: log.Warn("Attempt to 'Create' using kubernetes backend is not supported.") return nil, errors.ErrorOperationNotSupported{ @@ -348,9 +358,13 @@ func (c *KubeClient) Update(d *model.KVPair) (*model.KVPair, error) { case model.NodeKey: return c.nodeClient.Update(d) case model.GlobalBGPPeerKey: - return c.globalBgpClient.Update(d) + return c.globalBgpPeerClient.Update(d) case model.NodeBGPPeerKey: - return c.nodeBgpClient.Update(d) + return c.nodeBgpPeerClient.Update(d) + case model.GlobalBGPConfigKey: + return c.globalBgpConfigClient.Update(d) + case model.NodeBGPConfigKey: + return c.nodeBgpConfigClient.Update(d) default: log.Warn("Attempt to 'Update' using kubernetes backend is not supported.") return nil, errors.ErrorOperationNotSupported{ @@ -374,9 +388,13 @@ func (c *KubeClient) Apply(d *model.KVPair) (*model.KVPair, error) { case model.NodeKey: return c.nodeClient.Apply(d) case model.GlobalBGPPeerKey: - return c.globalBgpClient.Apply(d) + return c.globalBgpPeerClient.Apply(d) case model.NodeBGPPeerKey: - return c.nodeBgpClient.Apply(d) + return c.nodeBgpPeerClient.Apply(d) + case model.GlobalBGPConfigKey: + return c.globalBgpConfigClient.Apply(d) + case model.NodeBGPConfigKey: + return c.nodeBgpConfigClient.Apply(d) case model.ActiveStatusReportKey, model.LastStatusReportKey, model.HostEndpointStatusKey, model.WorkloadEndpointStatusKey: // Felix periodically reports status to the datastore. 
This isn't supported @@ -403,9 +421,13 @@ func (c *KubeClient) Delete(d *model.KVPair) error { case model.NodeKey: return c.nodeClient.Delete(d) case model.GlobalBGPPeerKey: - return c.globalBgpClient.Delete(d) + return c.globalBgpPeerClient.Delete(d) case model.NodeBGPPeerKey: - return c.nodeBgpClient.Delete(d) + return c.nodeBgpPeerClient.Delete(d) + case model.GlobalBGPConfigKey: + return c.globalBgpConfigClient.Delete(d) + case model.NodeBGPConfigKey: + return c.nodeBgpConfigClient.Delete(d) default: log.Warn("Attempt to 'Delete' using kubernetes backend is not supported.") return errors.ErrorOperationNotSupported{ @@ -436,9 +458,13 @@ func (c *KubeClient) Get(k model.Key) (*model.KVPair, error) { case model.NodeKey: return c.nodeClient.Get(k.(model.NodeKey)) case model.GlobalBGPPeerKey: - return c.globalBgpClient.Get(k) + return c.globalBgpPeerClient.Get(k) case model.NodeBGPPeerKey: - return c.nodeBgpClient.Get(k) + return c.nodeBgpPeerClient.Get(k) + case model.GlobalBGPConfigKey: + return c.globalBgpConfigClient.Get(k) + case model.NodeBGPConfigKey: + return c.nodeBgpConfigClient.Get(k) default: return nil, errors.ErrorOperationNotSupported{ Identifier: k, @@ -467,14 +493,20 @@ func (c *KubeClient) List(l model.ListInterface) ([]*model.KVPair, error) { k, _, err := c.nodeClient.List(l) return k, err case model.GlobalBGPPeerListOptions: - k, _, err := c.globalBgpClient.List(l) + k, _, err := c.globalBgpPeerClient.List(l) return k, err case model.NodeBGPPeerListOptions: - k, _, err := c.nodeBgpClient.List(l) + k, _, err := c.nodeBgpPeerClient.List(l) return k, err case model.GlobalConfigListOptions: k, _, err := c.globalConfigClient.List(l) return k, err + case model.GlobalBGPConfigListOptions: + k, _, err := c.globalBgpConfigClient.List(l) + return k, err + case model.NodeBGPConfigListOptions: + k, _, err := c.nodeBgpConfigClient.List(l) + return k, err default: return []*model.KVPair{}, nil } diff --git a/lib/backend/k8s/resources/globalbgpconfig.go b/lib/backend/k8s/resources/globalbgpconfig.go new file mode 100644 index 000000000..911668627 --- /dev/null +++ b/lib/backend/k8s/resources/globalbgpconfig.go @@ -0,0 +1,96 @@ +// Copyright (c) 2017 Tigera, Inc. All rights reserved. + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package resources + +import ( + "fmt" + "reflect" + "strings" + + "github.com/projectcalico/libcalico-go/lib/backend/k8s/thirdparty" + "github.com/projectcalico/libcalico-go/lib/backend/model" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/kubernetes" + "k8s.io/client-go/rest" +) + +const ( + GlobalBgpConfigResourceName = "GlobalBgpConfigs" + GlobalBgpConfigTPRName = "global-bgp-config.projectcalico.org" +) + +func NewGlobalBGPConfigClient(c *kubernetes.Clientset, r *rest.RESTClient) K8sResourceClient { + return &customK8sResourceClient{ + clientSet: c, + restClient: r, + name: GlobalBgpConfigTPRName, + resource: GlobalBgpConfigResourceName, + description: "Calico Global BGP Configuration", + k8sResourceType: reflect.TypeOf(thirdparty.GlobalBgpConfig{}), + k8sListType: reflect.TypeOf(thirdparty.GlobalBgpConfigList{}), + converter: GlobalBgpConfigConverter{}, + } +} + +// GlobalBgpConfigConverter implements the K8sResourceConverter interface. +type GlobalBgpConfigConverter struct { +} + +func (_ GlobalBgpConfigConverter) ListInterfaceToKey(l model.ListInterface) model.Key { + if name := l.(model.GlobalBGPConfigListOptions).Name; name != "" { + return model.GlobalBGPConfigKey{Name: name} + } + return nil +} + +func (_ GlobalBgpConfigConverter) KeyToName(k model.Key) (string, error) { + return strings.ToLower(k.(model.GlobalBGPConfigKey).Name), nil +} + +func (_ GlobalBgpConfigConverter) NameToKey(name string) (model.Key, error) { + return nil, fmt.Errorf("Mapping of Name to Key is not possible for global BGP config") +} + +func (c GlobalBgpConfigConverter) ToKVPair(r CustomK8sResource) (*model.KVPair, error) { + t := r.(*thirdparty.GlobalBgpConfig) + return &model.KVPair{ + Key: model.GlobalBGPConfigKey{ + Name: t.Spec.Name, + }, + Value: t.Spec.Value, + Revision: t.Metadata.ResourceVersion, + }, nil +} + +func (c GlobalBgpConfigConverter) FromKVPair(kvp *model.KVPair) (CustomK8sResource, error) { + name, err := c.KeyToName(kvp.Key) + if err != nil { + return nil, err + } + tpr := thirdparty.GlobalBgpConfig{ + Metadata: metav1.ObjectMeta{ + Name: name, + }, + Spec: thirdparty.GlobalBgpConfigSpec{ + Name: kvp.Key.(model.GlobalBGPConfigKey).Name, + Value: kvp.Value.(string), + }, + } + if kvp.Revision != nil { + tpr.Metadata.ResourceVersion = kvp.Revision.(string) + } + return &tpr, nil +} diff --git a/lib/backend/k8s/resources/globalbgpconfig_test.go b/lib/backend/k8s/resources/globalbgpconfig_test.go new file mode 100644 index 000000000..69085b3d2 --- /dev/null +++ b/lib/backend/k8s/resources/globalbgpconfig_test.go @@ -0,0 +1,98 @@ +// Copyright (c) 2017 Tigera, Inc. All rights reserved. + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package resources_test + +import ( + "github.com/projectcalico/libcalico-go/lib/backend/k8s/resources" + "github.com/projectcalico/libcalico-go/lib/backend/k8s/thirdparty" + "github.com/projectcalico/libcalico-go/lib/backend/model" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + . "github.com/onsi/ginkgo" + . 
"github.com/onsi/gomega" +) + +var _ = Describe("Global BGP config conversion methods", func() { + + converter := resources.GlobalBgpConfigConverter{} + + // Define some useful test data. + listIncomplete := model.GlobalBGPConfigListOptions{} + + // Compatible set of list, key and name (used for Key to Name conversion) + list1 := model.GlobalBGPConfigListOptions{ + Name: "AbCd", + } + key1 := model.GlobalBGPConfigKey{ + Name: "AbCd", + } + name1 := "abcd" + + // Compatible set of KVPair and Kubernetes Resource. + value1 := "test" + kvp1 := &model.KVPair{ + Key: key1, + Value: value1, + Revision: "rv", + } + res1 := &thirdparty.GlobalBgpConfig{ + Metadata: metav1.ObjectMeta{ + Name: name1, + ResourceVersion: "rv", + }, + Spec: thirdparty.GlobalBgpConfigSpec{ + Name: key1.Name, + Value: value1, + }, + } + + It("should convert an incomplete ListInterface to no Key", func() { + Expect(converter.ListInterfaceToKey(listIncomplete)).To(BeNil()) + }) + + It("should convert a qualified ListInterface to the equivalent Key", func() { + Expect(converter.ListInterfaceToKey(list1)).To(Equal(key1)) + }) + + It("should convert a Key to the equivalent resource name", func() { + n, err := converter.KeyToName(key1) + Expect(err).NotTo(HaveOccurred()) + Expect(n).To(Equal(name1)) + }) + + It("should not convert a resource name to the equivalent Key - this is not possible due to case switching", func() { + _, err := converter.NameToKey("test") + Expect(err).To(HaveOccurred()) + }) + + It("should convert between a KVPair and the equivalent Kubernetes resource", func() { + r, err := converter.FromKVPair(kvp1) + Expect(err).NotTo(HaveOccurred()) + Expect(r.GetObjectMeta().GetName()).To(Equal(res1.Metadata.Name)) + Expect(r.GetObjectMeta().GetResourceVersion()).To(Equal(res1.Metadata.ResourceVersion)) + Expect(r).To(BeAssignableToTypeOf(&thirdparty.GlobalBgpConfig{})) + Expect(r.(*thirdparty.GlobalBgpConfig).Spec).To(Equal(res1.Spec)) + }) + + It("should convert between a Kubernetes resource and the equivalent KVPair", func() { + kvp, err := converter.ToKVPair(res1) + Expect(err).NotTo(HaveOccurred()) + Expect(kvp.Key).To(Equal(kvp1.Key)) + Expect(kvp.Revision).To(Equal(kvp1.Revision)) + Expect(kvp.Value).To(BeAssignableToTypeOf(value1)) + Expect(kvp.Value).To(Equal(kvp1.Value)) + }) +}) diff --git a/lib/backend/k8s/resources/nodebgpconfig.go b/lib/backend/k8s/resources/nodebgpconfig.go new file mode 100644 index 000000000..8d3d7dfb8 --- /dev/null +++ b/lib/backend/k8s/resources/nodebgpconfig.go @@ -0,0 +1,58 @@ +// Copyright (c) 2017 Tigera, Inc. All rights reserved. + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package resources + +import ( + "github.com/projectcalico/libcalico-go/lib/backend/model" + + "k8s.io/client-go/kubernetes" +) + +const ( + perNodeBgpConfigAnnotationNamespace = "config.bgp.projectcalico.org" +) + +func NewNodeBGPConfigClient(c *kubernetes.Clientset) K8sResourceClient { + return NewCustomK8sNodeResourceClient(CustomK8sNodeResourceClientConfig{ + ClientSet: c, + ResourceType: "NodeBGPConfig", + Converter: NodeBGPConfigConverter{}, + Namespace: perNodeBgpConfigAnnotationNamespace, + }) +} + +// NodeBGPConfigConverter implements the CustomK8sNodeResourceConverter interface. +type NodeBGPConfigConverter struct{} + +func (_ NodeBGPConfigConverter) ListInterfaceToNodeAndName(l model.ListInterface) (string, string, error) { + pl := l.(model.NodeBGPConfigListOptions) + if pl.Name == "" { + return pl.Nodename, "", nil + } else { + return pl.Nodename, pl.Name, nil + } +} + +func (_ NodeBGPConfigConverter) KeyToNodeAndName(k model.Key) (string, string, error) { + pk := k.(model.NodeBGPConfigKey) + return pk.Nodename, pk.Name, nil +} + +func (_ NodeBGPConfigConverter) NodeAndNameToKey(node, name string) (model.Key, error) { + return model.NodeBGPConfigKey{ + Nodename: node, + Name: name, + }, nil +} diff --git a/lib/backend/k8s/resources/nodebgpconfig_test.go b/lib/backend/k8s/resources/nodebgpconfig_test.go new file mode 100644 index 000000000..5dd222e32 --- /dev/null +++ b/lib/backend/k8s/resources/nodebgpconfig_test.go @@ -0,0 +1,92 @@ +// Copyright (c) 2017 Tigera, Inc. All rights reserved. + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package resources_test + +import ( + "github.com/projectcalico/libcalico-go/lib/backend/k8s/resources" + "github.com/projectcalico/libcalico-go/lib/backend/model" + + . "github.com/onsi/ginkgo" + . 
"github.com/onsi/gomega" +) + +var _ = Describe("Node BGP config conversion methods", func() { + + converter := resources.NodeBGPConfigConverter{} + + It("should convert an empty ListInterface", func() { + node, name, err := converter.ListInterfaceToNodeAndName( + model.NodeBGPConfigListOptions{}, + ) + Expect(err).To(BeNil()) + Expect(node).To(Equal("")) + Expect(name).To(Equal("")) + }) + + It("should convert a List interface with a Node name only", func() { + node, name, err := converter.ListInterfaceToNodeAndName( + model.NodeBGPConfigListOptions{ + Nodename: "node", + }, + ) + Expect(err).To(BeNil()) + Expect(node).To(Equal("node")) + Expect(name).To(Equal("")) + }) + + It("should convert a List interface with a ConfigIP only", func() { + node, name, err := converter.ListInterfaceToNodeAndName( + model.NodeBGPConfigListOptions{ + Name: "FooFoo", + }, + ) + Expect(err).To(BeNil()) + Expect(node).To(Equal("")) + Expect(name).To(Equal("FooFoo")) + }) + + It("should convert a List interface with node and name", func() { + node, name, err := converter.ListInterfaceToNodeAndName( + model.NodeBGPConfigListOptions{ + Nodename: "nodeX", + Name: "FooBar", + }, + ) + Expect(err).To(BeNil()) + Expect(node).To(Equal("nodeX")) + Expect(name).To(Equal("FooBar")) + }) + + It("should convert a Key with node and name", func() { + node, name, err := converter.KeyToNodeAndName( + model.NodeBGPConfigKey{ + Nodename: "nodeY", + Name: "FooBaz", + }, + ) + Expect(err).To(BeNil()) + Expect(node).To(Equal("nodeY")) + Expect(name).To(Equal("FooBaz")) + }) + + It("should convert a valid node name and resource name to a Key (IPv4)", func() { + key, err := converter.NodeAndNameToKey("nodeA", "FooBaz") + Expect(err).To(BeNil()) + Expect(key).To(Equal(model.NodeBGPConfigKey{ + Nodename: "nodeA", + Name: "FooBaz", + })) + }) +}) diff --git a/lib/backend/k8s/resources/nodebgppeer.go b/lib/backend/k8s/resources/nodebgppeer.go index d31d124bc..9a45b4a57 100644 --- a/lib/backend/k8s/resources/nodebgppeer.go +++ b/lib/backend/k8s/resources/nodebgppeer.go @@ -21,7 +21,7 @@ import ( ) const ( - perNodeBgpPeerAnnotationNamespace = "bgppeer.projectcalico.org" + perNodeBgpPeerAnnotationNamespace = "peer.bgp.projectcalico.org" ) func NewNodeBGPPeerClient(c *kubernetes.Clientset) K8sResourceClient { diff --git a/lib/backend/k8s/thirdparty/globalbgpconfig.go b/lib/backend/k8s/thirdparty/globalbgpconfig.go new file mode 100644 index 000000000..30b57b2c2 --- /dev/null +++ b/lib/backend/k8s/thirdparty/globalbgpconfig.go @@ -0,0 +1,90 @@ +// Copyright (c) 2017 Tigera, Inc. All rights reserved. + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+
+package thirdparty
+
+import (
+ "encoding/json"
+
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/runtime/schema"
+)
+
+type GlobalBgpConfigSpec struct {
+ Name string `json:"name"`
+ Value string `json:"value"`
+}
+
+type GlobalBgpConfig struct {
+ metav1.TypeMeta `json:",inline"`
+ Metadata metav1.ObjectMeta `json:"metadata"`
+
+ Spec GlobalBgpConfigSpec `json:"spec"`
+}
+
+type GlobalBgpConfigList struct {
+ metav1.TypeMeta `json:",inline"`
+ Metadata metav1.ListMeta `json:"metadata"`
+
+ Items []GlobalBgpConfig `json:"items"`
+}
+
+// Required to satisfy Object interface
+func (e *GlobalBgpConfig) GetObjectKind() schema.ObjectKind {
+ return &e.TypeMeta
+}
+
+// Required to satisfy ObjectMetaAccessor interface
+func (e *GlobalBgpConfig) GetObjectMeta() metav1.Object {
+ return &e.Metadata
+}
+
+// Required to satisfy Object interface
+func (el *GlobalBgpConfigList) GetObjectKind() schema.ObjectKind {
+ return &el.TypeMeta
+}
+
+// Required to satisfy ListMetaAccessor interface
+func (el *GlobalBgpConfigList) GetListMeta() metav1.List {
+ return &el.Metadata
+}
+
+// The code below is used only to work around a known problem with third-party
+// resources and ugorji. If/when these issues are resolved, the code below
+// should no longer be required.
+
+type GlobalBgpConfigListCopy GlobalBgpConfigList
+type GlobalBgpConfigCopy GlobalBgpConfig
+
+func (g *GlobalBgpConfig) UnmarshalJSON(data []byte) error {
+ tmp := GlobalBgpConfigCopy{}
+ err := json.Unmarshal(data, &tmp)
+ if err != nil {
+ return err
+ }
+ tmp2 := GlobalBgpConfig(tmp)
+ *g = tmp2
+ return nil
+}
+
+func (l *GlobalBgpConfigList) UnmarshalJSON(data []byte) error {
+ tmp := GlobalBgpConfigListCopy{}
+ err := json.Unmarshal(data, &tmp)
+ if err != nil {
+ return err
+ }
+ tmp2 := GlobalBgpConfigList(tmp)
+ *l = tmp2
+ return nil
+}
diff --git a/lib/backend/model/bgpconfig.go b/lib/backend/model/bgpconfig.go
index 33f057a15..4e8f6a4ec 100644
--- a/lib/backend/model/bgpconfig.go
+++ b/lib/backend/model/bgpconfig.go
@@ -17,13 +17,18 @@ package model
 import (
 "fmt"
 "reflect"
+ "regexp"
+
+ log "github.com/Sirupsen/logrus"
 "github.com/projectcalico/libcalico-go/lib/errors"
 )
 var (
- typeGlobalBGPConfig = rawStringType
- typeHostBGPConfig = rawStringType
+ matchGlobalBGPConfig = regexp.MustCompile("^/?calico/bgp/v1/global/(.+)$")
+ matchNodeBGPConfig = regexp.MustCompile("^/?calico/bgp/v1/host/([^/]+)/(.+)$")
+ typeGlobalBGPConfig = rawStringType
+ typeNodeBGPConfig = rawStringType
 )
 type GlobalBGPConfigKey struct {
@@ -55,37 +60,103 @@ func (key GlobalBGPConfigKey) String() string {
 return fmt.Sprintf("GlobalBGPConfig(name=%s)", key.Name)
 }
-type HostBGPConfigKey struct {
+type GlobalBGPConfigListOptions struct {
+ Name string
+}
+
+func (options GlobalBGPConfigListOptions) defaultPathRoot() string {
+ k := "/calico/bgp/v1/global"
+ if options.Name == "" {
+ return k
+ }
+ k = k + fmt.Sprintf("/%s", options.Name)
+ return k
+}
+
+func (options GlobalBGPConfigListOptions) KeyFromDefaultPath(path string) Key {
+ log.Debugf("Get GlobalBGPConfig key from %s", path)
+ r := matchGlobalBGPConfig.FindAllStringSubmatch(path, -1)
+ if len(r) != 1 {
+ log.Debugf("Didn't match regex")
+ return nil
+ }
+ name := r[0][1]
+ if options.Name != "" && name != options.Name {
+ log.Debugf("Didn't match name %s != %s", options.Name, name)
+ return nil
+ }
+ return GlobalBGPConfigKey{Name: name}
+}
+
+type NodeBGPConfigKey struct {
+ // The hostname for the host specific BGP config
- Hostname string `json:"-" validate:"required,name"`
+ Nodename string `json:"-" validate:"required,name"`
 // The name of the host specific BGP config key.
 Name string `json:"-" validate:"required,name"`
 }
-func (key HostBGPConfigKey) defaultPath() (string, error) {
+func (key NodeBGPConfigKey) defaultPath() (string, error) {
 return key.defaultDeletePath()
 }
-func (key HostBGPConfigKey) defaultDeletePath() (string, error) {
- if key.Hostname == "" {
+func (key NodeBGPConfigKey) defaultDeletePath() (string, error) {
+ if key.Nodename == "" {
 return "", errors.ErrorInsufficientIdentifiers{Name: "node"}
 }
 if key.Name == "" {
 return "", errors.ErrorInsufficientIdentifiers{Name: "name"}
 }
- e := fmt.Sprintf("/calico/bgp/v1/host/%s/%s", key.Hostname, key.Name)
+ e := fmt.Sprintf("/calico/bgp/v1/host/%s/%s", key.Nodename, key.Name)
 return e, nil
 }
-func (key HostBGPConfigKey) defaultDeleteParentPaths() ([]string, error) {
+func (key NodeBGPConfigKey) defaultDeleteParentPaths() ([]string, error) {
 return nil, nil
 }
-func (key HostBGPConfigKey) valueType() reflect.Type {
- return typeHostBGPConfig
+func (key NodeBGPConfigKey) valueType() reflect.Type {
+ return typeNodeBGPConfig
 }
-func (key HostBGPConfigKey) String() string {
- return fmt.Sprintf("HostBGPConfig(node=%s; name=%s)", key.Hostname, key.Name)
+func (key NodeBGPConfigKey) String() string {
+ return fmt.Sprintf("HostBGPConfig(node=%s; name=%s)", key.Nodename, key.Name)
+}
+
+type NodeBGPConfigListOptions struct {
+ Nodename string
+ Name string
+}
+
+func (options NodeBGPConfigListOptions) defaultPathRoot() string {
+ k := "/calico/bgp/v1/host"
+ if options.Nodename == "" {
+ return k
+ }
+ k = k + fmt.Sprintf("/%s", options.Nodename)
+ if options.Name == "" {
+ return k
+ }
+ k = k + fmt.Sprintf("/%s", options.Name)
+ return k
+}
+
+func (options NodeBGPConfigListOptions) KeyFromDefaultPath(path string) Key {
+ log.Debugf("Get NodeBGPConfig key from %s", path)
+ r := matchNodeBGPConfig.FindAllStringSubmatch(path, -1)
+ if len(r) != 1 {
+ log.Debugf("Didn't match regex")
+ return nil
+ }
+ nodename := r[0][1]
+ name := r[0][2]
+ if options.Nodename != "" && nodename != options.Nodename {
+ log.Debugf("Didn't match nodename %s != %s", options.Nodename, nodename)
+ return nil
+ }
+ if options.Name != "" && name != options.Name {
+ log.Debugf("Didn't match name %s != %s", options.Name, name)
+ return nil
+ }
+ return NodeBGPConfigKey{Nodename: nodename, Name: name}
 }
diff --git a/lib/backend/model/felixconfig.go b/lib/backend/model/felixconfig.go
index d20367e13..6ca18d8ea 100644
--- a/lib/backend/model/felixconfig.go
+++ b/lib/backend/model/felixconfig.go
@@ -20,6 +20,7 @@ import (
 "regexp"
 log "github.com/Sirupsen/logrus"
+
 "github.com/projectcalico/libcalico-go/lib/errors"
 )
diff --git a/lib/backend/model/keys.go b/lib/backend/model/keys.go
index a2bdd705c..86cb61d9a 100644
--- a/lib/backend/model/keys.go
+++ b/lib/backend/model/keys.go
@@ -135,7 +135,7 @@ func KeyToDefaultDeletePath(key Key) (string, error) {
 //
 // For example,
 // KeyToDefaultDeletePaths(WorkloadEndpointKey{
-// Hostname: "h",
+// Nodename: "h",
 // OrchestratorID: "o",
 // WorkloadID: "w",
 // EndpointID: "e",
diff --git a/lib/client/bgppeer_e2e_test.go b/lib/client/bgppeer_e2e_test.go
index a190dc5d8..29df463bc 100644
--- a/lib/client/bgppeer_e2e_test.go
+++ b/lib/client/bgppeer_e2e_test.go
@@ -35,8 +35,7 @@ var _ = testutils.E2eDatastoreDescribe("BGPPeer tests", testutils.DatastoreAll, 
 DescribeTable("BGPPeer e2e tests",
 func(meta1, meta2 api.BGPPeerMetadata, spec1, spec2 api.BGPPeerSpec) {
- c := 
testutils.CreateClient(config) - testutils.CleanBGPPeers(c) + c := testutils.CreateCleanClient(config) By("Updating the BGPPeer before it is created") _, outError := c.BGPPeers().Update(&api.BGPPeer{Metadata: meta1, Spec: spec1}) @@ -138,8 +137,7 @@ var _ = testutils.E2eDatastoreDescribe("BGPPeer tests", testutils.DatastoreAll, ) Describe("Checking operations perform data validation", func() { - c := testutils.CreateClient(config) - testutils.CleanBGPPeers(c) + c := testutils.CreateCleanClient(config) var err error valErrorType := reflect.TypeOf(cerrors.ErrorValidation{}) diff --git a/lib/client/config.go b/lib/client/config.go index 8d8561495..d8233b58b 100644 --- a/lib/client/config.go +++ b/lib/client/config.go @@ -91,9 +91,9 @@ func newConfigs(c *Client) ConfigInterface { // When this is enabled, each calico/node instance automatically establishes a // full BGP peering mesh between all nodes that support BGP. func (c *config) SetNodeToNodeMesh(enabled bool) error { - b, _ := json.Marshal(nodeToNodeMesh{Enabled: enabled}) + b, _ := json.Marshal(enabled) _, err := c.c.Backend.Apply(&model.KVPair{ - Key: model.GlobalBGPConfigKey{Name: "node_mesh"}, + Key: model.GlobalBGPConfigKey{Name: "NodeMeshEnabled"}, Value: string(b), }) return err @@ -102,19 +102,19 @@ func (c *config) SetNodeToNodeMesh(enabled bool) error { // GetNodeToNodeMesh returns the current enabled state of the system-wide // node-to-node mesh option. See SetNodeToNodeMesh for details. func (c *config) GetNodeToNodeMesh() (bool, error) { - var n nodeToNodeMesh - if s, err := c.getValue(model.GlobalBGPConfigKey{Name: "node_mesh"}); err != nil { + var n bool + if s, err := c.getValue(model.GlobalBGPConfigKey{Name: "NodeMeshEnabled"}); err != nil { log.Info("Error getting node mesh") return false, err } else if s == nil { log.Info("Return default node to node mesh") return GlobalDefaultNodeToNodeMesh, nil } else if err = json.Unmarshal([]byte(*s), &n); err != nil { - log.Info("Error parsing node to node mesh") + log.WithField("NodeMeshEnabled", *s).Error("Error parsing node to node mesh") return false, err } else { log.Info("Returning configured node to node mesh") - return n.Enabled, nil + return n, nil } } @@ -123,7 +123,7 @@ func (c *config) GetNodeToNodeMesh() (bool, error) { // the node resource. func (c *config) SetGlobalASNumber(asNumber numorstring.ASNumber) error { _, err := c.c.Backend.Apply(&model.KVPair{ - Key: model.GlobalBGPConfigKey{Name: "as_num"}, + Key: model.GlobalBGPConfigKey{Name: "AsNumber"}, Value: asNumber.String(), }) return err @@ -132,7 +132,7 @@ func (c *config) SetGlobalASNumber(asNumber numorstring.ASNumber) error { // SetGlobalASNumber gets the global AS Number used by the BGP agent running // on each node. See SetGlobalASNumber for more details. func (c *config) GetGlobalASNumber() (numorstring.ASNumber, error) { - if s, err := c.getValue(model.GlobalBGPConfigKey{Name: "as_num"}); err != nil { + if s, err := c.getValue(model.GlobalBGPConfigKey{Name: "AsNumber"}); err != nil { return 0, err } else if s == nil { return GlobalDefaultASNumber, nil @@ -205,7 +205,7 @@ func (c *config) SetGlobalLogLevel(level string) error { return c.setLogLevel( level, model.GlobalConfigKey{Name: "LogSeverityScreen"}, - model.GlobalConfigKey{Name: "loglevel"}) + model.GlobalBGPConfigKey{Name: "loglevel"}) } // GetGlobalLogLevel gets the current system global log level. 
@@ -225,13 +225,13 @@ func (c *config) GetGlobalLogLevel() (string, error) { func (c *config) SetNodeLogLevel(node string, level string) error { return c.setLogLevel(level, model.HostConfigKey{Hostname: node, Name: "LogSeverityScreen"}, - model.HostBGPConfigKey{Hostname: node, Name: "loglevel"}) + model.NodeBGPConfigKey{Nodename: node, Name: "loglevel"}) } // SetNodeLogLevelUseGlobal sets the node to use the global log level. func (c *config) SetNodeLogLevelUseGlobal(node string) error { kf := model.HostConfigKey{Hostname: node, Name: "LogSeverityScreen"} - kb := model.HostBGPConfigKey{Hostname: node, Name: "loglevel"} + kb := model.NodeBGPConfigKey{Nodename: node, Name: "loglevel"} err1 := c.deleteConfig(kf) err2 := c.deleteConfig(kb) @@ -348,7 +348,7 @@ func getBGPConfigKey(name, node string) model.Key { if node == "" { return model.GlobalBGPConfigKey{Name: name} } - return model.HostBGPConfigKey{Hostname: node, Name: name} + return model.NodeBGPConfigKey{Nodename: node, Name: name} } // setLogLevel sets the log level fields with the appropriate log string value. @@ -412,9 +412,3 @@ func erroredField(name string, value interface{}) error { } return err } - -// nodeToNodeMesh is a struct containing whether node-to-node mesh is enabled. It can be -// JSON marshalled into the correct structure that is understood by the Calico BGP component. -type nodeToNodeMesh struct { - Enabled bool `json:"enabled"` -} diff --git a/lib/client/config_e2e_test.go b/lib/client/config_e2e_test.go index 87a298537..03b725ee5 100644 --- a/lib/client/config_e2e_test.go +++ b/lib/client/config_e2e_test.go @@ -89,6 +89,89 @@ var _ = testutils.E2eDatastoreDescribe("with config option API tests", testutils Expect(cs).To(Equal(client.ConfigLocationGlobal)) }) + It("should handle node IP in IP tunnel address", func() { + var err error + var ip *net.IP + + By("checking unset value") + ip, err = config.GetNodeIPIPTunnelAddress("node1") + Expect(err).NotTo(HaveOccurred()) + Expect(ip).To(BeNil()) + + By("checking address set to 1.2.3.4") + ipv4 := net.MustParseIP("1.2.3.4") + err = config.SetNodeIPIPTunnelAddress("node1", &ipv4) + Expect(err).NotTo(HaveOccurred()) + + ip, err = config.GetNodeIPIPTunnelAddress("node1") + Expect(err).NotTo(HaveOccurred()) + Expect(*ip).To(Equal(ipv4)) + + By("checking address set to aa::ff") + ipv6 := net.MustParseIP("aa::ff") + err = config.SetNodeIPIPTunnelAddress("node1", &ipv6) + Expect(err).NotTo(HaveOccurred()) + + ip, err = config.GetNodeIPIPTunnelAddress("node1") + Expect(err).NotTo(HaveOccurred()) + Expect(*ip).To(Equal(ipv6)) + + By("checking address set to nil") + err = config.SetNodeIPIPTunnelAddress("node1", nil) + Expect(err).NotTo(HaveOccurred()) + + ip, err = config.GetNodeIPIPTunnelAddress("node1") + Expect(err).NotTo(HaveOccurred()) + Expect(ip).To(BeNil()) + }) + + It("should handle per-node felix config", func() { + var err error + var value string + var set bool + + By("checking unset value") + value, set, err = config.GetFelixConfig("TEST", "NODE") + Expect(err).NotTo(HaveOccurred()) + Expect(value).To(Equal("")) + Expect(set).To(BeFalse()) + + By("setting value and checking it") + err = config.SetFelixConfig("TEST", "NODE", "VALUE") + Expect(err).NotTo(HaveOccurred()) + + value, set, err = config.GetFelixConfig("TEST", "NODE") + Expect(err).NotTo(HaveOccurred()) + Expect(value).To(Equal("VALUE")) + Expect(set).To(BeTrue()) + + By("checking global value is still unset") + value, set, err = config.GetFelixConfig("TEST", "") + Expect(err).NotTo(HaveOccurred()) + 
Expect(value).To(Equal("")) + Expect(set).To(BeFalse()) + + By("unsetting value and checking it") + err = config.UnsetFelixConfig("TEST", "NODE") + Expect(err).NotTo(HaveOccurred()) + + value, set, err = config.GetFelixConfig("TEST", "NODE") + Expect(err).NotTo(HaveOccurred()) + Expect(value).To(Equal("")) + Expect(set).To(BeFalse()) + }) + +}) + +var _ = testutils.E2eDatastoreDescribe("with config option API tests", testutils.DatastoreAll, func(calicoConfig api.CalicoAPIConfig) { + + var config client.ConfigInterface + + BeforeEach(func() { + c := testutils.CreateCleanClient(calicoConfig) + config = c.Config() + }) + It("should handle node to node mesh configuration", func() { var err error var n bool @@ -159,42 +242,6 @@ var _ = testutils.E2eDatastoreDescribe("with config option API tests", testutils Expect(n).To(Equal(false)) }) - It("should handle node IP in IP tunnel address", func() { - var err error - var ip *net.IP - - By("checking unset value") - ip, err = config.GetNodeIPIPTunnelAddress("node1") - Expect(err).NotTo(HaveOccurred()) - Expect(ip).To(BeNil()) - - By("checking address set to 1.2.3.4") - ipv4 := net.MustParseIP("1.2.3.4") - err = config.SetNodeIPIPTunnelAddress("node1", &ipv4) - Expect(err).NotTo(HaveOccurred()) - - ip, err = config.GetNodeIPIPTunnelAddress("node1") - Expect(err).NotTo(HaveOccurred()) - Expect(*ip).To(Equal(ipv4)) - - By("checking address set to aa::ff") - ipv6 := net.MustParseIP("aa::ff") - err = config.SetNodeIPIPTunnelAddress("node1", &ipv6) - Expect(err).NotTo(HaveOccurred()) - - ip, err = config.GetNodeIPIPTunnelAddress("node1") - Expect(err).NotTo(HaveOccurred()) - Expect(*ip).To(Equal(ipv6)) - - By("checking address set to nil") - err = config.SetNodeIPIPTunnelAddress("node1", nil) - Expect(err).NotTo(HaveOccurred()) - - ip, err = config.GetNodeIPIPTunnelAddress("node1") - Expect(err).NotTo(HaveOccurred()) - Expect(ip).To(BeNil()) - }) - It("should handle global felix config", func() { var err error var value string @@ -225,42 +272,6 @@ var _ = testutils.E2eDatastoreDescribe("with config option API tests", testutils Expect(set).To(BeFalse()) }) - It("should handle per-node felix config", func() { - var err error - var value string - var set bool - - By("checking unset value") - value, set, err = config.GetFelixConfig("TEST", "NODE") - Expect(err).NotTo(HaveOccurred()) - Expect(value).To(Equal("")) - Expect(set).To(BeFalse()) - - By("setting value and checking it") - err = config.SetFelixConfig("TEST", "NODE", "VALUE") - Expect(err).NotTo(HaveOccurred()) - - value, set, err = config.GetFelixConfig("TEST", "NODE") - Expect(err).NotTo(HaveOccurred()) - Expect(value).To(Equal("VALUE")) - Expect(set).To(BeTrue()) - - By("checking global value is still unset") - value, set, err = config.GetFelixConfig("TEST", "") - Expect(err).NotTo(HaveOccurred()) - Expect(value).To(Equal("")) - Expect(set).To(BeFalse()) - - By("unsetting value and checking it") - err = config.UnsetFelixConfig("TEST", "NODE") - Expect(err).NotTo(HaveOccurred()) - - value, set, err = config.GetFelixConfig("TEST", "NODE") - Expect(err).NotTo(HaveOccurred()) - Expect(value).To(Equal("")) - Expect(set).To(BeFalse()) - }) - It("should handle global BGP config", func() { var err error var value string @@ -297,16 +308,16 @@ var _ = testutils.E2eDatastoreDescribe("with config option API tests", testutils var set bool By("checking unset value") - value, set, err = config.GetBGPConfig("TEST", "NODE") + value, set, err = config.GetBGPConfig("TEST", "127.0.0.1") Expect(err).NotTo(HaveOccurred()) 
Expect(value).To(Equal("")) Expect(set).To(BeFalse()) By("setting value and checking it") - err = config.SetBGPConfig("TEST", "NODE", "VALUE") + err = config.SetBGPConfig("TEST", "127.0.0.1", "VALUE") Expect(err).NotTo(HaveOccurred()) - value, set, err = config.GetBGPConfig("TEST", "NODE") + value, set, err = config.GetBGPConfig("TEST", "127.0.0.1") Expect(err).NotTo(HaveOccurred()) Expect(value).To(Equal("VALUE")) Expect(set).To(BeTrue()) @@ -318,10 +329,10 @@ var _ = testutils.E2eDatastoreDescribe("with config option API tests", testutils Expect(set).To(BeFalse()) By("unsetting value and checking it") - err = config.UnsetBGPConfig("TEST", "NODE") + err = config.UnsetBGPConfig("TEST", "127.0.0.1") Expect(err).NotTo(HaveOccurred()) - value, set, err = config.GetBGPConfig("TEST", "NODE") + value, set, err = config.GetBGPConfig("TEST", "127.0.0.1") Expect(err).NotTo(HaveOccurred()) Expect(value).To(Equal("")) Expect(set).To(BeFalse()) diff --git a/lib/client/doc.go b/lib/client/doc.go index 3280bf161..166a04735 100644 --- a/lib/client/doc.go +++ b/lib/client/doc.go @@ -53,7 +53,7 @@ new HostEndpoints interface and call the appropriate methods on that interface. hostEndpoint, err := hostEndpoints.Create(&api.HostEndpoint{ Metadata: api.HostEndpointMetadata{ Name: "endpoint1", - Hostname: "hostname1", + Nodename: "hostname1", }, Spec: api.HostEndpointSpec{ InterfaceName: "eth0" @@ -66,7 +66,7 @@ new HostEndpoints interface and call the appropriate methods on that interface. hostEndpoint, err = hostEndpoints.Update(&api.HostEndpoint{ Metadata: api.HostEndpointMetadata{ Name: "endpoint1", - Hostname: "hostname1", + Nodename: "hostname1", }, Spec: api.HostEndpointSpec{ InterfaceName: "eth0", @@ -79,7 +79,7 @@ new HostEndpoints interface and call the appropriate methods on that interface. hostEndpoint, err = hostEndpoints.Apply(&api.HostEndpoint{ Metadata: api.HostEndpointMetadata{ Name: "endpoint1", - Hostname: "hostname1", + Nodename: "hostname1", }, Spec: api.HostEndpointSpec{ InterfaceName: "eth1", @@ -92,7 +92,7 @@ new HostEndpoints interface and call the appropriate methods on that interface. // unique identifiers does not exist. hostEndpoint, err = hostEndpoints.Delete(api.HostEndpointMetadata{ Name: "endpoint1", - Hostname: "hostname1", + Nodename: "hostname1", }) // Get a hostEndpoint. All Get() methods return an error of type @@ -100,7 +100,7 @@ new HostEndpoints interface and call the appropriate methods on that interface. // unique identifiers does not exist. hostEndpoint, err = hostEndpoints.Get(api.HostEndpointMetadata{ Name: "endpoint1", - Hostname: "hostname1", + Nodename: "hostname1", }) // List all hostEndpoints. 
All List() methods take a (sub-)set of the resource diff --git a/lib/client/ippool_e2e_test.go b/lib/client/ippool_e2e_test.go index c21227dbb..304dc2d37 100644 --- a/lib/client/ippool_e2e_test.go +++ b/lib/client/ippool_e2e_test.go @@ -54,8 +54,7 @@ var _ = testutils.E2eDatastoreDescribe("IPPool e2e tests", testutils.DatastoreAl DescribeTable("IPPool e2e tests", func(meta1, meta2 api.IPPoolMetadata, spec1, spec2 api.IPPoolSpec) { - c := testutils.CreateClient(apiConfig) - testutils.CleanIPPools(c) + c := testutils.CreateCleanClient(apiConfig) By("Updating the pool before it is created") _, outError := c.IPPools().Update(&api.IPPool{Metadata: meta1, Spec: spec1}) @@ -260,8 +259,7 @@ var _ = testutils.E2eDatastoreDescribe("IPPool e2e tests", testutils.DatastoreAl ) Describe("Checking operations perform data validation", func() { - c := testutils.CreateClient(apiConfig) - testutils.CleanIPPools(c) + c := testutils.CreateCleanClient(apiConfig) var err error valErrorType := reflect.TypeOf(cerrors.ErrorValidation{}) diff --git a/lib/errors/errors.go b/lib/errors/errors.go index 59ec4f33d..327baf107 100644 --- a/lib/errors/errors.go +++ b/lib/errors/errors.go @@ -126,3 +126,31 @@ type ErrorResourceUpdateConflict struct { func (e ErrorResourceUpdateConflict) Error() string { return fmt.Sprintf("update conflict: '%s'", e.Identifier) } + +// UpdateErrorIdentifier modifies the supplied error to use the new resource +// identifier. +func UpdateErrorIdentifier(err error, id interface{}) error { + if err == nil { + return nil + } + + switch e := err.(type) { + case ErrorDatastoreError: + e.Identifier = id + err = e + case ErrorResourceDoesNotExist: + e.Identifier = id + err = e + case ErrorOperationNotSupported: + e.Identifier = id + err = e + case ErrorResourceAlreadyExists: + e.Identifier = id + err = e + case ErrorResourceUpdateConflict: + e.Identifier = id + err = e + } + return err +} + diff --git a/lib/testutils/client.go b/lib/testutils/client.go index b86737419..225dd6d0d 100644 --- a/lib/testutils/client.go +++ b/lib/testutils/client.go @@ -29,6 +29,7 @@ import ( "os/exec" "strings" + "github.com/projectcalico/libcalico-go/lib/backend/model" "golang.org/x/net/context" ) @@ -76,26 +77,6 @@ func CreateClient(config api.CalicoAPIConfig) *client.Client { return c } -func CleanIPPools(c *client.Client) { - if pools, err := c.IPPools().List(api.IPPoolMetadata{}); err == nil { - for _, pool := range pools.Items { - if err := c.IPPools().Delete(pool.Metadata); err != nil { - panic(err) - } - } - } -} - -func CleanBGPPeers(c *client.Client) { - if peers, err := c.BGPPeers().List(api.BGPPeerMetadata{}); err == nil { - for _, peer := range peers.Items { - if err := c.BGPPeers().Delete(peer.Metadata); err != nil { - panic(err) - } - } - } -} - func CleanDatastore(config api.CalicoAPIConfig) { var err error @@ -103,6 +84,7 @@ func CleanDatastore(config api.CalicoAPIConfig) { switch config.Spec.DatastoreType { case api.EtcdV2: + // To clean etcd, just create a new etcd client and delete the entire calico tree. cfg := etcdclient.Config{ Endpoints: []string{config.Spec.EtcdScheme + "://" + config.Spec.EtcdAuthority}, } @@ -117,6 +99,28 @@ func CleanDatastore(config api.CalicoAPIConfig) { } else { log.Errorf("Can't create etcd backend %v", err) } + case api.Kubernetes: + // To clean Kuberenetes, we create a Client and use the backend interface to + // list and remove each of the resource types currently supported by the KDD. 
We + // can't remove everything though because some of the resources are owned by Kubernetes. + backend := CreateClient(config).Backend + + types := []model.ListInterface{ + model.GlobalBGPConfigListOptions{}, + model.NodeBGPConfigListOptions{}, + model.GlobalBGPPeerListOptions{}, + model.NodeBGPPeerListOptions{}, + model.GlobalConfigListOptions{}, + model.IPPoolListOptions{}, + } + for _, t := range types { + rs, _ := backend.List(t) + for _, r := range rs { + log.WithField("Key", r.Key).Info("Deleting from KDD") + backend.Delete(r) + } + } + default: err = errors.New(fmt.Sprintf("Unknown datastore type: %v", config.Spec.DatastoreType)) } From 94a8cb497fedcb3909248f6535bb61fc7aee4bef Mon Sep 17 00:00:00 2001 From: Rob Brockbank Date: Wed, 5 Jul 2017 09:01:24 -0700 Subject: [PATCH 06/11] Expose interface to convert k8s node resource to KVPairs --- lib/backend/compat/compat.go | 1 - lib/backend/k8s/resources/client.go | 10 +++++++ .../k8s/resources/customnoderesource.go | 29 +++++++++++++++---- lib/backend/k8s/resources/node.go | 2 +- lib/backend/k8s/resources/nodebgpconfig.go | 2 +- lib/backend/k8s/resources/nodebgppeer.go | 2 +- lib/errors/errors.go | 1 - 7 files changed, 37 insertions(+), 10 deletions(-) diff --git a/lib/backend/compat/compat.go b/lib/backend/compat/compat.go index 0f50504e1..27e3156ab 100644 --- a/lib/backend/compat/compat.go +++ b/lib/backend/compat/compat.go @@ -761,7 +761,6 @@ func toDatastoreGlobalBGPConfig(d model.KVPair) *model.KVPair { return &d } - // fromDatastoreGlobalBGPConfig modifies the Global BGP Config KVPair from the format required in the // datastore (for back-compatibility with what is expected in teh etcdv2 datastore driver). func fromDatastoreGlobalBGPConfig(d model.KVPair) *model.KVPair { diff --git a/lib/backend/k8s/resources/client.go b/lib/backend/k8s/resources/client.go index be13c987b..049d3dce5 100644 --- a/lib/backend/k8s/resources/client.go +++ b/lib/backend/k8s/resources/client.go @@ -16,6 +16,8 @@ package resources import ( "github.com/projectcalico/libcalico-go/lib/backend/model" + + apiv1 "k8s.io/client-go/pkg/api/v1" ) // K8sResourceClient is the interface to the k8s datastore for CRUD operations @@ -63,3 +65,11 @@ type K8sResourceClient interface { // any ready to be used. EnsureInitialized() error } + +// K8sNodeResourceClient extends the K8sResourceClient to add a helper method to +// extract resources from the supplied K8s Node. This convenience interface is +// expected to be removed in a future libcalico-go release. +type K8sNodeResourceClient interface { + K8sResourceClient + ExtractResourcesFromNode(node *apiv1.Node) ([]*model.KVPair, error) +} diff --git a/lib/backend/k8s/resources/customnoderesource.go b/lib/backend/k8s/resources/customnoderesource.go index 5b1799992..49aa00873 100644 --- a/lib/backend/k8s/resources/customnoderesource.go +++ b/lib/backend/k8s/resources/customnoderesource.go @@ -61,15 +61,27 @@ type CustomK8sNodeResourceClientConfig struct { } // NewCustomK8sNodeResourceClient creates a new per-node K8sResourceClient. 
-func NewCustomK8sNodeResourceClient(config CustomK8sNodeResourceClientConfig) K8sResourceClient { - return &retryWrapper{ - client: &customK8sNodeResourceClient{ - CustomK8sNodeResourceClientConfig: config, - annotationKeyPrefix: config.Namespace + "/", +func NewCustomK8sNodeResourceClient(config CustomK8sNodeResourceClientConfig) K8sNodeResourceClient { + return &nodeRetryWrapper{ + retryWrapper: &retryWrapper{ + client: &customK8sNodeResourceClient{ + CustomK8sNodeResourceClientConfig: config, + annotationKeyPrefix: config.Namespace + "/", + }, }, } } +// nodeRetryWrapper extends the retryWrapper to include the ExtractResourcesFromNode +// method. +type nodeRetryWrapper struct { + *retryWrapper +} + +func (r *nodeRetryWrapper) ExtractResourcesFromNode(node *apiv1.Node) ([]*model.KVPair, error) { + return r.client.(*customK8sNodeResourceClient).ExtractResourcesFromNode(node) +} + // customK8sNodeResourceClient implements the K8sResourceClient interface. It // should only be created using newCustomK8sNodeResourceClientConfig since that // ensures it is wrapped with a retryWrapper. @@ -433,3 +445,10 @@ func (c *customK8sNodeResourceClient) extractResourcesFromAnnotation(node *apiv1 return kvps, nil } + +// ExtractResourcesFromNode returns the resources stored in the Node configuration. +// +// This convenience method is expected to be removed in a future libcalico-go release. +func (c *customK8sNodeResourceClient) ExtractResourcesFromNode(node *apiv1.Node) ([]*model.KVPair, error) { + return c.extractResourcesFromAnnotation(node, "") +} diff --git a/lib/backend/k8s/resources/node.go b/lib/backend/k8s/resources/node.go index da5be96da..e8c74cef0 100644 --- a/lib/backend/k8s/resources/node.go +++ b/lib/backend/k8s/resources/node.go @@ -64,7 +64,7 @@ func (c *nodeClient) Update(kvp *model.KVPair) (*model.KVPair, error) { err = K8sErrorToCalico(err, kvp.Key) // If this is an update conflict and we didn't specify a revision in the - // request, indicate to the retryWrapper that we can retry the action. + // request, indicate to the nodeRetryWrapper that we can retry the action. 
if _, ok := err.(errors.ErrorResourceUpdateConflict); ok && kvp.Revision == nil { err = retryError{err: err} } diff --git a/lib/backend/k8s/resources/nodebgpconfig.go b/lib/backend/k8s/resources/nodebgpconfig.go index 8d3d7dfb8..ad6638416 100644 --- a/lib/backend/k8s/resources/nodebgpconfig.go +++ b/lib/backend/k8s/resources/nodebgpconfig.go @@ -24,7 +24,7 @@ const ( perNodeBgpConfigAnnotationNamespace = "config.bgp.projectcalico.org" ) -func NewNodeBGPConfigClient(c *kubernetes.Clientset) K8sResourceClient { +func NewNodeBGPConfigClient(c *kubernetes.Clientset) K8sNodeResourceClient { return NewCustomK8sNodeResourceClient(CustomK8sNodeResourceClientConfig{ ClientSet: c, ResourceType: "NodeBGPConfig", diff --git a/lib/backend/k8s/resources/nodebgppeer.go b/lib/backend/k8s/resources/nodebgppeer.go index 9a45b4a57..7760f4ba5 100644 --- a/lib/backend/k8s/resources/nodebgppeer.go +++ b/lib/backend/k8s/resources/nodebgppeer.go @@ -24,7 +24,7 @@ const ( perNodeBgpPeerAnnotationNamespace = "peer.bgp.projectcalico.org" ) -func NewNodeBGPPeerClient(c *kubernetes.Clientset) K8sResourceClient { +func NewNodeBGPPeerClient(c *kubernetes.Clientset) K8sNodeResourceClient { return NewCustomK8sNodeResourceClient(CustomK8sNodeResourceClientConfig{ ClientSet: c, ResourceType: "NodeBGPPeer", diff --git a/lib/errors/errors.go b/lib/errors/errors.go index 327baf107..6aa1c2dfa 100644 --- a/lib/errors/errors.go +++ b/lib/errors/errors.go @@ -153,4 +153,3 @@ func UpdateErrorIdentifier(err error, id interface{}) error { } return err } - From c513ba37d8600cf472acb9c01425e93039151b49 Mon Sep 17 00:00:00 2001 From: Casey Davenport Date: Wed, 12 Jul 2017 14:55:05 -0700 Subject: [PATCH 07/11] Add release note to PR template --- .github/PULL_REQUEST_TEMPLATE.md | 16 ++++++++++++++-- 1 file changed, 14 insertions(+), 2 deletions(-) diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md index 720bcf199..d2503d6ab 100644 --- a/.github/PULL_REQUEST_TEMPLATE.md +++ b/.github/PULL_REQUEST_TEMPLATE.md @@ -1,6 +1,6 @@ ## Description -A few sentences describing the overall goals of the pull request's commits. -Please include +A few sentences describing the overall goals of the pull request's commits. +Please include - the type of fix - (e.g. bug fix, new feature, documentation) - some details on _why_ this PR should be merged - the details of the testing you've done on it (both manual and automated) @@ -10,4 +10,16 @@ Please include ## Todos - [ ] Tests - [ ] Documentation +- [ ] Release note +## Release Note + + +```release-note +None required +``` From d8b4cb2482032bd53c2ea7022dfb8052fb83e72a Mon Sep 17 00:00:00 2001 From: Shaun Crampton Date: Thu, 6 Jul 2017 15:32:05 +0100 Subject: [PATCH 08/11] Add a Nets field to our Rule struct, deprecate the Net field. 
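This patch adds list-valued Nets/NotNets fields alongside the existing Net/NotNet fields, which remain accepted but are deprecated; rules read back from the datastore are returned in the list form.

A minimal usage sketch (illustrative only, not part of the diff below; it assumes just the api and net packages this patch touches) showing a rule built with the new Nets field and read back through the GetNets helper, which folds the deprecated Net field into the list form:

```go
package main

import (
	"fmt"

	"github.com/projectcalico/libcalico-go/lib/api"
	"github.com/projectcalico/libcalico-go/lib/net"
)

func main() {
	cidrA := net.MustParseCIDR("10.0.0.0/24")
	cidrB := net.MustParseCIDR("192.168.0.0/16")

	// New-style rule: several source CIDRs in a single rule via Nets.
	r := api.Rule{
		Action: "allow",
		Source: api.EntityRule{
			Nets: []*net.IPNet{&cidrA, &cidrB},
		},
	}

	// GetNets returns Nets, or wraps the deprecated Net field in a slice,
	// so callers only have to handle the list form.
	for _, n := range r.Source.GetNets() {
		fmt.Println(n.String())
	}
}
```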
--- lib/api/rule.go | 42 +++++- lib/backend/backend_suite_test.go | 14 ++ lib/backend/model/rule.go | 193 ++++++++++++++++---------- lib/client/policy_e2e_test.go | 33 ++++- lib/client/profile_e2e_test.go | 28 +++- lib/converter/converter_suite_test.go | 27 ++++ lib/converter/rule.go | 42 +++++- lib/converter/rule_test.go | 127 +++++++++++++++++ lib/testutils/rules.go | 48 +++++++ lib/validator/validator.go | 53 +++++-- lib/validator/validator_test.go | 108 +++++++++++++- 11 files changed, 610 insertions(+), 105 deletions(-) create mode 100644 lib/converter/converter_suite_test.go create mode 100644 lib/converter/rule_test.go diff --git a/lib/api/rule.go b/lib/api/rule.go index fe1ad9a5a..807d2cb63 100644 --- a/lib/api/rule.go +++ b/lib/api/rule.go @@ -1,4 +1,4 @@ -// Copyright (c) 2016 Tigera, Inc. All rights reserved. +// Copyright (c) 2016-2017 Tigera, Inc. All rights reserved. // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -75,7 +75,7 @@ type ICMPFields struct { // to a particular entity (that is either the source or destination). // // A source EntityRule matches the source endpoint and originating traffic. -// A desination EntityRule matches the destination endpoint and terminating traffic. +// A destination EntityRule matches the destination endpoint and terminating traffic. type EntityRule struct { // Tag is an optional field that restricts the rule to only apply to traffic that // originates from (or terminates at) endpoints that have profiles with the given tag @@ -84,8 +84,13 @@ type EntityRule struct { // Net is an optional field that restricts the rule to only apply to traffic that // originates from (or terminates at) IP addresses in the given subnet. + // Deprecated: superseded by the Nets field. Net *net.IPNet `json:"net,omitempty" validate:"omitempty"` + // Nets is an optional field that restricts the rule to only apply to traffic that + // originates from (or terminates at) IP addresses in any of the given subnets. + Nets []*net.IPNet `json:"nets,omitempty" validate:"omitempty"` + // Selector is an optional field that contains a selector expression (see Policy for // sample syntax). Only traffic that originates from (terminates at) endpoints matching // the selector will be matched. @@ -115,9 +120,15 @@ type EntityRule struct { // NotTag is the negated version of the Tag field. NotTag string `json:"notTag,omitempty" validate:"omitempty,tag"` - // NotNet is the negated version of the Net field. + // NotNet is an optional field that restricts the rule to only apply to traffic that + // does not originate from (or terminate at) an IP address in the given subnet. + // Deprecated: superseded by NotNets. NotNet *net.IPNet `json:"notNet,omitempty" validate:"omitempty"` + // NotNets is an optional field that restricts the rule to only apply to traffic that + // does not originate from (or terminate at) an IP address in any of the given subnets. + NotNets []*net.IPNet `json:"nets,omitempty" validate:"omitempty"` + // NotSelector is the negated version of the Selector field. See Selector field for // subtleties with negated selectors. NotSelector string `json:"notSelector,omitempty" validate:"omitempty,selector"` @@ -128,3 +139,28 @@ type EntityRule struct { // Protocol match in the Rule to be set to "tcp" or "udp". 
 	NotPorts []numorstring.Port `json:"notPorts,omitempty" validate:"omitempty,dive"`
 }
+
+func combineNets(n *net.IPNet, nets []*net.IPNet) []*net.IPNet {
+	if n == nil {
+		return nets
+	}
+	if len(nets) == 0 {
+		return []*net.IPNet{n}
+	}
+	combined := make([]*net.IPNet, len(nets)+1)
+	copy(combined, nets)
+	combined[len(combined)-1] = n
+	return combined
+}
+
+// GetNets returns either r.Nets or a slice containing r.Net. It is useful for unifying the
+// two representations.
+func (r EntityRule) GetNets() []*net.IPNet {
+	return combineNets(r.Net, r.Nets)
+}
+
+// GetNotNets returns either r.NotNets or a slice containing r.NotNet. It is useful for unifying the
+// two representations.
+func (r EntityRule) GetNotNets() []*net.IPNet {
+	return combineNets(r.NotNet, r.NotNets)
+}
diff --git a/lib/backend/backend_suite_test.go b/lib/backend/backend_suite_test.go
index dcda2a747..d2aca4bf5 100644
--- a/lib/backend/backend_suite_test.go
+++ b/lib/backend/backend_suite_test.go
@@ -1,3 +1,17 @@
+// Copyright (c) 2017 Tigera, Inc. All rights reserved.
+
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
 package backend_test
 
 import (
diff --git a/lib/backend/model/rule.go b/lib/backend/model/rule.go
index 5691670df..dd4ebf478 100644
--- a/lib/backend/model/rule.go
+++ b/lib/backend/model/rule.go
@@ -1,4 +1,4 @@
-// Copyright (c) 2016 Tigera, Inc. All rights reserved.
+// Copyright (c) 2016-2017 Tigera, Inc. All rights reserved.
 
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
@@ -38,25 +38,66 @@ type Rule struct { SrcTag string `json:"src_tag,omitempty" validate:"omitempty,tag"` SrcNet *net.IPNet `json:"src_net,omitempty" validate:"omitempty"` + SrcNets []*net.IPNet `json:"src_nets,omitempty" validate:"omitempty"` SrcSelector string `json:"src_selector,omitempty" validate:"omitempty,selector"` SrcPorts []numorstring.Port `json:"src_ports,omitempty" validate:"omitempty"` DstTag string `json:"dst_tag,omitempty" validate:"omitempty,tag"` DstSelector string `json:"dst_selector,omitempty" validate:"omitempty,selector"` DstNet *net.IPNet `json:"dst_net,omitempty" validate:"omitempty"` + DstNets []*net.IPNet `json:"dst_nets,omitempty" validate:"omitempty"` DstPorts []numorstring.Port `json:"dst_ports,omitempty" validate:"omitempty"` NotSrcTag string `json:"!src_tag,omitempty" validate:"omitempty,tag"` NotSrcNet *net.IPNet `json:"!src_net,omitempty" validate:"omitempty"` + NotSrcNets []*net.IPNet `json:"!src_nets,omitempty" validate:"omitempty"` NotSrcSelector string `json:"!src_selector,omitempty" validate:"omitempty,selector"` NotSrcPorts []numorstring.Port `json:"!src_ports,omitempty" validate:"omitempty"` NotDstTag string `json:"!dst_tag,omitempty" validate:"omitempty"` NotDstSelector string `json:"!dst_selector,omitempty" validate:"omitempty,selector"` NotDstNet *net.IPNet `json:"!dst_net,omitempty" validate:"omitempty"` + NotDstNets []*net.IPNet `json:"!dst_nets,omitempty" validate:"omitempty"` NotDstPorts []numorstring.Port `json:"!dst_ports,omitempty" validate:"omitempty"` LogPrefix string `json:"log_prefix,omitempty" validate:"omitempty"` } +func combineNets(n *net.IPNet, nets []*net.IPNet) []*net.IPNet { + if n == nil { + return nets + } + if len(nets) == 0 { + return []*net.IPNet{n} + } + var combination = make([]*net.IPNet, len(nets)+1) + copy(combination, nets) + combination[len(nets)] = n + return combination +} + +func (r Rule) AllSrcNets() []*net.IPNet { + return combineNets(r.SrcNet, r.SrcNets) +} + +func (r Rule) AllDstNets() []*net.IPNet { + return combineNets(r.DstNet, r.DstNets) +} + +func (r Rule) AllNotSrcNets() []*net.IPNet { + return combineNets(r.NotSrcNet, r.NotSrcNets) +} + +func (r Rule) AllNotDstNets() []*net.IPNet { + return combineNets(r.NotDstNet, r.NotDstNets) +} + +func joinNets(nets []*net.IPNet) string { + parts := make([]string, len(nets)) + for i, n := range nets { + parts[i] = n.String() + } + return strings.Join(parts, ",") +} + func (r Rule) String() string { parts := make([]string, 0) // Action. @@ -87,83 +128,93 @@ func (r Rule) String() string { parts = append(parts, "!code", strconv.Itoa(*r.NotICMPCode)) } - // Source attributes. - fromParts := make([]string, 0) - if len(r.SrcPorts) > 0 { - srcPorts := make([]string, len(r.SrcPorts)) - for ii, port := range r.SrcPorts { - srcPorts[ii] = port.String() + { + // Source attributes. New block ensures that fromParts goes out-of-scope before + // we calculate toParts. This prevents copy/paste errors. 
+ fromParts := make([]string, 0) + if len(r.SrcPorts) > 0 { + srcPorts := make([]string, len(r.SrcPorts)) + for ii, port := range r.SrcPorts { + srcPorts[ii] = port.String() + } + fromParts = append(fromParts, "ports", strings.Join(srcPorts, ",")) } - fromParts = append(fromParts, "ports", strings.Join(srcPorts, ",")) - } - if r.SrcTag != "" { - fromParts = append(fromParts, "tag", r.SrcTag) - } - if r.SrcSelector != "" { - fromParts = append(fromParts, "selector", fmt.Sprintf("%#v", r.SrcSelector)) - } - if r.SrcNet != nil { - fromParts = append(fromParts, "cidr", r.SrcNet.String()) - } - if len(r.NotSrcPorts) > 0 { - notSrcPorts := make([]string, len(r.NotSrcPorts)) - for ii, port := range r.NotSrcPorts { - notSrcPorts[ii] = port.String() + if r.SrcTag != "" { + fromParts = append(fromParts, "tag", r.SrcTag) + } + if r.SrcSelector != "" { + fromParts = append(fromParts, "selector", fmt.Sprintf("%#v", r.SrcSelector)) + } + srcNets := r.AllSrcNets() + if len(srcNets) != 0 { + fromParts = append(fromParts, "cidr", joinNets(srcNets)) + } + if len(r.NotSrcPorts) > 0 { + notSrcPorts := make([]string, len(r.NotSrcPorts)) + for ii, port := range r.NotSrcPorts { + notSrcPorts[ii] = port.String() + } + fromParts = append(fromParts, "!ports", strings.Join(notSrcPorts, ",")) + } + if r.NotSrcTag != "" { + fromParts = append(fromParts, "!tag", r.NotSrcTag) + } + if r.NotSrcSelector != "" { + fromParts = append(fromParts, "!selector", fmt.Sprintf("%#v", r.NotSrcSelector)) + } + notSrcNets := r.AllNotSrcNets() + if len(notSrcNets) != 0 { + fromParts = append(fromParts, "!cidr", joinNets(notSrcNets)) } - fromParts = append(fromParts, "!ports", strings.Join(notSrcPorts, ",")) - } - if r.NotSrcTag != "" { - fromParts = append(fromParts, "!tag", r.NotSrcTag) - } - if r.NotSrcSelector != "" { - fromParts = append(fromParts, "!selector", fmt.Sprintf("%#v", r.NotSrcSelector)) - } - if r.NotSrcNet != nil { - fromParts = append(fromParts, "!cidr", r.NotSrcNet.String()) - } - // Destination attributes. - toParts := make([]string, 0) - if len(r.DstPorts) > 0 { - DstPorts := make([]string, len(r.DstPorts)) - for ii, port := range r.DstPorts { - DstPorts[ii] = port.String() + if len(fromParts) > 0 { + parts = append(parts, "from") + parts = append(parts, fromParts...) } - toParts = append(toParts, "ports", strings.Join(DstPorts, ",")) } - if r.DstTag != "" { - toParts = append(toParts, "tag", r.DstTag) - } - if r.DstSelector != "" { - toParts = append(toParts, "selector", fmt.Sprintf("%#v", r.DstSelector)) - } - if r.DstNet != nil { - toParts = append(toParts, "cidr", r.DstNet.String()) - } - if len(r.NotDstPorts) > 0 { - NotDstPorts := make([]string, len(r.NotDstPorts)) - for ii, port := range r.NotDstPorts { - NotDstPorts[ii] = port.String() + + { + // Destination attributes. 
+ toParts := make([]string, 0) + if len(r.DstPorts) > 0 { + DstPorts := make([]string, len(r.DstPorts)) + for ii, port := range r.DstPorts { + DstPorts[ii] = port.String() + } + toParts = append(toParts, "ports", strings.Join(DstPorts, ",")) + } + if r.DstTag != "" { + toParts = append(toParts, "tag", r.DstTag) + } + if r.DstSelector != "" { + toParts = append(toParts, "selector", fmt.Sprintf("%#v", r.DstSelector)) + } + dstNets := r.AllDstNets() + if len(dstNets) != 0 { + toParts = append(toParts, "cidr", joinNets(dstNets)) + } + if len(r.NotDstPorts) > 0 { + notDstPorts := make([]string, len(r.NotDstPorts)) + for ii, port := range r.NotDstPorts { + notDstPorts[ii] = port.String() + } + toParts = append(toParts, "!ports", strings.Join(notDstPorts, ",")) + } + if r.NotDstTag != "" { + toParts = append(toParts, "!tag", r.NotDstTag) + } + if r.NotDstSelector != "" { + toParts = append(toParts, "!selector", fmt.Sprintf("%#v", r.NotDstSelector)) + } + notDstNets := r.AllNotDstNets() + if len(notDstNets) != 0 { + toParts = append(toParts, "!cidr", joinNets(notDstNets)) } - toParts = append(toParts, "!ports", strings.Join(NotDstPorts, ",")) - } - if r.NotDstTag != "" { - toParts = append(toParts, "!tag", r.NotDstTag) - } - if r.NotDstSelector != "" { - toParts = append(toParts, "!selector", fmt.Sprintf("%#v", r.NotDstSelector)) - } - if r.NotDstNet != nil { - toParts = append(toParts, "!cidr", r.NotDstNet.String()) - } - if len(fromParts) > 0 { - parts = append(parts, "from") - parts = append(parts, fromParts...) - } - if len(toParts) > 0 { - parts = append(parts, "to") - parts = append(parts, toParts...) + if len(toParts) > 0 { + parts = append(parts, "to") + parts = append(parts, toParts...) + } } return strings.Join(parts, " ") diff --git a/lib/client/policy_e2e_test.go b/lib/client/policy_e2e_test.go index 15986a1e4..11bf3d6d2 100644 --- a/lib/client/policy_e2e_test.go +++ b/lib/client/policy_e2e_test.go @@ -55,6 +55,14 @@ var policySpec1 = api.PolicySpec{ Selector: "thing == 'value'", } +// When reading back, the rules should have been updated to the newer format. +var policySpec1AfterRead = api.PolicySpec{ + Order: &order1, + IngressRules: []api.Rule{testutils.InRule1AfterRead, testutils.InRule2AfterRead}, + EgressRules: []api.Rule{testutils.EgressRule1AfterRead, testutils.EgressRule2AfterRead}, + Selector: "thing == 'value'", +} + var policySpec2 = api.PolicySpec{ Order: &order2, IngressRules: []api.Rule{testutils.InRule2, testutils.InRule1}, @@ -63,10 +71,19 @@ var policySpec2 = api.PolicySpec{ DoNotTrack: true, } +// When reading back, the rules should have been updated to the newer format. 
+var policySpec2AfterRead = api.PolicySpec{ + Order: &order2, + IngressRules: []api.Rule{testutils.InRule2AfterRead, testutils.InRule1AfterRead}, + EgressRules: []api.Rule{testutils.EgressRule2AfterRead, testutils.EgressRule1AfterRead}, + Selector: "thing2 == 'value2'", + DoNotTrack: true, +} + var _ = testutils.E2eDatastoreDescribe("Policy tests", testutils.DatastoreEtcdV2, func(config api.CalicoAPIConfig) { DescribeTable("Policy e2e tests", - func(meta1, meta2 api.PolicyMetadata, spec1, spec2 api.PolicySpec) { + func(meta1, meta2 api.PolicyMetadata, spec1, spec2, spec1AfterRead, spec2AfterRead api.PolicySpec) { c := testutils.CreateCleanClient(config) By("Updating the policy before it is created") _, outError := c.Policies().Update(&api.Policy{Metadata: meta1, Spec: spec1}) @@ -95,8 +112,8 @@ var _ = testutils.E2eDatastoreDescribe("Policy tests", testutils.DatastoreEtcdV2 // Should match spec1 & outPolicy1 and outPolicy2 & spec2 and errors to be nil. Expect(outError1).NotTo(HaveOccurred()) Expect(outError2).NotTo(HaveOccurred()) - Expect(outPolicy1.Spec).To(Equal(spec1)) - Expect(outPolicy2.Spec).To(Equal(spec2)) + Expect(outPolicy1.Spec).To(Equal(spec1AfterRead)) + Expect(outPolicy2.Spec).To(Equal(spec2AfterRead)) By("Update, Get and compare") @@ -109,7 +126,7 @@ var _ = testutils.E2eDatastoreDescribe("Policy tests", testutils.DatastoreEtcdV2 // Assert the Spec for policy with meta1 matches spec2 and no error. Expect(outError1).NotTo(HaveOccurred()) - Expect(outPolicy1.Spec).To(Equal(spec2)) + Expect(outPolicy1.Spec).To(Equal(spec2AfterRead)) // Assert the Metadata for policy with meta1 matches meta1. Expect(outPolicy1.Metadata).To(Equal(meta1)) @@ -186,6 +203,8 @@ var _ = testutils.E2eDatastoreDescribe("Policy tests", testutils.DatastoreEtcdV2 api.PolicyMetadata{Name: "policy.1"}, policySpec1, policySpec2, + policySpec1AfterRead, + policySpec2AfterRead, ), // Test 2: Pass one fully populated PolicySpec and another empty PolicySpec and expect the series of operations to succeed. @@ -194,6 +213,8 @@ var _ = testutils.E2eDatastoreDescribe("Policy tests", testutils.DatastoreEtcdV2 api.PolicyMetadata{Name: "policy.1"}, policySpec1, api.PolicySpec{}, + policySpec1AfterRead, + api.PolicySpec{}, ), // Test 3: Pass one partially populated PolicySpec and another fully populated PolicySpec and expect the series of operations to succeed. @@ -204,6 +225,10 @@ var _ = testutils.E2eDatastoreDescribe("Policy tests", testutils.DatastoreEtcdV2 Selector: "has(myLabel-8.9/88-._9)", }, policySpec2, + api.PolicySpec{ + Selector: "has(myLabel-8.9/88-._9)", + }, + policySpec2AfterRead, ), ) }) diff --git a/lib/client/profile_e2e_test.go b/lib/client/profile_e2e_test.go index 52fa632da..dc5615778 100644 --- a/lib/client/profile_e2e_test.go +++ b/lib/client/profile_e2e_test.go @@ -49,18 +49,30 @@ var profileSpec1 = api.ProfileSpec{ IngressRules: []api.Rule{testutils.InRule1, testutils.InRule2}, EgressRules: []api.Rule{testutils.EgressRule1, testutils.EgressRule2}, } + +// When reading back, the rules should have been updated to the newer format. 
+var profileSpec1AfterRead = api.ProfileSpec{ + IngressRules: []api.Rule{testutils.InRule1AfterRead, testutils.InRule2AfterRead}, + EgressRules: []api.Rule{testutils.EgressRule1AfterRead, testutils.EgressRule2AfterRead}, +} var tags1 = []string{"profile1-tag1", "profile1-tag2"} var profileSpec2 = api.ProfileSpec{ IngressRules: []api.Rule{testutils.InRule2, testutils.InRule1}, EgressRules: []api.Rule{testutils.EgressRule2, testutils.EgressRule1}, } + +// When reading back, the rules should have been updated to the newer format. +var profileSpec2AfterRead = api.ProfileSpec{ + IngressRules: []api.Rule{testutils.InRule2AfterRead, testutils.InRule1AfterRead}, + EgressRules: []api.Rule{testutils.EgressRule2AfterRead, testutils.EgressRule1AfterRead}, +} var tags2 = []string{"profile2-tag1", "profile2-tag2"} var _ = testutils.E2eDatastoreDescribe("Profile tests", testutils.DatastoreEtcdV2, func(config api.CalicoAPIConfig) { DescribeTable("Profile e2e tests", - func(meta1, meta2 api.ProfileMetadata, spec1, spec2 api.ProfileSpec) { + func(meta1, meta2 api.ProfileMetadata, spec1, spec2, spec1AfterRead, spec2AfterRead api.ProfileSpec) { c := testutils.CreateCleanClient(config) By("Updating the profile before it is created") @@ -90,8 +102,8 @@ var _ = testutils.E2eDatastoreDescribe("Profile tests", testutils.DatastoreEtcdV // Should match spec1 & outProfile1 and outProfile2 & spec2 and errors to be nil. Expect(outError1).NotTo(HaveOccurred()) Expect(outError2).NotTo(HaveOccurred()) - Expect(outProfile1.Spec).To(Equal(spec1)) - Expect(outProfile2.Spec).To(Equal(spec2)) + Expect(outProfile1.Spec).To(Equal(spec1AfterRead)) + Expect(outProfile2.Spec).To(Equal(spec2AfterRead)) By("Update, Get and compare") @@ -104,7 +116,7 @@ var _ = testutils.E2eDatastoreDescribe("Profile tests", testutils.DatastoreEtcdV // Assert the Spec for profile with meta1 matches spec2 and no error. Expect(outError1).NotTo(HaveOccurred()) - Expect(outProfile1.Spec).To(Equal(spec2)) + Expect(outProfile1.Spec).To(Equal(spec2AfterRead)) By("List all the profiles and compare") @@ -192,6 +204,8 @@ var _ = testutils.E2eDatastoreDescribe("Profile tests", testutils.DatastoreEtcdV }, profileSpec1, profileSpec2, + profileSpec1AfterRead, + profileSpec2AfterRead, ), // Test 2: Pass one fully populated ProfileSpec and another empty ProfileSpec and expect the series of operations to succeed. @@ -213,6 +227,8 @@ var _ = testutils.E2eDatastoreDescribe("Profile tests", testutils.DatastoreEtcdV }, profileSpec1, api.ProfileSpec{}, + profileSpec1AfterRead, + api.ProfileSpec{}, ), // Test 3: Pass one partially populated ProfileSpec and another fully populated ProfileSpec and expect the series of operations to succeed. @@ -235,6 +251,10 @@ var _ = testutils.E2eDatastoreDescribe("Profile tests", testutils.DatastoreEtcdV IngressRules: []api.Rule{testutils.InRule1}, }, profileSpec2, + api.ProfileSpec{ + IngressRules: []api.Rule{testutils.InRule1AfterRead}, + }, + profileSpec2AfterRead, ), ) }) diff --git a/lib/converter/converter_suite_test.go b/lib/converter/converter_suite_test.go new file mode 100644 index 000000000..e9d9076fe --- /dev/null +++ b/lib/converter/converter_suite_test.go @@ -0,0 +1,27 @@ +// Copyright (c) 2017 Tigera, Inc. All rights reserved. + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package converter_test + +import ( + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + + "testing" +) + +func TestConverter(t *testing.T) { + RegisterFailHandler(Fail) + RunSpecs(t, "Converter Suite") +} diff --git a/lib/converter/rule.go b/lib/converter/rule.go index c7aa0fa0d..c9b64908b 100644 --- a/lib/converter/rule.go +++ b/lib/converter/rule.go @@ -15,6 +15,10 @@ package converter import ( + "sync" + + log "github.com/Sirupsen/logrus" + "github.com/projectcalico/libcalico-go/lib/api" "github.com/projectcalico/libcalico-go/lib/backend/model" "github.com/projectcalico/libcalico-go/lib/net" @@ -46,6 +50,8 @@ func RulesBackendToAPI(brs []model.Rule) []api.Rule { return ars } +var logDeprecationOnce sync.Once + // ruleAPIToBackend converts an API Rule structure to a Backend Rule structure. func ruleAPIToBackend(ar api.Rule) model.Rule { var icmpCode, icmpType, notICMPCode, notICMPType *int @@ -59,6 +65,14 @@ func ruleAPIToBackend(ar api.Rule) model.Rule { notICMPType = ar.NotICMP.Type } + if ar.Source.Net != nil || ar.Source.NotNet != nil || + ar.Destination.Net != nil || ar.Destination.NotNet != nil { + logDeprecationOnce.Do(func() { + log.Warning("The Net and NotNet fields in Source/Destination " + + "EntityRules are deprecated. Please use Nets or NotNets.") + }) + } + return model.Rule{ Action: ruleActionAPIToBackend(ar.Action), IPVersion: ar.IPVersion, @@ -71,26 +85,29 @@ func ruleAPIToBackend(ar api.Rule) model.Rule { SrcTag: ar.Source.Tag, SrcNet: ar.Source.Net, + SrcNets: ar.Source.Nets, SrcSelector: ar.Source.Selector, SrcPorts: ar.Source.Ports, DstTag: ar.Destination.Tag, DstNet: normalizeIPNet(ar.Destination.Net), + DstNets: normalizeIPNets(ar.Destination.Nets), DstSelector: ar.Destination.Selector, DstPorts: ar.Destination.Ports, NotSrcTag: ar.Source.NotTag, NotSrcNet: ar.Source.NotNet, + NotSrcNets: ar.Source.NotNets, NotSrcSelector: ar.Source.NotSelector, NotSrcPorts: ar.Source.NotPorts, NotDstTag: ar.Destination.NotTag, NotDstNet: normalizeIPNet(ar.Destination.NotNet), + NotDstNets: normalizeIPNets(ar.Destination.NotNets), NotDstSelector: ar.Destination.NotSelector, NotDstPorts: ar.Destination.NotPorts, } } -// normalizeIPNet converts an IPNet to a network by ensuring the IP address -// is correctly masked. +// normalizeIPNet converts an IPNet to a network by ensuring the IP address is correctly masked. func normalizeIPNet(n *net.IPNet) *net.IPNet { if n == nil { return nil @@ -98,6 +115,19 @@ func normalizeIPNet(n *net.IPNet) *net.IPNet { return n.Network() } +// normalizeIPNets converts an []*IPNet to a slice of networks by ensuring the IP addresses +// are correctly masked. +func normalizeIPNets(nets []*net.IPNet) []*net.IPNet { + if nets == nil { + return nil + } + out := make([]*net.IPNet, len(nets)) + for i, n := range nets { + out[i] = normalizeIPNet(n) + } + return out +} + // ruleBackendToAPI convert a Backend Rule structure to an API Rule structure. 
func ruleBackendToAPI(br model.Rule) api.Rule { var icmp, notICMP *api.ICMPFields @@ -122,22 +152,22 @@ func ruleBackendToAPI(br model.Rule) api.Rule { NotICMP: notICMP, Source: api.EntityRule{ Tag: br.SrcTag, - Net: br.SrcNet, + Nets: br.AllSrcNets(), Selector: br.SrcSelector, Ports: br.SrcPorts, NotTag: br.NotSrcTag, - NotNet: br.NotSrcNet, + NotNets: br.AllNotSrcNets(), NotSelector: br.NotSrcSelector, NotPorts: br.NotSrcPorts, }, Destination: api.EntityRule{ Tag: br.DstTag, - Net: br.DstNet, + Nets: br.AllDstNets(), Selector: br.DstSelector, Ports: br.DstPorts, NotTag: br.NotDstTag, - NotNet: br.NotDstNet, + NotNets: br.AllNotDstNets(), NotSelector: br.NotDstSelector, NotPorts: br.NotDstPorts, }, diff --git a/lib/converter/rule_test.go b/lib/converter/rule_test.go new file mode 100644 index 000000000..dd1fbbb95 --- /dev/null +++ b/lib/converter/rule_test.go @@ -0,0 +1,127 @@ +// Copyright (c) 2017 Tigera, Inc. All rights reserved. + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package converter_test + +import ( + . "github.com/projectcalico/libcalico-go/lib/converter" + + . "github.com/onsi/ginkgo/extensions/table" + . "github.com/onsi/gomega" + + "github.com/projectcalico/libcalico-go/lib/api" + "github.com/projectcalico/libcalico-go/lib/backend/model" + "github.com/projectcalico/libcalico-go/lib/net" +) + +var ( + cidr1 = net.MustParseCIDR("10.0.0.1/24") + cidr2 = net.MustParseCIDR("11.0.0.1/24") + cidr3 = net.MustParseCIDR("12.0.0.1/24") + cidr4 = net.MustParseCIDR("13.0.0.1/24") + cidr3Net = net.MustParseCIDR("12.0.0.0/24") + cidr4Net = net.MustParseCIDR("13.0.0.0/24") + cidr3Norm = (&cidr3Net).Network() + cidr4Norm = (&cidr4Net).Network() +) + +var _ = DescribeTable("RulesAPIToBackend", + func(input api.Rule, expected model.Rule) { + output := RulesAPIToBackend([]api.Rule{input}) + Expect(output).To(ConsistOf(expected)) + }, + Entry("empty rule", api.Rule{}, model.Rule{}), + Entry("should normalise destination net fields", + api.Rule{ + Source: api.EntityRule{ + Net: &cidr1, + NotNet: &cidr2, + }, + Destination: api.EntityRule{ + Net: &cidr3, + NotNet: &cidr4, + }, + }, + model.Rule{ + SrcNet: &cidr1, + NotSrcNet: &cidr2, + DstNet: cidr3Norm, + NotDstNet: cidr4Norm, + }, + ), + Entry("should normalise destination nets fields", + api.Rule{ + Source: api.EntityRule{ + Nets: []*net.IPNet{&cidr1}, + NotNets: []*net.IPNet{&cidr2}, + }, + Destination: api.EntityRule{ + Nets: []*net.IPNet{&cidr3}, + NotNets: []*net.IPNet{&cidr4}, + }, + }, + model.Rule{ + SrcNets: []*net.IPNet{&cidr1}, + NotSrcNets: []*net.IPNet{&cidr2}, + DstNets: []*net.IPNet{cidr3Norm}, + NotDstNets: []*net.IPNet{cidr4Norm}, + }, + ), +) + +var _ = DescribeTable("RulesBackendToAPI", + func(input model.Rule, expected api.Rule) { + output := RulesBackendToAPI([]model.Rule{input}) + Expect(output).To(ConsistOf(expected)) + }, + Entry("empty rule should get explicit action", model.Rule{}, api.Rule{Action: "allow"}), + Entry("should convert net to nets fields", + model.Rule{ + SrcNet: &cidr1, + NotSrcNet: &cidr2, 
+ DstNet: &cidr3, + NotDstNet: &cidr4, + }, + api.Rule{ + Action: "allow", + Source: api.EntityRule{ + Nets: []*net.IPNet{&cidr1}, + NotNets: []*net.IPNet{&cidr2}, + }, + Destination: api.EntityRule{ + Nets: []*net.IPNet{&cidr3}, + NotNets: []*net.IPNet{&cidr4}, + }, + }, + ), + Entry("should pass through nets fields", + model.Rule{ + SrcNets: []*net.IPNet{&cidr1}, + NotSrcNets: []*net.IPNet{&cidr2}, + DstNets: []*net.IPNet{&cidr3}, + NotDstNets: []*net.IPNet{&cidr4}, + }, + api.Rule{ + Action: "allow", + Source: api.EntityRule{ + Nets: []*net.IPNet{&cidr1}, + NotNets: []*net.IPNet{&cidr2}, + }, + Destination: api.EntityRule{ + Nets: []*net.IPNet{&cidr3}, + NotNets: []*net.IPNet{&cidr4}, + }, + }, + ), +) diff --git a/lib/testutils/rules.go b/lib/testutils/rules.go index b2dd7625e..6200fc5de 100644 --- a/lib/testutils/rules.go +++ b/lib/testutils/rules.go @@ -51,6 +51,18 @@ var InRule1 = api.Rule{ }, } +var InRule1AfterRead = api.Rule{ + Action: "allow", + IPVersion: &ipv4, + Protocol: &strProtocol1, + ICMP: &icmp1, + Source: api.EntityRule{ + Tag: "tag1", + Nets: []*net.IPNet{&cidr1}, + Selector: "label1 == 'value1'", + }, +} + var InRule2 = api.Rule{ Action: "deny", IPVersion: &ipv6, @@ -63,6 +75,18 @@ var InRule2 = api.Rule{ }, } +var InRule2AfterRead = api.Rule{ + Action: "deny", + IPVersion: &ipv6, + Protocol: &numProtocol1, + ICMP: &icmp1, + Source: api.EntityRule{ + Tag: "tag2", + Nets: []*net.IPNet{&cidrv61}, + Selector: "has(label2)", + }, +} + var EgressRule1 = api.Rule{ Action: "pass", IPVersion: &ipv4, @@ -75,6 +99,18 @@ var EgressRule1 = api.Rule{ }, } +var EgressRule1AfterRead = api.Rule{ + Action: "pass", + IPVersion: &ipv4, + Protocol: &numProtocol1, + ICMP: &icmp1, + Source: api.EntityRule{ + Tag: "tag3", + Nets: []*net.IPNet{&cidr2}, + Selector: "all()", + }, +} + var EgressRule2 = api.Rule{ Action: "allow", IPVersion: &ipv6, @@ -86,3 +122,15 @@ var EgressRule2 = api.Rule{ Selector: "label2 == '1234'", }, } + +var EgressRule2AfterRead = api.Rule{ + Action: "allow", + IPVersion: &ipv6, + Protocol: &strProtocol2, + ICMP: &icmp1, + Source: api.EntityRule{ + Tag: "tag4", + Nets: []*net.IPNet{&cidrv62}, + Selector: "label2 == '1234'", + }, +} diff --git a/lib/validator/validator.go b/lib/validator/validator.go index 3af20d76d..355c693c0 100644 --- a/lib/validator/validator.go +++ b/lib/validator/validator.go @@ -23,6 +23,7 @@ import ( log "github.com/Sirupsen/logrus" "github.com/projectcalico/libcalico-go/lib/api" "github.com/projectcalico/libcalico-go/lib/errors" + calinet "github.com/projectcalico/libcalico-go/lib/net" "github.com/projectcalico/libcalico-go/lib/numorstring" "github.com/projectcalico/libcalico-go/lib/scope" "github.com/projectcalico/libcalico-go/lib/selector" @@ -406,25 +407,47 @@ func validateRule(v *validator.Validate, structLevel *validator.StructLevel) { } } - // Check for mismatch in IP versions - if rule.Source.Net != nil && rule.IPVersion != nil { - if rule.Source.Net.Version() != *(rule.IPVersion) { - structLevel.ReportError(reflect.ValueOf(rule.Source.Net), "Source.Net", - "", reason("rule contains an IP version that does not match src CIDR version")) - } + // Check that only one of the net or nets fields is specified. 
+ hasNetField := rule.Source.Net != nil || + rule.Destination.Net != nil || + rule.Source.NotNet != nil || + rule.Destination.NotNet != nil + hasNetsField := len(rule.Source.Nets) != 0 || + len(rule.Destination.Nets) != 0 || + len(rule.Source.NotNets) != 0 || + len(rule.Destination.NotNets) != 0 + if hasNetField && hasNetsField { + structLevel.ReportError(reflect.ValueOf(rule.Source.Nets), + "Source/Destination.Net/Nets", + "Source/Destination.Net/Nets", + reason("only one of Net and Nets fields allowed")) } - if rule.Destination.Net != nil && rule.IPVersion != nil { - if rule.Destination.Net.Version() != *(rule.IPVersion) { - structLevel.ReportError(reflect.ValueOf(rule.Destination.Net), "Destination.Net", - "", reason("rule contains an IP version that does not match dst CIDR version")) + + var seenV4, seenV6 bool + + scanNets := func(nets []*calinet.IPNet, fieldName string) { + var v4, v6 bool + for _, n := range nets { + v4 = v4 || n.Version() == 4 + v6 = v6 || n.Version() == 6 } - } - if rule.Source.Net != nil && rule.Destination.Net != nil { - if rule.Source.Net.Version() != rule.Destination.Net.Version() { - structLevel.ReportError(reflect.ValueOf(rule.Destination.Net), "Destination.Net", - "", reason("rule does not support mixing of IPv4/v6 CIDRs")) + if rule.IPVersion != nil && ((v4 && *rule.IPVersion != 4) || (v6 && *rule.IPVersion != 6)) { + structLevel.ReportError(reflect.ValueOf(rule.Source.Net), fieldName, + "", reason("rule IP version doesn't match CIDR version")) + } + if v4 && seenV6 || v6 && seenV4 || v4 && v6 { + // This field makes the rule inconsistent. + structLevel.ReportError(reflect.ValueOf(nets), fieldName, + "", reason("rule contains both IPv4 and IPv6 CIDRs")) } + seenV4 = seenV4 || v4 + seenV6 = seenV6 || v6 } + + scanNets(rule.Source.GetNets(), "Source.Net(s)") + scanNets(rule.Source.GetNotNets(), "Source.NotNet(s)") + scanNets(rule.Destination.GetNets(), "Destination.Net(s)") + scanNets(rule.Destination.GetNotNets(), "Destination.NotNet(s)") } func validateBackendRule(v *validator.Validate, structLevel *validator.StructLevel) { diff --git a/lib/validator/validator_test.go b/lib/validator/validator_test.go index 260edb075..851562a07 100644 --- a/lib/validator/validator_test.go +++ b/lib/validator/validator_test.go @@ -59,10 +59,10 @@ func init() { DescribeTable("Validator", func(input interface{}, valid bool) { if valid { - Expect(validator.Validate(input)).To(BeNil(), + Expect(validator.Validate(input)).NotTo(HaveOccurred(), "expected value to be valid") } else { - Expect(validator.Validate(input)).ToNot(BeNil(), + Expect(validator.Validate(input)).To(HaveOccurred(), "expected value to be invalid") } }, @@ -529,6 +529,110 @@ func init() { Net: &netv6_1, }, }, false), + Entry("net list: should reject rule with net and nets", + api.Rule{ + Action: "allow", + Protocol: protocolFromString("tcp"), + IPVersion: &V4, + Source: api.EntityRule{ + Net: &netv4_3, + Nets: []*net.IPNet{&netv4_3}, + }, + }, false), + Entry("net list: should reject rule with not net and not nets", + api.Rule{ + Action: "allow", + Protocol: protocolFromString("tcp"), + IPVersion: &V4, + Source: api.EntityRule{ + NotNet: &netv4_3, + NotNets: []*net.IPNet{&netv4_3}, + }, + }, false), + Entry("net list: should reject rule with net and nets", + api.Rule{ + Action: "allow", + Protocol: protocolFromString("tcp"), + IPVersion: &V4, + Destination: api.EntityRule{ + Net: &netv4_3, + Nets: []*net.IPNet{&netv4_3}, + }, + }, false), + Entry("net list: should reject rule with not net and not nets", + 
api.Rule{ + Action: "allow", + Protocol: protocolFromString("tcp"), + IPVersion: &V4, + Destination: api.EntityRule{ + NotNet: &netv4_3, + NotNets: []*net.IPNet{&netv4_3}, + }, + }, false), + Entry("net list: should reject rule mixed IPv4 (src) and IPv6 (dest)", + api.Rule{ + Action: "allow", + Protocol: protocolFromString("tcp"), + Source: api.EntityRule{ + Nets: []*net.IPNet{&netv4_3}, + }, + Destination: api.EntityRule{ + Nets: []*net.IPNet{&netv6_3}, + }, + }, false), + Entry("net list: should reject rule mixed IPv6 (src) and IPv4 (dest)", + api.Rule{ + Action: "allow", + Protocol: protocolFromString("tcp"), + Source: api.EntityRule{ + Nets: []*net.IPNet{&netv6_2}, + }, + Destination: api.EntityRule{ + Nets: []*net.IPNet{&netv4_2}, + }, + }, false), + Entry("net list: should reject rule mixed IPv6 version and IPv4 Net", + api.Rule{ + Action: "allow", + Protocol: protocolFromString("tcp"), + IPVersion: &V6, + Source: api.EntityRule{ + Nets: []*net.IPNet{&netv4_4}, + }, + Destination: api.EntityRule{ + Nets: []*net.IPNet{&netv4_2}, + }, + }, false), + Entry("net list: should reject rule mixed IPv6 version and IPv4 Net", + api.Rule{ + Action: "allow", + Protocol: protocolFromString("tcp"), + IPVersion: &V6, + Source: api.EntityRule{ + Net: &netv4_4, + }, + Destination: api.EntityRule{ + NotNets: []*net.IPNet{&netv4_2}, + }, + }, false), + Entry("net list: should reject rule mixed IPVersion and Source Net IP version", + api.Rule{ + Action: "allow", + Protocol: protocolFromString("tcp"), + IPVersion: &V6, + Source: api.EntityRule{ + Nets: []*net.IPNet{&netv4_1}, + }, + }, false), + Entry("net list: should reject rule mixed IPVersion and Dest Net IP version", + api.Rule{ + Action: "allow", + Protocol: protocolFromString("tcp"), + IPVersion: &V4, + Destination: api.EntityRule{ + Nets: []*net.IPNet{&netv6_1}, + }, + }, false), // (API) NodeSpec Entry("should accept node with IPv4 BGP", api.NodeSpec{BGP: &api.NodeBGPSpec{IPv4Address: &netv4_1}}, true), From 1d20d4e54e530980b23a80467ae55919adc8b8f9 Mon Sep 17 00:00:00 2001 From: Shaun Crampton Date: Mon, 17 Jul 2017 11:36:20 +0100 Subject: [PATCH 09/11] Fix JSON name of notNets field. --- lib/api/rule.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/api/rule.go b/lib/api/rule.go index 807d2cb63..e219d06dc 100644 --- a/lib/api/rule.go +++ b/lib/api/rule.go @@ -127,7 +127,7 @@ type EntityRule struct { // NotNets is an optional field that restricts the rule to only apply to traffic that // does not originate from (or terminate at) an IP address in any of the given subnets. - NotNets []*net.IPNet `json:"nets,omitempty" validate:"omitempty"` + NotNets []*net.IPNet `json:"notNets,omitempty" validate:"omitempty"` // NotSelector is the negated version of the Selector field. See Selector field for // subtleties with negated selectors. 
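
To make the new Net/Nets checks above concrete, here is a minimal sketch of rules that the updated validator rejects. It is not part of the patch; it assumes that lib/net.IPNet embeds the standard library net.IPNet (as it does elsewhere in this repo) and uses only the exported validator.Validate entry point that the tests already exercise. The helper name mustCIDR is purely illustrative.

    package main

    import (
        "fmt"
        gonet "net"

        "github.com/projectcalico/libcalico-go/lib/api"
        calinet "github.com/projectcalico/libcalico-go/lib/net"
        "github.com/projectcalico/libcalico-go/lib/validator"
    )

    // mustCIDR wraps the standard library parser in the calico IPNet type.
    func mustCIDR(s string) *calinet.IPNet {
        _, n, err := gonet.ParseCIDR(s)
        if err != nil {
            panic(err)
        }
        return &calinet.IPNet{IPNet: *n}
    }

    func main() {
        // Setting both the old single-CIDR field and the new list field is rejected.
        bothStyles := api.Rule{
            Action: "allow",
            Source: api.EntityRule{
                Net:  mustCIDR("10.0.0.0/24"),
                Nets: []*calinet.IPNet{mustCIDR("10.0.1.0/24")},
            },
        }
        fmt.Println(validator.Validate(bothStyles)) // error: only one of Net and Nets fields allowed

        // Mixing IPv4 and IPv6 CIDRs within a rule is also rejected.
        mixedFamilies := api.Rule{
            Action: "allow",
            Source: api.EntityRule{
                Nets: []*calinet.IPNet{mustCIDR("10.0.0.0/24"), mustCIDR("fd00::/64")},
            },
        }
        fmt.Println(validator.Validate(mixedFamilies)) // error: rule contains both IPv4 and IPv6 CIDRs
    }
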
From 97655b408c5aa0afd43639bc65a7d5b35346ad5a Mon Sep 17 00:00:00 2001 From: matt Date: Wed, 12 Jul 2017 11:49:06 -0700 Subject: [PATCH 10/11] Moving over logutils from Felix --- glide.lock | 35 ++- glide.yaml | 2 + lib/logutils/logutils.go | 436 ++++++++++++++++++++++++++++ lib/logutils/logutils_suite_test.go | 40 +++ lib/logutils/logutils_test.go | 412 ++++++++++++++++++++++++++ 5 files changed, 923 insertions(+), 2 deletions(-) create mode 100644 lib/logutils/logutils.go create mode 100644 lib/logutils/logutils_suite_test.go create mode 100644 lib/logutils/logutils_test.go diff --git a/glide.lock b/glide.lock index ec8b07cb4..623a249e9 100644 --- a/glide.lock +++ b/glide.lock @@ -1,6 +1,10 @@ -hash: a35ac70c3f24336298e49395f16d469f35bc7105e3d9df129c3ba948eb74132f -updated: 2017-06-04T10:01:10.429693073-07:00 +hash: 5a8e8f9d19d935d4247638b121a09ff5f53411623f01251a0cf9029fd7693b7b +updated: 2017-07-14T11:08:47.961492803-07:00 imports: +- name: github.com/beorn7/perks + version: 4c0e84591b9aa9e6dcfdf3e020114cd81f89d5f9 + subpackages: + - quantile - name: github.com/coreos/etcd version: 17ae440991da3bdb2df4309936dd2074f66ec394 subpackages: @@ -45,6 +49,11 @@ imports: - sortkeys - name: github.com/golang/glog version: 44145f04b68cf362d9c4df2182967c2275eaefed +- name: github.com/golang/protobuf + version: 4bd1920723d7b7c925de087aa32e2187708897f7 + subpackages: + - jsonpb + - proto - name: github.com/google/gofuzz version: 44d81051d367757e1c7c6a5a86423ece9afcf63c - name: github.com/howeyc/gopass @@ -61,6 +70,10 @@ imports: - buffer - jlexer - jwriter +- name: github.com/matttproud/golang_protobuf_extensions + version: c12348ce28de40eed0136aa2b644d0ee0650e56c + subpackages: + - pbutil - name: github.com/onsi/ginkgo version: f40a49d81e5c12e90400620b6242fb29a8e7c9d9 subpackages: @@ -90,6 +103,24 @@ imports: version: 955bc3e451ef0c9df8b9113bf2e341139cdafab2 - name: github.com/projectcalico/go-yaml-wrapper version: 598e54215bee41a19677faa4f0c32acd2a87eb56 +- name: github.com/prometheus/client_golang + version: c5b7fccd204277076155f10851dad72b76a49317 + subpackages: + - prometheus +- name: github.com/prometheus/client_model + version: 6f3806018612930941127f2a7c6c453ba2c527d2 + subpackages: + - go +- name: github.com/prometheus/common + version: 3e6a7635bac6573d43f49f97b47eb9bda195dba8 + subpackages: + - expfmt + - internal/bitbucket.org/ww/goautoneg + - model +- name: github.com/prometheus/procfs + version: e645f4e5aaa8506fc71d6edbc5c4ff02c04c46f2 + subpackages: + - xfs - name: github.com/PuerkitoBio/purell version: 8a290539e2e8629dbc4e6bad948158f790ec31f4 - name: github.com/PuerkitoBio/urlesc diff --git a/glide.yaml b/glide.yaml index e7c8483c1..d62fd2362 100644 --- a/glide.yaml +++ b/glide.yaml @@ -43,5 +43,7 @@ import: - rest - tools/cache - tools/clientcmd +- package: github.com/prometheus/client_golang + version: ^0.8.0 testImport: - package: github.com/onsi/gomega diff --git a/lib/logutils/logutils.go b/lib/logutils/logutils.go new file mode 100644 index 000000000..3eb0b3158 --- /dev/null +++ b/lib/logutils/logutils.go @@ -0,0 +1,436 @@ +// Copyright (c) 2016-2017 Tigera, Inc. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package logutils + +import ( + "bytes" + "fmt" + "io" + "os" + "path" + "runtime" + "sort" + "strings" + "sync" + "time" + + log "github.com/Sirupsen/logrus" + "github.com/prometheus/client_golang/prometheus" +) + +// FilterLevels returns all the logrus.Level values <= maxLevel. +func FilterLevels(maxLevel log.Level) []log.Level { + levels := []log.Level{} + for _, l := range log.AllLevels { + if l <= maxLevel { + levels = append(levels, l) + } + } + return levels +} + +// Formatter is our custom log formatter designed to balance ease of machine processing +// with human readability. Logs include: +// - A sortable millisecond timestamp, for scanning and correlating logs +// - The log level, near the beginning of the line, to aid in visual scanning +// - The PID of the process to make it easier to spot log discontinuities (If +// you are looking at two disjoint chunks of log, were they written by the +// same process? Was there a restart in-between?) +// - The file name and line number, as essential context +// - The message! +// - Log fields appended in sorted order +// +// Example: +// 2017-01-05 09:17:48.238 [INFO][85386] endpoint_mgr.go 434: Skipping configuration of +// interface because it is oper down. ifaceName="cali1234" +type Formatter struct{} + +func (f *Formatter) Format(entry *log.Entry) ([]byte, error) { + stamp := entry.Time.Format("2006-01-02 15:04:05.000") + levelStr := strings.ToUpper(entry.Level.String()) + pid := os.Getpid() + fileName := entry.Data["__file__"] + lineNo := entry.Data["__line__"] + b := entry.Buffer + if b == nil { + b = &bytes.Buffer{} + } + fmt.Fprintf(b, "%s [%s][%d] %v %v: %v", stamp, levelStr, pid, fileName, lineNo, entry.Message) + appendKVsAndNewLine(b, entry) + return b.Bytes(), nil +} + +// FormatForSyslog formats logs in a way tailored for syslog. It avoids logging information that is +// already included in the syslog metadata such as timestamp and PID. The log level _is_ included +// because syslog doesn't seem to output it by default and it's very useful. +// +// INFO endpoint_mgr.go 434: Skipping configuration of interface because it is oper down. +// ifaceName="cali1234" +func FormatForSyslog(entry *log.Entry) string { + levelStr := strings.ToUpper(entry.Level.String()) + fileName := entry.Data["__file__"] + lineNo := entry.Data["__line__"] + b := entry.Buffer + if b == nil { + b = &bytes.Buffer{} + } + fmt.Fprintf(b, "%s %v %v: %v", levelStr, fileName, lineNo, entry.Message) + appendKVsAndNewLine(b, entry) + return b.String() +} + +// appendKeysAndNewLine writes the KV pairs attached to the entry to the end of the buffer, then +// finishes it with a newline. +func appendKVsAndNewLine(b *bytes.Buffer, entry *log.Entry) { + // Sort the keys for consistent output. 
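+	// (Go randomises map iteration order, so without the sort the key/value
+	// pairs would appear in a different order on every log line.)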
+ var keys []string = make([]string, 0, len(entry.Data)) + for k := range entry.Data { + keys = append(keys, k) + } + sort.Strings(keys) + + for _, key := range keys { + if key == "__file__" || key == "__line__" { + continue + } + var value interface{} = entry.Data[key] + var stringifiedValue string + if err, ok := value.(error); ok { + stringifiedValue = err.Error() + } else if stringer, ok := value.(fmt.Stringer); ok { + // Trust the value's String() method. + stringifiedValue = stringer.String() + } else { + // No string method, use %#v to get a more thorough dump. + fmt.Fprintf(b, " %v=%#v", key, value) + continue + } + b.WriteByte(' ') + b.WriteString(key) + b.WriteByte('=') + b.WriteString(stringifiedValue) + } + b.WriteByte('\n') +} + +// NullWriter is a dummy writer that always succeeds and does nothing. +type NullWriter struct{} + +func (w *NullWriter) Write(p []byte) (int, error) { + return len(p), nil +} + +type ContextHook struct { +} + +func (hook ContextHook) Levels() []log.Level { + return log.AllLevels +} + +func (hook ContextHook) Fire(entry *log.Entry) error { + pcs := make([]uintptr, 4) + if numEntries := runtime.Callers(6, pcs); numEntries > 0 { + frames := runtime.CallersFrames(pcs) + for { + frame, more := frames.Next() + if !shouldSkipFrame(frame) { + entry.Data["__file__"] = path.Base(frame.File) + entry.Data["__line__"] = frame.Line + break + } + if !more { + break + } + } + } + return nil +} + +func shouldSkipFrame(frame runtime.Frame) bool { + return strings.LastIndex(frame.File, "exported.go") > 0 || + strings.LastIndex(frame.File, "logger.go") > 0 || + strings.LastIndex(frame.File, "entry.go") > 0 +} + +type QueuedLog struct { + Level log.Level + Message []byte + SyslogMessage string + WaitGroup *sync.WaitGroup + + // NumSkippedLogs contains the number of logs that were skipped before this log (due to the + // queue being blocked). + NumSkippedLogs uint +} + +func (ql QueuedLog) OnLogDone() { + if ql.WaitGroup != nil { + ql.WaitGroup.Done() + } +} + +func NewStreamDestination( + level log.Level, + writer io.Writer, + c chan QueuedLog, + disableLogDropping bool, + counter prometheus.Counter, +) *Destination { + return &Destination{ + Level: level, + channel: c, + writeLog: func(ql QueuedLog) error { + if ql.NumSkippedLogs > 0 { + fmt.Fprintf(writer, "... dropped %d logs ...\n", + ql.NumSkippedLogs) + } + _, err := writer.Write(ql.Message) + return err + }, + disableLogDropping: disableLogDropping, + counter: counter, + } +} + +func NewSyslogDestination( + level log.Level, + writer syslogWriter, + c chan QueuedLog, + disableLogDropping bool, + counter prometheus.Counter, +) *Destination { + return &Destination{ + Level: level, + channel: c, + writeLog: func(ql QueuedLog) error { + if ql.NumSkippedLogs > 0 { + writer.Warning(fmt.Sprintf("... dropped %d logs ...\n", + ql.NumSkippedLogs)) + } + err := writeToSyslog(writer, ql) + return err + }, + disableLogDropping: disableLogDropping, + counter: counter, + } +} + +type Destination struct { + // Level is the minimum level that a log must have to be logged to this destination. + Level log.Level + // Channel is the channel used to queue logs to the background worker thread. Public for + // test purposes. + channel chan QueuedLog + // WriteLog is the function to actually make a log. The constructors above initialise this + // with a function that logs to a stream or to syslog, for example. + writeLog func(ql QueuedLog) error + + // DisableLogDropping forces all logs to be queued even if the destination blocks. 
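+	// When set, Send() blocks on the channel rather than counting the log as dropped.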
+ disableLogDropping bool + + // Lock protects the numDroppedLogs count. + lock sync.Mutex + numDroppedLogs uint + + // Counter is the prometheus counter for logged errors that this destination will increment + counter prometheus.Counter +} + +// Send sends a log to the background thread. It returns true on success or false if the channel +// is blocked. +func (d *Destination) Send(ql QueuedLog) (ok bool) { + if d.disableLogDropping { + d.channel <- ql + ok = true + return + } + + d.lock.Lock() + ql.NumSkippedLogs = d.numDroppedLogs + select { + case d.channel <- ql: + // We've now queued reporting of all the dropped logs, zero out the counter. + d.numDroppedLogs = 0 + ok = true + default: + d.numDroppedLogs += 1 + } + d.lock.Unlock() + return +} + +// LoopWritingLogs is intended to be used as a background go-routine. It processes the logs from +// the channel. +func (d *Destination) LoopWritingLogs() { + for ql := range d.channel { + err := d.writeLog(ql) + if err != nil { + // Increment the number of errors while trying to write to log + d.counter.Inc() + fmt.Fprintf(os.Stderr, "Failed to write to log: %v", err) + } + ql.OnLogDone() + } +} + +// Close closes the channel to the background goroutine. This is only safe to call if you know +// that the destination is no longer in use by any thread; in tests, for example. +func (d *Destination) Close() { + close(d.channel) +} + +type syslogWriter interface { + Debug(m string) error + Info(m string) error + Warning(m string) error + Err(m string) error + Crit(m string) error +} + +func writeToSyslog(writer syslogWriter, ql QueuedLog) error { + switch ql.Level { + case log.PanicLevel: + return writer.Crit(ql.SyslogMessage) + case log.FatalLevel: + return writer.Crit(ql.SyslogMessage) + case log.ErrorLevel: + return writer.Err(ql.SyslogMessage) + case log.WarnLevel: + return writer.Warning(ql.SyslogMessage) + case log.InfoLevel: + return writer.Info(ql.SyslogMessage) + case log.DebugLevel: + return writer.Debug(ql.SyslogMessage) + default: + return nil + } +} + +// BackgroundHook is a logrus Hook that (synchronously) formats each log and sends it to one or more +// Destinations for writing ona background thread. It supports filtering destinations on +// individual log levels. We write logs from background threads so that blocking of the output +// stream doesn't block the mainline code. Up to a point, we queue logs for writing, then we start +// dropping logs. +type BackgroundHook struct { + levels []log.Level + syslogLevel log.Level + + destinations []*Destination + + // Our own copy of the dropped logs counter, used for logging out when we drop logs. + // Must be read/updated using atomic.XXX. + numDroppedLogs uint64 + lastDropLogTime time.Duration + + // Counter + counter prometheus.Counter +} + +func NewBackgroundHook(levels []log.Level, syslogLevel log.Level, destinations []*Destination, counter prometheus.Counter) *BackgroundHook { + return &BackgroundHook{ + destinations: destinations, + levels: levels, + syslogLevel: syslogLevel, + counter: counter, + } +} + +func (h *BackgroundHook) Levels() []log.Level { + return h.levels +} + +func (h *BackgroundHook) Fire(entry *log.Entry) (err error) { + var serialized []byte + if serialized, err = entry.Logger.Formatter.Format(entry); err != nil { + return + } + + // entry's buffer will be reused after we return but we're about to send the message over + // a channel so we need to take a copy. 
+ bufCopy := make([]byte, len(serialized)) + copy(bufCopy, serialized) + if entry.Buffer != nil { + entry.Buffer.Truncate(0) + } + + ql := QueuedLog{ + Level: entry.Level, + Message: bufCopy, + } + + if entry.Level <= h.syslogLevel { + // syslog gets its own log string since our default log string duplicates a lot of + // syslog metadata. Only calculate that string if it's needed. + ql.SyslogMessage = FormatForSyslog(entry) + } + + var waitGroup *sync.WaitGroup + if entry.Level <= log.FatalLevel { + // This is a panic or a fatal log so we'll get killed immediately after we return. + // Use a wait group to wait for the log to get written. + waitGroup = &sync.WaitGroup{} + ql.WaitGroup = waitGroup + } + + for _, dest := range h.destinations { + if ql.Level > dest.Level { + continue + } + if waitGroup != nil { + // Thread safety: we must call add before we send the wait group over the + // channel (or the background thread could be scheduled immediately and + // call Done() before we call Add()). Since we don't know if the send + // will succeed that leads to the need to call Done() on the 'default:' + // branch below to correctly pair Add()/Done() calls. + waitGroup.Add(1) + } + + if ok := dest.Send(ql); !ok { + // Background thread isn't keeping up. Drop the log and count how many + // we've dropped. + if waitGroup != nil { + waitGroup.Done() + } + // Increment the number of dropped logs + dest.counter.Inc() + } + } + if waitGroup != nil { + waitGroup.Wait() + } + return +} + +func (h *BackgroundHook) Start() { + for _, d := range h.destinations { + go d.LoopWritingLogs() + } +} + +// safeParseLogLevel parses a string version of a logrus log level, defaulting +// to logrus.PanicLevel on failure. +func SafeParseLogLevel(logLevel string) log.Level { + defaultedLevel := log.PanicLevel + if logLevel != "" { + parsedLevel, err := log.ParseLevel(logLevel) + if err == nil { + defaultedLevel = parsedLevel + } else { + log.WithField("raw level", logLevel).Warn( + "Invalid log level, defaulting to panic") + } + } + return defaultedLevel +} diff --git a/lib/logutils/logutils_suite_test.go b/lib/logutils/logutils_suite_test.go new file mode 100644 index 000000000..983b84aea --- /dev/null +++ b/lib/logutils/logutils_suite_test.go @@ -0,0 +1,40 @@ +// Copyright (c) 2016-2017 Tigera, Inc. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package logutils_test + +import ( + . "github.com/onsi/ginkgo" + . 
"github.com/onsi/gomega" + + "testing" + + "github.com/Sirupsen/logrus" + "github.com/onsi/ginkgo/reporters" + + "github.com/projectcalico/libcalico-go/lib/logutils" + "github.com/projectcalico/libcalico-go/lib/testutils" +) + +func init() { + testutils.HookLogrusForGinkgo() + logrus.AddHook(&logutils.ContextHook{}) + logrus.SetFormatter(&logutils.Formatter{}) +} + +func TestLogutils(t *testing.T) { + RegisterFailHandler(Fail) + junitReporter := reporters.NewJUnitReporter("junit.xml") + RunSpecsWithDefaultAndCustomReporters(t, "Logutils Suite", []Reporter{junitReporter}) +} diff --git a/lib/logutils/logutils_test.go b/lib/logutils/logutils_test.go new file mode 100644 index 000000000..744fb9837 --- /dev/null +++ b/lib/logutils/logutils_test.go @@ -0,0 +1,412 @@ +// Copyright (c) 2016-2017 Tigera, Inc. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package logutils_test + +import ( + "bytes" + "errors" + "fmt" + "io" + "os" + "strings" + "sync" + "time" + + log "github.com/Sirupsen/logrus" + . "github.com/onsi/ginkgo" + . "github.com/onsi/ginkgo/extensions/table" + . "github.com/onsi/gomega" + "github.com/prometheus/client_golang/prometheus" + + . "github.com/projectcalico/libcalico-go/lib/logutils" +) + +var ( + testCounter = prometheus.NewCounter(prometheus.CounterOpts{ + Name: "test_counter", + Help: "Number of logs dropped and errors encountered while logging.", + }) +) + +func init() { + prometheus.MustRegister(testCounter) +} + +var _ = Describe("Logutils", func() { + ourHook := ContextHook{} + var savedWriter io.Writer + var buf *bytes.Buffer + BeforeEach(func() { + log.AddHook(ourHook) + savedWriter = log.StandardLogger().Out + buf = &bytes.Buffer{} + log.StandardLogger().Out = buf + }) + AfterEach(func() { + log.StandardLogger().Out = savedWriter + levelHooks := log.StandardLogger().Hooks + for level, hooks := range levelHooks { + j := 0 + for _, hook := range hooks { + if hook == ourHook { + continue + } + hooks[j] = hook + j += 1 + } + levelHooks[level] = hooks[:len(hooks)-1] + } + }) + + It("Should add correct file when invoked via log.Info", func() { + log.Info("Test log") + Expect(buf.String()).To(ContainSubstring("logutils_test.go")) + }) + It("Should add correct file when invoked via Logger.Info", func() { + log.StandardLogger().Info("Test log") + Expect(buf.String()).To(ContainSubstring("logutils_test.go")) + }) + It("Should add correct file when invoked via log.WithField(...).Info", func() { + log.WithField("foo", "bar").Info("Test log") + Expect(buf.String()).To(ContainSubstring("logutils_test.go")) + }) +}) + +var _ = DescribeTable("Formatter", + func(entry log.Entry, expectedLog, expectedSyslog string) { + f := &Formatter{} + out, err := f.Format(&entry) + Expect(err).NotTo(HaveOccurred()) + expectedLog = strings.Replace(expectedLog, "", fmt.Sprintf("%v", os.Getpid()), 1) + Expect(string(out)).To(Equal(expectedLog)) + Expect(FormatForSyslog(&entry)).To(Equal(expectedSyslog)) + }, + Entry("Empty", log.Entry{}, + "0001-01-01 00:00:00.000 
[PANIC][] : \n", + "PANIC : \n"), + Entry("Basic", + log.Entry{ + Level: log.InfoLevel, + Time: theTime(), + Data: log.Fields{ + "__file__": "foo.go", + "__line__": 123, + }, + Message: "The answer is 42.", + }, + "2017-03-15 11:22:33.123 [INFO][] foo.go 123: The answer is 42.\n", + "INFO foo.go 123: The answer is 42.\n", + ), + Entry("With fields", + log.Entry{ + Level: log.WarnLevel, + Time: theTime(), + Data: log.Fields{ + "__file__": "foo.go", + "__line__": 123, + "a": 10, + "b": "foobar", + "c": theTime(), + "err": errors.New("an error"), + }, + Message: "The answer is 42.", + }, + "2017-03-15 11:22:33.123 [WARNING][] foo.go 123: The answer is 42. a=10 b=\"foobar\" c=2017-03-15 11:22:33.123 +0000 UTC err=an error\n", + "WARNING foo.go 123: The answer is 42. a=10 b=\"foobar\" c=2017-03-15 11:22:33.123 +0000 UTC err=an error\n"), +) + +func theTime() time.Time { + theTime, err := time.Parse("2006-01-02 15:04:05.000", "2017-03-15 11:22:33.123") + if err != nil { + panic(err) + } + return theTime +} + +var ( + message1 = QueuedLog{ + Level: log.InfoLevel, + Message: []byte("Message"), + SyslogMessage: "syslog message", + } + message2 = QueuedLog{ + Level: log.InfoLevel, + Message: []byte("Message2"), + SyslogMessage: "syslog message2", + } +) + +var _ = Describe("Stream Destination", func() { + var s *Destination + var c chan QueuedLog + var pr *io.PipeReader + var pw *io.PipeWriter + + BeforeEach(func() { + c = make(chan QueuedLog, 1) + pr, pw = io.Pipe() + s = NewStreamDestination( + log.InfoLevel, + pw, + c, + false, + testCounter, + ) + }) + + It("should report dropped logs to background thread", func() { + // First message should be queued on the channel. + ok := s.Send(message1) + Expect(ok).To(BeTrue()) + // Second message should be dropped. Drop counter should now be 1. + ok = s.Send(message1) + Expect(ok).To(BeFalse()) + // Drain the queue. + Expect(<-c).To(Equal(message1)) + // Third message should go through. + ok = s.Send(message1) + Expect(ok).To(BeTrue()) + Expect(<-c).To(Equal(QueuedLog{ + Level: log.InfoLevel, + Message: []byte("Message"), + SyslogMessage: "syslog message", + NumSkippedLogs: 1, + })) + // Subsequent messages should have the counter reset. + ok = s.Send(message1) + Expect(ok).To(BeTrue()) + Expect(<-c).To(Equal(message1)) + }) + + Describe("with dropping disabled", func() { + BeforeEach(func() { + c = make(chan QueuedLog, 1) + pr, pw = io.Pipe() + s = NewStreamDestination( + log.InfoLevel, + pw, + c, + true, + testCounter, + ) + }) + + It("should not drop logs", func() { + // First message should be queued on the channel. + ok := s.Send(message1) + Expect(ok).To(BeTrue()) + done := make(chan bool) + go func() { + // Second message should block so we do it in a goroutine. + ok = s.Send(message2) + done <- ok + }() + // Sleep so that the background goroutine has a chance to try to write. + time.Sleep(10 * time.Millisecond) + // Drain the queue. 
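+			// Receiving message1 frees the single buffered slot, which unblocks the
+			// goroutine's Send() above.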
+ Expect(<-c).To(Equal(message1)) + Expect(<-c).To(Equal(message2)) + Expect(<-done).To(BeTrue()) + }) + }) + + Describe("With real background thread", func() { + BeforeEach(func() { + go s.LoopWritingLogs() + }) + AfterEach(func() { + s.Close() + }) + + readNextMsg := func() string { + b := make([]byte, 1024) + n, err := pr.Read(b) + Expect(err).NotTo(HaveOccurred()) + return string(b[:n]) + } + + It("should write a log (no WaitGroup)", func() { + ok := s.Send(message1) + Expect(ok).To(BeTrue()) + msg := readNextMsg() + Expect(msg).To(Equal("Message")) + }) + + It("should log number of dropped logs", func() { + // Bypass Send() so we can force NumSkippedLogs to be non-zero. + c <- QueuedLog{ + Level: log.InfoLevel, + Message: []byte("Message"), + SyslogMessage: "syslog message", + NumSkippedLogs: 1, + } + msg := readNextMsg() + Expect(msg).To(Equal("... dropped 1 logs ...\n")) + msg = readNextMsg() + Expect(msg).To(Equal("Message")) + }) + + It("should trigger WaitGroup", func() { + wg := &sync.WaitGroup{} + wg.Add(1) + c <- QueuedLog{ + Level: log.InfoLevel, + Message: []byte("Message"), + SyslogMessage: "syslog message", + WaitGroup: wg, + } + b := make([]byte, 1024) + n, err := pr.Read(b) + wg.Wait() + Expect(err).NotTo(HaveOccurred()) + Expect(string(b[:n])).To(Equal("Message")) + }) + }) +}) + +var _ = Describe("Syslog Destination", func() { + var s *Destination + var c chan QueuedLog + var pr *io.PipeReader + var pw *io.PipeWriter + + BeforeEach(func() { + c = make(chan QueuedLog, 1) + pr, pw = io.Pipe() + s = NewSyslogDestination( + log.InfoLevel, + (*mockSyslogWriter)(pw), + c, + false, + testCounter, + ) + }) + + Describe("with dropping disabled", func() { + BeforeEach(func() { + s = NewSyslogDestination( + log.InfoLevel, + (*mockSyslogWriter)(pw), + c, + true, + testCounter, + ) + }) + + It("should not drop logs", func() { + // First message should be queued on the channel. + ok := s.Send(message1) + Expect(ok).To(BeTrue()) + done := make(chan bool) + go func() { + // Second message should block so we do it in a goroutine. + ok = s.Send(message2) + done <- ok + }() + // Sleep so that the background goroutine has a chance to try to write. + time.Sleep(10 * time.Millisecond) + // Drain the queue. + Expect(<-c).To(Equal(message1)) + Expect(<-c).To(Equal(message2)) + Expect(<-done).To(BeTrue()) + }) + }) + + Describe("With real background thread", func() { + BeforeEach(func() { + go s.LoopWritingLogs() + }) + AfterEach(func() { + s.Close() + }) + + readNextMsg := func() string { + b := make([]byte, 1024) + n, err := pr.Read(b) + Expect(err).NotTo(HaveOccurred()) + return string(b[:n]) + } + + defineLogLevelTest := func(level log.Level, levelName string) { + It("should write a log (no WaitGroup) level "+levelName, func() { + ql := message1 + ql.Level = level + ok := s.Send(ql) + Expect(ok).To(BeTrue()) + msg := readNextMsg() + Expect(msg).To(Equal(levelName + " syslog message")) + }) + } + defineLogLevelTest(log.InfoLevel, "INFO") + defineLogLevelTest(log.WarnLevel, "WARNING") + defineLogLevelTest(log.DebugLevel, "DEBUG") + defineLogLevelTest(log.ErrorLevel, "ERROR") + defineLogLevelTest(log.FatalLevel, "CRITICAL") + defineLogLevelTest(log.PanicLevel, "CRITICAL") + + It("should log number of dropped logs", func() { + // Bypass Send() so we can force NumSkippedLogs to be non-zero. + c <- QueuedLog{ + Level: log.InfoLevel, + Message: []byte("Message"), + SyslogMessage: "syslog message", + NumSkippedLogs: 1, + } + msg := readNextMsg() + Expect(msg).To(Equal("WARNING ... 
dropped 1 logs ...\n")) + msg = readNextMsg() + Expect(msg).To(Equal("INFO syslog message")) + }) + + It("should trigger WaitGroup", func() { + wg := &sync.WaitGroup{} + wg.Add(1) + c <- QueuedLog{ + Level: log.InfoLevel, + Message: []byte("Message"), + SyslogMessage: "syslog message", + WaitGroup: wg, + } + b := make([]byte, 1024) + n, err := pr.Read(b) + wg.Wait() + Expect(err).NotTo(HaveOccurred()) + Expect(string(b[:n])).To(Equal("INFO syslog message")) + }) + }) +}) + +type mockSyslogWriter io.PipeWriter + +func (s *mockSyslogWriter) Debug(m string) error { + _, err := fmt.Fprintf((*io.PipeWriter)(s), "DEBUG %s", m) + return err +} +func (s *mockSyslogWriter) Info(m string) error { + _, err := fmt.Fprintf((*io.PipeWriter)(s), "INFO %s", m) + return err +} +func (s *mockSyslogWriter) Warning(m string) error { + _, err := fmt.Fprintf((*io.PipeWriter)(s), "WARNING %s", m) + return err +} +func (s *mockSyslogWriter) Err(m string) error { + _, err := fmt.Fprintf((*io.PipeWriter)(s), "ERROR %s", m) + return err +} +func (s *mockSyslogWriter) Crit(m string) error { + _, err := fmt.Fprintf((*io.PipeWriter)(s), "CRITICAL %s", m) + return err +} From dd082590de92e3e86ac7e105692fb3a920b2f4ac Mon Sep 17 00:00:00 2001 From: Neil Jerram Date: Fri, 14 Jul 2017 12:31:35 +0100 Subject: [PATCH 11/11] Add pre-DNAT flag to Policy resource --- lib/api/policy.go | 3 ++ .../resources/systemnetworkpolicies_test.go | 35 +++++++++++++++++++ lib/backend/model/policy.go | 2 ++ lib/client/policy_e2e_test.go | 27 ++++++++++++++ lib/converter/policy.go | 2 ++ 5 files changed, 69 insertions(+) diff --git a/lib/api/policy.go b/lib/api/policy.go index 2ebaa7e84..fc868dee7 100644 --- a/lib/api/policy.go +++ b/lib/api/policy.go @@ -102,6 +102,9 @@ type PolicySpec struct { // this policy are applied before any data plane connection tracking, and packets allowed by // this policy are marked as not to be tracked. DoNotTrack bool `json:"doNotTrack,omitempty"` + + // PreDNAT indicates to apply the rules in this policy before any DNAT. 
+ PreDNAT bool `json:"preDNAT,omitempty"` } // NewPolicy creates a new (zeroed) Policy struct with the TypeMetadata initialised to the current diff --git a/lib/backend/k8s/resources/systemnetworkpolicies_test.go b/lib/backend/k8s/resources/systemnetworkpolicies_test.go index 3143c08a2..9a6be78e3 100644 --- a/lib/backend/k8s/resources/systemnetworkpolicies_test.go +++ b/lib/backend/k8s/resources/systemnetworkpolicies_test.go @@ -100,6 +100,41 @@ var _ = Describe("System Network Policies conversion methods", func() { }, } + Context("with pre-DNAT flag", func() { + + BeforeEach(func() { + kvp1.Value.(*model.Policy).DoNotTrack = false + kvp1.Value.(*model.Policy).PreDNAT = true + res1.Spec.DoNotTrack = false + res1.Spec.PreDNAT = true + }) + + AfterEach(func() { + kvp1.Value.(*model.Policy).DoNotTrack = true + kvp1.Value.(*model.Policy).PreDNAT = false + res1.Spec.DoNotTrack = true + res1.Spec.PreDNAT = false + }) + + It("should convert between a KVPair and the equivalent Kubernetes resource", func() { + r, err := converter.FromKVPair(kvp1) + Expect(err).NotTo(HaveOccurred()) + Expect(r.GetObjectMeta().GetName()).To(Equal(res1.Metadata.Name)) + Expect(r.GetObjectMeta().GetResourceVersion()).To(Equal(res1.Metadata.ResourceVersion)) + Expect(r).To(BeAssignableToTypeOf(&thirdparty.SystemNetworkPolicy{})) + Expect(r.(*thirdparty.SystemNetworkPolicy).Spec).To(Equal(res1.Spec)) + }) + + It("should convert between a Kuberenetes resource and the equivalent KVPair", func() { + kvp, err := converter.ToKVPair(res1) + Expect(err).NotTo(HaveOccurred()) + Expect(kvp.Key).To(Equal(kvp1.Key)) + Expect(kvp.Revision).To(Equal(kvp1.Revision)) + Expect(kvp.Value).To(BeAssignableToTypeOf(&model.Policy{})) + Expect(kvp.Value).To(Equal(kvp1.Value)) + }) + }) + It("should convert an incomplete ListInterface to no Key", func() { Expect(converter.ListInterfaceToKey(listIncomplete)).To(BeNil()) }) diff --git a/lib/backend/model/policy.go b/lib/backend/model/policy.go index 46a5fc7e7..35b640cf7 100644 --- a/lib/backend/model/policy.go +++ b/lib/backend/model/policy.go @@ -95,6 +95,7 @@ type Policy struct { Selector string `json:"selector" validate:"selector"` DoNotTrack bool `json:"untracked,omitempty"` Annotations map[string]string `json:"annotations,omitempty"` + PreDNAT bool `json:"pre_dnat,omitempty"` } func (p Policy) String() string { @@ -114,5 +115,6 @@ func (p Policy) String() string { } parts = append(parts, fmt.Sprintf("outbound:%v", strings.Join(outRules, ";"))) parts = append(parts, fmt.Sprintf("untracked:%v", p.DoNotTrack)) + parts = append(parts, fmt.Sprintf("pre_dnat:%v", p.PreDNAT)) return strings.Join(parts, ",") } diff --git a/lib/client/policy_e2e_test.go b/lib/client/policy_e2e_test.go index 11bf3d6d2..506978fc7 100644 --- a/lib/client/policy_e2e_test.go +++ b/lib/client/policy_e2e_test.go @@ -80,6 +80,23 @@ var policySpec2AfterRead = api.PolicySpec{ DoNotTrack: true, } +var policySpec3 = api.PolicySpec{ + Order: &order2, + IngressRules: []api.Rule{testutils.InRule2, testutils.InRule1}, + EgressRules: []api.Rule{testutils.EgressRule2, testutils.EgressRule1}, + Selector: "thing2 == 'value2'", + PreDNAT: true, +} + +// When reading back, the rules should have been updated to the newer format. 
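+// (Each single Net/NotNet CIDR comes back as a one-element Nets/NotNets list;
+// see the *AfterRead rule fixtures in lib/testutils/rules.go.)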
+var policySpec3AfterRead = api.PolicySpec{ + Order: &order2, + IngressRules: []api.Rule{testutils.InRule2AfterRead, testutils.InRule1AfterRead}, + EgressRules: []api.Rule{testutils.EgressRule2AfterRead, testutils.EgressRule1AfterRead}, + Selector: "thing2 == 'value2'", + PreDNAT: true, +} + var _ = testutils.E2eDatastoreDescribe("Policy tests", testutils.DatastoreEtcdV2, func(config api.CalicoAPIConfig) { DescribeTable("Policy e2e tests", @@ -230,5 +247,15 @@ var _ = testutils.E2eDatastoreDescribe("Policy tests", testutils.DatastoreEtcdV2 }, policySpec2AfterRead, ), + + // Test 4: Two fully populated PolicySpecs, one untracked and one pre-DNAT. + Entry("Two fully populated PolicySpecs, one untracked and one pre-DNAT", + api.PolicyMetadata{Name: "policy-1/with.foo", Annotations: map[string]string{"key": "value"}}, + api.PolicyMetadata{Name: "policy.1"}, + policySpec3, + policySpec2, + policySpec3AfterRead, + policySpec2AfterRead, + ), ) }) diff --git a/lib/converter/policy.go b/lib/converter/policy.go index 8cdcd659a..90eee7641 100644 --- a/lib/converter/policy.go +++ b/lib/converter/policy.go @@ -51,6 +51,7 @@ func (p PolicyConverter) ConvertAPIToKVPair(a unversioned.Resource) (*model.KVPa Selector: ap.Spec.Selector, DoNotTrack: ap.Spec.DoNotTrack, Annotations: ap.Metadata.Annotations, + PreDNAT: ap.Spec.PreDNAT, }, } @@ -71,6 +72,7 @@ func (p PolicyConverter) ConvertKVPairToAPI(d *model.KVPair) (unversioned.Resour ap.Spec.EgressRules = RulesBackendToAPI(bp.OutboundRules) ap.Spec.Selector = bp.Selector ap.Spec.DoNotTrack = bp.DoNotTrack + ap.Spec.PreDNAT = bp.PreDNAT return ap, nil }
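
As a quick illustration of the converter changes above, the following sketch shows the new flag surviving the trip into the backend model. It is not part of the patch; it assumes the converter is invoked with an api.Policy value (as the client code does) and that api.NewPolicy() returns a pointer. The policy name and selector are purely illustrative.

    package main

    import (
        "fmt"

        "github.com/projectcalico/libcalico-go/lib/api"
        "github.com/projectcalico/libcalico-go/lib/backend/model"
        "github.com/projectcalico/libcalico-go/lib/converter"
    )

    func main() {
        p := api.NewPolicy()
        p.Metadata.Name = "prednat-policy"
        p.Spec.Selector = "has(role)"
        p.Spec.PreDNAT = true

        kvp, err := converter.PolicyConverter{}.ConvertAPIToKVPair(*p)
        if err != nil {
            panic(err)
        }

        backend := kvp.Value.(*model.Policy)
        fmt.Println(backend.PreDNAT) // true
        // String() now reports the flag too, e.g. "...,untracked:false,pre_dnat:true".
        fmt.Println(backend.String())
    }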