diff --git a/Makefile b/Makefile
index 21318c74..2fa99f21 100644
--- a/Makefile
+++ b/Makefile
@@ -8,9 +8,13 @@ REPO_VERSION := $$(git describe --abbrev=0 --tags)
 BUILD_DATE := $$(date +%Y-%m-%d-%H:%M)
 GIT_HASH := $$(git rev-parse --short HEAD)
 GOBUILD_VERSION_ARGS := -ldflags "-s -X $(VERSION_VAR)=$(REPO_VERSION) -X $(GIT_VAR)=$(GIT_HASH) -X $(BUILD_DATE_VAR)=$(BUILD_DATE)"
-IMAGE_NAME := jtblin/$(BINARY_NAME)
+# useful for other docker repos
+DOCKER_REPO := jtblin
+IMAGE_NAME := $(DOCKER_REPO)/$(BINARY_NAME)
 ARCH ?= darwin
 METALINTER_CONCURRENCY ?= 4
+# useful for passing --build-arg http_proxy :)
+DOCKER_BUILD_FLAGS :=
 
 setup:
 	go get -v -u github.com/Masterminds/glide
@@ -74,7 +78,7 @@ cross:
 	CGO_ENABLED=0 GOOS=linux go build -o build/bin/linux/$(BINARY_NAME) $(GOBUILD_VERSION_ARGS) -a -installsuffix cgo github.com/jtblin/$(BINARY_NAME)
 
 docker: cross
-	docker build -t $(IMAGE_NAME):$(GIT_HASH) .
+	docker build -t $(IMAGE_NAME):$(GIT_HASH) . $(DOCKER_BUILD_FLAGS)
 
 release: check test docker
 	docker push $(IMAGE_NAME):$(GIT_HASH)
diff --git a/README.md b/README.md
index 47186acd..ad100b8f 100644
--- a/README.md
+++ b/README.md
@@ -118,11 +118,11 @@ iptables \
   --to-destination `curl 169.254.169.254/latest/meta-data/local-ipv4`:8181
 ```
 
-This rule can be added automatically by setting `--iptables=true`, setting the `HOST_IP` environment 
+This rule can be added automatically by setting `--iptables=true`, setting the `HOST_IP` environment
 variable, and running the container in a privileged security context.
 
-Note that the interface `--in-interface` above or using the `--host-interface` cli flag may be 
-different than `docker0` depending on which virtual network you use e.g. 
+Note that the interface `--in-interface` above or using the `--host-interface` cli flag may be
+different than `docker0` depending on which virtual network you use e.g.
 
 * for Calico, use `cali+` (the interface name is something like cali1234567890
 * for kops (on kubenet), use `cbr0`
@@ -191,6 +191,29 @@ spec:
 
 You can use `--default-role` to set a fallback role to use when annotation is not set.
 
+### Namespace Restrictions
+
+By using the `--namespace-restrictions` flag you can enable a mode in which the roles that pods can assume are restricted by an annotation on the pod's namespace. This annotation should be in the form of a JSON array.
+
+To allow the aws-cli pod specified above to run in the `default` namespace, your namespace would look like the following.
+
+```
+---
+apiVersion: v1
+kind: Namespace
+metadata:
+  annotations:
+    iam.amazonaws.com/allowed-roles: |
+      ["role-name"]
+  name: default
+```
+
+### Debug
+
+By using the `--debug` flag you can enable some extra features that make debugging easier:
+
+- the `/debug/store` endpoint is enabled to dump the store's knowledge of namespaces and role associations.
+
 ### Options
 
 By default, `kube2iam` will use the in-cluster method to connect to the kubernetes master, and use the `iam.amazonaws.com/role`
@@ -204,6 +227,7 @@ Usage of kube2iam:
       --api-token string                Token to authenticate with the api server
       --app-port string                 Http port (default "8181")
       --base-role-arn string            Base role ARN
+      --debug                           Enable some debug features
       --default-role string             Fallback role to use when annotation is not set
       --host-interface string           Host interface for proxying AWS metadata (default "docker0")
       --host-ip string                  IP address of host
@@ -211,6 +235,8 @@ Usage of kube2iam:
       --insecure                        Kubernetes server should be accessed without verifying the TLS. Testing only
       --iptables                        Add iptables rule (also requires --host-ip)
       --metadata-addr string            Address for the ec2 metadata (default "169.254.169.254")
+      --namespace-key string            Namespace annotation key used to retrieve the IAM roles allowed (value in annotation should be json array) (default "iam.amazonaws.com/allowed-roles")
+      --namespace-restrictions          Enable namespace restrictions
       --verbose                         Verbose
       --version                         Print the version and exits
diff --git a/cmd/k8s.go b/cmd/k8s.go
index badafbd7..adc95432 100644
--- a/cmd/k8s.go
+++ b/cmd/k8s.go
@@ -30,16 +30,28 @@ func (k8s *k8s) watchForPods(podManager kcache.ResourceEventHandler) kcache.Stor
 		k8s.createPodLW(),
 		&api.Pod{},
 		resyncPeriod,
-		kcache.ResourceEventHandlerFuncs{
-			AddFunc:    podManager.OnAdd,
-			DeleteFunc: podManager.OnDelete,
-			UpdateFunc: podManager.OnUpdate,
-		},
+		podManager,
 	)
 	go podController.Run(wait.NeverStop)
 	return podStore
 }
 
+// returns a listwatcher of namespaces
+func (k8s *k8s) createNamespaceLW() *kcache.ListWatch {
+	return kcache.NewListWatchFromClient(k8s, "namespaces", api.NamespaceAll, selector.Everything())
+}
+
+func (k8s *k8s) watchForNamespaces(nsManager kcache.ResourceEventHandler) kcache.Store {
+	nsStore, nsController := kcache.NewInformer(
+		k8s.createNamespaceLW(),
+		&api.Namespace{},
+		resyncPeriod,
+		nsManager,
+	)
+	go nsController.Run(wait.NeverStop)
+	return nsStore
+}
+
 func newK8s(host, token string, insecure bool) (*k8s, error) {
 	var c *client.Client
 	var err error
diff --git a/cmd/namespace.go b/cmd/namespace.go
new file mode 100644
index 00000000..38c18772
--- /dev/null
+++ b/cmd/namespace.go
@@ -0,0 +1,80 @@
+package cmd
+
+import (
+	"encoding/json"
+
+	log "github.com/Sirupsen/logrus"
+	"k8s.io/kubernetes/pkg/api"
+)
+
+type namespaceHandler struct {
+	storage *store
+}
+
+// OnAdd is called when a namespace is added to k8s
+func (h *namespaceHandler) OnAdd(obj interface{}) {
+	ns, ok := obj.(*api.Namespace)
+	if !ok {
+		log.Errorf("Expected Namespace but OnAdd handler received %+v", obj)
+		return
+	}
+
+	log.Debugf("Namespace OnAdd %s", ns.GetName())
+
+	roles := h.getRoleAnnotation(ns)
+	for _, role := range roles {
+		log.Debugf("- Role %s", role)
+		h.storage.AddRoleToNamespace(ns.GetName(), role)
+	}
+
+}
+
+// OnUpdate is called when a namespace is updated inside k8s
+func (h *namespaceHandler) OnUpdate(oldObj, newObj interface{}) {
+	//ons, ok := oldObj.(*api.Namespace)
+	nns, ok := newObj.(*api.Namespace)
+	if !ok {
+		log.Errorf("Expected Namespace but OnUpdate handler received %+v", newObj)
+		return
+	}
+	log.Debugf("Namespace OnUpdate %s", nns.GetName())
+
+	roles := h.getRoleAnnotation(nns)
+	nsname := nns.GetName()
+	h.storage.DeleteNamespace(nsname)
+	for _, role := range roles {
+		log.Debugf("- Role %s", role)
+		h.storage.AddRoleToNamespace(nsname, role)
+	}
+}
+
+// OnDelete is called when a namespace is removed from k8s
+func (h *namespaceHandler) OnDelete(obj interface{}) {
+	ns, ok := obj.(*api.Namespace)
+	if !ok {
+		log.Errorf("Expected Namespace but OnDelete handler received %+v", obj)
+		return
+	}
+	log.Debugf("Namespace OnDelete %s", ns.GetName())
+	h.storage.DeleteNamespace(ns.GetName())
+}
+
+// getRoleAnnotation reads the "iam.amazonaws.com/allowed-roles" annotation off a namespace
+// and decodes it as a JSON list (["role1", "role2", "role3"])
+func (h *namespaceHandler) getRoleAnnotation(ns *api.Namespace) []string {
+	rolesString := ns.Annotations[h.storage.namespaceKey]
+	if rolesString != "" {
+		var decoded []string
+		if err := json.Unmarshal([]byte(rolesString), &decoded); err != nil {
+			log.Errorf("Unable to decode roles on namespace %s ( role annotation is '%s' ) with error: %s", ns.Name, rolesString, err)
+		}
+		return decoded
+	}
+	return nil
+}
+
+func newNamespaceHandler(s *store) *namespaceHandler {
+	return &namespaceHandler{
+		storage: s,
+	}
+}
diff --git a/cmd/pod.go b/cmd/pod.go
new file mode 100644
index 00000000..a456f5a0
--- /dev/null
+++ b/cmd/pod.go
@@ -0,0 +1,74 @@
+package cmd
+
+import (
+	log "github.com/Sirupsen/logrus"
+	"k8s.io/kubernetes/pkg/api"
+	kcache "k8s.io/kubernetes/pkg/client/cache"
+)
+
+type podHandler struct {
+	storage *store
+}
+
+// OnAdd is called when a pod is added.
+func (p *podHandler) OnAdd(obj interface{}) {
+	pod, ok := obj.(*api.Pod)
+	if !ok {
+		log.Errorf("Expected Pod but OnAdd handler received %+v", obj)
+		return
+	}
+	log.Debugf("Pod OnAdd %s - %s", pod.GetName(), pod.Status.PodIP)
+
+	p.storage.AddNamespaceToIP(pod)
+
+	if pod.Status.PodIP != "" {
+		if role, ok := pod.Annotations[p.storage.iamRoleKey]; ok {
+			log.Debugf("- Role %s", role)
+			p.storage.AddRoleToIP(pod, role)
+		}
+	}
+}
+
+// OnUpdate is called when a pod is modified.
+func (p *podHandler) OnUpdate(oldObj, newObj interface{}) {
+	oldPod, ok1 := oldObj.(*api.Pod)
+	newPod, ok2 := newObj.(*api.Pod)
+	if !ok1 || !ok2 {
+		log.Errorf("Expected Pod but OnUpdate handler received %+v %+v", oldObj, newObj)
+		return
+	}
+	log.Debugf("Pod OnUpdate %s - %s", newPod.GetName(), newPod.Status.PodIP)
+
+	if oldPod.Status.PodIP != newPod.Status.PodIP {
+		p.OnDelete(oldPod)
+		p.OnAdd(newPod)
+	}
+}
+
+// OnDelete is called when a pod is deleted.
+func (p *podHandler) OnDelete(obj interface{}) {
+	pod, ok := obj.(*api.Pod)
+	if !ok {
+		deletedObj, dok := obj.(kcache.DeletedFinalStateUnknown)
+		if dok {
+			pod, ok = deletedObj.Obj.(*api.Pod)
+		}
+	}
+
+	if !ok {
+		log.Errorf("Expected Pod but OnDelete handler received %+v", obj)
+		return
+	}
+
+	log.Debugf("Pod OnDelete %s - %s", pod.GetName(), pod.Status.PodIP)
+
+	if pod.Status.PodIP != "" {
+		p.storage.DeleteIP(pod.Status.PodIP)
+	}
+}
+
+func newPodHandler(s *store) *podHandler {
+	return &podHandler{
+		storage: s,
+	}
+}
diff --git a/cmd/server.go b/cmd/server.go
index e1b1d452..e0c059ec 100644
--- a/cmd/server.go
+++ b/cmd/server.go
@@ -29,9 +29,12 @@ type Server struct {
 	BackoffMaxInterval    time.Duration
 	BackoffMaxElapsedTime time.Duration
 	AddIPTablesRule       bool
+	Debug                 bool
 	Insecure              bool
 	Verbose               bool
 	Version               bool
+	NamespaceRestriction  bool
+	NamespaceKey          string
 	iam                   *iam
 	k8s                   *k8s
 	store                 *store
@@ -41,7 +44,7 @@ type appHandler func(http.ResponseWriter, *http.Request)
 
 // ServeHTTP implements the net/http server Handler interface.
 func (fn appHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
-	log.Infof("Requesting %s", r.RequestURI)
+	log.Debugf("Requesting %s", r.RequestURI)
 	log.Debugf("RemoteAddr %s", parseRemoteAddr(r.RemoteAddr))
 	w.Header().Set("Server", "EC2ws")
 	fn(w, r)
@@ -79,6 +82,23 @@ func (s *Server) getRole(IP string) (string, error) {
 	return role, nil
 }
 
+func (s *Server) debugStoreHandler(w http.ResponseWriter, r *http.Request) {
+	output := make(map[string]interface{})
+
+	output["rolesByIP"] = s.store.DumpRolesByIP()
+	output["rolesByNamespace"] = s.store.DumpRolesByNamespace()
+	output["namespaceByIP"] = s.store.DumpNamespaceByIP()
+
+	o, err := json.Marshal(output)
+	if err != nil {
+		log.Errorf("Error converting debug map to json: %+v", err)
+		http.Error(w, err.Error(), http.StatusInternalServerError)
+		return
+	}
+
+	w.Write(o)
+}
+
 func (s *Server) securityCredentialsHandler(w http.ResponseWriter, r *http.Request) {
 	remoteIP := parseRemoteAddr(r.RemoteAddr)
 	role, err := s.getRole(remoteIP)
@@ -99,17 +119,25 @@ func (s *Server) securityCredentialsHandler(w http.ResponseWriter, r *http.Reque
 func (s *Server) roleHandler(w http.ResponseWriter, r *http.Request) {
 	remoteIP := parseRemoteAddr(r.RemoteAddr)
-	allowedRole, err := s.getRole(remoteIP)
-	allowedRoleARN := s.iam.roleARN(allowedRole)
+	podRole, err := s.getRole(remoteIP)
 	if err != nil {
 		http.Error(w, err.Error(), http.StatusNotFound)
 		return
 	}
+	podRoleARN := s.iam.roleARN(podRole)
+
+	isRestricted, namespace := s.store.CheckNamespaceRestriction(podRoleARN, remoteIP)
+	if !isRestricted {
+		http.Error(w, fmt.Sprintf("Role requested %s not valid for namespace of pod at %s with namespace %s", podRole, remoteIP, namespace), http.StatusNotFound)
+		return
+	}
+	allowedRole := podRole
+	allowedRoleARN := podRoleARN
+
 
 	wantedRole := mux.Vars(r)["role"]
 	wantedRoleARN := s.iam.roleARN(wantedRole)
 	log.Debugf("Pod with RemoteAddr %s is annotated with role '%s' ('%s'), wants role '%s' ('%s')",
 		remoteIP, allowedRole, allowedRoleARN, wantedRole, wantedRoleARN)
-
 	if wantedRoleARN != allowedRoleARN {
 		log.Errorf("Invalid role '%s' ('%s') for RemoteAddr %s: does not match annotated role '%s' ('%s')",
 			wantedRole, wantedRoleARN, remoteIP, allowedRole, allowedRoleARN)
@@ -119,7 +147,7 @@ func (s *Server) roleHandler(w http.ResponseWriter, r *http.Request) {
 
 	credentials, err := s.iam.assumeRole(wantedRoleARN, remoteIP)
 	if err != nil {
-		log.Errorf("Error assuming role %+v", err)
+		log.Errorf("Error assuming role %+v for pod at %s with namespace %s", err, remoteIP, namespace)
 		http.Error(w, err.Error(), http.StatusInternalServerError)
 		return
 	}
@@ -154,10 +182,16 @@ func (s *Server) Run(host, token string, insecure bool) error {
 		return err
 	}
 	s.k8s = k8s
-	s.store = newStore(s.IAMRoleKey, s.DefaultIAMRole)
-	s.k8s.watchForPods(s.store)
 	s.iam = newIAM(s.BaseRoleARN)
+	model := newStore(s.IAMRoleKey, s.DefaultIAMRole, s.NamespaceRestriction, s.NamespaceKey, s.iam)
+	s.store = model
+	s.k8s.watchForPods(newPodHandler(model))
+	s.k8s.watchForNamespaces(newNamespaceHandler(model))
 	r := mux.NewRouter()
+	if s.Debug {
+		// This is a potential security risk if enabled in some clusters, hence the flag
+		r.Handle("/debug/store", appHandler(s.debugStoreHandler))
+	}
 	r.Handle("/{version}/meta-data/iam/security-credentials/", appHandler(s.securityCredentialsHandler))
 	r.Handle("/{version}/meta-data/iam/security-credentials/{role:.*}", appHandler(s.roleHandler))
 	r.Handle("/{path:.*}", appHandler(s.reverseProxyHandler))
@@ -175,5 +209,6 @@ func NewServer() *Server {
 		AppPort:         "8181",
 		IAMRoleKey:      "iam.amazonaws.com/role",
 		MetadataAddress: "169.254.169.254",
+		NamespaceKey:    "iam.amazonaws.com/allowed-roles",
 	}
 }
diff --git a/cmd/store.go b/cmd/store.go
index 809655e5..0fff27b2 100644
--- a/cmd/store.go
+++ b/cmd/store.go
@@ -6,15 +6,19 @@ import (
 
 	log "github.com/Sirupsen/logrus"
 	"k8s.io/kubernetes/pkg/api"
-	kcache "k8s.io/kubernetes/pkg/client/cache"
 )
 
 // store implements the k8s framework ResourceEventHandler interface.
 type store struct {
-	defaultRole string
-	iamRoleKey  string
-	mutex       sync.RWMutex
-	rolesByIP   map[string]string
+	defaultRole          string
+	iamRoleKey           string
+	namespaceKey         string
+	namespaceRestriction bool
+	mutex                sync.RWMutex
+	rolesByIP            map[string]string
+	rolesByNamespace     map[string][]string
+	namespaceByIP        map[string]string
+	iam                  *iam
 }
 
 // Get returns the iam role based on IP address.
@@ -31,64 +35,126 @@ func (s *store) Get(IP string) (string, error) {
 		return role, nil
 	}
 	return "", fmt.Errorf("Unable to find role for IP %s", IP)
 }
 
-// OnAdd is called when a pod is added.
-func (s *store) OnAdd(obj interface{}) {
-	pod, ok := obj.(*api.Pod)
-	if !ok {
-		log.Errorf("Expected Pod but OnAdd handler received %+v", obj)
-		return
-	}
+func (s *store) AddRoleToIP(pod *api.Pod, role string) {
+	s.mutex.Lock()
+	s.rolesByIP[pod.Status.PodIP] = role
+	s.mutex.Unlock()
+}
+
+func (s *store) AddNamespaceToIP(pod *api.Pod) {
+	namespace := pod.GetNamespace()
+	s.mutex.Lock()
+	s.namespaceByIP[pod.Status.PodIP] = namespace
+	s.mutex.Unlock()
+}
+
+func (s *store) DeleteIP(ip string) {
+	s.mutex.Lock()
+	delete(s.rolesByIP, ip)
+	delete(s.namespaceByIP, ip)
+	s.mutex.Unlock()
+}
 
-	if pod.Status.PodIP != "" {
-		if role, ok := pod.Annotations[s.iamRoleKey]; ok {
-			s.mutex.Lock()
-			s.rolesByIP[pod.Status.PodIP] = role
-			s.mutex.Unlock()
+// AddRoleToNamespace takes a role name and adds it to our internal state
+func (s *store) AddRoleToNamespace(namespace string, role string) {
+	// Make sure to add the full ARN of roles to ensure string matching works
+	roleARN := s.iam.roleARN(role)
+
+	ar := s.rolesByNamespace[namespace]
+
+	// this is a tiny bit troubling; we could make rolesByNamespace
+	// a map[string]map[string]bool so that deduplication isn't
+	// ever a problem .. but for now...
+	c := true
+	for i := range ar {
+		if ar[i] == roleARN {
+			c = false
+			break
 		}
 	}
+	if c {
+		ar = append(ar, roleARN)
+	}
+	s.mutex.Lock()
+	s.rolesByNamespace[namespace] = ar
+	s.mutex.Unlock()
 }
 
-// OnUpdate is called when a pod is modified.
-func (s *store) OnUpdate(oldObj, newObj interface{}) {
-	oldPod, ok1 := oldObj.(*api.Pod)
-	newPod, ok2 := newObj.(*api.Pod)
-	if !ok1 || !ok2 {
-		log.Errorf("Expected Pod but OnUpdate handler received %+v %+v", oldObj, newObj)
-		return
-	}
+// RemoveRoleFromNamespace takes a role and removes it from a namespace mapping
+func (s *store) RemoveRoleFromNamespace(namespace string, role string) {
+	// Make sure to remove the full ARN of roles to ensure string matching works
+	roleARN := s.iam.roleARN(role)
 
-	if oldPod.Status.PodIP != newPod.Status.PodIP {
-		s.OnDelete(oldPod)
-		s.OnAdd(newPod)
+	ar := s.rolesByNamespace[namespace]
+	for i := range ar {
+		if ar[i] == roleARN {
+			ar = append(ar[:i], ar[i+1:]...)
+			break
+		}
 	}
+	s.mutex.Lock()
+	s.rolesByNamespace[namespace] = ar
+	s.mutex.Unlock()
 }
 
-// OnDelete is called when a pod is deleted.
-func (s *store) OnDelete(obj interface{}) {
-	pod, ok := obj.(*api.Pod)
-	if !ok {
-		deletedObj, dok := obj.(kcache.DeletedFinalStateUnknown)
-		if dok {
-			pod, ok = deletedObj.Obj.(*api.Pod)
+// DeleteNamespace removes all role mappings from a namespace
+func (s *store) DeleteNamespace(namespace string) {
+	s.mutex.Lock()
+	delete(s.rolesByNamespace, namespace)
+	s.mutex.Unlock()
+}
+
+// checkRoleForNamespace checks the 'database' for a role allowed in a namespace,
+// returns true if the role is found, otherwise false
+func (s *store) checkRoleForNamespace(role string, namespace string) bool {
+	ar := s.rolesByNamespace[namespace]
+	for _, r := range ar {
+		if r == role {
+			log.Debugf("Role:%s on namespace:%s found.", role, namespace)
+			return true
 		}
 	}
+	log.Warnf("Role:%s on namespace:%s not found.", role, namespace)
+	return false
+}
 
-	if !ok {
-		log.Errorf("Expected Pod but OnDelete handler received %+v", obj)
-		return
+func (s *store) CheckNamespaceRestriction(role string, ip string) (bool, string) {
+	ns := s.namespaceByIP[ip]
+
+	// if namespace restrictions are not in place, return early with true
+	if !s.namespaceRestriction {
+		return true, ns
 	}
 
-	if pod.Status.PodIP != "" {
-		s.mutex.Lock()
-		delete(s.rolesByIP, pod.Status.PodIP)
-		s.mutex.Unlock()
+	// if the role is the default role you are also good
+	if role == s.iam.roleARN(s.defaultRole) {
+		return true, ns
 	}
+
+	return s.checkRoleForNamespace(role, ns), ns
+}
+
+func (s *store) DumpRolesByIP() map[string]string {
+	return s.rolesByIP
+}
+
+func (s *store) DumpRolesByNamespace() map[string][]string {
+	return s.rolesByNamespace
+}
+
+func (s *store) DumpNamespaceByIP() map[string]string {
+	return s.namespaceByIP
 }
 
-func newStore(key string, defaultRole string) *store {
+func newStore(key string, defaultRole string, namespaceRestriction bool, namespaceKey string, iamInstance *iam) *store {
 	return &store{
-		defaultRole: defaultRole,
-		iamRoleKey:  key,
-		rolesByIP:   make(map[string]string),
+		defaultRole:          defaultRole,
+		iamRoleKey:           key,
+		namespaceKey:         namespaceKey,
+		namespaceRestriction: namespaceRestriction,
+		rolesByIP:            make(map[string]string),
+		rolesByNamespace:     make(map[string][]string),
+		namespaceByIP:        make(map[string]string),
+		iam:                  iamInstance,
 	}
 }
diff --git a/main.go b/main.go
index 553706d4..5226b6f4 100644
--- a/main.go
+++ b/main.go
@@ -23,6 +23,9 @@ func main() {
 	addFlags(s, pflag.CommandLine)
 	pflag.Parse()
 
+	// default to info or above (probably the default anyway)
+	log.SetLevel(log.InfoLevel)
+
 	if s.Verbose {
 		log.SetLevel(log.DebugLevel)
 	}
@@ -48,12 +51,15 @@ func addFlags(s *cmd.Server, fs *pflag.FlagSet) {
 	fs.StringVar(&s.APIToken, "api-token", s.APIToken, "Token to authenticate with the api server")
 	fs.StringVar(&s.AppPort, "app-port", s.AppPort, "Http port")
 	fs.StringVar(&s.BaseRoleARN, "base-role-arn", s.BaseRoleARN, "Base role ARN")
+	fs.BoolVar(&s.Debug, "debug", s.Debug, "Enable debug features")
 	fs.StringVar(&s.DefaultIAMRole, "default-role", s.DefaultIAMRole, "Fallback role to use when annotation is not set")
 	fs.StringVar(&s.IAMRoleKey, "iam-role-key", s.IAMRoleKey, "Pod annotation key used to retrieve the IAM role")
 	fs.BoolVar(&s.Insecure, "insecure", false, "Kubernetes server should be accessed without verifying the TLS. Testing only")
 	fs.StringVar(&s.MetadataAddress, "metadata-addr", s.MetadataAddress, "Address for the ec2 metadata")
 	fs.BoolVar(&s.AddIPTablesRule, "iptables", false, "Add iptables rule (also requires --host-ip)")
 	fs.StringVar(&s.HostInterface, "host-interface", "docker0", "Host interface for proxying AWS metadata")
+	fs.BoolVar(&s.NamespaceRestriction, "namespace-restrictions", false, "Enable namespace restrictions")
+	fs.StringVar(&s.NamespaceKey, "namespace-key", s.NamespaceKey, "Namespace annotation key used to retrieve the IAM roles allowed (value in annotation should be json array)")
 	fs.StringVar(&s.HostIP, "host-ip", s.HostIP, "IP address of host")
 	fs.DurationVar(&s.BackoffMaxInterval, "backoff-max-interval", defaultMaxInterval, "Max interval for backoff when querying for role.")
 	fs.DurationVar(&s.BackoffMaxElapsedTime, "backoff-max-elapsed-time", defaultMaxElapsedTime, "Max elapsed time for backoff when querying for role.")