
Commit c6ecf23

Fix local (#107)
* fix: provide the config template file for local
* fix: fix local for api and controller
* fix: wrong default value type for gc period
* bump version
* docs: mark some todos for the future
* feat: add manual service account token creation support
* fix: service name is incorrect
* fix: typo in the file name
* remove unused ingress logic
* fix: remove kubectl usage in the code; we no longer need to download kubectl in the Dockerfile
1 parent 8ab71bb commit c6ecf23

6 files changed (+46, -41 lines)

kubernetes/service-account-secret.yaml

Lines changed: 7 additions & 0 deletions
@@ -0,0 +1,7 @@
+apiVersion: v1
+kind: Secret
+metadata:
+  name: shibuya
+  annotations:
+    kubernetes.io/service-account.name: shibuya
+type: kubernetes.io/service-account-token
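
The makefile TODO added in this commit notes that service-account tokens are no longer auto-generated on newer Kubernetes versions, so Shibuya should read the token from this manually created secret. As a rough illustration only, a client-go lookup of that token could look like the sketch below; the package name, function name, and clientset parameter are assumptions for this sketch and are not part of the commit.

package scheduler // package name assumed for illustration

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// serviceAccountToken reads the token that the token controller fills into the
// manually created "shibuya" secret of type kubernetes.io/service-account-token.
func serviceAccountToken(client kubernetes.Interface, namespace string) (string, error) {
	secret, err := client.CoreV1().Secrets(namespace).Get(context.TODO(), "shibuya", metav1.GetOptions{})
	if err != nil {
		return "", err
	}
	token, ok := secret.Data["token"]
	if !ok {
		return "", fmt.Errorf("secret %s does not contain a token yet", secret.Name)
	}
	return string(token), nil
}

The token controller only populates the token key once the secret's kubernetes.io/service-account.name annotation points at an existing service account, which matches the order used by the permissions target below: serviceaccount.yaml is applied before service-account-secret.yaml.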

makefile

Lines changed: 20 additions & 13 deletions
@@ -33,16 +33,26 @@ grafana: grafana/
 	kind load docker-image shibuya:grafana --name shibuya
 	kubectl -n $(shibuya-controller-ns) replace -f kubernetes/grafana.yaml --force
 
-.PHONY: shibuya
-shibuya: shibuya/ kubernetes/
-	cd shibuya && sh build.sh
+.PHONY: local_api
+local_api:
+	cd shibuya && sh build.sh api
 	docker build -f shibuya/Dockerfile --build-arg env=local -t api:local shibuya
 	kind load docker-image api:local --name shibuya
+
+.PHONY: local_controller
+local_controller:
+	cd shibuya && sh build.sh controller
+	docker build -f shibuya/Dockerfile --build-arg env=local -t controller:local shibuya
+	kind load docker-image controller:local --name shibuya
+
+.PHONY: shibuya
+shibuya: local_api local_controller
 	helm uninstall shibuya || true
-	helm upgrade --install shibuya install/shibuya
+	cd shibuya && helm upgrade --install shibuya install/shibuya
 
 .PHONY: jmeter
 jmeter: shibuya/engines/jmeter
+	cp shibuya/config_tmpl.json shibuya/config.json
 	cd shibuya && sh build.sh jmeter
 	docker build -t shibuya:jmeter -f shibuya/docker-local/Dockerfile.engines.jmeter shibuya
 	kind load docker-image shibuya:jmeter --name shibuya
@@ -51,8 +61,12 @@ jmeter: shibuya/engines/jmeter
 expose:
 	-killall kubectl
 	-kubectl -n $(shibuya-controller-ns) port-forward service/grafana 3000:3000 > /dev/null 2>&1 &
-	-kubectl -n $(shibuya-controller-ns) port-forward service/shibuya 8080:8080 > /dev/null 2>&1 &
+	-kubectl -n $(shibuya-controller-ns) port-forward service/shibuya-api-local 8080:8080 > /dev/null 2>&1 &
 
+# TODO!
+# After k8s 1.22, service account token is no longer auto generated. We need to manually create the secret
+# for the service account. ref: "https://kubernetes.io/docs/reference/access-authn-authz/service-accounts-admin/#manual-secret-management-for-serviceaccounts"
+# So we should fetch the token details from the manually created secret instead of the automatically created ones
 .PHONY: kubeconfig
 kubeconfig:
 	./kubernetes/generate_kubeconfig.sh $(shibuya-controller-ns)
@@ -61,6 +75,7 @@ kubeconfig:
 permissions:
 	kubectl -n $(shibuya-executor-ns) apply -f kubernetes/roles.yaml
 	kubectl -n $(shibuya-controller-ns) apply -f kubernetes/serviceaccount.yaml
+	kubectl -n $(shibuya-controller-ns) apply -f kubernetes/service-account-secret.yaml
 	-kubectl -n $(shibuya-executor-ns) create rolebinding shibuya --role=shibuya --serviceaccount $(shibuya-controller-ns):shibuya
 	kubectl -n $(shibuya-executor-ns) replace -f kubernetes/ingress.yaml --force
 
@@ -85,11 +100,3 @@ ingress-controller:
 	# And update the image in the config.json
 	docker build -t shibuya:ingress-controller -f ingress-controller/Dockerfile ingress-controller
 	kind load docker-image shibuya:ingress-controller --name shibuya
-
-.PHONY: controller
-controller:
-	cd shibuya && sh build.sh controller
-	docker build -f shibuya/Dockerfile --build-arg env=local --build-arg="binary_name=shibuya-controller" -t controller:local shibuya
-	kind load docker-image controller:local --name shibuya
-	helm uninstall shibuya || true
-	helm upgrade --install shibuya install/shibuya

shibuya/Dockerfile

Lines changed: 0 additions & 5 deletions
@@ -1,10 +1,5 @@
 FROM ubuntu:18.04
 
-RUN apt-get update && apt-get install -y curl
-RUN curl -LO https://storage.googleapis.com/kubernetes-release/release/$(curl -s https://storage.googleapis.com/kubernetes-release/release/stable.txt)/bin/linux/amd64/kubectl \
-    && chmod +x ./kubectl \
-    && mv ./kubectl /usr/local/bin/kubectl
-
 ARG binary_name=shibuya
 ADD ./build/${binary_name} /usr/local/bin/${binary_name}
 

shibuya/install/shibuya/Chart.yaml

Lines changed: 1 addition & 1 deletion
@@ -15,7 +15,7 @@ type: application
 # This is the chart version. This version number should be incremented each time you make changes
 # to the chart and its templates, including the app version.
 # Versions are expected to follow Semantic Versioning (https://semver.org/)
-version: v0.1.1
+version: v0.1.2
 
 # This is the version number of the application being deployed. This version number should be
 # incremented each time you make changes to the application. Versions are not expected to

shibuya/install/shibuya/values.yaml

Lines changed: 1 addition & 1 deletion
@@ -71,7 +71,7 @@ runtime:
   project: ""
   zone: ""
   cluster_id: ""
-  gc_duration: ""
+  gc_duration: 30
   service_type: ""
   in_cluster: true
   namespace: "shibuya-executors"
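
The commit message calls the old empty-string default the wrong type for the gc period. A minimal sketch of that failure mode, assuming the value is eventually unmarshalled into a numeric Go field via the generated config.json; the RuntimeConfig struct and field name here are hypothetical, not the project's actual config types.

package main

import (
	"encoding/json"
	"fmt"
)

// RuntimeConfig is a hypothetical stand-in for the struct that consumes gc_duration.
type RuntimeConfig struct {
	GCDuration int `json:"gc_duration"`
}

func main() {
	var good, broken RuntimeConfig
	// The new default (30) is a number and unmarshals cleanly.
	fmt.Println(json.Unmarshal([]byte(`{"gc_duration": 30}`), &good), good.GCDuration)
	// The old default ("") is a string and fails against an int field.
	fmt.Println(json.Unmarshal([]byte(`{"gc_duration": ""}`), &broken))
}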

shibuya/scheduler/k8s.go

Lines changed: 17 additions & 21 deletions
@@ -6,7 +6,6 @@ import (
 	"fmt"
 	"io/ioutil"
 	"net/http"
-	"os/exec"
 	"sort"
 	"strconv"
 	"strings"
@@ -632,16 +631,26 @@ func (kcm *K8sClientManager) ServiceReachable(engineUrl string) bool {
 }
 
 func (kcm *K8sClientManager) deleteService(collectionID int64) error {
-	// Delete services by collection is not supported as of yet
-	// Wait for this PR to be merged - https://github.com/kubernetes/kubernetes/pull/85802
-	cmd := exec.Command("kubectl", "-n", kcm.Namespace, "delete", "svc", "--force", "--grace-period=0", "-l", fmt.Sprintf("collection=%d", collectionID))
-	o, err := cmd.Output()
+	// We could not delete services by label
+	// So we firstly get them by label and then delete them one by one
+	// you can check here: https://github.com/kubernetes/kubernetes/issues/68468#issuecomment-419981870
+	corev1Client := kcm.client.CoreV1().Services(kcm.Namespace)
+	resp, err := corev1Client.List(context.TODO(), metav1.ListOptions{
+		LabelSelector: makeCollectionLabel(collectionID),
+	})
 	if err != nil {
-		log.Printf("Cannot delete services for collection %d", collectionID)
 		return err
 	}
-	log.Print(string(o))
-	return nil
+
+	// If there are any errors in deletion, we only return the last one
+	// the errors could be similar so we should avoid return a long list of errors
+	var lastError error
+	for _, svc := range resp.Items {
+		if err := corev1Client.Delete(context.TODO(), svc.Name, metav1.DeleteOptions{}); err != nil {
+			lastError = err
+		}
+	}
+	return lastError
 }
 
 func (kcm *K8sClientManager) deleteDeployment(collectionID int64) error {
@@ -672,10 +681,6 @@ func (kcm *K8sClientManager) PurgeCollection(collectionID int64) error {
 	if err != nil {
 		return err
 	}
-	err = kcm.deleteIngressRules(collectionID)
-	if err != nil {
-		return err
-	}
 	return nil
 }
 
@@ -843,15 +848,6 @@ func (kcm *K8sClientManager) CreateIngress(ingressClass, ingressName, serviceNam
 	return nil
 }
 
-func (kcm *K8sClientManager) deleteIngressRules(collectionID int64) error {
-	deletePolicy := metav1.DeletePropagationForeground
-	return kcm.client.NetworkingV1().Ingresses(kcm.Namespace).DeleteCollection(context.TODO(), metav1.DeleteOptions{
-		PropagationPolicy: &deletePolicy,
-	}, metav1.ListOptions{
-		LabelSelector: fmt.Sprintf("collection=%d", collectionID),
-	})
-}
-
 func (kcm *K8sClientManager) GetNodesByCollection(collectionID string) ([]apiv1.Node, error) {
 	opts := metav1.ListOptions{
 		LabelSelector: fmt.Sprintf("collection_id=%s", collectionID),
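
The new List call in deleteService relies on a makeCollectionLabel helper that is not part of this diff. Judging by the collection=%d selectors used elsewhere in k8s.go, a plausible definition is the sketch below; the repository's actual implementation may differ.

package scheduler // package name assumed for illustration

import "fmt"

// makeCollectionLabel builds the label selector that matches every resource
// belonging to one collection, mirroring the collection=%d format seen above.
func makeCollectionLabel(collectionID int64) string {
	return fmt.Sprintf("collection=%d", collectionID)
}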
