Lab6. Cloud Workload Logging with Logging Operator / Elasticsearch



1. Logging Operator Setup

  • Install Elasticsearch / Logging Operator
# Install Elasticsearch (ECK)
$ kubectl create -f https://download.elastic.co/downloads/eck/2.8.0/crds.yaml
$ kubectl apply -f https://download.elastic.co/downloads/eck/2.8.0/operator.yaml
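# (optional check) the ECK operator comes up in the elastic-system namespace by default
$ kubectl -n elastic-system get pods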

$ k create ns logging
$ cat <<EOF | kubectl apply -f -
apiVersion: elasticsearch.k8s.elastic.co/v1
kind: Elasticsearch
metadata:
  name: quickstart
  namespace: logging
spec:
  version: 8.8.0
  nodeSets:
  - name: default
    count: 1
    config:
      node.store.allow_mmap: false
EOF

$ cat <<EOF | kubectl apply -f -
apiVersion: kibana.k8s.elastic.co/v1
kind: Kibana
metadata:
  name: quickstart
  namespace: logging
spec:
  version: 8.8.0
  count: 1
  elasticsearchRef:
    name: quickstart
EOF
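# wait until both resources report green HEALTH (status columns come from the ECK CRDs)
$ kubectl -n logging get elasticsearch,kibana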

# Install the Logging Operator
$ helm repo add kube-logging https://kube-logging.github.io/helm-charts
$ helm repo update
$ helm upgrade --install --wait --create-namespace --namespace logging logging-operator kube-logging/logging-operator
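# the operator pod should reach Running (assumes the chart's standard app.kubernetes.io/name label)
$ kubectl -n logging get pods -l app.kubernetes.io/name=logging-operator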

2. Creating the Logging Object and Collecting Logs

# Create a new Logging object that the log rules will apply to
$ kubectl -n logging apply -f - <<"EOF"
apiVersion: logging.banzaicloud.io/v1beta1
kind: Logging
metadata:
  name: default-logging-simple
spec:
  fluentd:
    logLevel: debug
  fluentbit: {}
  controlNamespace: logging
EOF
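# the operator should now roll out a fluentd StatefulSet and a fluentbit DaemonSet
$ kubectl -n logging get pods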

# Create the Elasticsearch Output
$ kubectl -n logging apply -f - <<"EOF"
apiVersion: logging.banzaicloud.io/v1beta1
kind: Output
metadata:
  name: es-output
spec:
  elasticsearch:
    host: quickstart-es-http.logging.svc.cluster.local
    port: 9200
    scheme: https
    ssl_verify: false
    ssl_version: TLSv1_2
    user: elastic
    password:
      valueFrom:
        secretKeyRef:
          name: quickstart-es-elastic-user
          key: elastic
    buffer:
      timekey: 1m
      timekey_wait: 30s
      timekey_use_utc: true
EOF

# Create the Flow
$ kubectl -n logging apply -f - <<"EOF"
apiVersion: logging.banzaicloud.io/v1beta1
kind: Flow
metadata:
  name: es-flow
spec:
  filters:
    - tag_normaliser: {}
    - parser:
        remove_key_name_field: true
        reserve_data: true
        parse:
          type: nginx
  match:
    - select:
        labels:
          app.kubernetes.io/name: log-generator
  localOutputRefs:
    - es-output
EOF
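# verify that the Output and Flow were accepted by the operator
$ kubectl -n logging get outputs,flows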

# Install the log generator
$ helm upgrade --install --wait --create-namespace --namespace logging log-generator kube-logging/log-generator 

# Tail the FluentD logs
$ kubectl exec -ti -n logging default-logging-simple-fluentd-0 -- tail -f /fluentd/log/out
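# (optional) query Elasticsearch from inside the cluster to confirm that log indices
# are being created; curlimages/curl is just an assumed throwaway client image
$ PW=$(kubectl -n logging get secret quickstart-es-elastic-user -o=jsonpath='{.data.elastic}' | base64 --decode)
$ kubectl -n logging run curl --rm -it --restart=Never --image=curlimages/curl -- \
    curl -sk -u "elastic:$PW" https://quickstart-es-http:9200/_cat/indices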

3. Viewing Logs in Kibana

Add the following annotations to the Kibana Ingress (a complete example manifest follows below):

nginx.ingress.kubernetes.io/proxy-ssl-secret: "resources/elastic-certificate-pem"
nginx.ingress.kubernetes.io/proxy-ssl-verify: "false"
nginx.ingress.kubernetes.io/backend-protocol: "HTTPS"
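For reference, a complete Kibana Ingress with these annotations might look like the sketch below. The host kibana.kw01 is an assumed lab hostname mirroring grafana.kw01 used later; quickstart-kb-http is the service ECK creates for Kibana, listening on 5601.

$ kubectl -n logging apply -f - <<"EOF"
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: kibana
  namespace: logging
  annotations:
    nginx.ingress.kubernetes.io/proxy-ssl-secret: "resources/elastic-certificate-pem"
    nginx.ingress.kubernetes.io/proxy-ssl-verify: "false"
    nginx.ingress.kubernetes.io/backend-protocol: "HTTPS"
spec:
  ingressClassName: nginx
  rules:
  - host: kibana.kw01
    http:
      paths:
      - backend:
          service:
            name: quickstart-kb-http
            port:
              number: 5601
        path: /
        pathType: Prefix
EOF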

# Change the Kibana service type to NodePort
$ k -n logging edit svc quickstart-kb-http
spec:
  type: NodePort
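# or patch it non-interactively:
$ kubectl -n logging patch svc quickstart-kb-http -p '{"spec":{"type":"NodePort"}}'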

# To access Kibana via NodePort, check the assigned port
$ k -n logging get svc

# Check the password for the elastic user (used to log in to Kibana)
$ kubectl -n logging get secret quickstart-es-elastic-user -o=jsonpath='{.data.elastic}' | base64 --decode; echo

4. Grafana / Loki Log Setup

# Add the Grafana Helm repo
$ helm repo add grafana https://grafana.github.io/helm-charts

# Download the Loki / Grafana Helm charts
$ helm fetch grafana/loki --version 2.9.1
$ helm fetch grafana/grafana --version 6.21.3

# Install Loki / Grafana
$ helm upgrade -i loki loki-2.9.1.tgz -n logging
$ helm upgrade -i grafana grafana-6.21.3.tgz -n logging
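# confirm both releases are up
$ kubectl -n logging get pods | grep -E 'loki|grafana'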

# Configure the Grafana Ingress
$ kubectl -n logging apply -f - <<"EOF"
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: grafana
  namespace: logging
spec:
  ingressClassName: nginx
  rules:
  - host: grafana.kw01
    http:
      paths:
      - backend:
          service:
            name: grafana
            port:
              number: 80
        path: /
        pathType: Prefix
EOF
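The host grafana.kw01 is a lab-internal name; if it is not resolvable, map it to your ingress controller's address (placeholder below) on the workstation you browse from:

$ echo "<INGRESS_IP> grafana.kw01" | sudo tee -a /etc/hosts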

# Configure the Loki Output
$ kubectl -n logging apply -f - <<"EOF"
apiVersion: logging.banzaicloud.io/v1beta1
kind: Output
metadata:
  name: loki-output
spec:
  loki:
    url: http://loki:3100
    configure_kubernetes_labels: true
    buffer:
      timekey: 1m
      timekey_wait: 30s
      timekey_use_utc: true
EOF

# Configure the Loki Flow
$ kubectl -n logging apply -f - <<"EOF"
apiVersion: logging.banzaicloud.io/v1beta1
kind: Flow
metadata:
  name: loki-flow
spec:
  filters:
    - tag_normaliser: {}
    - parser:
        remove_key_name_field: true
        reserve_data: true
        parse:
          type: nginx
  match:
    - select:
        labels:
          app.kubernetes.io/name: log-generator
  localOutputRefs:
    - loki-output
EOF
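To confirm that the Flow is shipping data, Loki's standard HTTP API can be queried directly (the port-forward below is just a convenient assumption for lab access):

$ kubectl -n logging port-forward svc/loki 3100:3100 &
$ curl -s 'http://localhost:3100/loki/api/v1/labels'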

# Check the Grafana admin password
$ kubectl get secret --namespace logging grafana -o jsonpath="{.data.admin-password}" | base64 --decode ; echo

# Inspect the rendered FluentD config
$ k -n logging get secret default-logging-simple-fluentd-app -o jsonpath='{.data.fluentd\.conf}' | base64 -d

5. Viewing Logs in Loki
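
In Grafana (user admin, password from the command above), add a Loki data source with URL http://loki:3100, then open Explore and run a simple LogQL query, e.g.:

# example LogQL query in Grafana Explore; exact label names depend on the
# configure_kubernetes_labels mapping, so adjust as needed
{namespace="logging"}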


6. Storing Logs in S3

# Add the S3 storage credentials as a Secret
# Add an S3 Output and register it in the Flow
---
apiVersion: v1
data:
  access_key_id: Qjg3MDZBNjlCN0U0MUNFOTc4QUI=
  secret_access_key: N0M0OEJENDFDNjc5OTNBRDhCNjkzQTZCMzM1RUZCNkUyNzkyMUFBNA==
kind: Secret
metadata:
  name: s3-auth
  namespace: ns-app-core-dev
type: Opaque
---
apiVersion: logging.banzaicloud.io/v1beta1
kind: Flow
metadata:
  name: app-log-flow
  namespace: ns-app-core-dev
spec:
  filters:
  - parser:
      parse:
        type: json
      remove_key_name_field: false
      reserve_data: true
  localOutputRefs:
  - es-output
  - s3-output
  match:
  - select:
      labels:
        tier: backend
---
apiVersion: logging.banzaicloud.io/v1beta1
kind: Output
metadata:
  name: s3-output
  namespace: ns-app-core-dev
spec:
  s3:
    aws_key_id:
      valueFrom:
        secretKeyRef:
          key: access_key_id
          name: s3-auth
    aws_sec_key:
      valueFrom:
        secretKeyRef:
          key: secret_access_key
          name: s3-auth
    buffer:
      timekey: 1m
      timekey_use_utc: true
      timekey_wait: 30s
    force_path_style: "true"
    path: app-log/${tag}/%Y/%m/%d/
    s3_bucket: hro-app-log
    s3_endpoint: https://kr.object.private.fin-ncloudstorage.com
    s3_region: kr-standard
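
As with the earlier outputs, the objects can be verified with kubectl; once the 1-minute timekey window flushes, objects should appear under app-log/<tag>/YYYY/MM/DD/ in the hro-app-log bucket. The credentials Secret can equally be created imperatively (values below are placeholders):

$ kubectl -n ns-app-core-dev get outputs,flows
$ kubectl -n ns-app-core-dev create secret generic s3-auth \
    --from-literal=access_key_id=<ACCESS_KEY> \
    --from-literal=secret_access_key=<SECRET_KEY>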