Skip to content

Commit

Permalink
Add samples about how to integrate with ELK stack
Browse files Browse the repository at this point in the history
To run the sample, first install docker and docker-compose.

Then run 'docker-compose up'; a cluster will be set up with
nginx / elasticsearch / logstash / kibana / grafana.

+ add docker-compose.yml, logstash.conf
+ update Dockerfile and nginx.conf

Signed-off-by: Liu Lantao <liulantao@gmail.com>
  • Loading branch information
Lax committed May 11, 2018
1 parent a28e154 commit a6b9975
Show file tree
Hide file tree
Showing 5 changed files with 245 additions and 19 deletions.
79 changes: 79 additions & 0 deletions docker-compose.yml
Original file line number Diff line number Diff line change
@@ -0,0 +1,79 @@
# Docker Compose stack for the nginx-http-accounting ELK + Grafana sample:
# nginx ships accounting records via syslog to logstash, which parses and
# indexes them into elasticsearch; kibana and grafana visualise the data.
version: '3.6'

services:
  elasticsearch:
    labels:
      com.example.service: "es"
      com.example.description: "For searching and indexing data"
    # NOTE(review): unversioned image tag — consider pinning a version so the
    # sample keeps working when the default tag moves.
    image: elasticsearch
    networks:
      - elk
    volumes:
      # Named volume so index data survives container recreation.
      - type: volume
        source: esdata
        target: /usr/share/elasticsearch/data/
    ports:
      - "9200:9200"

  logstash:
    labels:
      com.example.service: "logstash"
      com.example.description: "For logging data"
    image: logstash
    networks:
      - elk
    volumes:
      # Mounts the pipeline config (logstash.conf, es_template.json) from the repo.
      - ./samples/elkg:/etc/logstash
    command: logstash -f /etc/logstash/logstash.conf
    depends_on:
      - elasticsearch

  kibana:
    labels:
      com.example.service: "kibana"
      com.example.description: "Data visualisation and for log aggregation"
    image: kibana
    networks:
      - elk
    ports:
      - "5601:5601"
    environment:
      - ELASTICSEARCH_URL=http://elasticsearch:9200
    depends_on:
      - elasticsearch

  grafana:
    labels:
      com.example.service: "grafana"
      com.example.description: "Data visualisation"
    image: grafana/grafana
    networks:
      - elk
    ports:
      - "3000:3000"
    environment:
      # Sample-only default credential — do not reuse outside this demo.
      - GF_SECURITY_ADMIN_PASSWORD=admin
    depends_on:
      - elasticsearch

  nginx:
    container_name: nginx_accounting
    build:
      context: .
      dockerfile: samples/Dockerfile
    networks:
      - nginx
      - elk
    ports:
      # Quoted so YAML cannot misread "NN:NN" mappings as sexagesimal
      # integers; also matches the quoting style of the other services.
      - "8080:8080"
      - "8888:8888"
    depends_on:
      - logstash

networks:
  nginx:
  elk:

volumes:
  esdata:
10 changes: 5 additions & 5 deletions Dockerfile → samples/Dockerfile
Original file line number Diff line number Diff line change
@@ -1,6 +1,5 @@
FROM centos as builder

ADD http://mirrors.aliyun.com/repo/Centos-7.repo /etc/yum.repos.d/CentOS-Base.repo
RUN yum install gcc make pcre-devel zlib-devel openssl-devel -y \
&& yum clean all

Expand All @@ -21,8 +20,8 @@ WORKDIR ${NGX_SRC_DIR}
ADD . nginx-http-accounting-module
RUN ./configure --prefix=${PREFIX} \
--with-stream \
--add-module=nginx-http-accounting-module \
--add-module=echo-nginx-module-master \
--add-dynamic-module=nginx-http-accounting-module \
--add-dynamic-module=echo-nginx-module-master \
--http-log-path=/dev/stdout \
--error-log-path=/dev/stderr \
&& make -s && make -s install
Expand All @@ -42,8 +41,9 @@ RUN ln -sf /dev/stdout ${PREFIX}/logs/access.log \
&& ln -sf /dev/stderr ${PREFIX}/logs/error.log \
&& ln -sf ../usr/share/zoneinfo/Asia/Shanghai /etc/localtime

ADD misc/nginx.conf ${PREFIX}/conf/nginx.conf
ADD samples/nginx.conf ${PREFIX}/conf/nginx.conf

EXPOSE 80
EXPOSE 8080
EXPOSE 8888
STOPSIGNAL SIGTERM
ENTRYPOINT ["./sbin/nginx", "-g", "daemon off;"]
77 changes: 77 additions & 0 deletions samples/elkg/es_template.json
Original file line number Diff line number Diff line change
@@ -0,0 +1,77 @@
{
  "version": 1,
  "template": "logstash-*",
  "settings": {
    "index": {
      "refresh_interval": "5s"
    }
  },
  "mappings": {
    "logs": {
      "_all": {
        "enabled": false,
        "norms": false
      },
      "dynamic_templates": [
        {
          "accounting_status_fields": {
            "path_match": "@nr_status.*",
            "mapping": {
              "type": "integer"
            }
          }
        },
        {
          "accounting_aggregate_status_fields": {
            "path_match": "@agg_status.*",
            "mapping": {
              "type": "integer"
            }
          }
        }
      ],
      "properties": {
        "@timestamp": {
          "type": "date"
        },
        "@from": {
          "type": "date"
        },
        "@to": {
          "type": "date"
        },
        "accounting_id": {
          "type": "keyword"
        },
        "entry_type": {
          "type": "keyword"
        },
        "nr_entries": {
          "type": "integer"
        },
        "nr_open_entries": {
          "type": "integer"
        },
        "nr_close_entries": {
          "type": "integer"
        },
        "in_bytes": {
          "type": "integer"
        },
        "out_bytes": {
          "type": "integer"
        },
        "latency_ms": {
          "type": "integer"
        },
        "upstream_latency_ms": {
          "type": "integer"
        },
        "message": {
          "type": "text"
        }
      }
    }
  },
  "aliases": {}
}
55 changes: 55 additions & 0 deletions samples/elkg/logstash.conf
Original file line number Diff line number Diff line change
@@ -0,0 +1,55 @@
# Logstash pipeline for nginx-http-accounting records.
# nginx sends accounting lines via syslog; this pipeline parses them,
# expands per-HTTP-status counters, and indexes into Elasticsearch.
input {
  # Matches nginx's http_accounting_log syslog:server=logstash:29124 target.
  syslog {
    port => 29124
  }
}
filter {
  # Parse the pipe-delimited accounting record out of the syslog message.
  grok {
    pattern_definitions => {
      "TIMESTAMP_NGX" => "%{YEAR}/%{MONTHNUM}/%{MONTHDAY} %{HOUR}:?%{MINUTE}(?::?%{SECOND})"
    }
    match => {
      "message" =>
        "%{TIMESTAMP_NGX} \[%{LOGLEVEL}] %{NUMBER}\#%{NUMBER}: pid:%{NUMBER:pid:int}\|from:(?<from>\d{10})\|to:(?<to>\d{10})\|accounting_id:(?<accounting_id>[^|]+)\|%{WORD:entry_type}:%{NUMBER:nr_entries:int}\|bytes_in:%{NUMBER:in_bytes:int}\|bytes_out:%{NUMBER:out_bytes:int}\|latency_ms:%{NUMBER:latency_ms:int}\|upstream_latency_ms:%{NUMBER:upstream_latency_ms:int}\|%{GREEDYDATA:statuses}"
    }
    # Syslog metadata we do not want indexed.
    remove_field => [ "host", "severity", "facility", "priority", "severity_label", "facility_label" ]
    remove_tag => []
  }
  # Alternative: derive @timestamp from the nginx log timestamp instead.
  # date {
  #   match => [ "timestamp" , "yyyy/MM/dd HH:mm:ss" ]
  #   target => '@timestamp'
  #   # timezone => 'Asia/Shanghai'
  #   remove_field => [ "timestamp" ]
  # }
  # Convert the 10-digit epoch interval bounds into date fields.
  date {
    match => [ "from" , "UNIX" ]
    target => '@from'
    remove_field => [ "from" ]
  }
  date {
    match => [ "to" , "UNIX" ]
    target => '@to'
    remove_field => [ "to" ]
  }
  # "statuses" looks like "200:5|404:1|..."; split into a key/value map.
  kv {
    source => "statuses_kv" # placeholder — see actual source below
  }
}
43 changes: 29 additions & 14 deletions misc/nginx.conf → samples/nginx.conf
Original file line number Diff line number Diff line change
@@ -1,3 +1,6 @@
load_module modules/ngx_http_accounting_module.so;
load_module modules/ngx_http_echo_module.so;

worker_processes auto;
error_log logs/error.log notice;

Expand All @@ -9,12 +12,8 @@ http {
include mime.types;
default_type application/octet-stream;

#log_format main '$remote_addr - $remote_user [$time_local] "$request" '
# '$status $body_bytes_sent "$http_referer" '
# '"$http_user_agent" "$http_x_forwarded_for"';

#access_log logs/access.log main;
access_log off;
log_not_found off;

sendfile on;
#tcp_nopush on;
Expand All @@ -25,16 +24,16 @@ http {
#gzip on;

http_accounting on;
http_accounting_interval 10;
http_accounting_interval 60;
http_accounting_perturb on;
http_accounting_id '_NGXTA_';
http_accounting_id 'HTTP';
http_accounting_log logs/http-accounting.log;

log_not_found off;
http_accounting_log syslog:server=logstash:29124,tag=http_accounting,nohostname notice;

server {
listen 1234;
listen 8888;
server_name echo;
http_accounting_id $host;

location / {
echo hello;
Expand All @@ -49,20 +48,36 @@ http {
echo nginx;
echo_flush;

http_accounting_id '_HTTP_ECHO_';
http_accounting_id 'HTTP_ECHO';
}

location /echo/now {
echo "Hello world!";
}
}

server {
listen 80;
listen 8080;
server_name localhost;

#charset koi8-r;

location / {
root html;
return 200 '';
http_accounting_id $uri;
}

location /index {
alias html;
index index.html index.htm;
http_accounting_id $host;
http_accounting_id "INDEX";
}

location /echo {
proxy_pass http://localhost:8888;
proxy_set_header Host "echo";
proxy_buffering off;
http_accounting_id "HTTP_PROXY_ECHO";
}

#error_page 404 /404.html;
Expand Down

0 comments on commit a6b9975

Please sign in to comment.