Environment
- Host IP: 192.168.0.9
- Docker version 19.03.2
- docker-compose version 1.24.0-rc1
- Elasticsearch version 6.6.1
- Kibana version 6.6.1
- Logstash version 6.6.1
I. ELK Dockerfiles and configuration files
● elasticsearch
1. elasticsearch-dockerfile
FROM centos:latest
ADD elasticsearch-6.6.1.tar.gz /usr/local/
COPY elasticsearch.yml /usr/local/elasticsearch-6.6.1/config/
COPY jdk1.8 /usr/local/
ENV JAVA_HOME=/usr/local/jdk1.8
ENV CLASSPATH=$CLASSPATH:$JAVA_HOME/lib:$JAVA_HOME/jre/lib
ENV PATH=$JAVA_HOME/bin:$JAVA_HOME/jre/bin:$PATH:$HOME/bin
RUN groupadd elsearch && \
    useradd elsearch -g elsearch -p elasticsearch && \
    chown -R elsearch:elsearch /usr/local/elasticsearch-6.6.1 && \
    cp /usr/share/zoneinfo/Asia/Shanghai /etc/localtime && \
    echo "Asia/Shanghai" > /etc/timezone && \
    yum install which -y && \
    mkdir /opt/data && \
    mkdir /opt/logs
EXPOSE 9200 9300
# the key step: switch to the elsearch user to start ES (it will not run as root)
USER elsearch
WORKDIR /usr/local/elasticsearch-6.6.1/bin/
ENTRYPOINT ["./elasticsearch"]
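Because elasticsearch.yml binds the node to 0.0.0.0, Elasticsearch runs its production bootstrap checks, so the Docker host normally needs a raised vm.max_map_count before the container will stay up. A minimal sketch of the host preparation and the image build follows; it assumes the archive, the jdk1.8 directory and elasticsearch.yml sit next to a Dockerfile named elasticsearch-dockerfile, and the elasticsearch:elk tag is the one referenced later in docker-compose.yml:

# on the Docker host: raise the mmap limit that Elasticsearch checks at startup
sysctl -w vm.max_map_count=262144

# build the image with the tag expected by docker-compose.yml
docker build -t elasticsearch:elk -f elasticsearch-dockerfile .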
2. elasticsearch.yml
[root@localhost elasticsearch]# egrep "^[^#]" elasticsearch.yml
cluster.name: es-cluster
node.name: node-1
path.data: /opt/data
path.logs: /opt/logs
network.host: 0.0.0.0
http.port: 9200
cluster.routing.allocation.disk.threshold_enabled: true
cluster.routing.allocation.disk.watermark.low: 94%
cluster.routing.allocation.disk.watermark.high: 96%
cluster.routing.allocation.disk.watermark.flood_stage: 98%
discovery.zen.minimum_master_nodes: 1
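Once the stack in section II below is running, these settings can be confirmed against the live node. Port 9200 is only exposed on the internal elk network, not published to the host, so the quickest check goes through the container itself; a hedged sketch, assuming curl is present in the centos-based image:

# query cluster status from inside the es container
docker exec es curl -s 'http://localhost:9200/_cluster/health?pretty'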
● logstash
1. logstash-dockerfile
FROM centos:latest
ADD logstash-6.6.1.tar.gz /usr/local/
COPY logstash.yml /usr/local/logstash-6.6.1/config/
COPY logstash.conf /usr/local/logstash-6.6.1/config/
COPY jdk1.8 /usr/local/
COPY start.sh /start.sh
ENV JAVA_HOME=/usr/local/jdk1.8
ENV CLASSPATH=$CLASSPATH:$JAVA_HOME/lib:$JAVA_HOME/jre/lib
ENV PATH=$JAVA_HOME/bin:$JAVA_HOME/jre/bin:$PATH:$HOME/bin
RUN mkdir /opt/data && \
    mkdir /opt/logs && \
    chmod +x /start.sh
ENTRYPOINT ["/start.sh"]
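As with Elasticsearch, docker-compose.yml later references a pre-built logstash:elk image, so this Dockerfile has to be built by hand first. A sketch, assuming the file is named logstash-dockerfile and the archive, JDK, config files and start.sh live in the same build context:

# build the Logstash image with the tag used in docker-compose.yml
docker build -t logstash:elk -f logstash-dockerfile .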
2. logstash-start.sh
#!/bin/bash
/usr/local/logstash-6.6.1/bin/logstash -f /usr/local/logstash-6.6.1/config/logstash.conf
3. logstash.yml
[root@localhost logstash]# egrep "^[^#]" logstash.yml
path.data: /opt/data
path.logs: /opt/logs
pipeline.batch.size: 200
4. logstash.conf
input {
  file {
    path => "/usr/local/nginx/logs/access.log"
    type => "nginx"
    start_position => "beginning"
    sincedb_path => "/dev/null"
  }
  file {
    path => "/var/log/secure"
    type => "secure"
    start_position => "beginning"
    sincedb_path => "/dev/null"
  }
}
# see my earlier posts for a detailed explanation of this grok pattern
filter {
  grok {
    match => {
      "message" => '(?<clientip>[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}) - - (?<requesttime>\[[0-9]{1,2}\/[A-Za-z]+\/[0-9]{4}\:[0-9]{2}\:[0-9]{2}\:[0-9]{2} \+[0-9]*\]) "(?<requesttype>[A-Z]+) (?<requesturl>[^ ]+) (?<requestv>HTTP/\d\.\d)" (?<requestnode>[0-9]+) (?<requestsize>[0-9]+) "(?<content>[^ ]|(http|https)://[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\/)" "(?<ua>(a-z|0-9| |.)+)"'
    }
    remove_field => ["message","log","beat","offset","prospector","host","@version"]
  }
}
# output points at the es container
output {
  if [type] == "nginx" {
    elasticsearch {
      hosts => ["es:9200"]
      index => "nginx-%{+YYYY.MM.dd}"
    }
  }
  else if [type] == "secure" {
    elasticsearch {
      hosts => ["es:9200"]
      index => "secure-%{+YYYY.MM.dd}"
    }
  }
}
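Grok patterns are easy to get subtly wrong, so it can be worth parse-checking this file before wiring it into the stack. Logstash's --config.test_and_exit flag validates the configuration without starting the pipeline; a sketch that runs it in a throwaway container from the image built above (the --entrypoint override is needed because the image's entrypoint is /start.sh):

# parse-check logstash.conf without starting the pipeline
docker run --rm --entrypoint /usr/local/logstash-6.6.1/bin/logstash logstash:elk \
  -f /usr/local/logstash-6.6.1/config/logstash.conf --config.test_and_exit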
● kibana
1. kibana-dockerfile
FROM centos:latest
ADD kibana-6.6.1-linux-x86_64.tar.gz /usr/local/
COPY kibana.yml /usr/local/kibana-6.6.1-linux-x86_64/config/
COPY start.sh /start.sh
RUN chmod +x /start.sh
EXPOSE 5601
ENTRYPOINT ["/start.sh"]
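The Kibana image is built the same way; a one-line sketch, assuming the file is named kibana-dockerfile:

# build the Kibana image with the tag used in docker-compose.yml
docker build -t kibana:elk -f kibana-dockerfile .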
2. kibana.yml
[root@localhost kibana]# egrep "^[^#]" kibana.yml
server.port: 5601
server.host: "0.0.0.0"
# points at port 9200 of the es container
elasticsearch.hosts: [ "http://es:9200" ]
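The es hostname in elasticsearch.hosts resolves because the Elasticsearch service sets container_name: es and both containers join the elk network defined in docker-compose.yml below. Once the stack is up, that resolution can be sanity-checked from inside the kibana container; a hedged sketch, assuming curl is available in the image:

# from inside the kibana container, reach Elasticsearch by its container name
docker exec kibana curl -s http://es:9200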
3. kibana-start.sh
#!/bin/bash
/usr/local/kibana-6.6.1-linux-x86_64/bin/kibana
II. Writing the docker-compose.yml file
[root@localhost elk_dockerfile]# cat docker-compose.yml
version: '3.7'
services:
  elasticsearch:
    image: elasticsearch:elk
    container_name: es
    networks:
      - elk
    volumes:
      - /opt/data:/opt/data
      - /opt/logs:/opt/logs
    expose:
      - 9200
      - 9300
    restart: always
    depends_on:
      - logstash
      - kibana
  logstash:
    image: logstash:elk
    container_name: logstash
    networks:
      - elk
    volumes:
      - /opt/logstash/data/:/opt/data
      - /opt/logstash/logs/:/opt/logs
      - /opt/elk/elk_dockerfile/logstash/logstash.conf:/usr/local/logstash-6.6.1/config/logstash.conf
      - /usr/local/nginx/logs:/usr/local/nginx/logs
      - /var/log/secure:/var/log/secure
    restart: always
  kibana:
    image: kibana:elk
    container_name: kibana
    ports:
      - 5601:5601
    networks:
      - elk
    volumes:
      - /opt/elk/elk_dockerfile/kibana/kibana.yml:/usr/local/kibana-6.6.1-linux-x86_64/config/kibana.yml
networks:
  elk:
The version field of the compose file has to match the Docker Engine release: format 3.7 requires Docker Engine 18.06.0 or newer, which the 19.03.2 engine in this environment satisfies.
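With the three images built and the configuration files in place under /opt/elk/elk_dockerfile, the stack can be brought up from the directory containing docker-compose.yml; a minimal sketch:

# start the whole stack in the background and check container status
docker-compose up -d
docker-compose ps

# follow a container's logs if something fails to start, e.g. the es container
docker logs -f es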
III. Accessing the web UI
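Kibana is published on host port 5601, so the UI is reachable at http://192.168.0.9:5601. Before creating index patterns there, it helps to confirm that Logstash has actually shipped events into Elasticsearch; a hedged check from inside the es container:

# nginx-* and secure-* indices should appear here once log lines are ingested
docker exec es curl -s 'http://localhost:9200/_cat/indices?v'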
That is all for this article. I hope it helps with your studies, and I hope you will keep supporting 服务器之家.
Original article: https://blog.51cto.com/13760226/2438738