Simple ELK deployment with Docker
This is the ELK deployment document written by my ops colleague; I'm recording and reposting it here.
Service planning
Architecture: Filebeat -> Kafka -> Logstash -> ES
For Kafka cluster deployment, refer to: Kafka cluster deployment
Service         Program path / data dir   Port   Config file
elasticsearch   /data/elasticsearch       9200   /data/elasticsearch/config/elasticsearch.yml
logstash        /data/logstash            –      /data/logstash/config/logstash.yml
kibana          /data/kibana              5601   /data/kibana/config/kibana.yml
filebeat        /data/filebeat            –      /data/filebeat/config/filebeat.yml
Indexing service - Elasticsearch
Create the data directories
mkdir -pv /data/elasticsearch/{config,data,logs}
chown 1000 /data/elasticsearch/{data,logs}
Modify host settings
vim /etc/sysctl.conf
Add:
vm.max_map_count=655360
sysctl -p
vim /etc/security/limits.conf
Add:
* soft memlock unlimited
* hard memlock unlimited
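To confirm the host changes took effect, run on the host (the new memlock limit only applies to sessions opened after the change):
# Verify the vm.max_map_count value loaded by sysctl -p
sysctl vm.max_map_count
# Verify the memlock limit from /etc/security/limits.conf
ulimit -l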
Configuration file
cat > /data/elasticsearch/config/elasticsearch.yml << 'EOF'
cluster.name: ccms-es-cluster
node.name: ccms-es1
network.host: 172.16.20.51
http.port: 9200
bootstrap.memory_lock: true
# Allow cross-origin access
http.cors.enabled: true
http.cors.allow-origin: "*"
http.cors.allow-methods: "OPTIONS, HEAD, GET, POST, PUT, DELETE"
http.cors.allow-headers: "Authorization, X-Requested-With, Content-Type, Content-Length, X-User"
# Cluster
node.master: true
node.data: true
transport.tcp.port: 9300
discovery.seed_hosts: ["172.16.20.51","172.16.20.52","172.16.20.53"]
cluster.initial_master_nodes: ["ccms-es1","ccms-es2","ccms-es3"]
cluster.routing.allocation.same_shard.host: true
cluster.routing.allocation.node_initial_primaries_recoveries: 4
cluster.routing.allocation.node_concurrent_recoveries: 4
# X-Pack
xpack.security.enabled: true
xpack.license.self_generated.type: basic
xpack.security.transport.ssl.enabled: true
xpack.security.transport.ssl.verification_mode: certificate
xpack.security.transport.ssl.keystore.path: elastic-certificates.p12
xpack.security.transport.ssl.truststore.path: elastic-certificates.p12
EOF
chown 1000 /data/elasticsearch/config/*
# After the containers start, generate the certificate first, distribute it to each node's config directory, then restart the ES containers
discovery.zen.minimum_master_nodes formula: master-eligible nodes / 2 + 1 (e.g. 3 nodes -> 2); in 7.x this setting is ignored in favor of discovery.seed_hosts / cluster.initial_master_nodes above
# Set the ES passwords:
# Generate random passwords automatically
elasticsearch-setup-passwords auto
# or
# Set custom passwords interactively
elasticsearch-setup-passwords interactive
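A sketch of running the password setup from the host, assuming the container name es used in the compose file below:
# Run once on any one ES node after the cluster is up and TLS is configured
docker exec -it es bin/elasticsearch-setup-passwords auto
# or choose the passwords yourself
docker exec -it es bin/elasticsearch-setup-passwords interactive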
# es-head login
http://172.16.20.52:9200/?auth_user=elastic&auth_password=elastic123456
# Generate the certificate (no passphrase on the certificate):
cd /usr/share/elasticsearch
elasticsearch-certutil ca -out config/elastic-certificates.p12 -pass ""
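A minimal sketch of the distribution step mentioned above, assuming the commands above are run via docker exec in a container named es and the nodes can reach each other over SSH:
# The certificate lands in the mounted host config directory; fix ownership and copy it to the other nodes
chown 1000 /data/elasticsearch/config/elastic-certificates.p12
scp /data/elasticsearch/config/elastic-certificates.p12 172.16.20.52:/data/elasticsearch/config/
scp /data/elasticsearch/config/elastic-certificates.p12 172.16.20.53:/data/elasticsearch/config/
# Restart ES on every node so the keystore/truststore is picked up
docker restart es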
docker-compose orchestration
mkdir -pv /data/docker-compose/elasticsearch/
cat > /data/docker-compose/elasticsearch/docker-compose.yml << EOF
version: "3"
services:
es:
container_name: es
image: elasticsearch:7.11.1
network_mode: host
restart: always
volumes:
- /etc/localtime:/etc/localtime
- /data/elasticsearch/config:/usr/share/elasticsearch/config
- /data/elasticsearch/data:/usr/share/elasticsearch/data
- /data/elasticsearch/logs:/usr/share/elasticsearch/logs
environment:
TZ: Asia/Shanghai
bootstrap.memory_lock: true
ES_JAVA_OPTS: "-Xmx8G -Xms8G"
ELASTIC_PASSWORD: "G1T@es2022#ccms"
ulimits:
memlock:
soft: -1
hard: -1
deploy:
resources:
limits:
memory: 10G
EOF
# 1. Fix the es-head cross-origin problem (browser error: Request header field Content-Type is not allowed by Access-Control-Allow-Headers)
# Add to the ES config file:
http.cors.enabled: true
http.cors.allow-origin: "*"
http.cors.allow-methods: "OPTIONS, HEAD, GET, POST, PUT, DELETE"
http.cors.allow-headers: "X-Requested-With, Content-Type, Content-Length, X-User"
# 2. Fix the blank data-browsing page in es-head (browser error: 406 Not Acceptable)
# Edit the es-head source file vendor.js
# Around line 6886
contentType: "application/x-www-form-urlencoded" --> contentType: "application/json;charset=UTF-8"
Start
docker-compose up -d
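A quick sanity check once all three nodes are up (substitute whatever elastic password was actually set):
curl -u elastic:'G1T@es2022#ccms' 'http://172.16.20.51:9200/_cluster/health?pretty'
# expect "status" : "green" and "number_of_nodes" : 3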
Log collection - Filebeat
Create the data directories
mkdir -pv /data/filebeat/{config,data}
Configuration file
Send to Kafka
cat > /data/filebeat/config/filebeat.yml << 'EOF'
###################### Filebeat Configuration Example #########################
filebeat.name: ccms-test-08
filebeat.idle_timeout: 5s
filebeat.spool_size: 2048
#----------------------------------input from ccms servers--------------------------------#
filebeat.inputs:
- type: log
  enabled: true
  paths:
    - /opt/ccms-auto-deploy/credit-business/*/*/target/logs/*.log
    - /opt/ccms-auto-deploy/credit-support/*/*/target/logs/*.log
  fields:
    kafka_topic: topic-ccms-dev
  fields_under_root: true
  # Multiline log handling
  multiline.pattern: '^\['
  multiline.negate: true
  multiline.match: after
  encoding: plain
  tail_files: false
  # How often the configured paths are scanned for new or changed files
  scan_frequency: 3s
  # Check a file for new lines every 1s; if nothing changes after repeated checks, back off up to 5s
  backoff: 1s
  max_backoff: 5s
  backoff_factor: 2
#----------------------------------input from nginx access_log--------------------------------#
- type: log
  enabled: true
  paths:
    - /data/nginx/logs/ccms-access.log
  fields:
    kafka_topic: topic-nginx-access
  fields_under_root: true
  encoding: plain
  tail_files: false
  json.keys_under_root: true
  json.overwrite_keys: true
  json.add_error_key: false
  # How often the configured paths are scanned for new or changed files
  scan_frequency: 3s
  # Check a file for new lines every 1s; if nothing changes after repeated checks, back off up to 5s
  backoff: 1s
  max_backoff: 5s
  backoff_factor: 2
#----------------------------------Kafka output--------------------------------#
output.kafka:
  enabled: true
  hosts: ['3.1.101.33:9092','3.1.101.34:9092','3.1.101.35:9092']
  topic: '%{[kafka_topic]}'
EOF
docker-compose orchestration
mkdir -pv /data/docker-compose/filebeat
cat > /data/docker-compose/filebeat/docker-compose.yml << EOF
version: "3"
services:
filebeat:
container_name: filebeat
image: elastic/filebeat:7.11.1
user: root
restart: always
volumes:
- /etc/localtime:/etc/localtime
- /data/filebeat/config/filebeat.yml:/usr/share/filebeat/filebeat.yml
- /data/filebeat/data:/usr/share/filebeat/data/registry
- /opt/ccms-auto-deploy:/opt/ccms-auto-deploy
- /data/nginx/logs:/data/nginx/logs/
deploy:
resources:
limits:
memory: 4G
reservations:
memory: 1G
EOF
Start
docker-compose up -d
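Filebeat's built-in self-test can confirm the configuration parses, and reading the Kafka topic confirms events are flowing (the Kafka script path below is an assumption; adjust it to the actual installation):
docker-compose exec filebeat filebeat test config
# On a Kafka node, confirm messages are arriving on the topic
/opt/kafka/bin/kafka-console-consumer.sh --bootstrap-server 3.1.101.33:9092 --topic topic-ccms-dev --max-messages 5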
Install the Kibana dashboards
Note: filebeat setup loads the dashboards into Kibana, so it needs a reachable Kibana and an Elasticsearch output; with the Kafka output above you may have to pass temporary -E overrides for setup.kibana.host and output.elasticsearch.hosts.
docker-compose exec filebeat filebeat setup --dashboards
Filtering service - Logstash
Create the data directories
mkdir -pv /data/logstash/{config,data,pipeline,logs}
chown 1000:1000 /data/logstash/{config,data,pipeline,logs}
Configuration files
logstash.yml
cat > /data/logstash/config/logstash.yml << 'EOF'
node.name: logstast-node1
http.host: "0.0.0.0"
path.data: data
path.logs: /usr/share/logstash/logs
config.reload.automatic: true
config.reload.interval: 5s
config.test_and_exit: false
EOF
If you use pipelines (pipelines.yml), do not configure path.config in logstash.yml.
pipelines.yml
cat > /data/logstash/config/pipelines.yml << 'EOF'
- pipeline.id: ccms-credit-java
  path.config: "/usr/share/logstash/pipeline/ccms-credit-java.conf"
- pipeline.id: ccms-credit-nginx-access
  path.config: "/usr/share/logstash/pipeline/ccms-credit-nginx-access.conf"
- pipeline.id: ccms-credit-nginx-error
  path.config: "/usr/share/logstash/pipeline/ccms-credit-nginx-error.conf"
EOF
Pipeline configuration files
pipeline/ccms-credit-java.conf
cat > /data/logstash/pipeline/ccms-credit-java.conf<< 'EOF'
input {
kafka {
topics_pattern => "topic-ccms-credit-sit-java"
bootstrap_servers => "172.16.20.51:9092,172.16.20.52:9092,172.16.20.53:9092"
consumer_threads => 4
decorate_events => true
group_id => "kafka-ccms-credit-sit-java"
add_field => {"logstash-server" => "172.16.20.51"}
}
}
filter {
json {
source => "message"
}
grok {
match => { "message" => "\[%{TIMESTAMP_ISO8601:currentDateTime}\] \[%{LOGLEVEL:level}\] \[%{DATA:traceInfo}\] \[%{NOTSPACE:class}\] \[%{DATA:hostName}\] \[%{IP:hostIp}\] \[%{DATA:applicationName}\] \[%{DATA:location}\] \[%{DATA:messageInfo}\] ## %{QUOTEDSTRING:throwable}" }
}
mutate{
enable_metric => "false"
remove_field => ["ecs","tags","input","agent","@version","log","port","host","message"]
}
date {
match => [ "currentDateTime", "ISO8601" ]
}
}
output {
elasticsearch {
hosts => ["172.16.20.51:9200","172.16.20.52:9200","172.16.20.53:9200"]
user => "elastic"
password => "G1T@es2022#ccms"
index => "index-ccms-credit-sit-java_%{+YYY-MM-dd}"
sniffing => true
template_overwrite => true
}
}
EOF
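For reference, an illustrative log line in the layout the grok pattern above expects (bracketed fields, then ## and a quoted throwable); the class and field values here are made up and the real application layout may differ:
[2022-05-10T10:15:30.123] [INFO] [traceId=abc123] [com.ccms.credit.LoanService] [ccms-test-08] [172.16.20.61] [credit-business] [LoanService.java:88] [loan approved] ## ""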
pipeline/ccms-credit-nginx-access.conf
cat > /data/logstash/pipeline/ccms-credit-nginx-access.conf << 'EOF'
input {
kafka {
topics_pattern => "topic-ccms-credit-sit-nginx-access"
bootstrap_servers => "172.16.20.51:9092,172.16.20.52:9092,172.16.20.53:9092"
codec => "json"
consumer_threads => 4
decorate_events => true
group_id => "kafka-ccms-credit-sit-nginx-access"
add_field => {"logstash-server" => "172.16.20.51"}
}
}
filter {
geoip {
source => "client_ip"
target => "geoip"
add_field => [ "[geoip][coordinates]", "%{[geoip][longitude]}" ]
add_field => [ "[geoip][coordinates]", "%{[geoip][latitude]}" ]
remove_field => [ "[geoip][latitude]", "[geoip][longitude]", "[geoip][country_code2]","[geoip][country_code3]", "[geoip][timezone]", "[geoip][continent_code]", "[geoip][dma_code]", "[geoip][region_code]" ]
}
mutate {
convert => [ "size", "integer" ]
convert => [ "status", "integer" ]
convert => [ "responsetime", "float" ]
convert => [ "upstreamtime", "float" ]
convert => [ "[geoip][coordinates]", "float" ]
# Drop Filebeat fields that aren't needed; make sure a removed field isn't still needed in ES, because once removed it can no longer be used
remove_field => [ "ecs","agent","host","cloud","@version","input","logs_type" ]
}
useragent {
source => "http_user_agent"
target => "ua"
# Drop useragent fields that aren't needed
remove_field => [ "[ua][minor]","[ua][major]","[ua][build]","[ua][patch]","[ua][os_minor]","[ua][os_major]" ]
}
}
output {
elasticsearch {
hosts => ["172.16.20.51:9200","172.16.20.52:9200","172.16.20.53:9200"]
user => "elastic"
password => "G1T@es2022#ccms"
index => "logstash-ccms-credit-sit-nginx-access_%{+YYY-MM-dd}"
sniffing => true
template_overwrite => true
}
}
EOF
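This access pipeline assumes Nginx already writes its access log as JSON (hence json.keys_under_root in the Filebeat input). A hedged sketch of a matching log_format producing the fields used above (client_ip, size, status, responsetime, upstreamtime, http_user_agent); the include path is an assumption and the names should match the real nginx.conf:
cat > /data/nginx/conf/log-json.conf << 'EOF'
log_format json_access escape=json '{"@timestamp":"$time_iso8601",'
    '"client_ip":"$remote_addr","request":"$request","status":"$status",'
    '"size":"$body_bytes_sent","responsetime":"$request_time",'
    '"upstreamtime":"$upstream_response_time","http_user_agent":"$http_user_agent"}';
EOF
Then reference it in the http/server block with: access_log /data/nginx/logs/ccms-access.log json_access;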
pipeline/ccms-credit-nginx-error.conf
cat > /data/logstash/pipeline/ccms-credit-nginx-error.conf << 'EOF'
input {
kafka {
topics_pattern => "topic-ccms-credit-sit-nginx-error"
bootstrap_servers => "172.16.20.51:9092,172.16.20.52:9092,172.16.20.53:9092"
consumer_threads => 4
decorate_events => true
group_id => "kafka-ccms-credit-sit-nginx-error"
add_field => {"logstash-server" => "172.16.20.51"}
enable_metric => true
}
}
filter {
json {
source => "message"
}
grok {
match => [
"message", "%{DATESTAMP:currentDateTime}\s{1,}\[%{LOGLEVEL:level}\]\s{1,}(%{NUMBER:pid:int}#%{NUMBER}:\s{1,}\*%{NUMBER})\s{1,}(%{GREEDYDATA:messageInfo})(?:,\s{1,}client:\s{1,}(?<client>%{IP}|%{HOSTNAME}))(?:,\s{1,}server:\s{1,}%{IPORHOST:server})(?:, request: %{QS:request})?(?:, upstream: \"%{URI:endpoint}\")?(?:, host: \"%{HOSTPORT:host}\")?(?:, referrer: \"%{URI:referrer}\")?",
"message", "%{DATESTAMP:currentDateTime}\s{1,}\[%{DATA:level}\]\s{1,}%{GREEDYDATA:messageInfo}"]
}
date{
match => ["currentDateTime", "yy/MM/dd HH:mm:ss", "ISO8601"]
timezone => "+08:00"
target => "@timestamp"
}
mutate{
enable_metric => "false"
remove_field => [ "ecs","tags","input","agent","@version","log","port","host","message" ]
}
}
output {
elasticsearch {
hosts => ["172.16.20.51:9200","172.16.20.52:9200","172.16.20.53:9200"]
user => "elastic"
password => "G1T@es2022#ccms"
index => "logstash-ccms-credit-sit-nginx-error_%{+YYY-MM-dd}"
sniffing => true
template_overwrite => true
}
}
EOF
docker-compose orchestration
mkdir -pv /data/docker-compose/logstash
cat > /data/docker-compose/logstash/docker-compose.yml << EOF
version: "3"
services:
logstash:
container_name: logstash
image: 172.16.20.50:8005/public/logstash:7.11.1
user: root
network_mode: host
restart: always
volumes:
- /etc/localtime:/etc/localtime
- /data/logstash/config:/usr/share/logstash/config
- /data/logstash/data:/usr/share/logstash/data
- /data/logstash/pipeline:/usr/share/logstash/pipeline
environment:
TZ: Asia/Shanghai
LS_JAVA_OPTS: "-Xmx8G -Xms8G"
deploy:
resources:
limits:
memory: 10G
EOF
Start
docker-compose up -d
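Once Logstash is up, its monitoring API on port 9600 shows whether the three pipelines loaded and are processing events:
curl -s 'http://localhost:9600/_node/pipelines?pretty'
curl -s 'http://localhost:9600/_node/stats/pipelines?pretty'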
Visualization service - Kibana
Create the data directories
mkdir -pv /data/kibana/{config,logs}
chown 1000 /data/kibana/{config,logs}
Configuration file
cat > /data/kibana/config/kibana.yml << 'EOF'
# Default Kibana configuration for docker target
server.name: ccms-kibana
server.port: 5601
server.host: "0"
elasticsearch.hosts: [ "http://172.16.20.51:9200","http://172.16.20.52:9200","http://172.16.20.53:9200" ]
monitoring.ui.container.elasticsearch.enabled: true
i18n.locale: "zh-CN"
map.tilemap.url: 'http://webrd02.is.autonavi.com/appmaptile?lang=zh_cn&size=1&scale=1&style=7&x={x}&y={y}&z={z}'
xpack.security.enabled: true
xpack.security.encryptionKey: "fhjskloppd678ehkdfdlliverpoolfcr"
elasticsearch.username: "elastic"
elasticsearch.password: "G1T@es2022#ccms"
EOF
docker-compose orchestration
mkdir -pv /data/docker-compose/kibana/
cat > /data/docker-compose/kibana/docker-compose.yml << EOF
version: "3"
services:
kibana:
container_name: kibana
image: kibana:7.11.1
restart: always
ports:
- "5601:5601"
volumes:
- /data/kibana/config/kibana.yml:/opt/kibana/config/kibana.yml
EOF
Start
docker-compose up -d
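A quick check that Kibana is up and can reach the cluster (then log in at http://<host>:5601 with the elastic account):
curl -s -u elastic:'G1T@es2022#ccms' 'http://localhost:5601/api/status'
# the overall status should report green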