filebeat.yml
filebeat.inputs:
- type: log # Default is log: read each line from log files; stdin reads from standard input
  paths:
    - /root/logs/*/*.log
  multiline.pattern: '^\d{4}\-\d{2}\-\d{2}' # Regex marking the first line of an event
  multiline.negate: true # Whether to negate the pattern; default false
  multiline.match: after # Whether matched content is appended after or before the pattern line; may be after or before
  fields:
    index: 'server_log'
setup.ilm.enabled: false
setup.template.name: "java_logback_service_index_template"
setup.template.pattern: "java_logback_service_index_template-*"
setup.template.overwrite: true
setup.template.settings:
  index.number_of_shards: 1
output.elasticsearch:
  hosts: ["110.238.107.151:9001"]
  indices:
    - index: "server_log-%{+yyyy.MM.dd}"
      when.contains:
        fields:
          index: "server_log"
  pipeline: "test_java_log_pipeline"
  document_type: log # This type is added to the type field; for ES output the input-time type field is stored. Default log
  max_retries: 3 # Number of ES retries, default 3; after that the current event is dropped
processors:
- drop_fields:
    fields: ["log","host","input","agent","ecs","start_time"]
docker run
docker run -itd \
--privileged=true \
--user=root \
--name=filebeat \
--restart always \
--network=my_network \
-v /root/filebeat/filebeat.yml:/usr/share/filebeat/filebeat.yml:rw \
-v /root/filebeat/data/:/usr/share/filebeat/data/:rw \
-v /root/logs/:/root/logs/:rw \
-v /root/xiaoye_worker/logs/:/root/xiaoye_worker/logs/:rw \
docker.elastic.co/beats/filebeat:6.4.2
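To sanity-check that the container came up and is harvesting the mounted log files (a quick verification, not part of the setup itself):

docker logs -f filebeat

Filebeat 6.x should print a "Harvester started for file" line for each log file it begins reading.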
Elasticsearch template and pipeline setup
GET _template/java_logback_service_index_template
DELETE _template/java_logback_service_index_template
# Create the index template
PUT _template/java_logback_service_index_template
{
  "order": 1,
  "index_patterns": [
    "server_log-*"
  ],
  "settings": {
    "number_of_shards": 1,
    "number_of_replicas": 1
  },
  "mappings": {
    "doc": {
      "properties": {
        "app_name": {
          "type": "keyword"
        },
        "trance_id": {
          "type": "keyword"
        },
        "log_level": {
          "type": "keyword"
        },
        "thread": {
          "type": "keyword"
        },
        "class_line": {
          "type": "keyword"
        },
        "message": {
          "type": "text",
          "analyzer": "ik_max_word",
          "search_analyzer": "ik_smart",
          "norms": false
        },
        "timestamp": {
          "type": "date"
        }
      }
    }
  },
  "aliases": {}
}
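To verify that the template actually applies to new indices, one option is to create a throwaway index matching the pattern and inspect its mapping (an illustrative check with a made-up index name):

PUT server_log-template-test
GET server_log-template-test/_mapping
DELETE server_log-template-test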
GET _ingest/pipeline/test_java_log_pipeline
DELETE /_ingest/pipeline/test_java_log_pipeline
# Create the custom ingest pipeline
PUT /_ingest/pipeline/test_java_log_pipeline
{
  "description": "test_java_log_pipeline",
  "processors": [
    {
      "grok": {
        "field": "message",
        "patterns": [
          """%{TIMESTAMP_ISO8601:timestamp} %{DATA:app_name} %{LOGLEVEL:log_level} %{DATA:thread} \[%{DATA:trance_id}\] %{DATA:class_line} %{GREEDYDATA:message}"""
        ]
      }
    },
    {
      "date": {
        "field": "timestamp",
        "formats": [
          "yyyy-MM-dd HH:mm:ss.SSS"
        ],
        "timezone": "Asia/Shanghai",
        "target_field": "timestamp"
      }
    },
    {
      "remove": {
        "field": "@timestamp"
      }
    }
  ]
}
Testing grok
# Test the grok pattern
POST _ingest/pipeline/_simulate
{
  "pipeline": {
    "description": "timestamp pipeline",
    "processors": [
      {
        "grok": {
          "field": "message",
          "patterns": [
            """%{TIMESTAMP_ISO8601:timestamp} %{DATA:app_name} %{LOGLEVEL:log_level} %{DATA:thread} \[%{DATA:trance_id}\] %{DATA:class_line} %{GREEDYDATA:message}"""
          ]
        }
      },
      {
        "date": {
          "field": "timestamp",
          "formats": [
            "yyyy-MM-dd HH:mm:ss.SSS"
          ],
          "timezone": "Asia/Shanghai",
          "target_field": "create_time"
        }
      },
      {
        "remove": {
          "field": "timestamp"
        }
      }
    ]
  },
  "docs": [
    {
      "_index": "syne_sys_log",
      "_id": "id",
      "_source": {
        "message": "2024-10-02 21:11:20.083 xiaoye-scheduler INFO scheduling-1 [] com.xiaoye.orion.scheduler.service.SchedulerService:83 now: 2024-10-02T21:11:20.083, size: 0, id:1727874680011, startTime:1727874680011"
      }
    }
  ]
}
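The simulate response should look roughly like the following (abridged and hand-derived from the grok pattern; exact metadata and field order vary by Elasticsearch version):

{
  "docs": [
    {
      "doc": {
        "_index": "syne_sys_log",
        "_id": "id",
        "_source": {
          "app_name": "xiaoye-scheduler",
          "log_level": "INFO",
          "thread": "scheduling-1",
          "trance_id": "",
          "class_line": "com.xiaoye.orion.scheduler.service.SchedulerService:83",
          "message": "now: 2024-10-02T21:11:20.083, size: 0, id:1727874680011, startTime:1727874680011",
          "create_time": "2024-10-02T21:11:20.083+08:00"
        }
      }
    }
  ]
}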
Entity
@Data
@SuperBuilder
@NoArgsConstructor
@AllArgsConstructor
@JsonInclude(JsonInclude.Include.NON_EMPTY)
@JsonIgnoreProperties(ignoreUnknown = true)
@Document(indexName = "server_log-*")
public class ServerLogEs {

    @Field(name = "log_level", type = FieldType.Keyword)
    @JsonProperty("log_level")
    private String logLevel;

    @Field(name = "thread", type = FieldType.Keyword)
    private String thread;

    @Field(name = "app_name", type = FieldType.Keyword)
    @JsonProperty("app_name")
    private String appName;

    @Field(name = "trance_id", type = FieldType.Keyword)
    @JsonProperty("trance_id")
    private String tranceId;

    @Field(name = "class_line", type = FieldType.Keyword)
    @JsonProperty("class_line")
    private String classLine;

    @Field(type = FieldType.Text, analyzer = "ik_smart", searchAnalyzer = "ik_smart")
    private String message;

    // Maps the date produced by the ingest pipeline (its date processor targets "timestamp")
    @Field(name = "timestamp", type = FieldType.Date)
    @JsonProperty("timestamp")
    @JsonFormat(pattern = "yyyy-MM-dd HH:mm:ss", timezone = "GMT+8")
    @DateTimeFormat(pattern = "yyyy-MM-dd HH:mm:ss")
    private Date createTime;
}
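For querying these documents from Spring Data Elasticsearch, a minimal repository could look like the sketch below. The repository name and query methods are hypothetical, and it assumes the entity also gains an @Id-annotated String id field, which Spring Data repositories require:

// Hypothetical repository; assumes spring-data-elasticsearch is on the classpath
// and ServerLogEs has an @Id-annotated String id property.
public interface ServerLogEsRepository extends ElasticsearchRepository<ServerLogEs, String> {

    // Full-text search over the ik-analyzed message field
    List<ServerLogEs> findByMessage(String keyword);

    // Exact filter on the keyword-typed log_level field
    List<ServerLogEs> findByLogLevel(String logLevel);
}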
logback.xml
<?xml version="1.0" encoding="UTF-8" ?>
<configuration>
    <!-- Get the service name (declared first so logPath can reference it) -->
    <springProperty scope="context" name="APP_NAME" source="spring.application.name" defaultValue="xiaoye-admin"/>
    <springProperty scope="context" name="logPath" source="log.path" defaultValue="/root/logs/${APP_NAME}/"/>
    <include resource="org/springframework/boot/logging/logback/defaults.xml"/>
    <jmxConfigurator/>
    <appender name="consoleLog" class="ch.qos.logback.core.ConsoleAppender">
        <layout class="ch.qos.logback.classic.PatternLayout">
            <pattern>%d{yyyy-MM-dd HH:mm:ss.SSS} ${APP_NAME} %-5level %thread [%X{traceId}] %class:%line %msg%n</pattern>
        </layout>
    </appender>
    <!-- Optional: also write to files as needed -->
    <appender name="fileInfoLog" class="ch.qos.logback.core.rolling.RollingFileAppender">
        <filter class="ch.qos.logback.classic.filter.LevelFilter">
            <level>ERROR</level>
            <onMatch>DENY</onMatch>
            <onMismatch>ACCEPT</onMismatch>
        </filter>
        <encoder>
            <pattern>%d{yyyy-MM-dd HH:mm:ss.SSS} ${APP_NAME} %-5level %thread [%X{traceId}] %class:%line %msg%n</pattern>
            <immediateFlush>false</immediateFlush>
        </encoder>
        <rollingPolicy class="ch.qos.logback.core.rolling.TimeBasedRollingPolicy">
            <fileNamePattern>${logPath}/info.%d{yyyy-MM-dd}.log</fileNamePattern>
            <maxHistory>7</maxHistory>
        </rollingPolicy>
    </appender>
    <appender name="fileErrorLog" class="ch.qos.logback.core.rolling.RollingFileAppender">
        <filter class="ch.qos.logback.classic.filter.ThresholdFilter">
            <level>ERROR</level>
        </filter>
        <encoder>
            <pattern>%d{yyyy-MM-dd HH:mm:ss.SSS} ${APP_NAME} %-5level %thread [%X{traceId}] %class:%line %msg%n</pattern>
        </encoder>
        <rollingPolicy class="ch.qos.logback.core.rolling.TimeBasedRollingPolicy">
            <fileNamePattern>${logPath}/error.%d{yyyy-MM-dd}.log</fileNamePattern>
            <maxHistory>7</maxHistory>
        </rollingPolicy>
    </appender>
    <logger name="org.springframework" level="ERROR"/>
    <logger name="org.xnio" level="ERROR"/>
    <logger name="io.undertow" level="ERROR"/>
    <logger name="com.netflix.discovery" level="ERROR"/>
    <!-- <springProfile name="dev,test">-->
    <!--     <root level="info">-->
    <!--         <appender-ref ref="consoleLog"/>-->
    <!--         <appender-ref ref="fileInfoLog"/>-->
    <!--         <appender-ref ref="fileErrorLog"/>-->
    <!--     </root>-->
    <!-- </springProfile>-->
    <!-- Async logging: for high log volume it noticeably improves performance, at the cost of some delay -->
    <!-- <appender name="asyncLog" class="ch.qos.logback.classic.AsyncAppender">-->
    <!--     <discardingThreshold>0</discardingThreshold>-->
    <!--     <queueSize>100</queueSize>-->
    <!--     <appender-ref ref="fileInfoLog"/>-->
    <!-- </appender>-->
    <!-- <springProfile name="prod">-->
    <!--     <root level="info">-->
    <!--         <appender-ref ref="asyncLog"/>-->
    <!--         <appender-ref ref="fileErrorLog"/>-->
    <!--     </root>-->
    <!-- </springProfile>-->
    <root level="info">
        <appender-ref ref="consoleLog"/>
        <appender-ref ref="fileInfoLog"/>
        <appender-ref ref="fileErrorLog"/>
    </root>
</configuration>
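The [%X{traceId}] token in the log pattern is only populated when a traceId is present in the MDC. A minimal sketch of setting it per request follows; the filter class is hypothetical, and any tracing library that fills the MDC works the same way:

// Hypothetical servlet filter that puts a traceId into the MDC so that
// logback's %X{traceId} (and thus the grok'd trance_id field) is non-empty.
public class TraceIdFilter implements Filter {
    @Override
    public void doFilter(ServletRequest req, ServletResponse res, FilterChain chain)
            throws IOException, ServletException {
        org.slf4j.MDC.put("traceId", java.util.UUID.randomUUID().toString());
        try {
            chain.doFilter(req, res);
        } finally {
            org.slf4j.MDC.remove("traceId"); // avoid leaking the id into unrelated requests on this thread
        }
    }
}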