Nginx log configuration
Note: the escape=json parameter used below requires nginx 1.11.8 or later, so check version compatibility before reusing this format.
log_format test1 escape=json '{"@timestamp":"$time_iso8601",'
    '"server_addr":"$server_addr",'
    '"remote_addr":"$remote_addr",'
    '"scheme":"$scheme",'
    '"request_method":"$request_method",'
    '"request_uri":"$request_uri",'
    '"request_length":"$request_length",'
    '"uri":"$uri",'
    '"http_x_forwarded_for":"$http_x_forwarded_for",'
    '"request_time":$request_time,'
    '"body_bytes_sent":$body_bytes_sent,'
    '"bytes_sent":$bytes_sent,'
    '"status":"$status",'
    '"upstream_time":"$upstream_response_time",'
    '"upstream_host":"$upstream_addr",'
    '"upstream_status":"$upstream_status",'
    '"host":"$host",'
    '"server_name":"$http_host",'
    '"http_referer":"$http_referer",'
    '"http_user_agent":"$http_user_agent",'
    '"request_body":"$request_body",'
    '"Upgrade":"$http_upgrade",'
    '"Connection":"$connection_upgrade",'
    '"args":"$args"'
    '}';
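The $connection_upgrade variable on the Connection line is not a built-in; nginx only knows it if a map block defines it in the http context (the usual WebSocket-proxying idiom). If it is not already defined elsewhere, a typical definition follows, together with an access_log directive wiring the format up (the log path here is an assumption, chosen to match the Filebeat paths further down):

map $http_upgrade $connection_upgrade {
    default upgrade;
    ''      close;
}

access_log /www/wwwlogs/access.log test1;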
Filebeat configuration file
output.logstash:
  hosts: ["localhost:5055"]
  worker: 2
  loadbalance: true
#================================ Processors =====================================
processors:
  - add_host_metadata: ~
  - add_cloud_metadata: ~
  - drop_fields:
      fields: ["ecs", "agent", "@metadata"]
max_procs: 1
queue.mem:
  events: 4096
  flush.min_events: 512
  flush.timeout: 5s
#=============================== log config ==================================
# debug, info, warning, or error. The default log level is info.
logging.level: debug
logging.to_files: true
logging.files:
  path: /var/log/filebeat
  name: filebeat
  keepfiles: 7
  permissions: 0644
#=============================== all logs ====================================
filebeat.inputs:
  - type: log
    enabled: true
    paths:
      - /www/wwwlogs/*.log
    json.keys_under_root: true
    json.overwrite_keys: true
    tags: ["front_nginx"]
    max_bytes: 10240
    tail_files: true
    scan_frequency: 20s   # durations need a unit; the bare 20/1/10 were presumably seconds
    close_inactive: 2m
    close_rename: true
    close_removed: true
    clean_removed: true
    backoff: 1s
    max_backoff: 10s
    harvester_limit: 30
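Before restarting the service, Filebeat can check the file and the Logstash connection itself (the config path below is an assumption; substitute wherever this file actually lives):

filebeat test config -c /etc/filebeat/filebeat.yml
filebeat test output -c /etc/filebeat/filebeat.yml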
Logstash configuration file
# Sample Logstash configuration for creating a simple
# Beats -> Logstash -> Elasticsearch pipeline.
input {
  beats {
    port => 5055
  }
}
filter {
  grok {
    match => {
      "message" => ["%{TIMESTAMP_ISO8601:logtime}\s+\[TID\s*\:(?<TID>%{USERNAME}|\s*N/A)\]\s+\[(?<thread>.*)\]\s+%{LOGLEVEL:level}\s+%{GREEDYDATA:message}"]
    }
    overwrite => ["message"]
  }
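For reference, the pattern above targets application log lines shaped like this hypothetical example (the TID slot carries a SkyWalking-style trace ID, or N/A when absent):

2023-01-02 03:04:05,123 [TID:abc123] [main] INFO Started application in 4.2 seconds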
  date {
    match => ["logtime", "yyyy-MM-dd HH:mm:ss,SSS", "yyyy-MM-dd HH:mm:ss", "dd/MMM/yyyy:HH:mm:ss Z"]
    target => "@timestamp"
  }
  # ruby {
  #   code => "event.set('timestamp', event.get('@timestamp').time.localtime + 8*60*60)"
  # }
  #
  # ruby {
  #   code => "event.set('@timestamp', event.get('timestamp'))"
  # }
  #
  mutate {
    # remove_field => ["timestamp"]
    # remove_field => ["logtime"]
    remove_field => ["event"]
  }
  # Compute an index-date string shifted to UTC+8 so daily boundaries match
  # Beijing time (assumes the host clock runs in UTC).
  ruby {
    code => "event.set('index_date', event.get('@timestamp').time.localtime + 8*60*60)"
  }
  mutate {
    convert => ["index_date", "string"]
    gsub => ["index_date", "(T[\S\s]*?)Z", ""]   # drop the time-of-day part
    gsub => ["index_date", "-", "."]             # 2023-01-02 -> 2023.01.02
  }
#if "user_name" in ["message"] or "userName" in ["message"] or "merchant_admin" in ["message"] {
# drop { }
# }
}
output {
  elasticsearch {
    hosts => ["http://localhost:9200"]
    index => "arms-nginx"
    #index => "%{[@metadata][beat]}-%{[@metadata][version]}-%{+YYYY.MM.dd}"
    #user => "elastic"
    #password => ""
  }
  file {
    path => "/var/log/logstash/log-%{+YYYY.MM.dd}.log"
  }
}
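Note that the index_date field computed in the filter is never actually used: everything lands in the static arms-nginx index. If daily indices were the intent (which the UTC+8 date math suggests), the elasticsearch block could reference it instead; a minimal sketch:

elasticsearch {
  hosts => ["http://localhost:9200"]
  index => "arms-nginx-%{index_date}"   # e.g. arms-nginx-2023.01.02
}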
Elasticsearch configuration file
cluster.name: graylog
node.name: graylog
path.data: /var/lib/elasticsearch
path.logs: /var/log/elasticsearch
action.auto_create_index: false # whether to allow automatic index creation
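With auto-creation disabled, the Logstash output above fails until the arms-nginx index (or a matching template) is created by hand. Either create it up front, or whitelist only the expected patterns, for example:

action.auto_create_index: "+arms-*,-*" # allow arms-* to be auto-created, deny everything else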
Kibana configuration file
server.port: 5601
server.host: "0.0.0.0"
i18n.locale: "zh-CN"
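Kibana defaults to the Elasticsearch node at http://localhost:9200, which matches the Logstash output above; if Elasticsearch listens elsewhere, set it explicitly (standard Kibana 7.x setting):

elasticsearch.hosts: ["http://localhost:9200"]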