ELK 安装配置

下载安装包

下载地址

当前版本使用的全部是 5.6.0

配置

elasticsearch 配置

1
2
3
4
5
6
$ tar xf elasticsearch-5.6.0.tar.gz
$ cd elasticsearch-5.6.0
$ vim config/jvm.options
# 根据自己的服务器的内存定义此大小
-Xms32g
-Xmx32g

注意: 由于运行elasticsearch 需要普通用户,所以这里要创建个普通用户

1
2
3
4
$ useradd elk
$ su elk
# 启动服务
$ bin/elasticsearch

测试

1
2
3
4
5
6
7
8
9
10
11
12
13
$ curl http://127.0.0.1:9200 -u admin:xxxxxx
{
"name" : "sUOfDTU",
"cluster_name" : "elasticsearch",
"cluster_uuid" : "48mAG5SaTAuI-ajfh5cPxw",
"version" : {
"number" : "5.6.0",
"build_hash" : "781a835",
"build_date" : "2017-09-07T03:09:58.087Z",
"build_snapshot" : false,
"lucene_version" : "6.6.0"
},
"tagline" : "You Know, for Search"
}

logstash 配置

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
$ tar xf logstash-5.6.0.tar.gz
$ cd logstash-5.6.0
$ vim config/logstash.yml
# 启用转义符, 由于日志的字段不同,所以我需要先用"\t"分割,然后用" "分割,具体操作,下面有详细配置
config.support_escapes: true
$ vim config/logstash.conf
input {
file {
#path => "/var/log/xx/useract_server.log*"
# 文件源
path => "/10.29.212.58/00-00-ac-1d-d4-3a/tmp/AppServer06-XX2/log/useract_server.log*"
}
}
filter {
grok {
# 匹配文件名
match => ["path", "(?<filename>useract_server.log.\d{6}-\d{2}$)"]
}
# 分割后添加字段
mutate {
split => ["message", "\t"]
add_field => {
"tmp" => "%{[message][0]}"
}
add_field => {
"accid" => "%{[message][1]}"
}
add_field => {
"user_control_obj_type" => "%{[message][2]}"
}
add_field => {
"charid" => "%{[message][3]}"
}
add_field => {
"char_name" => "%{[message][4]}"
}
add_field => {
"number_ip" => "%{[message][5]}"
}
add_field => {
"level" => "%{[message][6]}"
}
add_field => {
"zone_id" => "%{[message][7]}"
}
add_field => {
"one_level_camp" => "%{[message][8]}"
}
add_field => {
"map_name" => "%{[message][9]}"
}
add_field => {
"map_detail" => "%{[message][10]}"
}
add_field => {
"x_coord" => "%{[message][11]}"
}
add_field => {
"y_coord" => "%{[message][12]}"
}
add_field => {
"z_coord" => "%{[message][13]}"
}
add_field => {
"operation_type" => "%{[message][14]}"
}
add_field => {
"operation_way" => "%{[message][15]}"
}
add_field => {
"operation_detail" => "%{[message][16]}"
}
add_field => {
"cluster_server_id" => "%{[message][17]}"
}
add_field => {
"result_object_type" => "%{[message][18]}"
}
add_field => {
"result_object_id" => "%{[message][-5]}"
}
add_field => {
"result_object_name" => "%{[message][-4]}"
}
add_field => {
"result_object_detail" => "%{[message][-3]}"
}
add_field => {
"result_type" => "%{[message][-2]}"
}
add_field => {
"result_count" => "%{[message][-1]}"
}
}
mutate {
split => ["tmp", " "]
add_field => {
"logdate" => "%{[tmp][0]}"
}
add_field => {
"game_id" => "%{[tmp][2]}"
}
}
# 转换字段类型
mutate {
convert => {
"accid" => "integer"
"user_control_obj_type" => "integer"
"charid" => "integer"
"number_ip" => "integer"
"level" => "integer"
"zone_id" => "integer"
"one_level_camp" => "integer"
"x_coord" => "integer"
"y_coord" => "integer"
"z_coord" => "integer"
"cluster_server_id" => "integer"
"result_object_type" => "integer"
"result_object_id" => "integer"
"result_type" => "integer"
"result_count" => "integer"
}
}
# 日期格式
date {
match => ["logdate", "yyMMdd-HH:mm:ss"]
target => "logdate"
}
}
# 输出到es, 由于安装了x-pack插件,所以配置了用户和密码
output {
elasticsearch {
hosts => ["127.0.0.1:9200"]
user => "admin"
password => "xxxxxx"
# 自定义索引名
index => "xx2-%{filename}"
}
#stdout {
# codec => rubydebug
#}
}
# 启动服务
$ bin/logstash -f config/logstash.conf

kibana配置

1
2
3
4
5
6
7
8
$ tar xf kibana-5.6.0-linux-x86_64.tar.gz
$ cd kibana-5.6.0-linux-x86_64
$ vim config/kibana.yml
server.host: 0.0.0.0
elasticsearch.url: "http://127.0.0.1:9200"
# 启动服务
$ bin/kibana

安装x-pack插件

由于安装太慢,所以这里翻墙后下载后,上传到服务器里

1
$ wget https://artifacts.elastic.co/downloads/kibana-plugins/x-pack/x-pack-5.6.0.zip

注意:集群中的每台 Elasticsearch 都是需要安装的, Kibana服务器上也同样需要安装

1
2
3
$ bin/elasticsearch-plugin install /usr/local/src/x-pack-5.6.0.zip
$ bin/kibana-plugin install /usr/local/src/x-pack-5.6.0.zip

页面设置

file

登录账号默认用户elastic, 默认密码changeme,登录后建议修改密码。

file

创建Index

file

发现数据

file

supervisord配置

使用supervisord管理服务

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
$ yum install -y supervisord
# 配置
$ vim /etc/supervisord.conf
[program:logstash]
command = /usr/local/src/logstash-5.6.0/bin/logstash -f /usr/local/src/logstash-5.6.0/config/logstash.conf -w 40 -b 2000
autostart = true
autorestart = true
startsecs = 5
startretries = 3
user = root
redirect_stderr = true
stdout_logfile = /var/log/elk/logstash-std.log
stderr_logfile = /var/log/elk/logstash-err.log
[program:elasticsearch]
command=/usr/local/src/elasticsearch-5.6.0/bin/elasticsearch
autostart = true
autorestart = true
startsecs = 5
startretries = 3
user = elk
redirect_stderr = true
stdout_logfile = /var/log/elk/elasticsearch-std.log
stderr_logfile = /var/log/elk/elasticsearch-err.log
[program:kibana]
command=/usr/local/src/kibana-5.6.0-linux-x86_64/bin/kibana
autostart = true
autorestart = true
startsecs = 5
startretries = 3
user = root
redirect_stderr = true
stdout_logfile = /var/log/elk/kibana-std.log
stderr_logfile = /var/log/elk/kibana-err.log

启动服务

1
2
3
4
5
$ /etc/init.d/supervisord start
$ supervisorctl status
elasticsearch RUNNING pid 87134, uptime 4:31:22
kibana RUNNING pid 87136, uptime 4:31:22
logstash RUNNING pid 87135, uptime 4:31:22
坚持原创技术分享,您的支持将鼓励我继续创作!