ELK5.0版本源码安装过程,水平有限,凑合着看!!最后附上安装包
一、配置Java环境变量
- # mkdir -p /usr/local/java/
- # cd /usr/local/java/
- # tar zxvf /data/elk5.0/jdk-8u111-linux-x64.tar.gz
-
- # cat >>/etc/profile<<'EOF'
-
- export JAVA_HOME=/usr/local/java/jdk1.8.0_111
- export PATH=$PATH:$JAVA_HOME/bin
- export CLASSPATH=.:$JAVA_HOME/lib/dt.jar:$JAVA_HOME/lib/tools.jar
- EOF
-
- # source /etc/profile
- # java -version
- java version "1.8.0_111"
- Java(TM) SE Runtime Environment (build 1.8.0_111-b14)
- Java HotSpot(TM) 64-Bit Server VM (build 25.111-b14, mixedmode)
二、安装elasticsearch
- # mkdir -p /data/PRG/
- # cd /data/PRG/
- # tar zxvf /data/elk5.0/elasticsearch-5.0.0.tar.gz
- # mv elasticsearch-5.0.0 elasticsearch
- # useradd elasticsearch -s /sbin/nologin
- # chown elasticsearch.elasticsearch /data/PRG/elasticsearch/
添加启动脚本
vi /etc/init.d/elasticsearch
- # chmod +x /etc/init.d/elasticsearch
-
- # /etc/init.d/elasticsearch start
-
- # /etc/init.d/elasticsearch status
- elasticsearch (pid 20895) is running...
- # netstat -ntlp |grep 9[2-3]00
- tcp 0 0 :::9300 :::* LISTEN 20895/java
- tcp 0 0 :::9200 :::* LISTEN 20895/java
三、配置elasticsearch
内存低于2G,需要修改jvm配置
- # vim /data/PRG/elasticsearch/config/jvm.options
- -Xms512m
- -Xmx512m
- # cat /data/PRG/elasticsearch/config/elasticsearch.yml|grep -v '#'
- network.host: 0.0.0.0 ###开启监听地址,
- action.auto_create_index:.security,.monitoring*,.watches,.triggered_watches,.watcher-history*
- ####以下模块视情况是否开启
- xpack.security.enabled: true ####开启用户认证
- xpack.monitoring.enabled: true
- xpack.graph.enabled: true
- xpack.watcher.enabled: true
- xpack.security.authc.realms: ####用户认证模式,ldap、file、pki、ActiveDirectory等
- file1:
- type: file
- order: 0
四、安装logstash
- # cd /data/PRG/
- # tar zxvf /data/elk5.0/logstash-5.0.0.tar.gz
- # mv logstash-5.0.0 logstash
- # useradd logstash -s /sbin/nologin
- # chown logstash.logstash /data/PRG/logstash
添加启动脚本
vim /etc/init.d/logstash
#!/bin/sh
# Init script for logstash
# Maintained by Elasticsearch
# Generated by pleaserun.
# Implemented based on LSB Core 3.1:
# * Sections: 20.2, 20.3
#
### BEGIN INIT INFO
# Provides:          logstash
# Required-Start:    $remote_fs $syslog
# Required-Stop:     $remote_fs $syslog
# Default-Start:     2 3 4 5
# Default-Stop:      0 1 6
# Short-Description:
# Description:       Starts Logstash as a daemon.
### END INIT INFO

# Make the bundled logstash binaries reachable from this script.
PATH=/sbin:/usr/sbin:/bin:/usr/bin:/data/PRG/logstash/bin
export PATH

# Only root may manage the service: we chroot, renice and raise ulimits below.
if [ "$(id -u)" -ne 0 ]; then
  echo "You need root privileges to run this script"
  exit 1
fi

name=logstash

# Runtime account and installation layout.
LS_USER=logstash
LS_GROUP=logstash
LS_HOME=/data/PRG/logstash
LS_HEAP_SIZE="1g"
LS_LOG_DIR=/data/PRG/logstash/logs
LS_LOG_FILE="${LS_LOG_DIR}/$name.log"
pidfile="${LS_LOG_DIR}/$name.pid"
LS_CONF_DIR=/data/PRG/logstash/conf.d
LS_OPEN_FILES=16384
LS_NICE=19
# Defaults to 0; the operator may export 1 to allow SIGKILL after a stop timeout.
KILL_ON_STOP_TIMEOUT=${KILL_ON_STOP_TIMEOUT-0}
LS_OPTS=""

# Let distro-style override files replace any of the settings above.
[ -r /etc/default/$name ] && . /etc/default/$name
[ -r /etc/sysconfig/$name ] && . /etc/sysconfig/$name

program=$LS_HOME/bin/logstash
args=" -f ${LS_CONF_DIR} -l ${LS_LOG_FILE} ${LS_OPTS}"
-
# Run the given command with stdout and stderr discarded;
# the function's exit status is the command's exit status.
quiet() {
  "$@" > /dev/null 2>&1
}
-
# Launch logstash as a background daemon under $LS_USER and record its pid.
# Reads the LS_* globals defined at the top of this script; always returns 0.
start() {

  # Give Logstash a writable tmpdir and export the knobs its launcher reads.
  LS_JAVA_OPTS="${LS_JAVA_OPTS} -Djava.io.tmpdir=${LS_HOME}"
  HOME=${LS_HOME}
  export PATH HOME LS_HEAP_SIZE LS_JAVA_OPTS LS_USE_GC_LOGGING LS_GC_LOG_FILE

  # chroot --userspec doesn't grab the supplemental groups when setting the
  # user:group - so we have to collect and pass them ourselves.
  # Boy, I hope we're root here.
  SGROUPS=$(id -Gn "$LS_USER" | tr " " "," | sed 's/,$//'; echo '')

  # Fix: the variable was unquoted ([ ! -z $SGROUPS ]), which mis-parses the
  # test when it is empty or contains whitespace.
  if [ -n "$SGROUPS" ]
  then
    EXTRA_GROUPS="--groups $SGROUPS"
  fi

  # set ulimit as (root, presumably) first, before we drop privileges
  ulimit -n ${LS_OPEN_FILES}

  # Run the program!
  nice -n ${LS_NICE} chroot --userspec $LS_USER:$LS_GROUP $EXTRA_GROUPS / sh -c "
    cd $LS_HOME
    ulimit -n ${LS_OPEN_FILES}
    $program $args > ${LS_LOG_DIR}/$name.stdout" 2> "${LS_LOG_DIR}/$name.err" &

  # Generate the pidfile from here. If we instead made the forked process
  # generate it there will be a race condition between the pidfile writing
  # and a process possibly asking for status.
  echo $! > "$pidfile"

  echo "$name started."
  return 0
}
-
# Stop the daemon gracefully: send SIGTERM, poll up to ~9s, then either
# SIGKILL (if KILL_ON_STOP_TIMEOUT=1) or report failure.
# Returns 1 when the process survives the timeout and force-kill is disabled.
stop() {
  # Try a few times to kill TERM the program
  if status ; then
    pid=`cat "$pidfile"`
    echo "Killing $name (pid $pid) with SIGTERM"
    # Fix: the old 'ps -ef | grep $pid | ... | xargs kill -9' pipeline sent
    # SIGKILL immediately (contradicting the message and defeating the
    # graceful-timeout logic below) and could substring-match unrelated
    # pids (e.g. pid 123 matching 1234). Signal the recorded pid directly.
    kill -TERM "$pid"
    # Wait for it to exit.
    for i in 1 2 3 4 5 6 7 8 9 ; do
      echo "Waiting $name (pid $pid) to die..."
      status || break
      sleep 1
    done
    if status ; then
      if [ "$KILL_ON_STOP_TIMEOUT" -eq 1 ] ; then
        echo "Timeout reached. Killing $name (pid $pid) with SIGKILL. This may result in data loss."
        kill -KILL "$pid"
        echo "$name killed with SIGKILL."
      else
        echo "$name stop failed; still running."
        return 1 # stop timed out and not forced
      fi
    else
      echo "$name stopped."
    fi
  fi
}
-
# Report whether the daemon recorded in $pidfile is alive.
# Returns: 0 = running, 2 = dead but pidfile present, 3 = no pidfile.
status() {
  [ -f "$pidfile" ] || return 3   # program is not running

  pid=`cat "$pidfile"`
  # kill -0 delivers no signal; it only probes that the pid exists and is
  # signalable. The pid may have been recycled by an unrelated process -
  # that's the inherent limit of pidfiles. It'd be nice to use flock here,
  # but flock uses fork, not exec, so it is awkward in this case.
  if kill -0 $pid > /dev/null 2> /dev/null ; then
    return 0
  fi
  return 2   # program is dead but pid file exists
}
-
# Validate the pipeline configuration without starting the service.
# Returns: 0 = config accepted, 1 = no config files found,
#          6 = logstash rejected the configuration.
configtest() {
  # Bail out early when the config directory holds no files at all.
  if [ ! "$(ls -A ${LS_CONF_DIR}/* 2> /dev/null)" ]; then
    echo "There aren't any configuration files in ${LS_CONF_DIR}"
    return 1
  fi

  HOME=${LS_HOME}
  export PATH HOME

  # Ask logstash itself to parse the config (-t = test and exit).
  if $program -t -f ${LS_CONF_DIR} ${LS_OPTS} ; then
    return 0
  fi
  # Program not configured
  return 6
}
-
# Dispatch on the requested action. Fixes in this block:
#  * 'force-stop' called an undefined function force_stop (always failed
#    with "command not found"); it now SIGKILLs the recorded pid inline.
#  * 'reload' called an undefined function reload; the arm is removed and
#    the action now falls through to the usage message.
#  * The usage message referenced $SCRIPTNAME, which is never set; use $0.
case "$1" in
  start)
    # Refuse to double-start when an instance is already alive.
    status
    code=$?
    if [ $code -eq 0 ]; then
      echo "$name is already running"
    else
      start
      code=$?
    fi
    exit $code
    ;;
  stop) stop ;;
  force-stop)
    if status ; then
      pid=`cat "$pidfile"`
      echo "Killing $name (pid $pid) with SIGKILL. This may result in data loss."
      kill -KILL "$pid"
    fi
    ;;
  status)
    status
    code=$?
    if [ $code -eq 0 ] ; then
      echo "$name is running"
    else
      echo "$name is not running"
    fi
    exit $code
    ;;
  restart)
    stop && start
    ;;
  check)
    configtest
    exit $?
    ;;
  *)
    echo "Usage: $0 {start|stop|force-stop|status|restart|check}" >&2
    exit 3
    ;;
esac

exit $?
- # chmod +x /etc/init.d/logstash
- # /etc/init.d/logstash start
- # /etc/init.d/logstash status
- logstash is running
-
- # netstat -ntlp|grep 9600
- tcp 0 0 :::9600 :::* LISTEN 10141/java
五、配置logstash
# cat /data/PRG/logstash/config/logstash.yml |grep -v '#'
http.host: "0.0.0.0" ###开启监听地址
nginx日志收集
- # cat /data/PRG/logstash/conf.d/filter.conf
- input {
- beats {
- port => 10200
- }
- }
-
- filter {
- grok {
- match=> {
- message => "%{IPORHOST:remote_addr} ,
, %{IPORHOST:http_host} , \"%{WORD:http_verb}(?:%{PATH:baseurl}\?%{NOTSPACE:params}|%{DATA:raw_http_request})\" , %{NUMBER:http_status_code}, %{NUMBER:bytes_read} , %{QS:referrer} , %{QS:agent} ,\"%{IPORHOST:client_ip}, %{IPORHOST:proxy_server}\" , - , - , - ,%{IPORHOST:server_ip} , %{BASE10NUM:request_duration}" - }
-
- match=> {
- message => "%{IPORHOST:remote_addr} ,
, %{IPORHOST:http_host} , \"%{WORD:http_verb}(?:%{PATH:baseurl}\?%{NOTSPACE:params}|%{DATA:raw_http_request})\" , %{NUMBER:http_status_code}, %{NUMBER:bytes_read} , %{QUOTEDSTRING:referrer} , %{QS:agent} ,\"%{IPORHOST:client_ip}, %{IPORHOST:proxy_server}\" ,%{IPORHOST}:%{INT} , %{INT} , %{BASE10NUM} , %{IPORHOST} ,%{BASE10NUM:request_duration}" - }
- }
-
- }
- output {
- elasticsearch {
- hosts =>["192.168.62.200:9200"]
- index =>"operation-%{+YYYY.MM.dd}"
- document_type=> "nginx2"
- user => 'admin' #### elasticsearch的用户名,用X-PACK插件创建
- password =>'kbsonlong' #### elasticsearch的密码,用X-PACK插件创建
- }
- stdout { codec =>rubydebug }
- }
六、安装kibana
- # cd /data/PRG/
- # tar zxvf /data/elk5.0/kibana-5.0.0-linux-x86_64.tar.gz
- # mv kibana-5.0.0-linux-x86_64 kibana
- # useradd kibana -s /sbin/nologin
- # chown kibana.kibana /data/PRG/kibana
添加启动脚本
# vim /etc/init.d/kibana
- # chmod +x /etc/init.d/kibana
- # /etc/init.d/kibana start
- # /etc/init.d/kibana status
- # netstat -ntlp |grep 5601
- tcp 0 0 0.0.0.0:5601 0.0.0.0:* LISTEN 13052/node
-
七、配置kibana
- # cat /data/PRG/kibana/config/kibana.yml |grep -v '#'
- server.host: "0.0.0.0"
-
- ####以下模块视情况是否开启
- xpack.security.enabled: true
- xpack.monitoring.enabled: true
- xpack.graph.enabled: true
- xpack.reporting.enabled: true
八、x-pack插件安装
# /data/PRG/kibana/bin/kibana-plugin install
file:///root/x-pack-5.0.0.zip
# /data/PRG/elasticsearch/bin/elasticsearch-plugin install
file:///root/x-pack-5.0.0.zip
离线安装x-pack要修改用户脚本,默认创建用户配置文件在/etc/elasticsearch/x-pack目录
# vim /data/PRG/elasticsearch/bin/x-pack/users
否则在创建用户的时候提示/etc/elasticsearch/x-pack/users…tmp不存在
# mkdir /etc/elasticsearch/x-pack/
# chown -R elasticsearch.elasticsearch /etc/elasticsearch/x-pack/
1、 添加用户
- # cd /data/PRG/elasticsearch
- # bin/x-pack/users useradd admin -p kbsonlong -r superuser
2、 查看用户
- # /data/PRG/elasticsearch/bin/x-pack/users list
- admin :superuser
- test : - ###创建用户时没有添加-r参数,所以没有用户角色
3、 测试用户登录
- # curl http://localhost:9200/_xpack/ --user admin:kbsonlong
- {"build":{"hash":"7763f8e","date":"2016-10-26T04:51:59.202Z"},"license":{"uid":"06a82587-66ac-4d4a-90c4-857d9ca7f3bc","type":"trial","mode":"trial","status":"active","expiry_date_in_millis":1483753731066},"features":{"graph":{"description":"GraphData Exploration for the ElasticStack","available":true,"enabled":true},"monitoring":{"description":"Monitoringfor the ElasticStack","available":true,"enabled":true},"security":{"description":"Securityfor the ElasticStack","available":true,"enabled":true},"watcher":{"description":"Alerting,Notification and Automation for the ElasticStack","available":true,"enabled":true}},"tagline":"Youknow, for X"}
4、 删除用户
- # /data/PRG/elasticsearch/bin/x-pack/users userdel test
-
- # /data/PRG/elasticsearch/bin/x-pack/users list
- admin :superuser
十、安装filebeat
- # cd /data/PRG
- # tar zxvf /data/elk5.0/filebeat-5.0.0-linux-x86_64.tar.gz
- # mv filebeat-5.0.0-linux-x86_64 filebeat
配置启动脚本
# vim /etc/init.d/filebeat
配置filebeat
- # cat filebeat/filebeat.yml |grep -v '#'
- filebeat.prospectors:
- - input_type: log
-
- paths:
- - /tmp/nginx.log
- output.logstash:
- enabled: true
- hosts: ["localhost:10200"]
启动filebeat
- # /etc/init.d/filebeat5 start
- Starting filebeat: 2016/12/0807:18:37.177631 beat.go:264: INFO Home path: [/data/PRG/filebeat] Config path:[/data/PRG/filebeat] Data path: [/data/PRG/filebeat/data] Logs path:[/data/PRG/filebeat/logs]
- 2016/12/08 07:18:37.177681 beat.go:174:INFO Setup Beat: filebeat; Version: 5.0.0
- 2016/12/08 07:18:37.177760 logstash.go:90:INFO Max Retries set to: 3
- 2016/12/08 07:18:37.177828 outputs.go:106:INFO Activated logstash as output plugin.
- 2016/12/08 07:18:37.177912 publish.go:291:INFO Publisher name: operation
- 2016/12/08 07:18:37.178158 async.go:63:INFO Flush Interval set to: 1s
- 2016/12/08 07:18:37.178170 async.go:64:INFO Max Bulk Size set to: 2048
- Config OK
- [ OK ]
-
- # /etc/init.d/filebeat5 status
- filebeat-god (pid 7365) is running...
-
- # ps -ef |grep filebeat
- root 7405 1 0 15:18 pts/1 00:00:00 filebeat-god -r / -n -p/data/PRG/filebeat/filebeat.pid -- /data/PRG/filebeat/filebeat -c/data/PRG/filebeat/filebeat.yml
- root 7406 7405 0 15:18 pts/1 00:00:00 /data/PRG/filebeat/filebeat -c/data/PRG/filebeat/filebeat.yml
附上安装源码包,包括x-pack、beat等
百度云盘http://pan.baidu.com/s/1skT4zCx 相关文章推荐
来源
http://blog.csdn.net/qq942477618/article/details/53518372
|