
I. Environment Preparation

1. Install the Java environment:

yum install java-1.8.0-openjdk* -y

2. Add the elk runtime user:

groupadd -g 77 elk
useradd -u 77 -g elk -d /home/elk -s /bin/bash elk

3. Append the following to /etc/security/limits.conf:

elk soft memlock unlimited
elk hard memlock unlimited
* soft nofile 65536
* hard nofile 131072

4. Apply the changes (sysctl -p reloads kernel parameters; note that the limits.conf entries above take effect on the next login session):

sysctl -p
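The limits can be verified from a fresh login session for the elk user (an optional sanity check):

su - elk -c 'ulimit -l; ulimit -n'
# expected: "unlimited" for max locked memory and 65536 for open files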

5. Configure the hostname:

hostnamectl set-hostname monitor-elk
echo "10.135.3.135     monitor-elk" >> /etc/hosts

II. Service Deployment

1. Server side:

1) Download the ELK release packages:

wget "https://artifacts.elastic.co/downloads/elasticsearch/elasticsearch-5.2.2.tar.gz"wget "https://artifacts.elastic.co/downloads/logstash/logstash-5.2.2.tar.gz"wget "https://artifacts.elastic.co/downloads/kibana/kibana-5.2.2-linux-x86_64.tar.gz"wget "http://mirror.bit.edu.cn/apache/kafka/0.10.2.0/kafka_2.12-0.10.2.0.tgz"wget "http://mirror.bit.edu.cn/apache/zookeeper/zookeeper-3.4.9/zookeeper-3.4.9.tar.gz"

2) Create the elk directory and extract the packages above into it:

mkdir /usr/local/elk
mkdir -p /data/elasticsearch/
chown -R elk.elk /data/elasticsearch/
mkdir -p /data/{kafka,zookeeper}
mv logstash-5.2.2 logstash && mv kibana-5.2.2-linux-x86_64 kibana && mv elasticsearch-5.2.2 elasticsearch && mv filebeat-5.2.2-linux-x86_64 filebeat && mv kafka_2.12-0.10.2.0 kafka && mv zookeeper-3.4.9 zookeeper
chown -R elk.elk /usr/local/elk/

After renaming, /usr/local/elk contains the elasticsearch, logstash, kibana, filebeat, kafka, and zookeeper program directories.

3) Modify the configuration files of the following programs

①kibana:

[root@monitor-elk ~]# cat /usr/local/elk/kibana/config/kibana.yml |grep -v "^#\|^$"
server.host: "localhost"
elasticsearch.url: "http://localhost:9200"
elasticsearch.requestTimeout: 30000
logging.dest: /data/elk/logs/kibana.log
[root@monitor-elk ~]#

②elasticsearch:

[root@monitor-elk ~]# cat /usr/local/elk/elasticsearch/config/elasticsearch.yml |grep -v "^#\|^$"
node.name: node01
path.data: /data/elasticsearch/data
path.logs: /data/elk/logs/elasticsearch
bootstrap.memory_lock: true
network.host: 127.0.0.1
http.port: 9200
[root@monitor-elk ~]# vim /usr/local/elk/elasticsearch/config/jvm.options
#Modify the following parameters:
-Xms1g
-Xmx1g
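Once Elasticsearch is running (it is started under supervisor in step 6 below), you can confirm the memory lock actually took effect (an optional check using the standard _nodes API):

curl -s 'http://localhost:9200/_nodes?filter_path=**.mlockall&pretty'
# should report "mlockall" : true; if it is false, revisit the memlock entries in limits.conf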

③logstash:

[root@monitor-elk ~]# cat /usr/local/elk/logstash/config/logs.yml
input {
    #Use kafka as the source of log data
    kafka {
        bootstrap_servers => ["127.0.0.1:9092"]
        topics => "beats"
        codec => json
    }
}
filter {
    #Filter the data: any log entry containing this IP address is dropped
    if [message] =~ "123.151.4.10" {
        drop{}
    }
#   Decode into normal url encoding, e.g. for Chinese characters
#   urldecode {
#       all_fields => true
#   }
# nginx access
    #Use the type field to determine the incoming log type
    if [type] == "hongbao-nginx-access" or [type] == "pano-nginx-access" or [type] == "logstash-nginx-access" {
        grok {
            #Path to the custom grok patterns
            patterns_dir => "./patterns"
            #Parse the log content into separate fields using the named custom pattern
            match => { "message" => "%{NGINXACCESS}" }
            #After parsing, remove the default message field
            remove_field => ["message"]
        }
        #Resolve the IP address with the geoip database
        geoip {
            #Field to use as the data source
            source => "clientip"
            fields => ["country_name", "ip", "region_name"]
        }
        date {
            #Match the time in the log content, e.g. 05/Jun/2017:03:54:01 +0800
            match => ["timestamp","dd/MMM/yyyy:HH:mm:ss Z"]
            #Assign the matched time to the @timestamp field
            target => "@timestamp"
            remove_field => ["timestamp"]
        }
    }
# tomcat access
    if [type] == "hongbao-tomcat-access" or [type] == "ljq-tomcat-access" {
        grok {
            patterns_dir => "./patterns"
            match => { "message" => "%{TOMCATACCESS}" }
            remove_field => ["message"]
        }
        geoip {
            source => "clientip"
            fields => ["country_name", "ip", "region_name"]
        }
        date {
            match => ["timestamp","dd/MMM/yyyy:HH:mm:ss Z"]
            target => "@timestamp"
            remove_field => ["timestamp"]
        }
    }
# tomcat catalina
    if [type] == "hongbao-tomcat-catalina" {
        grok {
            match => {
                "message" => "^(?<log_time>\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2},\d{3}) (?<level>\w*) (?<log_data>.+)"
            }
            remove_field => ["message"]
        }
        date {
            match => ["log_time","yyyy-MM-dd HH:mm:ss,SSS"]
            target => "@timestamp"
            remove_field => ["log_time"]
        }
    }
}
output {
    #Write records that failed to parse to the specified file
    if "_grokparsefailure" in [tags] {
        file {
            path => "/data/elk/logs/grokparsefailure-%{[type]}-%{+YYYY.MM}.log"
        }
    }
# nginx access
    #Output to different elasticsearch indices according to the log type
    if [type] == "hongbao-nginx-access" {
        #Send the processed result to elasticsearch
        elasticsearch {
            hosts => ["127.0.0.1:9200"]
            #Write to the index for the current day
            index => "hongbao-nginx-access-%{+YYYY.MM.dd}"
        }
    }
    if [type] == "pano-nginx-access" {
        elasticsearch {
            hosts => ["127.0.0.1:9200"]
            index => "pano-nginx-access-%{+YYYY.MM.dd}"
        }
    }
    if [type] == "logstash-nginx-access" {
        elasticsearch {
            hosts => ["127.0.0.1:9200"]
            index => "logstash-nginx-access-%{+YYYY.MM.dd}"
        }
    }
# tomcat access
    if [type] == "hongbao-tomcat-access" {
        elasticsearch {
            hosts => ["127.0.0.1:9200"]
            index => "hongbao-tomcat-access-%{+YYYY.MM.dd}"
        }
    }
    if [type] == "ljq-tomcat-access" {
        elasticsearch {
            hosts => ["127.0.0.1:9200"]
            index => "ljq-tomcat-access-%{+YYYY.MM.dd}"
        }
    }
# tomcat catalina
    if [type] == "hongbao-tomcat-catalina" {
        elasticsearch {
            hosts => ["127.0.0.1:9200"]
            index => "hongbao-tomcat-catalina-%{+YYYY.MM.dd}"
        }
    }
}
[root@monitor-elk ~]#

Configure the regular expressions:

[root@monitor-elk ~]# cp /usr/local/elk/logstash/vendor/bundle/jruby/1.9/gems/logstash-patterns-core-4.0.2/patterns/grok-patterns /usr/local/elk/logstash/config/patterns
[root@monitor-elk ~]# tail -5 /usr/local/elk/logstash/config/patterns
# Nginx
NGINXACCESS %{COMBINEDAPACHELOG} %{QS:x_forwarded_for}
# Tomcat
TOMCATACCESS %{COMMONAPACHELOG}
[root@monitor-elk ~]# chown elk.elk /usr/local/elk/logstash/config/patterns
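Before wiring Logstash into supervisor (step 6 below), it is worth validating the pipeline syntax; Logstash 5.x provides a test-and-exit flag for this (an optional check):

su - elk -c "/usr/local/elk/logstash/bin/logstash -f /usr/local/elk/logstash/config/logs.yml --config.test_and_exit"
# prints "Configuration OK" when the pipeline parses cleanly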

4) Configure zookeeper:

cp /usr/local/elk/zookeeper/conf/zoo_sample.cfg /usr/local/elk/zookeeper/conf/zoo.cfg

Modify the data storage path in the configuration file:

vim /usr/local/elk/zookeeper/conf/zoo.cfg
dataDir=/data/zookeeper

Back up and modify the script /usr/local/elk/zookeeper/bin/zkEnv.sh

Change the following variables:

ZOO_LOG_DIR="/data/zookeeper-logs"
ZOO_LOG4J_PROP="INFO,ROLLINGFILE"

Back up and modify the logging configuration /usr/local/elk/zookeeper/conf/log4j.properties

Change the following variables:

zookeeper.root.logger=INFO, ROLLINGFILE
# Rotate the log daily
log4j.appender.ROLLINGFILE=org.apache.log4j.DailyRollingFileAppender

Start zookeeper:

/usr/local/elk/zookeeper/bin/zkServer.sh start
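You can confirm zookeeper came up before moving on (an optional check with the status subcommand that ships with zookeeper):

/usr/local/elk/zookeeper/bin/zkServer.sh status
# expected output includes: Mode: standalone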

5) Configure kafka:

Modify the following parameters in the configuration file /usr/local/elk/kafka/config/server.properties:

log.dirs=/data/kafka
zookeeper.connect=localhost:2181

Back up and modify the script /usr/local/elk/kafka/bin/kafka-run-class.sh

Append a LOG_DIR variable on the line after "base_dir=$(dirname $0)/.." to specify the log output path:

LOG_DIR=/data/kafka-logs

Create the log storage directories:

mkdir -p /data/kafka-logs
mkdir -p /data/elk/logs
chown -R elk.elk /data/elk/logs

Start kafka:

nohup /usr/local/elk/kafka/bin/kafka-server-start.sh /usr/local/elk/kafka/config/server.properties &>> /data/elk/logs/kafka.log &

Note that the hostname must be present in the /etc/hosts file, otherwise kafka will fail to start:

[root@monitor-elk ~]# cat /etc/hosts
127.0.0.1  localhost  localhost.localdomain
::1         localhost localhost.localdomain localhost6 localhost6.localdomain6
10.135.3.135     monitor-elk
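With zookeeper and kafka both running, a quick look at the broker confirms the setup (an optional smoke test using the console tools bundled with kafka; the beats topic only appears once something has published to it):

# list topics known to the broker
/usr/local/elk/kafka/bin/kafka-topics.sh --zookeeper localhost:2181 --list
# watch messages arriving on the beats topic (Ctrl-C to stop)
/usr/local/elk/kafka/bin/kafka-console-consumer.sh --bootstrap-server localhost:9092 --topic beats --from-beginning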

6) Configure supervisor

①Install supervisor:

yum install supervisor -y

Enable the service at boot (the programs it manages will be started along with it):

systemctl enable supervisord.service

②Modify the configuration

a. Create the log storage path:

mkdir -p /data/supervisor
chown -R elk.elk /data/supervisor/

b. Modify the main configuration file /etc/supervisord.conf:

logfile=/data/supervisor/supervisord.log

c. Create the supervisor configuration file for the elk programs and add the following content:

[root@monitor-elk ~]# cat /etc/supervisord.d/elk.ini
[program:elasticsearch]
directory=/usr/local/elk/elasticsearch
command=su -c "/usr/local/elk/elasticsearch/bin/elasticsearch" elk
autostart=true
startsecs=5
autorestart=true
startretries=3
priority=10

[program:logstash]
directory=/usr/local/elk/logstash
command=/usr/local/elk/logstash/bin/logstash -f /usr/local/elk/logstash/config/logs.yml
user=elk
autostart=true
startsecs=5
autorestart=true
startretries=3
redirect_stderr=true
stdout_logfile=/data/elk/logs/logstash.log
stdout_logfile_maxbytes=1024MB
stdout_logfile_backups=10
priority=11

[program:kibana]
directory=/usr/local/elk/kibana
command=/usr/local/elk/kibana/bin/kibana
user=elk
autostart=true
startsecs=5
autorestart=true
startretries=3
priority=12
[root@monitor-elk ~]#

③Start supervisor:

systemctl start supervisord

Check the program processes and logs:

ps aux|grep -v grep|grep "elasticsearch\|logstash\|kibana"

tip:

Restart a single configured program, e.g.:

supervisorctl restart logstash

Restart all configured programs:

supervisorctl restart all

Reload the configuration (only programs whose configuration changed are restarted; the rest keep running):

supervisorctl update

7) Configure nginx

①Install nginx

yum install nginx -y

②Configure the nginx proxy:

[root@monitor-elk ~]# cat /etc/nginx/conf.d/kibana.conf
upstream kibana {
    server 127.0.0.1:5601 max_fails=3 fail_timeout=30s;
}
server {
    listen       8080;
    server_name  localhost;
    location / {
        proxy_pass http://kibana/;
        index index.html index.htm;
        #auth
        auth_basic "kibana Private";
        auth_basic_user_file /etc/nginx/.htpasswd;
    }
}
[root@monitor-elk ~]# grep listen /etc/nginx/nginx.conf
listen       8000 default_server;
listen       [::]:8000 default_server;
[root@monitor-elk ~]#

③Create the nginx authentication:

[root@monitor-elk ~]# yum install httpd -y
[root@monitor-elk ~]# htpasswd -cm /etc/nginx/.htpasswd elk
New password:
Re-type new password:
Adding password for user elk
[root@monitor-elk ~]# systemctl start nginx
[root@monitor-elk ~]# systemctl enable nginx
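A quick request against the proxy confirms that basic auth is enforced (an optional check; use the password set above when prompted):

curl -I http://127.0.0.1:8080/            # returns 401 Unauthorized without credentials
curl -u elk -I http://127.0.0.1:8080/     # prompts for the password, then the request reaches Kibana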

8) Configure ik Chinese word segmentation:

①Install maven:

wget "http://mirror.bit.edu.cn/apache/maven/maven-3/3.3.9/binaries/apache-maven-3.3.9-bin.tar.gz"tar -zxf apache-maven-3.3.9-bin.tar.gzmv apache-maven-3.3.9 /usr/local/mavenecho "export MAVEN_HOME=/usr/local/maven" >> /etc/bashrcecho "export PATH=$PATH:$MAVEN_HOME/bin" >> /etc/bashrc. /etc/bashrc

②Build and install ik (make sure to download the matching version):

wget "https://github.com/medcl/elasticsearch-analysis-ik/archive/v5.2.2.zip"unzip v5.2.2.zipcd elasticsearch-analysis-ik-5.2.2/mvn packagemkdir /usr/local/elk/elasticsearch/plugins/ikcp target/releases/elasticsearch-analysis-ik-5.2.2.zip /usr/local/elk/elasticsearch/plugins/ik/cd /usr/local/elk/elasticsearch/plugins/ik/unzip elasticsearch-analysis-ik-5.2.2.zip rm -f elasticsearch-analysis-ik-5.2.2.zipchown -R elk.elk ../iksupervisorctl restart elasticsearch

③Create the index template:

To use ik segmentation, the index template must be created before the target index is created (whether the index is created manually via commands or through the logstash configuration); if ik is not needed, the default template is sufficient:

cd /usr/local/elk/logstash

Create and edit the file logstash.json with the following content:

{    "order" : 1,    "template" : "tomcatcat-*",    "settings" : {      "index" : {        "refresh_interval" : "5s"      }    },    "mappings" : {      "_default_" : {        "dynamic_templates" : [          {            "string_fields" : {              "mapping" : {                "norms" : false,                "type" : "text",                "analyzer": "ik_max_word",                "search_analyzer": "ik_max_word"              },              "match_mapping_type" : "text",              "match" : "*"            }          }        ],        "_all" : {          "norms" : false,          "enabled" : true        },        "properties" : {          "@timestamp" : {            "include_in_all" : false,            "type" : "date"          },          "log_data": {            "include_in_all" : true,            "type" : "text",            "analyzer": "ik_max_word",            "search_analyzer": "ik_max_word",            "boost" : 8          },          "@version" : {            "include_in_all" : false,            "type" : "keyword"          }        }      }    },    "aliases" : { }}'

After saving, run curl to create the index template:

curl -XPUT 'http://localhost:9200/_template/tomcatcat' -d @logstash.json

On success it returns {"acknowledged":true}.

④Hot-update configuration:

Some terms cannot be recognized by ik segmentation, such as company names and service names:

curl -XGET 'http://localhost:9200/_analyze?pretty&analyzer=ik_smart' -d '騰訊雲'

In that case you need to define a custom dictionary. ik supports hot updates of the dictionary (no elasticsearch restart required), checking for changes once per minute.

Create a UTF-8 text file ik.txt under the nginx web root, and write the terms to be segmented into ik.txt, one word per line:

Then modify /usr/local/elk/elasticsearch/plugins/ik/config/IKAnalyzer.cfg.xml, as sketched below.
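The ik plugin reads its remote dictionary from the remote_ext_dict entry of that file; a minimal sketch, assuming ik.txt is served by the nginx default server on port 8000 shown earlier:

<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE properties SYSTEM "http://java.sun.com/dtd/properties.dtd">
<properties>
    <comment>IK Analyzer extension configuration</comment>
    <!-- point the remote extension dictionary at the ik.txt served by nginx -->
    <entry key="remote_ext_dict">http://10.135.3.135:8000/ik.txt</entry>
</properties>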

2. Client side:

1) Download filebeat:

wget "https://artifacts.elastic.co/downloads/beats/filebeat/filebeat-5.2.2-linux-x86_64.tar.gz"

Extract filebeat-5.2.2-linux-x86_64.tar.gz into the /usr/local/elk/ directory and rename it to filebeat:

mkdir /usr/local/elk/
mkdir -p /data/elk/logs/
echo "10.135.3.135     elk" >> /etc/hosts

2) Configure filebeat:

[root@test2 filebeat]# cat logs.yml
filebeat.prospectors:
-
  #Paths of the log files to monitor; the * wildcard is allowed
  paths:
    - /data/nginx/log/*_access.log
  #Input type of the files: log (the default)
  input_type: log
  #Set the log type
  document_type: pano-nginx-access
  #Watch from the end of the file and ship newly appended content line by line
  tail_files: true
#Send the log content to kafka
output.kafka:
  hosts: ["10.135.3.135:9092"]
  topic: beats
  compression: Snappy
[root@test2 filebeat]#

[root@test3 filebeat]# cat logs.yml
filebeat.prospectors:
-
  paths:
    - /usr/local/tomcat/logs/*access_log.*.txt
  input_type: log
  document_type: hongbao-tomcat-access
  tail_files: true
-
  paths:
    - /usr/local/tomcat/logs/catalina.out
  input_type: log
  document_type: hongbao-tomcat-catalina
  #Multiline matching pattern, a regular expression; here it matches a timestamp such as 2017-06-05 10:00:00,713
  multiline.pattern: '^\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2},\d{3}'
  #Merge lines that do not match into the previous line, e.g. java error logs
  multiline.negate: true
  #Append the non-matching lines to the end of the previous line
  multiline.match: after
  tail_files: true
output.kafka:
  hosts: ["10.135.3.135:9092"]
  topic: beats
  compression: Snappy
[root@test3 filebeat]#
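Before starting, you can ask filebeat to validate the file; filebeat 5.x supports a -configtest flag for this (an optional check):

/usr/local/elk/filebeat/filebeat -configtest -c /usr/local/elk/filebeat/logs.yml
# reports an error and exits non-zero if the configuration is invalid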

3) Start filebeat

nohup /usr/local/elk/filebeat/filebeat -e -c /usr/local/elk/filebeat/logs.yml -d "publish" &>> /data/elk/logs/filebeat.log &

III. Kibana Web Configuration

1. Visit the kibana address in a browser and enter the account and password configured in nginx above:

http://10.135.3.135:8080

When you access Kibana, the Discover page loads by default with the default index pattern (logstash-*) selected. The time filter defaults to last 15 minutes and the search query defaults to match-all (*).

Server resource status page:

http://10.135.3.135:8080/status

2. Create the index pattern

Note that the index pattern name must match the indices generated by the logstash output (that is, they must already exist in Elasticsearch and contain data). For example, logstash-* matches logstash-20170330, and a pattern can match multiple indices (all indices starting with logstash-).

* matches zero or more characters in the index name.
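To confirm which indices exist and hold data before creating a pattern, the _cat API accepts the same wildcard (an optional check):

curl -s 'http://localhost:9200/_cat/indices/pano-nginx-access-*?v'
# the docs.count column should be non-zero for the days that received traffic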

4. Create visualization charts

Draw a visualization chart that aggregates the response status-code field parsed out of the nginx or tomcat access logs, showing the counts of each status code (such as 200, 400, etc.) in an intuitive chart form.

2) Select one of the index patterns, e.g. pano-*

3) Specify a terms aggregation on the response.keyword field, showing the totals of the top five status codes in descending order, then click the Apply changes icon to take effect.

In the chart, the X axis shows the status codes and the Y axis shows the corresponding totals.
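Under the hood this chart runs a terms aggregation; the equivalent query can be issued directly against Elasticsearch to cross-check the numbers (a sketch, assuming the pano-nginx-access-* indices created earlier; terms order by count descending by default):

curl -s 'http://localhost:9200/pano-nginx-access-*/_search?pretty' -d '{
  "size": 0,
  "aggs": {
    "status_codes": {
      "terms": { "field": "response.keyword", "size": 5 }
    }
  }
}'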

5. Create a dashboard

Visualization objects of the same business or type can be displayed together on a single dashboard.

4) The displayed result

IV. Service Monitoring Scripts

1. Server side

1)kafka

[root@monitor-elk ~]# cat /usr/local/scripts/monitor_kafka.sh
#!/bin/bash
##############################################
# author:Ellen
# describes:Check kafka program
# version:v1.0
# updated:20170407
##############################################
# Configuration information
program_dir=/usr/local/elk/kafka
logfile=/usr/local/scripts/log/monitor_kafka.log

# Check executed user
if [ `whoami` != "root" ];then
echo "Please run the script as root!!!"
exit 1
fi

# Check kafka program
num=`ps aux|grep -w $program_dir|grep -vw "grep\|vim\|vi\|mv\|scp\|cat\|dd\|tail\|head\|script\|ls\|echo\|sys_log\|logger\|tar\|rsync\|ssh"|wc -l`
if [ ${num} -eq 0 ];then
echo "[`date +'%F %T'`] [CRITICAL] Kafka program does not start!!!"|tee -a $logfile
# Send alarm information
# cagent_tools is the alarm plugin shipped with Tencent Cloud servers; it can send SMS or email alerts. Comment it out if not needed.
/usr/bin/cagent_tools alarm "Kafka program does not start!!!"
echo "[`date +'%F %T'`] [  INFO  ] Begin starting kafka program..."|tee -a $logfile
nohup /usr/local/elk/kafka/bin/kafka-server-start.sh /usr/local/elk/kafka/config/server.properties &>> /data/elk/logs/kafka.log &
if [ $? -eq 0 ];then
echo "[`date +'%F %T'`] [  INFO  ] Kafka program start successful."|tee -a $logfile
/usr/bin/cagent_tools alarm "Kafka program start successful"
exit 0
else
echo "[`date +'%F %T'`] [CRITICAL] Kafka program start failed!!!"|tee -a $logfile
/usr/bin/cagent_tools alarm "Kafka program start failed!!!Please handle it!!!"
exit 6
fi
else
echo "[`date +'%F %T'`] [  INFO  ] Kafka program is running..."|tee -a $logfile
exit 0
fi
[root@monitor-elk ~]#

2)zookeeper

[root@monitor-elk ~]# cat /usr/local/scripts/monitor_zookeeper.sh
#!/bin/bash
##############################################
# author:Ellen
# describes:Check zookeeper program
# version:v1.0
# updated:20170407
##############################################
# Configuration information
program_dir=/usr/local/elk/zookeeper
logfile=/usr/local/scripts/log/monitor_zookeeper.log

# Check executed user
if [ `whoami` != "root" ];then
echo "Please run the script as root!!!"
exit 1
fi

# Check zookeeper program
num=`ps aux|grep -w $program_dir|grep -vw "grep\|vim\|vi\|mv\|scp\|cat\|dd\|tail\|head\|ls\|echo\|sys_log\|tar\|rsync\|ssh"|wc -l`
if [ ${num} -eq 0 ];then
echo "[`date +'%F %T'`] [CRITICAL] Zookeeper program does not start!!!"|tee -a $logfile
# Send alarm information
/usr/bin/cagent_tools alarm "Zookeeper program does not start!!!"
echo "[`date +'%F %T'`] [  INFO  ] Begin starting zookeeper program..."|tee -a $logfile
/usr/local/elk/zookeeper/bin/zkServer.sh start
if [ $? -eq 0 ];then
echo "[`date +'%F %T'`] [  INFO  ] Zookeeper program start successful."|tee -a $logfile
/usr/bin/cagent_tools alarm "Zookeeper program start successful"
exit 0
else
echo "[`date +'%F %T'`] [CRITICAL] Zookeeper program start failed!!!"|tee -a $logfile
/usr/bin/cagent_tools alarm "Zookeeper program start failed!!!Please handle it!!!"
exit 6
fi
else
echo "[`date +'%F %T'`] [  INFO  ] Zookeeper program is running..."|tee -a $logfile
exit 0
fi
[root@monitor-elk ~]#

3) Add crontab scheduled tasks

0-59/5 * * * * /usr/local/scripts/monitor_kafka.sh &> /dev/null
0-59/5 * * * * /usr/local/scripts/monitor_zookeeper.sh &> /dev/null

2. Client side:

[root@test2 ~]# cat /usr/local/scripts/monitor_filebeat.sh
#!/bin/bash
##############################################
# author:Ellen
# describes:Check filebeat program
# version:v1.0
# updated:20170407
##############################################
# Configuration information
program_dir=/usr/local/elk/filebeat
logfile=/usr/local/scripts/log/monitor_filebeat.log

# Check executed user
if [ `whoami` != "root" ];then
echo "Please run the script as root!!!"
exit 1
fi

# Check filebeat program
num=`ps aux|grep -w $program_dir|grep -vw "grep\|vim\|vi\|mv\|cp\|scp\|cat\|dd\|tail\|head\|script\|ls\|echo\|sys_log\|logger\|tar\|rsync\|ssh"|wc -l`
if [ ${num} -eq 0 ];then
echo "[`date +'%F %T'`] [CRITICAL] Filebeat program does not start!!!"|tee -a $logfile
# Send alarm information
/usr/bin/cagent_tools alarm "Filebeat program does not start!!!"
echo "[`date +'%F %T'`] [  INFO  ] Begin starting filebeat program..."|tee -a $logfile
nohup /usr/local/elk/filebeat/filebeat -e -c /usr/local/elk/filebeat/logs.yml -d "publish" &>> /data/elk/logs/filebeat.log &
if [ $? -eq 0 ];then
echo "[`date +'%F %T'`] [  INFO  ] Filebeat program start successful."|tee -a $logfile
/usr/bin/cagent_tools alarm "Filebeat program start successful"
exit 0
else
echo "[`date +'%F %T'`] [CRITICAL] Filebeat program start failed!!!"|tee -a $logfile
/usr/bin/cagent_tools alarm "Filebeat program start failed!!!Please handle it!!!"
exit 6
fi
else
echo "[`date +'%F %T'`] [  INFO  ] Filebeat program is running..."|tee -a $logfile
exit 0
fi
[root@test2 ~]#

3) Add a crontab scheduled task

0-59/5 * * * * /usr/local/scripts/monitor_filebeat.sh &> /dev/null

V. Notes

1. Data flow

--------------------------------------------------------------------------------------------------

log_files -> filebeat -> kafka -> logstash -> elasticsearch -> kibana

--------------------------------------------------------------------------------------------------

2. Clean up elasticsearch indices on a daily schedule, keeping only the last 30 days

1) Write the script

[root@monitor-elk ~]# cat /usr/local/scripts/del_index.sh
#!/bin/bash
##############################################
# author:Ellen
# describes:Delete elasticsearch history index.
# version:v1.0
# updated:20170407
##############################################
# Configuration information
logfile=/usr/local/scripts/log/del_index.log
tmpfile=/tmp/index.txt
host=localhost
port=9200
deldate=`date -d '-30days' +'%Y.%m.%d'`

# Check executed user
if [ `whoami` != "root" ];then
echo "Please run the script as root!!!"
exit 1
fi

# Delete elasticsearch index
curl -s "$host:$port/_cat/indices?v"|grep -v health|awk '{print $3}'|grep "$deldate" > $tmpfile
if [ ! -s $tmpfile ];then
echo "[`date +'%F %T'`] [WARNING] $tmpfile is an empty file."|tee -a $logfile
exit 1
fi
for i in `cat /tmp/index.txt`
do
curl -XDELETE http://$host:$port/$i
if [ $? -eq 0 ];then
echo "[`date +'%F %T'`] [  INFO  ] Elasticsearch index $i delete successful."|tee -a $logfile
else
echo "[`date +'%F %T'`] [CRITICAL] Elasticsearch index $i delete failed!!!"|tee -a $logfile
/usr/bin/cagent_tools alarm "Elasticsearch index $i delete failed!!!"
exit 6
fi
done
[root@monitor-elk ~]#

2) Add a crontab scheduled task

00 02 * * * /usr/local/scripts/del_index.sh &> /dev/null

3. Create indices per business

e.g. hongbao, pano, etc.

4. The nginx, tomcat, and similar access logs use the default format

VI. Command Reference

1. List all indices

curl -s 'http://localhost:9200/_cat/indices?v'

2. List the nodes

curl 'localhost:9200/_cat/nodes?v'

3. Query cluster health information

curl 'localhost:9200/_cat/health?v'

4. View data in a specified index (returns ten results by default)

curl -XGET 'http://localhost:9200/logstash-nginx-access-2017.05.20/_search?pretty'
5. Delete a specified index

curl -XDELETE http://localhost:9200/logstash-nginx-access-2017.05.20

6. Query the templates

curl -s 'http://localhost:9200/_template'
