System optimization

#!/bin/bash
#############################################################
# File Name: centos7-optimization.sh
# Author: Sean_Li
# Created Time: 20210414
# Purpose: one-shot baseline optimization for a fresh CentOS 7 host.
#          Aborts (exit 3) on CentOS 6, which this script does not support.
#==================================================================
echo "check centos7 or centos6"
# Extract the major release number from /etc/redhat-release.
# The original `awk '{print $3}'` is fragile: "CentOS Linux release 7.x"
# has "release" in field 3 and the version in field 4, while
# "CentOS release 6.x" has the version in field 3. Grabbing the first
# number in the line works for both wordings.
VERSION=$(grep -oE '[0-9]+' /etc/redhat-release | head -1)
if [ "${VERSION}" == "6" ]; then
    echo "centos6"
    exit 3
else
    echo "centos7"
fi

#history: stamp each history entry with date, time and the invoking user
export HISTTIMEFORMAT="%F %T $(whoami) "
# Guard the append so re-running this script does not pile up duplicate
# lines in /etc/profile. The backticks are written literally (single
# quotes) so each login shell evaluates whoami itself.
if ! grep -q 'HISTTIMEFORMAT' /etc/profile; then
    echo 'export HISTTIMEFORMAT="%F %T `whoami` "' >> /etc/profile
fi
# Only affects this script's own shell, kept for parity with the original.
source /etc/profile



#Add a public DNS resolver (114DNS) — once only, so re-runs do not
#append duplicate nameserver lines.
if ! grep -q '114.114.114.114' /etc/resolv.conf; then
cat >> /etc/resolv.conf << EOF
nameserver 114.114.114.114
EOF
fi

#Disable SELinux permanently (config file) and for the running kernel.
sed -i 's/SELINUX=enforcing/SELINUX=disabled/' /etc/selinux/config
# setenforce returns non-zero when SELinux is already disabled;
# tolerate that instead of leaving a failed status behind.
setenforce 0 || true

#Firewall: stop the running service and keep it from starting on boot.
systemctl stop firewalld.service
systemctl disable firewalld.service

#Locale: switch the system language to Simplified Chinese.
sed -i 's/LANG="en_US.UTF-8"/LANG="zh_CN.UTF-8"/' /etc/locale.conf
#localectl set-locale LANG=zh_CN.UTF-8 source /etc/locale.conf

#Switch the Yum repos to the Aliyun mirrors (much faster inside China).
yum install wget telnet -y
# Back up the stock repo file only while it still exists; on a re-run the
# unconditional mv would overwrite the backup with the Aliyun copy.
if [ -f /etc/yum.repos.d/CentOS-Base.repo ]; then
    mv /etc/yum.repos.d/CentOS-Base.repo /etc/yum.repos.d/CentOS-Base.repo.backup
fi
# Fetch over https: repo definitions decide what gets installed, so they
# should not be downloaded over an unauthenticated channel.
wget -O /etc/yum.repos.d/CentOS-Base.repo https://mirrors.aliyun.com/repo/Centos-7.repo

#Add the Aliyun EPEL repo.
wget -O /etc/yum.repos.d/epel.repo https://mirrors.aliyun.com/repo/epel-7.repo

#Rebuild the yum metadata cache.
yum clean all
yum makecache

yum install -y ntpdate net-tools lrzsz tree cmake gcc gcc-c++ autoconf l libjpeg libjpeg-devel libpng libpng-devel freetype freetype-devel libxml2 libxml2-devel zlib zlib-devel glibc glibc-devel glib2 glib2-devel bzip2 bzip2-devel ncurses ncurses-devel curl curl-devel libxslt-devel libtool-ltdl-devel make wget docbook-dtds asciidoc e2fsprogs-devel gd gd-devel openssl openssl-devel lsof git unzip gettext-devel gettext libevent libevent-devel pcre pcre-devel vim readline readline-devel


#Hostname: suffix derived from the 3rd and 4th octets of the primary IP.
# sed strips a literal "addr:" prefix (printed by older net-tools ifconfig).
# The original used `tr -d "addr:"`, which deletes every 'a','d','r',':'
# character anywhere in the string — wrong tool, accidentally harmless
# only because IPs contain just digits and dots.
# head -1 keeps a single address when the host has several interfaces.
ipname=$(ifconfig -a | grep inet | grep -v 127.0.0.1 | grep -v inet6 \
    | awk '{print $2}' | sed 's/^addr://' | head -1 \
    | awk -F '.' '{print $3"_"$4}')
echo "$ipname"
hostnamectl set-hostname "insurace-$ipname"


#Time: set the timezone first so the cron job and hwclock both operate
#on the intended local time.
timedatectl set-timezone Asia/Shanghai
/usr/sbin/ntpdate cn.pool.ntp.org
# "0 4 * * *" = once a day at 04:00. The original "* 4 * * *" matches
# every minute between 04:00 and 04:59, i.e. 60 ntpdate runs per day
# against the public pool — clearly unintended.
# Guard the append so re-runs do not stack duplicate cron entries.
if ! grep -qF 'ntpdate cn.pool.ntp.org' /var/spool/cron/root 2>/dev/null; then
    echo "0 4 * * * /usr/sbin/ntpdate cn.pool.ntp.org > /dev/null 2>&1" >> /var/spool/cron/root
fi
systemctl restart crond.service
# Persist the freshly synced system time to the hardware clock.
hwclock --systohc


#Configure sshd: both of these settings slow logins down when enabled.
# Turn off GSSAPI authentication.
sed -i 's/^GSSAPIAuthentication yes$/GSSAPIAuthentication no/' /etc/ssh/sshd_config
# Disable reverse-DNS lookups of connecting clients.
sed -i 's/#UseDNS yes/UseDNS no/' /etc/ssh/sshd_config
#sed -i 's/#Port 22/Port 2223/' /etc/ssh/sshd_config
systemctl restart sshd.service

#Raise the max-open-files limit for this shell, at boot, and via PAM.
ulimit -SHn 102400
# Guarded appends: re-running the script must not duplicate entries.
if ! grep -q 'ulimit -SHn 102400' /etc/rc.local; then
    echo "ulimit -SHn 102400" >> /etc/rc.local
fi
# rc.local is ignored on CentOS 7 unless it is executable.
chmod +x /etc/rc.d/rc.local

if ! grep -q 'nofile *655350' /etc/security/limits.conf; then
cat >> /etc/security/limits.conf << EOF
*           soft   nofile       655350
*           hard   nofile       655350
EOF
fi

# Lift the per-user process cap (the shipped default of 4096 is too low
# for server workloads).
sed -i 's/4096/65535/g' /etc/security/limits.d/20-nproc.conf




#Kernel parameter tuning.
# NOTE(review): net.ipv4.tcp_tw_recycle=1 below is known to break clients
# behind NAT and was removed in kernel 4.12; with net.ipv4.tcp_timestamps=0
# (also set below) it is inert anyway — consider dropping both lines.
# The append is guarded on a key unique to this block so that re-running
# the script does not duplicate the whole section in /etc/sysctl.conf.
if ! grep -q 'net.netfilter.nf_conntrack_max=655350' /etc/sysctl.conf; then
cat >> /etc/sysctl.conf << EOF
net.ipv6.conf.all.disable_ipv6 = 1
net.ipv6.conf.default.disable_ipv6 = 1
#决定检查过期多久邻居条目
net.ipv4.neigh.default.gc_stale_time=120
#使用arp_announce / arp_ignore解决ARP映射问题
net.ipv4.conf.default.arp_announce = 2
net.ipv4.conf.all.arp_announce=2
net.ipv4.conf.lo.arp_announce=2
# 避免放大攻击
net.ipv4.icmp_echo_ignore_broadcasts = 1
# 开启恶意icmp错误消息保护
net.ipv4.icmp_ignore_bogus_error_responses = 1
#关闭路由转发
net.ipv4.ip_forward = 0
net.ipv4.conf.all.send_redirects = 0
net.ipv4.conf.default.send_redirects = 0
#开启反向路径过滤
net.ipv4.conf.all.rp_filter = 1
net.ipv4.conf.default.rp_filter = 1
#处理无源路由的包
net.ipv4.conf.all.accept_source_route = 0
net.ipv4.conf.default.accept_source_route = 0
#关闭sysrq功能
kernel.sysrq = 0
#core文件名中添加pid作为扩展名
kernel.core_uses_pid = 1
# 开启SYN洪水攻击保护
net.ipv4.tcp_syncookies = 1
#修改消息队列长度
kernel.msgmnb = 65536
kernel.msgmax = 65536
#设置最大内存共享段大小bytes
kernel.shmmax = 68719476736
kernel.shmall = 4294967296
#timewait的数量,默认180000
net.ipv4.tcp_max_tw_buckets = 6000
net.ipv4.tcp_sack = 1
net.ipv4.tcp_window_scaling = 1
net.ipv4.tcp_rmem = 4096        87380   4194304
net.ipv4.tcp_wmem = 4096        16384   4194304
net.core.wmem_default = 8388608
net.core.rmem_default = 8388608
net.core.rmem_max = 16777216
net.core.wmem_max = 16777216
#每个网络接口接收数据包的速率比内核处理这些包的速率快时,允许送到队列的数据包的最大数目
net.core.netdev_max_backlog = 262144
#限制仅仅是为了防止简单的DoS 攻击
net.ipv4.tcp_max_orphans = 3276800
#未收到客户端确认信息的连接请求的最大值
net.ipv4.tcp_max_syn_backlog = 262144
net.ipv4.tcp_timestamps = 0
#内核放弃建立连接之前发送SYNACK 包的数量
net.ipv4.tcp_synack_retries = 1
#内核放弃建立连接之前发送SYN 包的数量
net.ipv4.tcp_syn_retries = 1
#启用timewait 快速回收
net.ipv4.tcp_tw_recycle = 1
#开启重用。允许将TIME-WAIT sockets 重新用于新的TCP 连接
net.ipv4.tcp_tw_reuse = 1
net.ipv4.tcp_mem = 94500000 915000000 927000000
net.ipv4.tcp_fin_timeout = 1
#当keepalive 起用的时候,TCP 发送keepalive 消息的频度。缺省是2 小时
net.ipv4.tcp_keepalive_time = 1800
net.ipv4.tcp_keepalive_probes = 3
net.ipv4.tcp_keepalive_intvl = 15
#允许系统打开的端口范围
net.ipv4.ip_local_port_range = 1024    65000
#修改防火墙表大小,默认65536
net.netfilter.nf_conntrack_max=655350
net.netfilter.nf_conntrack_tcp_timeout_established=1200
# 确保无人能修改路由表
net.ipv4.conf.all.accept_redirects = 0
net.ipv4.conf.default.accept_redirects = 0
net.ipv4.conf.all.secure_redirects = 0
net.ipv4.conf.default.secure_redirects = 0
vm.max_map_count = 1000000
fs.nr_open = 10000000
fs.file-max = 11000000

EOF
fi

# Apply the parameters to the running kernel.
/sbin/sysctl -p
echo "-----------------------success------------------------------"

Software download

  • wget https://artifacts.elastic.co/downloads/elasticsearch/elasticsearch-7.6.2-linux-x86_64.tar.gz
  • wget https://archive.apache.org/dist/kafka/2.2.1/kafka_2.11-2.2.1.tgz
  • wget https://artifacts.elastic.co/downloads/kibana/kibana-7.6.2-linux-x86_64.tar.gz

Install the elasticsearch – 7.6.2

[root@elk24 tar]# ls
elasticsearch-7.6.2-linux-x86_64.tar.gz  kibana-7.6.2-linux-x86_64.tar.gz
[root@elk24 tar]# tar -xf elasticsearch-7.6.2-linux-x86_64.tar.gz
[root@elk24 tar]# mv elasticsearch-7.6.2-linux-x86_64 /data/
[root@elk24 tar]# tar -xf kibana-7.6.2-linux-x86_64.tar.gz
[root@elk24 tar]# mv kibana-7.6.2-linux-x86_64 /data/

config

[root@elk24 tar]# cd /data/elasticsearch-7.6.2/config/
[root@elk24 config]# ls
elasticsearch.keystore  elasticsearch.yml  elasticsearch.yml.bak  jvm.options  log4j2.properties  role_mapping.yml  roles.yml  users  users_roles
[root@elk24 config]# cat elasticsearch.yml
cluster.name: insurance-pro-7.6.2
node.name: master-1
node.master: true
node.data: true
path.data: /data/elasticsearch-7.6.2/data
path.logs: /data/elasticsearch-7.6.2/logs
http.port: 9200
network.host: 0.0.0.0
cluster.initial_master_nodes: ["10.110.24.88"]
discovery.zen.ping.unicast.hosts: ["10.110.24.88"]
discovery.zen.minimum_master_nodes: 2
discovery.zen.ping_timeout: 30s
discovery.zen.fd.ping_retries: 15
discovery.zen.fd.ping_interval: 20s
discovery.zen.master_election.ignore_non_master_pings: true
http.cors.enabled: true
http.cors.allow-origin: "*"
http.cors.allow-headers: "Authorization,X-Requested-With,Content-Length,Content-Type"
xpack.security.enabled: true
xpack.security.transport.ssl.enabled: true
search.max_buckets: 200000
bootstrap.memory_lock: false
bootstrap.system_call_filter: false
gateway.expected_nodes: 1

Java environment

[root@elk24 ik]# cat /etc/profile
# /etc/profile
export JAVA_HOME=/data/elasticsearch-7.6.2/jdk
export PATH=$JAVA_HOME/bin:$PATH
export CLASSPATH=.:$JAVA_HOME/lib/dt.jar:$JAVA_HOME/lib/tools.jar
[root@elk24 ik]#

kibana conf

[root@elk24 config]# ls
apm.js  kibana.yml  kibana.yml.bak
[root@elk24 config]# pwd
/data/kibana-7.6.2-linux-x86_64/config
[root@elk24 config]# cat kibana.yml
server.port: 5601
server.host: "0.0.0.0"
server.name: "10.110.24.88"
elasticsearch.hosts: ["http://10.110.24.88:9200"]
elasticsearch.username: "elastic"
elasticsearch.password: "Elast111111111#"
elasticsearch.ssl.verificationMode: none
elasticsearch.requestTimeout: 90000
i18n.locale: "zh-CN"
[root@elk24 config]#

Install Elasticsearch with IK participle

[root@elk24 ik]# mkdir /data/elasticsearch-7.6.2/plugins/ik
### https://github.com/medcl/elasticsearch-analysis-ik/releases?after=v7.10.0 ###
[root@elk24 ik]# pwd
/data/elasticsearch-7.6.2/plugins/ik
[root@elk24 ik]# wget https://github.com/medcl/elasticsearch-analysis-ik/releases/download/v7.6.2/elasticsearch-analysis-ik-7.6.2.zip
[root@elk24 ik]# unzip elasticsearch-analysis-ik-7.6.2.zip
[root@elk24 ik]# ls
commons-codec-1.9.jar  commons-logging-1.2.jar  config  elasticsearch-analysis-ik-7.6.2.jar  httpclient-4.5.2.jar  httpcore-4.4.4.jar  plugin-descriptor.properties  plugin-security.policy

Hack the X-Pack plugin for permanent platinum privileges (for learning purposes only)

LicenseVerifier. Java file [root @ elk24 opt] # cat LicenseVerifier. Java package org. Elasticsearch. License; /** * * Responsible for verifying signed licenses * */ public class LicenseVerifier { /** * * verifies the license content with the signature using the packaged * * public key * * @param license to verify * * @return true if valid, false otherwise * */ public static boolean verifyLicense(final License license, byte[] publicKeyData) { return true; } public static boolean verifyLicense(final License license) { return true; }} XPackBuild. Java file [root @ elk24 opt] # cat XPackBuild. Java package org. Elasticsearch. Xpack. Core; import org.elasticsearch.common.SuppressForbidden; import org.elasticsearch.common.io.PathUtils; import java.io.IOException; import java.net.URISyntaxException; import java.net.URL; import java.nio.file.Files; import java.nio.file.Path; import java.util.jar.JarInputStream; import java.util.jar.Manifest; public class XPackBuild { public static final XPackBuild CURRENT; static { CURRENT = new XPackBuild("Unknown", "Unknown"); } /** * * Returns path to xpack codebase path * */ @SuppressForbidden(reason = "looks up path of xpack.jar directly") static Path getElasticsearchCodebase() { URL url = XPackBuild.class.getProtectionDomain().getCodeSource().getLocation();  try { return PathUtils.get(url.toURI()); } catch (URISyntaxException bogus) { throw new RuntimeException(bogus); } } private String shortHash; private String date; XPackBuild(String shortHash, String date) { this.shortHash = shortHash; this.date = date; } public String shortHash() { return shortHash; } public String date() { return date; } } [root@elk24 opt]# javac -cp "/ data/elasticsearch 7.6.2 / lib/elasticsearch - 7.6.2. Jar: / data/elasticsearch - 7.6.2 / lib/lucene - core - 8.4.0. Jar: / data/elastic Search-7.6.2 /modules/x-pack-core/x-pack-core-7.6.2.jar" licenseverifier. Java [root@elk24 opt]# javac-cp "/ data/elasticsearch 7.6.2 / lib/elasticsearch - 7.6.2. 
Jar: / data/elasticsearch - 7.6.2 / lib/lucene - core - 8.4.0. Jar: / data/elastic Search - 7.6.2 / modules/x - pack - core/x - pack - core - 7.6.2. Jar: / data/elasticsearch - 7.6.2 / lib/elasticsearch - core - 7.6.2. Jar" XPackBuild.java

Copy /data/elasticsearch-7.6.2/modules/x-pack-core/x-pack-core-7.6.2.jar to /opt, open x-pack-core-7.6.2.jar with the 7-Zip archiver, and replace the two compiled class files.

Start the es

[root@elk24 opt]# groupadd elasticsearch
[root@elk24 opt]# useradd elasticsearch -G elasticsearch
[root@elk24 opt]# chown -R elasticsearch.elasticsearch elasticsearch-7.6.2/
[root@elk24 opt]# su - elasticsearch
[root@elk24 opt]# vim /data/elasticsearch-7.6.2/config/jvm.options    # set -Xms8g -Xmx8g
[root@elk24 opt]# /data/elasticsearch-7.6.2/bin/elasticsearch -d

Configure the administrative user password for the ES cluster

[elasticsearch@elk24 bin]$ ./elasticsearch-setup-passwords interactive Initiating the setup of passwords for reserved users elastic,apm_system,kibana,logstash_system,beats_system,remote_monitoring_user. You will be prompted to enter passwords as the process progresses. Please confirm that you would like to continue [y/N]y Enter password for [elastic]:  Reenter password for [elastic]: . . . Changed password for user [elastic]

kafka conf

[root@kafka24 ~]# wget https://archive.apache.org/dist/kafka/2.2.1/kafka_2.11-2.2.1.tgz
[root@kafka24 ~]# cd /data/
[root@kafka24 data]# ls
kafka  kafka-logs  zookeeper
[root@kafka24 data]# cd kafka/config/
[root@kafka24 config]# cat zookeeper.properties
dataDir=/data/zookeeper
clientPort=2181
maxClientCnxns=0
tickTime=2000
initLimit=10
syncLimit=5
[root@kafka24 config]# grep -v "#" server.properties
broker.id=0
num.network.threads=3
num.io.threads=8
socket.send.buffer.bytes=102400
socket.receive.buffer.bytes=102400
socket.request.max.bytes=104857600
log.dirs=/data/kafka-logs
num.partitions=1
num.recovery.threads.per.data.dir=1
offsets.topic.replication.factor=1
transaction.state.log.replication.factor=1
transaction.state.log.min.isr=1
log.retention.hours=48
log.segment.bytes=1073741824
log.retention.check.interval.ms=300000
zookeeper.connect=10.110.24.89:2181
listeners=PLAINTEXT://10.110.24.89:9092
advertised.listeners=PLAINTEXT://10.110.24.89:9092
zookeeper.connection.timeout.ms=6000
group.initial.rebalance.delay.ms=0
[root@kafka24 data]# cat /etc/rc.local
touch /var/lock/subsys/local
ulimit -SHn 102400
#/data/zookeeper/bin/zkServer.sh start
#/data/kafka/bin/kafka-server-start.sh -daemon /data/kafka/config/server.properties
cd /data/kafka/bin && nohup ./zookeeper-server-start.sh ../config/zookeeper.properties &
cd /data/kafka/bin && nohup ./kafka-server-start.sh ../config/server.properties > kafka.log &

logstash config

[root@insurace-24 conf]# cat logstash-configmap-template.yaml
kind: ConfigMap
apiVersion: v1
metadata:
  name: logstash-#project#-#topics_pattern#-#profile#
  namespace: default
data:
  logstash-#project#-#topics_pattern#-#profile#.conf: |
    input {
      kafka {
        bootstrap_servers => ["10.110.24.89:9092"]
        topics_pattern => "#topics_pattern#.*"
        codec => "json"
        consumer_threads => 5
        auto_offset_reset => "latest"
        group_id => "#topics_pattern#"
        client_id => "#topics_pattern#"
        decorate_events => true
        #auto_commit_interval_ms => 5000
      }
    }
    filter {
      json { source => "message" }
      date { match => [ "timestamp", "dd/MMM/YYYY:HH:mm:ss Z" ] }
      mutate { remove_field => "timestamp" }
      if "_geoip_lookup_failure" in [tags] { drop {} }
    }
    output {
      elasticsearch {
        hosts => ["10.110.24.88:9200"]
        index => "logstash-#project#-#topics_pattern#-%{+YYYY-MM-dd}"
        user => elastic
        password => "Elasti111111111111111*#"
      }
      stdout { codec => rubydebug }
    }

kibana start

[root@elk24 ik]# cat /etc/rc.local
ulimit -SHn 102400
su elasticsearch -c "/data/elasticsearch-7.6.2/bin/elasticsearch -d"
cd /data/kibana-7.6.2-linux-x86_64/bin && nohup /data/kibana-7.6.2-linux-x86_64/bin/kibana --allow-root &