Deploy a Kubernetes cluster with kubeadm or any other method.
Create a namespace in the cluster: halashow.
The Deployment contains an Elasticsearch business container plus an init container whose only job is to set vm.max_map_count=262144.
The Service exposes port 9200, so other services can reach Elasticsearch through the service name plus that port.
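Assuming the manifest below is saved as es.yaml (the file name is only illustrative), the setup can be created like this:

kubectl create namespace halashow
kubectl apply -f es.yaml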
apiVersion: v1
kind: Service
metadata:
  namespace: halashow
  name: elasticsearch
  labels:
    app: elasticsearch-logging
spec:
  type: ClusterIP
  ports:
  - port: 9200
    name: elasticsearch
  selector:
    app: elasticsearch-logging
---
apiVersion: apps/v1
kind: Deployment
metadata:
  labels:
    app: elasticsearch-logging
    version: v1
  name: elasticsearch
  namespace: halashow
spec:
  minReadySeconds: 10
  progressDeadlineSeconds: 600
  replicas: 1
  revisionHistoryLimit: 10
  selector:
    matchLabels:
      app: elasticsearch-logging
      version: v1
  strategy:
    type: Recreate
  template:
    metadata:
      labels:
        app: elasticsearch-logging
        version: v1
    spec:
      affinity:
        nodeAffinity: {}
      containers:
      - env:
        - name: discovery.type
          value: single-node
        - name: ES_JAVA_OPTS
          value: -Xms512m -Xmx512m
        - name: MINIMUM_MASTER_NODES
          value: "1"
        image: docker.elastic.co/elasticsearch/elasticsearch:7.12.0-amd64
        imagePullPolicy: IfNotPresent
        name: elasticsearch-logging
        ports:
        - containerPort: 9200
          name: db
          protocol: TCP
        - containerPort: 9300
          name: transport
          protocol: TCP
        resources:
          limits:
            cpu: "1"
            memory: 1Gi
          requests:
            cpu: "1"
            memory: 1Gi
        terminationMessagePath: /dev/termination-log
        terminationMessagePolicy: File
        volumeMounts:
        - mountPath: /data
          name: es-persistent-storage
      dnsPolicy: ClusterFirst
      imagePullSecrets:
      - name: user-1-registrysecret
      initContainers:
      - command:
        - /sbin/sysctl
        - -w
        - vm.max_map_count=262144
        image: alpine:3.6
        imagePullPolicy: IfNotPresent
        name: elasticsearch-logging-init
        resources: {}
        securityContext:
          privileged: true
          procMount: Default
        terminationMessagePath: /dev/termination-log
        terminationMessagePolicy: File
      restartPolicy: Always
      schedulerName: default-scheduler
      securityContext: {}
      terminationGracePeriodSeconds: 30
      volumes:
      - hostPath:
          path: /data/elk/elasticsearch-logging
          type: DirectoryOrCreate
        name: es-persistent-storage
      nodeSelector:
        alibabacloud.com/is-edge-worker: 'false'
        beta.kubernetes.io/arch: amd64
        beta.kubernetes.io/os: linux
      tolerations:
      - effect: NoSchedule
        key: node-role.alibabacloud.com/addon
        operator: Exists
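A quick way to check that Elasticsearch came up and is reachable through the Service (a sketch; the check pod name and image are illustrative):

kubectl -n halashow get pods,svc
kubectl -n halashow run es-check --rm -it --image=alpine:3.6 --restart=Never -- wget -qO- http://elasticsearch:9200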

Persistent deployment of Elasticsearch; reference:
https://www.51cto.com/article/673023.html
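The reference manifest below keeps data on a hostPath volume (/data/es/). On clusters with a default StorageClass, a PersistentVolumeClaim could be used instead; a minimal sketch (the claim name and size are illustrative):

apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: es-data
  namespace: default
spec:
  accessModes:
  - ReadWriteOnce
  resources:
    requests:
      storage: 10Gi

The claim would then replace the hostPath entry under volumes in the StatefulSet (persistentVolumeClaim: claimName: es-data).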
apiVersion: v1
kind: Service
metadata:
  name: es
  namespace: default
  labels:
    k8s-app: es
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
    kubernetes.io/name: "Elasticsearch"
spec:
  ports:
  - port: 9200
    protocol: TCP
    targetPort: db
  selector:
    k8s-app: es
---
# RBAC authn and authz
apiVersion: v1
kind: ServiceAccount
metadata:
  name: es
  namespace: default
  labels:
    k8s-app: es
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: es
  labels:
    k8s-app: es
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
rules:
- apiGroups:
  - ""
  resources:
  - "services"
  - "namespaces"
  - "endpoints"
  verbs:
  - "get"
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  namespace: default
  name: es
  labels:
    k8s-app: es
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
subjects:
- kind: ServiceAccount
  name: es
  namespace: default
  apiGroup: ""
roleRef:
  kind: ClusterRole
  name: es
  apiGroup: ""
---
# Elasticsearch deployment itself
apiVersion: apps/v1
kind: StatefulSet                  # use a StatefulSet so the pod gets a stable, ordered identity
metadata:
  name: es
  namespace: default
  labels:
    k8s-app: es
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
    srv: srv-elasticsearch
spec:
  serviceName: es                  # ties the StatefulSet to the Service above, giving each pod a stable DNS name (es-0.es.default.svc.cluster.local, ...)
  replicas: 1                      # single-node deployment
  selector:
    matchLabels:
      k8s-app: es                  # must match the pod template labels
  template:
    metadata:
      labels:
        k8s-app: es
        kubernetes.io/cluster-service: "true"
    spec:
      serviceAccountName: es
      containers:
      - image: docker.io/library/elasticsearch:7.10.1
        name: es
        resources:
          # need more cpu upon initialization, therefore burstable class
          limits:
            cpu: 1000m
            memory: 2Gi
          requests:
            cpu: 100m
            memory: 500Mi
        ports:
        - containerPort: 9200
          name: db
          protocol: TCP
        - containerPort: 9300
          name: transport
          protocol: TCP
        volumeMounts:
        - name: es
          mountPath: /usr/share/elasticsearch/data/   # data directory mount point
        env:
        - name: "NAMESPACE"
          valueFrom:
            fieldRef:
              fieldPath: metadata.namespace
        - name: "discovery.type"   # single-node discovery
          value: "single-node"
        - name: ES_JAVA_OPTS       # JVM heap; keep Xms equal to Xmx and below the container memory limit
          value: "-Xms1024m -Xmx1024m"
      volumes:
      - name: es
        hostPath:
          path: /data/es/
      nodeSelector:                # optional: pin the pod to the node that holds the data directory
        es: data
      tolerations:
      - effect: NoSchedule
        operator: Exists
      # Elasticsearch requires vm.max_map_count to be at least 262144.
      # If your OS already sets up this number to a higher value, feel free
      # to remove this init container.
      initContainers:              # run before the main container starts
      - name: es-init
        image: alpine:3.6
        command: ["/sbin/sysctl", "-w", "vm.max_map_count=262144"]   # raise the mmap count limit; too low a value causes out-of-memory errors
        securityContext:           # applies only to this container, not to the volumes
          privileged: true         # run as a privileged container
      - name: increase-fd-ulimit
        image: busybox
        imagePullPolicy: IfNotPresent
        command: ["sh", "-c", "ulimit -n 65536"]   # raise the maximum number of open file descriptors
        securityContext:
          privileged: true
      - name: elasticsearch-volume-init   # give the on-disk data directory 777 permissions
        image: alpine:3.6
        command:
        - chmod
        - -R
        - "777"
        - /usr/share/elasticsearch/data/
        volumeMounts:
        - name: es
          mountPath: /usr/share/elasticsearch/data/
---
apiVersion: v1
kind: Service
metadata:
  name: kibana
  namespace: default
  labels:
    k8s-app: kibana
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
    kubernetes.io/name: "Kibana"
    srv: srv-kibana
spec:
  type: NodePort                   # exposed as a NodePort (30561 below)
  ports:
  - port: 5601
    nodePort: 30561
    protocol: TCP
    targetPort: ui
  selector:
    k8s-app: kibana
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: kibana
  namespace: default
  labels:
    k8s-app: kibana
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
    srv: srv-kibana
spec:
  replicas: 1
  selector:
    matchLabels:
      k8s-app: kibana
  template:
    metadata:
      labels:
        k8s-app: kibana
      annotations:
        seccomp.security.alpha.kubernetes.io/pod: 'docker/default'
    spec:
      containers:
      - name: kibana
        image: docker.io/kubeimages/kibana:7.9.3   # this image supports both arm64 and amd64
        resources:
          # need more cpu upon initialization, therefore burstable class
          limits:
            cpu: 1000m
          requests:
            cpu: 100m
        env:
        - name: ELASTICSEARCH_HOSTS
          value: http://es:9200
        ports:
        - containerPort: 5601
          name: ui
          protocol: TCP
---
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
  name: kibana
  namespace: default               # must be the namespace the kibana Service lives in
spec:
  rules:
  - host: kibana.ctnrs.com
    http:
      paths:
      - path: /
        backend:
          serviceName: kibana
          servicePort: 5601
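After applying the manifests above (for example kubectl apply -f elasticsearch.yaml, the file name being illustrative), a rough check looks like this:

kubectl get pods -l k8s-app=es        # the StatefulSet pod should appear as es-0
kubectl get pods -l k8s-app=kibana
curl http://<node-ip>:30561           # Kibana is exposed as a NodePort on every node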

4 Deploy Logstash
Create a ConfigMap that defines the Logstash pipeline configuration, which consists mainly of the following parts.
input: defines the sources that feed data into Logstash.
filter: defines how events are parsed and filtered.
output: defines where events are shipped, e.g. Elasticsearch, Redis, Kafka, and so on.
---
apiVersion: v1
kind: ConfigMap
metadata:
  name: logstash-config
  namespace: halashow
data:
  logstash.conf: |-
    input {
      redis {
        host => "10.36.21.220"
        port => 30079
        db => 0
        key => "localhost"
        password => "123456"
        data_type => "list"
        threads => 4
        batch_count => "1"
        #tags => "user.log"
      }
    }

    filter {
      dissect {
        mapping => { "message" => "[%{Time}] %{LogLevel} %{message}" }
      }
    }

    output {
      if "nginx.log" in [tags] {
        elasticsearch {
          hosts => ["elasticsearch:9200"]
          index => "nginx.log"
        }
      }
      if "osale-uc-test" in [tags] {
        elasticsearch {
          hosts => ["elasticsearch:9200"]
          index => "osale-uc-test"
        }
      }
      if "osale-jindi-client-test" in [tags] {
        elasticsearch {
          hosts => ["elasticsearch:9200"]
          index => "osale-jindi-client-test"
        }
      }
      if "osale-admin-weixin" in [tags] {
        elasticsearch {
          hosts => ["elasticsearch:9200"]
          index => "osale-admin-weixin"
        }
      }
    }
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: logstash
  namespace: halashow
  labels:
    name: logstash
spec:
  replicas: 1
  selector:
    matchLabels:
      name: logstash
  template:
    metadata:
      labels:
        app: logstash
        name: logstash
    spec:
      containers:
      - name: logstash
        image: docker.elastic.co/logstash/logstash:7.12.0
        ports:
        - containerPort: 5044
          protocol: TCP
        - containerPort: 9600
          protocol: TCP
        volumeMounts:
        - name: logstash-config
          mountPath: /usr/share/logstash/pipeline/logstash.conf
          subPath: logstash.conf
      volumes:
      - name: logstash-config
        configMap:
          name: logstash-config
---
apiVersion: v1
kind: Service
metadata:
  namespace: halashow
  name: logstash
  labels:
    app: logstash
spec:
  type: ClusterIP
  ports:
  - port: 5044
    name: logstash
  selector:
    app: logstash
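Once the Logstash pod is Running, its monitoring API on port 9600 can confirm that the Redis input pipeline is active; a sketch using a port-forward:

kubectl -n halashow get pods -l app=logstash
kubectl -n halashow port-forward deploy/logstash 9600:9600 &
curl -s http://localhost:9600/_node/stats/pipelines?pretty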

5. Deploy Redis 5.0
apiVersion: v1
kind: ConfigMap
metadata:
  name: elk-redis
  labels:
    app: elk-redis
data:
  redis.conf: |-
    bind 0.0.0.0
    daemonize no
    pidfile "/var/run/redis.pid"
    port 6379
    timeout 300
    loglevel warning
    logfile "redis.log"
    databases 16
    rdbcompression yes
    dbfilename "redis.rdb"
    dir "/data"
    requirepass "123456"
    masterauth "123456"
    maxclients 10000
    maxmemory 1000mb
    maxmemory-policy allkeys-lru
    appendonly yes
    appendfsync always
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: elk-redis
  labels:
    app: elk-redis
spec:
  serviceName: elk-redis   # StatefulSets require a governing Service name; see the Service sketch below
  replicas: 1
  selector:
    matchLabels:
      app: elk-redis
  template:
    metadata:
      labels:
        app: elk-redis
    spec:
      containers:
      - name: redis
        image: redis:5.0.7
        command:
        - "sh"
        - "-c"
        - "redis-server /usr/local/redis/redis.conf"
        ports:
        - containerPort: 6379
        resources:
          limits:
            cpu: 1000m
            memory: 1024Mi
          requests:
            cpu: 1000m
            memory: 1024Mi
        livenessProbe:
          tcpSocket:
            port: 6379
          initialDelaySeconds: 300
          timeoutSeconds: 1
          periodSeconds: 10
          successThreshold: 1
          failureThreshold: 3
        readinessProbe:
          tcpSocket:
            port: 6379
          initialDelaySeconds: 5
          timeoutSeconds: 1
          periodSeconds: 10
          successThreshold: 1
          failureThreshold: 3
        volumeMounts:
        - name: data
          mountPath: /data
        # timezone setting
        - name: timezone
          mountPath: /etc/localtime
        - name: config
          mountPath: /usr/local/redis/redis.conf
          subPath: redis.conf
      volumes:
      - name: config
        configMap:
          name: elk-redis
      - name: timezone
        hostPath:
          path: /usr/share/zoneinfo/Asia/Shanghai
      - name: data
        hostPath:
          type: DirectoryOrCreate
          path: /data/elk/elk-redis
      nodeName: gem-yxyw-t-c02
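The Filebeat and Logstash configurations in this article reach Redis at 10.36.21.220:30079, i.e. through a NodePort Service that is not shown above; a sketch of what it could look like:

apiVersion: v1
kind: Service
metadata:
  name: elk-redis
  labels:
    app: elk-redis
spec:
  type: NodePort
  ports:
  - port: 6379
    targetPort: 6379
    nodePort: 30079
    protocol: TCP
  selector:
    app: elk-redis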

To improve Redis performance in this setup, persistence is turned off. Redis has persistence enabled by default, and the default mechanism is RDB.

Disable RDB:
1. Comment out the existing save rules:
# save 3600 1 300 100 60 10000
2. Set the save directive to an empty string:
save ""
3. Delete the existing dump.rdb file:
rm -f dump.rdb

Disable AOF:
1. Set appendonly to no.

The corresponding lines in the elk-redis ConfigMap are shown below.
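A sketch of the relevant portion of redis.conf in the elk-redis ConfigMap after these changes:

# RDB disabled: the old save rule stays commented out and save is set to empty
# save 3600 1 300 100 60 10000
save ""
# AOF disabled
appendonly no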
6. Deploy Filebeat

On the hosts that produce the logs, download and unpack Filebeat, then edit its configuration:

wget https://artifacts.elastic.co/downloads/beats/filebeat/filebeat-7.8.0-linux-x86_64.tar.gz
tar -zxvf filebeat-7.8.0-linux-x86_64.tar.gz
vi /data/elk/filebeat/filebeat-7.8.0-linux-x86_64/filebeat.yml

filebeat.inputs:
- type: log
  enabled: true
  paths:
    - /data/test-logs/osale-uc-test/*.log
  # use the input-level tags option (not fields.tags) so the Logstash
  # conditionals on [tags] above can match the events
  tags: ["osale-uc-test"]
- type: log
  enabled: true
  paths:
    - /data/test-logs/osale-jindi-client-test/*.log
  tags: ["osale-jindi-client-test"]
- type: log
  enabled: true
  paths:
    - /data/test-logs/osale-admin-weixin-test/*/osale-admin-weixin/*.log
  tags: ["osale-admin-weixin"]
- type: log
  enabled: true
  paths:
    - /data/tengine/logs/*.log
  tags: ["nginx.log"]

filebeat.config.modules:
  path: ${path.config}/modules.d/*.yml
  reload.enabled: false
setup.template.settings:
  index.number_of_shards: 1
setup.kibana:
output.redis:
  enabled: true
  hosts: ["10.36.21.220:30079"]
  password: "123456"
  db: 0
  key: localhost
  worker: 4
  timeout: 5
  max_retries: 3
  datatype: list
processors:
  - add_host_metadata: ~
  - add_cloud_metadata: ~
  - add_docker_metadata: ~
  - add_kubernetes_metadata: ~

Alternatively, Filebeat can run inside the cluster and ship logs straight to the Logstash Service:

---
apiVersion: v1
kind: ConfigMap
metadata:
  name: filebeat-config-to-logstash
  namespace: halashow
data:
  filebeat.yml: |-
    filebeat.inputs:
    - type: log
      paths:
        - /logm/*.log
    output.logstash:
      hosts: ['logstash:5044']
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: filebeat
  namespace: halashow
  labels:
    name: filebeat
spec:
  replicas: 1
  selector:
    matchLabels:
      name: filebeat
  template:
    metadata:
      labels:
        app: filebeat
        name: filebeat
    spec:
      containers:
      - name: filebeat
        image: docker.elastic.co/beats/filebeat:7.12.0
        args: [
          "-c", "/etc/filebeat.yml",
          "-e",
        ]
        volumeMounts:
        - mountPath: /logm
          name: logm
        - name: config
          mountPath: /etc/filebeat.yml
          readOnly: true
          subPath: filebeat.yml
      volumes:
      - name: logm
        emptyDir: {}
      - name: config
        configMap:
          defaultMode: 0640
          name: filebeat-config-to-logstash
Back on the host, start Filebeat:

cd /data/elk/filebeat-7.8.0-linux-x86_64
sudo ./filebeat -e -c filebeat.yml -d "publish"      # start Filebeat in the foreground
nohup ./filebeat -e -c filebeat.yml >/dev/null 2>&1 &   # start it in the background
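The configuration and the connection to the configured output can also be verified with Filebeat's built-in test commands:

./filebeat test config -c filebeat.yml    # validate filebeat.yml syntax
./filebeat test output -c filebeat.yml    # check connectivity to the Redis/Logstash output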
7. Deploy Kibana

apiVersion: apps/v1
kind: Deployment
metadata:
  name: kibana
  namespace: halashow
  labels:
    name: kibana
spec:
  replicas: 1
  selector:
    matchLabels:
      name: kibana
  template:
    metadata:
      labels:
        app: kibana
        name: kibana
    spec:
      restartPolicy: Always
      containers:
      - name: kibana
        image: kibana:7.12.0
        imagePullPolicy: Always
        ports:
        - containerPort: 5601
        resources:
          requests:
            memory: 1024Mi
            cpu: 50m
          limits:
            memory: 1024Mi
            cpu: 1000m
        volumeMounts:
        - name: kibana-config
          mountPath: /usr/share/kibana/config/kibana.yml
          subPath: kibana.yml
      volumes:
      - name: kibana-config
        configMap:
          name: kibana-cm
          items:
          - key: "kibana.yml"
            path: "kibana.yml"
---
apiVersion: v1
kind: Service
metadata:
  labels:
    app: kibana
  name: kibana
  namespace: halashow
spec:
  type: NodePort
  ports:
  - name: kibana
    port: 5601
    nodePort: 30102
    protocol: TCP
    targetPort: 5601
  selector:
    app: kibana
The kibana.yml mounted from the kibana-cm ConfigMap:

server.name: kibana
server.host: "0"
elasticsearch.hosts: [ "http://elasticsearch:9200" ]
monitoring.ui.container.elasticsearch.enabled: true
i18n.locale: "zh-CN"   # switch the Kibana UI to Chinese
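The Deployment above expects that file in a ConfigMap named kibana-cm; a sketch of creating it:

apiVersion: v1
kind: ConfigMap
metadata:
  name: kibana-cm
  namespace: halashow
data:
  kibana.yml: |-
    server.name: kibana
    server.host: "0"
    elasticsearch.hosts: [ "http://elasticsearch:9200" ]
    monitoring.ui.container.elasticsearch.enabled: true
    i18n.locale: "zh-CN"

Equivalently: kubectl -n halashow create configmap kibana-cm --from-file=kibana.yml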

If Kibana is placed behind an Nginx reverse proxy, proxy it at location / (it must be /, otherwise Kibana's assets will not load) and point proxy_pass at Kibana's address (http://<ip>:<port>); see the sketch below.
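A minimal Nginx sketch for that; the server_name is a placeholder, and the upstream address assumes the node IP used earlier (10.36.21.220) plus the kibana NodePort 30102 from the Service above:

server {
    listen 80;
    server_name kibana.example.com;

    location / {
        # must be /, otherwise Kibana's asset paths break
        proxy_pass http://10.36.21.220:30102;
        proxy_set_header Host $host;
        proxy_set_header X-Real-IP $remote_addr;
    }
}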
1. Set an expiry time on Elasticsearch indices in Kibana so that they are deleted automatically.
First create an index pattern whose name ends with * so that the indices for every date are matched. Then create a ten-day index lifecycle policy, and finally create an index template that matches all of the log indices to be managed. The template can reference the lifecycle policy directly (as in the settings below), or the template can be attached to the policy afterwards from the lifecycle-policy screen.
2. Create an index lifecycle policy.
In Kibana: Index Management → Index Lifecycle Policies; create the policy and configure the Delete phase (here: delete after ten days).
3. Create an index template to manage all of the matching indices.
In Kibana: Index Management → Index Templates. In the template's index settings, reference the lifecycle policy:
{
  "index": {
    "lifecycle": {
      "name": "gdnb-test-10day"
    }
  }
}
You can either paste the lifecycle settings into the template as above, or open the gdnb-test-10day policy under Index Lifecycle Policies and attach the index template to it there.
4. Attach the index template for the logs that should be kept for ten days to the lifecycle policy you just created.
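The same result can be achieved through the Elasticsearch API (e.g. from Kibana Dev Tools) instead of the UI; a sketch, assuming the ten-day policy is named gdnb-test-10day and the indices match the gdnb-tes* pattern mentioned above:

PUT _ilm/policy/gdnb-test-10day
{
  "policy": {
    "phases": {
      "delete": {
        "min_age": "10d",
        "actions": { "delete": {} }
      }
    }
  }
}

PUT _index_template/gdnb-test
{
  "index_patterns": ["gdnb-tes*"],
  "template": {
    "settings": {
      "index.lifecycle.name": "gdnb-test-10day"
    }
  }
}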