附014.Kubernetes Prometheus+Grafana+EFK+Kibana+Glusterfs整合性方案
程序员文章站
2022-06-18 18:26:11
一 glusterfs存储集群部署 注意:以下为简略步骤,详情参考《附009.Kubernetes永久存储之GlusterFS独立部署》。 1.1 架构示意 略 1.2 相关规划 主机 IP 磁盘 备注 k8smaster01 172.24.8.71 —— Kubernetes Master节点 H ......
一 glusterfs存储集群部署
注意:以下为简略步骤,详情参考《附009.Kubernetes永久存储之GlusterFS独立部署》。
1.1 架构示意
略
1.2 相关规划
提示:本规划直接使用裸磁盘完成。
1.3 安装glusterfs
# yum -y install centos-release-gluster
# yum -y install glusterfs-server
# systemctl start glusterd
# systemctl enable glusterd
提示:建议所有节点安装。
1.4 添加信任池
[root@k8snode01 ~]# gluster peer probe k8snode02
[root@k8snode01 ~]# gluster peer probe k8snode03
[root@k8snode01 ~]# gluster peer status #查看信任池状态
[root@k8snode01 ~]# gluster pool list #查看信任池列表
提示:仅需要在glusterfs任一节点执行一次即可。
1.5 安装heketi
[root@k8smaster01 ~]# yum -y install heketi heketi-client
1.6 配置heketi
[root@k8smaster01 ~]# vi /etc/heketi/heketi.json
1 { 2 "_port_comment": "heketi server port number", 3 "port": "8080", 4 5 "_use_auth": "enable jwt authorization. please enable for deployment", 6 "use_auth": true, 7 8 "_jwt": "private keys for access", 9 "jwt": { 10 "_admin": "admin has access to all apis", 11 "admin": { 12 "key": "admin123" 13 }, 14 "_user": "user only has access to /volumes endpoint", 15 "user": { 16 "key": "xianghy" 17 } 18 }, 19 20 "_glusterfs_comment": "glusterfs configuration", 21 "glusterfs": { 22 "_executor_comment": [ 23 "execute plugin. possible choices: mock, ssh", 24 "mock: this setting is used for testing and development.", 25 " it will not send commands to any node.", 26 "ssh: this setting will notify heketi to ssh to the nodes.", 27 " it will need the values in sshexec to be configured.", 28 "kubernetes: communicate with glusterfs containers over", 29 " kubernetes exec api." 30 ], 31 "executor": "ssh", 32 33 "_sshexec_comment": "ssh username and private key file information", 34 "sshexec": { 35 "keyfile": "/etc/heketi/heketi_key", 36 "user": "root", 37 "port": "22", 38 "fstab": "/etc/fstab" 39 }, 40 41 "_db_comment": "database file name", 42 "db": "/var/lib/heketi/heketi.db", 43 44 "_loglevel_comment": [ 45 "set log level. choices are:", 46 " none, critical, error, warning, info, debug", 47 "default is warning" 48 ], 49 "loglevel" : "warning" 50 } 51 }
1.7 配置免秘钥
[root@k8smaster01 ~]# ssh-keygen -t rsa -q -f /etc/heketi/heketi_key -n ""
[root@k8smaster01 ~]# chown heketi:heketi /etc/heketi/heketi_key
[root@k8smaster01 ~]# ssh-copy-id -i /etc/heketi/heketi_key.pub root@k8snode01
[root@k8smaster01 ~]# ssh-copy-id -i /etc/heketi/heketi_key.pub root@k8snode02
[root@k8smaster01 ~]# ssh-copy-id -i /etc/heketi/heketi_key.pub root@k8snode03
1.8 启动heketi
[root@k8smaster01 ~]# systemctl enable heketi.service
[root@k8smaster01 ~]# systemctl start heketi.service
[root@k8smaster01 ~]# systemctl status heketi.service
[root@k8smaster01 ~]# curl http://localhost:8080/hello #测试访问
1.9 配置heketi拓扑
[root@k8smaster01 ~]# vi /etc/heketi/topology.json
1 { 2 "clusters": [ 3 { 4 "nodes": [ 5 { 6 "node": { 7 "hostnames": { 8 "manage": [ 9 "k8snode01" 10 ], 11 "storage": [ 12 "172.24.8.74" 13 ] 14 }, 15 "zone": 1 16 }, 17 "devices": [ 18 "/dev/sdb" 19 ] 20 }, 21 { 22 "node": { 23 "hostnames": { 24 "manage": [ 25 "k8snode02" 26 ], 27 "storage": [ 28 "172.24.8.75" 29 ] 30 }, 31 "zone": 1 32 }, 33 "devices": [ 34 "/dev/sdb" 35 ] 36 }, 37 { 38 "node": { 39 "hostnames": { 40 "manage": [ 41 "k8snode03" 42 ], 43 "storage": [ 44 "172.24.8.76" 45 ] 46 }, 47 "zone": 1 48 }, 49 "devices": [ 50 "/dev/sdb" 51 ] 52 } 53 ] 54 } 55 ] 56 }
[root@k8smaster01 ~]# echo "export HEKETI_CLI_SERVER=http://k8smaster01:8080" >> /etc/profile.d/heketi.sh
[root@k8smaster01 ~]# echo "alias heketi-cli='heketi-cli --user admin --secret admin123'" >> .bashrc
[root@k8smaster01 ~]# source /etc/profile.d/heketi.sh
[root@k8smaster01 ~]# source .bashrc
[root@k8smaster01 ~]# echo $HEKETI_CLI_SERVER
http://k8smaster01:8080
[root@k8smaster01 ~]# heketi-cli --server $HEKETI_CLI_SERVER --user admin --secret admin123 topology load --json=/etc/heketi/topology.json
1.10 集群管理及测试
[root@heketi ~]# heketi-cli cluster list #集群列表
[root@heketi ~]# heketi-cli node list #节点信息
[root@heketi ~]# heketi-cli volume list #卷信息
[root@k8snode01 ~]# gluster volume info #通过glusterfs节点查看
1.11 创建storageclass
[root@k8smaster01 study]# vi heketi-secret.yaml
 1 apiVersion: v1 2 kind: Secret 3 metadata: 4 name: heketi-secret 5 namespace: heketi 6 data: 7 key: YWRtaW4xMjM= 8 type: kubernetes.io/glusterfs
[root@k8smaster01 study]# kubectl create ns heketi
[root@k8smaster01 study]# kubectl create -f heketi-secret.yaml #创建heketi secret
[root@k8smaster01 study]# kubectl get secrets -n heketi
[root@k8smaster01 study]# vim gluster-heketi-storageclass.yaml #正式创建storageclass
 1 apiVersion: storage.k8s.io/v1 2 kind: StorageClass 3 metadata: 4 name: ghstorageclass 5 parameters: 6 resturl: "http://172.24.8.71:8080" 7 clusterid: "ad0f81f75f01d01ebd6a21834a2caa30" 8 restauthenabled: "true" 9 restuser: "admin" 10 secretName: "heketi-secret" 11 secretNamespace: "heketi" 12 volumetype: "replicate:3" 13 provisioner: kubernetes.io/glusterfs 14 reclaimPolicy: Delete
[root@k8smaster01 study]# kubectl create -f gluster-heketi-storageclass.yaml
注意:storageclass资源创建后不可变更,如修改只能删除后重建。
[root@k8smaster01 heketi]# kubectl get storageclasses #查看确认
NAME PROVISIONER AGE
ghstorageclass kubernetes.io/glusterfs 85s
[root@k8smaster01 heketi]# kubectl describe storageclasses ghstorageclass
二 集群监控metrics
注意:以下为简略步骤,详情参考《049.集群管理-集群监控metrics》。
2.1 开启聚合层
开机聚合层功能,使用kubeadm默认已开启此功能,可如下查看验证。
[root@k8smaster01 ~]# cat /etc/kubernetes/manifests/kube-apiserver.yaml
2.2 获取部署文件
[root@k8smaster01 ~]# git clone https://github.com/kubernetes-incubator/metrics-server.git
[root@k8smaster01 ~]# cd metrics-server/deploy/1.8+/
[root@k8smaster01 1.8+]# vi metrics-server-deployment.yaml
 1 …… 2 image: mirrorgooglecontainers/metrics-server-amd64:v0.3.6 #修改为国内源 3 command: 4 - /metrics-server 5 - --metric-resolution=30s 6 - --kubelet-insecure-tls 7 - --kubelet-preferred-address-types=InternalIP,Hostname,InternalDNS,ExternalDNS,ExternalIP #添加如上command 8 ……
2.3 正式部署
[root@k8smaster01 1.8+]# kubectl apply -f .
[root@k8smaster01 1.8+]# kubectl -n kube-system get pods -l k8s-app=metrics-server
[root@k8smaster01 1.8+]# kubectl -n kube-system logs -l k8s-app=metrics-server -f #可查看部署日志
2.4 确认验证
[root@k8smaster01 ~]# kubectl top nodes
[root@k8smaster01 ~]# kubectl top pods --all-namespaces
三 prometheus部署
注意:以下为简略步骤,详情参考《050.集群管理-prometheus+grafana监控方案》。
3.1 获取部署文件
[root@k8smaster01 ~]# git clone https://github.com/prometheus/prometheus
3.2 创建命名空间
[root@k8smaster01 ~]# cd prometheus/documentation/examples/
[root@k8smaster01 examples]# vi monitor-namespace.yaml
 1 apiVersion: v1 2 kind: Namespace 3 metadata: 4 name: monitoring
[root@k8smaster01 examples]# kubectl create -f monitor-namespace.yaml
3.3 创建rbac
[root@k8smaster01 examples]# vi rbac-setup.yml
 1 apiVersion: rbac.authorization.k8s.io/v1beta1 2 kind: ClusterRole 3 metadata: 4 name: prometheus 5 rules: 6 - apiGroups: [""] 7 resources: 8 - nodes 9 - nodes/proxy 10 - services 11 - endpoints 12 - pods 13 verbs: ["get", "list", "watch"] 14 - apiGroups: 15 - extensions 16 resources: 17 - ingresses 18 verbs: ["get", "list", "watch"] 19 - nonResourceURLs: ["/metrics"] 20 verbs: ["get"] 21 --- 22 apiVersion: v1 23 kind: ServiceAccount 24 metadata: 25 name: prometheus 26 namespace: monitoring #仅需修改命名空间 27 --- 28 apiVersion: rbac.authorization.k8s.io/v1beta1 29 kind: ClusterRoleBinding 30 metadata: 31 name: prometheus 32 roleRef: 33 apiGroup: rbac.authorization.k8s.io 34 kind: ClusterRole 35 name: prometheus 36 subjects: 37 - kind: ServiceAccount 38 name: prometheus 39 namespace: monitoring #仅需修改命名空间
[root@k8smaster01 examples]# kubectl create -f rbac-setup.yml
3.4 创建prometheus configmap
[root@k8smaster01 examples]# cat prometheus-kubernetes.yml | grep -v ^$ | grep -v "#" >> prometheus-config.yaml
[root@k8smaster01 examples]# vi prometheus-config.yaml
 1 apiVersion: v1 2 kind: ConfigMap 3 metadata: 4 name: prometheus-server-conf 5 labels: 6 name: prometheus-server-conf 7 namespace: monitoring #修改命名空间 8 ……
[root@k8smaster01 examples]# kubectl create -f prometheus-config.yaml
3.5 创建持久pvc
[root@k8smaster01 examples]# vi prometheus-pvc.yaml
 1 apiVersion: v1 2 kind: PersistentVolumeClaim 3 metadata: 4 name: prometheus-pvc 5 namespace: monitoring 6 annotations: 7 volume.beta.kubernetes.io/storage-class: ghstorageclass 8 spec: 9 accessModes: 10 - ReadWriteMany 11 resources: 12 requests: 13 storage: 5Gi
[root@k8smaster01 examples]# kubectl create -f prometheus-pvc.yaml
3.6 prometheus部署
[root@k8smaster01 examples]# vi prometheus-deployment.yml
 1 apiVersion: apps/v1beta2 2 kind: Deployment 3 metadata: 4 labels: 5 name: prometheus-deployment 6 name: prometheus-server 7 namespace: monitoring 8 spec: 9 replicas: 1 10 selector: 11 matchLabels: 12 app: prometheus-server 13 template: 14 metadata: 15 labels: 16 app: prometheus-server 17 spec: 18 containers: 19 - name: prometheus-server 20 image: prom/prometheus:v2.14.0 21 command: 22 - "/bin/prometheus" 23 args: 24 - "--config.file=/etc/prometheus/prometheus.yml" 25 - "--storage.tsdb.path=/prometheus/" 26 - "--storage.tsdb.retention=72h" 27 ports: 28 - containerPort: 9090 29 protocol: TCP 30 volumeMounts: 31 - name: prometheus-config-volume 32 mountPath: /etc/prometheus/ 33 - name: prometheus-storage-volume 34 mountPath: /prometheus/ 35 serviceAccountName: prometheus 36 imagePullSecrets: 37 - name: regsecret 38 volumes: 39 - name: prometheus-config-volume 40 configMap: 41 defaultMode: 420 42 name: prometheus-server-conf 43 - name: prometheus-storage-volume 44 persistentVolumeClaim: 45 claimName: prometheus-pvc
[root@k8smaster01 examples]# kubectl create -f prometheus-deployment.yml
3.7 创建prometheus service
[root@k8smaster01 examples]# vi prometheus-service.yaml
 1 apiVersion: v1 2 kind: Service 3 metadata: 4 labels: 5 app: prometheus-service 6 name: prometheus-service 7 namespace: monitoring 8 spec: 9 type: NodePort 10 selector: 11 app: prometheus-server 12 ports: 13 - port: 9090 14 targetPort: 9090 15 nodePort: 30001
[root@k8smaster01 examples]# kubectl create -f prometheus-service.yaml
[root@k8smaster01 examples]# kubectl get all -n monitoring
3.8 确认验证prometheus
浏览器直接访问:http://172.24.8.100:30001/
四 部署grafana
注意:以下为简略步骤,详情参考《050.集群管理-prometheus+grafana监控方案》。
4.1 获取部署文件
[root@k8smaster01 ~]# git clone https://github.com/liukuan73/kubernetes-addons
[root@k8smaster01 ~]# cd /root/kubernetes-addons/monitor/prometheus+grafana
4.2 创建持久pvc
[root@k8smaster01 prometheus+grafana]# vi grafana-data-pvc.yaml
 1 apiVersion: v1 2 kind: PersistentVolumeClaim 3 metadata: 4 name: grafana-data-pvc 5 namespace: monitoring 6 annotations: 7 volume.beta.kubernetes.io/storage-class: ghstorageclass 8 spec: 9 accessModes: 10 - ReadWriteOnce 11 resources: 12 requests: 13 storage: 5Gi
[root@k8smaster01 prometheus+grafana]# kubectl create -f grafana-data-pvc.yaml
4.3 grafana部署
[root@k8smaster01 prometheus+grafana]# vi grafana.yaml
 1 apiVersion: extensions/v1beta1 2 kind: Deployment 3 metadata: 4 name: monitoring-grafana 5 namespace: monitoring 6 spec: 7 replicas: 1 8 template: 9 metadata: 10 labels: 11 task: monitoring 12 k8s-app: grafana 13 spec: 14 containers: 15 - name: grafana 16 image: grafana/grafana:6.5.0 17 imagePullPolicy: IfNotPresent 18 ports: 19 - containerPort: 3000 20 protocol: TCP 21 volumeMounts: 22 - mountPath: /var/lib/grafana 23 name: grafana-storage 24 env: 25 - name: INFLUXDB_HOST 26 value: monitoring-influxdb 27 - name: GF_SERVER_HTTP_PORT 28 value: "3000" 29 - name: GF_AUTH_BASIC_ENABLED 30 value: "false" 31 - name: GF_AUTH_ANONYMOUS_ENABLED 32 value: "true" 33 - name: GF_AUTH_ANONYMOUS_ORG_ROLE 34 value: Admin 35 - name: GF_SERVER_ROOT_URL 36 value: / 37 readinessProbe: 38 httpGet: 39 path: /login 40 port: 3000 41 volumes: 42 - name: grafana-storage 43 persistentVolumeClaim: 44 claimName: grafana-data-pvc 45 nodeSelector: 46 node-role.kubernetes.io/master: "true" 47 tolerations: 48 - key: "node-role.kubernetes.io/master" 49 effect: "NoSchedule" 50 --- 51 apiVersion: v1 52 kind: Service 53 metadata: 54 labels: 55 kubernetes.io/cluster-service: 'true' 56 kubernetes.io/name: monitoring-grafana 57 annotations: 58 prometheus.io/scrape: 'true' 59 prometheus.io/tcp-probe: 'true' 60 prometheus.io/tcp-probe-port: '80' 61 name: monitoring-grafana 62 namespace: monitoring 63 spec: 64 type: NodePort 65 ports: 66 - port: 80 67 targetPort: 3000 68 nodePort: 30002 69 selector: 70 k8s-app: grafana
[root@k8smaster01 prometheus+grafana]# kubectl label nodes k8smaster01 node-role.kubernetes.io/master=true
[root@k8smaster01 prometheus+grafana]# kubectl label nodes k8smaster02 node-role.kubernetes.io/master=true
[root@k8smaster01 prometheus+grafana]# kubectl label nodes k8smaster03 node-role.kubernetes.io/master=true
[root@k8smaster01 prometheus+grafana]# kubectl create -f grafana.yaml
[root@k8smaster01 examples]# kubectl get all -n monitoring
4.4 确认验证grafana
浏览器直接访问:http://172.24.8.100:30002/
4.5 grafana配置
- 添加数据源:略
- 创建用户:略
提示:所有grafana配置可配置参考:https://grafana.com/docs/grafana/latest/installation/configuration/。
4.6 查看监控
浏览器再次访问:http://172.24.8.100:30002/