k8s 二进制部署
程序员文章站
2024-03-13 09:43:45
...
本文档可能略有疏漏,但整体步骤经过验证可用;部署中遇到的小问题可自行排查解决。
一、环境准备
# Run on ALL nodes.
# Create the kubernetes install layout (-p makes re-runs idempotent;
# the original plain `mkdir` fails if the directory already exists).
mkdir -p /opt/kubernetes/{bin,cfg,ssl}
echo "PATH=$PATH:/opt/kubernetes/bin" >> /etc/profile
source /etc/profile
# Disable the firewall (lab setup; open the required ports instead in production).
systemctl stop firewalld
systemctl disable firewalld
# Disable SELinux, both immediately and across reboots.
sed -i 's/enforcing/disabled/' /etc/selinux/config
setenforce 0
# Disable swap (kubelet refuses to start when swap is enabled).
swapoff -a
sed -i '/ swap / s/^\(.*\)$/#\1/g' /etc/fstab
## Run on all worker nodes.
# Base tools required by docker-ce.
yum install -y yum-utils device-mapper-persistent-data lvm2
# Remove any older docker packaging, then install docker-ce from the official repo.
yum remove docker docker-common docker-selinux docker-engine -y
yum-config-manager --add-repo https://download.docker.com/linux/centos/docker-ce.repo
yum makecache fast
yum -y install docker-ce-18.06.1.ce-3.el7
mkdir -p /etc/docker
cat > /etc/docker/daemon.json <<EOF
{
"registry-mirrors": [ "https://registry.docker-cn.com"]
}
EOF
systemctl start docker
# Enable docker so it survives reboots (consistent with the other services
# in this document, which are all enabled after being started).
systemctl enable docker
二、部署k8s
1、准备证书
# On the master only.
# Download the cfssl toolchain used to generate all cluster certificates.
curl -s -L -o /usr/local/bin/cfssl https://pkg.cfssl.org/R1.2/cfssl_linux-amd64
curl -s -L -o /usr/local/bin/cfssljson https://pkg.cfssl.org/R1.2/cfssljson_linux-amd64
curl -s -L -o /usr/local/bin/cfssl-certinfo https://pkg.cfssl.org/R1.2/cfssl-certinfo_linux-amd64
# The renames below are only needed when the files were downloaded under their
# release names; the curl commands above already save to the final names.
# mv /usr/local/bin/cfssl_linux-amd64 /usr/local/bin/cfssl
# mv /usr/local/bin/cfssljson_linux-amd64 /usr/local/bin/cfssljson
# mv /usr/local/bin/cfssl-certinfo_linux-amd64 /usr/local/bin/cfssl-certinfo
chmod +x /usr/local/bin/cfssl*
# Create the CA signing config: one "kubernetes" profile, valid for
# 10 years (87600h), usable for both server and client authentication.
cat > ca-config.json <<EOF
{
"signing": {
"default": {
"expiry": "87600h"
},
"profiles": {
"kubernetes": {
"expiry": "87600h",
"usages": [
"signing",
"key encipherment",
"server auth",
"client auth"
]
}
}
}
}
EOF
# Create the CA certificate signing request (CSR) file.
cat > ca-csr.json <<EOF
{
"CN": "kubernetes",
"key": {
"algo": "rsa",
"size": 2048
},
"names": [
{
"C": "CN",
"L": "Shenzhen",
"ST": "Guangzhou",
"O": "k8s",
"OU": "System"
}
]
}
EOF
# Generate the self-signed CA.
# Fix: the original line ended with a stray "–" (an en-dash, a copy/paste
# artifact) passed as an extra argument to cfssljson; it is not needed —
# cfssljson reads the JSON from the pipe.
cfssl gencert -initca ca-csr.json | cfssljson -bare ca
# Produces ca.csr, ca-key.pem and ca.pem
# Generate the server certificate and key (shared by apiserver and etcd here).
# NOTE: the hosts list must contain every node IP plus the cluster's default
# service IP (10.1.7.1, first IP of the service CIDR) — adjust to your setup.
cat > server-csr.json <<EOF
{
"CN": "kubernetes",
"hosts": [
"127.0.0.1",
"10.1.7.1",
"192.168.10.101",
"192.168.10.102",
"192.168.10.103",
"192.168.10.104",
"kubernetes",
"kubernetes.default",
"kubernetes.default.svc",
"kubernetes.default.svc.cluster",
"kubernetes.default.svc.cluster.local"
],
"key": {
"algo": "rsa",
"size": 2048
},
"names": [
{
"C": "CN",
"L": "Shenzhen",
"ST": "Guangzhou",
"O": "k8s",
"OU": "System"
}
]
}
EOF
cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes server-csr.json | cfssljson -bare server
# Produces server-key.pem and server.pem
# Generate the kube-proxy client certificate and key.
# CN "system:kube-proxy" maps to the built-in RBAC identity; hosts stays
# empty because this is a pure client certificate.
cat > kube-proxy-csr.json <<EOF
{
"CN": "system:kube-proxy",
"hosts": [],
"key": {
"algo": "rsa",
"size": 2048
},
"names": [
{
"C": "CN",
"L": "Shenzhen",
"ST": "Guangzhou",
"O": "k8s",
"OU": "System"
}
]
}
EOF
cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes kube-proxy-csr.json | cfssljson -bare kube-proxy
# Produces kube-proxy-key.pem and kube-proxy.pem
# Keep the *.pem certificates in /ssl; move everything else (csr/json
# inputs) into /ssl/config. Uses globs instead of parsing `ls` — the
# original `ls | grep -v pem | xargs -i mv` breaks on unusual filenames
# and uses the deprecated xargs -i flag.
mkdir -p /ssl/config
for f in *; do
  [[ "$f" == *pem* ]] || mv -- "$f" /ssl/config/
done
mv -- *.pem /ssl/
# Copy certificates and generated files to every other node
# (set up SSH keys beforehand for password-less transfer).
for node in 192.168.10.102 192.168.10.103 192.168.10.104; do
  scp -r /ssl/ "$node":/
done
2、部署etcd
# Deploy etcd on every node.
tar -xvf etcd-v3.2.12-linux-amd64.tar.gz
cp etcd-v3.2.12-linux-amd64/etcd* /opt/kubernetes/bin/
ln -s /opt/kubernetes/bin/etcd* /usr/local/bin/
# Configuration template follows.
# Create the etcd configuration file. This copy is for the master (etcd01);
# change ETCD_NAME and the listen/advertise IPs on the other nodes.
cat > /opt/kubernetes/cfg/etcd <<EOF
#[Member]
ETCD_NAME="etcd01"
ETCD_DATA_DIR="/var/lib/etcd/default.etcd"
ETCD_LISTEN_PEER_URLS="https://192.168.10.101:2380"
ETCD_LISTEN_CLIENT_URLS="https://192.168.10.101:2379"
#[Clustering]
ETCD_INITIAL_ADVERTISE_PEER_URLS="https://192.168.10.101:2380"
ETCD_ADVERTISE_CLIENT_URLS="https://192.168.10.101:2379"
ETCD_INITIAL_CLUSTER="etcd01=https://192.168.10.101:2380,etcd02=https://192.168.10.102:2380,etcd03=https://192.168.10.103:2380,etcd04=https://192.168.10.104:2380"
ETCD_INITIAL_CLUSTER_TOKEN="etcd-cluster"
ETCD_INITIAL_CLUSTER_STATE="new"
EOF
# ETCD_NAME:本节点在etcd集群中的名称(每个节点唯一,如etcd01/etcd02)
# ETCD_DATA_DIR:etcd数据目录
# ETCD_LISTEN_PEER_URLS:集群内部(peer)通信的监听地址
# ETCD_LISTEN_CLIENT_URLS:客户端访问的监听地址
# ETCD_INITIAL_CLUSTER:集群全部节点的信息
# ETCD_INITIAL_CLUSTER_TOKEN:集群的token,可自定义,各节点必须一致
# ETCD_INITIAL_CLUSTER_STATE:集群建立的状态(新建集群为new)
# 注意:这个配置文件是master节点上的,其他节点上注意修改ETCD_NAME和IP地址
# Create the etcd systemd unit. The \$ and \\ sequences are escaped so the
# heredoc writes literal \${VAR} references and "\" line continuations into
# the unit file, to be resolved by systemd at start time.
cat > /usr/lib/systemd/system/etcd.service <<EOF
[Unit]
Description=Etcd Server
After=network.target
After=network-online.target
Wants=network-online.target
[Service]
Type=notify
EnvironmentFile=/opt/kubernetes/cfg/etcd
ExecStart=/opt/kubernetes/bin/etcd \\
--name=\${ETCD_NAME} \\
--data-dir=\${ETCD_DATA_DIR} \\
--listen-peer-urls=\${ETCD_LISTEN_PEER_URLS} \\
--listen-client-urls=\${ETCD_LISTEN_CLIENT_URLS},http://127.0.0.1:2379 \\
--advertise-client-urls=\${ETCD_ADVERTISE_CLIENT_URLS} \\
--initial-advertise-peer-urls=\${ETCD_INITIAL_ADVERTISE_PEER_URLS} \\
--initial-cluster=\${ETCD_INITIAL_CLUSTER} \\
--initial-cluster-token=\${ETCD_INITIAL_CLUSTER_TOKEN} \\
--initial-cluster-state=new \\
--cert-file=/opt/kubernetes/ssl/server.pem \\
--key-file=/opt/kubernetes/ssl/server-key.pem \\
--peer-cert-file=/opt/kubernetes/ssl/server.pem \\
--peer-key-file=/opt/kubernetes/ssl/server-key.pem \\
--trusted-ca-file=/opt/kubernetes/ssl/ca.pem \\
--peer-trusted-ca-file=/opt/kubernetes/ssl/ca.pem
Restart=on-failure
LimitNOFILE=65536
[Install]
WantedBy=multi-user.target
EOF
# Fix: the original passed \${ETCD_INITIAL_CLUSTER} (the whole member list)
# to --initial-cluster-token; it must be \${ETCD_INITIAL_CLUSTER_TOKEN}
# ("etcd-cluster" from the config file above).
# Install the certificates where the unit file above expects them.
cp /ssl/*pem /opt/kubernetes/ssl/
# NOTE: the first node started will block waiting for its peers — that is
# expected; once the remaining nodes are up, restart this node and the
# cluster forms.
systemctl daemon-reload
systemctl restart etcd.service
systemctl enable etcd.service
# Verify cluster health. Fix: the original used a single-dash "-ca-file";
# use "--ca-file" consistently with the other etcdctl invocations in this
# document.
etcdctl --ca-file=/ssl/ca.pem --cert-file=/ssl/server.pem --key-file=/ssl/server-key.pem --endpoints="https://192.168.10.101:2379,https://192.168.10.102:2379,https://192.168.10.103:2379,https://192.168.10.104:2379" cluster-health
3、部署网络插件(flannel)
# On every node: install the flannel binaries.
tar -zxf flannel-v0.13.0-linux-amd64.tar.gz
mv flanneld mk-docker-opts.sh /opt/kubernetes/bin/
# Create the flannel config. flanneld stores and reads its network layout
# in etcd, so it needs the etcd endpoints and the etcd client certificates.
cat <<EOF >/opt/kubernetes/cfg/flanneld
FLANNEL_OPTIONS="--etcd-endpoints=https://192.168.10.101:2379,https://192.168.10.102:2379,https://192.168.10.103:2379,https://192.168.10.104:2379 \\
-etcd-cafile=/opt/kubernetes/ssl/ca.pem \\
-etcd-certfile=/opt/kubernetes/ssl/server.pem \\
-etcd-keyfile=/opt/kubernetes/ssl/server-key.pem"
EOF
# Create the flanneld systemd unit. After flanneld starts, mk-docker-opts.sh
# writes the subnet assigned to this node into /run/flannel/subnet.env, which
# the docker unit below consumes; hence Before=docker.service.
cat <<EOF >/usr/lib/systemd/system/flanneld.service
[Unit]
Description=Flanneld overlay address etcd agent
After=network-online.target network.target
Before=docker.service
[Service]
Type=notify
EnvironmentFile=/opt/kubernetes/cfg/flanneld
ExecStart=/opt/kubernetes/bin/flanneld --ip-masq \$FLANNEL_OPTIONS
ExecStartPost=/opt/kubernetes/bin/mk-docker-opts.sh -k DOCKER_NETWORK_OPTIONS -d /run/flannel/subnet.env
Restart=on-failure
[Install]
WantedBy=multi-user.target
EOF
# Replace docker's unit file so dockerd starts with \$DOCKER_NETWORK_OPTIONS
# (the flannel-assigned bridge options) read from /run/flannel/subnet.env.
cat <<EOF >/usr/lib/systemd/system/docker.service
[Unit]
Description=Docker Application Container Engine
Documentation=https://docs.docker.com
After=network-online.target firewalld.service
Wants=network-online.target
[Service]
Type=notify
EnvironmentFile=/run/flannel/subnet.env
ExecStart=/usr/bin/dockerd \$DOCKER_NETWORK_OPTIONS
ExecReload=/bin/kill -s HUP \$MAINPID
LimitNOFILE=infinity
LimitNPROC=infinity
LimitCORE=infinity
TimeoutStartSec=0
Delegate=yes
KillMode=process
Restart=on-failure
StartLimitBurst=3
StartLimitInterval=60s
[Install]
WantedBy=multi-user.target
EOF
# Write the pod network definition into etcd for flanneld to consume:
# 172.19.0.0/16 overlay with the vxlan backend.
/opt/kubernetes/bin/etcdctl --ca-file=/ssl/ca.pem --cert-file=/ssl/server.pem --key-file=/ssl/server-key.pem --endpoints="https://192.168.10.101:2379,https://192.168.10.102:2379,https://192.168.10.103:2379,https://192.168.10.104:2379" \
set /coreos.com/network/config '{ "Network": "172.19.0.0/16", "Backend": {"Type": "vxlan"}}'
systemctl daemon-reload
systemctl start flanneld
systemctl enable flanneld
# Restart docker so it picks up the flannel-provided network options.
systemctl restart docker
# Inspect the configured network and backend type.
/opt/kubernetes/bin/etcdctl --ca-file=/ssl/ca.pem --cert-file=/ssl/server.pem --key-file=/ssl/server-key.pem --endpoints="https://192.168.10.101:2379,https://192.168.10.102:2379,https://192.168.10.103:2379,https://192.168.10.104:2379" get /coreos.com/network/config
# List the subnet leases flannel has allocated.
/opt/kubernetes/bin/etcdctl --ca-file=/ssl/ca.pem --cert-file=/ssl/server.pem --key-file=/ssl/server-key.pem --endpoints="https://192.168.10.101:2379,https://192.168.10.102:2379,https://192.168.10.103:2379,https://192.168.10.104:2379" ls /coreos.com/network/subnets
# Show the details of one subnet lease (the key below is an example taken
# from one run — use a key returned by the ls command above).
/opt/kubernetes/bin/etcdctl --ca-file=/ssl/ca.pem --cert-file=/ssl/server.pem --key-file=/ssl/server-key.pem --endpoints="https://192.168.10.101:2379,https://192.168.10.102:2379,https://192.168.10.103:2379,https://192.168.10.104:2379" get /coreos.com/network/subnets/172.19.32.0-24
4、部署k8s组件
#################################################### master ###########################################################
# On the master: install kubectl from the server tarball.
tar -xvf kubernetes-server-linux-amd64.tar.gz
chmod +x kubernetes/server/bin/kubectl
mv kubernetes/server/bin/kubectl /opt/kubernetes/bin
# Create the TLS Bootstrapping token.
# The token lets kubelets authenticate once so they can request their own
# client certificates automatically.
cd /ssl
export BOOTSTRAP_TOKEN=$(head -c 16 /dev/urandom | od -An -t x | tr -d ' ')
cat > token.csv <<EOF
${BOOTSTRAP_TOKEN},kubelet-bootstrap,10001,"system:kubelet-bootstrap"
EOF
# Set cluster parameters for the bootstrap kubeconfig.
kubectl config set-cluster kubernetes \
  --certificate-authority=/opt/kubernetes/ssl/ca.pem \
  --embed-certs=true \
  --server=https://192.168.10.101:6443 \
  --kubeconfig=bootstrap.kubeconfig
# Set client credentials.
# Fix: use the freshly generated ${BOOTSTRAP_TOKEN} instead of the stale
# hard-coded token in the original — it must match the token written to
# token.csv above, otherwise kubelet bootstrap authentication fails.
kubectl config set-credentials kubelet-bootstrap \
  --token=${BOOTSTRAP_TOKEN} \
  --kubeconfig=bootstrap.kubeconfig
# Set context parameters.
kubectl config set-context default \
  --cluster=kubernetes \
  --user=kubelet-bootstrap \
  --kubeconfig=bootstrap.kubeconfig
# Activate the default context.
kubectl config use-context default --kubeconfig=bootstrap.kubeconfig
# Create the kube-proxy kubeconfig, authenticated with the kube-proxy
# client certificate generated earlier.
kubectl config set-cluster kubernetes \
  --certificate-authority=/opt/kubernetes/ssl/ca.pem \
  --embed-certs=true \
  --server=https://192.168.10.101:6443 \
  --kubeconfig=kube-proxy.kubeconfig
kubectl config set-credentials kube-proxy \
  --client-certificate=/opt/kubernetes/ssl/kube-proxy.pem \
  --client-key=/opt/kubernetes/ssl/kube-proxy-key.pem \
  --embed-certs=true \
  --kubeconfig=kube-proxy.kubeconfig
kubectl config set-context default \
  --cluster=kubernetes \
  --user=kube-proxy \
  --kubeconfig=kube-proxy.kubeconfig
kubectl config use-context default --kubeconfig=kube-proxy.kubeconfig
# Distribute both kubeconfig files to every worker node.
# Fix: the original only copied to 192.168.10.102, although the comment
# (and the rest of the document) expects all worker nodes to receive them.
for node in 192.168.10.102 192.168.10.103 192.168.10.104; do
  scp *.kubeconfig "$node":/opt/kubernetes/cfg/
done
# Unpack and install the master components into /opt/kubernetes/bin/.
cd ~/kubernetes/server/bin && mv kube-apiserver kube-controller-manager kube-scheduler /opt/kubernetes/bin/
chmod +x /opt/kubernetes/bin/{kube-apiserver,kube-controller-manager,kube-scheduler}
# Configure kube-apiserver.
MASTER_ADDRESS="192.168.10.101"
ETCD_SERVERS=https://192.168.10.101:2379,https://192.168.10.102:2379,https://192.168.10.103:2379,https://192.168.10.104:2379
# Generate the kube-apiserver config file. ${MASTER_ADDRESS}/${ETCD_SERVERS}
# expand now; each "\\" writes a literal "\" line continuation into the file.
# Fix: the --admission-control line originally ended with a single "\",
# which made the heredoc silently join it with the next line; every
# continuation now uses "\\" consistently.
cat <<EOF >/opt/kubernetes/cfg/kube-apiserver
KUBE_APISERVER_OPTS="--logtostderr=true \\
--v=4 \\
--etcd-servers=${ETCD_SERVERS} \\
--insecure-bind-address=127.0.0.1 \\
--bind-address=${MASTER_ADDRESS} \\
--insecure-port=8080 \\
--secure-port=6443 \\
--advertise-address=${MASTER_ADDRESS} \\
--allow-privileged=true \\
--service-cluster-ip-range=10.1.7.0/24 \\
--admission-control=NamespaceLifecycle,LimitRanger,SecurityContextDeny,ServiceAccount,ResourceQuota,NodeRestriction \\
--authorization-mode=RBAC,Node \\
--kubelet-https=true \\
--enable-bootstrap-token-auth \\
--token-auth-file=/opt/kubernetes/cfg/token.csv \\
--service-node-port-range=30000-50000 \\
--tls-cert-file=/opt/kubernetes/ssl/server.pem \\
--tls-private-key-file=/opt/kubernetes/ssl/server-key.pem \\
--client-ca-file=/opt/kubernetes/ssl/ca.pem \\
--service-account-key-file=/opt/kubernetes/ssl/ca-key.pem \\
--etcd-cafile=/opt/kubernetes/ssl/ca.pem \\
--etcd-certfile=/opt/kubernetes/ssl/server.pem \\
--etcd-keyfile=/opt/kubernetes/ssl/server-key.pem"
EOF
# Generate the kube-apiserver systemd unit.
cat <<EOF >/usr/lib/systemd/system/kube-apiserver.service
[Unit]
Description=Kubernetes API Server
Documentation=https://github.com/kubernetes/kubernetes
[Service]
EnvironmentFile=-/opt/kubernetes/cfg/kube-apiserver
ExecStart=/opt/kubernetes/bin/kube-apiserver \$KUBE_APISERVER_OPTS
Restart=on-failure
[Install]
WantedBy=multi-user.target
EOF
# Copy the token file into the cfg directory — this is where
# --token-auth-file=/opt/kubernetes/cfg/token.csv expects it
# (the original comment said the "ssl" directory, which was wrong).
cp /ssl/token.csv /opt/kubernetes/cfg/
# Start kube-apiserver.
systemctl daemon-reload
systemctl restart kube-apiserver.service
systemctl status kube-apiserver.service
systemctl enable kube-apiserver.service
# Configure kube-controller-manager.
# It talks to the apiserver over the local insecure port (127.0.0.1:8080)
# and signs kubelet certificates with the cluster CA.
cat <<EOF >/opt/kubernetes/cfg/kube-controller-manager
KUBE_CONTROLLER_MANAGER_OPTS="--logtostderr=true \\
--v=4 \\
--master=127.0.0.1:8080 \\
--leader-elect=true \\
--address=127.0.0.1 \\
--service-cluster-ip-range=10.1.7.0/24 \\
--cluster-name=kubernetes \\
--cluster-signing-cert-file=/opt/kubernetes/ssl/ca.pem \\
--cluster-signing-key-file=/opt/kubernetes/ssl/ca-key.pem \\
--service-account-private-key-file=/opt/kubernetes/ssl/ca-key.pem \\
--root-ca-file=/opt/kubernetes/ssl/ca.pem"
EOF
# Generate the kube-controller-manager systemd unit.
cat <<EOF >/usr/lib/systemd/system/kube-controller-manager.service
[Unit]
Description=Kubernetes Controller Manager
Documentation=https://github.com/kubernetes/kubernetes
[Service]
EnvironmentFile=-/opt/kubernetes/cfg/kube-controller-manager
ExecStart=/opt/kubernetes/bin/kube-controller-manager \$KUBE_CONTROLLER_MANAGER_OPTS
Restart=on-failure
[Install]
WantedBy=multi-user.target
EOF
# Start kube-controller-manager.
systemctl daemon-reload
systemctl restart kube-controller-manager.service
systemctl status kube-controller-manager.service
systemctl enable kube-controller-manager.service
# Configure kube-scheduler (also via the local insecure apiserver port).
cat <<EOF >/opt/kubernetes/cfg/kube-scheduler
KUBE_SCHEDULER_OPTS="--logtostderr=true \\
--v=4 \\
--master=127.0.0.1:8080 \\
--leader-elect"
EOF
# Generate the kube-scheduler systemd unit.
cat <<EOF >/usr/lib/systemd/system/kube-scheduler.service
[Unit]
Description=Kubernetes Scheduler
Documentation=https://github.com/kubernetes/kubernetes
[Service]
EnvironmentFile=-/opt/kubernetes/cfg/kube-scheduler
ExecStart=/opt/kubernetes/bin/kube-scheduler \$KUBE_SCHEDULER_OPTS
Restart=on-failure
[Install]
WantedBy=multi-user.target
EOF
# Start kube-scheduler.
systemctl daemon-reload
systemctl restart kube-scheduler.service
systemctl status kube-scheduler.service
systemctl enable kube-scheduler.service
# master创建账户:将kubelet-bootstrap用户绑定到system:node-bootstrapper角色。
# 注意:此命令必须在node节点启动kubelet之前在master上执行一次,
# 否则kubelet无法发起证书签名请求(CSR),节点无法加入集群:
# kubectl create clusterrolebinding kubelet-bootstrap --clusterrole=system:node-bootstrapper --user=kubelet-bootstrap
#################################################### node ###########################################################
# Unpack and move the node components into the kubernetes install directory.
cd kubernetes/server/bin/
mv kubelet kube-proxy /opt/kubernetes/bin/
# NOTE: --cluster-dns (10.1.7.2) is a pre-chosen IP inside the service CIDR
# (10.1.7.0/24); a DNS service will be deployed at that IP later.
# --address/--hostname-override must be the CURRENT node's own IP —
# change 192.168.10.103 accordingly on each node.
cat <<EOF >/opt/kubernetes/cfg/kubelet
KUBELET_OPTS="--logtostderr=true \\
--v=4 \\
--address=192.168.10.103 \\
--hostname-override=192.168.10.103 \\
--kubeconfig=/opt/kubernetes/cfg/kubelet.kubeconfig \\
--experimental-bootstrap-kubeconfig=/opt/kubernetes/cfg/bootstrap.kubeconfig \\
--cert-dir=/opt/kubernetes/ssl \\
--cluster-dns=10.1.7.2 \\
--cluster-domain=cluster.local \\
--fail-swap-on=false \\
--pod-infra-container-image=registry.cn-hangzhou.aliyuncs.com/google-containers/pause-amd64:3.0"
EOF
# Create the kubelet systemd unit (kubelet requires docker to be running).
cat <<EOF >/usr/lib/systemd/system/kubelet.service
[Unit]
Description=Kubernetes Kubelet
After=docker.service
Requires=docker.service
[Service]
EnvironmentFile=-/opt/kubernetes/cfg/kubelet
ExecStart=/opt/kubernetes/bin/kubelet \$KUBELET_OPTS
Restart=on-failure
KillMode=process
[Install]
WantedBy=multi-user.target
EOF
# Start kubelet. On first start it uses bootstrap.kubeconfig to submit a
# certificate signing request, which must be approved on the master.
systemctl daemon-reload
systemctl start kubelet.service
systemctl status kubelet.service
systemctl enable kubelet.service
# Deploy the kube-proxy component.
# Create the kube-proxy configuration file.
# NOTE(review): --hostname-override must be this node's own IP; the example
# shows .104 here but .103 in the kubelet config above — on each node set
# both to the same local IP.
cat <<EOF >/opt/kubernetes/cfg/kube-proxy
KUBE_PROXY_OPTS="--logtostderr=true \\
--v=4 \\
--hostname-override=192.168.10.104 \\
--kubeconfig=/opt/kubernetes/cfg/kube-proxy.kubeconfig"
EOF
# Create the kube-proxy systemd unit.
cat <<EOF >/usr/lib/systemd/system/kube-proxy.service
[Unit]
Description=Kubernetes Proxy
After=network.target
[Service]
EnvironmentFile=-/opt/kubernetes/cfg/kube-proxy
ExecStart=/opt/kubernetes/bin/kube-proxy \$KUBE_PROXY_OPTS
Restart=on-failure
[Install]
WantedBy=multi-user.target
EOF
# Start kube-proxy.
systemctl daemon-reload
systemctl start kube-proxy.service
systemctl status kube-proxy.service
systemctl enable kube-proxy.service
# On the master: check pending node certificate signing requests.
kubectl get csr
# Approve each node's CSR in turn.
# NOTE: the CSR names below are examples from one run — use the names
# printed by `kubectl get csr` on your own cluster.
kubectl certificate approve node-csr-Lt87X2osORREpvEfmY-uSVdvVuzbhL1kPCXH14uZojE
kubectl certificate approve node-csr-Xh91hV1o0unVkcOSS7G2i5hF7r0_uuuXucvTs86FM-E
kubectl certificate approve node-csr-e0fEqjObj1nu-8EgvHiBqH4nzRxjbjeWwJV2bgKjpv4
kubectl certificate approve node-csr-jf8UIWdwXqBPgf2ALNuuFV_IgX8uD0UC8V9MPnAk55s
# Create a test deployment.
# NOTE(review): --replicas/--port on `kubectl create deployment` require
# kubectl v1.19+; with the older client matching this cluster, use
# `kubectl run nginx --image=nginx --replicas=2` instead — confirm your
# kubectl version. Also the nginx image listens on 80, not 8080.
kubectl create deployment nginx --replicas=2 --image=nginx:latest --port=8080
# Inspect the created services and pods.
kubectl get svc,pods -o wide
上一篇: java 解析user-agent 信息