Tobey's Notes

k8s-install

Word count: 4.5k · Reading time: 24 min
2018/10/17

1. Environment

Hostname   Resources          OS                  Role     IP
k8s101     2 CPU / 2 GB RAM   CentOS 7.4 x86_64   Master   172.17.8.101
k8s102     2 CPU / 2 GB RAM   CentOS 7.4 x86_64   Node     172.17.8.102
k8s103     2 CPU / 2 GB RAM   CentOS 7.4 x86_64   Node     172.17.8.103

2. Software versions

docker                       etcd           CoreDNS   kubernetes   flannel
docker-ce-18.03.1.ce-1.el7   etcd-v3.1.12   v1.1.3    v1.10.5      v0.10.0

3. Role assignment

  • k8s101
    - kube-apiserver
    - kube-controller-manager
    - kube-scheduler
    - etcd
    
  • k8s102
    - kubelet
    - kube-proxy
    - docker
    - flannel
    - etcd
    
  • k8s103
    - kubelet
    - kube-proxy
    - docker
    - flannel
    - etcd
    

4. Install cluster dependencies on all machines

# Flush firewall rules
iptables -t nat -F && iptables -t nat -X && iptables -F && iptables -X && iptables -Z && iptables -t nat -Z

# Kubernetes 1.8+ requires swap to be off (on the master; turning it off on the nodes is recommended too)
swapoff -a && sysctl -w vm.swappiness=0
sed -ri '/^[^#]*swap/s@^@#@' /etc/fstab

# Tune kernel parameters
cat > /etc/sysctl.d/k8s.conf << EOF
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
net.ipv4.ip_forward = 1
EOF
sysctl -p /etc/sysctl.d/k8s.conf

# Create the k8s component install directories
mkdir -p /opt/kubernetes/{bin,cfg,ssl}

# Disable SELinux
sed -i '/SELINUX/s/enforcing/disabled/' /etc/selinux/config
setenforce 0

# Install dependencies
yum -y install yum-utils device-mapper-persistent-data lvm2

# Add the official Docker yum repo
yum-config-manager --add-repo https://download.docker.com/linux/centos/docker-ce.repo

# Install docker-ce
yum -y install docker-ce
# Start docker
systemctl start docker

# Configure a registry mirror and the private registry address ("insecure-registries");
# daemon.json must be valid JSON, so no inline comments
cat << EOF > /etc/docker/daemon.json
{
  "registry-mirrors": ["https://registry.docker-cn.com"],
  "insecure-registries": ["192.168.0.210:5000"]
}
EOF

# Restart docker
systemctl restart docker
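A quick sanity check that docker came back up with the new configuration (a minimal sketch; in docker 18.03, `docker info` lists the configured mirror under "Registry Mirrors"):

# Confirm the daemon is active and picked up daemon.json
systemctl is-active docker
docker info | grep -A1 'Registry Mirrors'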

5.1 Prepare self-signed certificates (on any one machine)

Component                 Certificates used
etcd                      ca.pem, kubernetes-key.pem, kubernetes.pem
flannel                   ca.pem, kubernetes-key.pem, kubernetes.pem
kube-apiserver            ca.pem, kubernetes-key.pem, kubernetes.pem
kubelet                   ca.pem
kube-proxy                ca.pem, kube-proxy.pem, kube-proxy-key.pem
kubectl                   ca.pem, admin.pem, admin-key.pem
kube-controller-manager   runs on the same machine as kube-apiserver and talks over the insecure port, so no certificate is needed
kube-scheduler            runs on the same machine as kube-apiserver and talks over the insecure port, so no certificate is needed
# Install the certificate generation tools (cfssl)
wget -c https://pkg.cfssl.org/R1.2/cfssl_linux-amd64
wget -c https://pkg.cfssl.org/R1.2/cfssljson_linux-amd64
wget -c https://pkg.cfssl.org/R1.2/cfssl-certinfo_linux-amd64
chmod +x cfssl*
mv cfssl_linux-amd64 /usr/local/bin/cfssl
mv cfssljson_linux-amd64 /usr/local/bin/cfssljson
mv cfssl-certinfo_linux-amd64 /usr/local/bin/cfssl-certinfo

# Create a working directory for the certificates
mkdir ssl
cd ssl
# Generate template files
cfssl print-defaults config > config.json
cfssl print-defaults csr > csr.json

5.2 Edit the generated JSON files

Create ca-config.json based on config.json:
{
  "signing": {
    "default": {
      "expiry": "87600h"
    },
    "profiles": {
      "kubernetes": {
        "usages": [
          "signing",
          "key encipherment",
          "server auth",
          "client auth"
        ],
        "expiry": "87600h"
      }
    }
  }
}


ca-csr.json:
# Note: do not modify the CN, O, and OU attributes
{
  "CN": "kubernetes",
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "BeiJing",
      "L": "BeiJing",
      "O": "k8s",
      "OU": "System"
    }
  ]
}

kubernetes-csr.json:
{
  "CN": "kubernetes",
  "hosts": [
    "etcd101",
    "etcd102",
    "etcd103",
    "127.0.0.1",
    "172.17.8.101",
    "172.17.8.102",
    "172.17.8.103",
    "10.254.0.1",
    "kubernetes",
    "kubernetes.default",
    "kubernetes.default.svc",
    "kubernetes.default.svc.cluster",
    "kubernetes.default.svc.cluster.local"
  ],
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "BeiJing",
      "L": "BeiJing",
      "O": "k8s",
      "OU": "System"
    }
  ]
}

admin-csr.json:
{
  "CN": "admin",
  "hosts": [],
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "BeiJing",
      "L": "BeiJing",
      "O": "system:masters",
      "OU": "System"
    }
  ]
}

kube-proxy-csr.json:
{
  "CN": "system:kube-proxy",
  "hosts": [],
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "BeiJing",
      "L": "BeiJing",
      "O": "k8s",
      "OU": "System"
    }
  ]
}

5.3 Generate the certificates and keys

# Generate the CA certificate and key
cfssl gencert -initca ca-csr.json | cfssljson -bare ca

# Generate the server certificate and key
cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes kubernetes-csr.json | cfssljson -bare kubernetes

# Generate the admin certificate and key
cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes admin-csr.json | cfssljson -bare admin

# Generate the kube-proxy certificate and key
cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes kube-proxy-csr.json | cfssljson -bare kube-proxy

5.4 Verify the certificates and keys were generated

# Keep only the pem files
mkdir ../cert
mv *.pem ../cert

# Check that the expected files were generated (kubernetes*.pem is the server pair from 5.3)
[root@k8s101 ssl]# cd ../cert/
[root@k8s101 cert]# ls
admin-key.pem admin.pem ca-key.pem ca.pem kube-proxy-key.pem kube-proxy.pem kubernetes-key.pem kubernetes.pem

# Inspect a certificate
[root@k8s101 cert]# cfssl-certinfo -cert kubernetes.pem

6.1 Install etcd (all machines)

# Download etcd
wget -c https://github.com/coreos/etcd/releases/download/v3.1.12/etcd-v3.1.12-linux-amd64.tar.gz

# Unpack and install
tar xf etcd-v3.1.12-linux-amd64.tar.gz
cd etcd-v3.1.12-linux-amd64
mv etcd etcdctl /opt/kubernetes/bin/
mkdir /var/lib/etcd

6.2 Configure each machine's parameters (k8s101 as the example)

cat > /opt/kubernetes/cfg/etcd << EOF
#[Member]
ETCD_NAME="etcd101"
ETCD_DATA_DIR="/var/lib/etcd/default.etcd/"
ETCD_LISTEN_PEER_URLS="https://172.17.8.101:2380"
ETCD_LISTEN_CLIENT_URLS="https://172.17.8.101:2379,http://127.0.0.1:2379"

#[Clustering]
ETCD_INITIAL_ADVERTISE_PEER_URLS="https://172.17.8.101:2380"
ETCD_ADVERTISE_CLIENT_URLS="https://172.17.8.101:2379"
ETCD_INITIAL_CLUSTER="etcd101=https://172.17.8.101:2380,etcd102=https://172.17.8.102:2380,etcd103=https://172.17.8.103:2380"
ETCD_INITIAL_CLUSTER_TOKEN="etcd-cluster"
ETCD_INITIAL_CLUSTER_STATE="new"
EOF
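On the other members only the name and IPs change; for example on k8s102 (a sketch showing just the fields that differ):

# /opt/kubernetes/cfg/etcd on k8s102
ETCD_NAME="etcd102"
ETCD_LISTEN_PEER_URLS="https://172.17.8.102:2380"
ETCD_LISTEN_CLIENT_URLS="https://172.17.8.102:2379,http://127.0.0.1:2379"
ETCD_INITIAL_ADVERTISE_PEER_URLS="https://172.17.8.102:2380"
ETCD_ADVERTISE_CLIENT_URLS="https://172.17.8.102:2379"
# ETCD_DATA_DIR, ETCD_INITIAL_CLUSTER, the token, and the state are identical on every member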

6.3 Configure the etcd service

# Paste the following into /usr/lib/systemd/system/etcd.service
[Unit]
Description=Etcd Server
After=network.target
After=network-online.target
Wants=network-online.target

[Service]
Type=notify
WorkingDirectory=/var/lib/etcd/
EnvironmentFile=-/opt/kubernetes/cfg/etcd
#User=etcd
# set GOMAXPROCS to number of processors
ExecStart=/bin/bash -c "GOMAXPROCS=$(nproc) \
/opt/kubernetes/bin/etcd \
--name=\"${ETCD_NAME}\" \
--data-dir=\"${ETCD_DATA_DIR}\" \
--listen-peer-urls=\"${ETCD_LISTEN_PEER_URLS}\" \
--listen-client-urls=\"${ETCD_LISTEN_CLIENT_URLS}\" \
--advertise-client-urls=\"${ETCD_ADVERTISE_CLIENT_URLS}\" \
--initial-advertise-peer-urls=\"${ETCD_INITIAL_ADVERTISE_PEER_URLS}\" \
--initial-cluster=\"${ETCD_INITIAL_CLUSTER}\" \
--initial-cluster-token=\"${ETCD_INITIAL_CLUSTER_TOKEN}\" \
--initial-cluster-state=\"${ETCD_INITIAL_CLUSTER_STATE}\" \
--cert-file=/opt/kubernetes/ssl/kubernetes.pem \
--key-file=/opt/kubernetes/ssl/kubernetes-key.pem \
--peer-cert-file=/opt/kubernetes/ssl/kubernetes.pem \
--peer-key-file=/opt/kubernetes/ssl/kubernetes-key.pem \
--trusted-ca-file=/opt/kubernetes/ssl/ca.pem \
--peer-trusted-ca-file=/opt/kubernetes/ssl/ca.pem \
--client-cert-auth=\"true\" \
--peer-client-cert-auth=\"true\" \
--auto-tls=\"true\" \
--peer-auto-tls=\"true\""

Restart=on-failure
LimitNOFILE=65536

[Install]
WantedBy=multi-user.target

# Reload systemd so the unit takes effect
systemctl daemon-reload

# Copy the required certificates into place (from the cert directory created in 5.4)
cp ca.pem /opt/kubernetes/ssl/
cp kubernetes.pem /opt/kubernetes/ssl/
cp kubernetes-key.pem /opt/kubernetes/ssl/

# Start the service
systemctl start etcd

# Verify the cluster
/opt/kubernetes/bin/etcdctl --ca-file=/opt/kubernetes/ssl/ca.pem --cert-file=/opt/kubernetes/ssl/kubernetes.pem --key-file=/opt/kubernetes/ssl/kubernetes-key.pem --endpoints=https://172.17.8.101:2379,https://172.17.8.102:2379,https://172.17.8.103:2379 cluster-health

# Alternative check (127.0.0.1:2379 also listens on plain HTTP, so TLS flags are optional locally)
#[root@k8s103 ssl]# curl -s --cacert /opt/kubernetes/ssl/ca.pem https://172.17.8.101:2379/v2/members | jq .
[root@k8s103 ssl]# curl -s http://127.0.0.1:2379/v2/members | jq .
{
  "members": [
    {
      "id": "52a549447e771f7",
      "name": "etcd102",
      "peerURLs": [
        "https://172.17.8.102:2380"
      ],
      "clientURLs": [
        "https://172.17.8.102:2379"
      ]
    },
    {
      "id": "680948716edebf39",
      "name": "etcd103",
      "peerURLs": [
        "https://172.17.8.103:2380"
      ],
      "clientURLs": [
        "https://172.17.8.103:2379"
      ]
    },
    {
      "id": "bf65263f1f4624b4",
      "name": "etcd101",
      "peerURLs": [
        "https://172.17.8.101:2380"
      ],
      "clientURLs": [
        "https://172.17.8.101:2379"
      ]
    }
  ]
}
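As a final smoke test, a key can be written on one member and read back on another (a sketch; the key name is arbitrary, and the local insecure listener avoids the TLS flags):

# Write, read back, and clean up a test key via the v2 API
/opt/kubernetes/bin/etcdctl --endpoints=http://127.0.0.1:2379 set /sanity ok
/opt/kubernetes/bin/etcdctl --endpoints=http://127.0.0.1:2379 get /sanity
/opt/kubernetes/bin/etcdctl --endpoints=http://127.0.0.1:2379 rm /sanity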

7. Download flannel and install it on all Node machines (k8s102 as the example)

# Download flannel v0.10
wget -c https://github.com/coreos/flannel/releases/download/v0.10.0/flannel-v0.10.0-linux-amd64.tar.gz

# Unpack and install the binaries
tar xf flannel-v0.10.0-linux-amd64.tar.gz
mv flanneld mk-docker-opts.sh /opt/kubernetes/bin/

# Create the flannel config file /opt/kubernetes/cfg/flanneld with this content
FLANNEL_OPTIONS="--etcd-endpoints=https://172.17.8.101:2379,https://172.17.8.102:2379,https://172.17.8.103:2379 -etcd-cafile=/opt/kubernetes/ssl/ca.pem -etcd-certfile=/opt/kubernetes/ssl/kubernetes.pem -etcd-keyfile=/opt/kubernetes/ssl/kubernetes-key.pem"

# Create a working directory for flanneld
mkdir -p /opt/kubernetes/run/flanneld/

# Paste the following into /usr/lib/systemd/system/flanneld.service
[Unit]
Description=Flannel overlay address etcd agent
After=network-online.target network.target
Before=docker.service

[Service]
Type=notify
EnvironmentFile=/opt/kubernetes/cfg/flanneld
ExecStart=/opt/kubernetes/bin/flanneld -iface=eth1 --ip-masq $FLANNEL_OPTIONS
ExecStartPost=/opt/kubernetes/bin/mk-docker-opts.sh -k DOCKER_NETWORK_OPTIONS -d /opt/kubernetes/run/flanneld/subnet.env
Restart=on-failure

[Install]
WantedBy=multi-user.target

# Before starting flanneld on the first node, write the vxlan network config into etcd
/opt/kubernetes/bin/etcdctl --ca-file=/opt/kubernetes/ssl/ca.pem --cert-file=/opt/kubernetes/ssl/kubernetes.pem --key-file=/opt/kubernetes/ssl/kubernetes-key.pem --endpoints=https://172.17.8.101:2379,https://172.17.8.102:2379,https://172.17.8.103:2379 set /coreos.com/network/config '{"Network": "172.20.0.0/16", "Backend": {"Type": "vxlan"}}'
# Prefer a 172.x network here

# Start flanneld
systemctl daemon-reload
systemctl start flanneld

# View the network config
/opt/kubernetes/bin/etcdctl --ca-file=/opt/kubernetes/ssl/ca.pem --cert-file=/opt/kubernetes/ssl/kubernetes.pem --key-file=/opt/kubernetes/ssl/kubernetes-key.pem --endpoints=https://172.17.8.101:2379,https://172.17.8.102:2379,https://172.17.8.103:2379 get /coreos.com/network/config

# View the allocated subnets
/opt/kubernetes/bin/etcdctl --ca-file=/opt/kubernetes/ssl/ca.pem --cert-file=/opt/kubernetes/ssl/kubernetes.pem --key-file=/opt/kubernetes/ssl/kubernetes-key.pem --endpoints=https://172.17.8.101:2379,https://172.17.8.102:2379,https://172.17.8.103:2379 ls /coreos.com/network/subnets

# Modify the docker service unit:
# in the [Service] section, add this line above ExecStart
EnvironmentFile=/opt/kubernetes/run/flanneld/subnet.env
# and change ExecStart to
ExecStart=/usr/bin/dockerd $DOCKER_NETWORK_OPTIONS

# Restart docker
systemctl daemon-reload
systemctl restart docker

# Each node's docker0 should now be on a different subnet; ping another node's docker0 address to test
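A cross-node connectivity check might look like this (a sketch; the 172.20.x.x address is an example from the 172.20.0.0/16 range configured above, not a value this cluster is guaranteed to assign):

# On k8s102: see which subnet flannel handed to docker
cat /opt/kubernetes/run/flanneld/subnet.env
ip addr show docker0

# On k8s103: ping k8s102's docker0 address (substitute the real one)
ping -c 3 172.20.24.1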

8.1 Generate the k8s TLS Bootstrapping Token (on any one master)

mkdir kubeconfig && cd kubeconfig
export BOOTSTRAP_TOKEN=$(head -c 16 /dev/urandom | od -An -t x | tr -d ' ')
cat > token.csv <<EOF
${BOOTSTRAP_TOKEN},kubelet-bootstrap,10001,"system:kubelet-bootstrap"
EOF

8.2 Create the kubelet bootstrapping kubeconfig file

# Install the kubectl client
wget -c https://dl.k8s.io/v1.10.5/kubernetes-client-linux-amd64.tar.gz
tar xf kubernetes-client-linux-amd64.tar.gz
cd kubernetes/client/bin
mv kubectl /opt/kubernetes/bin/
export KUBE_APISERVER="https://172.17.8.101:6443"

# Add /opt/kubernetes/bin to PATH
vi ~/.bash_profile
# change the PATH line to: PATH=$PATH:$HOME/bin:/opt/kubernetes/bin
source ~/.bash_profile

# Create the kubelet bootstrapping kubeconfig file
# Set cluster parameters
kubectl config set-cluster kubernetes \
--certificate-authority=/opt/kubernetes/ssl/ca.pem \
--embed-certs=true \
--server=${KUBE_APISERVER} \
--kubeconfig=bootstrap.kubeconfig

# Set client credentials
kubectl config set-credentials kubelet-bootstrap \
--token=${BOOTSTRAP_TOKEN} \
--kubeconfig=bootstrap.kubeconfig

# Set the context
kubectl config set-context default \
--cluster=kubernetes \
--user=kubelet-bootstrap \
--kubeconfig=bootstrap.kubeconfig

# Switch to the default context
kubectl config use-context default --kubeconfig=bootstrap.kubeconfig
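A quick look at the assembled file confirms the cluster, user, and context made it in (kubectl redacts the token in this view):

# Inspect the generated kubeconfig
kubectl config view --kubeconfig=bootstrap.kubeconfig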

8.3 Create the kube-proxy kubeconfig file

# The kube-proxy certs from 5.4 must be in place first (adjust the source path to wherever your cert directory lives)
cp /path/to/cert/kube-proxy*.pem /opt/kubernetes/ssl/

# Set cluster parameters
kubectl config set-cluster kubernetes \
--certificate-authority=/opt/kubernetes/ssl/ca.pem \
--embed-certs=true \
--server=${KUBE_APISERVER} \
--kubeconfig=kube-proxy.kubeconfig
# Set client credentials
kubectl config set-credentials kube-proxy \
--client-certificate=/opt/kubernetes/ssl/kube-proxy.pem \
--client-key=/opt/kubernetes/ssl/kube-proxy-key.pem \
--embed-certs=true \
--kubeconfig=kube-proxy.kubeconfig
# Set the context
kubectl config set-context default \
--cluster=kubernetes \
--user=kube-proxy \
--kubeconfig=kube-proxy.kubeconfig
# Switch to the default context
kubectl config use-context default --kubeconfig=kube-proxy.kubeconfig

9. Copy the kubeconfig files just created to every node (any location for now; a loop sketch follows the commands)

scp kube-proxy.kubeconfig node1_ip:~/
...
scp bootstrap.kubeconfig node1_ip:~/
...
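With several nodes the copy is easier as a loop (a sketch; assumes root SSH access to the node IPs used above):

# Push both kubeconfig files to every node
for node in 172.17.8.102 172.17.8.103; do
  scp bootstrap.kubeconfig kube-proxy.kubeconfig ${node}:~/
done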

10. Install the master

rm -rf kubernetes
wget -c https://dl.k8s.io/v1.10.5/kubernetes-server-linux-amd64.tar.gz
tar xf kubernetes-server-linux-amd64.tar.gz
cd kubernetes/server/bin

# Move the binaries into place
mv kube-apiserver /opt/kubernetes/bin/
mv kube-controller-manager kube-scheduler /opt/kubernetes/bin/

# Save the following as apiserver.sh
#!/bin/bash
MASTER_ADDRESS=${1:-"192.168.1.195"}
ETCD_SERVERS=${2:-"http://127.0.0.1:2379"}
cat <<EOF >/opt/kubernetes/cfg/kube-apiserver
KUBE_APISERVER_OPTS="--logtostderr=true \\
--v=4 \\
--etcd-servers=${ETCD_SERVERS} \\
--insecure-bind-address=127.0.0.1 \\
--bind-address=${MASTER_ADDRESS} \\
--insecure-port=8080 \\
--secure-port=6443 \\
--advertise-address=${MASTER_ADDRESS} \\
--allow-privileged=true \\
--service-cluster-ip-range=10.254.0.0/16 \\
--admission-control=NamespaceLifecycle,LimitRanger,SecurityContextDeny,ServiceAccount,ResourceQuota,NodeRestriction \\
--authorization-mode=RBAC,Node \\
--kubelet-https=true \\
--enable-bootstrap-token-auth \\
--token-auth-file=/opt/kubernetes/cfg/token.csv \\
--service-node-port-range=30000-50000 \\
--tls-cert-file=/opt/kubernetes/ssl/kubernetes.pem \\
--tls-private-key-file=/opt/kubernetes/ssl/kubernetes-key.pem \\
--client-ca-file=/opt/kubernetes/ssl/ca.pem \\
--service-account-key-file=/opt/kubernetes/ssl/ca-key.pem \\
--etcd-cafile=/opt/kubernetes/ssl/ca.pem \\
--etcd-certfile=/opt/kubernetes/ssl/kubernetes.pem \\
--etcd-keyfile=/opt/kubernetes/ssl/kubernetes-key.pem"
EOF

cat <<EOF >/usr/lib/systemd/system/kube-apiserver.service
[Unit]
Description=Kubernetes API Server
Documentation=https://github.com/kubernetes/kubernetes

[Service]
EnvironmentFile=-/opt/kubernetes/cfg/kube-apiserver
ExecStart=/opt/kubernetes/bin/kube-apiserver \$KUBE_APISERVER_OPTS
Restart=on-failure

[Install]
WantedBy=multi-user.target
EOF

systemctl daemon-reload
systemctl enable kube-apiserver
systemctl restart kube-apiserver


# Copy the token generated in 8.1 into place first (kube-apiserver reads it at startup)
cd ~/kubeconfig
cp token.csv /opt/kubernetes/cfg/

# Run apiserver.sh
./apiserver.sh 172.17.8.101 https://172.17.8.101:2379,https://172.17.8.102:2379,https://172.17.8.103:2379

# Make sure kube-apiserver is running (the script already restarts it)
systemctl status kube-apiserver
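The insecure port makes a quick health check easy (a sketch; /healthz is the apiserver's standard health endpoint):

# The apiserver should answer "ok" on its local insecure port
curl http://127.0.0.1:8080/healthz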

# Save the following as controller-manager.sh
#!/bin/bash
MASTER_ADDRESS=${1:-"127.0.0.1"}
cat <<EOF >/opt/kubernetes/cfg/kube-controller-manager
KUBE_CONTROLLER_MANAGER_OPTS="--logtostderr=true \\
--v=4 \\
--master=${MASTER_ADDRESS}:8080 \\
--leader-elect=true \\
--address=127.0.0.1 \\
--service-cluster-ip-range=10.254.0.0/16 \\
--cluster-name=kubernetes \\
--cluster-signing-cert-file=/opt/kubernetes/ssl/ca.pem \\
--cluster-signing-key-file=/opt/kubernetes/ssl/ca-key.pem \\
--service-account-private-key-file=/opt/kubernetes/ssl/ca-key.pem \\
--root-ca-file=/opt/kubernetes/ssl/ca.pem"
EOF

cat <<EOF >/usr/lib/systemd/system/kube-controller-manager.service
[Unit]
Description=Kubernetes Controller Manager
Documentation=https://github.com/kubernetes/kubernetes

[Service]
EnvironmentFile=-/opt/kubernetes/cfg/kube-controller-manager
ExecStart=/opt/kubernetes/bin/kube-controller-manager \$KUBE_CONTROLLER_MANAGER_OPTS
Restart=on-failure

[Install]
WantedBy=multi-user.target
EOF

systemctl daemon-reload
systemctl enable kube-controller-manager
systemctl restart kube-controller-manager

# Install and start the controller-manager
./controller-manager.sh 127.0.0.1

# Verify it started
systemctl status kube-controller-manager

# Save the following as scheduler.sh
#!/bin/bash
MASTER_ADDRESS=${1:-"127.0.0.1"}
cat <<EOF >/opt/kubernetes/cfg/kube-scheduler
KUBE_SCHEDULER_OPTS="--logtostderr=true \\
--v=4 \\
--master=${MASTER_ADDRESS}:8080 \\
--leader-elect"
EOF

cat <<EOF >/usr/lib/systemd/system/kube-scheduler.service
[Unit]
Description=Kubernetes Scheduler
Documentation=https://github.com/kubernetes/kubernetes

[Service]
EnvironmentFile=-/opt/kubernetes/cfg/kube-scheduler
ExecStart=/opt/kubernetes/bin/kube-scheduler \$KUBE_SCHEDULER_OPTS
Restart=on-failure

[Install]
WantedBy=multi-user.target
EOF

systemctl daemon-reload
systemctl enable kube-scheduler
systemctl restart kube-scheduler

# Install and start kube-scheduler
./scheduler.sh 127.0.0.1

# Check it is running
systemctl status kube-scheduler

# Verify that all components are healthy
kubectl get cs
# Expected output:
NAME                 STATUS    MESSAGE              ERROR
scheduler            Healthy   ok
controller-manager   Healthy   ok
etcd-0               Healthy   {"health": "true"}
etcd-2               Healthy   {"health": "true"}
etcd-1               Healthy   {"health": "true"}
# The master is now fully configured

kubectl get svc
# To change the service network later, update the kube-apiserver and kube-controller-manager configs,
# restart both services, then delete the kubernetes service so it is recreated with the new range:

#kubectl delete svc kubernetes

11. Install and configure the nodes

  • Recommended: add KUBELET_EXTRA_ARGS="--fail-swap-on=false" to the kubelet parameters so swap is ignored
  • For kubelet v1.11.1, KUBE_PROXY_MODE=ipvs is the recommended proxy mode
    # On the master: create the role binding
    kubectl create clusterrolebinding kubelet-bootstrap --clusterrole=system:node-bootstrapper --user=kubelet-bootstrap

    # Everything below runs on the node
    # Put the kubeconfig files copied over in step 9 into place
    cd kubeconfig/
    cp *kubeconfig /opt/kubernetes/cfg/

    # Download the node binaries
    wget -c https://dl.k8s.io/v1.10.5/kubernetes-node-linux-amd64.tar.gz
    tar xf kubernetes-node-linux-amd64.tar.gz
    cd kubernetes/node/bin/
    mv kubelet kube-proxy /opt/kubernetes/bin/

    # Save the following as kubelet.sh
    #!/bin/bash
    NODE_ADDRESS=${1:-"192.168.1.196"}
    DNS_SERVER_IP=${2:-"10.10.10.2"}
    cat <<EOF >/opt/kubernetes/cfg/kubelet
    KUBELET_OPTS="--logtostderr=true \\
    --v=4 \\
    --address=${NODE_ADDRESS} \\
    --hostname-override=${NODE_ADDRESS} \\
    --kubeconfig=/opt/kubernetes/cfg/kubelet.kubeconfig \\
    --experimental-bootstrap-kubeconfig=/opt/kubernetes/cfg/bootstrap.kubeconfig \\
    --cert-dir=/opt/kubernetes/ssl \\
    --allow-privileged=true \\
    --cluster-dns=${DNS_SERVER_IP} \\
    --cluster-domain=cluster.local \\
    --fail-swap-on=false \\
    --pod-infra-container-image=registry.cn-hangzhou.aliyuncs.com/google-containers/pause-amd64:3.0"

    EOF

    cat <<EOF >/usr/lib/systemd/system/kubelet.service
    [Unit]
    Description=Kubernetes Kubelet
    After=docker.service
    Requires=docker.service

    [Service]
    EnvironmentFile=-/opt/kubernetes/cfg/kubelet
    ExecStart=/opt/kubernetes/bin/kubelet \$KUBELET_OPTS
    Restart=on-failure
    KillMode=process

    [Install]
    WantedBy=multi-user.target
    EOF

    systemctl daemon-reload
    systemctl enable kubelet
    systemctl restart kubelet

    # Run the script
    bash kubelet.sh 172.17.8.102 10.254.0.2

    # Save the following as proxy.sh
    #!/bin/bash
    NODE_ADDRESS=${1:-"192.168.1.200"}
    cat <<EOF >/opt/kubernetes/cfg/kube-proxy
    KUBE_PROXY_OPTS="--logtostderr=true \\
    --v=4 \\
    --hostname-override=${NODE_ADDRESS} \\
    --kubeconfig=/opt/kubernetes/cfg/kube-proxy.kubeconfig"
    EOF

    cat <<EOF >/usr/lib/systemd/system/kube-proxy.service
    [Unit]
    Description=Kubernetes Proxy
    After=network.target

    [Service]
    EnvironmentFile=-/opt/kubernetes/cfg/kube-proxy
    ExecStart=/opt/kubernetes/bin/kube-proxy \$KUBE_PROXY_OPTS
    Restart=on-failure

    [Install]
    WantedBy=multi-user.target
    EOF

    systemctl daemon-reload
    systemctl enable kube-proxy
    systemctl restart kube-proxy

    # Run proxy.sh
    bash proxy.sh 172.17.8.102

    # Back on the master: the node's CSR shows up as Pending
    kubectl get csr
    NAME                                                   AGE   REQUESTOR           CONDITION
    node-csr-6oXgQziElgXRb1eF0Q986YHP8tmmVcJVka1PD8Ox0l4   2m    kubelet-bootstrap   Pending

    # Approve it on the master
    kubectl certificate approve node-csr-6oXgQziElgXRb1eF0Q986YHP8tmmVcJVka1PD8Ox0l4

    # The status changes once approved
    kubectl get csr
    NAME                                                   AGE   REQUESTOR           CONDITION
    node-csr-6oXgQziElgXRb1eF0Q986YHP8tmmVcJVka1PD8Ox0l4   4m    kubelet-bootstrap   Approved,Issued

    # List the nodes
    kubectl get nodes
    NAME           STATUS   ROLES    AGE   VERSION
    172.17.8.102   Ready    <none>   1m    v1.10.5

    # Repeat the same steps on the other nodes; a node is usable once its status is Ready

    # Run a test deployment (a verification sketch follows this block)
    kubectl run nginx --image=nginx --replicas=3
    kubectl scale --replicas=4 deployment/nginx
    kubectl expose deployment nginx --port=88 --target-port=80 --type=NodePort
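The exposed service can then be checked from any machine that reaches the nodes (a sketch; the NodePort is whatever value in the 30000-50000 range `kubectl get svc nginx` reports):

# Look up the assigned NodePort, then hit it on any node IP
kubectl get svc nginx
curl -I http://172.17.8.102:<nodeport>   # replace <nodeport> with the reported port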

12. Configure a kubectl client

# Copy admin.pem, admin-key.pem, and ca.pem to the machine you want to configure
# Download kubectl on that machine, then:
kubectl config set-cluster kubernetes --server=https://172.17.8.101:6443 --certificate-authority=ca.pem

kubectl config set-credentials cluster-admin --client-key=admin-key.pem --client-certificate=admin.pem

kubectl config set-context default --cluster=kubernetes --user=cluster-admin

kubectl config use-context default

# Test
kubectl get cs
kubectl get csr

# The configuration is written to ~/.kube/config automatically

13.1 Dashboard yaml files

# Save the following as dashboard-rbac.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
  labels:
    k8s-app: kubernetes-dashboard
    addonmanager.kubernetes.io/mode: Reconcile
  name: kubernetes-dashboard
  namespace: kube-system
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
  name: kubernetes-dashboard-minimal
  namespace: kube-system
  labels:
    k8s-app: kubernetes-dashboard
    addonmanager.kubernetes.io/mode: Reconcile
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: cluster-admin
subjects:
- kind: ServiceAccount
  name: kubernetes-dashboard
  namespace: kube-system

# Save the following as dashboard-deployment.yaml
apiVersion: apps/v1beta2
kind: Deployment
metadata:
  name: kubernetes-dashboard
  namespace: kube-system
  labels:
    k8s-app: kubernetes-dashboard
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
spec:
  selector:
    matchLabels:
      k8s-app: kubernetes-dashboard
  template:
    metadata:
      labels:
        k8s-app: kubernetes-dashboard
      annotations:
        scheduler.alpha.kubernetes.io/critical-pod: ''
    spec:
      serviceAccountName: kubernetes-dashboard
      containers:
      - name: kubernetes-dashboard
        image: registry.cn-hangzhou.aliyuncs.com/google_containers/kubernetes-dashboard-amd64:v1.7.1
        resources:
          limits:
            cpu: 100m
            memory: 300Mi
          requests:
            cpu: 100m
            memory: 100Mi
        ports:
        - containerPort: 9090
          protocol: TCP
        livenessProbe:
          httpGet:
            scheme: HTTP
            path: /
            port: 9090
          initialDelaySeconds: 30
          timeoutSeconds: 30
      tolerations:
      - key: "CriticalAddonsOnly"
        operator: "Exists"

# Save the following as dashboard-service.yaml
apiVersion: v1
kind: Service
metadata:
  name: kubernetes-dashboard
  namespace: kube-system
  labels:
    k8s-app: kubernetes-dashboard
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
spec:
  type: NodePort
  selector:
    k8s-app: kubernetes-dashboard
  ports:
  - port: 80
    targetPort: 9090

13.2 Install the dashboard

kubectl create -f dashboard-rbac.yaml
kubectl create -f dashboard-deployment.yaml
kubectl create -f dashboard-service.yaml

# Check the service
kubectl get svc -n kube-system
NAME                   TYPE       CLUSTER-IP       EXTERNAL-IP   PORT(S)        AGE
kubernetes-dashboard   NodePort   10.254.116.162   <none>        80:31523/TCP   1d

# 31523 is the assigned NodePort; now find where the pod landed
kubectl get pod -o wide -n kube-system
NAME                                   READY   STATUS    RESTARTS   AGE   IP            NODE
kubernetes-dashboard-b9f5f9d87-jszl5   1/1     Running   0          1d    172.68.24.2   172.17.8.102

# The dashboard pod is on node 172.17.8.102 (a NodePort is reachable via any node's IP)
# Open http://172.17.8.102:31523 to access the dashboard
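A headless check that it answers (a sketch; substitute the NodePort assigned in your cluster):

# Expect an HTTP 200 from the dashboard's NodePort
curl -I http://172.17.8.102:31523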

14.1 Save the following as coredns.yaml

apiVersion: v1
kind: ServiceAccount
metadata:
  name: coredns
  namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRole
metadata:
  labels:
    kubernetes.io/bootstrapping: rbac-defaults
  name: system:coredns
rules:
- apiGroups:
  - ""
  resources:
  - endpoints
  - services
  - pods
  - namespaces
  verbs:
  - list
  - watch
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
  annotations:
    rbac.authorization.kubernetes.io/autoupdate: "true"
  labels:
    kubernetes.io/bootstrapping: rbac-defaults
  name: system:coredns
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: system:coredns
subjects:
- kind: ServiceAccount
  name: coredns
  namespace: kube-system
---
apiVersion: v1
kind: ConfigMap
metadata:
  name: coredns
  namespace: kube-system
data:
  Corefile: |
    .:53 {
        errors
        health
        kubernetes cluster.local 10.254.0.0/16 {
            pods insecure
            upstream
            fallthrough in-addr.arpa ip6.arpa
        }
        prometheus :9153
        proxy . /etc/resolv.conf
        cache 30
        reload
    }
---
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
  name: coredns
  namespace: kube-system
  labels:
    k8s-app: kube-dns
    kubernetes.io/name: "CoreDNS"
spec:
  replicas: 2
  strategy:
    type: RollingUpdate
    rollingUpdate:
      maxUnavailable: 1
  selector:
    matchLabels:
      k8s-app: kube-dns
  template:
    metadata:
      labels:
        k8s-app: kube-dns
    spec:
      serviceAccountName: coredns
      tolerations:
      - key: "CriticalAddonsOnly"
        operator: "Exists"
      containers:
      - name: coredns
        image: index.tenxcloud.com/xiangyu123/coredns:1.1.3
        imagePullPolicy: IfNotPresent
        args: [ "-conf", "/etc/coredns/Corefile" ]
        volumeMounts:
        - name: config-volume
          mountPath: /etc/coredns
          readOnly: true
        ports:
        - containerPort: 53
          name: dns
          protocol: UDP
        - containerPort: 53
          name: dns-tcp
          protocol: TCP
        - containerPort: 9153
          name: metrics
          protocol: TCP
        securityContext:
          allowPrivilegeEscalation: false
          capabilities:
            add:
            - NET_BIND_SERVICE
            drop:
            - all
          readOnlyRootFilesystem: true
        livenessProbe:
          httpGet:
            path: /health
            port: 8080
            scheme: HTTP
          initialDelaySeconds: 60
          timeoutSeconds: 5
          successThreshold: 1
          failureThreshold: 5
      dnsPolicy: Default
      volumes:
      - name: config-volume
        configMap:
          name: coredns
          items:
          - key: Corefile
            path: Corefile
---
apiVersion: v1
kind: Service
metadata:
  name: kube-dns
  namespace: kube-system
  annotations:
    prometheus.io/port: "9153"
    prometheus.io/scrape: "true"
  labels:
    k8s-app: kube-dns
    kubernetes.io/cluster-service: "true"
    kubernetes.io/name: "CoreDNS"
spec:
  selector:
    k8s-app: kube-dns
  clusterIP: 10.254.0.2
  ports:
  - name: dns
    port: 53
    protocol: UDP
  - name: dns-tcp
    port: 53
    protocol: TCP

14.2 Install CoreDNS

kubectl create -f coredns.yaml
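Once the coredns pods are Running, in-cluster DNS can be verified with a throwaway pod (a sketch; busybox:1.28 is used because nslookup misbehaves in newer busybox images):

# Resolve the kubernetes service through CoreDNS
kubectl run -it --rm dns-test --image=busybox:1.28 --restart=Never -- nslookup kubernetes.default
# It should resolve to the service IP 10.254.0.1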