k8s搭建devops环境

主要内容:

  • 使用kubeadm搭建kubernetes环境
  • 安装flannel网络插件
  • 搭建nfs服务器
  • 安装nfs provisioner
  • 安装helm
  • 安装nginx ingress
  • 安装Jenkins
  • 安装gitlab
  • 安装harbor

具体步骤:

安装ansible、expect

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
#!/bin/bash
# Batch SSH-key authorization script (plssh.sh): generates a local key pair,
# pushes it to every node with ssh-copy-id, and registers the nodes in the
# Ansible inventory group $flag.
# Author: Ropon
# Blog: https://www.ropon.top

# Map of short host aliases to node IP addresses.
declare -A CserverLst
CserverLst=([s1]="192.168.8.151" [s2]="192.168.8.152")
cport="22"                          # SSH port on every node
cpasswd="ropon.top"                 # root password used by ssh-copy-id
ansible_host="/etc/ansible/hosts"   # Ansible inventory file
tmpsshfile="/tmp/ssh.exp"           # scratch expect script, rewritten per host
flag="k8snode"                      # inventory group name

yum install -y ansible expect

# Generate the key pair non-interactively: accept every ssh-keygen prompt.
# Quoted delimiter: the expect script must reach the file literally.
cat > "$tmpsshfile" << 'EOF'
#!/usr/bin/expect
spawn ssh-keygen
expect {
"*.ssh/id_rsa*" {exp_send "\r";exp_continue}
"*passphrase*" {exp_send "\r";exp_continue}
"*again*" {exp_send "\r"}
}
EOF
expect "$tmpsshfile"
sleep 1
echo "[$flag]" >> "$ansible_host"

# Push the public key to every node and add it to the inventory.
# "${!CserverLst[@]}" (quoted, @) iterates keys safely; [*] unquoted would
# word-split. Unquoted heredoc: host/port/password expand at generation time.
for key in "${!CserverLst[@]}"; do
cat > "$tmpsshfile" << EOF
#!/usr/bin/expect
spawn ssh-copy-id ${CserverLst[$key]} -p ${cport}
expect {
"*yes/no*" {exp_send "yes\r";exp_continue}
"*password*" {exp_send "${cpasswd}\r";exp_continue}
}
EOF
expect "$tmpsshfile"
echo "${CserverLst[$key]} ansible_ssh_port=${cport}" >> "$ansible_host"
done

# Verify connectivity to the whole group.
ansible "$flag" -m ping

kubernetes

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
# Requirements: at least 2 machines with 2 CPU cores / 4 GB RAM each.
# Check the OS release and CPU count on the master and every node:
cat /etc/redhat-release
lscpu | grep CPU        # fixed: original "lscpugrep CPU" was missing the pipe
# Give each machine a unique hostname (one command per machine).
hostnamectl set-hostname master01   # on the master
hostnamectl set-hostname node01     # on node 1
hostnamectl set-hostname node02     # on node 2
# Confirm the change.
hostnamectl status
# Populate /etc/hosts so the nodes can resolve each other by name.
echo "127.0.0.1 $(hostname)" >> /etc/hosts
cat >> /etc/hosts << EOF
192.168.8.150 master01
192.168.8.151 node01
192.168.8.152 node02
EOF
# Disable the firewall (kubeadm clusters manage their own iptables rules).
systemctl disable firewalld
systemctl stop firewalld
systemctl disable iptables
systemctl stop iptables
# Disable SELinux: permanently via the config file, immediately via setenforce.
sed -i 's/SELINUX=enforcing/SELINUX=disabled/' /etc/selinux/config
setenforce 0
# Disable swap (kubelet refuses to start while swap is enabled).
swapoff -a
sed -i.bak '/swap/s/^/#/' /etc/fstab

# Generate netfilter.sh and run it on every master/node via ansible.
# Written with a QUOTED heredoc so the body reaches netfilter.sh literally:
# the original double-quoted echo expanded the first $file at generation time
# (corrupting the generated /etc/rc.sysinit) and "lsmod grep" lacked a pipe.
cat > netfilter.sh << 'NETFILTER'
#!/bin/bash
# Author: Ropon
# Blog: https://www.ropon.top
# Load br_netfilter and the IPVS kernel modules, and arrange for them to be
# reloaded on every boot via /etc/sysconfig/modules.
cat > /etc/sysconfig/modules/br_netfilter_ipvs.modules << EOF
modprobe br_netfilter
modprobe ip_vs
modprobe ip_vs_rr
modprobe ip_vs_wrr
modprobe ip_vs_sh
modprobe nf_conntrack_ipv4
EOF
chmod 755 /etc/sysconfig/modules/br_netfilter_ipvs.modules
# rc.sysinit runs every executable *.modules file at boot.
# \$file stays escaped here so the inner heredoc writes a literal $file.
cat > /etc/rc.sysinit << EOF
#!/bin/bash
for file in /etc/sysconfig/modules/*.modules ; do
[ -x \$file ] && \$file
done
EOF
# Kernel parameters required for bridged pod traffic (flannel/kube-proxy).
cat > /etc/sysctl.d/k8s.conf << EOF
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
net.ipv4.ip_forward = 1
EOF
modprobe br_netfilter
sysctl -p /etc/sysctl.d/k8s.conf
lsmod | grep br_netfilter
NETFILTER

# Distribute and execute on the whole k8snode group.
ansible k8snode -m copy -a 'src=/root/netfilter.sh dest=/root/netfilter.sh mode=744'
ansible k8snode -m shell -a 'bash /root/netfilter.sh'
# Write yum.sh, which points a node at the Aliyun Kubernetes yum mirror and
# refreshes the metadata cache, then distribute and run it on the k8snode
# group. A quoted heredoc keeps the script body (including the inner EOF
# marker) literal; the generated content is identical to the original.
cat > yum.sh << 'YUMSH'
#设置源
cat <<EOF > /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64/
enabled=1
gpgcheck=1
repo_gpgcheck=1
gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
EOF
yum clean all
yum makecache fast -y
YUMSH

ansible k8snode -m copy -a 'src=/root/yum.sh dest=/root/yum.sh mode=744'
ansible k8snode -m shell -a 'bash /root/yum.sh'

# Install docker on every node: write docker.sh (pinned docker-ce 18.09 plus
# a daemon.json that switches the cgroup driver to systemd, as kubeadm
# expects), then distribute and run it via ansible. A quoted heredoc replaces
# the original echo; the generated script content is byte-identical.
cat > docker.sh << 'DOCKERSH'
yum install -y yum-utils device-mapper-persistent-data lvm2
yum-config-manager --add-repo https://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo
yum install docker-ce-18.09.0 docker-ce-cli-18.09.0 containerd.io-1.2.13 -y
mkdir -p /etc/docker
#k8s推荐使用systemd,然而docker默认以ccgroups方式启动,故做以下修改
#k8s配置文件/var/lib/kubelet/kubeadm-flags.env
tee /etc/docker/daemon.json <<-'EOF'
{
"exec-opts": ["native.cgroupdriver=systemd"],
"registry-mirrors": ["https://xxx.mirror.aliyuncs.com"]
}
EOF
systemctl daemon-reload
systemctl start docker
systemctl enable docker
DOCKERSH

ansible k8snode -m copy -a 'src=/root/docker.sh dest=/root/docker.sh mode=744'
ansible k8snode -m shell -a 'bash /root/docker.sh'

# Install the Kubernetes packages (pinned to 1.16.9) on master and all nodes.
echo "yum install -y kubelet-1.16.9 kubeadm-1.16.9 kubectl-1.16.9
systemctl enable kubelet" > k8s.sh

ansible k8snode -m copy -a 'src=/root/k8s.sh dest=/root/k8s.sh mode=744'
ansible k8snode -m shell -a 'bash /root/k8s.sh'

# Initialise the control plane (master only). The pod CIDR must match the
# flannel manifest; the Aliyun image repository avoids pulling from gcr.io.
kubeadm init --kubernetes-version=1.16.9 \
--apiserver-advertise-address=192.168.8.150 \
--image-repository registry.aliyuncs.com/google_containers \
--service-cidr=10.96.0.0/12 \
--pod-network-cidr=10.244.0.0/16
# Install the flannel CNI plugin.
kubectl apply -f kube-flannel.yml
# Configure kubectl for root on the master.
mkdir -p /root/.kube
cp /etc/kubernetes/admin.conf /root/.kube/config
# Join each worker node (token and hash come from the kubeadm init output).
kubeadm join 192.168.8.150:6443 --token xxxxxxxxxxxxxxxx \
--discovery-token-ca-cert-hash sha256:xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
# Shell completion plus a short alias for kubectl.
alias k=kubectl
yum install -y bash-completion
source /usr/share/bash-completion/bash_completion
source <(kubectl completion bash)
cd ~;echo "source <(kubectl completion bash)" >> .bashrc
# Copy the admin kubeconfig to the workers so kubectl works there too.
mkdir -p /root/.kube
cp /etc/kubernetes/admin.conf /root/.kube/config
scp -P 22 /root/.kube/config node01:/root/.kube/config
scp -P 22 /root/.kube/config node02:/root/.kube/config
# Smoke test: every node should be listed (nodes are not namespaced, so the
# original "-A" flag was a no-op).
kubectl get node
# Set up the NFS server on the master and export /home/k8sdata to all nodes.
yum install nfs-utils rpcbind -y
systemctl enable rpcbind.service
systemctl enable nfs.service
mkdir /home/k8sdata
chown nfsnobody.nfsnobody /home/k8sdata
echo "/home/k8sdata 192.168.8.150(rw,sync,root_squash) 192.168.8.151(rw,sync,root_squash) 192.168.8.152(rw,sync,root_squash)">>/etc/exports
systemctl start rpcbind
systemctl start nfs
showmount -e localhost
# Verify the export can actually be mounted.
showmount -e 192.168.8.150
mkdir /test
mount 192.168.8.150:/home/k8sdata /test/
cd /test/
echo "ok" > test.txt
# Install the NFS provisioner (fixed: the original omitted the "apply" verb,
# so "kubectl -f …" failed with an unknown-flag error).
kubectl apply -f rbac.yaml
kubectl apply -f storageclass-nfs.yaml
# NOTE: edit deployment-nfs.yaml first so it points at the NFS server above.
kubectl apply -f deployment-nfs.yaml
# Install helm v3 from a pre-downloaded tarball.
wget http://panel.ropon.top/soft/helm-v3.2.4-linux-amd64.tar.gz
tar xf helm-v3.2.4-linux-amd64.tar.gz
mv linux-amd64/helm /usr/bin
helm version
rm -rf helm-v3.2.4-linux-amd64.tar.gz linux-amd64/
# Add a China-mirror chart repository.
helm repo add stable https://kubernetes.oss-cn-hangzhou.aliyuncs.com/charts
helm repo update
# Install nginx ingress. First widen the NodePort range by adding
# "- --service-node-port-range=1-65535" to
# /etc/kubernetes/manifests/kube-apiserver.yaml, then restart kubelet.
systemctl daemon-reload
systemctl restart kubelet
kubectl apply -f ingress.yaml
kubectl apply -f ingress-svc.yaml
#安装jenkins
helm search repo stable/jenkins
helm pull stable/jenkins
#修改values.yaml文件
Image: "jenkinsci/blueocean"
ImageTag: "latest"
ImagePullPolicy: "IfNotPresent"
HostName: jenkins.ropon.top
AdminPassword: xxxxxx
#修改Jenkins时间
JavaOpts: >
-Djava.awt.headless=true
-Dorg.apache.commons.jelly.tags.fmt.timeZone=Asia/Shanghai
-Dfile.encoding=UTF-8
ServiceType: ClusterIP
#LoadBalancerSourceRanges:
#- 0.0.0.0/0
#取消自动安装插件
InstallPlugins:
#- kubernetes:1.1
#- workflow-aggregator:2.5
#- workflow-job:2.15
#- credentials-binding:1.13
#- git:3.6.4
StorageClass: "managed-nfs-storage"
rbac:
install: true
helm install jenkins .
#安装jenkins插件
#更新源(web面板修改)
https://mirrors.tuna.tsinghua.edu.cn/jenkins/updates/update-center.json
cd /var/jenkins_home/updates
sed -i 's/http:\/\/updates.jenkins-ci.org\/download/https:\/\/mirrors.tuna.tsinghua.edu.cn\/jenkins/g' default.json && sed -i 's/http:\/\/www.google.com/https:\/\/www.baidu.com/g' default.json
#手工安装以下插件
Chinese
pipeline
kubernetes
gitlab
# Install GitLab CE as a standalone docker container (outside the cluster).
# The heredoc writes gitlab-setup.sh; backslash-escaped "\\" continuations
# become single "\" line continuations in the generated script.
cat > gitlab-setup.sh << EOF
#!/bin/bash
mkdir -p /home/gitlab
docker run --detach \\
--hostname xxxx.ropon.top \\
--env GITLAB_OMNIBUS_CONFIG="external_url 'http://xxxx.ropon.top/'; gitlab_rails['gitlab_shell_ssh_port'] = 6022;" \\
--publish 443:443 --publish 80:80 --publish 6022:22 \\
--name gitlab \\
--restart always \\
--volume /home/gitlab/config:/etc/gitlab \\
--volume /home/gitlab/logs:/var/log/gitlab \\
--volume /home/gitlab/data:/var/opt/gitlab \\
--cpus 2 \\
--memory 2048MB \\
gitlab/gitlab-ce:11.2.2-ce.0
EOF
sh gitlab-setup.sh
# Enable HTTPS: the next four lines are gitlab.rb settings to add by hand
# (NOTE(review): they are notes, not shell commands — running this block
# verbatim would try to execute the config-file path).
/etc/gitlab/gitlab.rb
nginx['redirect_http_to_https'] =true
nginx['ssl_certificate'] = "/etc/gitlab/ssl/server.crt"
nginx['ssl_certificate_key'] = "/etc/gitlab/ssl/server.key"
# Install Harbor from the offline installer (docker-compose based).
wget http://panel.ropon.top/k8s/harbor-offline-installer-v1.8.2.tgz
tar xf harbor-offline-installer-v1.8.2.tgz
# Edit harbor.yml — the lines below show the values to set (not shell):
hostname: xxxx.ropon.top
# Enable HTTPS with the certificate pair below:
port: 443
certificate: /home/harbor/ropon.top.crt
private_key: /home/harbor/ropon.top.key
# Download docker-compose (required by the Harbor installer).
wget http://panel.ropon.top/soft/docker-compose-Linux-x86_64
mv docker-compose-Linux-x86_64 /usr/bin/docker-compose
./prepare
./install.sh
# Jenkins "Kubernetes cloud" settings (entered in the Jenkins web UI; the
# original listed them as bare lines that would fail if executed):
#   Kubernetes URL:  https://kubernetes.default
#   Namespace:       default
#   Jenkins URL:     http://jenkins.default:8080
#   Jenkins tunnel:  jenkins-agent.default:50000
# Configure GitLab integration:
#   create a pipeline job and tick "Build when a change is pushed to GitLab";
#   in GitLab: Admin area => Settings => Outbound requests (allow local),
#   then project => Settings => Integrations to register the webhook.
# Create the pull secret deployments use to fetch images from Harbor.
kubectl create secret docker-registry hellogoregistrykey --docker-server=xxxx.ropon.top --docker-username=admin --docker-password=xxxxxx --docker-email=ropon@ropon.top
# Switch kube-proxy from iptables to ipvs: set mode: "ipvs" in the ConfigMap.
kubectl -n kube-system edit cm kube-proxy
# Delete the old kube-proxy pods so they restart in ipvs mode
# (fixed: the original chained grep/awk without the pipes).
kubectl get pod -n kube-system | grep kube-proxy | awk '{system("kubectl delete pod "$1" -n kube-system")}'

Pipeline

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
// Scripted Jenkins pipeline: check the project out of GitLab, build and push
// its docker image to Harbor, deploy it to Kubernetes, then send a message.

// Build parameters and Jenkins credential IDs.
def gitlabUrl = "gitlab.ropon.top"
def harborUrl = "harbor.ropon.top"
def GroupName = "testgo"
def projectName = "hellogo"
def imageTag = "latest"
def kubectlImage = "lachlanevenson/k8s-kubectl:v1.16.9"
def branchName = "master"
def gitAuthName = "gitlab-auth-user"
def harborAuthName = "harbor-auth-user"
def sendmsgAuthName = "sendmsg-auth-user"
def msgText = "构建完成,请测试"

// Agent pod template: a docker container (talks to the host daemon through
// the mounted /var/run/docker.sock) and a kubectl container that reuses the
// host's kubeconfig from /root/.kube.
podTemplate(cloud: 'kubernetes',containers: [
containerTemplate(name: 'docker', image: 'docker:stable', command: 'cat', ttyEnabled: true),
containerTemplate(name: 'kubectl', image: "${kubectlImage}", command: 'cat', ttyEnabled: true)
],
volumes: [
hostPathVolume(hostPath: '/var/run/docker.sock', mountPath: '/var/run/docker.sock'),
hostPathVolume(hostPath: '/root/.kube', mountPath: '/root/.kube')
]
)

{
node (POD_LABEL) {
// Fetch the configured branch from GitLab with the stored credentials.
stage('pull code') {
checkout([$class: 'GitSCM', branches: [[name: "*/${branchName}"]], doGenerateSubmoduleConfigurations: false, extensions: [], submoduleCfg: [], userRemoteConfigs: [[credentialsId: "${gitAuthName}", url: "http://${gitlabUrl}/${GroupName}/${projectName}.git"]]])
}
container('docker') {
// Build the image, push it to Harbor, then remove the local copies to
// keep the agent node's docker cache small.
stage('docker-build') {
withCredentials([usernamePassword(credentialsId: "${harborAuthName}", passwordVariable: 'password', usernameVariable: 'username')]) {
sh "docker login -u $username -p $password $harborUrl"
}
sh "docker build -t ${projectName}:${imageTag} ."
def imageName = "${projectName}:${imageTag}"
def remoteImageName = "${harborUrl}/${GroupName}/${imageName}"
sh "docker tag $imageName $remoteImageName"
sh "docker push $remoteImageName"
sh "docker rmi $imageName"
sh "docker rmi $remoteImageName"
}
}
container('kubectl') {
// Apply the deployment manifest from the checked-out repository.
stage('k8s deploy') {
sh "kubectl --kubeconfig=/root/.kube/config apply -f deployment.yaml"
}
}
// Notify via the external sendmsg tool using stored credentials.
stage('send msg') {
withCredentials([usernamePassword(credentialsId: "${sendmsgAuthName}", passwordVariable: 'password', usernameVariable: 'username')]) {
sh "wget http://panel.ropon.top/soft/sendmsg && chmod +x sendmsg && ./sendmsg $password $username $msgText"
}
}
}
}

Dockerfile

1
2
3
4
5
6
7
8
9
10
11
12
13
# Multi-stage build: compile the Go binary in a full golang image, then copy
# only the binary into an empty (scratch) final image.
FROM golang:1.13-alpine3.10 as builder
# CGO_ENABLED=0 produces a statically linked binary (required for scratch);
# GOPROXY points at the China module mirror.
ENV GO111MODULE=on \
CGO_ENABLED=0 \
GOOS=linux \
GOARCH=amd64 \
GOPROXY=https://goproxy.cn

COPY . /app/
RUN cd /app && go build -o hellogo .

# Final stage: scratch has no shell or libc — the image contains only the
# statically linked binary built above.
FROM scratch
COPY --from=builder /app/hellogo /hellogo
ENTRYPOINT ["/hellogo"]

Deployment

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
# Service + Ingress + Deployment for the hellogo demo app.
# Indentation reconstructed — the pasted original had lost all YAML nesting
# and was not valid; misleading comments (wrong apiVersion note, "3" next to
# replicas: 2) corrected.
apiVersion: v1          # Service lives in the core (v1) API group
kind: Service
metadata:
  name: myapp
spec:
  type: ClusterIP
  selector:             # route traffic to pods labelled app=hello
    app: hello
  ports:
  - name: http
    port: 9000
    targetPort: 9000
---
apiVersion: extensions/v1beta1   # Ingress API group as of k8s 1.16
kind: Ingress
metadata:
  name: hellogo
spec:
  rules:
  - host: hellogo.ropon.top
    http:
      paths:
      - backend:
          serviceName: myapp
          servicePort: 9000
---
apiVersion: apps/v1     # Deployment lives in apps/v1
kind: Deployment
metadata:
  name: test-hello      # Deployment name
spec:
  selector:
    matchLabels:
      app: hello
  replicas: 2           # two pod replicas
  template:             # pod template
    metadata:
      labels:
        app: hello      # must match spec.selector.matchLabels above
    spec:
      imagePullSecrets:
      - name: hellogoregistrykey   # secret created earlier for Harbor pulls
      containers:
      - name: hellogo
        image: harbor.ropon.top/testgo/hellogo:v4
        imagePullPolicy: IfNotPresent
        ports:
        - containerPort: 9000

k8s v1.20.11

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
# Variant for Kubernetes v1.20.11: docker pinned to 19.03.15.
yum install -y yum-utils device-mapper-persistent-data lvm2
yum-config-manager --add-repo https://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo
yum install docker-ce-19.03.15 docker-ce-cli-19.03.15 containerd.io-1.2.13 -y
mkdir -p /etc/docker
# systemd cgroup driver + registry mirror, as in the v1.16 setup above.
tee /etc/docker/daemon.json <<-EOF
{
"exec-opts": ["native.cgroupdriver=systemd"],
"registry-mirrors": ["https://xxxxx.mirror.aliyuncs.com"]
}
EOF
systemctl daemon-reload
systemctl start docker
systemctl enable docker

# Kubernetes packages pinned to 1.20.11.
yum install -y kubelet-1.20.11 kubeadm-1.20.11 kubectl-1.20.11
systemctl enable kubelet

# All remaining steps are the same as for v1.16.9 above.