A while ago our production Ceph storage ran into problems, so I set up an NFS server as an emergency stopgap. This document records the setup and usage process.
NFS Installation and Configuration
Server-side Installation and Configuration
Pre-installation configuration
# Stop and disable the firewall
$ systemctl stop firewalld.service
$ systemctl disable firewalld.service
# Disable SELinux
$ setenforce 0
$ sed -i 's/SELINUX=enforcing/SELINUX=disabled/' /etc/selinux/config
Install NFS
# Install NFS
$ yum install -y rpcbind nfs-utils
# Create the shared directory
$ mkdir /home/kubedata
# Edit the export permissions; * here allows access from any IP. Multiple entries are supported.
$ vim /etc/exports
/home/kubedata *(rw,no_root_squash,no_all_squash,sync)
# Start NFS
$ systemctl start rpcbind
$ systemctl start nfs
$ systemctl enable rpcbind
$ systemctl enable nfs
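To confirm the export is actually active on the server itself, two standard checks can be run (a sanity check I usually add, not part of the original write-up):

# list current exports with their options
$ exportfs -v
# query the export list through rpcbind, the same way a client would
$ showmount -e localhost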
Export options explained
ro: the directory is read-only
rw: the directory is readable and writable
sync: data is written synchronously to the memory buffer and to disk; slower, but guarantees data consistency
async: data is kept in the memory buffer first and written to disk only when necessary
all_squash: map all remote regular users and their groups to the anonymous user/group (nfsnobody)
no_all_squash: the opposite of all_squash (default)
root_squash: map the root user and its group to the anonymous user/group (default)
no_root_squash: the opposite of root_squash
anonuid=xxx: map all remote users to the anonymous user, using the local user with UID=xxx
anongid=xxx: map all remote groups to the anonymous group account (GID=xxx)
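Since /etc/exports supports multiple entries, a slightly richer example could look like the following; the /home/backup path and the 192.168.1.0/24 network are placeholders for illustration, not part of this setup:

# /etc/exports
/home/kubedata  *(rw,no_root_squash,no_all_squash,sync)
/home/backup    192.168.1.0/24(ro,all_squash,sync)

After changing /etc/exports on a running server, `exportfs -r` re-reads the file and refreshes the export table without restarting the NFS service.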
Client-side Installation and Configuration
Do this step if you need to mount the share somewhere else; if the NFS server is only used as persistent storage for k8s, this step can be skipped.
# The client also needs rpcbind and nfs-utils installed
$ yum install -y rpcbind nfs-utils
# Start rpcbind
$ systemctl start rpcbind
$ systemctl enable rpcbind
# Check which directories the NFS server exports
$ showmount -e xxx.xxx.xxx.xxx
# Mount
$ mkdir /testnfs
$ mount -t nfs xxx.xxx.xxx.xxx:/home/kubedata /testnfs
# Unmount
$ umount /testnfs
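If the client mount should survive a reboot, an /etc/fstab entry along these lines would do it (the server address is a placeholder, as above; _netdev delays the mount until the network is up):

xxx.xxx.xxx.xxx:/home/kubedata  /testnfs  nfs  defaults,_netdev  0 0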
Configuring k8s to Use NFS as Persistent Storage
Create a namespace for testing
$ kubectl create namespace nfs
RBAC
$ vim nfs-rbac.yaml
kind: ServiceAccount
apiVersion: v1
metadata:
  name: nfs-client-provisioner
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: nfs-client-provisioner-runner
rules:
  - apiGroups: [""]
    resources: ["persistentvolumes"]
    verbs: ["get", "list", "watch", "create", "delete"]
  - apiGroups: [""]
    resources: ["persistentvolumeclaims"]
    verbs: ["get", "list", "watch", "update"]
  - apiGroups: ["storage.k8s.io"]
    resources: ["storageclasses"]
    verbs: ["get", "list", "watch"]
  - apiGroups: [""]
    resources: ["events"]
    verbs: ["create", "update", "patch"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: run-nfs-client-provisioner
subjects:
  - kind: ServiceAccount
    name: nfs-client-provisioner
    namespace: nfs
roleRef:
  kind: ClusterRole
  name: nfs-client-provisioner-runner
  apiGroup: rbac.authorization.k8s.io
---
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: leader-locking-nfs-client-provisioner
rules:
  - apiGroups: [""]
    resources: ["endpoints"]
    verbs: ["get", "list", "watch", "create", "update", "patch"]
---
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: leader-locking-nfs-client-provisioner
subjects:
  - kind: ServiceAccount
    name: nfs-client-provisioner
    # replace with namespace where provisioner is deployed
    namespace: nfs
roleRef:
  kind: Role
  name: leader-locking-nfs-client-provisioner
  apiGroup: rbac.authorization.k8s.io
nfs-client
$ vim nfs-client.yaml
kind: Deployment
apiVersion: extensions/v1beta1
metadata:
  name: nfs-client-provisioner
spec:
  replicas: 1
  strategy:
    type: Recreate
  template:
    metadata:
      labels:
        app: nfs-client-provisioner
    spec:
      serviceAccountName: nfs-client-provisioner
      containers:
        - name: nfs-client-provisioner
          image: quay.io/external_storage/nfs-client-provisioner:latest
          volumeMounts:
            - name: nfs-client-root
              mountPath: /persistentvolumes
          env:
            - name: PROVISIONER_NAME
              value: fuseim.pri/ifs
            - name: NFS_SERVER
              value: nfs-server-ip # replace with your NFS server address
            - name: NFS_PATH
              value: /home/kubedata # replace with your NFS export path
      volumes:
        - name: nfs-client-root
          nfs:
            server: nfs-server-ip # replace with your NFS server address
            path: /home/kubedata # replace with your NFS export path
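Note that the extensions/v1beta1 Deployment API was removed in Kubernetes 1.16. On newer clusters the top of the manifest would need to change roughly as follows (a sketch assuming the labels above stay unchanged; the rest of the manifest is the same):

kind: Deployment
apiVersion: apps/v1
metadata:
  name: nfs-client-provisioner
spec:
  replicas: 1
  strategy:
    type: Recreate
  selector:
    matchLabels:
      app: nfs-client-provisioner   # apps/v1 requires an explicit selector matching the pod template labels
  template:
    # ... unchanged from the manifest above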
StorageClass
$ vim nfs-sc.yaml
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: managed-nfs-storage
  annotations:
    storageclass.kubernetes.io/is-default-class: "true" # this annotation marks this StorageClass as the cluster default
provisioner: fuseim.pri/ifs
Deploy
$ kubectl apply -f nfs-rbac.yaml -n nfs
$ kubectl apply -f nfs-client.yaml -n nfs
$ kubectl apply -f nfs-sc.yaml -n nfs
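Before testing, it is worth checking that the provisioner pod is Running and that the StorageClass is registered and marked as the default; the exact output will vary per cluster, so none is shown here:

$ kubectl get pods -n nfs
$ kubectl get storageclass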
Test
# Create a PVC
$ vim test-pvc.yaml
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: test-pvc
spec:
  storageClassName: managed-nfs-storage
  accessModes:
    - ReadWriteMany
  resources:
    requests:
      storage: 500Mi
$ kubectl apply -f test-pvc.yaml
# Check
$ kubectl get pvc
NAME       STATUS   VOLUME                                     CAPACITY   ACCESS MODES   STORAGECLASS          AGE
test-pvc   Bound    pvc-38106603-f029-11ea-9397-0050569026a0   500Mi      RWX            managed-nfs-storage   4m24s
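To go one step further, a throwaway pod can mount the PVC and write a file into it; the pod name and the busybox image below are just illustrative choices, not part of the original setup. If provisioning works, the file shows up on the NFS server inside a subdirectory that the provisioner names roughly after the namespace, PVC, and PV.

$ vim test-pod.yaml
apiVersion: v1
kind: Pod
metadata:
  name: test-nfs-pod
spec:
  containers:
    - name: writer
      image: busybox
      # write a marker file into the mounted volume, then sleep so the pod stays up
      command: ["/bin/sh", "-c", "echo hello-nfs > /data/hello.txt && sleep 3600"]
      volumeMounts:
        - name: data
          mountPath: /data
  volumes:
    - name: data
      persistentVolumeClaim:
        claimName: test-pvc
$ kubectl apply -f test-pod.yaml
# On the NFS server, hello.txt should now exist under a <namespace>-test-pvc-<pv-name> directory in /home/kubedata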