考题复习-1
自己当时考试前复习过的考题,做题笔记。
# 考题复习-1
# 1 RBAC
创建一个名为deployment-clusterrole且仅允许创建以下资源类型的新ClusterRole:
Deployment
StatefulSet
DaemonSet
在现有的namespace app-team1中创建一个名为cicd-token的新 ServiceAccount
限于namespace app-team1中,将新的ClusterRole deployment-clusterrole绑定到新的ServiceAccount cicd-token
# 1) ClusterRole that only allows creating Deployments/StatefulSets/DaemonSets
kubectl create clusterrole deployment-clusterrole --verb=create --resource=deployments,statefulsets,daemonsets
# 2) ServiceAccount in the existing namespace app-team1
kubectl create sa cicd-token -n app-team1
# 3) RoleBinding (namespace-scoped, as required) binding the ClusterRole to the SA.
# NOTE(review): the task says "limited to namespace app-team1" — a RoleBinding is correct,
# but it must be created with -n app-team1; verify the namespace flag when running this.
kubectl create rolebinding cicd-token-rolebinding --clusterrole=deployment-clusterrole --serviceaccount=app-team1:cicd-token
# 2 配置网络策略 NetworkPolicy(注意echo和my-app的名字好像换了)
- 在现有的namespace echo中创建一个名为allow-port-from-namespace的新NetworkPolicy
- 确保新的NetworkPolicy允许命名空间my-app的能访问my-app的所有pod的5768端口
- 进一步确保新的NetworkPolicy
- 不允许对没有在监听 端口5768的Pods的访问
- 不允许非来自 namespace my-app中的Pods的访问
# NetworkPolicy: allow only pods from namespace my-app to reach port 5768/TCP
# on any pod in namespace echo; all other ingress is denied.
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
  name: allow-port-from-namespace
  namespace: echo
spec:
  # Empty selector: the policy applies to every pod in namespace echo
  podSelector: {}
  policyTypes:
    - Ingress
  ingress:
    - from:
        - namespaceSelector:
            matchLabels:
              # Must match an actual label on the my-app namespace —
              # check with: kubectl get ns my-app --show-labels
              project: my-app
              # name: my-app
      # ports sits at the same level as "from": both conditions must hold
      ports:
        - protocol: TCP
          port: 5768
# 3 暴露服务
- 请重新配置现有的部署front-end以及添加名为http的端口规范来公开现有容器 nginx 的端口80/tcp。
- 创建一个名为front-end-svc的新服务,以公开容器端口http。
- 配置此服务,以通过在排定的节点上的 NodePort 来公开各个 Pods。
# Add a named port "http" (80/tcp) to the existing nginx container
kubectl edit deploy front-end
# In the editor, under spec.template.spec.containers[] (the nginx container), add:
#   ports:
#     - name: http
#       containerPort: 80
#       protocol: TCP
# Then expose the deployment as a NodePort service on the container port
kubectl expose deploy front-end --name=front-end-svc --port=80 --target-port=80 --type=NodePort
# 4 创建Ingress
- 如下创建一个新的nginx Ingress资源:
- 名称: ping
- Namespace: ing-internal
- 使用服务端口 5678在路径 /hello 上公开服务 hello
- 可以使用以下命令检查服务 hello的可用性,该命令应返回 hello:
# Ingress "ping" in namespace ing-internal: route /hello to service hello:5678
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: ping
  namespace: ing-internal
  annotations:
    # Deprecated annotation form; newer clusters use spec.ingressClassName: nginx
    kubernetes.io/ingress.class: "nginx"
    nginx.ingress.kubernetes.io/rewrite-target: /
spec:
  rules:
    - http:
        paths:
          - path: /hello
            pathType: Prefix
            backend:
              service:
                name: hello
                port:
                  number: 5678
# 5 扩容deployment副本数量
# Scale a deployment to the requested replica count
kubectl scale deploy <名称> --replicas=<数量>
# 6 nodeSelector 将 Pod 分配给节点
# Pod pinned to a node via nodeSelector: the key/value pair must match a
# label that actually exists on the target node (kubectl get nodes --show-labels).
apiVersion: v1
kind: Pod
metadata:
  name: pod1
spec:
  nodeSelector:
    # Use the exact label key/value from the exam question (often disk: ssd)
    name: node1
  containers:
    - name: pod1
      image: nginx
      imagePullPolicy: IfNotPresent
# 7 升级
# 1.20.1 --> 1.21.1
# Upgrade the control-plane node from 1.20.1 to 1.21.1.
# Drain first, uncordon LAST — the original notes uncordoned before upgrading
# kubelet/kubectl, which would schedule workloads onto a half-upgraded node.
kubectl drain <master> --ignore-daemonsets
yum install kubeadm-1.21.1-0 --disableexcludes=kubernetes
kubeadm upgrade plan
# NOTE(review): the exam task usually says NOT to upgrade etcd —
# if so, add --etcd-upgrade=false here.
kubeadm upgrade apply v1.21.1
yum install kubelet-1.21.1-0 kubectl-1.21.1-0 --disableexcludes=kubernetes
systemctl daemon-reload
systemctl restart kubelet
# Only now make the node schedulable again
kubectl uncordon <master>
# 8 备份还原
export ETCDCTL_API=3
etcdctl --endpoints=https://127.0.0.1:2379 --cacert=/opt/KUIN00601/ca.crt --cert=/opt/KUIN00601/etcd-client.crt --key=/opt/KUIN00601/etcd-client.key snapshot save /var/lib/backup/etcd-snapshot.db
etcdctl --endpoints=https://127.0.0.1:2379 --cacert=/opt/KUIN00601/ca.crt --cert=/opt/KUIN00601/etcd-client.crt --key=/opt/KUIN00601/etcd-client.key snapshot restore /data/backup/etcd-snapshot-previous.db
# 9 多容器的Pod(就两个好像)
# Multi-container Pod: one Pod running both an nginx and a redis container
apiVersion: v1
kind: Pod
metadata:
  name: kucc8
spec:
  containers:
    - name: nginx
      image: nginx
    - name: redis
      image: redis
# 10 创建PV,PVC,及对应的POD
- 配置 Pod 以使用 PersistentVolume 作为存储 | Kubernetes
- https://kubernetes.io/zh/docs/tasks/configure-pod-container/configure-persistent-volume-storage/#create-a-persistentvolume
- 以这个链接为参考
# 11 排查集群中故障节点
# SSH to the broken node and become root
ssh node01
sudo -i
# Check the kubelet service — on this exam task the kubelet is typically
# stopped/disabled; restart it and enable it so it survives reboots
systemctl status kubelet
systemctl restart kubelet
systemctl enable kubelet
systemctl status kubelet
# 12 drain 节点
# Mark the node unschedulable, then evict its workloads
kubectl cordon node01
kubectl drain node01 --ignore-daemonsets --delete-emptydir-data --force # in my test, drain fails without --delete-emptydir-data
# 13 找CPU消耗最高的Pod
# Find the highest-CPU pod matching the label, across all namespaces
kubectl top pods -l name=cpu-loader --sort-by=cpu -A
# 14 找日志
# Filter a pod's logs for the requested pattern (redirect to the file the task names)
kubectl logs foo | grep "要搜的内容"