#Run these steps on the master node; in this setup that is node-1.
In this part we deploy the Kubernetes DNS add-on: CoreDNS.
Strictly speaking the cluster is already usable at this point; this component is what lets workloads inside the cluster reach each other by name.
In earlier versions the DNS component ran as a standalone set of pods serving the whole cluster, so every pod sent its queries to the same DNS service.
Starting with Kubernetes 1.18, the NodeLocal DNSCache feature is stable.
NodeLocal DNSCache runs as a DaemonSet, placing a caching DNS agent pod on every worker node. Pods query the cache on their own node, which keeps DNS traffic off the iptables DNAT rules and kernel connection tracking and greatly improves DNS performance.
Official documentation:
CoreDNS kubernetes plugin: https://coredns.io/plugins/kubernetes/
NodeLocal DNSCache: https://kubernetes.io/docs/tasks/administer-cluster/nodelocaldns/
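A quick way to see which path a pod's DNS queries take is its /etc/resolv.conf (a sketch; the pod name is a placeholder and the value depends on the kubelet's clusterDNS setting):
[root@node-1 ~]# kubectl exec <some-pod> -- cat /etc/resolv.conf
#Without NodeLocal DNSCache this shows the cluster DNS Service IP (10.233.0.10 below);
#with it, the link-local cache address 169.254.25.10 used throughout this chapter.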
Deploying CoreDNS
# Set the coredns cluster-ip
#This is an address from the cluster's Service virtual-IP range. Any range works as long as it does not conflict with the internal network; it must match the range chosen in chapter 4.
[root@node-1 ~]# COREDNS_CLUSTER_IP=10.233.0.10
[root@node-1 ~]# echo $COREDNS_CLUSTER_IP
10.233.0.10
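Before going on, it is worth confirming that this clusterIP actually falls inside the Service CIDR kube-apiserver was started with in chapter 4. A hedged check, assuming the apiserver runs as a local process on this node:
[root@node-1 ~]# ps -ef | grep -o 'service-cluster-ip-range=[^ ]*'
#Should print a CIDR that contains 10.233.0.10, e.g. service-cluster-ip-range=10.233.0.0/16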
# Download the coredns all-in-one manifest (addons/coredns.yaml)
#from this directory: https://git.imooc.com/coding-335/kubernetes-the-hard-way/src/v1.20.2/addons
[root@node-1 ~]# cat coredns.yaml
---
apiVersion: v1
kind: ConfigMap
metadata:
  name: coredns
  namespace: kube-system
  labels:
    addonmanager.kubernetes.io/mode: EnsureExists
data:
  Corefile: |
    .:53 {
        errors
        health {
            lameduck 5s
        }
        ready
        kubernetes cluster.local in-addr.arpa ip6.arpa {
            pods insecure
            fallthrough in-addr.arpa ip6.arpa
        }
        prometheus :9153
        forward . /etc/resolv.conf {
            prefer_udp
        }
        cache 30
        loop
        reload
        loadbalance
    }
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: coredns
  namespace: kube-system
  labels:
    addonmanager.kubernetes.io/mode: Reconcile
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  labels:
    kubernetes.io/bootstrapping: rbac-defaults
    addonmanager.kubernetes.io/mode: Reconcile
  name: system:coredns
rules:
  - apiGroups:
      - ""
    resources:
      - endpoints
      - services
      - pods
      - namespaces
    verbs:
      - list
      - watch
  - apiGroups:
      - ""
    resources:
      - nodes
    verbs:
      - get
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  annotations:
    rbac.authorization.kubernetes.io/autoupdate: "true"
  labels:
    kubernetes.io/bootstrapping: rbac-defaults
    addonmanager.kubernetes.io/mode: EnsureExists
  name: system:coredns
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: system:coredns
subjects:
  - kind: ServiceAccount
    name: coredns
    namespace: kube-system
---
apiVersion: v1
kind: Service
metadata:
  name: coredns
  namespace: kube-system
  labels:
    k8s-app: kube-dns
    kubernetes.io/name: "coredns"
    addonmanager.kubernetes.io/mode: Reconcile
  annotations:
    prometheus.io/port: "9153"
    prometheus.io/scrape: "true"
spec:
  selector:
    k8s-app: kube-dns
  clusterIP: ${COREDNS_CLUSTER_IP}
  ports:
    - name: dns
      port: 53
      protocol: UDP
    - name: dns-tcp
      port: 53
      protocol: TCP
    - name: metrics
      port: 9153
      protocol: TCP
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: "coredns"
  namespace: kube-system
  labels:
    k8s-app: "kube-dns"
    addonmanager.kubernetes.io/mode: Reconcile
    kubernetes.io/name: "coredns"
spec:
  replicas: 2
  strategy:
    type: RollingUpdate
    rollingUpdate:
      maxUnavailable: 0
      maxSurge: 10%
  selector:
    matchLabels:
      k8s-app: kube-dns
  template:
    metadata:
      labels:
        k8s-app: kube-dns
      annotations:
        seccomp.security.alpha.kubernetes.io/pod: 'runtime/default'
    spec:
      priorityClassName: system-cluster-critical
      nodeSelector:
        kubernetes.io/os: linux
      serviceAccountName: coredns
      tolerations:
        - key: node-role.kubernetes.io/master
          effect: NoSchedule
      affinity:
        podAntiAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
            - topologyKey: "kubernetes.io/hostname"
              labelSelector:
                matchLabels:
                  k8s-app: kube-dns
        nodeAffinity:
          preferredDuringSchedulingIgnoredDuringExecution:
            - weight: 100
              preference:
                matchExpressions:
                  - key: node-role.kubernetes.io/master
                    operator: In
                    values:
                      - ""
      containers:
        - name: coredns
          image: "docker.io/coredns/coredns:1.6.7"
          imagePullPolicy: IfNotPresent
          resources:
            # TODO: Set memory limits when we've profiled the container for large
            # clusters, then set request = limit to keep this container in
            # guaranteed class. Currently, this container falls into the
            # "burstable" category so the kubelet doesn't backoff from restarting it.
            limits:
              memory: 170Mi
            requests:
              cpu: 100m
              memory: 70Mi
          args: [ "-conf", "/etc/coredns/Corefile" ]
          volumeMounts:
            - name: config-volume
              mountPath: /etc/coredns
          ports:
            - containerPort: 53
              name: dns
              protocol: UDP
            - containerPort: 53
              name: dns-tcp
              protocol: TCP
            - containerPort: 9153
              name: metrics
              protocol: TCP
          securityContext:
            allowPrivilegeEscalation: false
            capabilities:
              add:
                - NET_BIND_SERVICE
              drop:
                - all
            readOnlyRootFilesystem: true
          livenessProbe:
            httpGet:
              path: /health
              port: 8080
              scheme: HTTP
            timeoutSeconds: 5
            successThreshold: 1
            failureThreshold: 10
          readinessProbe:
            httpGet:
              path: /ready
              port: 8181
              scheme: HTTP
            timeoutSeconds: 5
            successThreshold: 1
            failureThreshold: 10
      dnsPolicy: Default
      volumes:
        - name: config-volume
          configMap:
            name: coredns
            items:
              - key: Corefile
                path: Corefile
# Substitute the cluster-ip
[root@node-1 ~]# sed -i.ori "s/\${COREDNS_CLUSTER_IP}/${COREDNS_CLUSTER_IP}/g" coredns.yaml
#Grep the manifest for the image that needs to be pulled
[root@node-1 ~]# grep image coredns.yaml
image: "docker.io/coredns/coredns:1.6.7"
#Pull the image on node-2 and node-3
[root@node-2/3 ~]# crictl pull docker.io/coredns/coredns:1.6.7
[root@node-2/3 ~]# crictl images
docker.io/coredns/coredns 1.6.7 67da37a9a360e 13.6MB
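Pulling on every node by hand gets tedious. If passwordless ssh between the nodes is set up (an assumption; it is not part of this chapter), a small loop from node-1 does the same thing:
[root@node-1 ~]# for n in node-2 node-3; do ssh $n "crictl pull docker.io/coredns/coredns:1.6.7"; done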
#Verify the IP was substituted
[root@node-1 ~]# grep 10.233.0.10 coredns.yaml
clusterIP: 10.233.0.10
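Since sed edited the manifest in place, a client-side dry run is a cheap way to catch YAML or schema mistakes before anything touches the cluster:
[root@node-1 ~]# kubectl apply -f coredns.yaml --dry-run=client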
# Create coredns
[root@node-1 ~]# kubectl apply -f coredns.yaml
configmap/coredns created
serviceaccount/coredns created
clusterrole.rbac.authorization.k8s.io/system:coredns created
clusterrolebinding.rbac.authorization.k8s.io/system:coredns created
service/coredns created
deployment.apps/coredns created
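Before moving on, confirm that the Service really got the intended clusterIP and that it picks up endpoints once the pods become ready:
[root@node-1 ~]# kubectl -n kube-system get svc coredns
[root@node-1 ~]# kubectl -n kube-system get endpoints coredns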
Deploying NodeLocal DNSCache
# Set the coredns cluster-ip (same value as above)
[root@node-1 ~]# COREDNS_CLUSTER_IP=10.233.0.10
# Download the nodelocaldns all-in-one manifest (addons/nodelocaldns.yaml)
#from this directory: https://git.imooc.com/coding-335/kubernetes-the-hard-way/src/v1.20.2/addons
[root@node-1 ~]# cat nodelocaldns.yaml
---
apiVersion: v1
kind: ConfigMap
metadata:
  name: nodelocaldns
  namespace: kube-system
  labels:
    addonmanager.kubernetes.io/mode: EnsureExists
data:
  Corefile: |
    cluster.local:53 {
        errors
        cache {
            success 9984 30
            denial 9984 5
        }
        reload
        loop
        bind 169.254.25.10
        forward . ${COREDNS_CLUSTER_IP} {
            force_tcp
        }
        prometheus :9253
        health 169.254.25.10:9254
    }
    in-addr.arpa:53 {
        errors
        cache 30
        reload
        loop
        bind 169.254.25.10
        forward . ${COREDNS_CLUSTER_IP} {
            force_tcp
        }
        prometheus :9253
    }
    ip6.arpa:53 {
        errors
        cache 30
        reload
        loop
        bind 169.254.25.10
        forward . ${COREDNS_CLUSTER_IP} {
            force_tcp
        }
        prometheus :9253
    }
    .:53 {
        errors
        cache 30
        reload
        loop
        bind 169.254.25.10
        forward . /etc/resolv.conf
        prometheus :9253
    }
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: nodelocaldns
  namespace: kube-system
  labels:
    k8s-app: kube-dns
    addonmanager.kubernetes.io/mode: Reconcile
spec:
  selector:
    matchLabels:
      k8s-app: nodelocaldns
  template:
    metadata:
      labels:
        k8s-app: nodelocaldns
      annotations:
        prometheus.io/scrape: 'true'
        prometheus.io/port: '9253'
    spec:
      priorityClassName: system-cluster-critical
      serviceAccountName: nodelocaldns
      hostNetwork: true
      dnsPolicy: Default  # Don't use cluster DNS.
      tolerations:
        - effect: NoSchedule
          operator: "Exists"
        - effect: NoExecute
          operator: "Exists"
      containers:
        - name: node-cache
          image: "registry.cn-hangzhou.aliyuncs.com/kubernetes-kubespray/dns_k8s-dns-node-cache:1.16.0"
          resources:
            limits:
              memory: 170Mi
            requests:
              cpu: 100m
              memory: 70Mi
          args: [ "-localip", "169.254.25.10", "-conf", "/etc/coredns/Corefile", "-upstreamsvc", "coredns" ]
          securityContext:
            privileged: true
          ports:
            - containerPort: 53
              name: dns
              protocol: UDP
            - containerPort: 53
              name: dns-tcp
              protocol: TCP
            - containerPort: 9253
              name: metrics
              protocol: TCP
          livenessProbe:
            httpGet:
              host: 169.254.25.10
              path: /health
              port: 9254
              scheme: HTTP
            timeoutSeconds: 5
            successThreshold: 1
            failureThreshold: 10
          readinessProbe:
            httpGet:
              host: 169.254.25.10
              path: /health
              port: 9254
              scheme: HTTP
            timeoutSeconds: 5
            successThreshold: 1
            failureThreshold: 10
          volumeMounts:
            - name: config-volume
              mountPath: /etc/coredns
            - name: xtables-lock
              mountPath: /run/xtables.lock
      volumes:
        - name: config-volume
          configMap:
            name: nodelocaldns
            items:
              - key: Corefile
                path: Corefile
        - name: xtables-lock
          hostPath:
            path: /run/xtables.lock
            type: FileOrCreate
      # Minimize downtime during a rolling upgrade or deletion; tell Kubernetes to do a "force
      # deletion": https://kubernetes.io/docs/concepts/workloads/pods/pod/#termination-of-pods.
      terminationGracePeriodSeconds: 0
  updateStrategy:
    rollingUpdate:
      maxUnavailable: 20%
    type: RollingUpdate
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: nodelocaldns
  namespace: kube-system
  labels:
    addonmanager.kubernetes.io/mode: Reconcile
# Substitute the cluster-ip
[root@node-1 ~]# sed -i.ori "s/\${COREDNS_CLUSTER_IP}/${COREDNS_CLUSTER_IP}/g" nodelocaldns.yaml
#The manifest already points at a mirror of the image hosted on a registry in China
[root@node-1 ~]# grep image nodelocaldns.yaml
image: "registry.cn-hangzhou.aliyuncs.com/kubernetes-kubespray/dns_k8s-dns-node-cache:1.16.0"
#Pull the image on node-2 and node-3
[root@node-2/3 ~]# crictl pull registry.cn-hangzhou.aliyuncs.com/kubernetes-kubespray/dns_k8s-dns-node-cache:1.16.0
[root@node-2/3 ~]# crictl images
registry.cn-hangzhou.aliyuncs.com/kubernetes-kubespray/dns_k8s-dns-node-cache 1.16.0 90f9d984ec9a3 56.1MB
#Verify the IP was substituted
[root@node-1 ~]# grep 10.233.0.10 nodelocaldns.yaml
forward . 10.233.0.10 {
forward . 10.233.0.10 {
forward . 10.233.0.10 {
# Create nodelocaldns
[root@node-1 ~]# kubectl apply -f nodelocaldns.yaml
configmap/nodelocaldns created
daemonset.apps/nodelocaldns created
serviceaccount/nodelocaldns created
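The cache binds the link-local address 169.254.25.10 on every node it runs on, so once the DaemonSet pods are up it can be queried directly from a worker node (assumes dig from bind-utils is installed; nslookup works just as well):
[root@node-2 ~]# dig @169.254.25.10 kubernetes.default.svc.cluster.local +short
#Should return the clusterIP of the kubernetes Service, typically the first IP of the Service range.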
Verify the services are running
#Check that the pods came up
[root@node-1 ~]# kubectl get po -n kube-system
NAME READY STATUS RESTARTS AGE
calico-kube-controllers-577f77cb5c-22q6z 1/1 Running 3 15h
calico-node-ncwxx 1/1 Running 2 15h
calico-node-vjbcn 1/1 Running 2 15h
coredns-84646c885d-fdxdw 1/1 Running 0 4m21s
coredns-84646c885d-gllvr 1/1 Running 0 4m21s
nginx-proxy-node-3 1/1 Running 3 11h
nodelocaldns-9727r 1/1 Running 0 12s
nodelocaldns-nlr7h 1/1 Running 0 12s
#Check which node each pod landed on
[root@node-1 ~]# kubectl get po -n kube-system -o wide
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
calico-kube-controllers-577f77cb5c-22q6z 1/1 Running 3 15h 10.200.247.3 node-2 <none> <none>
calico-node-ncwxx 1/1 Running 2 15h 172.16.1.23 node-3 <none> <none>
calico-node-vjbcn 1/1 Running 2 15h 172.16.1.22 node-2 <none> <none>
coredns-84646c885d-fdxdw 1/1 Running 0 5m51s 10.200.139.66 node-3 <none> <none>
coredns-84646c885d-gllvr 1/1 Running 0 5m51s 10.200.247.4 node-2 <none> <none>
nginx-proxy-node-3 1/1 Running 3 11h 172.16.1.23 node-3 <none> <none>
nodelocaldns-9727r 1/1 Running 0 102s 172.16.1.23 node-3 <none> <none>
nodelocaldns-nlr7h 1/1 Running 0 102s 172.16.1.22 node-2 <none> <none>
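Two final checks. Pods only benefit from the cache if the kubelet hands out 169.254.25.10 as their nameserver, so first verify the kubelet config on a worker (the file path is an assumption based on common setups); then run an end-to-end lookup from a throwaway pod (busybox:1.28, since newer busybox builds ship a broken nslookup):
[root@node-2 ~]# grep -A1 clusterDNS /var/lib/kubelet/config.yaml
[root@node-1 ~]# kubectl run dns-test --image=busybox:1.28 --rm -it --restart=Never -- nslookup kubernetes.default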