此任务演示了Istio 的流量镜像功能。
流量镜像,也称为影子流量,是一个以尽可能低的风险为生产带来变化的强大的功能。镜像会将实时流量的副本发送到镜像服务。镜像流量发生在主服务的关键请求路径之外。
在此任务中,首先把流量全部路由到v1 版本的测试服务。然后,执行规则将一部分流量镜像到 v2 版本。
流量镜像大多用于测试场景:把生产环境的一部分实时流量复制一份发给新服务或新版本,观察其是否出现异常,而不影响线上请求。
测试环境准备
首先部署两个版本的 httpbin 服务,httpbin 服务已开启访问日志
##### httpbin-v1:
[root@node-2 istio-1.9.5]# cat <<EOF | kubectl apply -f -
> apiVersion: apps/v1
> kind: Deployment
> metadata:
> name: httpbin-v1
> spec:
> replicas: 1
> selector:
> matchLabels:
> app: httpbin
> version: v1
> template:
> metadata:
> labels:
> app: httpbin
> version: v1
> spec:
> containers:
> - image: docker.io/kennethreitz/httpbin
> imagePullPolicy: IfNotPresent
> name: httpbin
#参数开启访问日志,以便后面看流量是否进来了。
> command: ["gunicorn", "--access-logfile", "-", "-b", "0.0.0.0:80", "httpbin:app"]
> ports:
> - containerPort: 80
> EOF
deployment.apps/httpbin-v1 created
注:上下两个 Deployment(httpbin-v1 与 httpbin-v2)的主要区别仅在于 label 中的 version 值不同(v1 / v2)。
##### httpbin-v2:
[root@node-2 istio-1.9.5]# cat <<EOF | kubectl apply -f -
> apiVersion: apps/v1
> kind: Deployment
> metadata:
> name: httpbin-v2
> spec:
> replicas: 1
> selector:
> matchLabels:
> app: httpbin
> version: v2
> template:
> metadata:
> labels:
> app: httpbin
> version: v2
> spec:
> containers:
> - image: docker.io/kennethreitz/httpbin
> imagePullPolicy: IfNotPresent
> name: httpbin
> command: ["gunicorn", "--access-logfile", "-", "-b", "0.0.0.0:80", "httpbin:app"]
> ports:
> - containerPort: 80
> EOF
deployment.apps/httpbin-v2 created
##### httpbin Kubernetes service:
[root@node-2 istio-1.9.5]# kubectl apply -f - <<EOF
> apiVersion: v1
> kind: Service
> metadata:
> name: httpbin
> labels:
> app: httpbin
> spec:
> ports:
> - name: http
> port: 8000
> targetPort: 80
> selector:
> app: httpbin
> EOF
service/httpbin created
#### 启动 sleep 服务(用于curl访问服务)
[root@node-2 istio-1.9.5]# cat <<EOF | kubectl apply -f -
> apiVersion: apps/v1
> kind: Deployment
> metadata:
> name: sleep
> spec:
> replicas: 1
> selector:
> matchLabels:
> app: sleep
> template:
> metadata:
> labels:
> app: sleep
> spec:
> containers:
> - name: sleep
> image: tutum/curl
#用于访问 httpbin 服务;该容器本身永远处于睡眠状态,仅作为发起 curl 请求的媒介。
> command: ["/bin/sleep","infinity"]
> imagePullPolicy: IfNotPresent
> EOF
deployment.apps/sleep created
创建默认路由策略(注:若 Istio 证书过期,后续访问会报错,见下文示例)
默认情况下,Kubernetes 在 httpbin 服务的两个版本之间进行负载均衡。在此步骤中会更改该行为,把所有流量都路由到 v1。
# 创建一个默认路由规则,将所有流量路由到服务的 v1:
[root@node-2 istio-1.9.5]# kubectl apply -f - <<EOF
> apiVersion: networking.istio.io/v1alpha3
> kind: VirtualService
> metadata:
> name: httpbin
> spec:
> hosts:
> - httpbin
> http:
> - route:
> - destination:
> host: httpbin
#通过 subset: v1 加 weight: 100,将发往 httpbin 的流量 100% 路由到 v1 子集,不再在 v1/v2 之间负载均衡。
> subset: v1
> weight: 100
> ---
> apiVersion: networking.istio.io/v1alpha3
#DestinationRule注明了v1和v2两个版本。
> kind: DestinationRule
> metadata:
> name: httpbin
> spec:
> host: httpbin
> subsets:
> - name: v1
> labels:
> version: v1
> - name: v2
> labels:
> version: v2
> EOF
virtualservice.networking.istio.io/httpbin created
destinationrule.networking.istio.io/httpbin created
[root@node-2 istio-1.9.5]# kubectl get pod -l app=sleep -o jsonpath={.items..metadata.name}
sleep-96c4ddd7f-lh662
[root@node-2 istio-1.9.5]# kubectl exec -it $SLEEP_POD -c sleep -- sh -c 'curl http://httpbin:8000/headers'
#此处报错由 Istio 证书过期导致;另注意:需先执行下方的 export SLEEP_POD=... 再访问,否则 $SLEEP_POD 变量为空。
upstream connect error or disconnect/reset before headers. reset reason: connection failure, transport failure reason: TLS error: 268435581:SSL routines:OPENSSL_internal:CERTIFICATE_VERIFY_FAILED
[root@node-2 istio-1.9.5]# export SLEEP_POD=$(kubectl get pod -l app=sleep -o jsonpath={.items..metadata.name})
[root@node-2 istio-1.9.5]# kubectl exec -it $SLEEP_POD -c sleep -- sh -c 'curl http://httpbin:8000/headers' | python -m json.tool
{
"headers": {
"Accept": "*/*",
"Content-Length": "0",
"Host": "httpbin:8000",
"User-Agent": "curl/7.35.0",
"X-B3-Parentspanid": "e77e930c951ca7c9",
"X-B3-Sampled": "1",
"X-B3-Spanid": "1330f1fc69bb1e71",
"X-B3-Traceid": "86255d55df81208fe77e930c951ca7c9",
"X-Envoy-Attempt-Count": "1",
"X-Forwarded-Client-Cert": "By=spiffe://cluster.local/ns/default/sa/default;Hash=29fa8de46db60bcdee745075f89438f77e9706d8a237b205cd3d92f8d9a2badc;Subject=\"\";URI=spiffe://cluster.local/ns/default/sa/default"
}
}
# 查看 httpbin 服务 v1 的日志
[root@node-2 istio-1.9.5]# export V1_POD=$(kubectl get pod -l app=httpbin,version=v1 -o jsonpath={.items..metadata.name})
[root@node-2 istio-1.9.5]# kubectl logs -f $V1_POD -c httpbin
[2022-12-23 14:34:45 +0000] [1] [INFO] Starting gunicorn 19.9.0
[2022-12-23 14:34:45 +0000] [1] [INFO] Listening at: http://0.0.0.0:80 (1)
[2022-12-23 14:34:45 +0000] [1] [INFO] Using worker: sync
[2022-12-23 14:34:45 +0000] [8] [INFO] Booting worker with pid: 8
127.0.0.1 - - [23/Dec/2022:14:57:28 +0000] "GET /headers HTTP/1.1" 200 553 "-" "curl/7.35.0"
# 查看 httpbin 服务 v2 的日志
[root@node-2 istio-1.9.5]# export V2_POD=$(kubectl get pod -l app=httpbin,version=v2 -o jsonpath={.items..metadata.name})
[root@node-2 istio-1.9.5]# kubectl logs -f $V2_POD -c httpbin
[2022-12-23 14:36:13 +0000] [1] [INFO] Starting gunicorn 19.9.0
[2022-12-23 14:36:13 +0000] [1] [INFO] Listening at: http://0.0.0.0:80 (1)
[2022-12-23 14:36:13 +0000] [1] [INFO] Using worker: sync
[2022-12-23 14:36:13 +0000] [8] [INFO] Booting worker with pid: 8
镜像流量到 v2
改变流量规则将流量镜像到 v2:
[root@node-2 istio-1.9.5]# kubectl apply -f - <<EOF
> apiVersion: networking.istio.io/v1alpha3
> kind: VirtualService
> metadata:
> name: httpbin
> spec:
> hosts:
> - httpbin
> http:
> - route:
> - destination:
> host: httpbin
> subset: v1
#这个路由规则发送 100% 流量到 v1。
> weight: 100
> mirror:
> host: httpbin
> subset: v2
#最后一段表示你将镜像流量到 httpbin:v2 服务。当流量被镜像时,请求将发送到镜像服务中,并在 headers 中的 Host/Authority 属性值上追加 -shadow。例如 cluster-1 变为 cluster-1-shadow。
> mirrorPercentage:
> value: 100
> EOF
virtualservice.networking.istio.io/httpbin configured
#在当前路由下,会把httpbin服务的v1版本的流量100%去镜像到httpbin的v2版本。
#此外,重点注意这些被镜像的流量是『即发即弃』(fire-and-forget)的,即镜像请求的响应会被 Envoy 直接丢弃。
#正常的流量有去有回,镜像的流量有去无回(把流量丢过去之后,啥都不管了,这也是流量镜像的基本使用方式)。
##### 发送流量
[root@node-2 istio-1.9.5]# export SLEEP_POD=$(kubectl get pod -l app=sleep -o jsonpath={.items..metadata.name})
#多执行几次
[root@node-2 istio-1.9.5]# kubectl exec -it $SLEEP_POD -c sleep -- sh -c 'curl http://httpbin:8000/headers' | python -m json.tool
#现在就可以看到 v1 和 v2 中都有了访问日志。v2 中的访问日志就是由镜像流量产生的,这些请求的实际目标是 v1。
[root@node-2 istio-1.9.5]# export V2_POD=$(kubectl get pod -l app=httpbin,version=v2 -o jsonpath={.items..metadata.name})
[root@node-2 istio-1.9.5]# kubectl logs -f $V2_POD -c httpbin
127.0.0.1 - - [23/Dec/2022:15:07:47 +0000] "GET /headers HTTP/1.1" 200 593 "-" "curl/7.35.0"
127.0.0.1 - - [23/Dec/2022:15:07:53 +0000] "GET /headers HTTP/1.1" 200 593 "-" "curl/7.35.0"
127.0.0.1 - - [23/Dec/2022:15:07:56 +0000] "GET /headers HTTP/1.1" 200 593 "-" "curl/7.35.0"
127.0.0.1 - - [23/Dec/2022:15:07:56 +0000] "GET /headers HTTP/1.1" 200 593 "-" "curl/7.35.0"
127.0.0.1 - - [23/Dec/2022:15:07:57 +0000] "GET /headers HTTP/1.1" 200 593 "-" "curl/7.35.0"
127.0.0.1 - - [23/Dec/2022:15:07:57 +0000] "GET /headers HTTP/1.1" 200 593 "-" "curl/7.35.0"
127.0.0.1 - - [23/Dec/2022:15:07:58 +0000] "GET /headers HTTP/1.1" 200 593 "-" "curl/7.35.0"
127.0.0.1 - - [23/Dec/2022:15:07:59 +0000] "GET /headers HTTP/1.1" 200 593 "-" "curl/7.35.0"
127.0.0.1 - - [23/Dec/2022:15:08:00 +0000] "GET /headers HTTP/1.1" 200 593 "-" "curl/7.35.0"
[root@node-1 ~]# export V1_POD=$(kubectl get pod -l app=httpbin,version=v1 -o jsonpath={.items..metadata.name})
[root@node-1 ~]# kubectl logs -f $V1_POD -c httpbin
127.0.0.1 - - [23/Dec/2022:15:07:47 +0000] "GET /headers HTTP/1.1" 200 553 "-" "curl/7.35.0"
127.0.0.1 - - [23/Dec/2022:15:07:53 +0000] "GET /headers HTTP/1.1" 200 553 "-" "curl/7.35.0"
127.0.0.1 - - [23/Dec/2022:15:07:56 +0000] "GET /headers HTTP/1.1" 200 553 "-" "curl/7.35.0"
127.0.0.1 - - [23/Dec/2022:15:07:56 +0000] "GET /headers HTTP/1.1" 200 553 "-" "curl/7.35.0"
127.0.0.1 - - [23/Dec/2022:15:07:57 +0000] "GET /headers HTTP/1.1" 200 553 "-" "curl/7.35.0"
127.0.0.1 - - [23/Dec/2022:15:07:57 +0000] "GET /headers HTTP/1.1" 200 553 "-" "curl/7.35.0"
127.0.0.1 - - [23/Dec/2022:15:07:58 +0000] "GET /headers HTTP/1.1" 200 553 "-" "curl/7.35.0"
127.0.0.1 - - [23/Dec/2022:15:07:59 +0000] "GET /headers HTTP/1.1" 200 553 "-" "curl/7.35.0"
127.0.0.1 - - [23/Dec/2022:15:08:00 +0000] "GET /headers HTTP/1.1" 200 553 "-" "curl/7.35.0"
环境清理
#删除规则
[root@node-2 istio-1.9.5]# kubectl delete virtualservice httpbin
virtualservice.networking.istio.io "httpbin" deleted
[root@node-2 istio-1.9.5]# kubectl delete destinationrule httpbin
destinationrule.networking.istio.io "httpbin" deleted
#关闭 httpbin 服务和客户端
[root@node-2 istio-1.9.5]# kubectl delete deploy httpbin-v1 httpbin-v2 sleep
deployment.apps "httpbin-v1" deleted
deployment.apps "httpbin-v2" deleted
deployment.apps "sleep" deleted
[root@node-2 istio-1.9.5]# kubectl delete svc httpbin
service "httpbin" deleted
标题:Kubernetes(十三)istio(13.10)流量镜像
作者:yazong
地址:https://blog.llyweb.com/articles/2022/12/24/1671816156134.html