

k8s Installation 11: Deploying OpenResty

11. Deploying OpenResty

Prepare the image

docker search openresty 
#choose whichever image you prefer

#official openresty image
docker pull openresty/openresty 
docker images |grep openresty
openresty/openresty                                                            latest                eaeb31afac25   4 weeks ago     93.2MB
docker inspect openresty/openresty:latest
docker tag docker.io/openresty/openresty:latest repo.k8s.local/docker.io/openresty/openresty:latest
docker tag docker.io/openresty/openresty:latest repo.k8s.local/docker.io/openresty/openresty:1.19.9.1
docker push repo.k8s.local/docker.io/openresty/openresty:latest
docker push repo.k8s.local/docker.io/openresty/openresty:1.19.9.1
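
A quick sanity check that both tags landed (a sketch, assuming repo.k8s.local is a standard Docker Registry v2 reachable over plain HTTP):

curl -s http://repo.k8s.local/v2/docker.io/openresty/openresty/tags/list
#the v2 tags/list endpoint returns JSON of the form {"name": "...", "tags": [...]}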

Inspect the image

docker inspect openresty/openresty

                "resty_deb_version": "=1.19.9.1-1~bullseye1",
docker run -it openresty/openresty sh

apt-get update
apt-get install tree procps inetutils-ping net-tools

nginx -v
nginx version: openresty/1.19.9.1

ls /usr/local/openresty/nginx/conf

Copy the config file out of the image

docker run --name openresty -d openresty/openresty
docker cp openresty:/usr/local/openresty/nginx/conf/nginx.conf ./
docker stop openresty

Create a ConfigMap from a file

kubectl get cm -ntest
kubectl create -ntest configmap test-openresty-nginx-conf --from-file=nginx.conf=./nginx.conf
configmap/test-openresty-nginx-conf created
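
To confirm what was stored (a quick check, reusing the names above):

kubectl -ntest get cm test-openresty-nginx-conf -o yaml
kubectl -ntest describe cm test-openresty-nginx-conf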

Modify the configmap directly with the edit command

kubectl edit -ntest cm test-openresty-nginx-conf

Replace it via the replace command

Since ConfigMaps are usually created from files rather than hand-written YAML manifests, edits are made to the source file as well. However, replace has no --from-file parameter, so it cannot replace directly from a source file; the command below works around that.
The key is the --dry-run=client flag, which renders the object without sending it to the apiserver. Combined with -oyaml, this prints a fully configured manifest to stdout; piping it into replace with -f- (read from stdin) then performs the replacement.

kubectl create -ntest cm  test-openresty-nginx-conf --from-file=nginx.conf --dry-run=client -oyaml | kubectl -ntest replace -f-
configmap/test-openresty-nginx-conf replaced

Delete

kubectl delete cm test-openresty-nginx-conf

Making the ConfigMap immutable

Add immutable: true to the ConfigMap

kubectl edit -ntest cm test-openresty-nginx-conf
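
A minimal sketch of the edited object. Once immutable is set, changes to data are rejected and the field cannot be unset; to change the content you must delete and recreate the ConfigMap:

apiVersion: v1
kind: ConfigMap
metadata:
  name: test-openresty-nginx-conf
  namespace: test
immutable: true
data:
  nginx.conf: |
    # ... existing content ...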

Prepare the YAML files

#test-openresty-deploy.yaml
#the bitnami image uses host directories and runs as user 1001 by default.
#to run as user 500 (www), add under spec:
      securityContext:
        runAsUser: 500
#to allow binding ports below 1024, add to the container's securityContext:
      containers:
      - securityContext:
          capabilities:
            add:
            - NET_BIND_SERVICE
#mount the host timezone file and the nginx directories
cat > test-openresty-deploy.yaml << EOF
apiVersion: apps/v1
kind: Deployment
metadata:
  name: openresty
  namespace: test
spec:
  selector:
    matchLabels:
      app: openresty
  replicas: 1
  template:
    metadata:
      labels:
        app: openresty
    spec:
      dnsPolicy: ClusterFirstWithHostNet
      securityContext:
        runAsUser: 500
      containers:
      - name: openresty
        resources:
          limits:
            cpu: "20m"
            memory: 20Mi
          requests:
            cpu: 10m
            memory: 10Mi
        env:
        - name: TZ
          value: Asia/Shanghai
        image: repo.k8s.local/docker.io/openresty/openresty:1.19.9.1
        #command: ["/bin/bash", "-ce", "tail -f /dev/null"]
        #command: ["/opt/bitnami/scripts/openresty/run.sh"]
        ports:
        - containerPort: 80
        volumeMounts:
        #- name: vol-opresty-conf
        #  mountPath: /opt/bitnami/openresty/nginx/conf/
        - name: nginx-conf   # volume name
          mountPath: /usr/local/openresty/nginx/conf/nginx.conf      # mount path inside the container
          subPath: etc/nginx/nginx.conf         # must match volumes[0].items[0].path
        - name: vol-opresty-html
          mountPath: "/usr/share/nginx/html/"
        - name: vol-opresty-log
          mountPath: "/var/log/nginx/"
      volumes:
      - name: nginx-conf  # volume name
        configMap:        # volume type: configMap
          name: test-openresty-nginx-conf    # ConfigMap name
          items:       # which ConfigMap keys to mount
          - key: nginx.conf    # file name (key) in the ConfigMap
            path: etc/nginx/nginx.conf           # the path referenced by subPath
      #- name: vol-opresty-conf
      #  hostPath:
      #    path: /nginx/openresty/conf/
      #    type: DirectoryOrCreate
      - name: vol-opresty-html
        hostPath:
          path: /nginx/html/
          type: DirectoryOrCreate
      - name: vol-opresty-log
        hostPath:
          path: /nginx/logs/
          type: DirectoryOrCreate
      affinity: # approach 4: spread replicas across nodes where possible
        nodeAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
            nodeSelectorTerms:
            - matchExpressions:
              - key: kubernetes.io/arch
                operator: In
                values:
                - amd64
        podAntiAffinity:
          preferredDuringSchedulingIgnoredDuringExecution:
            - weight: 100
              podAffinityTerm:
                labelSelector:
                  matchExpressions:
                    - key: app
                      operator: In
                      values:
                        - openresty
                topologyKey: kubernetes.io/hostname
EOF

#openresty NodePort service
test-openresty-svc-nodeport.yaml
cat > test-openresty-svc-nodeport.yaml <<EOF
apiVersion: v1
kind: Service
metadata:
  name: svc-openresty
  namespace: test
spec:
  ports:
  - {name: http, nodePort: 32080, port: 31080, protocol: TCP, targetPort: 8089}
  selector: {app: openresty}
  type: NodePort
EOF
#Ingress association
test-openresty-ingress.yaml
cat > test-openresty-ingress.yaml  << EOF
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: ingress-svc-openresty
  annotations:
    kubernetes.io/ingress.class: "nginx"
  namespace: test
spec:
  rules:
  - http:
      paths:
      - path: /showvar
        pathType: Prefix
        backend:
          service:
            name: svc-openresty
            port:
              number: 31080
EOF

Error:
server-snippet annotation cannot be used. Snippet directives are disabled by the Ingress administrator

Enable it in the ingress-nginx controller ConfigMap (kind: ConfigMap):
allow-snippet-annotations: "true"

cat > ingress-nginx-ConfigMap.yaml <<EOF
apiVersion: v1
kind: ConfigMap
metadata:
  labels:
    app.kubernetes.io/component: controller
    app.kubernetes.io/instance: ingress-nginx
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/part-of: ingress-nginx
    app.kubernetes.io/version: 1.9.3
  name: ingress-nginx-controller
  namespace: ingress-nginx
data:
  allow-snippet-annotations: "true"
  worker-processes: "auto" #worker_processes
  server-name-hash-bucket-size: "128" #server_names_hash_bucket_size
  variables-hash-bucket-size: "256" #variables_hash_bucket_size
  variables-hash-max-size: "2048" #variables_hash_max_size
  client-header-buffer-size: "32k" #client_header_buffer_size
  proxy-body-size: "8m" #client_max_body_size
  large-client-header-buffers: "4 512k" #large_client_header_buffers
  client-body-buffer-size: "512k" #client_body_buffer_size
  proxy-connect-timeout: "5" #proxy_connect_timeout
  proxy-read-timeout: "60" #proxy_read_timeout
  proxy-send-timeout: "5" #proxy_send_timeout
  proxy-buffer-size: "32k" #proxy_buffer_size
  proxy-buffers-number: "8 32k" #proxy_buffers
  keep-alive: "60" #keepalive_timeout
  enable-real-ip: "true" 
  use-forwarded-headers: "true"
  forwarded-for-header: "ns_clientip" #real_ip_header
  compute-full-forwarded-for: "true"
  enable-underscores-in-headers: "true" #underscores_in_headers on
  proxy-real-ip-cidr: 192.168.0.0/16,10.244.0.0/16  #set_real_ip_from
  access-log-path: "/var/log/nginx/access_$hostname.log"
  error-log-path: "/var/log/nginx/error.log"
  #log-format-escape-json: "true"
  log-format-upstream: '{"timestamp": "$time_iso8601", "requestID": "$req_id", "proxyUpstreamName":
    "$proxy_upstream_name","hostname": "$hostname","host": "$host","body_bytes_sent": "$body_bytes_sent","proxyAlternativeUpstreamName": "$proxy_alternative_upstream_name","upstreamStatus":
    "$upstream_status", "geoip_country_code": "$geoip_country_code","upstreamAddr": "$upstream_addr","request_time":
    "$request_time","httpRequest":{ "remoteIp": "$remote_addr","realIp": "$realip_remote_addr","requestMethod": "$request_method", "requestUrl":
    "$request_uri", "status": $status,"requestSize": "$request_length", "responseSize":
    "$upstream_response_length", "userAgent": "$http_user_agent",
    "referer": "$http_referer","x-forward-for":"$proxy_add_x_forwarded_for","latency": "$upstream_response_time", "protocol":"$server_protocol"}}'
EOF

kubectl delete -f ingress-nginx-ConfigMap.yaml
kubectl apply -f ingress-nginx-ConfigMap.yaml
kubectl edit configmap -n ingress-nginx ingress-nginx-controller
#Ingress with server-snippet
#the real_ip directives apply in the server block, i.e. to the whole domain
#the whitelist-source-range IP allowlist applies in location = /showvar and is checked against remote_addr; use it only when you want a domain-wide allowlist. Equivalent to: allow 223.2.2.0/24;deny all;

test-openresty-ingress-snippet.yaml
cat > test-openresty-ingress-snippet.yaml  << EOF
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: ingress-svc-openresty
  annotations:
    kubernetes.io/ingress.class: "nginx"
    nginx.ingress.kubernetes.io/server-snippet: |
      underscores_in_headers on;
      set_real_ip_from 10.244.0.0/16;
      set_real_ip_from 192.168.0.0/16;
      real_ip_header ns_clientip;
      #real_ip_recursive on;
    nginx.ingress.kubernetes.io/whitelist-source-range: 127.0.0.1/32,192.168.0.0/16,10.244.0.0/16,223.2.2.0/24
  namespace: test
spec:
  rules:
  - http:
      paths:
      - path: /showvar
        pathType: Prefix
        backend:
          service:
            name: svc-openresty
            port:
              number: 31080
EOF

kubectl apply -f test-openresty-ingress-snippet.yaml
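
A quick way to exercise the snippet (a sketch; 223.2.2.10 is a hypothetical client value and the node IP/port are from this setup). /showvar echoes http_ns_clientip, so you can confirm the header survives the proxy chain and that real_ip_header picked it up:

curl -H 'ns_clientip: 223.2.2.10' http://192.168.244.7:30080/showvar/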

Create the mount directories on the worker node

mkdir -p /nginx/openresty/conf
mkdir -p /nginx/{html,logs}
chown -R www:website /nginx/
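
The chown assumes the node has a www user in group website whose UID matches runAsUser: 500 in the Deployment; if the UIDs differ, the pod cannot write to the mounted directories. A quick check:

id www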

Start the pod once first

kubectl apply -f test-openresty-deploy.yaml

#list the pods and note the pod name
kubectl get pods -o wide -n test
NAME                           READY   STATUS    RESTARTS        AGE     IP             NODE               NOMINATED NODE   READINESS GATES
nginx-deploy-7c9674d99-v92pd   1/1     Running   0               4d17h   10.244.1.97    node01.k8s.local   <none>           <none>
openresty-fdc45bdbc-jh67k      1/1     Running   0               8m41s   10.244.2.59    node02.k8s.local   <none>           <none>

Copy the config files from the pod to the master, then distribute them to the worker node

kubectl -n test logs -f openresty-76cf797cfc-gccsl

cd /nginx/openresty/conf
#any of the following three approaches works
kubectl cp test/openresty-76cf797cfc-gccsl:/opt/bitnami/openresty/nginx/conf /nginx/openresty/conf

kubectl cp test/openresty-76cf797cfc-gccsl:/opt/bitnami/openresty/nginx/conf ./

kubectl exec "openresty-76cf797cfc-gccsl" -n "test" -- tar cf - "/opt/bitnami/openresty/nginx/conf" | tar xf - 

chown -R www:website .

#copy from the master to node2
scp -r * root@192.168.244.7:/nginx/openresty/conf/

include "/opt/bitnami/openresty/nginx/conf/server_blocks/*.conf";
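
That include directive ships in the bitnami image's nginx.conf and is what pulls in the vhost files created below; a sketch of the surrounding context (bitnami layout assumed):

http {
    # ... defaults from the image ...
    include "/opt/bitnami/openresty/nginx/conf/server_blocks/*.conf";
}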

Create the vhost file on node2

It corresponds to /opt/bitnami/openresty/nginx/conf/server_blocks/ inside the pod

vi /nginx/openresty/nginx/conf/server_blocks/default.conf 
server {
    listen       8089;
    server_name  localhost;

    access_log  /var/log/nginx/host.access.log  main;

    location / {
        root   /usr/share/nginx/html;
        index  index.html index.htm;
    }

    location /showvar {
        default_type text/plain;           
        echo time_local: $time_local;
        echo hostname: $hostname;
        echo server_addr: $server_addr;
        echo server_port: $server_port;
        echo host: $host;
        echo scheme: $scheme;
        echo http_host: $http_host;
        echo uri: $uri;
        echo remote_addr: $remote_addr;
        echo remote_port: $remote_port;
        echo remote_user: $remote_user;
        echo realip_remote_addr: $realip_remote_addr;
        echo realip_remote_port: $realip_remote_port;
        echo http_ns_clientip: $http_ns_clientip;
        echo http_user_agent: $http_user_agent;
        echo http_x_forwarded_for: $http_x_forwarded_for;
        echo proxy_add_x_forwarded_for: $proxy_add_x_forwarded_for;
        echo X-Request-ID: $http_x_request_id;
        echo X-Real-IP: $http_x_real_ip;
        echo X-Forwarded-Host: $http_x_forwarded_host;
    }

    #error_page  404              /404.html;

    # redirect server error pages to the static page /50x.html
    #
    error_page   500 502 503 504  /50x.html;
    location = /50x.html {
        root   /usr/share/nginx/html;
    }

    # proxy the PHP scripts to Apache listening on 127.0.0.1:80
    #
    #location ~ \.php$ {
    #    proxy_pass   http://127.0.0.1;
    #}

    # pass the PHP scripts to FastCGI server listening on 127.0.0.1:9000
    #
    #location ~ \.php$ {
    #    root           html;
    #    fastcgi_pass   127.0.0.1:9000;
    #    fastcgi_index  index.php;
    #    fastcgi_param  SCRIPT_FILENAME  /scripts$fastcgi_script_name;
    #    include        fastcgi_params;
    #}

    # deny access to .htaccess files, if Apache's document root
    # concurs with nginx's one
    #
    #location ~ /\.ht {
    #    deny  all;
    #}
}

cat > /nginx/openresty/nginx/conf/server_blocks/ngxrealip.conf <<EOF
    underscores_in_headers on;
    #ignore_invalid_headers off;
    set_real_ip_from   10.244.0.0/16;
    real_ip_header    ns_clientip;

EOF

Point the openresty config directory in test-openresty-deploy.yaml at the host path, then restart the pod

cat > test-openresty-deploy.yaml << EOF
apiVersion: apps/v1
kind: Deployment
metadata:
  name: openresty
  namespace: test
spec:
  selector:
    matchLabels:
      app: openresty
  replicas: 1
  template:
    metadata:
      labels:
        app: openresty
    spec:
      dnsPolicy: ClusterFirstWithHostNet
      securityContext:
        runAsNonRoot: true
        runAsUser: 500
        #runAsGroup: 500
      nodeName: node02.k8s.local
      containers:
      - name: openresty
        image: repo.k8s.local/docker.io/bitnami/openresty:latest
        #command: ["/bin/bash", "-ce", "tail -f /dev/null"]
        command: ["/opt/bitnami/scripts/openresty/run.sh"]
        ports:
        - containerPort: 80
        volumeMounts:
        - name: timezone
          mountPath: /etc/localtime  
        - name: vol-opresty-conf
          mountPath: /opt/bitnami/openresty/nginx/conf/
          readOnly: true
        - name: vol-opresty-html
          mountPath: "/usr/share/nginx/html/"
        - name: vol-opresty-log
          mountPath: "/var/log/nginx/"
      volumes:
      - name: timezone       
        hostPath:
          path: /usr/share/zoneinfo/Asia/Shanghai  
      - name: vol-opresty-conf
        hostPath:
          path: /nginx/openresty/nginx/conf/
          type: DirectoryOrCreate
      - name: vol-opresty-html
        hostPath:
          path: /nginx/html/
          type: DirectoryOrCreate
      - name: vol-opresty-log
        hostPath:
          path: /nginx/logs/
          type: DirectoryOrCreate
      #nodeSelector:
        #ingresstype: ingress-nginx
EOF
#create/remove test-openresty-deploy.yaml
kubectl apply -f test-openresty-deploy.yaml
kubectl delete -f test-openresty-deploy.yaml

#create/remove the openresty NodePort service
kubectl apply -f test-openresty-svc-nodeport.yaml
kubectl delete -f test-openresty-svc-nodeport.yaml

#create/remove the openresty Ingress association
kubectl apply -f test-openresty-ingress.yaml
kubectl delete -f test-openresty-ingress.yaml

#list pods
kubectl get pods -o wide -n test
NAME                            READY   STATUS    RESTARTS     AGE   IP            NODE               NOMINATED NODE   READINESS GATES
openresty-b6d7798f8-h47xj       1/1     Running   0            64m   10.244.2.24   node02.k8s.local   <none>           <none>

#list services
kubectl get service -n test
NAME             TYPE        CLUSTER-IP      EXTERNAL-IP   PORT(S)           AGE
svc-openresty    NodePort    10.96.30.145    <none>        31080:32080/TCP   3d6h

#list the Ingress associations
kubectl get  Ingress -n test
NAME                     CLASS    HOSTS   ADDRESS     PORTS   AGE
ingress-svc-openresty    <none>   *       localhost   80      2m22s
ingress-svc-test-nginx   <none>   *       localhost   80      3d22h

#list the ingress-nginx services
kubectl get svc -n ingress-nginx
NAME                                 TYPE        CLUSTER-IP      EXTERNAL-IP   PORT(S)                      AGE
ingress-nginx-controller             NodePort    10.96.111.201   <none>        80:30080/TCP,443:30443/TCP   4d2h
ingress-nginx-controller-admission   ClusterIP   10.96.144.105   <none>        443/TCP                      4d2h

#view details
kubectl -n test describe pod openresty-b6d7798f8-h47xj
kubectl -n test logs -f openresty-b6d7798f8-h47xj

kubectl -n test describe pod openresty-59449454db-6knwv 
kubectl -n test logs -f  openresty-59449454db-mj74d
#enter the container
kubectl exec -it pod/openresty-b6d7798f8-h47xj -n test -- /bin/sh
kubectl exec -it pod/openresty-fdc45bdbc-jh67k -n test -- /bin/sh

#test-openresty-deploy.yaml maps the pod's openresty config, web root and logs onto host node2.
#the files can be edited directly on the host; create an index page that prints the node name
echo `hostname` >  /nginx/html/index.html

#test the config and reload the service without entering the container
kubectl exec -it pod/openresty-b6d7798f8-h47xj -n test -- /bin/sh -c '/opt/bitnami/openresty/bin/openresty -t'
kubectl exec -it pod/openresty-b6d7798f8-h47xj -n test -- /bin/sh -c '/opt/bitnami/openresty/bin/openresty -s reload'
kubectl exec -it pod/openresty-6b59dd984d-bj2xt -n test -- /bin/sh -c '/opt/bitnami/openresty/bin/openresty -t'
kubectl exec -it pod/openresty-6b59dd984d-bj2xt -n test -- /bin/sh -c '/opt/bitnami/openresty/bin/openresty -s reload'
kubectl exec -it pod/openresty-6b59dd984d-bj2xt -n test -- /bin/sh -c 'ls /opt/bitnami/openresty/nginx/conf/server_blocks/'

Inspect the traffic paths

List each node's NICs and IPs

for i in `ifconfig | grep -o ^[a-z0-9\.@]*`; do echo -n "$i : ";ifconfig $i|sed -n 2p|awk '{ print $2 }'; done

master01
cni0 : 10.244.0.1
enp0s3 : 192.168.244.4
flannel.1 : 10.244.0.0

node01
cni0 : 10.244.1.1
enp0s3 : 192.168.244.5
flannel.1 : 10.244.1.0

node02
cni0 : 10.244.2.1
enp0s3 : 192.168.244.7
flannel.1 : 10.244.2.0
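
An equivalent one-liner using iproute2, if installed (a sketch):

ip -4 -o addr show | awk '{print $2" : "$4}'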

The ingress currently runs HostNetwork plus NodePort 30080 on node01 and node02, but not on the master.
The openresty service exposes NodePort 32080, and its only pod is on node02.

pod IP + targetPort is reachable from any node in the cluster.
Accessing the openresty ClusterIP + port from inside the cluster goes through the VIP first, and kube-proxy then distributes the request across the pods; use ipvsadm to inspect these load-balancing rules (see the sketch after this list).
Accessing the openresty service via any node IP + NodePort, from inside or outside the cluster, adds one forward hop on nodes that do not host the pod. A NodePort request goes from the node IP straight to the pod without passing through the ClusterIP, but the forwarding is still implemented by kube-proxy.
Accessing the ingress NodePort from inside or outside the cluster resolves to a CLUSTER-IP; inside the cluster, the load-balancing logic is the same as for a ClusterIP Service.
Accessing the ingress HostNetwork port: if the backend pod is not on that node, ingress-nginx reverse-proxies across nodes once, otherwise it proxies locally (0 or 1 forward hop). Nodes without an ingress deployed cannot be accessed this way at all.
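
A sketch of inspecting those IPVS rules on a node (requires ipvsadm and kube-proxy running in IPVS mode; 10.96.30.145:31080 is the svc-openresty ClusterIP + port from this setup):

ipvsadm -Ln | grep -A 2 '10.96.30.145:31080'
#the lines after the TCP virtual-server entry list the pod endpoints (real servers)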

openresty pod: pod IP + targetPort

The openresty pod IP + port is reachable from any node inside the cluster, but not from outside.

From node02, access the openresty pod IP + targetPort on node02

curl http://10.244.2.24:8089/showvar
time_local: 30/Oct/2023:16:51:23 +0800
hostname: openresty-b6d7798f8-h47xj
server_addr: 10.244.2.24
server_port: 8089
host: 10.244.2.24
scheme: http
http_host: 10.244.2.24:8089
uri: /showvar
remote_addr: 10.244.2.1
remote_port: 42802
remote_user: 
http_x_forwarded_for: 

Traffic stays on node02:
10.244.2.1->10.244.2.24

From node01, access the openresty pod IP + targetPort on node02

curl http://10.244.2.24:8089/showvar
time_local: 30/Oct/2023:16:51:25 +0800
hostname: openresty-b6d7798f8-h47xj
server_addr: 10.244.2.24
server_port: 8089
host: 10.244.2.24
scheme: http
http_host: 10.244.2.24:8089
uri: /showvar
remote_addr: 10.244.1.0
remote_port: 39108
remote_user: 
http_x_forwarded_for:

Traffic goes from node01 to node02:
10.244.1.0->10.244.2.24

openresty service: ClusterIP + port

From the master, access the openresty service ClusterIP + port (pod on node02)

curl http://10.96.30.145:31080/showvar
time_local: 31/Oct/2023:10:15:49 +0800
hostname: openresty-b6d7798f8-h47xj
server_addr: 10.244.2.24
server_port: 8089
host: 10.96.30.145
scheme: http
http_host: 10.96.30.145:31080
uri: /showvar
remote_addr: 10.244.0.0
remote_port: 1266
remote_user: 
http_x_forwarded_for: 

From node02, access the openresty service ClusterIP + port

curl http://10.96.30.145:31080/showvar
time_local: 31/Oct/2023:10:18:01 +0800
hostname: openresty-b6d7798f8-h47xj
server_addr: 10.244.2.24
server_port: 8089
host: 10.96.30.145
scheme: http
http_host: 10.96.30.145:31080
uri: /showvar
remote_addr: 10.244.2.1
remote_port: 55374
remote_user: 
http_x_forwarded_for: 
http_user_agent: curl/7.29.0

From a pod on node02, access via the service's DNS name

kubectl exec -it pod/test-pod-86df6cd59b-x8ndr -n test -- curl http://svc-openresty.test.svc.cluster.local:31080/showvar
time_local: 01/Nov/2023:11:11:46 +0800
hostname: openresty-b6d7798f8-h47xj
server_addr: 10.244.2.24
server_port: 8089
host: svc-openresty.test.svc.cluster.local
scheme: http
http_host: svc-openresty.test.svc.cluster.local:31080
uri: /showvar
remote_addr: 10.244.2.41
remote_port: 43594
remote_user: 
http_x_forwarded_for: 
http_user_agent: curl/7.81.0

The short service name is completed automatically

kubectl exec -it pod/test-pod-86df6cd59b-x8ndr -n test -- curl http://svc-openresty:31080/showvar     
time_local: 01/Nov/2023:11:15:26 +0800
hostname: openresty-b6d7798f8-h47xj
server_addr: 10.244.2.24
server_port: 8089
host: svc-openresty
scheme: http
http_host: svc-openresty:31080
uri: /showvar
remote_addr: 10.244.2.41
remote_port: 57768
remote_user: 
http_x_forwarded_for: 
http_user_agent: curl/7.81.0
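
The completion comes from the search list in the pod's /etc/resolv.conf (a sketch; contents typical for a pod in namespace test with cluster DNS at 10.96.0.10):

kubectl exec -it pod/test-pod-86df6cd59b-x8ndr -n test -- cat /etc/resolv.conf
#search test.svc.cluster.local svc.cluster.local cluster.local
#nameserver 10.96.0.10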

Access the openresty service via any node IP + NodePort, from inside or outside the cluster

The node hosting the pod forwards nothing extra; nodes without the pod forward once.

curl http://192.168.244.4:32080
node02.k8s.local

curl http://192.168.244.4:32080/showvar
time_local: 30/Oct/2023:16:27:09 +0800
hostname: openresty-b6d7798f8-h47xj
server_addr: 10.244.2.24
server_port: 8089
host: 127.0.0.1
scheme: http
http_host: 127.0.0.1:32080
uri: /showvar
remote_addr: 10.244.0.0
remote_port: 22338
remote_user: 
http_x_forwarded_for: 

Traffic goes from the master's node IP to node02's cluster network, one cross-node forward:
192.168.244.4->10.244.0.0->10.244.2.24

Pointing at node2's 32080:
curl http://192.168.244.7:32080/showvar
Traffic goes from node2's node IP to the pod locally:
192.168.244.7->10.244.2.1->10.244.2.24

Access the ingress service via any node IP + NodePort, from inside or outside the cluster

Accessing the master's node IP + NodePort (no ingress deployed there) forwards once or twice, depending on where the load balancing lands.

curl http://192.168.244.4:30080/showvar/
time_local: 30/Oct/2023:16:36:16 +0800
hostname: openresty-b6d7798f8-h47xj
server_addr: 10.244.2.24
server_port: 8089
host: 127.0.0.1
scheme: http
http_host: 127.0.0.1:30080
uri: /showvar/
remote_addr: 10.244.1.0
remote_port: 58680
remote_user: 
http_x_forwarded_for: 192.168.244.4

From the master's node IP via flannel to the ingress on node01 or node02, then on to node02; http_x_forwarded_for gains 192.168.244.4:
192.168.244.4->10.244.0.0->10.244.1.0->10.244.2.24
http_x_forwarded_for is always 192.168.244.4
remote_addr is 10.244.1.0 or 10.244.2.0

From the master, access node02's node IP + NodePort (ingress deployed there); this forwards once or twice

#two cases

curl http://192.168.244.7:30080/showvar/
time_local: 30/Oct/2023:17:07:10 +0800
hostname: openresty-b6d7798f8-h47xj
server_addr: 10.244.2.24
server_port: 8089
host: 127.0.0.1
scheme: http
http_host: 127.0.0.1:30080
uri: /showvar/
remote_addr: 10.244.1.0
remote_port: 44200
remote_user: 
http_x_forwarded_for: 192.168.244.7

From the master, the request to node02's node IP passes through node01 and back to node02:
192.168.244.7->10.244.2.0->10.244.1.0->10.244.2.24

curl http://192.168.244.7:30080/showvar/
time_local: 30/Oct/2023:17:18:06 +0800
hostname: openresty-b6d7798f8-h47xj
server_addr: 10.244.2.24
server_port: 8089
host: 192.168.244.7
scheme: http
http_host: 192.168.244.7:30080
uri: /showvar/
remote_addr: 10.244.2.1
remote_port: 45772
remote_user: 
http_x_forwarded_for: 192.168.244.4
From node02's node IP, kube-proxy schedules via the master to the ingress on node02, then back to node02:
192.168.244.4->192.168.244.7->10.244.2.1->10.244.2.24

ingress hostnetwork

From the master, access node02's HostNetwork port: served directly, with no cross-node forwarding

curl http://192.168.244.7:80/showvar/
time_local: 30/Oct/2023:17:26:37 +0800
hostname: openresty-b6d7798f8-h47xj
server_addr: 10.244.2.24
server_port: 8089
host: 192.168.244.7
scheme: http
http_host: 192.168.244.7
uri: /showvar/
remote_addr: 10.244.2.1
remote_port: 45630
remote_user: 
http_x_forwarded_for: 192.168.244.4

192.168.244.7->10.244.2.1->10.244.2.24

From the master, access node01's HostNetwork port: the ingress forwards once

curl http://192.168.244.5:80/showvar/
time_local: 30/Oct/2023:17:28:10 +0800
hostname: openresty-b6d7798f8-h47xj
server_addr: 10.244.2.24
server_port: 8089
host: 192.168.244.5
scheme: http
http_host: 192.168.244.5
uri: /showvar/
remote_addr: 10.244.1.0
remote_port: 48512
remote_user: 
http_x_forwarded_for: 192.168.244.4

192.168.244.5->10.244.1.0->10.244.2.24

From the master, access the master itself: no ingress is deployed there, so the connection fails

curl http://192.168.244.4:80/showvar/
curl: (7) Failed connect to 192.168.244.4:80; Connection refused

Error:

2023/10/30 14:43:23 [warn] 1#1: the "user" directive makes sense only if the master process runs with super-user privileges, ignored in /opt/bitnami/openresty/nginx/conf/nginx.conf:2
nginx: [warn] the "user" directive makes sense only if the master process runs with super-user privileges, ignored in /opt/bitnami/openresty/nginx/conf/nginx.conf:2
2023/10/30 14:43:23 [emerg] 1#1: mkdir() "/opt/bitnami/openresty/nginx/tmp/client_body" failed (13: Permission denied)

nginx: [emerg] mkdir() "/opt/bitnami/openresty/nginx/tmp/client_body" failed (13: Permission denied)
The permissions are wrong; remove runAsGroup from securityContext

      securityContext:
        runAsUser: 1000
        #runAsGroup: 1000

Multiple pods per Deployment

Store the openresty config and web files on NFS so every replica sees the same content.
PVs and StorageClasses are cluster-scoped; PVCs are namespaced and must be in the same namespace as the pod that uses them.

#prepare the PVs
cat > test-openresty-spv.yaml << EOF
apiVersion: v1
kind: PersistentVolume
metadata:
  name: test-openresty-cfg-spv
  namespace: test
  labels:
    pv: test-openresty-cfg-spv
spec:
  capacity:
    storage: 300Mi
  accessModes:
    - ReadWriteMany
  persistentVolumeReclaimPolicy: Retain
  nfs:
    path: /nfs/k8s/cfg/openresty
    server: 192.168.244.6
---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: test-openresty-web-spv
  namespace: test
  labels:
    pv: test-openresty-web-spv
spec:
  capacity:
    storage: 1Gi
  accessModes:
    - ReadWriteMany
  persistentVolumeReclaimPolicy: Retain
  nfs:
    path: /nfs/k8s/web/openresty
    server: 192.168.244.6
EOF
#prepare the PVCs
cat > test-openresty-pvc.yaml  << EOF
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: test-openresty-cfg-pvc
  namespace: test
spec:
  accessModes:
    - ReadWriteMany
  resources:
    requests:
      storage: 300Mi
  selector:
    matchLabels:
      pv: test-openresty-cfg-spv
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: test-openresty-web-pvc
  namespace: test
spec:
  accessModes:
    - ReadWriteMany
  resources:
    requests:
      storage: 1Gi
  selector:
    matchLabels:
      pv: test-openresty-web-spv
EOF
#prepare the openresty Deployment
cat > test-openresty-deploy.yaml << EOF
apiVersion: apps/v1
kind: Deployment
metadata:
  name: openresty
  namespace: test
spec:
  selector:
    matchLabels:
      app: openresty
  replicas: 2
  template:
    metadata:
      labels:
        app: openresty
    spec:
      dnsPolicy: ClusterFirstWithHostNet
      securityContext:
        runAsNonRoot: true
        runAsUser: 500
        #runAsGroup: 500
      #nodeName: 
      #  node02.k8s.local
      containers:
      - name: openresty
        image: repo.k8s.local/docker.io/bitnami/openresty:latest
        #command: ["/bin/bash", "-ce", "tail -f /dev/null"]
        command: ["/opt/bitnami/scripts/openresty/run.sh"]
        ports:
        - containerPort: 80
        volumeMounts:
        - name: timezone
          mountPath: /etc/localtime  
        - name: vol-opresty-conf
          mountPath: /opt/bitnami/openresty/nginx/conf/
          readOnly: true
        - name: vol-opresty-html
          mountPath: "/usr/share/nginx/html/"
        - name: vol-opresty-log
          mountPath: "/var/log/nginx/"
      volumes:
      - name: timezone       
        hostPath:
          path: /usr/share/zoneinfo/Asia/Shanghai  
      - name: vol-opresty-conf
        #hostPath:
        #  path: /nginx/openresty/nginx/conf/
        #  type: DirectoryOrCreate
        persistentVolumeClaim:
          claimName: test-openresty-cfg-pvc
      - name: vol-opresty-html
        #hostPath:
        #  path: /nginx/html/
        #  type: DirectoryOrCreate
        persistentVolumeClaim:
          claimName: test-openresty-web-pvc
      - name: vol-opresty-log
        hostPath:
          path: /nginx/logs/
          type: DirectoryOrCreate
      nodeSelector:
        ingresstype: ingress-nginx
EOF
kubectl apply -f test-openresty-spv.yaml
kubectl delete -f test-openresty-spv.yaml
kubectl apply -f test-openresty-pvc.yaml
kubectl delete -f test-openresty-pvc.yaml

#create/remove test-openresty-deploy.yaml
kubectl apply -f test-openresty-deploy.yaml
kubectl delete -f test-openresty-deploy.yaml

#create/remove the openresty NodePort service
kubectl apply -f test-openresty-svc-nodeport.yaml
kubectl delete -f test-openresty-svc-nodeport.yaml

#create/remove the openresty Ingress association
kubectl apply -f test-openresty-ingress.yaml
kubectl delete -f test-openresty-ingress.yaml

#list pods
kubectl get pods -o wide -n test
NAME                            READY   STATUS    RESTARTS     AGE   IP            NODE               NOMINATED NODE   READINESS GATES
openresty-6b5c6c6966-h6z6d     1/1     Running   7 (47m ago)     53m     10.244.1.107   node01.k8s.local   <none>           <none>
openresty-6b5c6c6966-l667p     1/1     Running   6 (50m ago)     53m     10.244.2.69    node02.k8s.local   <none>           <none>

kubectl get pv,pvc -n test

kubectl get sc

kubectl describe pvc -n test
storageclass.storage.k8s.io "nfs" not found
Fix: remove storageClassName: nfs from the PV and PVC

#view details
kubectl -n test describe pod openresty-6b5c6c6966-l667p
kubectl -n test logs -f openresty-6b5c6c6966-h6z6d

kubectl exec -it pod/openresty-b4475b994-m72qg -n test -- /bin/sh -c '/opt/bitnami/openresty/bin/openresty -t'
kubectl exec -it pod/openresty-b4475b994-m72qg -n test -- /bin/sh -c '/opt/bitnami/openresty/bin/openresty -s reload'
kubectl exec -it pod/openresty-6b5c6c6966-h6z6d -n test -- /bin/sh -c 'ls /opt/bitnami/openresty/nginx/conf/server_blocks/'

Modify the config on NFS, then restart the pods
sed -E -n 's/remote_user:.*/remote_user:test2;/p' /nfs/k8s/cfg/openresty/server_blocks/default.conf 
sed -E -i 's/remote_user:.*/remote_user:test5;/' /nfs/k8s/cfg/openresty/server_blocks/default.conf 

1. Gracefully restart the pods via rollout
kubectl rollout restart deployment/openresty -n test

2. kubectl set env
kubectl set env deployment openresty -n test DEPLOY_DATE="$(date)"

3. Scale the pod replica count
kubectl scale deployment/openresty -n test --replicas=3

4. Delete a single pod
kubectl delete pod openresty-7ccbdd4f6c-9l566  -n test

kubectl annotate pods openresty-7ccbdd4f6c-wrbl9 restartversion="2" -n test --overwrite

-------------
https://gitee.com/mirrors_openresty/docker-openresty?skip_mobile=true

kubectl create configmap test2-openresty-reload --from-literal=reloadnginx.log=1 -n test2
kubectl get configmaps test2-openresty-reload -o yaml -n test2
kubectl delete configmap test2-openresty-reload -n test2

#hide version information
#response headers
sed -i 's/"Server: nginx" CRLF;/"Server:" CRLF;/g' /opt/nginx-1.20.2/src/http/ngx_http_header_filter_module.c
sed -i 's/"Server: " NGINX_VER CRLF;/"Server:" CRLF;/g' /opt/nginx-1.20.2/src/http/ngx_http_header_filter_module.c
#error pages
sed -i 's/>" NGINX_VER "</></g' /opt/nginx-1.20.2/src/http/ngx_http_special_response.c
sed -i 's/>" NGINX_VER_BUILD "</></g' /opt/nginx-1.20.2/src/http/ngx_http_special_response.c
sed -i 's/>nginx</></g' /opt/nginx-1.20.2/src/http/ngx_http_special_response.c
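If recompiling is overkill, hiding just the version number (though not the server name) needs only a config change; a minimal sketch:

http {
    server_tokens off;  # sends "Server: openresty" instead of "Server: openresty/1.19.9.1"
}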
