Prerequisites
Install Kubernetes

Pull each cluster's .kube/config (see the linked article)
./generate-kube-config.sh \
    cluster1=192.168.103.227 \
    cluster2=192.168.103.231 \
    cluster3=192.168.103.235 \
    && source /etc/profile
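
The generate-kube-config.sh script above (from the linked article) is assumed to copy each cluster's admin kubeconfig and merge the three contexts into a single ~/.kube/config. If you manage the files by hand, an equivalent merge looks roughly like this (the config-cluster* paths are placeholders for wherever you saved each cluster's kubeconfig):

export KUBECONFIG=~/.kube/config-cluster1:~/.kube/config-cluster2:~/.kube/config-cluster3
kubectl config view --flatten > ~/.kube/config.merged
mv ~/.kube/config.merged ~/.kube/config && unset KUBECONFIG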

cat >> /etc/profile << ERIC
export CTX_CLUSTER1=cluster1
export CTX_CLUSTER2=cluster2
export CTX_CLUSTER3=cluster3
ERIC

source /etc/profile

Check the contexts
[root@master01 ~]# kubectl config get-contexts
CURRENT   NAME       CLUSTER    AUTHINFO   NAMESPACE
*         cluster1   cluster1   cluster1
          cluster2   cluster2   cluster2
          cluster3   cluster3   cluster3
[root@master01 ~]#
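
Optionally confirm that each context can actually reach its API server before going any further:

kubectl --context="${CTX_CLUSTER1}" get nodes
kubectl --context="${CTX_CLUSTER2}" get nodes
kubectl --context="${CTX_CLUSTER3}" get nodes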

Install MetalLB
Configure one address pool per cluster; the Istio east-west gateways installed later get their external IPs from these pools.
kubectl --context="${CTX_CLUSTER1}" apply -f - << ERIC

---
apiVersion: v1
kind: ConfigMap
metadata:
  namespace: metallb-system
  # This name is fixed by metallb.yaml; if you change it, the ConfigMap will not take effect
  name: config
data:
  config: |
    address-pools:
    - name: my-ip-space
      protocol: layer2
      addresses:
      # IP address range handed out for external access
      - 192.168.103.251-192.168.103.252

ERIC


kubectl --context="${CTX_CLUSTER2}" apply -f - << ERIC

---
apiVersion: v1
kind: ConfigMap
metadata:
  namespace: metallb-system
  # This name is fixed by metallb.yaml; if you change it, the ConfigMap will not take effect
  name: config
data:
  config: |
    address-pools:
    - name: my-ip-space
      protocol: layer2
      addresses:
      # IP address range handed out for external access
      - 192.168.103.253-192.168.103.254

ERIC
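
These ConfigMaps only configure the address pools; MetalLB itself (the metallb.yaml referred to in the comment) must already be running in each cluster's metallb-system namespace. A minimal sketch for MetalLB v0.9.x in layer2 mode, shown for cluster1 and repeated with --context="${CTX_CLUSTER2}" for cluster2 (the version and manifest URLs are assumptions; use the release you actually deploy):

kubectl --context="${CTX_CLUSTER1}" apply -f https://raw.githubusercontent.com/metallb/metallb/v0.9.6/manifests/namespace.yaml
kubectl --context="${CTX_CLUSTER1}" apply -f https://raw.githubusercontent.com/metallb/metallb/v0.9.6/manifests/metallb.yaml
# MetalLB v0.9.x also needs a memberlist secret for the speakers
kubectl --context="${CTX_CLUSTER1}" create secret generic -n metallb-system memberlist \
    --from-literal=secretkey="$(openssl rand -base64 128)"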




Node inventory (three clusters, 4 nodes each):
VM name                  Internal IP      Role      CPU  Memory(GB)  Disk01(GB)  Disk02(GB)  OS
Mao-k8s-v1.20.4-103.227 192.168.103.227 master01 8 8 100 100 CentOS 7.9
Mao-k8s-v1.20.4-103.228 192.168.103.228 worker01 8 8 100 100 CentOS 7.9
Mao-k8s-v1.20.4-103.229 192.168.103.229 worker02 8 8 100 100 CentOS 7.9
Mao-k8s-v1.20.4-103.230 192.168.103.230 worker03 8 8 100 100 CentOS 7.9
Mao-k8s-v1.20.4-103.231 192.168.103.231 master01 8 8 100 100 CentOS 7.9
Mao-k8s-v1.20.4-103.232 192.168.103.232 worker01 8 8 100 100 CentOS 7.9
Mao-k8s-v1.20.4-103.233 192.168.103.233 worker02 8 8 100 100 CentOS 7.9
Mao-k8s-v1.20.4-103.234 192.168.103.234 worker03 8 8 100 100 CentOS 7.9
Mao-k8s-v1.20.4-103.235 192.168.103.235 master01 8 8 100 100 CentOS 7.9
Mao-k8s-v1.20.4-103.236 192.168.103.236 worker01 8 8 100 100 CentOS 7.9
Mao-k8s-v1.20.4-103.237 192.168.103.237 worker02 8 8 100 100 CentOS 7.9
Mao-k8s-v1.20.4-103.238 192.168.103.238 worker03 8 8 100 100 CentOS 7.9



Install the istioctl binary
wget https://github.com/istio/istio/releases/download/1.9.4/istio-1.9.4-linux-amd64.tar.gz && tar -zxvf istio-1.9.4-linux-amd64.tar.gz && cp istio-1.9.4/bin/istioctl /usr/local/bin/
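
Optionally confirm the client is on the expected version:

istioctl version --remote=false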



Plug CA certificates and keys into the clusters
Here the sample certificates shipped with Istio (samples/certs) are used; they are fine for a demo but should not be used in production.
cd istio-1.9.4

kubectl --context="${CTX_CLUSTER1}" create namespace istio-system
kubectl --context="${CTX_CLUSTER2}" create namespace istio-system

kubectl --context="${CTX_CLUSTER1}" create secret generic cacerts -n istio-system \
      --from-file=samples/certs/ca-cert.pem \
      --from-file=samples/certs/ca-key.pem \
      --from-file=samples/certs/root-cert.pem \
      --from-file=samples/certs/cert-chain.pem

kubectl --context="${CTX_CLUSTER2}" create secret generic cacerts -n istio-system \
      --from-file=samples/certs/ca-cert.pem \
      --from-file=samples/certs/ca-key.pem \
      --from-file=samples/certs/root-cert.pem \
      --from-file=samples/certs/cert-chain.pem
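
The sample certificates are shared by everyone who downloads Istio, so for anything beyond a lab the docs instead generate a dedicated root CA with per-cluster intermediate certificates using the Makefile shipped in tools/certs. A sketch, run from the istio-1.9.4 directory (the certs/ output directory follows the Makefile's default layout):

mkdir -p certs && pushd certs
make -f ../tools/certs/Makefile.selfsigned.mk root-ca
make -f ../tools/certs/Makefile.selfsigned.mk cluster1-cacerts
make -f ../tools/certs/Makefile.selfsigned.mk cluster2-cacerts
popd
# then create the cacerts secret in each cluster from certs/cluster1/ and certs/cluster2/ instead of samples/certs/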





Official docs: Install Multi-Primary on different networks

Configure cluster1
###### Set the default network for cluster1
kubectl --context="${CTX_CLUSTER1}" get namespace istio-system && \
  kubectl --context="${CTX_CLUSTER1}" label namespace istio-system topology.istio.io/network=network1


###### Configure cluster1 as a primary
cat <<EOF > cluster1.yaml
apiVersion: install.istio.io/v1alpha1
kind: IstioOperator
spec:
  values:
    global:
      meshID: mesh1
      multiCluster:
        clusterName: cluster1
      network: network1
EOF

istioctl install --context="${CTX_CLUSTER1}" -f cluster1.yaml


###### Install the east-west gateway in cluster1
samples/multicluster/gen-eastwest-gateway.sh \
    --mesh mesh1 --cluster cluster1 --network network1 | \
    istioctl --context="${CTX_CLUSTER1}" install -y -f -
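
The east-west gateway is exposed through a Service of type LoadBalancer, so MetalLB should hand it one of the addresses from the pool configured earlier. Verify it has an external IP before continuing (and repeat with ${CTX_CLUSTER2} once the second gateway is installed below):

kubectl --context="${CTX_CLUSTER1}" get svc istio-eastwestgateway -n istio-system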


###### Expose services in cluster1
kubectl --context="${CTX_CLUSTER1}" apply -n istio-system -f \
    samples/multicluster/expose-services.yaml
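
For reference, samples/multicluster/expose-services.yaml defines roughly the following Gateway (paraphrased from the Istio samples; check the file in your release for the exact contents). It exposes every *.local service through the east-west gateway on port 15443 with TLS AUTO_PASSTHROUGH, which is what lets the other cluster reach services here:

apiVersion: networking.istio.io/v1alpha3
kind: Gateway
metadata:
  name: cross-network-gateway
spec:
  selector:
    istio: eastwestgateway
  servers:
    - port:
        number: 15443
        name: tls
        protocol: TLS
      tls:
        mode: AUTO_PASSTHROUGH
      hosts:
        - "*.local"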


Configure cluster2
###### Set the default network for cluster2
kubectl --context="${CTX_CLUSTER2}" get namespace istio-system && \
  kubectl --context="${CTX_CLUSTER2}" label namespace istio-system topology.istio.io/network=network2


###### Configure cluster2 as a primary
cat <<EOF > cluster2.yaml
apiVersion: install.istio.io/v1alpha1
kind: IstioOperator
spec:
  values:
    global:
      meshID: mesh1
      multiCluster:
        clusterName: cluster2
      network: network2
EOF

istioctl install --context="${CTX_CLUSTER2}" -f cluster2.yaml


###### Install the east-west gateway in cluster2
samples/multicluster/gen-eastwest-gateway.sh \
    --mesh mesh1 --cluster cluster2 --network network2 | \
    istioctl --context="${CTX_CLUSTER2}" install -y -f -


###### Expose services in cluster2
kubectl --context="${CTX_CLUSTER2}" apply -n istio-system -f \
    samples/multicluster/expose-services.yaml



Enable endpoint discovery
###### Install in cluster2 a remote secret that grants access to cluster1's API server
istioctl x create-remote-secret \
  --context="${CTX_CLUSTER1}" \
  --name=cluster1 | \
  kubectl apply -f - --context="${CTX_CLUSTER2}"


###### Install in cluster1 a remote secret that grants access to cluster2's API server
istioctl x create-remote-secret \
  --context="${CTX_CLUSTER2}" \
  --name=cluster2 | \
  kubectl apply -f - --context="${CTX_CLUSTER1}"
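
Each command above creates a secret named istio-remote-secret-<name> in the target cluster's istio-system namespace (the name comes from the --name flag). A quick sanity check:

kubectl --context="${CTX_CLUSTER2}" get secret istio-remote-secret-cluster1 -n istio-system
kubectl --context="${CTX_CLUSTER1}" get secret istio-remote-secret-cluster2 -n istio-system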







Verify the installation
Deploy the HelloWorld Service
kubectl create --context="${CTX_CLUSTER1}" namespace sample
kubectl create --context="${CTX_CLUSTER2}" namespace sample


kubectl label --context="${CTX_CLUSTER1}" namespace sample \
    istio-injection=enabled
kubectl label --context="${CTX_CLUSTER2}" namespace sample \
    istio-injection=enabled


kubectl apply --context="${CTX_CLUSTER1}" \
    -f samples/helloworld/helloworld.yaml \
    -l service=helloworld -n sample
kubectl apply --context="${CTX_CLUSTER2}" \
    -f samples/helloworld/helloworld.yaml \
    -l service=helloworld -n sample



Deploy HelloWorld V1 (in cluster1 only)
kubectl apply --context="${CTX_CLUSTER1}" \
    -f samples/helloworld/helloworld.yaml \
    -l version=v1 -n sample


Deploy HelloWorld V2 (in cluster2 only)
kubectl apply --context="${CTX_CLUSTER2}" \
    -f samples/helloworld/helloworld.yaml \
    -l version=v2 -n sample


Deploy Sleep
kubectl apply --context="${CTX_CLUSTER1}" \
    -f samples/sleep/sleep.yaml -n sample
kubectl apply --context="${CTX_CLUSTER2}" \
    -f samples/sleep/sleep.yaml -n sample
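
Before testing, make sure the helloworld and sleep pods are Running in both clusters; with sidecar injection enabled each pod should show 2/2 READY:

kubectl get pod --context="${CTX_CLUSTER1}" -n sample
kubectl get pod --context="${CTX_CLUSTER2}" -n sample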




Test
Call HelloWorld from the Sleep pod in each cluster. Repeat the request a few times; responses should come from both v1 and v2, which confirms that traffic crosses the cluster boundary.
kubectl exec --context="${CTX_CLUSTER1}" -n sample -c sleep \
    "$(kubectl get pod --context="${CTX_CLUSTER1}" -n sample -l \
    app=sleep -o jsonpath='{.items[0].metadata.name}')" \
    -- curl -sS helloworld.sample:5000/hello


kubectl exec --context="${CTX_CLUSTER2}" -n sample -c sleep \
    "$(kubectl get pod --context="${CTX_CLUSTER2}" -n sample -l \
    app=sleep -o jsonpath='{.items[0].metadata.name}')" \
    -- curl -sS helloworld.sample:5000/hello
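
To see the cross-cluster load balancing more clearly, repeat the request in a loop; the responses should alternate between v1 (running in cluster1) and v2 (running in cluster2):

for i in $(seq 1 10); do
  kubectl exec --context="${CTX_CLUSTER1}" -n sample -c sleep \
      "$(kubectl get pod --context="${CTX_CLUSTER1}" -n sample -l \
      app=sleep -o jsonpath='{.items[0].metadata.name}')" \
      -- curl -sS helloworld.sample:5000/hello
done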

[root@master01 istio-1.9.4]# kubectl exec --context="${CTX_CLUSTER1}" -n sample -c sleep     "$(kubectl get pod --context="${CTX_CLUSTER1}" -n sample -l \
    app=sleep -o jsonpath='{.items[0].metadata.name}')"     -- curl -v helloworld.sample:5000/hello
  % Total    % Received % Xferd  Average Speed   Time    Time     Time  Current
                                 Dload  Upload   Total   Spent    Left  Speed
  0     0    0     0    0     0      0      0 --:--:-- --:--:-- --:--:--     0*   Trying 10.96.87.35:5000...
* Connected to helloworld.sample (10.96.87.35) port 5000 (#0)
> GET /hello HTTP/1.1
> Host: helloworld.sample:5000
> User-Agent: curl/7.76.1-DEV
> Accept: */*
>
Hello version: v1, instance: helloworld-v1-776f57d5f6-r4d4l
* Mark bundle as not supporting multiuse
< HTTP/1.1 200 OK
< content-type: text/html; charset=utf-8
< content-length: 60
< server: envoy
< date: Wed, 12 May 2021 07:46:24 GMT
< x-envoy-upstream-service-time: 146
<
{ [60 bytes data]
100    60  100    60    0     0    402      0 --:--:-- --:--:-- --:--:--   402
* Connection #0 to host helloworld.sample left intact
[root@master01 istio-1.9.4]#
[root@master01 istio-1.9.4]# kubectl exec --context="${CTX_CLUSTER1}" -n sample -c sleep     "$(kubectl get pod --context="${CTX_CLUSTER1}" -n sample -l \
    app=sleep -o jsonpath='{.items[0].metadata.name}')"     -- curl -v helloworld.sample:5000/hello
  % Total    % Received % Xferd  Average Speed   Time    Time     Time  Current
                                 Dload  Upload   Total   Spent    Left  Speed
  0     0    0     0    0     0      0      0 --:--:-- --:--:-- --:--:--     0*   Trying 10.96.87.35:5000...
* Connected to helloworld.sample (10.96.87.35) port 5000 (#0)
> GET /hello HTTP/1.1
> Host: helloworld.sample:5000
> User-Agent: curl/7.76.1-DEV
> Accept: */*
>
Hello version: v2, instance: helloworld-v2-54df5f84b-d5d45
* Mark bundle as not supporting multiuse
< HTTP/1.1 200 OK
< content-type: text/html; charset=utf-8
< content-length: 59
< server: envoy
< date: Wed, 12 May 2021 07:46:27 GMT
< x-envoy-upstream-service-time: 131
<
{ [59 bytes data]
100    59  100    59    0     0    437      0 --:--:-- --:--:-- --:--:--   440
* Connection #0 to host helloworld.sample left intact
[root@master01 istio-1.9.4]#




Install all dashboards
cd istio-1.9.4

## Install Kiali and the other addons
## Run this command twice; the first run may fail to create some resources because the CRDs it installs are not yet registered
kubectl apply -f samples/addons
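
Wait for Kiali to finish rolling out before opening the dashboard:

kubectl rollout status deployment/kiali -n istio-system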

### Open the Kiali dashboard
istioctl dashboard --address 0.0.0.0 kiali




Uninstall Istio
istioctl x uninstall --purge --context="${CTX_CLUSTER1}"
istioctl x uninstall --purge --context="${CTX_CLUSTER2}"


kubectl --context="${CTX_CLUSTER1}" delete namespace istio-system
kubectl --context="${CTX_CLUSTER2}" delete namespace istio-system
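
Optionally remove the demo namespace from both clusters as well:

kubectl --context="${CTX_CLUSTER1}" delete namespace sample
kubectl --context="${CTX_CLUSTER2}" delete namespace sample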



