Versions Compared

Key

  • This line was added.
  • This line was removed.
  • Formatting was changed.

...

  1. Preparation

    • Set up the INF O-Cloud. Install the INF (StarlingX R8.0) AIO-SX by following the procedure, and enable Ceph as persistent storage.

    • Prepare a Linux environment with a Bash CLI whose network can reach the INF AIO-SX, then install the required packages.

      $ sudo apt-get install git make patch jq
      # Make sure your CLI has the 'kubectl' and 'helm' installed
    • Clone openstack-helm and openstack-helm-infra code on your Linux.

      $ git clone --depth 1 --branch master https://opendev.org/openstack/openstack-helm.git
      $ git clone --depth 1 --branch master https://opendev.org/openstack/openstack-helm-infra.git
    • Copy the "/etc/kubernetes/admin.conf" from the INF controller node to your local Linux.

      $ scp <INF-controller-0>:/etc/kubernetes/admin.conf ~/.kube/config
      # Change the IP address in the ~/.kube/config
      # server: https://<INF-OAM-IP>:6443
      # You can get the OAM IP through this command on controller node
      # system addrpool-show `system addrpool-list | grep oam | awk '{print $2}'` | grep floating
    • Add labels to controller-0 node.

      $ kubectl label node controller-0 ceph-mgr=enabled
      $ kubectl label node controller-0 ceph-mon=enabled
      $ kubectl label node controller-0 ceph-mds=enabled
      $ kubectl label node controller-0 ceph-rgw=enabled
      $ kubectl label node controller-0 ceph-osd=enabled
      $ kubectl label node controller-0 openstack-control-plane=enabled
    • Create namespaces.

      $ kubectl create namespace openstack
      $ kubectl create namespace ceph
    Deploy Ingress Controller. Original procedure: https://docs.openstack.org/openstack-helm/latest/install/developer/kubernetes-and-common-setup.html#deploy-the-ingress-controller
    • Modify openstack-helm/tools/deployment/component/common/ingress.sh file as follows:

      diff --git a/tools/deployment/component/common/ingress.sh b/tools/deployment/component/common/ingress.sh
      index 9ae03719..884f93fc 100755
      --- a/tools/deployment/component/common/ingress.sh
      +++ b/tools/deployment/component/common/ingress.sh
      @@ -29,6 +29,23 @@ deployment:
        type: DaemonSet
      network:
        host_namespace: true
      +endpoints:
      + ingress:
      +   port:
      +     http:
      +       default: 10080
      +     https:
      +       default: 10443
      +     healthz:
      +       default: 11254
      +     status:
      +       default: 11246
      +     stream:
      +       default: 11247
      +     profiler:
      +       default: 11245
      +     server:
      +       default: 18181
      EOF

      touch /tmp/ingress-component.yaml
      @@ -48,21 +65,21 @@ pod:
      EOF
      fi

      -helm upgrade --install ingress-kube-system ${HELM_CHART_ROOT_PATH}/ingress \
      - --namespace=kube-system \
      - --values=/tmp/ingress-kube-system.yaml \
      - ${OSH_EXTRA_HELM_ARGS} \
      - ${OSH_EXTRA_HELM_ARGS_INGRESS} \
      - ${OSH_EXTRA_HELM_ARGS_INGRESS_KUBE_SYSTEM}
      -
      -#NOTE: Wait for deploy
      -./tools/deployment/common/wait-for-pods.sh kube-system
      +#helm upgrade --install ingress-kube-system ${HELM_CHART_ROOT_PATH}/ingress \
      +# --namespace=kube-system \
      +# --values=/tmp/ingress-kube-system.yaml \
      +# ${OSH_EXTRA_HELM_ARGS} \
      +# ${OSH_EXTRA_HELM_ARGS_INGRESS} \
      +# ${OSH_EXTRA_HELM_ARGS_INGRESS_KUBE_SYSTEM}
      +#
      +##NOTE: Wait for deploy
      +#./tools/deployment/common/wait-for-pods.sh kube-system

      #NOTE: Deploy namespace ingress
      helm upgrade --install ingress-openstack ${HELM_CHART_ROOT_PATH}/ingress \
        --namespace=openstack \
        --values=/tmp/ingress-component.yaml \
      - --set deployment.cluster.class=nginx \
      + --set deployment.cluster.class=nginx-openstack \
        ${OSH_EXTRA_HELM_ARGS} \
        ${OSH_EXTRA_HELM_ARGS_INGRESS} \
        ${OSH_EXTRA_HELM_ARGS_INGRESS_OPENSTACK}
    • Execute ingress.sh.

      $ cd $HOME/openstack-helm/
      $ ./tools/deployment/component/common/ingress.sh
    Deploy Ceph. Original procedure: https://docs.openstack.org/openstack-helm/latest/install/developer/deploy-with-ceph.html#deploy-ceph
    • Modify openstack-helm/tools/deployment/component/ceph/ceph.sh file as follows:

      diff --git a/tools/deployment/component/ceph/ceph.sh b/tools/deployment/component/ceph/ceph.sh
      index 7d2550cd..518df21d 100755
      --- a/tools/deployment/component/ceph/ceph.sh
      +++ b/tools/deployment/component/ceph/ceph.sh
      @@ -47,8 +47,10 @@ endpoints:
        ceph_mgr:
          namespace: ceph
      network:
      - public: 172.17.0.1/16
      - cluster: 172.17.0.1/16
      + #public: 172.17.0.1/16
      + #cluster: 172.17.0.1/16
      + public: 192.168.206.1/24
      + cluster: 192.168.206.1/24
      deployment:
        storage_secrets: true
        ceph: true
    • Modify openstack-helm/tools/deployment/component/ceph/ceph-ns-activate.sh file as follows:

      diff --git a/tools/deployment/component/ceph/ceph-ns-activate.sh b/tools/deployment/component/ceph/ceph-ns-activate.sh
      index 9574d4f0..acadfc3f 100755
      --- a/tools/deployment/component/ceph/ceph-ns-activate.sh
      +++ b/tools/deployment/component/ceph/ceph-ns-activate.sh
      @@ -27,8 +27,10 @@ endpoints:
        ceph_mon:
          namespace: ceph
      network:
      - public: 172.17.0.1/16
      - cluster: 172.17.0.1/16
      + #public: 172.17.0.1/16
      + #cluster: 172.17.0.1/16
      + public: 192.168.206.1/24
      + cluster: 192.168.206.1/24
      deployment:
        ceph: false
        rbd_provisioner: false
    • Execute ceph.sh and ceph-ns-activate.sh.

      $ cd $HOME/openstack-helm/
      $ ./tools/deployment/component/ceph/ceph.sh
      $ ./tools/deployment/component/ceph/ceph-ns-activate.sh
    Deploy Other Components. Original procedure: https://docs.openstack.org/openstack-helm/latest/install/developer/deploy-with-ceph.html#deploy-mariadb — Install the following components to use Tacker: MariaDB, RabbitMQ, Memcached, Keystone, and Glance.
    • Modify openstack-helm/tools/deployment/component/glance/glance.sh file as follows:

      diff --git a/tools/deployment/component/glance/glance.sh b/tools/deployment/component/glance/glance.sh
      index b388ec04..4d50c2c5 100755
      --- a/tools/deployment/component/glance/glance.sh
      +++ b/tools/deployment/component/glance/glance.sh
      @@ -27,7 +27,7 @@ make glance
       tee /tmp/glance.yaml <<EOF
       storage: ${GLANCE_BACKEND}
       volume:
      -  class_name: standard
      +  class_name: general
       bootstrap:
         structured:
           images:
    • Execute script files.

      $ ./tools/deployment/developer/ceph/050-mariadb.sh
      $ ./tools/deployment/developer/ceph/060-rabbitmq.sh
      $ ./tools/deployment/developer/ceph/070-memcached.sh
      $ ./tools/deployment/developer/ceph/080-keystone.sh
      $ ./tools/deployment/component/glance/glance.sh
  2. Deploy Barbican and Tacker

    • Modify openstack-helm/tacker/templates/pvc.yaml file as follows:

      diff --git a/tacker/templates/pvc.yaml b/tacker/templates/pvc.yaml
      index 8b1678b3..c0599b45 100644
      --- a/tacker/templates/pvc.yaml
      +++ b/tacker/templates/pvc.yaml
      @@ -23,7 +23,7 @@ metadata:
        name: {{ $name }}
      spec:
        accessModes:
      -   - "ReadWriteMany"
      +   - "ReadWriteOnce"
        resources:
          requests:
            storage: {{ $size }}
    • Modify openstack-helm/tacker/values.yaml file as follows:

      diff --git a/tacker/values.yaml b/tacker/values.yaml
      index 90702f95..3d2f2621 100644
      --- a/tacker/values.yaml
      +++ b/tacker/values.yaml
      @@ -105,12 +105,12 @@ pod:
         security_context:
           server:
             pod:
      -        runAsUser: 42424
      -        runAsNonRoot: true
      +        runAsUser: 0
      +        runAsNonRoot: false
           conductor:
             pod:
      -        runAsUser: 42424
      -        runAsNonRoot: true
      +        runAsUser: 0
      +        runAsNonRoot: false
         lifecycle:
           termination_grace_period:
             server:
    • Execute script files.

      $ ./tools/deployment/developer/common/085-barbican.sh
      $ ./tools/deployment/component/tacker/tacker.sh
  3. Verify successful deployment

    • The helm releases are deployed as follows:

      sysadmin@controller-0:~$ helm list -n ceph
      NAME                   NAMESPACE       REVISION       UPDATED                                 STATUS         CHART                           APP VERSION
      ceph-client             ceph           1               2023-10-04 13:28:04.214446853 +0000 UTC deployed       ceph-client-0.1.47             v1.0.0    
      ceph-mon               ceph           1               2023-10-04 13:23:25.009836684 +0000 UTC deployed       ceph-mon-0.1.30                 v1.0.0    
      ceph-osd               ceph           1               2023-10-04 13:26:07.829373478 +0000 UTC deployed       ceph-osd-0.1.47                 v1.0.0    
      ceph-provisioners       ceph           1               2023-10-04 13:30:04.478204441 +0000 UTC deployed       ceph-provisioners-0.1.26       v1.0.0    
      ingress-ceph           ceph           1               2023-10-02 07:59:46.619657229 +0000 UTC deployed       ingress-0.2.17                 v1.5.1    

      sysadmin@controller-0:~$ helm list -n openstack
      NAME                   NAMESPACE       REVISION       UPDATED                                 STATUS         CHART                           APP VERSION
      barbican               openstack       1               2023-10-04 14:11:54.122228604 +0000 UTC deployed       barbican-0.3.5                 v1.0.0    
      ceph-openstack-config   openstack       1               2023-10-04 13:35:39.737074964 +0000 UTC deployed       ceph-provisioners-0.1.26       v1.0.0    
      glance                 openstack       1               2023-10-05 01:14:44.18606719 +0000 UTC deployed       glance-0.4.13                   v1.0.0    
      ingress-openstack       openstack       1               2023-10-02 07:59:30.823441021 +0000 UTC deployed       ingress-0.2.17                 v1.5.1    
      keystone               openstack       2               2023-10-04 13:58:36.81624535 +0000 UTC deployed       keystone-0.3.4                 v1.0.0    
      mariadb                 openstack       1               2023-10-04 13:36:33.178219784 +0000 UTC deployed       mariadb-0.2.33                 v10.6.7    
      memcached               openstack       1               2023-10-04 13:44:40.7788406 +0000 UTC   deployed       memcached-0.1.13               v1.5.5    
      rabbitmq               openstack       1               2023-10-04 13:39:44.683045128 +0000 UTC deployed       rabbitmq-0.1.29                 v3.9.0    
      tacker                 openstack       1               2023-10-05 10:03:19.033603307 +0000 UTC deployed       tacker-0.1.1                   v1.0.0
    • The pods are ready as follows (check that all pods are in "Completed" or "Running" status):

      sysadmin@controller-0:~/openstack-helm$ kubectl get pod -n ceph
      NAME                                       READY   STATUS     RESTARTS     AGE
      ceph-bootstrap-zx84v                       0/1     Completed   0             45h
      ceph-cephfs-client-key-generator-25d8b     0/1     Completed   0             44h
      ceph-checkdns-79d7d8bb68-qq8jb             1/1     Running     0             44h
      ceph-mds-7b84466549-9cqlg                  1/1     Running     0             44h
      ...

Use the following command to make sure you have Ceph as the backend on the INF:

sysadmin@controller-1:~$ source /etc/platform/openrc
[sysadmin@controller-1 ~(keystone_admin)]$ system storage-backend-list
+--------------------------------------+------------+---------+------------+-------------------+----------+---------------+
| uuid                                 | name       | backend | state      | task              | services | capabilities  |
+--------------------------------------+------------+---------+------------+-------------------+----------+---------------+
| da70e0b7-34c8-488a-9e1f-08e057d6a4be | ceph-store | ceph    | configured | provision-storage | None     | replication:  |
|                                      |            |         |            |                   |          | 2 min_replica |
|                                      |            |         |            |                   |          | tion: 1       |
|                                      |            |         |            |                   |          |               |
+--------------------------------------+------------+---------+------------+-------------------+----------+---------------+

[sysadmin@controller-1 ~(keystone_admin)]$ ceph -s
  cluster:
    id:     c5663990-249a-4b71-988f-19b402784429
    health: HEALTH_OK

  services:
    mon: 1 daemons, quorum controller (age 8h)
    mgr: controller-1(active, since 8h), standbys: controller-0
    mds: kube-cephfs:1 {0=controller-1=up:active} 1 up:standby
    osd: 2 osds: 2 up (since 8h), 2 in (since 8h)

  data:
    pools:   3 pools, 192 pgs
    objects: 181 objects, 258 MiB
    usage:   2.4 GiB used, 269 GiB / 271 GiB avail
    pgs:     192 active+clean

    • Prepare a Linux environment with a Bash CLI whose network can reach the INF AIO-SX, then install the required packages.

      $ sudo apt-get install git make patch jq
      # Make sure your CLI has the 'kubectl', 'openstack' and 'helm' installed
    • Clone openstack-helm and openstack-helm-infra code on your Linux.

      # The commit id is 82a6aa8ce96b1669af0b9e8da85b537d02fc5fd3 that used in this demo.
      $ git clone --depth 1 --branch master https://opendev.org/openstack/openstack-helm.git
      # The commit id is 07c735f632147378c4af8e7b4ce6f390d38e3d69 that used in this demo.

      $ git clone --depth 1 --branch master https://opendev.org/openstack/openstack-helm-infra.git
    • Copy the "/etc/kubernetes/admin.conf" from the INF controller node to your local Linux.

      $ scp <INF-controller-0>:/etc/kubernetes/admin.conf ~/.kube/config
      # Change the IP address in the ~/.kube/config
      # server: https://<INF-OAM-IP>:6443
      # You can get the OAM IP through this command on controller node
      # system addrpool-show `system addrpool-list | grep oam | awk '{print $2}'` | grep floating
    • Add labels to controller-0 node.

      $ kubectl label node controller-0 openstack-control-plane=enabled
    • Create namespaces.

      $ kubectl create namespace openstack
  1. Deploy Ingress Controller. Original procedure: https://docs.openstack.org/openstack-helm/latest/install/developer/kubernetes-and-common-setup.html#deploy-the-ingress-controller

    • Modify openstack-helm/tools/deployment/component/common/ingress.sh file as follows:

      diff --git a/tools/deployment/component/common/ingress.sh b/tools/deployment/component/common/ingress.sh
      index 9ae03719..884f93fc 100755
      --- a/tools/deployment/component/common/ingress.sh
      +++ b/tools/deployment/component/common/ingress.sh
      @@ -29,6 +29,23 @@ deployment:
        type: DaemonSet
      network:
        host_namespace: true
      +endpoints:
      + ingress:
      +   port:
      +     http:
      +       default: 10080
      +     https:
      +       default: 10443
      +     healthz:
      +       default: 11254
      +     status:
      +       default: 11246
      +     stream:
      +       default: 11247
      +     profiler:
      +       default: 11245
      +     server:
      +       default: 18181
      EOF

      touch /tmp/ingress-component.yaml
      @@ -48,21 +65,21 @@ pod:
      EOF
      fi

      -helm upgrade --install ingress-kube-system ${HELM_CHART_ROOT_PATH}/ingress \
      - --namespace=kube-system \
      - --values=/tmp/ingress-kube-system.yaml \
      - ${OSH_EXTRA_HELM_ARGS} \
      - ${OSH_EXTRA_HELM_ARGS_INGRESS} \
      - ${OSH_EXTRA_HELM_ARGS_INGRESS_KUBE_SYSTEM}
      -
      -#NOTE: Wait for deploy
      -./tools/deployment/common/wait-for-pods.sh kube-system
      +#helm upgrade --install ingress-kube-system ${HELM_CHART_ROOT_PATH}/ingress \
      +# --namespace=kube-system \
      +# --values=/tmp/ingress-kube-system.yaml \
      +# ${OSH_EXTRA_HELM_ARGS} \
      +# ${OSH_EXTRA_HELM_ARGS_INGRESS} \
      +# ${OSH_EXTRA_HELM_ARGS_INGRESS_KUBE_SYSTEM}
      +#
      +##NOTE: Wait for deploy
      +#./tools/deployment/common/wait-for-pods.sh kube-system

      #NOTE: Deploy namespace ingress
      helm upgrade --install ingress-openstack ${HELM_CHART_ROOT_PATH}/ingress \
        --namespace=openstack \
        --values=/tmp/ingress-component.yaml \
      - --set deployment.cluster.class=nginx \
      + --set deployment.cluster.class=nginx-openstack \
        ${OSH_EXTRA_HELM_ARGS} \
        ${OSH_EXTRA_HELM_ARGS_INGRESS} \
        ${OSH_EXTRA_HELM_ARGS_INGRESS_OPENSTACK}
    • Execute ingress.sh.

      $ cd $HOME/openstack-helm/
      $ ./tools/deployment/component/common/ingress.sh
  1. Deploy Other Components. Original procedure: https://docs.openstack.org/openstack-helm/latest/install/developer/deploy-with-ceph.html#deploy-mariadb — Install the following components to use Tacker: MariaDB, RabbitMQ, Memcached, Keystone, and Glance.

    • Modify openstack-helm/tools/deployment/component/glance/glance.sh file as follows:

      diff --git a/tools/deployment/component/glance/glance.sh b/tools/deployment/component/glance/glance.sh
      index b388ec04..4d50c2c5 100755
      --- a/tools/deployment/component/glance/glance.sh
      +++ b/tools/deployment/component/glance/glance.sh
      @@ -27,7 +27,7 @@ make glance
       tee /tmp/glance.yaml <<EOF
       storage: ${GLANCE_BACKEND}
       volume:
      -  class_name: standard
      +  class_name: general
       bootstrap:
         structured:
           images:
    • Execute script files.

      $ ./tools/deployment/developer/ceph/050-mariadb.sh
      $ ./tools/deployment/developer/ceph/060-rabbitmq.sh
      $ ./tools/deployment/developer/ceph/070-memcached.sh
      $ ./tools/deployment/developer/ceph/080-keystone.sh
      $ ./tools/deployment/component/glance/glance.sh
  1. Deploy Barbican and Tacker

    • Modify openstack-helm/tacker/templates/pvc.yaml file as follows:

      diff --git a/tacker/templates/pvc.yaml b/tacker/templates/pvc.yaml
      index 8b1678b3..c0599b45 100644
      --- a/tacker/templates/pvc.yaml
      +++ b/tacker/templates/pvc.yaml
      @@ -23,7 +23,7 @@ metadata:
        name: {{ $name }}
      spec:
        accessModes:
      -   - "ReadWriteMany"
      +   - "ReadWriteOnce"
        resources:
          requests:
            storage: {{ $size }}
    • Modify openstack-helm/tacker/values.yaml file as follows:

      diff --git a/tacker/values.yaml b/tacker/values.yaml
      index 90702f95..3d2f2621 100644
      --- a/tacker/values.yaml
      +++ b/tacker/values.yaml
      @@ -105,12 +105,12 @@ pod:
         security_context:
           server:
             pod:
      -        runAsUser: 42424
      -        runAsNonRoot: true
      +        runAsUser: 0
      +        runAsNonRoot: false
           conductor:
             pod:
      -        runAsUser: 42424
      -        runAsNonRoot: true
      +        runAsUser: 0
      +        runAsNonRoot: false
         lifecycle:
           termination_grace_period:
             server:
    • Execute script files.

      $ ./tools/deployment/developer/common/085-barbican.sh
      $ ./tools/deployment/component/tacker/tacker.sh
  2. Verify successful deployment

    • The helm releases are deployed as follows:

      sysadmin@controller-0:~$ helm list -n openstack
      NAME                   NAMESPACE       REVISION       UPDATED                                 STATUS         CHART                           APP VERSION
      barbican               openstack       1               2023-10-04 14:11:54.122228604 +0000 UTC deployed       barbican-0.3.5                 v1.0.0
      glance                 openstack       1               2023-10-05 01:14:44.18606719 +0000 UTC  deployed       glance-0.4.13                   v1.0.0
      ingress-openstack       openstack       1               2023-10-02 07:59:30.823441021 +0000 UTC deployed       ingress-0.2.17                 v1.5.1
      keystone               openstack       2               2023-10-04 13:58:36.81624535 +0000 UTC  deployed       keystone-0.3.4                 v1.0.0
      mariadb                 openstack       1               2023-10-04 13:36:33.178219784 +0000 UTC deployed       mariadb-0.2.33                 v10.6.7
      memcached               openstack       1               2023-10-04 13:44:40.7788406 +0000 UTC   deployed       memcached-0.1.13               v1.5.5
      rabbitmq               openstack       1               2023-10-04 13:39:44.683045128 +0000 UTC deployed       rabbitmq-0.1.29                 v3.9.0
      tacker                 openstack       1               2023-10-05 10:03:19.033603307 +0000 UTC deployed       tacker-0.1.1                   v1.0.0
    • The pods are ready as follows (check that all pods are in "Completed" or "Running" status):

      sysadmin@controller-0:~/openstack-helm$ kubectl get pod -n openstack
      NAME                                                        READY   STATUS      RESTARTS     AGE
      barbican-api-75fd4d79d7-ncz2c                               1/1     Running     0             46h
      barbican-db-init-mvhs4                                      0/1     Completed   0             46h
      barbican-db-sync-2hn96                                      0/1     Completed   0             46h
      barbican-ks-endpoints-57rm2                                 0/3     Completed   0             46h
      barbican-ks-service-x2jqn                                   0/1     Completed   0             46h
      barbican-ks-user-ds9h6                                      0/1     Completed   0             46h
      barbican-rabbit-init-gz647                                  0/1     Completed   0             46h
      barbican-test                                               0/1     Completed   0             3d9h
      glance-api-97df56ddb-pr598                                 1/1     Running     0             35h
      glance-bootstrap-fbmpq                                     0/1     Completed   0             35h
      glance-db-init-gtmdc                                       0/1     Completed   0             35h
      glance-db-sync-9jkb8                                       0/1     Completed   0             35h
      glance-ks-endpoints-dkb6m                                   0/3     Completed   0             35h
      glance-ks-service-xdhfk                                     0/1     Completed   0             35h
      glance-ks-user-9xhvf                                       0/1     Completed   0             35h
      glance-metadefs-load-rw2kc                                 0/1     Completed   0             35h
      glance-rabbit-init-c4wvr                                   0/1     Completed   0             35h
      glance-storage-init-lzn72                                   0/1     Completed   0             35h
      ingress-5448bbd7d-7rz99                                     1/1     Running     1 (47h ago)   4d4h
      ingress-error-pages-54c8fdfb4d-wgktt                       1/1     Running     1 (47h ago)   4d4h
      keystone-api-6cb7d765ff-srpwg                               1/1     Running     0             46h
      keystone-bootstrap-f9s5n                                   0/1     Completed   0             46h
      keystone-credential-setup-27qkx                             0/1     Completed   0             46h
      keystone-db-init-sr9dj                                     0/1     Completed   0             46h
      keystone-db-sync-7hnj8                                     0/1     Completed   0             46h
      keystone-domain-manage-2n6sf                               0/1     Completed   0             46h
      keystone-fernet-rotate-28275120-djbg7                       0/1     Completed   0             24h
      keystone-fernet-rotate-28275840-z2wnq                       0/1     Completed   0             12h
      keystone-fernet-rotate-28276560-z6rmr                       0/1     Completed   0             30m
      keystone-fernet-setup-x8px7                                 0/1     Completed   0             46h
      keystone-rabbit-init-w5h9q                                 0/1     Completed   0             46h
      mariadb-ingress-7f9bcfd79b-6flfw                           1/1     Running     0             46h
      mariadb-ingress-7f9bcfd79b-tlwkc                           1/1     Running     0             46h
      mariadb-ingress-error-pages-557b55c45f-tw8sw               1/1     Running     0             46h
      mariadb-server-0                                           1/1     Running     0             46h
      memcached-memcached-785bbdd4d8-zxh76                       1/1     Running     0             46h
      rabbitmq-cluster-wait-49khp                                 0/1     Completed   0             46h
      rabbitmq-rabbitmq-0                                         1/1     Running     0             46h
      rabbitmq-rabbitmq-1                                         1/1     Running     0             46h
      tacker-conductor-9f977f5b4-tx58c                           1/1     Running     0             26h
      tacker-db-init-4d7xz                                       0/1     Completed   0             26h
      tacker-db-sync-vwzg2                                       0/1     Completed   0             26h
      tacker-ks-endpoints-426wd                                   0/3     Completed   0             26h
      tacker-ks-service-lltsv                                     0/1     Completed   0             26h
      tacker-ks-user-5vpws                                       0/1     Completed   0             26h
      tacker-rabbit-init-2jkgb                                   0/1     Completed   0             26h
      tacker-server-76d9bbf6c8-skk8h                             1/1     Running     0             26h
    • Test if Tacker is working properly

      $ TACKER_SERVER_POD=tacker-server-76d9bbf6c8-skk8h
      $ TACKER_ENDPOINT=tacker-api.openstack.svc.cluster.local

      # Issue token from keystone
      $ kubectl exec -n openstack -it $TACKER_SERVER_POD \
      -- curl -i -X POST -H "Content-Type: application/json" \
      -d '{"auth":{"identity":{"methods":["password"],"password":{"user":{"domain":{"name":"default"},"name":"admin","password":"password"}}},"scope":{"project":{"domain":{"name":"default"},"name":"admin"}}}}' \
      http://keystone-api.openstack.svc.cluster.local:5000/v3/auth/tokens

      HTTP/1.1 201 CREATED
      Date: Fri, 06 Oct 2023 12:46:40 GMT
      Content-Type: application/json
      Content-Length: 3175
      Connection: keep-alive
      X-Subject-Token: gAAAAABlIAGv1RqxqMJ7rt_VyAtPTxF0XjMG19zp-0zaZmHdFkKmEjLfUus09GkPUdcbCeVuR8ZfmMjqg9C2kRCWWX4Llfdwld1lKM-beqQ7s127kjhpilf28e1oXh351CmBFy97PaZ9D5WBoe3fRrDkhhB_cEsB76Pyj6P2KQuNeMIhGmb1fKA
      Vary: X-Auth-Token
      x-openstack-request-id: req-408ef1f6-2b61-4a8d-89b0-0d987878cbbb

      # Set `X-Subject-Token` retrieved as TOKEN
      $ TOKEN=gAAAAABlIAGv1RqxqMJ7rt_VyAtPTxF0XjMG19zp-0zaZmHdFkKmEjLfUus09GkPUdcbCeVuR8ZfmMjqg9C2kRCWWX4Llfdwld1lKM-beqQ7s127kjhpilf28e1oXh351CmBFy97PaZ9D5WBoe3fRrDkhhB_cEsB76Pyj6P2KQuNeMIhGmb1fKA

      $ kubectl exec -n openstack -it $TACKER_SERVER_POD \
      -- curl -X GET http://${TACKER_ENDPOINT}:9890/vnflcm/v2/vnf_instances \
      -H "X-Auth-Token:$TOKEN" -H "Version: 2.0.0"
      []   *** Success if you can get an empty list ***