Release K - Run in Docker
Owned by Jeff van Dam
Dec 13, 2024
K Release images
| Component | Image | Tag |
|---|---|---|
| TEIV Exposure | nexus3.o-ran-sc.org:10002/o-ran-sc/smo-teiv-exposure | 0.1.0 |
| TEIV Ingestion | nexus3.o-ran-sc.org:10002/o-ran-sc/smo-teiv-ingestion | 0.1.0 |
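If you want to check registry access before bringing anything up, the images can be pulled directly from the O-RAN SC Nexus registry using the coordinates in the table above:

```sh
# Pull the K release TEIV images from the O-RAN SC Nexus registry
docker pull nexus3.o-ran-sc.org:10002/o-ran-sc/smo-teiv-exposure:0.1.0
docker pull nexus3.o-ran-sc.org:10002/o-ran-sc/smo-teiv-ingestion:0.1.0
```

Note that the docker-compose file below references the images without the registry prefix (e.g. o-ran-sc/smo-teiv-exposure:0.1.0), so depending on your setup you may need to retag the pulled images with `docker tag` or build them locally.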
Run using docker-compose
A sample docker-compose deployment is available to download from https://gerrit.o-ran-sc.org/r/gitweb?p=smo/teiv.git;a=tree;f=docker-compose;h=refs/heads/k-release
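One way to get a local copy is to clone the repository and check out the k-release branch; the clone URL below is the usual Gerrit HTTP URL for the smo/teiv project and is inferred from the gitweb link above, so adjust it if your setup differs:

```sh
# Clone the TEIV repository and move to the docker-compose directory on the K release branch
git clone "https://gerrit.o-ran-sc.org/r/smo/teiv"
cd teiv
git checkout k-release
cd docker-compose
```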
docker-compose.yml
```yaml
#
# ============LICENSE_START=======================================================
# Copyright (C) 2024 Ericsson
# Modifications Copyright (C) 2024 OpenInfra Foundation Europe
# ================================================================================
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#       http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# SPDX-License-Identifier: Apache-2.0
# ============LICENSE_END=========================================================
#
services:
#   # uncomment if you have your own YANG files in docker-compose/generate-defaults
#   NB: uncomment lines 74 and 75 as well from dbpostgresql
#     depends_on:
#       - pgsql-schema-generator
#
#  pgsql-schema-generator:
#    container_name: pgsql-schema-generator
#    image: o-ran-sc/smo-teiv-pgsql-schema-generator:latest
#    volumes:
#      - ./generate-defaults:/opt/app/pgsql-schema-generator/resources/generate-defaults
#      - ./sql_scripts:/opt/app/pgsql-schema-generator/sql_scripts
#    deploy:
#      resources:
#        reservations:
#          cpus: '2'
#          memory: 2G
#        limits:
#          cpus: '3'
#          memory: 3G
#
# comment out kafka-producer if you DON'T want pre-populated example data in TEIV OR
# you have created your own sql schemas with your own YANG files OR
# you can create your own events tailored to your YANG files in docker-compose/cloudEventProducer/events
  kafka-producer:
    container_name: kafka-producer
    image: confluentinc/cp-kafka:7.6.1
    volumes:
      - ./cloudEventProducer/cloudEventProducerForDockerCompose.sh:/cloudEventProducer.sh
      - ./cloudEventProducer/events:/events
    depends_on:
      - topology-ingestion-inventory
      - kafka
    restart: "no"
    entrypoint: [ "/bin/bash", "-c", "sleep 30 && bash /cloudEventProducer.sh" ]

  dbpostgresql:
    container_name: dbpostgresql
    image: postgis/postgis:13-3.4-alpine
    ports:
      - ${DB_PORT:-5432}:5432
    volumes:
      - ./sql_scripts:/docker-entrypoint-initdb.d
    command: ["/bin/sh", "-c", "bash /docker-entrypoint-initdb.d/waitForFiles.sh"]
    environment:
      POSTGRES_DB: topology_exposure_db
      POSTGRES_USER: ${DB_USERNAME:-topology_exposure_user}
      POSTGRES_PASSWORD: ${DB_PASSWORD:-dbpassword}
    restart: always
#    depends_on:
#      - pgsql-schema-generator
    deploy:
      resources:
        reservations:
          cpus: '1'
          memory: 1G
        limits:
          cpus: '6'
          memory: 3G

  topology-exposure-inventory:
    container_name: topology-exposure-inventory
    image: o-ran-sc/smo-teiv-exposure:0.1.0
    ports:
      - 31074:8080
      # Uncomment for debug
      # - 5005:5005
    # environment:
    #   JAVA_TOOL_OPTIONS: -agentlib:jdwp=transport=dt_socket,server=y,suspend=n,address=*:5005
    depends_on:
      - dbpostgresql
    deploy:
      resources:
        reservations:
          cpus: '2'
          memory: 2G
        limits:
          cpus: '3'
          memory: 3G

  topology-ingestion-inventory:
    container_name: topology-ingestion-inventory
    image: o-ran-sc/smo-teiv-ingestion:0.1.0
    depends_on:
      - dbpostgresql
    deploy:
      resources:
        reservations:
          cpus: '2'
          memory: 2G
        limits:
          cpus: '3'
          memory: 3G

  zookeeper:
    image: confluentinc/cp-zookeeper:6.2.1
    container_name: zookeeper
    ports:
      - '2181:2181'
    environment:
      ZOOKEEPER_CLIENT_PORT: 2181

  kafka:
    image: confluentinc/cp-kafka:7.6.1
    container_name: kafka
    ports:
      - '9093:9093'
    depends_on:
      - zookeeper
    environment:
      KAFKA_ADVERTISED_LISTENERS: INSIDE://kafka:9092,OUTSIDE://localhost:9093
      KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: INSIDE:PLAINTEXT,OUTSIDE:PLAINTEXT
      KAFKA_INTER_BROKER_LISTENER_NAME: INSIDE
      KAFKA_ZOOKEEPER_CONNECT: zookeeper:2181
      KAFKA_BROKER_ID: 1
      KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: 1

  kafka2:
    image: confluentinc/cp-kafka:7.6.1
    container_name: kafka2
    depends_on:
      - zookeeper
    environment:
      KAFKA_ADVERTISED_LISTENERS: INSIDE://kafka2:9094
      KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: INSIDE:PLAINTEXT
      KAFKA_INTER_BROKER_LISTENER_NAME: INSIDE
      KAFKA_ZOOKEEPER_CONNECT: zookeeper:2181
      KAFKA_BROKER_ID: 2
      KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: 1

  kafka3:
    image: confluentinc/cp-kafka:7.6.1
    container_name: kafka3
    depends_on:
      - zookeeper
    environment:
      KAFKA_ADVERTISED_LISTENERS: INSIDE://kafka3:9096
      KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: INSIDE:PLAINTEXT
      KAFKA_INTER_BROKER_LISTENER_NAME: INSIDE
      KAFKA_ZOOKEEPER_CONNECT: zookeeper:2181
      KAFKA_BROKER_ID: 3
      KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: 1
```
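The dbpostgresql service reads DB_PORT, DB_USERNAME and DB_PASSWORD from the environment, falling back to the defaults shown in the file. If you want to override them, one option (a sketch, using placeholder values) is an .env file in the same directory as the compose file, which docker-compose picks up automatically:

```sh
# Optional: create an .env file next to docker-compose.yml to override the database defaults
cat > .env <<'EOF'
DB_PORT=5433
DB_USERNAME=topology_exposure_user
DB_PASSWORD=changeme
EOF
```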
To execute this docker-compose script, navigate to the directory containing it and run:
```sh
docker-compose up -d
```
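Once the stack is up, you can confirm that the containers started and watch the services initialise; the service names below are the ones defined in the compose file:

```sh
# List the containers started by the compose file
docker-compose ps

# Follow the logs of the exposure and ingestion services
docker-compose logs -f topology-exposure-inventory topology-ingestion-inventory
```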
Sample scripts that pre-load the TEIV registry with sample models/schemas/data are also available in the 'docker-compose/' directory of the repository mentioned above. These scripts run automatically when using "docker-compose up".
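To confirm the sample data was sent, you can inspect the kafka-producer logs after it finishes (its entrypoint sleeps about 30 seconds before running the producer script). The REST call below is only a sketch: port 31074 comes from the compose file, but the /topology-inventory/v1alpha11/domains path is an assumption and may differ in your release.

```sh
# The producer container runs once and exits after sending the sample CloudEvents
docker logs kafka-producer

# Hypothetical query against the exposure service (API path is an assumption)
curl -s http://localhost:31074/topology-inventory/v1alpha11/domains
```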