# Copyright The OpenTelemetry Authors
# SPDX-License-Identifier: Apache-2.0
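# Most images, ports, and endpoints below are parameterized through environment
# variables (IMAGE_NAME, IMAGE_VERSION, the *_PORT variables, OTEL_COLLECTOR_HOST, ...),
# which are typically supplied by an .env file alongside this compose file.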

version: '3.9'
x-default-logging: &logging
  driver: "json-file"
  options:
    max-size: "5m"
    max-file: "2"
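# The `logging` anchor above is reused by most services via `logging: *logging`,
# keeping each container to at most two rotated 5 MB json-file logs.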

volumes:
  opensearch-data1:
  opensearch-data2:

networks:
  default:
    name: opentelemetry-demo
    driver: bridge
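# Every service joins this single bridge network and is reachable by its service
# name (e.g. kafka:9092, as used in KAFKA_ADVERTISED_LISTENERS below).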

services:
  # ******************
  # Core Demo Services
  # ******************
  # Accounting service
  accountingservice:
    image: ${IMAGE_NAME}:${IMAGE_VERSION}-accountingservice
    container_name: accounting-service
    build:
      context: ./
      dockerfile: ./src/accountingservice/Dockerfile
      cache_from:
        - ${IMAGE_NAME}:${IMAGE_VERSION}-accountingservice
    deploy:
      resources:
        limits:
          memory: 20M
    restart: unless-stopped
    environment:
      - KAFKA_SERVICE_ADDR
      - OTEL_EXPORTER_OTLP_ENDPOINT
      - OTEL_EXPORTER_OTLP_METRICS_TEMPORALITY_PREFERENCE
      - OTEL_RESOURCE_ATTRIBUTES
      - OTEL_SERVICE_NAME=accountingservice
    depends_on:
      otelcol:
        condition: service_started
      kafka:
        condition: service_healthy
    logging: *logging

  # AdService
  adservice:
    image: ${IMAGE_NAME}:${IMAGE_VERSION}-adservice
    container_name: ad-service
    build:
      context: ./
      dockerfile: ./src/adservice/Dockerfile
      cache_from:
        - ${IMAGE_NAME}:${IMAGE_VERSION}-adservice
    deploy:
      resources:
        limits:
          memory: 300M
    restart: unless-stopped
    ports:
      - "${AD_SERVICE_PORT}"
    environment:
      - AD_SERVICE_PORT
      - FEATURE_FLAG_GRPC_SERVICE_ADDR
      - OTEL_EXPORTER_OTLP_ENDPOINT
      - OTEL_EXPORTER_OTLP_METRICS_TEMPORALITY_PREFERENCE
      - OTEL_RESOURCE_ATTRIBUTES
      - OTEL_LOGS_EXPORTER=otlp
      - OTEL_SERVICE_NAME=adservice
    depends_on:
      otelcol:
        condition: service_started
    logging: *logging

  # Cart service
  cartservice:
    image: ${IMAGE_NAME}:${IMAGE_VERSION}-cartservice
    container_name: cart-service
    build:
      context: ./
      dockerfile: ./src/cartservice/src/Dockerfile
      cache_from:
        - ${IMAGE_NAME}:${IMAGE_VERSION}-cartservice
    deploy:
      resources:
        limits:
          memory: 160M
    restart: unless-stopped
    ports:
      - "${CART_SERVICE_PORT}"
    environment:
      - CART_SERVICE_PORT
      - REDIS_ADDR
      - OTEL_EXPORTER_OTLP_ENDPOINT
      - OTEL_RESOURCE_ATTRIBUTES
      - OTEL_SERVICE_NAME=cartservice
      - ASPNETCORE_URLS=http://*:${CART_SERVICE_PORT}
    depends_on:
      redis-cart:
        condition: service_started
      otelcol:
        condition: service_started
    logging: *logging

  # Checkout service
  checkoutservice:
    image: ${IMAGE_NAME}:${IMAGE_VERSION}-checkoutservice
    container_name: checkout-service
    build:
      context: ./
      dockerfile: ./src/checkoutservice/Dockerfile
      cache_from:
        - ${IMAGE_NAME}:${IMAGE_VERSION}-checkoutservice
    deploy:
      resources:
        limits:
          memory: 20M
    restart: unless-stopped
    ports:
      - "${CHECKOUT_SERVICE_PORT}"
    environment:
      - CHECKOUT_SERVICE_PORT
      - CART_SERVICE_ADDR
      - CURRENCY_SERVICE_ADDR
      - EMAIL_SERVICE_ADDR
      - PAYMENT_SERVICE_ADDR
      - PRODUCT_CATALOG_SERVICE_ADDR
      - SHIPPING_SERVICE_ADDR
      - KAFKA_SERVICE_ADDR
      - OTEL_EXPORTER_OTLP_ENDPOINT
      - OTEL_EXPORTER_OTLP_METRICS_TEMPORALITY_PREFERENCE
      - OTEL_RESOURCE_ATTRIBUTES
      - OTEL_SERVICE_NAME=checkoutservice
    depends_on:
      cartservice:
        condition: service_started
      currencyservice:
        condition: service_started
      emailservice:
        condition: service_started
      paymentservice:
        condition: service_started
      productcatalogservice:
        condition: service_started
      shippingservice:
        condition: service_started
      otelcol:
        condition: service_started
      kafka:
        condition: service_healthy
    logging: *logging

  # Currency service
  currencyservice:
    image: ${IMAGE_NAME}:${IMAGE_VERSION}-currencyservice
    container_name: currency-service
    build:
      context: ./src/currencyservice
      cache_from:
        - ${IMAGE_NAME}:${IMAGE_VERSION}-currencyservice
      args:
        - GRPC_VERSION=1.46.0
        - OPENTELEMETRY_VERSION=1.5.0
    deploy:
      resources:
        limits:
          memory: 20M
    restart: unless-stopped
    ports:
      - "${CURRENCY_SERVICE_PORT}"
    environment:
      - CURRENCY_SERVICE_PORT
      - OTEL_EXPORTER_OTLP_ENDPOINT
      - OTEL_RESOURCE_ATTRIBUTES=${OTEL_RESOURCE_ATTRIBUTES},service.name=currencyservice   # The C++ SDK does not support OTEL_SERVICE_NAME
    depends_on:
      otelcol:
        condition: service_started
    logging: *logging

  # Email service
  emailservice:
    image: ${IMAGE_NAME}:${IMAGE_VERSION}-emailservice
    container_name: email-service
    build:
      context: ./src/emailservice
      cache_from:
        - ${IMAGE_NAME}:${IMAGE_VERSION}-emailservice
    deploy:
      resources:
        limits:
          memory: 100M
    restart: unless-stopped
    ports:
      - "${EMAIL_SERVICE_PORT}"
    environment:
      - APP_ENV=production
      - EMAIL_SERVICE_PORT
      - OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=http://${OTEL_COLLECTOR_HOST}:4318/v1/traces
      - OTEL_RESOURCE_ATTRIBUTES
      - OTEL_SERVICE_NAME=emailservice
    depends_on:
      otelcol:
        condition: service_started
    logging: *logging

  # Feature Flag service
  featureflagservice:
    image: ${IMAGE_NAME}:${IMAGE_VERSION}-featureflagservice
    container_name: feature-flag-service
    build:
      context: ./
      dockerfile: ./src/featureflagservice/Dockerfile
      cache_from:
        - ${IMAGE_NAME}:${IMAGE_VERSION}-featureflagservice
    deploy:
      resources:
        limits:
          memory: 175M
    restart: unless-stopped
    ports:
      - "${FEATURE_FLAG_SERVICE_PORT}:${FEATURE_FLAG_SERVICE_PORT}"     # Feature Flag Service UI
      - "${FEATURE_FLAG_GRPC_SERVICE_PORT}"                             # Feature Flag Service gRPC API
    environment:
      - FEATURE_FLAG_SERVICE_PORT
      - FEATURE_FLAG_GRPC_SERVICE_PORT
      - OTEL_EXPORTER_OTLP_ENDPOINT
      - OTEL_EXPORTER_OTLP_TRACES_PROTOCOL=grpc
      - OTEL_SERVICE_NAME=featureflagservice
      - DATABASE_URL=ecto://ffs:ffs@ffs_postgres:5432/ffs
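      # The baggage header below marks health-check requests as synthetic traffic so
      # they can be distinguished from real user requests in the collected telemetry.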
    healthcheck:
      test: ["CMD", "curl", "-H", "baggage: synthetic_request=true", "-f", "http://localhost:${FEATURE_FLAG_SERVICE_PORT}"]
    depends_on:
      ffs_postgres:
        condition: service_healthy
    logging: *logging

  # Fraud Detection service
  frauddetectionservice:
    image: ${IMAGE_NAME}:${IMAGE_VERSION}-frauddetectionservice
    container_name: frauddetection-service
    build:
      context: ./
      dockerfile: ./src/frauddetectionservice/Dockerfile
      cache_from:
        - ${IMAGE_NAME}:${IMAGE_VERSION}-frauddetectionservice
    deploy:
      resources:
        limits:
          memory: 200M
    restart: unless-stopped
    environment:
      - KAFKA_SERVICE_ADDR
      - OTEL_EXPORTER_OTLP_ENDPOINT
      - OTEL_EXPORTER_OTLP_METRICS_TEMPORALITY_PREFERENCE
      - OTEL_RESOURCE_ATTRIBUTES
      - OTEL_SERVICE_NAME=frauddetectionservice
    depends_on:
      otelcol:
        condition: service_started
      kafka:
        condition: service_healthy
    logging: *logging

  # Frontend
  frontend:
    image: ${IMAGE_NAME}:${IMAGE_VERSION}-frontend
    container_name: frontend
    build:
      context: ./
      dockerfile: ./src/frontend/Dockerfile
      cache_from:
        - ${IMAGE_NAME}:${IMAGE_VERSION}-frontend
    deploy:
      resources:
        limits:
          memory: 200M
    restart: unless-stopped
    ports:
      - "${FRONTEND_PORT}:${FRONTEND_PORT}"
    environment:
      - PORT=${FRONTEND_PORT}
      - FRONTEND_ADDR
      - AD_SERVICE_ADDR
      - CART_SERVICE_ADDR
      - CHECKOUT_SERVICE_ADDR
      - CURRENCY_SERVICE_ADDR
      - PRODUCT_CATALOG_SERVICE_ADDR
      - RECOMMENDATION_SERVICE_ADDR
      - SHIPPING_SERVICE_ADDR
      - OTEL_EXPORTER_OTLP_ENDPOINT
      - OTEL_RESOURCE_ATTRIBUTES=${OTEL_RESOURCE_ATTRIBUTES}
      - ENV_PLATFORM
      - OTEL_SERVICE_NAME=frontend
      - PUBLIC_OTEL_EXPORTER_OTLP_TRACES_ENDPOINT
      - OTEL_EXPORTER_OTLP_METRICS_TEMPORALITY_PREFERENCE
      - WEB_OTEL_SERVICE_NAME=frontend-web
    depends_on:
      adservice:
        condition: service_started
      cartservice:
        condition: service_started
      checkoutservice:
        condition: service_started
      currencyservice:
        condition: service_started
      productcatalogservice:
        condition: service_started
      quoteservice:
        condition: service_started
      recommendationservice:
        condition: service_started
      shippingservice:
        condition: service_started
      otelcol:
        condition: service_started
    logging: *logging

  # Frontend Proxy (Envoy)
  frontendproxy:
    image: ${IMAGE_NAME}:${IMAGE_VERSION}-frontendproxy
    container_name: frontend-proxy
    build:
      context: ./
      dockerfile: src/frontendproxy/Dockerfile
    deploy:
      resources:
        limits:
          memory: 50M
    ports:
      - "${ENVOY_PORT}:${ENVOY_PORT}"
      - "10000:10000"
    environment:
      - FRONTEND_PORT
      - FRONTEND_HOST
      - FEATURE_FLAG_SERVICE_PORT
      - FEATURE_FLAG_SERVICE_HOST
      - LOCUST_WEB_HOST
      - LOCUST_WEB_PORT
      - GRAFANA_SERVICE_PORT
      - GRAFANA_SERVICE_HOST
      - JAEGER_SERVICE_PORT
      - JAEGER_SERVICE_HOST
      - OTEL_COLLECTOR_HOST
      - OTEL_COLLECTOR_PORT_GRPC
      - OTEL_COLLECTOR_PORT_HTTP
      - ENVOY_PORT
    depends_on:
      frontend:
        condition: service_started
      featureflagservice:
        condition: service_started
      loadgenerator:
        condition: service_started
      jaeger:
        condition: service_started
      grafana:
        condition: service_started

  # Load Generator
  loadgenerator:
    image: ${IMAGE_NAME}:${IMAGE_VERSION}-loadgenerator
    container_name: load-generator
    build:
      context: ./
      dockerfile: ./src/loadgenerator/Dockerfile
      cache_from:
        - ${IMAGE_NAME}:${IMAGE_VERSION}-loadgenerator
    deploy:
      resources:
        limits:
          memory: 120M
    restart: unless-stopped
    ports:
      - "${LOCUST_WEB_PORT}:${LOCUST_WEB_PORT}"
    environment:
      - LOCUST_WEB_PORT
      - LOCUST_USERS
      - LOCUST_HOST
      - LOCUST_HEADLESS
      - LOCUST_AUTOSTART
      - OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=http://${OTEL_COLLECTOR_HOST}:4318/v1/traces
      - OTEL_RESOURCE_ATTRIBUTES
      - OTEL_SERVICE_NAME=loadgenerator
      - PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION=python
    depends_on:
      frontend:
        condition: service_started
    logging: *logging

  # Payment service
  paymentservice:
    image: ${IMAGE_NAME}:${IMAGE_VERSION}-paymentservice
    container_name: payment-service
    build:
      context: ./
      dockerfile: ./src/paymentservice/Dockerfile
      cache_from:
        - ${IMAGE_NAME}:${IMAGE_VERSION}-paymentservice
    deploy:
      resources:
        limits:
          memory: 120M
    restart: unless-stopped
    ports:
      - "${PAYMENT_SERVICE_PORT}"
    environment:
      - PAYMENT_SERVICE_PORT
      - OTEL_EXPORTER_OTLP_ENDPOINT
      - OTEL_EXPORTER_OTLP_METRICS_TEMPORALITY_PREFERENCE
      - OTEL_RESOURCE_ATTRIBUTES
      - OTEL_SERVICE_NAME=paymentservice
    depends_on:
      otelcol:
        condition: service_started
    logging: *logging

  # Product Catalog service
  # if proxy.golang.org is experiencing i/o timeout use the next:
  #  - go env -w GOPROXY=direct
  #  - go env -w GOSUMDB=off
  productcatalogservice:
    image: ${IMAGE_NAME}:${IMAGE_VERSION}-productcatalogservice
    container_name: product-catalog-service
    build:
      context: ./
      dockerfile: ./src/productcatalogservice/Dockerfile
      cache_from:
        - ${IMAGE_NAME}:${IMAGE_VERSION}-productcatalogservice
    deploy:
      resources:
        limits:
          memory: 20M
    restart: unless-stopped
    ports:
      - "${PRODUCT_CATALOG_SERVICE_PORT}"
    environment:
      - PRODUCT_CATALOG_SERVICE_PORT
      - FEATURE_FLAG_GRPC_SERVICE_ADDR
      - OTEL_EXPORTER_OTLP_ENDPOINT
      - OTEL_EXPORTER_OTLP_METRICS_TEMPORALITY_PREFERENCE
      - OTEL_RESOURCE_ATTRIBUTES
      - OTEL_SERVICE_NAME=productcatalogservice
    depends_on:
      otelcol:
        condition: service_started
    logging: *logging

  # Quote service
  quoteservice:
    image: ${IMAGE_NAME}:${IMAGE_VERSION}-quoteservice
    container_name: quote-service
    build:
      context: ./
      dockerfile: ./src/quoteservice/Dockerfile
      cache_from:
        - ${IMAGE_NAME}:${IMAGE_VERSION}-quoteservice
    deploy:
      resources:
        limits:
          memory: 40M
    restart: unless-stopped
    ports:
      - "${QUOTE_SERVICE_PORT}"
    environment:
      - FEATURE_FLAG_GRPC_SERVICE_ADDR
      - OTEL_EXPORTER_OTLP_ENDPOINT=http://${OTEL_COLLECTOR_HOST}:4318
      - OTEL_PHP_AUTOLOAD_ENABLED=true
      - QUOTE_SERVICE_PORT
      - OTEL_RESOURCE_ATTRIBUTES
      - OTEL_SERVICE_NAME=quoteservice
    depends_on:
      otelcol:
        condition: service_started
    logging: *logging

  # Recommendation service
  recommendationservice:
    image: ${IMAGE_NAME}:${IMAGE_VERSION}-recommendationservice
    container_name: recommendation-service
    build:
      context: ./
      dockerfile: ./src/recommendationservice/Dockerfile
      cache_from:
        - ${IMAGE_NAME}:${IMAGE_VERSION}-recommendationservice
    deploy:
      resources:
        limits:
          memory: 500M               # This is high to enable supporting the recommendationCache feature flag use case
    restart: unless-stopped
    ports:
      - "${RECOMMENDATION_SERVICE_PORT}"
    environment:
      - RECOMMENDATION_SERVICE_PORT
      - PRODUCT_CATALOG_SERVICE_ADDR
      - FEATURE_FLAG_GRPC_SERVICE_ADDR
      - OTEL_PYTHON_LOG_CORRELATION=true
      - OTEL_EXPORTER_OTLP_ENDPOINT
      - OTEL_EXPORTER_OTLP_METRICS_TEMPORALITY_PREFERENCE
      - OTEL_RESOURCE_ATTRIBUTES
      - OTEL_SERVICE_NAME=recommendationservice
      - PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION=python
    depends_on:
      featureflagservice:
        condition: service_started
      productcatalogservice:
        condition: service_started
      otelcol:
        condition: service_started
    logging: *logging

  # Frontend Nginx Proxy service
  nginx:
    image: nginx:latest
    container_name: nginx
    volumes:
      - ./src/nginx-otel/default.conf:/etc/nginx/conf.d/default.conf
    ports:
      - 90:90
    depends_on:
      - frontend
      - fluentbit
      - otelcol
      - loadgenerator
    links:
      - fluentbit
    logging:
      driver: "fluentd"
      options:
        fluentd-address: 127.0.0.1:24224
        tag: nginx.access

  # Fluent-bit logs shipper service
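  # fluent-bit listens on 24224 (the Fluentd forward protocol) for the nginx access
  # logs shipped by the fluentd logging driver above and forwards them according to
  # the pipeline configuration mounted from ./src/fluent-bit.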
  fluentbit:
    container_name: fluentbit
    image: fluent/fluent-bit:latest
    volumes:
      - ./src/fluent-bit:/fluent-bit/etc
    ports:
      - "24224:24224"
      - "24224:24224/udp"
    depends_on:
      - opensearch-node1

  # Shipping service
  shippingservice:
    image: ${IMAGE_NAME}:${IMAGE_VERSION}-shippingservice
    container_name: shipping-service
    build:
      context: ./
      dockerfile: ./src/shippingservice/Dockerfile
      cache_from:
        - ${IMAGE_NAME}:${IMAGE_VERSION}-shippingservice
    deploy:
      resources:
        limits:
          memory: 20M
    restart: unless-stopped
    ports:
      - "${SHIPPING_SERVICE_PORT}"
    environment:
      - SHIPPING_SERVICE_PORT
      - QUOTE_SERVICE_ADDR
      - OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=http://${OTEL_COLLECTOR_HOST}:4317    # OTLP gRPC endpoint; the /v1/traces path only applies to OTLP/HTTP on 4318
      - OTEL_RESOURCE_ATTRIBUTES
      - OTEL_SERVICE_NAME=shippingservice
    depends_on:
      otelcol:
        condition: service_started
    logging: *logging

  # ******************
  # Dependent Services
  # ******************
  # Postgres used by Feature Flag service
  ffs_postgres:
    image: postgres:14
    container_name: postgres
    user: postgres
    deploy:
      resources:
        limits:
          memory: 120M
    restart: unless-stopped
    environment:
      - POSTGRES_USER=ffs
      - POSTGRES_DB=ffs
      - POSTGRES_PASSWORD=ffs
    healthcheck:
      test: ["CMD-SHELL", "pg_isready -d ffs -U ffs"]
      interval: 10s
      timeout: 5s
      retries: 5
    logging: *logging

  # Kafka used by Checkout, Accounting, and Fraud Detection services
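  # The nc-based healthcheck below lets those consumers wait for the broker:
  # they declare `condition: service_healthy` and only start once Kafka accepts
  # connections on 9092.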
  kafka:
    image: ${IMAGE_NAME}:${IMAGE_VERSION}-kafka
    container_name: kafka
    build:
      context: ./
      dockerfile: ./src/kafka/Dockerfile
      cache_from:
        - ${IMAGE_NAME}:${IMAGE_VERSION}-kafka
    deploy:
      resources:
        limits:
          memory: 500M
    restart: unless-stopped
    environment:
      - KAFKA_ADVERTISED_LISTENERS=PLAINTEXT://kafka:9092
      - OTEL_EXPORTER_OTLP_ENDPOINT
      - OTEL_EXPORTER_OTLP_METRICS_TEMPORALITY_PREFERENCE
      - OTEL_RESOURCE_ATTRIBUTES
      - OTEL_SERVICE_NAME=kafka
      - KAFKA_HEAP_OPTS=-Xmx200m -Xms200m
    healthcheck:
      test: nc -z kafka 9092
      start_period: 10s
      interval: 5s
      timeout: 10s
      retries: 10
    logging: *logging

  # Redis used by Cart service
  redis-cart:
    image: redis:alpine
    container_name: redis-cart
    user: redis
    deploy:
      resources:
        limits:
          memory: 20M
    restart: unless-stopped
    ports:
      - "${REDIS_PORT}"
    logging: *logging


  # ********************
  # Telemetry Components
  # ********************
  # data-prepper
  data-prepper:
    image: opensearchproject/data-prepper:latest
    container_name: data-prepper
    restart: unless-stopped
    volumes:
      - ./src/dataprepper/trace_analytics_no_ssl_2x.yml:/usr/share/data-prepper/pipelines/pipelines.yaml
      - ./src/dataprepper/data-prepper-config.yaml:/usr/share/data-prepper/config/data-prepper-config.yaml
    ports:
      - "21890:21890"
    depends_on:
      - opensearch-node1

  # Jaeger
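  # jaeger-collector stores spans in OpenSearch (SPAN_STORAGE_TYPE=opensearch),
  # configured through the es.* flags and ES_* variables below.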
  jaeger:
    image: jaegertracing/jaeger-collector:latest
    container_name: jaeger
    command:
      - "--metrics-backend=prometheus"
      - "--es.server-urls=https://opensearch-node1:9200"
      - "--es.tls.enabled=true"
    deploy:
      resources:
        limits:
          memory: 300M
    restart: unless-stopped
    ports:
      - "${JAEGER_SERVICE_PORT}"                    # Jaeger UI
      - "4317"                                      # OTLP gRPC default port
      - "14269:14269"
      - "14268:14268"
      - "14267:14267"
      - "14250:14250"
      - "9411:9411"
    environment:
      - COLLECTOR_OTLP_ENABLED=true
      - METRICS_STORAGE_TYPE=prometheus
      - SPAN_STORAGE_TYPE=opensearch
      - ES_TAGS_AS_FIELDS_ALL=true
      - ES_USERNAME=admin
      - ES_PASSWORD=admin
      - ES_TLS_SKIP_HOST_VERIFY=true
    depends_on:
      - opensearch-node1
      - opensearch-node2
    logging: *logging

  jaeger-agent:
    image: jaegertracing/jaeger-agent:latest
    container_name: jaeger-agent
    hostname: jaeger-agent
    command: ["--reporter.grpc.host-port=jaeger:14250"]
    ports:
      - "${GRAFANA_SERVICE_PORT}"
      - "5775:5775/udp"
      - "6831:6831/udp"
      - "6832:6832/udp"
      - "5778:5778"
    restart: on-failure
    environment:
      - SPAN_STORAGE_TYPE=opensearch
    depends_on:
      - jaeger

  # Grafana
  grafana:
    image: grafana/grafana:9.4.7
    container_name: grafana
    deploy:
      resources:
        limits:
          memory: 100M
    volumes:
      - ./src/grafana/grafana.ini:/etc/grafana/grafana.ini
      - ./src/grafana/provisioning/:/etc/grafana/provisioning/
    ports:
      - "${GRAFANA_SERVICE_PORT}:${GRAFANA_SERVICE_PORT}"
    logging: *logging

  # OpenTelemetry Collector
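  # The three --config flags below are merged in order, so otelcol-config-extras.yml
  # can extend or override settings from the base and observability configs.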
  otelcol:
    image: otel/opentelemetry-collector-contrib:0.76.1
    container_name: otel-col
    deploy:
      resources:
        limits:
          memory: 125M
    restart: unless-stopped
    command: [ "--config=/etc/otelcol-config.yml", "--config=/etc/otelcol-observability.yml", "--config=/etc/otelcol-config-extras.yml" ]
    volumes:
      - ./src/otelcollector/otelcol-config.yml:/etc/otelcol-config.yml
      - ./src/otelcollector/otelcol-observability.yml:/etc/otelcol-observability.yml
      - ./src/otelcollector/otelcol-config-extras.yml:/etc/otelcol-config-extras.yml
    ports:
      - "4317"            # OTLP over gRPC receiver
      - "4318:4318"       # OTLP over HTTP receiver
      - "13133:13133"     # health check port
      - "9464"            # Prometheus exporter
      - "8888"            # metrics endpoint
    depends_on:
      - jaeger-agent
      - data-prepper
    logging: *logging

  # Prometheus
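  # --enable-feature=exemplar-storage keeps exemplars attached to scraped metrics,
  # which allows linking from a metric data point to the corresponding trace in Grafana.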
  prometheus:
    image: quay.io/prometheus/prometheus:v2.43.0
    container_name: prometheus
    command:
      - --web.console.templates=/etc/prometheus/consoles
      - --web.console.libraries=/etc/prometheus/console_libraries
      - --storage.tsdb.retention.time=1h
      - --config.file=/etc/prometheus/prometheus-config.yaml
      - --storage.tsdb.path=/prometheus
      - --web.enable-lifecycle
      - --web.route-prefix=/
      - --enable-feature=exemplar-storage
    volumes:
      - ./src/prometheus/prometheus-config.yaml:/etc/prometheus/prometheus-config.yaml
    deploy:
      resources:
        limits:
          memory: 300M
    ports:
      - "${PROMETHEUS_SERVICE_PORT}:${PROMETHEUS_SERVICE_PORT}"
    logging: *logging

  # OpenSearch store - node1
  opensearch-node1: # This is also the hostname of the container within the Docker network (i.e. https://opensearch-node1/)
    image: opensearchproject/opensearch:2.8.0 # Specifying the latest available image - modify if you want a specific version
    container_name: opensearch-node1
    environment:
      - cluster.name=opensearch-cluster # Name the cluster
      - node.name=opensearch-node1 # Name the node that will run in this container
      - discovery.seed_hosts=opensearch-node1,opensearch-node2 # Nodes to look for when discovering the cluster
      - cluster.initial_cluster_manager_nodes=opensearch-node1,opensearch-node2 # Nodes eligible to serve as cluster manager
      - bootstrap.memory_lock=true # Disable JVM heap memory swapping
      - "OPENSEARCH_JAVA_OPTS=-Xms512m -Xmx512m" # Set min and max JVM heap sizes to at least 50% of system RAM
    ulimits:
      memlock:
        soft: -1 # Set memlock to unlimited (no soft or hard limit)
        hard: -1
      nofile:
        soft: 65536 # Maximum number of open files for the opensearch user - set to at least 65536
        hard: 65536
    volumes:
      - opensearch-data1:/usr/share/opensearch/data # Creates volume called opensearch-data1 and mounts it to the container
    healthcheck:
      test: ["CMD", "curl", "-f", "https://opensearch-node1:9200/_cluster/health?wait_for_status=yellow", "-ku admin:admin"]
      interval: 5s
      timeout: 25s
      retries: 4
    ports:
      - "9200:9200"
      - "9600:9600"

  # OpenSearch store - node2
  opensearch-node2:
    image: opensearchproject/opensearch:2.8.0 # This should be the same image used for opensearch-node1 to avoid issues
    container_name: opensearch-node2
    environment:
      - cluster.name=opensearch-cluster
      - node.name=opensearch-node2
      - discovery.seed_hosts=opensearch-node1,opensearch-node2
      - cluster.initial_cluster_manager_nodes=opensearch-node1,opensearch-node2
      - bootstrap.memory_lock=true
      - "OPENSEARCH_JAVA_OPTS=-Xms512m -Xmx512m"
    ulimits:
      memlock:
        soft: -1
        hard: -1
      nofile:
        soft: 65536
        hard: 65536
    volumes:
      - opensearch-data2:/usr/share/opensearch/data

  # OpenSearch store - dashboard
  opensearch-dashboards:
    image: opensearchproject/opensearch-dashboards:2.8.0 # Make sure the version of opensearch-dashboards matches the version of opensearch installed on other nodes
    container_name: opensearch-dashboards
    ports:
      - 5601:5601 # Map host port 5601 to container port 5601
    expose:
      - "5601" # Expose port 5601 for web access to OpenSearch Dashboards
    environment:
      OPENSEARCH_HOSTS: '["https://opensearch-node1:9200","https://opensearch-node2:9200"]' # Define the OpenSearch nodes that OpenSearch Dashboards will query
    depends_on:
      - opensearch-node1
      - opensearch-node2
      - prometheus

  # Observability OSD Integrations
  integrations:
    container_name: opensearch-integrations
    build:
      context: ./src/integrations
      dockerfile: Dockerfile
    volumes:
      - ./src/integrations:/integrations
    depends_on:
      - opensearch-node1
      - opensearch-node2
      - opensearch-dashboards

  # *****
  # Tests
  # *****
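  # Services in the `tests` profile are not started by a plain `docker compose up`;
  # enable them with `docker compose --profile tests up`, or target one directly
  # (e.g. `docker compose run frontendTests`).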
  # Frontend Tests
  frontendTests:
    image: ${IMAGE_NAME}:${IMAGE_VERSION}-frontend-tests
    container_name: frontend-tests
    build:
      context: ./
      dockerfile: ./src/frontend/Dockerfile.cypress
    profiles:
      - tests
    volumes:
      - ./src/frontend/cypress/videos:/app/cypress/videos
      - ./src/frontend/cypress/screenshots:/app/cypress/screenshots
    environment:
      - CYPRESS_baseUrl=http://${FRONTEND_ADDR}
      - FRONTEND_ADDR
      - NODE_ENV=production
    depends_on:
      - frontend

  # Integration Tests
  integrationTests:
    image: ${IMAGE_NAME}:${IMAGE_VERSION}-integrationTests
    container_name: integrationTests
    profiles:
      - tests
    build:
      context: ./
      dockerfile: ./test/Dockerfile
    environment:
      - AD_SERVICE_ADDR
      - CART_SERVICE_ADDR
      - CHECKOUT_SERVICE_ADDR
      - CURRENCY_SERVICE_ADDR
      - EMAIL_SERVICE_ADDR
      - PAYMENT_SERVICE_ADDR
      - PRODUCT_CATALOG_SERVICE_ADDR
      - RECOMMENDATION_SERVICE_ADDR
      - SHIPPING_SERVICE_ADDR
    depends_on:
      - adservice
      - cartservice
      - checkoutservice
      - currencyservice
      - emailservice
      - paymentservice
      - productcatalogservice
      - recommendationservice
      - shippingservice
      - quoteservice

  # Tracebased Tests
  traceBasedTests:
    image: ${IMAGE_NAME}:${IMAGE_VERSION}-traceBasedTests
    container_name: traceBasedTests
    profiles:
      - tests
    build:
      context: ./
      dockerfile: ./test/tracetesting/Dockerfile
    environment:
      - AD_SERVICE_ADDR
      - CART_SERVICE_ADDR
      - CHECKOUT_SERVICE_ADDR
      - CURRENCY_SERVICE_ADDR
      - EMAIL_SERVICE_ADDR
      - FRONTEND_ADDR
      - PAYMENT_SERVICE_ADDR
      - PRODUCT_CATALOG_SERVICE_ADDR
      - RECOMMENDATION_SERVICE_ADDR
      - SHIPPING_SERVICE_ADDR
    extra_hosts:
      - "host.docker.internal:host-gateway"
    depends_on:
      tracetest-server:
        condition: service_healthy
      # adding demo services as dependencies
      frontend:
        condition: service_started
      adservice:
        condition: service_started
      cartservice:
        condition: service_started
      checkoutservice:
        condition: service_started
      currencyservice:
        condition: service_started
      emailservice:
        condition: service_started
      paymentservice:
        condition: service_started
      productcatalogservice:
        condition: service_started
      recommendationservice:
        condition: service_started
      shippingservice:
        condition: service_started
      quoteservice:
        condition: service_started

  tracetest-server:
    image: kubeshop/tracetest:latest
    platform: linux/amd64
    container_name: tracetest-server
    profiles:
      - tests
    volumes:
      - type: bind
        source: ./test/tracetesting/tracetest-config.yaml
        target: /app/tracetest.yaml
      - type: bind
        source: ./test/tracetesting/tracetest-provision.yaml
        target: /app/provision.yaml
    command: --provisioning-file /app/provision.yaml
    ports:
      - 11633:11633
    extra_hosts:
      - "host.docker.internal:host-gateway"
    depends_on:
      tracetest-postgres:
        condition: service_healthy
      otelcol:
        condition: service_started
    healthcheck:
      test: [ "CMD", "wget", "--spider", "localhost:11633" ]
      interval: 1s
      timeout: 3s
      retries: 60
    environment:
      TRACETEST_DEV: ${TRACETEST_DEV}

  tracetest-postgres:
    image: postgres:14
    container_name: tracetest-postgres
    profiles:
      - tests
    environment:
      POSTGRES_PASSWORD: postgres
      POSTGRES_USER: postgres
    healthcheck:
      test: pg_isready -U "$$POSTGRES_USER" -d "$$POSTGRES_DB"
      interval: 1s
      timeout: 5s
      retries: 60
    ports:
      - 5432