version: '3.8'

services:
  zookeeper:
    image: confluentinc/cp-zookeeper:7.5.0
    container_name: job-zookeeper
    ports:
      - "2181:2181"
    environment:
      ZOOKEEPER_CLIENT_PORT: 2181
      ZOOKEEPER_TICK_TIME: 2000
    volumes:
      - zookeeper_data:/var/lib/zookeeper/data
    healthcheck:
      test: ["CMD", "nc", "-z", "localhost", "2181"]
      interval: 10s
      timeout: 5s
      retries: 5
    networks:
      - job-network

  kafka:
    image: confluentinc/cp-kafka:7.5.0
    container_name: job-kafka
    ports:
      - "9092:9092"
      - "29092:29092"
    environment:
      KAFKA_BROKER_ID: 1
      KAFKA_ZOOKEEPER_CONNECT: zookeeper:2181
      KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://kafka:29092,PLAINTEXT_HOST://localhost:9092
      KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: PLAINTEXT:PLAINTEXT,PLAINTEXT_HOST:PLAINTEXT
      KAFKA_INTER_BROKER_LISTENER_NAME: PLAINTEXT
      KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: 1
      KAFKA_AUTO_CREATE_TOPICS_ENABLE: "true"
    volumes:
      - kafka_data:/var/lib/kafka/data
    depends_on:
      zookeeper:
        condition: service_healthy
    healthcheck:
      test: ["CMD", "kafka-topics", "--bootstrap-server", "localhost:9092", "--list"]
      interval: 10s
      timeout: 10s
      retries: 5
    networks:
      - job-network

  app:
    build:
      context: .
      dockerfile: Dockerfile
    container_name: job-crawler
    ports:
      - "8000:8000"
    environment:
      - CONFIG_PATH=/app/config/config.yml
    volumes:
      - ./config:/app/config:ro
      - app_data:/app/data
    depends_on:
      kafka:
        condition: service_healthy
    restart: unless-stopped
    networks:
      - job-network

networks:
  job-network:
    driver: bridge

volumes:
  zookeeper_data:
  kafka_data:
  app_data:
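
# Usage notes (kept as comments so this file stays valid YAML; a typical
# workflow, adjust commands and paths to your environment):
#
# Kafka advertises two listeners: containers on job-network reach the broker
# at kafka:29092 (PLAINTEXT), while processes on the host connect via
# localhost:9092 (PLAINTEXT_HOST).
#
# Bring the stack up and follow the crawler logs:
#   docker compose up -d --build
#   docker compose logs -f app
#
# Sanity-check the broker from the host using the CLI bundled in the image
# (the same tool the healthcheck runs):
#   docker compose exec kafka kafka-topics --bootstrap-server localhost:9092 --list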