version: "3.9" name: knightcrawler networks: knightcrawler-network: name: knightcrawler-network driver: bridge volumes: postgres: lavinmq: redis: services: ## Postgres is the database that is used by the services. ## All downloaded metadata is stored in this database. postgres: env_file: stack.env healthcheck: test: [ "CMD", "sh", "-c", "pg_isready -h localhost -U $$POSTGRES_USER" ] timeout: 10s interval: 10s retries: 3 start_period: 10s image: postgres:latest # # If you need the database to be accessible from outside, please open the below port. # # Furthermore, please, please, please, change the username and password in the stack.env file. # # If you want to enhance your security even more, create a new user for the database with a strong password. # ports: # - "5432:5432" networks: - knightcrawler-network restart: unless-stopped volumes: - postgres:/var/lib/postgresql/data ## Redis is used as a cache for the services. ## It is used to store the infohashes that are currently being processed in sagas, as well as intrim data. redis: env_file: stack.env healthcheck: test: ["CMD-SHELL", "redis-cli ping"] timeout: 10s interval: 10s retries: 3 start_period: 10s image: redis/redis-stack:latest # # If you need redis to be accessible from outside, please open the below port. # ports: # - "6379:6379" networks: - knightcrawler-network restart: unless-stopped volumes: - redis:/data ## LavinMQ is used as a message broker for the services. ## It is a high performance drop in replacement for RabbitMQ. ## It is used to communicate between the services. lavinmq: env_file: stack.env # # If you need the database to be accessible from outside, please open the below port. # # Furthermore, please, please, please, look at the documentation for lavinmq / rabbitmq on how to secure the service. # ports: # - "5672:5672" # - "15672:15672" # - "15692:15692" image: cloudamqp/lavinmq:latest healthcheck: test: ["CMD-SHELL", "lavinmqctl status"] timeout: 10s interval: 10s retries: 3 start_period: 10s restart: unless-stopped networks: - knightcrawler-network volumes: - lavinmq:/var/lib/lavinmq/ ## The addon. This is what is used in stremio addon: depends_on: metadata: condition: service_completed_successfully migrator: condition: service_completed_successfully postgres: condition: service_healthy lavinmq: condition: service_healthy redis: condition: service_healthy env_file: stack.env hostname: knightcrawler-addon image: gabisonfire/knightcrawler-addon:2.0.24 labels: logging: promtail networks: - knightcrawler-network ports: - "7000:7000" restart: unless-stopped ## The consumer is responsible for consuming infohashes and orchestrating download of metadata. consumer: depends_on: metadata: condition: service_completed_successfully migrator: condition: service_completed_successfully postgres: condition: service_healthy lavinmq: condition: service_healthy redis: condition: service_healthy env_file: stack.env image: gabisonfire/knightcrawler-consumer:2.0.24 labels: logging: promtail networks: - knightcrawler-network restart: unless-stopped ## The debrid collector is responsible for downloading metadata from debrid services. 
  ## The debrid collector is responsible for downloading metadata from debrid services.
  ## (Currently only RealDebrid is supported.)
  debridcollector:
    depends_on:
      metadata:
        condition: service_completed_successfully
      migrator:
        condition: service_completed_successfully
      postgres:
        condition: service_healthy
      lavinmq:
        condition: service_healthy
      redis:
        condition: service_healthy
    env_file: stack.env
    image: gabisonfire/knightcrawler-debrid-collector:2.0.24
    labels:
      logging: promtail
    networks:
      - knightcrawler-network
    restart: unless-stopped

  ## The metadata service is responsible for downloading the publicly available IMDb datasets.
  ## These are used to enrich the metadata during production of ingested infohashes.
  metadata:
    depends_on:
      migrator:
        condition: service_completed_successfully
    env_file: stack.env
    image: gabisonfire/knightcrawler-metadata:2.0.24
    networks:
      - knightcrawler-network
    restart: "no"

  ## The migrator is responsible for migrating the database schema.
  migrator:
    depends_on:
      postgres:
        condition: service_healthy
    env_file: stack.env
    image: gabisonfire/knightcrawler-migrator:2.0.24
    networks:
      - knightcrawler-network
    restart: "no"

  ## The producer is responsible for producing infohashes acquired from various sites, including DMM.
  producer:
    depends_on:
      metadata:
        condition: service_completed_successfully
      migrator:
        condition: service_completed_successfully
      postgres:
        condition: service_healthy
      lavinmq:
        condition: service_healthy
      redis:
        condition: service_healthy
    env_file: stack.env
    image: gabisonfire/knightcrawler-producer:2.0.24
    labels:
      logging: promtail
    networks:
      - knightcrawler-network
    restart: unless-stopped

  ## The qBit collector utilizes qBittorrent to download metadata.
  qbitcollector:
    depends_on:
      metadata:
        condition: service_completed_successfully
      migrator:
        condition: service_completed_successfully
      postgres:
        condition: service_healthy
      lavinmq:
        condition: service_healthy
      redis:
        condition: service_healthy
      qbittorrent:
        condition: service_healthy
    deploy:
      replicas: ${QBIT_REPLICAS:-0}
    env_file: stack.env
    image: gabisonfire/knightcrawler-qbit-collector:2.0.24
    labels:
      logging: promtail
    networks:
      - knightcrawler-network
    restart: unless-stopped

  ## qBittorrent is a torrent client that can be used to download torrents. In this case it's used to download metadata.
  ## The qBit collector requires this.
  qbittorrent:
    deploy:
      replicas: ${QBIT_REPLICAS:-0}
    env_file: stack.env
    environment:
      PGID: "1000"
      PUID: "1000"
      TORRENTING_PORT: "6881"
      WEBUI_PORT: "8080"
    healthcheck:
      test: ["CMD-SHELL", "curl --fail http://localhost:8080"]
      timeout: 10s
      interval: 10s
      retries: 3
      start_period: 10s
    image: lscr.io/linuxserver/qbittorrent:latest
    networks:
      - knightcrawler-network
    ports:
      - "6881:6881/tcp"
      - "6881:6881/udp"
      # If you want to expose the web UI, uncomment the following line:
      # - "8001:8080"
    restart: unless-stopped
    volumes:
      - ./config/qbit/qbittorrent.conf:/config/qBittorrent/qBittorrent.conf
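
## Both qBittorrent and the qBit collector default to zero replicas via
## ${QBIT_REPLICAS:-0}, so they stay disabled until you opt in. A minimal sketch
## of enabling them, assuming you run plain `docker compose` (the variable can
## live in a .env file next to this compose file, or be exported in your shell):
#
#   echo "QBIT_REPLICAS=1" >> .env
#   docker compose up -d
#
## Compose substitutes the value into both `deploy.replicas` fields above, so
## the torrent client and its collector always scale together.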