# knightcrawler/deployment/docker/docker-compose.yaml
version: "3.9"
name: knightcrawler
networks:
  knightcrawler-network:
    name: knightcrawler-network
    driver: bridge
volumes:
  postgres:
  rabbitmq:
  redis:
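# Every service below reads its configuration from stack.env. A minimal
# sketch of that file, with variable names inferred from references in this
# compose file (POSTGRES_PASSWORD is assumed from the standard postgres image
# and the comments below; check the repository's stack.env for the
# authoritative list):
#   POSTGRES_USER=knightcrawler
#   POSTGRES_PASSWORD=<choose-a-strong-password>
#   QBIT_REPLICAS=0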
services:
  ## Postgres is the database that is used by the services.
  ## All downloaded metadata is stored in this database.
  postgres:
    env_file: stack.env
    environment:
      PGUSER: ${POSTGRES_USER}
    healthcheck:
      test:
        - CMD-SHELL
        - pg_isready
      timeout: 10s
      interval: 10s
      retries: 3
      start_period: 10s
    image: postgres:latest
    # # If you need the database to be accessible from outside, open the port below.
    # # If you do, please, please, please change the username and password in the stack.env file.
    # # To enhance your security even more, create a dedicated database user with a strong password.
    # ports:
    #   - "5432:5432"
    networks:
      - knightcrawler-network
    restart: unless-stopped
    volumes:
      - postgres:/var/lib/postgresql/data
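  # Once the stack is up you can confirm the healthcheck passes by hand
  # (command assumed, matching the test above):
  #   docker compose exec postgres pg_isready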
  ## Redis is used as a cache for the services.
  ## It is used to store the infohashes that are currently being processed in sagas, as well as interim data.
  redis:
    env_file: stack.env
    healthcheck:
      test:
        - CMD-SHELL
        - redis-cli ping
      timeout: 10s
      interval: 10s
      retries: 3
      start_period: 10s
    image: redis/redis-stack:latest
    # # If you need Redis to be accessible from outside, open the port below.
    # ports:
    #   - "6379:6379"
    networks:
      - knightcrawler-network
    restart: unless-stopped
    volumes:
      - redis:/data
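  # A quick manual check mirroring the healthcheck (command assumed):
  #   docker compose exec redis redis-cli ping    # expected reply: PONG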
  ## RabbitMQ is used as a message broker for the services.
  ## It is used to communicate between the services.
  rabbitmq:
    env_file: stack.env
    healthcheck:
      test:
        - CMD-SHELL
        - rabbitmq-diagnostics -q ping
      timeout: 10s
      interval: 10s
      retries: 3
      start_period: 10s
    # # If you need RabbitMQ to be accessible from outside, open the ports below.
    # # Furthermore, please, please, please read the RabbitMQ documentation on how to secure the service.
    # ports:
    #   - "5672:5672"
    #   - "15672:15672"
    #   - "15692:15692"
    image: rabbitmq:3-management
    networks:
      - knightcrawler-network
    restart: unless-stopped
    volumes:
      - rabbitmq:/var/lib/rabbitmq
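  # Port roles if you do expose them (standard RabbitMQ assignments): 5672 is
  # AMQP, 15672 is the management UI, 15692 is Prometheus metrics. Note that
  # the management image's default guest/guest login only works from
  # localhost; change it before exposing anything.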
  ## The addon. This is what gets added to Stremio.
  addon:
    depends_on:
      metadata:
        condition: service_completed_successfully
        required: true
      migrator:
        condition: service_completed_successfully
        required: true
      postgres:
        condition: service_healthy
        required: true
      rabbitmq:
        condition: service_healthy
        required: true
      redis:
        condition: service_healthy
        required: true
    env_file: stack.env
    hostname: knightcrawler-addon
    image: gabisonfire/knightcrawler-addon:2.0.7
    labels:
      logging: promtail
    networks:
      - knightcrawler-network
    ports:
      - "7000:7000"
    restart: unless-stopped
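  # With the stack up, the addon listens on host port 7000; the page with the
  # manifest URL to add to Stremio should be reachable at http://<host>:7000
  # (the exact path depends on the addon build, so treat this as a pointer
  # rather than a guarantee).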
  ## The consumer is responsible for consuming infohashes and orchestrating the download of metadata.
  consumer:
    depends_on:
      metadata:
        condition: service_completed_successfully
        required: true
      migrator:
        condition: service_completed_successfully
        required: true
      postgres:
        condition: service_healthy
        required: true
      rabbitmq:
        condition: service_healthy
        required: true
      redis:
        condition: service_healthy
        required: true
    env_file: stack.env
    image: gabisonfire/knightcrawler-consumer:2.0.7
    labels:
      logging: promtail
    networks:
      - knightcrawler-network
    restart: unless-stopped
  ## The debrid collector is responsible for downloading metadata from debrid services. (Currently only RealDebrid is supported.)
  debridcollector:
    depends_on:
      metadata:
        condition: service_completed_successfully
        required: true
      migrator:
        condition: service_completed_successfully
        required: true
      postgres:
        condition: service_healthy
        required: true
      rabbitmq:
        condition: service_healthy
        required: true
      redis:
        condition: service_healthy
        required: true
    env_file: stack.env
    image: gabisonfire/knightcrawler-debrid-collector:2.0.7
    labels:
      logging: promtail
    networks:
      - knightcrawler-network
    restart: unless-stopped
  ## The metadata service is responsible for downloading IMDb's publicly available datasets.
  ## This data is used to enrich the metadata of ingested infohashes.
  metadata:
    depends_on:
      migrator:
        condition: service_completed_successfully
        required: true
    env_file: stack.env
    image: gabisonfire/knightcrawler-metadata:2.0.7
    networks:
      - knightcrawler-network
    restart: "no"
  ## The migrator is responsible for migrating the database schema.
  migrator:
    depends_on:
      postgres:
        condition: service_healthy
        required: true
    env_file: stack.env
    image: gabisonfire/knightcrawler-migrator:2.0.7
    networks:
      - knightcrawler-network
    restart: "no"
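  # Note: metadata and the migrator are one-shot jobs. restart: "no" lets
  # them exit once finished, and the service_completed_successfully
  # conditions used elsewhere in this file hold the long-running services
  # back until both jobs have run to completion.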
  ## The producer is responsible for producing infohashes by acquiring them from various sites, including DMM.
  producer:
    depends_on:
      metadata:
        condition: service_completed_successfully
        required: true
      migrator:
        condition: service_completed_successfully
        required: true
      postgres:
        condition: service_healthy
        required: true
      rabbitmq:
        condition: service_healthy
        required: true
      redis:
        condition: service_healthy
        required: true
    env_file: stack.env
    image: gabisonfire/knightcrawler-producer:2.0.7
    labels:
      logging: promtail
    networks:
      - knightcrawler-network
    restart: unless-stopped
  ## The qbit collector uses qBittorrent to download metadata.
  qbitcollector:
    depends_on:
      qbittorrent:
        condition: service_healthy
        required: true
    deploy:
      replicas: ${QBIT_REPLICAS:-0}
    env_file: stack.env
    image: gabisonfire/knightcrawler-qbit-collector:2.0.7
    labels:
      logging: promtail
    networks:
      - knightcrawler-network
    restart: unless-stopped
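  # ${QBIT_REPLICAS:-0} is Compose default-value substitution: this collector
  # (and the qbittorrent service below) starts with 0 replicas, i.e. stays
  # disabled, unless QBIT_REPLICAS is set to 1 or more in stack.env or the
  # shell environment.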
  ## qBittorrent is a torrent client that can be used to download torrents. In this case it's used to download metadata.
  ## The qbit collector requires this.
  qbittorrent:
    deploy:
      replicas: ${QBIT_REPLICAS:-0}
    env_file: stack.env
    environment:
      PGID: "1000"
      PUID: "1000"
      TORRENTING_PORT: "6881"
      WEBUI_PORT: "8080"
    healthcheck:
      test:
        - CMD-SHELL
        - curl --fail http://localhost:8080
      timeout: 10s
      interval: 10s
      retries: 3
      start_period: 10s
    image: lscr.io/linuxserver/qbittorrent:latest
    networks:
      - knightcrawler-network
    ports:
      - "6881:6881/tcp"
      - "6881:6881/udp"
      # If you want to expose the web UI, uncomment the following line:
      # - "8001:8080"
    restart: unless-stopped
    volumes:
      - ./config/qbit/qbittorrent.conf:/config/qBittorrent/qBittorrent.conf
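  # The bind mount above expects ./config/qbit/qbittorrent.conf to exist next
  # to this compose file. PUID/PGID map the container user to host UID/GID
  # 1000 (the linuxserver.io convention); adjust them if your host user
  # differs.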