Mirror of https://github.com/knightcrawler-stremio/knightcrawler.git (synced 2024-12-20 03:29:51 +00:00)
Producer / Consumer / Collector rewrite (#160)

* Converted metadata service to redis
* Move to postgres instead
* Fix global usings
* [skip ci] Optimize wolverine by prebuilding static types
* [skip ci] Stop indexing mac folder indexes
* [skip ci] Producer, metadata and migrations: removed mongodb, added redis cache, imdb meta in postgres; enable pg_trgm, create trigram index, add search meta postgres function
* [skip ci] Get rid of node folder; replace mongo with redis in consumer, also wire up postgres metadata searches
* [skip ci] Change mongo to redis in the addon
* [skip ci] Jackettio to redis
* Rest of mongo removed...
* Cleaner rerunning of metadata, without conflicts
* Add akas import as well as basic metadata
* Include episodes file too
* Cascade truncate pre-import
* Reverse order to avoid cascading
* Separate out clean to a separate handler
* Switch producer to use metadata matching, pre-processing DMM
* More work
* Still porting PTN
* PTN port, adding tests
* [skip ci] Codec tests
* [skip ci] Complete Collection handler tests
* [skip ci] Container tests
* [skip ci] Convert handlers tests
* [skip ci] DateHandler tests
* [skip ci] Dual Audio matching tests
* [skip ci] Episode code tests
* [skip ci] Extended handler tests
* [skip ci] Group handler tests
* [skip ci] Some broken stuff right now
* [skip ci] More PTN
* [skip ci] PTN now in a separate nuget package; rebased this on the redis changes, I need them
* [skip ci] Wire up PTN port. Tired, will test tomorrow
* [skip ci] Needs a lot of work, too many titles being missed now
* Cleaner. Done?
* Handle the date in the imdb search: add an integer function to confirm it's a valid integer, and use the input date as a range of ±1 year
* [skip ci] Start of collector service for RD:
  - WIP: implemented a metadata saga, along with channels to process up to a maximum of 100 infohashes at a time. The saga will retry each infohash by requeuing up to three times before marking it as complete, meaning no data will be updated in the db for that torrent.
  - Ready to test with queue publishing. Will provision a fanout exchange if it doesn't exist, and create and bind a queue to it. Listens to the queue with a prefetch count of 50. Still needs the PTN rewrite bringing in, to parse the filename response from Real-Debrid and extract season and episode numbers if the file is a tv show.
  - Add Debrid Collector build job; Debrid Collector ready for testing. New consumer, new collector; producer has meta lookup and anti-porn measures.
  - WIP: moving from Wolverine to MassTransit; not happy that Wolverine cannot effectively control saga concurrency, and we really need to.
  - Producer and new Consumer moved to MassTransit. Just the debrid collector to go now, then to write the optional qbit collector. Collector now switched to MassTransit too.
  - Hide porn titles in logs; clean up cache name in redis for imdb titles.
  - Allow control of queues.
  - Update deployment: remove old consumer, fix deployment files, fix dockerfiles for shared project import, fix base deployment.
* Add collector missing env var
* Edits to kick off builds
* Add optional qbit deployment which the qbit collector will use
* Qbit collector done
* Reorder compose, and bring both qbit and qbitcollector into the compose, with 0 replicas as default
* Clean up compose file
* Ensure debrid collector errors if no debrid api key
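
The pg_trgm work described above (trigram index plus a "search meta" postgres function with the ±1 year date window) is not visible in the diff below, so here is a minimal illustrative sketch in JavaScript with the pg client. The table and column names (imdb_metadata, imdb_id, title, year) are assumptions, not the repo's actual schema; the distance semantics follow the old TITLE_MATCH_THRESHOLD comment ("the closer to 0, the more strict").

    // Hypothetical sketch: match a parsed torrent title against imported IMDb
    // metadata using pg_trgm, widening an input year to a ±1 range.
    // Assumes migrations ran `CREATE EXTENSION pg_trgm;` and built a trigram
    // index, e.g.: CREATE INDEX ON imdb_metadata USING gin (title gin_trgm_ops);
    import pg from 'pg';

    const pool = new pg.Pool({ connectionString: process.env.POSTGRES_URL });

    async function findImdbMeta(title, year, threshold = 0.25) {
      // `<->` is pg_trgm's distance operator (0 = identical), so a smaller
      // threshold means stricter matching, as the old env comment described.
      const { rows } = await pool.query(
        `SELECT imdb_id, title, year
           FROM imdb_metadata
          WHERE (title <-> $1) < $2
            AND ($3::int IS NULL OR year BETWEEN $3 - 1 AND $3 + 1)
          ORDER BY title <-> $1
          LIMIT 1`,
        // The integer guard mirrors the "confirm it's a valid integer" step.
        [title, Number.isInteger(year) ? year : null, threshold],
      );
      return rows[0] ?? null;
    }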

.github/workflows/base_image_workflow.yaml (vendored, 12 changes)
@@ -6,12 +6,16 @@ on:
       CONTEXT:
         required: true
         type: string
+      DOCKERFILE:
+        required: true
+        type: string
       IMAGE_NAME:
         required: true
         type: string
 
 env:
   CONTEXT: ${{ inputs.CONTEXT }}
+  DOCKERFILE: ${{ inputs.DOCKERFILE }}
   IMAGE_NAME: ${{ inputs.IMAGE_NAME }}
   PLATFORMS: linux/amd64,linux/arm64
 
@@ -21,11 +25,13 @@ jobs:
     steps:
       - name: Setting variables
         run: |
           echo "CONTEXT=${{ env.CONTEXT }}"
+          echo "DOCKERFILE=${{ env.DOCKERFILE }}"
           echo "IMAGE_NAME=${{ env.IMAGE_NAME }}"
           echo "PLATFORMS=${{ env.PLATFORMS }}"
     outputs:
       CONTEXT: ${{ env.CONTEXT }}
+      DOCKERFILE: ${{ env.DOCKERFILE }}
       IMAGE_NAME: ${{ env.IMAGE_NAME }}
       PLATFORMS: ${{ env.PLATFORMS }}
 
@@ -80,6 +86,7 @@ jobs:
         uses: docker/build-push-action@v5
         with:
           context: ${{ needs.set-vars.outputs.CONTEXT }}
+          file: ${{ needs.set-vars.outputs.DOCKERFILE }}
           push: true
           provenance: false
           tags: localhost:5000/dockle-examine-image:test
@@ -136,6 +143,7 @@ jobs:
         uses: docker/build-push-action@v5
         with:
           context: ${{ needs.set-vars.outputs.CONTEXT }}
+          file: ${{ needs.set-vars.outputs.DOCKERFILE }}
           push: true
           provenance: false
           tags: ${{ steps.docker-metadata.outputs.tags }}

.github/workflows/build_addon.yaml (vendored, 5 changes)
@@ -3,7 +3,7 @@ name: Build and Push Addon Service
 on:
   push:
     paths:
-      - 'src/node/addon/**'
+      - 'src/addon/**'
     tags:
       - 'v*.*.*'
   workflow_dispatch:
@@ -13,5 +13,6 @@ jobs:
     uses: ./.github/workflows/base_image_workflow.yaml
     secrets: inherit
     with:
-      CONTEXT: ./src/node/addon/
+      CONTEXT: ./src/addon/
+      DOCKERFILE: ./src/addon/Dockerfile
       IMAGE_NAME: knightcrawler-addon

.github/workflows/build_consumer.yaml (vendored, 5 changes)
@@ -3,7 +3,7 @@ name: Build and Push Consumer Service
 on:
   push:
     paths:
-      - 'src/node/consumer/**'
+      - 'src/torrent-consumer/**'
     tags:
       - 'v*.*.*'
   workflow_dispatch:
@@ -13,5 +13,6 @@ jobs:
     uses: ./.github/workflows/base_image_workflow.yaml
     secrets: inherit
     with:
-      CONTEXT: ./src/node/consumer/
+      CONTEXT: ./src/
+      DOCKERFILE: ./src/torrent-consumer/Dockerfile
       IMAGE_NAME: knightcrawler-consumer

.github/workflows/build_debrid_collector.yaml (vendored, new file, 16 lines)
@@ -0,0 +1,16 @@
+name: Build and Push Debrid Collector Service
+
+on:
+  push:
+    paths:
+      - 'src/debrid-collector/**'
+  workflow_dispatch:
+
+jobs:
+  process:
+    uses: ./.github/workflows/base_image_workflow.yaml
+    secrets: inherit
+    with:
+      CONTEXT: ./src/
+      DOCKERFILE: ./src/debrid-collector/Dockerfile
+      IMAGE_NAME: knightcrawler-debrid-collector

.github/workflows/build_jackett-addon.yaml (vendored, 5 changes)
@@ -3,7 +3,7 @@ name: Build and Push Jackett Addon Service
 on:
   push:
     paths:
-      - 'src/node/addon-jackett/**'
+      - 'src/addon-jackett/**'
     tags:
       - 'v*.*.*'
   workflow_dispatch:
@@ -13,5 +13,6 @@ jobs:
     uses: ./.github/workflows/base_image_workflow.yaml
     secrets: inherit
     with:
-      CONTEXT: ./src/node/addon-jackett/
+      CONTEXT: ./src/addon-jackett/
+      DOCKERFILE: ./src/addon-jackett/Dockerfile
       IMAGE_NAME: knightcrawler-addon-jackett

.github/workflows/build_metadata.yaml (vendored, 1 change)
@@ -14,4 +14,5 @@ jobs:
     secrets: inherit
     with:
       CONTEXT: ./src/metadata/
+      DOCKERFILE: ./src/metadata/Dockerfile
       IMAGE_NAME: knightcrawler-metadata

.github/workflows/build_migrator.yaml (vendored, 1 change)
@@ -14,4 +14,5 @@ jobs:
     secrets: inherit
     with:
       CONTEXT: ./src/migrator/
+      DOCKERFILE: ./src/migrator/Dockerfile
       IMAGE_NAME: knightcrawler-migrator

.github/workflows/build_producer.yaml (vendored, 3 changes)
@@ -13,5 +13,6 @@ jobs:
     uses: ./.github/workflows/base_image_workflow.yaml
     secrets: inherit
     with:
-      CONTEXT: ./src/producer/
+      CONTEXT: ./src/
+      DOCKERFILE: ./src/producer/src/Dockerfile
       IMAGE_NAME: knightcrawler-producer

.github/workflows/build_qbit_collector.yaml (vendored, new file, 16 lines)
@@ -0,0 +1,16 @@
+name: Build and Push Qbit Collector Service
+
+on:
+  push:
+    paths:
+      - 'src/qbit-collector/**'
+  workflow_dispatch:
+
+jobs:
+  process:
+    uses: ./.github/workflows/base_image_workflow.yaml
+    secrets: inherit
+    with:
+      CONTEXT: ./src/
+      DOCKERFILE: ./src/qbit-collector/Dockerfile
+      IMAGE_NAME: knightcrawler-qbit-collector

.github/workflows/build_tissue.yaml (vendored, 1 change)
@@ -14,4 +14,5 @@ jobs:
     secrets: inherit
     with:
       CONTEXT: ./src/tissue/
+      DOCKERFILE: ./src/tissue/Dockerfile
       IMAGE_NAME: knightcrawler-tissue

.gitignore (vendored, 5 changes)
@@ -395,8 +395,6 @@ dist/
 downloads/
 eggs/
 .eggs/
-lib/
-lib64/
 parts/
 sdist/
 var/
@@ -610,3 +608,6 @@ fabric.properties
 # Caddy logs
 !**/caddy/logs/.gitkeep
 **/caddy/logs/**
+
+# Mac directory indexes
+.DS_Store

@@ -8,48 +8,29 @@ POSTGRES_USER=postgres
 POSTGRES_PASSWORD=postgres
 POSTGRES_DB=knightcrawler
 
-# MongoDB
-MONGODB_HOST=mongodb
-MONGODB_PORT=27017
-MONGODB_DB=knightcrawler
-MONGODB_USER=mongo
-MONGODB_PASSWORD=mongo
+# Redis
+REDIS_CONNECTION_STRING=redis:6379
 
 # RabbitMQ
 RABBITMQ_HOST=rabbitmq
 RABBITMQ_USER=guest
 RABBITMQ_PASSWORD=guest
-RABBITMQ_QUEUE_NAME=ingested
+RABBITMQ_CONSUMER_QUEUE_NAME=ingested
 RABBITMQ_DURABLE=true
 RABBITMQ_MAX_QUEUE_SIZE=0
 RABBITMQ_MAX_PUBLISH_BATCH_SIZE=500
 RABBITMQ_PUBLISH_INTERVAL_IN_SECONDS=10
 
 # Metadata
-## Only used if DATA_ONCE is set to false. If true, the schedule is ignored
-METADATA_DOWNLOAD_IMDB_DATA_SCHEDULE="0 0 1 * *"
-## If true, the metadata will be downloaded once and then the schedule will be ignored
-METADATA_DOWNLOAD_IMDB_DATA_ONCE=true
-## Controls the amount of records processed in memory at any given time during import, higher values will consume more memory
-METADATA_INSERT_BATCH_SIZE=25000
+METADATA_INSERT_BATCH_SIZE=50000
+
+# Collectors
+COLLECTOR_QBIT_ENABLED=false
+COLLECTOR_DEBRID_ENABLED=true
+COLLECTOR_REAL_DEBRID_API_KEY=
 
 # Addon
 DEBUG_MODE=false
 
-# Consumer
-JOB_CONCURRENCY=5
-JOBS_ENABLED=true
-## can be debug for extra verbosity (a lot more verbosity - useful for development)
-LOG_LEVEL=info
-MAX_CONNECTIONS_PER_TORRENT=10
-MAX_CONNECTIONS_OVERALL=100
-TORRENT_TIMEOUT=30000
-UDP_TRACKERS_ENABLED=true
-CONSUMER_REPLICAS=3
-## Fix for #66 - toggle on for development
-AUTO_CREATE_AND_APPLY_MIGRATIONS=false
-## Allows control of the threshold for matching titles to the IMDB dataset. The closer to 0, the more strict the matching.
-TITLE_MATCH_THRESHOLD=0.25
-
 # Producer
 GITHUB_PAT=
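
The RABBITMQ_* settings above feed the queue topology described in the commit message: a fanout exchange provisioned on startup, a queue created and bound to it, and a consumer running with a prefetch count of 50. The services do this through MassTransit in C#, but a minimal amqplib sketch of the same wiring (exchange and queue names here are illustrative, not taken from the repo) looks like:

    // Sketch of the collector's described queue setup: provision a fanout
    // exchange if it doesn't exist, bind a queue to it, consume with prefetch 50.
    import amqp from 'amqplib';

    const conn = await amqp.connect(
      `amqp://${process.env.RABBITMQ_USER}:${process.env.RABBITMQ_PASSWORD}@${process.env.RABBITMQ_HOST}`,
    );
    const channel = await conn.createChannel();

    // assertExchange/assertQueue create the objects only when missing.
    await channel.assertExchange('ingested-torrents', 'fanout', { durable: true });
    const { queue } = await channel.assertQueue('debrid-collector', { durable: true });
    await channel.bindQueue(queue, 'ingested-torrents', '');

    channel.prefetch(50); // at most 50 unacknowledged messages in flight
    await channel.consume(queue, async (msg) => {
      if (msg === null) return;
      // ... look up the infohash against the debrid service, parse filenames ...
      channel.ack(msg);
    });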

deployment/docker/components/config/qbit/qbittorrent.conf (new file, 58 lines)
@@ -0,0 +1,58 @@
+[Application]
+FileLogger\Age=1
+FileLogger\AgeType=1
+FileLogger\Backup=true
+FileLogger\DeleteOld=true
+FileLogger\Enabled=true
+FileLogger\MaxSizeBytes=66560
+FileLogger\Path=/config/qBittorrent/logs
+
+[AutoRun]
+enabled=false
+program=
+
+[BitTorrent]
+Session\DefaultSavePath=/downloads/
+Session\ExcludedFileNames=
+Session\MaxActiveDownloads=10
+Session\MaxActiveTorrents=50
+Session\MaxActiveUploads=50
+Session\MaxConnections=2000
+Session\Port=6881
+Session\QueueingSystemEnabled=true
+Session\TempPath=/downloads/incomplete/
+Session\TorrentStopCondition=MetadataReceived
+
+[Core]
+AutoDeleteAddedTorrentFile=Never
+
+[LegalNotice]
+Accepted=true
+
+[Meta]
+MigrationVersion=6
+
+[Network]
+PortForwardingEnabled=true
+Proxy\HostnameLookupEnabled=false
+Proxy\Profiles\BitTorrent=true
+Proxy\Profiles\Misc=true
+Proxy\Profiles\RSS=true
+
+[Preferences]
+Connection\PortRangeMin=6881
+Connection\ResolvePeerCountries=false
+Connection\UPnP=false
+Downloads\SavePath=/downloads/
+Downloads\TempPath=/downloads/incomplete/
+General\Locale=en
+MailNotification\req_auth=true
+WebUI\Address=*
+WebUI\AuthSubnetWhitelist=0.0.0.0/0
+WebUI\AuthSubnetWhitelistEnabled=true
+WebUI\LocalHostAuth=false
+WebUI\ServerDomains=*
+
+[RSS]
+AutoDownloader\DownloadRepacks=true
+AutoDownloader\SmartEpisodeFilter=s(\\d+)e(\\d+), (\\d+)x(\\d+), "(\\d{4}[.\\-]\\d{1,2}[.\\-]\\d{1,2})", "(\\d{1,2}[.\\-]\\d{1,2}[.\\-]\\d{4})"

deployment/docker/components/infrastructure.yaml (new file, 89 lines)
@@ -0,0 +1,89 @@
+x-basehealth: &base-health
+  interval: 10s
+  timeout: 10s
+  retries: 3
+  start_period: 10s
+
+x-rabbithealth: &rabbitmq-health
+  test: rabbitmq-diagnostics -q ping
+  <<: *base-health
+
+x-redishealth: &redis-health
+  test: redis-cli ping
+  <<: *base-health
+
+x-postgreshealth: &postgresdb-health
+  test: pg_isready
+  <<: *base-health
+
+x-qbit: &qbit-health
+  test: "curl --fail http://localhost:8080"
+  <<: *base-health
+
+services:
+
+  postgres:
+    image: postgres:latest
+    environment:
+      PGUSER: postgres # needed for healthcheck.
+    # # If you need the database to be accessible from outside, please open the below port.
+    # # Furthermore, please, please, please, change the username and password in the .env file.
+    # # If you want to enhance your security even more, create a new user for the database with a strong password.
+    # ports:
+    #   - "5432:5432"
+    volumes:
+      - postgres:/var/lib/postgresql/data
+    healthcheck: *postgresdb-health
+    restart: unless-stopped
+    env_file: ../.env
+    networks:
+      - knightcrawler-network
+
+  redis:
+    image: redis/redis-stack:latest
+    # # If you need redis to be accessible from outside, please open the below port.
+    # ports:
+    #   - "6379:6379"
+    volumes:
+      - redis:/data
+    restart: unless-stopped
+    healthcheck: *redis-health
+    env_file: ../.env
+    networks:
+      - knightcrawler-network
+
+  rabbitmq:
+    image: rabbitmq:3-management
+    # # If you need the database to be accessible from outside, please open the below port.
+    # # Furthermore, please, please, please, look at the documentation for rabbit on how to secure the service.
+    # ports:
+    #   - "5672:5672"
+    #   - "15672:15672"
+    #   - "15692:15692"
+    volumes:
+      - rabbitmq:/var/lib/rabbitmq
+    restart: unless-stopped
+    healthcheck: *rabbitmq-health
+    env_file: ../.env
+    networks:
+      - knightcrawler-network
+
+  ## QBitTorrent is a torrent client that can be used to download torrents. In this case its used to download metadata.
+  ## The QBit collector requires this.
+  qbittorrent:
+    image: lscr.io/linuxserver/qbittorrent:latest
+    environment:
+      - PUID=1000
+      - PGID=1000
+      - WEBUI_PORT=8080
+      - TORRENTING_PORT=6881
+    ports:
+      - 6881:6881
+      - 6881:6881/udp
+    env_file: ../.env
+    networks:
+      - knightcrawler-network
+    restart: unless-stopped
+    healthcheck: *qbit-health
+    volumes:
+      - ./config/qbit/qbittorrent.conf:/config/qBittorrent/qBittorrent.conf

deployment/docker/components/knightcrawler.yaml (new file, 71 lines)
@@ -0,0 +1,71 @@
+x-apps: &knightcrawler-app
+  labels:
+    logging: "promtail"
+  env_file: ../.env
+  networks:
+    - knightcrawler-network
+
+x-depends: &knightcrawler-app-depends
+  depends_on:
+    redis:
+      condition: service_healthy
+    postgres:
+      condition: service_healthy
+    rabbitmq:
+      condition: service_healthy
+    migrator:
+      condition: service_completed_successfully
+    metadata:
+      condition: service_completed_successfully
+
+services:
+  metadata:
+    image: gabisonfire/knightcrawler-metadata:2.0.0
+    env_file: ../.env
+    networks:
+      - knightcrawler-network
+    restart: no
+    depends_on:
+      migrator:
+        condition: service_completed_successfully
+
+  migrator:
+    image: gabisonfire/knightcrawler-migrator:2.0.0
+    env_file: ../.env
+    networks:
+      - knightcrawler-network
+    restart: no
+    depends_on:
+      postgres:
+        condition: service_healthy
+
+  addon:
+    image: gabisonfire/knightcrawler-addon:2.0.0
+    <<: [*knightcrawler-app, *knightcrawler-app-depends]
+    restart: unless-stopped
+    hostname: knightcrawler-addon
+    ports:
+      - "7000:7000"
+
+  consumer:
+    image: gabisonfire/knightcrawler-consumer:2.0.0
+    <<: [*knightcrawler-app, *knightcrawler-app-depends]
+    restart: unless-stopped
+
+  debridcollector:
+    image: gabisonfire/knightcrawler-debridcollector:2.0.0
+    <<: [*knightcrawler-app, *knightcrawler-app-depends]
+    restart: unless-stopped
+
+  producer:
+    image: gabisonfire/knightcrawler-producer:2.0.0
+    <<: [*knightcrawler-app, *knightcrawler-app-depends]
+    restart: unless-stopped
+
+  qbitcollector:
+    image: gabisonfire/knightcrawler-qbitcollector:2.0.0
+    <<: [*knightcrawler-app, *knightcrawler-app-depends]
+    restart: unless-stopped
+    depends_on:
+      qbittorrent:
+        condition: service_healthy

deployment/docker/components/network.yaml (new file, 4 lines)
@@ -0,0 +1,4 @@
+networks:
+  knightcrawler-network:
+    driver: bridge
+    name: knightcrawler-network

deployment/docker/components/volumes.yaml (new file, 4 lines)
@@ -0,0 +1,4 @@
+volumes:
+  postgres:
+  redis:
+  rabbitmq:

deployment/docker/compose.override.yaml (new file, 7 lines)
@@ -0,0 +1,7 @@
+services:
+  qbittorrent:
+    deploy:
+      replicas: 0
+  qbitcollector:
+    deploy:
+      replicas: 0

deployment/docker/compose.yaml (new file, 7 lines)
@@ -0,0 +1,7 @@
+version: "3.9"
+name: "knightcrawler"
+include:
+  - components/network.yaml
+  - components/volumes.yaml
+  - components/infrastructure.yaml
+  - components/knightcrawler.yaml

@@ -1,139 +0,0 @@
-name: knightcrawler
-
-x-restart: &restart-policy "unless-stopped"
-
-x-basehealth: &base-health
-  interval: 10s
-  timeout: 10s
-  retries: 3
-  start_period: 10s
-
-x-rabbithealth: &rabbitmq-health
-  test: rabbitmq-diagnostics -q ping
-  <<: *base-health
-
-x-mongohealth: &mongodb-health
-  test: ["CMD", "mongosh", "--eval", "db.adminCommand('ping')"]
-  <<: *base-health
-
-x-postgreshealth: &postgresdb-health
-  test: pg_isready
-  <<: *base-health
-
-x-apps: &knightcrawler-app
-  depends_on:
-    mongodb:
-      condition: service_healthy
-    postgres:
-      condition: service_healthy
-    rabbitmq:
-      condition: service_healthy
-  restart: *restart-policy
-
-services:
-  postgres:
-    image: postgres:latest
-    env_file: .env
-    environment:
-      PGUSER: postgres # needed for healthcheck.
-    # # If you need the database to be accessible from outside, please open the below port.
-    # # Furthermore, please, please, please, change the username and password in the .env file.
-    # # If you want to enhance your security even more, create a new user for the database with a strong password.
-    # ports:
-    #   - "5432:5432"
-    volumes:
-      - postgres:/var/lib/postgresql/data
-    healthcheck: *postgresdb-health
-    restart: *restart-policy
-    networks:
-      - knightcrawler-network
-
-  mongodb:
-    image: mongo:latest
-    env_file: .env
-    environment:
-      MONGO_INITDB_ROOT_USERNAME: ${MONGODB_USER:?Variable MONGODB_USER not set}
-      MONGO_INITDB_ROOT_PASSWORD: ${MONGODB_PASSWORD:?Variable MONGODB_PASSWORD not set}
-    # # If you need the database to be accessible from outside, please open the below port.
-    # # Furthermore, please, please, please, change the username and password in the .env file.
-    # ports:
-    #   - "27017:27017"
-    volumes:
-      - mongo:/data/db
-    restart: *restart-policy
-    healthcheck: *mongodb-health
-    networks:
-      - knightcrawler-network
-
-  rabbitmq:
-    image: rabbitmq:3-management
-    # # If you need the database to be accessible from outside, please open the below port.
-    # # Furthermore, please, please, please, look at the documentation for rabbit on how to secure the service.
-    # ports:
-    #   - "5672:5672"
-    #   - "15672:15672"
-    #   - "15692:15692"
-    volumes:
-      - rabbitmq:/var/lib/rabbitmq
-    hostname: ${RABBITMQ_HOST}
-    restart: *restart-policy
-    healthcheck: *rabbitmq-health
-    networks:
-      - knightcrawler-network
-
-  producer:
-    image: gabisonfire/knightcrawler-producer:1.0.1
-    labels:
-      logging: "promtail"
-    env_file: .env
-    <<: *knightcrawler-app
-    networks:
-      - knightcrawler-network
-
-  consumer:
-    image: gabisonfire/knightcrawler-consumer:1.0.1
-    env_file: .env
-    labels:
-      logging: "promtail"
-    deploy:
-      replicas: ${CONSUMER_REPLICAS}
-    <<: *knightcrawler-app
-    networks:
-      - knightcrawler-network
-
-  metadata:
-    image: gabisonfire/knightcrawler-metadata:1.0.1
-    env_file: .env
-    labels:
-      logging: "promtail"
-    restart: no
-    networks:
-      - knightcrawler-network
-
-  addon:
-    <<: *knightcrawler-app
-    env_file: .env
-    hostname: knightcrawler-addon
-    image: gabisonfire/knightcrawler-addon:1.0.1
-    labels:
-      logging: "promtail"
-    networks:
-      - knightcrawler-network
-      # - caddy
-    ports:
-      - "7000:7000"
-
-
-networks:
-  knightcrawler-network:
-    driver: bridge
-    name: knightcrawler-network
-
-  # caddy:
-  #   name: caddy
-  #   external: true
-
-volumes:
-  postgres:
-  mongo:
-  rabbitmq:

File diff suppressed because it is too large.

@@ -14,7 +14,6 @@
     "axios": "^1.6.1",
     "bottleneck": "^2.19.5",
     "cache-manager": "^3.4.4",
-    "cache-manager-mongodb": "^0.3.0",
     "cors": "^2.8.5",
     "debrid-link-api": "^1.0.1",
     "express": "^4.18.2",
@@ -33,7 +32,11 @@
     "user-agents": "^1.0.1444",
     "video-name-parser": "^1.4.6",
     "xml-js": "^1.6.11",
-    "xml2js": "^0.6.2"
+    "xml2js": "^0.6.2",
+    "@redis/client": "^1.5.14",
+    "@redis/json": "^1.0.6",
+    "@redis/search": "^1.1.6",
+    "cache-manager-redis-store": "^2.0.0"
   },
   "devDependencies": {
     "@types/node": "^20.11.6",

@@ -1,7 +1,7 @@
 import cacheManager from 'cache-manager';
-import mangodbStore from 'cache-manager-mongodb';
 import { isStaticUrl } from '../moch/static.js';
 import {cacheConfig} from "./settings.js";
+import redisStore from 'cache-manager-redis-store';
 
 const STREAM_KEY_PREFIX = `${cacheConfig.GLOBAL_KEY_PREFIX}|stream`;
 const IMDB_KEY_PREFIX = `${cacheConfig.GLOBAL_KEY_PREFIX}|imdb`;
@@ -12,28 +12,20 @@ const memoryCache = initiateMemoryCache();
 const remoteCache = initiateRemoteCache();
 
 function initiateRemoteCache() {
   if (cacheConfig.NO_CACHE) {
     return null;
-  } else if (cacheConfig.MONGODB_URI) {
+  } else if (cacheConfig.REDIS_CONNECTION_STRING) {
     return cacheManager.caching({
-      store: mangodbStore,
-      uri: cacheConfig.MONGODB_URI,
-      options: {
-        collection: 'jackettio_addon_collection',
-        socketTimeoutMS: 120000,
-        useNewUrlParser: true,
-        useUnifiedTopology: false,
-        ttl: cacheConfig.STREAM_EMPTY_TTL
-      },
-      ttl: cacheConfig.STREAM_EMPTY_TTL,
-      ignoreCacheErrors: true
-    });
-  } else {
-    return cacheManager.caching({
-      store: 'memory',
-      ttl: cacheConfig.STREAM_EMPTY_TTL
-    });
-  }
+      store: redisStore,
+      ttl: cacheConfig.STREAM_EMPTY_TTL,
+      url: cacheConfig.REDIS_CONNECTION_STRING
+    });
+  } else {
+    return cacheManager.caching({
+      store: 'memory',
+      ttl: cacheConfig.STREAM_EMPTY_TTL
+    });
+  }
 }
 
 function initiateMemoryCache() {
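
For context on how the swapped-in store gets used: cache-manager 3.x (as pinned in package.json) exposes the same get/set/wrap surface regardless of backing store, which is why only the store construction changes in this hunk. A small hypothetical usage sketch; the key shape and loader function are placeholders, not code from the repo:

    // Read-through caching via cache-manager's wrap(): on a miss it runs the
    // loader and stores the result under `key` with the configured TTL; on a
    // hit it returns the cached value without calling the loader at all.
    async function cachedStreams(imdbId, loadStreams) {
      const key = `${STREAM_KEY_PREFIX}:${imdbId}`;
      return remoteCache.wrap(key, () => loadStreams(imdbId), { ttl: cacheConfig.STREAM_TTL });
    }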

@@ -25,7 +25,7 @@ export const cinemetaConfig = {
 }
 
 export const cacheConfig = {
-  MONGODB_URI: process.env.MONGODB_URI,
+  REDIS_CONNECTION_STRING: process.env.REDIS_CONNECTION_STRING || 'redis://localhost:6379/0',
   NO_CACHE: parseBool(process.env.NO_CACHE, false),
   IMDB_TTL: parseInt(process.env.IMDB_TTL || 60 * 60 * 4), // 4 Hours
   STREAM_TTL: parseInt(process.env.STREAM_TTL || 60 * 60 * 4), // 1 Hour

File diff suppressed because it is too large.

@@ -14,7 +14,6 @@
     "axios": "^1.6.1",
     "bottleneck": "^2.19.5",
     "cache-manager": "^3.4.4",
-    "cache-manager-mongodb": "^0.3.0",
     "cors": "^2.8.5",
     "debrid-link-api": "^1.0.1",
     "express-rate-limit": "^6.7.0",
@@ -35,7 +34,11 @@
     "stremio-addon-sdk": "^1.6.10",
     "swagger-stats": "^0.99.7",
     "ua-parser-js": "^1.0.36",
-    "user-agents": "^1.0.1444"
+    "user-agents": "^1.0.1444",
+    "@redis/client": "^1.5.14",
+    "@redis/json": "^1.0.6",
+    "@redis/search": "^1.1.6",
+    "cache-manager-redis-store": "^2.0.0"
   },
   "devDependencies": {
     "@types/node": "^20.11.6",

@@ -1,7 +1,7 @@
 import cacheManager from 'cache-manager';
-import mangodbStore from 'cache-manager-mongodb';
 import { cacheConfig } from './config.js';
 import { isStaticUrl } from '../moch/static.js';
+import redisStore from "cache-manager-redis-store";
 
 const GLOBAL_KEY_PREFIX = 'knightcrawler-addon';
 const STREAM_KEY_PREFIX = `${GLOBAL_KEY_PREFIX}|stream`;
@@ -21,19 +21,11 @@ const remoteCache = initiateRemoteCache();
 function initiateRemoteCache() {
   if (cacheConfig.NO_CACHE) {
     return null;
-  } else if (cacheConfig.MONGO_URI) {
+  } else if (cacheConfig.REDIS_CONNECTION_STRING) {
     return cacheManager.caching({
-      store: mangodbStore,
-      uri: cacheConfig.MONGO_URI,
-      options: {
-        collection: 'knightcrawler_addon_collection',
-        socketTimeoutMS: 120000,
-        useNewUrlParser: true,
-        useUnifiedTopology: false,
-        ttl: STREAM_EMPTY_TTL
-      },
+      store: redisStore,
       ttl: STREAM_EMPTY_TTL,
-      ignoreCacheErrors: true
+      url: cacheConfig.REDIS_CONNECTION_STRING
     });
   } else {
     return cacheManager.caching({

@@ -1,18 +1,8 @@
 export const cacheConfig = {
-  MONGODB_HOST: process.env.MONGODB_HOST || 'mongodb',
-  MONGODB_PORT: process.env.MONGODB_PORT || '27017',
-  MONGODB_DB: process.env.MONGODB_DB || 'knightcrawler',
-  MONGODB_USER: process.env.MONGODB_USER || 'mongo',
-  MONGODB_PASSWORD: process.env.MONGODB_PASSWORD || 'mongo',
-  COLLECTION_NAME: process.env.MONGODB_ADDON_COLLECTION || 'knightcrawler_addon_collection',
+  REDIS_CONNECTION_STRING: process.env.REDIS_CONNECTION_STRING || 'redis://localhost:6379/0',
   NO_CACHE: parseBool(process.env.NO_CACHE, false),
 }
 
-// Combine the environment variables into a connection string
-// The combined string will look something like:
-// 'mongodb://mongo:mongo@localhost:27017/knightcrawler?authSource=admin'
-cacheConfig.MONGO_URI = 'mongodb://' + cacheConfig.MONGODB_USER + ':' + cacheConfig.MONGODB_PASSWORD + '@' + cacheConfig.MONGODB_HOST + ':' + cacheConfig.MONGODB_PORT + '/' + cacheConfig.MONGODB_DB + '?authSource=admin';
-
 export const databaseConfig = {
   POSTGRES_HOST: process.env.POSTGRES_HOST || 'postgres',
   POSTGRES_PORT: process.env.POSTGRES_PORT || '5432',
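
The single REDIS_CONNECTION_STRING above replaces the five MONGODB_* variables and the hand-assembled Mongo URI. It follows the standard redis URL scheme, redis://[:password@]host[:port][/db-number], so the fallback points at database 0 on localhost. A short connection sketch with @redis/client (now a dependency); the addon's actual client setup is not shown in this diff, so treat this as illustrative:

    import { createClient } from '@redis/client';

    // e.g. REDIS_CONNECTION_STRING=redis://:s3cret@redis:6379/0 (example value)
    const client = createClient({
      url: process.env.REDIS_CONNECTION_STRING || 'redis://localhost:6379/0',
    });
    client.on('error', (err) => console.error('Redis error', err));
    await client.connect();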

Some files were not shown because too many files have changed in this diff.