Compare commits
33 Commits
| Author | SHA1 | Date |
|---|---|---|
| | 6181207513 | |
| | 684dbba2f0 | |
| | c75ecd2707 | |
| | c493ef3376 | |
| | 655a39e35c | |
| | cfeee62f6b | |
| | c6d4c06d70 | |
| | 08639a3254 | |
| | d430850749 | |
| | 82c0ea459b | |
| | 1e83b4c5d8 | |
| | 66609c2a46 | |
| | 2d78dc2735 | |
| | 527d6cdf15 | |
| | bb260d78d6 | |
| | baec0450bf | |
| | 4308a0ee71 | |
| | cc15a69517 | |
| | a6d3a4a066 | |
| | 9430704205 | |
| | 6cc857bdc3 | |
| | cc2adbfca5 | |
| | 9f928f9b66 | |
| | a50b5071b3 | |
| | 72db18f0ad | |
| | d70cef1b86 | |
| | e1e718cd22 | |
| | c3e58e4234 | |
| | d584102d60 | |
| | fe4bb59502 | |
| | 472b3342d5 | |
| | b035ef596b | |
| | 9a831e92d0 | |
.github/workflows/base_image_workflow.yaml (12 lines changed)

@@ -6,12 +6,16 @@ on:
       CONTEXT:
         required: true
         type: string
+      DOCKERFILE:
+        required: true
+        type: string
       IMAGE_NAME:
         required: true
         type: string
 
 env:
   CONTEXT: ${{ inputs.CONTEXT }}
+  DOCKERFILE: ${{ inputs.DOCKERFILE }}
   IMAGE_NAME: ${{ inputs.IMAGE_NAME }}
   PLATFORMS: linux/amd64,linux/arm64
 
@@ -21,11 +25,13 @@ jobs:
     steps:
       - name: Setting variables
         run: |
-          echo "CONTEXT=${{ env.CONTEXT }}
-          echo "IMAGE_NAME=${{ env.IMAGE_NAME }}
+          echo "CONTEXT=${{ env.CONTEXT }}"
+          echo "DOCKERFILE=${{ env.DOCKERFILE }}"
+          echo "IMAGE_NAME=${{ env.IMAGE_NAME }}"
           echo "PLATFORMS=${{ env.PLATFORMS }}"
     outputs:
       CONTEXT: ${{ env.CONTEXT }}
+      DOCKERFILE: ${{ env.DOCKERFILE }}
       IMAGE_NAME: ${{ env.IMAGE_NAME }}
       PLATFORMS: ${{ env.PLATFORMS }}
 
@@ -80,6 +86,7 @@ jobs:
         uses: docker/build-push-action@v5
         with:
           context: ${{ needs.set-vars.outputs.CONTEXT }}
+          file: ${{ needs.set-vars.outputs.DOCKERFILE }}
           push: true
           provenance: false
           tags: localhost:5000/dockle-examine-image:test
@@ -136,6 +143,7 @@ jobs:
         uses: docker/build-push-action@v5
         with:
           context: ${{ needs.set-vars.outputs.CONTEXT }}
+          file: ${{ needs.set-vars.outputs.DOCKERFILE }}
           push: true
           provenance: false
           tags: ${{ steps.docker-metadata.outputs.tags }}
.github/workflows/build_addon.yaml (9 lines changed)

@@ -2,10 +2,10 @@ name: Build and Push Addon Service
 
 on:
   push:
-    paths:
-      - 'src/node/addon/**'
     tags:
-      - 'v*.*.*'
+      - '**'
+    paths:
+      - 'src/addon/**'
   workflow_dispatch:
 
 jobs:
@@ -13,5 +13,6 @@ jobs:
     uses: ./.github/workflows/base_image_workflow.yaml
     secrets: inherit
     with:
-      CONTEXT: ./src/node/addon/
+      CONTEXT: ./src/addon/
+      DOCKERFILE: ./src/addon/Dockerfile
       IMAGE_NAME: knightcrawler-addon
.github/workflows/build_consumer.yaml (9 lines changed)

@@ -2,10 +2,10 @@ name: Build and Push Consumer Service
 
 on:
   push:
-    paths:
-      - 'src/node/consumer/**'
     tags:
-      - 'v*.*.*'
+      - '**'
+    paths:
+      - 'src/torrent-consumer/**'
   workflow_dispatch:
 
 jobs:
@@ -13,5 +13,6 @@ jobs:
     uses: ./.github/workflows/base_image_workflow.yaml
     secrets: inherit
     with:
-      CONTEXT: ./src/node/consumer/
+      CONTEXT: ./src/
+      DOCKERFILE: ./src/torrent-consumer/Dockerfile
       IMAGE_NAME: knightcrawler-consumer
.github/workflows/build_debrid_collector.yaml (new file, 18 lines)

@@ -0,0 +1,18 @@
+name: Build and Push Debrid Collector Service
+
+on:
+  push:
+    tags:
+      - '**'
+    paths:
+      - 'src/debrid-collector/**'
+  workflow_dispatch:
+
+jobs:
+  process:
+    uses: ./.github/workflows/base_image_workflow.yaml
+    secrets: inherit
+    with:
+      CONTEXT: ./src/
+      DOCKERFILE: ./src/debrid-collector/Dockerfile
+      IMAGE_NAME: knightcrawler-debrid-collector
.github/workflows/build_jackett-addon.yaml (9 lines changed)

@@ -2,10 +2,10 @@ name: Build and Push Jackett Addon Service
 
 on:
   push:
-    paths:
-      - 'src/node/addon-jackett/**'
     tags:
-      - 'v*.*.*'
+      - '**'
+    paths:
+      - 'src/addon-jackett/**'
   workflow_dispatch:
 
 jobs:
@@ -13,5 +13,6 @@ jobs:
     uses: ./.github/workflows/base_image_workflow.yaml
     secrets: inherit
     with:
-      CONTEXT: ./src/node/addon-jackett/
+      CONTEXT: ./src/addon-jackett/
+      DOCKERFILE: ./src/addon-jackett/Dockerfile
       IMAGE_NAME: knightcrawler-addon-jackett
.github/workflows/build_metadata.yaml (5 lines changed)

@@ -2,10 +2,10 @@ name: Build and Push Metadata Service
 
 on:
   push:
+    tags:
+      - '**'
     paths:
       - 'src/metadata/**'
-    tags:
-      - 'v*.*.*'
   workflow_dispatch:
 
 jobs:
@@ -14,4 +14,5 @@ jobs:
     secrets: inherit
     with:
       CONTEXT: ./src/metadata/
+      DOCKERFILE: ./src/metadata/Dockerfile
       IMAGE_NAME: knightcrawler-metadata
.github/workflows/build_migrator.yaml (5 lines changed)

@@ -2,10 +2,10 @@ name: Build and Push Migrator Service
 
 on:
   push:
+    tags:
+      - '**'
     paths:
       - 'src/migrator/**'
-    tags:
-      - 'v*.*.*'
   workflow_dispatch:
 
 jobs:
@@ -14,4 +14,5 @@ jobs:
     secrets: inherit
     with:
       CONTEXT: ./src/migrator/
+      DOCKERFILE: ./src/migrator/Dockerfile
       IMAGE_NAME: knightcrawler-migrator
.github/workflows/build_producer.yaml (7 lines changed)

@@ -2,10 +2,10 @@ name: Build and Push Producer Service
 
 on:
   push:
+    tags:
+      - '**'
     paths:
       - 'src/producer/**'
-    tags:
-      - 'v*.*.*'
   workflow_dispatch:
 
 jobs:
@@ -13,5 +13,6 @@ jobs:
     uses: ./.github/workflows/base_image_workflow.yaml
     secrets: inherit
     with:
-      CONTEXT: ./src/producer/
+      CONTEXT: ./src/
+      DOCKERFILE: ./src/producer/src/Dockerfile
       IMAGE_NAME: knightcrawler-producer
.github/workflows/build_qbit_collector.yaml (new file, 18 lines)

@@ -0,0 +1,18 @@
+name: Build and Push Qbit Collector Service
+
+on:
+  push:
+    tags:
+      - '**'
+    paths:
+      - 'src/qbit-collector/**'
+  workflow_dispatch:
+
+jobs:
+  process:
+    uses: ./.github/workflows/base_image_workflow.yaml
+    secrets: inherit
+    with:
+      CONTEXT: ./src/
+      DOCKERFILE: ./src/qbit-collector/Dockerfile
+      IMAGE_NAME: knightcrawler-qbit-collector
.github/workflows/build_tissue.yaml (5 lines changed)

@@ -2,10 +2,10 @@ name: Build and Push Tissue Service
 
 on:
   push:
+    tags:
+      - '**'
     paths:
       - 'src/tissue/**'
-    tags:
-      - 'v*.*.*'
   workflow_dispatch:
 
 jobs:
@@ -14,4 +14,5 @@ jobs:
     secrets: inherit
     with:
       CONTEXT: ./src/tissue/
+      DOCKERFILE: ./src/tissue/Dockerfile
       IMAGE_NAME: knightcrawler-tissue
.github/workflows/build_torrent_ingester.yaml (new file, 15 lines)

@@ -0,0 +1,15 @@
+name: Build and Push Torrent Ingestor Service
+
+on:
+  push:
+    paths:
+      - 'src/torrent-ingestor/**'
+
+jobs:
+  process:
+    uses: ./.github/workflows/base_image_workflow.yaml
+    secrets: inherit
+    with:
+      CONTEXT: ./src/torrent-ingestor
+      DOCKERFILE: ./src/torrent-ingestor/Dockerfile
+      IMAGE_NAME: knightcrawler-torrent-ingestor
.gitignore (10 lines changed)

@@ -395,8 +395,6 @@ dist/
 downloads/
 eggs/
 .eggs/
-lib/
-lib64/
 parts/
 sdist/
 var/
@@ -610,3 +608,11 @@ fabric.properties
 # Caddy logs
 !**/caddy/logs/.gitkeep
 **/caddy/logs/**
+
+# Mac directory indexes
+.DS_Store
+deployment/docker/stack.env
+
+src/producer/src/python/
+src/debrid-collector/python/
+src/qbit-collector/python/
README.md (13 lines changed)

@@ -51,11 +51,11 @@ Download and install [Docker Compose](https://docs.docker.com/compose/install/),
 
 ### Environment Setup
 
-Before running the project, you need to set up the environment variables. Copy the `.env.example` file to `.env`:
+Before running the project, you need to set up the environment variables. Edit the values in `stack.env`:
 
 ```sh
 cd deployment/docker
-cp .env.example .env
+code stack.env
 ```
 
 Then set any of the values you would like to customize.
@@ -67,9 +67,6 @@ Then set any of the values you would like to customize.
 
 By default, Knight Crawler is configured to be *relatively* conservative in its resource usage. If running on a decent machine (16GB RAM, i5+ or equivalent), you can increase some settings to increase consumer throughput. This is especially helpful if you have a large backlog from [importing databases](#importing-external-dumps).
 
-In your `.env` file, under the `# Consumer` section increase `CONSUMER_REPLICAS` from `3` to `15`.
-You can also increase `JOB_CONCURRENCY` from `5` to `10`.
-
 ### DebridMediaManager setup (optional)
 
 There are some optional steps you should take to maximise the number of movies/tv shows we can find.
@@ -90,9 +87,9 @@ We can search DebridMediaManager hash lists which are hosted on GitHub. This all
 (checked) Public Repositories (read-only)
 ```
 4. Click `Generate token`
-5. Take the new token and add it to the bottom of the [.env](deployment/docker/.env) file
+5. Take the new token and add it to the bottom of the [stack.env](deployment/docker/stack.env) file
 ```
-GithubSettings__PAT=<YOUR TOKEN HERE>
+GITHUB_PAT=<YOUR TOKEN HERE>
 ```
 ### Configure external access
 
@@ -143,7 +140,7 @@ Remove or comment out the port for the addon, and connect it to Caddy:
   addon:
     <<: *knightcrawler-app
     env_file:
-      - .env
+      - stack.env
     hostname: knightcrawler-addon
     image: gabisonfire/knightcrawler-addon:latest
     labels:
@@ -1,55 +0,0 @@
-# General environment variables
-TZ=London/Europe
-
-# PostgreSQL
-POSTGRES_HOST=postgres
-POSTGRES_PORT=5432
-POSTGRES_USER=postgres
-POSTGRES_PASSWORD=postgres
-POSTGRES_DB=knightcrawler
-
-# MongoDB
-MONGODB_HOST=mongodb
-MONGODB_PORT=27017
-MONGODB_DB=knightcrawler
-MONGODB_USER=mongo
-MONGODB_PASSWORD=mongo
-
-# RabbitMQ
-RABBITMQ_HOST=rabbitmq
-RABBITMQ_USER=guest
-RABBITMQ_PASSWORD=guest
-RABBITMQ_QUEUE_NAME=ingested
-RABBITMQ_DURABLE=true
-RABBITMQ_MAX_QUEUE_SIZE=0
-RABBITMQ_MAX_PUBLISH_BATCH_SIZE=500
-RABBITMQ_PUBLISH_INTERVAL_IN_SECONDS=10
-
-# Metadata
-## Only used if DATA_ONCE is set to false. If true, the schedule is ignored
-METADATA_DOWNLOAD_IMDB_DATA_SCHEDULE="0 0 1 * *"
-## If true, the metadata will be downloaded once and then the schedule will be ignored
-METADATA_DOWNLOAD_IMDB_DATA_ONCE=true
-## Controls the amount of records processed in memory at any given time during import, higher values will consume more memory
-METADATA_INSERT_BATCH_SIZE=25000
-
-# Addon
-DEBUG_MODE=false
-
-# Consumer
-JOB_CONCURRENCY=5
-JOBS_ENABLED=true
-## can be debug for extra verbosity (a lot more verbosity - useful for development)
-LOG_LEVEL=info
-MAX_CONNECTIONS_PER_TORRENT=10
-MAX_CONNECTIONS_OVERALL=100
-TORRENT_TIMEOUT=30000
-UDP_TRACKERS_ENABLED=true
-CONSUMER_REPLICAS=3
-## Fix for #66 - toggle on for development
-AUTO_CREATE_AND_APPLY_MIGRATIONS=false
-## Allows control of the threshold for matching titles to the IMDB dataset. The closer to 0, the more strict the matching.
-TITLE_MATCH_THRESHOLD=0.25
-
-# Producer
-GITHUB_PAT=
deployment/docker/config/qbit/qbittorrent.conf (new executable file, 62 lines)

@@ -0,0 +1,62 @@
+[Application]
+FileLogger\Age=1
+FileLogger\AgeType=1
+FileLogger\Backup=true
+FileLogger\DeleteOld=true
+FileLogger\Enabled=true
+FileLogger\MaxSizeBytes=66560
+FileLogger\Path=/config/qBittorrent/logs
+
+[AutoRun]
+enabled=false
+program=
+
+[BitTorrent]
+Session\AnonymousModeEnabled=true
+Session\BTProtocol=TCP
+Session\DefaultSavePath=/downloads/
+Session\ExcludedFileNames=
+Session\MaxActiveCheckingTorrents=5
+Session\MaxActiveDownloads=10
+Session\MaxActiveTorrents=50
+Session\MaxActiveUploads=50
+Session\MaxConnections=2000
+Session\Port=6881
+Session\QueueingSystemEnabled=true
+Session\TempPath=/downloads/incomplete/
+Session\TorrentStopCondition=MetadataReceived
+
+[Core]
+AutoDeleteAddedTorrentFile=Never
+
+[LegalNotice]
+Accepted=true
+
+[Meta]
+MigrationVersion=6
+
+[Network]
+PortForwardingEnabled=true
+Proxy\HostnameLookupEnabled=false
+Proxy\Profiles\BitTorrent=true
+Proxy\Profiles\Misc=true
+Proxy\Profiles\RSS=true
+
+[Preferences]
+Connection\PortRangeMin=6881
+Connection\ResolvePeerCountries=false
+Connection\UPnP=false
+Downloads\SavePath=/downloads/
+Downloads\TempPath=/downloads/incomplete/
+General\Locale=en
+MailNotification\req_auth=true
+WebUI\Address=*
+WebUI\AuthSubnetWhitelist=0.0.0.0/0
+WebUI\AuthSubnetWhitelistEnabled=true
+WebUI\HostHeaderValidation=false
+WebUI\LocalHostAuth=false
+WebUI\ServerDomains=*
+
+[RSS]
+AutoDownloader\DownloadRepacks=true
+AutoDownloader\SmartEpisodeFilter=s(\\d+)e(\\d+), (\\d+)x(\\d+), "(\\d{4}[.\\-]\\d{1,2}[.\\-]\\d{1,2})", "(\\d{1,2}[.\\-]\\d{1,2}[.\\-]\\d{4})"
@@ -1,139 +1,244 @@
+version: "3.9"
 
 name: knightcrawler
 
-x-restart: &restart-policy "unless-stopped"
+networks:
+  knightcrawler-network:
+    name: knightcrawler-network
+    driver: bridge
 
-x-basehealth: &base-health
-  interval: 10s
-  timeout: 10s
-  retries: 3
-  start_period: 10s
+volumes:
+  postgres:
+  lavinmq:
+  redis:
 
-x-rabbithealth: &rabbitmq-health
-  test: rabbitmq-diagnostics -q ping
-  <<: *base-health
-
-x-mongohealth: &mongodb-health
-  test: ["CMD", "mongosh", "--eval", "db.adminCommand('ping')"]
-  <<: *base-health
-
-x-postgreshealth: &postgresdb-health
-  test: pg_isready
-  <<: *base-health
-
-x-apps: &knightcrawler-app
-  depends_on:
-    mongodb:
-      condition: service_healthy
-    postgres:
-      condition: service_healthy
-    rabbitmq:
-      condition: service_healthy
-  restart: *restart-policy
-
 services:
+  ## Postgres is the database that is used by the services.
+  ## All downloaded metadata is stored in this database.
   postgres:
+    env_file: stack.env
+    healthcheck:
+      test: [ "CMD", "sh", "-c", "pg_isready -h localhost -U $$POSTGRES_USER" ]
+      timeout: 10s
+      interval: 10s
+      retries: 3
+      start_period: 10s
     image: postgres:latest
-    env_file: .env
-    environment:
-      PGUSER: postgres # needed for healthcheck.
     # # If you need the database to be accessible from outside, please open the below port.
-    # # Furthermore, please, please, please, change the username and password in the .env file.
+    # # Furthermore, please, please, please, change the username and password in the stack.env file.
     # # If you want to enhance your security even more, create a new user for the database with a strong password.
     # ports:
     #   - "5432:5432"
+    networks:
+      - knightcrawler-network
+    restart: unless-stopped
     volumes:
       - postgres:/var/lib/postgresql/data
-    healthcheck: *postgresdb-health
-    restart: *restart-policy
-    networks:
-      - knightcrawler-network
 
-  mongodb:
-    image: mongo:latest
-    env_file: .env
-    environment:
-      MONGO_INITDB_ROOT_USERNAME: ${MONGODB_USER:?Variable MONGODB_USER not set}
-      MONGO_INITDB_ROOT_PASSWORD: ${MONGODB_PASSWORD:?Variable MONGODB_PASSWORD not set}
-    # # If you need the database to be accessible from outside, please open the below port.
-    # # Furthermore, please, please, please, change the username and password in the .env file.
+  ## Redis is used as a cache for the services.
+  ## It is used to store the infohashes that are currently being processed in sagas, as well as intrim data.
+  redis:
+    env_file: stack.env
+    healthcheck:
+      test: ["CMD-SHELL", "redis-cli ping"]
+      timeout: 10s
+      interval: 10s
+      retries: 3
+      start_period: 10s
+    image: redis/redis-stack:latest
+    # # If you need redis to be accessible from outside, please open the below port.
     # ports:
-    #   - "27017:27017"
-    volumes:
-      - mongo:/data/db
-    restart: *restart-policy
-    healthcheck: *mongodb-health
+    #   - "6379:6379"
     networks:
       - knightcrawler-network
+    restart: unless-stopped
+    volumes:
+      - redis:/data
 
-  rabbitmq:
-    image: rabbitmq:3-management
+  ## LavinMQ is used as a message broker for the services.
+  ## It is a high performance drop in replacement for RabbitMQ.
+  ## It is used to communicate between the services.
+  lavinmq:
+    env_file: stack.env
     # # If you need the database to be accessible from outside, please open the below port.
-    # # Furthermore, please, please, please, look at the documentation for rabbit on how to secure the service.
+    # # Furthermore, please, please, please, look at the documentation for lavinmq / rabbitmq on how to secure the service.
     # ports:
     #   - "5672:5672"
    #   - "15672:15672"
     #   - "15692:15692"
+    image: cloudamqp/lavinmq:latest
+    healthcheck:
+      test: ["CMD-SHELL", "lavinmqctl status"]
+      timeout: 10s
+      interval: 10s
+      retries: 3
+      start_period: 10s
+    restart: unless-stopped
+    networks:
+      - knightcrawler-network
     volumes:
-      - rabbitmq:/var/lib/rabbitmq
-    hostname: ${RABBITMQ_HOST}
-    restart: *restart-policy
-    healthcheck: *rabbitmq-health
-    networks:
-      - knightcrawler-network
-
-  producer:
-    image: gabisonfire/knightcrawler-producer:1.0.1
-    labels:
-      logging: "promtail"
-    env_file: .env
-    <<: *knightcrawler-app
-    networks:
-      - knightcrawler-network
-
-  consumer:
-    image: gabisonfire/knightcrawler-consumer:1.0.1
-    env_file: .env
-    labels:
-      logging: "promtail"
-    deploy:
-      replicas: ${CONSUMER_REPLICAS}
-    <<: *knightcrawler-app
-    networks:
-      - knightcrawler-network
-
-  metadata:
-    image: gabisonfire/knightcrawler-metadata:1.0.1
-    env_file: .env
-    labels:
-      logging: "promtail"
-    restart: no
-    networks:
-      - knightcrawler-network
+      - lavinmq:/var/lib/lavinmq/
 
+  ## The addon. This is what is used in stremio
   addon:
-    <<: *knightcrawler-app
-    env_file: .env
+    depends_on:
+      metadata:
+        condition: service_completed_successfully
+      migrator:
+        condition: service_completed_successfully
+      postgres:
+        condition: service_healthy
+      lavinmq:
+        condition: service_healthy
+      redis:
+        condition: service_healthy
+    env_file: stack.env
     hostname: knightcrawler-addon
-    image: gabisonfire/knightcrawler-addon:1.0.1
+    image: gabisonfire/knightcrawler-addon:2.0.19
     labels:
-      logging: "promtail"
+      logging: promtail
     networks:
       - knightcrawler-network
-      # - caddy
     ports:
       - "7000:7000"
+    restart: unless-stopped
 
+  ## The consumer is responsible for consuming infohashes and orchestrating download of metadata.
+  consumer:
+    depends_on:
+      metadata:
+        condition: service_completed_successfully
+      migrator:
+        condition: service_completed_successfully
+      postgres:
+        condition: service_healthy
+      lavinmq:
+        condition: service_healthy
+      redis:
+        condition: service_healthy
+    env_file: stack.env
+    image: gabisonfire/knightcrawler-consumer:2.0.19
+    labels:
+      logging: promtail
+    networks:
+      - knightcrawler-network
+    restart: unless-stopped
 
-networks:
-  knightcrawler-network:
-    driver: bridge
-    name: knightcrawler-network
+  ## The debrid collector is responsible for downloading metadata from debrid services. (Currently only RealDebrid is supported)
+  debridcollector:
+    depends_on:
+      metadata:
+        condition: service_completed_successfully
+      migrator:
+        condition: service_completed_successfully
+      postgres:
+        condition: service_healthy
+      lavinmq:
+        condition: service_healthy
+      redis:
+        condition: service_healthy
+    env_file: stack.env
+    image: gabisonfire/knightcrawler-debrid-collector:2.0.19
+    labels:
+      logging: promtail
+    networks:
+      - knightcrawler-network
+    restart: unless-stopped
 
-# caddy:
-#   name: caddy
-#   external: true
+  ## The metadata service is responsible for downloading imdb publically available datasets.
+  ## This is used to enrich the metadata during production of ingested infohashes.
+  metadata:
+    depends_on:
+      migrator:
+        condition: service_completed_successfully
+    env_file: stack.env
+    image: gabisonfire/knightcrawler-metadata:2.0.19
+    networks:
+      - knightcrawler-network
+    restart: "no"
 
-volumes:
-  postgres:
-  mongo:
-  rabbitmq:
+  ## The migrator is responsible for migrating the database schema.
+  migrator:
+    depends_on:
+      postgres:
+        condition: service_healthy
+    env_file: stack.env
+    image: gabisonfire/knightcrawler-migrator:2.0.19
+    networks:
+      - knightcrawler-network
+    restart: "no"
+
+  ## The producer is responsible for producing infohashes by acquiring for various sites, including DMM.
+  producer:
+    depends_on:
+      metadata:
+        condition: service_completed_successfully
+      migrator:
+        condition: service_completed_successfully
+      postgres:
+        condition: service_healthy
+      lavinmq:
+        condition: service_healthy
+      redis:
+        condition: service_healthy
+    env_file: stack.env
+    image: gabisonfire/knightcrawler-producer:2.0.19
+    labels:
+      logging: promtail
+    networks:
+      - knightcrawler-network
+    restart: unless-stopped
+
+  ## QBit collector utilizes QBitTorrent to download metadata.
+  qbitcollector:
+    depends_on:
+      metadata:
+        condition: service_completed_successfully
+      migrator:
+        condition: service_completed_successfully
+      postgres:
+        condition: service_healthy
+      lavinmq:
+        condition: service_healthy
+      redis:
+        condition: service_healthy
+      qbittorrent:
+        condition: service_healthy
+    deploy:
+      replicas: ${QBIT_REPLICAS:-0}
+    env_file: stack.env
+    image: gabisonfire/knightcrawler-qbit-collector:2.0.19
+    labels:
+      logging: promtail
+    networks:
+      - knightcrawler-network
+    restart: unless-stopped
+
+  ## QBitTorrent is a torrent client that can be used to download torrents. In this case its used to download metadata.
+  ## The QBit collector requires this.
+  qbittorrent:
+    deploy:
+      replicas: ${QBIT_REPLICAS:-0}
+    env_file: stack.env
+    environment:
+      PGID: "1000"
+      PUID: "1000"
+      TORRENTING_PORT: "6881"
+      WEBUI_PORT: "8080"
+    healthcheck:
+      test: ["CMD-SHELL", "curl --fail http://localhost:8080"]
+      timeout: 10s
+      interval: 10s
+      retries: 3
+      start_period: 10s
+    image: lscr.io/linuxserver/qbittorrent:latest
+    networks:
+      - knightcrawler-network
+    ports:
+      - "6881:6881/tcp"
+      - "6881:6881/udp"
+      # if you want to expose the webui, uncomment the following line
+      # - "8001:8080"
+    restart: unless-stopped
+    volumes:
+      - ./config/qbit/qbittorrent.conf:/config/qBittorrent/qBittorrent.conf
@@ -16,7 +16,7 @@ rule_files:
 scrape_configs:
   - job_name: "rabbitmq"
     static_configs:
-      - targets: ["rabbitmq:15692"]
+      - targets: ["lavinmq:15692"]
   - job_name: "postgres-exporter"
     static_configs:
      - targets: ["postgres-exporter:9187"]
deployment/docker/src/components/infrastructure.yaml (new file, 87 lines)

@@ -0,0 +1,87 @@
+x-basehealth: &base-health
+  interval: 10s
+  timeout: 10s
+  retries: 3
+  start_period: 10s
+
+x-lavinhealth: &lavinmq-health
+  test: [ "CMD-SHELL", "lavinmqctl status" ]
+  <<: *base-health
+
+x-redishealth: &redis-health
+  test: redis-cli ping
+  <<: *base-health
+
+x-postgreshealth: &postgresdb-health
+  test: [ "CMD", "sh", "-c", "pg_isready -h localhost -U $$POSTGRES_USER" ]
+  <<: *base-health
+
+x-qbit: &qbit-health
+  test: "curl --fail http://localhost:8080"
+  <<: *base-health
+
+services:
+
+  postgres:
+    image: postgres:latest
+    environment:
+      PGUSER: postgres # needed for healthcheck.
+    # # If you need the database to be accessible from outside, please open the below port.
+    # # Furthermore, please, please, please, change the username and password in the .env file.
+    # # If you want to enhance your security even more, create a new user for the database with a strong password.
+    # ports:
+    #   - "5432:5432"
+    volumes:
+      - postgres:/var/lib/postgresql/data
+    healthcheck: *postgresdb-health
+    restart: unless-stopped
+    env_file: ../../.env
+    networks:
+      - knightcrawler-network
+
+  redis:
+    image: redis/redis-stack:latest
+    # # If you need redis to be accessible from outside, please open the below port.
+    # ports:
+    #   - "6379:6379"
+    volumes:
+      - redis:/data
+    restart: unless-stopped
+    healthcheck: *redis-health
+    env_file: ../../.env
+    networks:
+      - knightcrawler-network
+
+  lavinmq:
+    env_file: stack.env
+    # # If you need the database to be accessible from outside, please open the below port.
+    # # Furthermore, please, please, please, look at the documentation for lavinmq / rabbitmq on how to secure the service.
+    # ports:
+    #   - "5672:5672"
+    #   - "15672:15672"
+    #   - "15692:15692"
+    image: cloudamqp/lavinmq:latest
+    healthcheck: *lavinmq-health
+    restart: unless-stopped
+    volumes:
+      - lavinmq:/var/lib/lavinmq/
+
+  ## QBitTorrent is a torrent client that can be used to download torrents. In this case its used to download metadata.
+  ## The QBit collector requires this.
+  qbittorrent:
+    image: lscr.io/linuxserver/qbittorrent:latest
+    environment:
+      - PUID=1000
+      - PGID=1000
+      - WEBUI_PORT=8080
+      - TORRENTING_PORT=6881
+    ports:
+      - 6881:6881
+      - 6881:6881/udp
+    env_file: ../../.env
+    networks:
+      - knightcrawler-network
+    restart: unless-stopped
+    healthcheck: *qbit-health
+    volumes:
+      - ../../config/qbit/qbittorrent.conf:/config/qBittorrent/qBittorrent.conf
deployment/docker/src/components/knightcrawler.yaml (new file, 71 lines)

@@ -0,0 +1,71 @@
+x-apps: &knightcrawler-app
+  labels:
+    logging: "promtail"
+  env_file: ../../.env
+  networks:
+    - knightcrawler-network
+
+x-depends: &knightcrawler-app-depends
+  depends_on:
+    redis:
+      condition: service_healthy
+    postgres:
+      condition: service_healthy
+    lavinmq:
+      condition: service_healthy
+    migrator:
+      condition: service_completed_successfully
+    metadata:
+      condition: service_completed_successfully
+
+services:
+  metadata:
+    image: gabisonfire/knightcrawler-metadata:2.0.18
+    env_file: ../../.env
+    networks:
+      - knightcrawler-network
+    restart: no
+    depends_on:
+      migrator:
+        condition: service_completed_successfully
+
+  migrator:
+    image: gabisonfire/knightcrawler-migrator:2.0.18
+    env_file: ../../.env
+    networks:
+      - knightcrawler-network
+    restart: no
+    depends_on:
+      postgres:
+        condition: service_healthy
+
+  addon:
+    image: gabisonfire/knightcrawler-addon:2.0.18
+    <<: [*knightcrawler-app, *knightcrawler-app-depends]
+    restart: unless-stopped
+    hostname: knightcrawler-addon
+    ports:
+      - "7000:7000"
+
+  consumer:
+    image: gabisonfire/knightcrawler-consumer:2.0.18
+    <<: [*knightcrawler-app, *knightcrawler-app-depends]
+    restart: unless-stopped
+
+  debridcollector:
+    image: gabisonfire/knightcrawler-debrid-collector:2.0.18
+    <<: [*knightcrawler-app, *knightcrawler-app-depends]
+    restart: unless-stopped
+
+  producer:
+    image: gabisonfire/knightcrawler-producer:2.0.18
+    <<: [*knightcrawler-app, *knightcrawler-app-depends]
+    restart: unless-stopped
+
+  qbitcollector:
+    image: gabisonfire/knightcrawler-qbit-collector:2.0.18
+    <<: [*knightcrawler-app, *knightcrawler-app-depends]
+    restart: unless-stopped
+    depends_on:
+      qbittorrent:
+        condition: service_healthy
deployment/docker/src/components/network.yaml (new file, 4 lines)

@@ -0,0 +1,4 @@
+networks:
+  knightcrawler-network:
+    driver: bridge
+    name: knightcrawler-network
deployment/docker/src/components/volumes.yaml (new file, 4 lines)

@@ -0,0 +1,4 @@
+volumes:
+  postgres:
+  redis:
+  lavinmq:
deployment/docker/src/compose.override.yaml (new file, 7 lines)

@@ -0,0 +1,7 @@
+services:
+  qbittorrent:
+    deploy:
+      replicas: 0
+  qbitcollector:
+    deploy:
+      replicas: 0
deployment/docker/src/compose.yaml (new file, 7 lines)

@@ -0,0 +1,7 @@
+version: "3.9"
+name: "knightcrawler"
+include:
+  - ./components/network.yaml
+  - ./components/volumes.yaml
+  - ./components/infrastructure.yaml
+  - ./components/knightcrawler.yaml
deployment/docker/stack.env (new file, 43 lines)

@@ -0,0 +1,43 @@
+# General environment variables
+TZ=London/Europe
+
+# PostgreSQL
+POSTGRES_HOST=postgres
+POSTGRES_PORT=5432
+POSTGRES_USER=postgres
+POSTGRES_PASSWORD=postgres
+POSTGRES_DB=knightcrawler
+
+# Redis
+REDIS_HOST=redis
+REDIS_PORT=6379
+REDIS_EXTRA=abortConnect=false,allowAdmin=true
+
+# AMQP
+RABBITMQ_HOST=lavinmq
+RABBITMQ_USER=guest
+RABBITMQ_PASSWORD=guest
+RABBITMQ_CONSUMER_QUEUE_NAME=ingested
+RABBITMQ_DURABLE=true
+RABBITMQ_MAX_QUEUE_SIZE=0
+RABBITMQ_MAX_PUBLISH_BATCH_SIZE=500
+RABBITMQ_PUBLISH_INTERVAL_IN_SECONDS=10
+
+# Metadata
+METADATA_INSERT_BATCH_SIZE=50000
+
+# Collectors
+COLLECTOR_QBIT_ENABLED=false
+COLLECTOR_DEBRID_ENABLED=true
+COLLECTOR_REAL_DEBRID_API_KEY=
+QBIT_HOST=http://qbittorrent:8080
+QBIT_TRACKERS_URL=https://raw.githubusercontent.com/ngosang/trackerslist/master/trackers_all_http.txt
+
+# Number of replicas for the qBittorrent collector and qBitTorrent client. Should be 0 or 1.
+QBIT_REPLICAS=0
+
+# Addon
+DEBUG_MODE=false
+
+# Producer
+GITHUB_PAT=
File diff suppressed because it is too large
@@ -14,7 +14,6 @@
     "axios": "^1.6.1",
     "bottleneck": "^2.19.5",
     "cache-manager": "^3.4.4",
-    "cache-manager-mongodb": "^0.3.0",
     "cors": "^2.8.5",
     "debrid-link-api": "^1.0.1",
     "express": "^4.18.2",
@@ -33,7 +32,11 @@
     "user-agents": "^1.0.1444",
     "video-name-parser": "^1.4.6",
     "xml-js": "^1.6.11",
-    "xml2js": "^0.6.2"
+    "xml2js": "^0.6.2",
+    "@redis/client": "^1.5.14",
+    "@redis/json": "^1.0.6",
+    "@redis/search": "^1.1.6",
+    "cache-manager-redis-store": "^2.0.0"
   },
   "devDependencies": {
     "@types/node": "^20.11.6",
@@ -1,7 +1,7 @@
 import cacheManager from 'cache-manager';
-import mangodbStore from 'cache-manager-mongodb';
 import { isStaticUrl } from '../moch/static.js';
 import {cacheConfig} from "./settings.js";
+import redisStore from 'cache-manager-redis-store';
 
 const STREAM_KEY_PREFIX = `${cacheConfig.GLOBAL_KEY_PREFIX}|stream`;
 const IMDB_KEY_PREFIX = `${cacheConfig.GLOBAL_KEY_PREFIX}|imdb`;
@@ -12,28 +12,20 @@ const memoryCache = initiateMemoryCache();
 const remoteCache = initiateRemoteCache();
 
 function initiateRemoteCache() {
   if (cacheConfig.NO_CACHE) {
     return null;
-  } else if (cacheConfig.MONGODB_URI) {
+  } else if (cacheConfig.REDIS_CONNECTION_STRING) {
     return cacheManager.caching({
-      store: mangodbStore,
-      uri: cacheConfig.MONGODB_URI,
-      options: {
-        collection: 'jackettio_addon_collection',
-        socketTimeoutMS: 120000,
-        useNewUrlParser: true,
-        useUnifiedTopology: false,
-        ttl: cacheConfig.STREAM_EMPTY_TTL
-      },
-      ttl: cacheConfig.STREAM_EMPTY_TTL,
-      ignoreCacheErrors: true
-    });
-  } else {
-    return cacheManager.caching({
-      store: 'memory',
-      ttl: cacheConfig.STREAM_EMPTY_TTL
-    });
-  }
+      store: redisStore,
+      ttl: cacheConfig.STREAM_EMPTY_TTL,
+      url: cacheConfig.REDIS_CONNECTION_STRING
+    });
+  } else {
+    return cacheManager.caching({
+      store: 'memory',
+      ttl: cacheConfig.STREAM_EMPTY_TTL
+    });
+  }
 }
 
 function initiateMemoryCache() {
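For reference, a minimal sketch of how the Redis-backed cache-manager store introduced above can be exercised once `REDIS_CONNECTION_STRING` is set. The key name, value, and TTL below are illustrative only and are not taken from the repository; it assumes a reachable Redis instance such as the `redis://redis:6379` default implied by `stack.env`.

```js
import cacheManager from 'cache-manager';
import redisStore from 'cache-manager-redis-store';

// Assumed connection string; in the addon this comes from cacheConfig.REDIS_CONNECTION_STRING.
const cache = cacheManager.caching({
  store: redisStore,
  url: 'redis://redis:6379',
  ttl: 300 // seconds; illustrative default
});

// cache-manager v3 supports a promise-style API when no callback is passed.
await cache.set('example|stream|tt0111161', [{ infoHash: 'abc123' }]);
const cached = await cache.get('example|stream|tt0111161');
console.log(cached); // -> [{ infoHash: 'abc123' }]
```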
@@ -25,7 +25,9 @@ export const cinemetaConfig = {
 }
 
 export const cacheConfig = {
-  MONGODB_URI: process.env.MONGODB_URI,
+  REDIS_HOST: process.env.REDIS_HOST || 'redis',
+  REDIS_PORT: process.env.REDIS_PORT || '6379',
+  REDIS_EXTRA: process.env.REDIS_EXTRA || '',
   NO_CACHE: parseBool(process.env.NO_CACHE, false),
   IMDB_TTL: parseInt(process.env.IMDB_TTL || 60 * 60 * 4), // 4 Hours
   STREAM_TTL: parseInt(process.env.STREAM_TTL || 60 * 60 * 4), // 1 Hour
@@ -40,3 +42,5 @@ export const cacheConfig = {
   STALE_ERROR_AGE: parseInt(process.env.STALE_ERROR_AGE) || 7 * 24 * 60 * 60, // 7 days
   GLOBAL_KEY_PREFIX: process.env.GLOBAL_KEY_PREFIX || 'jackettio-addon',
 }
+
+cacheConfig.REDIS_CONNECTION_STRING = 'redis://' + cacheConfig.REDIS_HOST + ':' + cacheConfig.REDIS_PORT + '?' + cacheConfig.REDIS_EXTRA;
File diff suppressed because it is too large
@@ -14,7 +14,6 @@
     "axios": "^1.6.1",
     "bottleneck": "^2.19.5",
     "cache-manager": "^3.4.4",
-    "cache-manager-mongodb": "^0.3.0",
     "cors": "^2.8.5",
     "debrid-link-api": "^1.0.1",
     "express-rate-limit": "^6.7.0",
@@ -35,7 +34,11 @@
     "stremio-addon-sdk": "^1.6.10",
     "swagger-stats": "^0.99.7",
     "ua-parser-js": "^1.0.36",
-    "user-agents": "^1.0.1444"
+    "user-agents": "^1.0.1444",
+    "@redis/client": "^1.5.14",
+    "@redis/json": "^1.0.6",
+    "@redis/search": "^1.1.6",
+    "cache-manager-redis-store": "^2.0.0"
   },
   "devDependencies": {
     "@types/node": "^20.11.6",
@@ -1,7 +1,7 @@
 import cacheManager from 'cache-manager';
-import mangodbStore from 'cache-manager-mongodb';
 import { cacheConfig } from './config.js';
 import { isStaticUrl } from '../moch/static.js';
+import redisStore from "cache-manager-redis-store";
 
 const GLOBAL_KEY_PREFIX = 'knightcrawler-addon';
 const STREAM_KEY_PREFIX = `${GLOBAL_KEY_PREFIX}|stream`;
@@ -21,19 +21,11 @@ const remoteCache = initiateRemoteCache();
 function initiateRemoteCache() {
   if (cacheConfig.NO_CACHE) {
     return null;
-  } else if (cacheConfig.MONGO_URI) {
+  } else if (cacheConfig.REDIS_CONNECTION_STRING) {
     return cacheManager.caching({
-      store: mangodbStore,
-      uri: cacheConfig.MONGO_URI,
-      options: {
-        collection: 'knightcrawler_addon_collection',
-        socketTimeoutMS: 120000,
-        useNewUrlParser: true,
-        useUnifiedTopology: false,
-        ttl: STREAM_EMPTY_TTL
-      },
+      store: redisStore,
       ttl: STREAM_EMPTY_TTL,
-      ignoreCacheErrors: true
+      url: cacheConfig.REDIS_CONNECTION_STRING
     });
   } else {
     return cacheManager.caching({
@@ -1,17 +1,11 @@
 export const cacheConfig = {
-  MONGODB_HOST: process.env.MONGODB_HOST || 'mongodb',
-  MONGODB_PORT: process.env.MONGODB_PORT || '27017',
-  MONGODB_DB: process.env.MONGODB_DB || 'knightcrawler',
-  MONGODB_USER: process.env.MONGODB_USER || 'mongo',
-  MONGODB_PASSWORD: process.env.MONGODB_PASSWORD || 'mongo',
-  COLLECTION_NAME: process.env.MONGODB_ADDON_COLLECTION || 'knightcrawler_addon_collection',
+  REDIS_HOST: process.env.REDIS_HOST || 'redis',
+  REDIS_PORT: process.env.REDIS_PORT || '6379',
+  REDIS_EXTRA: process.env.REDIS_EXTRA || '',
   NO_CACHE: parseBool(process.env.NO_CACHE, false),
 }
 
-// Combine the environment variables into a connection string
-// The combined string will look something like:
-// 'mongodb://mongo:mongo@localhost:27017/knightcrawler?authSource=admin'
-cacheConfig.MONGO_URI = 'mongodb://' + cacheConfig.MONGODB_USER + ':' + cacheConfig.MONGODB_PASSWORD + '@' + cacheConfig.MONGODB_HOST + ':' + cacheConfig.MONGODB_PORT + '/' + cacheConfig.MONGODB_DB + '?authSource=admin';
+cacheConfig.REDIS_CONNECTION_STRING = 'redis://' + cacheConfig.REDIS_HOST + ':' + cacheConfig.REDIS_PORT + '?' + cacheConfig.REDIS_EXTRA;
 
 export const databaseConfig = {
   POSTGRES_HOST: process.env.POSTGRES_HOST || 'postgres',
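As a quick sanity check, a small sketch of the string this concatenation produces when evaluated with the defaults shipped in `stack.env` (`REDIS_HOST=redis`, `REDIS_PORT=6379`, `REDIS_EXTRA=abortConnect=false,allowAdmin=true`):

```js
// Same construction as in config.js above, evaluated with the stack.env defaults.
const REDIS_HOST = 'redis';
const REDIS_PORT = '6379';
const REDIS_EXTRA = 'abortConnect=false,allowAdmin=true';

const REDIS_CONNECTION_STRING = 'redis://' + REDIS_HOST + ':' + REDIS_PORT + '?' + REDIS_EXTRA;
console.log(REDIS_CONNECTION_STRING);
// -> redis://redis:6379?abortConnect=false,allowAdmin=true
```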
@@ -14,13 +14,12 @@ const Torrent = database.define('torrent',
   {
     infoHash: { type: Sequelize.STRING(64), primaryKey: true },
     provider: { type: Sequelize.STRING(32), allowNull: false },
-    torrentId: { type: Sequelize.STRING(128) },
+    ingestedTorrentId: { type: Sequelize.BIGINT, allowNull: false },
     title: { type: Sequelize.STRING(256), allowNull: false },
     size: { type: Sequelize.BIGINT },
     type: { type: Sequelize.STRING(16), allowNull: false },
     uploadDate: { type: Sequelize.DATE, allowNull: false },
     seeders: { type: Sequelize.SMALLINT },
-    trackers: { type: Sequelize.STRING(4096) },
     languages: { type: Sequelize.STRING(4096) },
     resolution: { type: Sequelize.STRING(16) }
   }
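A brief, hypothetical sketch of inserting and reading a row against the revised model. All values are placeholders; `Torrent` is the Sequelize model defined above and `database` is assumed to be the Sequelize instance the service already configures.

```js
// Hypothetical usage of the updated Torrent model; field names follow the diff above.
const torrent = await Torrent.create({
  infoHash: 'dd8255ecdc7ca55fb0bbf81323d87062db1f6d1c', // placeholder hash
  provider: 'ExampleProvider',
  ingestedTorrentId: 12345, // now a required BIGINT reference instead of a free-form string id
  title: 'Example.Movie.2024.1080p.WEB-DL',
  size: 2147483648,
  type: 'movie',
  uploadDate: new Date(),
  seeders: 42,
  languages: 'en',
  resolution: '1080p'
});

// infoHash remains the primary key, so lookups go through findByPk.
const found = await Torrent.findByPk(torrent.infoHash);
console.log(found.title);
```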
Some files were not shown because too many files have changed in this diff.