Compare commits: create-doc ... main — 90 commits

(Commit SHAs from the compare view; the author and date columns were empty in the capture.)

594320ed63, a7d5944d25, c053a5f8da, 5611d3776f, 833ac11a96, 16d8707c48, 6dfbaa4739, 03b5617312, 19cb42af77, 9344531b34, 723aa6b6a0, e17b476801, 2a414d8bc0, 9b5f454e6e, ad9549c695, 1e85cb00ff, da640a4071, e6a63fd72e, 02101ac50a, 3c8ffd5082, 79e0a0f102, 6181207513, 684dbba2f0, c75ecd2707, c493ef3376, 655a39e35c, cfeee62f6b, c6d4c06d70, 08639a3254, d430850749, 82c0ea459b, 1e83b4c5d8, 66609c2a46, 2d78dc2735, 527d6cdf15, bb260d78d6, baec0450bf, 4308a0ee71, cc15a69517, a6d3a4a066, 9430704205, 6cc857bdc3, cc2adbfca5, 9f928f9b66, a50b5071b3, 72db18f0ad, d70cef1b86, e1e718cd22, c3e58e4234, d584102d60, fe4bb59502, 472b3342d5, b035ef596b, 9a831e92d0, 9c6c1ac249, 0ddfac57f7, 9fbd750cd2, 5fc2027cfa, 2d39476c65, e7f987a0d7, 79a6aa3cb0, e24d81dd96, aeb83c19f8, e23ee974e2, 5c310427b4, b3d9be0b7a, dda81ec5bf, 8eae288f10, 75ac89489e, fa27b0cda9, 500dd0d725, 6f4bc10f5a, 1b3c190ed1, 02150482df, f18cd5b1ac, 2e774058ff, 4e84d7c9c3, ad04d323b4, 7d0b779bc8, e2b45e799d, 6c03f79933, c8a1ebd8ae, 320fccc8e8, 51246ed352, 8d82a17876, f719520b3b, bacb50e060, 6600fceb1a, 5aba05f2b4, 601dbdf64f
.github/ISSUE_TEMPLATE/bug_report.md — 4 changed lines

@@ -12,6 +12,9 @@ A clear and concise description of what the bug is.
 
 **To Reproduce**
 Steps to reproduce the behavior:
+1.
+2.
+3.
 
 **Expected behavior**
 A clear and concise description of what you expected to happen.
@@ -23,6 +26,7 @@ If the logs are short, make sure to triple backtick them, or use https://pastebi
 **Hardware:**
 - OS and distro: [e.g. Raspberry Pi OS, Ubuntu, Rocky]
 - Server: [e.g. VM, Baremetal, Pi]
+- Knightcrawler Version: [2.0.xx]
 
 **Additional context**
 Add any other context about the problem here.
.github/workflows/base_image_workflow.yaml — 18 changed lines

@@ -6,12 +6,16 @@ on:
       CONTEXT:
         required: true
         type: string
+      DOCKERFILE:
+        required: true
+        type: string
       IMAGE_NAME:
         required: true
         type: string
 
 env:
   CONTEXT: ${{ inputs.CONTEXT }}
+  DOCKERFILE: ${{ inputs.DOCKERFILE }}
   IMAGE_NAME: ${{ inputs.IMAGE_NAME }}
   PLATFORMS: linux/amd64,linux/arm64
 
@@ -21,11 +25,13 @@ jobs:
     steps:
       - name: Setting variables
        run: |
-          echo "CONTEXT=${{ env.CONTEXT }}
-          echo "IMAGE_NAME=${{ env.IMAGE_NAME }}
+          echo "CONTEXT=${{ env.CONTEXT }}"
+          echo "DOCKERFILE=${{ env.DOCKERFILE }}"
+          echo "IMAGE_NAME=${{ env.IMAGE_NAME }}"
           echo "PLATFORMS=${{ env.PLATFORMS }}"
     outputs:
       CONTEXT: ${{ env.CONTEXT }}
+      DOCKERFILE: ${{ env.DOCKERFILE }}
       IMAGE_NAME: ${{ env.IMAGE_NAME }}
       PLATFORMS: ${{ env.PLATFORMS }}
 
@@ -70,14 +76,17 @@ jobs:
           flavor: |
             latest=auto
           tags: |
-            type=edge,branch=master,commit=${{ github.sha }}
+            type=ref,event=tag
+            type=ref,event=pr
             type=sha,commit=${{ github.sha }}
+            type=semver,pattern={{version}}
             type=raw,value=latest,enable={{is_default_branch}}
 
       - name: Build image for scanning
         uses: docker/build-push-action@v5
         with:
           context: ${{ needs.set-vars.outputs.CONTEXT }}
+          file: ${{ needs.set-vars.outputs.DOCKERFILE }}
           push: true
           provenance: false
           tags: localhost:5000/dockle-examine-image:test
 
@@ -130,10 +139,11 @@ jobs:
           sarif_file: 'trivy-results-os.sarif'
 
       - name: Push Service Image to repo
-        if: github.ref == 'refs/heads/main' || github.ref == 'refs/heads/master'
+        # if: github.ref == 'refs/heads/main' || github.ref == 'refs/heads/master'
         uses: docker/build-push-action@v5
         with:
           context: ${{ needs.set-vars.outputs.CONTEXT }}
+          file: ${{ needs.set-vars.outputs.DOCKERFILE }}
           push: true
           provenance: false
           tags: ${{ steps.docker-metadata.outputs.tags }}
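The scan stage above builds the image, pushes it to a throwaway registry on `localhost:5000`, then examines it before the real push. A rough local reproduction, assuming Docker plus the dockle and trivy CLIs are installed (the addon paths here are purely illustrative):

```sh
docker run -d -p 5000:5000 --name registry registry:2                  # throwaway local registry
docker build -f ./src/addon/Dockerfile -t localhost:5000/dockle-examine-image:test ./src/addon/
docker push localhost:5000/dockle-examine-image:test
dockle localhost:5000/dockle-examine-image:test                        # container best-practice lint
trivy image localhost:5000/dockle-examine-image:test                   # CVE scan (may need insecure-registry flags)
```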
.github/workflows/build_addon.yaml — 8 changed lines

@@ -2,13 +2,17 @@ name: Build and Push Addon Service
 
 on:
   push:
+    tags:
+      - '**'
     paths:
-      - 'src/node/addon/**'
+      - 'src/addon/**'
+  workflow_dispatch:
 
 jobs:
   process:
     uses: ./.github/workflows/base_image_workflow.yaml
     secrets: inherit
     with:
-      CONTEXT: ./src/node/addon/
+      CONTEXT: ./src/addon/
+      DOCKERFILE: ./src/addon/Dockerfile
       IMAGE_NAME: knightcrawler-addon
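Because `workflow_dispatch:` is now part of each build trigger, any of these builds can also be started by hand — for example with the GitHub CLI, assuming it is authenticated and the workflow file exists on the target branch:

```sh
gh workflow run build_addon.yaml --ref main
gh run watch   # follow the run that was just queued
```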
.github/workflows/build_consumer.yaml — 8 changed lines

@@ -2,13 +2,17 @@ name: Build and Push Consumer Service
 
 on:
   push:
+    tags:
+      - '**'
     paths:
-      - 'src/node/consumer/**'
+      - 'src/torrent-consumer/**'
+  workflow_dispatch:
 
 jobs:
   process:
     uses: ./.github/workflows/base_image_workflow.yaml
     secrets: inherit
     with:
-      CONTEXT: ./src/node/consumer/
+      CONTEXT: ./src/
+      DOCKERFILE: ./src/torrent-consumer/Dockerfile
       IMAGE_NAME: knightcrawler-consumer
.github/workflows/build_debrid_collector.yaml — new file (18 lines)

name: Build and Push Debrid Collector Service

on:
  push:
    tags:
      - '**'
    paths:
      - 'src/debrid-collector/**'
  workflow_dispatch:

jobs:
  process:
    uses: ./.github/workflows/base_image_workflow.yaml
    secrets: inherit
    with:
      CONTEXT: ./src/
      DOCKERFILE: ./src/debrid-collector/Dockerfile
      IMAGE_NAME: knightcrawler-debrid-collector
.github/workflows/build_docs.yaml — deleted (86 lines)

name: Build documentation

# TODO: Only run on ./docs folder change

on:
  push:
    branches: ["master"]
    paths:
      - 'docs/**'
  # Specify to run a workflow manually from the Actions tab on GitHub
  workflow_dispatch:

permissions:
  id-token: write
  pages: write

env:
  INSTANCE: Writerside/kc
  ARTIFACT: webHelpKC2-all.zip
  DOCS_FOLDER: ./docs

jobs:
  build:
    runs-on: ubuntu-latest

    steps:
      - name: Checkout repository
        uses: actions/checkout@v4

      - name: Build Writerside docs using Docker
        uses: JetBrains/writerside-github-action@v4
        with:
          instance: ${{ env.INSTANCE }}
          artifact: ${{ env.ARTIFACT }}
          location: ${{ env.DOCS_FOLDER }}

      - name: Upload artifact
        uses: actions/upload-artifact@v3
        with:
          name: docs
          path: |
            artifacts/${{ env.ARTIFACT }}
            artifacts/report.json
          retention-days: 7

  test:
    needs: build
    runs-on: ubuntu-latest
    steps:
      - name: Download artifacts
        uses: actions/download-artifact@v3
        with:
          name: docs
          path: artifacts

      - name: Test documentation
        uses: JetBrains/writerside-checker-action@v1
        with:
          instance: ${{ env.INSTANCE }}

  deploy:
    environment:
      name: github-pages
      url: ${{ steps.deployment.outputs.page_url }}
    needs: [build, test]
    runs-on: ubuntu-latest
    steps:
      - name: Download artifacts
        uses: actions/download-artifact@v3
        with:
          name: docs

      - name: Unzip artifact
        run: unzip -O UTF-8 -qq '${{ env.ARTIFACT }}' -d dir

      - name: Setup Pages
        uses: actions/configure-pages@v4

      - name: Package and upload Pages artifact
        uses: actions/upload-pages-artifact@v3
        with:
          path: dir

      - name: Deploy to GitHub Pages
        id: deployment
        uses: actions/deploy-pages@v4
.github/workflows/build_jackett-addon.yaml — 8 changed lines

@@ -2,13 +2,17 @@ name: Build and Push Jackett Addon Service
 
 on:
   push:
+    tags:
+      - '**'
     paths:
-      - 'src/node/addon-jackett/**'
+      - 'src/addon-jackett/**'
+  workflow_dispatch:
 
 jobs:
   process:
     uses: ./.github/workflows/base_image_workflow.yaml
     secrets: inherit
     with:
-      CONTEXT: ./src/node/addon-jackett/
+      CONTEXT: ./src/addon-jackett/
+      DOCKERFILE: ./src/addon-jackett/Dockerfile
       IMAGE_NAME: knightcrawler-addon-jackett
.github/workflows/build_metadata.yaml — 4 changed lines

@@ -2,8 +2,11 @@ name: Build and Push Metadata Service
 
 on:
   push:
+    tags:
+      - '**'
     paths:
       - 'src/metadata/**'
+  workflow_dispatch:
 
 jobs:
   process:
@@ -11,4 +14,5 @@ jobs:
     secrets: inherit
     with:
       CONTEXT: ./src/metadata/
+      DOCKERFILE: ./src/metadata/Dockerfile
       IMAGE_NAME: knightcrawler-metadata
.github/workflows/build_migrator.yaml — 4 changed lines

@@ -2,8 +2,11 @@ name: Build and Push Migrator Service
 
 on:
   push:
+    tags:
+      - '**'
     paths:
       - 'src/migrator/**'
+  workflow_dispatch:
 
 jobs:
   process:
@@ -11,4 +14,5 @@ jobs:
     secrets: inherit
     with:
       CONTEXT: ./src/migrator/
+      DOCKERFILE: ./src/migrator/Dockerfile
       IMAGE_NAME: knightcrawler-migrator
.github/workflows/build_producer.yaml — 6 changed lines

@@ -2,13 +2,17 @@ name: Build and Push Producer Service
 
 on:
   push:
+    tags:
+      - '**'
     paths:
       - 'src/producer/**'
+  workflow_dispatch:
 
 jobs:
   process:
     uses: ./.github/workflows/base_image_workflow.yaml
     secrets: inherit
     with:
-      CONTEXT: ./src/producer/
+      CONTEXT: ./src/
+      DOCKERFILE: ./src/producer/src/Dockerfile
       IMAGE_NAME: knightcrawler-producer
.github/workflows/build_qbit_collector.yaml — new file (18 lines)

name: Build and Push Qbit Collector Service

on:
  push:
    tags:
      - '**'
    paths:
      - 'src/qbit-collector/**'
  workflow_dispatch:

jobs:
  process:
    uses: ./.github/workflows/base_image_workflow.yaml
    secrets: inherit
    with:
      CONTEXT: ./src/
      DOCKERFILE: ./src/qbit-collector/Dockerfile
      IMAGE_NAME: knightcrawler-qbit-collector
.github/workflows/build_tissue.yaml — new file (18 lines)

name: Build and Push Tissue Service

on:
  push:
    tags:
      - '**'
    paths:
      - 'src/tissue/**'
  workflow_dispatch:

jobs:
  process:
    uses: ./.github/workflows/base_image_workflow.yaml
    secrets: inherit
    with:
      CONTEXT: ./src/tissue/
      DOCKERFILE: ./src/tissue/Dockerfile
      IMAGE_NAME: knightcrawler-tissue
.github/workflows/build_torrent_ingester.yaml — new file (15 lines)

name: Build and Push Torrent Ingestor Service

on:
  push:
    paths:
      - 'src/torrent-ingestor/**'

jobs:
  process:
    uses: ./.github/workflows/base_image_workflow.yaml
    secrets: inherit
    with:
      CONTEXT: ./src/torrent-ingestor
      DOCKERFILE: ./src/torrent-ingestor/Dockerfile
      IMAGE_NAME: knightcrawler-torrent-ingestor
.github/workflows/git_cliff.yml — new file (39 lines)

on:
  push:
    branches:
      - main
  workflow_dispatch:

jobs:
  changelog:
    name: Generate changelog
    runs-on: ubuntu-latest
    steps:
      - name: Checkout
        uses: actions/checkout@v4
        with:
          fetch-depth: 0

      - name: Generate a changelog
        uses: orhun/git-cliff-action@v3
        with:
          config: cliff.toml
          args: --verbose
        env:
          OUTPUT: CHANGELOG.md
          GITHUB_REPO: ${{ github.repository }}

      - name: Commit
        run: |
          git config user.name 'github-actions[bot]'
          git config user.email 'github-actions[bot]@users.noreply.github.com'
          set +e
          git checkout -b feat/changelog_$(date +"%d_%m")
          git add CHANGELOG.md
          git commit -m "[skip ci] Update changelog"
          git push https://${{ secrets.GITHUB_TOKEN }}@github.com/${{ github.repository }}.git feat/changelog_$(date +"%d_%m")

      - name: create pull request
        run: gh pr create -B main -H feat/changelog_$(date +"%d_%m") --title '[skip ci] Update changelog' --body 'Changelog update by git-cliff'
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
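The action wraps the git-cliff CLI, so the changelog can be previewed locally before this workflow ever runs — a sketch, assuming the git-cliff binary is installed and the clone has full history (the `fetch-depth: 0` above exists for the same reason; `owner/repo` is a placeholder):

```sh
# GITHUB_REPO (and a token) are only needed for the GitHub usernames in the template.
GITHUB_REPO=owner/repo git cliff --config cliff.toml --verbose -o CHANGELOG.md
git cliff --config cliff.toml --unreleased   # or just print the unreleased section
```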
.gitignore — 13 changed lines

@@ -355,6 +355,9 @@ MigrationBackup/
 # Fody - auto-generated XML schema
 FodyWeavers.xsd
 
+# Jetbrains ide's run profiles (Could contain sensative information)
+**/.run/
+
 # VS Code files for those working on multiple tools
 .vscode/*
 !.vscode/settings.json
@@ -392,8 +395,6 @@ dist/
 downloads/
 eggs/
 .eggs/
-lib/
-lib64/
 parts/
 sdist/
 var/
@@ -607,3 +608,11 @@ fabric.properties
 # Caddy logs
 !**/caddy/logs/.gitkeep
 **/caddy/logs/**
+
+# Mac directory indexes
+.DS_Store
+deployment/docker/stack.env
+
+src/producer/src/python/
+src/debrid-collector/python/
+src/qbit-collector/python/
.pre-commit-config.yaml — 2 hunks (file header not shown in the capture; the `repos`/`rev`/`hooks` schema identifies it)

@@ -3,6 +3,7 @@ repos:
     rev: v4.5.0
     hooks:
       - id: check-added-large-files
+        args: ['--maxkb=2500']
      - id: check-json
       - id: check-toml
       - id: check-xml
@@ -15,5 +16,6 @@ repos:
     rev: v2.2.6
     hooks:
       - id: codespell
-        exclude: ^src/node/consumer/test/
+        exclude: |
+          (?x)^(src/node/consumer/test/.*|src/producer/Data/.*|src/tissue/Data/.*)$
         args: ["-L", "strem,chage"]
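To exercise the updated hooks locally before pushing, assuming the pre-commit CLI is installed:

```sh
pre-commit install           # register the git hook once per clone
pre-commit run --all-files   # run every configured hook across the repo
```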
CHANGELOG.md — new file (22 lines)

# Changelog

All notable changes to this project will be documented in this file.

The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).

## [1.0.0] - 2024-03-25
### Details
#### Changed
- Change POSTGRES_USERNAME to POSTGRES_USER. Oops by @purple-emily
- Change POSTGRES_DATABASE to POSTGRES_DB by @purple-emily
- Two movie commands instead of movie and tv by @purple-emily
- Cleanup RabbitMQ env vars, and Github Pat by @iPromKnight

#### Fixed
- HRD -> HDR by @mplewis

## New Contributors
* @mplewis made their first contribution

<!-- generated by git-cliff -->
CONTRIBUTING.md — new file (34 lines)

We use [Meaningful commit messages](https://reflectoring.io/meaningful-commit-messages/)

Tl;dr:
1. It should answer the question: "What happens if the changes are applied?"
2. Use the imperative, present tense. It is easier to read and scan quickly:
```
Right: Add feature to alert admin for new user registration
Wrong: Added feature ... (past tense)
```
3. The summary should always be able to complete the following sentence:
`If applied, this commit will… `

We use [git-cliff] for our changelog.

The breaking flag is set to true when the commit has an exclamation mark after the commit type and scope, e.g.:
`feat(scope)!: this is a breaking change`

Keywords (Commit messages should start with these):
```
# Added
add
support

# Removed
remove
delete

# Fixed
test
fix
```

Any other commits will fall under the `Changed` category

This project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html)
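Read with the keyword table above, hypothetical commit subjects would be grouped roughly like this (illustrative only; the exact grouping depends on the git-cliff parsers in the `cliff.toml` further down this page):

```sh
# Hypothetical commit subjects and the changelog section each would land in:
git commit -m "add qbit collector healthcheck"            # Added
git commit -m "remove mongodb service"                    # Removed
git commit -m "fix lavinmq volume path"                   # Fixed
git commit -m "update readme wording"                     # Changed (fallback group)
git commit -m "feat(scope)!: this is a breaking change"   # breaking-change flag
```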
README.md — 36 changed lines

@@ -7,9 +7,6 @@
 
 ## Contents
 
-> [!CAUTION]
-> Until we reach `v1.0.0`, please consider releases as alpha.
-
 > [!IMPORTANT]
 > The latest change renames the project and requires a [small migration](#selfhostio-to-knightcrawler-migration).
 - [Contents](#contents)
@@ -54,11 +51,11 @@ Download and install [Docker Compose](https://docs.docker.com/compose/install/),
 
 ### Environment Setup
 
-Before running the project, you need to set up the environment variables. Copy the `.env.example` file to `.env`:
+Before running the project, you need to set up the environment variables. Edit the values in `stack.env`:
 
 ```sh
 cd deployment/docker
-cp .env.example .env
+code stack.env
 ```
 
 Then set any of the values you wouldd like to customize.
@@ -70,33 +67,6 @@ Then set any of the values you wouldd like to customize.
 
 By default, Knight Crawler is configured to be *relatively* conservative in its resource usage. If running on a decent machine (16GB RAM, i5+ or equivalent), you can increase some settings to increase consumer throughput. This is especially helpful if you have a large backlog from [importing databases](#importing-external-dumps).
 
-In your `.env` file, under the `# Consumer` section increase `CONSUMER_REPLICAS` from `3` to `15`.
-You can also increase `JOB_CONCURRENCY` from `5` to `10`.
-
-### DebridMediaManager setup (optional)
-
-There are some optional steps you should take to maximise the number of movies/tv shows we can find.
-
-We can search DebridMediaManager hash lists which are hosted on GitHub. This allows us to add hundreds of thousands of movies and tv shows, but it requires a Personal Access Token to be generated. The software only needs read access and only for public repositories. To generate one, please follow these steps:
-
-1. Navigate to GitHub settings -> Developer Settings -> Personal access tokens -> Fine-grained tokens (click [here](https://github.com/settings/tokens?type=beta) for a direct link)
-2. Press `Generate new token`
-3. Fill out the form (example data below):
-   ```
-   Token name:
-   KnightCrawler
-   Expiration:
-   90 days
-   Description:
-   <blank>
-   Repository access
-   (checked) Public Repositories (read-only)
-   ```
-4. Click `Generate token`
-5. Take the new token and add it to the bottom of the [.env](deployment/docker/.env) file
-   ```
-   GithubSettings__PAT=<YOUR TOKEN HERE>
-   ```
 ### Configure external access
 
 Please choose which applies to you:
@@ -146,7 +116,7 @@ Remove or comment out the port for the addon, and connect it to Caddy:
   addon:
     <<: *knightcrawler-app
     env_file:
-      - .env
+      - stack.env
     hostname: knightcrawler-addon
     image: gabisonfire/knightcrawler-addon:latest
     labels:
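Following the README change above, a minimal bring-up after editing `stack.env` might look like this (a sketch; any editor works in place of the README's `code`):

```sh
cd deployment/docker
$EDITOR stack.env              # set passwords, replicas, API keys, etc.
docker compose up -d           # start the stack in the background
docker compose logs -f addon   # tail a single service to confirm it came up
```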
cliff.toml — new file (112 lines)

# git-cliff ~ configuration file
# https://git-cliff.org/docs/configuration

[changelog]
# changelog header
header = """
# Changelog\n
All notable changes to this project will be documented in this file.

The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).\n
"""
# template for the changelog body
# https://keats.github.io/tera/docs/#introduction
body = """
{%- macro remote_url() -%}
  https://github.com/{{ remote.github.owner }}/{{ remote.github.repo }}
{%- endmacro -%}

{% if version -%}
    ## [{{ version | trim_start_matches(pat="v") }}] - {{ timestamp | date(format="%Y-%m-%d") }}
{% else -%}
    ## [Unreleased]
{% endif -%}

### Details\

{% for group, commits in commits | group_by(attribute="group") %}
    #### {{ group | upper_first }}
    {%- for commit in commits %}
        - {{ commit.message | upper_first | trim }}\
            {% if commit.github.username %} by @{{ commit.github.username }}{%- endif -%}
            {% if commit.github.pr_number %} in \
            [#{{ commit.github.pr_number }}]({{ self::remote_url() }}/pull/{{ commit.github.pr_number }}) \
            {%- endif -%}
    {% endfor %}
{% endfor %}

{%- if github.contributors | filter(attribute="is_first_time", value=true) | length != 0 %}
  ## New Contributors
{%- endif -%}

{% for contributor in github.contributors | filter(attribute="is_first_time", value=true) %}
  * @{{ contributor.username }} made their first contribution
    {%- if contributor.pr_number %} in \
    [#{{ contributor.pr_number }}]({{ self::remote_url() }}/pull/{{ contributor.pr_number }}) \
    {%- endif %}
{%- endfor %}\n
"""
# template for the changelog footer
footer = """
{%- macro remote_url() -%}
  https://github.com/{{ remote.github.owner }}/{{ remote.github.repo }}
{%- endmacro -%}

{% for release in releases -%}
    {% if release.version -%}
        {% if release.previous.version -%}
            [{{ release.version | trim_start_matches(pat="v") }}]: \
                {{ self::remote_url() }}/compare/{{ release.previous.version }}..{{ release.version }}
        {% endif -%}
    {% else -%}
        [unreleased]: {{ self::remote_url() }}/compare/{{ release.previous.version }}..HEAD
    {% endif -%}
{% endfor %}
<!-- generated by git-cliff -->
"""
# remove the leading and trailing whitespace from the templates
trim = true

[git]
# parse the commits based on https://www.conventionalcommits.org
conventional_commits = true
# filter out the commits that are not conventional
filter_unconventional = true
# process each line of a commit as an individual commit
split_commits = false
# regex for preprocessing the commit messages
commit_preprocessors = [
  # remove issue numbers from commits
  { pattern = '\((\w+\s)?#([0-9]+)\)', replace = "" },
]
# regex for parsing and grouping commits
commit_parsers = [
  { message = "^.*: add", group = "Added" },
  { message = "^add", group = "Added" },
  { message = "^.*: support", group = "Added" },
  { message = "^support", group = "Added" },
  { message = "^.*: remove", group = "Removed" },
  { message = "^remove", group = "Removed" },
  { message = "^.*: delete", group = "Removed" },
  { message = "^delete", group = "Removed" },
  { message = "^.*: test", group = "Fixed" },
  { message = "^test", group = "Fixed" },
  { message = "^.*: fix", group = "Fixed" },
  { message = "^fix", group = "Fixed" },
  { message = "^.*", group = "Changed" },
]
# protect breaking changes from being skipped due to matching a skipping commit_parser
protect_breaking_commits = false
# filter out the commits that are not matched by commit parsers
filter_commits = true
# regex for matching git tags
tag_pattern = "v[0-9].*"
# regex for skipping tags
skip_tags = "v0.1.0-beta.1"
# regex for ignoring tags
ignore_tags = ""
# sort the tags topologically
topo_order = false
# sort the commits inside sections by oldest/newest order
sort_commits = "oldest"
(Environment file, deleted — 55 lines; the file path was not shown in the capture.)

# General environment variables
TZ=London/Europe

# PostgreSQL
POSTGRES_HOST=postgres
POSTGRES_PORT=5432
POSTGRES_USER=postgres
POSTGRES_PASSWORD=postgres
POSTGRES_DB=knightcrawler

# MongoDB
MONGODB_HOST=mongodb
MONGODB_PORT=27017
MONGODB_DB=knightcrawler
MONGODB_USER=mongo
MONGODB_PASSWORD=mongo

# RabbitMQ
RABBITMQ_HOST=rabbitmq
RABBITMQ_USER=guest
RABBITMQ_PASSWORD=guest
RABBITMQ_QUEUE_NAME=ingested
RABBITMQ_DURABLE=true
RABBITMQ_MAX_QUEUE_SIZE=0
RABBITMQ_MAX_PUBLISH_BATCH_SIZE=500
RABBITMQ_PUBLISH_INTERVAL_IN_SECONDS=10

# Metadata
## Only used if DATA_ONCE is set to false. If true, the schedule is ignored
METADATA_DOWNLOAD_IMDB_DATA_SCHEDULE="0 0 1 * *"
## If true, the metadata will be downloaded once and then the schedule will be ignored
METADATA_DOWNLOAD_IMDB_DATA_ONCE=true
## Controls the amount of records processed in memory at any given time during import, higher values will consume more memory
METADATA_INSERT_BATCH_SIZE=25000

# Addon
DEBUG_MODE=false

# Consumer
JOB_CONCURRENCY=5
JOBS_ENABLED=true
## can be debug for extra verbosity (a lot more verbosity - useful for development)
LOG_LEVEL=info
MAX_CONNECTIONS_PER_TORRENT=10
MAX_CONNECTIONS_OVERALL=100
TORRENT_TIMEOUT=30000
UDP_TRACKERS_ENABLED=true
CONSUMER_REPLICAS=3
## Fix for #66 - toggle on for development
AUTO_CREATE_AND_APPLY_MIGRATIONS=false
## Allows control of the threshold for matching titles to the IMDB dataset. The closer to 0, the more strict the matching.
TITLE_MATCH_THRESHOLD=0.25

# Producer
GITHUB_PAT=
deployment/docker/config/qbit/qbittorrent.conf — new executable file (64 lines)

[Application]
FileLogger\Age=1
FileLogger\AgeType=1
FileLogger\Backup=true
FileLogger\DeleteOld=true
FileLogger\Enabled=true
FileLogger\MaxSizeBytes=66560
FileLogger\Path=/config/qBittorrent/logs

[AutoRun]
enabled=false
program=

[BitTorrent]
Session\AnonymousModeEnabled=true
Session\BTProtocol=TCP
Session\ConnectionSpeed=150
Session\DefaultSavePath=/downloads/
Session\ExcludedFileNames=
Session\MaxActiveCheckingTorrents=20
Session\MaxActiveDownloads=20
Session\MaxActiveTorrents=50
Session\MaxActiveUploads=50
Session\MaxConcurrentHTTPAnnounces=1000
Session\MaxConnections=2000
Session\Port=6881
Session\QueueingSystemEnabled=true
Session\TempPath=/downloads/incomplete/
Session\TorrentStopCondition=MetadataReceived

[Core]
AutoDeleteAddedTorrentFile=Never

[LegalNotice]
Accepted=true

[Meta]
MigrationVersion=6

[Network]
PortForwardingEnabled=true
Proxy\HostnameLookupEnabled=false
Proxy\Profiles\BitTorrent=true
Proxy\Profiles\Misc=true
Proxy\Profiles\RSS=true

[Preferences]
Connection\PortRangeMin=6881
Connection\ResolvePeerCountries=false
Connection\UPnP=false
Downloads\SavePath=/downloads/
Downloads\TempPath=/downloads/incomplete/
General\Locale=en
MailNotification\req_auth=true
WebUI\Address=*
WebUI\AuthSubnetWhitelist=0.0.0.0/0
WebUI\AuthSubnetWhitelistEnabled=true
WebUI\HostHeaderValidation=false
WebUI\LocalHostAuth=false
WebUI\ServerDomains=*

[RSS]
AutoDownloader\DownloadRepacks=true
AutoDownloader\SmartEpisodeFilter=s(\\d+)e(\\d+), (\\d+)x(\\d+), "(\\d{4}[.\\-]\\d{1,2}[.\\-]\\d{1,2})", "(\\d{1,2}[.\\-]\\d{1,2}[.\\-]\\d{4})"
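The WebUI keys above (`WebUI\Address=*`, `WebUI\LocalHostAuth=false`) are what allow the unauthenticated healthcheck used by the compose stack later on this page. A manual probe from the host, assuming the container is running and the WebUI port has been published (the compose file leaves it commented out):

```sh
# Same check the compose healthcheck performs: exit non-zero unless the WebUI answers.
curl --fail http://localhost:8080 && echo "qBittorrent WebUI is up"
```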
(Docker compose stack definition — file path not shown in the capture.)

@@ -1,139 +1,244 @@
+version: "3.9"
 
 name: knightcrawler
 
-x-restart: &restart-policy "unless-stopped"
-
-x-basehealth: &base-health
-  interval: 10s
-  timeout: 10s
-  retries: 3
-  start_period: 10s
-
-x-rabbithealth: &rabbitmq-health
-  test: rabbitmq-diagnostics -q ping
-  <<: *base-health
-
-x-mongohealth: &mongodb-health
-  test: ["CMD", "mongosh", "--eval", "db.adminCommand('ping')"]
-  <<: *base-health
-
-x-postgreshealth: &postgresdb-health
-  test: pg_isready
-  <<: *base-health
-
-x-apps: &knightcrawler-app
-  depends_on:
-    mongodb:
-      condition: service_healthy
-    postgres:
-      condition: service_healthy
-    rabbitmq:
-      condition: service_healthy
-  restart: *restart-policy
+networks:
+  knightcrawler-network:
+    name: knightcrawler-network
+    driver: bridge
+
+volumes:
+  postgres:
+  lavinmq:
+  redis:
 
 services:
+  ## Postgres is the database that is used by the services.
+  ## All downloaded metadata is stored in this database.
   postgres:
+    env_file: stack.env
+    healthcheck:
+      test: [ "CMD", "sh", "-c", "pg_isready -h localhost -U $$POSTGRES_USER" ]
+      timeout: 10s
+      interval: 10s
+      retries: 3
+      start_period: 10s
     image: postgres:latest
-    env_file: .env
-    environment:
-      PGUSER: postgres # needed for healthcheck.
     # # If you need the database to be accessible from outside, please open the below port.
-    # # Furthermore, please, please, please, change the username and password in the .env file.
+    # # Furthermore, please, please, please, change the username and password in the stack.env file.
     # # If you want to enhance your security even more, create a new user for the database with a strong password.
     # ports:
     #   - "5432:5432"
+    networks:
+      - knightcrawler-network
+    restart: unless-stopped
     volumes:
       - postgres:/var/lib/postgresql/data
-    healthcheck: *postgresdb-health
-    restart: *restart-policy
-    networks:
-      - knightcrawler-network
 
-  mongodb:
-    image: mongo:latest
-    env_file: .env
-    environment:
-      MONGO_INITDB_ROOT_USERNAME: ${MONGODB_USER:?Variable MONGODB_USER not set}
-      MONGO_INITDB_ROOT_PASSWORD: ${MONGODB_PASSWORD:?Variable MONGODB_PASSWORD not set}
-    # # If you need the database to be accessible from outside, please open the below port.
-    # # Furthermore, please, please, please, change the username and password in the .env file.
-    # ports:
-    #   - "27017:27017"
-    volumes:
-      - mongo:/data/db
-    restart: *restart-policy
-    healthcheck: *mongodb-health
+  ## Redis is used as a cache for the services.
+  ## It is used to store the infohashes that are currently being processed in sagas, as well as intrim data.
+  redis:
+    env_file: stack.env
+    healthcheck:
+      test: ["CMD-SHELL", "redis-cli ping"]
+      timeout: 10s
+      interval: 10s
+      retries: 3
+      start_period: 10s
+    image: redis/redis-stack:latest
+    # # If you need redis to be accessible from outside, please open the below port.
+    # ports:
+    #   - "6379:6379"
     networks:
       - knightcrawler-network
+    restart: unless-stopped
+    volumes:
+      - redis:/data
 
-  rabbitmq:
-    image: rabbitmq:3-management
+  ## LavinMQ is used as a message broker for the services.
+  ## It is a high performance drop in replacement for RabbitMQ.
+  ## It is used to communicate between the services.
+  lavinmq:
+    env_file: stack.env
     # # If you need the database to be accessible from outside, please open the below port.
-    # # Furthermore, please, please, please, look at the documentation for rabbit on how to secure the service.
+    # # Furthermore, please, please, please, look at the documentation for lavinmq / rabbitmq on how to secure the service.
     # ports:
     #   - "5672:5672"
     #   - "15672:15672"
     #   - "15692:15692"
+    image: cloudamqp/lavinmq:latest
+    healthcheck:
+      test: ["CMD-SHELL", "lavinmqctl status"]
+      timeout: 10s
+      interval: 10s
+      retries: 3
+      start_period: 10s
+    restart: unless-stopped
+    networks:
+      - knightcrawler-network
     volumes:
-      - rabbitmq:/var/lib/rabbitmq
-    hostname: ${RABBITMQ_HOST}
-    restart: *restart-policy
-    healthcheck: *rabbitmq-health
-    networks:
-      - knightcrawler-network
-
-  producer:
-    image: gabisonfire/knightcrawler-producer:latest
-    labels:
-      logging: "promtail"
-    env_file: .env
-    <<: *knightcrawler-app
-    networks:
-      - knightcrawler-network
-
-  consumer:
-    image: gabisonfire/knightcrawler-consumer:latest
-    env_file: .env
-    labels:
-      logging: "promtail"
-    deploy:
-      replicas: ${CONSUMER_REPLICAS}
-    <<: *knightcrawler-app
-    networks:
-      - knightcrawler-network
-
-  metadata:
-    image: gabisonfire/knightcrawler-metadata:latest
-    env_file: .env
-    labels:
-      logging: "promtail"
-    restart: no
-    networks:
-      - knightcrawler-network
+      - lavinmq:/var/lib/lavinmq/
 
+  ## The addon. This is what is used in stremio
   addon:
-    <<: *knightcrawler-app
-    env_file: .env
+    depends_on:
+      metadata:
+        condition: service_completed_successfully
+      migrator:
+        condition: service_completed_successfully
+      postgres:
+        condition: service_healthy
+      lavinmq:
+        condition: service_healthy
+      redis:
+        condition: service_healthy
+    env_file: stack.env
     hostname: knightcrawler-addon
-    image: gabisonfire/knightcrawler-addon:latest
+    image: gabisonfire/knightcrawler-addon:2.0.26
     labels:
-      logging: "promtail"
+      logging: promtail
     networks:
       - knightcrawler-network
-    # - caddy
     ports:
       - "7000:7000"
+    restart: unless-stopped
+
+  ## The consumer is responsible for consuming infohashes and orchestrating download of metadata.
+  consumer:
+    depends_on:
+      metadata:
+        condition: service_completed_successfully
+      migrator:
+        condition: service_completed_successfully
+      postgres:
+        condition: service_healthy
+      lavinmq:
+        condition: service_healthy
+      redis:
+        condition: service_healthy
+    env_file: stack.env
+    image: gabisonfire/knightcrawler-consumer:2.0.26
+    labels:
+      logging: promtail
+    networks:
+      - knightcrawler-network
+    restart: unless-stopped
 
-networks:
-  knightcrawler-network:
-    driver: bridge
-    name: knightcrawler-network
+  ## The debrid collector is responsible for downloading metadata from debrid services. (Currently only RealDebrid is supported)
+  debridcollector:
+    depends_on:
+      metadata:
+        condition: service_completed_successfully
+      migrator:
+        condition: service_completed_successfully
+      postgres:
+        condition: service_healthy
+      lavinmq:
+        condition: service_healthy
+      redis:
+        condition: service_healthy
+    env_file: stack.env
+    image: gabisonfire/knightcrawler-debrid-collector:2.0.26
+    labels:
+      logging: promtail
+    networks:
+      - knightcrawler-network
+    restart: unless-stopped
 
-# caddy:
-#   name: caddy
-#   external: true
+  ## The metadata service is responsible for downloading imdb publically available datasets.
+  ## This is used to enrich the metadata during production of ingested infohashes.
+  metadata:
+    depends_on:
+      migrator:
+        condition: service_completed_successfully
+    env_file: stack.env
+    image: gabisonfire/knightcrawler-metadata:2.0.26
+    networks:
+      - knightcrawler-network
+    restart: "no"
 
-volumes:
-  postgres:
-  mongo:
-  rabbitmq:
+  ## The migrator is responsible for migrating the database schema.
+  migrator:
+    depends_on:
+      postgres:
+        condition: service_healthy
+    env_file: stack.env
+    image: gabisonfire/knightcrawler-migrator:2.0.26
+    networks:
+      - knightcrawler-network
+    restart: "no"
+
+  ## The producer is responsible for producing infohashes by acquiring for various sites, including DMM.
+  producer:
+    depends_on:
+      metadata:
+        condition: service_completed_successfully
+      migrator:
+        condition: service_completed_successfully
+      postgres:
+        condition: service_healthy
+      lavinmq:
+        condition: service_healthy
+      redis:
+        condition: service_healthy
+    env_file: stack.env
+    image: gabisonfire/knightcrawler-producer:2.0.26
+    labels:
+      logging: promtail
+    networks:
+      - knightcrawler-network
+    restart: unless-stopped
+
+  ## QBit collector utilizes QBitTorrent to download metadata.
+  qbitcollector:
+    depends_on:
+      metadata:
+        condition: service_completed_successfully
+      migrator:
+        condition: service_completed_successfully
+      postgres:
+        condition: service_healthy
+      lavinmq:
+        condition: service_healthy
+      redis:
+        condition: service_healthy
+      qbittorrent:
+        condition: service_healthy
+    deploy:
+      replicas: ${QBIT_REPLICAS:-0}
+    env_file: stack.env
+    image: gabisonfire/knightcrawler-qbit-collector:2.0.26
+    labels:
+      logging: promtail
+    networks:
+      - knightcrawler-network
+    restart: unless-stopped
+
+  ## QBitTorrent is a torrent client that can be used to download torrents. In this case its used to download metadata.
+  ## The QBit collector requires this.
+  qbittorrent:
+    deploy:
+      replicas: ${QBIT_REPLICAS:-0}
+    env_file: stack.env
+    environment:
+      PGID: "1000"
+      PUID: "1000"
+      TORRENTING_PORT: "6881"
+      WEBUI_PORT: "8080"
+    healthcheck:
+      test: ["CMD-SHELL", "curl --fail http://localhost:8080"]
+      timeout: 10s
+      interval: 10s
+      retries: 3
+      start_period: 10s
+    image: lscr.io/linuxserver/qbittorrent:latest
+    networks:
+      - knightcrawler-network
+    ports:
+      - "6881:6881/tcp"
+      - "6881:6881/udp"
+      # if you want to expose the webui, uncomment the following line
+      # - "8001:8080"
+    restart: unless-stopped
+    volumes:
+      - ./config/qbit/qbittorrent.conf:/config/qBittorrent/qBittorrent.conf
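The restructured stack gates every app container on healthy infrastructure (`condition: service_healthy`) and on the one-shot `migrator`/`metadata` jobs finishing (`condition: service_completed_successfully`). A rough way to watch that ordering play out, assuming the stack was started from this file:

```sh
docker compose up -d
docker compose ps --all   # one-shot jobs should read "exited (0)", long-lived services "healthy"
docker compose events     # optionally stream health_status and lifecycle events as they happen
```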
(Caddyfile — file path not shown in the capture.)

@@ -4,7 +4,7 @@
 ## ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 ## Once you have confirmed Caddy works you should comment out
 ## the below line:
-	acme_ca https://acme-staging-v02.api.letsencrypt.org/director
+	acme_ca https://acme-staging-v02.api.letsencrypt.org/directory
 }
 
 (security-headers) {
(Prometheus scrape configuration — file path not shown in the capture.)

@@ -16,7 +16,7 @@ rule_files:
 scrape_configs:
   - job_name: "rabbitmq"
     static_configs:
-      - targets: ["rabbitmq:15692"]
+      - targets: ["lavinmq:15692"]
   - job_name: "postgres-exporter"
     static_configs:
       - targets: ["postgres-exporter:9187"]
deployment/docker/src/components/infrastructure.yaml — new file (87 lines)

x-basehealth: &base-health
  interval: 10s
  timeout: 10s
  retries: 3
  start_period: 10s

x-lavinhealth: &lavinmq-health
  test: [ "CMD-SHELL", "lavinmqctl status" ]
  <<: *base-health

x-redishealth: &redis-health
  test: redis-cli ping
  <<: *base-health

x-postgreshealth: &postgresdb-health
  test: [ "CMD", "sh", "-c", "pg_isready -h localhost -U $$POSTGRES_USER" ]
  <<: *base-health

x-qbit: &qbit-health
  test: "curl --fail http://localhost:8080"
  <<: *base-health

services:

  postgres:
    image: postgres:latest
    environment:
      PGUSER: postgres # needed for healthcheck.
    # # If you need the database to be accessible from outside, please open the below port.
    # # Furthermore, please, please, please, change the username and password in the .env file.
    # # If you want to enhance your security even more, create a new user for the database with a strong password.
    # ports:
    #   - "5432:5432"
    volumes:
      - postgres:/var/lib/postgresql/data
    healthcheck: *postgresdb-health
    restart: unless-stopped
    env_file: ../../.env
    networks:
      - knightcrawler-network

  redis:
    image: redis/redis-stack:latest
    # # If you need redis to be accessible from outside, please open the below port.
    # ports:
    #   - "6379:6379"
    volumes:
      - redis:/data
    restart: unless-stopped
    healthcheck: *redis-health
    env_file: ../../.env
    networks:
      - knightcrawler-network

  lavinmq:
    env_file: stack.env
    # # If you need the database to be accessible from outside, please open the below port.
    # # Furthermore, please, please, please, look at the documentation for lavinmq / rabbitmq on how to secure the service.
    # ports:
    #   - "5672:5672"
    #   - "15672:15672"
    #   - "15692:15692"
    image: cloudamqp/lavinmq:latest
    healthcheck: *lavinmq-health
    restart: unless-stopped
    volumes:
      - lavinmq:/var/lib/lavinmq/

  ## QBitTorrent is a torrent client that can be used to download torrents. In this case its used to download metadata.
  ## The QBit collector requires this.
  qbittorrent:
    image: lscr.io/linuxserver/qbittorrent:latest
    environment:
      - PUID=1000
      - PGID=1000
      - WEBUI_PORT=8080
      - TORRENTING_PORT=6881
    ports:
      - 6881:6881
      - 6881:6881/udp
    env_file: ../../.env
    networks:
      - knightcrawler-network
    restart: unless-stopped
    healthcheck: *qbit-health
    volumes:
      - ../../config/qbit/qbittorrent.conf:/config/qBittorrent/qBittorrent.conf
deployment/docker/src/components/knightcrawler.yaml — new file (71 lines)

x-apps: &knightcrawler-app
  labels:
    logging: "promtail"
  env_file: ../../.env
  networks:
    - knightcrawler-network

x-depends: &knightcrawler-app-depends
  depends_on:
    redis:
      condition: service_healthy
    postgres:
      condition: service_healthy
    lavinmq:
      condition: service_healthy
    migrator:
      condition: service_completed_successfully
    metadata:
      condition: service_completed_successfully

services:
  metadata:
    image: gabisonfire/knightcrawler-metadata:2.0.26
    env_file: ../../.env
    networks:
      - knightcrawler-network
    restart: no
    depends_on:
      migrator:
        condition: service_completed_successfully

  migrator:
    image: gabisonfire/knightcrawler-migrator:2.0.26
    env_file: ../../.env
    networks:
      - knightcrawler-network
    restart: no
    depends_on:
      postgres:
        condition: service_healthy

  addon:
    image: gabisonfire/knightcrawler-addon:2.0.26
    <<: [*knightcrawler-app, *knightcrawler-app-depends]
    restart: unless-stopped
    hostname: knightcrawler-addon
    ports:
      - "7000:7000"

  consumer:
    image: gabisonfire/knightcrawler-consumer:2.0.26
    <<: [*knightcrawler-app, *knightcrawler-app-depends]
    restart: unless-stopped

  debridcollector:
    image: gabisonfire/knightcrawler-debrid-collector:2.0.26
    <<: [*knightcrawler-app, *knightcrawler-app-depends]
    restart: unless-stopped

  producer:
    image: gabisonfire/knightcrawler-producer:2.0.26
    <<: [*knightcrawler-app, *knightcrawler-app-depends]
    restart: unless-stopped

  qbitcollector:
    image: gabisonfire/knightcrawler-qbit-collector:2.0.26
    <<: [*knightcrawler-app, *knightcrawler-app-depends]
    restart: unless-stopped
    depends_on:
      qbittorrent:
        condition: service_healthy
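The `<<: [*knightcrawler-app, *knightcrawler-app-depends]` lines are YAML merge keys: each service inherits the labels/env/network mapping and the shared `depends_on` block from the two anchors at the top of the file. To confirm what Compose resolves them to, render the canonical config — a sketch, run from `deployment/docker/src`:

```sh
# Prints the addon service with anchors and merge keys fully expanded.
docker compose -f compose.yaml config addon
```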
deployment/docker/src/components/network.yaml — new file (4 lines)

networks:
  knightcrawler-network:
    driver: bridge
    name: knightcrawler-network
deployment/docker/src/components/volumes.yaml — new file (4 lines)

volumes:
  postgres:
  redis:
  lavinmq:
deployment/docker/src/compose.override.yaml — new file (7 lines)

services:
  qbittorrent:
    deploy:
      replicas: 0
  qbitcollector:
    deploy:
      replicas: 0
deployment/docker/src/compose.yaml — new file (7 lines)

version: "3.9"
name: "knightcrawler"
include:
  - ./components/network.yaml
  - ./components/volumes.yaml
  - ./components/infrastructure.yaml
  - ./components/knightcrawler.yaml
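With the stack split into `include`d component files plus an override, day-to-day usage stays one command; Compose picks up `compose.override.yaml` automatically when it sits beside a default-named `compose.yaml`. A sketch, assuming this layout:

```sh
cd deployment/docker/src
docker compose up -d                   # compose.yaml plus compose.override.yaml (qbit replicas forced to 0)
docker compose -f compose.yaml up -d   # name the file explicitly to skip the override
```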
41
deployment/docker/stack.env
Normal file
@@ -0,0 +1,41 @@
# General environment variables
TZ=Europe/London

# PostgreSQL
POSTGRES_HOST=postgres
POSTGRES_PORT=5432
POSTGRES_USER=postgres
POSTGRES_PASSWORD=postgres
POSTGRES_DB=knightcrawler

# Redis
REDIS_HOST=redis
REDIS_PORT=6379
REDIS_EXTRA=abortConnect=false,allowAdmin=true

# AMQP
RABBITMQ_HOST=lavinmq
RABBITMQ_USER=guest
RABBITMQ_PASSWORD=guest
RABBITMQ_CONSUMER_QUEUE_NAME=ingested
RABBITMQ_DURABLE=true
RABBITMQ_MAX_QUEUE_SIZE=0
RABBITMQ_MAX_PUBLISH_BATCH_SIZE=500
RABBITMQ_PUBLISH_INTERVAL_IN_SECONDS=10

# Metadata
METADATA_INSERT_BATCH_SIZE=50000

# Collectors
COLLECTOR_QBIT_ENABLED=false
COLLECTOR_DEBRID_ENABLED=true
COLLECTOR_REAL_DEBRID_API_KEY=
QBIT_HOST=http://qbittorrent:8080
QBIT_TRACKERS_URL=https://raw.githubusercontent.com/ngosang/trackerslist/master/trackers_all_http.txt
QBIT_CONCURRENCY=8

# Number of replicas for the qBittorrent collector and qBittorrent client. Should be 0 or 1.
QBIT_REPLICAS=0

# Addon
DEBUG_MODE=false
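The `stack.env` name follows Portainer's convention for stack environment files. If you deploy with the Compose CLI instead, the same file can be passed explicitly (a sketch, run next to the compose file):

```Bash
docker compose --env-file ./stack.env up -d
```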
@@ -1,14 +0,0 @@
<?xml version="1.0" encoding="UTF-8"?>
<buildprofiles xsi:noNamespaceSchemaLocation="https://resources.jetbrains.com/writerside/1.0/build-profiles.xsd"
               xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance">

    <variables>
        <header-logo>knight-crawler-logo.png</header-logo>
    </variables>
    <build-profile instance="kc">
        <variables>
            <noindex-content>true</noindex-content>
        </variables>
    </build-profile>

</buildprofiles>
Binary file not shown (image deleted; previously 568 KiB).
@@ -1,13 +0,0 @@
<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE instance-profile
        SYSTEM "https://resources.jetbrains.com/writerside/1.0/product-profile.dtd">

<instance-profile id="kc" name="Knight Crawler"
                  start-page="Overview.md">

    <toc-element topic="Overview.md"/>
    <toc-element topic="Getting-started.md">
    </toc-element>
    <toc-element topic="External-access.md"/>
    <toc-element topic="Supported-Debrid-services.md"/>
</instance-profile>
@@ -1,57 +0,0 @@
# External access

This guide outlines how to use Knight Crawler on devices like your TV. Out of the box it is only reachable from the
machine it is installed on, but with a little extra effort we can make it accessible to other devices. This limitation
is set by Stremio, as [explained here](https://github.com/Stremio/stremio-features/issues/687#issuecomment-1890546094).

## What to keep in mind

Before we make Knight Crawler available outside your home network, we've got to talk about safety. No software is
perfect, including ours. Knight Crawler is built from many different parts, some made by other people, so keeping it
confined to your home network is inherently safer. If you choose to expose it to the internet, keeping your devices
secure is up to you: we won't be responsible for any problems or lost data if you use Knight Crawler that way.

## Initial setup

To enable external access for Knight Crawler, whether within your home network or over the internet, you'll need to
follow these initial setup steps:

- Set up Caddy, a powerful and easy-to-use web server.
- Disable the open port in the Knight Crawler <path>docker-compose.yaml</path> file (covered below).

### Caddy

A basic Caddy configuration is included with Knight Crawler in the deployment directory, under
<path>deployment/docker/optional-services/caddy</path>:

```Generic
deployment/
└── docker/
    └── optional-services/
        └── caddy/
            ├── config/
            │   ├── snippets/
            │   │   └── cloudflare-replace-X-Forwarded-For
            │   └── Caddyfile
            ├── logs/
            └── docker-compose.yaml
```
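The bundled <path>Caddyfile</path> is where you point Caddy at the addon. Purely as an illustration of the shape of such a site block (the hostname is an assumption, and the bundled configuration may differ), a minimal reverse proxy for the addon could look like:

```Generic
knightcrawler.example.com {
    reverse_proxy knightcrawler-addon:7000
}
```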
### Disable the open port

With Caddy in front, the addon no longer needs to publish its port directly. Remove or comment out the following lines
in the Knight Crawler <path>docker-compose.yaml</path>:

```yaml
    ports:
      - "8080:8080"
```

By disabling the default port, Knight Crawler will only be accessible internally within your network, adding a layer
of security.

## Home network access

## Internet access

### Through a VPN

### On the public web

## Troubleshooting

## Additional resources
@@ -1,192 +0,0 @@
# Getting started

Knight Crawler is provided as an all-in-one solution. This means we include all the necessary software you need to get
started out of the box.

## Before you start

Make sure that you have:

- A place to host Knight Crawler
- [Docker](https://docs.docker.com/get-docker/) and [Compose](https://docs.docker.com/compose/install/) installed
- A [GitHub](https://github.com/) account _(optional)_

## Download the files

Installing Knight Crawler is as simple as downloading a copy of the [deployment directory](https://github.com/Gabisonfire/knightcrawler/tree/master/deployment/docker).

A basic installation requires only two files:

- <path>deployment/docker/.env.example</path>
- <path>deployment/docker/docker-compose.yaml</path>

For this guide I will be placing them in <path>~/knightcrawler</path>, a directory under my home directory.

Rename the <path>.env.example</path> file to <path>.env</path>:

```Generic
~/
└── knightcrawler/
    ├── .env
    └── docker-compose.yaml
```
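If you prefer to do this from a shell, the two files can be fetched directly. This is just a sketch: the raw URLs below are derived from the repository link above, so adjust them if the branch or layout has changed.

```Bash
mkdir -p ~/knightcrawler && cd ~/knightcrawler
# Fetch the compose file under its own name
curl -O https://raw.githubusercontent.com/Gabisonfire/knightcrawler/master/deployment/docker/docker-compose.yaml
# Save the example env file directly as .env
curl -o .env https://raw.githubusercontent.com/Gabisonfire/knightcrawler/master/deployment/docker/.env.example
```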
## Initial configuration

Below are a few recommended configuration changes.

Open the <path>.env</path> file in your favourite editor.

> If you are using an external database, configure it in the <path>.env</path> file. Don't forget to disable the ones
> included in the <path>docker-compose.yaml</path>.

### Database credentials

It is strongly recommended that you change the credentials for the databases included with Knight Crawler. This is best
done before running Knight Crawler for the first time, as it is much harder to change the passwords once the services
have been started.

```Bash
POSTGRES_PASSWORD=postgres
...
MONGODB_PASSWORD=mongo
...
RABBITMQ_PASSWORD=guest
```

Here are a few options for generating a secure password:

```Bash
# Linux
tr -cd '[:alnum:]' < /dev/urandom | fold -w 64 | head -n 1

# Or you could use openssl
openssl rand -hex 32
```

```Python
# Python
import secrets

print(secrets.token_hex(32))
```

### Your time zone

```Bash
TZ=Europe/London
```

A list of time zones can be found on [Wikipedia](https://en.wikipedia.org/wiki/List_of_tz_database_time_zones).

### Consumers

```Bash
JOB_CONCURRENCY=5
...
MAX_CONNECTIONS_PER_TORRENT=10
...
CONSUMER_REPLICAS=3
```

These values depend entirely on your machine and network capacity. The defaults above are fairly minimal and will work
on most machines.

`JOB_CONCURRENCY` is how many films and tv shows each consumer should process at once. Because it applies to every
consumer, raising it multiplies the load on your system. It's probably best to leave this at 5, but you can experiment
with it if you wish.

`MAX_CONNECTIONS_PER_TORRENT` is how many peers the consumer will attempt to connect to when collecting metadata.
Increasing this value can speed up processing, but at some point more connections will be opened than your router can
handle, causing a cascading failure where your internet stops working. If you are going to increase this value, try
increasing it by 10 at a time.

> Increasing this value increases the max connections for every parallel job, for every consumer. For example,
> with the default values above this means that Knight Crawler will on average be making `(5 x 3) x 10 = 150`
> connections at any one time.
>
{style="warning"}

`CONSUMER_REPLICAS` is how many consumers should be started initially. You can increase or decrease the number of
consumers while the service is running with the command `docker compose up -d --scale consumer=<number>`.
### GitHub personal access token

This step is optional but strongly recommended. [Debrid Media Manager](https://debridmediamanager.com/start) is a media
library manager for Debrid services. When a user of that service chooses to export/share their library publicly, it is
saved to a public GitHub repository. This is, essentially, a repository containing a vast amount of ready-to-go films
and tv shows. Knight Crawler comes with the ability to read these exported lists, but it requires a GitHub account to
work.

Knight Crawler needs a personal access token with read-only access to public repositories. This means it cannot access
any private repositories you have.

1. Navigate to your [GitHub token settings](https://github.com/settings/tokens?type=beta):
   - Navigate to `GitHub settings`.
   - Click on `Developer Settings`.
   - Select `Personal access tokens`.
   - Choose `Fine-grained tokens`.

2. Press `Generate new token`.

3. Fill out the form with the following information:
   ```Generic
   Token name:
       KnightCrawler
   Expiration:
       90 days
   Description:
       <blank>
   Repository access:
       (checked) Public Repositories (read-only)
   ```

4. Click `Generate token`.

5. Take the new token and add it to the bottom of the <path>.env</path> file:
   ```Bash
   # Producer
   GITHUB_PAT=<YOUR TOKEN HERE>
   ```

## Start Knight Crawler

To start Knight Crawler use the following command:

```Bash
docker compose up -d
```

Then we can follow the logs to watch it start:

```Bash
docker compose logs -f --since 1m
```

> Knight Crawler will only be accessible on the machine you run it on. To make it accessible from other machines, see
> [External access](External-access.md).
>
{style="note"}

To stop following the logs, press <shortcut>Ctrl+C</shortcut> at any time.

The Knight Crawler configuration page should now be accessible in your web browser at [http://localhost:7000](http://localhost:7000).

## Start more consumers

If you wish to speed up the processing of the films and tv shows that Knight Crawler finds, then you'll likely want to
increase the number of consumers.

The command below can be used to either increase or decrease the number of running consumers. Gradually increase the
number until you encounter issues, then decrease until stable.

```Bash
docker compose up -d --scale consumer=<number>
```

## Stop Knight Crawler

Knight Crawler can be stopped with the following command:

```Bash
docker compose down
```
@@ -1,30 +0,0 @@
# Overview

<img alt="The image shows a Knight in silvery armour looking forwards." src="knight-crawler-logo.png" title="Knight Crawler logo" width="100"/>

Knight Crawler is a self-hosted [Stremio](https://www.stremio.com/) addon for streaming torrents via
a [Debrid](Supported-Debrid-services.md "Click for a list of Debrid services we support") service.

We are active on [Discord](https://discord.gg/8fQdxay9z2) for both support and casual conversation.

> Knight Crawler is currently alpha software.
>
> Users are responsible for ensuring their data is backed up regularly.
>
> Please read the changelogs before updating to the latest version.
>
{style="warning"}

## What does Knight Crawler do?

Knight Crawler is an addon for [Stremio](https://www.stremio.com/). It began as a fork of the very popular
[Torrentio](https://github.com/TheBeastLT/torrentio-scraper) addon. Knight Crawler essentially does the following:

1. It searches the internet for available films and tv shows.
2. It collects as much information as it can about each film and tv show it finds.
3. It then stores this information in a database for easy access.

When you choose a film or tv show to watch on Stremio, a request is sent to your installation of Knight Crawler, which
queries the database and returns a list of all the copies it has stored, as Debrid links. This enables playback to
begin immediately for your chosen media.
@@ -1,3 +0,0 @@
# Supported Debrid services

Start typing here...
@@ -1,8 +0,0 @@
<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE ihp SYSTEM "https://resources.jetbrains.com/writerside/1.0/ihp.dtd">

<ihp version="2.0">
    <topics dir="topics" web-path="topics"/>
    <images dir="images" web-path="knightcrawler"/>
    <instance src="kc.tree"/>
</ihp>
File diff suppressed because it is too large
@@ -14,7 +14,6 @@
     "axios": "^1.6.1",
     "bottleneck": "^2.19.5",
     "cache-manager": "^3.4.4",
-    "cache-manager-mongodb": "^0.3.0",
     "cors": "^2.8.5",
     "debrid-link-api": "^1.0.1",
     "express": "^4.18.2",
@@ -33,7 +32,11 @@
     "user-agents": "^1.0.1444",
     "video-name-parser": "^1.4.6",
     "xml-js": "^1.6.11",
-    "xml2js": "^0.6.2"
+    "xml2js": "^0.6.2",
+    "@redis/client": "^1.5.14",
+    "@redis/json": "^1.0.6",
+    "@redis/search": "^1.1.6",
+    "cache-manager-redis-store": "^2.0.0"
   },
   "devDependencies": {
     "@types/node": "^20.11.6",
@@ -1,7 +1,7 @@
 import cacheManager from 'cache-manager';
-import mangodbStore from 'cache-manager-mongodb';
 import { isStaticUrl } from '../moch/static.js';
 import {cacheConfig} from "./settings.js";
+import redisStore from 'cache-manager-redis-store';
 
 const STREAM_KEY_PREFIX = `${cacheConfig.GLOBAL_KEY_PREFIX}|stream`;
 const IMDB_KEY_PREFIX = `${cacheConfig.GLOBAL_KEY_PREFIX}|imdb`;
@@ -12,28 +12,20 @@ const memoryCache = initiateMemoryCache();
 const remoteCache = initiateRemoteCache();
 
 function initiateRemoteCache() {
   if (cacheConfig.NO_CACHE) {
     return null;
-  } else if (cacheConfig.MONGODB_URI) {
+  } else if (cacheConfig.REDIS_CONNECTION_STRING) {
     return cacheManager.caching({
-      store: mangodbStore,
-      uri: cacheConfig.MONGODB_URI,
-      options: {
-        collection: 'jackettio_addon_collection',
-        socketTimeoutMS: 120000,
-        useNewUrlParser: true,
-        useUnifiedTopology: false,
-        ttl: cacheConfig.STREAM_EMPTY_TTL
-      },
-      ttl: cacheConfig.STREAM_EMPTY_TTL,
-      ignoreCacheErrors: true
+      store: redisStore,
+      ttl: cacheConfig.STREAM_EMPTY_TTL,
+      url: cacheConfig.REDIS_CONNECTION_STRING
     });
   } else {
     return cacheManager.caching({
       store: 'memory',
       ttl: cacheConfig.STREAM_EMPTY_TTL
     });
   }
 }
 
 function initiateMemoryCache() {
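For readers unfamiliar with the pattern being adopted here: this is cache-manager v3 with the cache-manager-redis-store v2 backend. A minimal standalone sketch of the same pattern (names and the local Redis URL are assumptions, not the repo's code; run as an ES module on Node 18+ with both packages installed):

```javascript
import cacheManager from 'cache-manager';
import redisStore from 'cache-manager-redis-store';

// Build a Redis-backed cache; for this store, ttl is in seconds and the
// connection options are passed through to the underlying redis client.
const cache = cacheManager.caching({
  store: redisStore,
  url: 'redis://localhost:6379',
  ttl: 60
});

// wrap() returns the cached value for the key if present; otherwise it runs
// the loader, caches the result, and returns it.
const streams = await cache.wrap('demo|stream|tt0111161', async () => {
  return [{ title: 'example stream' }]; // stand-in for a real lookup
});
console.log(streams);
```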
@@ -25,7 +25,9 @@ export const cinemetaConfig = {
 }
 
 export const cacheConfig = {
-  MONGODB_URI: process.env.MONGODB_URI,
+  REDIS_HOST: process.env.REDIS_HOST || 'redis',
+  REDIS_PORT: process.env.REDIS_PORT || '6379',
+  REDIS_EXTRA: process.env.REDIS_EXTRA || '',
   NO_CACHE: parseBool(process.env.NO_CACHE, false),
   IMDB_TTL: parseInt(process.env.IMDB_TTL || 60 * 60 * 4), // 4 Hours
   STREAM_TTL: parseInt(process.env.STREAM_TTL || 60 * 60 * 4), // 4 Hours
@@ -40,3 +42,5 @@ export const cacheConfig = {
   STALE_ERROR_AGE: parseInt(process.env.STALE_ERROR_AGE) || 7 * 24 * 60 * 60, // 7 days
   GLOBAL_KEY_PREFIX: process.env.GLOBAL_KEY_PREFIX || 'jackettio-addon',
 }
+
+cacheConfig.REDIS_CONNECTION_STRING = 'redis://' + cacheConfig.REDIS_HOST + ':' + cacheConfig.REDIS_PORT + '?' + cacheConfig.REDIS_EXTRA;
File diff suppressed because it is too large
@@ -10,11 +10,10 @@
   },
   "dependencies": {
     "@putdotio/api-client": "^8.42.0",
-    "all-debrid-api": "^1.1.0",
+    "all-debrid-api": "^1.2.0",
     "axios": "^1.6.1",
     "bottleneck": "^2.19.5",
     "cache-manager": "^3.4.4",
-    "cache-manager-mongodb": "^0.3.0",
     "cors": "^2.8.5",
     "debrid-link-api": "^1.0.1",
     "express-rate-limit": "^6.7.0",
@@ -35,7 +34,11 @@
     "stremio-addon-sdk": "^1.6.10",
     "swagger-stats": "^0.99.7",
     "ua-parser-js": "^1.0.36",
-    "user-agents": "^1.0.1444"
+    "user-agents": "^1.0.1444",
+    "@redis/client": "^1.5.14",
+    "@redis/json": "^1.0.6",
+    "@redis/search": "^1.1.6",
+    "cache-manager-redis-store": "^2.0.0"
   },
   "devDependencies": {
     "@types/node": "^20.11.6",
@@ -3,6 +3,7 @@ import { addonBuilder } from 'stremio-addon-sdk';
 import { cacheWrapStream } from './lib/cache.js';
 import { dummyManifest } from './lib/manifest.js';
 import * as repository from './lib/repository.js';
+import applyFilters from "./lib/filter.js";
 import applySorting from './lib/sort.js';
 import { toStreamInfo, applyStaticInfo } from './lib/streamInfo.js';
 import { Type } from './lib/types.js';
@@ -32,6 +33,7 @@ builder.defineStreamHandler((args) => {
       .then(records => records
           .sort((a, b) => b.torrent.seeders - a.torrent.seeders || b.torrent.uploadDate - a.torrent.uploadDate)
           .map(record => toStreamInfo(record)))))
+      .then(streams => applyFilters(streams, args.extra))
       .then(streams => applySorting(streams, args.extra))
       .then(streams => applyStaticInfo(streams))
       .then(streams => applyMochs(streams, args.extra))
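The new `applyFilters` step slots into the stream pipeline between sorting the raw records and applying user sorting. `./lib/filter.js` itself is not shown in this diff; purely as an illustration of the shape such a step has (the `sizefilter` option and all field names are hypothetical, not the repo's actual implementation):

```javascript
// Hypothetical sketch of a filter step like './lib/filter.js' (not shown in
// this diff). It receives the stream list plus the request's `extra` config
// and returns only the streams that pass.
export default function applyFilters(streams, extra = {}) {
  if (!extra.sizefilter) {
    return streams; // nothing configured: pass everything through
  }
  const maxBytes = Number(extra.sizefilter);
  return streams.filter(stream => !stream.size || stream.size <= maxBytes);
}
```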
@@ -1,7 +1,7 @@
 import cacheManager from 'cache-manager';
-import mangodbStore from 'cache-manager-mongodb';
 import { cacheConfig } from './config.js';
 import { isStaticUrl } from '../moch/static.js';
+import redisStore from "cache-manager-redis-store";
 
 const GLOBAL_KEY_PREFIX = 'knightcrawler-addon';
 const STREAM_KEY_PREFIX = `${GLOBAL_KEY_PREFIX}|stream`;
@@ -21,19 +21,11 @@ const remoteCache = initiateRemoteCache();
 function initiateRemoteCache() {
   if (cacheConfig.NO_CACHE) {
     return null;
-  } else if (cacheConfig.MONGO_URI) {
+  } else if (cacheConfig.REDIS_CONNECTION_STRING) {
     return cacheManager.caching({
-      store: mangodbStore,
-      uri: cacheConfig.MONGO_URI,
-      options: {
-        collection: 'knightcrawler_addon_collection',
-        socketTimeoutMS: 120000,
-        useNewUrlParser: true,
-        useUnifiedTopology: false,
-        ttl: STREAM_EMPTY_TTL
-      },
+      store: redisStore,
       ttl: STREAM_EMPTY_TTL,
-      ignoreCacheErrors: true
+      url: cacheConfig.REDIS_CONNECTION_STRING
     });
   } else {
     return cacheManager.caching({
@@ -1,17 +1,11 @@
 export const cacheConfig = {
-  MONGODB_HOST: process.env.MONGODB_HOST || 'mongodb',
-  MONGODB_PORT: process.env.MONGODB_PORT || '27017',
-  MONGODB_DB: process.env.MONGODB_DB || 'knightcrawler',
-  MONGODB_USER: process.env.MONGODB_USER || 'mongo',
-  MONGODB_PASSWORD: process.env.MONGODB_PASSWORD || 'mongo',
-  COLLECTION_NAME: process.env.MONGODB_ADDON_COLLECTION || 'knightcrawler_addon_collection',
+  REDIS_HOST: process.env.REDIS_HOST || 'redis',
+  REDIS_PORT: process.env.REDIS_PORT || '6379',
+  REDIS_EXTRA: process.env.REDIS_EXTRA || '',
   NO_CACHE: parseBool(process.env.NO_CACHE, false),
 }
 
-// The combined string will look something like:
-// 'mongodb://mongo:mongo@localhost:27017/knightcrawler?authSource=admin'
-cacheConfig.MONGO_URI = 'mongodb://' + cacheConfig.MONGODB_USER + ':' + cacheConfig.MONGODB_PASSWORD + '@' + cacheConfig.MONGODB_HOST + ':' + cacheConfig.MONGODB_PORT + '/' + cacheConfig.MONGODB_DB + '?authSource=admin';
+// Combine the environment variables into a connection string
+cacheConfig.REDIS_CONNECTION_STRING = 'redis://' + cacheConfig.REDIS_HOST + ':' + cacheConfig.REDIS_PORT + '?' + cacheConfig.REDIS_EXTRA;
 
 export const databaseConfig = {
   POSTGRES_HOST: process.env.POSTGRES_HOST || 'postgres',
Some files were not shown because too many files have changed in this diff.