13 Commits

Author SHA1 Message Date
iPromKnight
f56f205bbe helm chart [wip] 2024-03-30 23:54:34 +00:00
iPromKnight
c75ecd2707 add qbit housekeeping service to remove stale torrents (#193)
* Add housekeeping service to clean stale torrents

* version bump
2024-03-30 11:52:23 +00:00
iPromKnight
c493ef3376 Hotfix category, and roll back RTN to 0.1.8 (#192)
* Hotfix categories

Also roll back RTN to 0.1.8, as a regression was introduced in 0.2

* bump version
2024-03-30 04:47:36 +00:00
iPromKnight
655a39e35c patch the query with execute (#191) 2024-03-30 01:54:06 +00:00
iPromKnight
cfeee62f6b patch ratio (#190)
* add configurable threshold, default 0.95

* version bump
2024-03-30 01:43:21 +00:00
iPromKnight
c6d4c06d70 hotfix categories from imdb result instead (#189)
* category mapping from imdb

* version bump
2024-03-30 01:26:02 +00:00
iPromKnight
08639a3254 Patch isMovie (#188)
* fix is movie

* version bump
2024-03-30 00:28:35 +00:00
iPromKnight
d430850749 Patch message contract names (#187)
* ensure unique message contract names per collector type

* version bump
2024-03-30 00:09:13 +00:00
iPromKnight
82c0ea459b change qbittorrent settings (#186) 2024-03-29 23:35:27 +00:00
iPromKnight
1e83b4c5d8 Patch the addon (#185) 2024-03-29 19:08:17 +00:00
iPromKnight
66609c2a46 Trigram performance improvements and housekeeping (#184)
* add new indexes, and change year column to int

* Change gist to gin, and change year to int

* Producer changes for new gin query

* Fully map the rtn response using json dump from Pydantic

Also updates Rtn to 0.1.9

* Add housekeeping script to reconcile imdb ids.

* Join Torrent onto the ingested torrent table

Ensure that a torrent can always find the details of where it came from and how it was parsed.

* Version bump for release

* missing quote on table name
2024-03-29 19:01:48 +00:00
iPromKnight
2d78dc2735 version bump for release (#183) 2024-03-28 23:37:35 +00:00
iPromKnight
527d6cdf15 Upgrade RTN to 0.1.8; replace RabbitMQ with LavinMQ, a drop-in replacement with better performance and lower resource usage. (#182) 2024-03-28 23:35:41 +00:00
64 changed files with 1321 additions and 242 deletions

View File

@@ -12,8 +12,11 @@ enabled=false
program=
[BitTorrent]
Session\AnonymousModeEnabled=true
Session\BTProtocol=TCP
Session\DefaultSavePath=/downloads/
Session\ExcludedFileNames=
Session\MaxActiveCheckingTorrents=5
Session\MaxActiveDownloads=10
Session\MaxActiveTorrents=50
Session\MaxActiveUploads=50
@@ -50,6 +53,7 @@ MailNotification\req_auth=true
WebUI\Address=*
WebUI\AuthSubnetWhitelist=0.0.0.0/0
WebUI\AuthSubnetWhitelistEnabled=true
WebUI\HostHeaderValidation=false
WebUI\LocalHostAuth=false
WebUI\ServerDomains=*

View File

@@ -9,7 +9,7 @@ networks:
volumes:
postgres:
rabbitmq:
lavinmq:
redis:
services:
@@ -55,28 +55,29 @@ services:
volumes:
- redis:/data
## RabbitMQ is used as a message broker for the services.
## LavinMQ is used as a message broker for the services.
## It is a high-performance, drop-in replacement for RabbitMQ.
## It is used to communicate between the services.
rabbitmq:
lavinmq:
env_file: stack.env
healthcheck:
test: ["CMD-SHELL", "rabbitmq-diagnostics -q ping"]
timeout: 10s
interval: 10s
retries: 3
start_period: 10s
# # If you need the message broker to be accessible from outside, please open the ports below.
# # Furthermore, please, please, please, look at the documentation for rabbit on how to secure the service.
# # Furthermore, please, please, please, look at the documentation for lavinmq / rabbitmq on how to secure the service.
# ports:
# - "5672:5672"
# - "15672:15672"
# - "15692:15692"
image: rabbitmq:3-management
image: cloudamqp/lavinmq:latest
healthcheck:
test: ["CMD-SHELL", "lavinmqctl status"]
timeout: 10s
interval: 10s
retries: 3
start_period: 10s
restart: unless-stopped
networks:
- knightcrawler-network
restart: unless-stopped
volumes:
- rabbitmq:/var/lib/rabbitmq
- lavinmq:/var/lib/lavinmq/
## The addon. This is what is used in stremio
addon:
@@ -87,13 +88,13 @@ services:
condition: service_completed_successfully
postgres:
condition: service_healthy
rabbitmq:
lavinmq:
condition: service_healthy
redis:
condition: service_healthy
env_file: stack.env
hostname: knightcrawler-addon
image: gabisonfire/knightcrawler-addon:2.0.8
image: gabisonfire/knightcrawler-addon:2.0.17
labels:
logging: promtail
networks:
@@ -111,12 +112,12 @@ services:
condition: service_completed_successfully
postgres:
condition: service_healthy
rabbitmq:
lavinmq:
condition: service_healthy
redis:
condition: service_healthy
env_file: stack.env
image: gabisonfire/knightcrawler-consumer:2.0.8
image: gabisonfire/knightcrawler-consumer:2.0.17
labels:
logging: promtail
networks:
@@ -132,12 +133,12 @@ services:
condition: service_completed_successfully
postgres:
condition: service_healthy
rabbitmq:
lavinmq:
condition: service_healthy
redis:
condition: service_healthy
env_file: stack.env
image: gabisonfire/knightcrawler-debrid-collector:2.0.8
image: gabisonfire/knightcrawler-debrid-collector:2.0.17
labels:
logging: promtail
networks:
@@ -151,7 +152,7 @@ services:
migrator:
condition: service_completed_successfully
env_file: stack.env
image: gabisonfire/knightcrawler-metadata:2.0.8
image: gabisonfire/knightcrawler-metadata:2.0.17
networks:
- knightcrawler-network
restart: "no"
@@ -162,7 +163,7 @@ services:
postgres:
condition: service_healthy
env_file: stack.env
image: gabisonfire/knightcrawler-migrator:2.0.8
image: gabisonfire/knightcrawler-migrator:2.0.17
networks:
- knightcrawler-network
restart: "no"
@@ -176,12 +177,12 @@ services:
condition: service_completed_successfully
postgres:
condition: service_healthy
rabbitmq:
lavinmq:
condition: service_healthy
redis:
condition: service_healthy
env_file: stack.env
image: gabisonfire/knightcrawler-producer:2.0.8
image: gabisonfire/knightcrawler-producer:2.0.17
labels:
logging: promtail
networks:
@@ -191,12 +192,22 @@ services:
## QBit collector utilizes QBitTorrent to download metadata.
qbitcollector:
depends_on:
metadata:
condition: service_completed_successfully
migrator:
condition: service_completed_successfully
postgres:
condition: service_healthy
lavinmq:
condition: service_healthy
redis:
condition: service_healthy
qbittorrent:
condition: service_healthy
deploy:
replicas: ${QBIT_REPLICAS:-0}
env_file: stack.env
image: gabisonfire/knightcrawler-qbit-collector:2.0.8
image: gabisonfire/knightcrawler-qbit-collector:2.0.17
labels:
logging: promtail
networks:

View File

@@ -16,7 +16,7 @@ rule_files:
scrape_configs:
- job_name: "rabbitmq"
static_configs:
- targets: ["rabbitmq:15692"]
- targets: ["lavinmq:15692"]
- job_name: "postgres-exporter"
static_configs:
- targets: ["postgres-exporter:9187"]

View File

@@ -4,8 +4,8 @@ x-basehealth: &base-health
retries: 3
start_period: 10s
x-rabbithealth: &rabbitmq-health
test: rabbitmq-diagnostics -q ping
x-lavinhealth: &lavinmq-health
test: [ "CMD-SHELL", "lavinmqctl status" ]
<<: *base-health
x-redishealth: &redis-health
@@ -52,21 +52,19 @@ services:
networks:
- knightcrawler-network
rabbitmq:
image: rabbitmq:3-management
lavinmq:
env_file: stack.env
# # If you need the message broker to be accessible from outside, please open the ports below.
# # Furthermore, please, please, please, look at the documentation for rabbit on how to secure the service.
# # Furthermore, please, please, please, look at the documentation for lavinmq / rabbitmq on how to secure the service.
# ports:
# - "5672:5672"
# - "15672:15672"
# - "15692:15692"
volumes:
- rabbitmq:/var/lib/rabbitmq
image: cloudamqp/lavinmq:latest
healthcheck: *lavinmq-health
restart: unless-stopped
healthcheck: *rabbitmq-health
env_file: ../../.env
networks:
- knightcrawler-network
volumes:
- lavinmq:/var/lib/lavinmq/
## QBitTorrent is a torrent client that can be used to download torrents. In this case its used to download metadata.
## The QBit collector requires this.

View File

@@ -11,7 +11,7 @@ x-depends: &knightcrawler-app-depends
condition: service_healthy
postgres:
condition: service_healthy
rabbitmq:
lavinmq:
condition: service_healthy
migrator:
condition: service_completed_successfully
@@ -20,7 +20,7 @@ x-depends: &knightcrawler-app-depends
services:
metadata:
image: gabisonfire/knightcrawler-metadata:2.0.8
image: gabisonfire/knightcrawler-metadata:2.0.17
env_file: ../../.env
networks:
- knightcrawler-network
@@ -30,7 +30,7 @@ services:
condition: service_completed_successfully
migrator:
image: gabisonfire/knightcrawler-migrator:2.0.8
image: gabisonfire/knightcrawler-migrator:2.0.17
env_file: ../../.env
networks:
- knightcrawler-network
@@ -40,7 +40,7 @@ services:
condition: service_healthy
addon:
image: gabisonfire/knightcrawler-addon:2.0.8
image: gabisonfire/knightcrawler-addon:2.0.17
<<: [*knightcrawler-app, *knightcrawler-app-depends]
restart: unless-stopped
hostname: knightcrawler-addon
@@ -48,22 +48,22 @@ services:
- "7000:7000"
consumer:
image: gabisonfire/knightcrawler-consumer:2.0.8
image: gabisonfire/knightcrawler-consumer:2.0.17
<<: [*knightcrawler-app, *knightcrawler-app-depends]
restart: unless-stopped
debridcollector:
image: gabisonfire/knightcrawler-debrid-collector:2.0.8
image: gabisonfire/knightcrawler-debrid-collector:2.0.17
<<: [*knightcrawler-app, *knightcrawler-app-depends]
restart: unless-stopped
producer:
image: gabisonfire/knightcrawler-producer:2.0.8
image: gabisonfire/knightcrawler-producer:2.0.17
<<: [*knightcrawler-app, *knightcrawler-app-depends]
restart: unless-stopped
qbitcollector:
image: gabisonfire/knightcrawler-qbit-collector:2.0.8
image: gabisonfire/knightcrawler-qbit-collector:2.0.17
<<: [*knightcrawler-app, *knightcrawler-app-depends]
restart: unless-stopped
depends_on:

View File

@@ -1,4 +1,4 @@
volumes:
postgres:
redis:
rabbitmq:
lavinmq:

View File

@@ -13,8 +13,8 @@ REDIS_HOST=redis
REDIS_PORT=6379
REDIS_EXTRA=abortConnect=false,allowAdmin=true
# RabbitMQ
RABBITMQ_HOST=rabbitmq
# AMQP
RABBITMQ_HOST=lavinmq
RABBITMQ_USER=guest
RABBITMQ_PASSWORD=guest
RABBITMQ_CONSUMER_QUEUE_NAME=ingested

View File

@@ -0,0 +1,6 @@
apiVersion: v2
appVersion: 2.0.17
description: A helm chart for Knightcrawler
name: knightcrawler
type: application
version: 0.1.0

View File

@@ -0,0 +1,6 @@
Congratulations,
Knightcrawler is now deployed. It may take a while for all services to come up and start responding.

View File

@@ -0,0 +1,27 @@
apiVersion: v1
kind: ConfigMap
metadata:
name: '{{ .Release.Name }}-config'
labels:
project: '{{ .Chart.Name }}'
release: '{{ .Release.Name }}'
data:
COLLECTOR_DEBRID_ENABLED: '{{ .Values.knightcrawler.debridcollector.enabled }}'
COLLECTOR_QBIT_ENABLED: '{{ .Values.knightcrawler.qbitcollector.enabled }}'
DEBUG_MODE: '{{ .Values.knightcrawler.debug }}'
METADATA_INSERT_BATCH_SIZE: '{{ .Values.environment.metadata.insertBatchSize }}'
POSTGRES_DB: '{{ .Values.environment.postgres.dbName }}'
POSTGRES_HOST: '{{ if .Values.environment.postgres.external }}{{ .Values.environment.postgres.host }}{{ else }}{{ .Release.Name }}-postgres{{ end }}'
POSTGRES_PORT: '{{ .Values.environment.postgres.port }}'
QBIT_HOST: '{{ .Values.environment.qbitcollector.qbitHost }}'
QBIT_TRACKERS_URL: '{{ .Values.environment.qbitcollector.trackersUrl }}'
RABBITMQ_CONSUMER_QUEUE_NAME: '{{ .Values.environment.producer.queueName }}'
RABBITMQ_DURABLE: '{{ .Values.environment.producer.durable }}'
RABBITMQ_HOST: '{{ if .Values.environment.lavinmq.external }}{{ .Values.environment.lavinmq.host }}{{ else }}{{ .Release.Name }}-lavinmq{{ end }}'
RABBITMQ_MAX_PUBLISH_BATCH_SIZE: '{{ .Values.environment.producer.maxPublishBatchSize }}'
RABBITMQ_MAX_QUEUE_SIZE: '{{ .Values.environment.producer.maxQueueSize }}'
RABBITMQ_PUBLISH_INTERVAL_IN_SECONDS: '{{ .Values.environment.producer.publishIntervalSeconds }}'
REDIS_EXTRA: '{{ .Values.environment.redis.extra }}'
REDIS_HOST: '{{ if .Values.environment.redis.external }}{{ .Values.environment.redis.host }}{{ else }}{{ .Release.Name }}-redis{{ end }}'
REDIS_PORT: '{{ .Values.environment.redis.port }}'
TZ: '{{ .Values.shared.timezone }}'

View File

@@ -0,0 +1,15 @@
apiVersion: v1
kind: Secret
metadata:
name: '{{ .Release.Name }}-secrets'
labels:
project: '{{ .Chart.Name }}'
release: '{{ .Release.Name }}'
type: Opaque
data:
GITHUB_PAT: '{{ .Values.environment.producer.githubPat | b64enc }}'
COLLECTOR_REAL_DEBRID_API_KEY: '{{ .Values.environment.debridcollector.realDebridApiKey | b64enc }}'
POSTGRES_USER: '{{ .Values.environment.postgres.user | b64enc }}'
POSTGRES_PASSWORD: '{{ .Values.environment.postgres.password | b64enc }}'
RABBITMQ_PASSWORD: '{{ .Values.environment.lavinmq.password | b64enc }}'
RABBITMQ_USER: '{{ .Values.environment.lavinmq.user | b64enc }}'

View File

@@ -0,0 +1,25 @@
{{ if .Values.infrastructure.lavinmq.enabled }}
apiVersion: v1
kind: Service
metadata:
name: '{{ .Release.Name }}-lavinmq'
labels:
component: lavinmq
project: '{{ .Chart.Name }}'
release: '{{ .Release.Name }}'
spec:
selector:
component: lavinmq
release: '{{ .Release.Name }}'
type: ClusterIP
ports:
- name: amqp
protocol: TCP
port: 5672
targetPort: 5672
- name: http
protocol: TCP
port: 15672
targetPort: 15672
- name: prometheus
protocol: TCP
port: 15692
targetPort: 15692
{{- end -}}

View File

@@ -0,0 +1,60 @@
{{ if .Values.infrastructure.lavinmq.enabled }}
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: '{{ .Release.Name }}-lavinmq'
labels:
component: lavinmq
project: '{{ .Chart.Name }}'
release: '{{ .Release.Name }}'
annotations:
"helm.sh/hook": post-install,post-upgrade
"helm.sh/hook-weight": "0"
spec:
serviceName: '{{ .Release.Name }}-lavinmq'
replicas: 1
selector:
matchLabels:
component: lavinmq
release: '{{ .Release.Name }}'
template:
metadata:
labels:
component: lavinmq
release: '{{ .Release.Name }}'
spec:
containers:
- name: lavinmq
image: '{{ .Values.infrastructure.lavinmq.image }}:{{ .Values.infrastructure.lavinmq.tag }}'
ports:
- name: lavinmq
containerPort: 5672
- name: lavinmq-15672
containerPort: 15672
- name: lavinmq-15692
containerPort: 15692
envFrom:
- configMapRef:
name: '{{ .Release.Name }}-config'
- secretRef:
name: '{{ .Release.Name }}-secrets'
volumeMounts:
- mountPath: /var/lib/lavinmq
name: lavinmq
livenessProbe:
exec:
command:
- lavinmqctl
- status
periodSeconds: 10
initialDelaySeconds: 10
successThreshold: 1
failureThreshold: 3
volumeClaimTemplates:
- metadata:
name: lavinmq
spec:
accessModes: [ "ReadWriteOnce" ]
resources:
requests:
storage: '{{ .Values.persistence.lavinmq.capacity }}'
{{- end -}}

View File

@@ -0,0 +1,19 @@
{{ if .Values.infrastructure.postgres.enabled }}
apiVersion: v1
kind: Service
metadata:
name: '{{ .Release.Name }}-postgres'
labels:
component: postgres
project: '{{ .Chart.Name }}'
release: '{{ .Release.Name }}'
spec:
selector:
component: postgres
release: '{{ .Release.Name }}'
type: ClusterIP
ports:
- protocol: TCP
port: 5432
targetPort: 5432
{{- end -}}

View File

@@ -0,0 +1,58 @@
{{ if .Values.infrastructure.postgres.enabled }}
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: '{{ .Release.Name }}-postgres'
labels:
component: postgres
project: '{{ .Chart.Name }}'
release: '{{ .Release.Name }}'
annotations:
"helm.sh/hook": post-install,post-upgrade
"helm.sh/hook-weight": "0"
spec:
serviceName: '{{ .Release.Name }}-postgres'
replicas: 1
selector:
matchLabels:
component: postgres
release: '{{ .Release.Name }}'
template:
metadata:
labels:
component: postgres
release: '{{ .Release.Name }}'
spec:
containers:
- name: postgres
image: '{{ .Values.infrastructure.postgres.image }}:{{ .Values.infrastructure.postgres.tag }}'
ports:
- name: postgres
containerPort: 5432
envFrom:
- configMapRef:
name: '{{ .Release.Name }}-config'
- secretRef:
name: '{{ .Release.Name }}-secrets'
volumeMounts:
- mountPath: /var/lib/postgresql/data
name: postgres
livenessProbe:
exec:
command:
- sh
- -c
- pg_isready -h localhost -U $POSTGRES_USER
periodSeconds: 10
initialDelaySeconds: 10
successThreshold: 1
failureThreshold: 3
volumeClaimTemplates:
- metadata:
name: postgres
spec:
accessModes: [ "ReadWriteOnce" ]
resources:
requests:
storage: '{{ .Values.persistence.postgres.capacity }}'
{{- end -}}

View File

@@ -0,0 +1,57 @@
{{ if .Values.knightcrawler.qbitcollector.enabled }}
apiVersion: apps/v1
kind: Deployment
metadata:
name: '{{ .Release.Name }}-qbittorrent'
labels:
component: qbittorrent
project: '{{ .Chart.Name }}'
release: '{{ .Release.Name }}'
annotations:
"helm.sh/hook": post-install,post-upgrade
"helm.sh/hook-weight": "0"
spec:
replicas: 1
selector:
matchLabels:
component: qbittorrent
release: '{{ .Release.Name }}'
template:
metadata:
labels:
component: qbittorrent
release: '{{ .Release.Name }}'
spec:
containers:
- name: qbittorrent
image: '{{ .Values.infrastructure.qbittorrent.image }}:{{ .Values.infrastructure.qbittorrent.tag }}'
ports:
- name: qbittorrent
containerPort: 6881
- name: qbittorrent-6881
containerPort: 6881
protocol: UDP
- name: qbittorrent-8080
containerPort: 8080
env:
- name: PUID
value: '{{ .Values.environment.qbittorrent.puid }}'
- name: PGID
value: '{{ .Values.environment.qbittorrent.pgid }}'
- name: TORRENTING_PORT
value: '{{ .Values.environment.qbittorrent.torrentingPort }}'
- name: WEBUI_PORT
value: '{{ .Values.environment.qbittorrent.webuiPort }}'
envFrom:
- configMapRef:
name: '{{ .Release.Name }}-config'
- secretRef:
name: '{{ .Release.Name }}-secrets'
livenessProbe:
exec:
command:
- curl
- --fail
- http://localhost:8080
periodSeconds: 10
initialDelaySeconds: 10
successThreshold: 1
failureThreshold: 3
{{- end -}}

View File

@@ -0,0 +1,25 @@
{{ if .Values.knightcrawler.qbitcollector.enabled }}
apiVersion: v1
kind: Service
metadata:
name: '{{ .Release.Name }}-qbittorrent'
labels:
component: qbittorrent
project: '{{ .Chart.Name }}'
release: '{{ .Release.Name }}'
spec:
selector:
component: qbittorrent
release: '{{ .Release.Name }}'
type: ClusterIP
ports:
- name: torrent-tcp
protocol: TCP
port: 6881
targetPort: 6881
- name: torrent-udp
protocol: UDP
port: 6881
targetPort: 6881
- name: webui
protocol: TCP
port: 8080
targetPort: 8080
{{- end -}}

View File

@@ -0,0 +1,19 @@
{{ if .Values.infrastructure.redis.enabled }}
apiVersion: v1
kind: Service
metadata:
name: '{{ .Release.Name }}-redis'
labels:
component: redis
project: '{{ .Chart.Name }}'
release: '{{ .Release.Name }}'
spec:
selector:
component: redis
release: '{{ .Release.Name }}'
type: ClusterIP
ports:
- protocol: TCP
port: 6379
targetPort: 6379
{{- end -}}

View File

@@ -0,0 +1,56 @@
{{ if .Values.infrastructure.redis.enabled }}
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: '{{ .Release.Name }}-redis'
labels:
component: redis
project: '{{ .Chart.Name }}'
release: '{{ .Release.Name }}'
annotations:
"helm.sh/hook": post-install,post-upgrade
"helm.sh/hook-weight": "0"
spec:
serviceName: '{{ .Release.Name }}-redis'
replicas: 1
selector:
matchLabels:
component: redis
release: '{{ .Release.Name }}'
template:
metadata:
labels:
component: redis
release: '{{ .Release.Name }}'
spec:
containers:
- name: redis
image: '{{ .Values.infrastructure.redis.image }}:{{ .Values.infrastructure.redis.tag }}'
ports:
- name: redis
containerPort: 6379
envFrom:
- configMapRef:
name: '{{ .Release.Name }}-config'
- secretRef:
name: '{{ .Release.Name }}-secrets'
volumeMounts:
- mountPath: /data
name: redis
livenessProbe:
exec:
command:
- redis-cli
- ping
periodSeconds: 10
initialDelaySeconds: 10
successThreshold: 1
failureThreshold: 3
volumeClaimTemplates:
- metadata:
name: redis
spec:
accessModes: [ "ReadWriteOnce" ]
resources:
requests:
storage: '{{ .Values.persistence.redis.capacity }}'
{{- end -}}

View File

@@ -0,0 +1,28 @@
apiVersion: batch/v1
kind: Job
metadata:
name: '{{ .Release.Name }}-metadata'
labels:
component: metadata
project: '{{ .Chart.Name }}'
release: '{{ .Release.Name }}'
annotations:
"helm.sh/hook": post-install,post-upgrade
"helm.sh/hook-weight": "2"
"helm.sh/hook-delete-policy": hook-succeeded
spec:
template:
metadata:
labels:
component: metadata
release: '{{ .Release.Name }}'
spec:
restartPolicy: OnFailure
containers:
- name: metadata
image: '{{ .Values.knightcrawler.metadata.image }}{{ if ne .Values.knightcrawler.globalImageTagOverride "" }}:{{ .Values.knightcrawler.globalImageTagOverride }}{{else}}:{{ .Values.knightcrawler.metadata.tag}}{{ end }}'
envFrom:
- configMapRef:
name: '{{ .Release.Name }}-config'
- secretRef:
name: '{{ .Release.Name }}-secrets'

View File

@@ -0,0 +1,28 @@
apiVersion: batch/v1
kind: Job
metadata:
name: '{{ .Release.Name }}-migrator'
labels:
component: migrator
project: '{{ .Chart.Name }}'
release: '{{ .Release.Name }}'
annotations:
"helm.sh/hook": post-install,post-upgrade
"helm.sh/hook-weight": "1"
"helm.sh/hook-delete-policy": hook-succeeded
spec:
template:
metadata:
labels:
component: migrator
release: '{{ .Release.Name }}'
spec:
restartPolicy: OnFailure
containers:
- name: migrator
image: '{{ .Values.knightcrawler.migrator.image }}{{ if ne .Values.knightcrawler.globalImageTagOverride "" }}:{{ .Values.knightcrawler.globalImageTagOverride }}{{else}}:{{ .Values.knightcrawler.migrator.tag}}{{ end }}'
envFrom:
- configMapRef:
name: '{{ .Release.Name }}-config'
- secretRef:
name: '{{ .Release.Name }}-secrets'

View File

@@ -0,0 +1,35 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: '{{ .Release.Name }}-addon'
labels:
component: addon
project: '{{ .Chart.Name }}'
release: '{{ .Release.Name }}'
annotations:
"helm.sh/hook": post-install,post-upgrade
"helm.sh/hook-weight": "4"
spec:
replicas: {{ .Values.knightcrawler.addon.replicas }}
selector:
matchLabels:
component: addon
release: '{{ .Release.Name }}'
template:
metadata:
labels:
component: addon
release: '{{ .Release.Name }}'
spec:
containers:
- name: addon
image: '{{ .Values.knightcrawler.addon.image }}{{ if ne .Values.knightcrawler.globalImageTagOverride "" }}:{{ .Values.knightcrawler.globalImageTagOverride }}{{else}}:{{ .Values.knightcrawler.addon.tag}}{{ end }}'
ports:
- name: addon
containerPort: 7000
envFrom:
- configMapRef:
name: '{{ .Release.Name }}-config'
- secretRef:
name: '{{ .Release.Name }}-secrets'

View File

@@ -0,0 +1,32 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: '{{ .Release.Name }}-consumer'
labels:
component: consumer
project: '{{ .Chart.Name }}'
release: '{{ .Release.Name }}'
annotations:
"helm.sh/hook": post-install,post-upgrade
"helm.sh/hook-weight": "4"
spec:
replicas: {{ .Values.knightcrawler.consumer.replicas }}
selector:
matchLabels:
component: consumer
release: '{{ .Release.Name }}'
template:
metadata:
labels:
component: consumer
release: '{{ .Release.Name }}'
spec:
containers:
- name: consumer
image: '{{ .Values.knightcrawler.consumer.image }}{{ if ne .Values.knightcrawler.globalImageTagOverride "" }}:{{ .Values.knightcrawler.globalImageTagOverride }}{{else}}:{{ .Values.knightcrawler.consumer.tag}}{{ end }}'
envFrom:
- configMapRef:
name: '{{ .Release.Name }}-config'
- secretRef:
name: '{{ .Release.Name }}-secrets'

View File

@@ -0,0 +1,31 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: '{{ .Release.Name }}-debridcollector'
labels:
component: debridcollector
project: '{{ .Chart.Name }}'
release: '{{ .Release.Name }}'
annotations:
"helm.sh/hook": post-install,post-upgrade
"helm.sh/hook-weight": "4"
spec:
replicas: {{ .Values.knightcrawler.debridcollector.replicas }}
selector:
matchLabels:
component: debridcollector
release: '{{ .Release.Name }}'
template:
metadata:
labels:
component: debridcollector
release: '{{ .Release.Name }}'
spec:
containers:
- name: debridcollector
image: '{{ .Values.knightcrawler.debridcollector.image }}{{ if ne .Values.knightcrawler.globalImageTagOverride "" }}:{{ .Values.knightcrawler.globalImageTagOverride }}{{else}}:{{ .Values.knightcrawler.debridcollector.tag}}{{ end }}'
envFrom:
- configMapRef:
name: '{{ .Release.Name }}-config'
- secretRef:
name: '{{ .Release.Name }}-secrets'

View File

@@ -0,0 +1,31 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: '{{ .Release.Name }}-producer'
labels:
component: producer
project: '{{ .Chart.Name }}'
release: '{{ .Release.Name }}'
annotations:
"helm.sh/hook": post-install,post-upgrade
"helm.sh/hook-weight": "4"
spec:
replicas: {{ .Values.knightcrawler.producer.replicas }}
selector:
matchLabels:
component: producer
release: '{{ .Release.Name }}'
template:
metadata:
labels:
component: producer
release: '{{ .Release.Name }}'
spec:
containers:
- name: producer
image: '{{ .Values.knightcrawler.producer.image }}{{ if ne .Values.knightcrawler.globalImageTagOverride "" }}:{{ .Values.knightcrawler.globalImageTagOverride }}{{else}}:{{ .Values.knightcrawler.producer.tag}}{{ end }}'
envFrom:
- configMapRef:
name: '{{ .Release.Name }}-config'
- secretRef:
name: '{{ .Release.Name }}-secrets'

View File

@@ -0,0 +1,33 @@
{{ if .Values.knightcrawler.qbitcollector.enabled }}
apiVersion: apps/v1
kind: Deployment
metadata:
name: '{{ .Release.Name }}-qbitcollector'
labels:
component: qbitcollector
project: '{{ .Chart.Name }}'
release: '{{ .Release.Name }}'
annotations:
"helm.sh/hook": post-install,post-upgrade
"helm.sh/hook-weight": "4"
spec:
replicas: {{ .Values.knightcrawler.qbitcollector.replicas }}
selector:
matchLabels:
component: qbitcollector
release: '{{ .Release.Name }}'
template:
metadata:
labels:
component: qbitcollector
release: '{{ .Release.Name }}'
spec:
containers:
- name: qbitcollector
image: '{{ .Values.knightcrawler.qbitcollector.image }}{{ if ne .Values.knightcrawler.globalImageTagOverride "" }}:{{ .Values.knightcrawler.globalImageTagOverride }}{{else}}:{{ .Values.knightcrawler.qbitcollector.tag}}{{ end }}'
envFrom:
- configMapRef:
name: '{{ .Release.Name }}-config'
- secretRef:
name: '{{ .Release.Name }}-secrets'
{{- end -}}

View File

@@ -0,0 +1,17 @@
apiVersion: v1
kind: Service
metadata:
name: '{{ .Release.Name }}-addon'
labels:
component: addon
project: '{{ .Chart.Name }}'
release: '{{ .Release.Name }}'
spec:
selector:
component: addon
release: '{{ .Release.Name }}'
type: ClusterIP
ports:
- protocol: TCP
port: 7000
targetPort: 7000

deployment/k8s/values.yaml Normal file
View File

@@ -0,0 +1,100 @@
knightcrawler:
debug: false
globalImageTagOverride: ""
addon:
image: "gabisonfire/knightcrawler-addon"
tag: "2.0.17"
replicas: 1
consumer:
image: "gabisonfire/knightcrawler-consumer"
tag: "2.0.17"
replicas: 1
metadata:
image: "gabisonfire/knightcrawler-metadata"
tag: "2.0.17"
replicas: 1
migrator:
image: "gabisonfire/knightcrawler-migrator"
tag: "2.0.17"
replicas: 1
debridcollector:
image: "gabisonfire/knightcrawler-debrid-collector"
tag: "2.0.17"
enabled: true
replicas: 1
qbitcollector:
image: "gabisonfire/knightcrawler-qbit-collector"
tag: "2.0.17"
enabled: false
replicas: 1
producer:
image: "gabisonfire/knightcrawler-producer"
tag: "2.0.17"
replicas: 1
infrastructure:
lavinmq:
image: "cloudamqp/lavinmq"
tag: "latest"
enabled: true
postgres:
image: "postgres"
tag: "latest"
enabled: true
redis:
image: "redis/redis-stack-server"
tag: "latest"
enabled: true
qbittorrent:
image: "lscr.io/linuxserver/qbittorrent"
tag: "latest"
environment:
redis:
external: false
host: ""
port: "6379"
extra: "abortConnect=false,allowAdmin=true"
postgres:
external: false
host: ""
port: "5432"
dbName: "knightcrawler"
user: "postgres"
password: "postgres"
lavinmq:
external: false
host: ""
user: "guest"
password: "guest"
qbitcollector:
qbitHost: "http://qbittorrent:8080"
trackersUrl: "https://raw.githubusercontent.com/ngosang/trackerslist/master/trackers_all_http.txt"
debridcollector:
realDebridApiKey: ""
producer:
githubPat: ""
queueName: "ingested"
durable: true
maxPublishBatchSize: 500
maxQueueSize: 0
publishIntervalSeconds: 10
metadata:
insertBatchSize: 50000
qbittorrent:
pgid: "1000"
puid: "1000"
torrentingPort: "6881"
webuiPort: "8080"
persistence:
storageClassName: ""
redis:
capacity: 1Gi
postgres:
capacity: 1Gi
lavinmq:
capacity: 1Gi
shared:
timezone: "London/Europe"

View File

@@ -14,13 +14,12 @@ const Torrent = database.define('torrent',
{
infoHash: { type: Sequelize.STRING(64), primaryKey: true },
provider: { type: Sequelize.STRING(32), allowNull: false },
torrentId: { type: Sequelize.STRING(128) },
ingestedTorrentId: { type: Sequelize.BIGINT, allowNull: false },
title: { type: Sequelize.STRING(256), allowNull: false },
size: { type: Sequelize.BIGINT },
type: { type: Sequelize.STRING(16), allowNull: false },
uploadDate: { type: Sequelize.DATE, allowNull: false },
seeders: { type: Sequelize.SMALLINT },
trackers: { type: Sequelize.STRING(4096) },
languages: { type: Sequelize.STRING(4096) },
resolution: { type: Sequelize.STRING(16) }
}

View File

@@ -1,21 +1,21 @@
namespace DebridCollector.Features.Worker;
[EntityName("perform-metadata-request")]
[EntityName("perform-metadata-request-debrid-collector")]
public record PerformMetadataRequest(Guid CorrelationId, string InfoHash) : CorrelatedBy<Guid>;
[EntityName("torrent-metadata-response")]
[EntityName("torrent-metadata-response-debrid-collector")]
public record GotMetadata(TorrentMetadataResponse Metadata) : CorrelatedBy<Guid>
{
public Guid CorrelationId { get; init; } = Metadata.CorrelationId;
}
[EntityName("write-metadata")]
[EntityName("write-metadata-debrid-collector")]
public record WriteMetadata(Torrent Torrent, TorrentMetadataResponse Metadata, string ImdbId) : CorrelatedBy<Guid>
{
public Guid CorrelationId { get; init; } = Metadata.CorrelationId;
}
[EntityName("metadata-written")]
[EntityName("metadata-written-debrid-colloctor")]
public record MetadataWritten(TorrentMetadataResponse Metadata) : CorrelatedBy<Guid>
{
public Guid CorrelationId { get; init; } = Metadata.CorrelationId;

View File

@@ -72,7 +72,7 @@ public class BasicsFile(ILogger<BasicsFile> logger, ImdbDbService dbService): IF
Category = csv.GetField(1),
Title = csv.GetField(2),
Adult = isAdultSet && adult == 1,
Year = csv.GetField(5),
Year = csv.GetField(5) == @"\N" ? 0 : int.Parse(csv.GetField(5)),
};
if (cancellationToken.IsCancellationRequested)

View File

@@ -6,5 +6,5 @@ public class ImdbBasicEntry
public string? Category { get; set; }
public string? Title { get; set; }
public bool Adult { get; set; }
public string? Year { get; set; }
public int Year { get; set; }
}

View File

@@ -17,7 +17,7 @@ public class ImdbDbService(PostgresConfiguration configuration, ILogger<ImdbDbSe
await writer.WriteAsync(entry.ImdbId, NpgsqlDbType.Text);
await writer.WriteAsync(entry.Category, NpgsqlDbType.Text);
await writer.WriteAsync(entry.Title, NpgsqlDbType.Text);
await writer.WriteAsync(entry.Year, NpgsqlDbType.Text);
await writer.WriteAsync(entry.Year, NpgsqlDbType.Integer);
await writer.WriteAsync(entry.Adult, NpgsqlDbType.Boolean);
}
catch (Npgsql.PostgresException e)
@@ -116,7 +116,7 @@ public class ImdbDbService(PostgresConfiguration configuration, ILogger<ImdbDbSe
ExecuteCommandAsync(
async connection =>
{
await using var command = new NpgsqlCommand($"CREATE INDEX title_gist ON {TableNames.MetadataTable} USING gist(title gist_trgm_ops)", connection);
await using var command = new NpgsqlCommand($"CREATE INDEX title_gin ON {TableNames.MetadataTable} USING gin(title gin_trgm_ops)", connection);
await command.ExecuteNonQueryAsync();
}, "Error while creating index on imdb_metadata table");
@@ -125,7 +125,7 @@ public class ImdbDbService(PostgresConfiguration configuration, ILogger<ImdbDbSe
async connection =>
{
logger.LogInformation("Dropping Trigrams index if it exists already");
await using var dropCommand = new NpgsqlCommand("DROP INDEX if exists title_gist", connection);
await using var dropCommand = new NpgsqlCommand("DROP INDEX if exists title_gin", connection);
await dropCommand.ExecuteNonQueryAsync();
}, $"Error while dropping index on {TableNames.MetadataTable} table");

View File

@@ -0,0 +1,35 @@
-- Purpose: Change the year column to integer and add a search function that allows for searching by year.
ALTER TABLE imdb_metadata
ALTER COLUMN year TYPE integer USING (CASE WHEN year = '\N' THEN 0 ELSE year::integer END);
-- Remove the old search function
DROP FUNCTION IF EXISTS search_imdb_meta(TEXT, TEXT, TEXT, INT);
-- Add the new search function that allows for searching by year with a plus/minus one year range
CREATE OR REPLACE FUNCTION search_imdb_meta(search_term TEXT, category_param TEXT DEFAULT NULL, year_param INT DEFAULT NULL, limit_param INT DEFAULT 10)
RETURNS TABLE(imdb_id character varying(16), title character varying(1000),category character varying(50),year INT, score REAL) AS $$
BEGIN
SET pg_trgm.similarity_threshold = 0.9;
RETURN QUERY
SELECT imdb_metadata.imdb_id, imdb_metadata.title, imdb_metadata.category, imdb_metadata.year, similarity(imdb_metadata.title, search_term) as score
FROM imdb_metadata
WHERE (imdb_metadata.title % search_term)
AND (imdb_metadata.adult = FALSE)
AND (category_param IS NULL OR imdb_metadata.category = category_param)
AND (year_param IS NULL OR imdb_metadata.year BETWEEN year_param - 1 AND year_param + 1)
ORDER BY score DESC
LIMIT limit_param;
END; $$
LANGUAGE plpgsql;
-- Drop the old indexes
DROP INDEX IF EXISTS idx_imdb_metadata_adult;
DROP INDEX IF EXISTS idx_imdb_metadata_category;
DROP INDEX IF EXISTS idx_imdb_metadata_year;
DROP INDEX IF EXISTS title_gist;
-- Add indexes for the new columns
CREATE INDEX idx_imdb_metadata_adult ON imdb_metadata(adult);
CREATE INDEX idx_imdb_metadata_category ON imdb_metadata(category);
CREATE INDEX idx_imdb_metadata_year ON imdb_metadata(year);
CREATE INDEX title_gin ON imdb_metadata USING gin(title gin_trgm_ops);
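For context, a minimal sketch of the query shape the new GIN trigram index serves (the % operator and similarity() come from pg_trgm; the table and column names are those above, and the search string is illustrative):
-- Find near-matches on title; title_gin lets the % filter use an index scan
SELECT imdb_id, title, similarity(title, 'the godfather') AS score
FROM imdb_metadata
WHERE title % 'the godfather'
ORDER BY score DESC
LIMIT 10;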

View File

@@ -0,0 +1,40 @@
-- Purpose: Add the jsonb column to the ingested_torrents table to store the response from RTN
ALTER TABLE ingested_torrents
ADD COLUMN IF NOT EXISTS rtn_response jsonb;
-- Purpose: Drop torrentId column from torrents table
ALTER TABLE torrents
DROP COLUMN IF EXISTS "torrentId";
-- Purpose: Drop Trackers column from torrents table
ALTER TABLE torrents
DROP COLUMN IF EXISTS "trackers";
-- Purpose: Create a foreign key relationship, if it does not already exist, between torrents and the source table ingested_torrents, but do not cascade on delete.
ALTER TABLE torrents
ADD COLUMN IF NOT EXISTS "ingestedTorrentId" bigint;
DO $$
BEGIN
IF EXISTS (
SELECT 1
FROM information_schema.table_constraints
WHERE constraint_name = 'fk_torrents_info_hash'
)
THEN
ALTER TABLE torrents
DROP CONSTRAINT fk_torrents_info_hash;
END IF;
END $$;
ALTER TABLE torrents
ADD CONSTRAINT fk_torrents_info_hash
FOREIGN KEY ("ingestedTorrentId")
REFERENCES ingested_torrents("id")
ON DELETE NO ACTION;
UPDATE torrents
SET "ingestedTorrentId" = ingested_torrents."id"
FROM ingested_torrents
WHERE torrents."infoHash" = ingested_torrents."info_hash"
AND torrents."provider" = ingested_torrents."source";

View File

@@ -0,0 +1,55 @@
DROP FUNCTION IF EXISTS kc_maintenance_reconcile_dmm_imdb_ids();
CREATE OR REPLACE FUNCTION kc_maintenance_reconcile_dmm_imdb_ids()
RETURNS INTEGER AS $$
DECLARE
rec RECORD;
imdb_rec RECORD;
rows_affected INTEGER := 0;
BEGIN
RAISE NOTICE 'Starting Reconciliation of DMM IMDB Ids...';
FOR rec IN
SELECT
it."id" as "ingestion_id",
t."infoHash",
it."category" as "ingestion_category",
f."id" as "file_Id",
f."title" as "file_Title",
(rtn_response->>'raw_title')::text as "raw_title",
(rtn_response->>'parsed_title')::text as "parsed_title",
(rtn_response->>'year')::int as "year"
FROM torrents t
JOIN ingested_torrents it ON t."ingestedTorrentId" = it."id"
JOIN files f ON t."infoHash" = f."infoHash"
WHERE t."provider" = 'DMM'
LOOP
RAISE NOTICE 'Processing record with file_Id: %', rec."file_Id";
FOR imdb_rec IN
SELECT * FROM search_imdb_meta(
rec."parsed_title",
CASE
WHEN rec."ingestion_category" = 'tv' THEN 'tvSeries'
WHEN rec."ingestion_category" = 'movies' THEN 'movie'
END,
CASE
WHEN rec."year" = 0 THEN NULL
ELSE rec."year" END,
1)
LOOP
IF imdb_rec IS NOT NULL THEN
RAISE NOTICE 'Updating file_Id: % with imdbId: %, parsed title: %, imdb title: %', rec."file_Id", imdb_rec."imdb_id", rec."parsed_title", imdb_rec."title";
UPDATE "files"
SET "imdbId" = imdb_rec."imdb_id"
WHERE "id" = rec."file_Id";
rows_affected := rows_affected + 1;
ELSE
RAISE NOTICE 'No IMDB ID found for file_Id: %, parsed title: %, setting imdbId to NULL', rec."file_Id", rec."parsed_title";
UPDATE "files"
SET "imdbId" = NULL
WHERE "id" = rec."file_Id";
END IF;
END LOOP;
END LOOP;
RAISE NOTICE 'Finished reconciliation. Total rows affected: %', rows_affected;
RETURN rows_affected;
END;
$$ LANGUAGE plpgsql;
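Running the reconciliation is a single call; the function returns the number of files it updated:
SELECT kc_maintenance_reconcile_dmm_imdb_ids() AS rows_affected;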

View File

@@ -0,0 +1,19 @@
-- Remove the old search function
DROP FUNCTION IF EXISTS search_imdb_meta(TEXT, TEXT, INT, INT);
-- Add the new search function that allows for searching by year with a plus/minus one year range
CREATE OR REPLACE FUNCTION search_imdb_meta(search_term TEXT, category_param TEXT DEFAULT NULL, year_param INT DEFAULT NULL, limit_param INT DEFAULT 10, similarity_threshold REAL DEFAULT 0.95)
RETURNS TABLE(imdb_id character varying(16), title character varying(1000),category character varying(50),year INT, score REAL) AS $$
BEGIN
SET pg_trgm.similarity_threshold = similarity_threshold;
RETURN QUERY
SELECT imdb_metadata.imdb_id, imdb_metadata.title, imdb_metadata.category, imdb_metadata.year, similarity(imdb_metadata.title, search_term) as score
FROM imdb_metadata
WHERE (imdb_metadata.title % search_term)
AND (imdb_metadata.adult = FALSE)
AND (category_param IS NULL OR imdb_metadata.category = category_param)
AND (year_param IS NULL OR imdb_metadata.year BETWEEN year_param - 1 AND year_param + 1)
ORDER BY score DESC
LIMIT limit_param;
END; $$
LANGUAGE plpgsql;
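A note on the SET line above: SET does not evaluate PL/pgSQL variables, so the parameter receives the literal string 'similarity_threshold' and the call fails at runtime. That appears to be what commit #191 ("patch the query with execute") and the follow-up migration below address:
-- fails: the GUC receives the identifier as a string, not the variable's value
SET pg_trgm.similarity_threshold = similarity_threshold;
-- works: interpolate the value as a properly quoted literal
EXECUTE format('SET pg_trgm.similarity_threshold = %L', similarity_threshold);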

View File

@@ -0,0 +1,19 @@
-- Remove the old search function
DROP FUNCTION IF EXISTS search_imdb_meta(TEXT, TEXT, INT, INT);
-- Add the new search function that allows for searching by year with a plus/minus one year range
CREATE OR REPLACE FUNCTION search_imdb_meta(search_term TEXT, category_param TEXT DEFAULT NULL, year_param INT DEFAULT NULL, limit_param INT DEFAULT 10, similarity_threshold REAL DEFAULT 0.95)
RETURNS TABLE(imdb_id character varying(16), title character varying(1000),category character varying(50),year INT, score REAL) AS $$
BEGIN
EXECUTE format('SET pg_trgm.similarity_threshold = %L', similarity_threshold);
RETURN QUERY
SELECT imdb_metadata.imdb_id, imdb_metadata.title, imdb_metadata.category, imdb_metadata.year, similarity(imdb_metadata.title, search_term) as score
FROM imdb_metadata
WHERE (imdb_metadata.title % search_term)
AND (imdb_metadata.adult = FALSE)
AND (category_param IS NULL OR imdb_metadata.category = category_param)
AND (year_param IS NULL OR imdb_metadata.year BETWEEN year_param - 1 AND year_param + 1)
ORDER BY score DESC
LIMIT limit_param;
END; $$
LANGUAGE plpgsql;
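A usage sketch for the patched function; the title and year are illustrative, while the category values 'movie' and 'tvSeries' match those used elsewhere in this changeset:
-- Best match for a movie within plus/minus one year, at the default 0.95 threshold
SELECT * FROM search_imdb_meta('inception', 'movie', 2010, 1);
-- Same search with five results and a looser threshold
SELECT * FROM search_imdb_meta('inception', 'movie', 2010, 5, 0.85);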

View File

@@ -1,2 +1,3 @@
remove-item -recurse -force ../src/python
mkdir -p ../src/python
pip install --force-reinstall rank-torrent-name==0.1.6 -t ../src/python/
pip install -r ../src/requirements.txt -t ../src/python/

View File

@@ -1,4 +1,5 @@
#!/bin/bash
rm -rf ../src/python
mkdir -p ../src/python
pip install --force-reinstall rank-torrent-name==0.1.6 -t ../src/python/
python3 -m pip install -r ../src/requirements.txt -t ../src/python/

View File

@@ -13,13 +13,19 @@ FROM mcr.microsoft.com/dotnet/aspnet:8.0-alpine3.19
WORKDIR /app
ENV PYTHONUNBUFFERED=1
RUN apk add --update --no-cache python3=~3.11.8-r0 py3-pip && ln -sf python3 /usr/bin/python
COPY --from=build /src/out .
RUN rm -rf /app/python && mkdir -p /app/python
RUN pip3 install --force-reinstall rank-torrent-name==0.1.6 -t /app/python
RUN pip3 install -r /app/requirements.txt -t /app/python
RUN addgroup -S producer && adduser -S -G producer producer
USER producer
HEALTHCHECK --interval=30s --timeout=30s --start-period=5s --retries=3 \
CMD pgrep -f dotnet || exit 1

View File

@@ -1,5 +1,3 @@
using Microsoft.VisualBasic;
namespace Producer.Features.Crawlers.Dmm;
public partial class DebridMediaManagerCrawler(
@@ -12,7 +10,6 @@ public partial class DebridMediaManagerCrawler(
{
[GeneratedRegex("""<iframe src="https:\/\/debridmediamanager.com\/hashlist#(.*)"></iframe>""")]
private static partial Regex HashCollectionMatcher();
private LengthAwareRatioScorer _lengthAwareRatioScorer = new();
private const string DownloadBaseUrl = "https://raw.githubusercontent.com/debridmediamanager/hashlists/main";
protected override IReadOnlyDictionary<string, string> Mappings => new Dictionary<string, string>();
@@ -111,39 +108,38 @@ public partial class DebridMediaManagerCrawler(
return null;
}
var parsedTorrent = rankTorrentName.Parse(torrentTitle.CleanTorrentTitleForImdb());
var parsedTorrent = rankTorrentName.Parse(torrentTitle);
if (!parsedTorrent.Success)
{
return null;
}
var (cached, cachedResult) = await CheckIfInCacheAndReturn(parsedTorrent.ParsedTitle);
var torrentType = parsedTorrent.Response.IsMovie ? "movie" : "tvSeries";
var cacheKey = GetCacheKey(torrentType, parsedTorrent.Response.ParsedTitle, parsedTorrent.Response.Year);
var (cached, cachedResult) = await CheckIfInCacheAndReturn(cacheKey);
if (cached)
{
logger.LogInformation("[{ImdbId}] Found cached imdb result for {Title}", cachedResult.ImdbId, parsedTorrent.ParsedTitle);
logger.LogInformation("[{ImdbId}] Found cached imdb result for {Title}", cachedResult.ImdbId, parsedTorrent.Response.ParsedTitle);
return MapToTorrent(cachedResult, bytesElement, hashElement, parsedTorrent);
}
var year = parsedTorrent.Year != 0 ? parsedTorrent.Year.ToString() : null;
var imdbEntries = await Storage.FindImdbMetadata(parsedTorrent.ParsedTitle, parsedTorrent.IsMovie ? "movies" : "tv", year);
int? year = parsedTorrent.Response.Year != 0 ? parsedTorrent.Response.Year : null;
var imdbEntry = await Storage.FindImdbMetadata(parsedTorrent.Response.ParsedTitle, torrentType, year);
if (imdbEntries.Count == 0)
if (imdbEntry is null)
{
return null;
}
var scoredTitles = await ScoreTitles(parsedTorrent, imdbEntries);
await AddToCache(cacheKey, imdbEntry);
if (!scoredTitles.Success)
{
return null;
}
logger.LogInformation("[{ImdbId}] Found best match for {Title}: {BestMatch} with score {Score}", imdbEntry.ImdbId, parsedTorrent.Response.ParsedTitle, imdbEntry.Title, imdbEntry.Score);
logger.LogInformation("[{ImdbId}] Found best match for {Title}: {BestMatch} with score {Score}", scoredTitles.BestMatch.Value.ImdbId, parsedTorrent.ParsedTitle, scoredTitles.BestMatch.Value.Title, scoredTitles.BestMatch.Score);
return MapToTorrent(scoredTitles.BestMatch.Value, bytesElement, hashElement, parsedTorrent);
return MapToTorrent(imdbEntry, bytesElement, hashElement, parsedTorrent);
}
private IngestedTorrent MapToTorrent(ImdbEntry result, JsonElement bytesElement, JsonElement hashElement, ParseTorrentTitleResponse parsedTorrent) =>
@@ -156,45 +152,24 @@ public partial class DebridMediaManagerCrawler(
InfoHash = hashElement.ToString(),
Seeders = 0,
Leechers = 0,
Category = parsedTorrent.IsMovie switch
{
true => "movies",
false => "tv",
},
Category = AssignCategory(result),
RtnResponse = parsedTorrent.Response.ToJson(),
};
private async Task<(bool Success, ExtractedResult<ImdbEntry>? BestMatch)> ScoreTitles(ParseTorrentTitleResponse parsedTorrent, List<ImdbEntry> imdbEntries)
{
var lowerCaseTitle = parsedTorrent.ParsedTitle.ToLowerInvariant();
// Scoring directly operates on the List<ImdbEntry>, no need for lookup table.
var scoredResults = Process.ExtractAll(new(){Title = lowerCaseTitle}, imdbEntries, x => x.Title?.ToLowerInvariant(), scorer: _lengthAwareRatioScorer, cutoff: 90);
var best = scoredResults.MaxBy(x => x.Score);
if (best is null)
{
return (false, null);
}
await AddToCache(lowerCaseTitle, best);
return (true, best);
}
private Task AddToCache(string lowerCaseTitle, ExtractedResult<ImdbEntry> best)
private Task AddToCache(string cacheKey, ImdbEntry best)
{
var cacheOptions = new DistributedCacheEntryOptions
{
AbsoluteExpirationRelativeToNow = TimeSpan.FromDays(1),
};
return cache.SetStringAsync(lowerCaseTitle, JsonSerializer.Serialize(best.Value), cacheOptions);
return cache.SetStringAsync(cacheKey, JsonSerializer.Serialize(best), cacheOptions);
}
private async Task<(bool Success, ImdbEntry? Entry)> CheckIfInCacheAndReturn(string title)
private async Task<(bool Success, ImdbEntry? Entry)> CheckIfInCacheAndReturn(string cacheKey)
{
var cachedImdbId = await cache.GetStringAsync(title.ToLowerInvariant());
var cachedImdbId = await cache.GetStringAsync(cacheKey);
if (!string.IsNullOrEmpty(cachedImdbId))
{
@@ -234,4 +209,14 @@ public partial class DebridMediaManagerCrawler(
return (pageIngested, name);
}
private static string AssignCategory(ImdbEntry entry) =>
entry.Category.ToLower() switch
{
var category when string.Equals(category, "movie", StringComparison.OrdinalIgnoreCase) => "movies",
var category when string.Equals(category, "tvSeries", StringComparison.OrdinalIgnoreCase) => "tv",
_ => "unknown",
};
private static string GetCacheKey(string category, string title, int year) => $"{category.ToLowerInvariant()}:{year}:{title.ToLowerInvariant()}";
}

View File

@@ -33,6 +33,9 @@
<None Include="Configuration\*.json">
<CopyToOutputDirectory>Always</CopyToOutputDirectory>
</None>
<None Update="requirements.txt">
<CopyToOutputDirectory>Always</CopyToOutputDirectory>
</None>
</ItemGroup>
<ItemGroup Condition="'$(Configuration)' == 'Debug'">

View File

@@ -0,0 +1 @@
rank-torrent-name==0.1.8

View File

@@ -18,6 +18,7 @@ public static class ServiceCollectionExtensions
services.AddHttpClient();
services.AddSingleton<ITrackersService, TrackersService>();
services.AddHostedService<TrackersBackgroundService>();
services.AddHostedService<HousekeepingBackgroundService>();
return services;
}

View File

@@ -0,0 +1,52 @@
namespace QBitCollector.Features.Qbit;
public class HousekeepingBackgroundService(IQBittorrentClient client, ILogger<HousekeepingBackgroundService> logger) : BackgroundService
{
protected override async Task ExecuteAsync(CancellationToken stoppingToken)
{
logger.LogInformation("Service is Running.");
await DoWork();
using PeriodicTimer timer = new(TimeSpan.FromMinutes(2));
try
{
while (await timer.WaitForNextTickAsync(stoppingToken))
{
await DoWork();
}
}
catch (OperationCanceledException)
{
logger.LogInformation("Service stopping.");
}
}
private async Task DoWork()
{
try
{
logger.LogInformation("Cleaning Stale Entries in Qbit...");
var torrents = await client.GetTorrentListAsync();
foreach (var torrentInfo in torrents)
{
if (!(torrentInfo.AddedOn < DateTimeOffset.UtcNow.AddMinutes(-1)))
{
continue;
}
logger.LogInformation("Torrent [{InfoHash}] Identified as stale because was added at {AddedOn}", torrentInfo.Hash, torrentInfo.AddedOn);
await client.DeleteAsync(new[] {torrentInfo.Hash}, deleteDownloadedData: true);
logger.LogInformation("Cleaned up stale torrent: [{InfoHash}]", torrentInfo.Hash);
}
}
catch (Exception e)
{
logger.LogError(e, "Error cleaning up stale torrents this interval.");
}
}
}

View File

@@ -1,22 +1,24 @@
namespace QBitCollector.Features.Worker;
[EntityName("perform-metadata-request")]
[EntityName("perform-metadata-request-qbit-collector")]
public record PerformQbitMetadataRequest(Guid CorrelationId, string InfoHash) : CorrelatedBy<Guid>;
[EntityName("torrent-metadata-response")]
[EntityName("torrent-metadata-response-qbit-collector")]
public record GotQbitMetadata(QBitMetadataResponse Metadata) : CorrelatedBy<Guid>
{
public Guid CorrelationId { get; init; } = Metadata.CorrelationId;
}
[EntityName("write-metadata")]
[EntityName("write-metadata-qbit-collector")]
public record WriteQbitMetadata(Torrent Torrent, QBitMetadataResponse Metadata, string ImdbId) : CorrelatedBy<Guid>
{
public Guid CorrelationId { get; init; } = Metadata.CorrelationId;
}
[EntityName("metadata-written")]
[EntityName("metadata-written-qbit-collector")]
public record QbitMetadataWritten(QBitMetadataResponse Metadata) : CorrelatedBy<Guid>
{
public Guid CorrelationId { get; init; } = Metadata.CorrelationId;
public QBitMetadataResponse Metadata { get; init; } = Metadata;
}

View File

@@ -9,9 +9,9 @@ public class DapperDataStorage(PostgresConfiguration configuration, RabbitMqConf
const string query =
"""
INSERT INTO ingested_torrents
("name", "source", "category", "info_hash", "size", "seeders", "leechers", "imdb", "processed", "createdAt", "updatedAt")
("name", "source", "category", "info_hash", "size", "seeders", "leechers", "imdb", "processed", "createdAt", "updatedAt", "rtn_response")
VALUES
(@Name, @Source, @Category, @InfoHash, @Size, @Seeders, @Leechers, @Imdb, @Processed, @CreatedAt, @UpdatedAt)
(@Name, @Source, @Category, @InfoHash, @Size, @Seeders, @Leechers, @Imdb, @Processed, @CreatedAt, @UpdatedAt, @RtnResponse::jsonb)
ON CONFLICT (source, info_hash) DO NOTHING
""";
@@ -110,21 +110,21 @@ public class DapperDataStorage(PostgresConfiguration configuration, RabbitMqConf
public async Task<List<ImdbEntry>> GetImdbEntriesForRequests(int year, int batchSize, string? stateLastProcessedImdbId, CancellationToken cancellationToken = default) =>
await ExecuteCommandAsync(async connection =>
{
const string query = @"SELECT imdb_id AS ImdbId, title as Title, category as Category, year as Year, adult as Adult FROM imdb_metadata WHERE CAST(NULLIF(Year, '\N') AS INTEGER) <= @Year AND imdb_id > @LastProcessedImdbId ORDER BY ImdbId LIMIT @BatchSize";
const string query = @"SELECT imdb_id AS ImdbId, title as Title, category as Category, year as Year, adult as Adult FROM imdb_metadata WHERE Year <= @Year AND imdb_id > @LastProcessedImdbId ORDER BY ImdbId LIMIT @BatchSize";
var result = await connection.QueryAsync<ImdbEntry>(query, new { Year = year, LastProcessedImdbId = stateLastProcessedImdbId, BatchSize = batchSize });
return result.ToList();
}, "Error getting imdb metadata.", cancellationToken);
public async Task<List<ImdbEntry>> FindImdbMetadata(string? parsedTorrentTitle, string torrentType, string? year, CancellationToken cancellationToken = default) =>
public async Task<ImdbEntry?> FindImdbMetadata(string? parsedTorrentTitle, string torrentType, int? year, CancellationToken cancellationToken = default) =>
await ExecuteCommandAsync(async connection =>
{
var query = $"select \"imdb_id\" as \"ImdbId\", \"title\" as \"Title\", \"year\" as \"Year\" from search_imdb_meta('{parsedTorrentTitle.Replace("'", "").Replace("\"", "")}', '{(torrentType.Equals("movie", StringComparison.OrdinalIgnoreCase) ? "movie" : "tvSeries")}'";
query += year is not null ? $", '{year}'" : ", NULL";
query += ", 15)";
var query = $"select \"imdb_id\" as \"ImdbId\", \"title\" as \"Title\", \"year\" as \"Year\", \"score\" as Score, \"category\" as Category from search_imdb_meta('{parsedTorrentTitle.Replace("'", "").Replace("\"", "")}', '{torrentType}'";
query += year is not null ? $", {year}" : ", NULL";
query += ", 1)";
var result = await connection.QueryAsync<ImdbEntry>(query);
return result.ToList();
var results = result.ToList();
return results.FirstOrDefault();
}, "Error finding imdb metadata.", cancellationToken);
public Task InsertTorrent(Torrent torrent, CancellationToken cancellationToken = default) =>
@@ -134,9 +134,9 @@ public class DapperDataStorage(PostgresConfiguration configuration, RabbitMqConf
const string query =
"""
INSERT INTO "torrents"
("infoHash", "provider", "torrentId", "title", "size", "type", "uploadDate", "seeders", "trackers", "languages", "resolution", "reviewed", "opened", "createdAt", "updatedAt")
("infoHash", "ingestedTorrentId", "provider", "title", "size", "type", "uploadDate", "seeders", "languages", "resolution", "reviewed", "opened", "createdAt", "updatedAt")
VALUES
(@InfoHash, @Provider, @TorrentId, @Title, 0, @Type, NOW(), @Seeders, NULL, NULL, NULL, false, false, NOW(), NOW())
(@InfoHash, @IngestedTorrentId, @Provider, @Title, 0, @Type, NOW(), @Seeders, NULL, NULL, false, false, NOW(), NOW())
ON CONFLICT ("infoHash") DO NOTHING
""";

View File

@@ -9,7 +9,7 @@ public interface IDataStorage
Task<DapperResult<PageIngestedResult, PageIngestedResult>> MarkPageAsIngested(string pageId, CancellationToken cancellationToken = default);
Task<DapperResult<int, int>> GetRowCountImdbMetadata(CancellationToken cancellationToken = default);
Task<List<ImdbEntry>> GetImdbEntriesForRequests(int year, int batchSize, string? stateLastProcessedImdbId, CancellationToken cancellationToken = default);
Task<List<ImdbEntry>> FindImdbMetadata(string? parsedTorrentTitle, string parsedTorrentTorrentType, string? parsedTorrentYear, CancellationToken cancellationToken = default);
Task<ImdbEntry?> FindImdbMetadata(string? parsedTorrentTitle, string parsedTorrentTorrentType, int? parsedTorrentYear, CancellationToken cancellationToken = default);
Task InsertTorrent(Torrent torrent, CancellationToken cancellationToken = default);
Task InsertFiles(IEnumerable<TorrentFile> files, CancellationToken cancellationToken = default);
Task InsertSubtitles(IEnumerable<SubtitleFile> subtitles, CancellationToken cancellationToken = default);

View File

@@ -0,0 +1,14 @@
namespace SharedContracts.Extensions;
public static class JsonExtensions
{
private static readonly JsonSerializerOptions JsonSerializerOptions = new()
{
PropertyNamingPolicy = JsonNamingPolicy.CamelCase,
WriteIndented = false,
ReferenceHandler = ReferenceHandler.IgnoreCycles,
NumberHandling = JsonNumberHandling.Strict,
};
public static string AsJson<T>(this T obj) => JsonSerializer.Serialize(obj, JsonSerializerOptions);
}

View File

@@ -1,5 +1,3 @@
using System.Text.RegularExpressions;
namespace SharedContracts.Extensions;
public static partial class StringExtensions

View File

@@ -1,6 +1,8 @@
// Global using directives
global using System.Text.Json;
global using System.Text.Json.Serialization;
global using System.Text.RegularExpressions;
global using Dapper;
global using MassTransit;
global using Microsoft.AspNetCore.Builder;

View File

@@ -7,4 +7,5 @@ public class ImdbEntry
public string? Category { get; set; }
public string? Year { get; set; }
public bool? Adult { get; set; }
public decimal? Score { get; set; }
}

View File

@@ -12,7 +12,9 @@ public class IngestedTorrent
public int Leechers { get; set; }
public string? Imdb { get; set; }
public bool Processed { get; set; } = false;
public bool Processed { get; set; }
public DateTime CreatedAt { get; set; } = DateTime.UtcNow;
public DateTime UpdatedAt { get; set; } = DateTime.UtcNow;
public string? RtnResponse { get; set; }
}

View File

@@ -3,6 +3,7 @@ namespace SharedContracts.Models;
public class Torrent
{
public string? InfoHash { get; set; }
public long? IngestedTorrentId { get; set; }
public string? Provider { get; set; }
public string? TorrentId { get; set; }
public string? Title { get; set; }

View File

@@ -0,0 +1,13 @@
namespace SharedContracts.Python;
public interface IPythonEngineService
{
ILogger<PythonEngineService> Logger { get; }
Task InitializePythonEngine(CancellationToken cancellationToken);
T ExecuteCommandOrScript<T>(string command, PyModule module, bool throwOnErrors);
T ExecutePythonOperation<T>(Func<T> operation, string operationName, bool throwOnErrors);
T ExecutePythonOperationWithDefault<T>(Func<T> operation, T? defaultValue, string operationName, bool throwOnErrors, bool logErrors);
Task StopPythonEngine(CancellationToken cancellationToken);
dynamic? Sys { get; }
}

View File

@@ -0,0 +1,8 @@
namespace SharedContracts.Python;
public class PythonEngineManager(IPythonEngineService pythonEngineService) : IHostedService
{
public Task StartAsync(CancellationToken cancellationToken) => pythonEngineService.InitializePythonEngine(cancellationToken);
public Task StopAsync(CancellationToken cancellationToken) => pythonEngineService.StopPythonEngine(cancellationToken);
}

View File

@@ -1,11 +1,15 @@
 namespace SharedContracts.Python;

-public class PythonEngineService(ILogger<PythonEngineService> logger) : IHostedService
+public class PythonEngineService(ILogger<PythonEngineService> logger) : IPythonEngineService
 {
     private IntPtr _mainThreadState;
     private bool _isInitialized;

-    public Task StartAsync(CancellationToken cancellationToken)
+    public ILogger<PythonEngineService> Logger { get; } = logger;
+
+    public dynamic? Sys { get; private set; }
+
+    public Task InitializePythonEngine(CancellationToken cancellationToken)
     {
         if (_isInitialized)
         {
@@ -18,7 +22,7 @@ public class PythonEngineService(ILogger<PythonEngineService> logger) : IHostedS
         if (string.IsNullOrWhiteSpace(pythonDllEnv))
         {
-            logger.LogWarning("PYTHONNET_PYDLL env is not set. Exiting Application");
+            Logger.LogWarning("PYTHONNET_PYDLL env is not set. Exiting Application");
             Environment.Exit(1);
             return Task.CompletedTask;
         }
@@ -28,22 +32,93 @@ public class PythonEngineService(ILogger<PythonEngineService> logger) : IHostedS
             _mainThreadState = PythonEngine.BeginAllowThreads();
             _isInitialized = true;
-            logger.LogInformation("Python engine initialized");
+            Logger.LogInformation("Python engine initialized");
         }
         catch (Exception e)
         {
-            logger.LogWarning(e, "Failed to initialize Python engine");
+            Logger.LogError(e, $"Failed to initialize Python engine: {e.Message}");
             Environment.Exit(1);
         }

         return Task.CompletedTask;
     }

-    public Task StopAsync(CancellationToken cancellationToken)
+    public T ExecuteCommandOrScript<T>(string command, PyModule module, bool throwOnErrors) =>
+        ExecutePythonOperation(
+            () =>
+            {
+                var pyCompile = PythonEngine.Compile(command);
+                var nativeResult = module.Execute(pyCompile);
+                return nativeResult.As<T>();
+            }, nameof(ExecuteCommandOrScript), throwOnErrors);
+
+    public T ExecutePythonOperation<T>(Func<T> operation, string operationName, bool throwOnErrors) =>
+        ExecutePythonOperationWithDefault(operation, default, operationName, throwOnErrors, true);
+
+    public T ExecutePythonOperationWithDefault<T>(Func<T> operation, T? defaultValue, string operationName, bool throwOnErrors, bool logErrors) =>
+        ExecutePythonOperationInternal(operation, defaultValue, operationName, throwOnErrors, logErrors);
+
+    public void ExecuteOnGIL(Action act, bool throwOnErrors)
+    {
+        Sys ??= LoadSys();
+
+        try
+        {
+            using var gil = Py.GIL();
+            act();
+        }
+        catch (Exception ex)
+        {
+            Logger.LogError(ex, "Python Error: {Message} ({OperationName})", ex.Message, nameof(ExecuteOnGIL));
+
+            if (throwOnErrors)
+            {
+                throw;
+            }
+        }
+    }
+
+    public Task StopPythonEngine(CancellationToken cancellationToken)
     {
         PythonEngine.EndAllowThreads(_mainThreadState);
         PythonEngine.Shutdown();
         return Task.CompletedTask;
     }
+
+    private static dynamic LoadSys()
+    {
+        using var gil = Py.GIL();
+        var sys = Py.Import("sys");
+        return sys;
+    }
+
+    // ReSharper disable once EntityNameCapturedOnly.Local
+    private T ExecutePythonOperationInternal<T>(Func<T> operation, T? defaultValue, string operationName, bool throwOnErrors, bool logErrors)
+    {
+        Sys ??= LoadSys();
+
+        var result = defaultValue;
+
+        try
+        {
+            using var gil = Py.GIL();
+            result = operation();
+        }
+        catch (Exception ex)
+        {
+            if (logErrors)
+            {
+                Logger.LogError(ex, "Python Error: {Message} ({OperationName})", ex.Message, nameof(operationName));
+            }
+
+            if (throwOnErrors)
+            {
+                throw;
+            }
+        }
+
+        return result;
+    }
 }
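All of the wrappers funnel into ExecutePythonOperationInternal, which takes Py.GIL() on every call, so callers can nest them safely (GIL-state acquisition is reentrant). Note that ExecuteOnGIL is public on the class but absent from the IPythonEngineService interface above, so it is only reachable through the concrete type. A hedged sketch of the script path; Py.CreateScope and Get are pythonnet APIs, and the variable names are illustrative:

// Hedged sketch: run a statement in a fresh scope, then read a value back.
// Everything stays inside ExecutePythonOperation so the GIL is held throughout.
var x = engine.ExecutePythonOperation(() =>
{
    using var scope = Py.CreateScope();   // pythonnet scope; PyModule-derived
    engine.ExecuteCommandOrScript<object?>("x = 40 + 2", scope, throwOnErrors: true);
    return scope.Get("x").As<int>();      // reads x back from the scope's globals
}, "RunScriptExample", throwOnErrors: true);

A statement compiled in file mode returns None, which is why the result is read back from the scope rather than from the ExecuteCommandOrScript return value.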

View File

@@ -3,6 +3,4 @@ namespace SharedContracts.Python.RTN;
 public interface IRankTorrentName
 {
     ParseTorrentTitleResponse Parse(string title);
-    bool IsTrash(string title);
-    bool TitleMatch(string title, string checkTitle);
 }

View File

@@ -1,6 +1,3 @@
 namespace SharedContracts.Python.RTN;

-public record ParseTorrentTitleResponse(bool Success, string ParsedTitle, int Year, int[]? Season = null, int[]? Episode = null)
-{
-    public bool IsMovie => Season == null && Episode == null;
-}
+public record ParseTorrentTitleResponse(bool Success, RtnResponse? Response);

View File

@@ -2,117 +2,49 @@ namespace SharedContracts.Python.RTN;

 public class RankTorrentName : IRankTorrentName
 {
-    private const string SysModuleName = "sys";
+    private readonly IPythonEngineService _pythonEngineService;
     private const string RtnModuleName = "RTN";
-    private readonly ILogger<RankTorrentName> _logger;
-    private dynamic? _sys;
     private dynamic? _rtn;

-    public RankTorrentName(ILogger<RankTorrentName> logger)
+    public RankTorrentName(IPythonEngineService pythonEngineService)
     {
-        _logger = logger;
+        _pythonEngineService = pythonEngineService;
         InitModules();
     }

-    public ParseTorrentTitleResponse Parse(string title)
-    {
-        try
-        {
-            using var py = Py.GIL();
-            var result = _rtn?.parse(title);
-            if (result == null)
-            {
-                return new(false, string.Empty, 0);
-            }
-            return ParseResult(result);
-        }
-        catch (Exception e)
-        {
-            _logger.LogError(e, "Failed to parse title");
-            return new(false, string.Empty, 0);
-        }
-    }
-
-    public bool IsTrash(string title)
-    {
-        try
-        {
-            using var py = Py.GIL();
-            var result = _rtn?.check_trash(title);
-            if (result == null)
-            {
-                return false;
-            }
-            var response = result.As<bool>() ?? false;
-            return response;
-        }
-        catch (Exception e)
-        {
-            _logger.LogError(e, "Failed to parse title");
-            return false;
-        }
-    }
-
-    public bool TitleMatch(string title, string checkTitle)
-    {
-        try
-        {
-            using var py = Py.GIL();
-            var result = _rtn?.title_match(title, checkTitle);
-            if (result == null)
-            {
-                return false;
-            }
-            var response = result.As<bool>() ?? false;
-            return response;
-        }
-        catch (Exception e)
-        {
-            _logger.LogError(e, "Failed to parse title");
-            return false;
-        }
-    }
+    public ParseTorrentTitleResponse Parse(string title) =>
+        _pythonEngineService.ExecutePythonOperationWithDefault(
+            () =>
+            {
+                var result = _rtn?.parse(title);
+                return ParseResult(result);
+            }, new ParseTorrentTitleResponse(false, null), nameof(Parse), throwOnErrors: false, logErrors: false);

     private static ParseTorrentTitleResponse ParseResult(dynamic result)
     {
-        var parsedTitle = result.GetAttr("parsed_title")?.As<string>() ?? string.Empty;
-        var year = result.GetAttr("year")?.As<int>() ?? 0;
-        var seasonList = result.GetAttr("season")?.As<PyList>();
-        var episodeList = result.GetAttr("episode")?.As<PyList>();
-        int[]? seasons = seasonList?.Length() > 0 ? seasonList.As<int[]>() : null;
-        int[]? episodes = episodeList?.Length() > 0 ? episodeList.As<int[]>() : null;
-        return new ParseTorrentTitleResponse(true, parsedTitle, year, seasons, episodes);
-    }
-
-    private void InitModules()
-    {
-        using var py = Py.GIL();
-        _sys = Py.Import(SysModuleName);
-        if (_sys == null)
+        if (result == null)
         {
-            _logger.LogError($"Failed to import Python module: {SysModuleName}");
-            return;
+            return new(false, null);
         }
-        _sys.path.append(Path.Combine(AppContext.BaseDirectory, "python"));
+
+        var json = result.model_dump_json()?.As<string?>();

-        _rtn = Py.Import(RtnModuleName);
-        if (_rtn == null)
+        if (json is null || string.IsNullOrEmpty(json))
         {
-            _logger.LogError($"Failed to import Python module: {RtnModuleName}");
+            return new(false, null);
         }
+
+        var response = JsonSerializer.Deserialize<RtnResponse>(json);
+        return new(true, response);
     }
+
+    private void InitModules() =>
+        _rtn =
+            _pythonEngineService.ExecutePythonOperation(() =>
+            {
+                _pythonEngineService.Sys.path.append(Path.Combine(AppContext.BaseDirectory, "python"));
+                return Py.Import(RtnModuleName);
+            }, nameof(InitModules), throwOnErrors: false);
 }
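With the Pydantic round-trip (model_dump_json on the Python side, System.Text.Json on the C# side), callers now receive a typed RtnResponse instead of hand-picked attributes. A minimal usage sketch; the sample title and the rtn variable are illustrative:

// Hedged usage sketch; rtn is an IRankTorrentName resolved from DI.
var parsed = rtn.Parse("Some.Show.S01E02.1080p.WEB.H264-GROUP");
if (parsed is { Success: true, Response: not null })
{
    Console.WriteLine(parsed.Response.ParsedTitle); // parsed_title from RTN
    Console.WriteLine(parsed.Response.IsMovie);     // false: "S01" trips the [se]\d\d regex
}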

View File

@@ -0,0 +1,98 @@
+namespace SharedContracts.Python.RTN;
+
+public class RtnResponse
+{
+    [JsonPropertyName("raw_title")]
+    public string? RawTitle { get; set; }
+
+    [JsonPropertyName("parsed_title")]
+    public string? ParsedTitle { get; set; }
+
+    [JsonPropertyName("fetch")]
+    public bool Fetch { get; set; }
+
+    [JsonPropertyName("is_4k")]
+    public bool Is4K { get; set; }
+
+    [JsonPropertyName("is_multi_audio")]
+    public bool IsMultiAudio { get; set; }
+
+    [JsonPropertyName("is_multi_subtitle")]
+    public bool IsMultiSubtitle { get; set; }
+
+    [JsonPropertyName("is_complete")]
+    public bool IsComplete { get; set; }
+
+    [JsonPropertyName("year")]
+    public int Year { get; set; }
+
+    [JsonPropertyName("resolution")]
+    public List<string>? Resolution { get; set; }
+
+    [JsonPropertyName("quality")]
+    public List<string>? Quality { get; set; }
+
+    [JsonPropertyName("season")]
+    public List<int>? Season { get; set; }
+
+    [JsonPropertyName("episode")]
+    public List<int>? Episode { get; set; }
+
+    [JsonPropertyName("codec")]
+    public List<string>? Codec { get; set; }
+
+    [JsonPropertyName("audio")]
+    public List<string>? Audio { get; set; }
+
+    [JsonPropertyName("subtitles")]
+    public List<string>? Subtitles { get; set; }
+
+    [JsonPropertyName("language")]
+    public List<string>? Language { get; set; }
+
+    [JsonPropertyName("bit_depth")]
+    public List<int>? BitDepth { get; set; }
+
+    [JsonPropertyName("hdr")]
+    public string? Hdr { get; set; }
+
+    [JsonPropertyName("proper")]
+    public bool Proper { get; set; }
+
+    [JsonPropertyName("repack")]
+    public bool Repack { get; set; }
+
+    [JsonPropertyName("remux")]
+    public bool Remux { get; set; }
+
+    [JsonPropertyName("upscaled")]
+    public bool Upscaled { get; set; }
+
+    [JsonPropertyName("remastered")]
+    public bool Remastered { get; set; }
+
+    [JsonPropertyName("directors_cut")]
+    public bool DirectorsCut { get; set; }
+
+    [JsonPropertyName("extended")]
+    public bool Extended { get; set; }
+
+    // [JsonPropertyName("is_show")]
+    // public bool IsTvShow { get; set; }
+    //
+    // [JsonPropertyName("is_movie")]
+    // public bool IsMovie { get; set; }
+
+    public string ToJson() => this.AsJson();
+
+    public bool IsMovie => !TvRegexes.Any(regex => regex.IsMatch(RawTitle)) && Season?.Count == 0 && Episode?.Count == 0;
+
+    private static List<Regex> TvRegexes { get; set; } =
+    [
+        new(@"[se]\d\d", RegexOptions.IgnoreCase),
+        new(@"\b(tv|complete)\b", RegexOptions.IgnoreCase),
+        new(@"\b(saisons?|stages?|seasons?).?\d", RegexOptions.IgnoreCase),
+        new(@"[a-z]\s?\-\s?\d{2,4}\b", RegexOptions.IgnoreCase),
+        new(@"\d{2,4}\s?\-\s?\d{2,4}\b", RegexOptions.IgnoreCase),
+    ];
+}
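IsMovie requires both signals: no TV-looking marker in the raw title and season/episode lists that are present but empty (a null list fails the ?.Count == 0 check, and a null RawTitle would throw inside the regex match). A small illustration with assumed values:

// Illustrative values only, not repo test data.
var movie = new RtnResponse { RawTitle = "Some.Film.2020.1080p.BluRay", Season = [], Episode = [] };
var show  = new RtnResponse { RawTitle = "Some.Show.S02.Complete", Season = [2], Episode = [] };
// movie.IsMovie == true  -> no regex hit, both lists empty
// show.IsMovie  == false -> "S02" matches [se]\d\d and "Complete" matches \b(tv|complete)\b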

View File

@@ -4,9 +4,8 @@ public static class ServiceCollectionExtensions
 {
     public static IServiceCollection RegisterPythonEngine(this IServiceCollection services)
     {
-        services.AddSingleton<PythonEngineService>();
-        services.AddHostedService(p => p.GetRequiredService<PythonEngineService>());
+        services.AddSingleton<IPythonEngineService, PythonEngineService>();
+        services.AddHostedService<PythonEngineManager>();

        return services;
    }
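RegisterPythonEngine keeps a single PythonEngineService behind the interface while PythonEngineManager drives start and stop, so the engine's lifetime no longer hinges on resolving the concrete class. A hedged composition-root sketch; the generic-host calls are standard .NET, but the RankTorrentName line is an assumption about how the repo composes it:

// Hypothetical Program.cs sketch, not taken from this changeset.
var builder = Host.CreateApplicationBuilder(args);
builder.Services.RegisterPythonEngine();                            // engine singleton + hosted manager
builder.Services.AddSingleton<IRankTorrentName, RankTorrentName>(); // assumed registration
await builder.Build().RunAsync();                                   // Python engine initializes on host start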

View File

@@ -11,6 +11,7 @@ public class PerformIngestionConsumer(IDataStorage dataStorage, ILogger<PerformI
         var torrent = new Torrent
         {
             InfoHash = request.IngestedTorrent.InfoHash.ToLowerInvariant(),
+            IngestedTorrentId = request.IngestedTorrent.Id,
             Provider = request.IngestedTorrent.Source,
             Title = request.IngestedTorrent.Name,
             Type = request.IngestedTorrent.Category,