mirror of
https://github.com/knightcrawler-stremio/knightcrawler.git
synced 2024-12-20 03:29:51 +00:00
Changed from page scraping to RSS XML scraping. Includes RealDebridManager hashlist decoding (requires a GitHub read-only PAT, as requests must be authenticated) — this allows ingestion of 200k+ entries in a few hours. Simplifies a lot of torrentio to deal with the new data.
45 lines
1.3 KiB
JavaScript
45 lines
1.3 KiB
JavaScript
import { Type } from './types.js';
|
|
import { createTorrentEntry, checkAndUpdateTorrent } from './torrentEntries.js';
|
|
import {getTrackers} from "./trackerService.js";
|
|
|
|
/**
 * Ingests a single torrent record: normalises it, then either refreshes an
 * already-known entry or creates a brand-new one.
 *
 * @param {object} torrent - raw torrent record (carries at least `category`,
 *   `name`, `infoHash`, `size`, `createdAt`, `source`, and optionally `imdb`).
 * @returns {Promise<object>} the parsed torrent info when the torrent already
 *   existed and was updated, otherwise the result of creating a fresh entry.
 */
export async function processTorrentRecord(torrent) {
    // 'tv' maps to series; everything else is treated as a movie.
    const entryType = torrent.category === 'tv' ? Type.SERIES : Type.MOVIE;
    const torrentInfo = await parseTorrent(torrent, entryType);

    console.log(`Processing torrent ${torrentInfo.title} with infoHash ${torrentInfo.infoHash}`);

    // An existing entry was found and refreshed — nothing more to do.
    const alreadyKnown = await checkAndUpdateTorrent(torrentInfo);
    if (alreadyKnown) {
        return torrentInfo;
    }

    return createTorrentEntry(torrentInfo);
}
|
|
|
|
/**
 * Builds the comma-separated tracker list attached to each torrent entry.
 *
 * @returns {Promise<string>} all known trackers joined with ','.
 */
async function assignTorrentTrackers() {
    return (await getTrackers()).join(',');
}
|
|
|
|
/**
 * Normalises a raw torrent record into the shape expected by the ingestion
 * pipeline (checkAndUpdateTorrent / createTorrentEntry).
 *
 * @param {object} torrent - raw record (name, infoHash, size, createdAt,
 *   imdb, source).
 * @param {string} type - resolved entry type (Type.MOVIE or Type.SERIES).
 *   Renamed from `category`: the caller passes the already-resolved Type,
 *   not the raw category string, and the value is stored in the `type` field.
 * @returns {Promise<object>} normalised torrent info.
 */
async function parseTorrent(torrent, type) {
    // Canonical form: lowercased, whitespace-trimmed; stays undefined if absent.
    const infoHash = torrent.infoHash?.trim().toLowerCase();
    return {
        title: torrent.name,
        torrentId: `${torrent.name}_${infoHash}`,
        infoHash: infoHash,
        // NOTE(review): hard-coded — RSS ingestion carries no live seeder
        // counts, so 100 acts as a neutral placeholder. Confirm downstream
        // ranking tolerates this.
        seeders: 100,
        size: torrent.size,
        uploadDate: torrent.createdAt,
        imdbId: parseImdbId(torrent),
        type: type,
        provider: torrent.source,
        trackers: await assignTorrentTrackers(),
    };
}
|
|
|
|
/**
 * Extracts the IMDb id from a raw torrent record.
 *
 * @param {object} torrent - raw torrent record; may carry an `imdb` field.
 * @returns {string|undefined} the IMDb id, or undefined when the field is
 *   null or missing.
 */
function parseImdbId(torrent) {
    // Nullish coalescing collapses both null and undefined to undefined,
    // matching the original explicit null/undefined checks exactly.
    return torrent.imdb ?? undefined;
}