knightcrawler/src/node/consumer/lib/ingestedTorrent.js
iPromKnight ab17ef81be Big rewrite: distributed, scalable consumers for ingestion/scraping, with a single producer written in C#.
Changed from page scraping to RSS XML scraping.
Includes RealDebridManager hashlist decoding (requires a GitHub read-only PAT, as requests must be authenticated); this allows ingestion of 200k+ entries in a few hours.
Simplifies a lot of torrentio to deal with the new data.
2024-02-01 16:38:45 +00:00
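
The PAT-authenticated hashlist retrieval mentioned in the commit message could look roughly like the sketch below. This is not the actual producer code: the function name fetchHashlist, the environment variables GITHUB_PAT and HASHLIST_REPO, and the path argument are placeholders; the only real APIs assumed are Node 18+ global fetch and the standard GitHub "get repository content" endpoint.

// Sketch: fetch a hashlist file from a GitHub repository using a read-only PAT.
// GITHUB_PAT and HASHLIST_REPO are hypothetical environment variables.
async function fetchHashlist(path) {
    const response = await fetch(
        `https://api.github.com/repos/${process.env.HASHLIST_REPO}/contents/${path}`,
        {
            headers: {
                // Authenticated requests avoid the low unauthenticated rate limit.
                'Authorization': `Bearer ${process.env.GITHUB_PAT}`,
                // Ask the API for the raw file body instead of the base64 JSON wrapper.
                'Accept': 'application/vnd.github.raw+json',
            },
        });
    if (!response.ok) {
        throw new Error(`Hashlist request failed: ${response.status}`);
    }
    return response.text();
}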

45 lines | 1.3 KiB | JavaScript

import { Type } from './types.js';
import { createTorrentEntry, checkAndUpdateTorrent } from './torrentEntries.js';
import { getTrackers } from './trackerService.js';

export async function processTorrentRecord(torrent) {
    const category = torrent.category;
    const type = category === 'tv' ? Type.SERIES : Type.MOVIE;
    const torrentInfo = await parseTorrent(torrent, type);

    console.log(`Processing torrent ${torrentInfo.title} with infoHash ${torrentInfo.infoHash}`);

    // If the torrent already exists, update it and return it; otherwise create a new entry.
    if (await checkAndUpdateTorrent(torrentInfo)) {
        return torrentInfo;
    }

    return createTorrentEntry(torrentInfo);
}

// Returns the configured tracker list as a single comma-separated string.
async function assignTorrentTrackers() {
    const trackers = await getTrackers();
    return trackers.join(',');
}

// Maps an ingested torrent record onto the shape expected by the torrent entry services.
async function parseTorrent(torrent, type) {
    const infoHash = torrent.infoHash?.trim().toLowerCase();
    return {
        title: torrent.name,
        torrentId: `${torrent.name}_${infoHash}`,
        infoHash: infoHash,
        seeders: 100, // hard-coded placeholder value
        size: torrent.size,
        uploadDate: torrent.createdAt,
        imdbId: parseImdbId(torrent),
        type: type,
        provider: torrent.source,
        trackers: await assignTorrentTrackers(),
    };
}

// Normalises a missing IMDb id (null or undefined) to undefined.
function parseImdbId(torrent) {
    if (torrent.imdb === undefined || torrent.imdb === null) {
        return undefined;
    }
    return torrent.imdb;
}