mirror of
https://github.com/knightcrawler-stremio/knightcrawler.git
synced 2024-12-20 03:29:51 +00:00
Was a little inspired. Now we have a self-populating database of IMDb IDs — why shouldn't we also have the ability to scrape any other instance of Torrentio or KnightCrawler? Also restructured the producer to be vertically sliced to make it easier to work with. There was too much flicking back and forth between Jobs and Crawlers when configuring.
40 lines
1.3 KiB
C#
40 lines
1.3 KiB
C#
using Literals = Producer.Features.JobSupport.Literals;
|
|
|
|
namespace Producer.Features.Amqp;
|
|
|
|
[DisallowConcurrentExecution]
[ManualJobRegistration]
public class PublisherJob(IMessagePublisher publisher, IDataStorage storage, ILogger<PublisherJob> logger) : IJob
{
    private const string JobName = nameof(PublisherJob);

    // Quartz identity keys, grouped under the publishing-jobs literal so the
    // scheduler can look this job up from the manual registration code.
    public static readonly JobKey Key = new(JobName, nameof(Literals.PublishingJobs));
    public static readonly TriggerKey Trigger = new($"{JobName}-trigger", nameof(Literals.PublishingJobs));

    /// <summary>
    /// Fetches torrents that are pending publication from storage, publishes them
    /// via <see cref="IMessagePublisher"/>, and marks the batch as processed.
    /// Returns early (leaving the batch untouched for the next run) when there is
    /// nothing to publish or when publishing fails.
    /// </summary>
    /// <param name="context">Quartz execution context; supplies the cancellation token.</param>
    public async Task Execute(IJobExecutionContext context)
    {
        var cancellationToken = context.CancellationToken;
        var torrents = await storage.GetPublishableTorrents(cancellationToken);

        if (torrents.Count == 0)
        {
            return;
        }

        var published = await publisher.PublishAsync(torrents, cancellationToken);

        if (!published)
        {
            // Previously this path returned silently. Log a warning so a failing
            // publisher is visible to operators, consistent with the
            // SetTorrentsProcessed failure handling below. The batch remains
            // unprocessed in storage and will be retried on the next run.
            logger.LogWarning("Failed to publish {Count} torrents; batch left unprocessed for retry", torrents.Count);
            return;
        }

        var result = await storage.SetTorrentsProcessed(torrents, cancellationToken);

        if (!result.Success)
        {
            logger.LogWarning("Failed to set torrents as processed: [{Error}]", result.ErrorMessage);
            return;
        }

        logger.LogInformation("Successfully set {Count} torrents as processed", result.UpdatedCount);
    }
}