2020-03-29 07:05:09 +00:00
|
|
|
''' handle reading a csv from goodreads '''
|
2020-03-25 12:29:21 +00:00
|
|
|
import csv
|
2020-04-20 16:10:19 +00:00
|
|
|
from requests import HTTPError
|
2020-03-25 12:29:21 +00:00
|
|
|
|
2020-04-20 16:10:19 +00:00
|
|
|
from fedireads import outgoing
|
|
|
|
from fedireads.tasks import app
|
2020-04-21 14:09:21 +00:00
|
|
|
from fedireads.models import ImportJob, ImportItem
|
2020-04-22 11:43:10 +00:00
|
|
|
from fedireads.status import create_notification
|
2020-03-29 07:05:09 +00:00
|
|
|
|
2020-04-28 13:59:48 +00:00
|
|
|
# TODO: remove or increase once we're confident it's not causing problems.
# Hard cap on the number of csv rows processed per import job; create_job
# silently drops any rows beyond this limit.
MAX_ENTRIES = 500
|
2020-03-25 12:58:27 +00:00
|
|
|
|
2020-03-29 07:05:09 +00:00
|
|
|
|
2020-04-21 14:09:21 +00:00
|
|
|
def create_job(user, csv_file):
    ''' check over a csv and create a database entry for the job

    Validates that every row (up to MAX_ENTRIES) carries the fields needed
    for matching, and stores one ImportItem per row.

    Raises:
        ValueError: if a row is missing ISBN13, Title, or Author.
    Returns:
        the newly created ImportJob.
    '''
    job = ImportJob.objects.create(user=user)
    # zip with a bounded range truncates lazily at MAX_ENTRIES instead of
    # materializing the entire csv in memory before slicing.
    reader = csv.DictReader(csv_file)
    for index, entry in zip(range(MAX_ENTRIES), reader):
        if not all(field in entry for field in ('ISBN13', 'Title', 'Author')):
            raise ValueError("Author, title, and isbn must be in data.")
        ImportItem(job=job, index=index, data=entry).save()
    return job
|
2020-03-25 12:29:21 +00:00
|
|
|
|
2020-05-09 21:26:27 +00:00
|
|
|
|
2020-04-21 14:09:21 +00:00
|
|
|
def start_import(job):
    ''' initializes a csv import job by handing it off to celery '''
    # dispatch the background task, then remember its id so the job row
    # can be correlated with the celery task later
    task = import_data.delay(job.id)
    job.task_id = task.id
    job.save()
|
2020-04-20 16:10:19 +00:00
|
|
|
|
2020-05-09 21:26:27 +00:00
|
|
|
|
2020-04-20 16:10:19 +00:00
|
|
|
@app.task
def import_data(job_id):
    ''' does the actual lookup work in a celery task

    Resolves each ImportItem against OpenLibrary, records per-item
    failures, shelves the matched books, and always notifies the user
    when the run ends.
    '''
    job = ImportJob.objects.get(id=job_id)
    try:
        successes = []
        for item in job.items.all():
            try:
                item.resolve()
            except HTTPError:
                # best-effort lookup: a failed request just leaves
                # item.book unset, which is handled as a miss below
                pass

            if not item.book:
                item.fail_reason = "Could not match book on OpenLibrary"
                item.save()
                continue

            item.save()
            successes.append(item)

        status = outgoing.handle_import_books(job.user, successes)
        if status:
            job.import_status = status
            job.save()
    finally:
        # notify the user that the import finished, even if something
        # above raised
        create_notification(job.user, 'IMPORT', related_import=job)
|