diff --git a/.github/workflows/pylint.yml b/.github/workflows/pylint.yml
index 1b14149f2..a3117f7cb 100644
--- a/.github/workflows/pylint.yml
+++ b/.github/workflows/pylint.yml
@@ -21,8 +21,7 @@ jobs:
run: |
python -m pip install --upgrade pip
pip install -r requirements.txt
- pip install pylint
- name: Analysing the code with pylint
run: |
- pylint bookwyrm/ --ignore=migrations,tests --disable=E1101,E1135,E1136,R0903,R0901,R0902,W0707,W0511,W0406,R0401,R0801
+ pylint bookwyrm/
diff --git a/.pylintrc b/.pylintrc
new file mode 100644
index 000000000..7f92d0168
--- /dev/null
+++ b/.pylintrc
@@ -0,0 +1,6 @@
+[MAIN]
+ignore=migrations
+load-plugins=pylint.extensions.no_self_use
+
+[MESSAGES CONTROL]
+disable=E1101,E1135,E1136,R0903,R0901,R0902,W0707,W0511,W0406,R0401,R0801,C3001
diff --git a/Dockerfile b/Dockerfile
index 349dd82b1..b3cd26e88 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -6,6 +6,7 @@ RUN mkdir /app /app/static /app/images
WORKDIR /app
+RUN apt-get update && apt-get install -y gettext libgettextpo-dev tidy && apt-get clean
+
COPY requirements.txt /app/
RUN pip install -r requirements.txt --no-cache-dir
-RUN apt-get update && apt-get install -y gettext libgettextpo-dev tidy && apt-get clean
diff --git a/README.md b/README.md
index bd7344df9..cf40d284d 100644
--- a/README.md
+++ b/README.md
@@ -9,21 +9,18 @@ Social reading and reviewing, decentralized with ActivityPub
- [What it is and isn't](#what-it-is-and-isnt)
- [The role of federation](#the-role-of-federation)
- [Features](#features)
-- [Book data](#book-data)
-- [Set up Bookwyrm](#set-up-bookwyrm)
+- [Set up BookWyrm](#set-up-bookwyrm)
## Joining BookWyrm
-BookWyrm is still a young piece of software, and isn't at the level of stability and feature-richness that you'd find in a production-ready application. But it does what it says on the box! If you'd like to join an instance, you can check out the [instances](https://joinbookwyrm.com/instances/) list.
-
-You can request an invite by entering your email address at https://bookwyrm.social.
+If you'd like to join an instance, you can check out the [instances](https://joinbookwyrm.com/instances/) list.
## Contributing
-See [contributing](https://docs.joinbookwyrm.com/how-to-contribute.html) for code, translation or monetary contributions.
+See [contributing](https://docs.joinbookwyrm.com/contributing.html) for code, translation or monetary contributions.
## About BookWyrm
### What it is and isn't
-BookWyrm is a platform for social reading! You can use it to track what you're reading, review books, and follow your friends. It isn't primarily meant for cataloguing or as a data-source for books, but it does do both of those things to some degree.
+BookWyrm is a platform for social reading. You can use it to track what you're reading, review books, and follow your friends. It isn't primarily meant for cataloguing or as a data-source for books, but it does do both of those things to some degree.
### The role of federation
BookWyrm is built on [ActivityPub](http://activitypub.rocks/). With ActivityPub, it inter-operates with different instances of BookWyrm, and other ActivityPub compliant services, like Mastodon. This means you can run an instance for your book club, and still follow your friend who posts on a server devoted to 20th century Russian speculative fiction. It also means that your friend on mastodon can read and comment on a book review that you post on your BookWyrm instance.
@@ -78,8 +75,5 @@ Deployment
- [Nginx](https://nginx.org/en/) HTTP server
-## Book data
-The application is set up to share book and author data between instances, and get book data from arbitrary outside sources. Right now, the only connector is to OpenLibrary, but other connectors could be written.
-
-## Set up Bookwyrm
-The [documentation website](https://docs.joinbookwyrm.com/) has instruction on how to set up Bookwyrm in a [developer environment](https://docs.joinbookwyrm.com/developer-environment.html) or [production](https://docs.joinbookwyrm.com/installing-in-production.html).
+## Set up BookWyrm
+The [documentation website](https://docs.joinbookwyrm.com/) has instructions on how to set up BookWyrm in a [developer environment](https://docs.joinbookwyrm.com/install-dev.html) or [production](https://docs.joinbookwyrm.com/install-prod.html).
diff --git a/bookwyrm/activitypub/base_activity.py b/bookwyrm/activitypub/base_activity.py
index 6bee25f62..fa1535694 100644
--- a/bookwyrm/activitypub/base_activity.py
+++ b/bookwyrm/activitypub/base_activity.py
@@ -1,6 +1,7 @@
""" basics for an activitypub serializer """
from dataclasses import dataclass, fields, MISSING
from json import JSONEncoder
+import logging
from django.apps import apps
from django.db import IntegrityError, transaction
@@ -8,6 +9,8 @@ from django.db import IntegrityError, transaction
from bookwyrm.connectors import ConnectorException, get_data
from bookwyrm.tasks import app
+logger = logging.getLogger(__name__)
+
class ActivitySerializerError(ValueError):
"""routine problems serializing activitypub json"""
@@ -39,12 +42,12 @@ def naive_parse(activity_objects, activity_json, serializer=None):
activity_json["type"] = "PublicKey"
activity_type = activity_json.get("type")
+ if activity_type in ["Question", "Article"]:
+ return None
try:
serializer = activity_objects[activity_type]
except KeyError as err:
# we know this exists and that we can't handle it
- if activity_type in ["Question"]:
- return None
raise ActivitySerializerError(err)
return serializer(activity_objects=activity_objects, **activity_json)
@@ -65,7 +68,7 @@ class ActivityObject:
try:
value = kwargs[field.name]
if value in (None, MISSING, {}):
- raise KeyError()
+ raise KeyError("Missing required field", field.name)
try:
is_subclass = issubclass(field.type, ActivityObject)
except TypeError:
@@ -268,9 +271,9 @@ def resolve_remote_id(
try:
data = get_data(remote_id)
except ConnectorException:
- raise ActivitySerializerError(
- f"Could not connect to host for remote_id: {remote_id}"
- )
+ logger.exception("Could not connect to host for remote_id: %s", remote_id)
+ return None
+
# determine the model implicitly, if not provided
# or if it's a model with subclasses like Status, check again
if not model or hasattr(model.objects, "select_subclasses"):
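
Note: with this change, resolve_remote_id() logs the failure and returns None when the remote host can't be reached, instead of raising ActivitySerializerError, so callers have to tolerate a missing result. A minimal, hypothetical caller-side sketch (the URL and lookup are illustrative, not part of this diff):

    from bookwyrm import activitypub, models

    # hypothetical caller: an unreachable remote host no longer raises,
    # it just produces None, so guard before using the result
    edition = activitypub.resolve_remote_id(
        "https://example.net/book/1", model=models.Edition
    )
    if edition is None:
        # the remote host could not be reached; skip this record or retry later
        pass
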
diff --git a/bookwyrm/activitystreams.py b/bookwyrm/activitystreams.py
index f2dd43fb2..a90d7943b 100644
--- a/bookwyrm/activitystreams.py
+++ b/bookwyrm/activitystreams.py
@@ -298,8 +298,9 @@ def add_status_on_create_command(sender, instance, created):
priority = HIGH
# check if this is an old status, de-prioritize if so
# (this will happen if federation is very slow, or, more expectedly, on csv import)
- one_day = 60 * 60 * 24
- if (instance.created_date - instance.published_date).seconds > one_day:
+ if instance.published_date < timezone.now() - timedelta(
+ days=1
+ ) or instance.created_date < instance.published_date - timedelta(days=1):
priority = LOW
add_status_task.apply_async(
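
Note: the old check compared .seconds, which only holds the sub-day remainder of a timedelta (always under 86400), so it could never exceed one_day; the new version compares full timedeltas. Restated as a standalone predicate purely for illustration (this helper does not exist in the codebase):

    from datetime import timedelta
    from django.utils import timezone

    def is_old_status(created_date, published_date):
        """a status imported or federated more than a day late gets LOW priority"""
        return (
            published_date < timezone.now() - timedelta(days=1)
            or created_date < published_date - timedelta(days=1)
        )
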
diff --git a/bookwyrm/book_search.py b/bookwyrm/book_search.py
index e42a6d8c3..4b0a6eab9 100644
--- a/bookwyrm/book_search.py
+++ b/bookwyrm/book_search.py
@@ -148,8 +148,8 @@ class SearchResult:
def __repr__(self):
# pylint: disable=consider-using-f-string
- return "<SearchResult key={!r} title={!r} author={!r}>".format(
- self.key, self.title, self.author
+ return "<SearchResult key={!r} title={!r} author={!r} confidence={!r}>".format(
+ self.key, self.title, self.author, self.confidence
)
def json(self):
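
Note: assuming the format string above, the repr now includes the confidence score. An illustrative construction (field values are made up; connector is left as None only to keep the example self-contained):

    from bookwyrm.book_search import SearchResult

    result = SearchResult(
        title="Example Title",
        key="https://example.net/book/1",
        author="Example Author",
        connector=None,
        confidence=0.5,
    )
    repr(result)
    # "<SearchResult key='https://example.net/book/1' title='Example Title'
    #  author='Example Author' confidence=0.5>"
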
diff --git a/bookwyrm/connectors/abstract_connector.py b/bookwyrm/connectors/abstract_connector.py
index 56e273886..dc4be4b3d 100644
--- a/bookwyrm/connectors/abstract_connector.py
+++ b/bookwyrm/connectors/abstract_connector.py
@@ -1,9 +1,8 @@
""" functionality outline for a book data connector """
from abc import ABC, abstractmethod
import imghdr
-import ipaddress
import logging
-from urllib.parse import urlparse
+import re
from django.core.files.base import ContentFile
from django.db import transaction
@@ -11,7 +10,7 @@ import requests
from requests.exceptions import RequestException
from bookwyrm import activitypub, models, settings
-from .connector_manager import load_more_data, ConnectorException
+from .connector_manager import load_more_data, ConnectorException, raise_not_valid_url
from .format_mappings import format_mappings
@@ -39,62 +38,34 @@ class AbstractMinimalConnector(ABC):
for field in self_fields:
setattr(self, field, getattr(info, field))
- def search(self, query, min_confidence=None, timeout=settings.QUERY_TIMEOUT):
- """free text search"""
- params = {}
- if min_confidence:
- params["min_confidence"] = min_confidence
+ def get_search_url(self, query):
+ """format the query url"""
+ # Check if the query resembles an ISBN
+ if maybe_isbn(query) and self.isbn_search_url and self.isbn_search_url != "":
+ return f"{self.isbn_search_url}{query}"
- data = self.get_search_data(
- f"{self.search_url}{query}",
- params=params,
- timeout=timeout,
- )
- results = []
+ # NOTE: previously, we tried searching isbn and if that produced no results,
+ # searched as free text. This, instead, only searches isbn if it's isbn-y
+ return f"{self.search_url}{query}"
- for doc in self.parse_search_data(data)[:10]:
- results.append(self.format_search_result(doc))
- return results
-
- def isbn_search(self, query, timeout=settings.QUERY_TIMEOUT):
- """isbn search"""
- params = {}
- data = self.get_search_data(
- f"{self.isbn_search_url}{query}",
- params=params,
- timeout=timeout,
- )
- results = []
-
- # this shouldn't be returning mutliple results, but just in case
- for doc in self.parse_isbn_search_data(data)[:10]:
- results.append(self.format_isbn_search_result(doc))
- return results
-
- def get_search_data(self, remote_id, **kwargs): # pylint: disable=no-self-use
- """this allows connectors to override the default behavior"""
- return get_data(remote_id, **kwargs)
+ def process_search_response(self, query, data, min_confidence):
+ """Format the search results based on the format of the query"""
+ if maybe_isbn(query):
+ return list(self.parse_isbn_search_data(data))[:10]
+ return list(self.parse_search_data(data, min_confidence))[:10]
@abstractmethod
def get_or_create_book(self, remote_id):
"""pull up a book record by whatever means possible"""
@abstractmethod
- def parse_search_data(self, data):
+ def parse_search_data(self, data, min_confidence):
"""turn the result json from a search into a list"""
- @abstractmethod
- def format_search_result(self, search_result):
- """create a SearchResult obj from json"""
-
@abstractmethod
def parse_isbn_search_data(self, data):
"""turn the result json from a search into a list"""
- @abstractmethod
- def format_isbn_search_result(self, search_result):
- """create a SearchResult obj from json"""
-
class AbstractConnector(AbstractMinimalConnector):
"""generic book data connector"""
@@ -254,9 +225,6 @@ def get_data(url, params=None, timeout=10):
# check if the url is blocked
raise_not_valid_url(url)
- if models.FederatedServer.is_blocked(url):
- raise ConnectorException(f"Attempting to load data from blocked url: {url}")
-
try:
resp = requests.get(
url,
@@ -311,20 +279,6 @@ def get_image(url, timeout=10):
return image_content, extension
-def raise_not_valid_url(url):
- """do some basic reality checks on the url"""
- parsed = urlparse(url)
- if not parsed.scheme in ["http", "https"]:
- raise ConnectorException("Invalid scheme: ", url)
-
- try:
- ipaddress.ip_address(parsed.netloc)
- raise ConnectorException("Provided url is an IP address: ", url)
- except ValueError:
- # it's not an IP address, which is good
- pass
-
-
class Mapping:
"""associate a local database field with a field in an external dataset"""
@@ -366,3 +320,9 @@ def unique_physical_format(format_text):
# try a direct match, so saving this would be redundant
return None
return format_text
+
+
+def maybe_isbn(query):
+ """check if a query looks like an isbn"""
+ isbn = re.sub(r"[\W_]", "", query) # removes filler characters
+ return len(isbn) in [10, 13] # ISBN10 or ISBN13
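
Note: maybe_isbn() is what now routes a query either to the ISBN endpoint (get_search_url) and the ISBN parser (process_search_response) or to free-text search, and it only checks the length after stripping separators; it does not validate check digits. Illustrative values:

    from bookwyrm.connectors.abstract_connector import maybe_isbn

    maybe_isbn("978-0-06-085052-4")       # True: 13 characters once "-" is stripped
    maybe_isbn("0 06 085052 7")           # True: 10 characters once spaces are stripped
    maybe_isbn("octavia butler kindred")  # False: handled as a free-text search
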
diff --git a/bookwyrm/connectors/bookwyrm_connector.py b/bookwyrm/connectors/bookwyrm_connector.py
index 6dcba7c31..e07a0b281 100644
--- a/bookwyrm/connectors/bookwyrm_connector.py
+++ b/bookwyrm/connectors/bookwyrm_connector.py
@@ -10,15 +10,12 @@ class Connector(AbstractMinimalConnector):
def get_or_create_book(self, remote_id):
return activitypub.resolve_remote_id(remote_id, model=models.Edition)
- def parse_search_data(self, data):
- return data
-
- def format_search_result(self, search_result):
- search_result["connector"] = self
- return SearchResult(**search_result)
+ def parse_search_data(self, data, min_confidence):
+ for search_result in data:
+ search_result["connector"] = self
+ yield SearchResult(**search_result)
def parse_isbn_search_data(self, data):
- return data
-
- def format_isbn_search_result(self, search_result):
- return self.format_search_result(search_result)
+ for search_result in data:
+ search_result["connector"] = self
+ yield SearchResult(**search_result)
diff --git a/bookwyrm/connectors/connector_manager.py b/bookwyrm/connectors/connector_manager.py
index 14bb702cb..86774af56 100644
--- a/bookwyrm/connectors/connector_manager.py
+++ b/bookwyrm/connectors/connector_manager.py
@@ -1,17 +1,18 @@
""" interface with whatever connectors the app has """
-from datetime import datetime
+import asyncio
import importlib
+import ipaddress
import logging
-import re
from urllib.parse import urlparse
+import aiohttp
from django.dispatch import receiver
from django.db.models import signals
from requests import HTTPError
from bookwyrm import book_search, models
-from bookwyrm.settings import SEARCH_TIMEOUT
+from bookwyrm.settings import SEARCH_TIMEOUT, USER_AGENT
from bookwyrm.tasks import app
logger = logging.getLogger(__name__)
@@ -21,53 +22,85 @@ class ConnectorException(HTTPError):
"""when the connector can't do what was asked"""
+async def get_results(session, url, min_confidence, query, connector):
+ """try this specific connector"""
+ # pylint: disable=line-too-long
+ headers = {
+ "Accept": (
+ 'application/json, application/activity+json, application/ld+json; profile="https://www.w3.org/ns/activitystreams"; charset=utf-8'
+ ),
+ "User-Agent": USER_AGENT,
+ }
+ params = {"min_confidence": min_confidence}
+ try:
+ async with session.get(url, headers=headers, params=params) as response:
+ if not response.ok:
+ logger.info("Unable to connect to %s: %s", url, response.reason)
+ return
+
+ try:
+ raw_data = await response.json()
+ except aiohttp.client_exceptions.ContentTypeError as err:
+ logger.exception(err)
+ return
+
+ return {
+ "connector": connector,
+ "results": connector.process_search_response(
+ query, raw_data, min_confidence
+ ),
+ }
+ except asyncio.TimeoutError:
+ logger.info("Connection timed out for url: %s", url)
+ except aiohttp.ClientError as err:
+ logger.exception(err)
+
+
+async def async_connector_search(query, items, min_confidence):
+ """Try a number of requests simultaneously"""
+ timeout = aiohttp.ClientTimeout(total=SEARCH_TIMEOUT)
+ async with aiohttp.ClientSession(timeout=timeout) as session:
+ tasks = []
+ for url, connector in items:
+ tasks.append(
+ asyncio.ensure_future(
+ get_results(session, url, min_confidence, query, connector)
+ )
+ )
+
+ results = await asyncio.gather(*tasks)
+ return results
+
+
def search(query, min_confidence=0.1, return_first=False):
"""find books based on arbitary keywords"""
if not query:
return []
results = []
- # Have we got a ISBN ?
- isbn = re.sub(r"[\W_]", "", query)
- maybe_isbn = len(isbn) in [10, 13] # ISBN10 or ISBN13
-
- start_time = datetime.now()
+ items = []
for connector in get_connectors():
- result_set = None
- if maybe_isbn and connector.isbn_search_url and connector.isbn_search_url != "":
- # Search on ISBN
- try:
- result_set = connector.isbn_search(isbn)
- except Exception as err: # pylint: disable=broad-except
- logger.info(err)
- # if this fails, we can still try regular search
+ # get the search url from the connector before sending
+ url = connector.get_search_url(query)
+ try:
+ raise_not_valid_url(url)
+ except ConnectorException:
+ # if this URL is invalid we should skip it and move on
+ logger.info("Request denied to blocked domain: %s", url)
+ continue
+ items.append((url, connector))
- # if no isbn search results, we fallback to generic search
- if not result_set:
- try:
- result_set = connector.search(query, min_confidence=min_confidence)
- except Exception as err: # pylint: disable=broad-except
- # we don't want *any* error to crash the whole search page
- logger.info(err)
- continue
-
- if return_first and result_set:
- # if we found anything, return it
- return result_set[0]
-
- if result_set:
- results.append(
- {
- "connector": connector,
- "results": result_set,
- }
- )
- if (datetime.now() - start_time).seconds >= SEARCH_TIMEOUT:
- break
+ # load as many results as we can
+ results = asyncio.run(async_connector_search(query, items, min_confidence))
+ results = [r for r in results if r]
if return_first:
- return None
+ # find the best result from all the responses and return that
+ all_results = [r for con in results for r in con["results"]]
+ all_results = sorted(all_results, key=lambda r: r.confidence, reverse=True)
+ return all_results[0] if all_results else None
+ # failed requests will return None, so filter those out
return results
@@ -133,3 +166,20 @@ def create_connector(sender, instance, created, *args, **kwargs):
"""create a connector to an external bookwyrm server"""
if instance.application_type == "bookwyrm":
get_or_create_connector(f"https://{instance.server_name}")
+
+
+def raise_not_valid_url(url):
+ """do some basic reality checks on the url"""
+ parsed = urlparse(url)
+ if not parsed.scheme in ["http", "https"]:
+ raise ConnectorException("Invalid scheme: ", url)
+
+ try:
+ ipaddress.ip_address(parsed.netloc)
+ raise ConnectorException("Provided url is an IP address: ", url)
+ except ValueError:
+ # it's not an IP address, which is good
+ pass
+
+ if models.FederatedServer.is_blocked(url):
+ raise ConnectorException(f"Attempting to load data from blocked url: {url}")
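
Note: raise_not_valid_url() now runs before a request is ever queued, and it absorbs the blocked-domain check that previously lived in get_data(). Assuming the domain isn't in the FederatedServer block list (that last check hits the database), it behaves like this (illustrative calls):

    from bookwyrm.connectors.connector_manager import raise_not_valid_url

    raise_not_valid_url("https://openlibrary.org/search.json?q=kindred")  # passes
    raise_not_valid_url("ftp://example.net/catalog")   # ConnectorException: bad scheme
    raise_not_valid_url("https://203.0.113.7/search")  # ConnectorException: bare IP address
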
diff --git a/bookwyrm/connectors/inventaire.py b/bookwyrm/connectors/inventaire.py
index a9aeb94f9..c13f4e3e6 100644
--- a/bookwyrm/connectors/inventaire.py
+++ b/bookwyrm/connectors/inventaire.py
@@ -77,53 +77,42 @@ class Connector(AbstractConnector):
**{k: data.get(k) for k in ["uri", "image", "labels", "sitelinks", "type"]},
}
- def search(self, query, min_confidence=None): # pylint: disable=arguments-differ
- """overrides default search function with confidence ranking"""
- results = super().search(query)
- if min_confidence:
- # filter the search results after the fact
- return [r for r in results if r.confidence >= min_confidence]
- return results
-
- def parse_search_data(self, data):
- return data.get("results")
-
- def format_search_result(self, search_result):
- images = search_result.get("image")
- cover = f"{self.covers_url}/img/entities/{images[0]}" if images else None
- # a deeply messy translation of inventaire's scores
- confidence = float(search_result.get("_score", 0.1))
- confidence = 0.1 if confidence < 150 else 0.999
- return SearchResult(
- title=search_result.get("label"),
- key=self.get_remote_id(search_result.get("uri")),
- author=search_result.get("description"),
- view_link=f"{self.base_url}/entity/{search_result.get('uri')}",
- cover=cover,
- confidence=confidence,
- connector=self,
- )
+ def parse_search_data(self, data, min_confidence):
+ for search_result in data.get("results", []):
+ images = search_result.get("image")
+ cover = f"{self.covers_url}/img/entities/{images[0]}" if images else None
+ # a deeply messy translation of inventaire's scores
+ confidence = float(search_result.get("_score", 0.1))
+ confidence = 0.1 if confidence < 150 else 0.999
+ if confidence < min_confidence:
+ continue
+ yield SearchResult(
+ title=search_result.get("label"),
+ key=self.get_remote_id(search_result.get("uri")),
+ author=search_result.get("description"),
+ view_link=f"{self.base_url}/entity/{search_result.get('uri')}",
+ cover=cover,
+ confidence=confidence,
+ connector=self,
+ )
def parse_isbn_search_data(self, data):
"""got some daaaata"""
results = data.get("entities")
if not results:
- return []
- return list(results.values())
-
- def format_isbn_search_result(self, search_result):
- """totally different format than a regular search result"""
- title = search_result.get("claims", {}).get("wdt:P1476", [])
- if not title:
- return None
- return SearchResult(
- title=title[0],
- key=self.get_remote_id(search_result.get("uri")),
- author=search_result.get("description"),
- view_link=f"{self.base_url}/entity/{search_result.get('uri')}",
- cover=self.get_cover_url(search_result.get("image")),
- connector=self,
- )
+ return
+ for search_result in list(results.values()):
+ title = search_result.get("claims", {}).get("wdt:P1476", [])
+ if not title:
+ continue
+ yield SearchResult(
+ title=title[0],
+ key=self.get_remote_id(search_result.get("uri")),
+ author=search_result.get("description"),
+ view_link=f"{self.base_url}/entity/{search_result.get('uri')}",
+ cover=self.get_cover_url(search_result.get("image")),
+ connector=self,
+ )
def is_work_data(self, data):
return data.get("type") == "work"
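
Note: the score translation itself is unchanged; what's new is that filtering by min_confidence happens while parsing, so low-scoring hits never become SearchResult objects. Restated as a standalone function for illustration (not code in this diff):

    def inventaire_confidence(raw_score):
        """Inventaire's _score collapses to one of two confidence values"""
        confidence = float(raw_score or 0.1)
        return 0.1 if confidence < 150 else 0.999

    inventaire_confidence(None)  # 0.1 -> dropped whenever min_confidence > 0.1
    inventaire_confidence(97)    # 0.1
    inventaire_confidence(512)   # 0.999
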
diff --git a/bookwyrm/connectors/openlibrary.py b/bookwyrm/connectors/openlibrary.py
index 118222a16..c1527527e 100644
--- a/bookwyrm/connectors/openlibrary.py
+++ b/bookwyrm/connectors/openlibrary.py
@@ -152,39 +152,41 @@ class Connector(AbstractConnector):
image_name = f"{cover_id}-{size}.jpg"
return f"{self.covers_url}/b/id/{image_name}"
- def parse_search_data(self, data):
- return data.get("docs")
+ def parse_search_data(self, data, min_confidence):
+ for idx, search_result in enumerate(data.get("docs")):
+ # build the remote id from the openlibrary key
+ key = self.books_url + search_result["key"]
+ author = search_result.get("author_name") or ["Unknown"]
+ cover_blob = search_result.get("cover_i")
+ cover = self.get_cover_url([cover_blob], size="M") if cover_blob else None
- def format_search_result(self, search_result):
- # build the remote id from the openlibrary key
- key = self.books_url + search_result["key"]
- author = search_result.get("author_name") or ["Unknown"]
- cover_blob = search_result.get("cover_i")
- cover = self.get_cover_url([cover_blob], size="M") if cover_blob else None
- return SearchResult(
- title=search_result.get("title"),
- key=key,
- author=", ".join(author),
- connector=self,
- year=search_result.get("first_publish_year"),
- cover=cover,
- )
+ # OL doesn't provide confidence, but it does sort by an internal ranking, so
+ # this confidence value is relative to the list position
+ confidence = 1 / (idx + 1)
+
+ yield SearchResult(
+ title=search_result.get("title"),
+ key=key,
+ author=", ".join(author),
+ connector=self,
+ year=search_result.get("first_publish_year"),
+ cover=cover,
+ confidence=confidence,
+ )
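
Note: OpenLibrary doesn't return a relevance score, so confidence here is purely positional — the first document gets 1.0, the second 0.5, and so on — which is just enough for the cross-connector sort used when return_first is set. For instance:

    # positional confidence assigned to the first few OpenLibrary docs
    [round(1 / (idx + 1), 3) for idx in range(5)]
    # [1.0, 0.5, 0.333, 0.25, 0.2]
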
def parse_isbn_search_data(self, data):
- return list(data.values())
-
- def format_isbn_search_result(self, search_result):
- # build the remote id from the openlibrary key
- key = self.books_url + search_result["key"]
- authors = search_result.get("authors") or [{"name": "Unknown"}]
- author_names = [author.get("name") for author in authors]
- return SearchResult(
- title=search_result.get("title"),
- key=key,
- author=", ".join(author_names),
- connector=self,
- year=search_result.get("publish_date"),
- )
+ for search_result in list(data.values()):
+ # build the remote id from the openlibrary key
+ key = self.books_url + search_result["key"]
+ authors = search_result.get("authors") or [{"name": "Unknown"}]
+ author_names = [author.get("name") for author in authors]
+ yield SearchResult(
+ title=search_result.get("title"),
+ key=key,
+ author=", ".join(author_names),
+ connector=self,
+ year=search_result.get("publish_date"),
+ )
def load_edition_data(self, olkey):
"""query openlibrary for editions of a work"""
diff --git a/bookwyrm/forms/books.py b/bookwyrm/forms/books.py
index 72df1371c..9b3c84010 100644
--- a/bookwyrm/forms/books.py
+++ b/bookwyrm/forms/books.py
@@ -4,6 +4,7 @@ from django import forms
from bookwyrm import models
from bookwyrm.models.fields import ClearableFileInputWithWarning
from .custom_form import CustomForm
+from .widgets import ArrayWidget, SelectDateWidget, Select
# pylint: disable=missing-class-docstring
@@ -14,14 +15,6 @@ class CoverForm(CustomForm):
help_texts = {f: None for f in fields}
-class ArrayWidget(forms.widgets.TextInput):
- # pylint: disable=unused-argument
- # pylint: disable=no-self-use
- def value_from_datadict(self, data, files, name):
- """get all values for this name"""
- return [i for i in data.getlist(name) if i]
-
-
class EditionForm(CustomForm):
class Meta:
model = models.Edition
@@ -56,16 +49,16 @@ class EditionForm(CustomForm):
"publishers": forms.TextInput(
attrs={"aria-describedby": "desc_publishers_help desc_publishers"}
),
- "first_published_date": forms.SelectDateWidget(
+ "first_published_date": SelectDateWidget(
attrs={"aria-describedby": "desc_first_published_date"}
),
- "published_date": forms.SelectDateWidget(
+ "published_date": SelectDateWidget(
attrs={"aria-describedby": "desc_published_date"}
),
"cover": ClearableFileInputWithWarning(
attrs={"aria-describedby": "desc_cover"}
),
- "physical_format": forms.Select(
+ "physical_format": Select(
attrs={"aria-describedby": "desc_physical_format"}
),
"physical_format_detail": forms.TextInput(
@@ -85,3 +78,27 @@ class EditionForm(CustomForm):
),
"ASIN": forms.TextInput(attrs={"aria-describedby": "desc_ASIN"}),
}
+
+
+class EditionFromWorkForm(CustomForm):
+ def __init__(self, *args, **kwargs):
+ super().__init__(*args, **kwargs)
+ # make all fields hidden
+ for visible in self.visible_fields():
+ visible.field.widget = forms.HiddenInput()
+
+ class Meta:
+ model = models.Work
+ fields = [
+ "title",
+ "subtitle",
+ "authors",
+ "description",
+ "languages",
+ "series",
+ "series_number",
+ "subjects",
+ "subject_places",
+ "cover",
+ "first_published_date",
+ ]
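
Note: EditionFromWorkForm swaps every visible widget for a HiddenInput, so a template can carry a work's metadata through the POST that creates an edition from it. A hypothetical view-side sketch (the query is illustrative; nothing below is part of this diff):

    from bookwyrm import models
    from bookwyrm.forms.books import EditionFromWorkForm

    # hypothetical usage: pre-fill the hidden form from an existing work
    work = models.Work.objects.first()
    form = EditionFromWorkForm(instance=work)
    # every field renders as an <input type="hidden">, so the work's data
    # rides along invisibly until the user submits the new-edition form
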
diff --git a/bookwyrm/forms/forms.py b/bookwyrm/forms/forms.py
index 8af8fb812..4aa1e5758 100644
--- a/bookwyrm/forms/forms.py
+++ b/bookwyrm/forms/forms.py
@@ -45,7 +45,7 @@ class ReportForm(CustomForm):
class ReadThroughForm(CustomForm):
def clean(self):
- """make sure the email isn't in use by a registered user"""
+ """don't let readthroughs end before they start"""
cleaned_data = super().clean()
start_date = cleaned_data.get("start_date")
finish_date = cleaned_data.get("finish_date")
@@ -53,7 +53,12 @@ class ReadThroughForm(CustomForm):
self.add_error(
"finish_date", _("Reading finish date cannot be before start date.")
)
+ stopped_date = cleaned_data.get("stopped_date")
+ if start_date and stopped_date and start_date > stopped_date:
+ self.add_error(
+ "stopped_date", _("Reading stopped date cannot be before start date.")
+ )
class Meta:
model = models.ReadThrough
- fields = ["user", "book", "start_date", "finish_date"]
+ fields = ["user", "book", "start_date", "finish_date", "stopped_date"]
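
Note: the new clean() rule mirrors the existing finish-date check: an out-of-order stopped_date attaches a field error rather than raising. A hedged sketch of the behaviour (the user and book rows are assumed to exist):

    from bookwyrm import models
    from bookwyrm.forms.forms import ReadThroughForm

    form = ReadThroughForm(
        data={
            "user": models.User.objects.first().id,     # assumed existing user
            "book": models.Edition.objects.first().id,  # assumed existing edition
            "start_date": "2022-05-01",
            "stopped_date": "2022-04-01",
        }
    )
    form.is_valid()              # False
    form.errors["stopped_date"]  # ["Reading stopped date cannot be before start date."]
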
diff --git a/bookwyrm/forms/widgets.py b/bookwyrm/forms/widgets.py
new file mode 100644
index 000000000..ee9345aa0
--- /dev/null
+++ b/bookwyrm/forms/widgets.py
@@ -0,0 +1,70 @@
+""" using django model forms """
+from django import forms
+
+
+class ArrayWidget(forms.widgets.TextInput):
+ """Inputs for postgres array fields"""
+
+ # pylint: disable=unused-argument
+ # pylint: disable=no-self-use
+ def value_from_datadict(self, data, files, name):
+ """get all values for this name"""
+ return [i for i in data.getlist(name) if i]
+
+
+class Select(forms.Select):
+ """custom template for select widget"""
+
+ template_name = "widgets/select.html"
+
+
+class SelectDateWidget(forms.SelectDateWidget):
+ """
+    A widget that splits date input into two <select> boxes and a numerical year.
{% url "conduct" as coc_path %}
{% blocktrans trimmed with site_name=site.name %}
- {{ site_name }}'s moderators and administrators keep the site up and running, enforce the code of conduct, and respond when users report spam and bad behavior.
+ {{ site_name }}'s moderators and administrators keep the site up and running, enforce the code of conduct, and respond when users report spam and bad behavior.
{% endblocktrans %}