Mirror of https://github.com/jointakahe/takahe.git
Module-specific loggers and minor reformatting (#657)
parent 2f546dfa74
commit 1ceef59bec
13 changed files with 84 additions and 53 deletions
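
The pattern applied across all thirteen files is the standard module-level logger: each module creates a logger named after itself with logging.getLogger(__name__) and calls that instead of the root-level logging functions. A minimal sketch of the idea (the function and message below are illustrative, not taken from the diff):

import logging

# For a module imported as "activities.models.post", __name__ gives the logger
# that exact dotted name, so records show where they came from and levels or
# handlers can be tuned per module rather than globally.
logger = logging.getLogger(__name__)


def do_work(item):
    # Calls go through the module's named logger, not the root logger.
    logger.info("Processing %s", item)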
@@ -46,6 +46,8 @@ from users.models.identity import Identity, IdentityStates
 from users.models.inbox_message import InboxMessage
 from users.models.system_actor import SystemActor
 
+logger = logging.getLogger(__name__)
+
 
 class PostStates(StateGraph):
     new = State(try_interval=300)
@@ -897,7 +899,7 @@ class Post(StatorModel):
             # don't have content, but this shouldn't be a total failure
             post.content = get_value_or_map(data, "content", "contentMap")
         except ActivityPubFormatError as err:
-            logging.warning(f"{err} on {post.url}")
+            logger.warning("%s on %s", err, post.url)
             post.content = None
         # Document types have names, not summaries
         post.summary = data.get("summary") or data.get("name")
@@ -993,8 +995,10 @@ class Post(StatorModel):
         try:
             cls.ensure_object_uri(post.in_reply_to, reason=post.object_uri)
         except ValueError:
-            logging.warning(
-                f"Cannot fetch ancestor of Post={post.pk}, ancestor_uri={post.in_reply_to}"
+            logger.warning(
+                "Cannot fetch ancestor of Post=%s, ancestor_uri=%s",
+                post.pk,
+                post.in_reply_to,
             )
         else:
             parent.calculate_stats()
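
Two of the hunks above also swap f-string messages for %-style arguments, as in logger.warning("%s on %s", err, post.url). Passing the values separately lets the logging framework defer interpolation until a handler actually accepts the record, and keeps the message template constant across occurrences. A small illustration with made-up values:

import logging

logger = logging.getLogger(__name__)
err = ValueError("no content")
url = "https://example.com/note/1"

# f-string: the message is built eagerly, even if WARNING ends up filtered out.
logger.warning(f"{err} on {url}")

# %-style arguments: interpolation happens lazily inside the logging framework,
# and the template "%s on %s" stays identical for every such record.
logger.warning("%s on %s", err, url)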
@@ -9,6 +9,8 @@ from activities.models import (
 )
 from users.models import Identity
 
+logger = logging.getLogger(__name__)
+
 
 class PostService:
     """
@@ -99,7 +101,7 @@ class PostService:
         try:
             Post.ensure_object_uri(object_uri, reason=reason)
         except ValueError:
-            logging.error(
+            logger.error(
                 f"Cannot fetch ancestor Post={self.post.pk}, ancestor_uri={object_uri}"
             )
             break
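
Because every logger is now named after its own module, verbosity can be adjusted per module without touching the rest of the application. A hypothetical dictConfig sketch showing that payoff; the dotted logger name below is an example of the naming scheme, not configuration shipped in this commit:

import logging.config

logging.config.dictConfig({
    "version": 1,
    "disable_existing_loggers": False,
    "handlers": {"console": {"class": "logging.StreamHandler"}},
    "root": {"handlers": ["console"], "level": "WARNING"},
    "loggers": {
        # Hypothetical: raise one chatty module to DEBUG while everything
        # else stays at WARNING.
        "activities.services.post": {"level": "DEBUG"},
    },
})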
@@ -8,6 +8,8 @@ from pyld import jsonld
 
 from core.exceptions import ActivityPubFormatError
 
+logger = logging.getLogger(__name__)
+
 schemas = {
     "unknown": {
         "contentType": "application/ld+json",
@@ -630,7 +632,7 @@ def builtin_document_loader(url: str, options={}):
     # Get URL without scheme
     pieces = urllib_parse.urlparse(url)
     if pieces.hostname is None:
-        logging.info(f"No host name for json-ld schema: {url!r}")
+        logger.info(f"No host name for json-ld schema: {url!r}")
         return schemas["unknown"]
     key = pieces.hostname + pieces.path.rstrip("/")
     try:
@@ -641,7 +643,7 @@ def builtin_document_loader(url: str, options={}):
         return schemas[key]
     except KeyError:
         # return an empty context instead of throwing an error
-        logging.info(f"Ignoring unknown json-ld schema: {url!r}")
+        logger.info(f"Ignoring unknown json-ld schema: {url!r}")
         return schemas["unknown"]
 
 
@@ -27,12 +27,14 @@ if SENTRY_ENABLED:
     set_context = sentry_sdk.set_context
     set_tag = sentry_sdk.set_tag
     start_transaction = sentry_sdk.start_transaction
+    start_span = sentry_sdk.start_span
 else:
     configure_scope = noop_context
     push_scope = noop_context
     set_context = noop
     set_tag = noop
     start_transaction = noop_context
+    start_span = noop_context
 
 
 def set_takahe_app(name: str):
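
The hunk above relies on the module's existing pattern for an optional dependency: when the Sentry SDK is disabled, the same names are bound to do-nothing stand-ins, so callers can use start_transaction and start_span unconditionally. A stripped-down sketch of that pattern, assuming a noop_context helper like the one this module defines:

from contextlib import contextmanager

SENTRY_ENABLED = False  # assume detection of the SDK happens elsewhere


@contextmanager
def noop_context(*args, **kwargs):
    # Context-manager stand-in: accepts any arguments and does nothing.
    yield


if SENTRY_ENABLED:
    import sentry_sdk

    start_transaction = sentry_sdk.start_transaction
    start_span = sentry_sdk.start_span
else:
    start_transaction = noop_context
    start_span = noop_context

# Callers never need to check SENTRY_ENABLED themselves:
with start_transaction(op="task", name="example.loop"):
    with start_span(description="example.step"):
        pass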
@@ -19,6 +19,8 @@ from pyld import jsonld
 
 from core.ld import format_ld_date
 
+logger = logging.getLogger(__name__)
+
 
 class VerificationError(BaseException):
     """
@@ -259,7 +261,7 @@ class HttpSignature:
             )
         except SSLError as invalid_cert:
             # Not our problem if the other end doesn't have proper SSL
-            logging.info(f"{uri} {invalid_cert}")
+            logger.info("Invalid cert on %s %s", uri, invalid_cert)
             raise SSLCertVerificationError(invalid_cert) from invalid_cert
         except InvalidCodepoint as ex:
             # Convert to a more generic error we handle
@@ -8,6 +8,8 @@ from core.models import Config
 from stator.models import StatorModel
 from stator.runner import StatorRunner
 
+logger = logging.getLogger(__name__)
+
 
 class Command(BaseCommand):
     help = "Runs a Stator runner"
@@ -80,7 +82,7 @@ class Command(BaseCommand):
         if not models:
             models = StatorModel.subclasses
         models = [model for model in models if model not in excluded]
-        logging.info(
+        logger.info(
             "Running for models: " + " ".join(m._meta.label_lower for m in models)
         )
         # Run a runner
@@ -94,4 +96,4 @@ class Command(BaseCommand):
         try:
             runner.run()
         except KeyboardInterrupt:
-            logging.critical("Ctrl-C received")
+            logger.critical("Ctrl-C received")
@@ -11,6 +11,8 @@ from django.utils.functional import classproperty
 from stator.exceptions import TryAgainLater
 from stator.graph import State, StateGraph
 
+logger = logging.getLogger(__name__)
+
 
 class StateField(models.CharField):
     """
@@ -189,7 +191,7 @@ class StatorModel(models.Model):
         # If it's a manual progression state don't even try
         # We shouldn't really be here in this case, but it could be a race condition
         if current_state.externally_progressed:
-            logging.warning(
+            logger.warning(
                 f"Warning: trying to progress externally progressed state {self.state}!"
             )
             return None
@@ -203,7 +205,7 @@ class StatorModel(models.Model):
         except TryAgainLater:
             pass
         except BaseException as e:
-            logging.exception(e)
+            logger.exception(e)
         else:
             if next_state:
                 # Ensure it's a State object
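
The last hunk above keeps logger.exception(e) for unexpected errors; unlike error(), exception() logs at ERROR level and automatically attaches the active traceback, which is why it is only meaningful inside an except block. A small illustration (the helper function is made up):

import logging

logger = logging.getLogger(__name__)


def attempt(task):
    try:
        return task()
    except Exception as e:
        # Logged at ERROR level, with the current traceback appended to the record.
        logger.exception(e)
        return None


attempt(lambda: 1 / 0)  # logs ZeroDivisionError with a full traceback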
@@ -14,6 +14,8 @@ from core import sentry
 from core.models import Config
 from stator.models import StatorModel, Stats
 
+logger = logging.getLogger(__name__)
+
 
 class LoopingTimer:
     """
@@ -84,7 +86,7 @@ class StatorRunner:
         self.scheduling_timer = LoopingTimer(self.schedule_interval)
         self.deletion_timer = LoopingTimer(self.delete_interval)
         # For the first time period, launch tasks
-        logging.info("Running main task loop")
+        logger.info("Running main task loop")
         try:
             with sentry.configure_scope() as scope:
                 while True:
@@ -137,18 +139,18 @@
                 pass
 
         # Wait for tasks to finish
-        logging.info("Waiting for tasks to complete")
+        logger.info("Waiting for tasks to complete")
         self.executor.shutdown()
 
         # We're done
-        logging.info("Complete")
+        logger.info("Complete")
 
     def alarm_handler(self, signum, frame):
         """
         Called when SIGALRM fires, which means we missed a schedule loop.
         Just exit as we're likely deadlocked.
         """
-        logging.warning("Watchdog timeout exceeded")
+        logger.warning("Watchdog timeout exceeded")
         os._exit(2)
 
     def load_config(self):
@@ -163,9 +165,10 @@
         """
         with sentry.start_transaction(op="task", name="stator.run_scheduling"):
             for model in self.models:
+                with sentry.start_span(description=model._meta.label_lower):
                     num = self.handled.get(model._meta.label_lower, 0)
                     if num or settings.DEBUG:
-                        logging.info(
+                        logger.info(
                             f"{model._meta.label_lower}: Scheduling ({num} handled)"
                         )
                     self.submit_stats(model)
@@ -239,7 +242,7 @@
             try:
                 task.result()
             except BaseException as e:
-                logging.exception(e)
+                logger.exception(e)
 
     def run_single_cycle(self):
         """
@@ -269,11 +272,11 @@ def task_transition(instance: StatorModel, in_thread: bool = True):
     result = instance.transition_attempt()
     duration = time.monotonic() - started
     if result:
-        logging.info(
+        logger.info(
             f"{instance._meta.label_lower}: {instance.pk}: {instance.state} -> {result} ({duration:.2f}s)"
         )
     else:
-        logging.info(
+        logger.info(
             f"{instance._meta.label_lower}: {instance.pk}: {instance.state} unchanged ({duration:.2f}s)"
         )
     if in_thread:
@@ -289,7 +292,7 @@ def task_deletion(model: type[StatorModel], in_thread: bool = True):
         deleted = model.transition_delete_due()
         if not deleted:
             break
-        logging.info(f"{model._meta.label_lower}: Deleted {deleted} stale items")
+        logger.info(f"{model._meta.label_lower}: Deleted {deleted} stale items")
         time.sleep(1)
     if in_thread:
         close_old_connections()
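
The alarm_handler hunk above is part of the runner's watchdog: a SIGALRM is armed while the main loop runs, and if the loop fails to come back around in time the handler logs a warning and hard-exits with os._exit(2) on the assumption that the process is deadlocked. A stripped-down sketch of that mechanism, assuming a POSIX system and a made-up loop body:

import logging
import os
import signal
import time

logger = logging.getLogger(__name__)

WATCHDOG_SECONDS = 30  # assumed value for the example


def alarm_handler(signum, frame):
    # If this fires, the loop missed its deadline; assume a deadlock and bail out.
    logger.warning("Watchdog timeout exceeded")
    os._exit(2)


signal.signal(signal.SIGALRM, alarm_handler)

for _ in range(3):
    # Re-arm the watchdog each iteration; while the loop keeps returning here
    # within the window, the alarm never fires.
    signal.alarm(WATCHDOG_SECONDS)
    time.sleep(1)  # stand-in for one pass of real work

signal.alarm(0)  # disarm on clean shutdown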
@@ -14,6 +14,8 @@ from core.models import Config
 from stator.models import State, StateField, StateGraph, StatorModel
 from users.schemas import NodeInfo
 
+logger = logging.getLogger(__name__)
+
 
 class DomainStates(StateGraph):
     outdated = State(try_interval=60 * 30, force_initial=True)
@@ -209,13 +211,14 @@ class Domain(StatorModel):
                 and response.status_code < 500
                 and response.status_code not in [401, 403, 404, 406, 410]
             ):
-                logging.warning(
-                    f"Client error fetching nodeinfo: {str(ex)}",
+                logger.warning(
+                    "Client error fetching nodeinfo: %d %s %s",
+                    response.status_code,
+                    nodeinfo20_url,
+                    ex,
                     extra={
-                        "code": response.status_code,
                         "content": response.content,
                         "domain": self.domain,
-                        "nodeinfo20_url": nodeinfo20_url,
                     },
                 )
                 return None
@@ -223,11 +226,12 @@ class Domain(StatorModel):
         try:
             info = NodeInfo(**response.json())
         except (json.JSONDecodeError, pydantic.ValidationError) as ex:
-            logging.warning(
-                f"Client error decoding nodeinfo: {str(ex)}",
+            logger.warning(
+                "Client error decoding nodeinfo: %s %s",
+                nodeinfo20_url,
+                ex,
                 extra={
                     "domain": self.domain,
-                    "nodeinfo20_url": nodeinfo20_url,
                 },
             )
             return None
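
The two nodeinfo hunks above pass extra={...} alongside the message; logging copies those keys onto the LogRecord as attributes, where handlers and integrations (Sentry, structured formatters) can pick them up even though the default formatter does not print them. A short sketch; the format string and values are illustrative:

import logging

# %(domain)s reads the custom attribute injected via extra= on the record.
logging.basicConfig(format="%(levelname)s %(message)s domain=%(domain)s")
logger = logging.getLogger(__name__)

logger.warning(
    "Client error fetching nodeinfo: %d %s",
    502,
    "https://example.com/nodeinfo/2.0",
    extra={"domain": "example.com"},
)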
@@ -11,6 +11,8 @@ from users.models.block import Block
 from users.models.identity import Identity
 from users.models.inbox_message import InboxMessage
 
+logger = logging.getLogger(__name__)
+
 
 class FollowStates(StateGraph):
     unrequested = State(try_interval=600)
@@ -350,7 +352,7 @@ class Follow(StatorModel):
         try:
             follow = cls.by_ap(data, create=True)
         except Identity.DoesNotExist:
-            logging.info(
+            logger.info(
                 "Identity not found for incoming Follow", extra={"data": data}
             )
             return
@@ -367,7 +369,7 @@ class Follow(StatorModel):
         try:
             follow = cls.by_ap(data["object"])
         except (cls.DoesNotExist, Identity.DoesNotExist):
-            logging.info(
+            logger.info(
                 "Follow or Identity not found for incoming Accept",
                 extra={"data": data},
             )
@@ -389,7 +391,7 @@ class Follow(StatorModel):
         try:
             follow = cls.by_ap(data["object"])
         except (cls.DoesNotExist, Identity.DoesNotExist):
-            logging.info(
+            logger.info(
                 "Follow or Identity not found for incoming Reject",
                 extra={"data": data},
            )
@@ -419,7 +421,7 @@ class Follow(StatorModel):
         try:
             follow = cls.by_ap(data["object"])
         except (cls.DoesNotExist, Identity.DoesNotExist):
-            logging.info(
+            logger.info(
                 "Follow or Identity not found for incoming Undo", extra={"data": data}
             )
             return
@@ -37,6 +37,8 @@ from users.models.domain import Domain
 from users.models.inbox_message import InboxMessage
 from users.models.system_actor import SystemActor
 
+logger = logging.getLogger(__name__)
+
 
 class IdentityStates(StateGraph):
     """
@@ -872,7 +874,7 @@ class Identity(StatorModel):
             # Their account got deleted, so let's do the same.
             Identity.objects.filter(pk=self.pk).delete()
         if status_code < 500 and status_code not in [401, 403, 404, 406, 410]:
-            logging.info(
+            logger.info(
                 "Client error fetching actor: %d %s", status_code, self.actor_uri
             )
             return False
@@ -880,7 +882,7 @@ class Identity(StatorModel):
             document = canonicalise(response.json(), include_security=True)
         except ValueError:
             # servers with empty or invalid responses are inevitable
-            logging.info(
+            logger.info(
                 "Invalid response fetching actor %s",
                 self.actor_uri,
                 extra={
@@ -942,10 +944,10 @@ class Identity(StatorModel):
             self.domain = Domain.get_remote_domain(webfinger_domain)
         except TryAgainLater:
             # continue with original domain when webfinger times out
-            logging.info("WebFinger timed out: %s", self.actor_uri)
+            logger.info("WebFinger timed out: %s", self.actor_uri)
             pass
         except ValueError as exc:
-            logging.info(
+            logger.info(
                 "Can't parse WebFinger: %s %s",
                 exc.args[0],
                 self.actor_uri,
@@ -19,6 +19,8 @@ from users.models import (
     User,
 )
 
+logger = logging.getLogger(__name__)
+
 
 class IdentityService:
     """
@@ -226,7 +228,7 @@ class IdentityService:
                 state__in=PostInteractionStates.group_active(),
             )
         except MultipleObjectsReturned as exc:
-            logging.exception("%s on %s", exc, object_uri)
+            logger.exception("%s on %s", exc, object_uri)
             pass
         except Post.DoesNotExist:
             # ignore 404s...
@@ -26,6 +26,8 @@ from users.models import Identity, InboxMessage, SystemActor
 from users.models.domain import Domain
 from users.shortcuts import by_handle_or_404
 
+logger = logging.getLogger(__name__)
+
 
 class HttpResponseUnauthorized(HttpResponse):
     status_code = 401
@@ -147,7 +149,7 @@ class Inbox(View):
         # This ensures that the signature used for the headers matches the actor
        # described in the payload.
         if "actor" not in document:
-            logging.warning("Inbox error: unspecified actor")
+            logger.warning("Inbox error: unspecified actor")
             return HttpResponseBadRequest("Unspecified actor")
 
         identity = Identity.by_actor_uri(document["actor"], create=True, transient=True)
@@ -167,7 +169,7 @@ class Inbox(View):
         domain = Domain.get_remote_domain(actor_url_parts.hostname)
         if identity.blocked or domain.recursively_blocked():
             # I love to lie! Throw it away!
-            logging.info(
+            logger.info(
                 "Inbox: Discarded message from blocked %s %s",
                 "domain" if domain.recursively_blocked() else "user",
                 identity.actor_uri,
@@ -196,21 +198,21 @@ class Inbox(View):
                     request,
                     identity.public_key,
                 )
-                logging.debug(
+                logger.debug(
                     "Inbox: %s from %s has good HTTP signature",
                     document_type,
                     identity,
                 )
             else:
-                logging.info(
+                logger.info(
                     "Inbox: New actor, no key available: %s",
                     document["actor"],
                 )
         except VerificationFormatError as e:
-            logging.warning("Inbox error: Bad HTTP signature format: %s", e.args[0])
+            logger.warning("Inbox error: Bad HTTP signature format: %s", e.args[0])
             return HttpResponseBadRequest(e.args[0])
         except VerificationError:
-            logging.warning("Inbox error: Bad HTTP signature from %s", identity)
+            logger.warning("Inbox error: Bad HTTP signature from %s", identity)
             return HttpResponseUnauthorized("Bad signature")
 
         # Mastodon advices not implementing LD Signatures, but
@@ -224,18 +226,18 @@ class Inbox(View):
                     creator, create=True, transient=True
                 )
                 if not creator_identity.public_key:
-                    logging.info("Inbox: New actor, no key available: %s", creator)
+                    logger.info("Inbox: New actor, no key available: %s", creator)
                     # if we can't verify it, we don't keep it
                     document.pop("signature")
                 else:
                     LDSignature.verify_signature(document, creator_identity.public_key)
-                    logging.debug(
+                    logger.debug(
                         "Inbox: %s from %s has good LD signature",
                         document["type"],
                         creator_identity,
                     )
             except VerificationFormatError as e:
-                logging.warning("Inbox error: Bad LD signature format: %s", e.args[0])
+                logger.warning("Inbox error: Bad LD signature format: %s", e.args[0])
                 return HttpResponseBadRequest(e.args[0])
             except VerificationError:
                 # An invalid LD Signature might also indicate nothing but
@@ -243,14 +245,14 @@ class Inbox(View):
             # Strip it out if we can't verify it.
             if "signature" in document:
                 document.pop("signature")
-                logging.info(
+                logger.info(
                     "Inbox: Stripping invalid LD signature from %s %s",
                     creator_identity,
                     document["id"],
                 )
 
         if not ("signature" in request or "signature" in document):
-            logging.debug(
+            logger.debug(
                 "Inbox: %s from %s is unauthenticated. That's OK.",
                 document["type"],
                 identity,