chore: fix some comments (#4637)
Signed-off-by: TechVest <techdashen@qq.com>
Commit b0370ae2fd (parent 6efab9aab1)
20 changed files with 25 additions and 25 deletions
@@ -242,7 +242,7 @@ test("Admin actions in remote community are not federated to origin", async () =
   );
   expect(banRes.banned).toBe(true);
 
-  // ban doesnt federate to community's origin instance alpha
+  // ban doesn't federate to community's origin instance alpha
   let alphaPost = (await resolvePost(alpha, gammaPost.post)).post;
   expect(alphaPost?.creator_banned_from_community).toBe(false);
 
@@ -452,7 +452,7 @@ test("Dont receive community activities after unsubscribe", async () => {
   );
   expect(communityRes1.community_view.counts.subscribers).toBe(2);
 
-  // temporarily block alpha, so that it doesnt know about unfollow
+  // temporarily block alpha, so that it doesn't know about unfollow
   let editSiteForm: EditSite = {};
   editSiteForm.allowed_instances = ["lemmy-epsilon"];
   await beta.editSite(editSiteForm);
@@ -513,7 +513,7 @@ test("Fetch community, includes posts", async () => {
   expect(post_listing.posts[0].post.ap_id).toBe(postRes.post_view.post.ap_id);
 });
 
-test("Content in local-only community doesnt federate", async () => {
+test("Content in local-only community doesn't federate", async () => {
   // create a community and set it local-only
   let communityRes = (await createCommunity(alpha)).community_view.community;
   let form: EditCommunity = {
@@ -41,7 +41,7 @@ test("Upload image and delete it", async () => {
   // Before running this test, you need to delete all previous images in the DB
   await deleteAllImages(alpha);
 
-  // Upload test image. We use a simple string buffer as pictrs doesnt require an actual image
+  // Upload test image. We use a simple string buffer as pictrs doesn't require an actual image
   // in testing mode.
   const upload_form: UploadImage = {
     image: Buffer.from("test"),
@@ -235,7 +235,7 @@ test("No image proxying if setting is disabled", async () => {
   );
   expect(post.post_view.post).toBeDefined();
 
-  // remote image doesnt get proxied after upload
+  // remote image doesn't get proxied after upload
   expect(
     post.post_view.post.url?.startsWith("http://127.0.0.1:8551/pictrs/image/"),
   ).toBeTruthy();
@@ -248,7 +248,7 @@ test("No image proxying if setting is disabled", async () => {
   );
   expect(betaPost.post).toBeDefined();
 
-  // remote image doesnt get proxied after federation
+  // remote image doesn't get proxied after federation
   expect(
     betaPost.post.url?.startsWith("http://127.0.0.1:8551/pictrs/image/"),
   ).toBeTruthy();
@@ -295,7 +295,7 @@ test("Make regular post, and give it a custom thumbnail", async () => {
   expect(post.post_view.post.thumbnail_url).toBe(upload1.url);
 });
 
-test("Create an image post, and make sure a custom thumbnail doesnt overwrite it", async () => {
+test("Create an image post, and make sure a custom thumbnail doesn't overwrite it", async () => {
   const uploadForm1: UploadImage = {
     image: Buffer.from("test1"),
   };
@@ -49,7 +49,7 @@
   cache_external_link_previews: true
   # Specifies how to handle remote images, so that users don't have to connect directly to remote servers.
   image_mode:
-    # Leave images unchanged, don't generate any local thumbnails for post urls. Instead the the
+    # Leave images unchanged, don't generate any local thumbnails for post urls. Instead the
    # Opengraph image is directly returned as thumbnail
    "None"
 
@@ -44,7 +44,7 @@ pub(crate) async fn send_like_activity(
     let activity = AnnouncableActivities::Vote(vote);
     send_activity_in_community(activity, &actor, &community, empty, false, &context).await
   } else {
-    // Lemmy API doesnt distinguish between Undo/Like and Undo/Dislike, so we hardcode it here.
+    // Lemmy API doesn't distinguish between Undo/Like and Undo/Dislike, so we hardcode it here.
     let vote = Vote::new(object_id, &actor, &community, VoteType::Like, &context)?;
     let undo_vote = UndoVote::new(vote, &actor, &community, &context)?;
     let activity = AnnouncableActivities::UndoVote(undo_vote);
@@ -195,7 +195,7 @@ pub(crate) async fn check_apub_id_valid_with_strictness(
 
 /// Store received activities in the database.
 ///
-/// This ensures that the same activity doesnt get received and processed more than once, which
+/// This ensures that the same activity doesn't get received and processed more than once, which
 /// would be a waste of resources.
 #[tracing::instrument(skip(data))]
 async fn insert_received_activity(ap_id: &Url, data: &Data<LemmyContext>) -> LemmyResult<()> {
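Context for the hunk above: the doc comment describes deduplicating incoming activities. Below is a minimal sketch of how such a check can look with Diesel, assuming a table keyed on the activity's ap_id; the table definition, connection handling, and the error returned on a duplicate are placeholders for illustration, not Lemmy's actual schema or error types.

```rust
// Illustrative only: table, column, and error choices are assumptions for this
// sketch, not Lemmy's real schema or error types.
use diesel::prelude::*;
use diesel_async::{AsyncPgConnection, RunQueryDsl};

diesel::table! {
  received_activity (ap_id) {
    ap_id -> Text,
  }
}

/// Record the activity id; a conflict means the same activity was already processed.
async fn insert_received_activity(
  conn: &mut AsyncPgConnection,
  ap_id: &str,
) -> Result<(), diesel::result::Error> {
  let rows = diesel::insert_into(received_activity::table)
    .values(received_activity::ap_id.eq(ap_id))
    .on_conflict_do_nothing()
    .execute(conn)
    .await?;
  if rows == 0 {
    // duplicate delivery: nothing was inserted, signal the caller to stop
    // (NotFound is just a stand-in error here)
    return Err(diesel::result::Error::NotFound);
  }
  Ok(())
}
```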
@@ -228,7 +228,7 @@ pub(crate) mod tests {
     url: &Url,
     context: &Data<LemmyContext>,
   ) -> LemmyResult<(ApubPerson, ApubCommunity, ApubPost, ApubSite)> {
-    // use separate counter so this doesnt affect tests
+    // use separate counter so this doesn't affect tests
     let context2 = context.reset_request_count();
     let (person, site) = parse_lemmy_person(&context2).await?;
     let community = parse_lemmy_community(&context2).await?;
@@ -269,7 +269,7 @@ pub(crate) mod tests {
   pub(crate) async fn parse_lemmy_community(
     context: &Data<LemmyContext>,
   ) -> LemmyResult<ApubCommunity> {
-    // use separate counter so this doesnt affect tests
+    // use separate counter so this doesn't affect tests
     let context2 = context.reset_request_count();
     let mut json: Group = file_to_json_object("assets/lemmy/objects/group.json")?;
     // change these links so they dont fetch over the network
@@ -23,7 +23,7 @@ pub struct AnnounceActivity {
 }
 
 /// Use this to receive community inbox activities, and then announce them if valid. This
-/// ensures that all json fields are kept, even if Lemmy doesnt understand them.
+/// ensures that all json fields are kept, even if Lemmy doesn't understand them.
 #[derive(Clone, Debug, Deserialize, Serialize)]
 pub struct RawAnnouncableActivities {
   pub(crate) id: Url,
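The struct documented here keeps every JSON field of an incoming activity. Below is a self-contained sketch of that serde pattern, assuming a flattened catch-all map; the `other` field name is an assumption, and `id` is simplified to a String instead of the Url shown in the diff.

```rust
// Sketch of preserving unknown JSON fields with #[serde(flatten)];
// field names besides `id` are assumptions, not the exact Lemmy struct.
use serde::{Deserialize, Serialize};
use serde_json::{Map, Value};

#[derive(Clone, Debug, Deserialize, Serialize)]
pub struct RawAnnouncableActivities {
  pub id: String,
  // every field the deserializer didn't match above lands here and is written
  // back out on serialization, so nothing is dropped
  #[serde(flatten)]
  pub other: Map<String, Value>,
}

fn main() -> serde_json::Result<()> {
  let json = r#"{"id":"https://example.com/activity/1","type":"Like","newField":42}"#;
  let raw: RawAnnouncableActivities = serde_json::from_str(json)?;
  // "type" and "newField" survive a round trip even though the struct doesn't know them
  println!("{}", serde_json::to_string(&raw)?);
  Ok(())
}
```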
@@ -22,7 +22,7 @@ impl Crud for CommentReply {
     let conn = &mut get_conn(pool).await?;
 
     // since the return here isnt utilized, we dont need to do an update
-    // but get_result doesnt return the existing row here
+    // but get_result doesn't return the existing row here
     insert_into(comment_reply::table)
       .values(comment_reply_form)
       .on_conflict((comment_reply::recipient_id, comment_reply::comment_id))
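The comments above describe the upsert trick: with a plain `do_nothing` on conflict, `get_result` would return no row for an already-existing entry, so the conflicting row is updated to the same values to always get a row back. A rough, self-contained sketch of that pattern with a simplified stand-in schema (table, columns, and form type below are assumptions, not Lemmy's real ones):

```rust
// Simplified stand-in schema and form type; only the on_conflict/do_update/set
// shape mirrors the pattern described in the comments above.
use diesel::prelude::*;
use diesel_async::{AsyncPgConnection, RunQueryDsl};

diesel::table! {
  comment_reply (id) {
    id -> Int4,
    recipient_id -> Int4,
    comment_id -> Int4,
    read -> Bool,
  }
}

#[derive(Insertable, AsChangeset)]
#[diesel(table_name = comment_reply)]
struct CommentReplyForm {
  recipient_id: i32,
  comment_id: i32,
  read: bool,
}

async fn create(
  conn: &mut AsyncPgConnection,
  form: &CommentReplyForm,
) -> Result<(i32, i32, i32, bool), diesel::result::Error> {
  // a plain `.do_nothing()` would make `get_result` return nothing when the row
  // already exists, so the conflicting row is "updated" to the same values in
  // order to always get a row back
  diesel::insert_into(comment_reply::table)
    .values(form)
    .on_conflict((comment_reply::recipient_id, comment_reply::comment_id))
    .do_update()
    .set(form)
    .get_result(conn)
    .await
}
```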
@@ -33,7 +33,7 @@ use diesel::{
 use diesel_async::RunQueryDsl;
 
 impl Instance {
-  /// Attempt to read Instance column for the given domain. If it doesnt exist, insert a new one.
+  /// Attempt to read Instance column for the given domain. If it doesn't exist, insert a new one.
   /// There is no need for update as the domain of an existing instance cant change.
   pub async fn read_or_create(pool: &mut DbPool<'_>, domain_: String) -> Result<Self, Error> {
     use crate::schema::instance::domain;
@@ -55,7 +55,7 @@ impl Crud for Person {
 impl Person {
   /// Update or insert the person.
   ///
-  /// This is necessary for federation, because Activitypub doesnt distinguish between these actions.
+  /// This is necessary for federation, because Activitypub doesn't distinguish between these actions.
   pub async fn upsert(pool: &mut DbPool<'_>, form: &PersonInsertForm) -> Result<Self, Error> {
     let conn = &mut get_conn(pool).await?;
     insert_into(person::table)
@@ -21,7 +21,7 @@ impl Crud for PersonMention {
   ) -> Result<Self, Error> {
     let conn = &mut get_conn(pool).await?;
     // since the return here isnt utilized, we dont need to do an update
-    // but get_result doesnt return the existing row here
+    // but get_result doesn't return the existing row here
     insert_into(person_mention::table)
       .values(person_mention_form)
       .on_conflict((person_mention::recipient_id, person_mention::comment_id))
@@ -46,7 +46,7 @@ impl Reportable for PrivateMessageReport {
       .await
   }
 
-  // TODO: this is unused because private message doesnt have remove handler
+  // TODO: this is unused because private message doesn't have remove handler
   async fn resolve_all_for_object(
     _pool: &mut DbPool<'_>,
     _pm_id_: PrivateMessageId,
@@ -176,7 +176,7 @@ impl Display for DbUrl {
   }
 }
 
-// the project doesnt compile with From
+// the project doesn't compile with From
 #[allow(clippy::from_over_into)]
 impl Into<DbUrl> for Url {
   fn into(self) -> DbUrl {
@@ -646,7 +646,7 @@ impl<'a> PostQuery<'a> {
     site: &Site,
     pool: &mut DbPool<'_>,
   ) -> Result<Option<PostQuery<'a>>, Error> {
-    // first get one page for the most popular community to get an upper bound for the the page end for the real query
+    // first get one page for the most popular community to get an upper bound for the page end for the real query
     // the reason this is needed is that when fetching posts for a single community PostgreSQL can optimize
     // the query to use an index on e.g. (=, >=, >=, >=) and fetch only LIMIT rows
     // but for the followed-communities query it has to query the index on (IN, >=, >=, >=)
@@ -66,7 +66,7 @@ fn adapt_request(
   client: &ClientWithMiddleware,
   url: String,
 ) -> RequestBuilder {
-  // remove accept-encoding header so that pictrs doesnt compress the response
+  // remove accept-encoding header so that pictrs doesn't compress the response
   const INVALID_HEADERS: &[HeaderName] = &[ACCEPT_ENCODING, HOST];
 
   let client_request = client
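The function above forwards an incoming request to pictrs while dropping headers that would interfere, such as Accept-Encoding. Here is a sketch of that header-filtering idea using a plain reqwest::Client instead of Lemmy's ClientWithMiddleware; the fold-based body is an assumed shape, not the verbatim function.

```rust
// Sketch only: plain reqwest::Client stands in for ClientWithMiddleware, and the
// body is a guess at the shape of the function, not a copy of it.
use actix_web::HttpRequest;
use reqwest::header::{HeaderName, ACCEPT_ENCODING, HOST};
use reqwest::{Client, RequestBuilder};

const INVALID_HEADERS: &[HeaderName] = &[ACCEPT_ENCODING, HOST];

fn adapt_request(request: &HttpRequest, client: &Client, url: String) -> RequestBuilder {
  let client_request = client.request(request.method().clone(), url);
  request
    .headers()
    .iter()
    .fold(client_request, |client_req, (key, value)| {
      if INVALID_HEADERS.contains(key) {
        // skip headers like Accept-Encoding so pictrs doesn't compress the response
        client_req
      } else {
        client_req.header(key.clone(), value.clone())
      }
    })
}
```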
@@ -63,7 +63,7 @@ async fn get_webfinger_response(
   });
 
   // Mastodon seems to prioritize the last webfinger item in case of duplicates. Put
-  // community last so that it gets prioritized. For Lemmy the order doesnt matter.
+  // community last so that it gets prioritized. For Lemmy the order doesn't matter.
   vec![
     webfinger_link_for_actor(user_id, "Person", &context),
     webfinger_link_for_actor(community_id, "Group", &context),
@@ -97,7 +97,7 @@ pub struct PictrsConfig {
 #[derive(Debug, Deserialize, Serialize, Clone, SmartDefault, Document, PartialEq)]
 #[serde(deny_unknown_fields)]
 pub enum PictrsImageMode {
-  /// Leave images unchanged, don't generate any local thumbnails for post urls. Instead the the
+  /// Leave images unchanged, don't generate any local thumbnails for post urls. Instead the
   /// Opengraph image is directly returned as thumbnail
   None,
   /// Generate thumbnails for external post urls and store them persistently in pict-rs. This
@@ -117,7 +117,7 @@ services:
         "track_activity_query_size=1048576",
       ]
     ports:
-      # use a different port so it doesnt conflict with potential postgres db running on the host
+      # use a different port so it doesn't conflict with potential postgres db running on the host
      - "5433:5432"
    environment:
      - POSTGRES_USER=lemmy
@@ -477,7 +477,7 @@ async fn update_instance_software(
     .build();
   let form = match client.get(&node_info_url).send().await {
     Ok(res) if res.status().is_client_error() => {
-      // Instance doesnt have nodeinfo but sent a response, consider it alive
+      // Instance doesn't have nodeinfo but sent a response, consider it alive
      Some(default_form)
    }
    Ok(res) => match res.json::<NodeInfo>().await {
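The match above treats any HTTP response, even a 4xx, as proof the instance is alive, and only a successfully parsed JSON body yields software details. A small self-contained sketch of that decision logic with plain reqwest (the NodeInfo shape is a guessed placeholder, not Lemmy's actual struct):

```rust
// Sketch only: NodeInfo fields are placeholders; requires reqwest with the "json" feature.
use reqwest::Client;
use serde::Deserialize;

#[derive(Deserialize, Debug, Default)]
struct NodeInfo {
  software: Option<Software>,
}

#[derive(Deserialize, Debug)]
struct Software {
  name: Option<String>,
  version: Option<String>,
}

async fn probe_instance(client: &Client, node_info_url: &str) -> Option<NodeInfo> {
  match client.get(node_info_url).send().await {
    // a 4xx still proves the host answered, so consider it alive with no software info
    Ok(res) if res.status().is_client_error() => Some(NodeInfo::default()),
    // a parseable nodeinfo response gives the software name and version
    Ok(res) => res.json::<NodeInfo>().await.ok(),
    // no response at all: don't update anything
    Err(_) => None,
  }
}
```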