Compare commits

develop..v2.5.4

No commits in common. "develop" and "v2.5.4" have entirely different histories.

1229 changed files with 9137 additions and 28297 deletions


@ -1,9 +0,0 @@
[
{"lib/cachex.ex", "Unknown type: Spec.cache/0."},
{"lib/pleroma/web/plugs/rate_limiter.ex", "The pattern can never match the type {:commit, _} | {:ignore, _}."},
{"lib/pleroma/web/plugs/rate_limiter.ex", "Function get_scale/2 will never be called."},
{"lib/pleroma/web/plugs/rate_limiter.ex", "Function initialize_buckets!/1 will never be called."},
{"lib/pleroma/workers/receiver_worker.ex", :call},
{"lib/pleroma/workers/receiver_worker.ex", :pattern_match},
{"lib/pleroma/workers/receiver_worker.ex", :pattern_match_cov},
]

.gitignore vendored

@ -6,7 +6,7 @@
/test/instance
/test/uploads
/.elixir_ls
/test/fixtures/DSCN0010_tmp*
/test/fixtures/DSCN0010_tmp.jpg
/test/fixtures/test_tmp.txt
/test/fixtures/image_tmp.jpg
/test/tmp/
@ -57,12 +57,5 @@ pleroma.iml
.tool-versions
# Editor temp files
*~
*#
*.swp
archive-*
.gitlab-ci-local
# Test files should be named *.exs
test/pleroma/**/*.ex
/*~
/*#


@ -1,39 +1,28 @@
image: git.pleroma.social:5050/pleroma/pleroma/ci-base:elixir-1.14.5-otp-25
image: git.pleroma.social:5050/pleroma/pleroma/ci-base
variables: &global_variables
# Only used for the release
ELIXIR_VER: 1.14.5
POSTGRES_DB: pleroma_test
POSTGRES_USER: postgres
POSTGRES_PASSWORD: postgres
DB_HOST: postgres
DB_PORT: "5432"
DB_PORT: 5432
MIX_ENV: test
GIT_STRATEGY: fetch
workflow:
rules:
- if: $CI_PIPELINE_SOURCE == "merge_request_event"
- if: $CI_COMMIT_BRANCH && $CI_OPEN_MERGE_REQUESTS
when: never
- if: $CI_COMMIT_BRANCH
cache: &global_cache_policy
key: $CI_JOB_IMAGE-$CI_COMMIT_SHORT_SHA
key:
files:
- mix.lock
paths:
- deps
- _build
stages:
- build
- lint
- test
- check-changelog
- benchmark
- deploy
- release
- docker
- docker-combine
before_script:
- echo $MIX_ENV
@ -43,60 +32,32 @@ before_script:
after_script:
- rm -rf _build/*/lib/pleroma
check-changelog:
stage: check-changelog
image: alpine
rules:
- if: $CI_MERGE_REQUEST_SOURCE_PROJECT_PATH == 'pleroma/pleroma' && $CI_MERGE_REQUEST_SOURCE_BRANCH_NAME == 'weblate-extract'
when: never
- if: $CI_MERGE_REQUEST_SOURCE_PROJECT_PATH == 'pleroma/pleroma' && $CI_MERGE_REQUEST_SOURCE_BRANCH_NAME == 'weblate'
when: never
- if: $CI_MERGE_REQUEST_TARGET_BRANCH_NAME == "develop"
before_script: ''
after_script: ''
cache: {}
script:
- apk add git
- sh ./tools/check-changelog
.build_changes_policy:
rules:
- changes:
- ".gitlab-ci.yml"
- "**/*.ex"
- "**/*.exs"
- "mix.lock"
.using-ci-base:
tags:
- amd64
build-1.14.5-otp-25:
build:
extends:
- .build_changes_policy
- .using-ci-base
stage: build
script:
- mix compile --force
build-1.17.1-otp-26:
extends:
- .build_changes_policy
- .using-ci-base
stage: build
image: git.pleroma.social:5050/pleroma/pleroma/ci-base:elixir-1.17.1-otp-26
only:
changes: &build_changes_policy
- ".gitlab-ci.yml"
- "**/*.ex"
- "**/*.exs"
- "mix.lock"
script:
- mix compile --force
spec-build:
extends:
- .using-ci-base
stage: build
rules:
- changes:
- ".gitlab-ci.yml"
- "lib/pleroma/web/api_spec/**/*.ex"
- "lib/pleroma/web/api_spec.ex"
stage: test
only:
changes:
- ".gitlab-ci.yml"
- "lib/pleroma/web/api_spec/**/*.ex"
- "lib/pleroma/web/api_spec.ex"
artifacts:
paths:
- spec.json
@ -111,7 +72,7 @@ benchmark:
variables:
MIX_ENV: benchmark
services:
- name: postgres:11.22-alpine
- name: postgres:9.6-alpine
alias: postgres
command: ["postgres", "-c", "fsync=off", "-c", "synchronous_commit=off", "-c", "full_page_writes=off"]
script:
@ -119,22 +80,24 @@ benchmark:
- mix ecto.migrate
- mix pleroma.load_testing
unit-testing-1.14.5-otp-25:
unit-testing:
extends:
- .build_changes_policy
- .using-ci-base
stage: test
only:
changes: *build_changes_policy
cache: &testing_cache_policy
<<: *global_cache_policy
policy: pull
services: &testing_services
services:
- name: postgres:13-alpine
alias: postgres
command: ["postgres", "-c", "fsync=off", "-c", "synchronous_commit=off", "-c", "full_page_writes=off"]
script: &testing_script
script:
- mix ecto.create
- mix ecto.migrate
- mix pleroma.test_runner --cover --preload-modules
- mix test --cover --preload-modules
coverage: '/^Line total: ([^ ]*%)$/'
artifacts:
reports:
@ -142,20 +105,68 @@ unit-testing-1.14.5-otp-25:
coverage_format: cobertura
path: coverage.xml
unit-testing-1.17.1-otp-26:
unit-testing-erratic:
extends:
- .build_changes_policy
- .using-ci-base
stage: test
image: git.pleroma.social:5050/pleroma/pleroma/ci-base:elixir-1.17.1-otp-26
cache: *testing_cache_policy
services: *testing_services
script: *testing_script
retry: 2
allow_failure: true
only:
changes: *build_changes_policy
cache: &testing_cache_policy
<<: *global_cache_policy
policy: pull
formatting-1.15:
extends: .build_changes_policy
image: &formatting_elixir elixir:1.15-alpine
stage: lint
services:
- name: postgres:13-alpine
alias: postgres
command: ["postgres", "-c", "fsync=off", "-c", "synchronous_commit=off", "-c", "full_page_writes=off"]
script:
- mix ecto.create
- mix ecto.migrate
- mix test --only=erratic
# Removed to fix CI issue. In this early state it wasn't adding much value anyway.
# TODO Fix and reinstate federated testing
# federated-testing:
# stage: test
# cache: *testing_cache_policy
# services:
# - name: minibikini/postgres-with-rum:12
# alias: postgres
# command: ["postgres", "-c", "fsync=off", "-c", "synchronous_commit=off", "-c", "full_page_writes=off"]
# script:
# - mix deps.get
# - mix ecto.create
# - mix ecto.migrate
# - epmd -daemon
# - mix test --trace --only federated
unit-testing-rum:
extends:
- .using-ci-base
stage: test
only:
changes: *build_changes_policy
cache: *testing_cache_policy
services:
- name: minibikini/postgres-with-rum:12
alias: postgres
command: ["postgres", "-c", "fsync=off", "-c", "synchronous_commit=off", "-c", "full_page_writes=off"]
variables:
<<: *global_variables
RUM_ENABLED: "true"
script:
- mix ecto.create
- mix ecto.migrate
- "mix ecto.migrate --migrations-path priv/repo/optional_migrations/rum_indexing/"
- mix test --preload-modules
lint:
image: &current_elixir elixir:1.12-alpine
stage: test
only:
changes: *build_changes_policy
cache: *testing_cache_policy
before_script: &current_bfr_script
- apk update
@ -166,38 +177,27 @@ formatting-1.15:
script:
- mix format --check-formatted
cycles-1.15:
extends: .build_changes_policy
image: *formatting_elixir
stage: lint
analysis:
extends:
- .using-ci-base
stage: test
only:
changes: *build_changes_policy
cache: *testing_cache_policy
script:
- mix credo --strict --only=warnings,todo,fixme,consistency,readability
cycles:
image: *current_elixir
stage: test
only:
changes: *build_changes_policy
cache: {}
before_script: *current_bfr_script
script:
- mix compile
- mix xref graph --format cycles --label compile | awk '{print $0} END{exit ($0 != "No cycles found")}'
analysis:
extends:
- .build_changes_policy
- .using-ci-base
stage: lint
cache: *testing_cache_policy
script:
- mix credo --strict --only=warnings,todo,fixme,consistency,readability
dialyzer:
extends:
- .build_changes_policy
- .using-ci-base
stage: lint
allow_failure: true
when: manual
cache: *testing_cache_policy
tags:
- feld
script:
- mix dialyzer
docs-deploy:
stage: deploy
cache: *testing_cache_policy
@ -208,7 +208,7 @@ docs-deploy:
before_script:
- apk add curl
script:
- curl --fail-with-body -X POST -F"token=$CI_JOB_TOKEN" -F'ref=master' -F"variables[BRANCH]=$CI_COMMIT_REF_NAME" https://git.pleroma.social/api/v4/projects/673/trigger/pipeline
- curl -X POST -F"token=$DOCS_PIPELINE_TRIGGER" -F'ref=master' -F"variables[BRANCH]=$CI_COMMIT_REF_NAME" https://git.pleroma.social/api/v4/projects/673/trigger/pipeline
review_app:
image: alpine:3.9
stage: deploy
@ -249,7 +249,7 @@ spec-deploy:
before_script:
- apk add curl
script:
- curl --fail-with-body -X POST -F"token=$CI_JOB_TOKEN" -F'ref=master' -F"variables[BRANCH]=$CI_COMMIT_REF_NAME" -F"variables[JOB_REF]=$CI_JOB_ID" https://git.pleroma.social/api/v4/projects/1130/trigger/pipeline
- curl -X POST -F"token=$API_DOCS_PIPELINE_TRIGGER" -F'ref=master' -F"variables[BRANCH]=$CI_COMMIT_REF_NAME" -F"variables[JOB_REF]=$CI_JOB_ID" https://git.pleroma.social/api/v4/projects/1130/trigger/pipeline
stop_review_app:
@ -272,7 +272,7 @@ stop_review_app:
amd64:
stage: release
image: elixir:$ELIXIR_VER
image: elixir:1.11.4
only: &release-only
- stable@pleroma/pleroma
- develop@pleroma/pleroma
@ -296,9 +296,8 @@ amd64:
- deps
variables: &release-variables
MIX_ENV: prod
VIX_COMPILATION_MODE: PLATFORM_PROVIDED_LIBVIPS
before_script: &before-release
- apt-get update && apt-get install -y cmake libmagic-dev libvips-dev erlang-dev
- apt-get update && apt-get install -y cmake libmagic-dev
- echo "import Config" > config/prod.secret.exs
- mix local.hex --force
- mix local.rebar --force
@ -313,13 +312,13 @@ amd64-musl:
stage: release
artifacts: *release-artifacts
only: *release-only
image: elixir:$ELIXIR_VER-alpine
image: elixir:1.11.4-alpine
tags:
- amd64
cache: *release-cache
variables: *release-variables
before_script: &before-release-musl
- apk add git build-base cmake file-dev openssl vips-dev
- apk add git build-base cmake file-dev openssl
- echo "import Config" > config/prod.secret.exs
- mix local.hex --force
- mix local.rebar --force
@ -331,7 +330,7 @@ arm:
only: *release-only
tags:
- arm32-specified
image: arm32v7/elixir:$ELIXIR_VER
image: arm32v7/elixir:1.11.4
cache: *release-cache
variables: *release-variables
before_script: *before-release
@ -343,7 +342,7 @@ arm-musl:
only: *release-only
tags:
- arm32-specified
image: arm32v7/elixir:$ELIXIR_VER-alpine
image: arm32v7/elixir:1.11.4-alpine
cache: *release-cache
variables: *release-variables
before_script: *before-release-musl
@ -355,7 +354,7 @@ arm64:
only: *release-only
tags:
- arm
image: arm64v8/elixir:$ELIXIR_VER
image: arm64v8/elixir:1.11.4
cache: *release-cache
variables: *release-variables
before_script: *before-release
@ -367,173 +366,110 @@ arm64-musl:
only: *release-only
tags:
- arm
image: arm64v8/elixir:$ELIXIR_VER-alpine
image: arm64v8/elixir:1.11.4-alpine
cache: *release-cache
variables: *release-variables
before_script: *before-release-musl
script: *release
.kaniko:
docker:
stage: docker
image:
name: gcr.io/kaniko-project/executor:debug
entrypoint: [""]
image: docker:latest
cache: {}
dependencies: []
before_script: &before-kaniko
variables: &docker-variables
DOCKER_DRIVER: overlay2
DOCKER_HOST: unix:///var/run/docker.sock
IMAGE_TAG: $CI_REGISTRY_IMAGE:$CI_COMMIT_SHORT_SHA
IMAGE_TAG_SLUG: $CI_REGISTRY_IMAGE:$CI_COMMIT_REF_SLUG
IMAGE_TAG_LATEST: $CI_REGISTRY_IMAGE:latest
IMAGE_TAG_LATEST_STABLE: $CI_REGISTRY_IMAGE:latest-stable
DOCKER_BUILDX_URL: https://github.com/docker/buildx/releases/download/v0.6.3/buildx-v0.6.3.linux-amd64
DOCKER_BUILDX_HASH: 980e6b9655f971991fbbb5fd6cd19f1672386195
before_script: &before-docker
- docker login -u $CI_REGISTRY_USER -p $CI_REGISTRY_PASSWORD $CI_REGISTRY
- docker pull $IMAGE_TAG_SLUG || true
- export CI_JOB_TIMESTAMP=$(date --utc -Iseconds)
- export CI_VCS_REF=$CI_COMMIT_SHORT_SHA
- export IMAGE_TAG=$CI_REGISTRY_IMAGE/$BUILD_ARCH_IMG_SUFFIX:$CI_COMMIT_SHORT_SHA
- export IMAGE_TAG_SLUG=$CI_REGISTRY_IMAGE/$BUILD_ARCH_IMG_SUFFIX:$CI_COMMIT_REF_SLUG
- export IMAGE_TAG_LATEST=$CI_REGISTRY_IMAGE/$BUILD_ARCH_IMG_SUFFIX:latest
- export IMAGE_TAG_LATEST_STABLE=$CI_REGISTRY_IMAGE/$BUILD_ARCH_IMG_SUFFIX:latest-stable
- mkdir -p /kaniko/.docker
- echo "{\"auths\":{\"$CI_REGISTRY\":{\"username\":\"$CI_REGISTRY_USER\",\"password\":\"$CI_REGISTRY_PASSWORD\"}}}" > /kaniko/.docker/config.json
.kaniko-latest:
extends: .kaniko
allow_failure: true
script:
- mkdir -p /root/.docker/cli-plugins
- wget "${DOCKER_BUILDX_URL}" -O ~/.docker/cli-plugins/docker-buildx
- echo "${DOCKER_BUILDX_HASH} /root/.docker/cli-plugins/docker-buildx" | sha1sum -c
- chmod +x ~/.docker/cli-plugins/docker-buildx
- docker run --rm --privileged multiarch/qemu-user-static --reset -p yes
- docker buildx create --name mbuilder --driver docker-container --use
- docker buildx inspect --bootstrap
- docker buildx build --platform linux/amd64,linux/arm/v7,linux/arm64/v8 --push --cache-from $IMAGE_TAG_SLUG --build-arg VCS_REF=$CI_VCS_REF --build-arg BUILD_DATE=$CI_JOB_TIMESTAMP -t $IMAGE_TAG -t $IMAGE_TAG_SLUG -t $IMAGE_TAG_LATEST .
tags:
- dind
only:
- develop@pleroma/pleroma
script:
- /kaniko/executor --context $CI_PROJECT_DIR --dockerfile $CI_PROJECT_DIR/Dockerfile --custom-platform=$BUILD_ARCH --build-arg VCS_REF=$CI_VCS_REF --build-arg BUILD_DATE=$CI_JOB_TIMESTAMP --build-arg ELIXIR_IMG=$ELIXIR_IMG --destination $IMAGE_TAG --destination $IMAGE_TAG_SLUG --destination $IMAGE_TAG_LATEST
.kaniko-stable:
extends: .kaniko
docker-stable:
stage: docker
image: docker:latest
cache: {}
dependencies: []
variables: *docker-variables
before_script: *before-docker
allow_failure: true
script:
- mkdir -p /root/.docker/cli-plugins
- wget "${DOCKER_BUILDX_URL}" -O ~/.docker/cli-plugins/docker-buildx
- echo "${DOCKER_BUILDX_HASH} /root/.docker/cli-plugins/docker-buildx" | sha1sum -c
- chmod +x ~/.docker/cli-plugins/docker-buildx
- docker run --rm --privileged multiarch/qemu-user-static --reset -p yes
- docker buildx create --name mbuilder --driver docker-container --use
- docker buildx inspect --bootstrap
- docker buildx build --platform linux/amd64,linux/arm/v7,linux/arm64/v8 --push --cache-from $IMAGE_TAG_SLUG --build-arg VCS_REF=$CI_VCS_REF --build-arg BUILD_DATE=$CI_JOB_TIMESTAMP -t $IMAGE_TAG -t $IMAGE_TAG_SLUG -t $IMAGE_TAG_LATEST_STABLE .
tags:
- dind
only:
- stable@pleroma/pleroma
script:
- /kaniko/executor --context $CI_PROJECT_DIR --dockerfile $CI_PROJECT_DIR/Dockerfile --custom-platform=$BUILD_ARCH --build-arg VCS_REF=$CI_VCS_REF --build-arg BUILD_DATE=$CI_JOB_TIMESTAMP --build-arg ELIXIR_IMG=$ELIXIR_IMG --destination $IMAGE_TAG --destination $IMAGE_TAG_SLUG --destination $IMAGE_TAG_LATEST_STABLE
.kaniko-release:
extends: .kaniko
docker-release:
stage: docker
image: docker:latest
cache: {}
dependencies: []
variables: *docker-variables
before_script: *before-docker
allow_failure: true
script:
script:
- mkdir -p /root/.docker/cli-plugins
- wget "${DOCKER_BUILDX_URL}" -O ~/.docker/cli-plugins/docker-buildx
- echo "${DOCKER_BUILDX_HASH} /root/.docker/cli-plugins/docker-buildx" | sha1sum -c
- chmod +x ~/.docker/cli-plugins/docker-buildx
- docker run --rm --privileged multiarch/qemu-user-static --reset -p yes
- docker buildx create --name mbuilder --driver docker-container --use
- docker buildx inspect --bootstrap
- docker buildx build --platform linux/amd64,linux/arm/v7,linux/arm64/v8 --push --cache-from $IMAGE_TAG_SLUG --build-arg VCS_REF=$CI_VCS_REF --build-arg BUILD_DATE=$CI_JOB_TIMESTAMP -t $IMAGE_TAG -t $IMAGE_TAG_SLUG .
tags:
- dind
only:
- /^release/.*$/@pleroma/pleroma
script:
- /kaniko/executor --context $CI_PROJECT_DIR --dockerfile $CI_PROJECT_DIR/Dockerfile --custom-platform=$BUILD_ARCH --build-arg VCS_REF=$CI_VCS_REF --build-arg BUILD_DATE=$CI_JOB_TIMESTAMP --build-arg ELIXIR_IMG=$ELIXIR_IMG --destination $IMAGE_TAG --destination $IMAGE_TAG_SLUG
.kaniko-adhoc:
extends: .kaniko
docker-adhoc:
stage: docker
image: docker:latest
cache: {}
dependencies: []
variables: *docker-variables
before_script: *before-docker
allow_failure: true
script:
script:
- mkdir -p /root/.docker/cli-plugins
- wget "${DOCKER_BUILDX_URL}" -O ~/.docker/cli-plugins/docker-buildx
- echo "${DOCKER_BUILDX_HASH} /root/.docker/cli-plugins/docker-buildx" | sha1sum -c
- chmod +x ~/.docker/cli-plugins/docker-buildx
- docker run --rm --privileged multiarch/qemu-user-static --reset -p yes
- docker buildx create --name mbuilder --driver docker-container --use
- docker buildx inspect --bootstrap
- docker buildx build --platform linux/amd64,linux/arm/v7,linux/arm64/v8 --push --cache-from $IMAGE_TAG_SLUG --build-arg VCS_REF=$CI_VCS_REF --build-arg BUILD_DATE=$CI_JOB_TIMESTAMP -t $IMAGE_TAG -t $IMAGE_TAG_SLUG .
tags:
- dind
only:
- /^build-docker/.*$/@pleroma/pleroma
script:
- /kaniko/executor --context $CI_PROJECT_DIR --dockerfile $CI_PROJECT_DIR/Dockerfile --custom-platform=$BUILD_ARCH --build-arg VCS_REF=$CI_VCS_REF --build-arg BUILD_DATE=$CI_JOB_TIMESTAMP --build-arg ELIXIR_IMG=$ELIXIR_IMG --destination $IMAGE_TAG --destination $IMAGE_TAG_SLUG
.kaniko:linux/amd64:
variables:
BUILD_ARCH: linux/amd64
BUILD_ARCH_IMG_SUFFIX: linux-amd64
ELIXIR_IMG: hexpm/elixir
tags:
- amd64
.kaniko:linux/arm64:
variables:
BUILD_ARCH: linux/arm64/v8
BUILD_ARCH_IMG_SUFFIX: linux-arm64-v8
ELIXIR_IMG: hexpm/elixir
tags:
- arm
.kaniko:linux/arm:
variables:
BUILD_ARCH: linux/arm/v7
BUILD_ARCH_IMG_SUFFIX: linux-arm-v7
ELIXIR_IMG: git.pleroma.social:5050/pleroma/ci-image/elixir-linux-arm-v7
tags:
- arm32-specified
kaniko-latest:linux/amd64:
extends:
- .kaniko-latest
- .kaniko:linux/amd64
kaniko-latest:linux/arm64:
extends:
- .kaniko-latest
- .kaniko:linux/arm64
kaniko-latest:linux/arm:
extends:
- .kaniko-latest
- .kaniko:linux/arm
kaniko-stable:linux/amd64:
extends:
- .kaniko-stable
- .kaniko:linux/amd64
kaniko-stable:linux/arm64:
extends:
- .kaniko-stable
- .kaniko:linux/arm64
kaniko-stable:linux/arm:
extends:
- .kaniko-stable
- .kaniko:linux/arm
kaniko-release:linux/amd64:
extends:
- .kaniko-release
- .kaniko:linux/amd64
kaniko-release:linux/arm64:
extends:
- .kaniko-release
- .kaniko:linux/arm64
kaniko-release:linux/arm:
extends:
- .kaniko-release
- .kaniko:linux/arm
.docker-combine:
stage: docker-combine
image: docker:cli
cache: {}
before_script:
- 'BUILD_ARCHES="linux-amd64 linux-arm64-v8 linux-arm-v7"'
- export IMAGE_TAG=$CI_REGISTRY_IMAGE:$CI_COMMIT_SHORT_SHA
- export IMAGE_TAG_SLUG=$CI_REGISTRY_IMAGE:$CI_COMMIT_REF_SLUG
- export IMAGE_TAG_LATEST=$CI_REGISTRY_IMAGE:latest
- export IMAGE_TAG_LATEST_STABLE=$CI_REGISTRY_IMAGE:latest-stable
- 'IMAGES=; for arch in $BUILD_ARCHES; do IMAGES="$IMAGES $CI_REGISTRY_IMAGE/$arch:$CI_COMMIT_SHORT_SHA"; done'
- 'IMAGES_SLUG=; for arch in $BUILD_ARCHES; do IMAGES_SLUG="$IMAGES_SLUG $CI_REGISTRY_IMAGE/$arch:$CI_COMMIT_REF_SLUG"; done'
- 'IMAGES_LATEST=; for arch in $BUILD_ARCHES; do IMAGES_LATEST="$IMAGES_LATEST $CI_REGISTRY_IMAGE/$arch:latest"; done'
- 'IMAGES_LATEST_STABLE=; for arch in $BUILD_ARCHES; do IMAGES_LATEST_STABLE="$IMAGES_LATEST_STABLE $CI_REGISTRY_IMAGE/$arch:latest"; done'
- mkdir -p ~/.docker
- echo "{\"auths\":{\"$CI_REGISTRY\":{\"username\":\"$CI_REGISTRY_USER\",\"password\":\"$CI_REGISTRY_PASSWORD\"}}}" > ~/.docker/config.json
docker-combine:latest:
extends: .docker-combine
only:
- develop@pleroma/pleroma
script:
- 'docker manifest create $IMAGE_TAG $IMAGES'
- 'docker manifest push $IMAGE_TAG'
- 'docker manifest create $IMAGE_TAG_SLUG $IMAGES_SLUG'
- 'docker manifest push $IMAGE_TAG_SLUG'
- 'docker manifest create $IMAGE_TAG_LATEST $IMAGES_LATEST'
- 'docker manifest push $IMAGE_TAG_LATEST'
docker-combine:stable:
extends: .docker-combine
only:
- stable@pleroma/pleroma
script:
- 'docker manifest create $IMAGE_TAG $IMAGES'
- 'docker manifest push $IMAGE_TAG'
- 'docker manifest create $IMAGE_TAG_SLUG $IMAGES_SLUG'
- 'docker manifest push $IMAGE_TAG_SLUG'
- 'docker manifest create $IMAGE_TAG_LATEST_STABLE $IMAGES_LATEST_STABLE'
- 'docker manifest push $IMAGE_TAG_LATEST_STABLE'
docker-combine:release:
extends: .docker-combine
only:
- /^release/.*$/@pleroma/pleroma
script:
- 'docker manifest create $IMAGE_TAG $IMAGES'
- 'docker manifest push $IMAGE_TAG'
- 'docker manifest create $IMAGE_TAG_SLUG $IMAGES_SLUG'
- 'docker manifest push $IMAGE_TAG_SLUG'


@ -1,10 +0,0 @@
### Checklist
- [ ] Adding a changelog: In the `changelog.d` directory, create a file named `<code>.<type>`.
`<code>` can be anything, but we recommend using a more or less unique identifier to avoid collisions, such as the branch name.
`<type>` can be `add`, `change`, `remove`, `fix`, `security` or `skip`. `skip` is only used if there is no user-visible change in the MR (for example, only editing comments in the code). Otherwise, choose a type that corresponds to your change.
In the file, write the changelog entry. For example, if an MR adds group functionality, we can create a file named `group.add` and write `Add group functionality` in it.
If one changelog entry is not enough, you may add more, though that may be a sign the work could be split into two MRs. Only use more than one changelog entry if you really need to (for example, when one change in the code fixes two different bugs, or when refactoring).
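As a reference, the example entry described above boils down to a single command (a minimal sketch; the filename and text are taken directly from the checklist):

```sh
# Record a changelog entry for an MR that adds group functionality,
# following the changelog.d/<code>.<type> convention described above.
echo "Add group functionality" > changelog.d/group.add
```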


@ -1,6 +1,6 @@
### Release checklist
* [ ] Bump version in `mix.exs`
* [ ] Compile a changelog with the `tools/collect-changelog` script
* [ ] Compile a changelog
* [ ] Create an MR with an announcement to pleroma.social
#### post-merge
* [ ] Tag the release on the merge commit
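A rough sketch of the pre-merge steps above, run from the repository root (only the script path comes from the checklist; the exact invocation is an assumption):

```sh
# Bump the version field in mix.exs, then assemble the changelog
# from the changelog.d/ entries with the script named in the checklist.
$EDITOR mix.exs
sh ./tools/collect-changelog
```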


@ -1 +0,0 @@
priv/static


@ -4,239 +4,20 @@ All notable changes to this project will be documented in this file.
The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/).
## 2.7.0
### Security
- HTTP Security: By default, don't allow unsafe-eval. The setting needs to be changed to allow Flash emulation.
- Fix webfinger spoofing.
- Use proper workers for fetching pins instead of an ad-hoc task, fixing a potential fetch loop
## Unreleased
### Changed
- Update to Phoenix 1.7
- Elixir Logger configuration is no longer permitted through AdminFE and ConfigDB
- Refactor the user backups code and improve test coverage
- Invalid activities delivered to the inbox will be rejected with a 400 Bad Request
- Support Bandit as an alternative to Cowboy for the HTTP server.
- Update Bandit to 1.5.2
- Replace eblurhash with rinpatch_blurhash. This also removes a dependency on ImageMagick.
- Elixir 1.13 is the minimum required version.
- Document maximum supported version of Erlang & Elixir
- Update and extend NetBSD installation docs
- Make `/api/v1/pleroma/federation_status` publicly available
- Increase outgoing federation parallelism
- Change Hackney connection pool timeouts to align with the values Gun uses
- Transmogrifier: handle non-validation errors on incoming Delete activities
- Remote object fetch failures will prevent the object fetch job from retrying if the object request returns 401, 403, 404, 410, or exceeds the maximum thread depth.
- Change AccountView `last_status_at` from a datetime to a date (as done in Mastodon 3.1.0)
- Improve error logging when LDAP authentication fails.
- Publisher jobs will not retry if the error received is a 400
- PollWorker jobs will not retry if the activity no longer exists.
- Improved detecting unrecoverable errors for incoming federation jobs
- Changed some jobs to return :cancel on unrecoverable errors that should not be retried
- Discard Remote Fetcher jobs which errored due to an MRF rejection.
- Oban queues have been refactored to simplify the queue design
- Ensure all Oban jobs have timeouts defined
- Optimistic Inbox reduces the processing overhead of incoming activities without instantly verifiable signatures.
- HTTP connection pool adjustments
- Disable jit by default for PostgreSQL
- Update the documentation for configuring Prometheus metrics.
- Change the prometheus library to PromEx.
- Publisher jobs now store the activity id instead of inserting duplicate JSON data in the Oban queue for each delivery.
- Activity publishing failures will prevent the job from retrying if the publishing request returns a 403 or 410
- Publisher errors will now emit logs indicating the inbox that was not available for delivery.
- Reduce the reachability timestamp update to a single upsert query
- A 422 error is returned when attempting to reply to a deleted status
- Rich Media backfilling is now an Oban job
- Refactored Rich Media to cache the content in the database. Fetching operations that could block status rendering have been eliminated.
- Set default values on validators for transient objects (attachment, poll options)
- User profile refreshes are now asynchronous
- Change mediaproxy previews to use vips to generate thumbnails instead of ImageMagick
- Render nice web push notifications for polls
- Refactor the Mastodon /api/v1/streaming websocket handler to use Phoenix.Socket.Transport
### Added
- Uploader: Add support for uploading attachments using IPFS
- Add NSFW-detecting MRF
- Add DNSRBL MRF
- Add options to the mix prune_objects task
- Add Anti-mention Spam MRF backported from Rebased
- HTTPSignaturePlug: Add :authorized_fetch_mode_exceptions configuration
- Support /authorize-interaction route used by Mastodon
- Add an option to reject certain domains when authorized fetch is enabled.
- Include following/followers in backups
- Allow to group bookmarks in folders
- Include image description in status media cards
- Implement `/api/v1/accounts/familiar_followers`
- Add support for configuring favicon, embed favicon and PWA manifest in server-generated meta
- Implement FEP-2c59, add "webfinger" to user actor
- Framegrabs with ffmpeg will execute with a 5 second timeout and cache the URLs of failures with a TTL of 15 minutes to prevent excessive retries.
- Added a Mix task "pleroma.config fix_mrf_policies" which will remove erroneous MRF policies from ConfigDB.
- Add ForceMention MRF
- [docs] add frontends management documentation
- Implement group actors
- Add contact account to InstanceView
- Add instance rules
- Implement /api/v2/instance route
- Verify profile link ownership with rel="me"
- Logger metadata is now attached to some logs to help with troubleshooting and analysis
- Add new parameters to /api/v2/instance: configuration[accounts][max_pinned_statuses] and configuration[statuses][characters_reserved_per_url]
- Add meilisearch, make search engines pluggable
- Add missing indexes on foreign key relationships
- Startup detection for configured MRF modules that are missing or incorrectly defined
- Permit passing --chunk and --step values to the Pleroma.Search.Indexer Mix task
- Deleting, Unfavoriting, Unrepeating, or Unreacting will cancel undelivered publishing jobs for the original activity.
- Oban jobs can now be viewed in the Live Dashboard
- Add media proxy to opengraph rich media cards
- Support for Erlang OTP 26
- Prioritize mentioned recipients (i.e., those that are not just followers) when federating.
- PromEx documentation
- Expose nonAnonymous field from Smithereen polls
- Add Qdrant/OpenAI embedding search
- Adds the capability to add a URL to a scrobble (optional field)
- scrubbers/default: Add more formatting elements from HTML4 / GoToSocial (acronym, bdo, big, cite, dfn, ins, kbd, q, samp, s, tt, var, wbr)
- Monitoring of search backend health to control the processing of jobs in the search indexing Oban queue
- Display reposted replies with exclude_replies: true
- Add "status" notification type
- Support honk-style attachment summaries as alt-text.
### Fixed
- Fix Emoji object IDs not always being valid
- Remove checking ImageMagick's commands for Pleroma.Upload.Filter.AnalyzeMetadata
- Ensure that StripLocation actually removes everything resembling GPS data from PNGs
- Fix authentication check on account rendering when bio is defined
- ap userview: add outbox field.
- Fix #strip_report_status_data
- Fix federation with Convergence AP Bridge
- ChatMessage: Tolerate attachment field set to an empty array
- Config: Check the permissions of the linked file instead of the symlink
- MediaProxy was setting the content-length header which is not permitted by RFC9112§6.2 when we are chunking the reply as it conflicts with the existence of the transfer-encoding header.
- Restore Cowboy's ability to stream MediaProxy responses without Chunked encoding.
- Fix the processing of email digest jobs.
- Client application data was always missing from the status
- Elixir 1.15 compatibility
- When downloading remote emojis packs, account for pagination
- Make remote emoji packs API use specifically the V1 URL. Akkoma does not understand it without V1, and it works either way with normal pleroma, so no reason to not do this
- Following HTTP Redirects when the HTTP Adapter is Finch
- Video framegrabs were not working correctly after the change to use Exile to execute ffmpeg
- Deactivated groups would still try to repeat a post.
- Fix logic error in Gun connection pooling which prevented retries even when the worker was launched with retry = true
- Connection pool errors when publishing an activity is a soft-error that will be retried shortly.
- Gun Connection Pool was not retrying to acquire a connection if the pool was full and stale connections were reclaimed
- TwitterAPI: Return proper error when healthcheck is disabled
- Handle cases when users.inbox is nil.
- Fix LDAP support
- Use correct domain for fqn and InstanceView
- The query for marking notifications as read has been simplified
- Mastodon API /api/v1/directory: Fix listing directory contents when not authenticated
- Ensure MediaProxy HTTP requests obey all the defined connection settings
- Fix a memory leak caused by Websocket connections that would not enter a state where a full garbage collection run could be triggered.
- Fix OpenGraph and Twitter metadata providers when parsing objects with no content or summary fields.
- MRF: Log sensible error for subdomains_regex
- MRF.StealEmojiPolicy: Properly add fallback extension to filenames missing one
- Federated timeline removal of hashtags via MRF HashtagPolicy
- Support objects with a null contentMap (firefish)
- Fix notifications query which was not using the index properly
- Notifications: improve performance by filtering on users table instead of activities table
- Prevent Rich Media backfill jobs from retrying in cases where it is likely they will fail again.
- Oban Jobs for refreshing users were not respecting the uniqueness setting
- Fix Optimistic Inbox for failed signatures
- MediaProxy Preview failures prevented when encountering certain video files
- pleroma_ctl: Use realpath(1) instead of readlink(1)
- ReceiverWorker: Make sure non-{:ok, _} is returned as {:error, …}
- Harden Rich Media parsing against very slow or malicious URLs
- Rich Media Preview cache eviction when the activity is updated.
- Parsing of RichMedia TTLs for Amazon URLs when query parameters are nil
- End of poll notifications were not streamed over websockets or web push
- Fix eblurhash and elixir-captcha not using system cflags
- Video thumbnails were not being generated due to a negative cache lookup logic error
- Fix web push notifications not successfully delivering
- Web Push notifications are no longer generated for muted/blocked threads and users.
- Fix validate_webfinger when running a different domain for Webfinger
### Removed
- Mastodon API: Remove deprecated GET /api/v1/statuses/:id/card endpoint https://github.com/mastodon/mastodon/pull/11213
- Removed support for multiple federator modules as we only support ActivityPub
## 2.6.2
### Security
- MRF StealEmojiPolicy: Sanitize shortcodes (thanks to Hazel K for the report)
## 2.6.1
### Changed
- Document maximum supported version of Erlang & Elixir
### Added
- [docs] add frontends management documentation
### Fixed
- TwitterAPI: Return proper error when healthcheck is disabled
- Fix eblurhash and elixir-captcha not using system cflags
## 2.6.0
### Security
- Preload: Make generated JSON html-safe. It was already html-safe because it only consists of config data that is base64 encoded, but this will keep it safe if that ever changes.
- CommonAPI: Prevent users from accessing media of other users by creating a status with reused attachment ID
- Disable XML entity resolution completely to fix a dos vulnerability
### Added
- Support for Image activities, namely from Hubzilla
- Add OAuth scope descriptions
- Allow lang attribute in status text
- OnlyMedia Upload Filter
- Implement MRF policy to reject or delist according to emojis
- (hardening) Add no_new_privs=yes to OpenRC service files
- Implement quotes
- Add unified streaming endpoint
### Fixed
- rel="me" was missing its cache
- MediaProxy responses now return a sandbox CSP header
- Filter context activities using Visibility.visible_for_user?
- UploadedMedia: Add missing disposition_type to Content-Disposition
- fix not being able to fetch flash file from remote instance
- Fix abnormal behaviour when refetching a poll
- Allow non-HTTP(s) URIs in "url" fields for compatibility with "FEP-fffd: Proxy Objects"
- Fix opengraph and twitter card meta tags
- ForceMentionsInContent: fix double mentions for Mastodon/Misskey posts
- OEmbed HTML tags are now filtered
- Restrict attachments to uploaded files only
- Fix error 404 when deleting status of a banned user
- Fix config ownership in dockerfile to pass restriction test
- Fix user fetch completely broken if featured collection is not in a supported form
- Correctly handle the situation when a poll has both "anyOf" and "oneOf" but one of them being empty
- Fix handling report from a deactivated user
- Prevent using the .json format to bypass authorized fetch mode
- Fix mentioning punycode domains when using Markdown
- Show more informative errors when profile exceeds char limits
### Removed
- BREAKING: Support for passwords generated with `crypt(3)` (Gnu Social migration artifact)
- remove BBS/SSH feature, replaced by an external bridge.
- Remove a few unused indexes.
- Cleanup OStatus-era user upgrades and ap_enabled indicator
- Deprecate Pleroma's audio scrobbling
## 2.5.4
## 2.5.54
## Security
- Fix XML External Entity (XXE) loading vulnerability allowing to fetch arbitrary files from the server's filesystem
## 2.5.3
### Security
- Emoji pack loader sanitizes pack names
- Reduced permissions of config files and directories, distros requiring greater permissions like group-read need to pre-create the directories
## 2.5.5
## Security
- Prevent users from accessing media of other users by creating a status with reused attachment ID
## 2.5.4
## Security
- Fix XML External Entity (XXE) loading vulnerability allowing to fetch arbitrary files from the server's filesystem
- Fix XML External Entity (XXE) loading vulnerability allowing to fetch arbitary files from the server's filesystem
## 2.5.3
@ -276,7 +57,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/).
- Fix `block_from_stranger` setting
- Fix rel="me"
- Docker images will now run properly
- Fix improper content being cached in report content
- Fix inproper content being cached in report content
- Notification filter on object content will not operate on the ones that inherently have no content
- ZWNJ and double dots in links are parsed properly for Plain-text posts
- OTP releases will work on systems with a newer libcrypt
@ -942,7 +723,7 @@ switched to a new configuration mechanism, however it was not officially removed
- Rate limiter crashes when there is no explicitly specified ip in the config
- 500 errors when no `Accept` header is present if Static-FE is enabled
- Instance panel not being updated immediately due to wrong `Cache-Control` headers
- Statuses posted with BBCode/Markdown having unnecessary newlines in Pleroma-FE
- Statuses posted with BBCode/Markdown having unncessary newlines in Pleroma-FE
- OTP: Fix some settings not being migrated to in-database config properly
- No `Cache-Control` headers on attachment/media proxy requests
- Character limit enforcement being off by 1
@ -1262,10 +1043,10 @@ curl -Lo ./bin/pleroma_ctl 'https://git.pleroma.social/pleroma/pleroma/raw/devel
- Reverse Proxy limiting `max_body_length` was incorrectly defined and only checked `Content-Length` headers which may not be sufficient in some circumstances
### Added
- Expiring/ephemeral activities. All activities can have expires_at value set, which controls when they should be deleted automatically.
- Expiring/ephemeral activites. All activities can have expires_at value set, which controls when they should be deleted automatically.
- Mastodon API: in post_status, the expires_in parameter lets you set the number of seconds until an activity expires. It must be at least one hour.
- Mastodon API: all status JSON responses contain a `pleroma.expires_at` item which states when an activity will expire. The value is only shown to the user who created the activity. To everyone else it's empty.
- Configuration: `ActivityExpiration.enabled` controls whether expired activities will get deleted at the appropriate time. Enabled by default.
- Configuration: `ActivityExpiration.enabled` controls whether expired activites will get deleted at the appropriate time. Enabled by default.
- Conversations: Add Pleroma-specific conversation endpoints and status posting extensions. Run the `bump_all_conversations` task again to create the necessary data.
- MRF: Support for priming the mediaproxy cache (`Pleroma.Web.ActivityPub.MRF.MediaProxyWarmingPolicy`)
- MRF: Support for excluding specific domains from Transparency.


@ -1,17 +1,14 @@
# https://hub.docker.com/r/hexpm/elixir/tags
ARG ELIXIR_IMG=hexpm/elixir
ARG ELIXIR_VER=1.14.5
ARG ERLANG_VER=25.3.2.14
ARG ALPINE_VER=3.17.9
ARG ELIXIR_VER=1.11.4
ARG ERLANG_VER=24.2.1
ARG ALPINE_VER=3.17.0
FROM ${ELIXIR_IMG}:${ELIXIR_VER}-erlang-${ERLANG_VER}-alpine-${ALPINE_VER} as build
FROM hexpm/elixir:${ELIXIR_VER}-erlang-${ERLANG_VER}-alpine-${ALPINE_VER} as build
COPY . .
ENV MIX_ENV=prod
ENV VIX_COMPILATION_MODE=PLATFORM_PROVIDED_LIBVIPS
RUN apk add git gcc g++ musl-dev make cmake file-dev vips-dev &&\
RUN apk add git gcc g++ musl-dev make cmake file-dev &&\
echo "import Config" > config/prod.secret.exs &&\
mix local.hex --force &&\
mix local.rebar --force &&\
@ -39,7 +36,7 @@ ARG HOME=/opt/pleroma
ARG DATA=/var/lib/pleroma
RUN apk update &&\
apk add exiftool ffmpeg vips libmagic ncurses postgresql-client &&\
apk add exiftool ffmpeg imagemagick libmagic ncurses postgresql-client &&\
adduser --system --shell /bin/false --home ${HOME} pleroma &&\
mkdir -p ${DATA}/uploads &&\
mkdir -p ${DATA}/static &&\
@ -51,7 +48,7 @@ USER pleroma
COPY --from=build --chown=pleroma:0 /release ${HOME}
COPY --chown=pleroma --chmod=640 ./config/docker.exs /etc/pleroma/config.exs
COPY ./config/docker.exs /etc/pleroma/config.exs
COPY ./docker-entrypoint.sh ${HOME}
EXPOSE 4000


@ -30,8 +30,7 @@ If your platform is not supported, or you just want to be able to edit the sourc
- [OpenBSD (fi)](https://docs-develop.pleroma.social/backend/installation/openbsd_fi/)
### OS/Distro packages
Currently Pleroma is packaged for [YunoHost](https://yunohost.org), [NixOS](https://nixos.org), [Gentoo through GURU](https://gentoo.org/) and [Archlinux through AUR](https://aur.archlinux.org/packages/pleroma). You may find more at <https://repology.org/project/pleroma/versions>.
If you want to package Pleroma for any OS/Distros, we can guide you through the process on our [community channels](#community-channels). If you want to change default options in your Pleroma package, please **discuss it with us first**.
Currently Pleroma is packaged for [YunoHost](https://yunohost.org) and [NixOS](https://nixos.org). If you want to package Pleroma for any OS/Distros, we can guide you through the process on our [community channels](#community-channels). If you want to change default options in your Pleroma package, please **discuss it with us first**.
### Docker
While we don't provide docker files, other people have written very good ones. Take a look at <https://github.com/angristan/docker-pleroma> or <https://glitch.sh/sn0w/pleroma-docker>.

changelog.d/3126.fix Normal file

@ -0,0 +1 @@
MediaProxy responses now return a sandbox CSP header

changelog.d/3883.fix Normal file

@ -0,0 +1 @@
Fix abnormal behaviour when refetching a poll

changelog.d/3891.fix Normal file

@ -0,0 +1 @@
OEmbed HTML tags are now filtered


@ -0,0 +1 @@
Fix XML External Entity (XXE) loading vulnerability allowing to fetch arbitrary files from the server's filesystem


@ -1 +0,0 @@
Added support for argon2 passwords and their conversion for migration from Akkoma fork to upstream.


@ -1 +0,0 @@
Truncate remote user fields, avoids them getting rejected


@ -1 +0,0 @@
Deprecate `/api/v1/pleroma/accounts/:id/subscribe`/`unsubscribe`


@ -1 +0,0 @@
Restrict incoming activities from unknown actors to a subset that does not imply a previous relationship and early rejection of unrecognized activity types.


@ -1 +0,0 @@
Elixir 1.14 and Erlang/OTP 23 is now the minimum supported release


@ -0,0 +1 @@
Emoji pack loader sanitizes pack names


@ -0,0 +1 @@
Correctly handle the situation when a poll has both "anyOf" and "oneOf" but one of them being empty


@ -1 +0,0 @@
Fixed malformed follow requests that cause them to appear stuck pending due to the recipient being unable to process them.


@ -1 +0,0 @@
Improve the FollowValidator to successfully process incoming activities with an errant cc field.


@ -1 +0,0 @@
Resolved edge case where the API can report you are following a user but the relationship is not fully established.


@ -1 +0,0 @@
Support `id` param in `GET /api/v1/statuses`


@ -1 +0,0 @@
Remove stub for /api/v1/accounts/:id/identity_proofs (deprecated by Mastodon 3.5.0)


@ -1 +0,0 @@
LDAP configuration now permits overriding the CA root certificate file for TLS validation.


@ -1 +0,0 @@
LDAP authentication has been refactored to operate as a GenServer process which will maintain an active connection to the LDAP server.


@ -1 +0,0 @@
STARTTLS certificate and hostname verification for LDAP authentication


@ -1 +0,0 @@
LDAPS connections (implicit TLS) are now supported.


@ -1 +0,0 @@
Include list id in StatusView


@ -1 +0,0 @@
The Swoosh email adapter for Mailgun was missing a new dependency on :multipart


@ -1 +0,0 @@
Added MRF.FODirectReply which changes replies to followers-only posts to be direct.


@ -1 +0,0 @@
Add `id_filter` to MRF to filter URLs and their domain prior to fetching


@ -1 +0,0 @@
Added MRF.QuietReply which prevents replies to public posts from being published to the timelines


@ -1 +0,0 @@
Add `group_key` to notifications


@ -1 +0,0 @@
Fix 'Setting a marker should mark notifications as read'


@ -1 +0,0 @@
Add a rate limiter to the OAuth App creation endpoint and ensure registered apps are assigned to users.


@ -1 +0,0 @@
ReceiverWorker will cancel processing jobs instead of retrying if the user cannot be fetched due to 403, 404, or 410 errors or if the account is disabled locally.


@ -1 +0,0 @@
Adjust more Oban workers to enforce unique job constraints.


@ -1 +0,0 @@
Oban updated to 2.18.3


@ -1 +0,0 @@
Publisher behavior improvement when snoozing Oban jobs due to Gun connection pool contention.


@ -0,0 +1 @@
- Reduced permissions of config files and directories, distros requiring greater permissions like group-read need to pre-create the directories


@ -1 +0,0 @@
Allow providing avatar/header descriptions


@ -1 +0,0 @@
Address case where instance reachability status couldn't be updated


@ -1 +0,0 @@
Remote Fetcher Worker recognizes more permanent failure errors


@ -1 +0,0 @@
Rich Media preview fetching will skip making an HTTP HEAD request to check a URL for allowed content type and length if the Tesla adapter is Gun or Finch


@ -1 +0,0 @@
scrubbers/default: Allow "mention hashtag" classes used by Mastodon


@ -1 +0,0 @@
StreamerView: Do not leak follows count if hidden


@ -1 +0,0 @@
Added dependencies for Swoosh's Mua mail adapter


@ -1 +0,0 @@
Update Oban to 2.18


@ -1 +0,0 @@
Imports of blocks, mutes, and follows would retry repeatedly due to incorrect error handling and all work executed in a single job


@ -1 +0,0 @@
Accept application/activity+json for requests to .well-known/nodeinfo


@ -1 +0,0 @@
Worker configuration is no longer available. This only affects custom max_retries values for a couple Oban queues.


@ -1,4 +1,4 @@
FROM elixir:1.14.5-otp-25
FROM elixir:1.11.4
# Single RUN statement, otherwise intermediate images are created
# https://docs.docker.com/develop/develop-images/dockerfile_best-practices/#run

ci/build_and_push.sh Executable file

@ -0,0 +1 @@
docker buildx build --platform linux/amd64,linux/arm64,linux/arm/v7 -t git.pleroma.social:5050/pleroma/pleroma/ci-base:latest --push .


@ -1 +0,0 @@
docker buildx build --platform linux/amd64,linux/arm64 -t git.pleroma.social:5050/pleroma/pleroma/ci-base:elixir-1.14.5-otp-25 --push .


@ -1,8 +0,0 @@
FROM elixir:1.15.8-otp-26
# Single RUN statement, otherwise intermediate images are created
# https://docs.docker.com/develop/develop-images/dockerfile_best-practices/#run
RUN apt-get update &&\
apt-get install -y libmagic-dev cmake libimage-exiftool-perl ffmpeg &&\
mix local.hex --force &&\
mix local.rebar --force


@ -1 +0,0 @@
docker buildx build --platform linux/amd64,linux/arm64 -t git.pleroma.social:5050/pleroma/pleroma/ci-base:elixir-1.15.8-otp-26 --push .


@ -1,8 +0,0 @@
FROM elixir:1.16.3-otp-26
# Single RUN statement, otherwise intermediate images are created
# https://docs.docker.com/develop/develop-images/dockerfile_best-practices/#run
RUN apt-get update &&\
apt-get install -y libmagic-dev cmake libimage-exiftool-perl ffmpeg &&\
mix local.hex --force &&\
mix local.rebar --force


@ -1 +0,0 @@
docker buildx build --platform linux/amd64,linux/arm64 -t git.pleroma.social:5050/pleroma/pleroma/ci-base:elixir-1.16.3-otp-26 --push .


@ -1,8 +0,0 @@
FROM elixir:1.17.1-otp-26
# Single RUN statement, otherwise intermediate images are created
# https://docs.docker.com/develop/develop-images/dockerfile_best-practices/#run
RUN apt-get update &&\
apt-get install -y libmagic-dev cmake libimage-exiftool-perl ffmpeg &&\
mix local.hex --force &&\
mix local.rebar --force


@ -1 +0,0 @@
docker buildx build --platform linux/amd64,linux/arm64 -t git.pleroma.social:5050/pleroma/pleroma/ci-base:elixir-1.17.1-otp-26 --push .


@ -1,3 +0,0 @@
FROM postgres:13-bullseye
RUN apt-get update && apt-get install -y postgresql-13-rum/bullseye-pgdg


@ -1 +0,0 @@
docker buildx build --platform linux/amd64,linux/arm64 -t git.pleroma.social:5050/pleroma/pleroma/postgres-with-rum-13:latest --push .


@ -14,7 +14,7 @@ config :pleroma, Pleroma.Captcha,
method: Pleroma.Captcha.Mock
# Print only warnings and errors during test
config :logger, level: :warning
config :logger, level: :warn
config :pleroma, :auth, oauth_consumer_strategies: []
@ -79,10 +79,6 @@ IO.puts("RUM enabled: #{rum_enabled}")
config :pleroma, Pleroma.ReverseProxy.Client, Pleroma.ReverseProxy.ClientMock
config :pleroma, Pleroma.Application,
background_migrators: false,
streamer_registry: false
if File.exists?("./config/benchmark.secret.exs") do
import_config "benchmark.secret.exs"
else


@ -82,10 +82,6 @@ config :ex_aws, :s3,
# region: "us-east-1", # may be required for Amazon AWS
scheme: "https://"
config :pleroma, Pleroma.Uploaders.IPFS,
post_gateway_url: "http://localhost:5001",
get_gateway_url: "http://localhost:8080"
config :pleroma, :emoji,
shortcode_globs: ["/emoji/custom/**/*.png"],
pack_extensions: [".png", ".gif"],
@ -114,11 +110,32 @@ config :pleroma, :uri_schemes,
"xmpp"
]
websocket_config = [
path: "/websocket",
serializer: [
{Phoenix.Socket.V1.JSONSerializer, "~> 1.0.0"},
{Phoenix.Socket.V2.JSONSerializer, "~> 2.0.0"}
],
timeout: 60_000,
transport_log: false,
compress: false
]
# Configures the endpoint
config :pleroma, Pleroma.Web.Endpoint,
url: [host: "localhost"],
http: [
ip: {127, 0, 0, 1}
ip: {127, 0, 0, 1},
dispatch: [
{:_,
[
{"/api/v1/streaming", Pleroma.Web.MastodonAPI.WebsocketHandler, []},
{"/websocket", Phoenix.Endpoint.CowboyWebSocket,
{Phoenix.Transports.WebSocket,
{Pleroma.Web.Endpoint, Pleroma.Web.UserSocket, websocket_config}}},
{:_, Phoenix.Endpoint.Cowboy2Handler, {Pleroma.Web.Endpoint, []}}
]}
]
],
protocol: "https",
secret_key_base: "aK4Abxf29xU9TTDKre9coZPUgevcVCFQJe/5xP/7Lt4BEif6idBIbjupVbOrbKxl",
@ -132,18 +149,16 @@ config :pleroma, Pleroma.Web.Endpoint,
]
# Configures Elixir's Logger
config :logger, backends: [:console]
config :logger, :console,
level: :debug,
format: "\n$time $metadata[$level] $message\n",
metadata: [:actor, :path, :type, :user]
metadata: [:request_id]
config :logger, :ex_syslogger,
level: :debug,
ident: "pleroma",
format: "$metadata[$level] $message",
metadata: [:actor, :path, :type, :user]
metadata: [:request_id]
config :mime, :types, %{
"application/xml" => ["xml"],
@ -170,7 +185,6 @@ config :pleroma, :instance,
short_description: "",
background_image: "/images/city.jpg",
instance_thumbnail: "/instance/thumbnail.jpeg",
favicon: "/favicon.png",
limit: 5_000,
description_limit: 5_000,
remote_limit: 100_000,
@ -191,10 +205,12 @@ config :pleroma, :instance,
federating: true,
federation_incoming_replies_max_depth: 100,
federation_reachability_timeout_days: 7,
federation_publisher_modules: [
Pleroma.Web.ActivityPub.Publisher
],
allow_relay: true,
public: true,
quarantined_instances: [],
rejected_instances: [],
static_dir: "instance/static/",
allowed_post_formats: [
"text/plain",
@ -344,8 +360,6 @@ config :pleroma, :manifest,
icons: [
%{
src: "/static/logo.svg",
sizes: "512x512",
purpose: "any",
type: "image/svg+xml"
}
],
@ -394,12 +408,6 @@ config :pleroma, :mrf_keyword,
federated_timeline_removal: [],
replace: []
config :pleroma, :mrf_emoji,
remove_url: [],
remove_shortcode: [],
federated_timeline_removal_url: [],
federated_timeline_removal_shortcode: []
config :pleroma, :mrf_hashtag,
sensitive: ["nsfw"],
reject: [],
@ -413,33 +421,13 @@ config :pleroma, :mrf_vocabulary,
accept: [],
reject: []
config :pleroma, :mrf_dnsrbl,
nameserver: "127.0.0.1",
port: 53,
zone: "bl.pleroma.com"
# threshold of 7 days
config :pleroma, :mrf_object_age,
threshold: 604_800,
actions: [:delist, :strip_followers]
config :pleroma, :mrf_nsfw_api,
url: "http://127.0.0.1:5000/",
threshold: 0.7,
mark_sensitive: true,
unlist: false,
reject: false
config :pleroma, :mrf_follow_bot, follower_nickname: nil
config :pleroma, :mrf_inline_quote, template: "<bdi>RT:</bdi> {url}"
config :pleroma, :mrf_force_mention,
mention_parent: true,
mention_quoted: true
config :pleroma, :mrf_antimentionspam, user_age_limit: 30_000
config :pleroma, :rich_media,
enabled: true,
ignore_hosts: [],
@ -448,12 +436,8 @@ config :pleroma, :rich_media,
Pleroma.Web.RichMedia.Parsers.TwitterCard,
Pleroma.Web.RichMedia.Parsers.OEmbed
],
timeout: 5_000,
ttl_setters: [
Pleroma.Web.RichMedia.Parser.TTL.AwsSignedUrl,
Pleroma.Web.RichMedia.Parser.TTL.Opengraph
],
max_body: 5_000_000
failure_backoff: 60_000,
ttl_setters: [Pleroma.Web.RichMedia.Parser.TTL.AwsSignedUrl]
config :pleroma, :media_proxy,
enabled: false,
@ -522,8 +506,7 @@ config :pleroma, :http_security,
sts: false,
sts_max_age: 31_536_000,
ct_max_age: 2_592_000,
referrer_policy: "same-origin",
allow_unsafe_eval: false
referrer_policy: "same-origin"
config :cors_plug,
max_age: 86_400,
@ -580,25 +563,38 @@ config :pleroma, Pleroma.User,
],
email_blacklist: []
# The Pruner :max_age must be longer than Worker :unique
# value or it cannot enforce uniqueness.
config :pleroma, Oban,
repo: Pleroma.Repo,
log: false,
queues: [
activity_expiration: 10,
token_expiration: 5,
filter_expiration: 1,
backup: 1,
federator_incoming: 5,
federator_outgoing: 25,
federator_outgoing: 5,
ingestion_queue: 50,
web_push: 50,
background: 20,
search_indexing: [limit: 10, paused: true],
slow: 5
mailer: 10,
transmogrifier: 20,
scheduled_activities: 10,
poll_notifications: 10,
background: 5,
remote_fetcher: 2,
attachments_cleanup: 1,
new_users_digest: 1,
mute_expire: 5
],
plugins: [{Oban.Plugins.Pruner, max_age: 900}],
plugins: [Oban.Plugins.Pruner],
crontab: [
{"0 0 * * 0", Pleroma.Workers.Cron.DigestEmailsWorker},
{"0 0 * * *", Pleroma.Workers.Cron.NewUsersDigestWorker},
{"*/10 * * * *", Pleroma.Workers.Cron.AppCleanupWorker}
{"0 0 * * *", Pleroma.Workers.Cron.NewUsersDigestWorker}
]
config :pleroma, :workers,
retries: [
federator_incoming: 5,
federator_outgoing: 5
]
config :pleroma, Pleroma.Formatter,
@ -612,17 +608,17 @@ config :pleroma, Pleroma.Formatter,
config :pleroma, :ldap,
enabled: System.get_env("LDAP_ENABLED") == "true",
host: System.get_env("LDAP_HOST", "localhost"),
port: String.to_integer(System.get_env("LDAP_PORT", "389")),
host: System.get_env("LDAP_HOST") || "localhost",
port: String.to_integer(System.get_env("LDAP_PORT") || "389"),
ssl: System.get_env("LDAP_SSL") == "true",
sslopts: [],
tls: System.get_env("LDAP_TLS") == "true",
tlsopts: [],
base: System.get_env("LDAP_BASE", "dc=example,dc=com"),
uid: System.get_env("LDAP_UID", "cn"),
# defaults to CAStore's Mozilla roots
cacertfile: System.get_env("LDAP_CACERTFILE", nil),
mail: System.get_env("LDAP_MAIL", "mail")
base: System.get_env("LDAP_BASE") || "dc=example,dc=com",
uid: System.get_env("LDAP_UID") || "cn"
config :esshd,
enabled: false
oauth_consumer_strategies =
System.get_env("OAUTH_CONSUMER_STRATEGIES")
@ -659,26 +655,12 @@ config :pleroma, Pleroma.Emails.UserEmail,
config :pleroma, Pleroma.Emails.NewUsersDigestEmail, enabled: false
config :pleroma, Pleroma.PromEx,
disabled: false,
manual_metrics_start_delay: :no_delay,
drop_metrics_groups: [],
grafana: [
host: System.get_env("GRAFANA_HOST", "http://localhost:3000"),
auth_token: System.get_env("GRAFANA_TOKEN"),
upload_dashboards_on_start: false,
folder_name: "BEAM",
annotate_app_lifecycle: true
],
metrics_server: [
port: 4021,
path: "/metrics",
protocol: :http,
pool_size: 5,
cowboy_opts: [],
auth_strategy: :none
],
datasource: "Prometheus"
config :prometheus, Pleroma.Web.Endpoint.MetricsExporter,
enabled: false,
auth: false,
ip_whitelist: [],
path: "/api/pleroma/app_metrics",
format: :text
config :pleroma, Pleroma.ScheduledActivity,
daily_user_limit: 25,
@ -715,7 +697,6 @@ config :pleroma, :rate_limit,
timeline: {500, 3},
search: [{1000, 10}, {1000, 30}],
app_account_creation: {1_800_000, 25},
oauth_app_creation: {900_000, 5},
relations_actions: {10_000, 10},
relation_id_action: {60_000, 2},
statuses_actions: {10_000, 15},
@ -814,7 +795,7 @@ config :pleroma, :modules, runtime_dir: "instance/modules"
config :pleroma, configurable_from_database: false
config :pleroma, Pleroma.Repo,
parameters: [gin_fuzzy_search_limit: "500", jit: "off"],
parameters: [gin_fuzzy_search_limit: "500"],
prepare: :unnamed
config :pleroma, :connections_pool,
@ -828,27 +809,22 @@ config :pleroma, :connections_pool,
config :pleroma, :pools,
federation: [
size: 75,
max_waiting: 20,
size: 50,
max_waiting: 10,
recv_timeout: 10_000
],
media: [
size: 75,
max_waiting: 20,
recv_timeout: 15_000
],
rich_media: [
size: 25,
size: 50,
max_waiting: 20,
recv_timeout: 15_000
],
upload: [
size: 25,
max_waiting: 20,
max_waiting: 5,
recv_timeout: 15_000
],
default: [
size: 50,
size: 10,
max_waiting: 2,
recv_timeout: 5_000
]
@ -856,19 +832,15 @@ config :pleroma, :pools,
config :pleroma, :hackney_pools,
federation: [
max_connections: 50,
timeout: 10_000
timeout: 150_000
],
media: [
max_connections: 50,
timeout: 15_000
],
rich_media: [
max_connections: 50,
timeout: 15_000
timeout: 150_000
],
upload: [
max_connections: 25,
timeout: 15_000
timeout: 300_000
]
config :pleroma, :majic_pool, size: 2
@ -883,11 +855,7 @@ config :pleroma, :restrict_unauthenticated,
config :pleroma, Pleroma.Web.ApiSpec.CastAndValidate, strict: false
config :pleroma, :mrf,
policies: [
Pleroma.Web.ActivityPub.MRF.ObjectAgePolicy,
Pleroma.Web.ActivityPub.MRF.TagPolicy,
Pleroma.Web.ActivityPub.MRF.InlineQuotePolicy
],
policies: [Pleroma.Web.ActivityPub.MRF.ObjectAgePolicy, Pleroma.Web.ActivityPub.MRF.TagPolicy],
transparency: true,
transparency_exclusions: []
@ -906,45 +874,15 @@ config :pleroma, Pleroma.Web.Auth.Authenticator, Pleroma.Web.Auth.PleromaAuthent
config :pleroma, Pleroma.User.Backup,
purge_after_days: 30,
limit_days: 7,
dir: nil,
process_chunk_size: 100,
timeout: :timer.minutes(30)
dir: nil
config :pleroma, ConcurrentLimiter, [
{Pleroma.Search, [max_running: 30, max_waiting: 50]}
{Pleroma.Web.RichMedia.Helpers, [max_running: 5, max_waiting: 5]},
{Pleroma.Web.ActivityPub.MRF.MediaProxyWarmingPolicy, [max_running: 5, max_waiting: 5]}
]
config :pleroma, Pleroma.Web.WebFinger, domain: nil, update_nickname_on_user_fetch: true
config :pleroma, Pleroma.Search, module: Pleroma.Search.DatabaseSearch
config :pleroma, Pleroma.Search.Meilisearch,
url: "http://127.0.0.1:7700/",
private_key: nil,
initial_indexing_chunk_size: 100_000
config :pleroma, Pleroma.Application,
background_migrators: true,
internal_fetch: true,
load_custom_modules: true,
max_restarts: 3,
streamer_registry: true
config :pleroma, Pleroma.Uploaders.Uploader, timeout: 30_000
config :pleroma, Pleroma.Search.QdrantSearch,
qdrant_url: "http://127.0.0.1:6333/",
qdrant_api_key: "",
openai_url: "http://127.0.0.1:11345",
# The healthcheck url has to be set to nil when used with the real openai
# API, as it doesn't have a healthcheck endpoint.
openai_healthcheck_url: "http://127.0.0.1:11345/health",
openai_model: "snowflake/snowflake-arctic-embed-xs",
openai_api_key: "",
qdrant_index_configuration: %{
vectors: %{size: 384, distance: "Cosine"}
}
# Import environment specific config. This must remain at the bottom
# of this file so it overrides the configuration defined above.
import_config "#{Mix.env()}.exs"

View file

@ -136,31 +136,6 @@ config :pleroma, :config_description, [
}
]
},
%{
group: :pleroma,
key: Pleroma.Uploaders.IPFS,
type: :group,
description: "IPFS uploader-related settings",
children: [
%{
key: :get_gateway_url,
type: :string,
description: "GET Gateway URL",
suggestions: [
"https://ipfs.mydomain.com/{CID}",
"https://{CID}.ipfs.mydomain.com/"
]
},
%{
key: :post_gateway_url,
type: :string,
description: "POST Gateway URL",
suggestions: [
"http://localhost:5001/"
]
}
]
},
%{
group: :pleroma,
key: Pleroma.Uploaders.S3,
@ -591,20 +566,6 @@ config :pleroma, :config_description, [
"Cool instance"
]
},
%{
key: :status_page,
type: :string,
description: "A page where people can see the status of the server during an outage",
suggestions: [
"https://status.pleroma.example.org"
]
},
%{
key: :contact_username,
type: :string,
description: "Instance owner username",
suggestions: ["admin"]
},
%{
key: :limit,
type: :integer,
@ -774,18 +735,6 @@ config :pleroma, :config_description, [
{"*.quarantined.com", "Reason"}
]
},
%{
key: :rejected_instances,
type: {:list, :tuple},
key_placeholder: "instance",
value_placeholder: "reason",
description:
"List of ActivityPub instances to reject requests from if authorized_fetch_mode is enabled",
suggestions: [
{"rejected.com", "Reason"},
{"*.rejected.com", "Reason"}
]
},
%{
key: :static_dir,
type: :string,
@ -1038,12 +987,6 @@ config :pleroma, :config_description, [
"The instance thumbnail can be any image that represents your instance and is used by some apps or services when they display information about your instance.",
suggestions: ["/instance/thumbnail.jpeg"]
},
%{
key: :favicon,
type: {:string, :image},
description: "Favicon of the instance",
suggestions: ["/favicon.png"]
},
%{
key: :show_reactions,
type: :boolean,
@ -1228,6 +1171,79 @@ config :pleroma, :config_description, [
}
]
},
%{
group: :logger,
type: :group,
description: "Logger-related settings",
children: [
%{
key: :backends,
type: [:atom, :tuple, :module],
description:
"Where logs will be sent, :console - send logs to stdout, { ExSyslogger, :ex_syslogger } - to syslog, Quack.Logger - to Slack.",
suggestions: [:console, {ExSyslogger, :ex_syslogger}, Quack.Logger]
}
]
},
%{
group: :logger,
type: :group,
key: :ex_syslogger,
label: "ExSyslogger",
description: "ExSyslogger-related settings",
children: [
%{
key: :level,
type: {:dropdown, :atom},
description: "Log level",
suggestions: [:debug, :info, :warn, :error]
},
%{
key: :ident,
type: :string,
description:
"A string that's prepended to every message, and is typically set to the app name",
suggestions: ["pleroma"]
},
%{
key: :format,
type: :string,
description: "Default: \"$date $time [$level] $levelpad$node $metadata $message\"",
suggestions: ["$metadata[$level] $message"]
},
%{
key: :metadata,
type: {:list, :atom},
suggestions: [:request_id]
}
]
},
%{
group: :logger,
type: :group,
key: :console,
label: "Console Logger",
description: "Console logger settings",
children: [
%{
key: :level,
type: {:dropdown, :atom},
description: "Log level",
suggestions: [:debug, :info, :warn, :error]
},
%{
key: :format,
type: :string,
description: "Default: \"$date $time [$level] $levelpad$node $metadata $message\"",
suggestions: ["$metadata[$level] $message"]
},
%{
key: :metadata,
type: {:list, :atom},
suggestions: [:request_id]
}
]
},
%{
group: :pleroma,
key: :frontend_configurations,
@ -1422,7 +1438,7 @@ config :pleroma, :config_description, [
label: "Subject line behavior",
type: :string,
description: "Allows changing the default behaviour of subject lines in replies.
`email`: copy and prepend re:, as in email,
`email`: copy and preprend re:, as in email,
`masto`: copy verbatim, as in Mastodon,
`noop`: don't copy the subject.",
suggestions: ["email", "masto", "noop"]
@ -1755,12 +1771,6 @@ config :pleroma, :config_description, [
type: :boolean,
description: "Require HTTP signatures for AP fetches"
},
%{
key: :authorized_fetch_mode_exceptions,
type: {:list, :string},
description:
"List of IPs (CIDR format accepted) to exempt from HTTP Signatures requirement (for example to allow debugging, you shouldn't otherwise need this)"
},
%{
key: :note_replies_output_limit,
type: :integer,
@ -1921,7 +1931,7 @@ config :pleroma, :config_description, [
key: :log,
type: {:dropdown, :atom},
description: "Logs verbose mode",
suggestions: [false, :error, :warning, :info, :debug]
suggestions: [false, :error, :warn, :info, :debug]
},
%{
key: :queues,
@ -2013,6 +2023,23 @@ config :pleroma, :config_description, [
}
]
},
%{
group: :pleroma,
key: :workers,
type: :group,
description: "Includes custom worker options not interpretable directly by `Oban`",
children: [
%{
key: :retries,
type: {:keyword, :integer},
description: "Max retry attempts for failed jobs, per `Oban` queue",
suggestions: [
federator_incoming: 5,
federator_outgoing: 5
]
}
]
},
%{
group: :pleroma,
key: Pleroma.Web.Metadata,
@ -2084,11 +2111,11 @@ config :pleroma, :config_description, [
]
},
%{
key: :timeout,
key: :failure_backoff,
type: :integer,
description:
"Amount of milliseconds after which the HTTP request is forcibly terminated.",
suggestions: [5_000]
"Amount of milliseconds after request failure, during which the request will not be retried.",
suggestions: [60_000]
}
]
},
@ -2241,8 +2268,14 @@ config :pleroma, :config_description, [
label: "SSL options",
type: :keyword,
description: "Additional SSL options",
suggestions: [verify: :verify_peer],
suggestions: [cacertfile: "path/to/file/with/PEM/cacerts", verify: :verify_peer],
children: [
%{
key: :cacertfile,
type: :string,
description: "Path to file with PEM encoded cacerts",
suggestions: ["path/to/file/with/PEM/cacerts"]
},
%{
key: :verify,
type: :atom,
@ -2262,8 +2295,14 @@ config :pleroma, :config_description, [
label: "TLS options",
type: :keyword,
description: "Additional TLS options",
suggestions: [verify: :verify_peer],
suggestions: [cacertfile: "path/to/file/with/PEM/cacerts", verify: :verify_peer],
children: [
%{
key: :cacertfile,
type: :string,
description: "Path to file with PEM encoded cacerts",
suggestions: ["path/to/file/with/PEM/cacerts"]
},
%{
key: :verify,
type: :atom,
@ -2280,25 +2319,11 @@ config :pleroma, :config_description, [
},
%{
key: :uid,
label: "UID Attribute",
label: "UID",
type: :string,
description:
"LDAP attribute name to authenticate the user, e.g. when \"cn\", the filter will be \"cn=username,base\"",
suggestions: ["cn"]
},
%{
key: :cacertfile,
label: "CACertfile",
type: :string,
description: "Path to CA certificate file"
},
%{
key: :mail,
label: "Mail Attribute",
type: :string,
description:
"LDAP attribute name to use as the email address when automatically registering the user on first login",
suggestions: ["mail"]
}
]
},
@ -2603,6 +2628,45 @@ config :pleroma, :config_description, [
}
]
},
%{
group: :esshd,
label: "ESSHD",
type: :group,
description:
"Before enabling this you must add :esshd to mix.exs as one of the extra_applications " <>
"and generate host keys in your priv dir with ssh-keygen -m PEM -N \"\" -b 2048 -t rsa -f ssh_host_rsa_key",
children: [
%{
key: :enabled,
type: :boolean,
description: "Enables SSH"
},
%{
key: :priv_dir,
type: :string,
description: "Dir with SSH keys",
suggestions: ["/some/path/ssh_keys"]
},
%{
key: :handler,
type: :string,
description: "Handler module",
suggestions: ["Pleroma.BBS.Handler"]
},
%{
key: :port,
type: :integer,
description: "Port to connect",
suggestions: [10_022]
},
%{
key: :password_authenticator,
type: :string,
description: "Authenticator module",
suggestions: ["Pleroma.BBS.Authenticator"]
}
]
},
%{
group: :mime,
label: "Mime Types",
@ -3065,7 +3129,7 @@ config :pleroma, :config_description, [
key: :max_waiting,
type: :integer,
description:
"Maximum number of requests waiting for other requests to finish. After this number is reached, the pool will start returning errors when a new request is made",
"Maximum number of requests waiting for other requests to finish. After this number is reached, the pool will start returning errrors when a new request is made",
suggestions: [10]
},
%{
@ -3331,7 +3395,7 @@ config :pleroma, :config_description, [
%{
key: :purge_after_days,
type: :integer,
description: "Remove backup archives after N days",
description: "Remove backup achives after N days",
suggestions: [30]
},
%{
@ -3339,20 +3403,6 @@ config :pleroma, :config_description, [
type: :integer,
description: "Limit user to export not more often than once per N days",
suggestions: [7]
},
%{
key: :process_chunk_size,
type: :integer,
label: "Process Chunk Size",
description: "The number of activities to fetch in the backup job for each chunk.",
suggestions: [100]
},
%{
key: :timeout,
type: :integer,
label: "Timeout",
description: "The amount of time to wait for backup to complete in seconds.",
suggestions: [1_800]
}
]
},
@ -3440,48 +3490,5 @@ config :pleroma, :config_description, [
]
}
]
},
%{
group: :pleroma,
key: Pleroma.Search,
type: :group,
description: "General search settings.",
children: [
%{
key: :module,
type: :keyword,
description: "Selected search module.",
suggestion: [Pleroma.Search.DatabaseSearch, Pleroma.Search.Meilisearch]
}
]
},
%{
group: :pleroma,
key: Pleroma.Search.Meilisearch,
type: :group,
description: "Meilisearch settings.",
children: [
%{
key: :url,
type: :string,
description: "Meilisearch URL.",
suggestion: ["http://127.0.0.1:7700/"]
},
%{
key: :private_key,
type: :string,
description:
"Private key for meilisearch authentication, or `nil` to disable private key authentication.",
suggestion: [nil]
},
%{
key: :initial_indexing_chunk_size,
type: :integer,
description:
"Amount of posts in a batch when running the initial indexing operation. Should probably not be more than 100000" <>
" since there's a limit on maximum insert size",
suggestion: [100_000]
}
]
}
]

View file

@ -8,7 +8,8 @@ import Config
# with brunch.io to recompile .js and .css sources.
config :pleroma, Pleroma.Web.Endpoint,
http: [
port: 4000
port: 4000,
protocol_options: [max_request_line_length: 8192, max_header_value_length: 8192]
],
protocol: "http",
debug_errors: true,
@ -35,8 +36,8 @@ config :pleroma, Pleroma.Emails.Mailer, adapter: Swoosh.Adapters.Local
# configured to run both http and https servers on
# different ports.
# Do not include timestamps in development logs
config :logger, Logger.Backends.Console, format: "$metadata[$level] $message\n"
# Do not include metadata nor timestamps in development logs
config :logger, :console, format: "[$level] $message\n"
# Set a higher stacktrace during development. Avoid configuring such
# in production as building large stacktraces may be expensive.

View file

@ -20,7 +20,6 @@ config :pleroma, Pleroma.Web.Endpoint,
config :phoenix, serve_endpoints: true
# Do not print debug messages in production
config :logger, Logger.Backends.Console, level: :info
config :logger, :console, level: :info
config :logger, :ex_syslogger, level: :info

View file

@ -16,7 +16,7 @@ config :pleroma, Pleroma.Captcha,
# Print only warnings and errors during test
config :logger, :console,
level: :warning,
level: :warn,
format: "\n[$level] $message\n"
config :pleroma, :auth, oauth_consumer_strategies: []
@ -49,8 +49,7 @@ config :pleroma, Pleroma.Repo,
hostname: System.get_env("DB_HOST") || "localhost",
port: System.get_env("DB_PORT") || "5432",
pool: Ecto.Adapters.SQL.Sandbox,
pool_size: System.schedulers_online() * 2,
log: false
pool_size: 50
config :pleroma, :dangerzone, override_repo_pool_size: true
@ -62,8 +61,7 @@ config :tesla, adapter: Tesla.Mock
config :pleroma, :rich_media,
enabled: false,
ignore_hosts: [],
ignore_tld: ["local", "localdomain", "lan"],
max_body: 2_000_000
ignore_tld: ["local", "localdomain", "lan"]
config :pleroma, :instance,
multi_factor_authentication: [
@ -135,61 +133,10 @@ config :pleroma, :side_effects,
ap_streamer: Pleroma.Web.ActivityPub.ActivityPubMock,
logger: Pleroma.LoggerMock
config :pleroma, Pleroma.Search, module: Pleroma.Search.DatabaseSearch
config :pleroma, Pleroma.Search.Meilisearch, url: "http://127.0.0.1:7700/", private_key: nil
# Reduce recompilation time
# https://dashbit.co/blog/speeding-up-re-compilation-of-elixir-projects
config :phoenix, :plug_init_mode, :runtime
config :pleroma, :config_impl, Pleroma.UnstubbedConfigMock
config :pleroma, Pleroma.PromEx, disabled: true
# Mox definitions. Only read during compile time.
config :pleroma, Pleroma.User.Backup, config_impl: Pleroma.UnstubbedConfigMock
config :pleroma, Pleroma.Uploaders.S3, ex_aws_impl: Pleroma.Uploaders.S3.ExAwsMock
config :pleroma, Pleroma.Uploaders.S3, config_impl: Pleroma.UnstubbedConfigMock
config :pleroma, Pleroma.Upload, config_impl: Pleroma.UnstubbedConfigMock
config :pleroma, Pleroma.ScheduledActivity, config_impl: Pleroma.UnstubbedConfigMock
config :pleroma, Pleroma.Web.RichMedia.Helpers, config_impl: Pleroma.StaticStubbedConfigMock
config :pleroma, Pleroma.Uploaders.IPFS, config_impl: Pleroma.UnstubbedConfigMock
config :pleroma, Pleroma.Web.Plugs.HTTPSecurityPlug, config_impl: Pleroma.StaticStubbedConfigMock
config :pleroma, Pleroma.Web.Plugs.HTTPSignaturePlug, config_impl: Pleroma.StaticStubbedConfigMock
config :pleroma, Pleroma.Signature, http_signatures_impl: Pleroma.StubbedHTTPSignaturesMock
peer_module =
if String.to_integer(System.otp_release()) >= 25 do
:peer
else
:slave
end
config :pleroma, Pleroma.Cluster, peer_module: peer_module
config :pleroma, Pleroma.Application,
background_migrators: false,
internal_fetch: false,
load_custom_modules: false,
max_restarts: 100,
streamer_registry: false,
test_http_pools: true
config :pleroma, Pleroma.Web.Streaming, sync_streaming: true
config :pleroma, Pleroma.Uploaders.Uploader, timeout: 1_000
config :pleroma, Pleroma.Emoji.Loader, test_emoji: true
config :pleroma, Pleroma.Web.RichMedia.Backfill,
stream_out: Pleroma.Web.ActivityPub.ActivityPubMock
config :pleroma, Pleroma.Web.Plugs.HTTPSecurityPlug, enable: false
config :pleroma, Pleroma.User.Backup, tempdir: "test/tmp"
if File.exists?("./config/test.secret.exs") do
import_config "test.secret.exs"
else

View file

@ -1,4 +1,4 @@
# Transferring the config to/from the database
# Transfering the config to/from the database
{! backend/administration/CLI_tasks/general_cli_task_info.include !}
@ -34,7 +34,7 @@
Options:
- `<path>` - where to save migrated config. E.g. `--path=/tmp`. If file saved into non-standard folder, you must manually copy file into directory where Pleroma can read it. For OTP install path will be `PLEROMA_CONFIG_PATH` or `/etc/pleroma`. For installation from source - `config` directory in the pleroma folder.
- `<path>` - where to save migrated config. E.g. `--path=/tmp`. If file saved into non standart folder, you must manually copy file into directory where Pleroma can read it. For OTP install path will be `PLEROMA_CONFIG_PATH` or `/etc/pleroma`. For installation from source - `config` directory in the pleroma folder.
- `<env>` - the environment for which the config is migrated. Defaults to `prod`.
- To delete transferred settings from the database, the optional flag `-d` can be used
@ -154,19 +154,4 @@ This forcibly removes all saved values in the database.
```sh
mix pleroma.config [--force] reset
```
## Remove invalid MRF modules from the database
This forcibly removes any enabled MRF policy that no longer exists, restoring the instance's ability to start.
=== "OTP"
```sh
./bin/pleroma_ctl config fix_mrf_policies
```
=== "From Source"
```sh
mix pleroma.config fix_mrf_policies
```

View file

@ -21,18 +21,16 @@ Replaces embedded objects with references to them in the `objects` table. Only n
mix pleroma.database remove_embedded_objects [option ...]
```
### Options
- `--vacuum` - run `VACUUM FULL` after the embedded objects are replaced with their references
## Prune old remote posts from the database
This will prune remote posts older than 90 days (configurable with [`config :pleroma, :instance, remote_post_retention_days`](../../configuration/cheatsheet.md#instance)) from the database. Pruned posts may be refetched in some cases.
!!! note
The disk space will only be reclaimed after a proper vacuum. By default Postgresql does this for you on a regular basis, but if your instance has been running for a long time and there are many rows deleted, it may be advantageous to use `VACUUM FULL` (e.g. by using the `--vacuum` option).
This will prune remote posts older than 90 days (configurable with [`config :pleroma, :instance, remote_post_retention_days`](../../configuration/cheatsheet.md#instance)) from the database, they will be refetched from source when accessed.
!!! danger
You may run out of disk space during the execution of the task or vacuuming if you don't have about 1/3rds of the database size free. Vacuum causes a substantial increase in I/O traffic, and may lead to a degraded experience while it is running.
The disk space will only be reclaimed after `VACUUM FULL`. You may run out of disk space during the execution of the task or vacuuming if you don't have about 1/3rds of the database size free.
=== "OTP"
@ -47,11 +45,7 @@ This will prune remote posts older than 90 days (configurable with [`config :ple
```
### Options
- `--keep-threads` - Don't prune posts when they are part of a thread where at least one post has seen local interaction (e.g. one of the posts is a local post, or is favourited by a local user, or has been repeated by a local user...). It also won't delete posts when at least one of the posts in that thread is kept (e.g. because one of the posts has seen recent activity).
- `--keep-non-public` - Keep non-public posts like DM's and followers-only, even if they are remote.
- `--prune-orphaned-activities` - Also prune orphaned activities afterwards. Activities are things like Like, Create, Announce, Flag (aka reports). They can significantly help reduce the database size. Note: this can take a very long time.
- `--vacuum` - Run `VACUUM FULL` after the objects are pruned. This should not be used on a regular basis, but is useful if your instance has been running for a long time before pruning.
- `--vacuum` - run `VACUUM FULL` after the objects are pruned
## Create a conversation for all existing DMs
@ -99,9 +93,6 @@ Can be safely re-run
## Vacuum the database
!!! note
By default PostgreSQL has an autovacuum daemon running. While the tasks described here can help in some cases, they shouldn't be needed on a regular basis. See [the PostgreSQL docs on vacuuming](https://www.postgresql.org/docs/current/sql-vacuum.html) for more information on this.
### Analyze
Running an `analyze` vacuum job can improve performance by updating statistics used by the query planner. **It is safe to cancel this.**

View file

@ -31,7 +31,7 @@
1. Optionally you can remove the users of your instance. This will trigger delete requests for their accounts and posts. Note that this is 'best effort' and doesn't mean that all traces of your instance will be gone from the fediverse.
* You can do this from the admin-FE where you can select all local users and delete the accounts using the *Moderate multiple users* dropdown.
* You can also list local users and delete them individually using the CLI tasks for [Managing users](./CLI_tasks/user.md).
* You can also list local users and delete them individualy using the CLI tasks for [Managing users](./CLI_tasks/user.md).
2. Stop the Pleroma service `systemctl stop pleroma`
3. Disable pleroma from systemd `systemctl disable pleroma`
4. Remove the files and folders you created during installation (see installation guide). This includes the pleroma, nginx and systemd files and folders.

View file

@ -1,71 +0,0 @@
# Managing installed frontends
Pleroma lets you install multiple frontends, including multiple versions of the same frontend. Right now it's only possible to switch which frontend is the default, but in the future it will be possible for users to select which frontend they prefer to use.
As of 2.6.0 there are two ways of managing frontends - through PleromaFE's Admin Dashboard (preferred, easier method) or through AdminFE (clunky but also works on versions older than 2.6.0).
!!! note
Managing frontends through UI requires [in-database configuration](../configuration/howto_database_config.md) to be enabled (default on newer instances but might be off on older ones).
## How it works
When installing a frontend, Pleroma creates a folder in the [static directory](../configuration/static_dir.md) that follows this pattern: `/frontends/${front-end name}/${front-end version}/`, and puts the contents of the built frontend there. When the server is accessed, the backend checks which front-end name and version are set as the default and serves index.html and assets from the appropriate path.
!!! warning
If you've been putting your frontend build directly into static dir as an antiquated way of serving custom frontend, this system will not work and will still serve the custom index.html you put in there. You can still serve custom frontend builds if you put your build into `/frontends/$name/$version` instead and set the "default frontend" fields appropriately.
Currently there is no backup system, i.e. installing the `master` version _will_ overwrite the already-installed `master` version. For now, if you want to keep the previous version, back it up manually, e.g. by running `cp -r ./frontends/pleroma-fe/master ./frontends/pleroma-fe/master_old` in your static dir.
## Managing front-ends through Admin Dashboard
Open up the Admin Dashboard (gauge icon in the top bar, same place the link to AdminFE was),
![location of Admin Dashboard icon](../assets/admin_dash_location.png)
switch to "Front-ends" tab.
![screenshot of Front-ends tab](../assets/frontends_tab.png)
This page is designed to be self-explanatory and easy to use while avoiding the issues and pitfalls of AdminFE, but it's also early in development, so everything is subject to change.
!!! warning
This goes without saying, but if you set the default frontend to anything except a >2.6.0 version of PleromaFE, you'll lose access to the Admin Dashboard and will have to use AdminFE to get it back. See below on how to use AdminFE.
### Limitations
Currently the list of frontends available for install is essentially hard-coded in the backend's configuration, each providing only one version, with the exception of PleromaFE, which overrides 'pleroma-fe' to also include the `develop` version. There is no way to manually install a build from a URL (coming soon) nor to add more available frontends to the repository (it's broken).
There is also no way to tell whether an update is available; for now you should watch for [announcements](https://pleroma.social/announcements/) of new PleromaFE stable releases to see if there is a new stable version. For the `develop` version it's up to you whether you want to follow the development process or just reinstall it periodically hoping for new stuff.
## Using AdminFE to manage frontends
Access AdminFE either directly by going to `/pleroma/admin` of your instance or by opening Admin Dashboard and clicking the link at the bottom of the window
![link to open old AdminFE](../assets/old_adminfe_link.png)
Go to Settings -> Frontend.
### Installing front-ends
At the very top of the page there's a list of available frontends and a button to install a custom front-end
!!! tip
Remember to click "Submit" in bottom right corner to save your changes!
!!! bug
**Available Frontends** section lets you _install_ frontends but **NOT** update/reinstall them. It's only useful for installing a frontend once.
Due to the aforementioned bug, the preferred way of installing frontends in AdminFE is by clicking "Install another frontend"
![screenshot of admin-fe with instructions on how to install a frontend](../assets/way_to_install_frontends.png)
and filling in the fields. Unfortunately AdminFE does not provide the raw data necessary for you to fill those fields, so your best bet is to see what the backend returns in your browser's devtools or refer to the [source code](https://git.pleroma.social/pleroma/pleroma/-/blob/develop/config/config.exs?ref_type=heads#L742-791). For the most part, only the **Name**, **Ref** (i.e. version) and **Build URL** fields are required, although some frontends might also require **Build Directory** to work.
For pleroma-fe you can use either the `master` or `develop` refs, or potentially any ref in GitLab that has artifacts for the `build` job, but that's outside the scope of this document.
### Selecting default frontend
Scroll page waaaaay down, search for "Frontends" section, subtitled "Installed frontends management", change the name and reference of the "Primary" frontend.
![screenshot of admin-fe with instructions on how to install a frontend](../assets/primary_frontend_section.png)
!!! danger
If you change "Admin" frontend name/reference you risk losing access to AdminFE as well.
!!! warning
Don't put anything into the "Available" section as it will break the list of available frontends completely, including the "add another frontend" button. If you accidentally put something in there, click the trashbin icon next to "Available" to reset it and restore the frontends list.

Binary files not shown: five documentation image assets present only on the "before" side (sizes 8.5 KiB, 145 KiB, 15 KiB, 26 KiB, and 127 KiB).
View file

@ -3,6 +3,12 @@ Note: Additional clients may be working but theses are officially supporting Ple
Feel free to contact us to be added to this list!
## Desktop
### Roma for Desktop
- Homepage: <https://www.pleroma.com/#desktopApp>
- Source Code: <https://github.com/roma-apps/roma-desktop>
- Platforms: Windows, Mac, Linux
- Features: MastoAPI, Streaming Ready
### Social
- Source Code: <https://gitlab.gnome.org/World/Social>
- Contact: [@brainblasted@social.libre.fi](https://social.libre.fi/users/brainblasted)
@ -13,14 +19,7 @@ Feel free to contact us to be added to this list!
### Whalebird
- Homepage: <https://whalebird.social/>
- Source Code: <https://github.com/h3poteto/whalebird-desktop>
- Contact: [@whalebird@pleroma.io](https://pleroma.io/users/whalebird)
- Platforms: Windows, Mac, Linux
- Features: MastoAPI, Streaming Ready
### Fedistar
- Homepage: <https://fedistar.net>
- Source Code: <https://github.com/h3poteto/fedistar>
- Contact: [@fedistar@pleroma.io](https://pleroma.io/users/fedistar)
- Contact: [@h3poteto@pleroma.io](https://pleroma.io/users/h3poteto)
- Platforms: Windows, Mac, Linux
- Features: MastoAPI, Streaming Ready

View file

@ -41,7 +41,6 @@ To add configuration to your config file, you can copy it from the base config.
* `allow_relay`: Permits remote instances to subscribe to all public posts of your instance. This may increase the visibility of your instance.
* `public`: Makes the client API accessible only in authenticated mode, except for user profiles. Useful for disabling the Local Timeline and The Whole Known Network. Note that there is a dependent setting restricting or allowing unauthenticated access to specific resources, see `restrict_unauthenticated` for more details.
* `quarantined_instances`: ActivityPub instances where private (DMs, followers-only) activities will not be sent.
* `rejected_instances`: ActivityPub instances to reject requests from if authorized_fetch_mode is enabled.
* `allowed_post_formats`: MIME-type list of formats allowed to be posted (transformed into HTML).
* `extended_nickname_format`: Set to `true` to use extended local nicknames format (allows underscores/dashes). This will break federation with
older software for these nicknames.
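A minimal sketch of these `:instance` settings, assuming illustrative domains and the value formats shown in the base config:

```elixir
config :pleroma, :instance,
  allow_relay: true,
  quarantined_instances: [{"*.quarantined.example", "Reason"}],
  rejected_instances: [{"rejected.example", "Reason"}],
  extended_nickname_format: true
```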
@ -155,15 +154,12 @@ To add configuration to your config file, you can copy it from the base config.
* `Pleroma.Web.ActivityPub.MRF.MentionPolicy`: Drops posts mentioning configurable users. (See [`:mrf_mention`](#mrf_mention)).
* `Pleroma.Web.ActivityPub.MRF.VocabularyPolicy`: Restricts activities to a configured set of vocabulary. (See [`:mrf_vocabulary`](#mrf_vocabulary)).
* `Pleroma.Web.ActivityPub.MRF.ObjectAgePolicy`: Rejects or delists posts based on their age when received. (See [`:mrf_object_age`](#mrf_object_age)).
* `Pleroma.Web.ActivityPub.MRF.ActivityExpirationPolicy`: Sets a default expiration on all posts made by users of the local instance. Requires `Pleroma.Workers.PurgeExpiredActivity` to be enabled for processing the scheduled deletions.
* `Pleroma.Web.ActivityPub.MRF.ActivityExpirationPolicy`: Sets a default expiration on all posts made by users of the local instance. Requires `Pleroma.Workers.PurgeExpiredActivity` to be enabled for processing the scheduled delections.
* `Pleroma.Web.ActivityPub.MRF.ForceBotUnlistedPolicy`: Makes all bot posts disappear from public timelines.
* `Pleroma.Web.ActivityPub.MRF.FollowBotPolicy`: Automatically follows newly discovered users from the specified bot account. Local accounts, locked accounts, and users with "#nobot" in their bio are respected and excluded from being followed.
* `Pleroma.Web.ActivityPub.MRF.AntiFollowbotPolicy`: Drops follow requests from followbots. Users can still allow bots to follow them by first following the bot.
* `Pleroma.Web.ActivityPub.MRF.KeywordPolicy`: Rejects or removes from the federated timeline or replaces keywords. (See [`:mrf_keyword`](#mrf_keyword)).
* `Pleroma.Web.ActivityPub.MRF.ForceMentionsInContent`: Forces every mentioned user to be reflected in the post content.
* `Pleroma.Web.ActivityPub.MRF.InlineQuotePolicy`: Forces quote post URLs to be reflected in the message content inline.
* `Pleroma.Web.ActivityPub.MRF.QuoteToLinkTagPolicy`: Force a Link tag for posts quoting another post. (may break outgoing federation of quote posts with older Pleroma versions).
* `Pleroma.Web.ActivityPub.MRF.ForceMention`: Forces posts to include a mention of the author of the parent post or the author of the quoted post.
* `transparency`: Make the content of your Message Rewrite Facility settings public (via nodeinfo).
* `transparency_exclusions`: Exclude specific instance names from MRF transparency. The use of the exclusions feature will be disclosed in nodeinfo as a boolean value.
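For reference, a minimal sketch of an `:mrf` block enabling a few of the policies above (the shape follows the base config shown earlier in this diff; the policy selection is illustrative):

```elixir
config :pleroma, :mrf,
  policies: [
    Pleroma.Web.ActivityPub.MRF.ObjectAgePolicy,
    Pleroma.Web.ActivityPub.MRF.TagPolicy,
    Pleroma.Web.ActivityPub.MRF.InlineQuotePolicy
  ],
  transparency: true,
  transparency_exclusions: []
```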
@ -265,18 +261,6 @@ Notes:
* `follower_nickname`: The name of the bot account to use for following newly discovered users. Using `followbot` or similar is strongly suggested.
#### :mrf_emoji
* `remove_url`: A list of patterns which result in emoji whose URL matches being removed from the message. This will apply to statuses, emoji reactions, and user profiles. Each pattern can be a string or a [regular expression](https://hexdocs.pm/elixir/Regex.html).
* `remove_shortcode`: A list of patterns which result in emoji whose shortcode matches being removed from the message. This will apply to statuses, emoji reactions, and user profiles. Each pattern can be a string or a [regular expression](https://hexdocs.pm/elixir/Regex.html).
* `federated_timeline_removal_url`: A list of patterns which result in message with emojis whose URLs match being removed from federated timelines (a.k.a unlisted). This will apply only to statuses. Each pattern can be a string or a [regular expression](https://hexdocs.pm/elixir/Regex.html).
* `federated_timeline_removal_shortcode`: A list of patterns which result in message with emojis whose shortcodes match being removed from federated timelines (a.k.a unlisted). This will apply only to statuses. Each pattern can be a string or a [regular expression](https://hexdocs.pm/elixir/Regex.html).
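A hedged sketch of an `:mrf_emoji` block using the keys above; the patterns are purely illustrative, not defaults:

```elixir
config :pleroma, :mrf_emoji,
  # Illustrative patterns only.
  remove_url: [~r/badsite\.example/],
  remove_shortcode: ["spam_emoji"],
  federated_timeline_removal_url: [],
  federated_timeline_removal_shortcode: []
```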
#### :mrf_inline_quote
* `template`: The template to append to the post. `{url}` will be replaced with the actual link to the quoted post. Default: `<bdi>RT:</bdi> {url}`
#### :mrf_force_mention
* `mention_parent`: Whether to append mention of parent post author
* `mention_quoted`: Whether to append mention of parent quoted author
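Sketches of both policy configurations using the keys above (the template shown is the documented default; the booleans are illustrative):

```elixir
config :pleroma, :mrf_inline_quote,
  template: "<bdi>RT:</bdi> {url}"

config :pleroma, :mrf_force_mention,
  mention_parent: true,
  mention_quoted: true
```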
### :activitypub
* `unfollow_blocked`: Whether blocks result in people getting unfollowed
@ -285,7 +269,6 @@ Notes:
* `deny_follow_blocked`: Whether to disallow following an account that has blocked the user in question
* `sign_object_fetches`: Sign object fetches with HTTP signatures
* `authorized_fetch_mode`: Require HTTP signatures for AP fetches
* `authorized_fetch_mode_exceptions`: List of IPs (CIDR format accepted) to exempt from HTTP Signatures requirement (for example to allow debugging, you shouldn't otherwise need this)
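A hedged sketch combining the fetch-related options above; the CIDR entry is purely illustrative:

```elixir
config :pleroma, :activitypub,
  sign_object_fetches: true,
  authorized_fetch_mode: true,
  authorized_fetch_mode_exceptions: ["192.168.0.0/16"]
```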
## Pleroma.User
@ -436,7 +419,7 @@ config :pleroma, Pleroma.Web.MediaProxy.Invalidation.Http,
* `ignore_hosts`: list of hosts which will be ignored by the metadata parser. For example `["accounts.google.com", "xss.website"]`, defaults to `[]`.
* `ignore_tld`: list of TLDs (top-level domains) which will be ignored when parsing metadata. Default is ["local", "localdomain", "lan"].
* `parsers`: list of Rich Media parsers.
* `timeout`: Amount of milliseconds after which the HTTP request is forcibly terminated.
* `failure_backoff`: Amount of milliseconds after request failure, during which the request will not be retried.
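A hedged sketch of a `:rich_media` block using the options above (values illustrative; note that the last key differs between the compared versions, `timeout` on develop vs `failure_backoff` on v2.5.4):

```elixir
config :pleroma, :rich_media,
  enabled: true,
  ignore_hosts: ["accounts.google.com", "xss.website"],
  ignore_tld: ["local", "localdomain", "lan"],
  timeout: 5_000
```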
## HTTP server
@ -474,7 +457,6 @@ This will make Pleroma listen on `127.0.0.1` port `8080` and generate urls start
* ``ct_max_age``: The maximum age for the `Expect-CT` header if sent.
* ``referrer_policy``: The referrer policy to use, either `"same-origin"` or `"no-referrer"`.
* ``report_uri``: Adds the specified url to `report-uri` and `report-to` group in CSP header.
* `allow_unsafe_eval`: Adds `wasm-unsafe-eval` to the CSP header. Needed for some non-essential frontend features like Flash emulation.
### Pleroma.Web.Plugs.RemoteIp
@ -514,7 +496,7 @@ config :pleroma, :rate_limit,
Means that:
1. In 60 seconds, 15 authentication attempts can be performed from the same IP address.
2. In 1 second, 10 search requests can be performed from the same IP address by unauthenticated users, while authenticated users can perform 30 search requests per second.
2. In 1 second, 10 search requests can be performed from the same IP adress by unauthenticated users, while authenticated users can perform 30 search requests per second.
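The corresponding configuration for that example, sketched from the tuple format described above (a single `{scale_ms, limit}` tuple applies to everyone; a two-element list is `[unauthenticated, authenticated]`):

```elixir
config :pleroma, :rate_limit,
  authentication: {60_000, 15},
  search: [{1_000, 10}, {1_000, 30}]
```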
Supported rate limiters:
@ -664,19 +646,6 @@ config :ex_aws, :s3,
host: "s3.eu-central-1.amazonaws.com"
```
#### Pleroma.Uploaders.IPFS
* `post_gateway_url`: URL with port of POST Gateway (unauthenticated)
* `get_gateway_url`: URL of public GET Gateway
Example:
```elixir
config :pleroma, Pleroma.Uploaders.IPFS,
post_gateway_url: "http://localhost:5001",
get_gateway_url: "http://{CID}.ipfs.mydomain.com"
```
### Upload filters
#### Pleroma.Upload.Filter.AnonymizeFilename
@ -702,12 +671,6 @@ This filter reads the ImageDescription and iptc:Caption-Abstract fields with Exi
No specific configuration.
#### Pleroma.Upload.Filter.OnlyMedia
This filter rejects uploads that are not identified with Content-Type matching audio/\*, image/\*, or video/\*
No specific configuration.
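A minimal sketch of enabling upload filters, assuming the standard `filters` key of `Pleroma.Upload` (the filter selection is illustrative):

```elixir
config :pleroma, Pleroma.Upload,
  filters: [
    Pleroma.Upload.Filter.AnonymizeFilename,
    Pleroma.Upload.Filter.OnlyMedia
  ]
```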
#### Pleroma.Upload.Filter.Mogrify
* `args`: List of actions for the `mogrify` command like `"strip"` or `["strip", "auto-orient", {"implode", "1"}]`.
@ -742,21 +705,6 @@ config :pleroma, Pleroma.Emails.Mailer,
auth: :always
```
An example for Mua adapter:
```elixir
config :pleroma, Pleroma.Emails.Mailer,
enabled: true,
adapter: Swoosh.Adapters.Mua,
relay: "mail.example.com",
port: 465,
auth: [
username: "YOUR_USERNAME@domain.tld",
password: "YOUR_SMTP_PASSWORD"
],
protocol: :ssl
```
### :email_notifications
Email notifications settings.
@ -868,7 +816,7 @@ config :logger,
backends: [{ExSyslogger, :ex_syslogger}]
config :logger, :ex_syslogger,
level: :warning
level: :warn
```
Another example, keeping console output and adding the pid to syslog output:
@ -877,7 +825,7 @@ config :logger,
backends: [:console, {ExSyslogger, :ex_syslogger}]
config :logger, :ex_syslogger,
level: :warning,
level: :warn,
option: [:pid, :ndelay]
```
@ -925,8 +873,21 @@ This will probably take a long time.
### BBS / SSH access
This feature has been removed from Pleroma core.
However, a client has been made and is available at https://git.pleroma.social/Duponin/sshocial.
To enable simple command line interface accessible over ssh, add a setting like this to your configuration file:
```exs
app_dir = File.cwd!
priv_dir = Path.join([app_dir, "priv/ssh_keys"])
config :esshd,
enabled: true,
priv_dir: priv_dir,
handler: "Pleroma.BBS.Handler",
port: 10_022,
password_authenticator: "Pleroma.BBS.Authenticator"
```
Feel free to adjust the priv_dir and port number. Then you will have to create the folder for the keys (in the example `priv/ssh_keys`) and create the host keys with `ssh-keygen -m PEM -N "" -b 2048 -t rsa -f ssh_host_rsa_key`. After restarting, you should be able to connect to your Pleroma instance with `ssh username@server -p $PORT`
### :gopher
* `enabled`: Enables the gopher interface
@ -983,13 +944,12 @@ Pleroma account will be created with the same name as the LDAP user name.
* `enabled`: enables LDAP authentication
* `host`: LDAP server hostname
* `port`: LDAP port, e.g. 389 or 636
* `ssl`: true to use implicit SSL/TLS, usually port 636
* `ssl`: true to use SSL, usually implies the port 636
* `sslopts`: additional SSL options
* `tls`: true to use explicit TLS (STARTTLS), usually port 389
* `tls`: true to start TLS, usually implies the port 389
* `tlsopts`: additional TLS options
* `base`: LDAP base, e.g. "dc=example,dc=com"
* `uid`: LDAP attribute name to authenticate the user, e.g. when "cn", the filter will be "cn=username,base"
* `cacertfile`: Path to alternate CA root certificates file
Note, if your LDAP server is an Active Directory server the correct value is commonly `uid: "cn"`, but if you use an
OpenLDAP server the value may be `uid: "uid"`.
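A hedged sketch tying these options together; the host, base and uid values are placeholders:

```elixir
config :pleroma, :ldap,
  enabled: true,
  host: "ldap.example.com",
  port: 636,
  ssl: true,
  base: "dc=example,dc=com",
  uid: "cn"
```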
@ -1118,7 +1078,7 @@ config :pleroma, Pleroma.Formatter,
## :configurable_from_database
Boolean, enables/disables in-database configuration. Read [Transferring the config to/from the database](../administration/CLI_tasks/config.md) for more information.
Boolean, enables/disables in-database configuration. Read [Transfering the config to/from the database](../administration/CLI_tasks/config.md) for more information.
## :database_config_whitelist
@ -1179,7 +1139,7 @@ Control favicons for instances.
!!! note
Requires enabled email
* `:purge_after_days` an integer, remove backup archives after N days.
* `:purge_after_days` an integer, remove backup achives after N days.
* `:limit_days` an integer, limit user to export not more often than once per N days.
* `:dir` a string with a path to backup temporary directory or `nil` to let Pleroma choose temporary directory in the following order:
1. the directory named by the TMPDIR environment variable
@ -1187,7 +1147,6 @@ Control favicons for instances.
3. the directory named by the TMP environment variable
4. C:\TMP on Windows or /tmp on Unix-like operating systems
5. as a last resort, the current working directory
* `:timeout` an integer representing seconds
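Put together, the defaults sketched from the base config shown earlier in this diff look like this:

```elixir
config :pleroma, Pleroma.User.Backup,
  purge_after_days: 30,
  limit_days: 7,
  dir: nil,
  process_chunk_size: 100,
  timeout: :timer.minutes(30)
```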
## Frontend management

View file

@ -29,7 +29,7 @@ foo, /emoji/custom/foo.png
The files should be PNG (APNG is okay with `.png` for `image/png` Content-type) and under 50kb for compatibility with Mastodon.
Default file extensions and locations for emojis are set in `config.exs`. To use different locations or file-extensions, add the `shortcode_globs` to your secrets file (`prod.secret.exs` or `dev.secret.exs`) and edit it. Note that not all fediverse-software will show emojis with other file extensions:
Default file extentions and locations for emojis are set in `config.exs`. To use different locations or file-extentions, add the `shortcode_globs` to your secrets file (`prod.secret.exs` or `dev.secret.exs`) and edit it. Note that not all fediverse-software will show emojis with other file extentions:
```elixir
config :pleroma, :emoji, shortcode_globs: ["/emoji/custom/**/*.png", "/emoji/custom/**/*.gif"]
```

View file

@ -62,20 +62,6 @@ An additional “Expect-CT” header will be sent with the configured `ct_max_ag
If you click on a link, your browser's request to the other site will include where it is coming from. The "Referrer policy" header tells the browser how and if it should send this information. (see [Referrer policy](https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Referrer-Policy))
### Uploaded media and media proxy
It is STRONGLY RECOMMENDED to serve both locally-uploaded media and the media proxy from a domain other than the one Pleroma runs on, if applicable.
```elixir
config :pleroma, :media_proxy,
base_url: "https://some.other.domain"
config :pleroma, Pleroma.Upload,
base_url: "https://some.other.domain/media"
```
See `installation/pleroma-mediaproxy.nginx` for examples on how to configure your media proxy.
## systemd
A systemd unit example is provided at `installation/pleroma.service`.

View file

@ -1,4 +1,4 @@
# I2P Federation and Accessibility
# I2P Federation and Accessability
This guide is going to focus on the Pleroma federation aspect. The actual installation is neatly explained in the official documentation, and more likely to remain up-to-date.
It might be added to this guide if there is a need for it.

View file

@ -29,7 +29,7 @@ HiddenServiceDir /var/lib/tor/pleroma_hidden_service/
HiddenServicePort 80 127.0.0.1:8099
HiddenServiceVersion 3 # Remove if Tor version is below 0.3 ( tor --version )
```
Restart Tor to generate an address:
Restart Tor to generate an adress:
```
systemctl restart tor@default.service
```

View file

@ -1,6 +1,6 @@
# Optimizing the BEAM
Pleroma is built upon the Erlang/OTP VM known as BEAM. The BEAM VM is highly optimized for latency, but this has drawbacks in environments without dedicated hardware. One of the tricks used by the BEAM VM is [busy waiting](https://en.wikipedia.org/wiki/Busy_waiting). This allows the application to pretend to be busy working so the OS kernel does not pause the application process and switch to another process waiting for the CPU to execute its workload. It does this by spinning for a period of time which inflates the apparent CPU usage of the application so it is immediately ready to execute another task. This can be observed with utilities like **top(1)** which will show consistently high CPU usage for the process. Switching between processes is a rather expensive operation and also clears CPU caches further affecting latency and performance. The goal of busy waiting is to avoid this penalty.
Pleroma is built upon the Erlang/OTP VM known as BEAM. The BEAM VM is highly optimized for latency, but this has drawbacks in environments without dedicated hardware. One of the tricks used by the BEAM VM is [busy waiting](https://en.wikipedia.org/wiki/Busy_waiting). This allows the application to pretend to be busy working so the OS kernel does not pause the application process and switch to another process waiting for the CPU to execute its workload. It does this by spinning for a period of time which inflates the apparent CPU usage of the application so it is immediately ready to execute another task. This can be observed with utilities like **top(1)** which will show consistently high CPU usage for the process. Switching between procesess is a rather expensive operation and also clears CPU caches further affecting latency and performance. The goal of busy waiting is to avoid this penalty.
This strategy is very successful in making a performant and responsive application, but is not desirable on Virtual Machines or hardware with few CPU cores. Pleroma instances are often deployed on the same server as the required PostgreSQL database which can lead to situations where the Pleroma application is holding the CPU in a busy-wait loop and as a result the database cannot process requests in a timely manner. The fewer CPUs available, the more this problem is exacerbated. The latency is further amplified by the OS being installed on a Virtual Machine as the Hypervisor uses CPU time-slicing to pause the entire OS and switch between other tasks.

View file

@ -22,7 +22,7 @@ config :pleroma, Pleroma.Repo,
]
```
A more detailed explanation of the issue can be found at <https://blog.soykaf.com/post/postgresql-elixir-troubles/>.
A more detailed explaination of the issue can be found at <https://blog.soykaf.com/post/postgresql-elixir-troubles/>.
## Example configurations

View file

@ -1,147 +0,0 @@
# Configuring search
{! backend/administration/CLI_tasks/general_cli_task_info.include !}
## Built-in search
To use the built-in search, which has no external dependencies, set the search module to `Pleroma.Search.DatabaseSearch`:
> config :pleroma, Pleroma.Search, module: Pleroma.Search.DatabaseSearch
While it has no external dependencies, it has problems with performance and relevancy.
## QdrantSearch
This uses the vector search engine [Qdrant](https://qdrant.tech) to search the posts in a vector space. This needs a way to generate embeddings and uses the [OpenAI API](https://platform.openai.com/docs/guides/embeddings/what-are-embeddings). This is implemented by several projects besides OpenAI itself, including the python-based fastembed-server found in `supplemental/search/fastembed-api`.
The default settings will support a setup where both the fastembed server and Qdrant run on the same system as Pleroma. To use it, set the search provider and run the fastembed server; see the README in `supplemental/search/fastembed-api`:
> config :pleroma, Pleroma.Search, module: Pleroma.Search.QdrantSearch
Then, start the Qdrant server, see [here](https://qdrant.tech/documentation/quick-start/) for instructions.
You will also need to create the Qdrant index once by running `mix pleroma.search.indexer create_index`. Running `mix pleroma.search.indexer index` will retroactively index the last 100_000 activities.
### Indexing and model options
To see the available configuration options, check out the QdrantSearch section in `config/config.exs`.
The default indexing options work for the default model (`snowflake-arctic-embed-xs`). To optimize for a low memory footprint, adjust the index configuration as described in the [Qdrant docs](https://qdrant.tech/documentation/guides/optimize/). See also [this blog post](https://qdrant.tech/articles/memory-consumption/) that goes into detail.
Different embedding models will need different vector size settings. You can see a list of the models supported by the fastembed server [here](https://qdrant.github.io/fastembed/examples/Supported_Models), including their vector dimensions. These vector dimensions need to be set in the `qdrant_index_configuration`.
E.g., if you want to use `sentence-transformers/all-MiniLM-L6-v2` as a model, you will not need to adjust things, because it and `snowflake-arctic-embed-xs` are both 384-dimensional models. If you want to use `snowflake/snowflake-arctic-embed-l`, you will need to adjust the `size` parameter in the `qdrant_index_configuration` to 1024, as it has a dimension of 1024.
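A sketch of that model swap, adjusting only the relevant keys and leaving the rest of the `QdrantSearch` defaults from `config/config.exs` untouched:

```elixir
config :pleroma, Pleroma.Search.QdrantSearch,
  openai_model: "snowflake/snowflake-arctic-embed-l",
  qdrant_index_configuration: %{
    vectors: %{size: 1024, distance: "Cosine"}
  }
```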
When using a different model, you will need to drop the index and recreate it (`mix pleroma.search.indexer drop_index` and `mix pleroma.search.indexer create_index`), as the different embeddings are not compatible with each other.
## Meilisearch
Note that it's quite a bit more memory hungry than PostgreSQL (around 4-5G for ~1.2 million
posts while idle and up to 7G while indexing initially). The disk usage for this additional index is also
around 4 gigabytes. Like [RUM](./cheatsheet.md#rum-indexing-for-full-text-search) indexes, it offers considerably
higher performance and ordering by timestamp in a reasonable amount of time.
Additionally, the search results seem to be more accurate.
Due to high memory usage, it may be best to set it up on a different machine, if running pleroma on a low-resource
computer, and use private key authentication to secure the remote search instance.
To use [meilisearch](https://www.meilisearch.com/), set the search module to `Pleroma.Search.Meilisearch`:
> config :pleroma, Pleroma.Search, module: Pleroma.Search.Meilisearch
You then need to set the address of the meilisearch instance, and optionally the private key for authentication. You might
also want to change the `initial_indexing_chunk_size` to be smaller if your server is not very powerful, but not higher than `100_000`,
because meilisearch will refuse to process it if it's too big. However, in general you want this to be as big as possible, because meilisearch
indexes faster when it can process many posts in a single batch.
> config :pleroma, Pleroma.Search.Meilisearch,
> url: "http://127.0.0.1:7700/",
> private_key: "private key",
> initial_indexing_chunk_size: 100_000
Information about setting up meilisearch can be found in the
[official documentation](https://docs.meilisearch.com/learn/getting_started/installation.html).
You probably want to start it with `MEILI_NO_ANALYTICS=true` environment variable to disable analytics.
At least version 0.25.0 is required, but you are strongly advised to use at least 0.26.0, as it introduces
the `--enable-auto-batching` option which drastically improves performance. Without this option, the search
is hardly usable on a somewhat big instance.
### Private key authentication (optional)
To set the private key, use the `MEILI_MASTER_KEY` environment variable when starting. After setting the _master key_,
you have to get the _private key_, which is actually used for authentication.
=== "OTP"
```sh
./bin/pleroma_ctl search.meilisearch show-keys <your master key here>
```
=== "From Source"
```sh
mix pleroma.search.meilisearch show-keys <your master key here>
```
You will see a "Default Admin API Key", this is the key you actually put into your configuration file.
### Initial indexing
After setting up the configuration, you'll want to index all of your already existing posts. Only public posts are indexed. You'll only
have to do it one time, but it might take a while, depending on the amount of posts your instance has seen. This is also a fairly RAM
consuming process for `meilisearch`, and it will take a lot of RAM when running if you have a lot of posts (seems to be around 5G for ~1.2
million posts while idle and up to 7G while indexing initially, but your experience may be different).
The sequence of actions is as follows:
1. First, change the configuration to use `Pleroma.Search.Meilisearch` as the search backend
2. Restart your instance, at this point it can be used while the search indexing is running, though search won't return anything
3. Start the initial indexing process (as described below with `index`),
and wait until the task says it sent everything from the database to index
4. Wait until everything is actually indexed (by checking with `stats` as described below),
at this point you don't have to do anything, just wait a while.
To start the initial indexing, run the `index` command:
=== "OTP"
```sh
./bin/pleroma_ctl search.meilisearch index
```
=== "From Source"
```sh
mix pleroma.search.meilisearch index
```
This will show you the total amount of posts to index, and then show you the amount of posts indexed currently, until the numbers eventually
become the same. The posts are indexed in big batches and meilisearch will take some time to actually index them, even after you have
inserted all the posts into it. Depending on the amount of posts, this may be as long as several hours. To get information about the status
of indexing and how many posts have actually been indexed, use the `stats` command:
=== "OTP"
```sh
./bin/pleroma_ctl search.meilisearch stats
```
=== "From Source"
```sh
mix pleroma.search.meilisearch stats
```
### Clearing the index
In case you need to clear the index (for example, to re-index from scratch, if that needs to happen for some reason), you can
use the `clear` command:
=== "OTP"
```sh
./bin/pleroma_ctl search.meilisearch clear
```
=== "From Source"
```sh
mix pleroma.search.meilisearch clear
```
This will clear **all** the posts from the search index. Note that deleted posts are also removed from the index by the instance itself, so there is no need to clear the whole index unless you want **all** of it gone. That said, the index does not hold any information that cannot be re-created from the database, and it should also generally be a lot smaller than the size of your database. Still, the size depends on the amount of text in posts.

Some files were not shown because too many files have changed in this diff