This commit is contained in:
Mayel de Borniol 2022-03-30 11:02:21 +13:00
parent 5a9fc30c23
commit 6b78f9dc7b
9 changed files with 161 additions and 190 deletions

View file

@ -4,7 +4,8 @@ on:
branches:
- main
jobs:
# elixir_release:
# elixir_release:
# name: Elixir/OTP release without Docker
# runs-on: ubuntu-latest
# container: elixir:alpine
# steps:
@ -118,21 +119,13 @@ jobs:
# with:
# fetch-depth: 2 # needed for action-detect-and-tag-new-version
# -
# name: Detect new version
# name: Detect version
# id: version
# uses: salsify/action-detect-and-tag-new-version@v2
# with:
# create-tag: false # tag already created in first job
# version-command: |
# grep -m 1 'version:' mix.exs | cut -d '"' -f2
# -
# if: steps.version.outputs.current-version == steps.version.outputs.previous-version
# name: Cancel workflow if the version has not changed
# uses: andymckay/cancel-action@0.2
# # -
# # name: Get branch names
# # id: branch-name
# # uses: tj-actions/branch-names@v4
# -
# name: Set up QEMU
# uses: docker/setup-qemu-action@v1
@ -146,11 +139,6 @@ jobs:
# username: ${{ secrets.DOCKERHUB_USERNAME }}
# password: ${{ secrets.DOCKERHUB_TOKEN }}
# -
# name: Cancel any already running releases workflows
# uses: styfle/cancel-workflow-action@0.9.0
# with:
# access_token: ${{ github.token }}
# -
# name: Pre-build prep
# run: mkdir forks/ && mkdir -p data/uploads/ && make rel.config.prepare && touch data/current_flavour/config/deps.path
# -
@ -187,21 +175,13 @@ jobs:
with:
fetch-depth: 2 # needed for action-detect-and-tag-new-version
-
name: Detect if new version
name: Detect version
id: version
uses: salsify/action-detect-and-tag-new-version@v2
with:
create-tag: false # tag already created in first job
version-command: |
grep -m 1 'version:' mix.exs | cut -d '"' -f2
-
if: steps.version.outputs.current-version == steps.version.outputs.previous-version
name: Cancel workflow if the version has not changed
uses: andymckay/cancel-action@0.2
# -
# name: Get branch names
# id: branch-name
# uses: tj-actions/branch-names@v4
-
name: Set up QEMU
uses: docker/setup-qemu-action@v1
@ -214,11 +194,6 @@ jobs:
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
-
name: Cancel any already running releases workflows
uses: styfle/cancel-workflow-action@0.9.0
with:
access_token: ${{ github.token }}
-
name: Pre-build prep
run: mkdir forks/ && mkdir -p data/uploads/ && make rel.config.prepare && touch data/current_flavour/config/deps.path
@ -242,71 +217,58 @@ jobs:
run: echo ${{ steps.docker_build.outputs.digest }}
# haha_flavour_docker_release_build_push:
# name: Haha Academy flavour - Maybe release Docker image
# runs-on: ubuntu-latest
# env:
# FLAVOUR: haha
# FLAVOUR_PATH: flavours/haha
# DOCKER_REPO: haha
# APP_NAME: haha
# steps:
# -
# name: Checkout
# uses: actions/checkout@v2
# with:
# fetch-depth: 2 # needed for action-detect-and-tag-new-version
# -
# name: Detect if new version
# id: version
# uses: salsify/action-detect-and-tag-new-version@v2
# with:
# create-tag: false # tag already created in first job
# version-command: |
# grep -m 1 'version:' mix.exs | cut -d '"' -f2
# -
# if: steps.version.outputs.current-version == steps.version.outputs.previous-version
# name: Cancel workflow if the version has not changed
# uses: andymckay/cancel-action@0.2
# # -
# # name: Get branch names
# # id: branch-name
# # uses: tj-actions/branch-names@v4
# -
# name: Set up QEMU
# uses: docker/setup-qemu-action@v1
# -
# name: Set up Docker Buildx
# uses: docker/setup-buildx-action@v1
# -
# name: Login to DockerHub
# uses: docker/login-action@v1
# with:
# username: ${{ secrets.DOCKERHUB_USERNAME }}
# password: ${{ secrets.DOCKERHUB_TOKEN }}
# -
# name: Cancel any already running releases workflows
# uses: styfle/cancel-workflow-action@0.9.0
# with:
# access_token: ${{ github.token }}
# -
# name: Pre-build prep
# run: mkdir forks/ && mkdir -p data/uploads/ && make rel.config.prepare && touch data/current_flavour/config/deps.path
# -
# name: Build and push
# id: docker_build
# uses: docker/build-push-action@v2
# env:
# FLAVOUR: haha
# FLAVOUR_PATH: data/current_flavour
# with:
# context: .
# file: Dockerfile.release
# platforms: linux/amd64 #,linux/arm64,linux/arm/v7
# push: true
# tags: |
# bonfirenetworks/${{ env.DOCKER_REPO }}:latest
# bonfirenetworks/${{ env.DOCKER_REPO }}:${{steps.version.outputs.current-version}}
# -
# name: Image digest
# run: echo ${{ steps.docker_build.outputs.digest }}
reflow_flavour_docker_release_build_push:
name: Reflow flavour - Maybe release Docker image
runs-on: ubuntu-latest
env:
FLAVOUR: reflow
FLAVOUR_PATH: flavours/reflow
DOCKER_REPO: reflow
APP_NAME: reflow
steps:
-
name: Checkout
uses: actions/checkout@v2
with:
fetch-depth: 2 # needed for action-detect-and-tag-new-version
-
name: Detect version
id: version
uses: salsify/action-detect-and-tag-new-version@v2
with:
create-tag: false # tag already created in first job
version-command: |
grep -m 1 'version:' mix.exs | cut -d '"' -f2
-
name: Set up QEMU
uses: docker/setup-qemu-action@v1
-
name: Set up Docker Buildx
uses: docker/setup-buildx-action@v1
-
name: Login to DockerHub
uses: docker/login-action@v1
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
-
name: Pre-build prep
run: mkdir forks/ && mkdir -p data/uploads/ && make rel.config.prepare && touch data/current_flavour/config/deps.path
-
name: Build and push
id: docker_build
uses: docker/build-push-action@v2
env:
FLAVOUR: reflow
FLAVOUR_PATH: data/current_flavour
with:
context: .
file: Dockerfile.release
platforms: linux/amd64 #,linux/arm64
push: true
tags: |
bonfirenetworks/${{ env.DOCKER_REPO }}:latest
bonfirenetworks/${{ env.DOCKER_REPO }}:${{steps.version.outputs.current-version}}
-
name: Image digest
run: echo ${{ steps.docker_build.outputs.digest }}

View file

@ -105,7 +105,7 @@ else
endif
doc: ## Generate docs from code & readmes
@make --no-print-directory cmd cmd="mix docs"
@make --no-print-directory mix.remote~docs
recompile: ## Force the app to recompile
@make --no-print-directory cmd cmd="mix compile --force"

View file

@ -170,26 +170,26 @@ It is said that naming is one of the four hard problems of computer science (alo
This namespace handles the ActivityPub logic and stores AP activities. It is largely adapted from Pleroma code, with some modifications such as the merging of the activity and object tables and a new actor object abstraction.
Also refer to [MRF documentation](./MRF.md) to learn how to rewrite or discard messages.
`ActivityPub` contains the main API and is documented there.
`ActivityPub.Adapter` defines callback functions for the AP library.
`ActivityPub` also contains some functionality that isn't part of the AP spec but is required for federation:
- `ActivityPub` contains the main API and is documented there.
- `ActivityPub.Adapter` defines callback functions for the AP library.
It also contains some functionality that isn't part of the AP spec but is required for federation:
- `ActivityPub.Keys` - Generating and handling RSA keys for message signing
- `ActivityPub.Signature` - Adapter for the HTTPSignature library
- `ActivityPub.WebFinger` - Implementation of the WebFinger protocol
- `ActivityPub.HTTP` - Module for making HTTP requests (wrapper around tesla)
- `ActivityPub.Instances` - Module for storing reachability information about remote instances
Also refer to [MRF documentation](./MRF.md) to learn how to rewrite or discard messages.
### `ActivityPubWeb`
This namespace contains the AP S2S REST API, the activity ingestion pipeline (`ActivityPubWeb.Transmogrifier`) and the push federation facilities (`ActivityPubWeb.Federator`, `ActivityPubWeb.Publisher` and others). The outgoing federation module is designed in a modular way allowing federating through different protocols in the future.
This namespace contains the ActivityPub Server-to-Server REST API, the activity ingestion pipeline (`ActivityPubWeb.Transmogrifier`) and the push federation facilities (`ActivityPubWeb.Federator`, `ActivityPubWeb.Publisher` and others). The outgoing federation module is designed in a modular way allowing federating through different protocols in the future.
### `ActivityPub` interaction in our application logic
### ActivityPub integration with Bonfire's application logic
The callback functions defined in `ActivityPub.Adapter` are implemented in `Bonfire.ActivityPub.Adapter`. Facilities for calling the ActivityPub API are implemented in `Bonfire.ActivityPub.Publisher`. When implementing federation for a new object type it needs to be implemented both ways: both for outgoing federation in `Bonfire.ActivityPub.Publisher` and for incoming federation in `Bonfire.ActivityPub.Adapter`.
The callback functions defined in `ActivityPub.Adapter` are implemented in `Bonfire.Federate.ActivityPub.Adapter`.
When implementing federation for a new object type it needs to be implemented for both directions:
for outgoing federation using the hooks in `Bonfire.Federate.ActivityPub.Publisher` and for incoming federation using the hooks in `Bonfire.Federate.ActivityPub.Receiver`.

View file

@ -1,19 +1,21 @@
# Bonfire-flavoured Elixir
Bonfire has a few libraries that are widely used internally and make writing elixir feel a little
Bonfire has a few libraries that are widely used in the codebase and make writing Elixir feel a little
bit different. To help you get less confused by this, I've put together this handy guide on what I'm
calling "bonfire-flavoured elixir"!
Please note this guide assumes you already know [Elixir](https://elixir-lang.org/getting-started/introduction.html).
## Arrows
The elixir [|> ("pipe") operator](https://hexdocs.pm/elixir/Kernel.html#%7C%3E/2) is one of the
things that seems to get people excited about elixir. I suspect it's because they're lazy about
coming up with names, which I can appreciate. Unfortunately it's kind of limiting. The moment you
need to pipe a parameter into a position that isn't the first one, it breaks down and you have to
drop out of the pipeline format or write a secondary function to handle it.
coming up with function names, which I can appreciate. Unfortunately it's kind of limiting.
The moment you need to pipe a parameter into a position that isn't the first one, it breaks down
and you have to drop out of the pipeline format or write a secondary function to handle it.
Not any more! By simply inserting `...` where you would like the value to be inserted, it will
override where it is placed! This allows you to keep on piping while accommodating that function
override where it is placed. This allows you to keep on piping while accommodating that function
with the annoying argument order.
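For example, a minimal sketch of the `...` placeholder in action (assuming the `Arrows` macros are in scope, e.g. via `use Arrows`; the values are purely illustrative):

```elixir
use Arrows

# Regular pipe: the piped value becomes the first argument.
"bonfire" |> String.contains?("fire")
# => true

# With `...` you pick the position the piped value is inserted into:
"fire" |> String.contains?("bonfire", ...)
# => same as String.contains?("bonfire", "fire") => true
```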
I stole the idea from [an existing library](https://hexdocs.pm/magritte/Magritte.html) and removed a
@ -62,19 +64,19 @@ We also have an `ok-pipe` operator, `~>`, which only pipes into the next functio
the last one was considered a success. It's inspired by [OK](https://hexdocs.pm/ok/readme.html), but
we have chosen to do things slightly differently so it better fits with our regular pipe.
input | result |
:------------------------- | :-------------- |
`{:ok, x}` | `fun.(x)` |
`{:error, e}` | `{:error, e}` |
`nil` | `nil` |
`x when not is_nil(x)` | `fun.(x)` |
input | result |
:----------------------- | :-------------- |
`{:ok, x}` | `fun.(x)` |
`{:error, e}` | `{:error, e}` |
`nil` | `nil` |
`x when not is_nil(x)` | `fun.(x)` |
In the case of a function returning an ok/error tuple being on the left hand side, this is
straightforward to determine. In the event of `{:ok, x}`, x will be passed into the right hand side
to call. In the event of `{:error, x}`, the result will be `{:error, x}`.
We also deal with a lot of functions that indicate failure by returning nil. `~>` tries to 'do what
i mean' for both of these so you can have one pipe operator to rule them all. If `nil` is a valid
I mean' for both of these so you can have one pipe operator to rule them all. If `nil` is a valid
result, you must thus be sure to wrap it in an `ok` tuple when it occurs on the left hand side of `~>`.
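As a quick illustration of the table above (again assuming `use Arrows` brings `~>` into scope; the values are illustrative):

```elixir
use Arrows

{:ok, 5}        ~> Integer.to_string()   # => "5"
{:error, :oops} ~> Integer.to_string()   # => {:error, :oops}
nil             ~> Integer.to_string()   # => nil
5               ~> Integer.to_string()   # => "5"
```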
`|>` and `~>` compose in the way you'd expect; i.e. a `~>` receiving an error tuple or nil will stop
@ -82,8 +84,8 @@ executing the rest of the chain of (mixed) pipes.
## Where
`Where` provides replacements for the macros in `Logger` and the `IO.inspect` function with versions
that output code location information. The first argument will be `inspect`ed and the second (where
`Where` provides replacements for the macros in Elixir's `Logger` and the `IO.inspect` function to
output code location information. The first argument will be `inspect`ed and the second (where
provided) will be used as a label:
```
@ -92,7 +94,7 @@ Where
iex(2)> debug(:no, "the answer is") # log at debug
11:19:09.915 [debug] [iex:2] the answer is: :no
:no
iex(3)> Where.dump(%{a: :map}, "it") # inspect something on stdout
iex(3)> dump(%{a: :map}, "it") # inspect something on stdout
[iex:3] it: %{a: :map}
%{a: :map}
```
@ -111,7 +113,7 @@ do_something()
|> debug("output of do_something/0")
```
When you no longer need to debug this, the location of the debug statement is already in the output
When you are done debugging something, the location of the debug statement is already in the output
so you know where to remove it or comment it out! Bliss!
You will find the codebase uses this a lot and the debugs are frequently commented out. Just

View file

@ -1,4 +1,4 @@
# Bonfire Boundaries
# Boundaries & Access Control
Boundaries is Bonfire's flexible framework for full
per-user/per-object/per-action access control. It makes it easy to
@ -13,15 +13,16 @@ of circles that they can add to and categorise other users in as they
please.
Circles allow a user to categorise work colleagues differently from
friends. They can choose to allow different interactions from users in
the two circles or limit which content each sees on a per-item basis.
friends, for example. They can choose to allow different interactions
from users in the two circles or limit which content each sees on a
per-item basis.
## Verbs
Verbs represent actions that the user could perform, such as reading a
post or replying to a message.
Each verb has a unique ID, like the table IDs from `pointers` which
Each verb has a unique ID, like the table IDs from `pointers`, which
must be known to the system through configuration.
## Permissions
@ -34,7 +35,7 @@ Permissions can take one of three values:
`true` and `false` are easy enough to understand as yes and no, but what is `nil`?
`nil` represents `no answer`. in isolation, it is the same as `false`.
`nil` represents `no answer` - in isolation, it is the same as `false`.
Because a user could be in more than one circle and each circle may
have a different permission, we need a way of combining permissions to
@ -53,7 +54,7 @@ left | right | result
`false` | `false` | `false`
To be considered granted, the result of combining the permissions must
be `true` - `nil` is as good as `false` again here.
be `true` (`nil` is as good as `false` again here).
`nil` can thus be seen as a sort of `weak false`, being easily
overridden by a true, but also not by itself granting anything.
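For illustration only, here is a rough sketch of combining two permissions (not Bonfire's actual implementation; it assumes `false` acts as a veto, `true` grants, and `nil` abstains):

```elixir
defmodule PermissionSketch do
  # false vetoes, true grants, nil abstains (the "weak false")
  def combine(l, r) when l == false or r == false, do: false
  def combine(l, r) when l == true or r == true, do: true
  def combine(_l, _r), do: nil
end

PermissionSketch.combine(nil, true)    # => true  (a true overrides the weak false)
PermissionSketch.combine(false, false) # => false
PermissionSketch.combine(nil, nil)     # => nil   (not granted)
```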

View file

@ -1,58 +1,67 @@
# Bonfire's Database - An Introduction
# Bonfire's Database - an intro
Bonfire uses the excellent PostgreSQL database for most data storage. PostgreSQL allows us to make a
wide range of queries and to make them relatively fast while upholding data integrity guarantees.
Postgres is a schema-led database - it expects you to define what fields go in each table and to
reference the table you are referring to when you refer to a record with a foreign key.
Postgres is a relational schema-led database - it expects you to pre-define tables and the fields
in each table (represented in tabular form, i.e. as a collection of tables with each table consisting
of a set of rows and columns). Fields can contain data or a reference to a row in another table.
This usually means that a field containing a reference has to be pre-defined with a foreign key
pointing to a specific field (typically a primary key, like an ID column) *in a specific table*.
A social network, by contrast, is actually a graph of objects. Objects can refer to other objects by
their ID without knowing their type. We would like the flexibility to have a foreign key that
references `any referenceable object`. We call our such system `pointers`.
A simple example would be a blogging app, which might have a `post` table with `author` field that references the `user` table.
This guide is a brief introduction to pointers. It assumes a little knowledge:
A social network, by contrast, is actually a graph of objects. Objects need to be able to refer
to other objects by their ID without knowing their type.
* Basic understanding of how PostgreSQL works, in particular:
A simple example would be likes: you might have a `likes` table with a `liked_object` field that references the `post` table. But what if you don't just have posts that can be liked, but also videos, images, polls, etc., each with their own table?
We needed the flexibility to have a foreign key that can reference any referenceable object.
We call our system `Pointers`.
This guide is a brief introduction to Pointers. It assumes some foundational knowledge:
* Basic understanding of how relational databases like Postgresql work, in particular:
* Tables being made up of fields.
* What a primary key is and why it's useful.
* Foreign keys and relationships between tables (1:1, 1:Many, Many:1, Many:Many).
* Foreign keys and relationships between tables (1 to 1, 1 to Many, Many to 1, Many to Many).
* Views as virtual tables backed by a SQL query.
* Basic understanding of elixir (enough to follow the examples).
* Basic working knowledge of the ecto database library (schema and migration definitions)
* Basic understanding of Elixir (enough to follow the examples).
* Basic working knowledge of the [Ecto](https://hexdocs.pm/ecto/Ecto.html) database library (schema and migration definitions)
## Identifying objects - the ULID type
All referenceable objects in the system have a unique ID whose type is the
[`ULID`](https://github.com/ulid/spec) It'sa lot like a `UUID` in that you can generate unique ones
All referenceable objects in the system have a unique ID (primary key) whose type is the
[`ULID`](https://github.com/ulid/spec). It's a lot like a `UUID` in that you can generate unique ones
independently of the database. It's also a little different, being made up of two parts:
* The current timestamp, to millisecond precision.
* Strong random padding for uniqueness.
This means that it naturally sorts by time to the millisecond (close enough for us), giving us a
performance advantage on creation time-ordered queries! By contrast, UUIDv4 is randomly
distributed - a worst case scenario for ordering!
performance advantage on creation-time-ordered queries without needing a separate creation datetime
field (by contrast, UUIDv4 is randomly distributed).
If you've only worked with integer primary keys before, you are probably used to letting the
database dispense an ID for you. With `ULID` (or `UUID`), IDs can be known *before* they are stored,
greatly easing the process of storing a graph of data and allowing us to do more of the preparation
work outside of a transaction for increased performance.
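A small sketch of what this enables (assuming `Pointers.ULID.generate/0` from the `pointers` library; the printed value is just an example):

```elixir
# Generate the ID up front, outside any transaction:
id = Pointers.ULID.generate()
# => e.g. "01FX6V9QDM2Q3Z0V4R8W5T6Y7A" (timestamp-prefixed, so it sorts by creation time)

# The same `id` can now be referenced by several related records you are about
# to insert, before any of them have touched the database.
```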
In PostgreSQL, we actually store `ULID`s as `UUID` columns, owing to the lack of a `ULID` column
type shipping with postgresql and them both being the same size. You mostly will not notice this
In PostgreSQL, we actually store `ULID`s as `UUID` columns, thanks to both being the same size
(and the lack of a `ULID` column type shipping with postgresql). You mostly will not notice this
because it's handled for you, but there are a few places it can come up:
* Ecto debug and error output may show either binary values or UUID-formatted values.
* Hand-written sql in migrations may need to convert table IDs to the `UUID` format before use.
* Hand-written SQL may need to convert table IDs to the `UUID` format before use.
## It's just a table, dave
## It's just a table
The `pointers` system is mostly based around a single table represented by the `Pointers.Pointer`
schema with the following fields:
* `id` (ULID) - the database-wide unique id for the object, primary key.
* `table_id` (ULID) - identifies the type type of the object, references `Pointers.Table`.
* `deleted_at` (timestamp, default: null) - when the object was deleted.
* `table_id` (ULID) - identifies the type of the object, references `Pointers.Table`.
* `deleted_at` (timestamp, default: `null`) - when the object was deleted.
Every object that is stored in the system will have a record in this table. It may also have records
in other tables (handy for storing more than 3 fields about the object!).
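Because every object has a row here, you can already do something useful knowing nothing but its ID. A hedged sketch (`MyApp.Repo` and `object_id` are placeholders, not part of Bonfire):

```elixir
import Ecto.Query

# Fetch the Pointer row for any non-deleted object without knowing its type:
Pointers.Pointer
|> where([p], p.id == ^object_id and is_nil(p.deleted_at))
|> MyApp.Repo.one()
```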
@ -66,16 +75,17 @@ Mixins are tables which contain extra information on behalf of objects. Each obj
record or not record information for each mixin. Sample mixins include:
* user profile (containing a name, location and summary)
* post content (containing the escaped html body of a post or message)
* post content (containing the title, summary, and/or html body of a post or message)
* created (containing the id of the object creator)
In this way, they are reusable across different object types. One mixin may (not) be used by any
number of objects. This is mostly driven by the type of the object we are storing, but can also be
driven by user input.
In this way, they are reusable across different object types. One mixin may (or may not) be used
by any number of objects. This is mostly driven by the type of the object we are storing,
but can also be driven by user input.
Mixins are just tables too! The only requirement is they have a `ULID` primary key which references
`Pointers.Pointer`. The developer of the mixin is free to put whatever other fields they want in the
table, so long as they have that primary key.
table, so long as they have that primary-key-as-reference (which will be automatically added for you
by the `mixin_schema` macro).
Here is a sample mixin definition for a user profile:
@ -96,15 +106,14 @@ end
```
Aside from `use`ing `Pointers.Mixin` instead of `Ecto.Schema` and calling `mixin_schema` instead of
`schema`, pretty similar, right? The `ULID` primary key referencing `Pointers.Pointer` will be
automatically added for you by `mixin_schema`.
`schema`, pretty similar to a standard Ecto schema, right?
The arguments to `use Pointers.Mixin` are:
* `otp_app`: the otp app name to use when loading dynamic configuration, e.g. the current app (required)
* `otp_app`: the OTP app name to use when loading dynamic configuration, e.g. the current extension or app (required)
* `source`: the underlying table name to use in the database
We will cover dynamic configuration later. For now, you can use the otp app that includes the module.
We will cover dynamic configuration later. For now, you can use the OTP app that includes the module.
## Multimixins
@ -132,12 +141,7 @@ end
```
Notice that this looks very similar to defining a mixin. Indeed, the only difference is the
`primary_key: true` in this line, which adds a field to the compound primary key:
```elixir
belongs_to :feed, Pointer, primary_key: true
```
`primary_key: true` in this line, which adds a second field to the compound primary key.
This results in ecto recording a compound primary key of `(id, feed_id)` for the schema (the id is
added for you as with regular mixins).
@ -145,7 +149,7 @@ added for you as with regular mixins).
### Picking a table id
The first step to declaring a table is picking a unique table ID in ULID format. You could just
The first step to declaring a type is picking a unique table ID in ULID format. You could just
generate one at the terminal, but since these IDs are special, we tend to assign a synthetic ULID
that is readable as words so it stands out in debug output.
@ -197,8 +201,8 @@ defmodule Bonfire.Data.Social.Block do
end
```
It should look quite similar to a mixin definition, except that we `use Pointers.Virtual` this time
(passing an additional `table_id` argument) and we call `virtual_schema`.
It should look quite similar to a mixin definition, except that we `use` `Pointers.Virtual` this time
(passing an additional `table_id` argument) and we call the `virtual_schema` macro.
The primary limitation of a virtual is that you cannot put extra fields into one. This also means
that `belongs_to` is not generally permitted because it results in adding a field. `has_one` and
@ -206,15 +210,16 @@ that `belongs_to` is not generally permitted because it results in adding a fiel
This is not usually a problem, as extra fields can be put into mixins or multimixins as appropriate.
Under the hood, a virtual has a view (in the example, called `bonfire_data_social_block`). It looks
Under the hood, a virtual has a view (in this example, called `bonfire_data_social_block`). It looks
like a table with just an id, but it's populated with all the ids of blocks that are not
deleted. When the view is inserted into, a record is created in the `pointers` table for you transparently. When
you delete from the view, the corresponding `pointers` entry is marked deleted for you.
deleted. When the view is inserted into, a record is created in the `pointers` table for you
transparently. When you delete from the view, the corresponding `pointers` entry is marked deleted
for you.
### Pointables
The other, lesser used, type of object is called the pointable. The major difference is that unlike
the simple case of virtuals, pointers are not backed by views, but by tables.
The other, lesser used, type of object is called the Pointable. The major difference is that unlike
the simple case of virtuals, pointables are not backed by views, but by tables.
When a record is inserted into a pointable table, a copy is made in the `pointers` table for you
transparently. When you delete from the table, the corresponding `pointers` entry is marked
@ -223,11 +228,11 @@ are free to add new fields.
Pointables pay for this flexibility by being slightly more expensive than virtuals:
* Records must be inserted into/deleted from two tables (the pointable table and the `pointers` table).
* Records must be inserted into/deleted from two tables (the pointable's table and the `pointers` table).
* The pointable table needs its own primary key index.
Here is a definition of a pointable type (indicating an activitypub document whose type we don't
recognise, stored as a json blob):
Here is a definition of a pointable type (indicating an ActivityPub activity whose type we don't
recognise, stored as a JSON blob):
```elixir
defmodule Bonfire.Data.Social.APActivity do
@ -243,11 +248,11 @@ defmodule Bonfire.Data.Social.APActivity do
end
```
The choice of using a pointable instead of a virtual and a mixin is ultimately up to you.
The choice of using a pointable instead of a virtual combined with one or more mixins is ultimately up to you.
## Writing Migrations
Migrations are typically included in schema libraries as public APIs you can call within your
Migrations are typically included along with the schemas as public APIs you can call within your
project's migrations.
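For instance, a hedged sketch of calling such an API from one of your own migrations (the module and helper names are illustrative, not a guaranteed part of any library; check the schema library you are using for the real ones):

```elixir
defmodule MyApp.Repo.Migrations.CreateBlock do
  use Ecto.Migration

  # Hypothetical migration helper exported by a schema library:
  import Bonfire.Data.Social.Block.Migration

  # The helper typically checks the migration direction itself:
  def up, do: migrate_block()
  def down, do: migrate_block()
end
```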
### Virtuals
@ -301,8 +306,8 @@ ordering of operations. Handling down migrations can be a bit awkward in ecto.
### Pointables
As of now, pointables are a little trickier to define flexibly than virtuals because we want to
preserve the ability for the user to add new fields. There are some questions about how useful this
is in practice, so we might go for a simpler option in future.
preserve the ability for the user to define extra fields in config. There are some questions about
how useful this is in practice, so we might go for a simpler option in future.
Example:
@ -343,7 +348,7 @@ defmodule Bonfire.Data.Social.APActivity.Migration do
end
```
## Mixins
### Mixins
Mixins look much like pointables:
@ -398,7 +403,7 @@ defmodule Bonfire.Data.Social.Profile.Migration do
end
```
## Multimixins
### Multimixins
Similar to mixins:
@ -429,7 +434,6 @@ defmodule Bonfire.Data.Social.FeedPublish.Migration do
def drop_feed_publish_table(), do: drop_pointable_table(FeedPublish)
def migrate_feed_publish_feed_index(dir \\ direction(), opts \\ [])
def migrate_feed_publish_feed_index(:up, opts),
do: create_if_not_exists(index(@feed_publish_table, [:feed_id], opts))
@ -463,8 +467,7 @@ defmodule Bonfire.Data.Social.FeedPublish.Migration do
end
```
### More examples
## More examples
Take a look at a few of the migrations in our data libraries. Between them, they cover most
scenarios by now:

View file

@ -1,4 +1,4 @@
# Backend Configuration and Deployment
# Deployment guide
### WARNING: Bonfire is still under active development and deployment is only recommended for development and testing purposes!

View file

@ -1,4 +1,4 @@
# Bonfire GraphQL Guide
# GraphQL API
## GraphQL Introduction

View file

@ -12,6 +12,9 @@ defmodule Bonfire.MixProject do
"docs/HACKING.md",
"docs/DEPLOY.md",
"docs/ARCHITECTURE.md",
"docs/BONFIRE-FLAVOURED-ELIXIR.md",
"docs/DATABASE.md",
"docs/BOUNDARIES.md",
"docs/GRAPHQL.md",
"docs/MRF.md",
],
@ -55,9 +58,9 @@ defmodule Bonfire.MixProject do
"Flavours of Bonfire": Path.wildcard("flavours/*/*"),
"Data schemas": Path.wildcard("{deps,forks}/bonfire_data_*/*"),
"UI extensions": Path.wildcard("{deps,forks}/bonfire_ui_*/*"),
"Bonfire utilities": ["bonfire_api_graphql", "bonfire_boundaries", "bonfire_common", "bonfire_ecto", "bonfire_epics", "bonfire_fail", "bonfire_files", "bonfire_mailer"] |> Enum.flat_map(&Path.wildcard("*/#{&1}/*")),
"Bonfire utilities": ["bonfire_api_graphql", "bonfire_boundaries", "bonfire_common", "bonfire_ecto", "bonfire_epics", "bonfire_fail", "bonfire_files", "bonfire_mailer"] |> Enum.flat_map(&Path.wildcard("{deps,forks}/#{&1}/*")),
"Feature extensions": Path.wildcard("{deps,forks}/bonfire_*/*"),
"Generic utilities": Path.wildcard("{deps,forks}/*/*"),
"Other utilities": Path.wildcard("{deps,forks}/*/*"),
"Dependencies": Path.wildcard("docs/DEPENDENCIES/*"),
],
groups_for_modules: [