Merge branch 'develop' into catofish-fix/local-user-notes-count

This commit is contained in:
naskya 2024-03-20 18:34:33 +09:00
commit 7e2559c5ee
No known key found for this signature in database
GPG key ID: 712D413B3A9FED5C
63 changed files with 1043 additions and 2464 deletions


@ -1,38 +1,28 @@
url: http://localhost:3000
port: 3000
url: http://localhost:3030
port: 3030
db:
host: 127.0.0.1
host: firefish_db
port: 5432
db: firefish
db: firefish_db
user: firefish
pass: firefish
pass: password
redis:
host: localhost
host: firefish_redis
port: 6379
family: 4
#sonic:
# host: localhost
# port: 1491
# auth: SecretPassword
# collection: notes
# bucket: default
#elasticsearch:
# host: localhost
# port: 9200
# ssl: false
# user:
# pass:
id: 'aid'
reservedUsernames:
- root
- admin
- administrator
- me
- system
#allowedPrivateNetworks: [
# '10.69.1.0/24'
#]
logLevel: [
'error',
'success',
'warning',
'debug',
'info'
]


@ -41,11 +41,11 @@ db:
port: 5432
#ssl: false
# Database name
db: firefish
db: firefish_db
# Auth
user: example-firefish-user
pass: example-firefish-pass
user: firefish
pass: password
# Whether to disable query caching
#disableCache: true
@ -181,9 +181,6 @@ logLevel: [
# Proxy remote files (default: false)
#proxyRemoteFiles: true
# Use authorized fetch for outgoing requests
signToActivityPubGet: true
#allowedPrivateNetworks: [
# '127.0.0.1/32'
#]


@ -45,8 +45,6 @@ docker-compose.yml
docker-compose.example.yml
firefish.apache.conf
firefish.nginx.conf
flake.lock
flake.nix
title.svg
/.gitlab
/chart

4
.envrc

@ -1,4 +0,0 @@
if ! has nix_direnv_version || ! nix_direnv_version 2.3.0; then
source_url "https://raw.githubusercontent.com/nix-community/nix-direnv/2.3.0/direnvrc" "sha256-Dmd+j63L84wuzgyjITIfSxSD57Tx7v51DMxVZOsiUD8="
fi
use flake . --impure

10
.gitignore vendored

@ -29,7 +29,7 @@ coverage
!/.config/docker_ci.env
!/.config/helm_values_example.yml
!/.config/LICENSE
docker-compose.yml
/docker-compose.yml
/custom
# ESLint
@ -60,6 +60,10 @@ packages/backend/assets/LICENSE
packages/megalodon/lib
packages/megalodon/.idea
dev/container/firefish
dev/container/db
dev/container/redis
# blender backups
*.blend1
*.blend2
@ -71,10 +75,6 @@ packages/megalodon/.idea
.yarn
yarn*
# Nix Development shell items
.devenv
.direnv
# Cargo cache for Docker
/.cargo-cache
/.cargo-target


@ -22,7 +22,17 @@ Before creating an issue, please check the following:
> **Warning**
> Do not close issues that are about to be resolved. It should remain open until a commit that actually resolves it is merged.
## Before implementation
## Coding
### Preparing a development environment
You can prepare your local Firefish environment in multiple ways:
- [Run Firefish and databases on your host](../dev/docs/local-installation.md)
- [Run Firefish on your host and databases in containers](../dev/docs/db-container.md)
- [Run Firefish and databases in containers](../dev/docs/container.md)
### Before implementation
When you want to add a feature or fix a bug, **first have the design and policy reviewed in an Issue** (if it is not there, please make one). Without this step, there is a high possibility that the MR will not be merged even if it is implemented.
At this point, you also need to clarify the goals of the MR you will create, and make sure that the other members of the team are aware of them.
@ -30,14 +40,14 @@ MRs that do not have a clear set of do's and don'ts tend to be bloated and diffi
Also, when you start implementation, assign yourself to the Issue (if you cannot do it yourself, ask another member to assign you). By expressing your intention to work on the Issue, you can prevent conflicting work.
## Well-known branches
### Well-known branches
- The **`main`** branch is tracking the latest release and used for production purposes.
- The **`develop`** branch is where we work for the next release.
- When you create a MR, basically target it to this branch. **But create a different branch**
- The **`l10n_develop`** branch is reserved for localization management.
- **`feature/*`** branches are reserved for the development of a specific feature
## Creating a merge request (MR)
### Creating a merge request (MR)
Thank you for your MR! Before creating a MR, please check the following:
- If possible, prefix the title with a keyword that identifies the type of this MR, as shown below.
- `fix` / `refactor` / `feat` / `enhance` / `perf` / `chore` etc. You are also welcome to use gitmoji. This is important, as we use these prefixes to A) read the git history more easily and B) generate our changelog. Without proper prefixing, your MR may be rejected.
@ -68,235 +78,3 @@ Be willing to comment on the good points and not just the things you want fixed
- Does the test ensure the expected behavior?
- Are there any omissions or gaps?
- Does it check for anomalies?
## Preparing the development environment
1. Install the following software
- nodejs
- rustup
- cargo
- sea-orm-cli
- podman
- podman-compose
2. Copy the config file
```sh
cp .config/dev.example.yml .config/default.yml
```
3. Start postgres/redis containers
```sh
pnpm run dev:up
```
4. Build Firefish
```sh
pnpm install
pnpm run build:debug
pnpm run migrate
```
5. Start Firefish on your localhost
```sh
pnpm run start
```
You can use the following commands to initialize the database:
```sh
pnpm run dev:init
pnpm run migrate
```
Make sure to clear your browser local storage after initializing the dev instance.
## Deploy (SOON)
The `/deploy` command in an issue comment can be used to deploy the contents of an MR to the preview environment.
```
/deploy sha=<commit hash>
```
An actual domain will be assigned so you can test the federation.
# THE FOLLOWING IS OUTDATED:
## Merge
## Release
### Release Instructions
1. Commit version changes in the `develop` branch ([package.json](https://github.com/misskey-dev/misskey/blob/develop/package.json))
2. Create a release PR.
- Into `master` from `develop` branch.
- The title must be in the format `Release: x.y.z`.
- `x.y.z` is the new version you are trying to release.
3. Deploy and perform a simple QA check. Also verify that the tests passed.
4. Merge it.
5. Create a [GitHub release](https://github.com/misskey-dev/misskey/releases)
- The target branch must be `master`
- The tag name must be the version
## Development
During development, it is useful to use the `pnpm run dev` command.
This command monitors the server-side and client-side source files and automatically builds them if they are modified.
In addition, it will also automatically start the Firefish server process.
## Testing
- Test code is located in [`/test`](/test).
### Run test
Create a config file.
```
cp test/test.yml .config/
```
Prepare DB/Redis for testing.
```
docker-compose -f test/docker-compose.yml up
```
Alternatively, prepare an empty (data can be erased) DB and edit `.config/test.yml`.
Run all tests.
```
yarn test
```
#### Run a specific test
```
TS_NODE_FILES=true TS_NODE_TRANSPILE_ONLY=true TS_NODE_PROJECT="./test/tsconfig.json" pnpx mocha test/foo.ts --require ts-node/register
```
## Vue
Firefish uses Vue (v3) as its front-end framework.
- Use TypeScript.
- **When creating a new component, please use the Composition API (with [setup syntax](https://v3.vuejs.org/api/sfc-script-setup.html) and [ref syntax](https://github.com/vuejs/rfcs/discussions/369)) instead of the Options API.** See the sketch below.
- Some of the existing components are implemented in the Options API, but that is an old style. Refactors that migrate those components to the Composition API are also welcome.
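As recommended above, here is a minimal plain-TypeScript sketch of a Composition API component using `defineComponent` and `setup()` (the component name and its contents are hypothetical; in an actual `.vue` single-file component you would normally write this with `<script setup lang="ts">`):
```ts
import { computed, defineComponent, ref } from "vue";

// Hypothetical example component, not an existing Firefish component.
export default defineComponent({
	name: "ExampleCounter",
	setup() {
		const count = ref(0);
		const doubled = computed(() => count.value * 2);

		const increment = (): void => {
			count.value++;
		};

		// Everything returned here is exposed to the template.
		return { count, doubled, increment };
	},
});
```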
## nirax
nirax is the custom front-end routing system used in Misskey.
**It is heavily influenced by vue-router, so we recommend learning about vue-router first.**
### Route definitions
A route definition is an array of objects in the following format:
``` ts
{
name?: string;
path: string;
component: Component;
query?: Record<string, string>;
loginRequired?: boolean;
hash?: string;
globalCacheKey?: string;
children?: RouteDef[];
}
```
> **Warning**
> Currently, routes are evaluated in the order they are defined.
> For example, if a `/foo/bar` route definition comes after a `/foo/:id` route definition, the `/foo/bar` route will never match. See the example below.
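For illustration, a hypothetical route array that respects this ordering rule: the more specific `/foo/bar` is listed before the parameterized `/foo/:id`, so both can match (the components are placeholders, not real Firefish pages):
```ts
import type { Component } from "vue";

// Placeholder components, for illustration only.
declare const PageFooBar: Component;
declare const PageFooDetail: Component;

export const routes = [
	// Listed first, so it takes precedence over the parameterized route below.
	{ path: "/foo/bar", component: PageFooBar },
	// Matches any other /foo/<id>.
	{ path: "/foo/:id", component: PageFooDetail },
];
```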
### Multiple routers
The biggest difference from vue-router is that nirax allows multiple routers to exist.
This makes it possible, for example, to route in-app windows independently of the browser.
## Notes
### How to resolve conflicts in yarn.lock
Just execute `yarn` to fix it.
### Use `insert` instead of `save` when inserting records
#6441
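In TypeORM, `save()` may run an extra `SELECT` (and possibly an `UPDATE`) to decide whether the row already exists, while `insert()` issues a single `INSERT`. A hedged sketch, assuming a `Note` entity (the entity name and import path are illustrative):
```ts
import { getRepository } from "typeorm";
import { Note } from "@/models/entities/note"; // illustrative import path

async function createNote(id: string, text: string): Promise<void> {
	const Notes = getRepository(Note);

	// Avoid: save() can issue an extra SELECT (and an UPDATE) under the hood.
	// await Notes.save({ id, text });

	// Prefer: insert() issues a single INSERT.
	await Notes.insert({ id, createdAt: new Date(), text });
}
```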
### Placeholders
When building SQL with the query builder, placeholder names must not be duplicated.
For example, consider:
``` ts
query.andWhere(new Brackets(qb => {
for (const type of ps.fileType) {
qb.orWhere(`:type = ANY(note.attachedFileTypes)`, { type: type });
}
}));
```
Here the placeholder `type` ends up being used multiple times inside the loop, which breaks the query.
So it must be written like this instead:
```ts
query.andWhere(new Brackets(qb => {
for (const type of ps.fileType) {
const i = ps.fileType.indexOf(type);
qb.orWhere(`:type${i} = ANY(note.attachedFileTypes)`, { [`type${i}`]: type });
}
}));
```
### Not `null` in TypeORM
```ts
const foo = await Foos.findOne({
bar: Not(null)
});
```
A query like this (intended to mean "`bar` is not `null`") does not work as expected.
Write it like this instead:
```ts
const foo = await Foos.findOne({
bar: Not(IsNull())
});
```
### `null` in SQL
When issuing SQL, if a parameter may be `null`, you must build the SQL statement differently for that case.
For example, consider:
``` ts
query.where('file.folderId = :folderId', { folderId: ps.folderId });
```
If `ps.folderId` is `null`, this ends up issuing a query like `file.folderId = null`, which is not valid SQL and does not return the expected result.
So it must be written like this instead:
``` ts
if (ps.folderId) {
query.where('file.folderId = :folderId', { folderId: ps.folderId });
} else {
query.where('file.folderId IS NULL');
}
```
### `[]` in SQL
When issuing SQL, if the parameter of an `IN` clause may be `[]` (an empty array), you must build the SQL statement differently for that case.
For example, consider:
``` ts
const users = await Users.find({
id: In(userIds)
});
```
If `userIds` is `[]`, this ends up issuing a query like `user.id IN ()`, which is not valid SQL and does not return the expected result.
So it must be written like this instead:
``` ts
const users = userIds.length > 0 ? await Users.find({
id: In(userIds)
}) : [];
```
### Array indices in SQL
In SQL, array indices start at **1**.
To access `a` in `[a, b, c]`, write `[1]`, not `[0]`.
### null IN
When using `IN` on a column that may contain `null`, the query breaks if left as-is, so handle `null` explicitly with `OR` or the like, as in the sketch below.
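A hedged sketch using the same `Brackets` pattern as above; the column name `note.replyUserId` is illustrative and `userIds` is assumed to be non-empty (see the `[]` in SQL note above). Since `NULL IN (...)` never evaluates to true, an explicit `IS NULL` branch is needed if rows whose column is `NULL` should also match:
```ts
import { Brackets, SelectQueryBuilder } from "typeorm";

// Illustrative: also match rows where the (nullable) column is NULL.
function filterByReplyUsers(
	query: SelectQueryBuilder<any>,
	userIds: string[], // assumed non-empty
): void {
	query.andWhere(
		new Brackets((qb) => {
			qb.where("note.replyUserId IN (:...userIds)", { userIds })
				.orWhere("note.replyUserId IS NULL");
		}),
	);
}
```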
### Beware of `undefined`
Unlike with MongoDB, note that `findOne` returns **`undefined`** when the target record does not exist.
MongoDB used to return `null`, so writing `if (x === null)` out of habit will introduce bugs. Write `if (x == null)` instead.
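A minimal sketch (the entity, import path, and `name` column are illustrative):
```ts
import { getRepository } from "typeorm";
import { User } from "@/models/entities/user"; // illustrative import path

async function getUserName(userId: string): Promise<string> {
	const user = await getRepository(User).findOne({ id: userId });

	// findOne resolves to undefined (not null) when no record matches,
	// so use a loose `== null` check that covers both undefined and null.
	if (user == null) {
		throw new Error("No such user");
	}

	return user.name;
}
```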
### How to create a migration
In `packages/backend`:
```sh
pnpm dlx typeorm migration:generate -d ormconfig.js -o <migration name>
```
- After generating it, move the file under the `migration` directory.
- The generated script contains unnecessary changes, so remove them.
### Wrap connections with `markRaw`
When you set a misskey.js connection **as a Vue component's data option**, always wrap it with `markRaw`. If the instance is made reactive unnecessarily, it causes problems inside misskey.js and also leads to performance issues. This does not apply when using the Composition API (where reactivity is opt-in); a sketch of the Options API case follows below.
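A sketch of the Options API case (the stream/channel API shape is illustrative; the key point is the `markRaw()` wrapper inside `data()`):
```ts
import { defineComponent, markRaw } from "vue";
import * as os from "@/os"; // illustrative module exposing the stream connection

export default defineComponent({
	data() {
		return {
			// Without markRaw, Vue would deeply proxy the connection object,
			// which breaks misskey.js internals and hurts performance.
			connection: markRaw(os.stream.useChannel("main")),
		};
	},
	beforeUnmount() {
		this.connection.dispose();
	},
});
```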
### Be careful when importing JSON
When you import JSON in TypeScript, the JSON file is also emitted into the dist directory when compiling with tsc. This behavior can cause files to be overwritten unintentionally, so when importing JSON, check whether it is acceptable for that file to be overwritten. If you do not want it to be overwritten, read it with a function such as `fs.readFileSync` instead of importing it, as sketched below.
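A minimal sketch, assuming a JSON file at an illustrative path, read at runtime so that tsc does not copy it into `dist`:
```ts
import * as fs from "node:fs";

// Read and parse at runtime; nothing extra is emitted into dist for this file.
const meta = JSON.parse(
	fs.readFileSync("./assets/meta.json", "utf-8"),
) as Record<string, unknown>;

console.log(meta);
```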
### Do not set margins in component style definitions
It is well known that a component setting its own margin is a source of problems.
Margins should be set by the consumer of the component.
## Miscellaneous
### Do not use the word "follow" in HTML class names
It would get mistakenly blocked by ad blockers.

213
README.md

@ -15,213 +15,22 @@ Firefish is based off of Misskey, a powerful microblogging server on ActivityPub
<div style="clear: both;"></div>
# Documents
- [Installation guide](./docs/install.md)
- [Contributing guide](./CONTRIBUTING.md)
- [Changelog](./docs/changelog.md)
# Links
### Want to get involved? Great!
- If you have the means to, [donations](https://opencollective.com/Firefish) are a great way to keep us going.
- If you know how to program in TypeScript, Vue, or Rust, read the [contributing](./CONTRIBUTING.md) document.
- If you know a non-English language, translating Firefish on [Weblate](https://hosted.weblate.org/engage/firefish/) helps bring Firefish to more people. No technical experience needed!
### Links
- Donations:
- OpenCollective: <https://opencollective.com/Firefish>
- Donations: <https://opencollective.com/Firefish>
- Matrix space: <https://matrix.to/#/#firefish-community:nitro.chat>
- Official account: <a href="https://info.firefish.dev/@firefish" rel="me">`@firefish@info.firefish.dev`</a>
- Weblate: <https://hosted.weblate.org/engage/firefish/>
# Getting started
# Want to get involved? Great!
This guide will work for both **starting from scratch** and **migrating from Misskey**.
- If you know how to program in TypeScript, Vue, or Rust, please read the [contributing guide](./CONTRIBUTING.md).
- If you have the means to, [donations](https://opencollective.com/Firefish) are a great way to keep us going.
- If you know a non-English language, translating Firefish on [Weblate](https://hosted.weblate.org/engage/firefish/) helps bring Firefish to more people. No technical experience needed!
<!-- ## Easy installers
If you have access to a server that supports one of the sources below, I recommend you use it! Note that these methods *won't* allow you to migrate from Misskey without manual intervention.
[![Install on Ubuntu](https://pool.jortage.com/voringme/misskey/3b62a443-1b44-45cf-8f9e-f1c588f803ed.png)](https://firefish.dev/firefish/ubuntu-bash-install)  [![Install on the Arch User Repository](https://pool.jortage.com/voringme/misskey/ba2a5c07-f078-43f1-8483-2e01acca9c40.png)](https://aur.archlinux.org/packages/firefish)  [![Install Firefish with YunoHost](https://install-app.yunohost.org/install-with-yunohost.svg)](https://install-app.yunohost.org/?app=firefish) -->
## Containerization
- [How to run Firefish with Docker](https://firefish.dev/firefish/firefish/-/blob/develop/docs/docker.md)
- [How to run Firefish with Kubernetes/Helm](https://firefish.dev/firefish/firefish/-/blob/develop/docs/kubernetes.md)
## Dependencies
- At least [NodeJS](https://nodejs.org/en/) v18.17.0 (v20/v21 recommended)
- At least [PostgreSQL](https://www.postgresql.org/) v12 (v16 recommended)
- At least [Redis](https://redis.io/) v7
- Web Proxy (one of the following)
- Nginx (recommended)
- Caddy (recommended)
- Apache
### Optional dependencies
- [FFmpeg](https://ffmpeg.org/) for video transcoding
- Caching server (one of the following)
- [DragonflyDB](https://www.dragonflydb.io/) (recommended)
- [KeyDB](https://keydb.dev/)
- Another [Redis](https://redis.io/) server
### Build dependencies
- At least [Rust](https://www.rust-lang.org/) v1.74
- C/C++ compiler & build tools
- `build-essential` on Debian/Ubuntu Linux
- `base-devel` on Arch Linux
- [Python 3](https://www.python.org/)
## Get folder ready
```sh
git clone https://firefish.dev/firefish/firefish.git
cd firefish/
```
> **Note**
> By default, you're on the develop branch. Run `git checkout main` to switch to the main branch.
## Install dependencies
```sh
# nvm install 19 && nvm use 19
sudo corepack enable
corepack prepare pnpm@latest --activate
pnpm install --frozen-lockfile
```
### pm2
To install pm2 run:
```
npm i -g pm2
pm2 install pm2-logrotate
```
> **Note**
> [`pm2-logrotate`](https://github.com/keymetrics/pm2-logrotate/blob/master/README.md) ensures that log files don't grow indefinitely, as Firefish produces a lot of logs.
## Create database
In PostgreSQL (`psql`), run the following command:
```sql
CREATE DATABASE firefish WITH encoding = 'UTF8';
```
or run the following from the command line:
```sh
psql postgres -c "create database firefish with encoding = 'UTF8';"
```
In Firefish's directory, fill out the `db` section of `.config/default.yml` with the correct information, where the `db` key is `firefish`.
## Caching server
If you experience a lot of traffic, it's a good idea to set up another Redis-compatible caching server. If you don't set one up, it'll fall back to the mandatory Redis server. DragonflyDB is the recommended option due to its unrivaled performance and ease of use.
## Set up search
### Sonic
Sonic is better suited for self-hosters with smaller deployments. It uses almost no resources, barely any disk space, and is relatively fast.
Follow Sonic's [installation guide](https://github.com/valeriansaliou/sonic#installation).
> **Note**
> If you use IPv4: in Sonic's directory, edit the `config.cfg` file to change `inet` to `"0.0.0.0:1491"`.
In Firefish's directory, fill out the `sonic` section of `.config/default.yml` with the correct information.
### Meilisearch
Meilisearch is better suited for larger deployments. It's faster but uses far more resources and disk space.
Follow Meilisearch's [quick start guide](https://www.meilisearch.com/docs/learn/getting_started/quick_start)
In Firefish's directory, fill out the `meilisearch` section of `.config/default.yml` with the correct information.
### ElasticSearch
Please don't use ElasticSearch unless you already have an ElasticSearch setup and want to continue using it for Firefish. ElasticSearch is slow, heavy, and offers very few benefits over Sonic/Meilisearch.
## Customize
- To add custom CSS for all users, edit `./custom/assets/instance.css`.
- To add static assets (such as images for the splash screen), place them in the `./custom/assets/` directory. They'll then be available on `https://yourserver.tld/static-assets/filename.ext`.
- To add custom locales, place them in the `./custom/locales/` directory. If you name your custom locale the same as an existing locale, it will overwrite it. If you give it a unique name, it will be added to the list. Also make sure that the first part of the filename matches the locale you're basing it on. (Example: `en-FOO.yml`)
- To add custom error images, place them in the `./custom/assets/badges` directory, replacing the files already there.
- To add custom sounds, place only mp3 files in the `./custom/assets/sounds` directory.
- To update custom assets without rebuilding, just run `pnpm run gulp`.
- To block ChatGPT, CommonCrawl, or other crawlers from indexing your instance, uncomment the respective rules in `./custom/robots.txt`.
## Configuring a new server
- Run `cp .config/example.yml .config/default.yml`
- Edit `.config/default.yml`, making sure to fill out required fields.
- Also copy and edit `.config/docker_example.env` to `.config/docker.env` if you're using Docker.
## Migrating from Misskey/FoundKey to Firefish
For migrating from Misskey v13, Misskey v12, and FoundKey, read [this document](https://firefish.dev/firefish/firefish/-/blob/develop/docs/migrate.md).
## Web proxy
### Nginx (recommended)
- Run `sudo cp ./firefish.nginx.conf /etc/nginx/sites-available/ && cd /etc/nginx/sites-available/`
- Edit `firefish.nginx.conf` to reflect your server properly
- Run `sudo ln -s ./firefish.nginx.conf ../sites-enabled/firefish.nginx.conf`
- Run `sudo nginx -t` to validate that the config is valid, then restart the NGINX service.
### Caddy (recommended)
- Add the following block to your `Caddyfile`, replacing `example.tld` with your own domain:
```caddy
example.tld {
reverse_proxy http://127.0.0.1:3000
}
```
- Reload your caddy configuration
### Apache
> **Warning**
> Apache has some known problems with Firefish. Only use it if you have to.
- Run `sudo cp ./firefish.apache.conf /etc/apache2/sites-available/ && cd /etc/apache2/sites-available/`
- Edit `firefish.apache.conf` to reflect your server properly
- Run `sudo a2ensite firefish.apache` to enable the site
- Run `sudo service apache2 restart` to reload apache2 configuration
## Build and launch!
### NodeJS + pm2
#### `git pull` and run these steps to update Firefish in the future!
```sh
# git pull
pnpm install
NODE_ENV=production pnpm run build && pnpm run migrate
pm2 start "NODE_ENV=production pnpm run start" --name Firefish
```
## Tips & Tricks
- When editing the config file, please don't fill out the settings at the bottom. They're designed *only* for managed hosting, not self hosting. Those settings are much better off being set in Firefish's control panel.
- Port 3000 (used in the default config) might be already used on your server for something else. To find an open port for Firefish, run `for p in {3000..4000}; do ss -tlnH | tr -s ' ' | cut -d" " -sf4 | grep -q "${p}$" || echo "${p}"; done | head -n 1`. Replace 3000 with the minimum port and 4000 with the maximum port if you need it.
- I'd recommend you use an S3 bucket/CDN for object storage, especially if you use Docker.
- When using object storage, setting a proper `Access-Control-Allow-Origin` response header is highly recommended.
- I'd ***strongly*** recommend against using CloudFlare, but if you do, make sure to turn code minification off.
- For push notifications, run `npx web-push generate-vapid-keys`, then put the public and private keys into Control Panel > General > ServiceWorker.
- For translations, make a [DeepL](https://deepl.com) account and generate an API key, then put it into Control Panel > General > DeepL Translation.
- To add another admin account:
- Go to the user's page > 3 Dots > About > Moderation > turn on "Moderator"
- Go back to Overview > click the clipboard icon next to the ID
- Run `psql -d firefish` (or whatever the database name is)
- Run `UPDATE "user" SET "isAdmin" = true WHERE id='999999';` (replace `999999` with the copied ID)
- Restart your Firefish server


@ -1,16 +1,5 @@
# Reporting Security Issues
## Minor Security Issues
If you discover a minor security issue in Firefish, please report it by sending an
email to [kainoa@t1c.dev](mailto:kainoa@t1c.dev).
## High Security Issues
If you discover a security issue that is so high-risk that too much is affected by it, please don't send it over unencrypted communication. You can share PGP keys with us via kainoa@t1c.dev, and once we have established secure communication, send the report by email, or message us using Matrix's encrypted private messages at @t1c:matrix.fedibird.com or @cleo:tchncs.de.
This will allow us to assess the risk, and make a fix available before we add a
bug report to the Codeberg repository.
If you discover a security issue, please report it as a confidential issue. You can create a confidential issue by checking the "This issue is confidential and should only be visible to team members with at least Reporter access." checkbox in <https://firefish.dev/firefish/firefish/-/issues/new>.
Thanks for helping make Firefish safe for everyone.


@ -1,23 +0,0 @@
# Patterns to ignore when building packages.
# This supports shell glob matching, relative path matching, and
# negation (prefixed with !). Only one pattern per line.
.DS_Store
# Common VCS dirs
.git/
.gitignore
.bzr/
.bzrignore
.hg/
.hgignore
.svn/
# Common backup files
*.swp
*.bak
*.tmp
*.orig
*~
# Various IDEs
.project
.idea/
*.tmproj
.vscode/


@ -1,38 +0,0 @@
apiVersion: v2
name: firefish
description: A fun, new, open way to experience social media https://joinfirefish.org
# A chart can be either an 'application' or a 'library' chart.
#
# Application charts are a collection of templates that can be packaged into versioned archives
# to be deployed.
#
# Library charts provide useful utilities or functions for the chart developer. They're included as
# a dependency of application charts to inject those utilities and functions into the rendering
# pipeline. Library charts do not define any templates and therefore cannot be deployed.
type: application
# This is the chart version. This version number should be incremented each time you make changes
# to the chart and its templates, including the app version.
# Versions are expected to follow Semantic Versioning (https://semver.org/)
version: 0.1.2
# This is the version number of the application being deployed. This version number should be
# incremented each time you make changes to the application. Versions are not expected to
# follow Semantic Versioning. They should reflect the version the application is using.
# It is recommended to use it with quotes.
appVersion: 'v1.0.4-beta31'
dependencies:
- name: elasticsearch
version: 19.0.1
repository: https://raw.githubusercontent.com/bitnami/charts/archive-full-index/bitnami
condition: elasticsearch.enabled
- name: postgresql
version: 11.1.3
repository: https://raw.githubusercontent.com/bitnami/charts/archive-full-index/bitnami
condition: postgresql.enabled
- name: redis
version: 16.13.2
repository: https://raw.githubusercontent.com/bitnami/charts/archive-full-index/bitnami
condition: redis.enabled


@ -1,89 +0,0 @@
# firefish
![Version: 0.1.2](https://img.shields.io/badge/Version-0.1.2-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: rc](https://img.shields.io/badge/AppVersion-rc-informational?style=flat-square)
A fun, new, open way to experience social media https://joinfirefish.org
## Requirements
| Repository | Name | Version |
|------------|------|---------|
| https://raw.githubusercontent.com/bitnami/charts/archive-full-index/bitnami | elasticsearch | 19.0.1 |
| https://raw.githubusercontent.com/bitnami/charts/archive-full-index/bitnami | postgresql | 11.1.3 |
| https://raw.githubusercontent.com/bitnami/charts/archive-full-index/bitnami | redis | 16.13.2 |
## Values
| Key | Type | Default | Description |
|-----|------|---------|-------------|
| affinity | object | `{}` | |
| autoscaling.enabled | bool | `false` | |
| autoscaling.maxReplicas | int | `100` | |
| autoscaling.minReplicas | int | `1` | |
| autoscaling.targetCPUUtilizationPercentage | int | `80` | |
| firefish.allowedPrivateNetworks | list | `[]` | If you want to allow firefish to connect to private ips, enter the cidrs here. |
| firefish.deepl.authKey | string | `""` | |
| firefish.deepl.isPro | bool | `false` | |
| firefish.deepl.managed | bool | `false` | |
| firefish.domain | string | `"firefish.local"` | |
| firefish.isManagedHosting | bool | `true` | |
| firefish.libreTranslate.apiKey | string | `""` | |
| firefish.libreTranslate.apiUrl | string | `""` | |
| firefish.libreTranslate.managed | bool | `false` | |
| firefish.objectStorage.access_key | string | `""` | |
| firefish.objectStorage.access_secret | string | `""` | |
| firefish.objectStorage.baseUrl | string | `""` | |
| firefish.objectStorage.bucket | string | `""` | |
| firefish.objectStorage.endpoint | string | `""` | |
| firefish.objectStorage.managed | bool | `true` | |
| firefish.objectStorage.prefix | string | `"files"` | |
| firefish.objectStorage.region | string | `""` | |
| firefish.reservedUsernames[0] | string | `"root"` | |
| firefish.reservedUsernames[1] | string | `"admin"` | |
| firefish.reservedUsernames[2] | string | `"administrator"` | |
| firefish.reservedUsernames[3] | string | `"me"` | |
| firefish.reservedUsernames[4] | string | `"system"` | |
| firefish.smtp.from_address | string | `"notifications@example.com"` | |
| firefish.smtp.login | string | `""` | |
| firefish.smtp.managed | bool | `true` | |
| firefish.smtp.password | string | `""` | |
| firefish.smtp.port | int | `587` | |
| firefish.smtp.server | string | `"smtp.mailgun.org"` | |
| firefish.smtp.useImplicitSslTls | bool | `false` | |
| elasticsearch | object | `{"auth":{},"enabled":false,"hostname":"","port":9200,"ssl":false}` | https://github.com/bitnami/charts/tree/master/bitnami/elasticsearch#parameters |
| fullnameOverride | string | `""` | |
| image.pullPolicy | string | `"IfNotPresent"` | |
| image.repository | string | `"registry.firefish.dev/firefish/firefish"` | |
| image.tag | string | `""` | |
| imagePullSecrets | list | `[]` | |
| ingress.annotations | object | `{}` | |
| ingress.className | string | `""` | |
| ingress.enabled | bool | `false` | |
| ingress.hosts[0].host | string | `"chart-example.local"` | |
| ingress.hosts[0].paths[0].path | string | `"/"` | |
| ingress.hosts[0].paths[0].pathType | string | `"ImplementationSpecific"` | |
| ingress.tls | list | `[]` | |
| nameOverride | string | `""` | |
| nodeSelector | object | `{}` | |
| podAnnotations | object | `{}` | |
| podSecurityContext | object | `{}` | |
| postgresql.auth.database | string | `"firefish_production"` | |
| postgresql.auth.password | string | `""` | |
| postgresql.auth.username | string | `"firefish"` | |
| postgresql.enabled | bool | `true` | disable if you want to use an existing db; in which case the values below must match those of that external postgres instance |
| redis.auth.password | string | `""` | you must set a password; the password generated by the redis chart will be rotated on each upgrade: |
| redis.enabled | bool | `true` | |
| redis.hostname | string | `""` | |
| redis.port | int | `6379` | |
| replicaCount | int | `1` | |
| resources | object | `{}` | |
| securityContext | object | `{}` | |
| service.port | int | `80` | |
| service.type | string | `"ClusterIP"` | |
| serviceAccount.annotations | object | `{}` | |
| serviceAccount.create | bool | `true` | |
| serviceAccount.name | string | `""` | |
| tolerations | list | `[]` | |
----------------------------------------------
Autogenerated from chart metadata using [helm-docs v1.11.0](https://github.com/norwoodj/helm-docs/releases/v1.11.0)


@ -1,22 +0,0 @@
1. Get the application URL by running these commands:
{{- if .Values.ingress.enabled }}
{{- range $host := .Values.ingress.hosts }}
{{- range .paths }}
http{{ if $.Values.ingress.tls }}s{{ end }}://{{ $host.host }}{{ .path }}
{{- end }}
{{- end }}
{{- else if contains "NodePort" .Values.service.type }}
export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ include "firefish.fullname" . }})
export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}")
echo http://$NODE_IP:$NODE_PORT
{{- else if contains "LoadBalancer" .Values.service.type }}
NOTE: It may take a few minutes for the LoadBalancer IP to be available.
You can watch the status of by running 'kubectl get --namespace {{ .Release.Namespace }} svc -w {{ include "firefish.fullname" . }}'
export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ include "firefish.fullname" . }} --template "{{"{{ range (index .status.loadBalancer.ingress 0) }}{{.}}{{ end }}"}}")
echo http://$SERVICE_IP:{{ .Values.service.port }}
{{- else if contains "ClusterIP" .Values.service.type }}
export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app.kubernetes.io/name={{ include "firefish.name" . }},app.kubernetes.io/instance={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}")
export CONTAINER_PORT=$(kubectl get pod --namespace {{ .Release.Namespace }} $POD_NAME -o jsonpath="{.spec.containers[0].ports[0].containerPort}")
echo "Visit http://127.0.0.1:8080 to use your application"
kubectl --namespace {{ .Release.Namespace }} port-forward $POD_NAME 8080:$CONTAINER_PORT
{{- end }}


@ -1,327 +0,0 @@
{{/*
Expand the name of the chart.
*/}}
{{- define "firefish.name" -}}
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }}
{{- end }}
{{/*
Create a default fully qualified app name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
If release name contains chart name it will be used as a full name.
*/}}
{{- define "firefish.fullname" -}}
{{- if .Values.fullnameOverride }}
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }}
{{- else }}
{{- $name := default .Chart.Name .Values.nameOverride }}
{{- if contains $name .Release.Name }}
{{- .Release.Name | trunc 63 | trimSuffix "-" }}
{{- else }}
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }}
{{- end }}
{{- end }}
{{- end }}
{{/*
Create chart name and version as used by the chart label.
*/}}
{{- define "firefish.chart" -}}
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }}
{{- end }}
{{/*
Common labels
*/}}
{{- define "firefish.labels" -}}
helm.sh/chart: {{ include "firefish.chart" . }}
{{ include "firefish.selectorLabels" . }}
{{- if .Chart.AppVersion }}
app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
{{- end }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
{{- end }}
{{/*
Selector labels
*/}}
{{- define "firefish.selectorLabels" -}}
app.kubernetes.io/name: {{ include "firefish.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
{{- end }}
{{/*
Create the name of the service account to use
*/}}
{{- define "firefish.serviceAccountName" -}}
{{- if .Values.serviceAccount.create }}
{{- default (include "firefish.fullname" .) .Values.serviceAccount.name }}
{{- else }}
{{- default "default" .Values.serviceAccount.name }}
{{- end }}
{{- end }}
{{/*
Create a default fully qualified name for dependent services.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
*/}}
{{- define "firefish.elasticsearch.fullname" -}}
{{- printf "%s-%s" .Release.Name "elasticsearch" | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{- define "firefish.redis.fullname" -}}
{{- printf "%s-%s" .Release.Name "redis" | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{- define "firefish.postgresql.fullname" -}}
{{- printf "%s-%s" .Release.Name "postgresql" | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{/*
config/default.yml content
*/}}
{{- define "firefish.configDir.default.yml" -}}
#━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
# Firefish configuration
#━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
# ┌─────┐
#───┘ URL └─────────────────────────────────────────────────────
# Final accessible URL seen by a user.
url: "https://{{ .Values.firefish.domain }}/"
# ONCE YOU HAVE STARTED THE INSTANCE, DO NOT CHANGE THE
# URL SETTINGS AFTER THAT!
# ┌───────────────────────┐
#───┘ Port and TLS settings └───────────────────────────────────
#
# Firefish requires a reverse proxy to support HTTPS connections.
#
# +----- https://example.tld/ ------------+
# +------+ |+-------------+ +----------------+|
# | User | ---> || Proxy (443) | ---> | Firefish (3000) ||
# +------+ |+-------------+ +----------------+|
# +---------------------------------------+
#
# You need to set up a reverse proxy. (e.g. nginx)
# An encrypted connection with HTTPS is highly recommended
# because tokens may be transferred in GET requests.
# The port that your Firefish server should listen on.
port: 3000
# ┌──────────────────────────┐
#───┘ PostgreSQL configuration └────────────────────────────────
db:
{{- if .Values.postgresql.enabled }}
host: {{ template "firefish.postgresql.fullname" . }}
port: 5432
{{- else }}
host: {{ .Values.postgresql.postgresqlHostname }}
port: {{ .Values.postgresql.postgresqlPort | default 5432 }}
{{- end }}
# Database name
db: {{ .Values.postgresql.auth.database }}
# Auth
user: {{ .Values.postgresql.auth.username }}
pass: "{{ .Values.postgresql.auth.password }}"
# Whether to disable query caching
#disableCache: true
# Extra Connection options
#extra:
# ssl:
# host: localhost
# rejectUnauthorized: false
# ┌─────────────────────┐
#───┘ Redis configuration └─────────────────────────────────────
redis:
{{- if .Values.redis.enabled }}
host: {{ template "firefish.redis.fullname" . }}-master
{{- else }}
host: {{ required "When the redis chart is disabled .Values.redis.hostname is required" .Values.redis.hostname }}
{{- end }}
port: {{ .Values.redis.port | default 6379 }}
#family: 0 # 0=Both, 4=IPv4, 6=IPv6
pass: {{ .Values.redis.auth.password | quote }}
#prefix: example-prefix
#db: 1
#user: default
#tls:
# host: localhost
# rejectUnauthorized: false
# ┌─────────────────────┐
#───┘ Sonic configuration └─────────────────────────────────────
#sonic:
# host: localhost
# port: 1491
# auth: SecretPassword
# collection: notes
# bucket: default
# ┌─────────────────────────────┐
#───┘ Elasticsearch configuration └─────────────────────────────
{{- if .Values.elasticsearch.enabled }}
elasticsearch:
host: {{ template "firefish.elasticsearch.fullname" . }}-master-hl
port: 9200
ssl: false
{{- else if .Values.elasticsearch.hostname }}
elasticsearch:
host: {{ .Values.elasticsearch.hostname | quote }}
port: {{ .Values.elasticsearch.port }}
ssl: {{ .Values.elasticsearch.ssl }}
{{- if .Values.elasticsearch.auth }}
user: {{ .Values.elasticsearch.auth.username | quote }}
pass: {{ .Values.elasticsearch.auth.password | quote }}
{{- end }}
{{- end }}
# ┌───────────────┐
#───┘ ID generation └───────────────────────────────────────────
# You can select the ID generation method.
# You don't usually need to change this setting, but you can
# change it according to your preferences.
# Available methods:
# aid ... Short, Millisecond accuracy
# meid ... Similar to ObjectID, Millisecond accuracy
# ulid ... Millisecond accuracy
# objectid ... This is left for backward compatibility
# ONCE YOU HAVE STARTED THE INSTANCE, DO NOT CHANGE THE
# ID SETTINGS AFTER THAT!
id: 'aid'
# ┌─────────────────────┐
#───┘ Other configuration └─────────────────────────────────────
# Max note length, should be < 8000.
#maxNoteLength: 3000
# Maximum length of an image caption or file comment (default 1500, max 8192)
#maxCaptionLength: 1500
# Reserved usernames that only the administrator can register with
reservedUsernames: {{ .Values.firefish.reservedUsernames | toJson }}
# Whether disable HSTS
#disableHsts: true
# Number of worker processes
#clusterLimit: 1
# Job concurrency per worker
# deliverJobConcurrency: 128
# inboxJobConcurrency: 16
# Job rate limiter
# deliverJobPerSec: 128
# inboxJobPerSec: 16
# Job attempts
# deliverJobMaxAttempts: 12
# inboxJobMaxAttempts: 8
# IP address family used for outgoing request (ipv4, ipv6 or dual)
#outgoingAddressFamily: ipv4
# Syslog option
#syslog:
# host: localhost
# port: 514
# Proxy for HTTP/HTTPS
#proxy: http://127.0.0.1:3128
#proxyBypassHosts: [
# 'example.com',
# '192.0.2.8'
#]
# Proxy for SMTP/SMTPS
#proxySmtp: http://127.0.0.1:3128 # use HTTP/1.1 CONNECT
#proxySmtp: socks4://127.0.0.1:1080 # use SOCKS4
#proxySmtp: socks5://127.0.0.1:1080 # use SOCKS5
# Media Proxy
#mediaProxy: https://example.com/proxy
# Proxy remote files (default: false)
#proxyRemoteFiles: true
allowedPrivateNetworks: {{ .Values.firefish.allowedPrivateNetworks | toJson }}
# TWA
#twa:
# nameSpace: android_app
# packageName: tld.domain.twa
# sha256CertFingerprints: ['AB:CD:EF']
# Upload or download file size limits (bytes)
#maxFileSize: 262144000
# Managed hosting settings
# !!!!!!!!!!
# >>>>>> NORMAL SELF-HOSTERS, STAY AWAY! <<<<<<
# >>>>>> YOU DON'T NEED THIS! <<<<<<
# !!!!!!!!!!
# Each category is optional, but every item within a category is mandatory!
# If you mess this up, that's on you, you've been warned...
#maxUserSignups: 100
isManagedHosting: {{ .Values.firefish.isManagedHosting }}
deepl:
managed: {{ .Values.firefish.deepl.managed }}
authKey: {{ .Values.firefish.deepl.authKey | quote}}
isPro: {{ .Values.firefish.deepl.isPro }}
libreTranslate:
managed: {{ .Values.firefish.libreTranslate.managed }}
apiUrl: {{ .Values.firefish.libreTranslate.apiUrl | quote }}
apiKey: {{ .Values.firefish.libreTranslate.apiKey | quote }}
email:
managed: {{ .Values.firefish.smtp.managed }}
address: {{ .Values.firefish.smtp.from_address | quote }}
host: {{ .Values.firefish.smtp.server | quote }}
port: {{ .Values.firefish.smtp.port }}
user: {{ .Values.firefish.smtp.login | quote }}
pass: {{ .Values.firefish.smtp.password | quote }}
useImplicitSslTls: {{ .Values.firefish.smtp.useImplicitSslTls }}
objectStorage:
managed: {{ .Values.firefish.objectStorage.managed }}
baseUrl: {{ .Values.firefish.objectStorage.baseUrl | quote }}
bucket: {{ .Values.firefish.objectStorage.bucket | quote }}
prefix: {{ .Values.firefish.objectStorage.prefix | quote }}
endpoint: {{ .Values.firefish.objectStorage.endpoint | quote }}
region: {{ .Values.firefish.objectStorage.region | quote }}
accessKey: {{ .Values.firefish.objectStorage.access_key | quote }}
secretKey: {{ .Values.firefish.objectStorage.access_secret | quote }}
useSsl: true
connnectOverProxy: false
setPublicReadOnUpload: true
s3ForcePathStyle: true
# !!!!!!!!!!
# >>>>>> AGAIN, NORMAL SELF-HOSTERS, STAY AWAY! <<<<<<
# >>>>>> YOU DON'T NEED THIS, ABOVE SETTINGS ARE FOR MANAGED HOSTING ONLY! <<<<<<
# !!!!!!!!!!
# Seriously. Do NOT fill out the above settings if you're self-hosting.
# They're much better off being set from the control panel.
{{- end }}


@ -1,82 +0,0 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: {{ include "firefish.fullname" . }}
labels:
{{- include "firefish.labels" . | nindent 4 }}
spec:
{{- if not .Values.autoscaling.enabled }}
replicas: {{ .Values.replicaCount }}
{{- end }}
selector:
matchLabels:
{{- include "firefish.selectorLabels" . | nindent 6 }}
template:
metadata:
annotations:
checksum/secret-config: {{ include ( print $.Template.BasePath "/secret-config.yaml" ) . | sha256sum | quote }}
{{- with .Values.podAnnotations }}
{{- toYaml . | nindent 8 }}
{{- end }}
labels:
{{- include "firefish.selectorLabels" . | nindent 8 }}
spec:
{{- with .Values.imagePullSecrets }}
imagePullSecrets:
{{- toYaml . | nindent 8 }}
{{- end }}
serviceAccountName: {{ include "firefish.serviceAccountName" . }}
securityContext:
{{- toYaml .Values.podSecurityContext | nindent 8 }}
volumes:
- name: config-volume
secret:
secretName: {{ template "firefish.fullname" . }}-config
containers:
- name: {{ .Chart.Name }}
securityContext:
{{- toYaml .Values.securityContext | nindent 12 }}
image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}"
imagePullPolicy: {{ .Values.image.pullPolicy }}
command:
- pnpm
- run
- start
env:
- name: "NODE_ENV"
value: "production"
volumeMounts:
- name: config-volume
mountPath: /firefish/.config
ports:
- name: http
containerPort: 3000
protocol: TCP
startupProbe:
httpGet:
path: /
port: http
failureThreshold: 30
periodSeconds: 10
livenessProbe:
httpGet:
path: /
port: http
readinessProbe:
httpGet:
path: /
port: http
resources:
{{- toYaml .Values.resources | nindent 12 }}
{{- with .Values.nodeSelector }}
nodeSelector:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.affinity }}
affinity:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.tolerations }}
tolerations:
{{- toYaml . | nindent 8 }}
{{- end }}


@ -1,28 +0,0 @@
{{- if .Values.autoscaling.enabled }}
apiVersion: autoscaling/v2beta1
kind: HorizontalPodAutoscaler
metadata:
name: {{ include "firefish.fullname" . }}
labels:
{{- include "firefish.labels" . | nindent 4 }}
spec:
scaleTargetRef:
apiVersion: apps/v1
kind: Deployment
name: {{ include "firefish.fullname" . }}
minReplicas: {{ .Values.autoscaling.minReplicas }}
maxReplicas: {{ .Values.autoscaling.maxReplicas }}
metrics:
{{- if .Values.autoscaling.targetCPUUtilizationPercentage }}
- type: Resource
resource:
name: cpu
targetAverageUtilization: {{ .Values.autoscaling.targetCPUUtilizationPercentage }}
{{- end }}
{{- if .Values.autoscaling.targetMemoryUtilizationPercentage }}
- type: Resource
resource:
name: memory
targetAverageUtilization: {{ .Values.autoscaling.targetMemoryUtilizationPercentage }}
{{- end }}
{{- end }}


@ -1,61 +0,0 @@
{{- if .Values.ingress.enabled -}}
{{- $fullName := include "firefish.fullname" . -}}
{{- $svcPort := .Values.service.port -}}
{{- if and .Values.ingress.className (not (semverCompare ">=1.18-0" .Capabilities.KubeVersion.GitVersion)) }}
{{- if not (hasKey .Values.ingress.annotations "kubernetes.io/ingress.class") }}
{{- $_ := set .Values.ingress.annotations "kubernetes.io/ingress.class" .Values.ingress.className}}
{{- end }}
{{- end }}
{{- if semverCompare ">=1.19-0" .Capabilities.KubeVersion.GitVersion -}}
apiVersion: networking.k8s.io/v1
{{- else if semverCompare ">=1.14-0" .Capabilities.KubeVersion.GitVersion -}}
apiVersion: networking.k8s.io/v1beta1
{{- else -}}
apiVersion: extensions/v1beta1
{{- end }}
kind: Ingress
metadata:
name: {{ $fullName }}
labels:
{{- include "firefish.labels" . | nindent 4 }}
{{- with .Values.ingress.annotations }}
annotations:
{{- toYaml . | nindent 4 }}
{{- end }}
spec:
{{- if and .Values.ingress.className (semverCompare ">=1.18-0" .Capabilities.KubeVersion.GitVersion) }}
ingressClassName: {{ .Values.ingress.className }}
{{- end }}
{{- if .Values.ingress.tls }}
tls:
{{- range .Values.ingress.tls }}
- hosts:
{{- range .hosts }}
- {{ . | quote }}
{{- end }}
secretName: {{ .secretName }}
{{- end }}
{{- end }}
rules:
{{- range .Values.ingress.hosts }}
- host: {{ .host | quote }}
http:
paths:
{{- range .paths }}
- path: {{ .path }}
{{- if and .pathType (semverCompare ">=1.18-0" $.Capabilities.KubeVersion.GitVersion) }}
pathType: {{ .pathType }}
{{- end }}
backend:
{{- if semverCompare ">=1.19-0" $.Capabilities.KubeVersion.GitVersion }}
service:
name: {{ $fullName }}
port:
number: {{ $svcPort }}
{{- else }}
serviceName: {{ $fullName }}
servicePort: {{ $svcPort }}
{{- end }}
{{- end }}
{{- end }}
{{- end }}


@ -1,59 +0,0 @@
apiVersion: batch/v1
kind: Job
metadata:
name: {{ include "firefish.fullname" . }}-db-migrate
labels:
{{- include "firefish.labels" . | nindent 4 }}
annotations:
"helm.sh/hook": post-install,pre-upgrade
"helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded
"helm.sh/hook-weight": "-2"
spec:
template:
metadata:
name: {{ include "firefish.fullname" . }}-db-migrate
{{- with .Values.podAnnotations }}
annotations:
{{- toYaml . | nindent 8 }}
{{- end }}
spec:
restartPolicy: Never
{{- with .Values.imagePullSecrets }}
imagePullSecrets:
{{- toYaml . | nindent 8 }}
{{- end }}
serviceAccountName: {{ include "firefish.serviceAccountName" . }}
securityContext:
{{- toYaml .Values.podSecurityContext | nindent 8 }}
volumes:
- name: config-volume
secret:
secretName: {{ template "firefish.fullname" . }}-config
containers:
- name: {{ .Chart.Name }}
securityContext:
{{- toYaml .Values.securityContext | nindent 12 }}
image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}"
imagePullPolicy: {{ .Values.image.pullPolicy }}
command:
- pnpm
- run
- migrate
env:
- name: "NODE_ENV"
value: "production"
volumeMounts:
- name: config-volume
mountPath: /firefish/.config
{{- with .Values.nodeSelector }}
nodeSelector:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.affinity }}
affinity:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.tolerations }}
tolerations:
{{- toYaml . | nindent 8 }}
{{- end }}


@ -1,9 +0,0 @@
apiVersion: v1
kind: Secret
metadata:
name: {{ template "firefish.fullname" . }}-config
labels:
{{- include "firefish.labels" . | nindent 4 }}
type: Opaque
data:
default.yml: {{ include "firefish.configDir.default.yml" . | b64enc }}


@ -1,15 +0,0 @@
apiVersion: v1
kind: Service
metadata:
name: {{ include "firefish.fullname" . }}
labels:
{{- include "firefish.labels" . | nindent 4 }}
spec:
type: {{ .Values.service.type }}
ports:
- port: {{ .Values.service.port }}
targetPort: http
protocol: TCP
name: http
selector:
{{- include "firefish.selectorLabels" . | nindent 4 }}


@ -1,12 +0,0 @@
{{- if .Values.serviceAccount.create -}}
apiVersion: v1
kind: ServiceAccount
metadata:
name: {{ include "firefish.serviceAccountName" . }}
labels:
{{- include "firefish.labels" . | nindent 4 }}
{{- with .Values.serviceAccount.annotations }}
annotations:
{{- toYaml . | nindent 4 }}
{{- end }}
{{- end }}


@ -1,15 +0,0 @@
apiVersion: v1
kind: Pod
metadata:
name: "{{ include "firefish.fullname" . }}-test-connection"
labels:
{{- include "firefish.labels" . | nindent 4 }}
annotations:
"helm.sh/hook": test
spec:
containers:
- name: wget
image: busybox
command: ['wget']
args: ['{{ include "firefish.fullname" . }}:{{ .Values.service.port }}']
restartPolicy: Never


@ -1,168 +0,0 @@
# Default values for firefish.
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.
replicaCount: 1
image:
repository: registry.firefish.dev/firefish/firefish
pullPolicy: IfNotPresent
# Overrides the image tag whose default is the chart appVersion.
tag: ""
firefish:
isManagedHosting: true
domain: firefish.local
deepl:
managed: false
authKey: ""
isPro: false
libreTranslate:
managed: false
apiUrl: ""
apiKey: ""
smtp:
managed: true
from_address: notifications@example.com
port: 587
server: smtp.mailgun.org
useImplicitSslTls: false
login: ""
password: ""
objectStorage:
managed: true
access_key: ""
access_secret: ""
baseUrl: "" # e.g. "https://my-bucket.nyc3.cdn.digitaloceanspaces.com"
bucket: "" # e.g. "my-bucket"
prefix: files
endpoint: "" # e.g. "nyc3.digitaloceanspaces.com:443"
region: "" # e.g. "nyc3"
# -- If you want to allow firefish to connect to private ips, enter the cidrs here.
allowedPrivateNetworks: []
# - "10.0.0.0/8"
reservedUsernames:
- root
- admin
- administrator
- me
- system
# https://github.com/bitnami/charts/tree/master/bitnami/postgresql#parameters
postgresql:
# -- disable if you want to use an existing db; in which case the values below
# must match those of that external postgres instance
enabled: true
# postgresqlHostname: preexisting-postgresql
# postgresqlPort: 5432
auth:
database: firefish_production
username: firefish
# you must set a password; the password generated by the postgresql chart will
# be rotated on each upgrade:
# https://github.com/bitnami/charts/tree/master/bitnami/postgresql#upgrade
password: ""
# https://github.com/bitnami/charts/tree/master/bitnami/redis#parameters
redis:
# disable if you want to use an existing redis instance; in which case the
# values below must match those of that external redis instance
enabled: true
hostname: ""
port: 6379
auth:
# -- you must set a password; the password generated by the redis chart will be
# rotated on each upgrade:
password: ""
# -- https://github.com/bitnami/charts/tree/master/bitnami/elasticsearch#parameters
elasticsearch:
# disable if you want to use an existing elasticsearch instance; in which case the
# values below must match those of that external elasticsearch instance
enabled: false
hostname: ""
port: 9200
ssl: false
auth: {}
# username: ""
# password: ""
# @ignored
image:
tag: 7
imagePullSecrets: []
nameOverride: ""
fullnameOverride: ""
serviceAccount:
# Specifies whether a service account should be created
create: true
# Annotations to add to the service account
annotations: {}
# The name of the service account to use.
# If not set and create is true, a name is generated using the fullname template
name: ""
podAnnotations: {}
podSecurityContext: {}
# fsGroup: 2000
securityContext: {}
# capabilities:
# drop:
# - ALL
# readOnlyRootFilesystem: true
# runAsNonRoot: true
# runAsUser: 1000
service:
type: ClusterIP
port: 80
ingress:
enabled: false
className: ""
annotations: {}
# kubernetes.io/ingress.class: nginx
# kubernetes.io/tls-acme: "true"
hosts:
- host: chart-example.local
paths:
- path: /
pathType: ImplementationSpecific
tls: []
# - secretName: chart-example-tls
# hosts:
# - chart-example.local
resources: {}
# We usually recommend not to specify default resources and to leave this as a conscious
# choice for the user. This also increases chances charts run on environments with little
# resources, such as Minikube. If you do want to specify resources, uncomment the following
# lines, adjust them as necessary, and remove the curly braces after 'resources:'.
# limits:
# cpu: 100m
# memory: 128Mi
# requests:
# cpu: 100m
# memory: 128Mi
autoscaling:
enabled: false
minReplicas: 1
maxReplicas: 100
targetCPUUtilizationPercentage: 80
# targetMemoryUtilizationPercentage: 80
nodeSelector: {}
tolerations: []
affinity: {}


@ -0,0 +1,51 @@
version: "3"
services:
web:
image: docker.io/node:18.17.0-bookworm
container_name: firefish_web
restart: unless-stopped
depends_on:
- db
- redis
ports:
- "3030:3030"
networks:
- firefishnet
environment:
NODE_ENV: production
PGPASSWORD: password
URL: http://localhost:3030
volumes:
- ./firefish:/firefish:rw
- ./docker-entrypoint.sh:/docker-entrypoint.sh:ro
entrypoint: /docker-entrypoint.sh
redis:
restart: unless-stopped
image: docker.io/redis:7.0-alpine
container_name: firefish_redis
networks:
- firefishnet
ports:
- "26379:6379"
volumes:
- ./redis:/data
db:
restart: unless-stopped
image: docker.io/groonga/pgroonga:3.1.8-alpine-12
container_name: firefish_db
networks:
- firefishnet
environment:
- "POSTGRES_PASSWORD=password"
- "POSTGRES_USER=firefish"
- "POSTGRES_DB=firefish_db"
ports:
- "25432:5432"
volumes:
- ./db:/var/lib/postgresql/data
networks:
firefishnet:


@ -0,0 +1,65 @@
#!/bin/sh
set -xeu
node --version
# Check Environment Initialized Flag
if [ ! -f '/.firefish_env_initialized' ]; then
# Install entrypoint dependencies
apt-get update
DEBIAN_FRONTEND='noninteractive' apt-get install -y --no-install-recommends wget curl ca-certificates lsb-release gnupg
# Create the PostgreSQL file repository configuration
sh -c 'echo "deb https://apt.postgresql.org/pub/repos/apt $(lsb_release -cs)-pgdg main" > /etc/apt/sources.list.d/pgdg.list'
# Import the PostgreSQL repository signing key
wget --quiet -O - https://www.postgresql.org/media/keys/ACCC4CF8.asc | apt-key add -
# Install compilation dependencies
apt-get update
DEBIAN_FRONTEND='noninteractive' apt-get install -y --no-install-recommends build-essential python3 ffmpeg git postgresql-client-12
curl -vvv --proto '=https' --tlsv1.2 --show-error --fail https://sh.rustup.rs | sh -s -- -y
# Add Cargo PATH
PATH="/root/.cargo/bin:${PATH}"
# If Firefish does not exist yet
if [ ! -f '/firefish/README.md' ]; then
# Clone Firefish
cd /
git clone -v https://firefish.dev/firefish/firefish.git
# Configuring a new server
cd /firefish
cp .config/devenv.yml .config/default.yml
URL="$(echo "${URL}" | sed 's#/#\\/#g')"
sed -i'.bak' "s/http:\/\/localhost:3030/${URL}/" .config/default.yml
fi
# Configure postgres, add pgroonga search
psql --user=firefish --host=firefish_db --dbname=firefish_db --command='CREATE EXTENSION IF NOT EXISTS pgroonga;'
# Configure pnpm, and install dev mode dependencies for compilation
cd /firefish
corepack enable
corepack prepare pnpm@latest --activate
pnpm install --prod false
fi
# Add Environment Initialized Flag
touch /.firefish_env_initialized
# Add Cargo PATH
PATH="/root/.cargo/bin:${PATH}"
# Start a new server
cd /firefish
pnpm install --prod false
pnpm run build:debug
pnpm run migrate
pnpm run start


@ -6,7 +6,7 @@ services:
ports:
- "26379:6379"
db:
image: docker.io/groonga/pgroonga:latest-alpine-16-slim
image: docker.io/groonga/pgroonga:3.1.8-alpine-12
environment:
- "POSTGRES_PASSWORD=password"
- "POSTGRES_USER=firefish"

30
dev/docs/container.md Normal file

@ -0,0 +1,30 @@
# Set up a fully-containerized development environment
## Prerequisites
- Container runtime installation
- [Docker](https://docs.docker.com/get-docker/)
- [Podman](https://podman.io/docs/installation) and [Podman Compose](https://github.com/containers/podman-compose)
- [containerd](https://github.com/containerd/containerd) and [nerdctl](https://github.com/containerd/nerdctl)
- or whatever you want to use
- The following ports are not in use
- 3030
- 25432
- 26379
## Start up the environment
1. Download the [`dev/container` directory](./dev/container) and execute `chmod +x docker-entrypoint.sh`
- Alternatively, you can use `git clone https://firefish.dev/firefish/firefish.git && cd firefish/dev/container`, although this will clone the entire repository.
1. Edit `docker-compose.yml` and set `URL` to the URL you want to use (or leave it as `http://localhost:3030`)
1. Run `docker compose up`
- This will build the environment, install dependencies and prepare the needed config files.
- If you use Podman, you should run `podman-compose up` instead.
1. Wait until the following message shows up
```log
DONE * [core boot] All workers started
DONE * [core boot] Now listening on port 3030 on https://your_firefish_url.example.com
```
1. A fresh Firefish environment is created on the URL you have set!
When you want to restart the dev server, just terminate the process (i.e., press `Ctrl+C`) and run `docker compose up` again.

98
dev/docs/db-container.md Normal file

@ -0,0 +1,98 @@
# Set up database containers and run Firefish locally
## Prerequisites
- Dependencies
- git
- Node.js
- pnpm
- Rust toolchain
- FFmpeg
- Container runtime
- [Docker](https://docs.docker.com/get-docker/)
- [Podman](https://podman.io/docs/installation) and [Podman Compose](https://github.com/containers/podman-compose)
- [containerd](https://github.com/containerd/containerd) and [nerdctl](https://github.com/containerd/nerdctl)
- or whatever you want to use
- The following ports are not in use
- 25432
- 26379
You can refer to [local-installation.md](./local-installation.md) to install the dependencies.
## Configure the environment
1. Fork the Firefish repository on GitLab
1. Clone your Firefish repository
```sh
git clone https://firefish.dev/your-user-name/firefish.git
cd firefish
```
1. Create `.config/default.yml` with the following content
```yaml
# You can change the port if 3000 is already used
url: http://localhost:3000
port: 3000
db:
host: localhost
port: 25432
db: firefish_db
user: firefish
pass: password
redis:
host: localhost
port: 26379
logLevel: [
'error',
'success',
'warning',
'info'
]
```
1. Start database containers
```sh
cd dev/db-container
docker compose up --detach
# or podman-compose up --detach
# or whatever
# go back to the repository root
cd ../..
```
## Build and start Firefish
1. Build Firefish
```sh
pnpm install
pnpm run build:debug
```
1. Execute database migrations
```sh
pnpm run migrate
```
1. Start Firefish
```sh
pnpm run start
```
You can access the local Firefish server at http://localhost:3000 after this message shows up!
```
DONE * [core boot] All workers started
DONE * [core boot] Now listening on port 3000 on http://localhost:3000
```
## Reset the environment
You can recreate a fresh local Firefish environment by recreating the database containers:
```sh
cd dev/db-container
docker compose down
docker compose up --detach
cd ../..
pnpm run migrate
pnpm run start
```

View file

@ -0,0 +1,173 @@
# Set up a development environment by installing all dependencies locally
This document demonstrates an example procedure to set up a Firefish development environment on Debian 12. You can refer to this document if you prefer to install all dependencies (Node.js, PostgreSQL, Redis, etc.) locally.
Make sure that you can use the `sudo` command before proceeding.
## 1. Install dependencies
### Utilities
```sh
sudo apt update
sudo apt install build-essential python3 curl wget git lsb-release
```
### Node.js
Firefish requires Node.js v18.17.0 or later. While you can choose any version between v18.17.0 and the latest (v21.6.2 as of writing), we recommend installing v18.x so as not to inadvertently use new features and introduce incompatibility issues.
Instructions can be found at [this repository](https://github.com/nodesource/distributions).
```sh
NODE_MAJOR=18
curl -fsSL "https://deb.nodesource.com/setup_${NODE_MAJOR}.x" | sudo -E bash -
sudo apt install nodejs
# check version
node --version
```
### Rust toolchain
Instructions can be found at [this page](https://www.rust-lang.org/tools/install).
```sh
curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh
. "${HOME}/.cargo/env"
# check version
cargo --version
```
### PostgreSQL and PGroonga
PostgreSQL install instructions can be found at [this page](https://www.postgresql.org/download/).
```sh
sudo sh -c 'echo "deb https://apt.postgresql.org/pub/repos/apt $(lsb_release -cs)-pgdg main" > /etc/apt/sources.list.d/pgdg.list'
wget --quiet -O - https://www.postgresql.org/media/keys/ACCC4CF8.asc | sudo apt-key add -
sudo apt update
sudo apt install postgresql-12
sudo systemctl enable --now postgresql
# check version
psql --version
```
PGroonga install instructions can be found at [this page](https://pgroonga.github.io/install/).
```sh
wget "https://apache.jfrog.io/artifactory/arrow/$(lsb_release --id --short | tr 'A-Z' 'a-z')/apache-arrow-apt-source-latest-$(lsb_release --codename --short).deb"
sudo apt install "./apache-arrow-apt-source-latest-$(lsb_release --codename --short).deb"
wget "https://packages.groonga.org/debian/groonga-apt-source-latest-$(lsb_release --codename --short).deb"
sudo apt install "./groonga-apt-source-latest-$(lsb_release --codename --short).deb"
sudo apt update
sudo apt install postgresql-12-pgdg-pgroonga
rm "apache-arrow-apt-source-latest-$(lsb_release --codename --short).deb" "groonga-apt-source-latest-$(lsb_release --codename --short).deb"
```
### Redis
Instructions can be found at [this page](https://redis.io/docs/install/install-redis/).
```sh
curl -fsSL https://packages.redis.io/gpg | sudo gpg --dearmor -o /usr/share/keyrings/redis-archive-keyring.gpg
echo "deb [signed-by=/usr/share/keyrings/redis-archive-keyring.gpg] https://packages.redis.io/deb $(lsb_release -cs) main" | sudo tee /etc/apt/sources.list.d/redis.list
sudo apt update
sudo apt install redis
sudo systemctl enable --now redis-server
# check version
redis-cli --version
```
### FFmpeg
```sh
sudo apt install ffmpeg
```
## 2. Set up a database
1. Create a database user
```sh
sudo -u postgres createuser --no-createdb --no-createrole --no-superuser --encrypted --pwprompt firefish
```
If you forgot the password you typed, you can reset it by executing `sudo -u postgres psql -c "ALTER USER firefish PASSWORD 'password';"`.
2. Create a database
```sh
sudo -u postgres createdb --encoding='UTF8' --owner=firefish firefish_db
```
3. Enable the PGroonga extension
```sh
sudo -u postgres psql --command='CREATE EXTENSION pgroonga;' --dbname=firefish_db
```
## 3. Configure Firefish
1. Fork the Firefish repository on GitLab
1. Clone your Firefish repository
```sh
git clone https://firefish.dev/your-user-name/firefish.git
```
1. Create the config file
```sh
cd firefish
vim .config/default.yml
```
```yaml
url: http://localhost:3000
port: 3000
db:
host: localhost
port: 5432
db: firefish_db
user: firefish
pass: password
redis:
host: localhost
port: 6379
logLevel: [
'error',
'success',
'warning',
'info'
]
```
## 4. Build and start Firefish
1. Install pnpm
```sh
sudo corepack enable
corepack prepare pnpm@latest --activate
# check version
pnpm --version
```
1. Build
```sh
pnpm install
pnpm run build:debug
```
1. Execute database migrations
```sh
pnpm run migrate
```
1. Start Firefish
```sh
pnpm run start
```
You can access the local Firefish server at http://localhost:3000 after this message shows up!
```
DONE * [core boot] All workers started
DONE * [core boot] Now listening on port 3000 on http://localhost:3000
```

View file

@ -52,7 +52,7 @@ services:
db:
restart: unless-stopped
image: docker.io/groonga/pgroonga:latest-alpine-16-slim
image: docker.io/groonga/pgroonga:3.1.8-alpine-16-slim
container_name: firefish_db
networks:
- calcnet

View file

@ -2,7 +2,7 @@
Breaking changes are indicated by the :warning: icon.
## Unreleased
## v20240319
- :warning: `followingCount` and `followersCount` in `users/show` will be `null` (instead of 0) if these values are unavailable.
- :warning: `admin/search/index-all` is removed since posts are now indexed automatically.
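As an illustration of the first change, a hypothetical check from the command line (the server host, username, and use of `curl`/`jq` are placeholders, not part of the API change itself):
```sh
# followingCount / followersCount may now be null instead of 0
curl -s https://your-server.example.com/api/users/show \
  -H 'Content-Type: application/json' \
  -d '{"username": "alice"}' | jq '.followingCount, .followersCount'
```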

View file

@ -2,7 +2,10 @@
Critical security updates are indicated by the :warning: icon.
## Unreleased
- Server administrators should check [notice-for-admins.md](./notice-for-admins.md) as well.
- Third-party client/bot developers may want to check [api-change.md](./api-change.md) as well.
## [v20240319](https://firefish.dev/firefish/firefish/-/compare/v20240301...v20240319?from_project_id=7&straight=false)
- Introduce new full-text search engine and post search filters
- Refactoring
@ -12,6 +15,7 @@ Critical security updates are indicated by the :warning: icon.
- Add a toggleable setting to show a warning when you attempt to post files without alt text
- Fix bugs
- Update documents and example config files
- Added `/authorize_interaction` page, allowing users to jump from a remote Mastodon post/user page to the corresponding page in Firefish (!10702)
## [v20240301](https://firefish.dev/firefish/firefish/-/compare/v20240229...v20240301?from_project_id=7&straight=false)

View file

@ -1,108 +0,0 @@
# Firefish Developer Docs
## Nix Dev Environment
The Firefish repo comes with a Nix-based shell environment to help make development as easy as possible!
Please note, however, that this environment will not work on Windows outside of a WSL2 environment.
### Prerequisites
- Installed the [Nix Package Manager](https://nixos.org/download.html) (use the command on their website)
- Installed [direnv](https://direnv.net/docs/installation.html) via your package manager and added its hook to your shell.
Once the repo is cloned to your computer, follow these next few steps inside the Firefish folder:
- Run `direnv allow`. This will build the environment and install all needed tools.
- Run `install-deps`, then `prepare-config`, to install the node dependencies and prepare the needed config files.
- In a second terminal, run `devenv up`. This will spawn a **Redis** server, a **Postgres** server, and the **Firefish** server in dev mode.
- Once you see the Firefish banner printed in your second terminal, run `migrate` in the first.
- Once migrations finish, open http://localhost:3000 in your web browser.
- You should now see the admin user creation screen!
Note: When you want to restart a dev server, all you need to do is run `devenv up`, no other steps are necessary.
# Possible Troubles with the dev environment
(this doesn't have to be done under normal conditions, this is for future reference)
### direnv
If you have any trouble with `direnv allow`
Check that the contents of `.envrc` have the same version of nix-direnv that is specified here:
> nix-direnv under -> installation -> using direnv source url
> https://github.com/nix-community/nix-direnv#direnv-source_url
there should be no errors during `direnv allow`
### outdated nix packages
if `install-deps` or any subsequent command doesn't run due to versioning problems
`flake.nix` and `flake.lock` may be outdated
delete `flake.lock`, or better, run `nix flake update --extra-experimental-features flakes --extra-experimental-features nix-command`
after that, run `direnv rebuild`
if there are any errors, you might have to change `flake.nix`
(because the available options can change between versions - consider getting support in [the matrix channel](https://matrix.to/#/#firefish-community:nitro.chat))
### after changing a node version
in my case, I had to change the node version from 19 to 18
! before proceeding, make sure to delete all build artifacts!
remove `node_modules` and `built` folders, and maybe `.devenv` and `.direnv` as well
manually, or run `npm cache clean --force` and `pnpm cleanall`
### Windows Subsystem for Linux
if `devenv up` terminates because of wrong folder permissions,
create the file `/etc/wsl.conf` in your distro and add
```shell
[automount]
options = "metadata"
```
this allows `chmod` calls to actually have an effect.
the build scripts DO actually set the permissions, it just needs to work in wsl.
### devenv up
devenv up may take a looong time. (some say this is fake news, maybe it was bad luck in my case)
do not get spooked by this error:
```
> firefish@14.0.0-dev32 start /mnt/.../firefish
> pnpm --filter backend run start
> backend@ start /mnt/.../firefish/packages/backend
> pnpm node ./built/index.js
node:internal/modules/cjs/loader:1078
throw err;
^
Error: Cannot find module '/mnt/.../firefish/packages/backend/built/index.js'
at Module._resolveFilename (node:internal/modules/cjs/loader:1075:15)
at Module._load (node:internal/modules/cjs/loader:920:27)
at Function.executeUserEntryPoint [as runMain] (node:internal/modules/run_main:81:12)
at node:internal/main/run_main_module:23:47 {
code: 'MODULE_NOT_FOUND',
requireStack: []
}
Node.js v18.16.0
undefined
/mnt/.../firefish/packages/backend:
ERR_PNPM_RECURSIVE_RUN_FIRST_FAIL backend@ start: `pnpm node ./built/index.js`
Exit status 1
ELIFECYCLE Command failed with exit code 1.
```
the script is designed to constantly try to start the server, while the build is still running.
this just means that the build isn't finished yet.
at some point you should see a banner that says "Firefish" in big letters -
then you're good to go and can run `migrate` (in another terminal)!
if you don't see the banner,
and it's for some reason stuck on `Finished 'build' after 917 ms` for a few minutes,
just leave devenv running and open another terminal in the folder
run `migrate` and then `pnpm --filter backend run start` by yourself
the server should start

View file

@ -2,7 +2,6 @@ BEGIN;
DELETE FROM "migrations" WHERE name IN (
'FixMutingIndices1710690239308',
'RemoveMentionedUsersColumn1710688552234',
'NoteFile1710304584214',
'RenameMetaColumns1705944717480',
'SeparateHardMuteWordsAndPatterns1706413792769',
@ -30,19 +29,6 @@ CREATE INDEX "IDX_renote_muting_createdAt" ON "muting" ("createdAt");
CREATE INDEX "IDX_renote_muting_muteeId" ON "muting" ("muteeId");
CREATE INDEX "IDX_renote_muting_muterId" ON "muting" ("muterId");
-- remove-mentioned-users-column
ALTER TABLE "note" ADD "mentionedRemoteUsers" text NOT NULL DEFAULT '[]'::text;
CREATE TABLE "temp_mentions_1710688552234" AS
SELECT "id", "url", "uri", "username", "host"
FROM "user"
JOIN "user_profile" ON "user"."id" = "user_profile". "userId" WHERE "user"."host" IS NOT NULL;
CREATE UNIQUE INDEX "temp_mentions_id" ON "temp_mentions_1710688552234" ("id");
UPDATE "note" SET "mentionedRemoteUsers" = (
SELECT COALESCE(json_agg(row_to_json("data")::jsonb - 'id')::text, '[]') FROM "temp_mentions_1710688552234" AS "data"
WHERE "data"."id" = ANY("note"."mentions")
);
DROP TABLE "temp_mentions_1710688552234";
-- note-file
DROP TABLE "note_file";

View file

@ -1,41 +0,0 @@
diff --git a/packages/backend/migration/1661376843000-remove-mentioned-remote-users-column.js b/packages/backend/migration/1661376843000-remove-mentioned-remote-users-column.js
index 42d79b5b5..1fd5e0f10 100644
--- a/packages/backend/migration/1661376843000-remove-mentioned-remote-users-column.js
+++ b/packages/backend/migration/1661376843000-remove-mentioned-remote-users-column.js
@@ -7,6 +7,22 @@ export class removeMentionedRemoteUsersColumn1661376843000 {
async down(queryRunner) {
await queryRunner.query(`ALTER TABLE "note" ADD "mentionedRemoteUsers" TEXT NOT NULL DEFAULT '[]'::text`);
- await queryRunner.query(`UPDATE "note" SET "mentionedRemoteUsers" = (SELECT COALESCE(json_agg(row_to_json("data"))::text, '[]') FROM (SELECT "url", "uri", "username", "host" FROM "user" JOIN "user_profile" ON "user"."id" = "user_profile". "userId" WHERE "user"."host" IS NOT NULL AND "user"."id" = ANY("note"."mentions")) AS "data")`);
+ await queryRunner.query(`
+ CREATE TEMP TABLE IF NOT EXISTS "temp_mentions" AS
+ SELECT "id", "url", "uri", "username", "host"
+ FROM "user"
+ JOIN "user_profile" ON "user"."id" = "user_profile"."userId" WHERE "user"."host" IS NOT NULL
+ `);
+
+ await queryRunner.query(`
+ CREATE UNIQUE INDEX "temp_mentions_id" ON "temp_mentions"("id")
+ `);
+
+ await queryRunner.query(`
+ UPDATE "note" SET "mentionedRemoteUsers" = (
+ SELECT COALESCE(json_agg(row_to_json("data")::jsonb - 'id')::text, '[]') FROM "temp_mentions" AS "data"
+ WHERE "data"."id" = ANY("note"."mentions")
+ )
+ `);
}
}
diff --git a/packages/backend/migration/1663399074403-resize-comments-drive-file.js b/packages/backend/migration/1663399074403-resize-comments-drive-file.js
index a037f1655..0873aec9b 100644
--- a/packages/backend/migration/1663399074403-resize-comments-drive-file.js
+++ b/packages/backend/migration/1663399074403-resize-comments-drive-file.js
@@ -9,6 +9,6 @@ export class resizeCommentsDriveFile1663399074403 {
}
async down(queryRunner) {
- await queryRunner.query(`ALTER TABLE "drive_file" ALTER COLUMN "comment" TYPE character varying(512)`);
- }
+ console.log('This migration cannot be reverted, skipping...');
+ }
}

View file

@ -1,19 +1,13 @@
# Running a Firefish server with Docker
# Running a Firefish server with containers
## Pre-built docker container
[registry.firefish.dev/firefish/firefish](https://firefish.dev/firefish/firefish/container_registry)
## Prerequisites
## `docker-compose`
- Latest [Docker](https://docs.docker.com/get-docker/) installation
- You can also use [Podman](https://podman.io/docs/installation) and [Podman Compose](https://github.com/containers/podman-compose).
There are example config files that you can use to build the container from source:
## Configuration
- docker-compose.example.yml (**compose file**)
- .config/docker_example.env (**db config settings**)
- .config/default.yml (**firefish server settings**)
## Configuring
Copy the files:
Copy the example config files:
```sh
cp docker-compose.example.yml docker-compose.yml
@ -23,20 +17,31 @@ cp .config/docker_example.env .config/docker.env
then edit them according to your environment.
You can configure `docker.env` with anything you like, but you will have to pay attention to the `default.yml` file:
- `url` should be set to the URL you will be hosting the web interface for the server at.
- `host`, `db`, `user`, `pass` will have to be configured in the `PostgreSQL configuration` section - `host` is the name of the postgres container (eg: *firefish_db_1*), and the others should match your `docker.env`.
- `host` will need to be configured in the *Redis configuration* section - it is the name of the redis container (eg: *firefish_redis_1*)
- `auth` will need to be configured in the *Sonic* section - cannot be the default `SecretPassword`
Everything else can be left as-is.
## Running docker-compose
## Pull the container image
The [prebuilt container for firefish](https://firefish.dev/firefish/firefish/container_registry) is fairly large, and may take a few minutes to download and extract using docker.
The image tag is [`registry.firefish.dev/firefish/firefish:latest`](https://firefish.dev/firefish/firefish/container_registry/1).
Copy `docker-compose.yml` and the `config/` directory to a working directory, then run the **docker-compose** command:
`docker-compose up -d`.
```sh
docker pull registry.firefish.dev/firefish/firefish:latest
# or podman pull registry.firefish.dev/firefish/firefish:latest
```
## Run
```sh
docker compose up --detach
# or podman-compose up --detach
```
NOTE: This will take some time to come fully online, even after downloading and extracting the container images, and it may emit some error messages before completing successfully. Specifically, the `db` container needs to initialize and so isn't available to the `web` container right away. Only once the `db` container comes online does the `web` container start building and initializing the Firefish tables.
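While waiting, one rough way to watch the startup progress is to follow the compose logs (a sketch; use whichever compose tooling you run):
```sh
docker compose logs --follow
# or: podman-compose logs -f
```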
Once the server is up, you can use a web browser to access the web interface at `http://serverip:3000` (where `serverip` is the IP address of the server Firefish is running on).
To publish your server, please follow the instructions in [section 5 of this installation guide](./install.md#5-preparation-for-publishing-a-server).

317
docs/install.md Normal file
View file

@ -0,0 +1,317 @@
# Install Firefish
This document shows an example procedure for installing Firefish on Debian 12. Note that there is much room for customizing the server setup; this document merely demonstrates a simple installation.
If you want to use the pre-built container image, please refer to [`install-container.md`](./install-container.md).
Make sure that you can use the `sudo` command before proceeding.
## 1. Install dependencies
### Utilities
```sh
sudo apt update
sudo apt install build-essential python3 curl wget git lsb-release
```
### Node.js and pnpm
Instructions can be found at [this repository](https://github.com/nodesource/distributions).
```sh
NODE_MAJOR=20
curl -fsSL "https://deb.nodesource.com/setup_${NODE_MAJOR}.x" | sudo -E bash -
sudo apt install nodejs
# check version
node --version
```
You also need to enable `pnpm`.
```sh
sudo corepack enable
corepack prepare pnpm@latest --activate
# check version
pnpm --version
```
### PostgreSQL and PGroonga
PostgreSQL install instructions can be found at [this page](https://www.postgresql.org/download/).
```sh
sudo sh -c 'echo "deb https://apt.postgresql.org/pub/repos/apt $(lsb_release -cs)-pgdg main" > /etc/apt/sources.list.d/pgdg.list'
wget --quiet -O - https://www.postgresql.org/media/keys/ACCC4CF8.asc | sudo apt-key add -
sudo apt update
sudo apt install postgresql-16
sudo systemctl enable --now postgresql
# check version
psql --version
```
PGroonga install instructions can be found at [this page](https://pgroonga.github.io/install/).
```sh
wget "https://apache.jfrog.io/artifactory/arrow/$(lsb_release --id --short | tr 'A-Z' 'a-z')/apache-arrow-apt-source-latest-$(lsb_release --codename --short).deb"
sudo apt install "./apache-arrow-apt-source-latest-$(lsb_release --codename --short).deb"
wget "https://packages.groonga.org/debian/groonga-apt-source-latest-$(lsb_release --codename --short).deb"
sudo apt install "./groonga-apt-source-latest-$(lsb_release --codename --short).deb"
sudo apt update
sudo apt install postgresql-16-pgdg-pgroonga
rm "apache-arrow-apt-source-latest-$(lsb_release --codename --short).deb" "groonga-apt-source-latest-$(lsb_release --codename --short).deb"
```
### Redis
Instructions can be found at [this page](https://redis.io/docs/install/install-redis/).
```sh
curl -fsSL https://packages.redis.io/gpg | sudo gpg --dearmor -o /usr/share/keyrings/redis-archive-keyring.gpg
echo "deb [signed-by=/usr/share/keyrings/redis-archive-keyring.gpg] https://packages.redis.io/deb $(lsb_release -cs) main" | sudo tee /etc/apt/sources.list.d/redis.list
sudo apt update
sudo apt install redis
sudo systemctl enable --now redis-server
# check version
redis-cli --version
```
### FFmpeg
```sh
sudo apt install ffmpeg
```
## 2. Set up a database
1. Create a database user
```sh
sudo -u postgres createuser --no-createdb --no-createrole --no-superuser --encrypted --pwprompt firefish
```
If you forgot the password you typed, you can reset it by executing `sudo -u postgres psql -c "ALTER USER firefish PASSWORD 'password';"`.
2. Create a database
```sh
sudo -u postgres createdb --encoding='UTF8' --owner=firefish firefish_db
```
3. Enable the PGroonga extension
```sh
sudo -u postgres psql --command='CREATE EXTENSION pgroonga;' --dbname=firefish_db
```
## 3. Configure Firefish
1. Create a user for Firefish and switch to that user
```sh
sudo useradd --create-home --user-group --shell /bin/bash firefish
sudo su --login firefish
# check the current working directory
# the result should be /home/firefish
pwd
```
1. Install Rust toolchain
Instructions can be found at [this page](https://www.rust-lang.org/tools/install).
```sh
curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh
. "${HOME}/.cargo/env"
# check version
cargo --version
```
3. Clone the Firefish repository
```sh
git clone --branch=main https://firefish.dev/firefish/firefish.git
```
1. Copy and edit the config file
```sh
cd firefish
cp .config/example.yml .config/default.yml
nano .config/default.yml
```
```yaml
url: https://your-server-domain.example.com # change here
port: 3000
db:
host: localhost
port: 5432
db: firefish_db
user: firefish
pass: your-database-password # and here
```
## 4. Build Firefish
1. Build
```sh
pnpm install --frozen-lockfile
NODE_ENV=production pnpm run build
```
1. Execute database migrations
```sh
pnpm run migrate
```
1. Logout from `firefish` user
```sh
exit
```
## 5. Preparation for publishing a server
### 1. Set up a firewall
To expose your server securely, you may want to set up a firewall. We use [ufw](https://launchpad.net/ufw) in these instructions.
```sh
sudo apt install ufw
# if you use SSH
# SSH_PORT=22
# sudo ufw limit "${SSH_PORT}/tcp"
sudo ufw default deny
sudo ufw allow 80
sudo ufw allow 443
sudo ufw --force enable
# check status
sudo ufw status
```
### 2. Set up a reverse proxy
In these instructions, we use [Caddy](https://caddyserver.com/) to make the Firefish server accessible from the internet. However, you can also use [Nginx](https://nginx.org/en/) if you want ([example Nginx config file](../firefish.nginx.conf)).
1. Install Caddy
```sh
sudo apt install debian-keyring debian-archive-keyring apt-transport-https
curl -1sLf 'https://dl.cloudsmith.io/public/caddy/stable/gpg.key' | sudo gpg --dearmor -o /usr/share/keyrings/caddy-stable-archive-keyring.gpg
curl -1sLf 'https://dl.cloudsmith.io/public/caddy/stable/debian.deb.txt' | sudo tee /etc/apt/sources.list.d/caddy-stable.list
sudo apt update
sudo apt install caddy
# check version
caddy version
```
1. Replace the config file
```sh
sudo mv /etc/caddy/Caddyfile /etc/caddy/Caddyfile.bak
sudo nano /etc/caddy/Caddyfile
```
```Caddyfile
your-server-domain.example.com {
reverse_proxy http://127.0.0.1:3000
log {
output file /var/log/caddy/firefish.log
}
}
```
1. Restart Caddy
```sh
sudo systemctl restart caddy
```
## 6. Publish your Firefish server
1. Create a service file
```sh
sudo nano /etc/systemd/system/firefish.service
```
```service
[Unit]
Description=Firefish daemon
Requires=redis.service caddy.service postgresql.service
After=redis.service caddy.service postgresql.service network-online.target
[Service]
Type=simple
User=firefish
Group=firefish
UMask=0027
ExecStart=/usr/bin/pnpm run start
WorkingDirectory=/home/firefish/firefish
Environment="NODE_ENV=production"
Environment="npm_config_cache=/tmp"
# uncomment the following line if you use jemalloc (note that the path varies on different environments)
# Environment="LD_PRELOAD=/usr/lib/x86_64-linux-gnu/libjemalloc.so.2"
StandardOutput=journal
StandardError=journal
SyslogIdentifier=firefish
TimeoutSec=60
Restart=always
CapabilityBoundingSet=
DevicePolicy=closed
NoNewPrivileges=true
LockPersonality=true
PrivateDevices=true
PrivateIPC=true
PrivateMounts=true
PrivateUsers=true
ProtectClock=true
ProtectControlGroups=true
ProtectHostname=true
ProtectKernelTunables=true
ProtectKernelModules=true
ProtectKernelLogs=true
ProtectProc=invisible
RestrictNamespaces=true
RestrictRealtime=true
RestrictSUIDSGID=true
SecureBits=noroot-locked
SystemCallArchitectures=native
SystemCallFilter=~@chown @clock @cpu-emulation @debug @ipc @keyring @memlock @module @mount @obsolete @privileged @raw-io @reboot @resources @setuid @swap
SystemCallFilter=capset pipe pipe2 setpriority
[Install]
WantedBy=multi-user.target
```
1. Start Firefish
```sh
sudo systemctl enable --now firefish
```
## Upgrading
Please refer to the [upgrade instruction](./upgrade.md). Be sure to switch to the `firefish` user and go to the Firefish directory before executing the `git` command:
```sh
sudo su --login firefish
cd ~/firefish
```
## Customize
- To add custom CSS for all users, edit `./custom/assets/instance.css`.
- To add static assets (such as images for the splash screen), place them in the `./custom/assets/` directory. They'll then be available on `https://yourserver.tld/static-assets/filename.ext`.
- To add custom locales, place them in the `./custom/locales/` directory. If you name your custom locale the same as an existing locale, it will overwrite it. If you give it a unique name, it will be added to the list. Also make sure that the first part of the filename matches the locale you're basing it on. (Example: `en-FOO.yml`)
- To add custom error images, place them in the `./custom/assets/badges` directory, replacing the files already there.
- To add custom sounds, place only mp3 files in the `./custom/assets/sounds` directory.
- To update custom assets without rebuilding, just run `pnpm run gulp`.
- To block ChatGPT, CommonCrawl, or other crawlers from indexing your instance, uncomment the respective rules in `./custom/robots.txt`.
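As a small sketch of the first and sixth points combined (the CSS rule itself is just an arbitrary example):
```sh
# add site-wide custom CSS, then refresh static assets without a full rebuild
echo 'body { letter-spacing: 0.02em; }' >> ./custom/assets/instance.css
pnpm run gulp
```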
## Tips & Tricks
- When editing the config file, please don't fill out the settings at the bottom. They're designed *only* for managed hosting, not self hosting. Those settings are much better off being set in Firefish's control panel.
- Port 3000 (used in the default config) might be already used on your server for something else. To find an open port for Firefish, run `for p in {3000..4000}; do ss -tlnH | tr -s ' ' | cut -d" " -sf4 | grep -q "${p}$" || echo "${p}"; done | head -n 1`. Replace 3000 with the minimum port and 4000 with the maximum port if you need it.
- We'd recommend you use an S3 bucket/CDN for object storage, especially if you use containers.
- When using object storage, setting a proper `Access-Control-Allow-Origin` response header is highly recommended.
- We'd recommend against using CloudFlare, but if you do, make sure to turn code minification off.
- For push notifications, run `npx web-push generate-vapid-keys`, then put the public and private keys into Control Panel > General > ServiceWorker.
- For translations, make a [DeepL](https://deepl.com) account and generate an API key, then put it into Control Panel > General > DeepL Translation.
- To add another admin account:
- Go to the user's page > 3 Dots > About > Moderation > turn on "Moderator"
- Go back to Overview > click the clipboard icon next to the ID
- Run `psql -d firefish` (or whatever the database name is)
- Run `UPDATE "user" SET "isAdmin" = true WHERE id='999999';` (replace `999999` with the copied ID)
- Restart your Firefish server
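A minimal sketch of the database part of these steps, assuming the database is named `firefish_db` and `999999` is the copied ID:
```sh
# you may need to run psql as the postgres user instead (sudo -u postgres psql ...)
psql -d firefish_db -c "UPDATE \"user\" SET \"isAdmin\" = true WHERE id = '999999';"
# then restart the server, e.g. with the systemd unit from section 6
sudo systemctl restart firefish
```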

View file

@ -1,45 +0,0 @@
# Running a Firefish server with Kubernetes and Helm
This is a [Helm](https://helm.sh/) chart directory in the root of the project
that you can use to deploy Firefish to a Kubernetes cluster.
## Deployment
1. Copy the example helm values and make your changes:
```shell
cp .config/helm_values_example.yml .config/helm_values.yml
```
2. Update helm dependencies:
```shell
cd chart
helm dependency list $dir 2> /dev/null | tail +2 | head -n -1 | awk '{ print "helm repo add " $1 " " $3 }' | while read cmd; do $cmd; done;
cd ../
```
3. Create the firefish helm release (also used to update existing deployment):
```shell
helm upgrade \
--install \
--namespace firefish \
--create-namespace \
firefish chart/ \
-f .config/helm_values.yml
```
4. Watch your firefish server spin up:
```shell
kubectl -n firefish get po -w
```
5. Initialize the admin user and managed config:
```shell
export firefish_USERNAME="my_desired_admin_handle" && \
export firefish_PASSWORD="myDesiredInitialPassword" && \
export firefish_HOST="firefish.example.com" && \
export firefish_TOKEN=$(curl -X POST https://$firefish_HOST/api/admin/accounts/create -H "Content-Type: application/json" -d "{ \"username\":\"$firefish_USERNAME\", \"password\":\"$firefish_PASSWORD\" }" | jq -r '.token') && \
echo "Save this token: ${firefish_TOKEN}" && \
curl -X POST -H "Authorization: Bearer $firefish_TOKEN" https://$firefish_HOST/api/admin/accounts/hosted
```
6. Enjoy!

View file

@ -1,106 +0,0 @@
# Migrating from Misskey/FoundKey to Firefish
All the guides below assume you're starting in the root of the repo directory.
### Before proceeding
- **Ensure you have stopped all master and worker processes of Misskey.**
- **Ensure you have backups of the database before performing any commands.**
## Misskey v13 and above
Tested with Misskey v13.11.3.
If your Misskey v13 is older, we recommend updating your Misskey to v13.11.3.
```sh
wget -O mkv13.patch https://firefish.dev/firefish/firefish/-/raw/develop/docs/mkv13.patch
wget -O mkv13_restore.patch https://firefish.dev/firefish/firefish/-/raw/develop/docs/mkv13_restore.patch
git apply mkv13.patch mkv13_restore.patch
cd packages/backend
LINE_NUM="$(pnpm typeorm migration:show -d ormconfig.js | grep -n activeEmailValidation1657346559800 | cut -d ':' -f 1)"
NUM_MIGRATIONS="$(pnpm typeorm migration:show -d ormconfig.js | tail -n+"$LINE_NUM" | grep '\[X\]' | wc -l)"
for i in $(seq 1 $NUM_MIGRATIONS); do pnpm typeorm migration:revert -d ormconfig.js; done
cd ../../
git remote set-url origin https://firefish.dev/firefish/firefish.git
git fetch origin
git stash push
rm -rf fluent-emojis misskey-assets
git switch main # or beta or develop
git pull --ff
wget -O renote_muting.patch https://firefish.dev/firefish/firefish/-/raw/develop/docs/renote_muting.patch
git apply renote_muting.patch
pnpm install
NODE_ENV=production pnpm run build
pnpm run migrate
git stash push
```
Depending on the version you're migrating from, you may have to open Postgres with `psql -d your_database` and run the following commands:
```sql
ALTER TABLE "meta" ADD COLUMN "disableLocalTimeline" boolean DEFAULT false;
ALTER TABLE "meta" ADD COLUMN "disableGlobalTimeline" boolean DEFAULT false;
ALTER TABLE "meta" ADD COLUMN "localDriveCapacityMb" integer DEFAULT 512;
ALTER TABLE "meta" ADD COLUMN "remoteDriveCapacityMb" integer DEFAULT 128;
ALTER TABLE "user" ADD COLUMN "isSilenced" boolean DEFAULT false;
ALTER TABLE "user" ADD COLUMN "isAdmin" boolean DEFAULT false;
ALTER TABLE "user" ADD COLUMN "isModerator" boolean DEFAULT false;
ALTER TABLE "user" ADD COLUMN "remoteDriveCapacityMb" integer DEFAULT 128;
ALTER TABLE "user" ADD COLUMN "driveCapacityOverrideMb" integer DEFAULT 128;
ALTER TABLE "instance" ADD COLUMN "caughtAt" date;
ALTER TABLE "instance" ADD COLUMN "latestRequestSentAt" date;
ALTER TABLE "instance" ADD COLUMN "latestStatus" character varying(512);
ALTER TABLE "instance" ADD COLUMN "lastCommunicatedAt" date;
```
then quit with `\q`, and restart Firefish.
Note: Ignore errors of `column "xxx" of relation "xxx" already exists`.
If no other errors happened, your Firefish is ready to launch!
## Misskey v12.119 and before
```sh
git remote set-url origin https://firefish.dev/firefish/firefish.git
git fetch
git checkout main # or beta or develop
git pull --ff
NODE_ENV=production pnpm run migrate
# build using preferred method
```
## FoundKey
```sh
wget -O fk.patch https://firefish.dev/firefish/firefish/-/raw/develop/docs/fk.patch
git apply fk.patch
cd packages/backend
LINE_NUM="$(npx typeorm migration:show -d ormconfig.js | grep -n uniformThemecolor1652859567549 | cut -d ':' -f 1)"
NUM_MIGRATIONS="$(npx typeorm migration:show -d ormconfig.js | tail -n+"$LINE_NUM" | grep '\[X\]' | wc -l)"
for i in $(seq 1 $NUM_MIGRATIONS); do
npx typeorm migration:revert -d ormconfig.js
done
git remote set-url origin https://firefish.dev/firefish/firefish.git
git fetch
git checkout main # or beta or develop
git pull --ff
NODE_ENV=production pnpm run migrate
# build using preferred method
```
## Reverse
You ***cannot*** migrate back to Misskey from Firefish because passwords are re-hashed with argon2 on sign-in. You can, however, migrate to [Sharkey](https://github.com/transfem-org/Sharkey), a soft fork of Misskey that uses argon2. You can also migrate from Firefish to FoundKey, although this is not recommended because FoundKey is end-of-life and it may have some problems with alt text.

View file

@ -1,45 +0,0 @@
diff --git a/packages/backend/migration/1672704017999-remove-lastCommunicatedAt.js b/packages/backend/migration/1672704017999-remove-lastCommunicatedAt.js
index 38a676985..c4ae690e0 100644
--- a/packages/backend/migration/1672704017999-remove-lastCommunicatedAt.js
+++ b/packages/backend/migration/1672704017999-remove-lastCommunicatedAt.js
@@ -6,6 +6,8 @@ export class removeLastCommunicatedAt1672704017999 {
}
async down(queryRunner) {
- await queryRunner.query(`ALTER TABLE "instance" ADD "lastCommunicatedAt" TIMESTAMP WITH TIME ZONE NOT NULL`);
+ await queryRunner.query(`ALTER TABLE "instance" ADD "lastCommunicatedAt" TIMESTAMP WITH TIME ZONE`);
+ await queryRunner.query(`UPDATE "instance" SET "lastCommunicatedAt" = COALESCE("infoUpdatedAt", "caughtAt")`);
+ await queryRunner.query(`ALTER TABLE "instance" ALTER COLUMN "lastCommunicatedAt" SET NOT NULL`);
}
}
diff --git a/packages/backend/migration/1673336077243-PollChoiceLength.js b/packages/backend/migration/1673336077243-PollChoiceLength.js
index 810c626e0..5809528cb 100644
--- a/packages/backend/migration/1673336077243-PollChoiceLength.js
+++ b/packages/backend/migration/1673336077243-PollChoiceLength.js
@@ -6,6 +6,6 @@ export class PollChoiceLength1673336077243 {
}
async down(queryRunner) {
- await queryRunner.query(`ALTER TABLE "poll" ALTER COLUMN "choices" TYPE character varying(128) array`);
+ //await queryRunner.query(`ALTER TABLE "poll" ALTER COLUMN "choices" TYPE character varying(128) array`);
}
}
diff --git a/packages/backend/migration/1674118260469-achievement.js b/packages/backend/migration/1674118260469-achievement.js
index 131ab96f8..57a922f83 100644
--- a/packages/backend/migration/1674118260469-achievement.js
+++ b/packages/backend/migration/1674118260469-achievement.js
@@ -18,12 +18,13 @@ export class achievement1674118260469 {
async down(queryRunner) {
await queryRunner.query(`CREATE TYPE "public"."user_profile_mutingnotificationtypes_enum_old" AS ENUM('follow', 'mention', 'reply', 'renote', 'quote', 'reaction', 'pollVote', 'receiveFollowRequest', 'followRequestAccepted', 'groupInvited', 'app', 'pollEnded')`);
+ await queryRunner.query(`CREATE TYPE "public"."notification_type_enum_old" AS ENUM('follow', 'mention', 'reply', 'renote', 'quote', 'reaction', 'pollVote', 'pollEnded', 'receiveFollowRequest', 'followRequestAccepted', 'groupInvited', 'app')`);
await queryRunner.query(`ALTER TABLE "user_profile" ALTER COLUMN "mutingNotificationTypes" DROP DEFAULT`);
await queryRunner.query(`ALTER TABLE "user_profile" ALTER COLUMN "mutingNotificationTypes" TYPE "public"."user_profile_mutingnotificationtypes_enum_old"[] USING "mutingNotificationTypes"::"text"::"public"."user_profile_mutingnotificationtypes_enum_old"[]`);
await queryRunner.query(`ALTER TABLE "user_profile" ALTER COLUMN "mutingNotificationTypes" SET DEFAULT '{}'`);
await queryRunner.query(`DROP TYPE "public"."user_profile_mutingnotificationtypes_enum"`);
await queryRunner.query(`ALTER TYPE "public"."user_profile_mutingnotificationtypes_enum_old" RENAME TO "user_profile_mutingnotificationtypes_enum"`);
- await queryRunner.query(`CREATE TYPE "public"."notification_type_enum_old" AS ENUM('follow', 'mention', 'reply', 'renote', 'quote', 'reaction', 'pollVote', 'pollEnded', 'receiveFollowRequest', 'followRequestAccepted', 'groupInvited', 'app')`);
+ await queryRunner.query(`DELETE FROM "public"."notification" WHERE "type" = 'achievementEarned'`);
await queryRunner.query(`ALTER TABLE "notification" ALTER COLUMN "type" TYPE "public"."notification_type_enum_old" USING "type"::"text"::"public"."notification_type_enum_old"`);
await queryRunner.query(`DROP TYPE "public"."notification_type_enum"`);
await queryRunner.query(`ALTER TYPE "public"."notification_type_enum_old" RENAME TO "notification_type_enum"`);

View file

@ -1,127 +0,0 @@
diff --git a/packages/backend/migration/1680491187535-cleanup.js b/packages/backend/migration/1680491187535-cleanup.js
index 1e609ca06..0e6accf3e 100644
--- a/packages/backend/migration/1680491187535-cleanup.js
+++ b/packages/backend/migration/1680491187535-cleanup.js
@@ -1,10 +1,40 @@
export class cleanup1680491187535 {
- name = 'cleanup1680491187535'
+ name = "cleanup1680491187535";
- async up(queryRunner) {
- await queryRunner.query(`DROP TABLE "antenna_note" `);
- }
+ async up(queryRunner) {
+ await queryRunner.query(`DROP TABLE "antenna_note" `);
+ }
- async down(queryRunner) {
- }
+ async down(queryRunner) {
+ await queryRunner.query(
+ `CREATE TABLE antenna_note ( id character varying(32) NOT NULL, "noteId" character varying(32) NOT NULL, "antennaId" character varying(32) NOT NULL, read boolean DEFAULT false NOT NULL)`,
+ );
+ await queryRunner.query(
+ `COMMENT ON COLUMN antenna_note."noteId" IS 'The note ID.'`,
+ );
+ await queryRunner.query(
+ `COMMENT ON COLUMN antenna_note."antennaId" IS 'The antenna ID.'`,
+ );
+ await queryRunner.query(
+ `ALTER TABLE ONLY antenna_note ADD CONSTRAINT "PK_fb28d94d0989a3872df19fd6ef8" PRIMARY KEY (id)`,
+ );
+ await queryRunner.query(
+ `CREATE INDEX "IDX_0d775946662d2575dfd2068a5f" ON antenna_note USING btree ("antennaId")`,
+ );
+ await queryRunner.query(
+ `CREATE UNIQUE INDEX "IDX_335a0bf3f904406f9ef3dd51c2" ON antenna_note USING btree ("noteId", "antennaId")`,
+ );
+ await queryRunner.query(
+ `CREATE INDEX "IDX_9937ea48d7ae97ffb4f3f063a4" ON antenna_note USING btree (read)`,
+ );
+ await queryRunner.query(
+ `CREATE INDEX "IDX_bd0397be22147e17210940e125" ON antenna_note USING btree ("noteId")`,
+ );
+ await queryRunner.query(
+ `ALTER TABLE ONLY antenna_note ADD CONSTRAINT "FK_0d775946662d2575dfd2068a5f5" FOREIGN KEY ("antennaId") REFERENCES antenna(id) ON DELETE CASCADE`,
+ );
+ await queryRunner.query(
+ `ALTER TABLE ONLY antenna_note ADD CONSTRAINT "FK_bd0397be22147e17210940e125b" FOREIGN KEY ("noteId") REFERENCES note(id) ON DELETE CASCADE`,
+ );
+ }
}
diff --git a/packages/backend/migration/1680582195041-cleanup.js b/packages/backend/migration/1680582195041-cleanup.js
index c587e456a..a91d6ff3c 100644
--- a/packages/backend/migration/1680582195041-cleanup.js
+++ b/packages/backend/migration/1680582195041-cleanup.js
@@ -1,11 +1,64 @@
export class cleanup1680582195041 {
- name = 'cleanup1680582195041'
+ name = "cleanup1680582195041";
- async up(queryRunner) {
- await queryRunner.query(`DROP TABLE "notification" `);
- }
+ async up(queryRunner) {
+ await queryRunner.query(`DROP TABLE "notification"`);
+ }
- async down(queryRunner) {
-
- }
+ async down(queryRunner) {
+ await queryRunner.query(
+ `CREATE TABLE notification ( id character varying(32) NOT NULL, "createdAt" timestamp with time zone NOT NULL, "notifieeId" character varying(32) NOT NULL, "notifierId" character varying(32), "isRead" boolean DEFAULT false NOT NULL, "noteId" character varying(32), reaction character varying(128), choice integer, "followRequestId" character varying(32), type notification_type_enum NOT NULL, "customBody" character varying(2048), "customHeader" character varying(256), "customIcon" character varying(1024), "appAccessTokenId" character varying(32), achievement character varying(128))`,
+ );
+ await queryRunner.query(
+ `COMMENT ON COLUMN notification."createdAt" IS 'The created date of the Notification.'`,
+ );
+ await queryRunner.query(
+ `COMMENT ON COLUMN notification."notifieeId" IS 'The ID of recipient user of the Notification.'`,
+ );
+ await queryRunner.query(
+ `COMMENT ON COLUMN notification."notifierId" IS 'The ID of sender user of the Notification.'`,
+ );
+ await queryRunner.query(
+ `COMMENT ON COLUMN notification."isRead" IS 'Whether the Notification is read.'`,
+ );
+ await queryRunner.query(
+ `COMMENT ON COLUMN notification.type IS 'The type of the Notification.'`,
+ );
+ await queryRunner.query(
+ `ALTER TABLE ONLY notification ADD CONSTRAINT "PK_705b6c7cdf9b2c2ff7ac7872cb7" PRIMARY KEY (id)`,
+ );
+ await queryRunner.query(
+ `CREATE INDEX "IDX_080ab397c379af09b9d2169e5b" ON notification USING btree ("isRead")`,
+ );
+ await queryRunner.query(
+ `CREATE INDEX "IDX_33f33cc8ef29d805a97ff4628b" ON notification USING btree (type)`,
+ );
+ await queryRunner.query(
+ `CREATE INDEX "IDX_3b4e96eec8d36a8bbb9d02aa71" ON notification USING btree ("notifierId")`,
+ );
+ await queryRunner.query(
+ `CREATE INDEX "IDX_3c601b70a1066d2c8b517094cb" ON notification USING btree ("notifieeId")`,
+ );
+ await queryRunner.query(
+ `CREATE INDEX "IDX_b11a5e627c41d4dc3170f1d370" ON notification USING btree ("createdAt")`,
+ );
+ await queryRunner.query(
+ `CREATE INDEX "IDX_e22bf6bda77b6adc1fd9e75c8c" ON notification USING btree ("appAccessTokenId")`,
+ );
+ await queryRunner.query(
+ `ALTER TABLE ONLY notification ADD CONSTRAINT "FK_3b4e96eec8d36a8bbb9d02aa710" FOREIGN KEY ("notifierId") REFERENCES "user"(id) ON DELETE CASCADE`,
+ );
+ await queryRunner.query(
+ `ALTER TABLE ONLY notification ADD CONSTRAINT "FK_3c601b70a1066d2c8b517094cb9" FOREIGN KEY ("notifieeId") REFERENCES "user"(id) ON DELETE CASCADE`,
+ );
+ await queryRunner.query(
+ `ALTER TABLE ONLY notification ADD CONSTRAINT "FK_769cb6b73a1efe22ddf733ac453" FOREIGN KEY ("noteId") REFERENCES note(id) ON DELETE CASCADE`,
+ );
+ await queryRunner.query(
+ `ALTER TABLE ONLY notification ADD CONSTRAINT "FK_bd7fab507621e635b32cd31892c" FOREIGN KEY ("followRequestId") REFERENCES follow_request(id) ON DELETE CASCADE`,
+ );
+ await queryRunner.query(
+ `ALTER TABLE ONLY notification ADD CONSTRAINT "FK_e22bf6bda77b6adc1fd9e75c8c9" FOREIGN KEY ("appAccessTokenId") REFERENCES access_token(id) ON DELETE CASCADE`,
+ );
+ }
}

View file

@ -1,13 +1,17 @@
# Unreleased
# Notice for server administrators
You can skip intermediate versions when upgrading from an old version, but please read the notices and follow the instructions for each intermediate version before [upgrading](./upgrade.md).
## v20240319
The full-text search engine used in Firefish has been changed to [PGroonga](https://pgroonga.github.io/). This is no longer an optional feature, so please enable PGroonga on your system. If you are using Sonic, Meilisearch, or Elasticsearch, you can also uninstall it from your system and remove the settings from `.config/default.yml`.
## For systemd/pm2 users
### For systemd/pm2 users
- Required Node.js version has been bumped from v18.16.0 to v18.17.0.
- You need to install PGroonga on your system. Please follow the instructions below.
### 1. Install PGroonga
#### 1. Install PGroonga
Please execute `psql --version` to check your PostgreSQL major version. This will print a message like this:
@ -17,9 +21,9 @@ psql (PostgreSQL) 16.1
In this case, your PostgreSQL major version is `16`.
There are official installation instructions for many operating systems on <https://pgroonga.github.io/install>, so please follow the instructions on this page. However, since many users are using Ubuntu, and there are no instructions for Arch Linux, we explicitly list the instructions for Ubuntu and Arch Linux here. Please keep in mind that this is not official information and the procedures may change.
There are official installation instructions for many operating systems on <https://pgroonga.github.io/install>, so please follow the instructions on this page. However, since many users are using Ubuntu, and there are no instructions for Arch Linux and Fedora, we explicitly list the instructions for Ubuntu, Arch Linux and Fedora here. Please keep in mind that this is not official information and the procedures may change.
#### Ubuntu
##### Ubuntu
1. Add apt repository
```sh
@ -39,7 +43,7 @@ There are official installation instructions for many operating systems on <http
sudo apt install postgresql-16-pgdg-pgroonga
```
#### Arch Linux
##### Arch Linux
You can install PGroonga from the Arch User Repository.
@ -49,7 +53,40 @@ git clone https://aur.archlinux.org/pgroonga.git && cd pgroonga && makepkg -si
# or yay -S pgroonga
```
### 2. Enable PGroonga
##### Fedora
You need to build PGroonga from source and create a policy package.
```sh
sudo dnf install make groonga-devel postgresql-server-devel redhat-rpm-config
wget https://packages.groonga.org/source/pgroonga/pgroonga-3.1.8.tar.gz
tar xvf pgroonga-3.1.8.tar.gz
cd pgroonga-3.1.8
make
sudo make install
```
```sh
cat > pgroonga.te << EOF
module pgroonga 1.0;
require {
type postgresql_t;
type postgresql_db_t;
class file map;
}
allow postgresql_t postgresql_db_t:file map;
EOF
```
```sh
checkmodule -M -m -o pgroonga.mod pgroonga.te
semodule_package -o pgroonga.pp -m pgroonga.mod
sudo semodule -i pgroonga.pp
```
#### 2. Enable PGroonga
After the installation, please execute this command to enable PGroonga:
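The command in question falls outside this diff hunk; for reference, it is along the lines of the one used in the installation docs in this changeset (assuming your database is named `firefish_db`):
```sh
sudo -u postgres psql --dbname=firefish_db --command='CREATE EXTENSION pgroonga;'
```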
@ -66,18 +103,18 @@ db:
pass: password
```
## For Docker/Podman users
### For Docker/Podman users
Please edit your `docker-compose.yml` to replace the database container image from `docker.io/postgres` to `docker.io/groonga/pgroonga`.
Please make sure to use the same PostgreSQL version. If you are using `docker.io/postgres:16-alpine` (PostgreSQL v16), the corresponding image tag is `docker.io/groonga/pgroonga:latest-alpine-16` (or `docker.io/groonga/pgroonga:latest-alpine-16-slim`).
The list of tags can be found on <https://hub.docker.com/r/groonga/pgroonga/tags>. Tags are named as `{PGroonga version}-{alpine or debian}-{PostgreSQL major version}`.
The list of tags can be found on <https://hub.docker.com/r/groonga/pgroonga/tags>.
Please make sure to use the same PostgreSQL version. If you are using `docker.io/postgres:16-alpine` (PostgreSQL v16), the corresponding image is `docker.io/groonga/pgroonga:3.1.8-alpine-16` (or `docker.io/groonga/pgroonga:3.1.8-alpine-16-slim`). There are also tags called `latest-alpine-16` and `latest-alpine-16-slim`, but please be careful if you use these tags since [PGroonga may introduce breaking changes](https://pgroonga.github.io/upgrade/), similar to PostgreSQL.
```yaml
db:
restart: unless-stopped
image: docker.io/groonga/pgroonga:latest-alpine-16-slim # change here
image: docker.io/groonga/pgroonga:3.1.8-alpine-16-slim # change here
container_name: firefish_db
```
@ -95,15 +132,15 @@ docker pull registry.firefish.dev/firefish/firefish && docker-compose up --detac
# or podman pull registry.firefish.dev/firefish/firefish && podman-compose up --detach
```
# v20240301
## v20240301
## For all users
### For all users
A new setting item has been added to control the log levels, so please consider updating your `.config/default.yml`. ([example settings](https://firefish.dev/firefish/firefish/-/blob/e7689fb302a0eed192b9515162258a39800f838a/.config/example.yml#L170-179))
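For reference, a sketch of adding such a setting, mirroring the `logLevel` block used in the example configs in this changeset (only do this if `logLevel` is not already present in your config; adjust the levels to taste):
```sh
cat >> .config/default.yml << 'EOF'
logLevel: [
  'error',
  'success',
  'warning',
  'info'
]
EOF
```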
# v20240225
## v20240225
## For Docker/Podman users
### For Docker/Podman users
- The bug where `custom` directory was not working has (finally) been fixed. Please add the `custom` directory to `volumes` in your `docker-compose.yml`:
```yaml
@ -118,9 +155,9 @@ A new setting item has been added to control the log levels, so please consider
- ./.config:/firefish/.config:ro
```
# v20240222
## v20240222
## For Docker/Podman users
### For Docker/Podman users
- You only need to pull the new container image (`docker/podman pull`) to upgrade your server, so we assume that many of you don't update the code (`git pull --ff`), but it's still worth noting here that we have renamed `docker-compose.yml` to `docker-compose.example.yml` in the repository, and `docker-compose.yml` is now set to be untracked by git.
- Since `docker-compose.yml` may be edited by users (e.g., change port number, add reverse proxy), it shouldn't have been tracked by git in the first place.
@ -146,9 +183,9 @@ A new setting item has been added to control the log levels, so please consider
- Also, PostgreSQL v12.2 (`docker.io/postgres:12.2-alpine`) has been used in this compose file, but we highly recommend that you upgrade it to a newer version (e.g., `docker.io/postgres:16-alpine`).
- Note: some manual (painful) operations are needed to upgrade the PostgreSQL major version, so please be careful when performing upgrades: <https://github.com/docker-library/postgres/issues/37>
# v20240214
## v20240214
## For systemd/pm2 users
### For systemd/pm2 users
- Required Rust version has been bumped from v1.70 to v1.74.
```sh
@ -156,9 +193,9 @@ A new setting item has been added to control the log levels, so please consider
rustup update # update version
```
# v20240213
## v20240213
## For systemd/pm2 users
### For systemd/pm2 users
- `packages/backend/native-utils` can be removed.
- This directory was removed in the repository, but it's not completely removed from your system by `git pull --ff`, because some folders like `packages/backend/native-utils/built` are not tracked by git.
@ -167,16 +204,16 @@ A new setting item has been added to control the log levels, so please consider
rm --recursive --force packages/backend/native-utils
```
# v20240206
## v20240206
## For all users
### For all users
- The git repository has been moved, so please update the `git remote` url.
```sh
git remote set-url origin https://firefish.dev/firefish/firefish.git
```
## For systemd/pm2 users
### For systemd/pm2 users
- Required Rust version has been bumped from v1.68 to v1.70.
- `libvips` is no longer required (unless your server os is *BSD), so you may uninstall it from your system. Make sure to execute the following commands after that:
@ -185,6 +222,6 @@ A new setting item has been added to control the log levels, so please consider
pnpm install
```
## For Docker/Podman users
### For Docker/Podman users
- The image tag has been changed to `registry.firefish.dev/firefish/firefish:latest`, so please update `docker-compose.yml`.

View file

@ -1,23 +0,0 @@
diff --git a/packages/backend/migration/1665091090561-add-renote-muting.js b/packages/backend/migration/1665091090561-add-renote-muting.js
index 2c76aaff5..f8541c818 100644
--- a/packages/backend/migration/1665091090561-add-renote-muting.js
+++ b/packages/backend/migration/1665091090561-add-renote-muting.js
@@ -4,18 +4,6 @@ export class addRenoteMuting1665091090561 {
}
async up(queryRunner) {
- await queryRunner.query(
- `CREATE TABLE "renote_muting" ("id" character varying(32) NOT NULL, "createdAt" TIMESTAMP WITH TIME ZONE NOT NULL, "muteeId" character varying(32) NOT NULL, "muterId" character varying(32) NOT NULL, CONSTRAINT "PK_renoteMuting_id" PRIMARY KEY ("id"))`,
- );
- await queryRunner.query(
- `CREATE INDEX "IDX_renote_muting_createdAt" ON "muting" ("createdAt") `,
- );
- await queryRunner.query(
- `CREATE INDEX "IDX_renote_muting_muteeId" ON "muting" ("muteeId") `,
- );
- await queryRunner.query(
- `CREATE INDEX "IDX_renote_muting_muterId" ON "muting" ("muterId") `,
- );
}
async down(queryRunner) {}

41
docs/upgrade.md Normal file
View file

@ -0,0 +1,41 @@
# Upgrade instruction
## For systemd/pm2 users
1. Check [`docs/notice-for-admins.md`](./notice-for-admins.md)
1. Stop the server
```sh
sudo systemctl stop your-firefish-service.service
# or pm2 stop firefish
```
1. Pull the latest source code
```sh
git checkout -- packages/backend/assets
git pull --ff origin main
```
1. Build Firefish and apply changes to the database
```sh
corepack prepare pnpm@latest --activate
pnpm install --frozen-lockfile
NODE_ENV='production' NODE_OPTIONS='--max_old_space_size=3072' pnpm run rebuild
pnpm run migrate
```
1. Start the server
```sh
sudo systemctl start your-firefish-service.service
# or pm2 start firefish
```
## For Docker/Podman users
1. Check [`docs/notice-for-admins.md`](./notice-for-admins.md)
1. Pull the latest container image
```sh
docker pull registry.firefish.dev/firefish/firefish:latest
# or podman pull registry.firefish.dev/firefish/firefish:latest
```
1. Start the container
```sh
docker compose up --detach
# or podman-compose up --detach
```

View file

@ -1,13 +0,0 @@
# Replace example.tld with your domain
<VirtualHost *:80>
ServerName example.tld
# For WebSocket
ProxyPass "/streaming" "ws://127.0.0.1:3000/streaming/"
# Proxy to Node
ProxyPass "/" "http://127.0.0.1:3000/"
ProxyPassReverse "/" "http://127.0.0.1:3000/"
ProxyPreserveHost On
# For files proxy
AllowEncodedSlashes On
</VirtualHost>

View file

@ -1,294 +0,0 @@
{
"nodes": {
"devenv": {
"inputs": {
"flake-compat": "flake-compat",
"nix": "nix",
"nixpkgs": "nixpkgs",
"pre-commit-hooks": "pre-commit-hooks"
},
"locked": {
"lastModified": 1685521914,
"narHash": "sha256-0fdFP5IASLwJ0PSXrErW8PZon9TVYmi8VRF8OtjGkV4=",
"owner": "cachix",
"repo": "devenv",
"rev": "e206d8f2e3e8d6aa943656052f15bdfea8146b8d",
"type": "github"
},
"original": {
"owner": "cachix",
"repo": "devenv",
"type": "github"
}
},
"fenix": {
"inputs": {
"nixpkgs": [
"nixpkgs"
],
"rust-analyzer-src": "rust-analyzer-src"
},
"locked": {
"lastModified": 1685514167,
"narHash": "sha256-urRxF0ZGSNeZjM4kALNg3wTh7fBscbqQmS6S/HU7Wms=",
"owner": "nix-community",
"repo": "fenix",
"rev": "3abfea51663583186f687c49a157eab1639349ca",
"type": "github"
},
"original": {
"owner": "nix-community",
"repo": "fenix",
"type": "github"
}
},
"flake-compat": {
"flake": false,
"locked": {
"lastModified": 1673956053,
"narHash": "sha256-4gtG9iQuiKITOjNQQeQIpoIB6b16fm+504Ch3sNKLd8=",
"owner": "edolstra",
"repo": "flake-compat",
"rev": "35bb57c0c8d8b62bbfd284272c928ceb64ddbde9",
"type": "github"
},
"original": {
"owner": "edolstra",
"repo": "flake-compat",
"type": "github"
}
},
"flake-parts": {
"inputs": {
"nixpkgs-lib": "nixpkgs-lib"
},
"locked": {
"lastModified": 1685457039,
"narHash": "sha256-bEFtQm+YyLxQjKQAaBHJyPN1z2wbhBnr2g1NJWSYjwM=",
"owner": "hercules-ci",
"repo": "flake-parts",
"rev": "80717d11615b6f42d1ad2e18ead51193fc15de69",
"type": "github"
},
"original": {
"owner": "hercules-ci",
"repo": "flake-parts",
"type": "github"
}
},
"flake-utils": {
"locked": {
"lastModified": 1667395993,
"narHash": "sha256-nuEHfE/LcWyuSWnS8t12N1wc105Qtau+/OdUAjtQ0rA=",
"owner": "numtide",
"repo": "flake-utils",
"rev": "5aed5285a952e0b949eb3ba02c12fa4fcfef535f",
"type": "github"
},
"original": {
"owner": "numtide",
"repo": "flake-utils",
"type": "github"
}
},
"gitignore": {
"inputs": {
"nixpkgs": [
"devenv",
"pre-commit-hooks",
"nixpkgs"
]
},
"locked": {
"lastModified": 1660459072,
"narHash": "sha256-8DFJjXG8zqoONA1vXtgeKXy68KdJL5UaXR8NtVMUbx8=",
"owner": "hercules-ci",
"repo": "gitignore.nix",
"rev": "a20de23b925fd8264fd7fad6454652e142fd7f73",
"type": "github"
},
"original": {
"owner": "hercules-ci",
"repo": "gitignore.nix",
"type": "github"
}
},
"lowdown-src": {
"flake": false,
"locked": {
"lastModified": 1633514407,
"narHash": "sha256-Dw32tiMjdK9t3ETl5fzGrutQTzh2rufgZV4A/BbxuD4=",
"owner": "kristapsdz",
"repo": "lowdown",
"rev": "d2c2b44ff6c27b936ec27358a2653caaef8f73b8",
"type": "github"
},
"original": {
"owner": "kristapsdz",
"repo": "lowdown",
"type": "github"
}
},
"nix": {
"inputs": {
"lowdown-src": "lowdown-src",
"nixpkgs": [
"devenv",
"nixpkgs"
],
"nixpkgs-regression": "nixpkgs-regression"
},
"locked": {
"lastModified": 1676545802,
"narHash": "sha256-EK4rZ+Hd5hsvXnzSzk2ikhStJnD63odF7SzsQ8CuSPU=",
"owner": "domenkozar",
"repo": "nix",
"rev": "7c91803598ffbcfe4a55c44ac6d49b2cf07a527f",
"type": "github"
},
"original": {
"owner": "domenkozar",
"ref": "relaxed-flakes",
"repo": "nix",
"type": "github"
}
},
"nixpkgs": {
"locked": {
"lastModified": 1678875422,
"narHash": "sha256-T3o6NcQPwXjxJMn2shz86Chch4ljXgZn746c2caGxd8=",
"owner": "NixOS",
"repo": "nixpkgs",
"rev": "126f49a01de5b7e35a43fd43f891ecf6d3a51459",
"type": "github"
},
"original": {
"owner": "NixOS",
"ref": "nixpkgs-unstable",
"repo": "nixpkgs",
"type": "github"
}
},
"nixpkgs-lib": {
"locked": {
"dir": "lib",
"lastModified": 1682879489,
"narHash": "sha256-sASwo8gBt7JDnOOstnps90K1wxmVfyhsTPPNTGBPjjg=",
"owner": "NixOS",
"repo": "nixpkgs",
"rev": "da45bf6ec7bbcc5d1e14d3795c025199f28e0de0",
"type": "github"
},
"original": {
"dir": "lib",
"owner": "NixOS",
"ref": "nixos-unstable",
"repo": "nixpkgs",
"type": "github"
}
},
"nixpkgs-regression": {
"locked": {
"lastModified": 1643052045,
"narHash": "sha256-uGJ0VXIhWKGXxkeNnq4TvV3CIOkUJ3PAoLZ3HMzNVMw=",
"owner": "NixOS",
"repo": "nixpkgs",
"rev": "215d4d0fd80ca5163643b03a33fde804a29cc1e2",
"type": "github"
},
"original": {
"owner": "NixOS",
"repo": "nixpkgs",
"rev": "215d4d0fd80ca5163643b03a33fde804a29cc1e2",
"type": "github"
}
},
"nixpkgs-stable": {
"locked": {
"lastModified": 1678872516,
"narHash": "sha256-/E1YwtMtFAu2KUQKV/1+KFuReYPANM2Rzehk84VxVoc=",
"owner": "NixOS",
"repo": "nixpkgs",
"rev": "9b8e5abb18324c7fe9f07cb100c3cd4a29cda8b8",
"type": "github"
},
"original": {
"owner": "NixOS",
"ref": "nixos-22.11",
"repo": "nixpkgs",
"type": "github"
}
},
"nixpkgs_2": {
"locked": {
"lastModified": 1685399834,
"narHash": "sha256-Lt7//5snriXSdJo5hlVcDkpERL1piiih0UXIz1RUcC4=",
"owner": "nixos",
"repo": "nixpkgs",
"rev": "58c85835512b0db938600b6fe13cc3e3dc4b364e",
"type": "github"
},
"original": {
"owner": "nixos",
"ref": "nixpkgs-unstable",
"repo": "nixpkgs",
"type": "github"
}
},
"pre-commit-hooks": {
"inputs": {
"flake-compat": [
"devenv",
"flake-compat"
],
"flake-utils": "flake-utils",
"gitignore": "gitignore",
"nixpkgs": [
"devenv",
"nixpkgs"
],
"nixpkgs-stable": "nixpkgs-stable"
},
"locked": {
"lastModified": 1682596858,
"narHash": "sha256-Hf9XVpqaGqe/4oDGr30W8HlsWvJXtMsEPHDqHZA6dDg=",
"owner": "cachix",
"repo": "pre-commit-hooks.nix",
"rev": "fb58866e20af98779017134319b5663b8215d912",
"type": "github"
},
"original": {
"owner": "cachix",
"repo": "pre-commit-hooks.nix",
"type": "github"
}
},
"root": {
"inputs": {
"devenv": "devenv",
"fenix": "fenix",
"flake-parts": "flake-parts",
"nixpkgs": "nixpkgs_2"
}
},
"rust-analyzer-src": {
"flake": false,
"locked": {
"lastModified": 1685465261,
"narHash": "sha256-aJ2nUinUrNcFi+pb47bS5IIAeSiUEEPLJY8W4Q8Pcjk=",
"owner": "rust-lang",
"repo": "rust-analyzer",
"rev": "d2b3caa5b5694125fad04a9699e919444439f6a2",
"type": "github"
},
"original": {
"owner": "rust-lang",
"ref": "nightly",
"repo": "rust-analyzer",
"type": "github"
}
}
},
"root": "root",
"version": 7
}

View file

@ -1,86 +0,0 @@
{
description = "Firefish development flake";
inputs = {
nixpkgs.url = "github:nixos/nixpkgs/nixpkgs-unstable";
# Flake Parts framework(https://flake.parts)
flake-parts.url = "github:hercules-ci/flake-parts";
# Devenv for better devShells(https://devenv.sh)
devenv.url = "github:cachix/devenv";
# Fenix for rust development
fenix.url = "github:nix-community/fenix";
fenix.inputs.nixpkgs.follows = "nixpkgs";
};
outputs = inputs@{ flake-parts, ... }:
flake-parts.lib.mkFlake { inherit inputs; } {
imports = [
inputs.devenv.flakeModule
];
# Define the systems that this works on. Only tested with x86_64-linux; add more if you test and it works.
systems = [
"x86_64-linux"
];
# Expose these attributes for every system defined above.
perSystem = { config, pkgs, ... }: {
# Devenv shells
devenv = {
shells = {
# The default shell, used by nix-direnv
default = {
name = "firefish-dev-shell";
# Add additional packages to our environment
packages = [
pkgs.nodePackages.pnpm
pkgs.python3
];
# No need to warn on a new version, we'll update as needed.
devenv.warnOnNewVersion = false;
# Enable typescript support
languages.typescript.enable = true;
# Enable javascript for NPM and PNPM
languages.javascript.enable = true;
languages.javascript.package = pkgs.nodejs_18;
# Enable stable Rust for the backend
languages.rust.enable = true;
languages.rust.version = "stable";
processes = {
dev-server.exec = "pnpm run dev";
};
scripts = {
build.exec = "pnpm run build";
clean.exec = "pnpm run clean";
clear-state.exec = "rm -rf .devenv/state/redis .devenv/state/postgres";
format.exec = "pnpm run format";
install-deps.exec = "pnpm install";
migrate.exec = "pnpm run migrate";
prepare-config.exec = "cp .config/devenv.yml .config/default.yml";
};
services = {
postgres = {
enable = true;
package = pkgs.postgresql_12;
initialDatabases = [{
name = "firefish";
}];
initialScript = ''
CREATE USER firefish WITH PASSWORD 'firefish';
ALTER USER firefish WITH SUPERUSER;
GRANT ALL ON DATABASE firefish TO firefish;
'';
listen_addresses = "127.0.0.1";
port = 5432;
};
redis = {
enable = true;
bind = "127.0.0.1";
port = 6379;
};
};
};
};
};
};
};
}

View file

@ -2246,10 +2246,12 @@ replyUnmute: Treu el silencia de les respostes a les línies de temps
searchWordsDescription: "Per cercar publicacions, escriu el terme a buscar. Separa
les paraules amb espais per fer condicions AND o escriules dins de cometes per fer
una cerca OR.\nPer exemple, 'dia nit' trobarà publicacions que continguin tan 'dia'
com 'nit', i 'dia OR nit' trobaran publicacions que continguin tan 'dia' com 'nit'
o ambdues.\nPots combinar condicions AND/OR per exemple '(dia OR nit) endormiscar'.\n
\nSi vols anar a una pàgina d'usuari o publicació en concret, escriu la adreça URL
o la ID en aquest camp i fes clic al botó 'Trobar'. Fent clic a 'Cercar' trobarà
com 'nit', i 'dia OR nit' trobara publicacions que continguin tant 'dia' com 'nit'
(o ambdues).\nPots combinar condicions AND/OR per exemple '(dia OR nit) endormiscar'.\n
Si vols cercar per una seqüencia de paraules (per exemple una frase) has d'escriure-les
entre cometes dobles, per no fer una cerca amb condicionant AND: \"Avui he aprés\"\
\n \nSi vols anar a una pàgina d'usuari o publicació en concret, escriu la adreça
URL o la ID en aquest camp i fes clic al botó 'Trobar'. Fent clic a 'Cercar' trobarà
publicacions que, literalment , continguin la ID/adreça URL."
searchPostsWithFiles: Només publicacions amb fitxers
searchCwAndAlt: Inclou avisos de contingut i arxius amb descripcions
@ -2275,7 +2277,7 @@ messagingUnencryptedInfo: Els xats a Firefish no són encriptats d'extrem a extr
No comparteixis dades sensibles fent servir Firefish.
searchRangeDescription: "Si vols filtrar per un període de temps, has de fer servir
aquest format: 20220615-20231031\n\nSi no escrius l'any (per exemple 0105-0106 o
20231105-0110), serà interpretat com l'any en curs.\n\nInclús pots morir la data
20231105-0110), serà interpretat com l'any en curs.\n\nInclús pots ometre la data
de començament o de finalització. Per exemple, -0102 filtrarà els resultats per
mostrar només publicacions fetes abans del 2 de gener d'aquest any, i 20231026-
filtrarà els resultats per mostrar publicacions fetes després del 26 d'octubre del

View file

@ -1193,7 +1193,7 @@ releaseToReload: "Release to reload"
reloading: "Reloading"
enableTimelineStreaming: "Update timelines automatically"
searchWords: "Words to search / ID or URL to lookup"
searchWordsDescription: "To search for posts, enter the search term. Separate words
searchWordsDescription: "Enter the search term here to search for posts. Separate words
with a space for an AND search, or 'OR' (without quotes) between words for an OR
search.\nFor example, 'morning night' will find posts that contain both 'morning'
and 'night', and 'morning OR night' will find posts that contain either 'morning'

View file

@ -31,12 +31,12 @@ uploading: "正在上传..."
save: "保存"
users: "用户"
addUser: "添加用户"
favorite: "添加到书签"
favorites: "书签"
favorite: "添加到收藏"
favorites: "收藏"
unfavorite: "取消收藏"
favorited: "已添加到书签。"
alreadyFavorited: "书签中已存在。"
cantFavorite: "无法添加到书签。"
favorited: "已添加到收藏。"
alreadyFavorited: "收藏中已存在。"
cantFavorite: "无法添加到收藏。"
pin: "置顶"
unpin: "取消置顶"
copyContent: "复制内容"
@ -689,7 +689,7 @@ disableShowingAnimatedImages: "不播放动画"
verificationEmailSent: "已发送确认电子邮件。请访问电子邮件中的链接以完成验证。"
notSet: "未设置"
emailVerified: "电子邮件地址已验证"
noteFavoritesCount: "加入书签的帖子数"
noteFavoritesCount: "加入收藏的帖子数"
pageLikesCount: "页面点赞次数"
pageLikedCount: "页面被点赞次数"
contact: "联系人"
@ -1281,8 +1281,8 @@ _permissions:
"write:blocks": "编辑屏蔽名单"
"read:drive": "查看网盘"
"write:drive": "管理网盘文件"
"read:favorites": "查看收藏"
"write:favorites": "编辑收藏"
"read:favorites": "查看收藏"
"write:favorites": "编辑收藏"
"read:following": "查看关注信息"
"write:following": "关注/取消关注其它账号"
"read:messaging": "查看聊天消息"
@ -2045,8 +2045,8 @@ searchPostsWithFiles: 只带有文件的帖子
searchCwAndAlt: 包括内容警告和文件描述
publishTimelines: 为访客发布时间线
publishTimelinesDescription: 如果启用,在用户登出时本地和全局时间线也会显示在 {url} 上。
searchWordsDescription: "要搜索帖子,请输入关键词。交集搜索关键词之间使用空格进行区分,并集搜索关键词之间使用 OR 进行区分。\n例如 '早上
晚上' 将查找包含 '早上' 和 '晚上' 的帖子,而 '早上 OR 晚上' 将查找包含 '早上' 或 '晚上' (以及同时包含两者)的帖子。\n您还可以组合交集/并集条件,例如
searchWordsDescription: "在此处输入搜索词以搜索帖子。交集搜索关键词之间使用空格进行区分,并集搜索关键词之间使用 OR 进行区分。\n例如
'早上 晚上' 将查找包含 '早上' 和 '晚上' 的帖子,而 '早上 OR 晚上' 将查找包含 '早上' 或 '晚上' (以及同时包含两者)的帖子。\n您还可以组合交集/并集条件,例如
'(早上 OR 晚上) 困了' 。\n如果您想搜索单词序列例如一个英语句子您必须将其放在双引号中例如 \"Today I learned\" 以区分于交集搜索。\n
\n如果您想转到特定的用户页面或帖子页面请在此字段中输入用户 ID 或 URL然后单击 “查询” 按钮。 单击 “搜索” 将搜索字面包含用户 ID/URL
的帖子。"

View file

@ -1,6 +1,6 @@
{
"name": "firefish",
"version": "20240301",
"version": "20240319",
"repository": {
"type": "git",
"url": "https://firefish.dev/firefish/firefish.git"
@ -19,10 +19,10 @@
"gulp": "gulp build",
"watch": "pnpm run dev",
"dev": "pnpm node ./scripts/dev.mjs",
"dev:up": "pnpm node ./scripts/dev-up.mjs",
"dev:down": "pnpm node ./scripts/dev-down.mjs",
"dev:init": "pnpm run dev:down && pnpm run dev:up",
"dev:staging": "NODE_OPTIONS=--max_old_space_size=3072 NODE_ENV=development pnpm run build && pnpm run start",
"db:up": "pnpm node ./scripts/db-up.mjs",
"db:down": "pnpm node ./scripts/db-down.mjs",
"db:init": "pnpm run dev:down && pnpm run dev:up",
"lint": "pnpm -r --parallel run lint",
"debug": "pnpm run build:debug && pnpm run start",
"build:debug": "pnpm run clean && pnpm node ./scripts/dev-build.mjs && pnpm run gulp",

View file

@ -21,7 +21,6 @@
]
}
},
"license": "MIT",
"devDependencies": {
"@napi-rs/cli": "2.18.0",
"ava": "6.1.2"

View file

@ -1,28 +0,0 @@
import { MigrationInterface, QueryRunner } from "typeorm";
export class RemoveMentionedUsersColumn1710688552234
implements MigrationInterface
{
public async up(queryRunner: QueryRunner): Promise<void> {
await queryRunner.query(
`ALTER TABLE "note" DROP COLUMN "mentionedRemoteUsers"`,
);
}
public async down(queryRunner: QueryRunner): Promise<void> {
await queryRunner.query(
`ALTER TABLE "note" ADD "mentionedRemoteUsers" TEXT NOT NULL DEFAULT '[]'::text`,
);
await queryRunner.query(`CREATE TEMP TABLE IF NOT EXISTS "temp_mentions" AS
SELECT "id", "url", "uri", "username", "host"
FROM "user"
JOIN "user_profile" ON "user"."id" = "user_profile". "userId" WHERE "user"."host" IS NOT NULL`);
await queryRunner.query(
`CREATE UNIQUE INDEX "temp_mentions_id" ON "temp_mentions"("id")`,
);
await queryRunner.query(`UPDATE "note" SET "mentionedRemoteUsers" = (
SELECT COALESCE(json_agg(row_to_json("data")::jsonb - 'id')::text, '[]') FROM "temp_mentions" AS "data"
WHERE "data"."id" = ANY("note"."mentions")
)`);
}
}

View file

@ -172,6 +172,12 @@ export class Note {
})
public mentions: User["id"][];
// FIXME: WHAT IS THIS
@Column("text", {
default: "[]",
})
public mentionedRemoteUsers: string;
@Column("varchar", {
length: 128,
array: true,
@ -301,3 +307,10 @@ export class Note {
}
}
}
export type IMentionedRemoteUsers = {
uri: string;
url?: string;
username: string;
host: string;
}[];
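
For reference, a minimal sketch of the kind of JSON string this text column stores and how the exported type is used to read it (the host, URIs, and username below are invented; the import path and type come from the entity above):

import type { IMentionedRemoteUsers } from "@/models/entities/note.js";

// Hypothetical value of note.mentionedRemoteUsers for a single remote mention.
const stored =
	'[{"uri":"https://remote.example/users/abc","url":"https://remote.example/@alice","username":"alice","host":"remote.example"}]';

// The column is plain text, so readers parse it and cast to the exported type,
// mirroring what the ActivityPub renderer changes below do.
const mentions = JSON.parse(stored) as IMentionedRemoteUsers;
const uris = mentions.map((m) => m.uri); // ["https://remote.example/users/abc"]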

View file

@ -1,6 +1,6 @@
import { In, IsNull } from "typeorm";
import config from "@/config/index.js";
import type { Note } from "@/models/entities/note.js";
import type { Note, IMentionedRemoteUsers } from "@/models/entities/note.js";
import type { DriveFile } from "@/models/entities/drive-file.js";
import { DriveFiles, Notes, Users, Emojis, Polls } from "@/models/index.js";
import type { Emoji } from "@/models/entities/emoji.js";
@ -61,34 +61,33 @@ export default async function renderNote(
const attributedTo = `${config.url}/users/${note.userId}`;
const mentionedUsers =
note.mentions.length > 0
? await Users.findBy({
id: In(note.mentions),
})
: [];
const mentionUris = mentionedUsers
// only remote users
.filter((user) => Users.isRemoteUser(user))
.map((user) => user.uri);
const mentions = (
JSON.parse(note.mentionedRemoteUsers) as IMentionedRemoteUsers
).map((x) => x.uri);
let to: string[] = [];
let cc: string[] = [];
if (note.visibility === "public") {
to = ["https://www.w3.org/ns/activitystreams#Public"];
cc = [`${attributedTo}/followers`].concat(mentionUris);
cc = [`${attributedTo}/followers`].concat(mentions);
} else if (note.visibility === "home") {
to = [`${attributedTo}/followers`];
cc = ["https://www.w3.org/ns/activitystreams#Public"].concat(mentionUris);
cc = ["https://www.w3.org/ns/activitystreams#Public"].concat(mentions);
} else if (note.visibility === "followers") {
to = [`${attributedTo}/followers`];
cc = mentionUris;
cc = mentions;
} else {
to = mentionUris;
to = mentions;
}
const mentionedUsers =
note.mentions.length > 0
? await Users.findBy({
id: In(note.mentions),
})
: [];
const hashtagTags = (note.tags || []).map((tag) => renderHashtag(tag));
const mentionTags = mentionedUsers.map((u) => renderMention(u));
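
As a worked illustration of the visibility-to-addressing mapping above (a sketch only; the actor and mention URIs are invented), a "home"-visibility note with one remote mention would be addressed roughly like this:

// Sketch: resulting addressing for a "home" note with one remote mention.
// attributedTo stands in for the local author's actor URI; all URIs are invented.
const attributedTo = "https://this.example/users/9abc123def";
const mentions = ["https://remote.example/users/alice"];

const to = [`${attributedTo}/followers`];
const cc = ["https://www.w3.org/ns/activitystreams#Public", ...mentions];
// Followers are addressed directly, the note remains publicly listable via cc,
// and the mentioned remote actor is included so their server receives a copy.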

View file

@ -134,6 +134,13 @@ export async function createMessage(
userId: message.userId,
visibility: "specified",
mentions: [recipientUser].map((u) => u.id),
mentionedRemoteUsers: JSON.stringify(
[recipientUser].map((u) => ({
uri: u.uri,
username: u.username,
host: u.host,
})),
),
} as Note;
let renderedNote: Record<string, unknown> = await renderNote(

View file

@ -18,6 +18,7 @@ import { registerOrFetchInstanceDoc } from "@/services/register-or-fetch-instanc
import { extractMentions } from "@/misc/extract-mentions.js";
import { extractCustomEmojisFromMfm } from "@/misc/extract-custom-emojis-from-mfm.js";
import { extractHashtags } from "@/misc/extract-hashtags.js";
import type { IMentionedRemoteUsers } from "@/models/entities/note.js";
import { Note } from "@/models/entities/note.js";
import {
Mutings,
@ -750,6 +751,21 @@ async function insertNote(
// Append mentions data
if (mentionedUsers.length > 0) {
insert.mentions = mentionedUsers.map((u) => u.id);
const profiles = await UserProfiles.findBy({ userId: In(insert.mentions) });
insert.mentionedRemoteUsers = JSON.stringify(
mentionedUsers
.filter((u) => Users.isRemoteUser(u))
.map((u) => {
const profile = profiles.find((p) => p.userId === u.id);
const url = profile != null ? profile.url : null;
return {
uri: u.uri,
url: url == null ? undefined : url,
username: u.username,
host: u.host,
} as IMentionedRemoteUsers[0];
}),
);
}
// Create the post
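
A small worked example of what the block above would serialize (a sketch; the users, IDs, and profile URL are invented): local users are filtered out, and the profile URL is attached when a matching profile row is found.

// Hypothetical input: one local and one remote mentioned user (all values invented).
const mentionedUsers = [
	{ id: "1", uri: null, username: "bob", host: null }, // local: filtered out
	{
		id: "2",
		uri: "https://remote.example/users/abc",
		username: "alice",
		host: "remote.example",
	},
];
// Assuming a profile row { userId: "2", url: "https://remote.example/@alice" },
// the serialized "mentionedRemoteUsers" column would read:
// [{"uri":"https://remote.example/users/abc","url":"https://remote.example/@alice",
//   "username":"alice","host":"remote.example"}]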

View file

@ -1,4 +1,4 @@
import { Brackets, In, IsNull, Not } from "typeorm";
import { Brackets, In } from "typeorm";
import { publishNoteStream } from "@/services/stream.js";
import renderDelete from "@/remote/activitypub/renderer/delete.js";
import renderAnnounce from "@/remote/activitypub/renderer/announce.js";
@ -7,7 +7,7 @@ import { renderActivity } from "@/remote/activitypub/renderer/index.js";
import renderTombstone from "@/remote/activitypub/renderer/tombstone.js";
import config from "@/config/index.js";
import type { User, ILocalUser, IRemoteUser } from "@/models/entities/user.js";
import type { Note } from "@/models/entities/note.js";
import type { Note, IMentionedRemoteUsers } from "@/models/entities/note.js";
import { Notes, Users, Instances } from "@/models/index.js";
import {
deliverToFollowers,
@ -199,12 +199,11 @@ async function getMentionedRemoteUsers(note: Note) {
const where = [] as any[];
// mention / reply / dm
if (note.mentions.length > 0) {
where.push({
id: In(note.mentions),
// only remote users, local users are on the server and do not need to be notified
host: Not(IsNull()),
});
const uris = (
JSON.parse(note.mentionedRemoteUsers) as IMentionedRemoteUsers
).map((x) => x.uri);
if (uris.length > 0) {
where.push({ uri: In(uris) });
}
// renote / quote

View file

@ -22,6 +22,7 @@
<script lang="ts" setup>
import preprocess from "@/scripts/preprocess";
import { me } from "@/me";
defineProps<{
text: string;

View file

@ -0,0 +1,38 @@
<template>
<MkLoading v-if="!err" />
<XNotFound v-else />
</template>
<script lang="ts" setup>
import * as os from "@/os";
import { useRouter } from "@/router";
import { userPage } from "@/filters/user";
import { notePage } from "@/filters/note";
import { onMounted, ref, defineAsyncComponent } from "vue";
const XNotFound = defineAsyncComponent(() => import("./not-found.vue"));
const err = ref(false);
const urlParams = new URLSearchParams(window.location.search);
const uri = urlParams.get("uri");
const router = useRouter();
onMounted(() => {
os.api("ap/show", { uri })
.then((res) => {
switch (res.type) {
case "User":
router.push(userPage(res.object));
break;
case "Note":
router.push(notePage(res.object));
break;
default:
err.value = true;
break;
}
})
.catch(() => {
err.value = true;
});
});
</script>
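
For context, a sketch of the kind of request this page is meant to handle (the hosts and note URI are invented): remote-interaction flows in Mastodon-compatible software typically send the browser to /authorize_interaction with the target object's URI as a query parameter.

// Sketch: building the URL this page handles (hosts and object URI are invented).
const localInstance = "https://this.example";
const remoteNote = "https://remote.example/notes/abc123";

const interactionUrl = new URL("/authorize_interaction", localInstance);
interactionUrl.searchParams.set("uri", remoteNote);
// -> https://this.example/authorize_interaction?uri=https%3A%2F%2Fremote.example%2Fnotes%2Fabc123
// Opening this while logged in resolves the URI through the "ap/show" endpoint
// and redirects to the matching local note or user page, as implemented above.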

View file

@ -320,6 +320,11 @@ export const routes = [
component: page(() => import("./pages/follow.vue")),
loginRequired: true,
},
{
path: "/authorize_interaction",
component: page(() => import("./pages/authorize_interaction.vue")),
loginRequired: true,
},
{
path: "/share",
component: page(() => import("./pages/share.vue")),

View file

@ -6,7 +6,7 @@ import { execa } from "execa";
const __dirname = path.dirname(fileURLToPath(import.meta.url));
execa("podman-compose", ["down"], {
cwd: join(__dirname, "/../dev"),
cwd: join(__dirname, "/../dev/db-container"),
stdio: "inherit",
});
})();

View file

@ -6,7 +6,7 @@ import { execa } from "execa";
const __dirname = path.dirname(fileURLToPath(import.meta.url));
execa("podman-compose", ["up", "--detach"], {
cwd: join(__dirname, "/../dev"),
cwd: join(__dirname, "/../dev/db-container"),
stdio: "inherit",
});
})();