Migrating Mastodon
Set up new server
Create a folder for the new container files. I use ~/containers/mastodon/{addr}/.
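For example, with example.com standing in for the instance's address:
mkdir -p ~/containers/mastodon/example.com
cd ~/containers/mastodon/example.com
Most of the host-side commands below assume you're working from inside this folder.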
Create the subfolders that we're going to need
mkdir -p backups nginx/lebase postgres14 public/system redis
Generate a password
cat /dev/urandom | tr -dc "a-zA-Z0-9" | fold -w 24 | head -n 1
In the volume path, replace the period (.) with $PWD, then use that path, the generated password, and the image name in the next command
podman run --rm --name postgres \
-v <volume path>:/var/lib/postgresql/data \
-e POSTGRES_PASSWORD=<password> \
-d <image name>
For example:
podman run --rm --name postgres \
-v $PWD/postgres14:/var/lib/postgresql/data \
-e POSTGRES_PASSWORD="wLWoH4ghPXt7JjYI26Bi5Hfh" \
-d docker.io/postgres:14-alpine
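Postgres takes a few seconds to initialize the new data directory. If you're unsure whether it's finished, follow the container logs until you see "database system is ready to accept connections":
podman logs -f postgres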
Podman will have created a Postgres data directory on disk, so now we want to create the role.
Exec into a psql shell
podman exec -it postgres psql -U postgres
Run the following (replacing <password> with the password generated above)
CREATE USER mastodon WITH PASSWORD '<password>' CREATEDB;
exit
Stop the Postgres container
podman stop postgres
Get the appropriate docker-compose.yml file from the Mastodon GitHub repository and save it to this folder.
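If you'd rather do it from the shell, something like this should grab the file for the tagged release (adjust the tag to match the version you're deploying):
curl -fLo docker-compose.yml https://raw.githubusercontent.com/mastodon/mastodon/v4.2.17/docker-compose.yml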
For reference, here's what my docker-compose.yml file looks like:
version: '3'
services:
  db:
    restart: always
    image: docker.io/postgres:14-alpine
    shm_size: 256mb
    networks:
      - internal_network
    healthcheck:
      test: ['CMD', 'pg_isready', '-U', 'postgres']
    volumes:
      - ./postgres14:/var/lib/postgresql/data
      - ./backups:/backups
    environment:
      - 'POSTGRES_HOST_AUTH_METHOD=trust'

  redis:
    restart: always
    image: docker.io/redis:7-alpine
    networks:
      - internal_network
    healthcheck:
      test: ['CMD', 'redis-cli', 'ping']
    volumes:
      - ./redis:/data

  # es:
  #   restart: always
  #   image: docker.elastic.co/elasticsearch/elasticsearch:7.17.4
  #   environment:
  #     - "ES_JAVA_OPTS=-Xms512m -Xmx512m -Des.enforce.bootstrap.checks=true"
  #     - "xpack.license.self_generated.type=basic"
  #     - "xpack.security.enabled=false"
  #     - "xpack.watcher.enabled=false"
  #     - "xpack.graph.enabled=false"
  #     - "xpack.ml.enabled=false"
  #     - "bootstrap.memory_lock=true"
  #     - "cluster.name=es-mastodon"
  #     - "discovery.type=single-node"
  #     - "thread_pool.write.queue_size=1000"
  #   networks:
  #     - external_network
  #     - internal_network
  #   healthcheck:
  #     test: ["CMD-SHELL", "curl --silent --fail localhost:9200/_cluster/health || exit 1"]
  #   volumes:
  #     - ./elasticsearch:/usr/share/elasticsearch/data
  #   ulimits:
  #     memlock:
  #       soft: -1
  #       hard: -1
  #     nofile:
  #       soft: 65536
  #       hard: 65536
  #   ports:
  #     - '127.0.0.1:9200:9200'

  web:
    build: .
    image: ghcr.io/mastodon/mastodon:v4.2.17
    restart: always
    env_file: .env.production
    command: bash -c "rm -f /mastodon/tmp/pids/server.pid; bundle exec rails s -p 3000"
    networks:
      - external_network
      - internal_network
    healthcheck:
      # prettier-ignore
      test: ['CMD-SHELL', 'wget -q --spider --proxy=off localhost:3000/health || exit 1']
    ports:
      - '127.0.0.1:3000:3000'
    depends_on:
      - db
      - redis
      # - es
    volumes:
      - ./public/system:/mastodon/public/system

  streaming:
    build: .
    image: ghcr.io/mastodon/mastodon:v4.2.17
    restart: always
    env_file: .env.production
    command: node ./streaming
    networks:
      - external_network
      - internal_network
    healthcheck:
      # prettier-ignore
      test: ['CMD-SHELL', 'wget -q --spider --proxy=off localhost:4000/api/v1/streaming/health || exit 1']
    ports:
      - '127.0.0.1:4000:4000'
    depends_on:
      - db
      - redis

  sidekiq:
    build: .
    image: ghcr.io/mastodon/mastodon:v4.2.17
    restart: always
    env_file: .env.production
    command: bundle exec sidekiq
    depends_on:
      - db
      - redis
    networks:
      - external_network
      - internal_network
    volumes:
      - ./public/system:/mastodon/public/system
    healthcheck:
      test: ['CMD-SHELL', "ps aux | grep '[s]idekiq\ 6' || false"]

  ## Uncomment to enable federation with tor instances along with adding the following ENV variables
  ## http_proxy=http://privoxy:8118
  ## ALLOW_ACCESS_TO_HIDDEN_SERVICE=true
  # tor:
  #   image: sirboops/tor
  #   networks:
  #     - external_network
  #     - internal_network
  #
  # privoxy:
  #   image: sirboops/privoxy
  #   volumes:
  #     - ./priv-config:/opt/config
  #   networks:
  #     - external_network
  #     - internal_network

networks:
  external_network:
  internal_network:
    internal: true
Have Podman pull the required images
podman-compose pull
Stop Mastodon on old server
As root, run the following:
systemctl stop 'mastodon-*.service'
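It's also worth disabling the units so a stray reboot doesn't bring the old instance back mid-migration (names assume a standard non-Docker install):
systemctl disable mastodon-web.service mastodon-sidekiq.service mastodon-streaming.service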
Dump and Restore PostgreSQL database
On the old server, run the following:
pg_dump -Fc mastodon_production -f backup.dump
Then copy that file to the new server, into the container folder, and from there into the container.
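scp covers the server-to-server hop (the user and hostname here are placeholders):
scp backup.dump user@newserver:~/containers/mastodon/example.com/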
podman-compose up -d db
podman cp ~/containers/mastodon/example.com/backup.dump examplecom_db_1:/backup.dump
podman exec -t examplecom_db_1 createdb -U mastodon -T template0 mastodon_production
(replace # in -j# with the number of CPUs in your system to improve restore performance)
podman exec -t examplecom_db_1 pg_restore -Fc -j# -U mastodon -n public --no-owner \
--role=mastodon -d mastodon_production --verbose /backup.dump
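To sanity-check the restore, count the rows in a table any working Mastodon database will have, e.g.:
podman exec -t examplecom_db_1 psql -U mastodon -d mastodon_production -c 'SELECT COUNT(*) FROM accounts;'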
Copy .env.production file
Yeah, just copy the file from the old server to our container folder on the new server.
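On a standard install that's /home/mastodon/live/.env.production, so (placeholders again):
scp /home/mastodon/live/.env.production user@newserver:~/containers/mastodon/example.com/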
Save Redis Database and copy over
Stop Redis on the old server; as root, run:
systemctl stop redis
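Stopping Redis flushes its state to disk. Copy the resulting dump into the redis folder we created earlier; on Debian-family systems it lives at /var/lib/redis/dump.rdb, so adjust the path if yours differs:
scp /var/lib/redis/dump.rdb user@newserver:~/containers/mastodon/example.com/redis/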
Start Mastodon and Redis on new server
podman-compose up -d
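Give the containers a minute, then check that everything reports healthy:
podman ps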
Rebuild feeds
Exec into the web container (replace {web_server} with its actual name, e.g. examplecom_web_1):
podman exec -it {web_server} sh
RAILS_ENV=production ./bin/tootctl feeds build
Update DNS to point to new server
Not much of a walkthrough for this one. Just update the domain's DNS records to point at the new server.
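You can confirm the change has propagated; this should print the new server's IP:
dig +short example.com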
Update Nginx config and rerun Let's Encrypt
Install certbot
sudo apt update
sudo apt install python3 python3-dev python3-venv libaugeas-dev gcc
sudo apt-get remove certbot
sudo python3 -m venv /opt/certbot/
sudo /opt/certbot/bin/pip install --upgrade pip
sudo /opt/certbot/bin/pip install certbot certbot-nginx
sudo ln -s /opt/certbot/bin/certbot /usr/bin/certbot
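Installed this way, certbot has no renewal timer, so schedule renewals yourself; a crontab entry along these lines (adapted from the certbot docs) works:
echo "0 0,12 * * * root /opt/certbot/bin/certbot renew -q" | sudo tee -a /etc/crontab > /dev/null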
Let's Encrypt Nginx config file:
server {
    listen 80;
    listen [::]:80;

    root /var/www/html;
    index index.html index.htm index.nginx-debian.html;

    server_name example.com;

    location / {
        try_files $uri $uri/ =404;
    }
}
Reload Nginx, and request certificate:
sudo systemctl reload nginx
sudo certbot certonly -d example.com --nginx
Final Nginx config file:
server {
    listen 80;
    listen [::]:80;

    root /var/www/html;
    index index.html index.htm;

    server_name example.com; # Replace with your domain name

    location ~ /.well-known/acme-challenge {
        try_files $uri $uri/ =404;
    }

    location / {
        return 301 https://$server_name$request_uri;
    }
}
server {
    listen 443 ssl;
    listen [::]:443 ssl;

    root /mnt/none;
    index index.html index.htm;

    http2 on;

    server_name example.com; # Replace with your domain name

    # Replace your domain in these paths
    ssl_certificate /etc/letsencrypt/live/example.com/fullchain.pem;
    ssl_certificate_key /etc/letsencrypt/live/example.com/privkey.pem;
    ssl_session_timeout 5m;
    ssl_prefer_server_ciphers On;
    ssl_protocols TLSv1 TLSv1.1 TLSv1.2 TLSv1.3;

    absolute_redirect off;
    server_name_in_redirect off;

    error_page 404 /404.html;
    error_page 410 /410.html;

    location / {
        proxy_set_header Host $http_host;
        proxy_set_header X-Forwarded-Proto https;
        proxy_set_header X-Real-IP $remote_addr;
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
        proxy_pass http://localhost:3000;
    }

    location ^~ /api/v1/streaming {
        proxy_set_header Host $http_host;
        proxy_set_header X-Forwarded-Proto https;
        proxy_set_header X-Real-IP $remote_addr;
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
        proxy_pass http://localhost:4000;
        proxy_buffering off;
        proxy_redirect off;
        proxy_http_version 1.1;
        tcp_nodelay on;
    }
}
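Check the config syntax and reload:
sudo nginx -t
sudo systemctl reload nginx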
Set up backups
Add backup.sh to the container folder:
#!/usr/bin/env bash
# Mastodon Backup Script
pod=examplecom
printf -v date '%(%Y-%m-%d)T' -1
printf -v dow '%(%u)T' -1
printf -v dom '%(%d)T' -1

# Make sure the backup folders exist
mkdir -p backups/working backups/daily backups/weekly backups/monthly

# Backup .env.production
cp .env.production backups/working/.env.production

# Backup the Postgres database
podman exec ${pod}_db_1 pg_dump -Fc mastodon_production -U mastodon -f /backups/working/postgres.dump

# Backup user uploads

# Backup Redis; SAVE writes dump.rdb to /data, which is ./redis on the host
podman exec ${pod}_redis_1 redis-cli SAVE > /dev/null
cp redis/dump.rdb backups/working/dump.rdb

# Bundle and compress
tar -czf backups/backup.tgz backups/working/

# Copy files to the appropriate folders
cp backups/backup.tgz backups/daily/backup-${date}.tgz
if [ "$dow" -eq "1" ] ; then
    cp backups/backup.tgz backups/weekly/backup-${date}.tgz
fi
if [ "$dom" -eq "1" ] ; then
    cp backups/backup.tgz backups/monthly/backup-${date}.tgz
fi

# Prune old backups: keep 7 daily, 5 weekly, and 12 monthly
ls -1t backups/daily | tail -n +8 | xargs -I{} rm -f backups/daily/{}
ls -1t backups/weekly | tail -n +6 | xargs -I{} rm -f backups/weekly/{}
ls -1t backups/monthly | tail -n +13 | xargs -I{} rm -f backups/monthly/{}

# Clean up the working folder
rm -f backups/working/.[!.]*
rm -f backups/working/*
rm -f backups/backup.tgz

# rclone files to Backblaze
rclone sync backups/ backups:/beggar/${pod}/backups/ --fast-list --refresh-times
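Finally, make the script executable and schedule it. A daily crontab entry does the job; the path is an example, and the cd matters because the script uses relative paths:
chmod +x backup.sh
crontab -e
# e.g. run at 04:00 every day
0 4 * * * cd $HOME/containers/mastodon/example.com && ./backup.sh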