diff --git a/howto/template.md b/howto/template.md
index 12e5f94683c9aabe5385db86a1b67a34ac2668bd..50d1c14a55c1a627f9a4b483c3028fec8fda31ce 100644
--- a/howto/template.md
+++ b/howto/template.md
@@ -163,7 +163,7 @@ label ~Foo.
 <!-- that has a launch checklist -->
 
 <!-- if this is an old project being documented, summarize the known -->
-<!-- issues with the project. --> 
+<!-- issues with the project. -->
 
 ## Security and risk assessment
 
diff --git a/service/object-storage.md b/service/object-storage.md
index c12c34d644d61e237579ee492e6c87daf217bf1a..6aae5eb2022a40821ce63a48c24169febd2ffbf2 100644
--- a/service/object-storage.md
+++ b/service/object-storage.md
@@ -15,13 +15,59 @@
 
 # How-to
 
-<!-- more in-depth procedure that may require interpretation -->
+## Accessing the web interface
+
+To check that the service works, you can connect to the admin
+interface at <https://localhost:9090> (TODO: use real IP/hostname
+here?) with a normal web browser. The username is `admin` and the
+password is in `/etc/default/minio` on the server (currently
+`minio-01`).
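+
+As a quick non-interactive check, the S3 port also answers MinIO's
+standard liveness endpoint. A minimal sketch, assuming the default
+ports and plain HTTP as configured here:
+
+    # should print 200 when the server process is up
+    curl -s -o /dev/null -w '%{http_code}\n' http://localhost:9000/minio/health/live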
+
+## Configuring the local mc client
+
+You *must* use the web interface (above) to create a first access
+key.
+
+That was done once through the web interface, and the resulting access
+key was recorded on the UNIX `root` account with:
+
+    podman run -v /root/.mc:/root/.mc --rm -it quay.io/minio/mc alias set admin http://minio-01.torproject.org:9000
+
+Note that we currently use container images to run the `mc` tool. The
+above configuration makes further commands possible; see for example
+[creating a bucket](#creating-a-bucket).
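+
+Since every `mc` invocation goes through podman, a small shell wrapper
+can save some typing. This is only a suggestion, not something that is
+currently deployed; the function name is arbitrary:
+
+    # hypothetical helper, e.g. in /root/.bashrc on minio-01
+    mc() {
+        podman run -v /root/.mc:/root/.mc --rm -it quay.io/minio/mc "$@"
+    }
+
+With such a wrapper, the examples below shorten to plain `mc mb
+admin/foo` and similar.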
+
+## Creating a bucket
+
+A bucket can be created on a MinIO server using the `mc` commandline
+tool.
+
+WARNING: you should NOT create buckets under the main `admin`
+account. As `admin`, create a new account for your application, then,
+as that new account, create a specific access key, as described above.
+
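+For reference, here is a rough sketch of those steps with `mc`. This is
+untested here, the user name and password below are made up, and the
+exact `mc admin` subcommands vary between `mc` versions:
+
+    # create a dedicated user for the application (hypothetical credentials)
+    podman run -v /root/.mc:/root/.mc --rm -it quay.io/minio/mc \
+        admin user add admin exampleapp LONG-RANDOM-PASSWORD
+    # grant it a built-in policy such as readwrite
+    podman run -v /root/.mc:/root/.mc --rm -it quay.io/minio/mc \
+        admin policy attach admin readwrite --user exampleapp
+
+An access key for the new account can then be created in the web
+interface, as described above.
+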
+The following will create a bucket named `foo` under the main `admin`
+account:
+
+    root@minio-01:~# podman run -v /root/.mc:/root/.mc --rm -it quay.io/minio/mc mb admin/foo
+    Bucket created successfully `foo`.
+
+Try creating the same bucket again to confirm it really exists; it
+should fail like this:
+
+    root@minio-01:~# podman run -v /root/.mc:/root/.mc --rm -it quay.io/minio/mc mb admin/foo
+    mc: <ERROR> Unable to make bucket `local/foo`. Your previous request to create the named bucket succeeded and you already own it.
+
+You should also see the bucket in the web interface.
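+
+If the bucket was created only as a test, it can be removed again with
+`mc rb` (assuming it is still empty):
+
+    root@minio-01:~# podman run -v /root/.mc:/root/.mc --rm -it quay.io/minio/mc rb admin/foo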
 
 ## Pager playbook
 
-<!-- information about common errors from the monitoring system and -->
-<!-- how to deal with them. this should be easy to follow: think of -->
-<!-- your future self, in a stressful situation, tired and hungry. -->
+### Restarting the service
+
+The MinIO service runs under the `container-minio.service` unit. To
+restart it if it crashed, simply run:
+
+    systemctl restart container-minio.service
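+
+After a restart, confirm the unit actually came back up, then re-run
+the check from [accessing the web interface](#accessing-the-web-interface):
+
+    systemctl is-active container-minio.service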
 
 ## Disaster recovery
 
@@ -105,7 +151,7 @@ container:
        --publish 9000:9000 \
        --publish 9090:9090 \
        --volume /srv/data:/data \
-       --env "MINIO_ROOT_USER=root" \
+       --env "MINIO_ROOT_USER=admin" \
        --env "MINIO_ROOT_PASSWORD" \
        quay.io/minio/minio server /data --console-address ":9090"
 
@@ -148,6 +194,8 @@ TODO: consider podman auto-update, see https://www.redhat.com/sysadmin/improved-
 
 <!-- this describes an acceptable level of service for this service -->
 
+TODO: sum up RFC-56 SLA discussion
+
 ## Design and architecture
 
 <!-- how this is built -->
@@ -157,34 +205,44 @@ TODO: consider podman auto-update, see https://www.redhat.com/sysadmin/improved-
 <!-- "architectural" document, which the final result might differ -->
 <!-- from, sometimes significantly -->
 
+TODO: sum up RFC-56 and current architecture
+
 ## Services
 
-<!-- open ports, daemons, cron jobs -->
+TODO: services <!-- open ports, daemons, cron jobs -->
 
 ## Storage
 
-<!-- databases? plain text file? the frigging blockchain? memory? -->
+TODO: storage design <!-- databases? plain text file? the frigging blockchain? memory? -->
 
 ## Queues
 
-<!-- email queues, job queues, schedulers -->
+MinIO has built-in [lifecycle management](https://min.io/docs/minio/container/administration/object-management/object-lifecycle-management.html) where objects can be
+configured to have an expiry date. Expiry is handled automatically inside
+MinIO by a [low-priority object scanner](https://min.io/docs/minio/container/administration/object-management/object-lifecycle-management.html#lifecycle-management-object-scanner).
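+
+As an illustration, an expiry rule could be set on a bucket with `mc`.
+This is a sketch only: the bucket name reuses the `foo` example from
+above, and the exact `mc ilm` subcommands and flags vary between `mc`
+versions:
+
+    # expire objects in the foo bucket 90 days after creation
+    podman run -v /root/.mc:/root/.mc --rm -it quay.io/minio/mc \
+        ilm rule add --expire-days 90 admin/foo
+    # list the configured rules
+    podman run -v /root/.mc:/root/.mc --rm -it quay.io/minio/mc \
+        ilm rule ls admin/foo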
 
 ## Interfaces
 
-<!-- e.g. web APIs, commandline clients, etc -->
+MinIO provides an admin interface on port `9090` and an S3-compatible
+interface on port `9000`. TODO: make those real hostnames?
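+
+A quick way to check that those ports are actually listening on the
+server (a sketch, run on `minio-01`):
+
+    ss -tlnp | grep -E ':9000|:9090'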
 
 ## Authentication
 
-<!-- SSH? LDAP? standalone? -->
+We use the built-in [MinIO identity provider](https://min.io/docs/minio/container/administration/identity-access-management/minio-identity-management.html#minio-internal-idp). Access control is
+granted to users, which are in turn issued access keys.
+
+The `admin` user is defined in `/etc/default/minio` on `minio-01` and has
+an access key saved in `/root/.mc` that can be used with the `mc`
+commandline client; see the [tests section](#tests) for details.
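+
+To check that those saved credentials still work, one option is to
+query the server information with them, using the same containerized
+`mc` invocation as above:
+
+    podman run -v /root/.mc:/root/.mc --rm -it quay.io/minio/mc admin info admin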
 
 ## Implementation
 
-<!-- programming languages, frameworks, versions, license -->
+MinIO is implemented in Go. We deploy the upstream container images
+with podman.
 
 ## Related services
 
-<!-- dependent services (e.g. authenticates against LDAP, or requires -->
-<!-- git pushes)  -->
+The service is currently used by the [Gitlab service](howto/gitlab).
 
 ## Issues
 
@@ -200,21 +258,44 @@ label ~Foo.
  [File]: https://gitlab.torproject.org/tpo/tpa/team/-/issues/new
  [search]: https://gitlab.torproject.org/tpo/tpa/team/-/issues?label_name%5B%5D=Foo
 
+TODO: make a label for object-storage
+
 ## Maintainer
 
-<!-- document who deployed and operates this service, the team and -->
-<!-- ideally the person inside that team -->
+anarcat set up this service in July 2023 and TPA is responsible for
+managing it.
 
 ## Users
 
-<!-- who the main users are, how they use the service. possibly reuse -->
-<!-- the Personas section in the RFC, if available. -->
+The service is currently used by the [Gitlab service](howto/gitlab) but may be
+expanded to other services upon request.
 
 ## Upstream
 
-<!-- who the upstreams are, if they are still active, -->
-<!-- collaborative, how do we keep up to date, support channels, see -->
-<!-- also the "Issues" section above -->
+[MinIO][] is a well-known object storage server. It is [not packaged
+in Debian][]. It has regular releases, but the release numbers do not
+conform to the semantic versioning standard. Its support policy is
+unclear.
+
+[MinIO]: https://min.io/
+[not packaged in Debian]: https://bugs.debian.org/cgi-bin/bugreport.cgi?bug=859207
+
+### Licensing dispute
+
+MinIO is involved in a [licensing dispute][] with commercial storage
+providers ([Weka][] and [Nutanix][]) because those vendors used MinIO
+in their products without giving attribution. See also [this Hacker
+News discussion][32148007].
+
+It should also be noted that MinIO switched to the AGPL relatively
+recently.
+
+This is not seen as a deal-breaker in using MinIO for TPA.
+
+[Weka]: https://www.weka.io/
+[Nutanix]: https://www.nutanix.com/
+[32148007]: https://news.ycombinator.com/item?id=32148007
+[licensing dispute]: https://blocksandfiles.com/2023/03/26/we-object-minio-says-no-more-open-license-for-you-weka/
 
 ## Monitoring and metrics
 
@@ -223,21 +304,27 @@ label ~Foo.
 
 ## Tests
 
-<!-- how the service can be tested, for example after major changes -->
-<!-- like IP address changes or upgrades. describe CI, test suites, linting -->
+To make sure the service still works after an upgrade, you can try
+[creating a bucket](#creating-a-bucket).
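+
+A lighter check that exercises the stored credentials without creating
+anything is to list the existing buckets:
+
+    podman run -v /root/.mc:/root/.mc --rm -it quay.io/minio/mc ls admin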
 
 ## Logs
 
-<!-- where are the logs? how long are they kept? any PII? -->
-<!-- what about performance metrics? same questions -->
+The logs of the `container-minio.service` unit for the current boot
+can be inspected with:
+
+    journalctl -u container-minio.service -b
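+
+To follow the logs live, or to show only error-level messages from the
+current boot:
+
+    journalctl -u container-minio.service -f
+    journalctl -u container-minio.service -b -p err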
+
+MinIO doesn't seem to keep PII in its logs, but PII may of course be
+recorded in the buckets by the services and users using it. That is
+not considered the responsibility of this service.
 
 ## Backups
 
-<!-- does this service need anything special in terms of backups? -->
-<!-- e.g. locking a database? special recovery procedures? -->
+TODO: document how backups are a problem
 
 ## Other documentation
 
+TODO: reference upstream docs
+
 <!-- references to upstream documentation, if relevant -->
 
 # Discussion
@@ -258,7 +345,7 @@ label ~Foo.
 <!-- that has a launch checklist -->
 
 <!-- if this is an old project being documented, summarize the known -->
-<!-- issues with the project. --> 
+<!-- issues with the project. -->
 
 ## Security and risk assessment
 
@@ -302,7 +389,7 @@ servers. Unfortunately it suffers from a set of limitations:
     (although to its defense it does use a checksum...)
 
  2. it depends on the [deprecated puppet-certs module](https://github.com/broadinstitute/puppet-certs)
- 
+
 3. even if it would depend on the newer [puppet-certificates
     module](https://github.com/broadinstitute/puppet-certificates), that module clashes with the way we manage our own
     certificates... we might or might not want to use this module in