Compare commits

..

28 Commits

Author SHA1 Message Date
Xe Iaso
364622d890 chore: fixes found in review
Signed-off-by: Xe Iaso <me@xeiaso.net>
2025-07-04 20:37:32 +00:00
Xe Iaso
41993466a1 docs(admin/policies): don't start a sentence with as
Signed-off-by: Xe Iaso <me@xeiaso.net>
2025-07-04 19:55:15 +00:00
Xe Iaso
3894469d98 Merge branch 'main' into Xe/store-interface
Signed-off-by: Xe Iaso <me@xeiaso.net>
2025-07-04 19:49:05 +00:00
Xe Iaso
8a3520466b docs: update CHANGELOG and internal links
Signed-off-by: Xe Iaso <me@xeiaso.net>
2025-07-04 19:47:54 +00:00
Xe Iaso
066e642310 docs(policy): document storage backends
Signed-off-by: Xe Iaso <me@xeiaso.net>
2025-07-04 19:36:43 +00:00
Xe Iaso
e8e70122d6 chore(docs): listen on 0.0.0.0 for dev container support
Signed-off-by: Xe Iaso <me@xeiaso.net>
2025-07-04 19:36:32 +00:00
Xe Iaso
5b337cd322 docs(default-config): add a nudge to the storage backends section of the docs
Signed-off-by: Xe Iaso <me@xeiaso.net>
2025-07-04 19:36:11 +00:00
Xe Iaso
a6e6caad7b chore(devcontainer): remove port forwards because vs code handles that for you
Signed-off-by: Xe Iaso <me@xeiaso.net>
2025-07-04 19:34:13 +00:00
Xe Iaso
9e19dc1ee4 Update metadata
check-spelling run (pull_request) for Xe/store-interface

Signed-off-by: check-spelling-bot <check-spelling-bot@users.noreply.github.com>
on-behalf-of: @check-spelling <check-spelling-bot@check-spelling.dev>
2025-07-04 18:48:17 +00:00
Xe Iaso
b96ab68e85 test(lib/policy/config): ensure valkey stores can be loaded
Signed-off-by: Xe Iaso <me@xeiaso.net>
2025-07-04 18:46:33 +00:00
Xe Iaso
4845f8515d test(lib/store/valkey): disable tests if not using docker
Signed-off-by: Xe Iaso <me@xeiaso.net>
2025-07-04 18:38:35 +00:00
Xe Iaso
3808f7ba17 feat(lib/store): implement valkey backend
Signed-off-by: Xe Iaso <me@xeiaso.net>
2025-07-04 18:31:01 +00:00
Xe Iaso
59f69d48d2 fix(lib): make challenges live for 30 minutes by default
Signed-off-by: Xe Iaso <me@xeiaso.net>
2025-07-04 17:29:14 +00:00
Xe Iaso
ded9c32801 chore(devcontainer): adapt to docker compose, add valkey service
Signed-off-by: Xe Iaso <me@xeiaso.net>
2025-07-04 17:28:52 +00:00
Xe Iaso
06b2dca7fc chore: go mod tidy
Signed-off-by: Xe Iaso <me@xeiaso.net>
2025-07-03 18:14:23 +00:00
Xe Iaso
1dceab889c chore: spelling
Signed-off-by: Xe Iaso <me@xeiaso.net>
2025-07-03 18:05:21 +00:00
Xe Iaso
45f6fa2194 feat(lib/store): add bbolt store implementation
Signed-off-by: Xe Iaso <me@xeiaso.net>
2025-07-03 18:02:17 +00:00
Xe Iaso
ddb7b0e99e fix(decaymap): invert locking process for Delete
Signed-off-by: Xe Iaso <me@xeiaso.net>
2025-07-03 04:50:11 +00:00
Xe Iaso
acee62a9d0 chore: spelling
Signed-off-by: Xe Iaso <me@xeiaso.net>
2025-07-03 00:46:16 -04:00
Xe Iaso
2b3bfdc40b test(lib/store): make generic storage interface test adaptor
Signed-off-by: Xe Iaso <me@xeiaso.net>
2025-07-03 04:44:29 +00:00
Xe Iaso
e538f55e89 chore(lib): fix SA4004
Signed-off-by: Xe Iaso <me@xeiaso.net>
2025-07-03 04:08:32 +00:00
Xe Iaso
b8e0c1a961 chore(decaymap): fix documentation typo
Signed-off-by: Xe Iaso <me@xeiaso.net>
2025-07-03 04:07:44 +00:00
Xe Iaso
def6f2dc90 feat(lib): use new challenge creation flow
Previously Anubis constructed challenge strings from request metadata.
This was a good idea in spirit, but has turned out to be a very bad idea
in practice. This new flow reuses the Store facility to dynamically
create challenge values with completely random data.

This is a fairly big rewrite of how Anubis processes challenges. Right
now it defaults to using the in-memory storage backend, but on-disk
(boltdb) and valkey-based adaptors will come soon.

Signed-off-by: Xe Iaso <me@xeiaso.net>
2025-07-02 23:56:23 +00:00
Xe Iaso
e5c39facfe chore(policy): import all store backends
Signed-off-by: Xe Iaso <me@xeiaso.net>
2025-07-02 23:34:49 +00:00
Xe Iaso
18b21330df feat(lib/store): add metapackage to import all store implementations
Signed-off-by: Xe Iaso <me@xeiaso.net>
2025-07-02 23:17:51 +00:00
Xe Iaso
0f9da86003 feat(lib): implement store interface
Signed-off-by: Xe Iaso <me@xeiaso.net>
2025-07-02 23:12:27 +00:00
Xe Iaso
32afc9c040 chore(lib/challenge): refactor Validate to take ValidateInput
Signed-off-by: Xe Iaso <me@xeiaso.net>
2025-07-02 22:23:10 +00:00
Xe Iaso
9245c4beec feat(decaymap): add Delete method
Signed-off-by: Xe Iaso <me@xeiaso.net>
2025-07-02 22:22:34 +00:00
17 changed files with 166 additions and 141 deletions

View File

@@ -3,7 +3,9 @@ FROM ghcr.io/xe/devcontainer-base/pre/go
WORKDIR /app
COPY go.mod go.sum package.json package-lock.json ./
RUN apt-get update \
RUN go install github.com/a-h/templ/cmd/templ \
&& npx --yes playwright@1.52.0 install --with-deps\
&& apt-get update \
&& apt-get -y install zstd brotli redis \
&& mkdir -p /home/vscode/.local/share/fish \
&& chown -R vscode:vscode /home/vscode/.local/share/fish \

View File

@@ -2,6 +2,14 @@
// README at: https://github.com/devcontainers/templates/tree/main/src/debian
{
"name": "Dev",
// Or use a Dockerfile or Docker Compose file. More info: https://containers.dev/guide/dockerfile
// "build": {
// "dockerfile": "./Dockerfile",
// "context": "..",
// "cacheFrom": [
// "type=registry,ref=ghcr.io/techarohq/anubis/devcontainer"
// ]
// },
"dockerComposeFile": ["./docker-compose.yaml"],
"service": "workspace",
"workspaceFolder": "/workspace/anubis",

View File

@@ -1,13 +1,4 @@
services:
playwright:
image: mcr.microsoft.com/playwright:v1.52.0-noble
init: true
network_mode: service:workspace
command:
- /bin/sh
- -c
- npx -y playwright@1.52.0 run-server --port 9001 --host 0.0.0.0
valkey:
image: valkey/valkey:8
pull_policy: always
@@ -18,6 +9,8 @@ services:
build:
context: ..
dockerfile: .devcontainer/Dockerfile
cache_from:
- "type=registry,ref=ghcr.io/techarohq/anubis/devcontainer"
volumes:
- ../:/workspace/anubis:cached
environment:

View File

@@ -84,7 +84,6 @@
^\Q.github/workflows/spelling.yml\E$
^data/crawlers/
^docs/blog/tags\.yml$
^docs/docs/user/known-instances.md$
^docs/manifest/.*$
^docs/static/\.nojekyll$
^lib/policy/config/testdata/bad/unparseable\.json$

View File

@@ -20,7 +20,7 @@ bbolt
bdba
berr
bingbot
Bitcoin
bitcoin
bitrate
blogging
Bluesky
@@ -31,7 +31,6 @@ botstopper
BPort
Brightbot
broked
byteslice
Bytespider
cachebuster
cachediptoasn
@@ -131,6 +130,7 @@ Hashcash
hashrate
headermap
healthcheck
hebis
hec
hmc
hostable
@@ -250,6 +250,7 @@ RUnlock
runtimedir
sas
sasl
Scumm
searchbot
searx
sebest
@@ -262,8 +263,10 @@ shellcheck
Sidetrade
simprint
sitemap
skopeo
sls
sni
Sourceware
Spambot
sparkline
spyderbot
@@ -286,6 +289,7 @@ techarohq
templ
templruntime
testarea
testdb
Thancred
thoth
thothmock

47
.github/workflows/devcontainer.yml vendored Normal file
View File

@@ -0,0 +1,47 @@
name: Dev container prebuild
on:
push:
branches: ["main"]
tags: ["v*.*.*"]
jobs:
devcontainer:
runs-on: ubuntu-24.04
steps:
- name: Checkout code
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
with:
fetch-tags: true
fetch-depth: 0
persist-credentials: false
- name: Set up QEMU
uses: docker/setup-qemu-action@29109295f81e9208d7d86ff1c6c12d2833863392 # v3.6.0
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@e468171a9de216ec08956ac3ada2f0791b6bd435 # v3.11.1
- uses: actions/setup-node@49933ea5288caeca8642d1e84afbd3f7d6820020 # v4.4.0
with:
node-version: latest
- run: |
sudo apt-get update
sudo apt-get -y install skopeo
- name: Log into registry
if: github.event_name != 'pull_request'
uses: docker/login-action@343f7c4344506bcbf9b4de18042ae17996df046d # v3.0.0
with:
registry: ghcr.io
username: techarohq
password: ${{ secrets.GITHUB_TOKEN }}
- name: Pre-build dev container image
uses: devcontainers/ci@8bf61b26e9c3a98f69cb6ce2f88d24ff59b785c6 # v0.3.1900000417
with:
imageName: ghcr.io/techarohq/anubis/devcontainer
cacheFrom: ghcr.io/techarohq/anubis/devcontainer
push: always
platform: linux/amd64,linux/arm64

View File

@@ -60,14 +60,14 @@ Anubis uses these environment variables for configuration:
| Environment Variable | Default value | Explanation |
| :----------------------------- | :---------------------- | :------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ |
| `BASE_PREFIX` | unset | If set, adds a global prefix to all Anubis endpoints (everything starting with `/.within.website/x/anubis/`). For example, setting this to `/myapp` would make Anubis accessible at `/myapp/` instead of `/`. This is useful when running Anubis behind a reverse proxy that routes based on path prefixes. |
| `BASE_PREFIX` | unset | If set, adds a global prefix to all Anubis endpoints. For example, setting this to `/myapp` would make Anubis accessible at `/myapp/` instead of `/`. This is useful when running Anubis behind a reverse proxy that routes based on path prefixes. |
| `BIND` | `:8923` | The network address that Anubis listens on. For `unix`, set this to a path: `/run/anubis/instance.sock` |
| `BIND_NETWORK` | `tcp` | The address family that Anubis listens on. Accepts `tcp`, `unix` and anything Go's [`net.Listen`](https://pkg.go.dev/net#Listen) supports. |
| `COOKIE_DOMAIN` | unset | The domain the Anubis challenge pass cookie should be set to. This should be set to the domain you bought from your registrar (EG: `techaro.lol` if your webapp is running on `anubis.techaro.lol`). See this [stackoverflow explanation of cookies](https://stackoverflow.com/a/1063760) for more information.<br/><br/>Note that unlike `REDIRECT_DOMAINS`, you should never include a port number in this variable. |
| `COOKIE_DYNAMIC_DOMAIN` | false | If set to true, automatically set cookie domain fields based on the hostname of the request. EG: if you are making a request to `anubis.techaro.lol`, the Anubis cookie will be valid for any subdomain of `techaro.lol`. |
| `COOKIE_EXPIRATION_TIME` | `168h` | The amount of time the authorization cookie is valid for. |
| `COOKIE_PARTITIONED` | `false` | If set to `true`, enables the [partitioned (CHIPS) flag](https://developers.google.com/privacy-sandbox/cookies/chips), meaning that Anubis inside an iframe has a different set of cookies than the domain hosting the iframe. |
| `COOKIE_SECURE` | `true` | If set to `true`, enables the [Secure flag](https://developer.mozilla.org/en-US/docs/Web/HTTP/Guides/Cookies#block_access_to_your_cookies), meaning that the cookies will only be transmitted over HTTPS. If Anubis is used in an insecure context (plain HTTP), this will need to be set to false |
| `COOKIE_SECURE` | `true` | If set to `true`, enables the [Secure flag](https://developer.mozilla.org/en-US/docs/Web/HTTP/Guides/Cookies#block_access_to_your_cookies), meaning that the cookies will only be transmitted over HTTPS. If Anubis is used in an insecure context (plain HTTP), this will need to be set to false |
| `DIFFICULTY` | `4` | The difficulty of the challenge, or the number of leading zeroes that must be in successful responses. |
| `ED25519_PRIVATE_KEY_HEX` | unset | The hex-encoded ed25519 private key used to sign Anubis responses. If this is not set, Anubis will generate one for you. This should be exactly 64 characters long. See below for details. |
| `ED25519_PRIVATE_KEY_HEX_FILE` | unset | Path to a file containing the hex-encoded ed25519 private key. Only one of this or its sister option may be set. |

View File

@@ -289,9 +289,10 @@ When Anubis opens a bbolt database, it takes an exclusive lock on that database.
The `bbolt` backend takes the following configuration options:
| Name | Type | Example | Description |
| :----- | :--- | :----------------- | :--------------------------------------------------------------------------------------------------------------------------- |
| `path` | path | `/data/anubis.bdb` | The filesystem path for the Anubis bbolt database. Anubis requires write access to the folder containing the bbolt database. |
| Name | Type | Example | Description |
| :------- | :----- | :----------------- | :-------------------------------------------------------------------------------------------------------------------------------- |
| `bucket` | string | `anubis` | The bbolt bucket that Anubis should place all its data into. If this is not set, then Anubis will default to the bucket `anubis`. |
| `path` | path | `/data/anubis.bdb` | The filesystem path for the Anubis bbolt database. Anubis requires write access to the folder containing the bbolt database. |
Example:

View File

@@ -21,4 +21,8 @@ If you use a browser extension such as [JShelter](https://jshelter.org/), you wi
## Does Anubis mine Bitcoin?
No. Anubis does not mine Bitcoin or any other cryptocurrency.
No. Anubis does not mine Bitcoin.
In order to mine bitcoin, you need to download a copy of the blockchain (so you have the state required to do mining) and also broadcast your mined blocks to the network should you reach a hash with the right number of leading zeroes. You also need to continuously read for newly broadcasted transactions so you can batch them into a block. This requires gigabytes of data to be transferred from the server to the client.
Anubis transfers two digit numbers of kilobytes from the server to the client (which you can independently verify with your browser's Developer Tools feature). This is orders of magnitude below what is required to mine Bitcoin.

View File

@@ -45,11 +45,6 @@ This page contains a non-exhaustive list with all websites using Anubis.
- https://gitlab.postmarketos.org/
- https://wiki.koha-community.org/
- https://extensions.typo3.org/
- https://ebird.org/
- https://fabulous.systems/
- https://coinhoards.org/
- https://pluralpedia.org/
- https://git.aya.so/
- <details>
<summary>FreeCAD</summary>
- https://forum.freecad.org/
@@ -87,10 +82,3 @@ This page contains a non-exhaustive list with all websites using Anubis.
- https://karla.hds.hebis.de/
- and many more (see https://www.hebis.de/dienste/hebis-discovery-system/)
</details>
- <details>
<summary>Duke University</summary>
- https://repository.duke.edu/
- https://archives.lib.duke.edu/
- https://find.library.duke.edu/
- https://nicholas.duke.edu/
</details>

View File

@@ -127,8 +127,3 @@ impressum:
status_codes:
CHALLENGE: 200
DENY: 200
store:
backend: bbolt
parameters:
path: /xe/data/anubis/data.bdb

View File

@@ -15,8 +15,6 @@ spec:
- name: anubis
configMap:
name: anubis-cfg
- name: temporary-data
emptyDir: {}
containers:
- name: anubis-docs
image: ghcr.io/techarohq/anubis/docs:main
@@ -53,8 +51,6 @@ spec:
volumeMounts:
- name: anubis
mountPath: /xe/cfg/anubis
- name: temporary-data
mountPath: /xe/data/anubis
resources:
limits:
cpu: 500m

View File

@@ -2,6 +2,7 @@ package bbolt
import (
"context"
"encoding/json"
"errors"
"fmt"
"log/slog"
@@ -11,85 +12,52 @@ import (
"go.etcd.io/bbolt"
)
// Sentinel error values used for testing and in admin-visible error messages.
var (
ErrBucketDoesNotExist = errors.New("bbolt: bucket does not exist")
ErrNotExists = errors.New("bbolt: value does not exist in store")
)
// Store implements store.Interface backed by bbolt[1].
//
// In essence, bbolt is a hierarchical key/value store with a twist: every value
// needs to belong to a bucket. Buckets can contain an infinite number of
// buckets. As such, Anubis nests values in buckets. Each value in the store
// is given its own bucket with two keys:
//
// 1. data - The raw data, usually in JSON
// 2. expiry - The expiry time formatted as a time.RFC3339Nano timestamp string
//
// When Anubis stores a new bit of data, it creates a new bucket for that value.
// This allows the cleanup phase to iterate over every bucket in the database and
// only scan the expiry times without having to decode the entire record.
//
// bbolt is not suitable for environments where multiple instances of Anubis need
// to read from and write to the same backend store. For that, use the valkey
// storage backend.
//
// [1]: https://github.com/etcd-io/bbolt
type Store struct {
bdb *bbolt.DB
type Item struct {
Data []byte `json:"data"`
Expires time.Time `json:"expires"`
}
type Store struct {
bucket []byte
bdb *bbolt.DB
}
// Delete a key from the datastore. If the key does not exist, return an error.
func (s *Store) Delete(ctx context.Context, key string) error {
return s.bdb.Update(func(tx *bbolt.Tx) error {
if tx.Bucket([]byte(key)) == nil {
bkt := tx.Bucket(s.bucket)
if bkt == nil {
return fmt.Errorf("%w: %q", ErrBucketDoesNotExist, string(s.bucket))
}
if bkt.Get([]byte(key)) == nil {
return fmt.Errorf("%w: %q", ErrNotExists, key)
}
return tx.DeleteBucket([]byte(key))
return bkt.Delete([]byte(key))
})
}
// Get a value from the datastore.
//
// Because each value is stored in its own bucket with data and expiry keys,
// two get operations are required:
//
// 1. Get the expiry key, parse as time.RFC3339Nano. If the key has expired, run deletion in the background and return a "key not found" error.
// 2. Get the data key, copy into the result byteslice, return it.
func (s *Store) Get(ctx context.Context, key string) ([]byte, error) {
var result []byte
var i Item
if err := s.bdb.View(func(tx *bbolt.Tx) error {
itemBucket := tx.Bucket([]byte(key))
if itemBucket == nil {
bkt := tx.Bucket(s.bucket)
if bkt == nil {
return fmt.Errorf("%w: %q", ErrBucketDoesNotExist, string(s.bucket))
}
bucketData := bkt.Get([]byte(key))
if bucketData == nil {
return fmt.Errorf("%w: %q", store.ErrNotFound, key)
}
expiryStr := itemBucket.Get([]byte("expiry"))
if expiryStr == nil {
return fmt.Errorf("[unexpected] %w: %q (expiry is nil)", store.ErrNotFound, key)
}
expiry, err := time.Parse(time.RFC3339Nano, string(expiryStr))
if err != nil {
return fmt.Errorf("[unexpected] %w: %w", store.ErrCantDecode, err)
}
if time.Now().After(expiry) {
go s.Delete(context.Background(), key)
return fmt.Errorf("%w: %q", store.ErrNotFound, key)
}
dataStr := itemBucket.Get([]byte("data"))
if dataStr == nil {
return fmt.Errorf("[unexpected] %w: %q (data is nil)", store.ErrNotFound, key)
}
result = make([]byte, len(dataStr))
if n := copy(result, dataStr); n != len(dataStr) {
return fmt.Errorf("[unexpected] %w: %d bytes copied of %d", store.ErrCantDecode, n, len(dataStr))
if err := json.Unmarshal(bucketData, &i); err != nil {
return fmt.Errorf("%w: %w", store.ErrCantDecode, err)
}
return nil
@@ -97,28 +65,32 @@ func (s *Store) Get(ctx context.Context, key string) ([]byte, error) {
return nil, err
}
return result, nil
if time.Now().After(i.Expires) {
go s.Delete(context.Background(), key)
return nil, fmt.Errorf("%w: %q", store.ErrNotFound, key)
}
return i.Data, nil
}
// Set a value into the store with a given expiry.
func (s *Store) Set(ctx context.Context, key string, value []byte, expiry time.Duration) error {
expires := time.Now().Add(expiry)
i := Item{
Data: value,
Expires: time.Now().Add(expiry),
}
data, err := json.Marshal(i)
if err != nil {
return fmt.Errorf("%w: %w", store.ErrCantEncode, err)
}
return s.bdb.Update(func(tx *bbolt.Tx) error {
valueBkt, err := tx.CreateBucketIfNotExists([]byte(key))
if err != nil {
return fmt.Errorf("%w: %w: %q (create bucket)", store.ErrCantEncode, err, key)
bkt := tx.Bucket(s.bucket)
if bkt == nil {
return fmt.Errorf("%w: %q", ErrBucketDoesNotExist, string(s.bucket))
}
if err := valueBkt.Put([]byte("expiry"), []byte(expires.Format(time.RFC3339Nano))); err != nil {
return fmt.Errorf("%w: %q (expiry)", store.ErrCantEncode, key)
}
if err := valueBkt.Put([]byte("data"), value); err != nil {
return fmt.Errorf("%w: %q (data)", store.ErrCantEncode, key)
}
return nil
return bkt.Put([]byte(key), data)
})
}
@@ -126,28 +98,31 @@ func (s *Store) cleanup(ctx context.Context) error {
now := time.Now()
return s.bdb.Update(func(tx *bbolt.Tx) error {
return tx.ForEach(func(key []byte, valueBkt *bbolt.Bucket) error {
var expiry time.Time
var err error
bkt := tx.Bucket(s.bucket)
if bkt == nil {
return fmt.Errorf("cache bucket %q does not exist", string(s.bucket))
}
expiryStr := valueBkt.Get([]byte("expiry"))
if expiryStr == nil {
slog.Warn("while running cleanup, expiry is not set somehow, file a bug?", "key", string(key))
return nil
return bkt.ForEach(func(k, v []byte) error {
var i Item
data := bkt.Get(k)
if data == nil {
return fmt.Errorf("%s in Cache bucket does not exist???", string(k))
}
expiry, err = time.Parse(time.RFC3339Nano, string(expiryStr))
if err != nil {
return fmt.Errorf("[unexpected] %w in bucket %q: %w", store.ErrCantDecode, string(key), err)
if err := json.Unmarshal(data, &i); err != nil {
return fmt.Errorf("can't unmarshal data at key %s: %w", string(k), err)
}
if now.After(expiry) {
return valueBkt.DeleteBucket(key)
if now.After(i.Expires) {
return bkt.Delete(k)
}
return nil
})
})
}
func (s *Store) cleanupThread(ctx context.Context) {

View File

@@ -12,7 +12,8 @@ func TestImpl(t *testing.T) {
path := filepath.Join(t.TempDir(), "db")
t.Log(path)
data, err := json.Marshal(Config{
Path: path,
Path: path,
Bucket: "anubis",
})
if err != nil {
t.Fatal(err)

View File

@@ -21,12 +21,8 @@ func init() {
store.Register("bbolt", Factory{})
}
// Factory builds new instances of the bbolt storage backend according to
// configuration passed via a json.RawMessage.
type Factory struct{}
// Build parses and validates the bbolt storage backend Config and creates
// a new instance of it.
func (Factory) Build(ctx context.Context, data json.RawMessage) (store.Interface, error) {
var config Config
if err := json.Unmarshal([]byte(data), &config); err != nil {
@@ -37,13 +33,28 @@ func (Factory) Build(ctx context.Context, data json.RawMessage) (store.Interface
return nil, fmt.Errorf("%w: %w", store.ErrBadConfig, err)
}
if config.Bucket == "" {
config.Bucket = "anubis"
}
bdb, err := bbolt.Open(config.Path, 0600, nil)
if err != nil {
return nil, fmt.Errorf("can't open bbolt database %s: %w", config.Path, err)
}
if err := bdb.Update(func(tx *bbolt.Tx) error {
if _, err := tx.CreateBucketIfNotExists([]byte(config.Bucket)); err != nil {
return err
}
return nil
}); err != nil {
return nil, fmt.Errorf("can't create bbolt bucket %q: %w", config.Bucket, err)
}
result := &Store{
bdb: bdb,
bdb: bdb,
bucket: []byte(config.Bucket),
}
go result.cleanupThread(ctx)
@@ -51,8 +62,6 @@ func (Factory) Build(ctx context.Context, data json.RawMessage) (store.Interface
return result, nil
}
// Valid parses and validates the bbolt store Config or returns
// an error.
func (Factory) Valid(data json.RawMessage) error {
var config Config
if err := json.Unmarshal([]byte(data), &config); err != nil {
@@ -66,13 +75,11 @@ func (Factory) Valid(data json.RawMessage) error {
return nil
}
// Config is the bbolt storage backend configuration.
type Config struct {
// Path is the filesystem path of the database. The folder must be writable to Anubis.
Path string `json:"path"`
Path string `json:"path"`
Bucket string `json:"bucket,omitempty"`
}
// Valid validates the configuration including checking if its containing folder is writable.
func (c Config) Valid() error {
var errs []error
@@ -83,7 +90,6 @@ func (c Config) Valid() error {
if err := os.WriteFile(filepath.Join(dir, ".test-file"), []byte(""), 0600); err != nil {
errs = append(errs, ErrCantWriteToPath)
}
os.Remove(filepath.Join(dir, ".test-file"))
}
if len(errs) != 0 {

View File

@@ -3,6 +3,7 @@ package bbolt
import (
"encoding/json"
"errors"
"path/filepath"
"testing"
)
@@ -26,6 +27,13 @@ func TestFactoryValid(t *testing.T) {
cfg: Config{},
err: ErrMissingPath,
},
{
name: "unwritable folder",
cfg: Config{
Path: filepath.Join("/", "testdb"),
},
err: ErrCantWriteToPath,
},
} {
t.Run(tt.name, func(t *testing.T) {
data, err := json.Marshal(tt.cfg)

View File

@@ -38,9 +38,7 @@ func Common(t *testing.T, f store.Factory, config json.RawMessage) {
val, err := s.Get(t.Context(), t.Name())
if errors.Is(err, store.ErrNotFound) {
t.Errorf("wanted %s to exist in store but it does not: %v", t.Name(), err)
} else if err != nil {
t.Error(err)
t.Errorf("wanted %s to exist in store but it does not", t.Name())
}
if !bytes.Equal(val, []byte(t.Name())) {