Compare commits
120 Commits
Xe/minimiz
...
Xe/express
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
b6c361c294 | ||
|
|
e9e602976f | ||
|
|
33bb5803a8 | ||
|
|
ada7b3a179 | ||
|
|
dfa7025afe | ||
|
|
884af5fd4c | ||
|
|
3fb8fa2009 | ||
|
|
b43df36f7d | ||
|
|
fd058964fa | ||
|
|
fb20b36b18 | ||
|
|
84cba05167 | ||
|
|
9f988578a4 | ||
|
|
ea4e5751ab | ||
|
|
4e1db3842e | ||
|
|
029c79ba28 | ||
|
|
9f8ede7fe3 | ||
|
|
80bd7c563b | ||
|
|
92a3e5ba81 | ||
|
|
65cbc6922c | ||
|
|
eae3a7b5e4 | ||
|
|
6daf08216e | ||
|
|
bd0e46dac3 | ||
|
|
76514f9f32 | ||
|
|
b0f0913ea2 | ||
|
|
5423ab013a | ||
|
|
301c7a42bd | ||
|
|
755c18a9a7 | ||
|
|
0fa9906e3a | ||
|
|
b08580ca33 | ||
|
|
d8f923974e | ||
|
|
ef52550e70 | ||
|
|
c669b47b57 | ||
|
|
24f8ba729b | ||
|
|
6858f66a62 | ||
|
|
a5d796c679 | ||
|
|
4d3353fdc5 | ||
|
|
a420db8b8a | ||
|
|
5a4f68d384 | ||
|
|
bac942d2e8 | ||
|
|
9fab74eb8a | ||
|
|
e6a1c5309f | ||
|
|
5c29a66fcc | ||
|
|
b4f9269ae4 | ||
|
|
54cd99c750 | ||
|
|
30b0ba8055 | ||
|
|
ce425a2c21 | ||
|
|
2320ef4014 | ||
|
|
cfbe16f2d0 | ||
|
|
1b206175f8 | ||
|
|
3135abd0ec | ||
|
|
74e11505c6 | ||
|
|
4e2c9de708 | ||
|
|
bec7199ab6 | ||
|
|
78bb67fbf7 | ||
|
|
2db4105479 | ||
|
|
ac5a4bf58d | ||
|
|
3f1ce2d7ac | ||
|
|
84b28760b3 | ||
|
|
9b7bf8ee06 | ||
|
|
1dae43f468 | ||
|
|
a14f917d68 | ||
|
|
2ecb15adac | ||
|
|
d40b5cfdab | ||
|
|
022eb59ff3 | ||
|
|
65b533a014 | ||
|
|
2e3de07719 | ||
|
|
7dc545cfa9 | ||
|
|
1add24b907 | ||
|
|
b15017d097 | ||
|
|
2d22491e8c | ||
|
|
150523b9d3 | ||
|
|
6f652e711c | ||
|
|
75b97eb03d | ||
|
|
f5827721c3 | ||
|
|
a40c5e99fc | ||
|
|
af831f0d7f | ||
|
|
095e18d0c8 | ||
|
|
f844dba3dc | ||
|
|
736c3ade09 | ||
|
|
b20774d9a6 | ||
|
|
2c94090fde | ||
|
|
df3509ec99 | ||
|
|
8689143214 | ||
|
|
5d4d2e3e2a | ||
|
|
2ebce26709 | ||
|
|
ac273a8ad5 | ||
|
|
9865e3ded8 | ||
|
|
3438595f32 | ||
|
|
62e20a213a | ||
|
|
f2cb6ae121 | ||
|
|
92dbc22db0 | ||
|
|
971e781965 | ||
|
|
503f466ecf | ||
|
|
81307bcb5c | ||
|
|
40d7b2ec55 | ||
|
|
20f1d40b61 | ||
|
|
51bd058f2d | ||
|
|
1614504922 | ||
|
|
2324395ae2 | ||
|
|
2eef15724b | ||
|
|
acce3604a4 | ||
|
|
0928c3c830 | ||
|
|
77436207e6 | ||
|
|
8adf1a06eb | ||
|
|
df27a96f1f | ||
|
|
f1f8fdf752 | ||
|
|
95416dfe82 | ||
|
|
e58abbe4de | ||
|
|
878b37178d | ||
|
|
a230a58a1d | ||
|
|
0bcc0a2429 | ||
|
|
b14aa6a0c3 | ||
|
|
21a9d77788 | ||
|
|
266d8c0cc2 | ||
|
|
573dfd099f | ||
|
|
515453c607 | ||
|
|
455a9664b4 | ||
|
|
01c2e45843 | ||
|
|
fc237a1690 | ||
|
|
6af7c5891f |
12
.air.toml
Normal file
@@ -0,0 +1,12 @@
|
|||||||
|
root = "."
|
||||||
|
tmp_dir = "var"
|
||||||
|
|
||||||
|
[build]
|
||||||
|
cmd = "go build -o ./var/main ./cmd/anubis"
|
||||||
|
bin = "./var/main"
|
||||||
|
args = ["--use-remote-address"]
|
||||||
|
exclude_dir = ["var", "vendor", "docs", "node_modules"]
|
||||||
|
|
||||||
|
[logger]
|
||||||
|
time = true
|
||||||
|
# to change flags at runtime, prepend with -- e.g. $ air -- --target http://localhost:3000 --difficulty 20 --use-remote-address
|
||||||
3
.github/FUNDING.yml
vendored
@@ -1 +1,2 @@
|
|||||||
patreon: cadey
|
patreon: cadey
|
||||||
|
github: xe
|
||||||
28
.github/dependabot.yml
vendored
Normal file
@@ -0,0 +1,28 @@
|
|||||||
|
version: 2
|
||||||
|
updates:
|
||||||
|
- package-ecosystem: github-actions
|
||||||
|
directory: /
|
||||||
|
schedule:
|
||||||
|
interval: weekly
|
||||||
|
groups:
|
||||||
|
github-actions:
|
||||||
|
patterns:
|
||||||
|
- "*"
|
||||||
|
|
||||||
|
- package-ecosystem: gomod
|
||||||
|
directory: /
|
||||||
|
schedule:
|
||||||
|
interval: weekly
|
||||||
|
groups:
|
||||||
|
gomod:
|
||||||
|
patterns:
|
||||||
|
- "*"
|
||||||
|
|
||||||
|
- package-ecosystem: npm
|
||||||
|
directory: /
|
||||||
|
schedule:
|
||||||
|
interval: weekly
|
||||||
|
groups:
|
||||||
|
npm:
|
||||||
|
patterns:
|
||||||
|
- "*"
|
||||||
13
.github/workflows/docker-pr.yml
vendored
@@ -12,19 +12,20 @@ permissions:
|
|||||||
|
|
||||||
jobs:
|
jobs:
|
||||||
build:
|
build:
|
||||||
runs-on: ubuntu-latest
|
runs-on: ubuntu-24.04
|
||||||
steps:
|
steps:
|
||||||
- name: Checkout code
|
- name: Checkout code
|
||||||
uses: actions/checkout@v4
|
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
|
||||||
with:
|
with:
|
||||||
fetch-tags: true
|
fetch-tags: true
|
||||||
fetch-depth: 0
|
fetch-depth: 0
|
||||||
|
persist-credentials: false
|
||||||
|
|
||||||
- name: Set up Homebrew
|
- name: Set up Homebrew
|
||||||
uses: Homebrew/actions/setup-homebrew@master
|
uses: Homebrew/actions/setup-homebrew@master
|
||||||
|
|
||||||
- name: Setup Homebrew cellar cache
|
- name: Setup Homebrew cellar cache
|
||||||
uses: actions/cache@v4
|
uses: actions/cache@5a3ec84eff668545956fd18022155c47e93e2684 # v4.2.3
|
||||||
with:
|
with:
|
||||||
path: |
|
path: |
|
||||||
/home/linuxbrew/.linuxbrew/Cellar
|
/home/linuxbrew/.linuxbrew/Cellar
|
||||||
@@ -46,7 +47,7 @@ jobs:
|
|||||||
|
|
||||||
- name: Docker meta
|
- name: Docker meta
|
||||||
id: meta
|
id: meta
|
||||||
uses: docker/metadata-action@v5
|
uses: docker/metadata-action@902fa8ec7d6ecbf8d84d538b9b233a880e428804 # v5.7.0
|
||||||
with:
|
with:
|
||||||
images: ghcr.io/techarohq/anubis
|
images: ghcr.io/techarohq/anubis
|
||||||
|
|
||||||
@@ -62,4 +63,6 @@ jobs:
|
|||||||
|
|
||||||
- run: |
|
- run: |
|
||||||
echo "Test this with:"
|
echo "Test this with:"
|
||||||
echo "docker pull ${{ steps.build.outputs.docker_image }}"
|
echo "docker pull ${DOCKER_IMAGE}"
|
||||||
|
env:
|
||||||
|
DOCKER_IMAGE: ${{ steps.build.outputs.docker_image }}
|
||||||
|
|||||||
15
.github/workflows/docker.yml
vendored
@@ -18,19 +18,20 @@ permissions:
|
|||||||
|
|
||||||
jobs:
|
jobs:
|
||||||
build:
|
build:
|
||||||
runs-on: ubuntu-latest
|
runs-on: ubuntu-24.04
|
||||||
steps:
|
steps:
|
||||||
- name: Checkout code
|
- name: Checkout code
|
||||||
uses: actions/checkout@v4
|
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
|
||||||
with:
|
with:
|
||||||
fetch-tags: true
|
fetch-tags: true
|
||||||
fetch-depth: 0
|
fetch-depth: 0
|
||||||
|
persist-credentials: false
|
||||||
|
|
||||||
- name: Set up Homebrew
|
- name: Set up Homebrew
|
||||||
uses: Homebrew/actions/setup-homebrew@master
|
uses: Homebrew/actions/setup-homebrew@master
|
||||||
|
|
||||||
- name: Setup Homebrew cellar cache
|
- name: Setup Homebrew cellar cache
|
||||||
uses: actions/cache@v4
|
uses: actions/cache@5a3ec84eff668545956fd18022155c47e93e2684 # v4.2.3
|
||||||
with:
|
with:
|
||||||
path: |
|
path: |
|
||||||
/home/linuxbrew/.linuxbrew/Cellar
|
/home/linuxbrew/.linuxbrew/Cellar
|
||||||
@@ -51,7 +52,7 @@ jobs:
|
|||||||
brew bundle
|
brew bundle
|
||||||
|
|
||||||
- name: Log into registry
|
- name: Log into registry
|
||||||
uses: docker/login-action@v3
|
uses: docker/login-action@74a5d142397b4f367a81961eba4e8cd7edddf772 # v3.4.0
|
||||||
with:
|
with:
|
||||||
registry: ghcr.io
|
registry: ghcr.io
|
||||||
username: techarohq
|
username: techarohq
|
||||||
@@ -59,7 +60,7 @@ jobs:
|
|||||||
|
|
||||||
- name: Docker meta
|
- name: Docker meta
|
||||||
id: meta
|
id: meta
|
||||||
uses: docker/metadata-action@v5
|
uses: docker/metadata-action@902fa8ec7d6ecbf8d84d538b9b233a880e428804 # v5.7.0
|
||||||
with:
|
with:
|
||||||
images: ghcr.io/techarohq/anubis
|
images: ghcr.io/techarohq/anubis
|
||||||
|
|
||||||
@@ -73,8 +74,8 @@ jobs:
|
|||||||
SLOG_LEVEL: debug
|
SLOG_LEVEL: debug
|
||||||
|
|
||||||
- name: Generate artifact attestation
|
- name: Generate artifact attestation
|
||||||
uses: actions/attest-build-provenance@v2
|
uses: actions/attest-build-provenance@c074443f1aee8d4aeeae555aebba3282517141b2 # v2.2.3
|
||||||
with:
|
with:
|
||||||
subject-name: ghcr.io/techarohq/anubis
|
subject-name: ghcr.io/techarohq/anubis
|
||||||
subject-digest: ${{ steps.build.outputs.digest }}
|
subject-digest: ${{ steps.build.outputs.digest }}
|
||||||
push-to-registry: true
|
push-to-registry: true
|
||||||
|
|||||||
18
.github/workflows/docs-deploy.yml
vendored
@@ -13,16 +13,18 @@ permissions:
|
|||||||
|
|
||||||
jobs:
|
jobs:
|
||||||
build:
|
build:
|
||||||
runs-on: ubuntu-latest
|
runs-on: ubuntu-24.04
|
||||||
|
|
||||||
steps:
|
steps:
|
||||||
- uses: actions/checkout@v4
|
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
|
||||||
|
with:
|
||||||
|
persist-credentials: false
|
||||||
|
|
||||||
- name: Set up Docker Buildx
|
- name: Set up Docker Buildx
|
||||||
uses: docker/setup-buildx-action@v3
|
uses: docker/setup-buildx-action@b5ca514318bd6ebac0fb2aedd5d36ec1b5c232a2 # v3.10.0
|
||||||
|
|
||||||
- name: Log into registry
|
- name: Log into registry
|
||||||
uses: docker/login-action@v3
|
uses: docker/login-action@74a5d142397b4f367a81961eba4e8cd7edddf772 # v3.4.0
|
||||||
with:
|
with:
|
||||||
registry: ghcr.io
|
registry: ghcr.io
|
||||||
username: techarohq
|
username: techarohq
|
||||||
@@ -30,13 +32,13 @@ jobs:
|
|||||||
|
|
||||||
- name: Docker meta
|
- name: Docker meta
|
||||||
id: meta
|
id: meta
|
||||||
uses: docker/metadata-action@v5
|
uses: docker/metadata-action@902fa8ec7d6ecbf8d84d538b9b233a880e428804 # v5.7.0
|
||||||
with:
|
with:
|
||||||
images: ghcr.io/techarohq/anubis/docs
|
images: ghcr.io/techarohq/anubis/docs
|
||||||
|
|
||||||
- name: Build and push
|
- name: Build and push
|
||||||
id: build
|
id: build
|
||||||
uses: docker/build-push-action@v6
|
uses: docker/build-push-action@14487ce63c7a62a4a324b0bfb37086795e31c6c1 # v6.16.0
|
||||||
with:
|
with:
|
||||||
context: ./docs
|
context: ./docs
|
||||||
cache-to: type=gha
|
cache-to: type=gha
|
||||||
@@ -47,14 +49,14 @@ jobs:
|
|||||||
push: true
|
push: true
|
||||||
|
|
||||||
- name: Apply k8s manifests to aeacus
|
- name: Apply k8s manifests to aeacus
|
||||||
uses: actions-hub/kubectl@master
|
uses: actions-hub/kubectl@e81783053d902f50d752d21a6d99cf9689a652e1 # v1.33.0
|
||||||
env:
|
env:
|
||||||
KUBE_CONFIG: ${{ secrets.AEACUS_KUBECONFIG }}
|
KUBE_CONFIG: ${{ secrets.AEACUS_KUBECONFIG }}
|
||||||
with:
|
with:
|
||||||
args: apply -k docs/manifest
|
args: apply -k docs/manifest
|
||||||
|
|
||||||
- name: Apply k8s manifests to aeacus
|
- name: Apply k8s manifests to aeacus
|
||||||
uses: actions-hub/kubectl@master
|
uses: actions-hub/kubectl@e81783053d902f50d752d21a6d99cf9689a652e1 # v1.33.0
|
||||||
env:
|
env:
|
||||||
KUBE_CONFIG: ${{ secrets.AEACUS_KUBECONFIG }}
|
KUBE_CONFIG: ${{ secrets.AEACUS_KUBECONFIG }}
|
||||||
with:
|
with:
|
||||||
|
|||||||
39
.github/workflows/docs-test.yml
vendored
Normal file
@@ -0,0 +1,39 @@
|
|||||||
|
name: Docs test build
|
||||||
|
|
||||||
|
on:
|
||||||
|
pull_request:
|
||||||
|
branches: [ "main" ]
|
||||||
|
|
||||||
|
permissions:
|
||||||
|
contents: read
|
||||||
|
actions: write
|
||||||
|
|
||||||
|
jobs:
|
||||||
|
build:
|
||||||
|
runs-on: ubuntu-24.04
|
||||||
|
|
||||||
|
steps:
|
||||||
|
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
|
||||||
|
with:
|
||||||
|
persist-credentials: false
|
||||||
|
|
||||||
|
- name: Set up Docker Buildx
|
||||||
|
uses: docker/setup-buildx-action@b5ca514318bd6ebac0fb2aedd5d36ec1b5c232a2 # v3.10.0
|
||||||
|
|
||||||
|
- name: Docker meta
|
||||||
|
id: meta
|
||||||
|
uses: docker/metadata-action@902fa8ec7d6ecbf8d84d538b9b233a880e428804 # v5.7.0
|
||||||
|
with:
|
||||||
|
images: ghcr.io/techarohq/anubis/docs
|
||||||
|
|
||||||
|
- name: Build and push
|
||||||
|
id: build
|
||||||
|
uses: docker/build-push-action@14487ce63c7a62a4a324b0bfb37086795e31c6c1 # v6.16.0
|
||||||
|
with:
|
||||||
|
context: ./docs
|
||||||
|
cache-to: type=gha
|
||||||
|
cache-from: type=gha
|
||||||
|
tags: ${{ steps.meta.outputs.tags }}
|
||||||
|
labels: ${{ steps.meta.outputs.labels }}
|
||||||
|
platforms: linux/amd64
|
||||||
|
push: false
|
||||||
20
.github/workflows/go.yml
vendored
@@ -13,9 +13,11 @@ permissions:
|
|||||||
jobs:
|
jobs:
|
||||||
go_tests:
|
go_tests:
|
||||||
#runs-on: alrest-techarohq
|
#runs-on: alrest-techarohq
|
||||||
runs-on: ubuntu-latest
|
runs-on: ubuntu-24.04
|
||||||
steps:
|
steps:
|
||||||
- uses: actions/checkout@v4
|
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
|
||||||
|
with:
|
||||||
|
persist-credentials: false
|
||||||
|
|
||||||
- name: build essential
|
- name: build essential
|
||||||
run: |
|
run: |
|
||||||
@@ -26,7 +28,7 @@ jobs:
|
|||||||
uses: Homebrew/actions/setup-homebrew@master
|
uses: Homebrew/actions/setup-homebrew@master
|
||||||
|
|
||||||
- name: Setup Homebrew cellar cache
|
- name: Setup Homebrew cellar cache
|
||||||
uses: actions/cache@v4
|
uses: actions/cache@5a3ec84eff668545956fd18022155c47e93e2684 # v4.2.3
|
||||||
with:
|
with:
|
||||||
path: |
|
path: |
|
||||||
/home/linuxbrew/.linuxbrew/Cellar
|
/home/linuxbrew/.linuxbrew/Cellar
|
||||||
@@ -47,7 +49,7 @@ jobs:
|
|||||||
brew bundle
|
brew bundle
|
||||||
|
|
||||||
- name: Setup Golang caches
|
- name: Setup Golang caches
|
||||||
uses: actions/cache@v4
|
uses: actions/cache@5a3ec84eff668545956fd18022155c47e93e2684 # v4.2.3
|
||||||
with:
|
with:
|
||||||
path: |
|
path: |
|
||||||
~/.cache/go-build
|
~/.cache/go-build
|
||||||
@@ -57,7 +59,7 @@ jobs:
|
|||||||
${{ runner.os }}-golang-
|
${{ runner.os }}-golang-
|
||||||
|
|
||||||
- name: Cache playwright binaries
|
- name: Cache playwright binaries
|
||||||
uses: actions/cache@v3
|
uses: actions/cache@5a3ec84eff668545956fd18022155c47e93e2684 # v4.2.3
|
||||||
id: playwright-cache
|
id: playwright-cache
|
||||||
with:
|
with:
|
||||||
path: |
|
path: |
|
||||||
@@ -66,8 +68,8 @@ jobs:
|
|||||||
|
|
||||||
- name: install playwright browsers
|
- name: install playwright browsers
|
||||||
run: |
|
run: |
|
||||||
npx --yes playwright@1.50.1 install --with-deps
|
npx --yes playwright@1.51.1 install --with-deps
|
||||||
npx --yes playwright@1.50.1 run-server --port 9001 &
|
npx --yes playwright@1.51.1 run-server --port 9001 &
|
||||||
|
|
||||||
- name: install node deps
|
- name: install node deps
|
||||||
run: |
|
run: |
|
||||||
@@ -75,11 +77,11 @@ jobs:
|
|||||||
npm run assets
|
npm run assets
|
||||||
|
|
||||||
- name: Build
|
- name: Build
|
||||||
run: go build ./...
|
run: npm run build
|
||||||
|
|
||||||
- name: Test
|
- name: Test
|
||||||
run: npm run test
|
run: npm run test
|
||||||
|
|
||||||
- uses: dominikh/staticcheck-action@v1
|
- uses: dominikh/staticcheck-action@fe1dd0c3658873b46f8c9bb3291096a617310ca6 # v1.3.1
|
||||||
with:
|
with:
|
||||||
version: "latest"
|
version: "latest"
|
||||||
|
|||||||
82
.github/workflows/package-builds-stable.yml
vendored
Normal file
@@ -0,0 +1,82 @@
|
|||||||
|
name: Package builds (stable)
|
||||||
|
|
||||||
|
on:
|
||||||
|
release:
|
||||||
|
types: [published]
|
||||||
|
|
||||||
|
permissions:
|
||||||
|
contents: write
|
||||||
|
actions: write
|
||||||
|
|
||||||
|
jobs:
|
||||||
|
package_builds:
|
||||||
|
#runs-on: alrest-techarohq
|
||||||
|
runs-on: ubuntu-24.04
|
||||||
|
steps:
|
||||||
|
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
|
||||||
|
with:
|
||||||
|
persist-credentials: false
|
||||||
|
fetch-tags: true
|
||||||
|
fetch-depth: 0
|
||||||
|
|
||||||
|
- name: build essential
|
||||||
|
run: |
|
||||||
|
sudo apt-get update
|
||||||
|
sudo apt-get install -y build-essential
|
||||||
|
|
||||||
|
- name: Set up Homebrew
|
||||||
|
uses: Homebrew/actions/setup-homebrew@master
|
||||||
|
|
||||||
|
- name: Setup Homebrew cellar cache
|
||||||
|
uses: actions/cache@5a3ec84eff668545956fd18022155c47e93e2684 # v4.2.3
|
||||||
|
with:
|
||||||
|
path: |
|
||||||
|
/home/linuxbrew/.linuxbrew/Cellar
|
||||||
|
/home/linuxbrew/.linuxbrew/bin
|
||||||
|
/home/linuxbrew/.linuxbrew/etc
|
||||||
|
/home/linuxbrew/.linuxbrew/include
|
||||||
|
/home/linuxbrew/.linuxbrew/lib
|
||||||
|
/home/linuxbrew/.linuxbrew/opt
|
||||||
|
/home/linuxbrew/.linuxbrew/sbin
|
||||||
|
/home/linuxbrew/.linuxbrew/share
|
||||||
|
/home/linuxbrew/.linuxbrew/var
|
||||||
|
key: ${{ runner.os }}-go-homebrew-cellar-${{ hashFiles('go.sum') }}
|
||||||
|
restore-keys: |
|
||||||
|
${{ runner.os }}-go-homebrew-cellar-
|
||||||
|
|
||||||
|
- name: Install Brew dependencies
|
||||||
|
run: |
|
||||||
|
brew bundle
|
||||||
|
|
||||||
|
- name: Setup Golang caches
|
||||||
|
uses: actions/cache@5a3ec84eff668545956fd18022155c47e93e2684 # v4.2.3
|
||||||
|
with:
|
||||||
|
path: |
|
||||||
|
~/.cache/go-build
|
||||||
|
~/go/pkg/mod
|
||||||
|
key: ${{ runner.os }}-golang-${{ hashFiles('**/go.sum') }}
|
||||||
|
restore-keys: |
|
||||||
|
${{ runner.os }}-golang-
|
||||||
|
|
||||||
|
- name: install node deps
|
||||||
|
run: |
|
||||||
|
npm ci
|
||||||
|
|
||||||
|
- name: Build Packages
|
||||||
|
run: |
|
||||||
|
wget https://github.com/TecharoHQ/yeet/releases/download/v0.2.1/yeet_0.2.1_amd64.deb -O var/yeet.deb
|
||||||
|
sudo apt -y install -f ./var/yeet.deb
|
||||||
|
rm ./var/yeet.deb
|
||||||
|
yeet
|
||||||
|
|
||||||
|
- name: Upload released artifacts
|
||||||
|
env:
|
||||||
|
GITHUB_TOKEN: ${{ github.TOKEN }}
|
||||||
|
RELEASE_VERSION: ${{github.event.release.tag_name}}
|
||||||
|
shell: bash
|
||||||
|
run: |
|
||||||
|
RELEASE="${RELEASE_VERSION}"
|
||||||
|
cd var
|
||||||
|
for file in *; do
|
||||||
|
gh release upload $RELEASE $file
|
||||||
|
done
|
||||||
77
.github/workflows/package-builds-unstable.yml
vendored
Normal file
@@ -0,0 +1,77 @@
|
|||||||
|
name: Package builds (unstable)
|
||||||
|
|
||||||
|
on:
|
||||||
|
push:
|
||||||
|
branches: [ "main" ]
|
||||||
|
pull_request:
|
||||||
|
branches: [ "main" ]
|
||||||
|
|
||||||
|
permissions:
|
||||||
|
contents: read
|
||||||
|
actions: write
|
||||||
|
|
||||||
|
jobs:
|
||||||
|
package_builds:
|
||||||
|
#runs-on: alrest-techarohq
|
||||||
|
runs-on: ubuntu-24.04
|
||||||
|
steps:
|
||||||
|
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
|
||||||
|
with:
|
||||||
|
persist-credentials: false
|
||||||
|
fetch-tags: true
|
||||||
|
fetch-depth: 0
|
||||||
|
|
||||||
|
- name: build essential
|
||||||
|
run: |
|
||||||
|
sudo apt-get update
|
||||||
|
sudo apt-get install -y build-essential
|
||||||
|
|
||||||
|
- name: Set up Homebrew
|
||||||
|
uses: Homebrew/actions/setup-homebrew@master
|
||||||
|
|
||||||
|
- name: Setup Homebrew cellar cache
|
||||||
|
uses: actions/cache@5a3ec84eff668545956fd18022155c47e93e2684 # v4.2.3
|
||||||
|
with:
|
||||||
|
path: |
|
||||||
|
/home/linuxbrew/.linuxbrew/Cellar
|
||||||
|
/home/linuxbrew/.linuxbrew/bin
|
||||||
|
/home/linuxbrew/.linuxbrew/etc
|
||||||
|
/home/linuxbrew/.linuxbrew/include
|
||||||
|
/home/linuxbrew/.linuxbrew/lib
|
||||||
|
/home/linuxbrew/.linuxbrew/opt
|
||||||
|
/home/linuxbrew/.linuxbrew/sbin
|
||||||
|
/home/linuxbrew/.linuxbrew/share
|
||||||
|
/home/linuxbrew/.linuxbrew/var
|
||||||
|
key: ${{ runner.os }}-go-homebrew-cellar-${{ hashFiles('go.sum') }}
|
||||||
|
restore-keys: |
|
||||||
|
${{ runner.os }}-go-homebrew-cellar-
|
||||||
|
|
||||||
|
- name: Install Brew dependencies
|
||||||
|
run: |
|
||||||
|
brew bundle
|
||||||
|
|
||||||
|
- name: Setup Golang caches
|
||||||
|
uses: actions/cache@5a3ec84eff668545956fd18022155c47e93e2684 # v4.2.3
|
||||||
|
with:
|
||||||
|
path: |
|
||||||
|
~/.cache/go-build
|
||||||
|
~/go/pkg/mod
|
||||||
|
key: ${{ runner.os }}-golang-${{ hashFiles('**/go.sum') }}
|
||||||
|
restore-keys: |
|
||||||
|
${{ runner.os }}-golang-
|
||||||
|
|
||||||
|
- name: install node deps
|
||||||
|
run: |
|
||||||
|
npm ci
|
||||||
|
|
||||||
|
- name: Build Packages
|
||||||
|
run: |
|
||||||
|
wget https://github.com/TecharoHQ/yeet/releases/download/v0.2.1/yeet_0.2.1_amd64.deb -O var/yeet.deb
|
||||||
|
sudo apt -y install -f ./var/yeet.deb
|
||||||
|
rm ./var/yeet.deb
|
||||||
|
yeet
|
||||||
|
|
||||||
|
- uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2
|
||||||
|
with:
|
||||||
|
name: packages
|
||||||
|
path: var/*
|
||||||
35
.github/workflows/zizmor.yml
vendored
Normal file
@@ -0,0 +1,35 @@
|
|||||||
|
name: zizmor
|
||||||
|
|
||||||
|
on:
|
||||||
|
push:
|
||||||
|
paths:
|
||||||
|
- '.github/workflows/*.ya?ml'
|
||||||
|
pull_request:
|
||||||
|
paths:
|
||||||
|
- '.github/workflows/*.ya?ml'
|
||||||
|
|
||||||
|
jobs:
|
||||||
|
zizmor:
|
||||||
|
name: zizmor latest via PyPI
|
||||||
|
runs-on: ubuntu-24.04
|
||||||
|
permissions:
|
||||||
|
security-events: write
|
||||||
|
steps:
|
||||||
|
- name: Checkout repository
|
||||||
|
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
|
||||||
|
with:
|
||||||
|
persist-credentials: false
|
||||||
|
|
||||||
|
- name: Install the latest version of uv
|
||||||
|
uses: astral-sh/setup-uv@c7f87aa956e4c323abf06d5dec078e358f6b4d04 # v6.0.0
|
||||||
|
|
||||||
|
- name: Run zizmor 🌈
|
||||||
|
run: uvx zizmor --format sarif . > results.sarif
|
||||||
|
env:
|
||||||
|
GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||||
|
|
||||||
|
- name: Upload SARIF file
|
||||||
|
uses: github/codeql-action/upload-sarif@28deaeda66b76a05916b6923827895f2b14ab387 # v3.28.16
|
||||||
|
with:
|
||||||
|
sarif_file: results.sarif
|
||||||
|
category: zizmor
|
||||||
12
.gitignore
vendored
@@ -2,6 +2,10 @@
|
|||||||
*.deb
|
*.deb
|
||||||
*.rpm
|
*.rpm
|
||||||
|
|
||||||
|
# Additional package locks
|
||||||
|
pnpm-lock.yaml
|
||||||
|
yarn.lock
|
||||||
|
|
||||||
# Go binaries and test artifacts
|
# Go binaries and test artifacts
|
||||||
main
|
main
|
||||||
*.test
|
*.test
|
||||||
@@ -9,4 +13,10 @@ main
|
|||||||
node_modules
|
node_modules
|
||||||
|
|
||||||
# MacOS
|
# MacOS
|
||||||
.DS_store
|
.DS_store
|
||||||
|
|
||||||
|
# Intellij
|
||||||
|
.idea
|
||||||
|
|
||||||
|
# how does this get here
|
||||||
|
doc/VERSION
|
||||||
|
|||||||
31
Makefile
Normal file
@@ -0,0 +1,31 @@
|
|||||||
|
VERSION= $(shell cat ./VERSION)
|
||||||
|
GO?= go
|
||||||
|
NPM?= npm
|
||||||
|
|
||||||
|
.PHONY: build assets deps lint prebaked-build test
|
||||||
|
|
||||||
|
all: build
|
||||||
|
|
||||||
|
deps:
|
||||||
|
$(NPM) ci
|
||||||
|
$(GO) mod download
|
||||||
|
|
||||||
|
assets: PATH:=$(PWD)/node_modules/.bin:$(PATH)
|
||||||
|
assets: deps
|
||||||
|
$(GO) generate ./...
|
||||||
|
./web/build.sh
|
||||||
|
./xess/build.sh
|
||||||
|
|
||||||
|
build: assets
|
||||||
|
$(GO) build -o ./var/anubis ./cmd/anubis
|
||||||
|
@echo "Anubis is now built to ./var/anubis"
|
||||||
|
|
||||||
|
lint: assets
|
||||||
|
$(GO) vet ./...
|
||||||
|
$(GO) tool staticcheck ./...
|
||||||
|
|
||||||
|
prebaked-build:
|
||||||
|
$(GO) build -o ./var/anubis -ldflags "-X 'github.com/TecharoHQ/anubis.Version=$(VERSION)'" ./cmd/anubis
|
||||||
|
|
||||||
|
test: assets
|
||||||
|
$(GO) test ./...
|
||||||
34
README.md
@@ -10,11 +10,19 @@
|
|||||||

|

|
||||||

|

|
||||||
|
|
||||||
Anubis [weighs the soul of your connection](https://en.wikipedia.org/wiki/Weighing_of_souls) using a sha256 proof-of-work challenge in order to protect upstream resources from scraper bots.
|
## Sponsors
|
||||||
|
|
||||||
Installing and using this will likely result in your website not being indexed by some search engines. This is considered a feature of Anubis, not a bug.
|
Anubis is brought to you by sponsors and donors like:
|
||||||
|
|
||||||
This is a bit of a nuclear response, but AI scraper bots scraping so aggressively have forced my hand. I hate that I have to do this, but this is what we get for the modern Internet because bots don't conform to standards like robots.txt, even when they claim to.
|
[](https://distrust.co)
|
||||||
|
|
||||||
|
## Overview
|
||||||
|
|
||||||
|
Anubis [weighs the soul of your connection](https://en.wikipedia.org/wiki/Weighing_of_souls) using a proof-of-work challenge in order to protect upstream resources from scraper bots.
|
||||||
|
|
||||||
|
This program is designed to help protect the small internet from the endless storm of requests that flood in from AI companies. Anubis is as lightweight as possible to ensure that everyone can afford to protect the communities closest to them.
|
||||||
|
|
||||||
|
Anubis is a bit of a nuclear response. This will result in your website being blocked from smaller scrapers and may inhibit "good bots" like the Internet Archive. You can configure [bot policy definitions](./docs/docs/admin/policies.mdx) to explicitly allowlist them and we are working on a curated set of "known good" bots to allow for a compromise between discoverability and uptime.
|
||||||
|
|
||||||
In most cases, you should not need this and can probably get by using Cloudflare to protect a given origin. However, for circumstances where you can't or won't use Cloudflare, Anubis is there for you.
|
In most cases, you should not need this and can probably get by using Cloudflare to protect a given origin. However, for circumstances where you can't or won't use Cloudflare, Anubis is there for you.
|
||||||
|
|
||||||
@@ -28,4 +36,22 @@ For live chat, please join the [Patreon](https://patreon.com/cadey) and ask in t
|
|||||||
|
|
||||||
## Star History
|
## Star History
|
||||||
|
|
||||||
[](https://www.star-history.com/#TecharoHQ/anubis&Date)
|
<a href="https://www.star-history.com/#TecharoHQ/anubis&Date">
|
||||||
|
<picture>
|
||||||
|
<source media="(prefers-color-scheme: dark)" srcset="https://api.star-history.com/svg?repos=TecharoHQ/anubis&type=Date&theme=dark" />
|
||||||
|
<source media="(prefers-color-scheme: light)" srcset="https://api.star-history.com/svg?repos=TecharoHQ/anubis&type=Date" />
|
||||||
|
<img alt="Star History Chart" src="https://api.star-history.com/svg?repos=TecharoHQ/anubis&type=Date" />
|
||||||
|
</picture>
|
||||||
|
</a>
|
||||||
|
|
||||||
|
## Packaging Status
|
||||||
|
|
||||||
|
[](https://repology.org/project/anubis-anti-crawler/versions)
|
||||||
|
|
||||||
|
## Contributors
|
||||||
|
|
||||||
|
<a href="https://github.com/TecharoHQ/anubis/graphs/contributors">
|
||||||
|
<img src="https://contrib.rocks/image?repo=TecharoHQ/anubis" />
|
||||||
|
</a>
|
||||||
|
|
||||||
|
Made with [contrib.rocks](https://contrib.rocks).
|
||||||
|
|||||||
@@ -1,4 +1,4 @@
|
|||||||
// Package Anubis contains the version number of Anubis.
|
// Package anubis contains the version number of Anubis.
|
||||||
package anubis
|
package anubis
|
||||||
|
|
||||||
// Version is the current version of Anubis.
|
// Version is the current version of Anubis.
|
||||||
@@ -11,9 +11,15 @@ var Version = "devel"
|
|||||||
// access.
|
// access.
|
||||||
const CookieName = "within.website-x-cmd-anubis-auth"
|
const CookieName = "within.website-x-cmd-anubis-auth"
|
||||||
|
|
||||||
|
// BasePrefix is a global prefix for all Anubis endpoints. Can be emptied to remove the prefix entirely.
|
||||||
|
var BasePrefix = ""
|
||||||
|
|
||||||
// StaticPath is the location where all static Anubis assets are located.
|
// StaticPath is the location where all static Anubis assets are located.
|
||||||
const StaticPath = "/.within.website/x/cmd/anubis/"
|
const StaticPath = "/.within.website/x/cmd/anubis/"
|
||||||
|
|
||||||
|
// APIPrefix is the location where all Anubis API endpoints are located.
|
||||||
|
const APIPrefix = "/.within.website/x/cmd/anubis/api/"
|
||||||
|
|
||||||
// DefaultDifficulty is the default "difficulty" (number of leading zeroes)
|
// DefaultDifficulty is the default "difficulty" (number of leading zeroes)
|
||||||
// that must be met by the client in order to pass the challenge.
|
// that must be met by the client in order to pass the challenge.
|
||||||
const DefaultDifficulty = 4
|
const DefaultDifficulty = 4
|
||||||
|
|||||||
@@ -5,9 +5,12 @@ import (
|
|||||||
"context"
|
"context"
|
||||||
"crypto/ed25519"
|
"crypto/ed25519"
|
||||||
"crypto/rand"
|
"crypto/rand"
|
||||||
|
"embed"
|
||||||
"encoding/hex"
|
"encoding/hex"
|
||||||
|
"errors"
|
||||||
"flag"
|
"flag"
|
||||||
"fmt"
|
"fmt"
|
||||||
|
"io/fs"
|
||||||
"log"
|
"log"
|
||||||
"log/slog"
|
"log/slog"
|
||||||
"net"
|
"net"
|
||||||
@@ -16,7 +19,7 @@ import (
|
|||||||
"net/url"
|
"net/url"
|
||||||
"os"
|
"os"
|
||||||
"os/signal"
|
"os/signal"
|
||||||
"regexp"
|
"path/filepath"
|
||||||
"strconv"
|
"strconv"
|
||||||
"strings"
|
"strings"
|
||||||
"sync"
|
"sync"
|
||||||
@@ -24,15 +27,18 @@ import (
|
|||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/TecharoHQ/anubis"
|
"github.com/TecharoHQ/anubis"
|
||||||
|
"github.com/TecharoHQ/anubis/data"
|
||||||
"github.com/TecharoHQ/anubis/internal"
|
"github.com/TecharoHQ/anubis/internal"
|
||||||
libanubis "github.com/TecharoHQ/anubis/lib"
|
libanubis "github.com/TecharoHQ/anubis/lib"
|
||||||
botPolicy "github.com/TecharoHQ/anubis/lib/policy"
|
botPolicy "github.com/TecharoHQ/anubis/lib/policy"
|
||||||
"github.com/TecharoHQ/anubis/lib/policy/config"
|
"github.com/TecharoHQ/anubis/lib/policy/config"
|
||||||
|
"github.com/TecharoHQ/anubis/web"
|
||||||
"github.com/facebookgo/flagenv"
|
"github.com/facebookgo/flagenv"
|
||||||
"github.com/prometheus/client_golang/prometheus/promhttp"
|
"github.com/prometheus/client_golang/prometheus/promhttp"
|
||||||
)
|
)
|
||||||
|
|
||||||
var (
|
var (
|
||||||
|
basePrefix = flag.String("base-prefix", "", "base prefix (root URL) the application is served under e.g. /myapp")
|
||||||
bind = flag.String("bind", ":8923", "network address to bind HTTP to")
|
bind = flag.String("bind", ":8923", "network address to bind HTTP to")
|
||||||
bindNetwork = flag.String("bind-network", "tcp", "network family to bind HTTP to, e.g. unix, tcp")
|
bindNetwork = flag.String("bind-network", "tcp", "network family to bind HTTP to, e.g. unix, tcp")
|
||||||
challengeDifficulty = flag.Int("difficulty", anubis.DefaultDifficulty, "difficulty of the challenge")
|
challengeDifficulty = flag.Int("difficulty", anubis.DefaultDifficulty, "difficulty of the challenge")
|
||||||
@@ -45,11 +51,16 @@ var (
|
|||||||
socketMode = flag.String("socket-mode", "0770", "socket mode (permissions) for unix domain sockets.")
|
socketMode = flag.String("socket-mode", "0770", "socket mode (permissions) for unix domain sockets.")
|
||||||
robotsTxt = flag.Bool("serve-robots-txt", false, "serve a robots.txt file that disallows all robots")
|
robotsTxt = flag.Bool("serve-robots-txt", false, "serve a robots.txt file that disallows all robots")
|
||||||
policyFname = flag.String("policy-fname", "", "full path to anubis policy document (defaults to a sensible built-in policy)")
|
policyFname = flag.String("policy-fname", "", "full path to anubis policy document (defaults to a sensible built-in policy)")
|
||||||
|
redirectDomains = flag.String("redirect-domains", "", "list of domains separated by commas which anubis is allowed to redirect to. Leaving this unset allows any domain.")
|
||||||
slogLevel = flag.String("slog-level", "INFO", "logging level (see https://pkg.go.dev/log/slog#hdr-Levels)")
|
slogLevel = flag.String("slog-level", "INFO", "logging level (see https://pkg.go.dev/log/slog#hdr-Levels)")
|
||||||
target = flag.String("target", "http://localhost:3923", "target to reverse proxy to")
|
target = flag.String("target", "http://localhost:3923", "target to reverse proxy to, set to an empty string to disable proxying when only using auth request")
|
||||||
healthcheck = flag.Bool("healthcheck", false, "run a health check against Anubis")
|
healthcheck = flag.Bool("healthcheck", false, "run a health check against Anubis")
|
||||||
useRemoteAddress = flag.Bool("use-remote-address", false, "read the client's IP address from the network request, useful for debugging and running Anubis on bare metal")
|
useRemoteAddress = flag.Bool("use-remote-address", false, "read the client's IP address from the network request, useful for debugging and running Anubis on bare metal")
|
||||||
debugBenchmarkJS = flag.Bool("debug-benchmark-js", false, "respond to every request with a challenge for benchmarking hashrate")
|
debugBenchmarkJS = flag.Bool("debug-benchmark-js", false, "respond to every request with a challenge for benchmarking hashrate")
|
||||||
|
ogPassthrough = flag.Bool("og-passthrough", false, "enable Open Graph tag passthrough")
|
||||||
|
ogTimeToLive = flag.Duration("og-expiry-time", 24*time.Hour, "Open Graph tag cache expiration time")
|
||||||
|
extractResources = flag.String("extract-resources", "", "if set, extract the static resources to the specified folder")
|
||||||
|
webmasterEmail = flag.String("webmaster-email", "", "if set, displays webmaster's email on the reject page for appeals")
|
||||||
)
|
)
|
||||||
|
|
||||||
func keyFromHex(value string) (ed25519.PrivateKey, error) {
|
func keyFromHex(value string) (ed25519.PrivateKey, error) {
|
||||||
@@ -66,7 +77,7 @@ func keyFromHex(value string) (ed25519.PrivateKey, error) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func doHealthCheck() error {
|
func doHealthCheck() error {
|
||||||
resp, err := http.Get("http://localhost" + *metricsBind + "/metrics")
|
resp, err := http.Get("http://localhost" + *metricsBind + anubis.BasePrefix + "/metrics")
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return fmt.Errorf("failed to fetch metrics: %w", err)
|
return fmt.Errorf("failed to fetch metrics: %w", err)
|
||||||
}
|
}
|
||||||
@@ -109,7 +120,10 @@ func setupListener(network string, address string) (net.Listener, string) {
|
|||||||
|
|
||||||
err = os.Chmod(address, os.FileMode(mode))
|
err = os.Chmod(address, os.FileMode(mode))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
listener.Close()
|
err := listener.Close()
|
||||||
|
if err != nil {
|
||||||
|
log.Printf("failed to close listener: %v", err)
|
||||||
|
}
|
||||||
log.Fatal(fmt.Errorf("could not change socket mode: %w", err))
|
log.Fatal(fmt.Errorf("could not change socket mode: %w", err))
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -118,7 +132,7 @@ func setupListener(network string, address string) (net.Listener, string) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func makeReverseProxy(target string) (http.Handler, error) {
|
func makeReverseProxy(target string) (http.Handler, error) {
|
||||||
u, err := url.Parse(target)
|
targetUri, err := url.Parse(target)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, fmt.Errorf("failed to parse target URL: %w", err)
|
return nil, fmt.Errorf("failed to parse target URL: %w", err)
|
||||||
}
|
}
|
||||||
@@ -126,10 +140,10 @@ func makeReverseProxy(target string) (http.Handler, error) {
|
|||||||
transport := http.DefaultTransport.(*http.Transport).Clone()
|
transport := http.DefaultTransport.(*http.Transport).Clone()
|
||||||
|
|
||||||
// https://github.com/oauth2-proxy/oauth2-proxy/blob/4e2100a2879ef06aea1411790327019c1a09217c/pkg/upstream/http.go#L124
|
// https://github.com/oauth2-proxy/oauth2-proxy/blob/4e2100a2879ef06aea1411790327019c1a09217c/pkg/upstream/http.go#L124
|
||||||
if u.Scheme == "unix" {
|
if targetUri.Scheme == "unix" {
|
||||||
// clean path up so we don't use the socket path in proxied requests
|
// clean path up so we don't use the socket path in proxied requests
|
||||||
addr := u.Path
|
addr := targetUri.Path
|
||||||
u.Path = ""
|
targetUri.Path = ""
|
||||||
// tell transport how to dial unix sockets
|
// tell transport how to dial unix sockets
|
||||||
transport.DialContext = func(ctx context.Context, _, _ string) (net.Conn, error) {
|
transport.DialContext = func(ctx context.Context, _, _ string) (net.Conn, error) {
|
||||||
dialer := net.Dialer{}
|
dialer := net.Dialer{}
|
||||||
@@ -139,7 +153,7 @@ func makeReverseProxy(target string) (http.Handler, error) {
|
|||||||
transport.RegisterProtocol("unix", libanubis.UnixRoundTripper{Transport: transport})
|
transport.RegisterProtocol("unix", libanubis.UnixRoundTripper{Transport: transport})
|
||||||
}
|
}
|
||||||
|
|
||||||
rp := httputil.NewSingleHostReverseProxy(u)
|
rp := httputil.NewSingleHostReverseProxy(targetUri)
|
||||||
rp.Transport = transport
|
rp.Transport = transport
|
||||||
|
|
||||||
return rp, nil
|
return rp, nil
|
||||||
@@ -165,16 +179,25 @@ func main() {
|
|||||||
|
|
||||||
internal.InitSlog(*slogLevel)
|
internal.InitSlog(*slogLevel)
|
||||||
|
|
||||||
if *healthcheck {
|
if *extractResources != "" {
|
||||||
if err := doHealthCheck(); err != nil {
|
if err := extractEmbedFS(data.BotPolicies, ".", *extractResources); err != nil {
|
||||||
log.Fatal(err)
|
log.Fatal(err)
|
||||||
}
|
}
|
||||||
|
if err := extractEmbedFS(web.Static, "static", *extractResources); err != nil {
|
||||||
|
log.Fatal(err)
|
||||||
|
}
|
||||||
|
fmt.Printf("Extracted embedded static files to %s\n", *extractResources)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
rp, err := makeReverseProxy(*target)
|
var rp http.Handler
|
||||||
if err != nil {
|
// when using anubis via Systemd and environment variables, then it is not possible to set targe to an empty string but only to space
|
||||||
log.Fatalf("can't make reverse proxy: %v", err)
|
if strings.TrimSpace(*target) != "" {
|
||||||
|
var err error
|
||||||
|
rp, err = makeReverseProxy(*target)
|
||||||
|
if err != nil {
|
||||||
|
log.Fatalf("can't make reverse proxy: %v", err)
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
policy, err := libanubis.LoadPoliciesOrDefault(*policyFname, *challengeDifficulty)
|
policy, err := libanubis.LoadPoliciesOrDefault(*policyFname, *challengeDifficulty)
|
||||||
@@ -188,24 +211,24 @@ func main() {
|
|||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
|
|
||||||
hash, err := rule.Hash()
|
hash := rule.Hash()
|
||||||
if err != nil {
|
|
||||||
log.Fatalf("can't calculate checksum of rule %s: %v", rule.Name, err)
|
|
||||||
}
|
|
||||||
|
|
||||||
fmt.Printf("* %s: %s\n", rule.Name, hash)
|
fmt.Printf("* %s: %s\n", rule.Name, hash)
|
||||||
}
|
}
|
||||||
fmt.Println()
|
fmt.Println()
|
||||||
|
|
||||||
// replace the bot policy rules with a single rule that always benchmarks
|
// replace the bot policy rules with a single rule that always benchmarks
|
||||||
if *debugBenchmarkJS {
|
if *debugBenchmarkJS {
|
||||||
userAgent := regexp.MustCompile(".")
|
|
||||||
policy.Bots = []botPolicy.Bot{{
|
policy.Bots = []botPolicy.Bot{{
|
||||||
Name: "",
|
Name: "",
|
||||||
UserAgent: userAgent,
|
Rules: botPolicy.NewHeaderExistsChecker("User-Agent"),
|
||||||
Action: config.RuleBenchmark,
|
Action: config.RuleBenchmark,
|
||||||
}}
|
}}
|
||||||
}
|
}
|
||||||
|
if *basePrefix != "" && !strings.HasPrefix(*basePrefix, "/") {
|
||||||
|
log.Fatalf("[misconfiguration] base-prefix must start with a slash, eg: /%s", *basePrefix)
|
||||||
|
} else if strings.HasSuffix(*basePrefix, "/") {
|
||||||
|
log.Fatalf("[misconfiguration] base-prefix must not end with a slash")
|
||||||
|
}
|
||||||
|
|
||||||
var priv ed25519.PrivateKey
|
var priv ed25519.PrivateKey
|
||||||
if *ed25519PrivateKeyHex != "" && *ed25519PrivateKeyHexFile != "" {
|
if *ed25519PrivateKeyHex != "" && *ed25519PrivateKeyHexFile != "" {
|
||||||
@@ -216,12 +239,12 @@ func main() {
|
|||||||
log.Fatalf("failed to parse and validate ED25519_PRIVATE_KEY_HEX: %v", err)
|
log.Fatalf("failed to parse and validate ED25519_PRIVATE_KEY_HEX: %v", err)
|
||||||
}
|
}
|
||||||
} else if *ed25519PrivateKeyHexFile != "" {
|
} else if *ed25519PrivateKeyHexFile != "" {
|
||||||
hex, err := os.ReadFile(*ed25519PrivateKeyHexFile)
|
hexFile, err := os.ReadFile(*ed25519PrivateKeyHexFile)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.Fatalf("failed to read ED25519_PRIVATE_KEY_HEX_FILE %s: %v", *ed25519PrivateKeyHexFile, err)
|
log.Fatalf("failed to read ED25519_PRIVATE_KEY_HEX_FILE %s: %v", *ed25519PrivateKeyHexFile, err)
|
||||||
}
|
}
|
||||||
|
|
||||||
priv, err = keyFromHex(string(bytes.TrimSpace(hex)))
|
priv, err = keyFromHex(string(bytes.TrimSpace(hexFile)))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.Fatalf("failed to parse and validate content of ED25519_PRIVATE_KEY_HEX_FILE: %v", err)
|
log.Fatalf("failed to parse and validate content of ED25519_PRIVATE_KEY_HEX_FILE: %v", err)
|
||||||
}
|
}
|
||||||
@@ -234,13 +257,33 @@ func main() {
|
|||||||
slog.Warn("generating random key, Anubis will have strange behavior when multiple instances are behind the same load balancer target, for more information: see https://anubis.techaro.lol/docs/admin/installation#key-generation")
|
slog.Warn("generating random key, Anubis will have strange behavior when multiple instances are behind the same load balancer target, for more information: see https://anubis.techaro.lol/docs/admin/installation#key-generation")
|
||||||
}
|
}
|
||||||
|
|
||||||
|
var redirectDomainsList []string
|
||||||
|
if *redirectDomains != "" {
|
||||||
|
domains := strings.Split(*redirectDomains, ",")
|
||||||
|
for _, domain := range domains {
|
||||||
|
_, err = url.Parse(domain)
|
||||||
|
if err != nil {
|
||||||
|
log.Fatalf("cannot parse redirect-domain %q: %s", domain, err.Error())
|
||||||
|
}
|
||||||
|
redirectDomainsList = append(redirectDomainsList, strings.TrimSpace(domain))
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
slog.Warn("REDIRECT_DOMAINS is not set, Anubis will only redirect to the same domain a request is coming from, see https://anubis.techaro.lol/docs/admin/configuration/redirect-domains")
|
||||||
|
}
|
||||||
|
|
||||||
s, err := libanubis.New(libanubis.Options{
|
s, err := libanubis.New(libanubis.Options{
|
||||||
|
BasePrefix: *basePrefix,
|
||||||
Next: rp,
|
Next: rp,
|
||||||
Policy: policy,
|
Policy: policy,
|
||||||
ServeRobotsTXT: *robotsTxt,
|
ServeRobotsTXT: *robotsTxt,
|
||||||
PrivateKey: priv,
|
PrivateKey: priv,
|
||||||
CookieDomain: *cookieDomain,
|
CookieDomain: *cookieDomain,
|
||||||
CookiePartitioned: *cookiePartitioned,
|
CookiePartitioned: *cookiePartitioned,
|
||||||
|
OGPassthrough: *ogPassthrough,
|
||||||
|
OGTimeToLive: *ogTimeToLive,
|
||||||
|
RedirectDomains: redirectDomainsList,
|
||||||
|
Target: *target,
|
||||||
|
WebmasterEmail: *webmasterEmail,
|
||||||
})
|
})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.Fatalf("can't construct libanubis.Server: %v", err)
|
log.Fatalf("can't construct libanubis.Server: %v", err)
|
||||||
@@ -255,13 +298,13 @@ func main() {
|
|||||||
wg.Add(1)
|
wg.Add(1)
|
||||||
go metricsServer(ctx, wg.Done)
|
go metricsServer(ctx, wg.Done)
|
||||||
}
|
}
|
||||||
|
|
||||||
go startDecayMapCleanup(ctx, s)
|
go startDecayMapCleanup(ctx, s)
|
||||||
|
|
||||||
var h http.Handler
|
var h http.Handler
|
||||||
h = s
|
h = s
|
||||||
h = internal.RemoteXRealIP(*useRemoteAddress, *bindNetwork, h)
|
h = internal.RemoteXRealIP(*useRemoteAddress, *bindNetwork, h)
|
||||||
h = internal.XForwardedForToXRealIP(h)
|
h = internal.XForwardedForToXRealIP(h)
|
||||||
|
h = internal.XForwardedForUpdate(h)
|
||||||
|
|
||||||
srv := http.Server{Handler: h}
|
srv := http.Server{Handler: h}
|
||||||
listener, listenerUrl := setupListener(*bindNetwork, *bind)
|
listener, listenerUrl := setupListener(*bindNetwork, *bind)
|
||||||
@@ -274,6 +317,9 @@ func main() {
|
|||||||
"version", anubis.Version,
|
"version", anubis.Version,
|
||||||
"use-remote-address", *useRemoteAddress,
|
"use-remote-address", *useRemoteAddress,
|
||||||
"debug-benchmark-js", *debugBenchmarkJS,
|
"debug-benchmark-js", *debugBenchmarkJS,
|
||||||
|
"og-passthrough", *ogPassthrough,
|
||||||
|
"og-expiry-time", *ogTimeToLive,
|
||||||
|
"base-prefix", *basePrefix,
|
||||||
)
|
)
|
||||||
|
|
||||||
go func() {
|
go func() {
|
||||||
@@ -285,7 +331,7 @@ func main() {
|
|||||||
}
|
}
|
||||||
}()
|
}()
|
||||||
|
|
||||||
if err := srv.Serve(listener); err != http.ErrServerClosed {
|
if err := srv.Serve(listener); !errors.Is(err, http.ErrServerClosed) {
|
||||||
log.Fatal(err)
|
log.Fatal(err)
|
||||||
}
|
}
|
||||||
wg.Wait()
|
wg.Wait()
|
||||||
@@ -295,11 +341,19 @@ func metricsServer(ctx context.Context, done func()) {
|
|||||||
defer done()
|
defer done()
|
||||||
|
|
||||||
mux := http.NewServeMux()
|
mux := http.NewServeMux()
|
||||||
mux.Handle("/metrics", promhttp.Handler())
|
mux.Handle(anubis.BasePrefix+"/metrics", promhttp.Handler())
|
||||||
|
|
||||||
srv := http.Server{Handler: mux}
|
srv := http.Server{Handler: mux}
|
||||||
listener, url := setupListener(*metricsBindNetwork, *metricsBind)
|
listener, metricsUrl := setupListener(*metricsBindNetwork, *metricsBind)
|
||||||
slog.Debug("listening for metrics", "url", url)
|
slog.Debug("listening for metrics", "url", metricsUrl)
|
||||||
|
|
||||||
|
if *healthcheck {
|
||||||
|
log.Println("running healthcheck")
|
||||||
|
if err := doHealthCheck(); err != nil {
|
||||||
|
log.Fatal(err)
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
go func() {
|
go func() {
|
||||||
<-ctx.Done()
|
<-ctx.Done()
|
||||||
@@ -310,7 +364,33 @@ func metricsServer(ctx context.Context, done func()) {
|
|||||||
}
|
}
|
||||||
}()
|
}()
|
||||||
|
|
||||||
if err := srv.Serve(listener); err != http.ErrServerClosed {
|
if err := srv.Serve(listener); !errors.Is(err, http.ErrServerClosed) {
|
||||||
log.Fatal(err)
|
log.Fatal(err)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func extractEmbedFS(fsys embed.FS, root string, destDir string) error {
|
||||||
|
return fs.WalkDir(fsys, root, func(path string, d fs.DirEntry, err error) error {
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
relPath, err := filepath.Rel(root, path)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
destPath := filepath.Join(destDir, root, relPath)
|
||||||
|
|
||||||
|
if d.IsDir() {
|
||||||
|
return os.MkdirAll(destPath, 0o700)
|
||||||
|
}
|
||||||
|
|
||||||
|
data, err := fs.ReadFile(fsys, path)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
return os.WriteFile(destPath, data, 0o644)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|||||||
@@ -123,15 +123,15 @@ func parseImageList(imageList string) ([]image, error) {
|
|||||||
// reg.xeiaso.net/techaro/anubis:latest
|
// reg.xeiaso.net/techaro/anubis:latest
|
||||||
// repository: reg.xeiaso.net/techaro/anubis
|
// repository: reg.xeiaso.net/techaro/anubis
|
||||||
// tag: latest
|
// tag: latest
|
||||||
parts := strings.SplitN(img, ":", 2)
|
index := strings.LastIndex(img, ":")
|
||||||
result = append(result, image{
|
result = append(result, image{
|
||||||
repository: parts[0],
|
repository: img[:index],
|
||||||
tag: parts[1],
|
tag: img[index+1:],
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
if len(result) == 0 {
|
if len(result) == 0 {
|
||||||
return nil, fmt.Errorf("no images provided, bad flags??")
|
return nil, fmt.Errorf("no images provided, bad flags")
|
||||||
}
|
}
|
||||||
|
|
||||||
return result, nil
|
return result, nil
|
||||||
|
|||||||
6
data/apps/allow-api-routes.yaml
Normal file
@@ -0,0 +1,6 @@
|
|||||||
|
- name: allow-api-routes
|
||||||
|
action: ALLOW
|
||||||
|
expression:
|
||||||
|
all:
|
||||||
|
- '!(method == "HEAD" || method == "GET")'
|
||||||
|
- path.startsWith("/api/")
|
||||||
7
data/apps/gitea-rss-feeds.yaml
Normal file
@@ -0,0 +1,7 @@
|
|||||||
|
# By Aibrew: https://github.com/TecharoHQ/anubis/discussions/261#discussioncomment-12821065
|
||||||
|
- name: gitea-feed-atom
|
||||||
|
action: ALLOW
|
||||||
|
path_regex: ^/[.A-Za-z0-9_-]{1,256}?[./A-Za-z0-9_-]*\.atom$
|
||||||
|
- name: gitea-feed-rss
|
||||||
|
action: ALLOW
|
||||||
|
path_regex: ^/[.A-Za-z0-9_-]{1,256}?[./A-Za-z0-9_-]*\.rss$
|
||||||
@@ -1,398 +1,55 @@
|
|||||||
{
|
{
|
||||||
"bots": [
|
"bots": [
|
||||||
{
|
{
|
||||||
"name": "amazonbot",
|
"import": "(data)/bots/ai-robots-txt.yaml"
|
||||||
"user_agent_regex": "Amazonbot",
|
|
||||||
"action": "DENY"
|
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"name": "googlebot",
|
"import": "(data)/bots/cloudflare-workers.yaml"
|
||||||
"user_agent_regex": "\\+http\\://www\\.google\\.com/bot\\.html",
|
|
||||||
"action": "ALLOW",
|
|
||||||
"remote_addresses": [
|
|
||||||
"2001:4860:4801:10::/64",
|
|
||||||
"2001:4860:4801:11::/64",
|
|
||||||
"2001:4860:4801:12::/64",
|
|
||||||
"2001:4860:4801:13::/64",
|
|
||||||
"2001:4860:4801:14::/64",
|
|
||||||
"2001:4860:4801:15::/64",
|
|
||||||
"2001:4860:4801:16::/64",
|
|
||||||
"2001:4860:4801:17::/64",
|
|
||||||
"2001:4860:4801:18::/64",
|
|
||||||
"2001:4860:4801:19::/64",
|
|
||||||
"2001:4860:4801:1a::/64",
|
|
||||||
"2001:4860:4801:1b::/64",
|
|
||||||
"2001:4860:4801:1c::/64",
|
|
||||||
"2001:4860:4801:1d::/64",
|
|
||||||
"2001:4860:4801:1e::/64",
|
|
||||||
"2001:4860:4801:1f::/64",
|
|
||||||
"2001:4860:4801:20::/64",
|
|
||||||
"2001:4860:4801:21::/64",
|
|
||||||
"2001:4860:4801:22::/64",
|
|
||||||
"2001:4860:4801:23::/64",
|
|
||||||
"2001:4860:4801:24::/64",
|
|
||||||
"2001:4860:4801:25::/64",
|
|
||||||
"2001:4860:4801:26::/64",
|
|
||||||
"2001:4860:4801:27::/64",
|
|
||||||
"2001:4860:4801:28::/64",
|
|
||||||
"2001:4860:4801:29::/64",
|
|
||||||
"2001:4860:4801:2::/64",
|
|
||||||
"2001:4860:4801:2a::/64",
|
|
||||||
"2001:4860:4801:2b::/64",
|
|
||||||
"2001:4860:4801:2c::/64",
|
|
||||||
"2001:4860:4801:2d::/64",
|
|
||||||
"2001:4860:4801:2e::/64",
|
|
||||||
"2001:4860:4801:2f::/64",
|
|
||||||
"2001:4860:4801:31::/64",
|
|
||||||
"2001:4860:4801:32::/64",
|
|
||||||
"2001:4860:4801:33::/64",
|
|
||||||
"2001:4860:4801:34::/64",
|
|
||||||
"2001:4860:4801:35::/64",
|
|
||||||
"2001:4860:4801:36::/64",
|
|
||||||
"2001:4860:4801:37::/64",
|
|
||||||
"2001:4860:4801:38::/64",
|
|
||||||
"2001:4860:4801:39::/64",
|
|
||||||
"2001:4860:4801:3a::/64",
|
|
||||||
"2001:4860:4801:3b::/64",
|
|
||||||
"2001:4860:4801:3c::/64",
|
|
||||||
"2001:4860:4801:3d::/64",
|
|
||||||
"2001:4860:4801:3e::/64",
|
|
||||||
"2001:4860:4801:40::/64",
|
|
||||||
"2001:4860:4801:41::/64",
|
|
||||||
"2001:4860:4801:42::/64",
|
|
||||||
"2001:4860:4801:43::/64",
|
|
||||||
"2001:4860:4801:44::/64",
|
|
||||||
"2001:4860:4801:45::/64",
|
|
||||||
"2001:4860:4801:46::/64",
|
|
||||||
"2001:4860:4801:47::/64",
|
|
||||||
"2001:4860:4801:48::/64",
|
|
||||||
"2001:4860:4801:49::/64",
|
|
||||||
"2001:4860:4801:4a::/64",
|
|
||||||
"2001:4860:4801:4b::/64",
|
|
||||||
"2001:4860:4801:4c::/64",
|
|
||||||
"2001:4860:4801:50::/64",
|
|
||||||
"2001:4860:4801:51::/64",
|
|
||||||
"2001:4860:4801:52::/64",
|
|
||||||
"2001:4860:4801:53::/64",
|
|
||||||
"2001:4860:4801:54::/64",
|
|
||||||
"2001:4860:4801:55::/64",
|
|
||||||
"2001:4860:4801:56::/64",
|
|
||||||
"2001:4860:4801:60::/64",
|
|
||||||
"2001:4860:4801:61::/64",
|
|
||||||
"2001:4860:4801:62::/64",
|
|
||||||
"2001:4860:4801:63::/64",
|
|
||||||
"2001:4860:4801:64::/64",
|
|
||||||
"2001:4860:4801:65::/64",
|
|
||||||
"2001:4860:4801:66::/64",
|
|
||||||
"2001:4860:4801:67::/64",
|
|
||||||
"2001:4860:4801:68::/64",
|
|
||||||
"2001:4860:4801:69::/64",
|
|
||||||
"2001:4860:4801:6a::/64",
|
|
||||||
"2001:4860:4801:6b::/64",
|
|
||||||
"2001:4860:4801:6c::/64",
|
|
||||||
"2001:4860:4801:6d::/64",
|
|
||||||
"2001:4860:4801:6e::/64",
|
|
||||||
"2001:4860:4801:6f::/64",
|
|
||||||
"2001:4860:4801:70::/64",
|
|
||||||
"2001:4860:4801:71::/64",
|
|
||||||
"2001:4860:4801:72::/64",
|
|
||||||
"2001:4860:4801:73::/64",
|
|
||||||
"2001:4860:4801:74::/64",
|
|
||||||
"2001:4860:4801:75::/64",
|
|
||||||
"2001:4860:4801:76::/64",
|
|
||||||
"2001:4860:4801:77::/64",
|
|
||||||
"2001:4860:4801:78::/64",
|
|
||||||
"2001:4860:4801:79::/64",
|
|
||||||
"2001:4860:4801:80::/64",
|
|
||||||
"2001:4860:4801:81::/64",
|
|
||||||
"2001:4860:4801:82::/64",
|
|
||||||
"2001:4860:4801:83::/64",
|
|
||||||
"2001:4860:4801:84::/64",
|
|
||||||
"2001:4860:4801:85::/64",
|
|
||||||
"2001:4860:4801:86::/64",
|
|
||||||
"2001:4860:4801:87::/64",
|
|
||||||
"2001:4860:4801:88::/64",
|
|
||||||
"2001:4860:4801:90::/64",
|
|
||||||
"2001:4860:4801:91::/64",
|
|
||||||
"2001:4860:4801:92::/64",
|
|
||||||
"2001:4860:4801:93::/64",
|
|
||||||
"2001:4860:4801:94::/64",
|
|
||||||
"2001:4860:4801:95::/64",
|
|
||||||
"2001:4860:4801:96::/64",
|
|
||||||
"2001:4860:4801:a0::/64",
|
|
||||||
"2001:4860:4801:a1::/64",
|
|
||||||
"2001:4860:4801:a2::/64",
|
|
||||||
"2001:4860:4801:a3::/64",
|
|
||||||
"2001:4860:4801:a4::/64",
|
|
||||||
"2001:4860:4801:a5::/64",
|
|
||||||
"2001:4860:4801:c::/64",
|
|
||||||
"2001:4860:4801:f::/64",
|
|
||||||
"192.178.5.0/27",
|
|
||||||
"192.178.6.0/27",
|
|
||||||
"192.178.6.128/27",
|
|
||||||
"192.178.6.160/27",
|
|
||||||
"192.178.6.192/27",
|
|
||||||
"192.178.6.32/27",
|
|
||||||
"192.178.6.64/27",
|
|
||||||
"192.178.6.96/27",
|
|
||||||
"34.100.182.96/28",
|
|
||||||
"34.101.50.144/28",
|
|
||||||
"34.118.254.0/28",
|
|
||||||
"34.118.66.0/28",
|
|
||||||
"34.126.178.96/28",
|
|
||||||
"34.146.150.144/28",
|
|
||||||
"34.147.110.144/28",
|
|
||||||
"34.151.74.144/28",
|
|
||||||
"34.152.50.64/28",
|
|
||||||
"34.154.114.144/28",
|
|
||||||
"34.155.98.32/28",
|
|
||||||
"34.165.18.176/28",
|
|
||||||
"34.175.160.64/28",
|
|
||||||
"34.176.130.16/28",
|
|
||||||
"34.22.85.0/27",
|
|
||||||
"34.64.82.64/28",
|
|
||||||
"34.65.242.112/28",
|
|
||||||
"34.80.50.80/28",
|
|
||||||
"34.88.194.0/28",
|
|
||||||
"34.89.10.80/28",
|
|
||||||
"34.89.198.80/28",
|
|
||||||
"34.96.162.48/28",
|
|
||||||
"35.247.243.240/28",
|
|
||||||
"66.249.64.0/27",
|
|
||||||
"66.249.64.128/27",
|
|
||||||
"66.249.64.160/27",
|
|
||||||
"66.249.64.224/27",
|
|
||||||
"66.249.64.32/27",
|
|
||||||
"66.249.64.64/27",
|
|
||||||
"66.249.64.96/27",
|
|
||||||
"66.249.65.0/27",
|
|
||||||
"66.249.65.128/27",
|
|
||||||
"66.249.65.160/27",
|
|
||||||
"66.249.65.192/27",
|
|
||||||
"66.249.65.224/27",
|
|
||||||
"66.249.65.32/27",
|
|
||||||
"66.249.65.64/27",
|
|
||||||
"66.249.65.96/27",
|
|
||||||
"66.249.66.0/27",
|
|
||||||
"66.249.66.128/27",
|
|
||||||
"66.249.66.160/27",
|
|
||||||
"66.249.66.192/27",
|
|
||||||
"66.249.66.224/27",
|
|
||||||
"66.249.66.32/27",
|
|
||||||
"66.249.66.64/27",
|
|
||||||
"66.249.66.96/27",
|
|
||||||
"66.249.68.0/27",
|
|
||||||
"66.249.68.128/27",
|
|
||||||
"66.249.68.32/27",
|
|
||||||
"66.249.68.64/27",
|
|
||||||
"66.249.68.96/27",
|
|
||||||
"66.249.69.0/27",
|
|
||||||
"66.249.69.128/27",
|
|
||||||
"66.249.69.160/27",
|
|
||||||
"66.249.69.192/27",
|
|
||||||
"66.249.69.224/27",
|
|
||||||
"66.249.69.32/27",
|
|
||||||
"66.249.69.64/27",
|
|
||||||
"66.249.69.96/27",
|
|
||||||
"66.249.70.0/27",
|
|
||||||
"66.249.70.128/27",
|
|
||||||
"66.249.70.160/27",
|
|
||||||
"66.249.70.192/27",
|
|
||||||
"66.249.70.224/27",
|
|
||||||
"66.249.70.32/27",
|
|
||||||
"66.249.70.64/27",
|
|
||||||
"66.249.70.96/27",
|
|
||||||
"66.249.71.0/27",
|
|
||||||
"66.249.71.128/27",
|
|
||||||
"66.249.71.160/27",
|
|
||||||
"66.249.71.192/27",
|
|
||||||
"66.249.71.224/27",
|
|
||||||
"66.249.71.32/27",
|
|
||||||
"66.249.71.64/27",
|
|
||||||
"66.249.71.96/27",
|
|
||||||
"66.249.72.0/27",
|
|
||||||
"66.249.72.128/27",
|
|
||||||
"66.249.72.160/27",
|
|
||||||
"66.249.72.192/27",
|
|
||||||
"66.249.72.224/27",
|
|
||||||
"66.249.72.32/27",
|
|
||||||
"66.249.72.64/27",
|
|
||||||
"66.249.72.96/27",
|
|
||||||
"66.249.73.0/27",
|
|
||||||
"66.249.73.128/27",
|
|
||||||
"66.249.73.160/27",
|
|
||||||
"66.249.73.192/27",
|
|
||||||
"66.249.73.224/27",
|
|
||||||
"66.249.73.32/27",
|
|
||||||
"66.249.73.64/27",
|
|
||||||
"66.249.73.96/27",
|
|
||||||
"66.249.74.0/27",
|
|
||||||
"66.249.74.128/27",
|
|
||||||
"66.249.74.160/27",
|
|
||||||
"66.249.74.192/27",
|
|
||||||
"66.249.74.32/27",
|
|
||||||
"66.249.74.64/27",
|
|
||||||
"66.249.74.96/27",
|
|
||||||
"66.249.75.0/27",
|
|
||||||
"66.249.75.128/27",
|
|
||||||
"66.249.75.160/27",
|
|
||||||
"66.249.75.192/27",
|
|
||||||
"66.249.75.224/27",
|
|
||||||
"66.249.75.32/27",
|
|
||||||
"66.249.75.64/27",
|
|
||||||
"66.249.75.96/27",
|
|
||||||
"66.249.76.0/27",
|
|
||||||
"66.249.76.128/27",
|
|
||||||
"66.249.76.160/27",
|
|
||||||
"66.249.76.192/27",
|
|
||||||
"66.249.76.224/27",
|
|
||||||
"66.249.76.32/27",
|
|
||||||
"66.249.76.64/27",
|
|
||||||
"66.249.76.96/27",
|
|
||||||
"66.249.77.0/27",
|
|
||||||
"66.249.77.128/27",
|
|
||||||
"66.249.77.160/27",
|
|
||||||
"66.249.77.192/27",
|
|
||||||
"66.249.77.224/27",
|
|
||||||
"66.249.77.32/27",
|
|
||||||
"66.249.77.64/27",
|
|
||||||
"66.249.77.96/27",
|
|
||||||
"66.249.78.0/27",
|
|
||||||
"66.249.78.32/27",
|
|
||||||
"66.249.79.0/27",
|
|
||||||
"66.249.79.128/27",
|
|
||||||
"66.249.79.160/27",
|
|
||||||
"66.249.79.192/27",
|
|
||||||
"66.249.79.224/27",
|
|
||||||
"66.249.79.32/27",
|
|
||||||
"66.249.79.64/27",
|
|
||||||
"66.249.79.96/27"
|
|
||||||
]
|
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"name": "bingbot",
|
"import": "(data)/bots/headless-browsers.yaml"
|
||||||
"user_agent_regex": "\\+http\\://www\\.bing\\.com/bingbot\\.htm",
|
|
||||||
"action": "ALLOW",
|
|
||||||
"remote_addresses": [
|
|
||||||
"157.55.39.0/24",
|
|
||||||
"207.46.13.0/24",
|
|
||||||
"40.77.167.0/24",
|
|
||||||
"13.66.139.0/24",
|
|
||||||
"13.66.144.0/24",
|
|
||||||
"52.167.144.0/24",
|
|
||||||
"13.67.10.16/28",
|
|
||||||
"13.69.66.240/28",
|
|
||||||
"13.71.172.224/28",
|
|
||||||
"139.217.52.0/28",
|
|
||||||
"191.233.204.224/28",
|
|
||||||
"20.36.108.32/28",
|
|
||||||
"20.43.120.16/28",
|
|
||||||
"40.79.131.208/28",
|
|
||||||
"40.79.186.176/28",
|
|
||||||
"52.231.148.0/28",
|
|
||||||
"20.79.107.240/28",
|
|
||||||
"51.105.67.0/28",
|
|
||||||
"20.125.163.80/28",
|
|
||||||
"40.77.188.0/22",
|
|
||||||
"65.55.210.0/24",
|
|
||||||
"199.30.24.0/23",
|
|
||||||
"40.77.202.0/24",
|
|
||||||
"40.77.139.0/25",
|
|
||||||
"20.74.197.0/28",
|
|
||||||
"20.15.133.160/27",
|
|
||||||
"40.77.177.0/24",
|
|
||||||
"40.77.178.0/23"
|
|
||||||
]
|
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"name": "qwantbot",
|
"import": "(data)/bots/us-ai-scraper.yaml"
|
||||||
"user_agent_regex": "\\+https\\://help\\.qwant\\.com/bot/",
|
|
||||||
"action": "ALLOW",
|
|
||||||
"remote_addresses": [
|
|
||||||
"91.242.162.0/24"
|
|
||||||
]
|
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"name": "kagibot",
|
"import": "(data)/bots/aggressive-brazilian-scrapers.yaml"
|
||||||
"user_agent_regex": "\\+https\\://kagi\\.com/bot",
|
|
||||||
"action": "ALLOW",
|
|
||||||
"remote_addresses": [
|
|
||||||
"216.18.205.234/32",
|
|
||||||
"35.212.27.76/32",
|
|
||||||
"104.254.65.50/32",
|
|
||||||
"209.151.156.194/32"
|
|
||||||
]
|
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"name": "marginalia",
|
"import": "(data)/clients/curl-impersonate.yaml"
|
||||||
"user_agent_regex": "search\\.marginalia\\.nu",
|
|
||||||
"action": "ALLOW",
|
|
||||||
"remote_addresses": [
|
|
||||||
"193.183.0.162/31",
|
|
||||||
"193.183.0.164/30",
|
|
||||||
"193.183.0.168/30",
|
|
||||||
"193.183.0.172/31",
|
|
||||||
"193.183.0.174/32"
|
|
||||||
]
|
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"name": "mojeekbot",
|
"import": "(data)/crawlers/googlebot.yaml"
|
||||||
"user_agent_regex": "http\\://www\\.mojeek\\.com/bot\\.html",
|
|
||||||
"action": "ALLOW",
|
|
||||||
"remote_addresses": [
|
|
||||||
"5.102.173.71/32"
|
|
||||||
]
|
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"name": "us-artificial-intelligence-scraper",
|
"import": "(data)/crawlers/bingbot.yaml"
|
||||||
"user_agent_regex": "\\+https\\://github\\.com/US-Artificial-Intelligence/scraper",
|
|
||||||
"action": "DENY"
|
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"name": "well-known",
|
"import": "(data)/crawlers/duckduckbot.yaml"
|
||||||
"path_regex": "^/.well-known/.*$",
|
|
||||||
"action": "ALLOW"
|
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"name": "favicon",
|
"import": "(data)/crawlers/qwantbot.yaml"
|
||||||
"path_regex": "^/favicon.ico$",
|
|
||||||
"action": "ALLOW"
|
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"name": "robots-txt",
|
"import": "(data)/crawlers/internet-archive.yaml"
|
||||||
"path_regex": "^/robots.txt$",
|
|
||||||
"action": "ALLOW"
|
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"name": "lightpanda",
|
"import": "(data)/crawlers/kagibot.yaml"
|
||||||
"user_agent_regex": "^Lightpanda/.*$",
|
|
||||||
"action": "DENY"
|
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"name": "headless-chrome",
|
"import": "(data)/crawlers/marginalia.yaml"
|
||||||
"user_agent_regex": "HeadlessChrome",
|
|
||||||
"action": "DENY"
|
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"name": "headless-chromium",
|
"import": "(data)/crawlers/mojeekbot.yaml"
|
||||||
"user_agent_regex": "HeadlessChromium",
|
|
||||||
"action": "DENY"
|
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"name": "generic-bot-catchall",
|
"import": "(data)/common/keep-internet-working.yaml"
|
||||||
"user_agent_regex": "(?i:bot|crawler)",
|
|
||||||
"action": "CHALLENGE",
|
|
||||||
"challenge": {
|
|
||||||
"difficulty": 16,
|
|
||||||
"report_as": 4,
|
|
||||||
"algorithm": "slow"
|
|
||||||
}
|
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"name": "generic-browser",
|
"name": "generic-browser",
|
||||||
"user_agent_regex": "Mozilla",
|
"user_agent_regex": "Mozilla|Opera",
|
||||||
"action": "CHALLENGE"
|
"action": "CHALLENGE"
|
||||||
}
|
}
|
||||||
],
|
],
|
||||||
"dnsbl": false
|
"dnsbl": false
|
||||||
}
|
}
|
||||||
|
|||||||
53
data/botPolicies.yaml
Normal file
@@ -0,0 +1,53 @@
|
|||||||
|
## Anubis has the ability to let you import snippets of configuration into the main
|
||||||
|
## configuration file. This allows you to break up your config into smaller parts
|
||||||
|
## that get logically assembled into one big file.
|
||||||
|
##
|
||||||
|
## Of note, a bot rule can either have inline bot configuration or import a
|
||||||
|
## bot config snippet. You cannot do both in a single bot rule.
|
||||||
|
##
|
||||||
|
## Import paths can either be prefixed with (data) to import from the common/shared
|
||||||
|
## rules in the data folder in the Anubis source tree or will point to absolute/relative
|
||||||
|
## paths in your filesystem. If you don't have access to the Anubis source tree, check
|
||||||
|
## /usr/share/docs/anubis/data or in the tarball you extracted Anubis from.
|
||||||
|
|
||||||
|
bots:
|
||||||
|
# Pathological bots to deny
|
||||||
|
- # This correlates to data/bots/ai-robots-txt.yaml in the source tree
|
||||||
|
import: (data)/bots/ai-robots-txt.yaml
|
||||||
|
- import: (data)/bots/cloudflare-workers.yaml
|
||||||
|
- import: (data)/bots/headless-browsers.yaml
|
||||||
|
- import: (data)/bots/us-ai-scraper.yaml
|
||||||
|
- import: (data)/bots/aggressive-brazilian-scrapers.yaml
|
||||||
|
- import: (data)/clients/curl-impersonate.yaml
|
||||||
|
|
||||||
|
# Search engines to allow
|
||||||
|
- import: (data)/crawlers/googlebot.yaml
|
||||||
|
- import: (data)/crawlers/bingbot.yaml
|
||||||
|
- import: (data)/crawlers/duckduckbot.yaml
|
||||||
|
- import: (data)/crawlers/qwantbot.yaml
|
||||||
|
- import: (data)/crawlers/internet-archive.yaml
|
||||||
|
- import: (data)/crawlers/kagibot.yaml
|
||||||
|
- import: (data)/crawlers/marginalia.yaml
|
||||||
|
- import: (data)/crawlers/mojeekbot.yaml
|
||||||
|
|
||||||
|
# Allow common "keeping the internet working" routes (well-known, favicon, robots.txt)
|
||||||
|
- import: (data)/common/keep-internet-working.yaml
|
||||||
|
|
||||||
|
# # Punish any bot with "bot" in the user-agent string
|
||||||
|
# # This is known to have a high false-positive rate, use at your own risk
|
||||||
|
# - name: generic-bot-catchall
|
||||||
|
# user_agent_regex: (?i:bot|crawler)
|
||||||
|
# action: CHALLENGE
|
||||||
|
# challenge:
|
||||||
|
# difficulty: 16 # impossible
|
||||||
|
# report_as: 4 # lie to the operator
|
||||||
|
# algorithm: slow # intentionally waste CPU cycles and time
|
||||||
|
|
||||||
|
# Challenge clients with "Mozilla" or "Opera" in their user-agent string
|
||||||
|
#- import: (data)/common/legacy-challenge-everything.yaml
|
||||||
|
|
||||||
|
- name: reject-browsers
|
||||||
|
action: DENY
|
||||||
|
expression: userAgent.isBrowserLike()
|
||||||
|
|
||||||
|
dnsbl: false
|
||||||
28
data/bots/aggressive-brazilian-scrapers.yaml
Normal file
@@ -0,0 +1,28 @@
|
|||||||
|
- name: deny-aggressive-brazilian-scrapers
|
||||||
|
action: DENY
|
||||||
|
expression:
|
||||||
|
any:
|
||||||
|
# Internet Explorer should be out of support
|
||||||
|
- userAgent.contains("MSIE")
|
||||||
|
# Trident is the Internet Explorer browser engine
|
||||||
|
- userAgent.contains("Trident")
|
||||||
|
# Opera is a fork of chrome now
|
||||||
|
- userAgent.contains("Presto")
|
||||||
|
# Windows CE is discontinued
|
||||||
|
- userAgent.contains("Windows CE")
|
||||||
|
# Windows 95 is discontinued
|
||||||
|
- userAgent.contains("Windows 95")
|
||||||
|
# Windows 98 is discontinued
|
||||||
|
- userAgent.contains("Windows 98")
|
||||||
|
# Windows 9.x is discontinued
|
||||||
|
- userAgent.contains("Win 9x")
|
||||||
|
# Amazon does not have an Alexa Toolbar.
|
||||||
|
- userAgent.contains("Alexa Toolbar")
|
||||||
|
- name: challenge-aggressive-brazilian-scrapers
|
||||||
|
action: CHALLENGE
|
||||||
|
expression:
|
||||||
|
any:
|
||||||
|
# This is not released, even Windows 11 calls itself Windows 10
|
||||||
|
- userAgent.contains("Windows NT 11.0")
|
||||||
|
# iPods are not in common use
|
||||||
|
- userAgent.contains("iPod")
|
||||||
4
data/bots/ai-robots-txt.yaml
Normal file
@@ -0,0 +1,4 @@
|
|||||||
|
- name: "ai-robots-txt"
|
||||||
|
user_agent_regex: >-
|
||||||
|
AI2Bot|Ai2Bot-Dolma|aiHitBot|Amazonbot|anthropic-ai|Applebot|Applebot-Extended|Brightbot 1.0|Bytespider|CCBot|ChatGPT-User|Claude-Web|ClaudeBot|cohere-ai|cohere-training-data-crawler|Cotoyogi|Crawlspace|Diffbot|DuckAssistBot|FacebookBot|Factset_spyderbot|FirecrawlAgent|FriendlyCrawler|Google-Extended|GoogleOther|GoogleOther-Image|GoogleOther-Video|GPTBot|iaskspider/2.0|ICC-Crawler|ImagesiftBot|img2dataset|imgproxy|ISSCyberRiskCrawler|Kangaroo Bot|meta-externalagent|Meta-ExternalAgent|meta-externalfetcher|Meta-ExternalFetcher|NovaAct|OAI-SearchBot|omgili|omgilibot|Operator|PanguBot|Perplexity-User|PerplexityBot|PetalBot|Scrapy|SemrushBot-OCOB|SemrushBot-SWA|Sidetrade indexer bot|TikTokSpider|Timpibot|VelenPublicWebCrawler|Webzio-Extended|YouBot
|
||||||
|
action: DENY
|
||||||
3
data/bots/cloudflare-workers.yaml
Normal file
@@ -0,0 +1,3 @@
|
|||||||
|
- name: cloudflare-workers
|
||||||
|
expression: '"Cf-Worker" in headers'
|
||||||
|
action: CHALLENGE
|
||||||
9
data/bots/headless-browsers.yaml
Normal file
@@ -0,0 +1,9 @@
|
|||||||
|
- name: lightpanda
|
||||||
|
user_agent_regex: ^LightPanda/.*$
|
||||||
|
action: DENY
|
||||||
|
- name: headless-chrome
|
||||||
|
user_agent_regex: HeadlessChrome
|
||||||
|
action: DENY
|
||||||
|
- name: headless-chromium
|
||||||
|
user_agent_regex: HeadlessChromium
|
||||||
|
action: DENY
|
||||||
9
data/bots/irc-bots/archlinux-phrik.yaml
Normal file
@@ -0,0 +1,9 @@
|
|||||||
|
# phrik in the Arch Linux IRC channels
|
||||||
|
- name: archlinux-phrik
|
||||||
|
action: ALLOW
|
||||||
|
expression:
|
||||||
|
all:
|
||||||
|
- remoteAddress == "159.69.213.214"
|
||||||
|
- userAgent == "Mozilla/5.0 (compatible; utils.web Limnoria module)"
|
||||||
|
- '"X-Http-Version" in headers'
|
||||||
|
- headers["X-Http-Version"] == "HTTP/1.1"
|
||||||
9
data/bots/irc-bots/gentoo-chat.yaml
Normal file
@@ -0,0 +1,9 @@
|
|||||||
|
# chat in the gentoo IRC channels
|
||||||
|
- name: gentoo-chat
|
||||||
|
action: ALLOW
|
||||||
|
expression:
|
||||||
|
all:
|
||||||
|
- remoteAddress == "45.76.166.57"
|
||||||
|
- userAgent == "Mozilla/5.0 (Linux x86_64; rv:76.0) Gecko/20100101 Firefox/76.0"
|
||||||
|
- '"X-Http-Version" in headers'
|
||||||
|
- headers["X-Http-Version"] == "HTTP/1.1"
|
||||||
3
data/bots/us-ai-scraper.yaml
Normal file
@@ -0,0 +1,3 @@
|
|||||||
|
- name: us-artificial-intelligence-scraper
|
||||||
|
user_agent_regex: \+https\://github\.com/US-Artificial-Intelligence/scraper
|
||||||
|
action: DENY
|
||||||
32
data/clients/curl-impersonate.yaml
Normal file
@@ -0,0 +1,32 @@
|
|||||||
|
- name: curl-impersonate
|
||||||
|
action: CHALLENGE
|
||||||
|
expression:
|
||||||
|
any:
|
||||||
|
- >
|
||||||
|
"Sec-Ch-Ua" in headers && headers["Sec-Ch-Ua"] == '" Not A;Brand";v="99", "Chromium";v="100", "Google Chrome";v="100"'
|
||||||
|
- >
|
||||||
|
"Sec-Ch-Ua" in headers && headers["Sec-Ch-Ua"] == '" Not A;Brand";v="99", "Chromium";v="101", "Google Chrome";v="101"'
|
||||||
|
- >
|
||||||
|
"Sec-Ch-Ua" in headers && headers["Sec-Ch-Ua"] == '"Chromium";v="104", " Not A;Brand";v="99", "Google Chrome";v="104"'
|
||||||
|
- >
|
||||||
|
"Sec-Ch-Ua" in headers && headers["Sec-Ch-Ua"] == '"Google Chrome";v="107", "Chromium";v="107", "Not=A?Brand";v="24"'
|
||||||
|
- >
|
||||||
|
"Sec-Ch-Ua" in headers && headers["Sec-Ch-Ua"] == '"Chromium";v="110", "Not A(Brand";v="24", "Google Chrome";v="110"'
|
||||||
|
- >
|
||||||
|
"Sec-Ch-Ua" in headers && headers["Sec-Ch-Ua"] == '"Chromium";v="116", "Not)A;Brand";v="24", "Google Chrome";v="116"'
|
||||||
|
- >
|
||||||
|
"Sec-Ch-Ua" in headers && headers["Sec-Ch-Ua"] == '"Google Chrome";v="119", "Chromium";v="119", "Not?A_Brand";v="24"'
|
||||||
|
- >
|
||||||
|
"Sec-Ch-Ua" in headers && headers["Sec-Ch-Ua"] == '"Not_A Brand";v="8", "Chromium";v="120", "Google Chrome";v="120"'
|
||||||
|
- >
|
||||||
|
"Sec-Ch-Ua" in headers && headers["Sec-Ch-Ua"] == '"Google Chrome";v="123", "Not:A-Brand";v="8", "Chromium";v="123"'
|
||||||
|
- >
|
||||||
|
"Sec-Ch-Ua" in headers && headers["Sec-Ch-Ua"] == '"Chromium";v="124", "Google Chrome";v="124", "Not-A.Brand";v="99"'
|
||||||
|
- >
|
||||||
|
"Sec-Ch-Ua" in headers && headers["Sec-Ch-Ua"] == '"Google Chrome";v="131", "Chromium";v="131", "Not_A Brand";v="24"'
|
||||||
|
- >
|
||||||
|
"Sec-Ch-Ua" in headers && headers["Sec-Ch-Ua"] == '"Not(A:Brand";v="99", "Google Chrome";v="133", "Chromium";v="133"'
|
||||||
|
- >
|
||||||
|
"Sec-Ch-Ua" in headers && headers["Sec-Ch-Ua"] == '" Not A;Brand";v="99", "Chromium";v="101", "Microsoft Edge";v="101"'
|
||||||
|
- >
|
||||||
|
"Sec-Ch-Ua" in headers && headers["Sec-Ch-Ua"] == '" Not A;Brand";v="99", "Chromium";v="99", "Microsoft Edge";v="99"'
|
||||||
14
data/clients/git.yaml
Normal file
@@ -0,0 +1,14 @@
|
|||||||
|
- name: allow-git-clients
|
||||||
|
action: ALLOW
|
||||||
|
expression:
|
||||||
|
all:
|
||||||
|
- >
|
||||||
|
(
|
||||||
|
userAgent.startsWith("git/") ||
|
||||||
|
userAgent.contains("libgit") ||
|
||||||
|
userAgent.startsWith("go-git") ||
|
||||||
|
userAgent.startsWith("JGit/") ||
|
||||||
|
userAgent.startsWith("JGit-")
|
||||||
|
)
|
||||||
|
- '"Git-Protocol" in headers'
|
||||||
|
- headers["Git-Protocol"] == "version=2"
|
||||||
7
data/clients/go-get.yaml
Normal file
@@ -0,0 +1,7 @@
|
|||||||
|
- name: go-get
|
||||||
|
action: ALLOW
|
||||||
|
expression:
|
||||||
|
all:
|
||||||
|
- userAgent.startsWith("Go-http-client/")
|
||||||
|
- '"go-get" in query'
|
||||||
|
- query["go-get"] == "1"
|
||||||
6
data/common/allow-api-like.yaml
Normal file
@@ -0,0 +1,6 @@
|
|||||||
|
- name: allow-api-routes
|
||||||
|
action: ALLOW
|
||||||
|
expression:
|
||||||
|
all:
|
||||||
|
- '!(method == "HEAD" || method == "GET")'
|
||||||
|
- path.startsWith("/api/")
|
||||||
15
data/common/allow-private-addresses.yaml
Normal file
@@ -0,0 +1,15 @@
|
|||||||
|
- name: ipv4-rfc-1918
|
||||||
|
action: ALLOW
|
||||||
|
remote_addresses:
|
||||||
|
- 10.0.0.0/8
|
||||||
|
- 172.16.0.0/12
|
||||||
|
- 192.168.0.0/16
|
||||||
|
- 100.64.0.0/10
|
||||||
|
- name: ipv6-ula
|
||||||
|
action: ALLOW
|
||||||
|
remote_addresses:
|
||||||
|
- fc00::/7
|
||||||
|
- name: ipv6-link-local
|
||||||
|
action: ALLOW
|
||||||
|
remote_addresses:
|
||||||
|
- fe80::/10
|
||||||
10
data/common/challenge-browser-like.yaml
Normal file
@@ -0,0 +1,10 @@
|
|||||||
|
# Challenge anything with HTTP/1.1 that claims to be a browser
|
||||||
|
- name: challenge-lies-browser-but-http-1.1
|
||||||
|
action: CHALLENGE
|
||||||
|
expression:
|
||||||
|
all:
|
||||||
|
- '"X-Http-Version" in headers'
|
||||||
|
- headers["X-Http-Version"] == "HTTP/1.1"
|
||||||
|
- '"X-Forwarded-Proto" in headers'
|
||||||
|
- headers["X-Forwarded-Proto"] == "https"
|
||||||
|
- userAgent.isBrowserLike()
|
||||||
13
data/common/keep-internet-working.yaml
Normal file
@@ -0,0 +1,13 @@
|
|||||||
|
# Common "keeping the internet working" routes
|
||||||
|
- name: well-known
|
||||||
|
path_regex: ^/.well-known/.*$
|
||||||
|
action: ALLOW
|
||||||
|
- name: favicon
|
||||||
|
path_regex: ^/favicon.ico$
|
||||||
|
action: ALLOW
|
||||||
|
- name: robots-txt
|
||||||
|
path_regex: ^/robots.txt$
|
||||||
|
action: ALLOW
|
||||||
|
- name: sitemap
|
||||||
|
path_regex: ^/sitemap.xml$
|
||||||
|
action: ALLOW
|
||||||
4
data/common/legacy-challenge-everything.yaml
Normal file
@@ -0,0 +1,4 @@
|
|||||||
|
# Generic catchall rule
|
||||||
|
- name: generic-browser
|
||||||
|
expression: userAgent.isBrowserLike()
|
||||||
|
action: CHALLENGE
|
||||||
3
data/common/rfc-violations.yaml
Normal file
@@ -0,0 +1,3 @@
|
|||||||
|
- name: no-user-agent-string
|
||||||
|
expression: userAgent == ""
|
||||||
|
action: DENY
|
||||||
34
data/crawlers/bingbot.yaml
Normal file
@@ -0,0 +1,34 @@
|
|||||||
|
- name: bingbot
|
||||||
|
user_agent_regex: \+http\://www\.bing\.com/bingbot\.htm
|
||||||
|
action: ALLOW
|
||||||
|
# https://www.bing.com/toolbox/bingbot.json
|
||||||
|
remote_addresses: [
|
||||||
|
"157.55.39.0/24",
|
||||||
|
"207.46.13.0/24",
|
||||||
|
"40.77.167.0/24",
|
||||||
|
"13.66.139.0/24",
|
||||||
|
"13.66.144.0/24",
|
||||||
|
"52.167.144.0/24",
|
||||||
|
"13.67.10.16/28",
|
||||||
|
"13.69.66.240/28",
|
||||||
|
"13.71.172.224/28",
|
||||||
|
"139.217.52.0/28",
|
||||||
|
"191.233.204.224/28",
|
||||||
|
"20.36.108.32/28",
|
||||||
|
"20.43.120.16/28",
|
||||||
|
"40.79.131.208/28",
|
||||||
|
"40.79.186.176/28",
|
||||||
|
"52.231.148.0/28",
|
||||||
|
"20.79.107.240/28",
|
||||||
|
"51.105.67.0/28",
|
||||||
|
"20.125.163.80/28",
|
||||||
|
"40.77.188.0/22",
|
||||||
|
"65.55.210.0/24",
|
||||||
|
"199.30.24.0/23",
|
||||||
|
"40.77.202.0/24",
|
||||||
|
"40.77.139.0/25",
|
||||||
|
"20.74.197.0/28",
|
||||||
|
"20.15.133.160/27",
|
||||||
|
"40.77.177.0/24",
|
||||||
|
"40.77.178.0/23"
|
||||||
|
]
|
||||||
275
data/crawlers/duckduckbot.yaml
Normal file
@@ -0,0 +1,275 @@
|
|||||||
|
- name: duckduckbot
|
||||||
|
user_agent_regex: DuckDuckBot/1\.1; \(\+http\://duckduckgo\.com/duckduckbot\.html\)
|
||||||
|
action: ALLOW
|
||||||
|
# https://duckduckgo.com/duckduckgo-help-pages/results/duckduckbot
|
||||||
|
remote_addresses: [
|
||||||
|
"57.152.72.128/32",
|
||||||
|
"51.8.253.152/32",
|
||||||
|
"40.80.242.63/32",
|
||||||
|
"20.12.141.99/32",
|
||||||
|
"20.49.136.28/32",
|
||||||
|
"51.116.131.221/32",
|
||||||
|
"51.107.40.209/32",
|
||||||
|
"20.40.133.240/32",
|
||||||
|
"20.50.168.91/32",
|
||||||
|
"51.120.48.122/32",
|
||||||
|
"20.193.45.113/32",
|
||||||
|
"40.76.173.151/32",
|
||||||
|
"40.76.163.7/32",
|
||||||
|
"20.185.79.47/32",
|
||||||
|
"52.142.26.175/32",
|
||||||
|
"20.185.79.15/32",
|
||||||
|
"52.142.24.149/32",
|
||||||
|
"40.76.162.208/32",
|
||||||
|
"40.76.163.23/32",
|
||||||
|
"40.76.162.191/32",
|
||||||
|
"40.76.162.247/32",
|
||||||
|
"40.88.21.235/32",
|
||||||
|
"20.191.45.212/32",
|
||||||
|
"52.146.59.12/32",
|
||||||
|
"52.146.59.156/32",
|
||||||
|
"52.146.59.154/32",
|
||||||
|
"52.146.58.236/32",
|
||||||
|
"20.62.224.44/32",
|
||||||
|
"51.104.180.53/32",
|
||||||
|
"51.104.180.47/32",
|
||||||
|
"51.104.180.26/32",
|
||||||
|
"51.104.146.225/32",
|
||||||
|
"51.104.146.235/32",
|
||||||
|
"20.73.202.147/32",
|
||||||
|
"20.73.132.240/32",
|
||||||
|
"20.71.12.143/32",
|
||||||
|
"20.56.197.58/32",
|
||||||
|
"20.56.197.63/32",
|
||||||
|
"20.43.150.93/32",
|
||||||
|
"20.43.150.85/32",
|
||||||
|
"20.44.222.1/32",
|
||||||
|
"40.89.243.175/32",
|
||||||
|
"13.89.106.77/32",
|
||||||
|
"52.143.242.6/32",
|
||||||
|
"52.143.241.111/32",
|
||||||
|
"52.154.60.82/32",
|
||||||
|
"20.197.209.11/32",
|
||||||
|
"20.197.209.27/32",
|
||||||
|
"20.226.133.105/32",
|
||||||
|
"191.234.216.4/32",
|
||||||
|
"191.234.216.178/32",
|
||||||
|
"20.53.92.211/32",
|
||||||
|
"20.53.91.2/32",
|
||||||
|
"20.207.99.197/32",
|
||||||
|
"20.207.97.190/32",
|
||||||
|
"40.81.250.205/32",
|
||||||
|
"40.64.106.11/32",
|
||||||
|
"40.64.105.247/32",
|
||||||
|
"20.72.242.93/32",
|
||||||
|
"20.99.255.235/32",
|
||||||
|
"20.113.3.121/32",
|
||||||
|
"52.224.16.221/32",
|
||||||
|
"52.224.21.53/32",
|
||||||
|
"52.224.20.204/32",
|
||||||
|
"52.224.21.19/32",
|
||||||
|
"52.224.20.249/32",
|
||||||
|
"52.224.20.203/32",
|
||||||
|
"52.224.20.190/32",
|
||||||
|
"52.224.16.229/32",
|
||||||
|
"52.224.21.20/32",
|
||||||
|
"52.146.63.80/32",
|
||||||
|
"52.224.20.227/32",
|
||||||
|
"52.224.20.193/32",
|
||||||
|
"52.190.37.160/32",
|
||||||
|
"52.224.21.23/32",
|
||||||
|
"52.224.20.223/32",
|
||||||
|
"52.224.20.181/32",
|
||||||
|
"52.224.21.49/32",
|
||||||
|
"52.224.21.55/32",
|
||||||
|
"52.224.21.61/32",
|
||||||
|
"52.224.19.152/32",
|
||||||
|
"52.224.20.186/32",
|
||||||
|
"52.224.21.27/32",
|
||||||
|
"52.224.21.51/32",
|
||||||
|
"52.224.20.174/32",
|
||||||
|
"52.224.21.4/32",
|
||||||
|
"51.104.164.109/32",
|
||||||
|
"51.104.167.71/32",
|
||||||
|
"51.104.160.177/32",
|
||||||
|
"51.104.162.149/32",
|
||||||
|
"51.104.167.95/32",
|
||||||
|
"51.104.167.54/32",
|
||||||
|
"51.104.166.111/32",
|
||||||
|
"51.104.167.88/32",
|
||||||
|
"51.104.161.32/32",
|
||||||
|
"51.104.163.250/32",
|
||||||
|
"51.104.164.189/32",
|
||||||
|
"51.104.167.19/32",
|
||||||
|
"51.104.160.167/32",
|
||||||
|
"51.104.167.110/32",
|
||||||
|
"20.191.44.119/32",
|
||||||
|
"51.104.167.104/32",
|
||||||
|
"20.191.44.234/32",
|
||||||
|
"51.104.164.215/32",
|
||||||
|
"51.104.167.52/32",
|
||||||
|
"20.191.44.22/32",
|
||||||
|
"51.104.167.87/32",
|
||||||
|
"51.104.167.96/32",
|
||||||
|
"20.191.44.16/32",
|
||||||
|
"51.104.167.61/32",
|
||||||
|
"51.104.164.147/32",
|
||||||
|
"20.50.48.159/32",
|
||||||
|
"40.114.182.172/32",
|
||||||
|
"20.50.50.130/32",
|
||||||
|
"20.50.50.163/32",
|
||||||
|
"20.50.50.46/32",
|
||||||
|
"40.114.182.153/32",
|
||||||
|
"20.50.50.118/32",
|
||||||
|
"20.50.49.55/32",
|
||||||
|
"20.50.49.25/32",
|
||||||
|
"40.114.183.251/32",
|
||||||
|
"20.50.50.123/32",
|
||||||
|
"20.50.49.237/32",
|
||||||
|
"20.50.48.192/32",
|
||||||
|
"20.50.50.134/32",
|
||||||
|
"51.138.90.233/32",
|
||||||
|
"40.114.183.196/32",
|
||||||
|
"20.50.50.146/32",
|
||||||
|
"40.114.183.88/32",
|
||||||
|
"20.50.50.145/32",
|
||||||
|
"20.50.50.121/32",
|
||||||
|
"20.50.49.40/32",
|
||||||
|
"51.138.90.206/32",
|
||||||
|
"40.114.182.45/32",
|
||||||
|
"51.138.90.161/32",
|
||||||
|
"20.50.49.0/32",
|
||||||
|
"40.119.232.215/32",
|
||||||
|
"104.43.55.167/32",
|
||||||
|
"40.119.232.251/32",
|
||||||
|
"40.119.232.50/32",
|
||||||
|
"40.119.232.146/32",
|
||||||
|
"40.119.232.218/32",
|
||||||
|
"104.43.54.127/32",
|
||||||
|
"104.43.55.117/32",
|
||||||
|
"104.43.55.116/32",
|
||||||
|
"104.43.55.166/32",
|
||||||
|
"52.154.169.50/32",
|
||||||
|
"52.154.171.70/32",
|
||||||
|
"52.154.170.229/32",
|
||||||
|
"52.154.170.113/32",
|
||||||
|
"52.154.171.44/32",
|
||||||
|
"52.154.172.2/32",
|
||||||
|
"52.143.244.81/32",
|
||||||
|
"52.154.171.87/32",
|
||||||
|
"52.154.171.250/32",
|
||||||
|
"52.154.170.28/32",
|
||||||
|
"52.154.170.122/32",
|
||||||
|
"52.143.243.117/32",
|
||||||
|
"52.143.247.235/32",
|
||||||
|
"52.154.171.235/32",
|
||||||
|
"52.154.171.196/32",
|
||||||
|
"52.154.171.0/32",
|
||||||
|
"52.154.170.243/32",
|
||||||
|
"52.154.170.26/32",
|
||||||
|
"52.154.169.200/32",
|
||||||
|
"52.154.170.96/32",
|
||||||
|
"52.154.170.88/32",
|
||||||
|
"52.154.171.150/32",
|
||||||
|
"52.154.171.205/32",
|
||||||
|
"52.154.170.117/32",
|
||||||
|
"52.154.170.209/32",
|
||||||
|
"191.235.202.48/32",
|
||||||
|
"191.233.3.202/32",
|
||||||
|
"191.235.201.214/32",
|
||||||
|
"191.233.3.197/32",
|
||||||
|
"191.235.202.38/32",
|
||||||
|
"20.53.78.144/32",
|
||||||
|
"20.193.24.10/32",
|
||||||
|
"20.53.78.236/32",
|
||||||
|
"20.53.78.138/32",
|
||||||
|
"20.53.78.123/32",
|
||||||
|
"20.53.78.106/32",
|
||||||
|
"20.193.27.215/32",
|
||||||
|
"20.193.25.197/32",
|
||||||
|
"20.193.12.126/32",
|
||||||
|
"20.193.24.251/32",
|
||||||
|
"20.204.242.101/32",
|
||||||
|
"20.207.72.113/32",
|
||||||
|
"20.204.242.19/32",
|
||||||
|
"20.219.45.67/32",
|
||||||
|
"20.207.72.11/32",
|
||||||
|
"20.219.45.190/32",
|
||||||
|
"20.204.243.55/32",
|
||||||
|
"20.204.241.148/32",
|
||||||
|
"20.207.72.110/32",
|
||||||
|
"20.204.240.172/32",
|
||||||
|
"20.207.72.21/32",
|
||||||
|
"20.204.246.81/32",
|
||||||
|
"20.207.107.181/32",
|
||||||
|
"20.204.246.254/32",
|
||||||
|
"20.219.43.246/32",
|
||||||
|
"52.149.25.43/32",
|
||||||
|
"52.149.61.51/32",
|
||||||
|
"52.149.58.139/32",
|
||||||
|
"52.149.60.38/32",
|
||||||
|
"52.148.165.38/32",
|
||||||
|
"52.143.95.162/32",
|
||||||
|
"52.149.56.151/32",
|
||||||
|
"52.149.30.45/32",
|
||||||
|
"52.149.58.173/32",
|
||||||
|
"52.143.95.204/32",
|
||||||
|
"52.149.28.83/32",
|
||||||
|
"52.149.58.69/32",
|
||||||
|
"52.148.161.87/32",
|
||||||
|
"52.149.58.27/32",
|
||||||
|
"52.149.28.18/32",
|
||||||
|
"20.79.226.26/32",
|
||||||
|
"20.79.239.66/32",
|
||||||
|
"20.79.238.198/32",
|
||||||
|
"20.113.14.159/32",
|
||||||
|
"20.75.144.152/32",
|
||||||
|
"20.43.172.120/32",
|
||||||
|
"20.53.134.160/32",
|
||||||
|
"20.201.15.208/32",
|
||||||
|
"20.93.28.24/32",
|
||||||
|
"20.61.34.40/32",
|
||||||
|
"52.242.224.168/32",
|
||||||
|
"20.80.129.80/32",
|
||||||
|
"20.195.108.47/32",
|
||||||
|
"4.195.133.120/32",
|
||||||
|
"4.228.76.163/32",
|
||||||
|
"4.182.131.108/32",
|
||||||
|
"4.209.224.56/32",
|
||||||
|
"108.141.83.74/32",
|
||||||
|
"4.213.46.14/32",
|
||||||
|
"172.169.17.165/32",
|
||||||
|
"51.8.71.117/32",
|
||||||
|
"20.3.1.178/32",
|
||||||
|
"52.149.56.151/32",
|
||||||
|
"52.149.30.45/32",
|
||||||
|
"52.149.58.173/32",
|
||||||
|
"52.143.95.204/32",
|
||||||
|
"52.149.28.83/32",
|
||||||
|
"52.149.58.69/32",
|
||||||
|
"52.148.161.87/32",
|
||||||
|
"52.149.58.27/32",
|
||||||
|
"52.149.28.18/32",
|
||||||
|
"20.79.226.26/32",
|
||||||
|
"20.79.239.66/32",
|
||||||
|
"20.79.238.198/32",
|
||||||
|
"20.113.14.159/32",
|
||||||
|
"20.75.144.152/32",
|
||||||
|
"20.43.172.120/32",
|
||||||
|
"20.53.134.160/32",
|
||||||
|
"20.201.15.208/32",
|
||||||
|
"20.93.28.24/32",
|
||||||
|
"20.61.34.40/32",
|
||||||
|
"52.242.224.168/32",
|
||||||
|
"20.80.129.80/32",
|
||||||
|
"20.195.108.47/32",
|
||||||
|
"4.195.133.120/32",
|
||||||
|
"4.228.76.163/32",
|
||||||
|
"4.182.131.108/32",
|
||||||
|
"4.209.224.56/32",
|
||||||
|
"108.141.83.74/32",
|
||||||
|
"4.213.46.14/32",
|
||||||
|
"172.169.17.165/32",
|
||||||
|
"51.8.71.117/32",
|
||||||
|
"20.3.1.178/32"
|
||||||
|
]
|
||||||
263
data/crawlers/googlebot.yaml
Normal file
@@ -0,0 +1,263 @@
|
|||||||
|
- name: googlebot
|
||||||
|
user_agent_regex: \+http\://www\.google\.com/bot\.html
|
||||||
|
action: ALLOW
|
||||||
|
# https://developers.google.com/static/search/apis/ipranges/googlebot.json
|
||||||
|
remote_addresses: [
|
||||||
|
"2001:4860:4801:10::/64",
|
||||||
|
"2001:4860:4801:11::/64",
|
||||||
|
"2001:4860:4801:12::/64",
|
||||||
|
"2001:4860:4801:13::/64",
|
||||||
|
"2001:4860:4801:14::/64",
|
||||||
|
"2001:4860:4801:15::/64",
|
||||||
|
"2001:4860:4801:16::/64",
|
||||||
|
"2001:4860:4801:17::/64",
|
||||||
|
"2001:4860:4801:18::/64",
|
||||||
|
"2001:4860:4801:19::/64",
|
||||||
|
"2001:4860:4801:1a::/64",
|
||||||
|
"2001:4860:4801:1b::/64",
|
||||||
|
"2001:4860:4801:1c::/64",
|
||||||
|
"2001:4860:4801:1d::/64",
|
||||||
|
"2001:4860:4801:1e::/64",
|
||||||
|
"2001:4860:4801:1f::/64",
|
||||||
|
"2001:4860:4801:20::/64",
|
||||||
|
"2001:4860:4801:21::/64",
|
||||||
|
"2001:4860:4801:22::/64",
|
||||||
|
"2001:4860:4801:23::/64",
|
||||||
|
"2001:4860:4801:24::/64",
|
||||||
|
"2001:4860:4801:25::/64",
|
||||||
|
"2001:4860:4801:26::/64",
|
||||||
|
"2001:4860:4801:27::/64",
|
||||||
|
"2001:4860:4801:28::/64",
|
||||||
|
"2001:4860:4801:29::/64",
|
||||||
|
"2001:4860:4801:2::/64",
|
||||||
|
"2001:4860:4801:2a::/64",
|
||||||
|
"2001:4860:4801:2b::/64",
|
||||||
|
"2001:4860:4801:2c::/64",
|
||||||
|
"2001:4860:4801:2d::/64",
|
||||||
|
"2001:4860:4801:2e::/64",
|
||||||
|
"2001:4860:4801:2f::/64",
|
||||||
|
"2001:4860:4801:31::/64",
|
||||||
|
"2001:4860:4801:32::/64",
|
||||||
|
"2001:4860:4801:33::/64",
|
||||||
|
"2001:4860:4801:34::/64",
|
||||||
|
"2001:4860:4801:35::/64",
|
||||||
|
"2001:4860:4801:36::/64",
|
||||||
|
"2001:4860:4801:37::/64",
|
||||||
|
"2001:4860:4801:38::/64",
|
||||||
|
"2001:4860:4801:39::/64",
|
||||||
|
"2001:4860:4801:3a::/64",
|
||||||
|
"2001:4860:4801:3b::/64",
|
||||||
|
"2001:4860:4801:3c::/64",
|
||||||
|
"2001:4860:4801:3d::/64",
|
||||||
|
"2001:4860:4801:3e::/64",
|
||||||
|
"2001:4860:4801:40::/64",
|
||||||
|
"2001:4860:4801:41::/64",
|
||||||
|
"2001:4860:4801:42::/64",
|
||||||
|
"2001:4860:4801:43::/64",
|
||||||
|
"2001:4860:4801:44::/64",
|
||||||
|
"2001:4860:4801:45::/64",
|
||||||
|
"2001:4860:4801:46::/64",
|
||||||
|
"2001:4860:4801:47::/64",
|
||||||
|
"2001:4860:4801:48::/64",
|
||||||
|
"2001:4860:4801:49::/64",
|
||||||
|
"2001:4860:4801:4a::/64",
|
||||||
|
"2001:4860:4801:4b::/64",
|
||||||
|
"2001:4860:4801:4c::/64",
|
||||||
|
"2001:4860:4801:50::/64",
|
||||||
|
"2001:4860:4801:51::/64",
|
||||||
|
"2001:4860:4801:52::/64",
|
||||||
|
"2001:4860:4801:53::/64",
|
||||||
|
"2001:4860:4801:54::/64",
|
||||||
|
"2001:4860:4801:55::/64",
|
||||||
|
"2001:4860:4801:56::/64",
|
||||||
|
"2001:4860:4801:60::/64",
|
||||||
|
"2001:4860:4801:61::/64",
|
||||||
|
"2001:4860:4801:62::/64",
|
||||||
|
"2001:4860:4801:63::/64",
|
||||||
|
"2001:4860:4801:64::/64",
|
||||||
|
"2001:4860:4801:65::/64",
|
||||||
|
"2001:4860:4801:66::/64",
|
||||||
|
"2001:4860:4801:67::/64",
|
||||||
|
"2001:4860:4801:68::/64",
|
||||||
|
"2001:4860:4801:69::/64",
|
||||||
|
"2001:4860:4801:6a::/64",
|
||||||
|
"2001:4860:4801:6b::/64",
|
||||||
|
"2001:4860:4801:6c::/64",
|
||||||
|
"2001:4860:4801:6d::/64",
|
||||||
|
"2001:4860:4801:6e::/64",
|
||||||
|
"2001:4860:4801:6f::/64",
|
||||||
|
"2001:4860:4801:70::/64",
|
||||||
|
"2001:4860:4801:71::/64",
|
||||||
|
"2001:4860:4801:72::/64",
|
||||||
|
"2001:4860:4801:73::/64",
|
||||||
|
"2001:4860:4801:74::/64",
|
||||||
|
"2001:4860:4801:75::/64",
|
||||||
|
"2001:4860:4801:76::/64",
|
||||||
|
"2001:4860:4801:77::/64",
|
||||||
|
"2001:4860:4801:78::/64",
|
||||||
|
"2001:4860:4801:79::/64",
|
||||||
|
"2001:4860:4801:80::/64",
|
||||||
|
"2001:4860:4801:81::/64",
|
||||||
|
"2001:4860:4801:82::/64",
|
||||||
|
"2001:4860:4801:83::/64",
|
||||||
|
"2001:4860:4801:84::/64",
|
||||||
|
"2001:4860:4801:85::/64",
|
||||||
|
"2001:4860:4801:86::/64",
|
||||||
|
"2001:4860:4801:87::/64",
|
||||||
|
"2001:4860:4801:88::/64",
|
||||||
|
"2001:4860:4801:90::/64",
|
||||||
|
"2001:4860:4801:91::/64",
|
||||||
|
"2001:4860:4801:92::/64",
|
||||||
|
"2001:4860:4801:93::/64",
|
||||||
|
"2001:4860:4801:94::/64",
|
||||||
|
"2001:4860:4801:95::/64",
|
||||||
|
"2001:4860:4801:96::/64",
|
||||||
|
"2001:4860:4801:a0::/64",
|
||||||
|
"2001:4860:4801:a1::/64",
|
||||||
|
"2001:4860:4801:a2::/64",
|
||||||
|
"2001:4860:4801:a3::/64",
|
||||||
|
"2001:4860:4801:a4::/64",
|
||||||
|
"2001:4860:4801:a5::/64",
|
||||||
|
"2001:4860:4801:c::/64",
|
||||||
|
"2001:4860:4801:f::/64",
|
||||||
|
"192.178.5.0/27",
|
||||||
|
"192.178.6.0/27",
|
||||||
|
"192.178.6.128/27",
|
||||||
|
"192.178.6.160/27",
|
||||||
|
"192.178.6.192/27",
|
||||||
|
"192.178.6.32/27",
|
||||||
|
"192.178.6.64/27",
|
||||||
|
"192.178.6.96/27",
|
||||||
|
"34.100.182.96/28",
|
||||||
|
"34.101.50.144/28",
|
||||||
|
"34.118.254.0/28",
|
||||||
|
"34.118.66.0/28",
|
||||||
|
"34.126.178.96/28",
|
||||||
|
"34.146.150.144/28",
|
||||||
|
"34.147.110.144/28",
|
||||||
|
"34.151.74.144/28",
|
||||||
|
"34.152.50.64/28",
|
||||||
|
"34.154.114.144/28",
|
||||||
|
"34.155.98.32/28",
|
||||||
|
"34.165.18.176/28",
|
||||||
|
"34.175.160.64/28",
|
||||||
|
"34.176.130.16/28",
|
||||||
|
"34.22.85.0/27",
|
||||||
|
"34.64.82.64/28",
|
||||||
|
"34.65.242.112/28",
|
||||||
|
"34.80.50.80/28",
|
||||||
|
"34.88.194.0/28",
|
||||||
|
"34.89.10.80/28",
|
||||||
|
"34.89.198.80/28",
|
||||||
|
"34.96.162.48/28",
|
||||||
|
"35.247.243.240/28",
|
||||||
|
"66.249.64.0/27",
|
||||||
|
"66.249.64.128/27",
|
||||||
|
"66.249.64.160/27",
|
||||||
|
"66.249.64.224/27",
|
||||||
|
"66.249.64.32/27",
|
||||||
|
"66.249.64.64/27",
|
||||||
|
"66.249.64.96/27",
|
||||||
|
"66.249.65.0/27",
|
||||||
|
"66.249.65.128/27",
|
||||||
|
"66.249.65.160/27",
|
||||||
|
"66.249.65.192/27",
|
||||||
|
"66.249.65.224/27",
|
||||||
|
"66.249.65.32/27",
|
||||||
|
"66.249.65.64/27",
|
||||||
|
"66.249.65.96/27",
|
||||||
|
"66.249.66.0/27",
|
||||||
|
"66.249.66.128/27",
|
||||||
|
"66.249.66.160/27",
|
||||||
|
"66.249.66.192/27",
|
||||||
|
"66.249.66.224/27",
|
||||||
|
"66.249.66.32/27",
|
||||||
|
"66.249.66.64/27",
|
||||||
|
"66.249.66.96/27",
|
||||||
|
"66.249.68.0/27",
|
||||||
|
"66.249.68.128/27",
|
||||||
|
"66.249.68.32/27",
|
||||||
|
"66.249.68.64/27",
|
||||||
|
"66.249.68.96/27",
|
||||||
|
"66.249.69.0/27",
|
||||||
|
"66.249.69.128/27",
|
||||||
|
"66.249.69.160/27",
|
||||||
|
"66.249.69.192/27",
|
||||||
|
"66.249.69.224/27",
|
||||||
|
"66.249.69.32/27",
|
||||||
|
"66.249.69.64/27",
|
||||||
|
"66.249.69.96/27",
|
||||||
|
"66.249.70.0/27",
|
||||||
|
"66.249.70.128/27",
|
||||||
|
"66.249.70.160/27",
|
||||||
|
"66.249.70.192/27",
|
||||||
|
"66.249.70.224/27",
|
||||||
|
"66.249.70.32/27",
|
||||||
|
"66.249.70.64/27",
|
||||||
|
"66.249.70.96/27",
|
||||||
|
"66.249.71.0/27",
|
||||||
|
"66.249.71.128/27",
|
||||||
|
"66.249.71.160/27",
|
||||||
|
"66.249.71.192/27",
|
||||||
|
"66.249.71.224/27",
|
||||||
|
"66.249.71.32/27",
|
||||||
|
"66.249.71.64/27",
|
||||||
|
"66.249.71.96/27",
|
||||||
|
"66.249.72.0/27",
|
||||||
|
"66.249.72.128/27",
|
||||||
|
"66.249.72.160/27",
|
||||||
|
"66.249.72.192/27",
|
||||||
|
"66.249.72.224/27",
|
||||||
|
"66.249.72.32/27",
|
||||||
|
"66.249.72.64/27",
|
||||||
|
"66.249.72.96/27",
|
||||||
|
"66.249.73.0/27",
|
||||||
|
"66.249.73.128/27",
|
||||||
|
"66.249.73.160/27",
|
||||||
|
"66.249.73.192/27",
|
||||||
|
"66.249.73.224/27",
|
||||||
|
"66.249.73.32/27",
|
||||||
|
"66.249.73.64/27",
|
||||||
|
"66.249.73.96/27",
|
||||||
|
"66.249.74.0/27",
|
||||||
|
"66.249.74.128/27",
|
||||||
|
"66.249.74.160/27",
|
||||||
|
"66.249.74.192/27",
|
||||||
|
"66.249.74.32/27",
|
||||||
|
"66.249.74.64/27",
|
||||||
|
"66.249.74.96/27",
|
||||||
|
"66.249.75.0/27",
|
||||||
|
"66.249.75.128/27",
|
||||||
|
"66.249.75.160/27",
|
||||||
|
"66.249.75.192/27",
|
||||||
|
"66.249.75.224/27",
|
||||||
|
"66.249.75.32/27",
|
||||||
|
"66.249.75.64/27",
|
||||||
|
"66.249.75.96/27",
|
||||||
|
"66.249.76.0/27",
|
||||||
|
"66.249.76.128/27",
|
||||||
|
"66.249.76.160/27",
|
||||||
|
"66.249.76.192/27",
|
||||||
|
"66.249.76.224/27",
|
||||||
|
"66.249.76.32/27",
|
||||||
|
"66.249.76.64/27",
|
||||||
|
"66.249.76.96/27",
|
||||||
|
"66.249.77.0/27",
|
||||||
|
"66.249.77.128/27",
|
||||||
|
"66.249.77.160/27",
|
||||||
|
"66.249.77.192/27",
|
||||||
|
"66.249.77.224/27",
|
||||||
|
"66.249.77.32/27",
|
||||||
|
"66.249.77.64/27",
|
||||||
|
"66.249.77.96/27",
|
||||||
|
"66.249.78.0/27",
|
||||||
|
"66.249.78.32/27",
|
||||||
|
"66.249.79.0/27",
|
||||||
|
"66.249.79.128/27",
|
||||||
|
"66.249.79.160/27",
|
||||||
|
"66.249.79.192/27",
|
||||||
|
"66.249.79.224/27",
|
||||||
|
"66.249.79.32/27",
|
||||||
|
"66.249.79.64/27",
|
||||||
|
"66.249.79.96/27"
|
||||||
|
]
|
||||||
8
data/crawlers/internet-archive.yaml
Normal file
@@ -0,0 +1,8 @@
|
|||||||
|
- name: internet-archive
|
||||||
|
action: ALLOW
|
||||||
|
# https://ipinfo.io/AS7941
|
||||||
|
remote_addresses: [
|
||||||
|
"207.241.224.0/20",
|
||||||
|
"208.70.24.0/21",
|
||||||
|
"2620:0:9c0::/48"
|
||||||
|
]
|
||||||
10
data/crawlers/kagibot.yaml
Normal file
@@ -0,0 +1,10 @@
|
|||||||
|
- name: kagibot
|
||||||
|
user_agent_regex: \+https\://kagi\.com/bot
|
||||||
|
action: ALLOW
|
||||||
|
# https://kagi.com/bot
|
||||||
|
remote_addresses: [
|
||||||
|
"216.18.205.234/32",
|
||||||
|
"35.212.27.76/32",
|
||||||
|
"104.254.65.50/32",
|
||||||
|
"209.151.156.194/32"
|
||||||
|
]
|
||||||
11
data/crawlers/marginalia.yaml
Normal file
@@ -0,0 +1,11 @@
|
|||||||
|
- name: marginalia
|
||||||
|
user_agent_regex: search\.marginalia\.nu
|
||||||
|
action: ALLOW
|
||||||
|
# Received directly over email
|
||||||
|
remote_addresses: [
|
||||||
|
"193.183.0.162/31",
|
||||||
|
"193.183.0.164/30",
|
||||||
|
"193.183.0.168/30",
|
||||||
|
"193.183.0.172/31",
|
||||||
|
"193.183.0.174/32"
|
||||||
|
]
|
||||||
5
data/crawlers/mojeekbot.yaml
Normal file
@@ -0,0 +1,5 @@
|
|||||||
|
- name: mojeekbot
|
||||||
|
user_agent_regex: \+https\://www\.mojeek\.com/bot\.html
|
||||||
|
action: ALLOW
|
||||||
|
# https://www.mojeek.com/bot.html
|
||||||
|
remote_addresses: [ "5.102.173.71/32" ]
|
||||||
5
data/crawlers/qwantbot.yaml
Normal file
@@ -0,0 +1,5 @@
|
|||||||
|
- name: qwantbot
|
||||||
|
user_agent_regex: \+https\://help\.qwant\.com/bot/
|
||||||
|
action: ALLOW
|
||||||
|
# https://help.qwant.com/wp-content/uploads/sites/2/2025/01/qwantbot.json
|
||||||
|
remote_addresses: [ "91.242.162.0/24" ]
|
||||||
@@ -3,6 +3,6 @@ package data
|
|||||||
import "embed"
|
import "embed"
|
||||||
|
|
||||||
var (
|
var (
|
||||||
//go:embed botPolicies.json
|
//go:embed botPolicies.yaml botPolicies.json apps bots clients common crawlers
|
||||||
BotPolicies embed.FS
|
BotPolicies embed.FS
|
||||||
)
|
)
|
||||||
|
|||||||
@@ -1,12 +0,0 @@
|
|||||||
---
|
|
||||||
slug: first-blog-post
|
|
||||||
title: First Blog Post
|
|
||||||
authors: [slorber, yangshun]
|
|
||||||
tags: [hola, docusaurus]
|
|
||||||
---
|
|
||||||
|
|
||||||
Lorem ipsum dolor sit amet...
|
|
||||||
|
|
||||||
<!-- truncate -->
|
|
||||||
|
|
||||||
...consectetur adipiscing elit. Pellentesque elementum dignissim ultricies. Fusce rhoncus ipsum tempor eros aliquam consequat. Lorem ipsum dolor sit amet
|
|
||||||
@@ -1,44 +0,0 @@
|
|||||||
---
|
|
||||||
slug: long-blog-post
|
|
||||||
title: Long Blog Post
|
|
||||||
authors: yangshun
|
|
||||||
tags: [hello, docusaurus]
|
|
||||||
---
|
|
||||||
|
|
||||||
This is the summary of a very long blog post,
|
|
||||||
|
|
||||||
Use a `<!--` `truncate` `-->` comment to limit blog post size in the list view.
|
|
||||||
|
|
||||||
<!-- truncate -->
|
|
||||||
|
|
||||||
Lorem ipsum dolor sit amet, consectetur adipiscing elit. Pellentesque elementum dignissim ultricies. Fusce rhoncus ipsum tempor eros aliquam consequat. Lorem ipsum dolor sit amet
|
|
||||||
|
|
||||||
Lorem ipsum dolor sit amet, consectetur adipiscing elit. Pellentesque elementum dignissim ultricies. Fusce rhoncus ipsum tempor eros aliquam consequat. Lorem ipsum dolor sit amet
|
|
||||||
|
|
||||||
Lorem ipsum dolor sit amet, consectetur adipiscing elit. Pellentesque elementum dignissim ultricies. Fusce rhoncus ipsum tempor eros aliquam consequat. Lorem ipsum dolor sit amet
|
|
||||||
|
|
||||||
Lorem ipsum dolor sit amet, consectetur adipiscing elit. Pellentesque elementum dignissim ultricies. Fusce rhoncus ipsum tempor eros aliquam consequat. Lorem ipsum dolor sit amet
|
|
||||||
|
|
||||||
Lorem ipsum dolor sit amet, consectetur adipiscing elit. Pellentesque elementum dignissim ultricies. Fusce rhoncus ipsum tempor eros aliquam consequat. Lorem ipsum dolor sit amet
|
|
||||||
|
|
||||||
Lorem ipsum dolor sit amet, consectetur adipiscing elit. Pellentesque elementum dignissim ultricies. Fusce rhoncus ipsum tempor eros aliquam consequat. Lorem ipsum dolor sit amet
|
|
||||||
|
|
||||||
Lorem ipsum dolor sit amet, consectetur adipiscing elit. Pellentesque elementum dignissim ultricies. Fusce rhoncus ipsum tempor eros aliquam consequat. Lorem ipsum dolor sit amet
|
|
||||||
|
|
||||||
Lorem ipsum dolor sit amet, consectetur adipiscing elit. Pellentesque elementum dignissim ultricies. Fusce rhoncus ipsum tempor eros aliquam consequat. Lorem ipsum dolor sit amet
|
|
||||||
|
|
||||||
Lorem ipsum dolor sit amet, consectetur adipiscing elit. Pellentesque elementum dignissim ultricies. Fusce rhoncus ipsum tempor eros aliquam consequat. Lorem ipsum dolor sit amet
|
|
||||||
|
|
||||||
Lorem ipsum dolor sit amet, consectetur adipiscing elit. Pellentesque elementum dignissim ultricies. Fusce rhoncus ipsum tempor eros aliquam consequat. Lorem ipsum dolor sit amet
|
|
||||||
|
|
||||||
Lorem ipsum dolor sit amet, consectetur adipiscing elit. Pellentesque elementum dignissim ultricies. Fusce rhoncus ipsum tempor eros aliquam consequat. Lorem ipsum dolor sit amet
|
|
||||||
|
|
||||||
Lorem ipsum dolor sit amet, consectetur adipiscing elit. Pellentesque elementum dignissim ultricies. Fusce rhoncus ipsum tempor eros aliquam consequat. Lorem ipsum dolor sit amet
|
|
||||||
|
|
||||||
Lorem ipsum dolor sit amet, consectetur adipiscing elit. Pellentesque elementum dignissim ultricies. Fusce rhoncus ipsum tempor eros aliquam consequat. Lorem ipsum dolor sit amet
|
|
||||||
|
|
||||||
Lorem ipsum dolor sit amet, consectetur adipiscing elit. Pellentesque elementum dignissim ultricies. Fusce rhoncus ipsum tempor eros aliquam consequat. Lorem ipsum dolor sit amet
|
|
||||||
|
|
||||||
Lorem ipsum dolor sit amet, consectetur adipiscing elit. Pellentesque elementum dignissim ultricies. Fusce rhoncus ipsum tempor eros aliquam consequat. Lorem ipsum dolor sit amet
|
|
||||||
|
|
||||||
Lorem ipsum dolor sit amet, consectetur adipiscing elit. Pellentesque elementum dignissim ultricies. Fusce rhoncus ipsum tempor eros aliquam consequat. Lorem ipsum dolor sit amet
|
|
||||||
@@ -1,24 +0,0 @@
|
|||||||
---
|
|
||||||
slug: mdx-blog-post
|
|
||||||
title: MDX Blog Post
|
|
||||||
authors: [slorber]
|
|
||||||
tags: [docusaurus]
|
|
||||||
---
|
|
||||||
|
|
||||||
Blog posts support [Docusaurus Markdown features](https://docusaurus.io/docs/markdown-features), such as [MDX](https://mdxjs.com/).
|
|
||||||
|
|
||||||
:::tip
|
|
||||||
|
|
||||||
Use the power of React to create interactive blog posts.
|
|
||||||
|
|
||||||
:::
|
|
||||||
|
|
||||||
{/* truncate */}
|
|
||||||
|
|
||||||
For example, use JSX to create an interactive button:
|
|
||||||
|
|
||||||
```js
|
|
||||||
<button onClick={() => alert('button clicked!')}>Click me!</button>
|
|
||||||
```
|
|
||||||
|
|
||||||
<button onClick={() => alert('button clicked!')}>Click me!</button>
|
|
||||||
|
Before Width: | Height: | Size: 94 KiB |
@@ -1,29 +0,0 @@
|
|||||||
---
|
|
||||||
slug: welcome
|
|
||||||
title: Welcome
|
|
||||||
authors: [slorber, yangshun]
|
|
||||||
tags: [facebook, hello, docusaurus]
|
|
||||||
---
|
|
||||||
|
|
||||||
[Docusaurus blogging features](https://docusaurus.io/docs/blog) are powered by the [blog plugin](https://docusaurus.io/docs/api/plugins/@docusaurus/plugin-content-blog).
|
|
||||||
|
|
||||||
Here are a few tips you might find useful.
|
|
||||||
|
|
||||||
<!-- truncate -->
|
|
||||||
|
|
||||||
Simply add Markdown files (or folders) to the `blog` directory.
|
|
||||||
|
|
||||||
Regular blog authors can be added to `authors.yml`.
|
|
||||||
|
|
||||||
The blog post date can be extracted from filenames, such as:
|
|
||||||
|
|
||||||
- `2019-05-30-welcome.md`
|
|
||||||
- `2019-05-30-welcome/index.md`
|
|
||||||
|
|
||||||
A blog post folder can be convenient to co-locate blog post images:
|
|
||||||
|
|
||||||

|
|
||||||
|
|
||||||
The blog supports tags as well!
|
|
||||||
|
|
||||||
**And if you don't want a blog**: just delete this directory, and use `blog: false` in your Docusaurus config.
|
|
||||||
@@ -1,23 +0,0 @@
|
|||||||
yangshun:
|
|
||||||
name: Yangshun Tay
|
|
||||||
title: Front End Engineer @ Facebook
|
|
||||||
url: https://github.com/yangshun
|
|
||||||
image_url: https://github.com/yangshun.png
|
|
||||||
page: true
|
|
||||||
socials:
|
|
||||||
x: yangshunz
|
|
||||||
github: yangshun
|
|
||||||
|
|
||||||
slorber:
|
|
||||||
name: Sébastien Lorber
|
|
||||||
title: Docusaurus maintainer
|
|
||||||
url: https://sebastienlorber.com
|
|
||||||
image_url: https://github.com/slorber.png
|
|
||||||
page:
|
|
||||||
# customize the url of the author page at /blog/authors/<permalink>
|
|
||||||
permalink: '/all-sebastien-lorber-articles'
|
|
||||||
socials:
|
|
||||||
x: sebastienlorber
|
|
||||||
linkedin: sebastienlorber
|
|
||||||
github: slorber
|
|
||||||
newsletter: https://thisweekinreact.com
|
|
||||||
@@ -1,19 +0,0 @@
|
|||||||
facebook:
|
|
||||||
label: Facebook
|
|
||||||
permalink: /facebook
|
|
||||||
description: Facebook tag description
|
|
||||||
|
|
||||||
hello:
|
|
||||||
label: Hello
|
|
||||||
permalink: /hello
|
|
||||||
description: Hello tag description
|
|
||||||
|
|
||||||
docusaurus:
|
|
||||||
label: Docusaurus
|
|
||||||
permalink: /docusaurus
|
|
||||||
description: Docusaurus tag description
|
|
||||||
|
|
||||||
hola:
|
|
||||||
label: Hola
|
|
||||||
permalink: /hola
|
|
||||||
description: Hola tag description
|
|
||||||
@@ -11,25 +11,80 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
|
|||||||
|
|
||||||
## [Unreleased]
|
## [Unreleased]
|
||||||
|
|
||||||
- Added a periodic cleanup routine for the decaymap that removes expired entries, ensuring stale data is properly pruned.
|
## v1.17.0: Asahi sas Brutus
|
||||||
|
|
||||||
|
- Ensure regexes can't end in newlines ([#372](https://github.com/TecharoHQ/anubis/issues/372))
|
||||||
|
- Add documentation for default allow behavior (implicit rule)
|
||||||
|
- Enable [importing configuration snippets](./admin/configuration/import.mdx) ([#321](https://github.com/TecharoHQ/anubis/pull/321))
|
||||||
|
- Refactor check logic to be more generic and work on a Checker type
|
||||||
|
- Add more AI user agents based on the [ai.robots.txt](https://github.com/ai-robots-txt/ai.robots.txt) project
|
||||||
|
- Embedded challenge data in initial HTML response to improve performance
|
||||||
|
- Added support to use Nginx' `auth_request` directive with Anubis
|
||||||
|
- Added support to allow to restrict the allowed redirect domains
|
||||||
|
- Whitelisted [DuckDuckBot](https://duckduckgo.com/duckduckgo-help-pages/results/duckduckbot/) in botPolicies
|
||||||
|
- Improvements to build scripts to make them less independent of the build host
|
||||||
|
- Improved the OpenGraph error logging
|
||||||
|
- Added `Opera` to the `generic-browser` bot policy rule
|
||||||
|
- Added FreeBSD rc.d script so can be run as a FreeBSD daemon
|
||||||
|
- Allow requests from the Internet Archive
|
||||||
|
- Added example nginx configuration to documentation
|
||||||
|
- Added example Apache configuration to the documentation [#277](https://github.com/TecharoHQ/anubis/issues/277)
|
||||||
|
- Move per-environment configuration details into their own pages
|
||||||
|
- Added support for running anubis behind a prefix (e.g. `/myapp`)
|
||||||
|
- Added headers support to bot policy rules
|
||||||
|
- Moved configuration file from JSON to YAML by default
|
||||||
|
- Added documentation on how to use Anubis with Traefik in Docker
|
||||||
|
- Improved error handling in some edge cases
|
||||||
|
- Disable `generic-bot-catchall` rule because of its high false positive rate in real-world scenarios
|
||||||
|
- Moved all CSS inline to the Xess package, changed colors to be CSS variables
|
||||||
|
- Set or append to `X-Forwarded-For` header unless the remote connects over a loopback address [#328](https://github.com/TecharoHQ/anubis/issues/328)
|
||||||
|
- Fixed mojeekbot user agent regex
|
||||||
|
- Added support for running anubis behind a base path (e.g. `/myapp`)
|
||||||
|
- Reduce Anubis' paranoia with user cookies ([#365](https://github.com/TecharoHQ/anubis/pull/365))
|
||||||
|
|
||||||
|
## v1.16.0
|
||||||
|
|
||||||
|
Fordola rem Lupis
|
||||||
|
|
||||||
|
> I want to make them pay! All of them! Everyone who ever mocked or looked down on me -- I want the power to make them pay!
|
||||||
|
|
||||||
|
The following features are the "big ticket" items:
|
||||||
|
|
||||||
|
- Added support for native Debian, Red Hat, and tarball packaging strategies including installation and use directions
|
||||||
|
- A prebaked tarball has been added, allowing distros to build Anubis like they could in v1.15.x
|
||||||
|
- The placeholder Anubis mascot has been replaced with a design by [CELPHASE](https://bsky.app/profile/celphase.bsky.social)
|
||||||
|
- Verification page now shows hash rate and a progress bar for completion probability
|
||||||
|
- Added support for [OpenGraph tags](https://ogp.me/) when rendering the challenge page. This allows for social previews to be generated when sharing the challenge page on social media platforms ([#195](https://github.com/TecharoHQ/anubis/pull/195))
|
||||||
|
- Added support for passing the ed25519 signing key in a file with `-ed25519-private-key-hex-file` or `ED25519_PRIVATE_KEY_HEX_FILE`
|
||||||
|
|
||||||
|
The other small fixes have been made:
|
||||||
|
|
||||||
|
- Added a periodic cleanup routine for the decaymap that removes expired entries, ensuring stale data is properly pruned
|
||||||
- Added a no-store Cache-Control header to the challenge page
|
- Added a no-store Cache-Control header to the challenge page
|
||||||
- Hide the directory listings for Anubis' internal static content
|
- Hide the directory listings for Anubis' internal static content
|
||||||
- Changed `--debug-x-real-ip-default` to `--use-remote-address`, getting the IP address from the request's socket address instead.
|
- Changed `--debug-x-real-ip-default` to `--use-remote-address`, getting the IP address from the request's socket address instead
|
||||||
- DroneBL lookups have been disabled by default
|
- DroneBL lookups have been disabled by default
|
||||||
- Static asset builds are now done on demand instead of the results being committed to source control
|
- Static asset builds are now done on demand instead of the results being committed to source control
|
||||||
- The Dockerfile has been removed as it is no longer in use
|
- The Dockerfile has been removed as it is no longer in use
|
||||||
- Developer documentation has been added to the docs site
|
- Developer documentation has been added to the docs site
|
||||||
- Show more errors when some predictable challenge page errors happen ([#150](https://github.com/TecharoHQ/anubis/issues/150))
|
- Show more errors when some predictable challenge page errors happen ([#150](https://github.com/TecharoHQ/anubis/issues/150))
|
||||||
- Verification page now shows hash rate and a progress bar for completion probability.
|
- Added the `--debug-benchmark-js` flag for testing proof-of-work performance during development
|
||||||
- Added the `--debug-benchmark-js` flag for testing proof-of-work performance during development.
|
|
||||||
- Use `TrimSuffix` instead of `TrimRight` on containerbuild
|
- Use `TrimSuffix` instead of `TrimRight` on containerbuild
|
||||||
- Fix the startup logs to correctly show the address and port the server is listening on
|
- Fix the startup logs to correctly show the address and port the server is listening on
|
||||||
- Add [LibreJS](https://www.gnu.org/software/librejs/) banner to Anubis JavaScript to allow LibreJS users to run the challenge
|
- Add [LibreJS](https://www.gnu.org/software/librejs/) banner to Anubis JavaScript to allow LibreJS users to run the challenge
|
||||||
- Added a wait with button continue + 30 second auto continue after 30s if you click "Why am I seeing this?"
|
- Added a wait with button continue + 30 second auto continue after 30s if you click "Why am I seeing this?"
|
||||||
- Fixed a typo in the challenge page title.
|
- Fixed a typo in the challenge page title
|
||||||
- Disabled running integration tests on Windows hosts due to it's reliance on posix features (see [#133](https://github.com/TecharoHQ/anubis/pull/133#issuecomment-2764732309)).
|
- Disabled running integration tests on Windows hosts due to it's reliance on posix features (see [#133](https://github.com/TecharoHQ/anubis/pull/133#issuecomment-2764732309))
|
||||||
- Added support for passing the ed25519 signing key in a file with `-ed25519-private-key-hex-file` or `ED25519_PRIVATE_KEY_HEX_FILE`.
|
|
||||||
- Fixed minor typos
|
- Fixed minor typos
|
||||||
|
- Added a Makefile to enable comfortable workflows for downstream packagers
|
||||||
|
- Added `zizmor` for GitHub Actions static analysis
|
||||||
|
- Fixed most `zizmor` findings
|
||||||
|
- Enabled Dependabot
|
||||||
|
- Added an air config for autoreload support in development ([#195](https://github.com/TecharoHQ/anubis/pull/195))
|
||||||
|
- Added an `--extract-resources` flag to extract static resources to a local folder
|
||||||
|
- Add noindex flag to all Anubis pages ([#227](https://github.com/TecharoHQ/anubis/issues/227))
|
||||||
|
- Added `WEBMASTER_EMAIL` variable, if it is present then display that email address on error pages ([#235](https://github.com/TecharoHQ/anubis/pull/235), [#115](https://github.com/TecharoHQ/anubis/issues/115))
|
||||||
|
- Hash pinned all GitHub Actions
|
||||||
|
|
||||||
## v1.15.1
|
## v1.15.1
|
||||||
|
|
||||||
@@ -112,7 +167,7 @@ Livia sas Junius
|
|||||||
[#21](https://github.com/TecharoHQ/anubis/pull/21)
|
[#21](https://github.com/TecharoHQ/anubis/pull/21)
|
||||||
- Don't overflow the image when browser windows are small (eg. on phones)
|
- Don't overflow the image when browser windows are small (eg. on phones)
|
||||||
[#27](https://github.com/TecharoHQ/anubis/pull/27)
|
[#27](https://github.com/TecharoHQ/anubis/pull/27)
|
||||||
- Lower the default difficulty to 4 from 5
|
- Lower the default difficulty to 5 from 4
|
||||||
- Don't duplicate work across multiple threads [#36](https://github.com/TecharoHQ/anubis/pull/36)
|
- Don't duplicate work across multiple threads [#36](https://github.com/TecharoHQ/anubis/pull/36)
|
||||||
- Documentation has been moved to https://anubis.techaro.lol/ with sources in docs/
|
- Documentation has been moved to https://anubis.techaro.lol/ with sources in docs/
|
||||||
- Removed several visible AI artifacts (e.g., 6 fingers) [#37](https://github.com/TecharoHQ/anubis/pull/37)
|
- Removed several visible AI artifacts (e.g., 6 fingers) [#37](https://github.com/TecharoHQ/anubis/pull/37)
|
||||||
@@ -155,4 +210,4 @@ Livia sas Junius
|
|||||||
([fd6903a](https://github.com/TecharoHQ/anubis/commit/fd6903aeed315b8fddee32890d7458a9271e4798)).
|
([fd6903a](https://github.com/TecharoHQ/anubis/commit/fd6903aeed315b8fddee32890d7458a9271e4798)).
|
||||||
- Footer links on the check page now point to Techaro's brand
|
- Footer links on the check page now point to Techaro's brand
|
||||||
([4ebccb1](https://github.com/TecharoHQ/anubis/commit/4ebccb197ec20d024328d7f92cad39bbbe4d6359))
|
([4ebccb1](https://github.com/TecharoHQ/anubis/commit/4ebccb197ec20d024328d7f92cad39bbbe4d6359))
|
||||||
- Anubis was imported from [Xe/x](https://github.com/Xe/x).
|
- Anubis was imported from [Xe/x](https://github.com/Xe/x)
|
||||||
|
|||||||
8
docs/docs/admin/configuration/_category_.json
Normal file
@@ -0,0 +1,8 @@
|
|||||||
|
{
|
||||||
|
"label": "Configuration",
|
||||||
|
"position": 10,
|
||||||
|
"link": {
|
||||||
|
"type": "generated-index",
|
||||||
|
"description": "Detailed information about configuring parts of Anubis."
|
||||||
|
}
|
||||||
|
}
|
||||||
25
docs/docs/admin/configuration/expressions.mdx
Normal file
@@ -0,0 +1,25 @@
|
|||||||
|
# Expression-based rule matching
|
||||||
|
|
||||||
|
- Anubis offers the ability to use [Common Expression Language (CEL)](https://cel.dev) for advanced rule matching
|
||||||
|
- A brief summary of CEL
|
||||||
|
- Imagine the rule as the contents of a function body in programming or the WHERE clause in SQL
|
||||||
|
- This is an advanced feature and it is easy to get yourself into trouble with it
|
||||||
|
- Link to the spec, mention docs are WIP
|
||||||
|
- Variables exposed to Anubis expressions
|
||||||
|
- `remoteAddress` -> string IP of client
|
||||||
|
- `host` -> string HTTP/TLS hostname
|
||||||
|
- `method` -> string HTTP method
|
||||||
|
- `userAgent` -> string User-Agent header
|
||||||
|
- `path` -> string HTTP request path
|
||||||
|
- `query` -> map[string]string URL key values
|
||||||
|
- `headers` -> map[string]string HTTP request headers
|
||||||
|
- Load average:
|
||||||
|
- `load_1m` -> system load in the last minute
|
||||||
|
- `load_5m` -> system load in the last 5 minutes
|
||||||
|
- `load_15m` -> system load in the last 15 minutes
|
||||||
|
- Functions exposed to Anubis expressions
|
||||||
|
- `userAgent.isBrowserLike` -> returns true if the userAgent is like a browser
|
||||||
|
- Life advice
|
||||||
|
- When in doubt, throw a CHALLENGE over a DENY. CHALLENGE makes it more easy to renege
|
||||||
|
- Example usage
|
||||||
|
- [How to make Anubis much less aggressive](../less-aggressive.mdx)
|
||||||
147
docs/docs/admin/configuration/import.mdx
Normal file
@@ -0,0 +1,147 @@
|
|||||||
|
# Importing configuration rules
|
||||||
|
|
||||||
|
import Tabs from "@theme/Tabs";
|
||||||
|
import TabItem from "@theme/TabItem";
|
||||||
|
|
||||||
|
Anubis has the ability to let you import snippets of configuration into the main configuration file. This allows you to break up your config into smaller parts that get logically assembled into one big file.
|
||||||
|
|
||||||
|
EG:
|
||||||
|
|
||||||
|
<Tabs>
|
||||||
|
<TabItem value="json" label="JSON">
|
||||||
|
|
||||||
|
```json
|
||||||
|
{
|
||||||
|
"bots": [
|
||||||
|
{
|
||||||
|
"import": "(data)/bots/ai-robots-txt.yaml"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"import": "(data)/bots/cloudflare-workers.yaml"
|
||||||
|
}
|
||||||
|
]
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
</TabItem>
|
||||||
|
<TabItem value="yaml" label="YAML" default>
|
||||||
|
|
||||||
|
```yaml
|
||||||
|
bots:
|
||||||
|
# Pathological bots to deny
|
||||||
|
- # This correlates to data/bots/ai-robots-txt.yaml in the source tree
|
||||||
|
import: (data)/bots/ai-robots-txt.yaml
|
||||||
|
- import: (data)/bots/cloudflare-workers.yaml
|
||||||
|
```
|
||||||
|
|
||||||
|
</TabItem>
|
||||||
|
</Tabs>
|
||||||
|
|
||||||
|
Of note, a bot rule can either have inline bot configuration or import a bot config snippet. You cannot do both in a single bot rule.
|
||||||
|
|
||||||
|
<Tabs>
|
||||||
|
<TabItem value="json" label="JSON">
|
||||||
|
|
||||||
|
```json
|
||||||
|
{
|
||||||
|
"bots": [
|
||||||
|
{
|
||||||
|
"import": "(data)/bots/ai-robots-txt.yaml",
|
||||||
|
"name": "generic-browser",
|
||||||
|
"user_agent_regex": "Mozilla|Opera\n",
|
||||||
|
"action": "CHALLENGE"
|
||||||
|
}
|
||||||
|
]
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
</TabItem>
|
||||||
|
<TabItem value="yaml" label="YAML" default>
|
||||||
|
|
||||||
|
```yaml
|
||||||
|
bots:
|
||||||
|
- import: (data)/bots/ai-robots-txt.yaml
|
||||||
|
name: generic-browser
|
||||||
|
user_agent_regex: >
|
||||||
|
Mozilla|Opera
|
||||||
|
action: CHALLENGE
|
||||||
|
```
|
||||||
|
|
||||||
|
</TabItem>
|
||||||
|
</Tabs>
|
||||||
|
|
||||||
|
This will return an error like this:
|
||||||
|
|
||||||
|
```text
|
||||||
|
config is not valid:
|
||||||
|
config.BotOrImport: rule definition is invalid, you must set either bot rules or an import statement, not both
|
||||||
|
```
|
||||||
|
|
||||||
|
Paths can either be prefixed with `(data)` to import from the [the data folder in the Anubis source tree](https://github.com/TecharoHQ/anubis/tree/main/data) or anywhere on the filesystem. If you don't have access to the Anubis source tree, check /usr/share/docs/anubis/data or in the tarball you extracted Anubis from.
|
||||||
|
|
||||||
|
## Writing snippets
|
||||||
|
|
||||||
|
Snippets can be written in either JSON or YAML, with a preference for YAML. When writing a snippet, write the bot rules you want directly at the top level of the file in a list.
|
||||||
|
|
||||||
|
Here is an example snippet that allows [IPv6 Unique Local Addresses](https://en.wikipedia.org/wiki/Unique_local_address) through Anubis:
|
||||||
|
|
||||||
|
<Tabs>
|
||||||
|
<TabItem value="json" label="JSON">
|
||||||
|
|
||||||
|
```json
|
||||||
|
[
|
||||||
|
{
|
||||||
|
"name": "ipv6-ula",
|
||||||
|
"action": "ALLOW",
|
||||||
|
"remote_addresses": ["fc00::/7"]
|
||||||
|
}
|
||||||
|
]
|
||||||
|
```
|
||||||
|
|
||||||
|
</TabItem>
|
||||||
|
<TabItem value="yaml" label="YAML" default>
|
||||||
|
|
||||||
|
```yaml
|
||||||
|
- name: ipv6-ula
|
||||||
|
action: ALLOW
|
||||||
|
remote_addresses:
|
||||||
|
- fc00::/7
|
||||||
|
```
|
||||||
|
|
||||||
|
</TabItem>
|
||||||
|
</Tabs>
|
||||||
|
|
||||||
|
## Extracting Anubis' embedded filesystem
|
||||||
|
|
||||||
|
You can always extract the list of rules embedded into the Anubis binary with this command:
|
||||||
|
|
||||||
|
```text
|
||||||
|
anubis --extract-resources=static
|
||||||
|
```
|
||||||
|
|
||||||
|
This will dump the contents of Anubis' embedded data to a new folder named `static`:
|
||||||
|
|
||||||
|
```text
|
||||||
|
static
|
||||||
|
├── apps
|
||||||
|
│ └── gitea-rss-feeds.yaml
|
||||||
|
├── botPolicies.json
|
||||||
|
├── botPolicies.yaml
|
||||||
|
├── bots
|
||||||
|
│ ├── ai-robots-txt.yaml
|
||||||
|
│ ├── cloudflare-workers.yaml
|
||||||
|
│ ├── headless-browsers.yaml
|
||||||
|
│ └── us-ai-scraper.yaml
|
||||||
|
├── common
|
||||||
|
│ ├── allow-private-addresses.yaml
|
||||||
|
│ └── keep-internet-working.yaml
|
||||||
|
└── crawlers
|
||||||
|
├── bingbot.yaml
|
||||||
|
├── duckduckbot.yaml
|
||||||
|
├── googlebot.yaml
|
||||||
|
├── internet-archive.yaml
|
||||||
|
├── kagibot.yaml
|
||||||
|
├── marginalia.yaml
|
||||||
|
├── mojeekbot.yaml
|
||||||
|
└── qwantbot.yaml
|
||||||
|
```
|
||||||
47
docs/docs/admin/configuration/open-graph.mdx
Normal file
@@ -0,0 +1,47 @@
|
|||||||
|
---
|
||||||
|
id: open-graph
|
||||||
|
title: Open Graph Configuration
|
||||||
|
---
|
||||||
|
|
||||||
|
# Open Graph Configuration
|
||||||
|
|
||||||
|
This page provides detailed information on how to configure [OpenGraph tag](https://ogp.me/) passthrough in Anubis. This enables social previews of resources protected by Anubis without having to exempt each scraper individually.
|
||||||
|
|
||||||
|
## Configuration Options
|
||||||
|
|
||||||
|
| Name | Description | Type | Default | Example |
|
||||||
|
|------------------|-----------------------------------------------------------|----------|---------|-------------------------|
|
||||||
|
| `OG_PASSTHROUGH` | Enables or disables the Open Graph tag passthrough system | Boolean | `false` | `OG_PASSTHROUGH=true` |
|
||||||
|
| `OG_EXPIRY_TIME` | Configurable cache expiration time for Open Graph tags | Duration | `24h` | `OG_EXPIRY_TIME=1h` |
|
||||||
|
|
||||||
|
## Usage
|
||||||
|
|
||||||
|
To configure Open Graph tags, you can set the following environment variables, environment file or as flags in your Anubis configuration:
|
||||||
|
|
||||||
|
```sh
|
||||||
|
export OG_PASSTHROUGH=true
|
||||||
|
export OG_EXPIRY_TIME=1h
|
||||||
|
```
|
||||||
|
|
||||||
|
## Implementation Details
|
||||||
|
|
||||||
|
When `OG_PASSTHROUGH` is enabled, Anubis will:
|
||||||
|
|
||||||
|
1. Check a local cache for the requested URL's Open Graph tags.
|
||||||
|
2. If a cached entry exists and is still valid, return the cached tags.
|
||||||
|
3. If the cached entry is stale or not found, fetch the URL, parse the Open Graph tags, update the cache, and return the new tags.
|
||||||
|
|
||||||
|
The cache expiration time is controlled by `OG_EXPIRY_TIME`.
|
||||||
|
|
||||||
|
## Example
|
||||||
|
|
||||||
|
Here is an example of how to configure Open Graph tags in your Anubis setup:
|
||||||
|
|
||||||
|
```sh
|
||||||
|
export OG_PASSTHROUGH=true
|
||||||
|
export OG_EXPIRY_TIME=1h
|
||||||
|
```
|
||||||
|
|
||||||
|
With these settings, Anubis will cache Open Graph tags for 1 hour and pass them through to the challenge page.
|
||||||
|
|
||||||
|
For more information, refer to the [installation guide](../installation).
|
||||||
94
docs/docs/admin/configuration/redirect-domains.mdx
Normal file
@@ -0,0 +1,94 @@
|
|||||||
|
---
|
||||||
|
title: Redirect Domain Configuration
|
||||||
|
---
|
||||||
|
|
||||||
|
import Tabs from "@theme/Tabs";
|
||||||
|
import TabItem from "@theme/TabItem";
|
||||||
|
|
||||||
|
Anubis has an HTTP redirect in the middle of its check validation logic. This redirect allows Anubis to set a cookie on validated requests so that users don't need to pass challenges on every page load.
|
||||||
|
|
||||||
|
This flow looks something like this:
|
||||||
|
|
||||||
|
```mermaid
|
||||||
|
sequenceDiagram
|
||||||
|
participant User
|
||||||
|
participant Challenge
|
||||||
|
participant Validation
|
||||||
|
participant Backend
|
||||||
|
|
||||||
|
User->>+Challenge: GET /
|
||||||
|
Challenge->>+User: Solve this challenge
|
||||||
|
User->>+Validation: Here's the solution, send me to /
|
||||||
|
Validation->>+User: Here's a cookie, go to /
|
||||||
|
User->>+Backend: GET /
|
||||||
|
```
|
||||||
|
|
||||||
|
However, in some cases a sufficiently dedicated attacker could trick a user into clicking on a validation link with a solution pre-filled out. For example:
|
||||||
|
|
||||||
|
```mermaid
|
||||||
|
sequenceDiagram
|
||||||
|
participant Hacker
|
||||||
|
participant User
|
||||||
|
participant Validation
|
||||||
|
participant Evil Site
|
||||||
|
|
||||||
|
Hacker->>+User: Click on yoursite.com with this solution
|
||||||
|
User->>+Validation: Here's a solution, send me to evilsite.com
|
||||||
|
Validation->>+User: Here's a cookie, go to evilsite.com
|
||||||
|
User->>+Evil Site: GET evilsite.com
|
||||||
|
```
|
||||||
|
|
||||||
|
If this happens, Anubis will throw an error like this:
|
||||||
|
|
||||||
|
```text
|
||||||
|
Redirect domain not allowed
|
||||||
|
```
|
||||||
|
|
||||||
|
## Configuring allowed redirect domains
|
||||||
|
|
||||||
|
By default, Anubis will limit redirects to be on the same HTTP Host that Anubis is running on (EG: requests to yoursite.com cannot redirect outside of yoursite.com). If you need to set more than one domain, fill the `REDIRECT_DOMAINS` environment variable with a comma-separated list of domain names that Anubis should allow redirects to.
|
||||||
|
|
||||||
|
:::note
|
||||||
|
|
||||||
|
These domains are _an exact string match_, they do not support wildcard matches.
|
||||||
|
|
||||||
|
:::
|
||||||
|
|
||||||
|
<Tabs>
|
||||||
|
<TabItem value="env-file" label="Environment file" default>
|
||||||
|
|
||||||
|
```shell
|
||||||
|
# anubis.env
|
||||||
|
|
||||||
|
REDIRECT_DOMAINS="yoursite.com,secretplans.yoursite.com"
|
||||||
|
# ...
|
||||||
|
```
|
||||||
|
|
||||||
|
</TabItem>
|
||||||
|
<TabItem value="docker-compose" label="Docker Compose">
|
||||||
|
|
||||||
|
```yaml
|
||||||
|
services:
|
||||||
|
anubis-nginx:
|
||||||
|
image: ghcr.io/techarohq/anubis:latest
|
||||||
|
environment:
|
||||||
|
REDIRECT_DOMAINS: "yoursite.com,secretplans.yoursite.com"
|
||||||
|
# ...
|
||||||
|
```
|
||||||
|
|
||||||
|
</TabItem>
|
||||||
|
<TabItem value="k8s" label="Kubernetes">
|
||||||
|
|
||||||
|
Inside your Deployment, StatefulSet, or Pod:
|
||||||
|
|
||||||
|
```yaml
|
||||||
|
- name: anubis
|
||||||
|
image: ghcr.io/techarohq/anubis:latest
|
||||||
|
env:
|
||||||
|
- name: REDIRECT_DOMAINS
|
||||||
|
value: "yoursite.com,secretplans.yoursite.com"
|
||||||
|
# ...
|
||||||
|
```
|
||||||
|
|
||||||
|
</TabItem>
|
||||||
|
</Tabs>
|
||||||
139
docs/docs/admin/configuration/subrequest-auth.mdx
Normal file
@@ -0,0 +1,139 @@
|
|||||||
|
---
|
||||||
|
title: Subrequest Authentication
|
||||||
|
---
|
||||||
|
|
||||||
|
import Tabs from "@theme/Tabs";
|
||||||
|
import TabItem from "@theme/TabItem";
|
||||||
|
|
||||||
|
Anubis can act in one of two modes:
|
||||||
|
|
||||||
|
1. Reverse proxy (the default): Anubis sits in the middle of all traffic and then will reverse proxy it to its destination. This is the moral equivalent of a middleware in your favorite web framework.
|
||||||
|
2. Subrequest authentication mode: Anubis listens for requests and if they don't pass muster then they are forwarded to Anubis for challenge processing. This is the equivalent of Anubis being a sidecar service.
|
||||||
|
|
||||||
|
## Nginx
|
||||||
|
|
||||||
|
Anubis can perform [subrequest authentication](https://docs.nginx.com/nginx/admin-guide/security-controls/configuring-subrequest-authentication/) with the `auth_request` module in Nginx. In order to set this up, keep the following things in mind:
|
||||||
|
|
||||||
|
The `TARGET` environment variable in Anubis must be set to a space, eg:
|
||||||
|
|
||||||
|
<Tabs>
|
||||||
|
<TabItem value="env-file" label="Environment file" default>
|
||||||
|
|
||||||
|
```shell
|
||||||
|
# anubis.env
|
||||||
|
|
||||||
|
TARGET=" "
|
||||||
|
# ...
|
||||||
|
```
|
||||||
|
|
||||||
|
</TabItem>
|
||||||
|
<TabItem value="docker-compose" label="Docker Compose">
|
||||||
|
|
||||||
|
```yaml
|
||||||
|
services:
|
||||||
|
anubis-nginx:
|
||||||
|
image: ghcr.io/techarohq/anubis:latest
|
||||||
|
environment:
|
||||||
|
TARGET: " "
|
||||||
|
# ...
|
||||||
|
```
|
||||||
|
|
||||||
|
</TabItem>
|
||||||
|
<TabItem value="k8s" label="Kubernetes">
|
||||||
|
|
||||||
|
Inside your Deployment, StatefulSet, or Pod:
|
||||||
|
|
||||||
|
```yaml
|
||||||
|
- name: anubis
|
||||||
|
image: ghcr.io/techarohq/anubis:latest
|
||||||
|
env:
|
||||||
|
- name: TARGET
|
||||||
|
value: " "
|
||||||
|
# ...
|
||||||
|
```
|
||||||
|
|
||||||
|
</TabItem>
|
||||||
|
</Tabs>
|
||||||
|
|
||||||
|
In order to configure this, you need to add the following location blocks to each server pointing to the service you want to protect:
|
||||||
|
|
||||||
|
```nginx
|
||||||
|
location /.within.website/ {
|
||||||
|
# Assumption: Anubis is running in the same network namespace as
|
||||||
|
# nginx on localhost TCP port 8923
|
||||||
|
proxy_pass http://127.0.0.1:8923;
|
||||||
|
auth_request off;
|
||||||
|
}
|
||||||
|
|
||||||
|
location @redirectToAnubis {
|
||||||
|
return 307 /.within.website/?redir=$scheme://$host$request_uri;
|
||||||
|
auth_request off;
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
This sets up `/.within.website` to point to Anubis. Any requests that Anubis rejects or throws a challenge to will be sent here. This also sets up a named location `@redirectToAnubis` that will redirect any requests to Anubis for advanced processing.
|
||||||
|
|
||||||
|
Finally, add this to your root location block:
|
||||||
|
|
||||||
|
```nginx
|
||||||
|
location / {
|
||||||
|
# diff-add
|
||||||
|
auth_request /.within.website/x/cmd/anubis/api/check;
|
||||||
|
# diff-add
|
||||||
|
error_page 401 = @redirectToAnubis;
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
This will check all requests that don't match other locations with Anubis to ensure the client is genuine.
|
||||||
|
|
||||||
|
This will make every request get checked by Anubis before it hits your backend. If you have other locations that don't need Anubis to do validation, add the `auth_request off` directive to their blocks:
|
||||||
|
|
||||||
|
```nginx
|
||||||
|
location /secret {
|
||||||
|
# diff-add
|
||||||
|
auth_request off;
|
||||||
|
|
||||||
|
# ...
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
Here is a complete example of an Nginx server listening over TLS and pointing to Anubis:
|
||||||
|
|
||||||
|
<details>
|
||||||
|
<summary>Complete example</summary>
|
||||||
|
|
||||||
|
```nginx
|
||||||
|
# /etc/nginx/conf.d/nginx.local.cetacean.club.conf
|
||||||
|
|
||||||
|
server {
|
||||||
|
listen 443 ssl;
|
||||||
|
listen [::]:443 ssl;
|
||||||
|
server_name nginx.local.cetacean.club;
|
||||||
|
ssl_certificate /etc/techaro/pki/nginx.local.cetacean.club/tls.crt;
|
||||||
|
ssl_certificate_key /etc/techaro/pki/nginx.local.cetacean.club/tls.key;
|
||||||
|
ssl_protocols TLSv1.2 TLSv1.3;
|
||||||
|
ssl_ciphers HIGH:!aNULL:!MD5;
|
||||||
|
|
||||||
|
proxy_set_header X-Real-IP $remote_addr;
|
||||||
|
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
|
||||||
|
|
||||||
|
location /.within.website/ {
|
||||||
|
proxy_pass http://localhost:8923;
|
||||||
|
auth_request off;
|
||||||
|
}
|
||||||
|
|
||||||
|
location @redirectToAnubis {
|
||||||
|
return 307 /.within.website/?redir=$scheme://$host$request_uri;
|
||||||
|
auth_request off;
|
||||||
|
}
|
||||||
|
|
||||||
|
location / {
|
||||||
|
auth_request /.within.website/x/cmd/anubis/api/check;
|
||||||
|
error_page 401 = @redirectToAnubis;
|
||||||
|
root /usr/share/nginx/html;
|
||||||
|
index index.html index.htm;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
</details>
|
||||||
92
docs/docs/admin/default-allow-behavior.mdx
Normal file
@@ -0,0 +1,92 @@
|
|||||||
|
---
|
||||||
|
title: Default allow behavior
|
||||||
|
---
|
||||||
|
|
||||||
|
import Tabs from "@theme/Tabs";
|
||||||
|
import TabItem from "@theme/TabItem";
|
||||||
|
|
||||||
|
# Default allow behavior
|
||||||
|
|
||||||
|
Anubis is designed to be as unintrusive as possible to your existing infrastructure.
|
||||||
|
|
||||||
|
By default, it allows all traffic unless a request matches a rule that explicitly denies or challenges it.
|
||||||
|
|
||||||
|
Only requests matching a DENY or CHALLENGE rule are blocked or challenged. All other requests are allowed. This is called "the implicit rule".
|
||||||
|
|
||||||
|
## Example: Minimal policy
|
||||||
|
|
||||||
|
If your policy only blocks a specific bot, all other requests will be allowed:
|
||||||
|
|
||||||
|
<Tabs>
|
||||||
|
<TabItem value="json" label="JSON" default>
|
||||||
|
|
||||||
|
```json
|
||||||
|
{
|
||||||
|
"bots": [
|
||||||
|
{
|
||||||
|
"name": "block-amazonbot",
|
||||||
|
"user_agent_regex": "Amazonbot",
|
||||||
|
"action": "DENY"
|
||||||
|
}
|
||||||
|
]
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
</TabItem>
|
||||||
|
<TabItem value="yaml" label="YAML">
|
||||||
|
|
||||||
|
```yaml
|
||||||
|
- name: block-amazonbot
|
||||||
|
user_agent_regex: Amazonbot
|
||||||
|
action: DENY
|
||||||
|
```
|
||||||
|
|
||||||
|
</TabItem>
|
||||||
|
</Tabs>
|
||||||
|
|
||||||
|
## How to deny by default
|
||||||
|
|
||||||
|
If you want to deny all traffic except what you explicitly allow, add a catch-all deny rule at the end of your policy list. Make sure to add ALLOW rules for any traffic you want to permit before this rule.
|
||||||
|
|
||||||
|
<Tabs>
|
||||||
|
<TabItem value="json" label="JSON" default>
|
||||||
|
|
||||||
|
```json
|
||||||
|
{
|
||||||
|
"bots": [
|
||||||
|
{
|
||||||
|
"name": "allow-goodbot",
|
||||||
|
"user_agent_regex": "GoodBot",
|
||||||
|
"action": "ALLOW"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"name": "catch-all-deny",
|
||||||
|
"path_regex": ".*",
|
||||||
|
"action": "DENY"
|
||||||
|
}
|
||||||
|
]
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
</TabItem>
|
||||||
|
<TabItem value="yaml" label="YAML">
|
||||||
|
|
||||||
|
```yaml
|
||||||
|
- name: allow-goodbot
|
||||||
|
user_agent_regex: GoodBot
|
||||||
|
action: ALLOW
|
||||||
|
- name: catch-all-deny
|
||||||
|
path_regex: .*
|
||||||
|
action: DENY
|
||||||
|
```
|
||||||
|
|
||||||
|
</TabItem>
|
||||||
|
</Tabs>
|
||||||
|
|
||||||
|
## Final remarks
|
||||||
|
|
||||||
|
- Rules are evaluated in order; the first match wins.
|
||||||
|
- The implicit allow rule is always last and cannot be removed.
|
||||||
|
- Use your logs to monitor what traffic is being allowed by default.
|
||||||
|
|
||||||
|
See [Policy Definitions](./policies) for more details on writing rules.
|
||||||
8
docs/docs/admin/environments/_category_.json
Normal file
@@ -0,0 +1,8 @@
|
|||||||
|
{
|
||||||
|
"label": "Environments",
|
||||||
|
"position": 20,
|
||||||
|
"link": {
|
||||||
|
"type": "generated-index",
|
||||||
|
"description": "Detailed information about individual environments (such as HTTP servers, platforms, etc.) Anubis is known to work with."
|
||||||
|
}
|
||||||
|
}
|
||||||
152
docs/docs/admin/environments/apache.mdx
Normal file
@@ -0,0 +1,152 @@
|
|||||||
|
# Apache
|
||||||
|
|
||||||
|
import Tabs from "@theme/Tabs";
|
||||||
|
import TabItem from "@theme/TabItem";
|
||||||
|
|
||||||
|
Anubis is intended to be a filter proxy. The way to integrate this is to break your configuration up into two parts: TLS termination and then HTTP routing. Consider this diagram:
|
||||||
|
|
||||||
|
```mermaid
|
||||||
|
---
|
||||||
|
title: Apache as tls terminator and HTTP router
|
||||||
|
---
|
||||||
|
|
||||||
|
flowchart LR
|
||||||
|
T(User Traffic)
|
||||||
|
subgraph Apache 2
|
||||||
|
TCP(TCP 80/443)
|
||||||
|
US(TCP 3001)
|
||||||
|
end
|
||||||
|
|
||||||
|
An(Anubis)
|
||||||
|
B(Backend)
|
||||||
|
|
||||||
|
T --> |TLS termination| TCP
|
||||||
|
TCP --> |Traffic filtering| An
|
||||||
|
An --> |Happy traffic| US
|
||||||
|
US --> |whatever you're doing| B
|
||||||
|
```
|
||||||
|
|
||||||
|
Effectively you have one trip through Apache to do TLS termination, a detour through Anubis for traffic scrubbing, and then going to the backend directly. This final socket is what will do HTTP routing.
|
||||||
|
|
||||||
|
:::note
|
||||||
|
|
||||||
|
These examples assume that you are using a setup where your Apache configuration is made up of a bunch of files in `/etc/httpd/conf.d/*.conf`. This is not true for all deployments of Apache. If you are not in such an environment, append these snippets to your `/etc/httpd/conf/httpd.conf` file.
|
||||||
|
|
||||||
|
:::
|
||||||
|
|
||||||
|
## Dependencies
|
||||||
|
|
||||||
|
Install the following dependencies for proxying HTTP:
|
||||||
|
|
||||||
|
<Tabs>
|
||||||
|
<TabItem value="rpm" label="Red Hat / RPM" default>
|
||||||
|
|
||||||
|
```text
|
||||||
|
dnf -y install mod_proxy_html
|
||||||
|
```
|
||||||
|
|
||||||
|
</TabItem>
|
||||||
|
<TabItem value="deb" label="Debian / Ubuntu / apt">
|
||||||
|
|
||||||
|
```text
|
||||||
|
apt-get install -y libapache2-mod-proxy-html libxml2-dev
|
||||||
|
```
|
||||||
|
|
||||||
|
</TabItem>
|
||||||
|
</Tabs>
|
||||||
|
|
||||||
|
## Configuration
|
||||||
|
|
||||||
|
Assuming you are protecting `anubistest.techaro.lol`, you need the following server configuration blocks:
|
||||||
|
|
||||||
|
1. A block on port 80 that forwards HTTP to HTTPS
|
||||||
|
2. A block on port 443 that terminates TLS and forwards to Anubis
|
||||||
|
3. A block on port 3001 that actually serves your websites
|
||||||
|
|
||||||
|
```text
|
||||||
|
# Plain HTTP redirect to HTTPS
|
||||||
|
<VirtualHost *:80>
|
||||||
|
ServerAdmin your@email.here
|
||||||
|
ServerName anubistest.techaro.lol
|
||||||
|
DocumentRoot /var/www/anubistest.techaro.lol
|
||||||
|
ErrorLog /var/log/httpd/anubistest.techaro.lol_error.log
|
||||||
|
CustomLog /var/log/httpd/anubistest.techaro.lol_access.log combined
|
||||||
|
RewriteEngine on
|
||||||
|
RewriteCond %{SERVER_NAME} =anubistest.techaro.lol
|
||||||
|
RewriteRule ^ https://%{SERVER_NAME}%{REQUEST_URI} [END,NE,R=permanent]
|
||||||
|
</VirtualHost>
|
||||||
|
|
||||||
|
# HTTPS listener that forwards to Anubis
|
||||||
|
<VirtualHost *:443>
|
||||||
|
ServerAdmin your@email.here
|
||||||
|
ServerName anubistest.techaro.lol
|
||||||
|
DocumentRoot /var/www/anubistest.techaro.lol
|
||||||
|
ErrorLog /var/log/httpd/anubistest.techaro.lol_error.log
|
||||||
|
CustomLog /var/log/httpd/anubistest.techaro.lol_access.log combined
|
||||||
|
|
||||||
|
SSLCertificateFile /etc/letsencrypt/live/anubistest.techaro.lol/fullchain.pem
|
||||||
|
SSLCertificateKeyFile /etc/letsencrypt/live/anubistest.techaro.lol/privkey.pem
|
||||||
|
Include /etc/letsencrypt/options-ssl-apache.conf
|
||||||
|
|
||||||
|
# These headers need to be set or else Anubis will
|
||||||
|
# throw an "admin misconfiguration" error.
|
||||||
|
RequestHeader set "X-Real-Ip" expr=%{REMOTE_ADDR}
|
||||||
|
RequestHeader set "X-Forwarded-Proto" "https"
|
||||||
|
RequestHeader set "X-Http-Version" "%{SERVER_PROTOCOL}s"
|
||||||
|
|
||||||
|
ProxyPreserveHost On
|
||||||
|
|
||||||
|
ProxyRequests Off
|
||||||
|
ProxyVia Off
|
||||||
|
|
||||||
|
# Replace 9000 with the port Anubis listens on
|
||||||
|
ProxyPass / http://[::1]:9000/
|
||||||
|
ProxyPassReverse / http://[::1]:9000/
|
||||||
|
</VirtualHost>
|
||||||
|
|
||||||
|
|
||||||
|
# Actual website config
|
||||||
|
<VirtualHost *:3001>
|
||||||
|
ServerAdmin your@email.here
|
||||||
|
ServerName anubistest.techaro.lol
|
||||||
|
DocumentRoot /var/www/anubistest.techaro.lol
|
||||||
|
ErrorLog /var/log/httpd/anubistest.techaro.lol_error.log
|
||||||
|
CustomLog /var/log/httpd/anubistest.techaro.lol_access.log combined
|
||||||
|
</VirtualHost>
|
||||||
|
```
|
||||||
|
|
||||||
|
Make sure to add a separate configuration file for the listener on port 3001:
|
||||||
|
|
||||||
|
```text
|
||||||
|
# /etc/httpd/conf.d/listener-3001.conf
|
||||||
|
|
||||||
|
Listen 3001
|
||||||
|
```
|
||||||
|
|
||||||
|
This can be repeated for multiple sites. Anubis does not care about the HTTP `Host` header and will happily cope with multiple websites via the same instance.
|
||||||
|
|
||||||
|
Then reload your Apache config and load your website. You should see Anubis protecting your apps!
|
||||||
|
|
||||||
|
```text
|
||||||
|
sudo systemctl reload httpd.service
|
||||||
|
```
|
||||||
|
|
||||||
|
## Troubleshooting
|
||||||
|
|
||||||
|
Here are some answers to questions that came up during testing:
|
||||||
|
|
||||||
|
### I'm running on a Red Hat distribution and Apache is saying "service unavailable" for every page load
|
||||||
|
|
||||||
|
If you see a "Service unavailable" error on every page load and run a Red Hat derived distribution, you are missing a `selinux` setting. The exact command will be in a journalctl log message like this:
|
||||||
|
|
||||||
|
```text
|
||||||
|
***** Plugin catchall_boolean (89.3 confidence) suggests ******************
|
||||||
|
|
||||||
|
If you want to allow HTTPD scripts and modules to connect to the network using TCP.
|
||||||
|
Then you must tell SELinux about this by enabling the 'httpd_can_network_connect' boolean.
|
||||||
|
|
||||||
|
Do
|
||||||
|
setsebool -P httpd_can_network_connect 1
|
||||||
|
```
|
||||||
|
|
||||||
|
This will fix the error immediately.
|
||||||
26
docs/docs/admin/environments/docker-compose.mdx
Normal file
@@ -0,0 +1,26 @@
|
|||||||
|
# Docker compose
|
||||||
|
|
||||||
|
Docker compose is typically used in concert with other load balancers such as [Apache](./apache.mdx) or [Nginx](./nginx.mdx). Below is a minimal example showing you how to set up an instance of Anubis listening on host port 8080 that points to a static website containing data in `./www`:
|
||||||
|
|
||||||
|
```yaml
|
||||||
|
services:
|
||||||
|
anubis-nginx:
|
||||||
|
image: ghcr.io/techarohq/anubis:latest
|
||||||
|
environment:
|
||||||
|
BIND: ":8080"
|
||||||
|
DIFFICULTY: "4"
|
||||||
|
METRICS_BIND: ":9090"
|
||||||
|
SERVE_ROBOTS_TXT: "true"
|
||||||
|
TARGET: "http://nginx"
|
||||||
|
POLICY_FNAME: "/data/cfg/botPolicy.yaml"
|
||||||
|
OG_PASSTHROUGH: "true"
|
||||||
|
OG_EXPIRY_TIME: "24h"
|
||||||
|
ports:
|
||||||
|
- 8080:8080
|
||||||
|
volumes:
|
||||||
|
- "./botPolicy.yaml:/data/cfg/botPolicy.yaml:ro"
|
||||||
|
nginx:
|
||||||
|
image: nginx
|
||||||
|
volumes:
|
||||||
|
- "./www:/usr/share/nginx/html"
|
||||||
|
```
|
||||||
128
docs/docs/admin/environments/kubernetes.mdx
Normal file
@@ -0,0 +1,128 @@
|
|||||||
|
# Kubernetes
|
||||||
|
|
||||||
|
When setting up Anubis in Kubernetes, you want to make sure that you thread requests through Anubis kinda like this:
|
||||||
|
|
||||||
|
```mermaid
|
||||||
|
---
|
||||||
|
title: Anubis embedded into workload pods
|
||||||
|
---
|
||||||
|
|
||||||
|
flowchart LR
|
||||||
|
T(User Traffic)
|
||||||
|
|
||||||
|
IngressController(IngressController)
|
||||||
|
|
||||||
|
subgraph Service
|
||||||
|
AnPort(Anubis Port)
|
||||||
|
BPort(Backend Port)
|
||||||
|
end
|
||||||
|
|
||||||
|
subgraph Pod
|
||||||
|
An(Anubis)
|
||||||
|
B(Backend)
|
||||||
|
end
|
||||||
|
|
||||||
|
T --> IngressController
|
||||||
|
IngressController --> AnPort
|
||||||
|
AnPort --> An
|
||||||
|
An --> B
|
||||||
|
```
|
||||||
|
|
||||||
|
Anubis is lightweight enough that you should be able to have many instances of it running without many problems. If this is a concern for you, please check out [ingress-anubis](https://github.com/jaredallard/ingress-anubis?ref=anubis.techaro.lol).
|
||||||
|
|
||||||
|
This example makes the following assumptions:
|
||||||
|
|
||||||
|
- Your target service is listening on TCP port `5000`.
|
||||||
|
- Anubis will be listening on port `8080`.
|
||||||
|
|
||||||
|
Adjust these values as facts and circumstances demand.
|
||||||
|
|
||||||
|
Create a secret with the signing key Anubis should use for its responses:
|
||||||
|
|
||||||
|
```
|
||||||
|
kubectl create secret generic anubis-key \
|
||||||
|
--namespace default \
|
||||||
|
--from-literal=ED25519_PRIVATE_KEY_HEX=$(openssl rand -hex 32)
|
||||||
|
```
|
||||||
|
|
||||||
|
Attach Anubis to your Deployment:
|
||||||
|
|
||||||
|
```yaml
|
||||||
|
containers:
|
||||||
|
# ...
|
||||||
|
- name: anubis
|
||||||
|
image: ghcr.io/techarohq/anubis:latest
|
||||||
|
imagePullPolicy: Always
|
||||||
|
env:
|
||||||
|
- name: "BIND"
|
||||||
|
value: ":8080"
|
||||||
|
- name: "DIFFICULTY"
|
||||||
|
value: "4"
|
||||||
|
- name: ED25519_PRIVATE_KEY_HEX
|
||||||
|
valueFrom:
|
||||||
|
secretKeyRef:
|
||||||
|
name: anubis-key
|
||||||
|
key: ED25519_PRIVATE_KEY_HEX
|
||||||
|
- name: "METRICS_BIND"
|
||||||
|
value: ":9090"
|
||||||
|
- name: "SERVE_ROBOTS_TXT"
|
||||||
|
value: "true"
|
||||||
|
- name: "TARGET"
|
||||||
|
value: "http://localhost:5000"
|
||||||
|
- name: "OG_PASSTHROUGH"
|
||||||
|
value: "true"
|
||||||
|
- name: "OG_EXPIRY_TIME"
|
||||||
|
value: "24h"
|
||||||
|
resources:
|
||||||
|
limits:
|
||||||
|
cpu: 750m
|
||||||
|
memory: 256Mi
|
||||||
|
requests:
|
||||||
|
cpu: 250m
|
||||||
|
memory: 256Mi
|
||||||
|
securityContext:
|
||||||
|
runAsUser: 1000
|
||||||
|
runAsGroup: 1000
|
||||||
|
runAsNonRoot: true
|
||||||
|
allowPrivilegeEscalation: false
|
||||||
|
capabilities:
|
||||||
|
drop:
|
||||||
|
- ALL
|
||||||
|
seccompProfile:
|
||||||
|
type: RuntimeDefault
|
||||||
|
```
|
||||||
|
|
||||||
|
Then add a Service entry for Anubis:
|
||||||
|
|
||||||
|
```yaml
|
||||||
|
# ...
|
||||||
|
spec:
|
||||||
|
ports:
|
||||||
|
# diff-add
|
||||||
|
- protocol: TCP
|
||||||
|
# diff-add
|
||||||
|
port: 8080
|
||||||
|
# diff-add
|
||||||
|
targetPort: 8080
|
||||||
|
# diff-add
|
||||||
|
name: anubis
|
||||||
|
```
|
||||||
|
|
||||||
|
Then point your Ingress to the Anubis port:
|
||||||
|
|
||||||
|
```yaml
|
||||||
|
rules:
|
||||||
|
- host: git.xeserv.us
|
||||||
|
http:
|
||||||
|
paths:
|
||||||
|
- pathType: Prefix
|
||||||
|
path: "/"
|
||||||
|
backend:
|
||||||
|
service:
|
||||||
|
name: git
|
||||||
|
port:
|
||||||
|
# diff-remove
|
||||||
|
name: http
|
||||||
|
# diff-add
|
||||||
|
name: anubis
|
||||||
|
```
|
||||||
172
docs/docs/admin/environments/nginx.mdx
Normal file
@@ -0,0 +1,172 @@
|
|||||||
|
# Nginx
|
||||||
|
|
||||||
|
Anubis is intended to be a filter proxy. The way to integrate this with nginx is to break your configuration up into two parts: TLS termination and then HTTP routing. Consider this diagram:
|
||||||
|
|
||||||
|
```mermaid
|
||||||
|
---
|
||||||
|
title: Nginx as tls terminator and HTTP router
|
||||||
|
---
|
||||||
|
|
||||||
|
flowchart LR
|
||||||
|
T(User Traffic)
|
||||||
|
subgraph Nginx
|
||||||
|
TCP(TCP 80/443)
|
||||||
|
US(Unix Socket or
|
||||||
|
another TCP port)
|
||||||
|
end
|
||||||
|
|
||||||
|
An(Anubis)
|
||||||
|
B(Backend)
|
||||||
|
|
||||||
|
T --> |TLS termination| TCP
|
||||||
|
TCP --> |Traffic filtering| An
|
||||||
|
An --> |Happy traffic| US
|
||||||
|
US --> |whatever you're doing| B
|
||||||
|
```
|
||||||
|
|
||||||
|
Instead of your traffic going right from TLS termination into the backend, it takes a detour through Anubis. Anubis filters out the "bad" traffic and then passes the "good" traffic to another socket that Nginx has open. This final socket is what you will use to do HTTP routing.
|
||||||
|
|
||||||
|
Effectively, you have two roles for nginx: TLS termination (converting HTTPS to HTTP) and HTTP routing (distributing requests to the individual vhosts). This can stack with something like Apache in case you have a legacy deployment. Make sure you have the right [TLS certificates configured](https://code.kuederle.com/letsencrypt/) at the TLS termination level.
|
||||||
|
|
||||||
|
:::note
|
||||||
|
|
||||||
|
These examples assume that you are using a setup where your nginx configuration is made up of a bunch of files in `/etc/nginx/conf.d/*.conf`. This is not true for all deployments of nginx. If you are not in such an environment, append these snippets to your `/etc/nginx/nginx.conf` file.
|
||||||
|
|
||||||
|
:::
|
||||||
|
|
||||||
|
Assuming that we are protecting `anubistest.techaro.lol`, here's what the server configuration file would look like:
|
||||||
|
|
||||||
|
```nginx
|
||||||
|
# /etc/nginx/conf.d/server-anubistest-techaro-lol.conf
|
||||||
|
|
||||||
|
# HTTP - Redirect all HTTP traffic to HTTPS
|
||||||
|
server {
|
||||||
|
listen 80;
|
||||||
|
listen [::]:80;
|
||||||
|
|
||||||
|
server_name anubistest.techaro.lol;
|
||||||
|
|
||||||
|
location / {
|
||||||
|
return 301 https://$host$request_uri;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
# TLS termination server, this will listen over TLS (https) and then
|
||||||
|
# proxy all traffic to the target via Anubis.
|
||||||
|
server {
|
||||||
|
# Listen on TCP port 443 with TLS (https) and HTTP/2
|
||||||
|
listen 443 ssl http2;
|
||||||
|
listen [::]:443 ssl http2;
|
||||||
|
|
||||||
|
location / {
|
||||||
|
# Anubis needs these headers to understand the connection
|
||||||
|
proxy_set_header Host $host;
|
||||||
|
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
|
||||||
|
proxy_set_header X-Http-Version $server_protocol;
|
||||||
|
proxy_set_header X-Real-IP $remote_addr;
|
||||||
|
proxy_set_header X-Forwarded-Proto $scheme;
|
||||||
|
proxy_set_header X-Request-Id $request_id;
|
||||||
|
proxy_set_header X-Forwarded-Host $host;
|
||||||
|
proxy_pass http://anubis;
|
||||||
|
}
|
||||||
|
|
||||||
|
server_name anubistest.techaro.lol;
|
||||||
|
|
||||||
|
ssl_certificate /path/to/your/certs/anubistest.techaro.lol.crt;
|
||||||
|
ssl_certificate_key /path/to/your/certs/anubistest.techaro.lol.key;
|
||||||
|
}
|
||||||
|
|
||||||
|
# Backend server, this is where your webapp should actually live.
|
||||||
|
server {
|
||||||
|
listen unix:/run/nginx/nginx.sock;
|
||||||
|
|
||||||
|
server_name anubistest.techaro.lol;
|
||||||
|
root "/srv/http/anubistest.techaro.lol";
|
||||||
|
index index.html;
|
||||||
|
|
||||||
|
# Your normal configuration can go here
|
||||||
|
# location .php { fastcgi...} etc.
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
:::tip
|
||||||
|
|
||||||
|
You can copy the `location /` block into a separate file named something like `conf-anubis.inc` and then include it inline to other `server` blocks:
|
||||||
|
|
||||||
|
```nginx
|
||||||
|
# /etc/nginx/conf.d/conf-anubis.inc
|
||||||
|
|
||||||
|
# Forward to anubis
|
||||||
|
location / {
|
||||||
|
proxy_set_header Host $host;
|
||||||
|
proxy_set_header X-Real-IP $remote_addr;
|
||||||
|
proxy_pass http://anubis;
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
Then in a server block:
|
||||||
|
|
||||||
|
<details>
|
||||||
|
<summary>Full nginx config</summary>
|
||||||
|
|
||||||
|
```nginx
|
||||||
|
# /etc/nginx/conf.d/server-mimi-techaro-lol.conf
|
||||||
|
|
||||||
|
server {
|
||||||
|
# Listen on 443 with SSL
|
||||||
|
listen 443 ssl http2;
|
||||||
|
listen [::]:443 ssl http2;
|
||||||
|
|
||||||
|
# Slipstream via Anubis
|
||||||
|
include "conf-anubis.inc";
|
||||||
|
|
||||||
|
server_name mimi.techaro.lol;
|
||||||
|
|
||||||
|
ssl_certificate /path/to/your/certs/mimi.techaro.lol.crt;
|
||||||
|
ssl_certificate_key /path/to/your/certs/mimi.techaro.lol.key;
|
||||||
|
}
|
||||||
|
|
||||||
|
server {
|
||||||
|
listen unix:/run/nginx/nginx.sock;
|
||||||
|
|
||||||
|
server_name mimi.techaro.lol;
|
||||||
|
root "/srv/http/mimi.techaro.lol";
|
||||||
|
index index.html;
|
||||||
|
|
||||||
|
# Your normal configuration can go here
|
||||||
|
# location .php { fastcgi...} etc.
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
</details>
|
||||||
|
|
||||||
|
:::
|
||||||
|
|
||||||
|
Create an upstream for Anubis.
|
||||||
|
|
||||||
|
```nginx
|
||||||
|
# /etc/nginx/conf.d/upstream-anubis.conf
|
||||||
|
|
||||||
|
upstream anubis {
|
||||||
|
# Make sure this matches the values you set for `BIND` and `BIND_NETWORK`.
|
||||||
|
# If this does not match, your services will not be protected by Anubis.
|
||||||
|
|
||||||
|
# Try anubis first over a UNIX socket
|
||||||
|
server unix:/run/anubis/nginx.sock;
|
||||||
|
#server http://127.0.0.1:8923;
|
||||||
|
|
||||||
|
# Optional: fall back to serving the websites directly. This allows your
|
||||||
|
# websites to be resilient against Anubis failing, at the risk of exposing
|
||||||
|
# them to the raw internet without protection. This is a tradeoff and can
|
||||||
|
# be worth it in some edge cases.
|
||||||
|
#server unix:/run/nginx.sock backup;
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
This can be repeated for multiple sites. Anubis does not care about the HTTP `Host` header and will happily cope with multiple websites via the same instance.
|
||||||
|
|
||||||
|
Then reload your nginx config and load your website. You should see Anubis protecting your apps!
|
||||||
|
|
||||||
|
```text
|
||||||
|
sudo systemctl reload nginx.service
|
||||||
|
```
|
||||||
215
docs/docs/admin/environments/traefik.mdx
Normal file
@@ -0,0 +1,215 @@
|
|||||||
|
---
|
||||||
|
id: traefik
|
||||||
|
title: Integrate Anubis with Traefik in a Docker Compose Environment
|
||||||
|
---
|
||||||
|
|
||||||
|
|
||||||
|
:::note
|
||||||
|
|
||||||
|
This only talks about integration through Compose,
|
||||||
|
but it also applies to docker cli options.
|
||||||
|
|
||||||
|
:::
|
||||||
|
|
||||||
|
Currently, Anubis doesn't have any Traefik middleware,
|
||||||
|
so you need to manually route it between Traefik and your target service.
|
||||||
|
This routing is done per labels in Traefik.
|
||||||
|
|
||||||
|
In this example, we will use 4 Containers:
|
||||||
|
|
||||||
|
- `traefik` - the Traefik instance
|
||||||
|
- `anubis` - the Anubis instance
|
||||||
|
- `target` - our service to protect (`traefik/whoami` in this case)
|
||||||
|
- `target2` - a second service that isn't supposed to be protected (`traefik/whoami` in this case)
|
||||||
|
|
||||||
|
There are 3 steps we need to follow:
|
||||||
|
|
||||||
|
1. Create a new exclusive Traefik endpoint for Anubis
|
||||||
|
2. Pass all unspecified requests to Anubis
|
||||||
|
3. Let Anubis pass all verified requests back to Traefik on its exclusive endpoint
|
||||||
|
|
||||||
|
## Diagram of Flow
|
||||||
|
|
||||||
|
This is a small diagram depicting the flow.
|
||||||
|
Keep in mind that `8080` or `80` can be anything depending on your containers.
|
||||||
|
|
||||||
|
```mermaid
|
||||||
|
flowchart LR
|
||||||
|
user[User]
|
||||||
|
traefik[Traefik]
|
||||||
|
anubis[Anubis]
|
||||||
|
target[Target]
|
||||||
|
|
||||||
|
user-->|:443 - Requesting Service|traefik
|
||||||
|
traefik-->|:8080 - Passing to Anubis|anubis
|
||||||
|
anubis-->|:3923 - Passing back to Traefik|traefik
|
||||||
|
traefik-->|:80 - Passing to the target|target
|
||||||
|
```
|
||||||
|
|
||||||
|
## Create an Exclusive Anubis Endpoint in Traefik
|
||||||
|
|
||||||
|
There are 2 ways of registering a new endpoint in Traefik.
|
||||||
|
Which one to use depends on how you configured your Traefik so far.
|
||||||
|
|
||||||
|
**CLI Options:**
|
||||||
|
|
||||||
|
```yml
|
||||||
|
--entrypoints.anubis.address=:3923
|
||||||
|
```
|
||||||
|
|
||||||
|
**traefik.yml:**
|
||||||
|
|
||||||
|
```yml
|
||||||
|
entryPoints:
|
||||||
|
anubis:
|
||||||
|
address: ":3923"
|
||||||
|
```
|
||||||
|
|
||||||
|
It is important that the specified port isn't actually reachable from the outside,
|
||||||
|
but only exposed in the Docker network.
|
||||||
|
Exposing the Anubis port on Traefik directly will allow direct unprotected access to all containers behind it.
|
||||||
|
|
||||||
|
## Passing all unspecified Web Requests to Anubis
|
||||||
|
|
||||||
|
There are cases where you want Traefik to still route some requests without protection, just like before.
|
||||||
|
To achieve this, we can register Anubis as the default handler for non-protected requests.
|
||||||
|
|
||||||
|
We also don't want users to get SSL Errors during the checking phase,
|
||||||
|
thus we also need to let Traefik provide SSL Certs for our endpoint.
|
||||||
|
This example expects a TLS certificate resolver called `le`.
|
||||||
|
|
||||||
|
We also expect there to be an endpoint called `websecure` for HTTPS in this example.
|
||||||
|
|
||||||
|
This is an example of the required labels to configure Traefik on the Anubis container:
|
||||||
|
|
||||||
|
```yml
|
||||||
|
labels:
|
||||||
|
- traefik.enable=true # Enabling Traefik
|
||||||
|
- traefik.docker.network=traefik # Telling Traefik which network to use
|
||||||
|
- traefik.http.routers.anubis.priority=1 # Setting Anubis to the lowest priority, so it only takes the slack
|
||||||
|
- traefik.http.routers.anubis.rule=PathRegexp(`.*`) # Wildcard match every path
|
||||||
|
- traefik.http.routers.anubis.entrypoints=websecure # Listen on HTTPS
|
||||||
|
- traefik.http.services.anubis.loadbalancer.server.port=8080 # Telling Traefik to which port it should route requests
|
||||||
|
- traefik.http.routers.anubis.service=anubis # Telling Traefik to use the above specified port
|
||||||
|
- traefik.http.routers.anubis.tls.certresolver=le # Telling Traefik to resolve a Cert for Anubis
|
||||||
|
```
|
||||||
|
|
||||||
|
## Passing all Verified Requests Back Correctly to Traefik
|
||||||
|
|
||||||
|
To pass verified requests back to Traefik,
|
||||||
|
we only need to configure Anubis using its environment variables:
|
||||||
|
|
||||||
|
```yml
|
||||||
|
environment:
|
||||||
|
- BIND=:8080
|
||||||
|
- TARGET=http://traefik:3923
|
||||||
|
```
|
||||||
|
|
||||||
|
## Full Example Config
|
||||||
|
|
||||||
|
Now that we know how to pass all requests back and forth, here is the example.
|
||||||
|
This example contains 2 services: one that is protected and the other one that is not.
|
||||||
|
|
||||||
|
**compose.yml**
|
||||||
|
|
||||||
|
```yml
|
||||||
|
services:
|
||||||
|
traefik:
|
||||||
|
image: traefik:v3.3
|
||||||
|
ports:
|
||||||
|
- 80:80
|
||||||
|
- 443:443
|
||||||
|
volumes:
|
||||||
|
- /var/run/docker.sock:/var/run/docker.sock
|
||||||
|
- ./letsencrypt:/letsencrypt
|
||||||
|
- ./traefik.yml:/traefik.yml:ro
|
||||||
|
networks:
|
||||||
|
- traefik
|
||||||
|
labels:
|
||||||
|
# Enable Traefik
|
||||||
|
- traefik.enable=true
|
||||||
|
- traefik.docker.network=traefik
|
||||||
|
# Redirect any HTTP to HTTPS
|
||||||
|
- traefik.http.middlewares.redirect-to-https.redirectscheme.scheme=https
|
||||||
|
- traefik.http.routers.web.rule=PathPrefix(`/`)
|
||||||
|
- traefik.http.routers.web.entrypoints=web
|
||||||
|
- traefik.http.routers.web.middlewares=redirect-to-https
|
||||||
|
- traefik.http.routers.web.tls=false
|
||||||
|
|
||||||
|
anubis:
|
||||||
|
image: ghcr.io/techarohq/anubis:main
|
||||||
|
environment:
|
||||||
|
# Telling Anubis where to listen for Traefik
|
||||||
|
- BIND=:8080
|
||||||
|
# Telling Anubis to point to Traefik via the Docker network
|
||||||
|
- TARGET=http://traefik:3923
|
||||||
|
networks:
|
||||||
|
- traefik
|
||||||
|
labels:
|
||||||
|
- traefik.enable=true # Enabling Traefik
|
||||||
|
- traefik.docker.network=traefik # Telling Traefik which network to use
|
||||||
|
- traefik.http.routers.anubis.priority=1 # Setting Anubis to the lowest priority, so it only takes the slack
|
||||||
|
- traefik.http.routers.anubis.rule=PathRegexp(`.*`) # wildcard match anything
|
||||||
|
- traefik.http.routers.anubis.entrypoints=websecure # Listen on HTTPS
|
||||||
|
- traefik.http.services.anubis.loadbalancer.server.port=8080 # Telling Traefik to which port it should route requests
|
||||||
|
- traefik.http.routers.anubis.service=anubis # Telling Traefik to use the above specified port
|
||||||
|
- traefik.http.routers.anubis.tls.certresolver=le # Telling Traefik to resolve a Cert for Anubis
|
||||||
|
|
||||||
|
# Protected by Anubis
|
||||||
|
target:
|
||||||
|
image: traefik/whoami:latest
|
||||||
|
networks:
|
||||||
|
- traefik
|
||||||
|
labels:
|
||||||
|
- traefik.enable=true # Enabling Traefik
|
||||||
|
- traefik.docker.network=traefik # Telling Traefik which network to use
|
||||||
|
- traefik.http.routers.target.rule=Host(`example.com`) # Only Matching Requests for example.com
|
||||||
|
- traefik.http.routers.target.entrypoints=anubis # Listening on the exclusive Anubis Network
|
||||||
|
- traefik.http.services.target.loadbalancer.server.port=80 # Telling Traefik where to receive requests
|
||||||
|
- traefik.http.routers.target.service=target # Telling Traefik to use the above specified port
|
||||||
|
|
||||||
|
# Not Protected by Anubis
|
||||||
|
target2:
|
||||||
|
image: traefik/whoami:latest
|
||||||
|
networks:
|
||||||
|
- traefik
|
||||||
|
labels:
|
||||||
|
- traefik.enable=true # Enabling Traefik
|
||||||
|
- traefik.docker.network=traefik # Telling Traefik which network to use
|
||||||
|
- traefik.http.routers.target2.rule=Host(`another.com`) # Only matching requests for another.com
|
||||||
|
- traefik.http.routers.target2.entrypoints=websecure # Listening on HTTPS directly, bypassing Anubis
|
||||||
|
- traefik.http.services.target2.loadbalancer.server.port=80 # Telling Traefik where to receive requests
|
||||||
|
- traefik.http.routers.target2.service=target2 # Telling Traefik to use the above specified port
|
||||||
|
- traefik.http.routers.target2.tls.certresolver=le # Telling Traefik to resolve a Cert for this Target
|
||||||
|
|
||||||
|
networks:
|
||||||
|
traefik:
|
||||||
|
name: traefik
|
||||||
|
```
|
||||||
|
|
||||||
|
**traefik.yml**
|
||||||
|
|
||||||
|
```yml
|
||||||
|
api:
|
||||||
|
insecure: false # shouldn't be enabled in prod
|
||||||
|
|
||||||
|
entryPoints:
|
||||||
|
# Web
|
||||||
|
web:
|
||||||
|
address: ":80"
|
||||||
|
websecure:
|
||||||
|
address: ":443"
|
||||||
|
# Anubis
|
||||||
|
anubis:
|
||||||
|
address: ":3923"
|
||||||
|
|
||||||
|
certificatesResolvers:
|
||||||
|
le:
|
||||||
|
acme:
|
||||||
|
tlsChallenge: {}
|
||||||
|
email: "admin@example.com"
|
||||||
|
storage: "/letsencrypt/acme.json"
|
||||||
|
|
||||||
|
providers:
|
||||||
|
docker: {}
|
||||||
|
```
|
||||||
@@ -4,6 +4,9 @@ title: Setting up Anubis
|
|||||||
|
|
||||||
import RandomKey from "@site/src/components/RandomKey";
|
import RandomKey from "@site/src/components/RandomKey";
|
||||||
|
|
||||||
|
import Tabs from "@theme/Tabs";
|
||||||
|
import TabItem from "@theme/TabItem";
|
||||||
|
|
||||||
Anubis is meant to sit between your reverse proxy (such as Nginx or Caddy) and your target service. One instance of Anubis must be used per service you are protecting.
|
Anubis is meant to sit between your reverse proxy (such as Nginx or Caddy) and your target service. One instance of Anubis must be used per service you are protecting.
|
||||||
|
|
||||||
<center>
|
<center>
|
||||||
@@ -24,6 +27,8 @@ TLS terminator)
|
|||||||
|
|
||||||
</center>
|
</center>
|
||||||
|
|
||||||
|
## Docker image conventions
|
||||||
|
|
||||||
Anubis is shipped in the Docker repo [`ghcr.io/techarohq/anubis`](https://github.com/TecharoHQ/anubis/pkgs/container/anubis). The following tags exist for your convenience:
|
Anubis is shipped in the Docker repo [`ghcr.io/techarohq/anubis`](https://github.com/TecharoHQ/anubis/pkgs/container/anubis). The following tags exist for your convenience:
|
||||||
|
|
||||||
| Tag | Meaning |
|
| Tag | Meaning |
|
||||||
@@ -31,32 +36,78 @@ Anubis is shipped in the Docker repo [`ghcr.io/techarohq/anubis`](https://github
|
|||||||
| `latest` | The latest [tagged release](https://github.com/TecharoHQ/anubis/releases), if you are in doubt, start here. |
|
| `latest` | The latest [tagged release](https://github.com/TecharoHQ/anubis/releases), if you are in doubt, start here. |
|
||||||
| `v<version number>` | The Anubis image for [any given tagged release](https://github.com/TecharoHQ/anubis/tags) |
|
| `v<version number>` | The Anubis image for [any given tagged release](https://github.com/TecharoHQ/anubis/tags) |
|
||||||
| `main` | The current build on the `main` branch. Only use this if you need the latest and greatest features as they are merged into `main`. |
|
| `main` | The current build on the `main` branch. Only use this if you need the latest and greatest features as they are merged into `main`. |
|
||||||
| `pr-<number>` | The build associated with PR `#<number>`. Only use this for debugging issues fixed by a PR. |
|
|
||||||
|
|
||||||
Other methods to install Anubis may exist, but the Docker image is currently the only supported method.
|
|
||||||
|
|
||||||
The Docker image runs Anubis as user ID 1000 and group ID 1000. If you are mounting external volumes into Anubis' container, please be sure they are owned by or writable to this user/group.
|
The Docker image runs Anubis as user ID 1000 and group ID 1000. If you are mounting external volumes into Anubis' container, please be sure they are owned by or writable to this user/group.
|
||||||
|
|
||||||
Anubis has very minimal system requirements. I suspect that 128Mi of ram may be sufficient for a large number of concurrent clients. Anubis may be a poor fit for apps that use WebSockets and maintain open connections, but I don't have enough real-world experience to know one way or another.
|
Anubis has very minimal system requirements. I suspect that 128Mi of ram may be sufficient for a large number of concurrent clients. Anubis may be a poor fit for apps that use WebSockets and maintain open connections, but I don't have enough real-world experience to know one way or another.
|
||||||
|
|
||||||
|
## Native packages
|
||||||
|
|
||||||
|
For more detailed information on installing Anubis with native packages, please read [the native install directions](./native-install.mdx).
|
||||||
|
|
||||||
|
## Environment variables
|
||||||
|
|
||||||
Anubis uses these environment variables for configuration:
|
Anubis uses these environment variables for configuration:
|
||||||
|
|
||||||
| Environment Variable | Default value | Explanation |
|
| Environment Variable | Default value | Explanation |
|
||||||
| :----------------------------- | :---------------------- | :--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
|
| :----------------------------- | :---------------------- | :------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
|
||||||
| `BIND` | `:8923` | The network address that Anubis listens on. For `unix`, set this to a path: `/run/anubis/instance.sock` |
|
| `BASE_PREFIX` | unset | If set, adds a global prefix to all Anubis endpoints. For example, setting this to `/myapp` would make Anubis accessible at `/myapp/` instead of `/`. This is useful when running Anubis behind a reverse proxy that routes based on path prefixes. |
|
||||||
| `BIND_NETWORK` | `tcp` | The address family that Anubis listens on. Accepts `tcp`, `unix` and anything Go's [`net.Listen`](https://pkg.go.dev/net#Listen) supports. |
|
| `BIND` | `:8923` | The network address that Anubis listens on. For `unix`, set this to a path: `/run/anubis/instance.sock` |
|
||||||
| `COOKIE_DOMAIN` | unset | The domain the Anubis challenge pass cookie should be set to. This should be set to the domain you bought from your registrar (EG: `techaro.lol` if your webapp is running on `anubis.techaro.lol`). See [here](https://stackoverflow.com/a/1063760) for more information. |
|
| `BIND_NETWORK` | `tcp` | The address family that Anubis listens on. Accepts `tcp`, `unix` and anything Go's [`net.Listen`](https://pkg.go.dev/net#Listen) supports. |
|
||||||
| `COOKIE_PARTITIONED` | `false` | If set to `true`, enables the [partitioned (CHIPS) flag](https://developers.google.com/privacy-sandbox/cookies/chips), meaning that Anubis inside an iframe has a different set of cookies than the domain hosting the iframe. |
|
| `COOKIE_DOMAIN` | unset | The domain the Anubis challenge pass cookie should be set to. This should be set to the domain you bought from your registrar (EG: `techaro.lol` if your webapp is running on `anubis.techaro.lol`). See [here](https://stackoverflow.com/a/1063760) for more information. |
|
||||||
| `DIFFICULTY` | `5` | The difficulty of the challenge, or the number of leading zeroes that must be in successful responses. |
|
| `COOKIE_PARTITIONED` | `false` | If set to `true`, enables the [partitioned (CHIPS) flag](https://developers.google.com/privacy-sandbox/cookies/chips), meaning that Anubis inside an iframe has a different set of cookies than the domain hosting the iframe. |
|
||||||
| `ED25519_PRIVATE_KEY_HEX` | unset | The hex-encoded ed25519 private key used to sign Anubis responses. If this is not set, Anubis will generate one for you. This should be exactly 64 characters long. See below for details. |
|
| `DIFFICULTY` | `4` | The difficulty of the challenge, or the number of leading zeroes that must be in successful responses. |
|
||||||
| `ED25519_PRIVATE_KEY_HEX_FILE` | unset | Path to a file containing the hex-encoded ed25519 private key. Only one of this or its sister option may be set. |
|
| `ED25519_PRIVATE_KEY_HEX` | unset | The hex-encoded ed25519 private key used to sign Anubis responses. If this is not set, Anubis will generate one for you. This should be exactly 64 characters long. See below for details. |
|
||||||
| `METRICS_BIND` | `:9090` | The network address that Anubis serves Prometheus metrics on. See `BIND` for more information. |
|
| `ED25519_PRIVATE_KEY_HEX_FILE` | unset | Path to a file containing the hex-encoded ed25519 private key. Only one of this or its sister option may be set. |
|
||||||
| `METRICS_BIND_NETWORK` | `tcp` | The address family that the Anubis metrics server listens on. See `BIND_NETWORK` for more information. |
|
| `METRICS_BIND` | `:9090` | The network address that Anubis serves Prometheus metrics on. See `BIND` for more information. |
|
||||||
| `SOCKET_MODE` | `0770` | _Only used when at least one of the `*_BIND_NETWORK` variables are set to `unix`._ The socket mode (permissions) for Unix domain sockets. |
|
| `METRICS_BIND_NETWORK` | `tcp` | The address family that the Anubis metrics server listens on. See `BIND_NETWORK` for more information. |
|
||||||
| `POLICY_FNAME` | unset | The file containing [bot policy configuration](./policies.md). See the bot policy documentation for more details. If unset, the default bot policy configuration is used. |
|
| `OG_EXPIRY_TIME` | `24h` | The expiration time for the Open Graph tag cache. |
|
||||||
| `SERVE_ROBOTS_TXT`            | `false`                 | If set to `true`, Anubis will serve a default `robots.txt` file that disallows all known AI scrapers by name and then additionally disallows every scraper. This is useful if facts and circumstances make it difficult to change the underlying service to serve such a `robots.txt` file.                   |
|
| `OG_PASSTHROUGH` | `false` | If set to `true`, Anubis will enable Open Graph tag passthrough. |
|
||||||
| `TARGET` | `http://localhost:3923` | The URL of the service that Anubis should forward valid requests to. Supports Unix domain sockets, set this to a URI like so: `unix:///path/to/socket.sock`. |
|
| `POLICY_FNAME` | unset | The file containing [bot policy configuration](./policies.mdx). See the bot policy documentation for more details. If unset, the default bot policy configuration is used. |
|
||||||
| `USE_REMOTE_ADDRESS`          | unset                   | If set to `true`, Anubis will take the client's IP from the network socket. For production deployments, it is expected that a reverse proxy is used in front of Anubis, which passes the IP using headers instead.                                                                        |
|
| `REDIRECT_DOMAINS` | unset | If set, restrict the domains that Anubis can redirect to when passing a challenge.<br/><br/>If this is unset, Anubis may redirect to any domain which could cause security issues in the unlikely case that an attacker passes a challenge for your browser and then tricks you into clicking a link to your domain. |
|
||||||
|
| `SERVE_ROBOTS_TXT`            | `false`                 | If set to `true`, Anubis will serve a default `robots.txt` file that disallows all known AI scrapers by name and then additionally disallows every scraper. This is useful if facts and circumstances make it difficult to change the underlying service to serve such a `robots.txt` file.                   |
|
||||||
|
| `SOCKET_MODE` | `0770` | _Only used when at least one of the `*_BIND_NETWORK` variables are set to `unix`._ The socket mode (permissions) for Unix domain sockets. |
|
||||||
|
| `TARGET` | `http://localhost:3923` | The URL of the service that Anubis should forward valid requests to. Supports Unix domain sockets, set this to a URI like so: `unix:///path/to/socket.sock`. |
|
||||||
|
| `USE_REMOTE_ADDRESS`          | unset                   | If set to `true`, Anubis will take the client's IP from the network socket. For production deployments, it is expected that a reverse proxy is used in front of Anubis, which passes the IP using headers instead.                                                                                                     |
|
||||||
|
| `WEBMASTER_EMAIL` | unset | If set, shows a contact email address when rendering error pages. This email address will be how users can get in contact with administrators. |
|
||||||
|
|
||||||
|
For more detailed information on configuring Open Graph tags, please refer to the [Open Graph Configuration](./configuration/open-graph.mdx) page.
|
||||||
|
|
||||||
|
### Using Base Prefix
|
||||||
|
|
||||||
|
The `BASE_PREFIX` environment variable allows you to run Anubis behind a path prefix. This is useful when:
|
||||||
|
|
||||||
|
- You want to host multiple services on the same domain
|
||||||
|
- You're using a reverse proxy that routes based on path prefixes
|
||||||
|
- You need to integrate Anubis with an existing application structure
|
||||||
|
|
||||||
|
For example, if you set `BASE_PREFIX=/myapp`, Anubis will:
|
||||||
|
|
||||||
|
- Serve its challenge page at `/myapp/` instead of `/`
|
||||||
|
- Serve its API endpoints at `/myapp/.within.website/x/cmd/anubis/api/` instead of `/.within.website/x/cmd/anubis/api/`
|
||||||
|
- Serve its static assets at `/myapp/.within.website/x/cmd/anubis/` instead of `/.within.website/x/cmd/anubis/`
|
||||||
|
|
||||||
|
When using this feature with a reverse proxy:
|
||||||
|
|
||||||
|
1. Configure your reverse proxy to route requests for the specified path prefix to Anubis
|
||||||
|
2. Set the `BASE_PREFIX` environment variable to match the path prefix in your reverse proxy configuration
|
||||||
|
3. Ensure that your reverse proxy preserves the path when forwarding requests to Anubis
|
||||||
|
|
||||||
|
Example with Nginx:
|
||||||
|
|
||||||
|
```nginx
|
||||||
|
location /myapp/ {
|
||||||
|
proxy_pass http://anubis:8923/myapp;
|
||||||
|
proxy_set_header Host $host;
|
||||||
|
proxy_set_header X-Real-IP $remote_addr;
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
With corresponding Anubis configuration:
|
||||||
|
|
||||||
|
```
|
||||||
|
BASE_PREFIX=/myapp
|
||||||
|
```
|
||||||
|
|
||||||
### Key generation
|
### Key generation
|
||||||
|
|
||||||
@@ -70,107 +121,18 @@ Alternatively here is a key generated by your browser:
|
|||||||
|
|
||||||
<RandomKey />
|
<RandomKey />
|
||||||
|
|
||||||
## Docker compose
|
## Next steps
|
||||||
|
|
||||||
Add Anubis to your compose file pointed at your service:
|
To get Anubis filtering your traffic, you need to make sure it's added to your HTTP load balancer or platform configuration. See the [environments category](/docs/category/environments) for detailed information on individual environments.
|
||||||
|
|
||||||
```yaml
|
- [Apache](./environments/apache.mdx)
|
||||||
services:
|
- [Docker compose](./environments/docker-compose.mdx)
|
||||||
anubis-nginx:
|
- [Kubernetes](./environments/kubernetes.mdx)
|
||||||
image: ghcr.io/techarohq/anubis:latest
|
- [Nginx](./environments/nginx.mdx)
|
||||||
environment:
|
- [Traefik](./environments/traefik.mdx)
|
||||||
BIND: ":8080"
|
|
||||||
DIFFICULTY: "5"
|
|
||||||
METRICS_BIND: ":9090"
|
|
||||||
SERVE_ROBOTS_TXT: "true"
|
|
||||||
TARGET: "http://nginx"
|
|
||||||
POLICY_FNAME: "/data/cfg/botPolicy.json"
|
|
||||||
ports:
|
|
||||||
- 8080:8080
|
|
||||||
volumes:
|
|
||||||
- "./botPolicy.json:/data/cfg/botPolicy.json:ro"
|
|
||||||
nginx:
|
|
||||||
image: nginx
|
|
||||||
volumes:
|
|
||||||
- "./www:/usr/share/nginx/html"
|
|
||||||
```
|
|
||||||
|
|
||||||
## Kubernetes
|
:::note
|
||||||
|
|
||||||
This example makes the following assumptions:
|
Anubis loads its assets from `/.within.website/x/xess/` and `/.within.website/x/cmd/anubis`. If you do not reverse proxy these in your server config, Anubis won't work.
|
||||||
|
|
||||||
- Your target service is listening on TCP port `5000`.
|
:::
|
||||||
- Anubis will be listening on port `8080`.
|
|
||||||
|
|
||||||
Attach Anubis to your Deployment:
|
|
||||||
|
|
||||||
```yaml
|
|
||||||
containers:
|
|
||||||
# ...
|
|
||||||
- name: anubis
|
|
||||||
image: ghcr.io/techarohq/anubis:latest
|
|
||||||
imagePullPolicy: Always
|
|
||||||
env:
|
|
||||||
- name: "BIND"
|
|
||||||
value: ":8080"
|
|
||||||
- name: "DIFFICULTY"
|
|
||||||
value: "5"
|
|
||||||
- name: "METRICS_BIND"
|
|
||||||
value: ":9090"
|
|
||||||
- name: "SERVE_ROBOTS_TXT"
|
|
||||||
value: "true"
|
|
||||||
- name: "TARGET"
|
|
||||||
value: "http://localhost:5000"
|
|
||||||
resources:
|
|
||||||
limits:
|
|
||||||
cpu: 500m
|
|
||||||
memory: 128Mi
|
|
||||||
requests:
|
|
||||||
cpu: 250m
|
|
||||||
memory: 128Mi
|
|
||||||
securityContext:
|
|
||||||
runAsUser: 1000
|
|
||||||
runAsGroup: 1000
|
|
||||||
runAsNonRoot: true
|
|
||||||
allowPrivilegeEscalation: false
|
|
||||||
capabilities:
|
|
||||||
drop:
|
|
||||||
- ALL
|
|
||||||
seccompProfile:
|
|
||||||
type: RuntimeDefault
|
|
||||||
```
|
|
||||||
|
|
||||||
Then add a Service entry for Anubis:
|
|
||||||
|
|
||||||
```yaml
|
|
||||||
# ...
|
|
||||||
spec:
|
|
||||||
ports:
|
|
||||||
# diff-add
|
|
||||||
- protocol: TCP
|
|
||||||
# diff-add
|
|
||||||
port: 8080
|
|
||||||
# diff-add
|
|
||||||
targetPort: 8080
|
|
||||||
# diff-add
|
|
||||||
name: anubis
|
|
||||||
```
|
|
||||||
|
|
||||||
Then point your Ingress to the Anubis port:
|
|
||||||
|
|
||||||
```yaml
|
|
||||||
rules:
|
|
||||||
- host: git.xeserv.us
|
|
||||||
http:
|
|
||||||
paths:
|
|
||||||
- pathType: Prefix
|
|
||||||
path: "/"
|
|
||||||
backend:
|
|
||||||
service:
|
|
||||||
name: git
|
|
||||||
port:
|
|
||||||
# diff-remove
|
|
||||||
name: http
|
|
||||||
# diff-add
|
|
||||||
name: anubis
|
|
||||||
```
|
|
||||||
|
|||||||
97
docs/docs/admin/less-aggressive.mdx
Normal file
@@ -0,0 +1,97 @@
|
|||||||
|
# How to make Anubis much less aggressive
|
||||||
|
|
||||||
|
Out of the box, Anubis has fairly paranoid defaults. It's designed to stop the bleeding now, so it defaults to a global "challenge everything" rule. This does work, but comes at significant user experience cost if users disable JavaScript or run plugins that interfere with JavaScript execution.
|
||||||
|
|
||||||
|
Anubis ships with a rule named `challenge-lies-browser-but-http-1.1` that changes the default behavior to fire much less often. This works on top of [expression support](./configuration/expressions.mdx) to allow you to block the worst of the bad while leaving normal users able to access the website. This requires integration with your HTTP load balancer.
|
||||||
|
|
||||||
|
You can import this rule by replacing the `generic-browser` rule with the following:
|
||||||
|
|
||||||
|
```yaml
|
||||||
|
- import: (data)/common/challenge-browser-like.yaml
|
||||||
|
```
|
||||||
|
|
||||||
|
## The new rule
|
||||||
|
|
||||||
|
Previously Anubis aggressively challenged everything that had "Mozilla" in its User-Agent string. The rule has been amended to this set of heuristics:
|
||||||
|
|
||||||
|
1. If the request headers contain `X-Http-Version`
|
||||||
|
1. AND if the request header `X-Http-Version` is `HTTP/1.1`
|
||||||
|
1. AND if the request headers contain `X-Forwarded-Proto`
|
||||||
|
1. AND if the request header `X-Forwarded-Proto` is `https`
|
||||||
|
1. AND if the request's User-Agent string is similar to that of a browser
|
||||||
|
1. THEN throw a challenge.
|
||||||
|
|
||||||
|
This means that users that are using up to date browsers will automatically get through without having to pass a challenge.
|
||||||
|
|
||||||
|
## Apache
|
||||||
|
|
||||||
|
Ensure [`mod_http2`](https://httpd.apache.org/docs/2.4/mod/mod_http2.html) is loaded.
|
||||||
|
|
||||||
|
Make sure that your HTTPS VirtualHost has the right settings for Anubis in place:
|
||||||
|
|
||||||
|
```python
|
||||||
|
# Enable HTTP/2 support so Anubis can issue challenges for HTTP/1.1 clients
|
||||||
|
Protocols h2 http/1.1
|
||||||
|
|
||||||
|
# These headers need to be set or else Anubis will
|
||||||
|
# throw an "admin misconfiguration" error.
|
||||||
|
# diff-add
|
||||||
|
RequestHeader set "X-Real-Ip" expr=%{REMOTE_ADDR}
|
||||||
|
# diff-add
|
||||||
|
RequestHeader set "X-Forwarded-Proto" "https"
|
||||||
|
# diff-add
|
||||||
|
RequestHeader set "X-Http-Version" "%{SERVER_PROTOCOL}s"
|
||||||
|
```
|
||||||
|
|
||||||
|
## Caddy
|
||||||
|
|
||||||
|
Make sure that your [`reverse_proxy` has the right headers configured](https://caddyserver.com/docs/caddyfile/directives/reverse_proxy#headers):
|
||||||
|
|
||||||
|
```python
|
||||||
|
ellenjoe.int.within.lgbt {
|
||||||
|
# ...
|
||||||
|
# diff-remove
|
||||||
|
reverse_proxy http://localhost:3000
|
||||||
|
# diff-add
|
||||||
|
reverse_proxy http://localhost:3000 {
|
||||||
|
# diff-add
|
||||||
|
header_up X-Real-Ip {remote_host}
|
||||||
|
# diff-add
|
||||||
|
header_up X-Http-Version {http.request.proto}
|
||||||
|
# diff-add
|
||||||
|
}
|
||||||
|
# ...
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
## ingress-nginx
|
||||||
|
|
||||||
|
Edit your `ingress-nginx-controller` ConfigMap:
|
||||||
|
|
||||||
|
```yaml
|
||||||
|
data:
|
||||||
|
# ...
|
||||||
|
# diff-add
|
||||||
|
location-snippet: |
|
||||||
|
# diff-add
|
||||||
|
proxy_set_header X-Http-Version $server_protocol;
|
||||||
|
# diff-add
|
||||||
|
proxy_set_header X-Tls-Version $ssl_protocol;
|
||||||
|
```
|
||||||
|
|
||||||
|
## Nginx
|
||||||
|
|
||||||
|
Edit your `server` blocks to add the following headers:
|
||||||
|
|
||||||
|
```python
|
||||||
|
# diff-add
|
||||||
|
proxy_set_header Host $host;
|
||||||
|
# diff-add
|
||||||
|
proxy_set_header X-Real-IP $remote_addr;
|
||||||
|
# diff-add
|
||||||
|
proxy_set_header X-Http-Version $server_protocol;
|
||||||
|
```
|
||||||
|
|
||||||
|
## Traefik
|
||||||
|
|
||||||
|
This configuration is not currently supported with Traefik. A Traefik plugin is needed to add the right header.
|
||||||
138
docs/docs/admin/native-install.mdx
Normal file
@@ -0,0 +1,138 @@
|
|||||||
|
---
|
||||||
|
title: Installing Anubis with a native package
|
||||||
|
---
|
||||||
|
|
||||||
|
import Tabs from "@theme/Tabs";
|
||||||
|
import TabItem from "@theme/TabItem";
|
||||||
|
|
||||||
|
Download the package for your system from [the most recent release on GitHub](https://github.com/TecharoHQ/anubis/releases).
|
||||||
|
|
||||||
|
Install the Anubis package using your package manager of choice:
|
||||||
|
|
||||||
|
<Tabs>
|
||||||
|
<TabItem value="deb" label="Debian-based (apt)" default>
|
||||||
|
|
||||||
|
Install Anubis with `apt`:
|
||||||
|
|
||||||
|
```text
|
||||||
|
sudo apt install ./anubis-$VERSION-$ARCH.deb
|
||||||
|
```
|
||||||
|
|
||||||
|
</TabItem>
|
||||||
|
<TabItem value="tarball" label="Tarball">
|
||||||
|
|
||||||
|
Extract the tarball to a folder:
|
||||||
|
|
||||||
|
```text
|
||||||
|
tar zxf ./anubis-$VERSION-$OS-$ARCH.tar.gz
|
||||||
|
cd anubis-$VERSION-$OS-$ARCH
|
||||||
|
```
|
||||||
|
|
||||||
|
Install the binary to your system:
|
||||||
|
|
||||||
|
```text
|
||||||
|
sudo install -D ./bin/anubis /usr/local/bin/anubis
|
||||||
|
```
|
||||||
|
|
||||||
|
Edit the systemd unit to point to `/usr/local/bin/anubis` instead of `/usr/bin/anubis`:
|
||||||
|
|
||||||
|
```text
|
||||||
|
perl -pi -e 's$/usr/bin/anubis$/usr/local/bin/anubis$g' ./run/anubis@.service
|
||||||
|
```
|
||||||
|
|
||||||
|
Install the systemd unit to your system:
|
||||||
|
|
||||||
|
```text
|
||||||
|
sudo install -D ./run/anubis@.service /etc/systemd/system/anubis@.service
|
||||||
|
```
|
||||||
|
|
||||||
|
Install the default configuration file to your system:
|
||||||
|
|
||||||
|
```text
|
||||||
|
sudo install -D ./run/default.env /etc/anubis/default.env
|
||||||
|
```
|
||||||
|
|
||||||
|
</TabItem>
|
||||||
|
<TabItem value="rpm" label="Red Hat-based (rpm)">
|
||||||
|
|
||||||
|
Install Anubis with `dnf`:
|
||||||
|
|
||||||
|
```text
|
||||||
|
sudo dnf -y install ./anubis-$VERSION.$ARCH.rpm
|
||||||
|
```
|
||||||
|
|
||||||
|
OR
|
||||||
|
|
||||||
|
Install Anubis with `yum`:
|
||||||
|
|
||||||
|
```text
|
||||||
|
sudo yum -y install ./anubis-$VERSION.$ARCH.rpm
|
||||||
|
```
|
||||||
|
|
||||||
|
OR
|
||||||
|
|
||||||
|
Install Anubis with `rpm`:
|
||||||
|
|
||||||
|
```
|
||||||
|
sudo rpm -ivh ./anubis-$VERSION.$ARCH.rpm
|
||||||
|
```
|
||||||
|
|
||||||
|
</TabItem>
|
||||||
|
</Tabs>
|
||||||
|
|
||||||
|
Once it's installed, make a copy of the default configuration file `/etc/anubis/default.env` based on which service you want to protect. For example, to protect a `gitea` server:
|
||||||
|
|
||||||
|
```text
|
||||||
|
sudo cp /etc/anubis/default.env /etc/anubis/gitea.env
|
||||||
|
```
|
||||||
|
|
||||||
|
Copy the default bot policies file to `/etc/anubis/gitea.botPolicies.yaml`:
|
||||||
|
|
||||||
|
<Tabs>
|
||||||
|
<TabItem value="debrpm" label="Debian or Red Hat" default>
|
||||||
|
|
||||||
|
```text
|
||||||
|
sudo cp /usr/share/doc/anubis/botPolicies.yaml /etc/anubis/gitea.botPolicies.yaml
|
||||||
|
```
|
||||||
|
|
||||||
|
</TabItem>
|
||||||
|
<TabItem value="tarball" label="Tarball">
|
||||||
|
|
||||||
|
```text
|
||||||
|
sudo cp ./doc/botPolicies.yaml /etc/anubis/gitea.botPolicies.yaml
|
||||||
|
```
|
||||||
|
|
||||||
|
</TabItem>
|
||||||
|
|
||||||
|
</Tabs>
|
||||||
|
|
||||||
|
Then open `gitea.env` in your favorite text editor and customize [the environment variables](./installation.mdx#environment-variables) as needed. Here's an example configuration for a Gitea server:
|
||||||
|
|
||||||
|
```sh
|
||||||
|
BIND=[::1]:8239
|
||||||
|
BIND_NETWORK=tcp
|
||||||
|
DIFFICULTY=4
|
||||||
|
METRICS_BIND=[::1]:8240
|
||||||
|
METRICS_BIND_NETWORK=tcp
|
||||||
|
POLICY_FNAME=/etc/anubis/gitea.botPolicies.yaml
|
||||||
|
TARGET=http://localhost:3000
|
||||||
|
```
|
||||||
|
|
||||||
|
Then start Anubis with `systemctl enable --now`:
|
||||||
|
|
||||||
|
```text
|
||||||
|
sudo systemctl enable --now anubis@gitea.service
|
||||||
|
```
|
||||||
|
|
||||||
|
Test to make sure it's running with `curl`:
|
||||||
|
|
||||||
|
```text
|
||||||
|
curl http://localhost:8240/metrics
|
||||||
|
```
|
||||||
|
|
||||||
|
Then set up your reverse proxy (Nginx, Caddy, etc.) to point to the Anubis port. Anubis will then reverse proxy all requests that meet the policies in `/etc/anubis/gitea.botPolicies.yaml` to the target service.
|
||||||
|
|
||||||
|
For more details on particular reverse proxies, see here:
|
||||||
|
|
||||||
|
- [Apache](./environments/apache.mdx)
|
||||||
|
- [Nginx](./environments/nginx.mdx)
|
||||||
@@ -2,15 +2,25 @@
|
|||||||
title: Policy Definitions
|
title: Policy Definitions
|
||||||
---
|
---
|
||||||
|
|
||||||
|
import Tabs from "@theme/Tabs";
|
||||||
|
import TabItem from "@theme/TabItem";
|
||||||
|
|
||||||
Out of the box, Anubis is pretty heavy-handed. It will aggressively challenge everything that might be a browser (usually indicated by having `Mozilla` in its user agent). However, some bots are smart enough to get past the challenge. Some things that look like bots may actually be fine (IE: RSS readers). Some resources need to be visible no matter what. Some resources and remotes are fine to begin with.
|
Out of the box, Anubis is pretty heavy-handed. It will aggressively challenge everything that might be a browser (usually indicated by having `Mozilla` in its user agent). However, some bots are smart enough to get past the challenge. Some things that look like bots may actually be fine (IE: RSS readers). Some resources need to be visible no matter what. Some resources and remotes are fine to begin with.
|
||||||
|
|
||||||
Bot policies let you customize the rules that Anubis uses to allow, deny, or challenge incoming requests. Currently you can set policies by the following matches:
|
Bot policies let you customize the rules that Anubis uses to allow, deny, or challenge incoming requests. Currently you can set policies by the following matches:
|
||||||
|
|
||||||
- Request path
|
- Request path
|
||||||
- User agent string
|
- User agent string
|
||||||
|
- HTTP request header values
|
||||||
|
- [Importing other configuration snippets](./configuration/import.mdx)
|
||||||
|
|
||||||
|
As of version v1.17.0 or later, configuration can be written in either JSON or YAML.
|
||||||
|
|
||||||
Here's an example rule that denies [Amazonbot](https://developer.amazon.com/en/amazonbot):
|
Here's an example rule that denies [Amazonbot](https://developer.amazon.com/en/amazonbot):
|
||||||
|
|
||||||
|
<Tabs>
|
||||||
|
<TabItem value="json" label="JSON" default>
|
||||||
|
|
||||||
```json
|
```json
|
||||||
{
|
{
|
||||||
"name": "amazonbot",
|
"name": "amazonbot",
|
||||||
@@ -19,15 +29,37 @@ Here's an example rule that denies [Amazonbot](https://developer.amazon.com/en/a
|
|||||||
}
|
}
|
||||||
```
|
```
|
||||||
|
|
||||||
|
</TabItem>
|
||||||
|
<TabItem value="yaml" label="YAML">
|
||||||
|
|
||||||
|
```yaml
|
||||||
|
- name: amazonbot
|
||||||
|
user_agent_regex: Amazonbot
|
||||||
|
action: DENY
|
||||||
|
```
|
||||||
|
|
||||||
|
</TabItem>
|
||||||
|
</Tabs>
|
||||||
|
|
||||||
When this rule is evaluated, Anubis will check the `User-Agent` string of the request. If it contains `Amazonbot`, Anubis will send an error page to the user saying that access is denied, but in such a way that makes scrapers think they have correctly loaded the webpage.
|
When this rule is evaluated, Anubis will check the `User-Agent` string of the request. If it contains `Amazonbot`, Anubis will send an error page to the user saying that access is denied, but in such a way that makes scrapers think they have correctly loaded the webpage.
|
||||||
|
|
||||||
Right now the only kinds of policies you can write are bot policies. Other forms of policies will be added in the future.
|
Right now the only kinds of policies you can write are bot policies. Other forms of policies will be added in the future.
|
||||||
|
|
||||||
Here is a minimal policy file that will protect against most scraper bots:
|
Here is a minimal policy file that will protect against most scraper bots:
|
||||||
|
|
||||||
|
<Tabs>
|
||||||
|
<TabItem value="json" label="JSON" default>
|
||||||
|
|
||||||
```json
|
```json
|
||||||
{
|
{
|
||||||
"bots": [
|
"bots": [
|
||||||
|
{
|
||||||
|
"name": "cloudflare-workers",
|
||||||
|
"headers_regex": {
|
||||||
|
"CF-Worker": ".*"
|
||||||
|
},
|
||||||
|
"action": "DENY"
|
||||||
|
},
|
||||||
{
|
{
|
||||||
"name": "well-known",
|
"name": "well-known",
|
||||||
"path_regex": "^/.well-known/.*$",
|
"path_regex": "^/.well-known/.*$",
|
||||||
@@ -52,9 +84,35 @@ Here is a minimal policy file that will protect against most scraper bots:
|
|||||||
}
|
}
|
||||||
```
|
```
|
||||||
|
|
||||||
|
</TabItem>
|
||||||
|
<TabItem value="yaml" label="YAML">
|
||||||
|
|
||||||
|
```yaml
|
||||||
|
bots:
|
||||||
|
- name: cloudflare-workers
|
||||||
|
headers_regex:
|
||||||
|
CF-Worker: .*
|
||||||
|
action: DENY
|
||||||
|
- name: well-known
|
||||||
|
path_regex: ^/.well-known/.*$
|
||||||
|
action: ALLOW
|
||||||
|
- name: favicon
|
||||||
|
path_regex: ^/favicon.ico$
|
||||||
|
action: ALLOW
|
||||||
|
- name: robots-txt
|
||||||
|
path_regex: ^/robots.txt$
|
||||||
|
action: ALLOW
|
||||||
|
- name: generic-browser
|
||||||
|
user_agent_regex: Mozilla
|
||||||
|
action: CHALLENGE
|
||||||
|
```
|
||||||
|
|
||||||
|
</TabItem>
|
||||||
|
</Tabs>
|
||||||
|
|
||||||
This allows requests to [`/.well-known`](https://en.wikipedia.org/wiki/Well-known_URI), `/favicon.ico`, `/robots.txt`, and challenges any request that has the word `Mozilla` in its User-Agent string. The [default policy file](https://github.com/TecharoHQ/anubis/blob/main/data/botPolicies.json) is a bit more cohesive, but this should be more than enough for most users.
|
This allows requests to [`/.well-known`](https://en.wikipedia.org/wiki/Well-known_URI), `/favicon.ico`, `/robots.txt`, and challenges any request that has the word `Mozilla` in its User-Agent string. The [default policy file](https://github.com/TecharoHQ/anubis/blob/main/data/botPolicies.json) is a bit more cohesive, but this should be more than enough for most users.
|
||||||
|
|
||||||
If no rules match the request, it is allowed through.
|
If no rules match the request, it is allowed through. For more details on this default behavior and its implications, see [Default allow behavior](./default-allow-behavior.mdx).
|
||||||
|
|
||||||
## Writing your own rules
|
## Writing your own rules
|
||||||
|
|
||||||
@@ -72,6 +130,11 @@ Name your rules in lower case using kebab-case. Rule names will be exposed in Pr
|
|||||||
|
|
||||||
Rules can also have their own challenge settings. These are customized using the `"challenge"` key. For example, here is a rule that makes challenges artificially hard for connections with the substring "bot" in their user agent:
|
Rules can also have their own challenge settings. These are customized using the `"challenge"` key. For example, here is a rule that makes challenges artificially hard for connections with the substring "bot" in their user agent:
|
||||||
|
|
||||||
|
<Tabs>
|
||||||
|
<TabItem value="json" label="JSON" default>
|
||||||
|
|
||||||
|
This rule has been known to have a high false positive rate in testing. Please use this with care.
|
||||||
|
|
||||||
```json
|
```json
|
||||||
{
|
{
|
||||||
"name": "generic-bot-catchall",
|
"name": "generic-bot-catchall",
|
||||||
@@ -85,6 +148,25 @@ Rules can also have their own challenge settings. These are customized using the
|
|||||||
}
|
}
|
||||||
```
|
```
|
||||||
|
|
||||||
|
</TabItem>
|
||||||
|
<TabItem value="yaml" label="YAML">
|
||||||
|
|
||||||
|
This rule has been known to have a high false positive rate in testing. Please use this with care.
|
||||||
|
|
||||||
|
```yaml
|
||||||
|
# Punish any bot with "bot" in the user-agent string
|
||||||
|
- name: generic-bot-catchall
|
||||||
|
user_agent_regex: (?i:bot|crawler)
|
||||||
|
action: CHALLENGE
|
||||||
|
challenge:
|
||||||
|
difficulty: 16 # impossible
|
||||||
|
report_as: 4 # lie to the operator
|
||||||
|
algorithm: slow # intentionally waste CPU cycles and time
|
||||||
|
```
|
||||||
|
|
||||||
|
</TabItem>
|
||||||
|
</Tabs>
|
||||||
|
|
||||||
Challenges can be configured with these settings:
|
Challenges can be configured with these settings:
|
||||||
|
|
||||||
| Key | Example | Description |
|
| Key | Example | Description |
|
||||||
@@ -99,6 +181,9 @@ The `remote_addresses` field of a Bot rule allows you to set the IP range that t
|
|||||||
|
|
||||||
For example, you can allow a search engine to connect if and only if its IP address matches the ones they published:
|
For example, you can allow a search engine to connect if and only if its IP address matches the ones they published:
|
||||||
|
|
||||||
|
<Tabs>
|
||||||
|
<TabItem value="json" label="JSON" default>
|
||||||
|
|
||||||
```json
|
```json
|
||||||
{
|
{
|
||||||
"name": "qwantbot",
|
"name": "qwantbot",
|
||||||
@@ -108,8 +193,25 @@ For example, you can allow a search engine to connect if and only if its IP addr
|
|||||||
}
|
}
|
||||||
```
|
```
|
||||||
|
|
||||||
|
</TabItem>
|
||||||
|
<TabItem value="yaml" label="YAML">
|
||||||
|
|
||||||
|
```yaml
|
||||||
|
- name: qwantbot
|
||||||
|
user_agent_regex: \+https\://help\.qwant\.com/bot/
|
||||||
|
action: ALLOW
|
||||||
|
# https://help.qwant.com/wp-content/uploads/sites/2/2025/01/qwantbot.json
|
||||||
|
remote_addresses: ["91.242.162.0/24"]
|
||||||
|
```
|
||||||
|
|
||||||
|
</TabItem>
|
||||||
|
</Tabs>
|
||||||
|
|
||||||
This also works at an IP range level without any other checks:
|
This also works at an IP range level without any other checks:
|
||||||
|
|
||||||
|
<Tabs>
|
||||||
|
<TabItem value="json" label="JSON" default>
|
||||||
|
|
||||||
```json
|
```json
|
||||||
{
|
{
|
||||||
"name": "internal-network",
|
"name": "internal-network",
|
||||||
@@ -118,6 +220,19 @@ This also works at an IP range level without any other checks:
|
|||||||
}
|
}
|
||||||
```
|
```
|
||||||
|
|
||||||
|
</TabItem>
|
||||||
|
<TabItem value="yaml" label="YAML">
|
||||||
|
|
||||||
|
```yaml
|
||||||
|
name: internal-network
|
||||||
|
action: ALLOW
|
||||||
|
remote_addresses:
|
||||||
|
- 100.64.0.0/10
|
||||||
|
```
|
||||||
|
|
||||||
|
</TabItem>
|
||||||
|
</Tabs>
|
||||||
|
|
||||||
## Risk calculation for downstream services
|
## Risk calculation for downstream services
|
||||||
|
|
||||||
In case your service needs it for risk calculation reasons, Anubis exposes information about the rules that any requests match using a few headers:
|
In case your service needs it for risk calculation reasons, Anubis exposes information about the rules that any requests match using a few headers:
|
||||||
@@ -126,6 +241,6 @@ In case your service needs it for risk calculation reasons, Anubis exposes infor
|
|||||||
| :---------------- | :--------------------------------------------------- | :--------------- |
|
| :---------------- | :--------------------------------------------------- | :--------------- |
|
||||||
| `X-Anubis-Rule` | The name of the rule that was matched | `bot/lightpanda` |
|
| `X-Anubis-Rule` | The name of the rule that was matched | `bot/lightpanda` |
|
||||||
| `X-Anubis-Action` | The action that Anubis took in response to that rule | `CHALLENGE` |
|
| `X-Anubis-Action` | The action that Anubis took in response to that rule | `CHALLENGE` |
|
||||||
| `X-Anubis-Status` | The status and how strict Anubis was in its checks | `PASS-FULL` |
|
| `X-Anubis-Status` | The status and how strict Anubis was in its checks | `PASS` |
|
||||||
|
|
||||||
Policy rules are matched using [Go's standard library regular expressions package](https://pkg.go.dev/regexp). You can mess around with the syntax at [regex101.com](https://regex101.com), make sure to select the Golang option.
|
Policy rules are matched using [Go's standard library regular expressions package](https://pkg.go.dev/regexp). You can mess around with the syntax at [regex101.com](https://regex101.com), make sure to select the Golang option.
|
||||||
@@ -4,38 +4,81 @@ title: Building Anubis without Docker
|
|||||||
|
|
||||||
:::note
|
:::note
|
||||||
|
|
||||||
These instructions may work, but for right now they are informative for downstream packagers more than they are ready-made instructions for administrators wanting to run Anubis on their servers.
|
These instructions may work, but for right now they are informative for downstream packagers more than they are ready-made instructions for administrators wanting to run Anubis on their servers. Pre-made binary package support is being tracked in [#156](https://github.com/TecharoHQ/anubis/issues/156).
|
||||||
|
|
||||||
:::
|
:::
|
||||||
|
|
||||||
## Tools needed
|
## Entirely from source
|
||||||
|
|
||||||
|
If you are doing a build entirely from source, here's what you need to do:
|
||||||
|
|
||||||
|
:::info
|
||||||
|
|
||||||
|
If you maintain a package for Anubis v1.15.x or older, you will need to update your package build. You may want to use one of the half-baked tarballs if your distro/environment of choice makes it difficult to use npm.
|
||||||
|
|
||||||
|
:::
|
||||||
|
|
||||||
|
### Tools needed
|
||||||
|
|
||||||
In order to build a production-ready binary of Anubis, you need the following packages in your environment:
|
In order to build a production-ready binary of Anubis, you need the following packages in your environment:
|
||||||
|
|
||||||
- [Go](https://go.dev) - the programming language that Anubis is written in
|
- [Go](https://go.dev) at least version 1.24 - the programming language that Anubis is written in
|
||||||
- [esbuild](https://esbuild.github.io/) - the JavaScript bundler Anubis uses for its production JS assets
|
- [esbuild](https://esbuild.github.io/) - the JavaScript bundler Anubis uses for its production JS assets
|
||||||
- [Node.JS & NPM](https://nodejs.org/en) - manages some build dependencies
|
- [Node.JS & NPM](https://nodejs.org/en) - manages some build dependencies
|
||||||
- `gzip` - compresses production JS (part of coreutils)
|
- `gzip` - compresses production JS (part of coreutils)
|
||||||
- `zstd` - compresses production JS
|
- `zstd` - compresses production JS
|
||||||
- `brotli` - compresses production JS
|
- `brotli` - compresses production JS
|
||||||
|
|
||||||
## Install dependencies
|
To upgrade your version of Go without system package manager support, install `golang.org/dl/go1.24.2` (this can be done from any version of Go):
|
||||||
|
|
||||||
```text
|
```text
|
||||||
go mod download
|
go install golang.org/dl/go1.24.2@latest
|
||||||
npm ci
|
go1.24.2 download
|
||||||
```
|
```
|
||||||
|
|
||||||
## Building static assets
|
### Install dependencies
|
||||||
|
|
||||||
```text
|
```text
|
||||||
npm run assets
|
make deps
|
||||||
```
|
```
|
||||||
|
|
||||||
## Building Anubis to the `./var` folder
|
This will download Go and NPM dependencies.
|
||||||
|
|
||||||
|
### Building static assets
|
||||||
|
|
||||||
```text
|
```text
|
||||||
go build -o ./var/anubis ./cmd/anubis
|
make assets
|
||||||
|
```
|
||||||
|
|
||||||
|
This will build all static assets (CSS, JavaScript) for distribution.
|
||||||
|
|
||||||
|
### Building Anubis to the `./var` folder
|
||||||
|
|
||||||
|
```text
|
||||||
|
make build
|
||||||
```
|
```
|
||||||
|
|
||||||
From this point it is up to you to make sure that `./var/anubis` ends up in the right place. You may want to consult the `./run` folder for useful files such as a systemd unit and `anubis.env.default` file.
|
From this point it is up to you to make sure that `./var/anubis` ends up in the right place. You may want to consult the `./run` folder for useful files such as a systemd unit and `anubis.env.default` file.
|
||||||
|
|
||||||
|
## "Pre-baked" tarball
|
||||||
|
|
||||||
|
The `anubis-src-with-vendor` tarball has many pre-build steps already done, including:
|
||||||
|
|
||||||
|
- Go module dependencies are present in `./vendor`
|
||||||
|
- Static assets (JS, CSS, etc.) are already built in CI
|
||||||
|
|
||||||
|
This means you do not have to manage Go, NPM, or other ecosystem dependencies.
|
||||||
|
|
||||||
|
When using this tarball, all you need to do is build `./cmd/anubis`:
|
||||||
|
|
||||||
|
```text
|
||||||
|
make prebaked-build
|
||||||
|
```
|
||||||
|
|
||||||
|
Anubis will be built to `./var/anubis`.
|
||||||
|
|
||||||
|
## Development dependencies
|
||||||
|
|
||||||
|
Optionally, you can install the following dependencies for development:
|
||||||
|
|
||||||
|
- [Staticcheck](https://staticcheck.dev/docs/getting-started/) (optional, not required due to [`go tool staticcheck`](https://www.alexedwards.net/blog/how-to-manage-tool-dependencies-in-go-1.24-plus), but required if you are using any version of Go older than 1.24)
|
||||||
|
|||||||
@@ -55,3 +55,32 @@ This builds a prod-ready container image with [ko](https://ko.build). If you wan
|
|||||||
```text
|
```text
|
||||||
DOCKER_REPO=registry.host/org/repo DOCKER_METADATA_OUTPUT_TAGS=registry.host/org/repo:latest npm run container
|
DOCKER_REPO=registry.host/org/repo DOCKER_METADATA_OUTPUT_TAGS=registry.host/org/repo:latest npm run container
|
||||||
```
|
```
|
||||||
|
|
||||||
|
## Building packages
|
||||||
|
|
||||||
|
For more information, see [Building native packages is complicated](https://xeiaso.net/blog/2025/anubis-packaging/) and [#156: Debian, RPM, and binary tarball packages](https://github.com/TecharoHQ/anubis/issues/156).
|
||||||
|
|
||||||
|
Install `yeet`:
|
||||||
|
|
||||||
|
:::note
|
||||||
|
|
||||||
|
`yeet` will soon be moved to a dedicated TecharoHQ repository. This is currently done in a hacky way in order to get this ready for user feedback.
|
||||||
|
|
||||||
|
:::
|
||||||
|
|
||||||
|
```text
|
||||||
|
go install within.website/x/cmd/yeet@v1.13.4
|
||||||
|
```
|
||||||
|
|
||||||
|
Install the dependencies for Anubis:
|
||||||
|
|
||||||
|
```text
|
||||||
|
npm ci
|
||||||
|
go mod download
|
||||||
|
```
|
||||||
|
|
||||||
|
Build the packages into `./var`:
|
||||||
|
|
||||||
|
```text
|
||||||
|
yeet
|
||||||
|
```
|
||||||
|
|||||||
@@ -7,4 +7,4 @@ Anubis is provided to the public for free in order to help advance the common go
|
|||||||
|
|
||||||
If you want to run an unbranded or white-label version of Anubis, please [contact Xe](https://xeiaso.net/contact) to arrange a contract. This is not meant to be "contact us" pricing, I am still evaluating the market for this solution and figuring out what makes sense.
|
If you want to run an unbranded or white-label version of Anubis, please [contact Xe](https://xeiaso.net/contact) to arrange a contract. This is not meant to be "contact us" pricing, I am still evaluating the market for this solution and figuring out what makes sense.
|
||||||
|
|
||||||
You can donate to the project [on Patreon](https://patreon.com/cadey).
|
You can donate to the project [on Patreon](https://patreon.com/cadey) or via [GitHub Sponsors](https://github.com/sponsors/Xe).
|
||||||
|
|||||||
@@ -15,14 +15,55 @@ title: Anubis
|
|||||||

|

|
||||||

|

|
||||||
|
|
||||||
Anubis [weighs the soul of your connection](https://en.wikipedia.org/wiki/Weighing_of_souls) using a sha256 proof-of-work challenge in order to protect upstream resources from scraper bots.
|
## Sponsors
|
||||||
|
|
||||||
|
Anubis is brought to you by sponsors and donors like:
|
||||||
|
|
||||||
|
[](https://distrust.co)
|
||||||
|
|
||||||
|
## Overview
|
||||||
|
|
||||||
|
Anubis [weighs the soul of your connection](https://en.wikipedia.org/wiki/Weighing_of_souls) using a proof-of-work challenge in order to protect upstream resources from scraper bots.
|
||||||
|
|
||||||
This program is designed to help protect the small internet from the endless storm of requests that flood in from AI companies. Anubis is as lightweight as possible to ensure that everyone can afford to protect the communities closest to them.
|
This program is designed to help protect the small internet from the endless storm of requests that flood in from AI companies. Anubis is as lightweight as possible to ensure that everyone can afford to protect the communities closest to them.
|
||||||
|
|
||||||
Anubis is a bit of a nuclear response. This will result in your website being blocked from smaller scrapers and may inhibit "good bots" like the Internet Archive. You can configure [bot policy definitions](./admin/policies.md) to explicitly allowlist them and we are working on a curated set of "known good" bots to allow for a compromise between discoverability and uptime.
|
Anubis is a bit of a nuclear response. This will result in your website being blocked from smaller scrapers and may inhibit "good bots" like the Internet Archive. You can configure [bot policy definitions](https://anubis.techaro.lol/docs/admin/policies) to explicitly allowlist them and we are working on a curated set of "known good" bots to allow for a compromise between discoverability and uptime.
|
||||||
|
|
||||||
|
In most cases, you should not need this and can probably get by using Cloudflare to protect a given origin. However, for circumstances where you can't or won't use Cloudflare, Anubis is there for you.
|
||||||
|
|
||||||
## Support
|
## Support
|
||||||
|
|
||||||
If you run into any issues running Anubis, please [open an issue](https://github.com/TecharoHQ/anubis/issues/new?template=Blank+issue) and include all the information I would need to diagnose your issue.
|
If you run into any issues running Anubis, please [open an issue](https://github.com/TecharoHQ/anubis/issues/new?template=Blank+issue) and include all the information I would need to diagnose your issue.
|
||||||
|
|
||||||
For live chat, please join the [Patreon](https://patreon.com/cadey) and ask in the Patron discord in the channel `#anubis`.
|
For live chat, please join the [Patreon](https://patreon.com/cadey) or join [GitHub Sponsors](https://github.com/sponsors/Xe) and ask in the Patron discord in the channel `#anubis`.
|
||||||
|
|
||||||
|
## Star History
|
||||||
|
|
||||||
|
<a href="https://www.star-history.com/#TecharoHQ/anubis&Date">
|
||||||
|
<picture>
|
||||||
|
<source
|
||||||
|
media="(prefers-color-scheme: dark)"
|
||||||
|
srcSet="https://api.star-history.com/svg?repos=TecharoHQ/anubis&type=Date&theme=dark"
|
||||||
|
/>
|
||||||
|
<source
|
||||||
|
media="(prefers-color-scheme: light)"
|
||||||
|
srcSet="https://api.star-history.com/svg?repos=TecharoHQ/anubis&type=Date"
|
||||||
|
/>
|
||||||
|
<img
|
||||||
|
alt="Star History Chart"
|
||||||
|
src="https://api.star-history.com/svg?repos=TecharoHQ/anubis&type=Date"
|
||||||
|
/>
|
||||||
|
</picture>
|
||||||
|
</a>
|
||||||
|
|
||||||
|
## Packaging Status
|
||||||
|
|
||||||
|
[](https://repology.org/project/anubis-anti-crawler/versions)
|
||||||
|
|
||||||
|
## Contributors
|
||||||
|
|
||||||
|
<a href="https://github.com/TecharoHQ/anubis/graphs/contributors">
|
||||||
|
<img src="https://contrib.rocks/image?repo=TecharoHQ/anubis" />
|
||||||
|
</a>
|
||||||
|
|
||||||
|
Made with [contrib.rocks](https://contrib.rocks).
|
||||||
|
|||||||
37
docs/docs/user/known-instances.md
Normal file
@@ -0,0 +1,37 @@
|
|||||||
|
---
|
||||||
|
title: List of known websites using Anubis
|
||||||
|
---
|
||||||
|
|
||||||
|
This page contains a non-exhaustive list with all websites using Anubis.
|
||||||
|
|
||||||
|
- <details>
|
||||||
|
<summary>The Linux Foundation</summary>
|
||||||
|
- https://git.kernel.org/
|
||||||
|
- https://lore.kernel.org/
|
||||||
|
</details>
|
||||||
|
- https://gitlab.gnome.org/
|
||||||
|
- https://scioly.org/
|
||||||
|
- https://bugs.winehq.org/
|
||||||
|
- https://svnweb.freebsd.org/
|
||||||
|
- https://trac.ffmpeg.org/
|
||||||
|
- https://git.sr.ht/
|
||||||
|
- https://xeiaso.net/
|
||||||
|
- https://source.puri.sm/
|
||||||
|
- https://git.enlightenment.org/
|
||||||
|
- https://superlove.sayitditto.net/
|
||||||
|
- https://linktaco.com/
|
||||||
|
- https://jaredallard.dev/
|
||||||
|
- https://dev.sanctum.geek.nz/
|
||||||
|
- https://canine.tools/
|
||||||
|
- https://git.lupancham.net/
|
||||||
|
- https://dev.haiku-os.org
|
||||||
|
- http://code.hackerspace.pl/
|
||||||
|
- https://wiki.archlinux.org/
|
||||||
|
- https://git.devuan.org/
|
||||||
|
- https://hydra.nixos.org/
|
||||||
|
- https://hydra.nixos.org/
|
||||||
|
- https://codeberg.org/
|
||||||
|
- <details>
|
||||||
|
<summary>The United Nations</summary>
|
||||||
|
- https://policytoolbox.iiep.unesco.org/
|
||||||
|
</details>
|
||||||
@@ -45,7 +45,7 @@ const config: Config = {
|
|||||||
// Please change this to your repo.
|
// Please change this to your repo.
|
||||||
// Remove this to remove the "edit this page" links.
|
// Remove this to remove the "edit this page" links.
|
||||||
editUrl:
|
editUrl:
|
||||||
'https://github.com/facebook/docusaurus/tree/main/packages/create-docusaurus/templates/shared/',
|
'https://github.com/TecharoHQ/anubis/tree/main/docs/',
|
||||||
},
|
},
|
||||||
// blog: {
|
// blog: {
|
||||||
// showReadingTime: true,
|
// showReadingTime: true,
|
||||||
@@ -70,13 +70,16 @@ const config: Config = {
|
|||||||
],
|
],
|
||||||
|
|
||||||
themeConfig: {
|
themeConfig: {
|
||||||
|
colorMode: {
|
||||||
|
respectPrefersColorScheme: true,
|
||||||
|
},
|
||||||
// Replace with your project's social card
|
// Replace with your project's social card
|
||||||
image: 'img/docusaurus-social-card.jpg',
|
image: 'img/docusaurus-social-card.jpg',
|
||||||
navbar: {
|
navbar: {
|
||||||
title: 'Anubis',
|
title: 'Anubis',
|
||||||
logo: {
|
logo: {
|
||||||
alt: 'A happy jackal woman with brown hair and red eyes',
|
alt: 'A happy jackal woman with brown hair and red eyes',
|
||||||
src: 'img/happy.webp',
|
src: 'img/favicon.webp',
|
||||||
},
|
},
|
||||||
items: [
|
items: [
|
||||||
{
|
{
|
||||||
@@ -125,10 +128,6 @@ const config: Config = {
|
|||||||
{
|
{
|
||||||
title: 'More',
|
title: 'More',
|
||||||
items: [
|
items: [
|
||||||
{
|
|
||||||
label: 'Blog',
|
|
||||||
to: '/blog',
|
|
||||||
},
|
|
||||||
{
|
{
|
||||||
label: 'GitHub',
|
label: 'GitHub',
|
||||||
href: 'https://github.com/TecharoHQ/anubis',
|
href: 'https://github.com/TecharoHQ/anubis',
|
||||||
|
|||||||
18
docs/package-lock.json
generated
@@ -8512,9 +8512,9 @@
|
|||||||
}
|
}
|
||||||
},
|
},
|
||||||
"node_modules/estree-util-value-to-estree": {
|
"node_modules/estree-util-value-to-estree": {
|
||||||
"version": "3.3.2",
|
"version": "3.3.3",
|
||||||
"resolved": "https://registry.npmjs.org/estree-util-value-to-estree/-/estree-util-value-to-estree-3.3.2.tgz",
|
"resolved": "https://registry.npmjs.org/estree-util-value-to-estree/-/estree-util-value-to-estree-3.3.3.tgz",
|
||||||
"integrity": "sha512-hYH1aSvQI63Cvq3T3loaem6LW4u72F187zW4FHpTrReJSm6W66vYTFNO1vH/chmcOulp1HlAj1pxn8Ag0oXI5Q==",
|
"integrity": "sha512-Db+m1WSD4+mUO7UgMeKkAwdbfNWwIxLt48XF2oFU9emPfXkIu+k5/nlOj313v7wqtAPo0f9REhUvznFrPkG8CQ==",
|
||||||
"license": "MIT",
|
"license": "MIT",
|
||||||
"dependencies": {
|
"dependencies": {
|
||||||
"@types/estree": "^1.0.0"
|
"@types/estree": "^1.0.0"
|
||||||
@@ -10093,9 +10093,9 @@
|
|||||||
}
|
}
|
||||||
},
|
},
|
||||||
"node_modules/http-proxy-middleware": {
|
"node_modules/http-proxy-middleware": {
|
||||||
"version": "2.0.7",
|
"version": "2.0.9",
|
||||||
"resolved": "https://registry.npmjs.org/http-proxy-middleware/-/http-proxy-middleware-2.0.7.tgz",
|
"resolved": "https://registry.npmjs.org/http-proxy-middleware/-/http-proxy-middleware-2.0.9.tgz",
|
||||||
"integrity": "sha512-fgVY8AV7qU7z/MmXJ/rxwbrtQH4jBQ9m7kp3llF0liB7glmFeVZFBepQb32T3y8n8k2+AEYuMPCpinYW+/CuRA==",
|
"integrity": "sha512-c1IyJYLYppU574+YI7R4QyX2ystMtVXZwIdzazUIPIJsHuWNd+mho2j+bKoHftndicGj9yh+xjd+l0yj7VeT1Q==",
|
||||||
"license": "MIT",
|
"license": "MIT",
|
||||||
"dependencies": {
|
"dependencies": {
|
||||||
"@types/http-proxy": "^1.17.8",
|
"@types/http-proxy": "^1.17.8",
|
||||||
@@ -10184,9 +10184,9 @@
|
|||||||
}
|
}
|
||||||
},
|
},
|
||||||
"node_modules/image-size": {
|
"node_modules/image-size": {
|
||||||
"version": "1.2.0",
|
"version": "1.2.1",
|
||||||
"resolved": "https://registry.npmjs.org/image-size/-/image-size-1.2.0.tgz",
|
"resolved": "https://registry.npmjs.org/image-size/-/image-size-1.2.1.tgz",
|
||||||
"integrity": "sha512-4S8fwbO6w3GeCVN6OPtA9I5IGKkcDMPcKndtUlpJuCwu7JLjtj7JZpwqLuyY2nrmQT3AWsCJLSKPsc2mPBSl3w==",
|
"integrity": "sha512-rH+46sQJ2dlwfjfhCyNx5thzrv+dtmBIhPHk0zgRUukHzZ/kRueTJXoYYsclBaKcSMBWuGbOFXtioLpzTb5euw==",
|
||||||
"license": "MIT",
|
"license": "MIT",
|
||||||
"dependencies": {
|
"dependencies": {
|
||||||
"queue": "6.0.2"
|
"queue": "6.0.2"
|
||||||
|
|||||||
@@ -6,13 +6,13 @@
|
|||||||
|
|
||||||
/* You can override the default Infima variables here. */
|
/* You can override the default Infima variables here. */
|
||||||
:root {
|
:root {
|
||||||
--ifm-color-primary: #2e8555;
|
--ifm-color-primary: #ff5630;
|
||||||
--ifm-color-primary-dark: #29784c;
|
--ifm-color-primary-dark: #ad422a;
|
||||||
--ifm-color-primary-darker: #277148;
|
--ifm-color-primary-darker: #8f3521;
|
||||||
--ifm-color-primary-darkest: #205d3b;
|
--ifm-color-primary-darkest: #592115;
|
||||||
--ifm-color-primary-light: #33925d;
|
--ifm-color-primary-light: #ff7152;
|
||||||
--ifm-color-primary-lighter: #359962;
|
--ifm-color-primary-lighter: #ff9178;
|
||||||
--ifm-color-primary-lightest: #3cad6e;
|
--ifm-color-primary-lightest: #ffb09e;
|
||||||
--ifm-code-font-size: 95%;
|
--ifm-code-font-size: 95%;
|
||||||
--docusaurus-highlighted-code-line-bg: rgba(0, 0, 0, 0.1);
|
--docusaurus-highlighted-code-line-bg: rgba(0, 0, 0, 0.1);
|
||||||
--code-block-diff-add-line-color: #ccffd8;
|
--code-block-diff-add-line-color: #ccffd8;
|
||||||
@@ -21,16 +21,17 @@
|
|||||||
|
|
||||||
/* For readability concerns, you should choose a lighter palette in dark mode. */
|
/* For readability concerns, you should choose a lighter palette in dark mode. */
|
||||||
[data-theme="dark"] {
|
[data-theme="dark"] {
|
||||||
--ifm-color-primary: #25c2a0;
|
--ifm-color-primary: #e64a19;
|
||||||
--ifm-color-primary-dark: #21af90;
|
--ifm-color-primary-dark: #b73a12;
|
||||||
--ifm-color-primary-darker: #1fa588;
|
--ifm-color-primary-darker: #8c2c0e;
|
||||||
--ifm-color-primary-darkest: #1a8870;
|
--ifm-color-primary-darkest: #5a1e0a;
|
||||||
--ifm-color-primary-light: #29d5b0;
|
--ifm-color-primary-light: #eb6d45;
|
||||||
--ifm-color-primary-lighter: #32d8b4;
|
--ifm-color-primary-lighter: #f09178;
|
||||||
--ifm-color-primary-lightest: #4fddbf;
|
--ifm-color-primary-lightest: #f5b5a6;
|
||||||
--docusaurus-highlighted-code-line-bg: rgba(0, 0, 0, 0.3);
|
--ifm-code-font-size: 95%;
|
||||||
--code-block-diff-add-line-color: #216932;
|
--docusaurus-highlighted-code-line-bg: rgba(0, 0, 0, 0.25);
|
||||||
--code-block-diff-remove-line-color: #8b423b;
|
--code-block-diff-add-line-color: #2d5a2c;
|
||||||
|
--code-block-diff-remove-line-color: #5a2d2c;
|
||||||
}
|
}
|
||||||
|
|
||||||
.code-block-diff-add-line {
|
.code-block-diff-add-line {
|
||||||
|
|||||||
BIN
docs/static/img/android-chrome-512x512.png
vendored
|
Before Width: | Height: | Size: 222 KiB After Width: | Height: | Size: 106 KiB |
BIN
docs/static/img/favicon.ico
vendored
|
Before Width: | Height: | Size: 15 KiB After Width: | Height: | Size: 15 KiB |
BIN
docs/static/img/favicon.webp
vendored
Normal file
|
After Width: | Height: | Size: 5.9 KiB |
BIN
docs/static/img/happy.webp
vendored
|
Before Width: | Height: | Size: 58 KiB After Width: | Height: | Size: 30 KiB |
BIN
docs/static/img/sponsors/distrust-logo.webp
vendored
Normal file
|
After Width: | Height: | Size: 2.0 KiB |
45
go.mod
@@ -1,53 +1,70 @@
|
|||||||
module github.com/TecharoHQ/anubis
|
module github.com/TecharoHQ/anubis
|
||||||
|
|
||||||
go 1.24.1
|
go 1.24
|
||||||
|
|
||||||
require (
|
require (
|
||||||
github.com/a-h/templ v0.3.833
|
github.com/a-h/templ v0.3.857
|
||||||
github.com/facebookgo/flagenv v0.0.0-20160425205200-fcd59fca7456
|
github.com/facebookgo/flagenv v0.0.0-20160425205200-fcd59fca7456
|
||||||
github.com/golang-jwt/jwt/v5 v5.2.2
|
github.com/golang-jwt/jwt/v5 v5.2.2
|
||||||
github.com/playwright-community/playwright-go v0.5001.0
|
github.com/google/cel-go v0.25.0
|
||||||
github.com/prometheus/client_golang v1.21.1
|
github.com/playwright-community/playwright-go v0.5101.0
|
||||||
|
github.com/prometheus/client_golang v1.22.0
|
||||||
github.com/sebest/xff v0.0.0-20210106013422-671bd2870b3a
|
github.com/sebest/xff v0.0.0-20210106013422-671bd2870b3a
|
||||||
|
github.com/shirou/gopsutil/v4 v4.25.3
|
||||||
github.com/yl2chen/cidranger v1.0.2
|
github.com/yl2chen/cidranger v1.0.2
|
||||||
|
golang.org/x/net v0.39.0
|
||||||
|
k8s.io/apimachinery v0.32.3
|
||||||
)
|
)
|
||||||
|
|
||||||
require (
|
require (
|
||||||
github.com/PuerkitoBio/goquery v1.10.1 // indirect
|
cel.dev/expr v0.23.1 // indirect
|
||||||
|
github.com/BurntSushi/toml v1.4.1-0.20240526193622-a339e1f7089c // indirect
|
||||||
github.com/a-h/parse v0.0.0-20250122154542-74294addb73e // indirect
|
github.com/a-h/parse v0.0.0-20250122154542-74294addb73e // indirect
|
||||||
github.com/andybalholm/brotli v1.1.0 // indirect
|
github.com/andybalholm/brotli v1.1.0 // indirect
|
||||||
github.com/andybalholm/cascadia v1.3.3 // indirect
|
github.com/antlr4-go/antlr/v4 v4.13.0 // indirect
|
||||||
github.com/beorn7/perks v1.0.1 // indirect
|
github.com/beorn7/perks v1.0.1 // indirect
|
||||||
github.com/cenkalti/backoff/v4 v4.3.0 // indirect
|
github.com/cenkalti/backoff/v4 v4.3.0 // indirect
|
||||||
github.com/cespare/xxhash/v2 v2.3.0 // indirect
|
github.com/cespare/xxhash/v2 v2.3.0 // indirect
|
||||||
github.com/cli/browser v1.3.0 // indirect
|
github.com/cli/browser v1.3.0 // indirect
|
||||||
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
|
|
||||||
github.com/deckarep/golang-set/v2 v2.6.0 // indirect
|
github.com/deckarep/golang-set/v2 v2.6.0 // indirect
|
||||||
|
github.com/ebitengine/purego v0.8.2 // indirect
|
||||||
github.com/facebookgo/ensure v0.0.0-20160127193407-b4ab57deab51 // indirect
|
github.com/facebookgo/ensure v0.0.0-20160127193407-b4ab57deab51 // indirect
|
||||||
github.com/facebookgo/stack v0.0.0-20160209184415-751773369052 // indirect
|
github.com/facebookgo/stack v0.0.0-20160209184415-751773369052 // indirect
|
||||||
github.com/facebookgo/subset v0.0.0-20150612182917-8dac2c3c4870 // indirect
|
github.com/facebookgo/subset v0.0.0-20150612182917-8dac2c3c4870 // indirect
|
||||||
github.com/fatih/color v1.16.0 // indirect
|
github.com/fatih/color v1.16.0 // indirect
|
||||||
github.com/fsnotify/fsnotify v1.7.0 // indirect
|
github.com/fsnotify/fsnotify v1.7.0 // indirect
|
||||||
github.com/go-jose/go-jose/v3 v3.0.4 // indirect
|
github.com/go-jose/go-jose/v3 v3.0.4 // indirect
|
||||||
|
github.com/go-ole/go-ole v1.2.6 // indirect
|
||||||
github.com/go-stack/stack v1.8.1 // indirect
|
github.com/go-stack/stack v1.8.1 // indirect
|
||||||
github.com/klauspost/compress v1.17.11 // indirect
|
github.com/kr/text v0.2.0 // indirect
|
||||||
github.com/mattn/go-colorable v0.1.13 // indirect
|
github.com/mattn/go-colorable v0.1.13 // indirect
|
||||||
github.com/mattn/go-isatty v0.0.20 // indirect
|
github.com/mattn/go-isatty v0.0.20 // indirect
|
||||||
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
|
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
|
||||||
github.com/natefinch/atomic v1.0.1 // indirect
|
github.com/natefinch/atomic v1.0.1 // indirect
|
||||||
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
|
github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c // indirect
|
||||||
github.com/prometheus/client_model v0.6.1 // indirect
|
github.com/prometheus/client_model v0.6.1 // indirect
|
||||||
github.com/prometheus/common v0.62.0 // indirect
|
github.com/prometheus/common v0.62.0 // indirect
|
||||||
github.com/prometheus/procfs v0.15.1 // indirect
|
github.com/prometheus/procfs v0.15.1 // indirect
|
||||||
|
github.com/stoewer/go-strcase v1.2.0 // indirect
|
||||||
|
github.com/yusufpapurcu/wmi v1.2.4 // indirect
|
||||||
|
golang.org/x/exp v0.0.0-20231110203233-9a3e6036ecaa // indirect
|
||||||
|
golang.org/x/exp/typeparams v0.0.0-20231108232855-2478ac86f678 // indirect
|
||||||
golang.org/x/mod v0.24.0 // indirect
|
golang.org/x/mod v0.24.0 // indirect
|
||||||
golang.org/x/net v0.37.0 // indirect
|
golang.org/x/sync v0.13.0 // indirect
|
||||||
golang.org/x/sync v0.12.0 // indirect
|
golang.org/x/sys v0.32.0 // indirect
|
||||||
golang.org/x/sys v0.31.0 // indirect
|
golang.org/x/text v0.24.0 // indirect
|
||||||
golang.org/x/tools v0.31.0 // indirect
|
golang.org/x/tools v0.32.0 // indirect
|
||||||
google.golang.org/protobuf v1.36.4 // indirect
|
google.golang.org/genproto/googleapis/api v0.0.0-20240826202546-f6391c0de4c7 // indirect
|
||||||
|
google.golang.org/genproto/googleapis/rpc v0.0.0-20240826202546-f6391c0de4c7 // indirect
|
||||||
|
google.golang.org/protobuf v1.36.5 // indirect
|
||||||
|
honnef.co/go/tools v0.6.1 // indirect
|
||||||
|
sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3 // indirect
|
||||||
|
sigs.k8s.io/yaml v1.4.0 // indirect
|
||||||
)
|
)
|
||||||
|
|
||||||
tool (
|
tool (
|
||||||
github.com/a-h/templ/cmd/templ
|
github.com/a-h/templ/cmd/templ
|
||||||
|
golang.org/x/tools/cmd/goimports
|
||||||
golang.org/x/tools/cmd/stringer
|
golang.org/x/tools/cmd/stringer
|
||||||
|
honnef.co/go/tools/cmd/staticcheck
|
||||||
)
|
)
|
||||||
|
|||||||
120
go.sum
@@ -1,13 +1,15 @@
|
|||||||
github.com/PuerkitoBio/goquery v1.10.1 h1:Y8JGYUkXWTGRB6Ars3+j3kN0xg1YqqlwvdTV8WTFQcU=
|
cel.dev/expr v0.23.1 h1:K4KOtPCJQjVggkARsjG9RWXP6O4R73aHeJMa/dmCQQg=
|
||||||
github.com/PuerkitoBio/goquery v1.10.1/go.mod h1:IYiHrOMps66ag56LEH7QYDDupKXyo5A8qrjIx3ZtujY=
|
cel.dev/expr v0.23.1/go.mod h1:hLPLo1W4QUmuYdA72RBX06QTs6MXw941piREPl3Yfiw=
|
||||||
|
github.com/BurntSushi/toml v1.4.1-0.20240526193622-a339e1f7089c h1:pxW6RcqyfI9/kWtOwnv/G+AzdKuy2ZrqINhenH4HyNs=
|
||||||
|
github.com/BurntSushi/toml v1.4.1-0.20240526193622-a339e1f7089c/go.mod h1:ukJfTF/6rtPPRCnwkur4qwRxa8vTRFBF0uk2lLoLwho=
|
||||||
github.com/a-h/parse v0.0.0-20250122154542-74294addb73e h1:HjVbSQHy+dnlS6C3XajZ69NYAb5jbGNfHanvm1+iYlo=
|
github.com/a-h/parse v0.0.0-20250122154542-74294addb73e h1:HjVbSQHy+dnlS6C3XajZ69NYAb5jbGNfHanvm1+iYlo=
|
||||||
github.com/a-h/parse v0.0.0-20250122154542-74294addb73e/go.mod h1:3mnrkvGpurZ4ZrTDbYU84xhwXW2TjTKShSwjRi2ihfQ=
|
github.com/a-h/parse v0.0.0-20250122154542-74294addb73e/go.mod h1:3mnrkvGpurZ4ZrTDbYU84xhwXW2TjTKShSwjRi2ihfQ=
|
||||||
github.com/a-h/templ v0.3.833 h1:L/KOk/0VvVTBegtE0fp2RJQiBm7/52Zxv5fqlEHiQUU=
|
github.com/a-h/templ v0.3.857 h1:6EqcJuGZW4OL+2iZ3MD+NnIcG7nGkaQeF2Zq5kf9ZGg=
|
||||||
github.com/a-h/templ v0.3.833/go.mod h1:cAu4AiZhtJfBjMY0HASlyzvkrtjnHWPeEsyGK2YYmfk=
|
github.com/a-h/templ v0.3.857/go.mod h1:qhrhAkRFubE7khxLZHsBFHfX+gWwVNKbzKeF9GlPV4M=
|
||||||
github.com/andybalholm/brotli v1.1.0 h1:eLKJA0d02Lf0mVpIDgYnqXcUn0GqVmEFny3VuID1U3M=
|
github.com/andybalholm/brotli v1.1.0 h1:eLKJA0d02Lf0mVpIDgYnqXcUn0GqVmEFny3VuID1U3M=
|
||||||
github.com/andybalholm/brotli v1.1.0/go.mod h1:sms7XGricyQI9K10gOSf56VKKWS4oLer58Q+mhRPtnY=
|
github.com/andybalholm/brotli v1.1.0/go.mod h1:sms7XGricyQI9K10gOSf56VKKWS4oLer58Q+mhRPtnY=
|
||||||
github.com/andybalholm/cascadia v1.3.3 h1:AG2YHrzJIm4BZ19iwJ/DAua6Btl3IwJX+VI4kktS1LM=
|
github.com/antlr4-go/antlr/v4 v4.13.0 h1:lxCg3LAv+EUK6t1i0y1V6/SLeUi0eKEKdhQAlS8TVTI=
|
||||||
github.com/andybalholm/cascadia v1.3.3/go.mod h1:xNd9bqTn98Ln4DwST8/nG+H0yuB8Hmgu1YHNnWw0GeA=
|
github.com/antlr4-go/antlr/v4 v4.13.0/go.mod h1:pfChB/xh/Unjila75QW7+VU4TSnWnnk9UTnmpPaOR2g=
|
||||||
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
|
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
|
||||||
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
|
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
|
||||||
github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK343L8=
|
github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK343L8=
|
||||||
@@ -16,11 +18,14 @@ github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UF
|
|||||||
github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
|
github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
|
||||||
github.com/cli/browser v1.3.0 h1:LejqCrpWr+1pRqmEPDGnTZOjsMe7sehifLynZJuqJpo=
|
github.com/cli/browser v1.3.0 h1:LejqCrpWr+1pRqmEPDGnTZOjsMe7sehifLynZJuqJpo=
|
||||||
github.com/cli/browser v1.3.0/go.mod h1:HH8s+fOAxjhQoBUAsKuPCbqUuxZDhQ2/aD+SzsEfBTk=
|
github.com/cli/browser v1.3.0/go.mod h1:HH8s+fOAxjhQoBUAsKuPCbqUuxZDhQ2/aD+SzsEfBTk=
|
||||||
|
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
|
||||||
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||||
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM=
|
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM=
|
||||||
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||||
github.com/deckarep/golang-set/v2 v2.6.0 h1:XfcQbWM1LlMB8BsJ8N9vW5ehnnPVIw0je80NsVHagjM=
|
github.com/deckarep/golang-set/v2 v2.6.0 h1:XfcQbWM1LlMB8BsJ8N9vW5ehnnPVIw0je80NsVHagjM=
|
||||||
github.com/deckarep/golang-set/v2 v2.6.0/go.mod h1:VAky9rY/yGXJOLEDv3OMci+7wtDpOF4IN+y82NBOac4=
|
github.com/deckarep/golang-set/v2 v2.6.0/go.mod h1:VAky9rY/yGXJOLEDv3OMci+7wtDpOF4IN+y82NBOac4=
|
||||||
|
github.com/ebitengine/purego v0.8.2 h1:jPPGWs2sZ1UgOSgD2bClL0MJIqu58nOmIcBuXr62z1I=
|
||||||
|
github.com/ebitengine/purego v0.8.2/go.mod h1:iIjxzd6CiRiOG0UyXP+V1+jWqUXVjPKLAI0mRfJZTmQ=
|
||||||
github.com/facebookgo/ensure v0.0.0-20160127193407-b4ab57deab51 h1:0JZ+dUmQeA8IIVUMzysrX4/AKuQwWhV2dYQuPZdvdSQ=
|
github.com/facebookgo/ensure v0.0.0-20160127193407-b4ab57deab51 h1:0JZ+dUmQeA8IIVUMzysrX4/AKuQwWhV2dYQuPZdvdSQ=
|
||||||
github.com/facebookgo/ensure v0.0.0-20160127193407-b4ab57deab51/go.mod h1:Yg+htXGokKKdzcwhuNDwVvN+uBxDGXJ7G/VN1d8fa64=
|
github.com/facebookgo/ensure v0.0.0-20160127193407-b4ab57deab51/go.mod h1:Yg+htXGokKKdzcwhuNDwVvN+uBxDGXJ7G/VN1d8fa64=
|
||||||
github.com/facebookgo/flagenv v0.0.0-20160425205200-fcd59fca7456 h1:CkmB2l68uhvRlwOTPrwnuitSxi/S3Cg4L5QYOcL9MBc=
|
github.com/facebookgo/flagenv v0.0.0-20160425205200-fcd59fca7456 h1:CkmB2l68uhvRlwOTPrwnuitSxi/S3Cg4L5QYOcL9MBc=
|
||||||
@@ -35,15 +40,23 @@ github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nos
|
|||||||
github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM=
|
github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM=
|
||||||
github.com/go-jose/go-jose/v3 v3.0.4 h1:Wp5HA7bLQcKnf6YYao/4kpRpVMp/yf6+pJKV8WFSaNY=
|
github.com/go-jose/go-jose/v3 v3.0.4 h1:Wp5HA7bLQcKnf6YYao/4kpRpVMp/yf6+pJKV8WFSaNY=
|
||||||
github.com/go-jose/go-jose/v3 v3.0.4/go.mod h1:5b+7YgP7ZICgJDBdfjZaIt+H/9L9T/YQrVfLAMboGkQ=
|
github.com/go-jose/go-jose/v3 v3.0.4/go.mod h1:5b+7YgP7ZICgJDBdfjZaIt+H/9L9T/YQrVfLAMboGkQ=
|
||||||
|
github.com/go-ole/go-ole v1.2.6 h1:/Fpf6oFPoeFik9ty7siob0G6Ke8QvQEuVcuChpwXzpY=
|
||||||
|
github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0=
|
||||||
github.com/go-stack/stack v1.8.1 h1:ntEHSVwIt7PNXNpgPmVfMrNhLtgjlmnZha2kOpuRiDw=
|
github.com/go-stack/stack v1.8.1 h1:ntEHSVwIt7PNXNpgPmVfMrNhLtgjlmnZha2kOpuRiDw=
|
||||||
github.com/go-stack/stack v1.8.1/go.mod h1:dcoOX6HbPZSZptuspn9bctJ+N/CnF5gGygcUP3XYfe4=
|
github.com/go-stack/stack v1.8.1/go.mod h1:dcoOX6HbPZSZptuspn9bctJ+N/CnF5gGygcUP3XYfe4=
|
||||||
github.com/golang-jwt/jwt/v5 v5.2.2 h1:Rl4B7itRWVtYIHFrSNd7vhTiz9UpLdi6gZhZ3wEeDy8=
|
github.com/golang-jwt/jwt/v5 v5.2.2 h1:Rl4B7itRWVtYIHFrSNd7vhTiz9UpLdi6gZhZ3wEeDy8=
|
||||||
github.com/golang-jwt/jwt/v5 v5.2.2/go.mod h1:pqrtFR0X4osieyHYxtmOUWsAWrfe1Q5UVIyoH402zdk=
|
github.com/golang-jwt/jwt/v5 v5.2.2/go.mod h1:pqrtFR0X4osieyHYxtmOUWsAWrfe1Q5UVIyoH402zdk=
|
||||||
|
github.com/google/cel-go v0.25.0 h1:jsFw9Fhn+3y2kBbltZR4VEz5xKkcIFRPDnuEzAGv5GY=
|
||||||
|
github.com/google/cel-go v0.25.0/go.mod h1:hjEb6r5SuOSlhCHmFoLzu8HGCERvIsDAbxDAyNU/MmI=
|
||||||
github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
|
github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
|
||||||
github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
|
github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8=
|
||||||
github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
|
github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU=
|
||||||
github.com/klauspost/compress v1.17.11 h1:In6xLpyWOi1+C7tXUUWv2ot1QvBjxevKAaI6IXrJmUc=
|
github.com/klauspost/compress v1.18.0 h1:c/Cqfb0r+Yi+JtIEq73FWXVkRonBlf0CRNYc8Zttxdo=
|
||||||
github.com/klauspost/compress v1.17.11/go.mod h1:pMDklpSncoRMuLFrf1W9Ss9KT+0rH90U12bZKk7uwG0=
|
github.com/klauspost/compress v1.18.0/go.mod h1:2Pp+KzxcywXVXMr50+X0Q/Lsb43OQHYWRCY2AiWywWQ=
|
||||||
|
github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
|
||||||
|
github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
|
||||||
|
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
|
||||||
|
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
|
||||||
github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc=
|
github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc=
|
||||||
github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw=
|
github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw=
|
||||||
github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA=
|
github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA=
|
||||||
@@ -57,40 +70,49 @@ github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq
|
|||||||
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
|
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
|
||||||
github.com/natefinch/atomic v1.0.1 h1:ZPYKxkqQOx3KZ+RsbnP/YsgvxWQPGxjC0oBt2AhwV0A=
|
github.com/natefinch/atomic v1.0.1 h1:ZPYKxkqQOx3KZ+RsbnP/YsgvxWQPGxjC0oBt2AhwV0A=
|
||||||
github.com/natefinch/atomic v1.0.1/go.mod h1:N/D/ELrljoqDyT3rZrsUmtsuzvHkeB/wWjHV22AZRbM=
|
github.com/natefinch/atomic v1.0.1/go.mod h1:N/D/ELrljoqDyT3rZrsUmtsuzvHkeB/wWjHV22AZRbM=
|
||||||
github.com/playwright-community/playwright-go v0.5001.0 h1:EY3oB+rU9cUp6CLHguWE8VMZTwAg+83Yyb7dQqEmGLg=
|
github.com/playwright-community/playwright-go v0.5101.0 h1:gVCMZThDO76LJ/aCI27lpB8hEAWhZszeS0YB+oTxJp0=
|
||||||
github.com/playwright-community/playwright-go v0.5001.0/go.mod h1:kBNWs/w2aJ2ZUp1wEOOFLXgOqvppFngM5OS+qyhl+ZM=
|
github.com/playwright-community/playwright-go v0.5101.0/go.mod h1:kBNWs/w2aJ2ZUp1wEOOFLXgOqvppFngM5OS+qyhl+ZM=
|
||||||
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
||||||
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U=
|
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U=
|
||||||
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
||||||
github.com/prometheus/client_golang v1.21.1 h1:DOvXXTqVzvkIewV/CDPFdejpMCGeMcbGCQ8YOmu+Ibk=
|
github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c h1:ncq/mPwQF4JjgDlrVEn3C11VoGHZN7m8qihwgMEtzYw=
|
||||||
github.com/prometheus/client_golang v1.21.1/go.mod h1:U9NM32ykUErtVBxdvD3zfi+EuFkkaBvMb09mIfe0Zgg=
|
github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE=
|
||||||
|
github.com/prometheus/client_golang v1.22.0 h1:rb93p9lokFEsctTys46VnV1kLCDpVZ0a/Y92Vm0Zc6Q=
|
||||||
|
github.com/prometheus/client_golang v1.22.0/go.mod h1:R7ljNsLXhuQXYZYtw6GAE9AZg8Y7vEW5scdCXrWRXC0=
|
||||||
github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E=
|
github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E=
|
||||||
github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY=
|
github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY=
|
||||||
github.com/prometheus/common v0.62.0 h1:xasJaQlnWAeyHdUBeGjXmutelfJHWMRr+Fg4QszZ2Io=
|
github.com/prometheus/common v0.62.0 h1:xasJaQlnWAeyHdUBeGjXmutelfJHWMRr+Fg4QszZ2Io=
|
||||||
github.com/prometheus/common v0.62.0/go.mod h1:vyBcEuLSvWos9B1+CyL7JZ2up+uFzXhkqml0W5zIY1I=
|
github.com/prometheus/common v0.62.0/go.mod h1:vyBcEuLSvWos9B1+CyL7JZ2up+uFzXhkqml0W5zIY1I=
|
||||||
github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc=
|
github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc=
|
||||||
github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk=
|
github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk=
|
||||||
|
github.com/rogpeppe/go-internal v1.11.0 h1:cWPaGQEPrBb5/AsnsZesgZZ9yb1OQ+GOISoDNXVBh4M=
|
||||||
|
github.com/rogpeppe/go-internal v1.11.0/go.mod h1:ddIwULY96R17DhadqLgMfk9H9tvdUzkipdSkR5nkCZA=
|
||||||
github.com/sebest/xff v0.0.0-20210106013422-671bd2870b3a h1:iLcLb5Fwwz7g/DLK89F+uQBDeAhHhwdzB5fSlVdhGcM=
|
github.com/sebest/xff v0.0.0-20210106013422-671bd2870b3a h1:iLcLb5Fwwz7g/DLK89F+uQBDeAhHhwdzB5fSlVdhGcM=
|
||||||
github.com/sebest/xff v0.0.0-20210106013422-671bd2870b3a/go.mod h1:wozgYq9WEBQBaIJe4YZ0qTSFAMxmcwBhQH0fO0R34Z0=
|
github.com/sebest/xff v0.0.0-20210106013422-671bd2870b3a/go.mod h1:wozgYq9WEBQBaIJe4YZ0qTSFAMxmcwBhQH0fO0R34Z0=
|
||||||
|
github.com/shirou/gopsutil/v4 v4.25.3 h1:SeA68lsu8gLggyMbmCn8cmp97V1TI9ld9sVzAUcKcKE=
|
||||||
|
github.com/shirou/gopsutil/v4 v4.25.3/go.mod h1:xbuxyoZj+UsgnZrENu3lQivsngRR5BdjbJwf2fv4szA=
|
||||||
|
github.com/stoewer/go-strcase v1.2.0 h1:Z2iHWqGXH00XYgqDmNgQbIBxf3wrNq0F3feEy0ainaU=
|
||||||
|
github.com/stoewer/go-strcase v1.2.0/go.mod h1:IBiWB2sKIp3wVVQ3Y035++gc+knqhUQag1KpM8ahLw8=
|
||||||
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||||
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
|
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
|
||||||
|
github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
|
||||||
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
||||||
github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA=
|
github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA=
|
||||||
github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
|
github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
|
||||||
github.com/yl2chen/cidranger v1.0.2 h1:lbOWZVCG1tCRX4u24kuM1Tb4nHqWkDxwLdoS+SevawU=
|
github.com/yl2chen/cidranger v1.0.2 h1:lbOWZVCG1tCRX4u24kuM1Tb4nHqWkDxwLdoS+SevawU=
|
||||||
github.com/yl2chen/cidranger v1.0.2/go.mod h1:9U1yz7WPYDwf0vpNWFaeRh0bjwz5RVgRy/9UEQfHl0g=
|
github.com/yl2chen/cidranger v1.0.2/go.mod h1:9U1yz7WPYDwf0vpNWFaeRh0bjwz5RVgRy/9UEQfHl0g=
|
||||||
github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
|
github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
|
||||||
|
github.com/yusufpapurcu/wmi v1.2.4 h1:zFUKzehAFReQwLys1b/iSMl+JQGSCSjtVqQn9bBrPo0=
|
||||||
|
github.com/yusufpapurcu/wmi v1.2.4/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0=
|
||||||
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
|
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
|
||||||
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
|
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
|
||||||
golang.org/x/crypto v0.13.0/go.mod h1:y6Z2r+Rw4iayiXXAIxJIDAJ1zMW4yaTpebo8fPOliYc=
|
|
||||||
golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU=
|
golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU=
|
||||||
golang.org/x/crypto v0.23.0/go.mod h1:CKFgDieR+mRhux2Lsu27y0fO304Db0wZe70UKqHu0v8=
|
golang.org/x/exp v0.0.0-20231110203233-9a3e6036ecaa h1:FRnLl4eNAQl8hwxVVC17teOw8kdjVDVAiFMtgUdTSRQ=
|
||||||
golang.org/x/crypto v0.31.0/go.mod h1:kDsLvtWBEx7MV9tJOj9bnXsPbxwJQ6csT/x4KIN4Ssk=
|
golang.org/x/exp v0.0.0-20231110203233-9a3e6036ecaa/go.mod h1:zk2irFbV9DP96SEBUUAy67IdHUaZuSnrz1n472HUCLE=
|
||||||
|
golang.org/x/exp/typeparams v0.0.0-20231108232855-2478ac86f678 h1:1P7xPZEwZMoBoz0Yze5Nx2/4pxj6nw9ZqHWXqP0iRgQ=
|
||||||
|
golang.org/x/exp/typeparams v0.0.0-20231108232855-2478ac86f678/go.mod h1:AbB0pIl9nAr9wVwH+Z2ZpaocVmF5I4GyWCDIsVjR0bk=
|
||||||
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
|
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
|
||||||
golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
|
golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
|
||||||
golang.org/x/mod v0.12.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
|
|
||||||
golang.org/x/mod v0.15.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
|
|
||||||
golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
|
|
||||||
golang.org/x/mod v0.24.0 h1:ZfthKaKaT4NrhGVZHO1/WDTwGES4De8KtWO0SIbNJMU=
|
golang.org/x/mod v0.24.0 h1:ZfthKaKaT4NrhGVZHO1/WDTwGES4De8KtWO0SIbNJMU=
|
||||||
golang.org/x/mod v0.24.0/go.mod h1:IXM97Txy2VM4PJ3gI61r1YEk/gAj6zAHN3AdZt6S9Ww=
|
golang.org/x/mod v0.24.0/go.mod h1:IXM97Txy2VM4PJ3gI61r1YEk/gAj6zAHN3AdZt6S9Ww=
|
||||||
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||||
@@ -98,23 +120,17 @@ golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v
|
|||||||
golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
|
golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
|
||||||
golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
|
golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
|
||||||
golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg=
|
golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg=
|
||||||
golang.org/x/net v0.15.0/go.mod h1:idbUs1IY1+zTqbi8yxTbhexhEEk5ur9LInksu6HrEpk=
|
golang.org/x/net v0.39.0 h1:ZCu7HMWDxpXpaiKdhzIfaltL9Lp31x/3fCP11bc6/fY=
|
||||||
golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44=
|
golang.org/x/net v0.39.0/go.mod h1:X7NRbYVEA+ewNkCNyJ513WmMdQ3BineSwVtN2zD/d+E=
|
||||||
golang.org/x/net v0.25.0/go.mod h1:JkAGAh7GEvH74S6FOH42FLoXpXbE/aqXSrIQjXgsiwM=
|
|
||||||
golang.org/x/net v0.33.0/go.mod h1:HXLR5J+9DxmrqMwG9qjGCxZ+zKXxBru04zlTvWlWuN4=
|
|
||||||
golang.org/x/net v0.37.0 h1:1zLorHbz+LYj7MQlSf1+2tPIIgibq2eL5xkrGk6f+2c=
|
|
||||||
golang.org/x/net v0.37.0/go.mod h1:ivrbrMbzFq5J41QOQh0siUuly180yBYtLp+CKbEaFx8=
|
|
||||||
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||||
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||||
golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||||
golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y=
|
golang.org/x/sync v0.13.0 h1:AauUjRAJ9OSnvULf/ARrrVywoJDy0YS2AwQ98I37610=
|
||||||
golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
|
golang.org/x/sync v0.13.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA=
|
||||||
golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
|
|
||||||
golang.org/x/sync v0.10.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
|
|
||||||
golang.org/x/sync v0.12.0 h1:MHc5BpPuC30uJk597Ri8TV3CNZcTLu6B6z4lJy+g6Jw=
|
|
||||||
golang.org/x/sync v0.12.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA=
|
|
||||||
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||||
|
golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
|
golang.org/x/sys v0.0.0-20201204225414-ed752295db88/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||||
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||||
golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||||
@@ -122,43 +138,47 @@ golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBc
|
|||||||
golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||||
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||||
golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||||
golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
|
||||||
golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
|
golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
|
||||||
golang.org/x/sys v0.20.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
|
golang.org/x/sys v0.32.0 h1:s77OFDvIQeibCmezSnk/q6iAfkdiQaJi4VzroCFrN20=
|
||||||
golang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
|
golang.org/x/sys v0.32.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k=
|
||||||
golang.org/x/sys v0.31.0 h1:ioabZlmFYtWhL+TRYpcnNlLwhyxaM9kWTDEmfnprqik=
|
|
||||||
golang.org/x/sys v0.31.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k=
|
|
||||||
golang.org/x/telemetry v0.0.0-20240228155512-f48c80bd79b2/go.mod h1:TeRTkGYfJXctD9OcfyVLyj2J3IxLnKwHJR8f4D8a3YE=
|
|
||||||
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
|
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
|
||||||
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
|
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
|
||||||
golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k=
|
golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k=
|
||||||
golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo=
|
golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo=
|
||||||
golang.org/x/term v0.12.0/go.mod h1:owVbMEjm3cBLCHdkQu9b1opXd4ETQWc3BhuQGKgXgvU=
|
|
||||||
golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk=
|
golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk=
|
||||||
golang.org/x/term v0.20.0/go.mod h1:8UkIAJTvZgivsXaD6/pH6U9ecQzZ45awqEOzuCvwpFY=
|
|
||||||
golang.org/x/term v0.27.0/go.mod h1:iMsnZpn0cago0GOrHO2+Y7u7JPn5AylBrcoWkElMTSM=
|
|
||||||
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||||
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||||
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
|
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
|
||||||
golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
|
golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
|
||||||
golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
|
golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
|
||||||
golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
|
|
||||||
golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
|
golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
|
||||||
golang.org/x/text v0.15.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
|
golang.org/x/text v0.24.0 h1:dd5Bzh4yt5KYA8f9CJHCP4FB4D51c2c6JvN37xJJkJ0=
|
||||||
golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ=
|
golang.org/x/text v0.24.0/go.mod h1:L8rBsPeo2pSS+xqN0d5u2ikmjtmoJbDBT1b7nHvFCdU=
|
||||||
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||||
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||||
golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
|
golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
|
||||||
golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU=
|
golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU=
|
||||||
golang.org/x/tools v0.13.0/go.mod h1:HvlwmtVNQAhOuCjW7xxvovg8wbNq7LwfXh/k7wXUl58=
|
golang.org/x/tools v0.32.0 h1:Q7N1vhpkQv7ybVzLFtTjvQya2ewbwNDZzUgfXGqtMWU=
|
||||||
golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d/go.mod h1:aiJjzUbINMkxbQROHiO6hDPo2LHcIPhhQsa9DLh0yGk=
|
golang.org/x/tools v0.32.0/go.mod h1:ZxrU41P/wAbZD8EDa6dDCa6XfpkhJ7HFMjHJXfBDu8s=
|
||||||
golang.org/x/tools v0.31.0 h1:0EedkvKDbh+qistFTd0Bcwe/YLh4vHwWEkiI0toFIBU=
|
|
||||||
golang.org/x/tools v0.31.0/go.mod h1:naFTU+Cev749tSJRXJlna0T3WxKvb1kWEx15xA4SdmQ=
|
|
||||||
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||||
google.golang.org/protobuf v1.36.4 h1:6A3ZDJHn/eNqc1i+IdefRzy/9PokBTPvcqMySR7NNIM=
|
google.golang.org/genproto/googleapis/api v0.0.0-20240826202546-f6391c0de4c7 h1:YcyjlL1PRr2Q17/I0dPk2JmYS5CDXfcdb2Z3YRioEbw=
|
||||||
google.golang.org/protobuf v1.36.4/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE=
|
google.golang.org/genproto/googleapis/api v0.0.0-20240826202546-f6391c0de4c7/go.mod h1:OCdP9MfskevB/rbYvHTsXTtKC+3bHWajPdoKgjcYkfo=
|
||||||
|
google.golang.org/genproto/googleapis/rpc v0.0.0-20240826202546-f6391c0de4c7 h1:2035KHhUv+EpyB+hWgJnaWKJOdX1E95w2S8Rr4uWKTs=
|
||||||
|
google.golang.org/genproto/googleapis/rpc v0.0.0-20240826202546-f6391c0de4c7/go.mod h1:UqMtugtsSgubUsoxbuAoiCXvqvErP7Gf0so0mK9tHxU=
|
||||||
|
google.golang.org/protobuf v1.36.5 h1:tPhr+woSbjfYvY6/GPufUoYizxw1cF/yFoxJ2fmpwlM=
|
||||||
|
google.golang.org/protobuf v1.36.5/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE=
|
||||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||||
|
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
|
||||||
|
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
|
||||||
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||||
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||||
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
|
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
|
||||||
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||||
|
honnef.co/go/tools v0.6.1 h1:R094WgE8K4JirYjBaOpz/AvTyUu/3wbmAoskKN/pxTI=
|
||||||
|
honnef.co/go/tools v0.6.1/go.mod h1:3puzxxljPCe8RGJX7BIy1plGbxEOZni5mR2aXe3/uk4=
|
||||||
|
k8s.io/apimachinery v0.32.3 h1:JmDuDarhDmA/Li7j3aPrwhpNBA94Nvk5zLeOge9HH1U=
|
||||||
|
k8s.io/apimachinery v0.32.3/go.mod h1:GpHVgxoKlTxClKcteaeuF1Ul/lDVb74KpZcxcmLDElE=
|
||||||
|
sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3 h1:/Rv+M11QRah1itp8VhT6HoVx1Ray9eB4DBr+K+/sCJ8=
|
||||||
|
sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3/go.mod h1:18nIHnGi6636UCz6m8i4DhaJ65T6EruyzmoQqI2BVDo=
|
||||||
|
sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E=
|
||||||
|
sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY=
|
||||||
|
|||||||
@@ -1,18 +1,33 @@
|
|||||||
package internal
|
package internal
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
"log/slog"
|
"log/slog"
|
||||||
"net"
|
"net"
|
||||||
"net/http"
|
"net/http"
|
||||||
|
"net/netip"
|
||||||
"strings"
|
"strings"
|
||||||
|
|
||||||
"github.com/TecharoHQ/anubis"
|
"github.com/TecharoHQ/anubis"
|
||||||
"github.com/sebest/xff"
|
"github.com/sebest/xff"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
// TODO: move into config
|
||||||
|
type XFFComputePreferences struct {
|
||||||
|
StripPrivate bool
|
||||||
|
StripLoopback bool
|
||||||
|
StripCGNAT bool
|
||||||
|
StripLLU bool
|
||||||
|
Flatten bool
|
||||||
|
}
|
||||||
|
|
||||||
|
var CGNat = netip.MustParsePrefix("100.64.0.0/10")
|
||||||
|
|
||||||
// UnchangingCache sets the Cache-Control header to cache a response for 1 year if
|
// UnchangingCache sets the Cache-Control header to cache a response for 1 year if
|
||||||
// and only if the application is compiled in "release" mode by Docker.
|
// and only if the application is compiled in "release" mode by Docker.
|
||||||
func UnchangingCache(next http.Handler) http.Handler {
|
func UnchangingCache(next http.Handler) http.Handler {
|
||||||
|
//goland:noinspection GoBoolExpressions
|
||||||
if anubis.Version == "devel" {
|
if anubis.Version == "devel" {
|
||||||
return next
|
return next
|
||||||
}
|
}
|
||||||
@@ -64,16 +79,121 @@ func XForwardedForToXRealIP(next http.Handler) http.Handler {
|
|||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// XForwardedForUpdate sets or updates the X-Forwarded-For header, adding
|
||||||
|
// the known remote address to an existing chain if present
|
||||||
|
func XForwardedForUpdate(next http.Handler) http.Handler {
|
||||||
|
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||||
|
defer next.ServeHTTP(w, r)
|
||||||
|
|
||||||
|
pref := XFFComputePreferences{
|
||||||
|
StripPrivate: true,
|
||||||
|
StripLoopback: true,
|
||||||
|
StripCGNAT: true,
|
||||||
|
Flatten: true,
|
||||||
|
StripLLU: true,
|
||||||
|
}
|
||||||
|
|
||||||
|
remoteAddr := r.RemoteAddr
|
||||||
|
origXFFHeader := r.Header.Get("X-Forwarded-For")
|
||||||
|
|
||||||
|
if remoteAddr == "@" {
|
||||||
|
// remote is a unix socket
|
||||||
|
// do not touch chain
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
xffHeaderString, err := computeXFFHeader(remoteAddr, origXFFHeader, pref)
|
||||||
|
if err != nil {
|
||||||
|
slog.Debug("computing X-Forwarded-For header failed", "err", err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(xffHeaderString) == 0 {
|
||||||
|
r.Header.Del("X-Forwarded-For")
|
||||||
|
} else {
|
||||||
|
r.Header.Set("X-Forwarded-For", xffHeaderString)
|
||||||
|
}
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
var (
|
||||||
|
ErrCantSplitHostParse = errors.New("internal: unable to net.SplitHostParse")
|
||||||
|
ErrCantParseRemoteIP = errors.New("internal: unable to parse remote IP")
|
||||||
|
)
|
||||||
|
|
||||||
|
func computeXFFHeader(remoteAddr string, origXFFHeader string, pref XFFComputePreferences) (string, error) {
|
||||||
|
remoteIP, _, err := net.SplitHostPort(remoteAddr)
|
||||||
|
if err != nil {
|
||||||
|
return "", fmt.Errorf("%w: %w", ErrCantSplitHostParse, err)
|
||||||
|
}
|
||||||
|
parsedRemoteIP, err := netip.ParseAddr(remoteIP)
|
||||||
|
if err != nil {
|
||||||
|
return "", fmt.Errorf("%w: %w", ErrCantParseRemoteIP, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
origForwardedList := make([]string, 0, 4)
|
||||||
|
if origXFFHeader != "" {
|
||||||
|
origForwardedList = strings.Split(origXFFHeader, ",")
|
||||||
|
}
|
||||||
|
origForwardedList = append(origForwardedList, parsedRemoteIP.String())
|
||||||
|
forwardedList := make([]string, 0, len(origForwardedList))
|
||||||
|
// this behavior is equivalent to
|
||||||
|
// ingress-nginx "compute-full-forwarded-for"
|
||||||
|
// https://kubernetes.github.io/ingress-nginx/user-guide/nginx-configuration/configmap/#compute-full-forwarded-for
|
||||||
|
//
|
||||||
|
// this would be the correct place to strip and/or flatten this list
|
||||||
|
//
|
||||||
|
// strip - iterate backwards and eliminate configured trusted IPs
|
||||||
|
// flatten - only return the last element to avoid spoofing confusion
|
||||||
|
//
|
||||||
|
// many applications handle this in different ways, but
|
||||||
|
// generally they'd be expected to do these two things on
|
||||||
|
// their own end to find the first non-spoofed IP
|
||||||
|
for i := len(origForwardedList) - 1; i >= 0; i-- {
|
||||||
|
segmentIP, err := netip.ParseAddr(origForwardedList[i])
|
||||||
|
if err != nil {
|
||||||
|
// can't assess this element, so the remainder of the chain
|
||||||
|
// can't be trusted. not a fatal error, since anyone can
|
||||||
|
// spoof an XFF header
|
||||||
|
slog.Debug("failed to parse XFF segment", "err", err)
|
||||||
|
break
|
||||||
|
}
|
||||||
|
if pref.StripPrivate && segmentIP.IsPrivate() {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
if pref.StripLoopback && segmentIP.IsLoopback() {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
if pref.StripLLU && segmentIP.IsLinkLocalUnicast() {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
if pref.StripCGNAT && CGNat.Contains(segmentIP) {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
forwardedList = append([]string{segmentIP.String()}, forwardedList...)
|
||||||
|
}
|
||||||
|
var xffHeaderString string
|
||||||
|
if len(forwardedList) == 0 {
|
||||||
|
xffHeaderString = ""
|
||||||
|
return xffHeaderString, nil
|
||||||
|
}
|
||||||
|
if pref.Flatten {
|
||||||
|
xffHeaderString = forwardedList[len(forwardedList)-1]
|
||||||
|
} else {
|
||||||
|
xffHeaderString = strings.Join(forwardedList, ",")
|
||||||
|
}
|
||||||
|
return xffHeaderString, nil
|
||||||
|
}
|
||||||
|
|
||||||
// NoStoreCache sets the Cache-Control header to no-store for the response.
|
// NoStoreCache sets the Cache-Control header to no-store for the response.
|
||||||
func NoStoreCache(next http.Handler) http.Handler {
|
func NoStoreCache(next http.Handler) http.Handler {
|
||||||
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||||
w.Header().Set("Cache-Control", "no-store")
|
w.Header().Set("Cache-Control", "no-store")
|
||||||
next.ServeHTTP(w, r)
|
next.ServeHTTP(w, r)
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// NoBrowsing prevents directory browsing by returning a 404 for any request that ends with a "/".
|
||||||
// Do not allow browsing directory listings in paths that end with /
|
|
||||||
func NoBrowsing(next http.Handler) http.Handler {
|
func NoBrowsing(next http.Handler) http.Handler {
|
||||||
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||||
if strings.HasSuffix(r.URL.Path, "/") {
|
if strings.HasSuffix(r.URL.Path, "/") {
|
||||||
|
|||||||
51
internal/ogtags/cache.go
Normal file
@@ -0,0 +1,51 @@
|
|||||||
|
package ogtags
|
||||||
|
|
||||||
|
import (
|
||||||
|
"errors"
|
||||||
|
"log/slog"
|
||||||
|
"net/url"
|
||||||
|
"syscall"
|
||||||
|
)
|
||||||
|
|
||||||
|
// GetOGTags is the main function that retrieves Open Graph tags for a URL
|
||||||
|
func (c *OGTagCache) GetOGTags(url *url.URL) (map[string]string, error) {
|
||||||
|
if url == nil {
|
||||||
|
return nil, errors.New("nil URL provided, cannot fetch OG tags")
|
||||||
|
}
|
||||||
|
urlStr := c.getTarget(url)
|
||||||
|
// Check cache first
|
||||||
|
if cachedTags := c.checkCache(urlStr); cachedTags != nil {
|
||||||
|
return cachedTags, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Fetch HTML content
|
||||||
|
doc, err := c.fetchHTMLDocument(urlStr)
|
||||||
|
if errors.Is(err, syscall.ECONNREFUSED) {
|
||||||
|
slog.Debug("Connection refused, returning empty tags")
|
||||||
|
return nil, nil
|
||||||
|
} else if errors.Is(err, ErrOgHandled) {
|
||||||
|
// Error was handled in fetchHTMLDocument, return empty tags
|
||||||
|
return nil, nil
|
||||||
|
}
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Extract OG tags
|
||||||
|
ogTags := c.extractOGTags(doc)
|
||||||
|
|
||||||
|
// Store in cache
|
||||||
|
c.cache.Set(urlStr, ogTags, c.ogTimeToLive)
|
||||||
|
|
||||||
|
return ogTags, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// checkCache checks if we have the tags cached and returns them if so
|
||||||
|
func (c *OGTagCache) checkCache(urlStr string) map[string]string {
|
||||||
|
if cachedTags, ok := c.cache.Get(urlStr); ok {
|
||||||
|
slog.Debug("cache hit", "tags", cachedTags)
|
||||||
|
return cachedTags
|
||||||
|
}
|
||||||
|
slog.Debug("cache miss", "url", urlStr)
|
||||||
|
return nil
|
||||||
|
}
|
||||||
122
internal/ogtags/cache_test.go
Normal file
@@ -0,0 +1,122 @@
|
|||||||
|
package ogtags
|
||||||
|
|
||||||
|
import (
|
||||||
|
"net/http"
|
||||||
|
"net/http/httptest"
|
||||||
|
"net/url"
|
||||||
|
"testing"
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestCheckCache(t *testing.T) {
|
||||||
|
cache := NewOGTagCache("http://example.com", true, time.Minute)
|
||||||
|
|
||||||
|
// Set up test data
|
||||||
|
urlStr := "http://example.com/page"
|
||||||
|
expectedTags := map[string]string{
|
||||||
|
"og:title": "Test Title",
|
||||||
|
"og:description": "Test Description",
|
||||||
|
}
|
||||||
|
|
||||||
|
// Test cache miss
|
||||||
|
tags := cache.checkCache(urlStr)
|
||||||
|
if tags != nil {
|
||||||
|
t.Errorf("expected nil tags on cache miss, got %v", tags)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Manually add to cache
|
||||||
|
cache.cache.Set(urlStr, expectedTags, time.Minute)
|
||||||
|
|
||||||
|
// Test cache hit
|
||||||
|
tags = cache.checkCache(urlStr)
|
||||||
|
if tags == nil {
|
||||||
|
t.Fatal("expected non-nil tags on cache hit, got nil")
|
||||||
|
}
|
||||||
|
|
||||||
|
for key, expectedValue := range expectedTags {
|
||||||
|
if value, ok := tags[key]; !ok || value != expectedValue {
|
||||||
|
t.Errorf("expected %s: %s, got: %s", key, expectedValue, value)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestGetOGTags(t *testing.T) {
|
||||||
|
var loadCount int // Counter to track how many times the test route is loaded
|
||||||
|
|
||||||
|
// Create a test server to serve a sample HTML page with OG tags
|
||||||
|
ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||||
|
loadCount++
|
||||||
|
if loadCount > 1 {
|
||||||
|
t.Fatalf("Test route loaded more than once, cache failed")
|
||||||
|
}
|
||||||
|
w.Header().Set("Content-Type", "text/html")
|
||||||
|
w.Write([]byte(`
|
||||||
|
<!DOCTYPE html>
|
||||||
|
<html>
|
||||||
|
<head>
|
||||||
|
<meta property="og:title" content="Test Title" />
|
||||||
|
<meta property="og:description" content="Test Description" />
|
||||||
|
<meta property="og:image" content="http://example.com/image.jpg" />
|
||||||
|
</head>
|
||||||
|
<body>
|
||||||
|
<p>Hello, world!</p>
|
||||||
|
</body>
|
||||||
|
</html>
|
||||||
|
`))
|
||||||
|
}))
|
||||||
|
defer ts.Close()
|
||||||
|
|
||||||
|
// Create an instance of OGTagCache with a short TTL for testing
|
||||||
|
cache := NewOGTagCache(ts.URL, true, 1*time.Minute)
|
||||||
|
|
||||||
|
// Parse the test server URL
|
||||||
|
parsedURL, err := url.Parse(ts.URL)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("failed to parse test server URL: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Test fetching OG tags from the test server
|
||||||
|
ogTags, err := cache.GetOGTags(parsedURL)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("failed to get OG tags: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Verify the fetched OG tags
|
||||||
|
expectedTags := map[string]string{
|
||||||
|
"og:title": "Test Title",
|
||||||
|
"og:description": "Test Description",
|
||||||
|
"og:image": "http://example.com/image.jpg",
|
||||||
|
}
|
||||||
|
|
||||||
|
for key, expectedValue := range expectedTags {
|
||||||
|
if value, ok := ogTags[key]; !ok || value != expectedValue {
|
||||||
|
t.Errorf("expected %s: %s, got: %s", key, expectedValue, value)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Test fetching OG tags from the cache
|
||||||
|
ogTags, err = cache.GetOGTags(parsedURL)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("failed to get OG tags from cache: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Test fetching OG tags from the cache (3rd time)
|
||||||
|
newOgTags, err := cache.GetOGTags(parsedURL)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("failed to get OG tags from cache: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Verify the cached OG tags
|
||||||
|
for key, expectedValue := range expectedTags {
|
||||||
|
if value, ok := ogTags[key]; !ok || value != expectedValue {
|
||||||
|
t.Errorf("expected %s: %s, got: %s", key, expectedValue, value)
|
||||||
|
}
|
||||||
|
|
||||||
|
initialValue := ogTags[key]
|
||||||
|
cachedValue, ok := newOgTags[key]
|
||||||
|
if !ok || initialValue != cachedValue {
|
||||||
|
t.Errorf("Cache does not line up: expected %s: %s, got: %s", key, initialValue, cachedValue)
|
||||||
|
}
|
||||||
|
|
||||||
|
}
|
||||||
|
}
|
||||||
77
internal/ogtags/fetch.go
Normal file
@@ -0,0 +1,77 @@
|
|||||||
|
package ogtags
|
||||||
|
|
||||||
|
import (
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
"golang.org/x/net/html"
|
||||||
|
"io"
|
||||||
|
"log/slog"
|
||||||
|
"mime"
|
||||||
|
"net"
|
||||||
|
"net/http"
|
||||||
|
)
|
||||||
|
|
||||||
|
var (
	// ErrOgHandled indicates the error was already handled (logged and/or
	// negatively cached) inside fetchHTMLDocument and should not be logged
	// again by the caller.
	ErrOgHandled = errors.New("og: handled error")
	// emptyMap marks a known-empty result in the cache. Can't use nil as it
	// would be indistinguishable from a cache miss.
	emptyMap = map[string]string{}
)
|
||||||
|
|
||||||
|
func (c *OGTagCache) fetchHTMLDocument(urlStr string) (*html.Node, error) {
|
||||||
|
resp, err := c.client.Get(urlStr)
|
||||||
|
if err != nil {
|
||||||
|
var netErr net.Error
|
||||||
|
if errors.As(err, &netErr) && netErr.Timeout() {
|
||||||
|
slog.Debug("og: request timed out", "url", urlStr)
|
||||||
|
c.cache.Set(urlStr, emptyMap, c.ogTimeToLive/2) // Cache empty result for half the TTL to not spam the server
|
||||||
|
}
|
||||||
|
return nil, fmt.Errorf("http get failed: %w", err)
|
||||||
|
}
|
||||||
|
// this defer will call MaxBytesReader's Close, which closes the original body.
|
||||||
|
defer func(Body io.ReadCloser) {
|
||||||
|
err := Body.Close()
|
||||||
|
if err != nil {
|
||||||
|
slog.Debug("og: error closing response body", "url", urlStr, "error", err)
|
||||||
|
}
|
||||||
|
}(resp.Body)
|
||||||
|
|
||||||
|
if resp.StatusCode != http.StatusOK {
|
||||||
|
slog.Debug("og: received non-OK status code", "url", urlStr, "status", resp.StatusCode)
|
||||||
|
c.cache.Set(urlStr, emptyMap, c.ogTimeToLive) // Cache empty result for non-successful status codes
|
||||||
|
return nil, fmt.Errorf("%w: page not found", ErrOgHandled)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Check content type
|
||||||
|
ct := resp.Header.Get("Content-Type")
|
||||||
|
if ct == "" {
|
||||||
|
// assume non html body
|
||||||
|
return nil, fmt.Errorf("missing Content-Type header")
|
||||||
|
} else {
|
||||||
|
mediaType, _, err := mime.ParseMediaType(ct)
|
||||||
|
if err != nil {
|
||||||
|
// Malformed Content-Type header
|
||||||
|
slog.Debug("og: malformed Content-Type header", "url", urlStr, "contentType", ct)
|
||||||
|
return nil, fmt.Errorf("%w malformed Content-Type header: %w", ErrOgHandled, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if mediaType != "text/html" && mediaType != "application/xhtml+xml" {
|
||||||
|
slog.Debug("og: unsupported Content-Type", "url", urlStr, "contentType", mediaType)
|
||||||
|
return nil, fmt.Errorf("%w unsupported Content-Type: %s", ErrOgHandled, mediaType)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
resp.Body = http.MaxBytesReader(nil, resp.Body, c.maxContentLength)
|
||||||
|
|
||||||
|
doc, err := html.Parse(resp.Body)
|
||||||
|
if err != nil {
|
||||||
|
// Check if the error is specifically because the limit was exceeded
|
||||||
|
var maxBytesErr *http.MaxBytesError
|
||||||
|
if errors.As(err, &maxBytesErr) {
|
||||||
|
slog.Debug("og: content exceeded max length", "url", urlStr, "limit", c.maxContentLength)
|
||||||
|
return nil, fmt.Errorf("content too large: exceeded %d bytes", c.maxContentLength)
|
||||||
|
}
|
||||||
|
// parsing error (e.g., malformed HTML)
|
||||||
|
return nil, fmt.Errorf("failed to parse HTML: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
return doc, nil
|
||||||
|
}
|
||||||
119
internal/ogtags/fetch_test.go
Normal file
@@ -0,0 +1,119 @@
|
|||||||
|
package ogtags
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"io"
|
||||||
|
"net/http"
|
||||||
|
"net/http/httptest"
|
||||||
|
"os"
|
||||||
|
"strings"
|
||||||
|
"testing"
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestFetchHTMLDocument(t *testing.T) {
|
||||||
|
tests := []struct {
|
||||||
|
name string
|
||||||
|
htmlContent string
|
||||||
|
contentType string
|
||||||
|
statusCode int
|
||||||
|
contentLength int64
|
||||||
|
expectError bool
|
||||||
|
}{
|
||||||
|
{
|
||||||
|
name: "Valid HTML",
|
||||||
|
htmlContent: `<!DOCTYPE html>
|
||||||
|
<html>
|
||||||
|
<head><title>Test</title></head>
|
||||||
|
<body><p>Test content</p></body>
|
||||||
|
</html>`,
|
||||||
|
contentType: "text/html",
|
||||||
|
statusCode: http.StatusOK,
|
||||||
|
expectError: false,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "Empty HTML",
|
||||||
|
htmlContent: "",
|
||||||
|
contentType: "text/html",
|
||||||
|
statusCode: http.StatusOK,
|
||||||
|
expectError: false,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "Not found error",
|
||||||
|
htmlContent: "",
|
||||||
|
contentType: "text/html",
|
||||||
|
statusCode: http.StatusNotFound,
|
||||||
|
expectError: true,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "Unsupported Content-Type",
|
||||||
|
htmlContent: "*Insert rick roll here*",
|
||||||
|
contentType: "video/mp4",
|
||||||
|
statusCode: http.StatusOK,
|
||||||
|
expectError: true,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "Too large content",
|
||||||
|
contentType: "text/html",
|
||||||
|
statusCode: http.StatusOK,
|
||||||
|
expectError: true,
|
||||||
|
contentLength: 5 * 1024 * 1024, // 5MB (over 2MB limit)
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, tt := range tests {
|
||||||
|
t.Run(tt.name, func(t *testing.T) {
|
||||||
|
ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||||
|
if tt.contentType != "" {
|
||||||
|
w.Header().Set("Content-Type", tt.contentType)
|
||||||
|
}
|
||||||
|
if tt.contentLength > 0 {
|
||||||
|
// Simulate content length but avoid sending too much actual data
|
||||||
|
w.Header().Set("Content-Length", fmt.Sprintf("%d", tt.contentLength))
|
||||||
|
io.CopyN(w, strings.NewReader("X"), tt.contentLength)
|
||||||
|
} else {
|
||||||
|
w.WriteHeader(tt.statusCode)
|
||||||
|
w.Write([]byte(tt.htmlContent))
|
||||||
|
}
|
||||||
|
}))
|
||||||
|
defer ts.Close()
|
||||||
|
|
||||||
|
cache := NewOGTagCache("", true, time.Minute)
|
||||||
|
doc, err := cache.fetchHTMLDocument(ts.URL)
|
||||||
|
|
||||||
|
if tt.expectError {
|
||||||
|
if err == nil {
|
||||||
|
t.Error("expected error, got nil")
|
||||||
|
}
|
||||||
|
if doc != nil {
|
||||||
|
t.Error("expected nil document on error, got non-nil")
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
if err != nil {
|
||||||
|
t.Errorf("unexpected error: %v", err)
|
||||||
|
}
|
||||||
|
if doc == nil {
|
||||||
|
t.Error("expected non-nil document, got nil")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestFetchHTMLDocumentInvalidURL(t *testing.T) {
|
||||||
|
if os.Getenv("DONT_USE_NETWORK") != "" {
|
||||||
|
t.Skip("test requires theoretical network egress")
|
||||||
|
}
|
||||||
|
|
||||||
|
cache := NewOGTagCache("", true, time.Minute)
|
||||||
|
|
||||||
|
doc, err := cache.fetchHTMLDocument("http://invalid.url.that.doesnt.exist.example")
|
||||||
|
|
||||||
|
if err == nil {
|
||||||
|
t.Error("expected error for invalid URL, got nil")
|
||||||
|
}
|
||||||
|
|
||||||
|
if doc != nil {
|
||||||
|
t.Error("expected nil document for invalid URL, got non-nil")
|
||||||
|
}
|
||||||
|
}
|
||||||
155
internal/ogtags/integration_test.go
Normal file
@@ -0,0 +1,155 @@
|
|||||||
|
package ogtags
|
||||||
|
|
||||||
|
import (
|
||||||
|
"net/http"
|
||||||
|
"net/http/httptest"
|
||||||
|
"net/url"
|
||||||
|
"testing"
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
// TestIntegrationGetOGTags exercises GetOGTags end to end against a local
// HTTP server: extraction for pages with varying amounts of OG metadata,
// the not-found path, and a second lookup served from the cache.
func TestIntegrationGetOGTags(t *testing.T) {
	ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w.Header().Set("Content-Type", "text/html")

		switch r.URL.Path {
		case "/simple":
			w.Write([]byte(`
<!DOCTYPE html>
<html>
<head>
<meta property="og:title" content="Simple Page" />
<meta property="og:type" content="website" />
</head>
<body><p>Simple page content</p></body>
</html>
`))
		case "/complete":
			w.Write([]byte(`
<!DOCTYPE html>
<html>
<head>
<meta property="og:title" content="Complete Page" />
<meta property="og:description" content="A page with many OG tags" />
<meta property="og:image" content="http://example.com/image.jpg" />
<meta property="og:url" content="http://example.com/complete" />
<meta property="og:type" content="article" />
</head>
<body><p>Complete page content</p></body>
</html>
`))
		case "/no-og":
			w.Write([]byte(`
<!DOCTYPE html>
<html>
<head>
<title>No OG Tags</title>
</head>
<body><p>No OG tags here</p></body>
</html>
`))
		default:
			w.WriteHeader(http.StatusNotFound)
		}
	}))
	defer ts.Close()

	// Test with different configurations
	testCases := []struct {
		name         string
		path         string
		query        string
		expectedTags map[string]string
		expectError  bool
	}{
		{
			name:  "Simple page",
			path:  "/simple",
			query: "",
			expectedTags: map[string]string{
				"og:title": "Simple Page",
				"og:type":  "website",
			},
			expectError: false,
		},
		{
			name:  "Complete page",
			path:  "/complete",
			query: "ref=test",
			expectedTags: map[string]string{
				"og:title":       "Complete Page",
				"og:description": "A page with many OG tags",
				"og:image":       "http://example.com/image.jpg",
				"og:url":         "http://example.com/complete",
				"og:type":        "article",
			},
			expectError: false,
		},
		{
			name:         "Page with no OG tags",
			path:         "/no-og",
			query:        "",
			expectedTags: map[string]string{},
			expectError:  false,
		},
		{
			// 404s are handled inside fetchHTMLDocument (ErrOgHandled) and
			// surface as nil tags with no error.
			name:         "Non-existent page",
			path:         "/not-found",
			query:        "",
			expectedTags: nil,
			expectError:  false,
		},
	}

	for _, tc := range testCases {
		t.Run(tc.name, func(t *testing.T) {
			// Create cache instance
			cache := NewOGTagCache(ts.URL, true, 1*time.Minute)

			// Create URL for test
			testURL, _ := url.Parse(ts.URL)
			testURL.Path = tc.path
			testURL.RawQuery = tc.query

			// Get OG tags
			ogTags, err := cache.GetOGTags(testURL)

			// Check error expectation
			if tc.expectError {
				if err == nil {
					t.Error("expected error, got nil")
				}
				return
			}

			if err != nil {
				t.Fatalf("unexpected error: %v", err)
			}

			// Verify all expected tags are present
			for key, expectedValue := range tc.expectedTags {
				if value, ok := ogTags[key]; !ok || value != expectedValue {
					t.Errorf("expected %s: %s, got: %s", key, expectedValue, value)
				}
			}

			// Verify no extra tags are present
			if len(ogTags) != len(tc.expectedTags) {
				t.Errorf("expected %d tags, got %d", len(tc.expectedTags), len(ogTags))
			}

			// Test cache retrieval
			cachedOGTags, err := cache.GetOGTags(testURL)
			if err != nil {
				t.Fatalf("failed to get OG tags from cache: %v", err)
			}

			// Verify cached tags match
			for key, expectedValue := range tc.expectedTags {
				if value, ok := cachedOGTags[key]; !ok || value != expectedValue {
					t.Errorf("cached value - expected %s: %s, got: %s", key, expectedValue, value)
				}
			}
		})
	}
}
|
||||||
51
internal/ogtags/ogtags.go
Normal file
@@ -0,0 +1,51 @@
|
|||||||
|
package ogtags
|
||||||
|
|
||||||
|
import (
|
||||||
|
"net/http"
|
||||||
|
"net/url"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/TecharoHQ/anubis/decaymap"
|
||||||
|
)
|
||||||
|
|
||||||
|
// OGTagCache fetches pages from an upstream target, extracts approved Open
// Graph / meta tags from them, and memoizes the results in a decaying cache.
type OGTagCache struct {
	cache            *decaymap.Impl[string, map[string]string] // urlStr -> extracted tag map
	target           string                                    // upstream base URL that request paths are appended to
	ogPassthrough    bool                                      // presumably gates OG passthrough at a higher layer — not read in this file's visible code; confirm against callers
	ogTimeToLive     time.Duration                             // TTL for cached tag maps
	approvedTags     []string                                  // exact tag names allowed through
	approvedPrefixes []string                                  // tag-name prefixes allowed through (e.g. "og:")
	client           *http.Client                              // HTTP client used for upstream fetches
	maxContentLength int64                                     // maximum bytes read from a response body
}
|
||||||
|
|
||||||
|
func NewOGTagCache(target string, ogPassthrough bool, ogTimeToLive time.Duration) *OGTagCache {
|
||||||
|
// Predefined approved tags and prefixes
|
||||||
|
// In the future, these could come from configuration
|
||||||
|
defaultApprovedTags := []string{"description", "keywords", "author"}
|
||||||
|
defaultApprovedPrefixes := []string{"og:", "twitter:", "fediverse:"}
|
||||||
|
client := &http.Client{
|
||||||
|
Timeout: 5 * time.Second, /*make this configurable?*/
|
||||||
|
}
|
||||||
|
|
||||||
|
const maxContentLength = 16 << 20 // 16 MiB in bytes
|
||||||
|
|
||||||
|
return &OGTagCache{
|
||||||
|
cache: decaymap.New[string, map[string]string](),
|
||||||
|
target: target,
|
||||||
|
ogPassthrough: ogPassthrough,
|
||||||
|
ogTimeToLive: ogTimeToLive,
|
||||||
|
approvedTags: defaultApprovedTags,
|
||||||
|
approvedPrefixes: defaultApprovedPrefixes,
|
||||||
|
client: client,
|
||||||
|
maxContentLength: maxContentLength,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *OGTagCache) getTarget(u *url.URL) string {
|
||||||
|
return c.target + u.Path
|
||||||
|
}
|
||||||
|
|
||||||
|
// Cleanup delegates to the underlying decaying cache's Cleanup — presumably
// evicting expired entries; confirm against decaymap's documentation.
func (c *OGTagCache) Cleanup() {
	c.cache.Cleanup()
}
|
||||||
100
internal/ogtags/ogtags_test.go
Normal file
@@ -0,0 +1,100 @@
|
|||||||
|
package ogtags
|
||||||
|
|
||||||
|
import (
|
||||||
|
"net/url"
|
||||||
|
"testing"
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestNewOGTagCache(t *testing.T) {
|
||||||
|
tests := []struct {
|
||||||
|
name string
|
||||||
|
target string
|
||||||
|
ogPassthrough bool
|
||||||
|
ogTimeToLive time.Duration
|
||||||
|
}{
|
||||||
|
{
|
||||||
|
name: "Basic initialization",
|
||||||
|
target: "http://example.com",
|
||||||
|
ogPassthrough: true,
|
||||||
|
ogTimeToLive: 5 * time.Minute,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "Empty target",
|
||||||
|
target: "",
|
||||||
|
ogPassthrough: false,
|
||||||
|
ogTimeToLive: 10 * time.Minute,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, tt := range tests {
|
||||||
|
t.Run(tt.name, func(t *testing.T) {
|
||||||
|
cache := NewOGTagCache(tt.target, tt.ogPassthrough, tt.ogTimeToLive)
|
||||||
|
|
||||||
|
if cache == nil {
|
||||||
|
t.Fatal("expected non-nil cache, got nil")
|
||||||
|
}
|
||||||
|
|
||||||
|
if cache.target != tt.target {
|
||||||
|
t.Errorf("expected target %s, got %s", tt.target, cache.target)
|
||||||
|
}
|
||||||
|
|
||||||
|
if cache.ogPassthrough != tt.ogPassthrough {
|
||||||
|
t.Errorf("expected ogPassthrough %v, got %v", tt.ogPassthrough, cache.ogPassthrough)
|
||||||
|
}
|
||||||
|
|
||||||
|
if cache.ogTimeToLive != tt.ogTimeToLive {
|
||||||
|
t.Errorf("expected ogTimeToLive %v, got %v", tt.ogTimeToLive, cache.ogTimeToLive)
|
||||||
|
}
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestGetTarget(t *testing.T) {
|
||||||
|
tests := []struct {
|
||||||
|
name string
|
||||||
|
target string
|
||||||
|
path string
|
||||||
|
query string
|
||||||
|
expected string
|
||||||
|
}{
|
||||||
|
{
|
||||||
|
name: "No path or query",
|
||||||
|
target: "http://example.com",
|
||||||
|
path: "",
|
||||||
|
query: "",
|
||||||
|
expected: "http://example.com",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "With complex path",
|
||||||
|
target: "http://example.com",
|
||||||
|
path: "/pag(#*((#@)ΓΓΓΓe/Γ",
|
||||||
|
query: "id=123",
|
||||||
|
expected: "http://example.com/pag(#*((#@)ΓΓΓΓe/Γ",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "With query and path",
|
||||||
|
target: "http://example.com",
|
||||||
|
path: "/page",
|
||||||
|
query: "id=123",
|
||||||
|
expected: "http://example.com/page",
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, tt := range tests {
|
||||||
|
t.Run(tt.name, func(t *testing.T) {
|
||||||
|
cache := NewOGTagCache(tt.target, false, time.Minute)
|
||||||
|
|
||||||
|
u := &url.URL{
|
||||||
|
Path: tt.path,
|
||||||
|
RawQuery: tt.query,
|
||||||
|
}
|
||||||
|
|
||||||
|
result := cache.getTarget(u)
|
||||||
|
|
||||||
|
if result != tt.expected {
|
||||||
|
t.Errorf("expected %s, got %s", tt.expected, result)
|
||||||
|
}
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
81
internal/ogtags/parse.go
Normal file
@@ -0,0 +1,81 @@
|
|||||||
|
package ogtags
|
||||||
|
|
||||||
|
import (
|
||||||
|
"strings"
|
||||||
|
|
||||||
|
"golang.org/x/net/html"
|
||||||
|
)
|
||||||
|
|
||||||
|
// extractOGTags traverses the HTML document and extracts approved Open Graph tags
|
||||||
|
func (c *OGTagCache) extractOGTags(doc *html.Node) map[string]string {
|
||||||
|
ogTags := make(map[string]string)
|
||||||
|
|
||||||
|
var traverseNodes func(*html.Node)
|
||||||
|
traverseNodes = func(n *html.Node) {
|
||||||
|
// isOGMetaTag only checks if it's a <meta> tag.
|
||||||
|
// The actual filtering happens in extractMetaTagInfo now.
|
||||||
|
if isOGMetaTag(n) {
|
||||||
|
property, content := c.extractMetaTagInfo(n)
|
||||||
|
if property != "" {
|
||||||
|
ogTags[property] = content
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
for child := n.FirstChild; child != nil; child = child.NextSibling {
|
||||||
|
traverseNodes(child)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
traverseNodes(doc)
|
||||||
|
return ogTags
|
||||||
|
}
|
||||||
|
|
||||||
|
// isOGMetaTag checks if a node is *any* meta tag
|
||||||
|
func isOGMetaTag(n *html.Node) bool {
|
||||||
|
if n == nil {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
return n.Type == html.ElementNode && n.Data == "meta"
|
||||||
|
}
|
||||||
|
|
||||||
|
// extractMetaTagInfo extracts property and content from a meta tag
|
||||||
|
// *and* checks if the property is approved.
|
||||||
|
// Returns empty property string if the tag is not approved.
|
||||||
|
func (c *OGTagCache) extractMetaTagInfo(n *html.Node) (property, content string) {
|
||||||
|
var rawProperty string // Store the property found before approval check
|
||||||
|
|
||||||
|
for _, attr := range n.Attr {
|
||||||
|
if attr.Key == "property" || attr.Key == "name" {
|
||||||
|
rawProperty = attr.Val
|
||||||
|
}
|
||||||
|
if attr.Key == "content" {
|
||||||
|
content = attr.Val
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Check if the rawProperty is approved
|
||||||
|
isApproved := false
|
||||||
|
for _, prefix := range c.approvedPrefixes {
|
||||||
|
if strings.HasPrefix(rawProperty, prefix) {
|
||||||
|
isApproved = true
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
// Check exact approved tags if not already approved by prefix
|
||||||
|
if !isApproved {
|
||||||
|
for _, tag := range c.approvedTags {
|
||||||
|
if rawProperty == tag {
|
||||||
|
isApproved = true
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Only return the property if it's approved
|
||||||
|
if isApproved {
|
||||||
|
property = rawProperty
|
||||||
|
}
|
||||||
|
|
||||||
|
// Content is returned regardless, but property will be "" if not approved
|
||||||
|
return property, content
|
||||||
|
}
|
||||||
295
internal/ogtags/parse_test.go
Normal file
@@ -0,0 +1,295 @@
|
|||||||
|
package ogtags
|
||||||
|
|
||||||
|
import (
|
||||||
|
"reflect"
|
||||||
|
"strings"
|
||||||
|
"testing"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"golang.org/x/net/html"
|
||||||
|
)
|
||||||
|
|
||||||
|
// TestExtractOGTags updated with correct expectations based on filtering logic.
// Each case feeds a full HTML document through extractOGTags and compares the
// resulting map against the approval lists configured below.
func TestExtractOGTags(t *testing.T) {
	// Use a cache instance that reflects the default approved lists
	testCache := NewOGTagCache("", false, time.Minute)
	// Manually set approved tags/prefixes based on the user request for clarity
	testCache.approvedTags = []string{"description"}
	testCache.approvedPrefixes = []string{"og:"}

	tests := []struct {
		name     string
		htmlStr  string
		expected map[string]string
	}{
		{
			name: "Basic OG tags", // Includes standard 'description' meta tag
			htmlStr: `<!DOCTYPE html>
<html>
<head>
<meta property="og:title" content="Test Title" />
<meta property="og:description" content="Test Description" />
<meta name="description" content="Regular Description" />
<meta name="keywords" content="test, keyword" />
</head>
<body></body>
</html>`,
			expected: map[string]string{
				"og:title":       "Test Title",
				"og:description": "Test Description",
				"description":    "Regular Description",
			},
		},
		{
			// The "og:" prefix is accepted via either attribute form.
			name: "OG tags with name attribute",
			htmlStr: `<!DOCTYPE html>
<html>
<head>
<meta name="og:title" content="Test Title" />
<meta property="og:description" content="Test Description" />
<meta name="twitter:card" content="summary" />
</head>
<body></body>
</html>`,
			expected: map[string]string{
				"og:title":       "Test Title",
				"og:description": "Test Description",
				// twitter:card is still not approved
			},
		},
		{
			name: "No approved OG tags", // Contains only standard 'description'
			htmlStr: `<!DOCTYPE html>
<html>
<head>
<meta name="description" content="Test Description" />
<meta name="keywords" content="Test" />
</head>
<body></body>
</html>`,
			expected: map[string]string{
				"description": "Test Description",
			},
		},
		{
			// An empty content attribute is preserved, not dropped.
			name: "Empty content",
			htmlStr: `<!DOCTYPE html>
<html>
<head>
<meta property="og:title" content="" />
<meta property="og:description" content="Test Description" />
</head>
<body></body>
</html>`,
			expected: map[string]string{
				"og:title":       "",
				"og:description": "Test Description",
			},
		},
		{
			name: "Explicitly approved tag",
			htmlStr: `<!DOCTYPE html>
<html>
<head>
<meta property="description" content="Approved Description Tag" />
</head>
<body></body>
</html>`,
			expected: map[string]string{
				// This is approved because "description" is in cache.approvedTags
				"description": "Approved Description Tag",
			},
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			doc, err := html.Parse(strings.NewReader(tt.htmlStr))
			if err != nil {
				t.Fatalf("failed to parse HTML: %v", err)
			}

			ogTags := testCache.extractOGTags(doc)

			if !reflect.DeepEqual(ogTags, tt.expected) {
				t.Errorf("expected %v, got %v", tt.expected, ogTags)
			}
		})
	}
}
|
||||||
|
|
||||||
|
func TestIsOGMetaTag(t *testing.T) {
|
||||||
|
tests := []struct {
|
||||||
|
name string
|
||||||
|
nodeHTML string
|
||||||
|
targetNode string // Helper to find the right node in parsed fragment
|
||||||
|
expected bool
|
||||||
|
}{
|
||||||
|
{
|
||||||
|
name: "Meta OG tag",
|
||||||
|
nodeHTML: `<meta property="og:title" content="Test">`,
|
||||||
|
targetNode: "meta",
|
||||||
|
expected: true,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "Regular meta tag",
|
||||||
|
nodeHTML: `<meta name="description" content="Test">`,
|
||||||
|
targetNode: "meta",
|
||||||
|
expected: true,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "Not a meta tag",
|
||||||
|
nodeHTML: `<div>Test</div>`,
|
||||||
|
targetNode: "div",
|
||||||
|
expected: false,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, tt := range tests {
|
||||||
|
t.Run(tt.name, func(t *testing.T) {
|
||||||
|
// Wrap the partial HTML in basic structure for parsing
|
||||||
|
fullHTML := "<html><head>" + tt.nodeHTML + "</head><body></body></html>"
|
||||||
|
doc, err := html.Parse(strings.NewReader(fullHTML))
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("failed to parse HTML: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Find the target element node (meta or div based on targetNode)
|
||||||
|
var node *html.Node
|
||||||
|
var findNode func(*html.Node)
|
||||||
|
findNode = func(n *html.Node) {
|
||||||
|
// Skip finding if already found
|
||||||
|
if node != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
// Check if current node matches type and tag data
|
||||||
|
if n.Type == html.ElementNode && n.Data == tt.targetNode {
|
||||||
|
node = n
|
||||||
|
return
|
||||||
|
}
|
||||||
|
// Recursively check children
|
||||||
|
for c := n.FirstChild; c != nil; c = c.NextSibling {
|
||||||
|
findNode(c)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
findNode(doc) // Start search from root
|
||||||
|
|
||||||
|
if node == nil {
|
||||||
|
t.Fatalf("Could not find target node '%s' in test HTML", tt.targetNode)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Call the function under test
|
||||||
|
result := isOGMetaTag(node)
|
||||||
|
if result != tt.expected {
|
||||||
|
t.Errorf("expected %v, got %v", tt.expected, result)
|
||||||
|
}
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestExtractMetaTagInfo(t *testing.T) {
|
||||||
|
// Use a cache instance that reflects the default approved lists
|
||||||
|
testCache := NewOGTagCache("", false, time.Minute)
|
||||||
|
testCache.approvedTags = []string{"description"}
|
||||||
|
testCache.approvedPrefixes = []string{"og:"}
|
||||||
|
|
||||||
|
tests := []struct {
|
||||||
|
name string
|
||||||
|
nodeHTML string
|
||||||
|
expectedProperty string
|
||||||
|
expectedContent string
|
||||||
|
}{
|
||||||
|
{
|
||||||
|
name: "OG title with property (approved by prefix)",
|
||||||
|
nodeHTML: `<meta property="og:title" content="Test Title">`,
|
||||||
|
expectedProperty: "og:title",
|
||||||
|
expectedContent: "Test Title",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "OG description with name (approved by prefix)",
|
||||||
|
nodeHTML: `<meta name="og:description" content="Test Description">`,
|
||||||
|
expectedProperty: "og:description",
|
||||||
|
expectedContent: "Test Description",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "Regular meta tag (name=description, approved by exact match)", // Updated name for clarity
|
||||||
|
nodeHTML: `<meta name="description" content="Test Description">`,
|
||||||
|
expectedProperty: "description",
|
||||||
|
expectedContent: "Test Description",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "Regular meta tag (name=keywords, not approved)",
|
||||||
|
nodeHTML: `<meta name="keywords" content="Test Keywords">`,
|
||||||
|
expectedProperty: "",
|
||||||
|
expectedContent: "Test Keywords",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "Twitter tag (not approved by default)",
|
||||||
|
nodeHTML: `<meta name="twitter:card" content="summary">`,
|
||||||
|
expectedProperty: "",
|
||||||
|
expectedContent: "summary",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "No content (but approved property)",
|
||||||
|
nodeHTML: `<meta property="og:title">`,
|
||||||
|
expectedProperty: "og:title",
|
||||||
|
expectedContent: "",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "No property/name attribute",
|
||||||
|
nodeHTML: `<meta content="No property">`,
|
||||||
|
expectedProperty: "",
|
||||||
|
expectedContent: "No property",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "Explicitly approved tag with property attribute",
|
||||||
|
nodeHTML: `<meta property="description" content="Approved Description Tag">`,
|
||||||
|
expectedProperty: "description", // Approved by exact match in approvedTags
|
||||||
|
expectedContent: "Approved Description Tag",
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, tt := range tests {
|
||||||
|
t.Run(tt.name, func(t *testing.T) {
|
||||||
|
fullHTML := "<html><head>" + tt.nodeHTML + "</head><body></body></html>"
|
||||||
|
doc, err := html.Parse(strings.NewReader(fullHTML))
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("failed to parse HTML: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
var node *html.Node
|
||||||
|
var findMetaNode func(*html.Node)
|
||||||
|
findMetaNode = func(n *html.Node) {
|
||||||
|
if node != nil { // Stop searching once found
|
||||||
|
return
|
||||||
|
}
|
||||||
|
if n.Type == html.ElementNode && n.Data == "meta" {
|
||||||
|
node = n
|
||||||
|
return
|
||||||
|
}
|
||||||
|
for c := n.FirstChild; c != nil; c = c.NextSibling {
|
||||||
|
findMetaNode(c)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
findMetaNode(doc) // Start search from root
|
||||||
|
|
||||||
|
if node == nil {
|
||||||
|
// Handle cases where the input might not actually contain a meta tag, though all test cases do.
|
||||||
|
// If the test case is *designed* not to have a meta tag, this check should be different.
|
||||||
|
// But for these tests, failure to find implies an issue with the test setup or parser.
|
||||||
|
t.Fatalf("Could not find meta node in test HTML: %s", tt.nodeHTML)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Call extractMetaTagInfo using the test cache instance
|
||||||
|
property, content := testCache.extractMetaTagInfo(node)
|
||||||
|
|
||||||
|
if property != tt.expectedProperty {
|
||||||
|
t.Errorf("expected property '%s', got '%s'", tt.expectedProperty, property)
|
||||||
|
}
|
||||||
|
|
||||||
|
if content != tt.expectedContent {
|
||||||
|
t.Errorf("expected content '%s', got '%s'", tt.expectedContent, content)
|
||||||
|
}
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
@@ -3,6 +3,7 @@ package internal
|
|||||||
import (
|
import (
|
||||||
"fmt"
|
"fmt"
|
||||||
"log/slog"
|
"log/slog"
|
||||||
|
"net/http"
|
||||||
"os"
|
"os"
|
||||||
)
|
)
|
||||||
|
|
||||||
@@ -22,3 +23,14 @@ func InitSlog(level string) {
|
|||||||
})
|
})
|
||||||
slog.SetDefault(slog.New(h))
|
slog.SetDefault(slog.New(h))
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func GetRequestLogger(r *http.Request) *slog.Logger {
|
||||||
|
return slog.With(
|
||||||
|
"user_agent", r.UserAgent(),
|
||||||
|
"accept_language", r.Header.Get("Accept-Language"),
|
||||||
|
"priority", r.Header.Get("Priority"),
|
||||||
|
"x-forwarded-for",
|
||||||
|
r.Header.Get("X-Forwarded-For"),
|
||||||
|
"x-real-ip", r.Header.Get("X-Real-Ip"),
|
||||||
|
)
|
||||||
|
}
|
||||||
|
|||||||
@@ -18,11 +18,13 @@ package test
|
|||||||
import (
|
import (
|
||||||
"flag"
|
"flag"
|
||||||
"fmt"
|
"fmt"
|
||||||
|
"net"
|
||||||
"net/http"
|
"net/http"
|
||||||
"net/http/httptest"
|
"net/http/httptest"
|
||||||
"net/url"
|
"net/url"
|
||||||
"os"
|
"os"
|
||||||
"os/exec"
|
"os/exec"
|
||||||
|
"strconv"
|
||||||
"testing"
|
"testing"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
@@ -50,6 +52,24 @@ var (
|
|||||||
realIP: placeholderIP,
|
realIP: placeholderIP,
|
||||||
userAgent: "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) HeadlessChrome/120.0.6099.28 Safari/537.36",
|
userAgent: "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) HeadlessChrome/120.0.6099.28 Safari/537.36",
|
||||||
},
|
},
|
||||||
|
{
|
||||||
|
name: "Amazonbot",
|
||||||
|
action: actionDeny,
|
||||||
|
realIP: placeholderIP,
|
||||||
|
userAgent: "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_1) AppleWebKit/600.2.5 (KHTML, like Gecko) Version/8.0.2 Safari/600.2.5 (Amazonbot/0.1; +https://developer.amazon.com/support/amazonbot)",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "Amazonbot",
|
||||||
|
action: actionDeny,
|
||||||
|
realIP: placeholderIP,
|
||||||
|
userAgent: "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_1) AppleWebKit/600.2.5 (KHTML, like Gecko) Version/8.0.2 Safari/600.2.5 (Amazonbot/0.1; +https://developer.amazon.com/support/amazonbot)",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "PerplexityAI",
|
||||||
|
action: actionDeny,
|
||||||
|
realIP: placeholderIP,
|
||||||
|
userAgent: "Mozilla/5.0 AppleWebKit/537.36 (KHTML, like Gecko; compatible; PerplexityBot/1.0; +https://perplexity.ai/perplexitybot)",
|
||||||
|
},
|
||||||
{
|
{
|
||||||
name: "kagiBadIP",
|
name: "kagiBadIP",
|
||||||
action: actionChallenge,
|
action: actionChallenge,
|
||||||
@@ -78,7 +98,7 @@ const (
|
|||||||
actionChallenge action = "CHALLENGE"
|
actionChallenge action = "CHALLENGE"
|
||||||
|
|
||||||
placeholderIP = "fd11:5ee:bad:c0de::"
|
placeholderIP = "fd11:5ee:bad:c0de::"
|
||||||
playwrightVersion = "1.50.1"
|
playwrightVersion = "1.51.1"
|
||||||
)
|
)
|
||||||
|
|
||||||
type action string
|
type action string
|
||||||
@@ -99,6 +119,9 @@ func doesNPXExist(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func run(t *testing.T, command string) string {
|
func run(t *testing.T, command string) string {
|
||||||
|
if testing.Short() {
|
||||||
|
t.Skip("skipping integration smoke testing in short mode")
|
||||||
|
}
|
||||||
t.Helper()
|
t.Helper()
|
||||||
|
|
||||||
shPath, err := exec.LookPath("sh")
|
shPath, err := exec.LookPath("sh")
|
||||||
@@ -242,6 +265,132 @@ func TestPlaywrightBrowser(t *testing.T) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func TestPlaywrightWithBasePrefix(t *testing.T) {
|
||||||
|
if os.Getenv("DONT_USE_NETWORK") != "" {
|
||||||
|
t.Skip("test requires network egress")
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
t.Skip("NOTE(Xe)\\ these tests require HTTPS support in #364")
|
||||||
|
|
||||||
|
doesNPXExist(t)
|
||||||
|
startPlaywright(t)
|
||||||
|
|
||||||
|
pw := setupPlaywright(t)
|
||||||
|
basePrefix := "/myapp"
|
||||||
|
anubisURL := spawnAnubisWithOptions(t, basePrefix)
|
||||||
|
|
||||||
|
// Reset BasePrefix after test
|
||||||
|
t.Cleanup(func() {
|
||||||
|
anubis.BasePrefix = ""
|
||||||
|
})
|
||||||
|
|
||||||
|
browsers := []playwright.BrowserType{pw.Chromium}
|
||||||
|
|
||||||
|
for _, typ := range browsers {
|
||||||
|
t.Run(typ.Name()+"/basePrefix", func(t *testing.T) {
|
||||||
|
browser, err := typ.Connect(buildBrowserConnect(typ.Name()), playwright.BrowserTypeConnectOptions{
|
||||||
|
ExposeNetwork: playwright.String("<loopback>"),
|
||||||
|
})
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("could not connect to remote browser: %v", err)
|
||||||
|
}
|
||||||
|
defer browser.Close()
|
||||||
|
|
||||||
|
ctx, err := browser.NewContext(playwright.BrowserNewContextOptions{
|
||||||
|
AcceptDownloads: playwright.Bool(false),
|
||||||
|
ExtraHttpHeaders: map[string]string{
|
||||||
|
"X-Real-Ip": "127.0.0.1",
|
||||||
|
},
|
||||||
|
UserAgent: playwright.String("Mozilla/5.0 (X11; Linux x86_64; rv:136.0) Gecko/20100101 Firefox/136.0"),
|
||||||
|
})
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("could not create context: %v", err)
|
||||||
|
}
|
||||||
|
defer ctx.Close()
|
||||||
|
|
||||||
|
page, err := ctx.NewPage()
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("could not create page: %v", err)
|
||||||
|
}
|
||||||
|
defer page.Close()
|
||||||
|
|
||||||
|
// Test accessing the base URL with prefix
|
||||||
|
_, err = page.Goto(anubisURL+basePrefix, playwright.PageGotoOptions{
|
||||||
|
Timeout: pwTimeout(testCases[0], time.Now().Add(5*time.Second)),
|
||||||
|
})
|
||||||
|
if err != nil {
|
||||||
|
pwFail(t, page, "could not navigate to test server with base prefix: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Check if challenge page is displayed
|
||||||
|
image := page.Locator("#image[src*=pensive], #image[src*=happy]")
|
||||||
|
err = image.WaitFor(playwright.LocatorWaitForOptions{
|
||||||
|
Timeout: pwTimeout(testCases[0], time.Now().Add(5*time.Second)),
|
||||||
|
})
|
||||||
|
if err != nil {
|
||||||
|
pwFail(t, page, "could not wait for challenge image: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
isVisible, err := image.IsVisible()
|
||||||
|
if err != nil {
|
||||||
|
pwFail(t, page, "could not check if challenge image is visible: %v", err)
|
||||||
|
}
|
||||||
|
if !isVisible {
|
||||||
|
pwFail(t, page, "challenge image not visible")
|
||||||
|
}
|
||||||
|
|
||||||
|
// Complete the challenge
|
||||||
|
// Wait for the challenge to be solved
|
||||||
|
anubisTest := page.Locator("#anubis-test")
|
||||||
|
err = anubisTest.WaitFor(playwright.LocatorWaitForOptions{
|
||||||
|
Timeout: pwTimeout(testCases[0], time.Now().Add(30*time.Second)),
|
||||||
|
})
|
||||||
|
if err != nil {
|
||||||
|
pwFail(t, page, "could not wait for challenge to be solved: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Verify the challenge was solved
|
||||||
|
content, err := anubisTest.TextContent(playwright.LocatorTextContentOptions{})
|
||||||
|
if err != nil {
|
||||||
|
pwFail(t, page, "could not get text content: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
var tm int64
|
||||||
|
if _, err := fmt.Sscanf(content, "%d", &tm); err != nil {
|
||||||
|
pwFail(t, page, "unexpected output: %s", content)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Check if the timestamp is reasonable
|
||||||
|
now := time.Now().Unix()
|
||||||
|
if tm < now-60 || tm > now+60 {
|
||||||
|
pwFail(t, page, "unexpected timestamp in output: %d not in range %d±60", tm, now)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Check if cookie has the correct path
|
||||||
|
cookies, err := ctx.Cookies()
|
||||||
|
if err != nil {
|
||||||
|
pwFail(t, page, "could not get cookies: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
var found bool
|
||||||
|
for _, cookie := range cookies {
|
||||||
|
if cookie.Name == anubis.CookieName {
|
||||||
|
found = true
|
||||||
|
if cookie.Path != basePrefix+"/" {
|
||||||
|
t.Errorf("cookie path is wrong, wanted %s, got: %s", basePrefix+"/", cookie.Path)
|
||||||
|
}
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if !found {
|
||||||
|
t.Errorf("Cookie %q not found", anubis.CookieName)
|
||||||
|
}
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
func buildBrowserConnect(name string) string {
|
func buildBrowserConnect(name string) string {
|
||||||
u, _ := url.Parse(*playwrightServer)
|
u, _ := url.Parse(*playwrightServer)
|
||||||
|
|
||||||
@@ -355,14 +504,14 @@ func pwFail(t *testing.T, page playwright.Page, format string, args ...any) erro
|
|||||||
}
|
}
|
||||||
|
|
||||||
func pwTimeout(tc testCase, deadline time.Time) *float64 {
|
func pwTimeout(tc testCase, deadline time.Time) *float64 {
|
||||||
max := *playwrightMaxTime
|
maxTime := *playwrightMaxTime
|
||||||
if tc.isHard {
|
if tc.isHard {
|
||||||
max = *playwrightMaxHardTime
|
maxTime = *playwrightMaxHardTime
|
||||||
}
|
}
|
||||||
|
|
||||||
d := time.Until(deadline)
|
d := time.Until(deadline)
|
||||||
if d <= 0 || d > max {
|
if d <= 0 || d > maxTime {
|
||||||
return playwright.Float(float64(max.Milliseconds()))
|
return playwright.Float(float64(maxTime.Milliseconds()))
|
||||||
}
|
}
|
||||||
return playwright.Float(float64(d.Milliseconds()))
|
return playwright.Float(float64(d.Milliseconds()))
|
||||||
}
|
}
|
||||||
@@ -408,6 +557,10 @@ func setupPlaywright(t *testing.T) *playwright.Playwright {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func spawnAnubis(t *testing.T) string {
|
func spawnAnubis(t *testing.T) string {
|
||||||
|
return spawnAnubisWithOptions(t, "")
|
||||||
|
}
|
||||||
|
|
||||||
|
func spawnAnubisWithOptions(t *testing.T, basePrefix string) string {
|
||||||
t.Helper()
|
t.Helper()
|
||||||
|
|
||||||
h := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
h := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||||
@@ -420,16 +573,31 @@ func spawnAnubis(t *testing.T) string {
|
|||||||
t.Fatal(err)
|
t.Fatal(err)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
listener, err := net.Listen("tcp", ":0")
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("can't listen on random port: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
addr := listener.Addr().(*net.TCPAddr)
|
||||||
|
host := "localhost"
|
||||||
|
port := strconv.Itoa(addr.Port)
|
||||||
|
|
||||||
s, err := libanubis.New(libanubis.Options{
|
s, err := libanubis.New(libanubis.Options{
|
||||||
Next: h,
|
Next: h,
|
||||||
Policy: policy,
|
Policy: policy,
|
||||||
ServeRobotsTXT: true,
|
ServeRobotsTXT: true,
|
||||||
|
Target: "http://" + host + ":" + port,
|
||||||
|
BasePrefix: basePrefix,
|
||||||
})
|
})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatalf("can't construct libanubis.Server: %v", err)
|
t.Fatalf("can't construct libanubis.Server: %v", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
ts := httptest.NewServer(s)
|
ts := &httptest.Server{
|
||||||
|
Listener: listener,
|
||||||
|
Config: &http.Server{Handler: s},
|
||||||
|
}
|
||||||
|
ts.Start()
|
||||||
t.Log(ts.URL)
|
t.Log(ts.URL)
|
||||||
|
|
||||||
t.Cleanup(func() {
|
t.Cleanup(func() {
|
||||||
|
|||||||