mirror of
https://github.com/TecharoHQ/anubis.git
synced 2026-04-12 19:48:44 +00:00
This is a huge change to Anubis and will make it a lot more invisible at the cost of requiring additional server configuration to make it happen. If you add this bit of nginx config to your location block: ```nginx proxy_set_header X-Http-Version $server_protocol; ``` And then adjust the bottom bot rule to this: ```yaml - import: (data)/common/challenge-browser-like.yaml ``` Anubis will be way less aggressive than it was before. This will let through any traffic that comes from a browser that actually is a browser via some more advanced heuristics. I think that this rule alone is the key feature of v1.18.0. Signed-off-by: Xe Iaso <me@xeiaso.net>
50 lines
2.0 KiB
YAML
## Anubis has the ability to let you import snippets of configuration into the main
## configuration file. This allows you to break up your config into smaller parts
## that get logically assembled into one big file.
##
## Of note, a bot rule can either have inline bot configuration or import a
## bot config snippet. You cannot do both in a single bot rule.
##
## Import paths can either be prefixed with (data) to import from the common/shared
## rules in the data folder in the Anubis source tree or will point to absolute/relative
## paths in your filesystem. If you don't have access to the Anubis source tree, check
## /usr/share/docs/anubis/data or in the tarball you extracted Anubis from.

bots:
  # Pathological bots to deny
  - # This correlates to data/bots/ai-robots-txt.yaml in the source tree
    import: (data)/bots/ai-robots-txt.yaml
  - import: (data)/bots/cloudflare-workers.yaml
  - import: (data)/bots/headless-browsers.yaml
  - import: (data)/bots/us-ai-scraper.yaml
  - import: (data)/bots/aggressive-brazilian-scrapers.yaml
  - import: (data)/clients/curl-impersonate.yaml

  # Search engines to allow
  - import: (data)/crawlers/googlebot.yaml
  - import: (data)/crawlers/bingbot.yaml
  - import: (data)/crawlers/duckduckbot.yaml
  - import: (data)/crawlers/qwantbot.yaml
  - import: (data)/crawlers/internet-archive.yaml
  - import: (data)/crawlers/kagibot.yaml
  - import: (data)/crawlers/marginalia.yaml
  - import: (data)/crawlers/mojeekbot.yaml

  # Allow common "keeping the internet working" routes (well-known, favicon, robots.txt)
  - import: (data)/common/keep-internet-working.yaml

  # # Punish any bot with "bot" in the user-agent string
  # # This is known to have a high false-positive rate, use at your own risk
  # - name: generic-bot-catchall
  #   user_agent_regex: (?i:bot|crawler)
  #   action: CHALLENGE
  #   challenge:
  #     difficulty: 16  # impossible
  #     report_as: 4  # lie to the operator
  #     algorithm: slow  # intentionally waste CPU cycles and time

  # Challenge things that claim to be a browser but are not
  - import: (data)/common/challenge-browser-like.yaml

dnsbl: false