Compare commits

...

No commits in common. "v0.3.0" and "main" have entirely different histories.
v0.3.0 ... main

92 changed files with 9274 additions and 1353 deletions

9
.ecrc Normal file

@@ -0,0 +1,9 @@
{
"Exclude": [
".git",
"go.mod", "go.sum",
"vendor",
"LICENSE",
"_test.go"
]
}

17
.editorconfig Normal file

@@ -0,0 +1,17 @@
root = true
[*]
indent_style = space
indent_size = 2
tab_width = 2
end_of_line = lf
charset = utf-8
trim_trailing_whitespace = true
insert_final_newline = true
[*.go]
indent_style = tab
[*.md]
trim_trailing_whitespace = false
indent_size = 1

11
.env-dev Normal file

@@ -0,0 +1,11 @@
ACME_API=https://acme.mock.directory
ACME_ACCEPT_TERMS=true
PAGES_DOMAIN=localhost.mock.directory
RAW_DOMAIN=raw.localhost.mock.directory
PAGES_BRANCHES=pages,master,main
GITEA_ROOT=https://codeberg.org
PORT=4430
HTTP_PORT=8880
ENABLE_HTTP_SERVER=true
LOG_LEVEL=trace
ACME_ACCOUNT_CONFIG=integration/acme-account.json

1
.envrc Normal file

@@ -0,0 +1 @@
use_flake


@@ -0,0 +1,5 @@
blank_issues_enabled: true
contact_links:
- name: Codeberg Pages Usage Support
url: https://codeberg.org/Codeberg/Community/issues/
about: If you need help with configuring Codeberg Pages on codeberg.org, please go here.

14
.gitignore vendored

@@ -1,2 +1,12 @@
target/
completions/
.idea/
.cache/
*.iml
key-database.pogreb/
acme-account.json
build/
vendor/
pages
certs.sqlite
.bash_history
pkg/
.direnv/

34
.golangci.yml Normal file

@@ -0,0 +1,34 @@
linters-settings:
gocritic:
enabled-tags:
- diagnostic
- experimental
- opinionated
- performance
- style
disabled-checks:
- importShadow
- ifElseChain
- hugeParam
linters:
disable-all: true
enable:
- unconvert
- gocritic
- gofumpt
- bidichk
- errcheck
- gofmt
- goimports
- gosimple
- govet
- ineffassign
- misspell
- staticcheck
- typecheck
- unused
- whitespace
run:
timeout: 5m

8
.prettierrc.json Normal file

@@ -0,0 +1,8 @@
{
"semi": true,
"trailingComma": "all",
"singleQuote": true,
"printWidth": 120,
"tabWidth": 2,
"endOfLine": "lf"
}

26
.vscode/launch.json vendored Normal file

@@ -0,0 +1,26 @@
{
// Use IntelliSense to learn about possible attributes.
// Hover to view descriptions of existing attributes.
// For more information, visit: https://go.microsoft.com/fwlink/?linkid=830387
"version": "0.2.0",
"configurations": [
{
"name": "Launch PagesServer",
"type": "go",
"request": "launch",
"mode": "auto",
"program": "${workspaceFolder}/main.go",
"args": ["sqlite", "sqlite_unlock_notify", "netgo"],
"envFile": "${workspaceFolder}/.env-dev"
},
{
"name": "Launch PagesServer integration test",
"type": "go",
"request": "launch",
"mode": "auto",
"program": "${workspaceFolder}/integration/main_test.go",
"args": ["codeberg.org/codeberg/pages/integration/..."],
"buildFlags": ["-tags", "'integration sqlite sqlite_unlock_notify netgo'"]
}
]
}

123
.woodpecker/build.yml Normal file

@@ -0,0 +1,123 @@
when:
- event: [push, pull_request, tag, cron]
branch: ${CI_REPO_DEFAULT_BRANCH}
steps:
# use vendor to cache dependencies
vendor:
image: golang:1.24
commands:
- go mod vendor
build:
depends_on: vendor
image: codeberg.org/6543/docker-images/golang_just:go-1.24
commands:
- go version
- just build
when:
- event: [push, pull_request, tag]
docker-dryrun:
depends_on: vendor
image: woodpeckerci/plugin-docker-buildx:5.2.1
settings:
dockerfile: Dockerfile
platforms: linux/amd64
dry-run: true
tags: latest
when:
- event: [pull_request]
path: Dockerfile
build-tag:
depends_on: vendor
image: codeberg.org/6543/docker-images/golang_just:go-1.24
commands:
- go version
- just build-tag ${CI_COMMIT_TAG##v}
when:
- event: [tag]
test:
depends_on: build
image: codeberg.org/6543/docker-images/golang_just:go-1.24
commands:
- just test
when:
- event: [pull_request]
integration-tests:
failure: ignore
depends_on: build
image: codeberg.org/6543/docker-images/golang_just:go-1.24
commands:
- just integration
environment:
ACME_API: https://acme.mock.directory
PAGES_DOMAIN: localhost.mock.directory
RAW_DOMAIN: raw.localhost.mock.directory
PORT: 4430
when:
- event: [pull_request]
release:
depends_on: build
image: woodpeckerci/plugin-release:0.2.5
settings:
base_url: https://codeberg.org
file_exists: overwrite
files: build/codeberg-pages-server
api_key:
from_secret: bot_token
when:
- event: [tag]
docker-next:
depends_on: vendor
image: woodpeckerci/plugin-docker-buildx:5.2.1
settings:
registry: codeberg.org
dockerfile: Dockerfile
platforms: linux/amd64,arm64
repo: codeberg.org/codeberg/pages-server
tags: next
username:
from_secret: bot_user
password:
from_secret: bot_token
when:
- event: [push]
'Publish PR image':
image: woodpeckerci/plugin-docker-buildx:5.2.1
depends_on: test
settings:
registry: codeberg.org
dockerfile: Dockerfile
platforms: linux/amd64
repo: codeberg.org/codeberg/pages-server
tags: next
username:
from_secret: bot_user
password:
from_secret: bot_token
when:
evaluate: 'CI_COMMIT_PULL_REQUEST_LABELS contains "build_pr_image"'
event: pull_request
docker-release:
depends_on: vendor
image: woodpeckerci/plugin-docker-buildx:5.2.1
settings:
registry: codeberg.org
dockerfile: Dockerfile
platforms: linux/amd64,arm64
repo: codeberg.org/codeberg/pages-server
tags: [latest, '${CI_COMMIT_TAG}']
username:
from_secret: bot_user
password:
from_secret: bot_token
when:
- event: [tag]


@@ -1,29 +0,0 @@
branches: ["main"]
matrix:
RELEASE_DEBUG:
- ""
- "--release"
pipeline:
lint:
image: rust:alpine
pull: true
when:
path:
include: ["src/**", "Cargo.toml", "Cargo.lock", ".woodpecker/lamp.yml"]
commands:
- apk add musl-dev
- rustup component add clippy rustfmt
- cargo fmt -- --check
- cargo clippy --locked ${RELEASE_DEBUG} -- --deny clippy::all --deny warnings
build:
image: rust:alpine
pull: true
when:
path:
include: ["src/**", "Cargo.toml", "Cargo.lock", ".woodpecker/lamp.yml"]
commands:
- apk add musl-dev
- cargo build --locked ${RELEASE_DEBUG}

30
.woodpecker/lint.yml Normal file

@@ -0,0 +1,30 @@
when:
- event: pull_request
branch:
- ${CI_REPO_DEFAULT_BRANCH}
steps:
lint:
depends_on: []
image: golangci/golangci-lint:v1.64.8
commands:
- go version
- go install mvdan.cc/gofumpt@latest
- "[ $(gofumpt -extra -l . | wc -l) != 0 ] && { echo 'code not formated'; exit 1; }"
- golangci-lint run --timeout 10m --build-tags integration
editor-config:
depends_on: []
image: mstruebing/editorconfig-checker:v3.2.0
yamllint:
image: pipelinecomponents/yamllint:0.33.0
depends_on: []
commands:
- yamllint .
prettier:
image: docker.io/woodpeckerci/plugin-prettier:1.3.0
depends_on: []
settings:
version: 3.2.5

19
.yamllint.yaml Normal file

@@ -0,0 +1,19 @@
extends: default
rules:
comments:
require-starting-space: false
ignore-shebangs: true
min-spaces-from-content: 1
braces:
min-spaces-inside: 1
max-spaces-inside: 1
document-start:
present: false
indentation:
spaces: 2
indent-sequences: true
line-length:
max: 256
new-lines:
type: unix


@@ -1,4 +0,0 @@
ACTION=="add", SUBSYSTEM=="backlight", RUN+="/bin/chgrp video /sys/class/backlight/%k/brightness"
ACTION=="add", SUBSYSTEM=="backlight", RUN+="/bin/chmod g+w /sys/class/backlight/%k/brightness"
ACTION=="add", SUBSYSTEM=="leds", RUN+="/bin/chgrp video /sys/class/leds/%k/brightness"
ACTION=="add", SUBSYSTEM=="leds", RUN+="/bin/chmod g+w /sys/class/leds/%k/brightness"

163
Cargo.lock generated

@@ -1,163 +0,0 @@
# This file is automatically @generated by Cargo.
# It is not intended for manual editing.
version = 3
[[package]]
name = "atty"
version = "0.2.14"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d9b39be18770d11421cdb1b9947a45dd3f37e93092cbf377614828a319d5fee8"
dependencies = [
"hermit-abi",
"libc",
"winapi",
]
[[package]]
name = "autocfg"
version = "1.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d468802bab17cbc0cc575e9b053f41e72aa36bfa6b7f55e3529ffa43161b97fa"
[[package]]
name = "bitflags"
version = "1.3.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a"
[[package]]
name = "clap"
version = "3.2.12"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ab8b79fe3946ceb4a0b1c080b4018992b8d27e9ff363644c1c9b6387c854614d"
dependencies = [
"atty",
"bitflags",
"clap_lex",
"indexmap",
"strsim",
"termcolor",
"textwrap",
]
[[package]]
name = "clap_complete"
version = "3.2.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ead064480dfc4880a10764488415a97fdd36a4cf1bb022d372f02e8faf8386e1"
dependencies = [
"clap",
]
[[package]]
name = "clap_lex"
version = "0.2.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2850f2f5a82cbf437dd5af4d49848fbdfc27c157c3d010345776f952765261c5"
dependencies = [
"os_str_bytes",
]
[[package]]
name = "exitcode"
version = "1.1.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "de853764b47027c2e862a995c34978ffa63c1501f2e15f987ba11bd4f9bba193"
[[package]]
name = "hashbrown"
version = "0.12.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "607c8a29735385251a339424dd462993c0fed8fa09d378f259377df08c126022"
[[package]]
name = "hermit-abi"
version = "0.1.19"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "62b467343b94ba476dcb2500d242dadbb39557df889310ac77c5d99100aaac33"
dependencies = [
"libc",
]
[[package]]
name = "indexmap"
version = "1.9.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "10a35a97730320ffe8e2d410b5d3b69279b98d2c14bdb8b70ea89ecf7888d41e"
dependencies = [
"autocfg",
"hashbrown",
]
[[package]]
name = "lamp"
version = "0.3.0"
dependencies = [
"clap",
"clap_complete",
"exitcode",
]
[[package]]
name = "libc"
version = "0.2.126"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "349d5a591cd28b49e1d1037471617a32ddcda5731b99419008085f72d5a53836"
[[package]]
name = "os_str_bytes"
version = "6.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "21326818e99cfe6ce1e524c2a805c189a99b5ae555a35d19f9a284b427d86afa"
[[package]]
name = "strsim"
version = "0.10.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "73473c0e59e6d5812c5dfe2a064a6444949f089e20eec9a2e5506596494e4623"
[[package]]
name = "termcolor"
version = "1.1.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "bab24d30b911b2376f3a13cc2cd443142f0c81dda04c118693e35b3835757755"
dependencies = [
"winapi-util",
]
[[package]]
name = "textwrap"
version = "0.15.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b1141d4d61095b28419e22cb0bbf02755f5e54e0526f97f1e3d1d160e60885fb"
[[package]]
name = "winapi"
version = "0.3.9"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5c839a674fcd7a98952e593242ea400abe93992746761e38641405d28b00f419"
dependencies = [
"winapi-i686-pc-windows-gnu",
"winapi-x86_64-pc-windows-gnu",
]
[[package]]
name = "winapi-i686-pc-windows-gnu"
version = "0.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6"
[[package]]
name = "winapi-util"
version = "0.1.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "70ec6ce85bb158151cae5e5c87f95a8e97d2c0c4b001223f33a334e3ce5de178"
dependencies = [
"winapi",
]
[[package]]
name = "winapi-x86_64-pc-windows-gnu"
version = "0.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f"


@@ -1,34 +0,0 @@
# This file is part of the "lamp" program.
# Copyright (C) 2022 crapStone
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
[package]
name = "lamp"
version = "0.3.0"
authors = ["crapStone <crapstone01@gmail.com>"]
license = "GPL-3.0-or-later"
description = "A Linux backlight utility inspired by acpibacklight"
homepage = "https://codeberg.org/crapStone/lamp"
repository = "https://codeberg.org/crapStone/lamp"
readme = "README.md"
categories = ["command-line-utilities"]
edition = "2021"
build = "build.rs"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[build-dependencies]
clap = "3.2.12"
clap_complete = "3.2.3"
[dependencies]
clap = "3.2.12"
exitcode = "1.1.2"
[profile.release]
lto = true
panic = "abort"
strip = true
codegen-units = 1

36
Dockerfile Normal file

@@ -0,0 +1,36 @@
# Set the default Go version as a build argument
ARG XGO="go-1.24.x"
# Use xgo (a Go cross-compiler tool) as build image
FROM --platform=$BUILDPLATFORM techknowlogick/xgo:${XGO} AS build
# Set the working directory and copy the source code
WORKDIR /go/src/codeberg.org/codeberg/pages
COPY . /go/src/codeberg.org/codeberg/pages
# Set the target architecture (can be set using --build-arg); buildx sets it automatically
ARG TARGETOS TARGETARCH
# Build the binary using xgo
RUN --mount=type=cache,target=/root/.cache/go-build \
--mount=type=cache,target=/go/pkg \
GOOS=${TARGETOS} GOARCH=${TARGETARCH} CGO_ENABLED=1 \
xgo -x -v --targets=${TARGETOS}/${TARGETARCH} -tags='sqlite sqlite_unlock_notify netgo' -ldflags='-s -w -extldflags "-static" -linkmode external' -out pages .
RUN mv -vf /build/pages-* /go/src/codeberg.org/codeberg/pages/pages
# Use a scratch image as the base image for the final container,
# which will contain only the built binary and the CA certificates
FROM scratch
# Copy the built binary and the CA certificates from the build container to the final container
COPY --from=build /go/src/codeberg.org/codeberg/pages/pages /pages
COPY --from=build \
/etc/ssl/certs/ca-certificates.crt \
/etc/ssl/certs/ca-certificates.crt
# Expose ports 80 and 443 for the built binary to listen on
EXPOSE 80/tcp
EXPOSE 443/tcp
# Set the entrypoint for the container to the built binary
ENTRYPOINT ["/pages"]
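As a rough local-build sketch (the image tag is a placeholder, and `XGO` simply repeats the default already declared above), the multi-stage image can be built with buildx, which fills in `TARGETOS`/`TARGETARCH` automatically:
```sh
# Single-platform local build of the pages server image
docker buildx build \
  --build-arg XGO=go-1.24.x \
  --platform linux/amd64 \
  -t pages-server:local \
  .
```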

51
FEATURES.md Normal file

@@ -0,0 +1,51 @@
# Features
## Custom domains
Custom domains can be used by creating a `.domains` file with the domain name, e.g.:
```text
codeberg.page
```
You also have to set some DNS records, see the [Codeberg Documentation](https://docs.codeberg.org/codeberg-pages/using-custom-domain/).
## Redirects
Redirects can be created with a `_redirects` file with the following format:
```text
# Comment
from to [status]
```
- Lines starting with `#` are ignored
- `from` - the path to redirect from (Note: repository and branch names are removed from request URLs)
- `to` - the path or URL to redirect to
- `status` - status code to use when redirecting (default 301)
### Status codes
- `200` - returns content from specified path (no external URLs) without changing the URL (rewrite)
- `301` - Moved Permanently (Permanent redirect)
- `302` - Found (Temporary redirect)
### Examples
#### SPA (single-page application) rewrite
Redirects all paths to `/index.html` for single-page apps.
```text
/* /index.html 200
```
#### Splats
Redirects every path under `/articles` to `/posts` while keeping the path.
```text
/articles/* /posts/:splat 302
```
Example: `/articles/2022/10/12/post-1/` -> `/posts/2022/10/12/post-1/`
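#### Simple redirect
A plain path-to-path redirect uses the same `from to [status]` pattern; the paths below are hypothetical and only illustrate the format described above:
```text
/old-post /blog/new-post 302
```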

52
Justfile Normal file

@@ -0,0 +1,52 @@
CGO_FLAGS := '-extldflags "-static" -linkmode external'
TAGS := 'sqlite sqlite_unlock_notify netgo'
dev *FLAGS:
#!/usr/bin/env bash
set -euxo pipefail
set -a # automatically export all variables
source .env-dev
set +a
go run -tags '{{TAGS}}' . {{FLAGS}}
build:
CGO_ENABLED=1 go build -tags '{{TAGS}}' -ldflags '-s -w {{CGO_FLAGS}}' -v -o build/codeberg-pages-server ./
build-tag VERSION:
CGO_ENABLED=1 go build -tags '{{TAGS}}' -ldflags '-s -w -X "codeberg.org/codeberg/pages/server/version.Version={{VERSION}}" {{CGO_FLAGS}}' -v -o build/codeberg-pages-server ./
lint: tool-golangci tool-gofumpt
golangci-lint run --timeout 5m --build-tags integration
# TODO: run editorconfig-checker
fmt: tool-gofumpt
gofumpt -w --extra .
clean:
go clean ./...
rm -rf build/ integration/certs.sqlite integration/acme-account.json
tool-golangci:
@hash golangci-lint> /dev/null 2>&1; if [ $? -ne 0 ]; then \
go install github.com/golangci/golangci-lint/cmd/golangci-lint@latest; \
fi
tool-gofumpt:
@hash gofumpt> /dev/null 2>&1; if [ $? -ne 0 ]; then \
go install mvdan.cc/gofumpt@latest; \
fi
test:
go test -race -cover -tags '{{TAGS}}' codeberg.org/codeberg/pages/config/ codeberg.org/codeberg/pages/html/ codeberg.org/codeberg/pages/server/...
test-run TEST:
go test -race -tags '{{TAGS}}' -run "^{{TEST}}$" codeberg.org/codeberg/pages/config/ codeberg.org/codeberg/pages/html/ codeberg.org/codeberg/pages/server/...
integration:
go test -race -tags 'integration {{TAGS}}' codeberg.org/codeberg/pages/integration/...
integration-run TEST:
go test -race -tags 'integration {{TAGS}}' -run "^{{TEST}}$" codeberg.org/codeberg/pages/integration/...
docker:
docker run --rm -it --user $(id -u) -v $(pwd):/work --workdir /work -e HOME=/work codeberg.org/6543/docker-images/golang_just
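Assuming `just` is installed locally, the recipes above are invoked directly by name; a sketch of the usual calls (recipe names taken verbatim from this Justfile):
```sh
just build        # CGO-enabled build into build/codeberg-pages-server
just test         # unit tests with -race and coverage
just lint         # golangci-lint with the integration build tag
just integration  # integration test suite
```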

979
LICENSE

@@ -1,674 +1,305 @@
GNU GENERAL PUBLIC LICENSE
Version 3, 29 June 2007
Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/>
Everyone is permitted to copy and distribute verbatim copies
of this license document, but changing it is not allowed.
Preamble
The GNU General Public License is a free, copyleft license for
software and other kinds of works.
The licenses for most software and other practical works are designed
to take away your freedom to share and change the works. By contrast,
the GNU General Public License is intended to guarantee your freedom to
share and change all versions of a program--to make sure it remains free
software for all its users. We, the Free Software Foundation, use the
GNU General Public License for most of our software; it applies also to
any other work released this way by its authors. You can apply it to
your programs, too.
When we speak of free software, we are referring to freedom, not
price. Our General Public Licenses are designed to make sure that you
have the freedom to distribute copies of free software (and charge for
them if you wish), that you receive source code or can get it if you
want it, that you can change the software or use pieces of it in new
free programs, and that you know you can do these things.
To protect your rights, we need to prevent others from denying you
these rights or asking you to surrender the rights. Therefore, you have
certain responsibilities if you distribute copies of the software, or if
you modify it: responsibilities to respect the freedom of others.
For example, if you distribute copies of such a program, whether
gratis or for a fee, you must pass on to the recipients the same
freedoms that you received. You must make sure that they, too, receive
or can get the source code. And you must show them these terms so they
know their rights.
Developers that use the GNU GPL protect your rights with two steps:
(1) assert copyright on the software, and (2) offer you this License
giving you legal permission to copy, distribute and/or modify it.
For the developers' and authors' protection, the GPL clearly explains
that there is no warranty for this free software. For both users' and
authors' sake, the GPL requires that modified versions be marked as
changed, so that their problems will not be attributed erroneously to
authors of previous versions.
Some devices are designed to deny users access to install or run
modified versions of the software inside them, although the manufacturer
can do so. This is fundamentally incompatible with the aim of
protecting users' freedom to change the software. The systematic
pattern of such abuse occurs in the area of products for individuals to
use, which is precisely where it is most unacceptable. Therefore, we
have designed this version of the GPL to prohibit the practice for those
products. If such problems arise substantially in other domains, we
stand ready to extend this provision to those domains in future versions
of the GPL, as needed to protect the freedom of users.
Finally, every program is threatened constantly by software patents.
States should not allow patents to restrict development and use of
software on general-purpose computers, but in those that do, we wish to
avoid the special danger that patents applied to a free program could
make it effectively proprietary. To prevent this, the GPL assures that
patents cannot be used to render the program non-free.
The precise terms and conditions for copying, distribution and
modification follow.
TERMS AND CONDITIONS
0. Definitions.
"This License" refers to version 3 of the GNU General Public License.
"Copyright" also means copyright-like laws that apply to other kinds of
works, such as semiconductor masks.
"The Program" refers to any copyrightable work licensed under this
License. Each licensee is addressed as "you". "Licensees" and
"recipients" may be individuals or organizations.
To "modify" a work means to copy from or adapt all or part of the work
in a fashion requiring copyright permission, other than the making of an
exact copy. The resulting work is called a "modified version" of the
earlier work or a work "based on" the earlier work.
A "covered work" means either the unmodified Program or a work based
on the Program.
To "propagate" a work means to do anything with it that, without
permission, would make you directly or secondarily liable for
infringement under applicable copyright law, except executing it on a
computer or modifying a private copy. Propagation includes copying,
distribution (with or without modification), making available to the
public, and in some countries other activities as well.
To "convey" a work means any kind of propagation that enables other
parties to make or receive copies. Mere interaction with a user through
a computer network, with no transfer of a copy, is not conveying.
An interactive user interface displays "Appropriate Legal Notices"
to the extent that it includes a convenient and prominently visible
feature that (1) displays an appropriate copyright notice, and (2)
tells the user that there is no warranty for the work (except to the
extent that warranties are provided), that licensees may convey the
work under this License, and how to view a copy of this License. If
the interface presents a list of user commands or options, such as a
menu, a prominent item in the list meets this criterion.
1. Source Code.
The "source code" for a work means the preferred form of the work
for making modifications to it. "Object code" means any non-source
form of a work.
A "Standard Interface" means an interface that either is an official
standard defined by a recognized standards body, or, in the case of
interfaces specified for a particular programming language, one that
is widely used among developers working in that language.
The "System Libraries" of an executable work include anything, other
than the work as a whole, that (a) is included in the normal form of
packaging a Major Component, but which is not part of that Major
Component, and (b) serves only to enable use of the work with that
Major Component, or to implement a Standard Interface for which an
implementation is available to the public in source code form. A
"Major Component", in this context, means a major essential component
(kernel, window system, and so on) of the specific operating system
(if any) on which the executable work runs, or a compiler used to
produce the work, or an object code interpreter used to run it.
The "Corresponding Source" for a work in object code form means all
the source code needed to generate, install, and (for an executable
work) run the object code and to modify the work, including scripts to
control those activities. However, it does not include the work's
System Libraries, or general-purpose tools or generally available free
programs which are used unmodified in performing those activities but
which are not part of the work. For example, Corresponding Source
includes interface definition files associated with source files for
the work, and the source code for shared libraries and dynamically
linked subprograms that the work is specifically designed to require,
such as by intimate data communication or control flow between those
subprograms and other parts of the work.
The Corresponding Source need not include anything that users
can regenerate automatically from other parts of the Corresponding
Source.
The Corresponding Source for a work in source code form is that
same work.
2. Basic Permissions.
All rights granted under this License are granted for the term of
copyright on the Program, and are irrevocable provided the stated
conditions are met. This License explicitly affirms your unlimited
permission to run the unmodified Program. The output from running a
covered work is covered by this License only if the output, given its
content, constitutes a covered work. This License acknowledges your
rights of fair use or other equivalent, as provided by copyright law.
You may make, run and propagate covered works that you do not
convey, without conditions so long as your license otherwise remains
in force. You may convey covered works to others for the sole purpose
of having them make modifications exclusively for you, or provide you
with facilities for running those works, provided that you comply with
the terms of this License in conveying all material for which you do
not control copyright. Those thus making or running the covered works
for you must do so exclusively on your behalf, under your direction
and control, on terms that prohibit them from making any copies of
your copyrighted material outside their relationship with you.
Conveying under any other circumstances is permitted solely under
the conditions stated below. Sublicensing is not allowed; section 10
makes it unnecessary.
3. Protecting Users' Legal Rights From Anti-Circumvention Law.
No covered work shall be deemed part of an effective technological
measure under any applicable law fulfilling obligations under article
11 of the WIPO copyright treaty adopted on 20 December 1996, or
similar laws prohibiting or restricting circumvention of such
measures.
When you convey a covered work, you waive any legal power to forbid
circumvention of technological measures to the extent such circumvention
is effected by exercising rights under this License with respect to
the covered work, and you disclaim any intention to limit operation or
modification of the work as a means of enforcing, against the work's
users, your or third parties' legal rights to forbid circumvention of
technological measures.
4. Conveying Verbatim Copies.
You may convey verbatim copies of the Program's source code as you
receive it, in any medium, provided that you conspicuously and
appropriately publish on each copy an appropriate copyright notice;
keep intact all notices stating that this License and any
non-permissive terms added in accord with section 7 apply to the code;
keep intact all notices of the absence of any warranty; and give all
recipients a copy of this License along with the Program.
You may charge any price or no price for each copy that you convey,
and you may offer support or warranty protection for a fee.
5. Conveying Modified Source Versions.
You may convey a work based on the Program, or the modifications to
produce it from the Program, in the form of source code under the
terms of section 4, provided that you also meet all of these conditions:
a) The work must carry prominent notices stating that you modified
it, and giving a relevant date.
b) The work must carry prominent notices stating that it is
released under this License and any conditions added under section
7. This requirement modifies the requirement in section 4 to
"keep intact all notices".
c) You must license the entire work, as a whole, under this
License to anyone who comes into possession of a copy. This
License will therefore apply, along with any applicable section 7
additional terms, to the whole of the work, and all its parts,
regardless of how they are packaged. This License gives no
permission to license the work in any other way, but it does not
invalidate such permission if you have separately received it.
d) If the work has interactive user interfaces, each must display
Appropriate Legal Notices; however, if the Program has interactive
interfaces that do not display Appropriate Legal Notices, your
work need not make them do so.
A compilation of a covered work with other separate and independent
works, which are not by their nature extensions of the covered work,
and which are not combined with it such as to form a larger program,
in or on a volume of a storage or distribution medium, is called an
"aggregate" if the compilation and its resulting copyright are not
used to limit the access or legal rights of the compilation's users
beyond what the individual works permit. Inclusion of a covered work
in an aggregate does not cause this License to apply to the other
parts of the aggregate.
6. Conveying Non-Source Forms.
You may convey a covered work in object code form under the terms
of sections 4 and 5, provided that you also convey the
machine-readable Corresponding Source under the terms of this License,
in one of these ways:
a) Convey the object code in, or embodied in, a physical product
(including a physical distribution medium), accompanied by the
Corresponding Source fixed on a durable physical medium
customarily used for software interchange.
b) Convey the object code in, or embodied in, a physical product
(including a physical distribution medium), accompanied by a
written offer, valid for at least three years and valid for as
long as you offer spare parts or customer support for that product
model, to give anyone who possesses the object code either (1) a
copy of the Corresponding Source for all the software in the
product that is covered by this License, on a durable physical
medium customarily used for software interchange, for a price no
more than your reasonable cost of physically performing this
conveying of source, or (2) access to copy the
Corresponding Source from a network server at no charge.
c) Convey individual copies of the object code with a copy of the
written offer to provide the Corresponding Source. This
alternative is allowed only occasionally and noncommercially, and
only if you received the object code with such an offer, in accord
with subsection 6b.
d) Convey the object code by offering access from a designated
place (gratis or for a charge), and offer equivalent access to the
Corresponding Source in the same way through the same place at no
further charge. You need not require recipients to copy the
Corresponding Source along with the object code. If the place to
copy the object code is a network server, the Corresponding Source
may be on a different server (operated by you or a third party)
that supports equivalent copying facilities, provided you maintain
clear directions next to the object code saying where to find the
Corresponding Source. Regardless of what server hosts the
Corresponding Source, you remain obligated to ensure that it is
available for as long as needed to satisfy these requirements.
e) Convey the object code using peer-to-peer transmission, provided
you inform other peers where the object code and Corresponding
Source of the work are being offered to the general public at no
charge under subsection 6d.
A separable portion of the object code, whose source code is excluded
from the Corresponding Source as a System Library, need not be
included in conveying the object code work.
A "User Product" is either (1) a "consumer product", which means any
tangible personal property which is normally used for personal, family,
or household purposes, or (2) anything designed or sold for incorporation
into a dwelling. In determining whether a product is a consumer product,
doubtful cases shall be resolved in favor of coverage. For a particular
product received by a particular user, "normally used" refers to a
typical or common use of that class of product, regardless of the status
of the particular user or of the way in which the particular user
actually uses, or expects or is expected to use, the product. A product
is a consumer product regardless of whether the product has substantial
commercial, industrial or non-consumer uses, unless such uses represent
the only significant mode of use of the product.
"Installation Information" for a User Product means any methods,
procedures, authorization keys, or other information required to install
and execute modified versions of a covered work in that User Product from
a modified version of its Corresponding Source. The information must
suffice to ensure that the continued functioning of the modified object
code is in no case prevented or interfered with solely because
modification has been made.
If you convey an object code work under this section in, or with, or
specifically for use in, a User Product, and the conveying occurs as
part of a transaction in which the right of possession and use of the
User Product is transferred to the recipient in perpetuity or for a
fixed term (regardless of how the transaction is characterized), the
Corresponding Source conveyed under this section must be accompanied
by the Installation Information. But this requirement does not apply
if neither you nor any third party retains the ability to install
modified object code on the User Product (for example, the work has
been installed in ROM).
The requirement to provide Installation Information does not include a
requirement to continue to provide support service, warranty, or updates
for a work that has been modified or installed by the recipient, or for
the User Product in which it has been modified or installed. Access to a
network may be denied when the modification itself materially and
adversely affects the operation of the network or violates the rules and
protocols for communication across the network.
Corresponding Source conveyed, and Installation Information provided,
in accord with this section must be in a format that is publicly
documented (and with an implementation available to the public in
source code form), and must require no special password or key for
unpacking, reading or copying.
7. Additional Terms.
"Additional permissions" are terms that supplement the terms of this
License by making exceptions from one or more of its conditions.
Additional permissions that are applicable to the entire Program shall
be treated as though they were included in this License, to the extent
that they are valid under applicable law. If additional permissions
apply only to part of the Program, that part may be used separately
under those permissions, but the entire Program remains governed by
this License without regard to the additional permissions.
When you convey a copy of a covered work, you may at your option
remove any additional permissions from that copy, or from any part of
it. (Additional permissions may be written to require their own
removal in certain cases when you modify the work.) You may place
additional permissions on material, added by you to a covered work,
for which you have or can give appropriate copyright permission.
Notwithstanding any other provision of this License, for material you
add to a covered work, you may (if authorized by the copyright holders of
that material) supplement the terms of this License with terms:
a) Disclaiming warranty or limiting liability differently from the
terms of sections 15 and 16 of this License; or
b) Requiring preservation of specified reasonable legal notices or
author attributions in that material or in the Appropriate Legal
Notices displayed by works containing it; or
c) Prohibiting misrepresentation of the origin of that material, or
requiring that modified versions of such material be marked in
reasonable ways as different from the original version; or
d) Limiting the use for publicity purposes of names of licensors or
authors of the material; or
e) Declining to grant rights under trademark law for use of some
trade names, trademarks, or service marks; or
f) Requiring indemnification of licensors and authors of that
material by anyone who conveys the material (or modified versions of
it) with contractual assumptions of liability to the recipient, for
any liability that these contractual assumptions directly impose on
those licensors and authors.
All other non-permissive additional terms are considered "further
restrictions" within the meaning of section 10. If the Program as you
received it, or any part of it, contains a notice stating that it is
governed by this License along with a term that is a further
restriction, you may remove that term. If a license document contains
a further restriction but permits relicensing or conveying under this
License, you may add to a covered work material governed by the terms
of that license document, provided that the further restriction does
not survive such relicensing or conveying.
If you add terms to a covered work in accord with this section, you
must place, in the relevant source files, a statement of the
additional terms that apply to those files, or a notice indicating
where to find the applicable terms.
Additional terms, permissive or non-permissive, may be stated in the
form of a separately written license, or stated as exceptions;
the above requirements apply either way.
8. Termination.
You may not propagate or modify a covered work except as expressly
provided under this License. Any attempt otherwise to propagate or
modify it is void, and will automatically terminate your rights under
this License (including any patent licenses granted under the third
paragraph of section 11).
However, if you cease all violation of this License, then your
license from a particular copyright holder is reinstated (a)
provisionally, unless and until the copyright holder explicitly and
finally terminates your license, and (b) permanently, if the copyright
holder fails to notify you of the violation by some reasonable means
prior to 60 days after the cessation.
Moreover, your license from a particular copyright holder is
reinstated permanently if the copyright holder notifies you of the
violation by some reasonable means, this is the first time you have
received notice of violation of this License (for any work) from that
copyright holder, and you cure the violation prior to 30 days after
your receipt of the notice.
Termination of your rights under this section does not terminate the
licenses of parties who have received copies or rights from you under
this License. If your rights have been terminated and not permanently
reinstated, you do not qualify to receive new licenses for the same
material under section 10.
9. Acceptance Not Required for Having Copies.
You are not required to accept this License in order to receive or
run a copy of the Program. Ancillary propagation of a covered work
occurring solely as a consequence of using peer-to-peer transmission
to receive a copy likewise does not require acceptance. However,
nothing other than this License grants you permission to propagate or
modify any covered work. These actions infringe copyright if you do
not accept this License. Therefore, by modifying or propagating a
covered work, you indicate your acceptance of this License to do so.
10. Automatic Licensing of Downstream Recipients.
Each time you convey a covered work, the recipient automatically
receives a license from the original licensors, to run, modify and
propagate that work, subject to this License. You are not responsible
for enforcing compliance by third parties with this License.
An "entity transaction" is a transaction transferring control of an
organization, or substantially all assets of one, or subdividing an
organization, or merging organizations. If propagation of a covered
work results from an entity transaction, each party to that
transaction who receives a copy of the work also receives whatever
licenses to the work the party's predecessor in interest had or could
give under the previous paragraph, plus a right to possession of the
Corresponding Source of the work from the predecessor in interest, if
the predecessor has it or can get it with reasonable efforts.
You may not impose any further restrictions on the exercise of the
rights granted or affirmed under this License. For example, you may
not impose a license fee, royalty, or other charge for exercise of
rights granted under this License, and you may not initiate litigation
(including a cross-claim or counterclaim in a lawsuit) alleging that
any patent claim is infringed by making, using, selling, offering for
sale, or importing the Program or any portion of it.
11. Patents.
A "contributor" is a copyright holder who authorizes use under this
License of the Program or a work on which the Program is based. The
work thus licensed is called the contributor's "contributor version".
A contributor's "essential patent claims" are all patent claims
owned or controlled by the contributor, whether already acquired or
hereafter acquired, that would be infringed by some manner, permitted
by this License, of making, using, or selling its contributor version,
but do not include claims that would be infringed only as a
consequence of further modification of the contributor version. For
purposes of this definition, "control" includes the right to grant
patent sublicenses in a manner consistent with the requirements of
this License.
Each contributor grants you a non-exclusive, worldwide, royalty-free
patent license under the contributor's essential patent claims, to
make, use, sell, offer for sale, import and otherwise run, modify and
propagate the contents of its contributor version.
In the following three paragraphs, a "patent license" is any express
agreement or commitment, however denominated, not to enforce a patent
(such as an express permission to practice a patent or covenant not to
sue for patent infringement). To "grant" such a patent license to a
party means to make such an agreement or commitment not to enforce a
patent against the party.
If you convey a covered work, knowingly relying on a patent license,
and the Corresponding Source of the work is not available for anyone
to copy, free of charge and under the terms of this License, through a
publicly available network server or other readily accessible means,
then you must either (1) cause the Corresponding Source to be so
available, or (2) arrange to deprive yourself of the benefit of the
patent license for this particular work, or (3) arrange, in a manner
consistent with the requirements of this License, to extend the patent
license to downstream recipients. "Knowingly relying" means you have
actual knowledge that, but for the patent license, your conveying the
covered work in a country, or your recipient's use of the covered work
in a country, would infringe one or more identifiable patents in that
country that you have reason to believe are valid.
If, pursuant to or in connection with a single transaction or
arrangement, you convey, or propagate by procuring conveyance of, a
covered work, and grant a patent license to some of the parties
receiving the covered work authorizing them to use, propagate, modify
or convey a specific copy of the covered work, then the patent license
you grant is automatically extended to all recipients of the covered
work and works based on it.
A patent license is "discriminatory" if it does not include within
the scope of its coverage, prohibits the exercise of, or is
conditioned on the non-exercise of one or more of the rights that are
specifically granted under this License. You may not convey a covered
work if you are a party to an arrangement with a third party that is
in the business of distributing software, under which you make payment
to the third party based on the extent of your activity of conveying
the work, and under which the third party grants, to any of the
parties who would receive the covered work from you, a discriminatory
patent license (a) in connection with copies of the covered work
conveyed by you (or copies made from those copies), or (b) primarily
for and in connection with specific products or compilations that
contain the covered work, unless you entered into that arrangement,
or that patent license was granted, prior to 28 March 2007.
Nothing in this License shall be construed as excluding or limiting
any implied license or other defenses to infringement that may
otherwise be available to you under applicable patent law.
12. No Surrender of Others' Freedom.
If conditions are imposed on you (whether by court order, agreement or
otherwise) that contradict the conditions of this License, they do not
excuse you from the conditions of this License. If you cannot convey a
covered work so as to satisfy simultaneously your obligations under this
License and any other pertinent obligations, then as a consequence you may
not convey it at all. For example, if you agree to terms that obligate you
to collect a royalty for further conveying from those to whom you convey
the Program, the only way you could satisfy both those terms and this
License would be to refrain entirely from conveying the Program.
13. Use with the GNU Affero General Public License.
Notwithstanding any other provision of this License, you have
permission to link or combine any covered work with a work licensed
under version 3 of the GNU Affero General Public License into a single
combined work, and to convey the resulting work. The terms of this
License will continue to apply to the part which is the covered work,
but the special requirements of the GNU Affero General Public License,
section 13, concerning interaction through a network will apply to the
combination as such.
14. Revised Versions of this License.
The Free Software Foundation may publish revised and/or new versions of
the GNU General Public License from time to time. Such new versions will
be similar in spirit to the present version, but may differ in detail to
address new problems or concerns.
Each version is given a distinguishing version number. If the
Program specifies that a certain numbered version of the GNU General
Public License "or any later version" applies to it, you have the
option of following the terms and conditions either of that numbered
version or of any later version published by the Free Software
Foundation. If the Program does not specify a version number of the
GNU General Public License, you may choose any version ever published
by the Free Software Foundation.
If the Program specifies that a proxy can decide which future
versions of the GNU General Public License can be used, that proxy's
public statement of acceptance of a version permanently authorizes you
to choose that version for the Program.
Later license versions may give you additional or different
permissions. However, no additional obligations are imposed on any
author or copyright holder as a result of your choosing to follow a
later version.
15. Disclaimer of Warranty.
THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
16. Limitation of Liability.
IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
SUCH DAMAGES.
17. Interpretation of Sections 15 and 16.
If the disclaimer of warranty and limitation of liability provided
above cannot be given local legal effect according to their terms,
reviewing courts shall apply local law that most closely approximates
an absolute waiver of all civil liability in connection with the
Program, unless a warranty or assumption of liability accompanies a
copy of the Program in return for a fee.
END OF TERMS AND CONDITIONS
How to Apply These Terms to Your New Programs
If you develop a new program, and you want it to be of the greatest
possible use to the public, the best way to achieve this is to make it
free software which everyone can redistribute and change under these terms.
To do so, attach the following notices to the program. It is safest
to attach them to the start of each source file to most effectively
state the exclusion of warranty; and each file should have at least
the "copyright" line and a pointer to where the full notice is found.
<one line to give the program's name and a brief idea of what it does.>
Copyright (C) <year> <name of author>
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <https://www.gnu.org/licenses/>.
Also add information on how to contact you by electronic and paper mail.
If the program does terminal interaction, make it output a short
notice like this when it starts in an interactive mode:
<program> Copyright (C) <year> <name of author>
This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
This is free software, and you are welcome to redistribute it
under certain conditions; type `show c' for details.
The hypothetical commands `show w' and `show c' should show the appropriate
parts of the General Public License. Of course, your program's commands
might be different; for a GUI interface, you would use an "about box".
You should also get your employer (if you work as a programmer) or school,
if any, to sign a "copyright disclaimer" for the program, if necessary.
For more information on this, and how to apply and follow the GNU GPL, see
<https://www.gnu.org/licenses/>.
The GNU General Public License does not permit incorporating your program
into proprietary programs. If your program is a subroutine library, you
may consider it more useful to permit linking proprietary applications with
the library. If this is what you want to do, use the GNU Lesser General
Public License instead of this License. But first, please read
<https://www.gnu.org/licenses/why-not-lgpl.html>.
European Union Public Licence v. 1.2
EUPL © the European Union 2007, 2016
This European Union Public Licence (the 'EUPL') applies to the Work (as defined
below) which is provided under the terms of this Licence. Any use of the Work,
other than as authorised under this Licence is prohibited (to the extent such
use is covered by a right of the copyright holder of the Work).
The Work is provided under the terms of this Licence when the Licensor (as
defined below) has placed the following notice immediately following the copyright
notice for the Work:
Licensed under the EUPL
or has expressed by any other means his willingness to license under the EUPL.
1. Definitions
In this Licence, the following terms have the following meaning:
— 'The Licence': this Licence.
— 'The Original Work': the work or software distributed or communicated by
the Licensor under this Licence, available as Source Code and also as Executable
Code as the case may be.
— 'Derivative Works': the works or software that could be created by the Licensee,
based upon the Original Work or modifications thereof. This Licence does not
define the extent of modification or dependence on the Original Work required
in order to classify a work as a Derivative Work; this extent is determined
by copyright law applicable in the country mentioned in Article 15.
— 'The Work': the Original Work or its Derivative Works.
— 'The Source Code': the human-readable form of the Work which is the most
convenient for people to study and modify.
— 'The Executable Code': any code which has generally been compiled and which
is meant to be interpreted by a computer as a program.
— 'The Licensor': the natural or legal person that distributes or communicates
the Work under the Licence.
— 'Contributor(s)': any natural or legal person who modifies the Work under
the Licence, or otherwise contributes to the creation of a Derivative Work.
— 'The Licensee' or 'You': any natural or legal person who makes any usage
of the Work under the terms of the Licence.
— 'Distribution' or 'Communication': any act of selling, giving, lending,
renting, distributing, communicating, transmitting, or otherwise making available,
online or offline, copies of the Work or providing access to its essential
functionalities at the disposal of any other natural or legal person.
2. Scope of the rights granted by the Licence
The Licensor hereby grants You a worldwide, royalty-free, non-exclusive, sublicensable
licence to do the following, for the duration of copyright vested in the Original
Work:
— use the Work in any circumstance and for all usage,
— reproduce the Work,
— modify the Work, and make Derivative Works based upon the Work,
— communicate to the public, including the right to make available or display
the Work or copies thereof to the public and perform publicly, as the case
may be, the Work,
— distribute the Work or copies thereof,
— lend and rent the Work or copies thereof,
— sublicense rights in the Work or copies thereof.
Those rights can be exercised on any media, supports and formats, whether
now known or later invented, as far as the applicable law permits so.
In the countries where moral rights apply, the Licensor waives his right to
exercise his moral right to the extent allowed by law in order to make effective
the licence of the economic rights here above listed.
The Licensor grants to the Licensee royalty-free, non-exclusive usage rights
to any patents held by the Licensor, to the extent necessary to make use of
the rights granted on the Work under this Licence.
3. Communication of the Source Code
The Licensor may provide the Work either in its Source Code form, or as Executable
Code. If the Work is provided as Executable Code, the Licensor provides in
addition a machine-readable copy of the Source Code of the Work along with
each copy of the Work that the Licensor distributes or indicates, in a notice
following the copyright notice attached to the Work, a repository where the
Source Code is easily and freely accessible for as long as the Licensor continues
to distribute or communicate the Work.
4. Limitations on copyright
Nothing in this Licence is intended to deprive the Licensee of the benefits
from any exception or limitation to the exclusive rights of the rights owners
in the Work, of the exhaustion of those rights or of other applicable limitations
thereto.
5. Obligations of the Licensee
The grant of the rights mentioned above is subject to some restrictions and
obligations imposed on the Licensee. Those obligations are the following:
Attribution right: The Licensee shall keep intact all copyright, patent or
trademarks notices and all notices that refer to the Licence and to the disclaimer
of warranties. The Licensee must include a copy of such notices and a copy
of the Licence with every copy of the Work he/she distributes or communicates.
The Licensee must cause any Derivative Work to carry prominent notices stating
that the Work has been modified and the date of modification.
Copyleft clause: If the Licensee distributes or communicates copies of the
Original Works or Derivative Works, this Distribution or Communication will
be done under the terms of this Licence or of a later version of this Licence
unless the Original Work is expressly distributed only under this version
of the Licence — for example by communicating 'EUPL v. 1.2 only'. The Licensee
(becoming Licensor) cannot offer or impose any additional terms or conditions
on the Work or Derivative Work that alter or restrict the terms of the Licence.
Compatibility clause: If the Licensee Distributes or Communicates Derivative
Works or copies thereof based upon both the Work and another work licensed
under a Compatible Licence, this Distribution or Communication can be done
under the terms of this Compatible Licence. For the sake of this clause, 'Compatible
Licence' refers to the licences listed in the appendix attached to this Licence.
Should the Licensee's obligations under the Compatible Licence conflict with
his/her obligations under this Licence, the obligations of the Compatible
Licence shall prevail.
Provision of Source Code: When distributing or communicating copies of the
Work, the Licensee will provide a machine-readable copy of the Source Code
or indicate a repository where this Source will be easily and freely available
for as long as the Licensee continues to distribute or communicate the Work.
Legal Protection: This Licence does not grant permission to use the trade
names, trademarks, service marks, or names of the Licensor, except as required
for reasonable and customary use in describing the origin of the Work and
reproducing the content of the copyright notice.
6. Chain of Authorship
The original Licensor warrants that the copyright in the Original Work granted
hereunder is owned by him/her or licensed to him/her and that he/she has the
power and authority to grant the Licence.
Each Contributor warrants that the copyright in the modifications he/she brings
to the Work are owned by him/her or licensed to him/her and that he/she has
the power and authority to grant the Licence.
Each time You accept the Licence, the original Licensor and subsequent Contributors
grant You a licence to their contributions to the Work, under the terms of
this Licence.
7. Disclaimer of Warranty
The Work is a work in progress, which is continuously improved by numerous
Contributors. It is not a finished work and may therefore contain defects
or 'bugs' inherent to this type of development.
For the above reason, the Work is provided under the Licence on an 'as is'
basis and without warranties of any kind concerning the Work, including without
limitation merchantability, fitness for a particular purpose, absence of defects
or errors, accuracy, non-infringement of intellectual property rights other
than copyright as stated in Article 6 of this Licence.
This disclaimer of warranty is an essential part of the Licence and a condition
for the grant of any rights to the Work.
8. Disclaimer of Liability
Except in the cases of wilful misconduct or damages directly caused to natural
persons, the Licensor will in no event be liable for any direct or indirect,
material or moral, damages of any kind, arising out of the Licence or of the
use of the Work, including without limitation, damages for loss of goodwill,
work stoppage, computer failure or malfunction, loss of data or any commercial
damage, even if the Licensor has been advised of the possibility of such damage.
However, the Licensor will be liable under statutory product liability laws
as far such laws apply to the Work.
9. Additional agreements
While distributing the Work, You may choose to conclude an additional agreement,
defining obligations or services consistent with this Licence. However, if
accepting obligations, You may act only on your own behalf and on your sole
responsibility, not on behalf of the original Licensor or any other Contributor,
and only if You agree to indemnify, defend, and hold each Contributor harmless
for any liability incurred by, or claims asserted against such Contributor
by the fact You have accepted any warranty or additional liability.
10. Acceptance of the Licence
The provisions of this Licence can be accepted by clicking on an icon 'I agree'
placed under the bottom of a window displaying the text of this Licence or
by affirming consent in any other similar way, in accordance with the rules
of applicable law. Clicking on that icon indicates your clear and irrevocable
acceptance of this Licence and all of its terms and conditions.
Similarly, you irrevocably accept this Licence and all of its terms and conditions
by exercising any rights granted to You by Article 2 of this Licence, such
as the use of the Work, the creation by You of a Derivative Work or the Distribution
or Communication by You of the Work or copies thereof.
11. Information to the public
In case of any Distribution or Communication of the Work by means of electronic
communication by You (for example, by offering to download the Work from a
remote location) the distribution channel or media (for example, a website)
must at least provide to the public the information requested by the applicable
law regarding the Licensor, the Licence and the way it may be accessible,
concluded, stored and reproduced by the Licensee.
12. Termination of the Licence
The Licence and the rights granted hereunder will terminate automatically
upon any breach by the Licensee of the terms of the Licence.
Such a termination will not terminate the licences of any person who has received
the Work from the Licensee under the Licence, provided such persons remain
in full compliance with the Licence.
13. Miscellaneous
Without prejudice of Article 9 above, the Licence represents the complete
agreement between the Parties as to the Work.
If any provision of the Licence is invalid or unenforceable under applicable
law, this will not affect the validity or enforceability of the Licence as
a whole. Such provision will be construed or reformed so as necessary to make
it valid and enforceable.
The European Commission may publish other linguistic versions or new versions
of this Licence or updated versions of the Appendix, so far this is required
and reasonable, without reducing the scope of the rights granted by the Licence.
New versions of the Licence will be published with a unique version number.
All linguistic versions of this Licence, approved by the European Commission,
have identical value. Parties can take advantage of the linguistic version
of their choice.
14. Jurisdiction
Without prejudice to specific agreement between parties,
— any litigation resulting from the interpretation of this License, arising
between the European Union institutions, bodies, offices or agencies, as a
Licensor, and any Licensee, will be subject to the jurisdiction of the Court
of Justice of the European Union, as laid down in article 272 of the Treaty
on the Functioning of the European Union,
— any litigation arising between other parties and resulting from the interpretation
of this License, will be subject to the exclusive jurisdiction of the competent
court where the Licensor resides or conducts its primary business.
15. Applicable Law
Without prejudice to specific agreement between parties,
— this Licence shall be governed by the law of the European Union Member State
where the Licensor has his seat, resides or has his registered office,
— this licence shall be governed by Belgian law if the Licensor has no seat,
residence or registered office inside a European Union Member State.
Appendix
'Compatible Licences' according to Article 5 EUPL are:
— GNU General Public License (GPL) v. 2, v. 3
— GNU Affero General Public License (AGPL) v. 3
— Open Software License (OSL) v. 2.1, v. 3.0
— Eclipse Public License (EPL) v. 1.0
— CeCILL v. 2.0, v. 2.1
— Mozilla Public Licence (MPL) v. 2
— GNU Lesser General Public Licence (LGPL) v. 2.1, v. 3
— Creative Commons Attribution-ShareAlike v. 3.0 Unported (CC BY-SA 3.0) for
works other than software
— European Union Public Licence (EUPL) v. 1.1, v. 1.2
— Québec Free and Open-Source Licence — Reciprocity (LiLiQ-R) or Strong Reciprocity
(LiLiQ-R+).
The European Commission may update this Appendix to later versions of the
above licences without producing a new version of the EUPL, as long as they
provide the rights granted in Article 2 of this Licence and protect the covered
Source Code from exclusive appropriation.
All other changes or additions to this Appendix require the production of
a new EUPL version.
175
README.md
View file
@ -1,49 +1,150 @@
# Lamp
# Codeberg Pages
Lamp is a backlight control program written in Rust and inspired by
[acpibacklight](https://gitlab.com/wavexx/acpilight).
[![License: EUPL-1.2](https://img.shields.io/badge/License-EUPL--1.2-blue)](https://opensource.org/license/eupl-1-2/)
[![status-badge](https://ci.codeberg.org/api/badges/Codeberg/pages-server/status.svg)](https://ci.codeberg.org/Codeberg/pages-server)
<a href="https://matrix.to/#/#gitea-pages-server:matrix.org" title="Join the Matrix room at https://matrix.to/#/#gitea-pages-server:matrix.org">
<img src="https://img.shields.io/matrix/gitea-pages-server:matrix.org?label=matrix">
</a>
## Features
Gitea lacks the ability to host static pages from Git.
The Codeberg Pages Server fills this gap with a standalone service
that connects to Gitea via its API.
Other Gitea instances can deploy it as well to offer static pages hosting to their users.
In contrast to acpilight lamp is not backwards compatible with xbacklight.
It is intended to be used as a standalone replacement for new scripts.
**End user documentation** can mainly be found at the [Wiki](https://codeberg.org/Codeberg/pages-server/wiki/Overview)
and the [Codeberg Documentation](https://docs.codeberg.org/codeberg-pages/).
```none
-c, --controller <DEVICE> Select device to control
-d, --decrease <PERCENT> Decreases brightness
-f, --full Sets brightness to highest value
-g, --get Prints current brightness value
-h, --help Print help information
-i, --increase <PERCENT> Increases brightness
-l, --list Lists all devices with controllable brightness and led values
-s, --set <VALUE> Sets brightness to given value
-t, --type <controller_type> choose controller type [default: lin] [possible values: raw,
lin, log]
-V, --version Print version information
-z, --zero Sets brightness to lowest value
```
<a href="https://codeberg.org/Codeberg/pages-server"> <img src="https://codeberg.org/Codeberg/GetItOnCodeberg/raw/branch/main/get-it-on-blue-on-white.svg" alt="Get It On Codeberg" width="250"/> </a>
## Install
## Quickstart
Binary packages for the following systems are currently available.
This is the new Codeberg Pages server, a solution for serving static pages from Gitea repositories.
Custom domains are no longer mapped statically; instead, they can be configured via DNS:
[![Packaging status](https://repology.org/badge/vertical-allrepos/lamp.svg)](https://repology.org/project/lamp/versions)
1. add a `.domains` text file to your repository, containing the allowed domains, separated by new lines. The
first line will be the canonical domain/URL; all other occurrences will be redirected to it (see the example after this list).
You can also install it via `cargo`:
2. add a CNAME entry to your domain, pointing to `[[{branch}.]{repo}.]{owner}.codeberg.page` (repo defaults to
"pages", "branch" defaults to the default branch if "repo" is "pages", or to "pages" if "repo" is something else.
If the branch name contains slash characters, you need to replace "/" in the branch name with "~"):
`www.example.org. IN CNAME main.pages.example.codeberg.page.`
3. if a CNAME is set for "www.example.org", you can redirect there from the naked domain by adding an ALIAS record
for "example.org" (if your provider allows ALIAS or similar records, otherwise use A/AAAA), together with a TXT
record that points to your repo (just like the CNAME record):
`example.org IN ALIAS codeberg.page.`
`example.org IN TXT main.pages.example.codeberg.page.`
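For example, a repository's `.domains` file matching the DNS records above might contain the following (illustrative values; the first line is treated as the canonical domain):

```none
www.example.org
example.org
```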
Certificates are generated, updated and cleaned up automatically via Let's Encrypt through a TLS challenge.
## Chat for admins & devs
[matrix: #gitea-pages-server:matrix.org](https://matrix.to/#/#gitea-pages-server:matrix.org)
## Deployment
**Warning: Some Caveats Apply**
> Currently, the deployment requires some knowledge of system administration as well as the ability to understand and build code,
> so you can edit non-configurable and Codeberg-specific settings when necessary.
> In the future, we'll try to reduce these requirements and make hosting Codeberg Pages as easy as setting up Gitea.
> If you consider using Pages in practice, please contact us first;
> we'll then try to share some basic steps and document the current usage for admins
> (this might still change in the current state).
Deploying the software itself is very easy. You can grab a current release binary or build it yourself,
configure the environment as described below, and you are done.
The hard part is adding **custom domain support** if you intend to use it.
SSL certificates (request + renewal) are handled automatically by the Pages Server,
but if you want to run it on a shared IP address (and not standalone),
you'll need to configure your reverse proxy not to terminate the TLS connections,
but to forward the requests on the IP level to the Pages Server.
You can check out a proof of concept in the `examples/haproxy-sni` folder,
and especially have a look at [this section of the haproxy.cfg](https://codeberg.org/Codeberg/pages-server/src/branch/main/examples/haproxy-sni/haproxy.cfg#L38).
If you want to test a change, you can open a PR and ask for the label `build_pr_image` to be added.
This will trigger a build of the PR that produces a docker image for testing.
### Environment Variables
- `ACME_ACCEPT_TERMS` (default: use self-signed certificate): Set this to "true" to accept the Terms of Service of your ACME provider.
- `ACME_API` (default: <https://acme-v02.api.letsencrypt.org/directory>): set this to <https://acme.mock.directory> to use invalid certificates without any verification (great for debugging). ZeroSSL might be better in the future as it doesn't have rate limits and doesn't clash with the official Codeberg certificates (which are using Let's Encrypt), but I couldn't get it to work yet.
- `ACME_EAB_KID` & `ACME_EAB_HMAC` (default: don't use EAB): EAB credentials, for example for ZeroSSL.
- `ACME_EMAIL` (default: `noreply@example.email`): Set the email sent to the ACME API server to receive, for example, renewal reminders.
- `ACME_USE_RATE_LIMITS` (default: true): Set this to false to disable rate limits, e.g. with ZeroSSL.
- `DNS_PROVIDER` (default: use self-signed certificate): Code of the ACME DNS provider for the main domain wildcard. See <https://go-acme.github.io/lego/dns/> for available values & additional environment variables.
- `ENABLE_HTTP_SERVER` (default: false): Set this to true to enable the HTTP-01 challenge and redirect all other HTTP requests to HTTPS. Currently only works with port 80.
- `GITEA_API_TOKEN` (default: empty): API token for the Gitea instance to access non-public (e.g. limited) repos.
- `GITEA_ROOT` (default: `https://codeberg.org`): root of the upstream Gitea instance.
- `HOST` & `PORT` (default: `[::]` & `443`): listen address.
- `LOG_LEVEL` (default: warn): Set this to specify the level of logging.
- `NO_DNS_01` (default: `false`): Disable the use of ACME DNS. This means that the wildcard certificate is self-signed and all domains and subdomains will have a distinct certificate. Because this may lead to a rate limit from the ACME provider, this option is not recommended for Gitea/Forgejo instances with open registrations or a great number of users/orgs.
- `PAGES_DOMAIN` (default: `codeberg.page`): main domain for pages.
- `RAW_DOMAIN` (default: `raw.codeberg.page`): domain for raw resources (must be subdomain of `PAGES_DOMAIN`).
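As an illustration, a minimal environment for a self-hosted instance might look like this (all values are placeholders, not a recommended production setup):

```bash
export GITEA_ROOT=https://git.example.com
export GITEA_API_TOKEN=<token-with-read-access>   # only needed for non-public repos
export PAGES_DOMAIN=pages.example.com
export RAW_DOMAIN=raw.pages.example.com
export ACME_EMAIL=admin@example.com
export ACME_ACCEPT_TERMS=true
export ENABLE_HTTP_SERVER=true
```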
### Custom Error Page
A custom error page template can be served by creating `custom/error.html`.
Data available to the template includes:
- `{{ .StatusCode }}`: The HTTP status code (e.g. 404)
- `{{ .StatusText }}`: The textual name associated with the status code (e.g. Not Found)
- `{{ .Message }}`: The reason for the error
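A minimal `custom/error.html` could look like the following sketch (illustrative only, not shipped with the server; it uses only the fields listed above):

```html
<!DOCTYPE html>
<html>
  <head>
    <meta charset="utf-8" />
    <title>{{ .StatusCode }} {{ .StatusText }}</title>
  </head>
  <body>
    <h1>{{ .StatusCode }} {{ .StatusText }}</h1>
    <p>{{ .Message }}</p>
  </body>
</html>
```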
## Contributing to the development
The Codeberg team is very open to your contributions.
Since we work closely as a team, it might be hard at times for newcomers to get started
(still, check out the issues; we always aim to have some tasks ready to get you started).
If you have any questions, want to work on a feature or could imagine collaborating with us for some time,
feel free to ping us in an issue or in a general [Matrix chat room](#chat-for-admins--devs).
You can also contact the maintainer(s) of this project:
- [crapStone](https://codeberg.org/crapStone) [(Matrix)](https://matrix.to/#/@crapstone:obermui.de)
Previous maintainers:
- [momar](https://codeberg.org/momar) [(Matrix)](https://matrix.to/#/@moritz:wuks.space)
- [6543](https://codeberg.org/6543) [(Matrix)](https://matrix.to/#/@marddl:obermui.de)
### First steps
The code of this repository is split into several modules.
The [Architecture is explained](https://codeberg.org/Codeberg/pages-server/wiki/Architecture) in the wiki.
The `cmd` folder holds the code necessary for interacting with the service via the CLI.
The heart of the software lives in the `server` folder and is split into several modules.
Again: Feel free to get in touch with us for any questions that might arise.
Thank you very much.
### Test Server
Make sure you have [golang](https://go.dev) v1.21 or newer and [just](https://just.systems/man/en/) installed.
Run `just dev`.
Now these pages should work:
- <https://cb_pages_tests.localhost.mock.directory:4430/images/827679288a.jpg>
- <https://momar.localhost.mock.directory:4430/ci-testing/>
- <https://momar.localhost.mock.directory:4430/pag/@master/>
- <https://mock-pages.codeberg-test.org:4430/README.md>
### Profiling
> This section is just a collection of commands for quick reference. If you want to learn more about profiling read [this](https://go.dev/doc/diagnostics) article or google `golang profiling`.
First, enable profiling by supplying the CLI argument `--enable-profiling` or setting the environment variable `ENABLE_PROFILING`.
Get cpu and mem stats:
```bash
cargo install lamp
go tool pprof -raw -output=cpu.txt 'http://localhost:9999/debug/pprof/profile?seconds=60' &
curl -so mem.txt 'http://localhost:9999/debug/pprof/heap?seconds=60'
```
You have to make sure, that you have write access to `/sys/class/backlight/`.
This can be achieved by using udev rules like `90-backlight.rules` in this repo.
## Build
lamp is a pure Rust project so you can simply run `cargo build` after installing the Rust toolchain.
Formatting is done via `cargo fmt` with the default rules and in the pipeline `clippy` is run with the following arguments:
```bash
cargo clippy -- --deny clippy::all --deny warnings
```
More endpoints are documented here: <https://pkg.go.dev/net/http/pprof>
View file
@ -1,40 +0,0 @@
// This file is part of the "lamp" program.
// Copyright (C) 2022 crapStone
// You should have received a copy of the GNU General Public License
// along with this program. If not, see <https://www.gnu.org/licenses/>.
use std::{
fs,
io::{self, Error},
path::PathBuf,
};
use clap_complete::{generate_to, shells, Generator};
include!("src/cli.rs");
const OUTDIR: &str = "target/completions";
fn main() -> Result<(), Error> {
let mut cmd = build_cli();
if let Err(why) = fs::create_dir(OUTDIR) {
if why.kind() != io::ErrorKind::AlreadyExists {
eprintln!("cargo:error=could not create directory: {OUTDIR}");
return Err(why);
}
}
let path = generate_completions(shells::Bash, &mut cmd)?;
println!("cargo:debug=completion file is generated: {path:?}");
let path = generate_completions(shells::Zsh, &mut cmd)?;
println!("cargo:debug=completion file is generated: {path:?}");
let path = generate_completions(shells::Fish, &mut cmd)?;
println!("cargo:debug=completion file is generated: {path:?}");
Ok(())
}
fn generate_completions<G: Generator>(shell: G, cmd: &mut clap::Command) -> Result<PathBuf, Error> {
generate_to(shell, cmd, env!("CARGO_PKG_NAME"), OUTDIR)
}
69
cli/certs.go Normal file
View file
@ -0,0 +1,69 @@
package cli
import (
"fmt"
"time"
"github.com/urfave/cli/v2"
)
var Certs = &cli.Command{
Name: "certs",
Usage: "manage certs manually",
Subcommands: []*cli.Command{
{
Name: "list",
Usage: "list all certificates in the database",
Action: listCerts,
},
{
Name: "remove",
Usage: "remove a certificate from the database",
Action: removeCert,
},
},
Flags: CertStorageFlags,
}
func listCerts(ctx *cli.Context) error {
certDB, closeFn, err := OpenCertDB(ctx)
if err != nil {
return err
}
defer closeFn()
items, err := certDB.Items(0, 0)
if err != nil {
return err
}
fmt.Printf("Domain\tValidTill\n\n")
for _, cert := range items {
fmt.Printf("%s\t%s\n",
cert.Domain,
time.Unix(cert.ValidTill, 0).Format(time.RFC3339))
}
return nil
}
func removeCert(ctx *cli.Context) error {
if ctx.Args().Len() < 1 {
return fmt.Errorf("'certs remove' requires at least one domain as an argument")
}
domains := ctx.Args().Slice()
certDB, closeFn, err := OpenCertDB(ctx)
if err != nil {
return err
}
defer closeFn()
for _, domain := range domains {
fmt.Printf("Removing domain %s from the database...\n", domain)
if err := certDB.Delete(domain); err != nil {
return err
}
}
return nil
}
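The `certs` subcommands above could then be invoked roughly like this (a hedged example; the binary name and the database flags depend on your build and setup):

```bash
# list all certificates stored in the default certs.sqlite database
./pages-server certs list

# remove the certificates for one or more domains
./pages-server certs remove example.org www.example.org
```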
214
cli/flags.go Normal file
View file
@ -0,0 +1,214 @@
package cli
import (
"github.com/urfave/cli/v2"
)
var (
CertStorageFlags = []cli.Flag{
&cli.StringFlag{
Name: "db-type",
Usage: "Specify the database driver. Valid options are \"sqlite3\", \"mysql\" and \"postgres\". Read more at https://xorm.io",
Value: "sqlite3",
EnvVars: []string{"DB_TYPE"},
},
&cli.StringFlag{
Name: "db-conn",
Usage: "Specify the database connection. For \"sqlite3\" it's the filepath. Read more at https://go.dev/doc/tutorial/database-access",
Value: "certs.sqlite",
EnvVars: []string{"DB_CONN"},
},
}
ServerFlags = append(CertStorageFlags, []cli.Flag{
// #############
// ### Forge ###
// #############
// ForgeRoot specifies the root URL of the Forge instance, without a trailing slash.
&cli.StringFlag{
Name: "forge-root",
Aliases: []string{"gitea-root"},
Usage: "specifies the root URL of the Forgejo/Gitea instance, without a trailing slash.",
EnvVars: []string{"FORGE_ROOT", "GITEA_ROOT"},
},
// ForgeApiToken specifies an api token for the Forge instance
&cli.StringFlag{
Name: "forge-api-token",
Aliases: []string{"gitea-api-token"},
Usage: "specifies an api token for the Forgejo/Gitea instance",
EnvVars: []string{"FORGE_API_TOKEN", "GITEA_API_TOKEN"},
},
&cli.BoolFlag{
Name: "enable-lfs-support",
Usage: "enable lfs support, gitea must be version v1.17.0 or higher",
EnvVars: []string{"ENABLE_LFS_SUPPORT"},
Value: false,
},
&cli.BoolFlag{
Name: "enable-symlink-support",
Usage: "follow symlinks if enabled, gitea must be version v1.18.0 or higher",
EnvVars: []string{"ENABLE_SYMLINK_SUPPORT"},
Value: false,
},
&cli.StringFlag{
Name: "default-mime-type",
Usage: "specifies the default mime type for files that don't have a specific mime type.",
EnvVars: []string{"DEFAULT_MIME_TYPE"},
Value: "application/octet-stream",
},
&cli.StringSliceFlag{
Name: "forbidden-mime-types",
Usage: "specifies the forbidden mime types. Use this flag multiple times for multiple mime types.",
EnvVars: []string{"FORBIDDEN_MIME_TYPES"},
},
// ###########################
// ### Page Server Domains ###
// ###########################
// MainDomainSuffix specifies the main domain (starting with a dot) for which subdomains shall be served as static
// pages, or used for comparison in CNAME lookups. Static pages can be accessed through
// https://{owner}.{MainDomain}[/{repo}], with repo defaulting to "pages".
&cli.StringFlag{
Name: "pages-domain",
Usage: "specifies the main domain (starting with a dot) for which subdomains shall be served as static pages",
EnvVars: []string{"PAGES_DOMAIN"},
},
// RawDomain specifies the domain from which raw repository content shall be served in the following format:
// https://{RawDomain}/{owner}/{repo}[/{branch|tag|commit}/{version}]/{filepath...}
// (set to []byte(nil) to disable raw content hosting)
&cli.StringFlag{
Name: "raw-domain",
Usage:   "specifies the domain from which raw repository content shall be served; leave unset to disable raw content hosting",
EnvVars: []string{"RAW_DOMAIN"},
},
// #########################
// ### Page Server Setup ###
// #########################
&cli.StringFlag{
Name: "host",
Usage: "specifies host of listening address",
EnvVars: []string{"HOST"},
Value: "[::]",
},
&cli.UintFlag{
Name: "port",
Usage:   "specifies the https port to listen on for ssl requests",
EnvVars: []string{"PORT", "HTTPS_PORT"},
Value: 443,
},
&cli.UintFlag{
Name: "http-port",
Usage:   "specifies the http port; you also have to enable the http server via ENABLE_HTTP_SERVER=true",
EnvVars: []string{"HTTP_PORT"},
Value: 80,
},
&cli.BoolFlag{
Name: "enable-http-server",
Usage: "start a http server to redirect to https and respond to http acme challenges",
EnvVars: []string{"ENABLE_HTTP_SERVER"},
Value: false,
},
&cli.BoolFlag{
Name: "use-proxy-protocol",
Usage: "use the proxy protocol",
EnvVars: []string{"USE_PROXY_PROTOCOL"},
Value: false,
},
// Default branches to fetch assets from
&cli.StringSliceFlag{
Name: "pages-branch",
Usage: "define a branch to fetch assets from. Use this flag multiple times for multiple branches.",
EnvVars: []string{"PAGES_BRANCHES"},
Value: cli.NewStringSlice("pages"),
},
&cli.StringSliceFlag{
Name: "allowed-cors-domains",
Usage: "specify allowed CORS domains. Use this flag multiple times for multiple domains.",
EnvVars: []string{"ALLOWED_CORS_DOMAINS"},
},
&cli.StringSliceFlag{
Name: "blacklisted-paths",
Usage:   "return an error on these url paths. Use this flag multiple times for multiple paths.",
EnvVars: []string{"BLACKLISTED_PATHS"},
},
&cli.StringFlag{
Name: "log-level",
Value: "warn",
Usage:   "specify the log level. Possible options: info, warn, error, fatal",
EnvVars: []string{"LOG_LEVEL"},
},
&cli.StringFlag{
Name: "config-file",
Usage: "specify the location of the config file",
Aliases: []string{"config"},
EnvVars: []string{"CONFIG_FILE"},
},
&cli.BoolFlag{
Name: "enable-profiling",
Usage: "enables the go http profiling endpoints",
EnvVars: []string{"ENABLE_PROFILING"},
},
&cli.StringFlag{
Name: "profiling-address",
Usage: "specify ip address and port the profiling server should listen on",
EnvVars: []string{"PROFILING_ADDRESS"},
Value: "localhost:9999",
},
// ############################
// ### ACME Client Settings ###
// ############################
&cli.StringFlag{
Name: "acme-api-endpoint",
EnvVars: []string{"ACME_API"},
Value: "https://acme-v02.api.letsencrypt.org/directory",
},
&cli.StringFlag{
Name: "acme-email",
EnvVars: []string{"ACME_EMAIL"},
Value: "noreply@example.email",
},
&cli.BoolFlag{
Name: "acme-use-rate-limits",
// TODO: Usage
EnvVars: []string{"ACME_USE_RATE_LIMITS"},
Value: true,
},
&cli.BoolFlag{
Name: "acme-accept-terms",
Usage: "To accept the ACME ToS",
EnvVars: []string{"ACME_ACCEPT_TERMS"},
},
&cli.StringFlag{
Name: "acme-eab-kid",
Usage:   "Register the current account to the ACME server with external account binding (EAB).",
EnvVars: []string{"ACME_EAB_KID"},
},
&cli.StringFlag{
Name: "acme-eab-hmac",
Usage:   "Register the current account to the ACME server with external account binding (EAB).",
EnvVars: []string{"ACME_EAB_HMAC"},
},
&cli.StringFlag{
Name: "dns-provider",
Usage: "Use DNS-Challenge for main domain. Read more at: https://go-acme.github.io/lego/dns/",
EnvVars: []string{"DNS_PROVIDER"},
},
&cli.BoolFlag{
Name: "no-dns-01",
Usage:   "Always use individual certificates instead of a DNS-01 wildcard certificate",
EnvVars: []string{"NO_DNS_01"},
},
&cli.StringFlag{
Name: "acme-account-config",
Usage: "json file of acme account",
Value: "acme-account.json",
EnvVars: []string{"ACME_ACCOUNT_CONFIG"},
},
}...)
)
39
cli/setup.go Normal file
View file
@ -0,0 +1,39 @@
package cli
import (
"fmt"
"github.com/rs/zerolog/log"
"github.com/urfave/cli/v2"
"codeberg.org/codeberg/pages/server/database"
"codeberg.org/codeberg/pages/server/version"
)
func CreatePagesApp() *cli.App {
app := cli.NewApp()
app.Name = "pages-server"
app.Version = version.Version
app.Usage = "pages server"
app.Flags = ServerFlags
app.Commands = []*cli.Command{
Certs,
}
return app
}
func OpenCertDB(ctx *cli.Context) (certDB database.CertDB, closeFn func(), err error) {
certDB, err = database.NewXormDB(ctx.String("db-type"), ctx.String("db-conn"))
if err != nil {
return nil, nil, fmt.Errorf("could not connect to database: %w", err)
}
closeFn = func() {
if err := certDB.Close(); err != nil {
log.Error().Err(err).Msg("failed to close certificate database")
}
}
return certDB, closeFn, nil
}
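A minimal sketch of how `CreatePagesApp` might be wired into an entry point (illustrative only; the real `main` assigns the actual server handler to `app.Action`):

```go
package main

import (
	"os"

	"github.com/rs/zerolog/log"

	"codeberg.org/codeberg/pages/cli"
)

func main() {
	// Build the CLI app with all server flags and the "certs" subcommand.
	app := cli.CreatePagesApp()
	// The real server entry point would be assigned to app.Action here.
	if err := app.Run(os.Args); err != nil {
		log.Fatal().Err(err).Msg("pages-server terminated")
	}
}
```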
View file
@ -0,0 +1,33 @@
logLevel = 'trace'
[server]
host = '127.0.0.1'
port = 443
httpPort = 80
httpServerEnabled = true
mainDomain = 'codeberg.page'
rawDomain = 'raw.codeberg.page'
allowedCorsDomains = ['fonts.codeberg.org', 'design.codeberg.org']
blacklistedPaths = ['do/not/use']
[forge]
root = 'https://codeberg.org'
token = 'XXXXXXXX'
lfsEnabled = true
followSymlinks = true
defaultMimeType = "application/wasm"
forbiddenMimeTypes = ["text/html"]
[database]
type = 'sqlite'
conn = 'certs.sqlite'
[ACME]
email = 'a@b.c'
apiEndpoint = 'https://example.com'
acceptTerms = false
useRateLimits = true
eab_hmac = 'asdf'
eab_kid = 'qwer'
dnsProvider = 'cloudflare.com'
accountConfigFile = 'nope'
48
config/config.go Normal file
View file
@ -0,0 +1,48 @@
package config
type Config struct {
LogLevel string `default:"warn"`
Server ServerConfig
Forge ForgeConfig
Database DatabaseConfig
ACME ACMEConfig
}
type ServerConfig struct {
Host string `default:"[::]"`
Port uint16 `default:"443"`
HttpPort uint16 `default:"80"`
HttpServerEnabled bool `default:"true"`
UseProxyProtocol bool `default:"false"`
MainDomain string
RawDomain string
PagesBranches []string
AllowedCorsDomains []string
BlacklistedPaths []string
}
type ForgeConfig struct {
Root string
Token string
LFSEnabled bool `default:"false"`
FollowSymlinks bool `default:"false"`
DefaultMimeType string `default:"application/octet-stream"`
ForbiddenMimeTypes []string
}
type DatabaseConfig struct {
Type string `default:"sqlite3"`
Conn string `default:"certs.sqlite"`
}
type ACMEConfig struct {
Email string
APIEndpoint string `default:"https://acme-v02.api.letsencrypt.org/directory"`
AcceptTerms bool `default:"false"`
UseRateLimits bool `default:"true"`
EAB_HMAC string
EAB_KID string
DNSProvider string
NoDNS01 bool `default:"false"`
AccountConfigFile string `default:"acme-account.json"`
}
154
config/setup.go Normal file
View file
@ -0,0 +1,154 @@
package config
import (
"os"
"path"
"github.com/creasty/defaults"
"github.com/pelletier/go-toml/v2"
"github.com/rs/zerolog/log"
"github.com/urfave/cli/v2"
)
var ALWAYS_BLACKLISTED_PATHS = []string{
"/.well-known/acme-challenge/",
}
func NewDefaultConfig() Config {
config := Config{}
if err := defaults.Set(&config); err != nil {
panic(err)
}
// defaults does not support setting arrays from strings
config.Server.PagesBranches = []string{"main", "master", "pages"}
return config
}
func ReadConfig(ctx *cli.Context) (*Config, error) {
config := NewDefaultConfig()
// if no config file is given as an argument, return the default config
if !ctx.IsSet("config-file") {
return &config, nil
}
configFile := path.Clean(ctx.String("config-file"))
log.Debug().Str("config-file", configFile).Msg("reading config file")
content, err := os.ReadFile(configFile)
if err != nil {
return nil, err
}
err = toml.Unmarshal(content, &config)
return &config, err
}
func MergeConfig(ctx *cli.Context, config *Config) {
if ctx.IsSet("log-level") {
config.LogLevel = ctx.String("log-level")
}
mergeServerConfig(ctx, &config.Server)
mergeForgeConfig(ctx, &config.Forge)
mergeDatabaseConfig(ctx, &config.Database)
mergeACMEConfig(ctx, &config.ACME)
}
func mergeServerConfig(ctx *cli.Context, config *ServerConfig) {
if ctx.IsSet("host") {
config.Host = ctx.String("host")
}
if ctx.IsSet("port") {
config.Port = uint16(ctx.Uint("port"))
}
if ctx.IsSet("http-port") {
config.HttpPort = uint16(ctx.Uint("http-port"))
}
if ctx.IsSet("enable-http-server") {
config.HttpServerEnabled = ctx.Bool("enable-http-server")
}
if ctx.IsSet("use-proxy-protocol") {
config.UseProxyProtocol = ctx.Bool("use-proxy-protocol")
}
if ctx.IsSet("pages-domain") {
config.MainDomain = ctx.String("pages-domain")
}
if ctx.IsSet("raw-domain") {
config.RawDomain = ctx.String("raw-domain")
}
if ctx.IsSet("pages-branch") {
config.PagesBranches = ctx.StringSlice("pages-branch")
}
if ctx.IsSet("allowed-cors-domains") {
config.AllowedCorsDomains = ctx.StringSlice("allowed-cors-domains")
}
if ctx.IsSet("blacklisted-paths") {
config.BlacklistedPaths = ctx.StringSlice("blacklisted-paths")
}
// add the paths that should always be blacklisted
config.BlacklistedPaths = append(config.BlacklistedPaths, ALWAYS_BLACKLISTED_PATHS...)
}
func mergeForgeConfig(ctx *cli.Context, config *ForgeConfig) {
if ctx.IsSet("forge-root") {
config.Root = ctx.String("forge-root")
}
if ctx.IsSet("forge-api-token") {
config.Token = ctx.String("forge-api-token")
}
if ctx.IsSet("enable-lfs-support") {
config.LFSEnabled = ctx.Bool("enable-lfs-support")
}
if ctx.IsSet("enable-symlink-support") {
config.FollowSymlinks = ctx.Bool("enable-symlink-support")
}
if ctx.IsSet("default-mime-type") {
config.DefaultMimeType = ctx.String("default-mime-type")
}
if ctx.IsSet("forbidden-mime-types") {
config.ForbiddenMimeTypes = ctx.StringSlice("forbidden-mime-types")
}
}
func mergeDatabaseConfig(ctx *cli.Context, config *DatabaseConfig) {
if ctx.IsSet("db-type") {
config.Type = ctx.String("db-type")
}
if ctx.IsSet("db-conn") {
config.Conn = ctx.String("db-conn")
}
}
func mergeACMEConfig(ctx *cli.Context, config *ACMEConfig) {
if ctx.IsSet("acme-email") {
config.Email = ctx.String("acme-email")
}
if ctx.IsSet("acme-api-endpoint") {
config.APIEndpoint = ctx.String("acme-api-endpoint")
}
if ctx.IsSet("acme-accept-terms") {
config.AcceptTerms = ctx.Bool("acme-accept-terms")
}
if ctx.IsSet("acme-use-rate-limits") {
config.UseRateLimits = ctx.Bool("acme-use-rate-limits")
}
if ctx.IsSet("acme-eab-hmac") {
config.EAB_HMAC = ctx.String("acme-eab-hmac")
}
if ctx.IsSet("acme-eab-kid") {
config.EAB_KID = ctx.String("acme-eab-kid")
}
if ctx.IsSet("dns-provider") {
config.DNSProvider = ctx.String("dns-provider")
}
if ctx.IsSet("no-dns-01") {
config.NoDNS01 = ctx.Bool("no-dns-01")
}
if ctx.IsSet("acme-account-config") {
config.AccountConfigFile = ctx.String("acme-account-config")
}
}
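Taken together, the intended precedence is: struct defaults, then the optional TOML file, then CLI flags and environment variables. A hedged usage sketch (assuming the package is imported as `config` together with `github.com/urfave/cli/v2`; `loadConfig` is a hypothetical helper, not part of the codebase):

```go
// loadConfig applies defaults, then the optional TOML file, then CLI/env overrides.
func loadConfig(ctx *cli.Context) (*config.Config, error) {
	cfg, err := config.ReadConfig(ctx) // defaults, then the file given via --config-file (if any)
	if err != nil {
		return nil, err
	}
	config.MergeConfig(ctx, cfg) // flags and environment variables override file values
	return cfg, nil
}
```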
630
config/setup_test.go Normal file
View file
@ -0,0 +1,630 @@
package config
import (
"context"
"os"
"testing"
"github.com/pelletier/go-toml/v2"
"github.com/stretchr/testify/assert"
"github.com/urfave/cli/v2"
cmd "codeberg.org/codeberg/pages/cli"
)
func runApp(t *testing.T, fn func(*cli.Context) error, args []string) {
app := cmd.CreatePagesApp()
app.Action = fn
appCtx, appCancel := context.WithCancel(context.Background())
defer appCancel()
// os.Args always contains the binary name
args = append([]string{"testing"}, args...)
err := app.RunContext(appCtx, args)
assert.NoError(t, err)
}
// fixArrayFromCtx fixes the number of "changed" strings in a string slice according to the number of values in the context.
// This is a workaround because the cli library has a bug where the number of values in the context gets bigger the more tests are run.
func fixArrayFromCtx(ctx *cli.Context, key string, expected []string) []string {
if ctx.IsSet(key) {
ctxSlice := ctx.StringSlice(key)
if len(ctxSlice) > 1 {
for i := 1; i < len(ctxSlice); i++ {
expected = append([]string{"changed"}, expected...)
}
}
}
return expected
}
func readTestConfig() (*Config, error) {
content, err := os.ReadFile("assets/test_config.toml")
if err != nil {
return nil, err
}
expectedConfig := NewDefaultConfig()
err = toml.Unmarshal(content, &expectedConfig)
if err != nil {
return nil, err
}
return &expectedConfig, nil
}
func TestReadConfigShouldReturnEmptyConfigWhenConfigArgEmpty(t *testing.T) {
runApp(
t,
func(ctx *cli.Context) error {
cfg, err := ReadConfig(ctx)
expected := NewDefaultConfig()
assert.Equal(t, &expected, cfg)
return err
},
[]string{},
)
}
func TestReadConfigShouldReturnConfigFromFileWhenConfigArgPresent(t *testing.T) {
runApp(
t,
func(ctx *cli.Context) error {
cfg, err := ReadConfig(ctx)
if err != nil {
return err
}
expectedConfig, err := readTestConfig()
if err != nil {
return err
}
assert.Equal(t, expectedConfig, cfg)
return nil
},
[]string{"--config-file", "assets/test_config.toml"},
)
}
func TestValuesReadFromConfigFileShouldBeOverwrittenByArgs(t *testing.T) {
runApp(
t,
func(ctx *cli.Context) error {
cfg, err := ReadConfig(ctx)
if err != nil {
return err
}
MergeConfig(ctx, cfg)
expectedConfig, err := readTestConfig()
if err != nil {
return err
}
expectedConfig.LogLevel = "debug"
expectedConfig.Forge.Root = "not-codeberg.org"
expectedConfig.ACME.AcceptTerms = true
expectedConfig.Server.Host = "172.17.0.2"
expectedConfig.Server.BlacklistedPaths = append(expectedConfig.Server.BlacklistedPaths, ALWAYS_BLACKLISTED_PATHS...)
assert.Equal(t, expectedConfig, cfg)
return nil
},
[]string{
"--config-file", "assets/test_config.toml",
"--log-level", "debug",
"--forge-root", "not-codeberg.org",
"--acme-accept-terms",
"--host", "172.17.0.2",
},
)
}
func TestMergeConfigShouldReplaceAllExistingValuesGivenAllArgsExist(t *testing.T) {
runApp(
t,
func(ctx *cli.Context) error {
cfg := &Config{
LogLevel: "original",
Server: ServerConfig{
Host: "original",
Port: 8080,
HttpPort: 80,
HttpServerEnabled: false,
MainDomain: "original",
RawDomain: "original",
PagesBranches: []string{"original"},
AllowedCorsDomains: []string{"original"},
BlacklistedPaths: []string{"original"},
},
Forge: ForgeConfig{
Root: "original",
Token: "original",
LFSEnabled: false,
FollowSymlinks: false,
DefaultMimeType: "original",
ForbiddenMimeTypes: []string{"original"},
},
Database: DatabaseConfig{
Type: "original",
Conn: "original",
},
ACME: ACMEConfig{
Email: "original",
APIEndpoint: "original",
AcceptTerms: false,
UseRateLimits: false,
EAB_HMAC: "original",
EAB_KID: "original",
DNSProvider: "original",
NoDNS01: false,
AccountConfigFile: "original",
},
}
MergeConfig(ctx, cfg)
expectedConfig := &Config{
LogLevel: "changed",
Server: ServerConfig{
Host: "changed",
Port: 8443,
HttpPort: 443,
HttpServerEnabled: true,
MainDomain: "changed",
RawDomain: "changed",
PagesBranches: []string{"changed"},
AllowedCorsDomains: []string{"changed"},
BlacklistedPaths: append([]string{"changed"}, ALWAYS_BLACKLISTED_PATHS...),
},
Forge: ForgeConfig{
Root: "changed",
Token: "changed",
LFSEnabled: true,
FollowSymlinks: true,
DefaultMimeType: "changed",
ForbiddenMimeTypes: []string{"changed"},
},
Database: DatabaseConfig{
Type: "changed",
Conn: "changed",
},
ACME: ACMEConfig{
Email: "changed",
APIEndpoint: "changed",
AcceptTerms: true,
UseRateLimits: true,
EAB_HMAC: "changed",
EAB_KID: "changed",
DNSProvider: "changed",
NoDNS01: true,
AccountConfigFile: "changed",
},
}
assert.Equal(t, expectedConfig, cfg)
return nil
},
[]string{
"--log-level", "changed",
// Server
"--pages-domain", "changed",
"--raw-domain", "changed",
"--allowed-cors-domains", "changed",
"--blacklisted-paths", "changed",
"--pages-branch", "changed",
"--host", "changed",
"--port", "8443",
"--http-port", "443",
"--enable-http-server",
// Forge
"--forge-root", "changed",
"--forge-api-token", "changed",
"--enable-lfs-support",
"--enable-symlink-support",
"--default-mime-type", "changed",
"--forbidden-mime-types", "changed",
// Database
"--db-type", "changed",
"--db-conn", "changed",
// ACME
"--acme-email", "changed",
"--acme-api-endpoint", "changed",
"--acme-accept-terms",
"--acme-use-rate-limits",
"--acme-eab-hmac", "changed",
"--acme-eab-kid", "changed",
"--dns-provider", "changed",
"--no-dns-01",
"--acme-account-config", "changed",
},
)
}
func TestMergeServerConfigShouldAddDefaultBlacklistedPathsToBlacklistedPaths(t *testing.T) {
runApp(
t,
func(ctx *cli.Context) error {
cfg := &ServerConfig{}
mergeServerConfig(ctx, cfg)
expected := ALWAYS_BLACKLISTED_PATHS
assert.Equal(t, expected, cfg.BlacklistedPaths)
return nil
},
[]string{},
)
}
func TestMergeServerConfigShouldReplaceAllExistingValuesGivenAllArgsExist(t *testing.T) {
for range []uint8{0, 1} {
runApp(
t,
func(ctx *cli.Context) error {
cfg := &ServerConfig{
Host: "original",
Port: 8080,
HttpPort: 80,
HttpServerEnabled: false,
MainDomain: "original",
RawDomain: "original",
AllowedCorsDomains: []string{"original"},
BlacklistedPaths: []string{"original"},
}
mergeServerConfig(ctx, cfg)
expectedConfig := &ServerConfig{
Host: "changed",
Port: 8443,
HttpPort: 443,
HttpServerEnabled: true,
MainDomain: "changed",
RawDomain: "changed",
AllowedCorsDomains: fixArrayFromCtx(ctx, "allowed-cors-domains", []string{"changed"}),
BlacklistedPaths: fixArrayFromCtx(ctx, "blacklisted-paths", append([]string{"changed"}, ALWAYS_BLACKLISTED_PATHS...)),
}
assert.Equal(t, expectedConfig, cfg)
return nil
},
[]string{
"--pages-domain", "changed",
"--raw-domain", "changed",
"--allowed-cors-domains", "changed",
"--blacklisted-paths", "changed",
"--host", "changed",
"--port", "8443",
"--http-port", "443",
"--enable-http-server",
},
)
}
}
func TestMergeServerConfigShouldReplaceOnlyOneValueExistingValueGivenOnlyOneArgExists(t *testing.T) {
type testValuePair struct {
args []string
callback func(*ServerConfig)
}
testValuePairs := []testValuePair{
{args: []string{"--host", "changed"}, callback: func(sc *ServerConfig) { sc.Host = "changed" }},
{args: []string{"--port", "8443"}, callback: func(sc *ServerConfig) { sc.Port = 8443 }},
{args: []string{"--http-port", "443"}, callback: func(sc *ServerConfig) { sc.HttpPort = 443 }},
{args: []string{"--enable-http-server"}, callback: func(sc *ServerConfig) { sc.HttpServerEnabled = true }},
{args: []string{"--pages-domain", "changed"}, callback: func(sc *ServerConfig) { sc.MainDomain = "changed" }},
{args: []string{"--raw-domain", "changed"}, callback: func(sc *ServerConfig) { sc.RawDomain = "changed" }},
{args: []string{"--pages-branch", "changed"}, callback: func(sc *ServerConfig) { sc.PagesBranches = []string{"changed"} }},
{args: []string{"--allowed-cors-domains", "changed"}, callback: func(sc *ServerConfig) { sc.AllowedCorsDomains = []string{"changed"} }},
{args: []string{"--blacklisted-paths", "changed"}, callback: func(sc *ServerConfig) { sc.BlacklistedPaths = []string{"changed"} }},
}
for _, pair := range testValuePairs {
runApp(
t,
func(ctx *cli.Context) error {
cfg := ServerConfig{
Host: "original",
Port: 8080,
HttpPort: 80,
HttpServerEnabled: false,
MainDomain: "original",
RawDomain: "original",
PagesBranches: []string{"original"},
AllowedCorsDomains: []string{"original"},
BlacklistedPaths: []string{"original"},
}
expectedConfig := cfg
pair.callback(&expectedConfig)
expectedConfig.BlacklistedPaths = append(expectedConfig.BlacklistedPaths, ALWAYS_BLACKLISTED_PATHS...)
expectedConfig.PagesBranches = fixArrayFromCtx(ctx, "pages-branch", expectedConfig.PagesBranches)
expectedConfig.AllowedCorsDomains = fixArrayFromCtx(ctx, "allowed-cors-domains", expectedConfig.AllowedCorsDomains)
expectedConfig.BlacklistedPaths = fixArrayFromCtx(ctx, "blacklisted-paths", expectedConfig.BlacklistedPaths)
mergeServerConfig(ctx, &cfg)
assert.Equal(t, expectedConfig, cfg)
return nil
},
pair.args,
)
}
}
func TestMergeForgeConfigShouldReplaceAllExistingValuesGivenAllArgsExist(t *testing.T) {
runApp(
t,
func(ctx *cli.Context) error {
cfg := &ForgeConfig{
Root: "original",
Token: "original",
LFSEnabled: false,
FollowSymlinks: false,
DefaultMimeType: "original",
ForbiddenMimeTypes: []string{"original"},
}
mergeForgeConfig(ctx, cfg)
expectedConfig := &ForgeConfig{
Root: "changed",
Token: "changed",
LFSEnabled: true,
FollowSymlinks: true,
DefaultMimeType: "changed",
ForbiddenMimeTypes: fixArrayFromCtx(ctx, "forbidden-mime-types", []string{"changed"}),
}
assert.Equal(t, expectedConfig, cfg)
return nil
},
[]string{
"--forge-root", "changed",
"--forge-api-token", "changed",
"--enable-lfs-support",
"--enable-symlink-support",
"--default-mime-type", "changed",
"--forbidden-mime-types", "changed",
},
)
}
func TestMergeForgeConfigShouldReplaceOnlyOneValueExistingValueGivenOnlyOneArgExists(t *testing.T) {
type testValuePair struct {
args []string
callback func(*ForgeConfig)
}
testValuePairs := []testValuePair{
{args: []string{"--forge-root", "changed"}, callback: func(gc *ForgeConfig) { gc.Root = "changed" }},
{args: []string{"--forge-api-token", "changed"}, callback: func(gc *ForgeConfig) { gc.Token = "changed" }},
{args: []string{"--enable-lfs-support"}, callback: func(gc *ForgeConfig) { gc.LFSEnabled = true }},
{args: []string{"--enable-symlink-support"}, callback: func(gc *ForgeConfig) { gc.FollowSymlinks = true }},
{args: []string{"--default-mime-type", "changed"}, callback: func(gc *ForgeConfig) { gc.DefaultMimeType = "changed" }},
{args: []string{"--forbidden-mime-types", "changed"}, callback: func(gc *ForgeConfig) { gc.ForbiddenMimeTypes = []string{"changed"} }},
}
for _, pair := range testValuePairs {
runApp(
t,
func(ctx *cli.Context) error {
cfg := ForgeConfig{
Root: "original",
Token: "original",
LFSEnabled: false,
FollowSymlinks: false,
DefaultMimeType: "original",
ForbiddenMimeTypes: []string{"original"},
}
expectedConfig := cfg
pair.callback(&expectedConfig)
mergeForgeConfig(ctx, &cfg)
expectedConfig.ForbiddenMimeTypes = fixArrayFromCtx(ctx, "forbidden-mime-types", expectedConfig.ForbiddenMimeTypes)
assert.Equal(t, expectedConfig, cfg)
return nil
},
pair.args,
)
}
}
func TestMergeForgeConfigShouldReplaceValuesGivenGiteaOptionsExist(t *testing.T) {
runApp(
t,
func(ctx *cli.Context) error {
cfg := &ForgeConfig{
Root: "original",
Token: "original",
}
mergeForgeConfig(ctx, cfg)
expectedConfig := &ForgeConfig{
Root: "changed",
Token: "changed",
}
assert.Equal(t, expectedConfig, cfg)
return nil
},
[]string{
"--gitea-root", "changed",
"--gitea-api-token", "changed",
},
)
}
func TestMergeDatabaseConfigShouldReplaceAllExistingValuesGivenAllArgsExist(t *testing.T) {
runApp(
t,
func(ctx *cli.Context) error {
cfg := &DatabaseConfig{
Type: "original",
Conn: "original",
}
mergeDatabaseConfig(ctx, cfg)
expectedConfig := &DatabaseConfig{
Type: "changed",
Conn: "changed",
}
assert.Equal(t, expectedConfig, cfg)
return nil
},
[]string{
"--db-type", "changed",
"--db-conn", "changed",
},
)
}
func TestMergeDatabaseConfigShouldReplaceOnlyOneValueExistingValueGivenOnlyOneArgExists(t *testing.T) {
type testValuePair struct {
args []string
callback func(*DatabaseConfig)
}
testValuePairs := []testValuePair{
{args: []string{"--db-type", "changed"}, callback: func(gc *DatabaseConfig) { gc.Type = "changed" }},
{args: []string{"--db-conn", "changed"}, callback: func(gc *DatabaseConfig) { gc.Conn = "changed" }},
}
for _, pair := range testValuePairs {
runApp(
t,
func(ctx *cli.Context) error {
cfg := DatabaseConfig{
Type: "original",
Conn: "original",
}
expectedConfig := cfg
pair.callback(&expectedConfig)
mergeDatabaseConfig(ctx, &cfg)
assert.Equal(t, expectedConfig, cfg)
return nil
},
pair.args,
)
}
}
func TestMergeACMEConfigShouldReplaceAllExistingValuesGivenAllArgsExist(t *testing.T) {
runApp(
t,
func(ctx *cli.Context) error {
cfg := &ACMEConfig{
Email: "original",
APIEndpoint: "original",
AcceptTerms: false,
UseRateLimits: false,
EAB_HMAC: "original",
EAB_KID: "original",
DNSProvider: "original",
NoDNS01: false,
AccountConfigFile: "original",
}
mergeACMEConfig(ctx, cfg)
expectedConfig := &ACMEConfig{
Email: "changed",
APIEndpoint: "changed",
AcceptTerms: true,
UseRateLimits: true,
EAB_HMAC: "changed",
EAB_KID: "changed",
DNSProvider: "changed",
NoDNS01: true,
AccountConfigFile: "changed",
}
assert.Equal(t, expectedConfig, cfg)
return nil
},
[]string{
"--acme-email", "changed",
"--acme-api-endpoint", "changed",
"--acme-accept-terms",
"--acme-use-rate-limits",
"--acme-eab-hmac", "changed",
"--acme-eab-kid", "changed",
"--dns-provider", "changed",
"--no-dns-01",
"--acme-account-config", "changed",
},
)
}
func TestMergeACMEConfigShouldReplaceOnlyOneValueExistingValueGivenOnlyOneArgExists(t *testing.T) {
type testValuePair struct {
args []string
callback func(*ACMEConfig)
}
testValuePairs := []testValuePair{
{args: []string{"--acme-email", "changed"}, callback: func(gc *ACMEConfig) { gc.Email = "changed" }},
{args: []string{"--acme-api-endpoint", "changed"}, callback: func(gc *ACMEConfig) { gc.APIEndpoint = "changed" }},
{args: []string{"--acme-accept-terms"}, callback: func(gc *ACMEConfig) { gc.AcceptTerms = true }},
{args: []string{"--acme-use-rate-limits"}, callback: func(gc *ACMEConfig) { gc.UseRateLimits = true }},
{args: []string{"--acme-eab-hmac", "changed"}, callback: func(gc *ACMEConfig) { gc.EAB_HMAC = "changed" }},
{args: []string{"--acme-eab-kid", "changed"}, callback: func(gc *ACMEConfig) { gc.EAB_KID = "changed" }},
{args: []string{"--dns-provider", "changed"}, callback: func(gc *ACMEConfig) { gc.DNSProvider = "changed" }},
{args: []string{"--no-dns-01"}, callback: func(gc *ACMEConfig) { gc.NoDNS01 = true }},
{args: []string{"--acme-account-config", "changed"}, callback: func(gc *ACMEConfig) { gc.AccountConfigFile = "changed" }},
}
for _, pair := range testValuePairs {
runApp(
t,
func(ctx *cli.Context) error {
cfg := ACMEConfig{
Email: "original",
APIEndpoint: "original",
AcceptTerms: false,
UseRateLimits: false,
EAB_HMAC: "original",
EAB_KID: "original",
DNSProvider: "original",
AccountConfigFile: "original",
}
expectedConfig := cfg
pair.callback(&expectedConfig)
mergeACMEConfig(ctx, &cfg)
assert.Equal(t, expectedConfig, cfg)
return nil
},
pair.args,
)
}
}
32
example_config.toml Normal file
View file
@ -0,0 +1,32 @@
logLevel = 'debug'
[server]
host = '[::]'
port = 443
httpPort = 80
httpServerEnabled = true
mainDomain = 'codeberg.page'
rawDomain = 'raw.codeberg.page'
pagesBranches = ["pages"]
allowedCorsDomains = []
blacklistedPaths = []
[forge]
root = 'https://codeberg.org'
token = 'ASDF1234'
lfsEnabled = true
followSymlinks = true
[database]
type = 'sqlite'
conn = 'certs.sqlite'
[ACME]
email = 'noreply@example.email'
apiEndpoint = 'https://acme-v02.api.letsencrypt.org/directory'
acceptTerms = false
useRateLimits = false
eab_hmac = ''
eab_kid = ''
dnsProvider = ''
accountConfigFile = 'acme-account.json'
1
examples/haproxy-sni/.gitignore vendored Normal file
View file
@ -0,0 +1 @@
*.dump
View file
@ -0,0 +1,25 @@
# HAProxy with SNI & Host-based rules
This is a proof of concept, enabling HAProxy to use _either_ SNI to redirect to backends with their own HTTPS certificates (which are then fully exposed to the client; HAProxy only proxies on a TCP level in that case), _as well as_ to terminate HTTPS and use the Host header to redirect to backends that use HTTP (or a new HTTPS connection).
## How it works
1. The `http_redirect_frontend` is only there to listen on port 80 and redirect every request to HTTPS.
2. The `https_sni_frontend` listens on port 443 and chooses a backend based on the SNI hostname of the TLS connection.
3. The `https_termination_backend` passes all requests to a unix socket (using the plain TCP data).
4. The `https_termination_frontend` listens on said unix socket, terminates the HTTPS connections and then chooses a backend based on the Host header.
In the example (see [haproxy.cfg](haproxy.cfg)), the `pages_backend` is listening via HTTPS and is providing its own HTTPS certificates, while the `gitea_backend` only provides HTTP.
## How to test
```bash
docker-compose up &
./test.sh
docker-compose down
# For manual testing: all HTTPS URLs connect to localhost:443 & certificates are not verified.
./test.sh [curl-options...] <url>
```
![Screenshot of the test script's output](/attachments/c82d79ea-7586-4d4b-b340-3ad0030185d6)
View file
@ -0,0 +1,8 @@
-----BEGIN DH PARAMETERS-----
MIIBCAKCAQEA//////////+t+FRYortKmq/cViAnPTzx2LnFg84tNpWp4TZBFGQz
+8yTnc4kmz75fS/jY2MMddj2gbICrsRhetPfHtXV/WVhJDP1H18GbtCFY2VVPe0a
87VXE15/V8k1mE8McODmi3fipona8+/och3xWKE2rec1MKzKT0g6eXq8CrGCsyT7
YdEIqUuyyOP7uWrat2DX9GgdT0Kj3jlN9K5W7edjcrsZCwenyO4KbXCeAvzhzffi
7MA0BM0oNC9hkXL+nOmFg/+OTxIy7vKBg8P+OxtMb61zO7X8vC7CIAXFjvGDfRaD
ssbzSibBsu/6iGtCOGEoXJf//////////wIBAg==
-----END DH PARAMETERS-----
View file
@ -0,0 +1,21 @@
version: '3'
services:
haproxy:
image: haproxy
ports: ['443:443']
volumes:
- ./haproxy.cfg:/usr/local/etc/haproxy/haproxy.cfg:ro
- ./dhparam.pem:/etc/ssl/dhparam.pem:ro
- ./haproxy-certificates:/etc/ssl/private/haproxy:ro
cap_add:
- NET_ADMIN
gitea:
image: caddy
volumes:
- ./gitea-www:/srv:ro
- ./gitea.Caddyfile:/etc/caddy/Caddyfile:ro
pages:
image: caddy
volumes:
- ./pages-www:/srv:ro
- ./pages.Caddyfile:/etc/caddy/Caddyfile:ro
View file
@ -0,0 +1 @@
Hello to Gitea!
View file
@ -0,0 +1,3 @@
http://codeberg.org
file_server
View file
@ -0,0 +1,26 @@
-----BEGIN CERTIFICATE-----
MIIEUDCCArigAwIBAgIRAMq3iwF963VGkzXFpbrpAtkwDQYJKoZIhvcNAQELBQAw
gYkxHjAcBgNVBAoTFW1rY2VydCBkZXZlbG9wbWVudCBDQTEvMC0GA1UECwwmbW9t
YXJAbW9yaXR6LWxhcHRvcCAoTW9yaXR6IE1hcnF1YXJkdCkxNjA0BgNVBAMMLW1r
Y2VydCBtb21hckBtb3JpdHotbGFwdG9wIChNb3JpdHogTWFycXVhcmR0KTAeFw0y
MTA2MDYwOTQ4NDFaFw0yMzA5MDYwOTQ4NDFaMFoxJzAlBgNVBAoTHm1rY2VydCBk
ZXZlbG9wbWVudCBjZXJ0aWZpY2F0ZTEvMC0GA1UECwwmbW9tYXJAbW9yaXR6LWxh
cHRvcCAoTW9yaXR6IE1hcnF1YXJkdCkwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAw
ggEKAoIBAQCrSPSPM6grNZMG4ZKFCVxuXu+qkHdzSR96QUxi00VkIrkGPmyMN7q7
rUQJto9C9guJio3n7y3Bvr5kBjICjyWQd7GfkVuYgiYiG/O2hy1u1dIMCAB/Zhx1
F1mvRfn/Q4eZk2GSOUM+kC0xaNsn2827VGLOGFywUhRmu7J9QSQ3x1Pi5BME7eNC
AKup0CbrMrZSzKAEuYujLY0UYRxUrguMnV60wxJDCYE14YDxn9t0g7wQmzyndupk
AMLNJZX5L83RA6vUEuTVYBFcyB0Fu3oBLQ31y5QOZ7WF/QiO5cPicQJI/oyXlHq4
97BWS/H28kj1H5ZM8+5yhCYDtgj7dERpAgMBAAGjYTBfMA4GA1UdDwEB/wQEAwIF
oDATBgNVHSUEDDAKBggrBgEFBQcDATAfBgNVHSMEGDAWgBSOSXQZqt2gjbTOkE9Q
ddI8SYPqrDAXBgNVHREEEDAOggxjb2RlYmVyZy5vcmcwDQYJKoZIhvcNAQELBQAD
ggGBAJ/57DGqfuOa3aS/nLeAzl8komvyHuoOZi9yDK2Jqr+COxP58zSu8xwhiZfc
TJvIyB9QR7imGiQ7fEKby40q8uxGGx13oY7gQy7PG8hHk2dkfDZuSQacnpPRC3W0
0dL2CQIog6rw6jJHjxneitkX9FUmOnHIKy7LHya0Sthg36Z0Qw5JA3SCy6OQNepR
R2XzwTZ0KFk6gAuKCto8ENUlU5lV9PM4X3U0cBOIc5LJAPM+cxEDUocFtFqKJPbe
YYlSeB200YhYOdi+x34n9xnQjFu/jVlWF+Y0tMBB1WWq6rZbnuylwWLYQZAo10Co
D3oWsYRlD/ZL7X20ztIy8vRXz33ugnxxf88Q7csWDYb4S325svLfI2EjciIxYmBo
dSJxXRQkadjIoI7gNvzeWBkYSJpQUbaD4nT2xRS8vfuv42/DrIehb8SbTivHmcB3
OibpWIvDtS1B8thIlzl0edb+8pb6mof7pOBxoZdcBsSAk2/48s+jfRHfD9XcuKnv
hGCdSQ==
-----END CERTIFICATE-----
View file
@ -0,0 +1,28 @@
-----BEGIN PRIVATE KEY-----
MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQCrSPSPM6grNZMG
4ZKFCVxuXu+qkHdzSR96QUxi00VkIrkGPmyMN7q7rUQJto9C9guJio3n7y3Bvr5k
BjICjyWQd7GfkVuYgiYiG/O2hy1u1dIMCAB/Zhx1F1mvRfn/Q4eZk2GSOUM+kC0x
aNsn2827VGLOGFywUhRmu7J9QSQ3x1Pi5BME7eNCAKup0CbrMrZSzKAEuYujLY0U
YRxUrguMnV60wxJDCYE14YDxn9t0g7wQmzyndupkAMLNJZX5L83RA6vUEuTVYBFc
yB0Fu3oBLQ31y5QOZ7WF/QiO5cPicQJI/oyXlHq497BWS/H28kj1H5ZM8+5yhCYD
tgj7dERpAgMBAAECggEAAeW+/88cr83aIRtimiKuaXKXyRXsnNRUivAqPnYEsMVJ
s24BmdQMN4QF2u2wzJcZLZ7hT45wvVK1nToMV8bqLZ2F1DSyBRB8B6iznHQG5tFr
kEKObtrcuddWYQCvckp3OBZP4GTN/+Vs+r0koF5o+whGR+4xKKrgGvs9UPHlytBf
0DMzAzWzGPp6qBPw2sUx/fa9r5TqFW+p4SEOZJUqL2/zEZ6KBWbKw5T1e1y2kMEc
cquUQ4avqK/N1nwRNKUnTvW827v0k7HQ2cFdrjIATNlICslOWJQicG5GUOuSBkTC
0FFkSTtHP4qm0BqShjv6NDmzX+3WCVkGOKFOI+zuWQKBgQDBq8yEcvfMJY98KNlR
eKKdJAMJvKdoD65Yv6EG7ZzpeEWHaTGhu71RPgHYkHn8h1T/9WniroSk19+zb4lP
mMsBwxpg5HejWPzIiiJRkRCRA7aZZfvaXfIWryB4kI1tlGHBNN/+SYpG1zdNumtp
Xyb/sQWMMWRZdRgclF8V+NvduwKBgQDiaM59gBROleREduFZE1a0oXtt+CrwrPlz
hclrkYl1FbTA4TdL4JNbj5jCXCR8YakFhxWEmhwq+Dgl1NQY/YjHyG3w2imaeASX
QUsEvAIvNrv1mIELiYCLmUElyX4WL3UhqveOFcZUvR1Z4TTwruPQmXf6BJEBLbWI
f7odmG6yKwKBgQCzpuLjZiZY9+qe2OGmQopNzE8JJDgCPrGS38fGvnnU1N1iXAFP
LvDRwPxDYNnXl84QVR2wygR/SUTYlTlBXdHKw6nfgW89Vlm+yOxGz5MXgeNLbp/u
k0DzK+aqECUxJfh8GclCgANF7XP+pVPn/f0WKKalwld86DLCqBuALUX+6wKBgCUh
gxvZ8Xqh4nnH9VUicsnU4eU7Ge+2roJfopTdnWlyUd6AEQ2EmyYc+rSFYAZ2Db42
VTUWASCa7LpnmREwI0qAeGdToBcRL8+OibsRClqr409331IBDu/WBnUoAmGpDtCi
tU68C3bCPRoMcR430GzZfm+maBGFaYwlRmSsJxtZAoGADSA3uAZBuWNDPNKUas2k
Z2dXFEPNpViMjQzJ+Ko7lbOBpUUUQfZF2VMSK4lcnhhbmhcMrYzWWmh6uaw78aHY
e3M//BfcVMdxHw7EemGOViNNq3uDIwzvYteoe6fAOA7MaV+WjJaf+smceR4o38fk
U9RTkKpRJIcvEW5bvTI9h4o=
-----END PRIVATE KEY-----
View file
@ -0,0 +1,99 @@
#####################################
## Global Configuration & Defaults ##
#####################################
global
log stderr format iso local7
# generated 2021-06-05, Mozilla Guideline v5.6, HAProxy 2.1, OpenSSL 1.1.1d, intermediate configuration
# https://ssl-config.mozilla.org/#server=haproxy&version=2.1&config=intermediate&openssl=1.1.1d&guideline=5.6
ssl-default-bind-ciphers ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-CHACHA20-POLY1305:ECDHE-RSA-CHACHA20-POLY1305:DHE-RSA-AES128-GCM-SHA256:DHE-RSA-AES256-GCM-SHA384
ssl-default-bind-ciphersuites TLS_AES_128_GCM_SHA256:TLS_AES_256_GCM_SHA384:TLS_CHACHA20_POLY1305_SHA256
ssl-default-bind-options prefer-client-ciphers no-sslv3 no-tlsv10 no-tlsv11 no-tls-tickets
ssl-default-server-ciphers ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-CHACHA20-POLY1305:ECDHE-RSA-CHACHA20-POLY1305:DHE-RSA-AES128-GCM-SHA256:DHE-RSA-AES256-GCM-SHA384
ssl-default-server-ciphersuites TLS_AES_128_GCM_SHA256:TLS_AES_256_GCM_SHA384:TLS_CHACHA20_POLY1305_SHA256
ssl-default-server-options no-sslv3 no-tlsv10 no-tlsv11 no-tls-tickets
# curl https://ssl-config.mozilla.org/ffdhe2048.txt > /path/to/dhparam
ssl-dh-param-file /etc/ssl/dhparam.pem
defaults
log global
timeout connect 30000
timeout check 300000
timeout client 300000
timeout server 300000
############################################################################
## Frontends: HTTP; HTTPS → HTTPS SNI-based; HTTPS → HTTP(S) header-based ##
############################################################################
frontend http_redirect_frontend
# HTTP frontend that redirects everything to HTTPS
bind :::80 v4v6
mode http
http-request redirect scheme https
frontend https_sni_frontend
# TCP frontend that forwards to HTTPS backends based on SNI
bind :::443 v4v6
mode tcp
# Wait up to 5s for a SNI header & only accept TLS connections
tcp-request inspect-delay 5s
tcp-request content capture req.ssl_sni len 255
log-format "%ci:%cp -> %[capture.req.hdr(0)] @ %f (%fi:%fp) -> %b (%bi:%bp)"
tcp-request content accept if { req.ssl_hello_type 1 }
###################################################
## Rules: forward to HTTP(S) header-based rules ##
###################################################
acl use_http_backend req.ssl_sni -i "codeberg.org"
acl use_http_backend req.ssl_sni -i "join.codeberg.org"
# TODO: use this if no SNI exists
use_backend https_termination_backend if use_http_backend
############################
## Rules: HTTPS SNI-based ##
############################
# use_backend xyz_backend if { req.ssl_sni -i "xyz" }
default_backend pages_backend
frontend https_termination_frontend
# Terminate TLS for HTTP backends
bind /tmp/haproxy-tls-termination.sock accept-proxy ssl strict-sni alpn h2,http/1.1 crt /etc/ssl/private/haproxy/
mode http
# HSTS (63072000 seconds)
http-response set-header Strict-Transport-Security max-age=63072000
http-request capture req.hdr(Host) len 255
log-format "%ci:%cp -> %[capture.req.hdr(0)] @ %f (%fi:%fp) -> %b (%bi:%bp)"
##################################
## Rules: HTTP(S) header-based ##
##################################
use_backend gitea_backend if { hdr(host) -i codeberg.org }
backend https_termination_backend
# Redirect to the terminating HTTPS frontend for all HTTP backends
server https_termination_server /tmp/haproxy-tls-termination.sock send-proxy-v2-ssl-cn
mode tcp
###############################
## Backends: HTTPS SNI-based ##
###############################
backend pages_backend
# The pages server is an HTTPS backend that terminates TLS itself, using its own certificates for custom domains
server pages_server pages:443
mode tcp
####################################
## Backends: HTTP(S) header-based ##
####################################
backend gitea_backend
server gitea_server gitea:80
mode http
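
As an illustration of the SNI-based routing above (a sketch, not part of this diff): connecting to the same address with two different SNI values should surface two different certificate issuers, one from HAProxy's own TLS termination and one from the pages server's certificate, mirroring what examples/haproxy-sni/test.sh checks with curl. The hostnames and address below come from the example setup in this change.

package main

import (
	"crypto/tls"
	"fmt"
)

func main() {
	for _, host := range []string{"codeberg.org", "example-page.org"} {
		conn, err := tls.Dial("tcp", "127.0.0.1:443", &tls.Config{
			ServerName:         host, // the SNI value HAProxy routes on
			InsecureSkipVerify: true, // the example certificates are not publicly trusted
		})
		if err != nil {
			fmt.Println(host, "error:", err)
			continue
		}
		if certs := conn.ConnectionState().PeerCertificates; len(certs) > 0 {
			fmt.Println(host, "issuer:", certs[0].Issuer)
		}
		conn.Close()
	}
}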

View file

@ -0,0 +1 @@
Hello to Pages!

View file

@ -0,0 +1,4 @@
https://example-page.org
tls internal
file_server

22
examples/haproxy-sni/test.sh Executable file
View file

@ -0,0 +1,22 @@
#!/bin/sh
if [ $# -gt 0 ]; then
exec curl -k --resolve '*:443:127.0.0.1' "$@"
fi
fail() {
echo "[FAIL] $@"
exit 1
}
echo "Connecting to Gitea..."
res=$(curl https://codeberg.org -sk --resolve '*:443:127.0.0.1' --trace-ascii gitea.dump | tee /dev/stderr)
echo "$res" | grep -Fx 'Hello to Gitea!' >/dev/null || fail "Gitea didn't answer"
grep '^== Info: issuer: O=mkcert development CA;' gitea.dump || { grep '^== Info: issuer:' gitea.dump; fail "Gitea didn't use the correct certificate!"; }
echo "Connecting to Pages..."
res=$(curl https://example-page.org -sk --resolve '*:443:127.0.0.1' --trace-ascii pages.dump | tee /dev/stderr)
echo "$res" | grep -Fx 'Hello to Pages!' >/dev/null || fail "Pages didn't answer"
grep '^== Info: issuer: CN=Caddy Local Authority\b' pages.dump || { grep '^== Info: issuer:' pages.dump; fail "Pages didn't use the correct certificate!"; }
echo "All tests succeeded"
rm *.dump

71
flake.lock generated Normal file
View file

@ -0,0 +1,71 @@
{
"nodes": {
"flake-utils": {
"inputs": {
"systems": "systems"
},
"locked": {
"lastModified": 1731533236,
"narHash": "sha256-l0KFg5HjrsfsO/JpG+r7fRrqm12kzFHyUHqHCVpMMbI=",
"owner": "numtide",
"repo": "flake-utils",
"rev": "11707dc2f618dd54ca8739b309ec4fc024de578b",
"type": "github"
},
"original": {
"id": "flake-utils",
"type": "indirect"
}
},
"nixpkgs": {
"locked": {
"lastModified": 0,
"narHash": "sha256-WFZDy4bG2RkkCQloIEG8BXEvzyKklFVJbAismOJsIp4=",
"path": "/nix/store/c77dsgfxjywplw8bk8s8jlkdsr7a1bi9-source",
"type": "path"
},
"original": {
"id": "nixpkgs",
"type": "indirect"
}
},
"root": {
"inputs": {
"flake-utils": "flake-utils",
"nixpkgs": "nixpkgs",
"systems": "systems_2"
}
},
"systems": {
"locked": {
"lastModified": 1681028828,
"narHash": "sha256-Vy1rq5AaRuLzOxct8nz4T6wlgyUR7zLU309k9mBC768=",
"owner": "nix-systems",
"repo": "default",
"rev": "da67096a3b9bf56a91d16901293e51ba5b49a27e",
"type": "github"
},
"original": {
"owner": "nix-systems",
"repo": "default",
"type": "github"
}
},
"systems_2": {
"locked": {
"lastModified": 1681028828,
"narHash": "sha256-Vy1rq5AaRuLzOxct8nz4T6wlgyUR7zLU309k9mBC768=",
"owner": "nix-systems",
"repo": "default",
"rev": "da67096a3b9bf56a91d16901293e51ba5b49a27e",
"type": "github"
},
"original": {
"id": "systems",
"type": "indirect"
}
}
},
"root": "root",
"version": 7
}

27
flake.nix Normal file
View file

@ -0,0 +1,27 @@
{
outputs = {
self,
nixpkgs,
flake-utils,
systems,
}:
flake-utils.lib.eachSystem (import systems)
(system: let
pkgs = import nixpkgs {
inherit system;
};
in {
devShells.default = pkgs.mkShell {
buildInputs = with pkgs; [
glibc.static
go
gofumpt
golangci-lint
gopls
gotools
go-tools
sqlite-interactive
];
};
});
}

237
go.mod Normal file
View file

@ -0,0 +1,237 @@
module codeberg.org/codeberg/pages
go 1.24.0
require (
code.gitea.io/sdk/gitea v0.20.0
github.com/OrlovEvgeny/go-mcache v0.0.0-20200121124330-1a8195b34f3a
github.com/creasty/defaults v1.8.0
github.com/go-acme/lego/v4 v4.21.0
github.com/go-sql-driver/mysql v1.8.1
github.com/hashicorp/go-uuid v1.0.3
github.com/hashicorp/golang-lru/v2 v2.0.7
github.com/joho/godotenv v1.5.1
github.com/lib/pq v1.10.9
github.com/mattn/go-sqlite3 v1.14.24
github.com/microcosm-cc/bluemonday v1.0.27
github.com/pelletier/go-toml/v2 v2.2.3
github.com/pires/go-proxyproto v0.8.0
github.com/reugn/equalizer v0.0.0-20210216135016-a959c509d7ad
github.com/rs/zerolog v1.33.0
github.com/stretchr/testify v1.10.0
github.com/urfave/cli/v2 v2.27.5
golang.org/x/exp v0.0.0-20250305212735-054e65f0b394
xorm.io/xorm v1.3.9
)
require (
cloud.google.com/go/auth v0.14.0 // indirect
cloud.google.com/go/auth/oauth2adapt v0.2.7 // indirect
cloud.google.com/go/compute/metadata v0.6.0 // indirect
filippo.io/edwards25519 v1.1.0 // indirect
github.com/42wim/httpsig v1.2.2 // indirect
github.com/AdamSLevy/jsonrpc2/v14 v14.1.0 // indirect
github.com/Azure/azure-sdk-for-go v68.0.0+incompatible // indirect
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.17.0 // indirect
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.8.1 // indirect
github.com/Azure/azure-sdk-for-go/sdk/internal v1.10.0 // indirect
github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/dns/armdns v1.2.0 // indirect
github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/privatedns/armprivatedns v1.3.0 // indirect
github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resourcegraph/armresourcegraph v0.9.0 // indirect
github.com/Azure/go-autorest v14.2.0+incompatible // indirect
github.com/Azure/go-autorest/autorest v0.11.29 // indirect
github.com/Azure/go-autorest/autorest/adal v0.9.24 // indirect
github.com/Azure/go-autorest/autorest/azure/auth v0.5.13 // indirect
github.com/Azure/go-autorest/autorest/azure/cli v0.4.6 // indirect
github.com/Azure/go-autorest/autorest/date v0.3.0 // indirect
github.com/Azure/go-autorest/autorest/to v0.4.0 // indirect
github.com/Azure/go-autorest/logger v0.2.1 // indirect
github.com/Azure/go-autorest/tracing v0.6.0 // indirect
github.com/AzureAD/microsoft-authentication-library-for-go v1.3.2 // indirect
github.com/OpenDNS/vegadns2client v0.0.0-20180418235048-a3fa4a771d87 // indirect
github.com/akamai/AkamaiOPEN-edgegrid-golang v1.2.2 // indirect
github.com/aliyun/alibaba-cloud-sdk-go v1.63.83 // indirect
github.com/aws/aws-sdk-go-v2 v1.33.0 // indirect
github.com/aws/aws-sdk-go-v2/config v1.29.0 // indirect
github.com/aws/aws-sdk-go-v2/credentials v1.17.53 // indirect
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.24 // indirect
github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.28 // indirect
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.28 // indirect
github.com/aws/aws-sdk-go-v2/internal/ini v1.8.1 // indirect
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.1 // indirect
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.9 // indirect
github.com/aws/aws-sdk-go-v2/service/lightsail v1.42.10 // indirect
github.com/aws/aws-sdk-go-v2/service/route53 v1.48.1 // indirect
github.com/aws/aws-sdk-go-v2/service/sso v1.24.10 // indirect
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.28.9 // indirect
github.com/aws/aws-sdk-go-v2/service/sts v1.33.8 // indirect
github.com/aws/smithy-go v1.22.1 // indirect
github.com/aymerick/douceur v0.2.0 // indirect
github.com/benbjohnson/clock v1.3.5 // indirect
github.com/boombuler/barcode v1.0.2 // indirect
github.com/cenkalti/backoff/v4 v4.3.0 // indirect
github.com/civo/civogo v0.3.92 // indirect
github.com/cloudflare/cloudflare-go v0.114.0 // indirect
github.com/cpu/goacmedns v0.1.1 // indirect
github.com/cpuguy83/go-md2man/v2 v2.0.6 // indirect
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
github.com/davidmz/go-pageant v1.0.2 // indirect
github.com/dimchansky/utfbom v1.1.1 // indirect
github.com/dnsimple/dnsimple-go v1.7.0 // indirect
github.com/exoscale/egoscale/v3 v3.1.8 // indirect
github.com/fatih/structs v1.1.0 // indirect
github.com/felixge/httpsnoop v1.0.4 // indirect
github.com/fsnotify/fsnotify v1.8.0 // indirect
github.com/fxamacker/cbor/v2 v2.7.0 // indirect
github.com/gabriel-vasile/mimetype v1.4.8 // indirect
github.com/ghodss/yaml v1.0.0 // indirect
github.com/go-errors/errors v1.5.1 // indirect
github.com/go-fed/httpsig v1.1.0 // indirect
github.com/go-jose/go-jose/v4 v4.0.4 // indirect
github.com/go-logr/logr v1.4.2 // indirect
github.com/go-logr/stdr v1.2.2 // indirect
github.com/go-playground/locales v0.14.1 // indirect
github.com/go-playground/universal-translator v0.18.1 // indirect
github.com/go-playground/validator/v10 v10.24.0 // indirect
github.com/go-resty/resty/v2 v2.16.3 // indirect
github.com/go-viper/mapstructure/v2 v2.2.1 // indirect
github.com/goccy/go-json v0.10.4 // indirect
github.com/gofrs/flock v0.12.1 // indirect
github.com/gogo/protobuf v1.3.2 // indirect
github.com/golang-jwt/jwt/v4 v4.5.1 // indirect
github.com/golang-jwt/jwt/v5 v5.2.1 // indirect
github.com/golang/snappy v0.0.4 // indirect
github.com/google/go-querystring v1.1.0 // indirect
github.com/google/gofuzz v1.2.0 // indirect
github.com/google/s2a-go v0.1.9 // indirect
github.com/google/uuid v1.6.0 // indirect
github.com/googleapis/enterprise-certificate-proxy v0.3.4 // indirect
github.com/googleapis/gax-go/v2 v2.14.1 // indirect
github.com/gophercloud/gophercloud v1.14.1 // indirect
github.com/gophercloud/utils v0.0.0-20231010081019-80377eca5d56 // indirect
github.com/gorilla/css v1.0.1 // indirect
github.com/hashicorp/errwrap v1.1.0 // indirect
github.com/hashicorp/go-cleanhttp v0.5.2 // indirect
github.com/hashicorp/go-multierror v1.1.1 // indirect
github.com/hashicorp/go-retryablehttp v0.7.7 // indirect
github.com/hashicorp/go-version v1.7.0 // indirect
github.com/hashicorp/hcl v1.0.0 // indirect
github.com/huaweicloud/huaweicloud-sdk-go-v3 v0.1.132 // indirect
github.com/iij/doapi v0.0.0-20190504054126-0bbf12d6d7df // indirect
github.com/infobloxopen/infoblox-go-client v1.1.1 // indirect
github.com/jmespath/go-jmespath v0.4.0 // indirect
github.com/json-iterator/go v1.1.12 // indirect
github.com/k0kubun/go-ansi v0.0.0-20180517002512-3bf9e2903213 // indirect
github.com/kolo/xmlrpc v0.0.0-20220921171641-a4b6fa1dd06b // indirect
github.com/kylelemons/godebug v1.1.0 // indirect
github.com/labbsr0x/bindman-dns-webhook v1.0.2 // indirect
github.com/labbsr0x/goh v1.0.1 // indirect
github.com/leodido/go-urn v1.4.0 // indirect
github.com/linode/linodego v1.46.0 // indirect
github.com/liquidweb/liquidweb-cli v0.7.0 // indirect
github.com/liquidweb/liquidweb-go v1.6.4 // indirect
github.com/magiconair/properties v1.8.9 // indirect
github.com/mattn/go-colorable v0.1.14 // indirect
github.com/mattn/go-isatty v0.0.20 // indirect
github.com/miekg/dns v1.1.62 // indirect
github.com/mimuret/golang-iij-dpf v0.9.1 // indirect
github.com/mitchellh/go-homedir v1.1.0 // indirect
github.com/mitchellh/mapstructure v1.5.0 // indirect
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
github.com/modern-go/reflect2 v1.0.2 // indirect
github.com/namedotcom/go v0.0.0-20180403034216-08470befbe04 // indirect
github.com/nrdcg/auroradns v1.1.0 // indirect
github.com/nrdcg/bunny-go v0.0.0-20240207213615-dde5bf4577a3 // indirect
github.com/nrdcg/desec v0.10.0 // indirect
github.com/nrdcg/dnspod-go v0.4.0 // indirect
github.com/nrdcg/freemyip v0.3.0 // indirect
github.com/nrdcg/goinwx v0.10.0 // indirect
github.com/nrdcg/mailinabox v0.2.0 // indirect
github.com/nrdcg/namesilo v0.2.1 // indirect
github.com/nrdcg/nodion v0.1.0 // indirect
github.com/nrdcg/porkbun v0.4.0 // indirect
github.com/nzdjb/go-metaname v1.0.0 // indirect
github.com/opentracing/opentracing-go v1.2.1-0.20220228012449-10b1cf09e00b // indirect
github.com/oracle/oci-go-sdk/v65 v65.81.2 // indirect
github.com/ovh/go-ovh v1.6.0 // indirect
github.com/patrickmn/go-cache v2.1.0+incompatible // indirect
github.com/peterhellberg/link v1.2.0 // indirect
github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c // indirect
github.com/pkg/errors v0.9.1 // indirect
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
github.com/pquerna/otp v1.4.0 // indirect
github.com/regfish/regfish-dnsapi-go v0.1.1 // indirect
github.com/russross/blackfriday/v2 v2.1.0 // indirect
github.com/sacloud/api-client-go v0.2.10 // indirect
github.com/sacloud/go-http v0.1.9 // indirect
github.com/sacloud/iaas-api-go v1.14.0 // indirect
github.com/sacloud/packages-go v0.0.11 // indirect
github.com/sagikazarmark/locafero v0.7.0 // indirect
github.com/sagikazarmark/slog-shim v0.1.0 // indirect
github.com/scaleway/scaleway-sdk-go v1.0.0-beta.30 // indirect
github.com/selectel/domains-go v1.1.0 // indirect
github.com/selectel/go-selvpcclient/v3 v3.2.1 // indirect
github.com/shopspring/decimal v1.4.0 // indirect
github.com/sirupsen/logrus v1.9.3 // indirect
github.com/smartystreets/go-aws-auth v0.0.0-20180515143844-0c1422d1fdb9 // indirect
github.com/softlayer/softlayer-go v1.1.7 // indirect
github.com/softlayer/xmlrpc v0.0.0-20200409220501-5f089df7cb7e // indirect
github.com/sony/gobreaker v1.0.0 // indirect
github.com/sourcegraph/conc v0.3.0 // indirect
github.com/spf13/afero v1.12.0 // indirect
github.com/spf13/cast v1.7.1 // indirect
github.com/spf13/pflag v1.0.5 // indirect
github.com/spf13/viper v1.19.0 // indirect
github.com/stretchr/objx v0.5.2 // indirect
github.com/subosito/gotenv v1.6.0 // indirect
github.com/syndtr/goleveldb v1.0.0 // indirect
github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/common v1.0.1084 // indirect
github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/dnspod v1.0.1084 // indirect
github.com/tjfoc/gmsm v1.4.1 // indirect
github.com/transip/gotransip/v6 v6.26.0 // indirect
github.com/ultradns/ultradns-go-sdk v1.8.0-20241010134910-243eeec // indirect
github.com/vinyldns/go-vinyldns v0.9.16 // indirect
github.com/volcengine/volc-sdk-golang v1.0.193 // indirect
github.com/vultr/govultr/v3 v3.14.1 // indirect
github.com/x448/float16 v0.8.4 // indirect
github.com/xrash/smetrics v0.0.0-20240521201337-686a1a2994c1 // indirect
github.com/yandex-cloud/go-genproto v0.0.0-20241220122821-aeb3b05efd1c // indirect
github.com/yandex-cloud/go-sdk v0.0.0-20241220131134-2393e243c134 // indirect
go.mongodb.org/mongo-driver v1.17.2 // indirect
go.opentelemetry.io/auto/sdk v1.1.0 // indirect
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.58.0 // indirect
go.opentelemetry.io/otel v1.34.0 // indirect
go.opentelemetry.io/otel/metric v1.34.0 // indirect
go.opentelemetry.io/otel/trace v1.34.0 // indirect
go.uber.org/atomic v1.11.0 // indirect
go.uber.org/multierr v1.11.0 // indirect
go.uber.org/ratelimit v0.3.1 // indirect
golang.org/x/crypto v0.36.0 // indirect
golang.org/x/mod v0.24.0 // indirect
golang.org/x/net v0.37.0 // indirect
golang.org/x/oauth2 v0.25.0 // indirect
golang.org/x/sync v0.12.0 // indirect
golang.org/x/sys v0.31.0 // indirect
golang.org/x/text v0.23.0 // indirect
golang.org/x/time v0.9.0 // indirect
golang.org/x/tools v0.31.0 // indirect
google.golang.org/api v0.217.0 // indirect
google.golang.org/genproto v0.0.0-20250115164207-1a7da9e5054f // indirect
google.golang.org/genproto/googleapis/api v0.0.0-20250115164207-1a7da9e5054f // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20250115164207-1a7da9e5054f // indirect
google.golang.org/grpc v1.69.4 // indirect
google.golang.org/protobuf v1.36.3 // indirect
gopkg.in/inf.v0 v0.9.1 // indirect
gopkg.in/ini.v1 v1.67.0 // indirect
gopkg.in/ns1/ns1-go.v2 v2.13.0 // indirect
gopkg.in/yaml.v2 v2.4.0 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
k8s.io/api v0.32.1 // indirect
k8s.io/apimachinery v0.32.1 // indirect
k8s.io/klog/v2 v2.130.1 // indirect
k8s.io/utils v0.0.0-20241210054802-24370beab758 // indirect
sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8 // indirect
sigs.k8s.io/structured-merge-diff/v4 v4.5.0 // indirect
sigs.k8s.io/yaml v1.4.0 // indirect
xorm.io/builder v0.3.13 // indirect
)

2586
go.sum Normal file

File diff suppressed because it is too large Load diff

71
html/html.go Normal file
View file

@ -0,0 +1,71 @@
package html
import (
_ "embed"
"net/http"
"os"
"path"
"text/template" // do not use html/template here, we sanitize the message before passing it to the template
"codeberg.org/codeberg/pages/server/context"
"github.com/microcosm-cc/bluemonday"
"github.com/rs/zerolog/log"
)
//go:embed templates/error.html
var errorPage string
var (
errorTemplate = template.Must(template.New("error").Parse(loadCustomTemplateOrDefault()))
sanitizer = createBlueMondayPolicy()
)
type TemplateContext struct {
StatusCode int
StatusText string
Message string
}
// ReturnErrorPage sets the response status code and writes the error page to the response body.
// The error page contains a sanitized version of the message and the statusCode both in text and numeric form.
//
// Currently, only the following html tags are supported: <code>
func ReturnErrorPage(ctx *context.Context, msg string, statusCode int) {
ctx.RespWriter.Header().Set("Content-Type", "text/html; charset=utf-8")
ctx.RespWriter.WriteHeader(statusCode)
templateContext := TemplateContext{
StatusCode: statusCode,
StatusText: http.StatusText(statusCode),
Message: sanitizer.Sanitize(msg),
}
err := errorTemplate.Execute(ctx.RespWriter, templateContext)
if err != nil {
log.Err(err).Str("message", msg).Int("status", statusCode).Msg("could not write response")
}
}
func createBlueMondayPolicy() *bluemonday.Policy {
p := bluemonday.NewPolicy()
p.AllowElements("code")
return p
}
func loadCustomTemplateOrDefault() string {
contents, err := os.ReadFile("custom/error.html")
if err != nil {
if !os.IsNotExist(err) {
wd, wdErr := os.Getwd()
if wdErr != nil {
log.Err(err).Msg("could not load custom error page 'custom/error.html'")
} else {
log.Err(err).Msgf("could not load custom error page '%v'", path.Join(wd, "custom/error.html"))
}
}
return errorPage
}
return string(contents)
}
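
A hypothetical usage sketch (not part of this diff) of the error page helper above, assuming the server/context package added elsewhere in this change; serveNotFound, the message text, and port 8080 are illustrative only.

package main

import (
	"net/http"

	"codeberg.org/codeberg/pages/html"
	"codeberg.org/codeberg/pages/server/context"
)

func serveNotFound(w http.ResponseWriter, req *http.Request) {
	ctx := context.New(w, req)
	// Only <code> survives the bluemonday policy; all other tags are stripped before rendering.
	html.ReturnErrorPage(ctx, "no <code>pages</code> branch found in this repository", http.StatusNotFound)
}

func main() {
	http.HandleFunc("/", serveNotFound)
	_ = http.ListenAndServe(":8080", nil)
}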

54
html/html_test.go Normal file
View file

@ -0,0 +1,54 @@
package html
import (
"testing"
"github.com/stretchr/testify/assert"
)
func TestSanitizerSimpleString(t *testing.T) {
str := "simple text message without any html elements"
assert.Equal(t, str, sanitizer.Sanitize(str))
}
func TestSanitizerStringWithCodeTag(t *testing.T) {
str := "simple text message with <code>html</code> tag"
assert.Equal(t, str, sanitizer.Sanitize(str))
}
func TestSanitizerStringWithCodeTagWithAttribute(t *testing.T) {
str := "simple text message with <code id=\"code\">html</code> tag"
expected := "simple text message with <code>html</code> tag"
assert.Equal(t, expected, sanitizer.Sanitize(str))
}
func TestSanitizerStringWithATag(t *testing.T) {
str := "simple text message with <a>a link to another page</a>"
expected := "simple text message with a link to another page"
assert.Equal(t, expected, sanitizer.Sanitize(str))
}
func TestSanitizerStringWithATagAndHref(t *testing.T) {
str := "simple text message with <a href=\"http://evil.site\">a link to another page</a>"
expected := "simple text message with a link to another page"
assert.Equal(t, expected, sanitizer.Sanitize(str))
}
func TestSanitizerStringWithImgTag(t *testing.T) {
str := "simple text message with a <img alt=\"not found\" src=\"http://evil.site\">"
expected := "simple text message with a "
assert.Equal(t, expected, sanitizer.Sanitize(str))
}
func TestSanitizerStringWithImgTagAndOnerrorAttribute(t *testing.T) {
str := "simple text message with a <img alt=\"not found\" src=\"http://evil.site\" onerror=\"alert(secret)\">"
expected := "simple text message with a "
assert.Equal(t, expected, sanitizer.Sanitize(str))
}

58
html/templates/error.html Normal file
View file

@ -0,0 +1,58 @@
<!doctype html>
<html class="codeberg-design">
<head>
<meta charset="utf-8" />
<meta name="viewport" content="width=device-width" />
<title>{{.StatusText}}</title>
<link rel="stylesheet" href="https://design.codeberg.org/design-kit/codeberg.css" />
<link rel="stylesheet" href="https://fonts.codeberg.org/dist/inter/Inter%20Web/inter.css" />
<style>
body {
margin: 0;
padding: 1rem;
box-sizing: border-box;
width: 100%;
min-height: 100vh;
display: flex;
flex-direction: column;
align-items: center;
justify-content: center;
}
code {
border-radius: 0.25rem;
padding: 0.25rem;
background-color: silver;
}
</style>
</head>
<body>
<svg xmlns="http://www.w3.org/2000/svg" height="10em" viewBox="0 0 24 24" fill="var(--blue-color)">
<path
d="M 9 2 C 5.1458514 2 2 5.1458514 2 9 C 2 12.854149 5.1458514 16 9 16 C 10.747998 16 12.345009 15.348024 13.574219 14.28125 L 14 14.707031 L 14 16 L 19.585938 21.585938 C 20.137937 22.137937 21.033938 22.137938 21.585938 21.585938 C 22.137938 21.033938 22.137938 20.137938 21.585938 19.585938 L 16 14 L 14.707031 14 L 14.28125 13.574219 C 15.348024 12.345009 16 10.747998 16 9 C 16 5.1458514 12.854149 2 9 2 z M 9 4 C 11.773268 4 14 6.2267316 14 9 C 14 11.773268 11.773268 14 9 14 C 6.2267316 14 4 11.773268 4 9 C 4 6.2267316 6.2267316 4 9 4 z"
/>
</svg>
<h1 class="mb-0 text-primary">{{.StatusText}} (Error {{.StatusCode}})!</h1>
<h5 class="text-center" style="max-width: 25em">
<p>Sorry, but this page couldn't be served:</p>
<p><b>"{{.Message}}"</b></p>
<p>
The page you tried to reach is hosted on Codeberg Pages, which might currently be experiencing technical
difficulties. If that is the case, it could take a little while until this page is available again.
</p>
<p>
Otherwise, this page might also be unavailable due to a configuration error. If you are the owner of this
website, please make sure to check the
<a href="https://docs.codeberg.org/codeberg-pages/troubleshooting/" target="_blank"
>troubleshooting section in the Docs</a
>!
</p>
</h5>
<small class="text-muted">
<img src="https://design.codeberg.org/logo-kit/icon.svg" class="align-top" />
Static pages made easy -
<a href="https://codeberg.page">Codeberg Pages</a>
</small>
</body>
</html>

282
integration/get_test.go Normal file
View file

@ -0,0 +1,282 @@
//go:build integration
// +build integration
package integration
import (
"bytes"
"crypto/tls"
"io"
"log"
"net/http"
"net/http/cookiejar"
"strings"
"testing"
"github.com/stretchr/testify/assert"
)
func TestGetRedirect(t *testing.T) {
log.Println("=== TestGetRedirect ===")
// test custom domain redirect
resp, err := getTestHTTPSClient().Get("https://calciumdibromid.localhost.mock.directory:4430")
if !assert.NoError(t, err) {
t.FailNow()
}
if !assert.EqualValues(t, http.StatusTemporaryRedirect, resp.StatusCode) {
t.FailNow()
}
assert.EqualValues(t, "https://www.cabr2.de/", resp.Header.Get("Location"))
assert.EqualValues(t, `<a href="https://www.cabr2.de/">Temporary Redirect</a>.`, strings.TrimSpace(string(getBytes(resp.Body))))
}
func TestGetContent(t *testing.T) {
log.Println("=== TestGetContent ===")
// test get image
resp, err := getTestHTTPSClient().Get("https://cb_pages_tests.localhost.mock.directory:4430/images/827679288a.jpg")
assert.NoError(t, err)
if !assert.EqualValues(t, http.StatusOK, resp.StatusCode) {
t.FailNow()
}
assert.EqualValues(t, "image/jpeg", resp.Header.Get("Content-Type"))
assert.EqualValues(t, "124635", resp.Header.Get("Content-Length"))
assert.EqualValues(t, 124635, getSize(resp.Body))
assert.Len(t, resp.Header.Get("ETag"), 42)
// specify branch
resp, err = getTestHTTPSClient().Get("https://cb_pages_tests.localhost.mock.directory:4430/pag/@master/")
assert.NoError(t, err)
if !assert.NotNil(t, resp) {
t.FailNow()
}
assert.EqualValues(t, http.StatusOK, resp.StatusCode)
assert.EqualValues(t, "text/html; charset=utf-8", resp.Header.Get("Content-Type"))
assert.True(t, getSize(resp.Body) > 1000)
assert.Len(t, resp.Header.Get("ETag"), 44)
// access a branch whose name contains '/'
resp, err = getTestHTTPSClient().Get("https://cb_pages_tests.localhost.mock.directory:4430/blumia/@docs~main/")
assert.NoError(t, err)
if !assert.EqualValues(t, http.StatusOK, resp.StatusCode) {
t.FailNow()
}
assert.EqualValues(t, "text/html; charset=utf-8", resp.Header.Get("Content-Type"))
assert.True(t, getSize(resp.Body) > 100)
assert.Len(t, resp.Header.Get("ETag"), 44)
// TODO: test get of non cacheable content (content size > fileCacheSizeLimit)
}
func TestCustomDomain(t *testing.T) {
log.Println("=== TestCustomDomain ===")
resp, err := getTestHTTPSClient().Get("https://mock-pages.codeberg-test.org:4430/README.md")
assert.NoError(t, err)
if !assert.NotNil(t, resp) {
t.FailNow()
}
assert.EqualValues(t, http.StatusOK, resp.StatusCode)
assert.EqualValues(t, "text/markdown; charset=utf-8", resp.Header.Get("Content-Type"))
assert.EqualValues(t, "106", resp.Header.Get("Content-Length"))
assert.EqualValues(t, 106, getSize(resp.Body))
}
func TestCustomDomainRedirects(t *testing.T) {
log.Println("=== TestCustomDomainRedirects ===")
// test redirect from default pages domain to custom domain
resp, err := getTestHTTPSClient().Get("https://6543.localhost.mock.directory:4430/test_pages-server_custom-mock-domain/@main/README.md")
assert.NoError(t, err)
if !assert.NotNil(t, resp) {
t.FailNow()
}
assert.EqualValues(t, http.StatusTemporaryRedirect, resp.StatusCode)
assert.EqualValues(t, "text/html; charset=utf-8", resp.Header.Get("Content-Type"))
// TODO: custom port is not evaluated (which only hurts tests & the dev env)
// assert.EqualValues(t, "https://mock-pages.codeberg-test.org:4430/@main/README.md", resp.Header.Get("Location"))
assert.EqualValues(t, "https://mock-pages.codeberg-test.org/@main/README.md", resp.Header.Get("Location"))
assert.EqualValues(t, `https:/codeberg.org/6543/test_pages-server_custom-mock-domain/src/branch/main/README.md; rel="canonical"; rel="canonical"`, resp.Header.Get("Link"))
// test redirect from a custom domain to the primary custom domain (www.example.com -> example.com)
// regression test to https://codeberg.org/Codeberg/pages-server/issues/153
resp, err = getTestHTTPSClient().Get("https://mock-pages-redirect.codeberg-test.org:4430/README.md")
assert.NoError(t, err)
if !assert.NotNil(t, resp) {
t.FailNow()
}
assert.EqualValues(t, http.StatusTemporaryRedirect, resp.StatusCode)
assert.EqualValues(t, "text/html; charset=utf-8", resp.Header.Get("Content-Type"))
// TODO: custom port is not evaluated (which only hurts tests & the dev env)
// assert.EqualValues(t, "https://mock-pages.codeberg-test.org:4430/README.md", resp.Header.Get("Location"))
assert.EqualValues(t, "https://mock-pages.codeberg-test.org/README.md", resp.Header.Get("Location"))
}
func TestRawCustomDomain(t *testing.T) {
log.Println("=== TestRawCustomDomain ===")
// test raw domain response for custom domain branch
resp, err := getTestHTTPSClient().Get("https://raw.localhost.mock.directory:4430/cb_pages_tests/raw-test/example") // need cb_pages_tests fork
assert.NoError(t, err)
if !assert.NotNil(t, resp) {
t.FailNow()
}
assert.EqualValues(t, http.StatusOK, resp.StatusCode)
assert.EqualValues(t, "text/plain; charset=utf-8", resp.Header.Get("Content-Type"))
assert.EqualValues(t, "76", resp.Header.Get("Content-Length"))
assert.EqualValues(t, 76, getSize(resp.Body))
}
func TestRawIndex(t *testing.T) {
log.Println("=== TestRawIndex ===")
// test raw domain response for index.html
resp, err := getTestHTTPSClient().Get("https://raw.localhost.mock.directory:4430/cb_pages_tests/raw-test/@branch-test/index.html") // need cb_pages_tests fork
assert.NoError(t, err)
if !assert.NotNil(t, resp) {
t.FailNow()
}
assert.EqualValues(t, http.StatusOK, resp.StatusCode)
assert.EqualValues(t, "text/plain; charset=utf-8", resp.Header.Get("Content-Type"))
assert.EqualValues(t, "597", resp.Header.Get("Content-Length"))
assert.EqualValues(t, 597, getSize(resp.Body))
}
func TestGetNotFound(t *testing.T) {
log.Println("=== TestGetNotFound ===")
// test custom not found pages
resp, err := getTestHTTPSClient().Get("https://cb_pages_tests.localhost.mock.directory:4430/pages-404-demo/blah")
assert.NoError(t, err)
if !assert.NotNil(t, resp) {
t.FailNow()
}
assert.EqualValues(t, http.StatusNotFound, resp.StatusCode)
assert.EqualValues(t, "text/html; charset=utf-8", resp.Header.Get("Content-Type"))
assert.EqualValues(t, "37", resp.Header.Get("Content-Length"))
assert.EqualValues(t, 37, getSize(resp.Body))
}
func TestRedirect(t *testing.T) {
log.Println("=== TestRedirect ===")
// test redirects
resp, err := getTestHTTPSClient().Get("https://cb_pages_tests.localhost.mock.directory:4430/some_redirects/redirect")
assert.NoError(t, err)
if !assert.NotNil(t, resp) {
t.FailNow()
}
assert.EqualValues(t, http.StatusMovedPermanently, resp.StatusCode)
assert.EqualValues(t, "https://example.com/", resp.Header.Get("Location"))
}
func TestSPARedirect(t *testing.T) {
log.Println("=== TestSPARedirect ===")
// test SPA redirects
url := "https://cb_pages_tests.localhost.mock.directory:4430/some_redirects/app/aqdjw"
resp, err := getTestHTTPSClient().Get(url)
assert.NoError(t, err)
if !assert.NotNil(t, resp) {
t.FailNow()
}
assert.EqualValues(t, http.StatusOK, resp.StatusCode)
assert.EqualValues(t, url, resp.Request.URL.String())
assert.EqualValues(t, "text/html; charset=utf-8", resp.Header.Get("Content-Type"))
assert.EqualValues(t, "258", resp.Header.Get("Content-Length"))
assert.EqualValues(t, 258, getSize(resp.Body))
}
func TestSplatRedirect(t *testing.T) {
log.Println("=== TestSplatRedirect ===")
// test splat redirects
resp, err := getTestHTTPSClient().Get("https://cb_pages_tests.localhost.mock.directory:4430/some_redirects/articles/qfopefe")
assert.NoError(t, err)
if !assert.NotNil(t, resp) {
t.FailNow()
}
assert.EqualValues(t, http.StatusMovedPermanently, resp.StatusCode)
assert.EqualValues(t, "/posts/qfopefe", resp.Header.Get("Location"))
}
func TestFollowSymlink(t *testing.T) {
log.Printf("=== TestFollowSymlink ===\n")
// file symlink
resp, err := getTestHTTPSClient().Get("https://cb_pages_tests.localhost.mock.directory:4430/tests_for_pages-server/@main/link")
assert.NoError(t, err)
if !assert.NotNil(t, resp) {
t.FailNow()
}
assert.EqualValues(t, http.StatusOK, resp.StatusCode)
assert.EqualValues(t, "application/octet-stream", resp.Header.Get("Content-Type"))
assert.EqualValues(t, "4", resp.Header.Get("Content-Length"))
body := getBytes(resp.Body)
assert.EqualValues(t, 4, len(body))
assert.EqualValues(t, "abc\n", string(body))
// relative file links (../index.html file in this case)
resp, err = getTestHTTPSClient().Get("https://cb_pages_tests.localhost.mock.directory:4430/tests_for_pages-server/@main/dir_aim/some/")
assert.NoError(t, err)
if !assert.NotNil(t, resp) {
t.FailNow()
}
assert.EqualValues(t, http.StatusOK, resp.StatusCode)
assert.EqualValues(t, "text/html; charset=utf-8", resp.Header.Get("Content-Type"))
assert.EqualValues(t, "an index\n", string(getBytes(resp.Body)))
}
func TestLFSSupport(t *testing.T) {
log.Printf("=== TestLFSSupport ===\n")
resp, err := getTestHTTPSClient().Get("https://cb_pages_tests.localhost.mock.directory:4430/tests_for_pages-server/@main/lfs.txt")
assert.NoError(t, err)
if !assert.NotNil(t, resp) {
t.FailNow()
}
assert.EqualValues(t, http.StatusOK, resp.StatusCode)
body := strings.TrimSpace(string(getBytes(resp.Body)))
assert.EqualValues(t, 12, len(body))
assert.EqualValues(t, "actual value", body)
}
func TestGetOptions(t *testing.T) {
log.Println("=== TestGetOptions ===")
req, _ := http.NewRequest(http.MethodOptions, "https://mock-pages.codeberg-test.org:4430/README.md", http.NoBody)
resp, err := getTestHTTPSClient().Do(req)
assert.NoError(t, err)
if !assert.NotNil(t, resp) {
t.FailNow()
}
assert.EqualValues(t, http.StatusNoContent, resp.StatusCode)
assert.EqualValues(t, "GET, HEAD, OPTIONS", resp.Header.Get("Allow"))
}
func TestHttpRedirect(t *testing.T) {
log.Println("=== TestHttpRedirect ===")
resp, err := getTestHTTPSClient().Get("http://mock-pages.codeberg-test.org:8880/README.md")
assert.NoError(t, err)
if !assert.NotNil(t, resp) {
t.FailNow()
}
assert.EqualValues(t, http.StatusMovedPermanently, resp.StatusCode)
assert.EqualValues(t, "text/html; charset=utf-8", resp.Header.Get("Content-Type"))
assert.EqualValues(t, "https://mock-pages.codeberg-test.org:4430/README.md", resp.Header.Get("Location"))
}
func getTestHTTPSClient() *http.Client {
cookieJar, _ := cookiejar.New(nil)
return &http.Client{
Jar: cookieJar,
CheckRedirect: func(_ *http.Request, _ []*http.Request) error {
return http.ErrUseLastResponse
},
Transport: &http.Transport{
TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
},
}
}
func getBytes(stream io.Reader) []byte {
buf := new(bytes.Buffer)
_, _ = buf.ReadFrom(stream)
return buf.Bytes()
}
func getSize(stream io.Reader) int {
buf := new(bytes.Buffer)
_, _ = buf.ReadFrom(stream)
return buf.Len()
}

69
integration/main_test.go Normal file
View file

@ -0,0 +1,69 @@
//go:build integration
// +build integration
package integration
import (
"context"
"log"
"os"
"testing"
"time"
"github.com/urfave/cli/v2"
cmd "codeberg.org/codeberg/pages/cli"
"codeberg.org/codeberg/pages/server"
)
func TestMain(m *testing.M) {
log.Println("=== TestMain: START Server ===")
serverCtx, serverCancel := context.WithCancel(context.Background())
if err := startServer(serverCtx); err != nil {
log.Fatalf("could not start server: %v", err)
}
defer func() {
serverCancel()
log.Println("=== TestMain: Server STOPPED ===")
}()
time.Sleep(10 * time.Second)
m.Run()
}
func startServer(ctx context.Context) error {
args := []string{"integration"}
setEnvIfNotSet("ACME_API", "https://acme.mock.directory")
setEnvIfNotSet("PAGES_DOMAIN", "localhost.mock.directory")
setEnvIfNotSet("RAW_DOMAIN", "raw.localhost.mock.directory")
setEnvIfNotSet("PAGES_BRANCHES", "pages,main,master")
setEnvIfNotSet("PORT", "4430")
setEnvIfNotSet("HTTP_PORT", "8880")
setEnvIfNotSet("ENABLE_HTTP_SERVER", "true")
setEnvIfNotSet("DB_TYPE", "sqlite3")
setEnvIfNotSet("GITEA_ROOT", "https://codeberg.org")
setEnvIfNotSet("LOG_LEVEL", "trace")
setEnvIfNotSet("ENABLE_LFS_SUPPORT", "true")
setEnvIfNotSet("ENABLE_SYMLINK_SUPPORT", "true")
setEnvIfNotSet("ACME_ACCOUNT_CONFIG", "integration/acme-account.json")
app := cli.NewApp()
app.Name = "pages-server"
app.Action = server.Serve
app.Flags = cmd.ServerFlags
go func() {
if err := app.RunContext(ctx, args); err != nil {
log.Fatalf("run server error: %v", err)
}
}()
return nil
}
func setEnvIfNotSet(key, value string) {
if _, set := os.LookupEnv(key); !set {
os.Setenv(key, value)
}
}

21
main.go Normal file
View file

@ -0,0 +1,21 @@
package main
import (
"os"
_ "github.com/joho/godotenv/autoload"
"github.com/rs/zerolog/log"
"codeberg.org/codeberg/pages/cli"
"codeberg.org/codeberg/pages/server"
)
func main() {
app := cli.CreatePagesApp()
app.Action = server.Serve
if err := app.Run(os.Args); err != nil {
log.Error().Err(err).Msg("A fatal error occurred")
os.Exit(1)
}
}

27
renovate.json Normal file
View file

@ -0,0 +1,27 @@
{
"$schema": "https://docs.renovatebot.com/renovate-schema.json",
"extends": [
"config:recommended",
":maintainLockFilesWeekly",
":enablePreCommit",
"schedule:automergeDaily",
"schedule:weekends"
],
"automergeType": "branch",
"automergeMajor": false,
"automerge": true,
"prConcurrentLimit": 5,
"labels": ["dependencies"],
"packageRules": [
{
"matchManagers": ["gomod", "dockerfile"]
},
{
"groupName": "golang deps non-major",
"matchManagers": ["gomod"],
"matchUpdateTypes": ["minor", "patch"],
"extends": ["schedule:daily"]
}
],
"postUpdateOptions": ["gomodTidy", "gomodUpdateImportPaths"]
}

26
server/acme/client.go Normal file
View file

@ -0,0 +1,26 @@
package acme
import (
"errors"
"fmt"
"codeberg.org/codeberg/pages/config"
"codeberg.org/codeberg/pages/server/cache"
"codeberg.org/codeberg/pages/server/certificates"
)
var ErrAcmeMissConfig = errors.New("ACME client has wrong config")
func CreateAcmeClient(cfg config.ACMEConfig, enableHTTPServer bool, challengeCache cache.ICache) (*certificates.AcmeClient, error) {
// check config
if (!cfg.AcceptTerms || (cfg.DNSProvider == "" && !cfg.NoDNS01)) && cfg.APIEndpoint != "https://acme.mock.directory" {
return nil, fmt.Errorf("%w: you must set $ACME_ACCEPT_TERMS and $DNS_PROVIDER or $NO_DNS_01, unless $ACME_API is set to https://acme.mock.directory", ErrAcmeMissConfig)
}
if cfg.EAB_HMAC != "" && cfg.EAB_KID == "" {
return nil, fmt.Errorf("%w: ACME_EAB_HMAC also needs ACME_EAB_KID to be set", ErrAcmeMissConfig)
} else if cfg.EAB_HMAC == "" && cfg.EAB_KID != "" {
return nil, fmt.Errorf("%w: ACME_EAB_KID also needs ACME_EAB_HMAC to be set", ErrAcmeMissConfig)
}
return certificates.NewAcmeClient(cfg, enableHTTPServer, challengeCache)
}

10
server/cache/interface.go vendored Normal file
View file

@ -0,0 +1,10 @@
package cache
import "time"
// ICache is an interface that defines how the pages server interacts with the cache.
type ICache interface {
Set(key string, value interface{}, ttl time.Duration) error
Get(key string) (interface{}, bool)
Remove(key string)
}

7
server/cache/memory.go vendored Normal file
View file

@ -0,0 +1,7 @@
package cache
import "github.com/OrlovEvgeny/go-mcache"
func NewInMemoryCache() ICache {
return mcache.New()
}
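
A minimal sketch (not part of this diff) of how this cache is used as the ACME challenge cache further down in this change: HTTP-01 entries are keyed by "domain/token" and TLS-ALPN-01 entries by "domain", each with a TTL so stale challenges expire on their own. The key and value strings below are illustrative.

package main

import (
	"fmt"
	"time"

	"codeberg.org/codeberg/pages/server/cache"
)

func main() {
	c := cache.NewInMemoryCache()
	// Store the keyAuth for an HTTP-01 challenge token for one hour.
	_ = c.Set("example.org/sometoken", "keyAuth-value", time.Hour)
	if v, ok := c.Get("example.org/sometoken"); ok {
		fmt.Println(v.(string))
	}
	// Clean up once the challenge has been answered.
	c.Remove("example.org/sometoken")
}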

View file

@ -0,0 +1,29 @@
package certificates
import (
"crypto"
"github.com/go-acme/lego/v4/registration"
)
type AcmeAccount struct {
Email string
Registration *registration.Resource
Key crypto.PrivateKey `json:"-"`
KeyPEM string `json:"Key"`
}
// make sure AcmeAccount matches the registration.User interface
var _ registration.User = &AcmeAccount{}
func (u *AcmeAccount) GetEmail() string {
return u.Email
}
func (u AcmeAccount) GetRegistration() *registration.Resource {
return u.Registration
}
func (u *AcmeAccount) GetPrivateKey() crypto.PrivateKey {
return u.Key
}

View file

@ -0,0 +1,93 @@
package certificates
import (
"fmt"
"sync"
"time"
"github.com/go-acme/lego/v4/lego"
"github.com/go-acme/lego/v4/providers/dns"
"github.com/reugn/equalizer"
"github.com/rs/zerolog/log"
"codeberg.org/codeberg/pages/config"
"codeberg.org/codeberg/pages/server/cache"
)
type AcmeClient struct {
legoClient *lego.Client
dnsChallengerLegoClient *lego.Client
obtainLocks sync.Map
acmeUseRateLimits bool
// limiter
acmeClientOrderLimit *equalizer.TokenBucket
acmeClientRequestLimit *equalizer.TokenBucket
acmeClientFailLimit *equalizer.TokenBucket
acmeClientCertificateLimitPerUser map[string]*equalizer.TokenBucket
}
func NewAcmeClient(cfg config.ACMEConfig, enableHTTPServer bool, challengeCache cache.ICache) (*AcmeClient, error) {
acmeConfig, err := setupAcmeConfig(cfg)
if err != nil {
return nil, err
}
acmeClient, err := lego.NewClient(acmeConfig)
if err != nil {
log.Fatal().Err(err).Msg("Can't create ACME client, continuing with mock certs only")
} else {
err = acmeClient.Challenge.SetTLSALPN01Provider(AcmeTLSChallengeProvider{challengeCache})
if err != nil {
log.Error().Err(err).Msg("Can't create TLS-ALPN-01 provider")
}
if enableHTTPServer {
err = acmeClient.Challenge.SetHTTP01Provider(AcmeHTTPChallengeProvider{challengeCache})
if err != nil {
log.Error().Err(err).Msg("Can't create HTTP-01 provider")
}
}
}
mainDomainAcmeClient, err := lego.NewClient(acmeConfig)
if err != nil {
log.Error().Err(err).Msg("Can't create ACME client, continuing with mock certs only")
} else {
if cfg.DNSProvider == "" {
// using mock wildcard certs
mainDomainAcmeClient = nil
} else {
// use DNS-Challenge https://go-acme.github.io/lego/dns/
provider, err := dns.NewDNSChallengeProviderByName(cfg.DNSProvider)
if err != nil {
return nil, fmt.Errorf("can not create DNS Challenge provider: %w", err)
}
if err := mainDomainAcmeClient.Challenge.SetDNS01Provider(provider); err != nil {
return nil, fmt.Errorf("can not create DNS-01 provider: %w", err)
}
}
}
return &AcmeClient{
legoClient: acmeClient,
dnsChallengerLegoClient: mainDomainAcmeClient,
acmeUseRateLimits: cfg.UseRateLimits,
obtainLocks: sync.Map{},
// limiter
// rate limit is 300 / 3 hours, we want 200 / 2 hours but to refill more often, so that's 25 new domains every 15 minutes
// TODO: when this is used a lot, we probably have to think of a somewhat better solution?
acmeClientOrderLimit: equalizer.NewTokenBucket(25, 15*time.Minute),
// rate limit is 20 / second, we want 5 / second (especially as one cert takes at least two requests)
acmeClientRequestLimit: equalizer.NewTokenBucket(5, 1*time.Second),
// rate limit is 5 / hour https://letsencrypt.org/docs/failed-validation-limit/
acmeClientFailLimit: equalizer.NewTokenBucket(5, 1*time.Hour),
// checkUserLimit() uses this to also rate limit per user
acmeClientCertificateLimitPerUser: map[string]*equalizer.TokenBucket{},
}, nil
}

View file

@ -0,0 +1,110 @@
package certificates
import (
"crypto/ecdsa"
"crypto/elliptic"
"crypto/rand"
"encoding/json"
"fmt"
"os"
"codeberg.org/codeberg/pages/config"
"github.com/go-acme/lego/v4/certcrypto"
"github.com/go-acme/lego/v4/lego"
"github.com/go-acme/lego/v4/registration"
"github.com/rs/zerolog/log"
)
const challengePath = "/.well-known/acme-challenge/"
func setupAcmeConfig(cfg config.ACMEConfig) (*lego.Config, error) {
var myAcmeAccount AcmeAccount
var myAcmeConfig *lego.Config
if cfg.AccountConfigFile == "" {
return nil, fmt.Errorf("invalid acme config file: '%s'", cfg.AccountConfigFile)
}
if account, err := os.ReadFile(cfg.AccountConfigFile); err == nil {
log.Info().Msgf("found existing acme account config file '%s'", cfg.AccountConfigFile)
if err := json.Unmarshal(account, &myAcmeAccount); err != nil {
return nil, err
}
myAcmeAccount.Key, err = certcrypto.ParsePEMPrivateKey([]byte(myAcmeAccount.KeyPEM))
if err != nil {
return nil, err
}
myAcmeConfig = lego.NewConfig(&myAcmeAccount)
myAcmeConfig.CADirURL = cfg.APIEndpoint
myAcmeConfig.Certificate.KeyType = certcrypto.RSA2048
// Validate Config
_, err := lego.NewClient(myAcmeConfig)
if err != nil {
log.Info().Err(err).Msg("config validation failed, you might just delete the config file and let it recreate")
return nil, fmt.Errorf("acme config validation failed: %w", err)
}
return myAcmeConfig, nil
} else if !os.IsNotExist(err) {
return nil, err
}
log.Info().Msgf("no existing acme account config found, try to create a new one")
privateKey, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
if err != nil {
return nil, err
}
myAcmeAccount = AcmeAccount{
Email: cfg.Email,
Key: privateKey,
KeyPEM: string(certcrypto.PEMEncode(privateKey)),
}
myAcmeConfig = lego.NewConfig(&myAcmeAccount)
myAcmeConfig.CADirURL = cfg.APIEndpoint
myAcmeConfig.Certificate.KeyType = certcrypto.RSA2048
tempClient, err := lego.NewClient(myAcmeConfig)
if err != nil {
log.Error().Err(err).Msg("Can't create ACME client, continuing with mock certs only")
} else {
// accept terms & log in to EAB
if cfg.EAB_KID == "" || cfg.EAB_HMAC == "" {
reg, err := tempClient.Registration.Register(registration.RegisterOptions{TermsOfServiceAgreed: cfg.AcceptTerms})
if err != nil {
log.Error().Err(err).Msg("Can't register ACME account, continuing with mock certs only")
} else {
myAcmeAccount.Registration = reg
}
} else {
reg, err := tempClient.Registration.RegisterWithExternalAccountBinding(registration.RegisterEABOptions{
TermsOfServiceAgreed: cfg.AcceptTerms,
Kid: cfg.EAB_KID,
HmacEncoded: cfg.EAB_HMAC,
})
if err != nil {
log.Error().Err(err).Msg("Can't register ACME account, continuing with mock certs only")
} else {
myAcmeAccount.Registration = reg
}
}
if myAcmeAccount.Registration != nil {
acmeAccountJSON, err := json.Marshal(myAcmeAccount)
if err != nil {
log.Error().Err(err).Msg("json.Marshalfailed, waiting for manual restart to avoid rate limits")
select {}
}
log.Info().Msgf("new acme account created. write to config file '%s'", cfg.AccountConfigFile)
err = os.WriteFile(cfg.AccountConfigFile, acmeAccountJSON, 0o600)
if err != nil {
log.Error().Err(err).Msg("os.WriteFile failed, waiting for manual restart to avoid rate limits")
select {}
}
}
}
return myAcmeConfig, nil
}

View file

@ -0,0 +1,83 @@
package certificates
import (
"fmt"
"net/http"
"net/url"
"strings"
"time"
"github.com/go-acme/lego/v4/challenge"
"github.com/rs/zerolog/log"
"codeberg.org/codeberg/pages/server/cache"
"codeberg.org/codeberg/pages/server/context"
)
type AcmeTLSChallengeProvider struct {
challengeCache cache.ICache
}
// make sure AcmeTLSChallengeProvider matches the Provider interface
var _ challenge.Provider = AcmeTLSChallengeProvider{}
func (a AcmeTLSChallengeProvider) Present(domain, _, keyAuth string) error {
return a.challengeCache.Set(domain, keyAuth, 1*time.Hour)
}
func (a AcmeTLSChallengeProvider) CleanUp(domain, _, _ string) error {
a.challengeCache.Remove(domain)
return nil
}
type AcmeHTTPChallengeProvider struct {
challengeCache cache.ICache
}
// make sure AcmeHTTPChallengeProvider matches the Provider interface
var _ challenge.Provider = AcmeHTTPChallengeProvider{}
func (a AcmeHTTPChallengeProvider) Present(domain, token, keyAuth string) error {
return a.challengeCache.Set(domain+"/"+token, keyAuth, 1*time.Hour)
}
func (a AcmeHTTPChallengeProvider) CleanUp(domain, token, _ string) error {
a.challengeCache.Remove(domain + "/" + token)
return nil
}
func SetupHTTPACMEChallengeServer(challengeCache cache.ICache, sslPort uint) http.HandlerFunc {
// handle custom-ssl-ports to be added on https redirects
portPart := ""
if sslPort != 443 {
portPart = fmt.Sprintf(":%d", sslPort)
}
return func(w http.ResponseWriter, req *http.Request) {
ctx := context.New(w, req)
domain := ctx.TrimHostPort()
// it's an acme request
if strings.HasPrefix(ctx.Path(), challengePath) {
challenge, ok := challengeCache.Get(domain + "/" + strings.TrimPrefix(ctx.Path(), challengePath))
if !ok || challenge == nil {
log.Info().Msgf("HTTP-ACME challenge for '%s' failed: token not found", domain)
ctx.String("no challenge for this token", http.StatusNotFound)
// without this return the handler would fall through and panic on the nil challenge
return
}
log.Info().Msgf("HTTP-ACME challenge for '%s' succeeded", domain)
ctx.String(challenge.(string))
return
}
// it's a normal http request that needs to be redirected
u, err := url.Parse(fmt.Sprintf("https://%s%s%s", domain, portPart, ctx.Path()))
if err != nil {
log.Error().Err(err).Msg("could not craft http to https redirect")
ctx.String("", http.StatusInternalServerError)
// without this return the handler would dereference the nil URL below
return
}
newURL := u.String()
log.Debug().Msgf("redirect http to https: %s", newURL)
ctx.Redirect(newURL, http.StatusMovedPermanently)
}
}
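
A hypothetical wiring sketch (the real server assembles this elsewhere in the repo): the handler returned above answers HTTP-01 challenges on port 80 and redirects every other request to HTTPS on the given sslPort.

package main

import (
	"net/http"

	"codeberg.org/codeberg/pages/server/cache"
	"codeberg.org/codeberg/pages/server/certificates"
)

func main() {
	challengeCache := cache.NewInMemoryCache()
	// 443 is the port the https redirect should point at; non-default ports are appended to the Location header.
	handler := certificates.SetupHTTPACMEChallengeServer(challengeCache, 443)
	_ = http.ListenAndServe(":80", handler)
}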

View file

@ -0,0 +1,416 @@
package certificates
import (
"context"
"crypto/tls"
"crypto/x509"
"errors"
"fmt"
"strconv"
"strings"
"time"
"github.com/go-acme/lego/v4/certcrypto"
"github.com/go-acme/lego/v4/certificate"
"github.com/go-acme/lego/v4/challenge/tlsalpn01"
"github.com/go-acme/lego/v4/lego"
"github.com/hashicorp/golang-lru/v2/expirable"
"github.com/reugn/equalizer"
"github.com/rs/zerolog"
"github.com/rs/zerolog/log"
"codeberg.org/codeberg/pages/server/cache"
psContext "codeberg.org/codeberg/pages/server/context"
"codeberg.org/codeberg/pages/server/database"
dnsutils "codeberg.org/codeberg/pages/server/dns"
"codeberg.org/codeberg/pages/server/gitea"
"codeberg.org/codeberg/pages/server/upstream"
)
var ErrUserRateLimitExceeded = errors.New("rate limit exceeded: 10 certificates per user per 24 hours")
// TLSConfig returns the configuration for generating, serving and cleaning up Let's Encrypt certificates.
func TLSConfig(mainDomainSuffix string,
giteaClient *gitea.Client,
acmeClient *AcmeClient,
firstDefaultBranch string,
challengeCache, canonicalDomainCache cache.ICache,
certDB database.CertDB,
noDNS01 bool,
rawDomain string,
) *tls.Config {
// every cert is at most 24h in the cache and 7 days before expiry the cert is renewed
keyCache := expirable.NewLRU[string, *tls.Certificate](32, nil, 24*time.Hour)
return &tls.Config{
// check DNS name & get certificate from Let's Encrypt
GetCertificate: func(info *tls.ClientHelloInfo) (*tls.Certificate, error) {
ctx := psContext.New(nil, nil)
log := log.With().Str("ReqId", ctx.ReqId).Logger()
domain := strings.ToLower(strings.TrimSpace(info.ServerName))
log.Debug().Str("domain", domain).Msg("start: get tls certificate")
if len(domain) < 1 {
return nil, errors.New("missing domain info via SNI (RFC 4366, Section 3.1)")
}
// https request init is actually an acme challenge
if info.SupportedProtos != nil {
for _, proto := range info.SupportedProtos {
if proto != tlsalpn01.ACMETLS1Protocol {
continue
}
log.Info().Msgf("Detect ACME-TLS1 challenge for '%s'", domain)
challenge, ok := challengeCache.Get(domain)
if !ok {
return nil, errors.New("no challenge for this domain")
}
cert, err := tlsalpn01.ChallengeCert(domain, challenge.(string))
if err != nil {
return nil, err
}
return cert, nil
}
}
targetOwner := ""
mayObtainCert := true
if strings.HasSuffix(domain, mainDomainSuffix) || strings.EqualFold(domain, mainDomainSuffix[1:]) {
if noDNS01 {
// Limit the domains allowed to request a certificate to pages-server domains
// and domains for an existing user or org
if !strings.EqualFold(domain, mainDomainSuffix[1:]) && !strings.EqualFold(domain, rawDomain) {
targetOwner := strings.TrimSuffix(domain, mainDomainSuffix)
owner_exist, err := giteaClient.GiteaCheckIfOwnerExists(targetOwner)
mayObtainCert = owner_exist
if err != nil {
log.Error().Err(err).Msgf("Failed to check '%s' existence on the forge: %s", targetOwner, err)
mayObtainCert = false
}
}
} else {
// deliver default certificate for the main domain (*.codeberg.page)
domain = mainDomainSuffix
}
} else {
var targetRepo, targetBranch string
targetOwner, targetRepo, targetBranch = dnsutils.GetTargetFromDNS(domain, mainDomainSuffix, firstDefaultBranch)
if targetOwner == "" {
// DNS not set up, return main certificate to redirect to the docs
domain = mainDomainSuffix
} else {
targetOpt := &upstream.Options{
TargetOwner: targetOwner,
TargetRepo: targetRepo,
TargetBranch: targetBranch,
}
_, valid := targetOpt.CheckCanonicalDomain(ctx, giteaClient, domain, mainDomainSuffix, canonicalDomainCache)
if !valid {
// We shouldn't obtain a certificate when we cannot check if the
// repository has specified this domain in the `.domains` file.
mayObtainCert = false
}
}
}
if tlsCertificate, ok := keyCache.Get(domain); ok {
// we can use an existing certificate object
return tlsCertificate, nil
}
var tlsCertificate *tls.Certificate
var err error
if tlsCertificate, err = acmeClient.retrieveCertFromDB(log, domain, mainDomainSuffix, false, certDB); err != nil {
if !errors.Is(err, database.ErrNotFound) {
return nil, err
}
// we could not find a cert in db, request a new certificate
// first check if we are allowed to obtain a cert for this domain
if strings.EqualFold(domain, mainDomainSuffix) {
return nil, errors.New("won't request certificate for main domain, something really bad has happened")
}
if !mayObtainCert {
return nil, fmt.Errorf("won't request certificate for %q", domain)
}
tlsCertificate, err = acmeClient.obtainCert(log, acmeClient.legoClient, []string{domain}, nil, targetOwner, false, mainDomainSuffix, certDB)
if err != nil {
return nil, err
}
}
keyCache.Add(domain, tlsCertificate)
return tlsCertificate, nil
},
NextProtos: []string{
"h2",
"http/1.1",
tlsalpn01.ACMETLS1Protocol,
},
// generated 2021-07-13, Mozilla Guideline v5.6, Go 1.14.4, intermediate configuration
// https://ssl-config.mozilla.org/#server=go&version=1.14.4&config=intermediate&guideline=5.6
MinVersion: tls.VersionTLS12,
CipherSuites: []uint16{
tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,
tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,
tls.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,
tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,
tls.TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,
tls.TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,
},
}
}
func (c *AcmeClient) checkUserLimit(user string) error {
userLimit, ok := c.acmeClientCertificateLimitPerUser[user]
if !ok {
// Each user can only add 10 new domains per day.
userLimit = equalizer.NewTokenBucket(10, time.Hour*24)
c.acmeClientCertificateLimitPerUser[user] = userLimit
}
if !userLimit.Ask() {
return fmt.Errorf("user '%s' error: %w", user, ErrUserRateLimitExceeded)
}
return nil
}
func (c *AcmeClient) retrieveCertFromDB(log zerolog.Logger, sni, mainDomainSuffix string, useDnsProvider bool, certDB database.CertDB) (*tls.Certificate, error) {
// parse certificate from database
res, err := certDB.Get(sni)
if err != nil {
return nil, err
} else if res == nil {
return nil, database.ErrNotFound
}
tlsCertificate, err := tls.X509KeyPair(res.Certificate, res.PrivateKey)
if err != nil {
return nil, err
}
// TODO: document & put into own function
if !strings.EqualFold(sni, mainDomainSuffix) {
tlsCertificate.Leaf, err = leaf(&tlsCertificate)
if err != nil {
return nil, err
}
// renew certificates 7 days before they expire
if tlsCertificate.Leaf.NotAfter.Before(time.Now().Add(7 * 24 * time.Hour)) {
// TODO: use ValidTill of custom cert struct
if len(res.CSR) > 0 {
// CSR stores the time when the renewal shall be tried again
nextTryUnix, err := strconv.ParseInt(string(res.CSR), 10, 64)
if err == nil && time.Now().Before(time.Unix(nextTryUnix, 0)) {
return &tlsCertificate, nil
}
}
// TODO: make a queue ?
go (func() {
res.CSR = nil // acme client doesn't like CSR to be set
if _, err := c.obtainCert(log, c.legoClient, []string{sni}, res, "", useDnsProvider, mainDomainSuffix, certDB); err != nil {
log.Error().Msgf("Couldn't renew certificate for %s: %v", sni, err)
}
})()
}
}
return &tlsCertificate, nil
}
func (c *AcmeClient) obtainCert(log zerolog.Logger, acmeClient *lego.Client, domains []string, renew *certificate.Resource, user string, useDnsProvider bool, mainDomainSuffix string, keyDatabase database.CertDB) (*tls.Certificate, error) {
name := strings.TrimPrefix(domains[0], "*")
// lock to avoid simultaneous requests
_, working := c.obtainLocks.LoadOrStore(name, struct{}{})
if working {
for working {
time.Sleep(100 * time.Millisecond)
_, working = c.obtainLocks.Load(name)
}
cert, err := c.retrieveCertFromDB(log, name, mainDomainSuffix, useDnsProvider, keyDatabase)
if err != nil {
return nil, fmt.Errorf("certificate failed in synchronous request: %w", err)
}
return cert, nil
}
defer c.obtainLocks.Delete(name)
if acmeClient == nil {
if useDnsProvider {
return mockCert(domains[0], "DNS ACME client is not defined", mainDomainSuffix, keyDatabase)
} else {
return mockCert(domains[0], "ACME client uninitialized. This is a server error, please report!", mainDomainSuffix, keyDatabase)
}
}
// request actual cert
var res *certificate.Resource
var err error
if renew != nil && renew.CertURL != "" {
if c.acmeUseRateLimits {
c.acmeClientRequestLimit.Take()
}
log.Debug().Msgf("Renewing certificate for: %v", domains)
res, err = acmeClient.Certificate.Renew(*renew, true, false, "")
if err != nil {
log.Error().Err(err).Msgf("Couldn't renew certificate for %v, trying to request a new one", domains)
if c.acmeUseRateLimits {
c.acmeClientFailLimit.Take()
}
res = nil
}
}
if res == nil {
if user != "" {
if err := c.checkUserLimit(user); err != nil {
return nil, err
}
}
if c.acmeUseRateLimits {
c.acmeClientOrderLimit.Take()
c.acmeClientRequestLimit.Take()
}
log.Debug().Msgf("Re-requesting new certificate for %v", domains)
res, err = acmeClient.Certificate.Obtain(certificate.ObtainRequest{
Domains: domains,
Bundle: true,
MustStaple: false,
})
if c.acmeUseRateLimits && err != nil {
c.acmeClientFailLimit.Take()
}
}
if err != nil {
log.Error().Err(err).Msgf("Couldn't obtain again a certificate or %v", domains)
if renew != nil && renew.CertURL != "" {
tlsCertificate, err := tls.X509KeyPair(renew.Certificate, renew.PrivateKey)
if err != nil {
mockC, err2 := mockCert(domains[0], err.Error(), mainDomainSuffix, keyDatabase)
if err2 != nil {
return nil, errors.Join(err, err2)
}
return mockC, err
}
leaf, err := leaf(&tlsCertificate)
if err == nil && leaf.NotAfter.After(time.Now()) {
tlsCertificate.Leaf = leaf
// keep serving the still-valid cert instead of a mock cert; abuse the CSR field to store the time of the next renewal attempt
renew.CSR = []byte(strconv.FormatInt(time.Now().Add(6*time.Hour).Unix(), 10))
if err := keyDatabase.Put(name, renew); err != nil {
mockC, err2 := mockCert(domains[0], err.Error(), mainDomainSuffix, keyDatabase)
if err2 != nil {
return nil, errors.Join(err, err2)
}
return mockC, err
}
return &tlsCertificate, nil
}
}
return mockCert(domains[0], err.Error(), mainDomainSuffix, keyDatabase)
}
log.Debug().Msgf("Obtained certificate for %v", domains)
if err := keyDatabase.Put(name, res); err != nil {
return nil, err
}
tlsCertificate, err := tls.X509KeyPair(res.Certificate, res.PrivateKey)
if err != nil {
return nil, err
}
return &tlsCertificate, nil
}
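// SetupMainDomainCertificates makes sure a certificate for the main domain exists in the database,
// requesting a wildcard certificate via the DNS challenge if none is found yet.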
func SetupMainDomainCertificates(log zerolog.Logger, mainDomainSuffix string, acmeClient *AcmeClient, certDB database.CertDB) error {
// getting main cert before ACME account so that we can fail here without hitting rate limits
mainCertBytes, err := certDB.Get(mainDomainSuffix)
if err != nil && !errors.Is(err, database.ErrNotFound) {
return fmt.Errorf("cert database is not working: %w", err)
}
if mainCertBytes == nil {
_, err = acmeClient.obtainCert(log, acmeClient.dnsChallengerLegoClient, []string{"*" + mainDomainSuffix, mainDomainSuffix[1:]}, nil, "", true, mainDomainSuffix, certDB)
if err != nil {
log.Error().Err(err).Msg("Couldn't renew main domain certificate, continuing with mock certs only")
}
}
return nil
}
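// MaintainCertDB periodically removes certificates that would expire before the next run and
// renews the main domain certificate 30 days before it expires, until ctx is cancelled.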
func MaintainCertDB(log zerolog.Logger, ctx context.Context, interval time.Duration, acmeClient *AcmeClient, mainDomainSuffix string, certDB database.CertDB) {
for {
// delete expired certs that will be invalid until next clean up
threshold := time.Now().Add(interval)
expiredCertCount := 0
certs, err := certDB.Items(0, 0)
if err != nil {
log.Error().Err(err).Msg("could not get certs from list")
} else {
for _, cert := range certs {
if !strings.EqualFold(cert.Domain, strings.TrimPrefix(mainDomainSuffix, ".")) {
if time.Unix(cert.ValidTill, 0).Before(threshold) {
err := certDB.Delete(cert.Domain)
if err != nil {
log.Error().Err(err).Msgf("Deleting expired certificate for %q failed", cert.Domain)
} else {
expiredCertCount++
}
}
}
}
log.Debug().Msgf("Removed %d expired certificates from the database", expiredCertCount)
}
// update main cert
res, err := certDB.Get(mainDomainSuffix)
if err != nil {
log.Error().Msgf("Couldn't get cert for domain %q", mainDomainSuffix)
} else if res == nil {
log.Error().Msgf("Couldn't renew certificate for main domain %q expected main domain cert to exist, but it's missing - seems like the database is corrupted", mainDomainSuffix)
} else {
tlsCertificates, err := certcrypto.ParsePEMBundle(res.Certificate)
if err != nil {
log.Error().Err(fmt.Errorf("could not parse cert for mainDomainSuffix: %w", err))
} else if tlsCertificates[0].NotAfter.Before(time.Now().Add(30 * 24 * time.Hour)) {
// renew main certificate 30 days before it expires
go (func() {
_, err = acmeClient.obtainCert(log, acmeClient.dnsChallengerLegoClient, []string{"*" + mainDomainSuffix, mainDomainSuffix[1:]}, res, "", true, mainDomainSuffix, certDB)
if err != nil {
log.Error().Err(err).Msg("Couldn't renew certificate for main domain")
}
})()
}
}
select {
case <-ctx.Done():
return
case <-time.After(interval):
}
}
}
// leaf returns the parsed leaf certificate, either from c.Leaf or by parsing
// the corresponding c.Certificate[0].
// After successfully parsing the cert c.Leaf gets set to the parsed cert.
func leaf(c *tls.Certificate) (*x509.Certificate, error) {
if c.Leaf != nil {
return c.Leaf, nil
}
leaf, err := x509.ParseCertificate(c.Certificate[0])
if err != nil {
return nil, fmt.Errorf("tlsCert - failed to parse leaf: %w", err)
}
c.Leaf = leaf
return leaf, err
}

View file

@ -0,0 +1,87 @@
package certificates
import (
"bytes"
"crypto/rand"
"crypto/rsa"
"crypto/tls"
"crypto/x509"
"crypto/x509/pkix"
"encoding/pem"
"math/big"
"time"
"github.com/go-acme/lego/v4/certcrypto"
"github.com/go-acme/lego/v4/certificate"
"github.com/rs/zerolog/log"
"codeberg.org/codeberg/pages/server/database"
)
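// mockCert creates a self-signed placeholder certificate that carries the error message in its subject,
// stores it in the cert database and returns it, so visitors see a descriptive error instead of a broken TLS handshake.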
func mockCert(domain, msg, mainDomainSuffix string, keyDatabase database.CertDB) (*tls.Certificate, error) {
key, err := certcrypto.GeneratePrivateKey(certcrypto.RSA2048)
if err != nil {
return nil, err
}
template := x509.Certificate{
SerialNumber: big.NewInt(1),
Subject: pkix.Name{
CommonName: domain,
Organization: []string{"Codeberg Pages Error Certificate (couldn't obtain ACME certificate)"},
OrganizationalUnit: []string{
"Will not try again for 6 hours to avoid hitting rate limits for your domain.",
"Check https://docs.codeberg.org/codeberg-pages/troubleshooting/ for troubleshooting tips, and feel " +
"free to create an issue at https://codeberg.org/Codeberg/pages-server if you can't solve it.\n",
"Error message: " + msg,
},
},
// certificates are renewed 7 days before they expire, so this keeps the mock cert from being renewed for 6 hours
NotAfter: time.Now().Add(time.Hour*24*7 + time.Hour*6),
NotBefore: time.Now(),
KeyUsage: x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature,
ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth},
BasicConstraintsValid: true,
}
certBytes, err := x509.CreateCertificate(
rand.Reader,
&template,
&template,
&key.(*rsa.PrivateKey).PublicKey,
key,
)
if err != nil {
return nil, err
}
out := &bytes.Buffer{}
err = pem.Encode(out, &pem.Block{
Bytes: certBytes,
Type: "CERTIFICATE",
})
if err != nil {
return nil, err
}
outBytes := out.Bytes()
res := &certificate.Resource{
PrivateKey: certcrypto.PEMEncode(key),
Certificate: outBytes,
IssuerCertificate: outBytes,
Domain: domain,
}
databaseName := domain
if domain == "*"+mainDomainSuffix || domain == mainDomainSuffix[1:] {
databaseName = mainDomainSuffix
}
if err := keyDatabase.Put(databaseName, res); err != nil {
log.Error().Err(err).Msg("couldn't store mock cert in database")
}
tlsCertificate, err := tls.X509KeyPair(res.Certificate, res.PrivateKey)
if err != nil {
return nil, err
}
return &tlsCertificate, nil
}

View file

@ -0,0 +1,21 @@
package certificates
import (
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/mock"
"codeberg.org/codeberg/pages/server/database"
)
func TestMockCert(t *testing.T) {
db := database.NewMockCertDB(t)
db.Mock.On("Put", mock.Anything, mock.Anything).Return(nil)
cert, err := mockCert("example.com", "some error msg", "codeberg.page", db)
assert.NoError(t, err)
if assert.NotEmpty(t, cert) {
assert.NotEmpty(t, cert.Certificate)
}
}

72
server/context/context.go Normal file
View file

@ -0,0 +1,72 @@
package context
import (
stdContext "context"
"net/http"
"codeberg.org/codeberg/pages/server/utils"
"github.com/hashicorp/go-uuid"
"github.com/rs/zerolog/log"
)
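// Context bundles the response writer and request of a single request together with the
// resulting status code and a request id used to correlate log messages.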
type Context struct {
RespWriter http.ResponseWriter
Req *http.Request
StatusCode int
ReqId string
}
func New(w http.ResponseWriter, r *http.Request) *Context {
req_uuid, err := uuid.GenerateUUID()
if err != nil {
log.Error().Err(err).Msg("Failed to generate request id, assigning error value")
req_uuid = "ERROR"
}
return &Context{
RespWriter: w,
Req: r,
StatusCode: http.StatusOK,
ReqId: req_uuid,
}
}
func (c *Context) Context() stdContext.Context {
if c.Req != nil {
return c.Req.Context()
}
return stdContext.Background()
}
func (c *Context) Response() *http.Response {
if c.Req != nil && c.Req.Response != nil {
return c.Req.Response
}
return nil
}
func (c *Context) String(raw string, status ...int) {
code := http.StatusOK
if len(status) != 0 {
code = status[0]
}
c.RespWriter.WriteHeader(code)
_, _ = c.RespWriter.Write([]byte(raw))
}
func (c *Context) Redirect(uri string, statusCode int) {
http.Redirect(c.RespWriter, c.Req, uri, statusCode)
}
// Path returns the cleaned requested path.
func (c *Context) Path() string {
return utils.CleanPath(c.Req.URL.Path)
}
func (c *Context) Host() string {
return c.Req.URL.Host
}
func (c *Context) TrimHostPort() string {
return utils.TrimHostPort(c.Req.Host)
}

View file

@ -0,0 +1,78 @@
package database
import (
"fmt"
"github.com/go-acme/lego/v4/certcrypto"
"github.com/go-acme/lego/v4/certificate"
"github.com/rs/zerolog/log"
)
//go:generate go install github.com/vektra/mockery/v2@latest
//go:generate mockery --name CertDB --output . --filename mock.go --inpackage --case underscore
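// CertDB is the storage interface for ACME certificates, keyed by domain.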
type CertDB interface {
Close() error
Put(name string, cert *certificate.Resource) error
Get(name string) (*certificate.Resource, error)
Delete(key string) error
Items(page, pageSize int) ([]*Cert, error)
}
type Cert struct {
Domain string `xorm:"pk NOT NULL UNIQUE 'domain'"`
Created int64 `xorm:"created NOT NULL DEFAULT 0 'created'"`
Updated int64 `xorm:"updated NOT NULL DEFAULT 0 'updated'"`
ValidTill int64 `xorm:" NOT NULL DEFAULT 0 'valid_till'"`
// certificate.Resource
CertURL string `xorm:"'cert_url'"`
CertStableURL string `xorm:"'cert_stable_url'"`
PrivateKey []byte `xorm:"'private_key'"`
Certificate []byte `xorm:"'certificate'"`
IssuerCertificate []byte `xorm:"'issuer_certificate'"`
}
func (c Cert) Raw() *certificate.Resource {
return &certificate.Resource{
Domain: c.Domain,
CertURL: c.CertURL,
CertStableURL: c.CertStableURL,
PrivateKey: c.PrivateKey,
Certificate: c.Certificate,
IssuerCertificate: c.IssuerCertificate,
}
}
func toCert(name string, c *certificate.Resource) (*Cert, error) {
tlsCertificates, err := certcrypto.ParsePEMBundle(c.Certificate)
if err != nil {
return nil, err
}
if len(tlsCertificates) == 0 || tlsCertificates[0] == nil {
err := fmt.Errorf("parsed cert resource has no cert")
log.Error().Err(err).Str("domain", c.Domain).Msgf("cert: %v", c)
return nil, err
}
validTill := tlsCertificates[0].NotAfter.Unix()
// handle wildcard certs
if name[:1] == "." {
name = "*" + name
}
if name != c.Domain {
err := fmt.Errorf("domain key '%s' and cert domain '%s' not equal", name, c.Domain)
log.Error().Err(err).Msg("toCert conversion did discover mismatch")
// TODO: fail hard: return nil, err
}
return &Cert{
Domain: c.Domain,
ValidTill: validTill,
CertURL: c.CertURL,
CertStableURL: c.CertStableURL,
PrivateKey: c.PrivateKey,
Certificate: c.Certificate,
IssuerCertificate: c.IssuerCertificate,
}, nil
}

122
server/database/mock.go Normal file
View file

@ -0,0 +1,122 @@
// Code generated by mockery v2.20.0. DO NOT EDIT.
package database
import (
certificate "github.com/go-acme/lego/v4/certificate"
mock "github.com/stretchr/testify/mock"
)
// MockCertDB is an autogenerated mock type for the CertDB type
type MockCertDB struct {
mock.Mock
}
// Close provides a mock function with given fields:
func (_m *MockCertDB) Close() error {
ret := _m.Called()
var r0 error
if rf, ok := ret.Get(0).(func() error); ok {
r0 = rf()
} else {
r0 = ret.Error(0)
}
return r0
}
// Delete provides a mock function with given fields: key
func (_m *MockCertDB) Delete(key string) error {
ret := _m.Called(key)
var r0 error
if rf, ok := ret.Get(0).(func(string) error); ok {
r0 = rf(key)
} else {
r0 = ret.Error(0)
}
return r0
}
// Get provides a mock function with given fields: name
func (_m *MockCertDB) Get(name string) (*certificate.Resource, error) {
ret := _m.Called(name)
var r0 *certificate.Resource
var r1 error
if rf, ok := ret.Get(0).(func(string) (*certificate.Resource, error)); ok {
return rf(name)
}
if rf, ok := ret.Get(0).(func(string) *certificate.Resource); ok {
r0 = rf(name)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(*certificate.Resource)
}
}
if rf, ok := ret.Get(1).(func(string) error); ok {
r1 = rf(name)
} else {
r1 = ret.Error(1)
}
return r0, r1
}
// Items provides a mock function with given fields: page, pageSize
func (_m *MockCertDB) Items(page int, pageSize int) ([]*Cert, error) {
ret := _m.Called(page, pageSize)
var r0 []*Cert
var r1 error
if rf, ok := ret.Get(0).(func(int, int) ([]*Cert, error)); ok {
return rf(page, pageSize)
}
if rf, ok := ret.Get(0).(func(int, int) []*Cert); ok {
r0 = rf(page, pageSize)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).([]*Cert)
}
}
if rf, ok := ret.Get(1).(func(int, int) error); ok {
r1 = rf(page, pageSize)
} else {
r1 = ret.Error(1)
}
return r0, r1
}
// Put provides a mock function with given fields: name, cert
func (_m *MockCertDB) Put(name string, cert *certificate.Resource) error {
ret := _m.Called(name, cert)
var r0 error
if rf, ok := ret.Get(0).(func(string, *certificate.Resource) error); ok {
r0 = rf(name, cert)
} else {
r0 = ret.Error(0)
}
return r0
}
type mockConstructorTestingTNewMockCertDB interface {
mock.TestingT
Cleanup(func())
}
// NewMockCertDB creates a new instance of MockCertDB. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.
func NewMockCertDB(t mockConstructorTestingTNewMockCertDB) *MockCertDB {
mock := &MockCertDB{}
mock.Mock.Test(t)
t.Cleanup(func() { mock.AssertExpectations(t) })
return mock
}

138
server/database/xorm.go Normal file
View file

@ -0,0 +1,138 @@
package database
import (
"errors"
"fmt"
"github.com/rs/zerolog/log"
"github.com/go-acme/lego/v4/certificate"
"xorm.io/xorm"
// register sql driver
_ "github.com/go-sql-driver/mysql"
_ "github.com/lib/pq"
_ "github.com/mattn/go-sqlite3"
)
var _ CertDB = xDB{}
var ErrNotFound = errors.New("entry not found")
type xDB struct {
engine *xorm.Engine
}
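// NewXormDB opens the certificate database with the given driver and connection string and syncs the Cert schema.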
func NewXormDB(dbType, dbConn string) (CertDB, error) {
if !supportedDriver(dbType) {
return nil, fmt.Errorf("not supported db type '%s'", dbType)
}
if dbConn == "" {
return nil, fmt.Errorf("no db connection provided")
}
e, err := xorm.NewEngine(dbType, dbConn)
if err != nil {
return nil, err
}
if err := e.Sync2(new(Cert)); err != nil {
return nil, fmt.Errorf("could not sync db model :%w", err)
}
return &xDB{
engine: e,
}, nil
}
func (x xDB) Close() error {
return x.engine.Close()
}
func (x xDB) Put(domain string, cert *certificate.Resource) error {
log.Trace().Str("domain", cert.Domain).Msg("inserting cert to db")
c, err := toCert(domain, cert)
if err != nil {
return err
}
sess := x.engine.NewSession()
if err := sess.Begin(); err != nil {
return err
}
defer sess.Close()
if exist, _ := sess.ID(c.Domain).Exist(new(Cert)); exist {
if _, err := sess.ID(c.Domain).Update(c); err != nil {
return err
}
} else {
if _, err = sess.Insert(c); err != nil {
return err
}
}
return sess.Commit()
}
func (x xDB) Get(domain string) (*certificate.Resource, error) {
// handle wildcard certs
if domain[:1] == "." {
domain = "*" + domain
}
cert := new(Cert)
log.Trace().Str("domain", domain).Msg("get cert from db")
if found, err := x.engine.ID(domain).Get(cert); err != nil {
return nil, err
} else if !found {
return nil, fmt.Errorf("%w: name='%s'", ErrNotFound, domain)
}
return cert.Raw(), nil
}
func (x xDB) Delete(domain string) error {
// handle wildcard certs
if domain[:1] == "." {
domain = "*" + domain
}
log.Trace().Str("domain", domain).Msg("delete cert from db")
_, err := x.engine.ID(domain).Delete(new(Cert))
return err
}
// Items returns all certs from the db; if pageSize is 0 it does not apply a limit
func (x xDB) Items(page, pageSize int) ([]*Cert, error) {
// paginated return
if pageSize > 0 {
certs := make([]*Cert, 0, pageSize)
if page < 1 {
page = 1
}
err := x.engine.Limit(pageSize, (page-1)*pageSize).Find(&certs)
return certs, err
}
// return all
certs := make([]*Cert, 0, 64)
err := x.engine.Find(&certs)
return certs, err
}
// Supported database drivers
const (
DriverSqlite = "sqlite3"
DriverMysql = "mysql"
DriverPostgres = "postgres"
)
func supportedDriver(driver string) bool {
switch driver {
case DriverMysql, DriverPostgres, DriverSqlite:
return true
default:
return false
}
}

View file

@ -0,0 +1,92 @@
package database
import (
"errors"
"testing"
"github.com/go-acme/lego/v4/certificate"
"github.com/stretchr/testify/assert"
"xorm.io/xorm"
)
func newTestDB(t *testing.T) *xDB {
e, err := xorm.NewEngine("sqlite3", ":memory:")
assert.NoError(t, err)
assert.NoError(t, e.Sync2(new(Cert)))
return &xDB{engine: e}
}
func TestSanitizeWildcardCerts(t *testing.T) {
certDB := newTestDB(t)
_, err := certDB.Get(".not.found")
assert.True(t, errors.Is(err, ErrNotFound))
// TODO: cert key and domain mismatches do not fail hard yet
// https://codeberg.org/Codeberg/pages-server/src/commit/d8595cee882e53d7f44f1ddc4ef8a1f7b8f31d8d/server/database/interface.go#L64
//
// assert.Error(t, certDB.Put(".wildcard.de", &certificate.Resource{
// Domain: "*.localhost.mock.directory",
// Certificate: localhost_mock_directory_certificate,
// }))
// insert new wildcard cert
assert.NoError(t, certDB.Put(".wildcard.de", &certificate.Resource{
Domain: "*.wildcard.de",
Certificate: localhost_mock_directory_certificate,
}))
// update existing cert
assert.NoError(t, certDB.Put(".wildcard.de", &certificate.Resource{
Domain: "*.wildcard.de",
Certificate: localhost_mock_directory_certificate,
}))
c1, err := certDB.Get(".wildcard.de")
assert.NoError(t, err)
c2, err := certDB.Get("*.wildcard.de")
assert.NoError(t, err)
assert.EqualValues(t, c1, c2)
}
var localhost_mock_directory_certificate = []byte(`-----BEGIN CERTIFICATE-----
MIIDczCCAlugAwIBAgIIJyBaXHmLk6gwDQYJKoZIhvcNAQELBQAwKDEmMCQGA1UE
AxMdUGViYmxlIEludGVybWVkaWF0ZSBDQSA0OWE0ZmIwHhcNMjMwMjEwMDEwOTA2
WhcNMjgwMjEwMDEwOTA2WjAjMSEwHwYDVQQDExhsb2NhbGhvc3QubW9jay5kaXJl
Y3RvcnkwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDIU/CjzS7t62Gj
neEMqvP7sn99ULT7AEUzEfWL05fWG2z714qcUg1hXkZLgdVDgmsCpplyddip7+2t
ZH/9rLPLMqJphzvOL4CF6jDLbeifETtKyjnt9vUZFnnNWcP3tu8lo8iYSl08qsUI
Pp/hiEriAQzCDjTbR5m9xUPNPYqxzcS4ALzmmCX9Qfc4CuuhMkdv2G4TT7rylWrA
SCSRPnGjeA7pCByfNrO/uXbxmzl3sMO3k5sqgMkx1QIHEN412V8+vtx88mt2sM6k
xjzGZWWKXlRq+oufIKX9KPplhsCjMH6E3VNAzgOPYDqXagtUcGmLWghURltO8Mt2
zwM6OgjjAgMBAAGjgaUwgaIwDgYDVR0PAQH/BAQDAgWgMB0GA1UdJQQWMBQGCCsG
AQUFBwMBBggrBgEFBQcDAjAMBgNVHRMBAf8EAjAAMB0GA1UdDgQWBBSMQvlJ1755
sarf8i1KNqj7s5o/aDAfBgNVHSMEGDAWgBTcZcxJMhWdP7MecHCCpNkFURC/YzAj
BgNVHREEHDAaghhsb2NhbGhvc3QubW9jay5kaXJlY3RvcnkwDQYJKoZIhvcNAQEL
BQADggEBACcd7TT28OWwzQN2PcH0aG38JX5Wp2iOS/unDCfWjNAztXHW7nBDMxza
VtyebkJfccexpuVuOsjOX+bww0vtEYIvKX3/GbkhogksBrNkE0sJZtMnZWMR33wa
YxAy/kJBTmLi02r8fX9ZhwjldStHKBav4USuP7DXZjrgX7LFQhR4LIDrPaYqQRZ8
ltC3mM9LDQ9rQyIFP5cSBMO3RUAm4I8JyLoOdb/9G2uxjHr7r6eG1g8DmLYSKBsQ
mWGQDOYgR3cGltDe2yMxM++yHY+b1uhxGOWMrDA1+1k7yI19LL8Ifi2FMovDfu/X
JxYk1NNNtdctwaYJFenmGQvDaIq1KgE=
-----END CERTIFICATE-----
-----BEGIN CERTIFICATE-----
MIIDUDCCAjigAwIBAgIIKBJ7IIA6W1swDQYJKoZIhvcNAQELBQAwIDEeMBwGA1UE
AxMVUGViYmxlIFJvb3QgQ0EgNTdmZjE2MCAXDTIzMDIwOTA1MzMxMloYDzIwNTMw
MjA5MDUzMzEyWjAoMSYwJAYDVQQDEx1QZWJibGUgSW50ZXJtZWRpYXRlIENBIDQ5
YTRmYjCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBANOvlqRx8SXQFWo2
gFCiXxls53eENcyr8+meFyjgnS853eEvplaPxoa2MREKd+ZYxM8EMMfj2XGvR3UI
aqR5QyLQ9ihuRqvQo4fG91usBHgH+vDbGPdMX8gDmm9HgnmtOVhSKJU+M2jfE1SW
UuWB9xOa3LMreTXbTNfZEMoXf+GcWZMbx5WPgEga3DvfmV+RsfNvB55eD7YAyZgF
ZnQ3Dskmnxxlkz0EGgd7rqhFHHNB9jARlL22gITADwoWZidlr3ciM9DISymRKQ0c
mRN15fQjNWdtuREgJlpXecbYQMGhdTOmFrqdHkveD1o63rGSC4z+s/APV6xIbcRp
aNpO7L8CAwEAAaOBgzCBgDAOBgNVHQ8BAf8EBAMCAoQwHQYDVR0lBBYwFAYIKwYB
BQUHAwEGCCsGAQUFBwMCMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFNxlzEky
FZ0/sx5wcIKk2QVREL9jMB8GA1UdIwQYMBaAFOqfkm9rebIz4z0SDIKW5edLg5JM
MA0GCSqGSIb3DQEBCwUAA4IBAQBRG9AHEnyj2fKzVDDbQaKHjAF5jh0gwyHoIeRK
FkP9mQNSWxhvPWI0tK/E49LopzmVuzSbDd5kZsaii73rAs6f6Rf9W5veo3AFSEad
stM+Zv0f2vWB38nuvkoCRLXMX+QUeuL65rKxdEpyArBju4L3/PqAZRgMLcrH+ak8
nvw5RdAq+Km/ZWyJgGikK6cfMmh91YALCDFnoWUWrCjkBaBFKrG59ONV9f0IQX07
aNfFXFCF5l466xw9dHjw5iaFib10cpY3iq4kyPYIMs6uaewkCtxWKKjiozM4g4w3
HqwyUyZ52WUJOJ/6G9DJLDtN3fgGR+IAp8BhYd5CqOscnt3h
-----END CERTIFICATE-----`)

66
server/dns/dns.go Normal file
View file

@ -0,0 +1,66 @@
package dns
import (
"net"
"strings"
"time"
"github.com/hashicorp/golang-lru/v2/expirable"
)
const (
lookupCacheValidity = 30 * time.Second
defaultPagesRepo = "pages"
)
// TODO(#316): refactor to not use global variables
var lookupCache *expirable.LRU[string, string] = expirable.NewLRU[string, string](4096, nil, lookupCacheValidity)
// GetTargetFromDNS searches for CNAME or TXT entries on the request domain ending with MainDomainSuffix.
// If everything is fine, it returns the target data.
func GetTargetFromDNS(domain, mainDomainSuffix, firstDefaultBranch string) (targetOwner, targetRepo, targetBranch string) {
// Get CNAME or TXT
var cname string
var err error
if entry, ok := lookupCache.Get(domain); ok {
cname = entry
} else {
cname, err = net.LookupCNAME(domain)
cname = strings.TrimSuffix(cname, ".")
if err != nil || !strings.HasSuffix(cname, mainDomainSuffix) {
cname = ""
// TODO: check if the A record matches!
names, err := net.LookupTXT(domain)
if err == nil {
for _, name := range names {
name = strings.TrimSuffix(strings.TrimSpace(name), ".")
if strings.HasSuffix(name, mainDomainSuffix) {
cname = name
break
}
}
}
}
_ = lookupCache.Add(domain, cname)
}
if cname == "" {
return
}
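// The target looks like "[[branch.]repo.]owner<mainDomainSuffix>"; strip the suffix and read the labels from right to left.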
cnameParts := strings.Split(strings.TrimSuffix(cname, mainDomainSuffix), ".")
targetOwner = cnameParts[len(cnameParts)-1]
if len(cnameParts) > 1 {
targetRepo = cnameParts[len(cnameParts)-2]
}
if len(cnameParts) > 2 {
targetBranch = cnameParts[len(cnameParts)-3]
}
if targetRepo == "" {
targetRepo = defaultPagesRepo
}
if targetBranch == "" && targetRepo != defaultPagesRepo {
targetBranch = firstDefaultBranch
}
// if targetBranch is still empty, the caller must find the default branch
return
}

154
server/gitea/cache.go Normal file
View file

@ -0,0 +1,154 @@
package gitea
import (
"bytes"
"encoding/json"
"fmt"
"io"
"net/http"
"time"
"github.com/rs/zerolog"
"github.com/rs/zerolog/log"
"codeberg.org/codeberg/pages/server/cache"
"codeberg.org/codeberg/pages/server/context"
)
const (
// defaultBranchCacheTimeout specifies the timeout for the default branch cache. It can be quite long.
defaultBranchCacheTimeout = 15 * time.Minute
// branchExistenceCacheTimeout specifies the timeout for the branch timestamp & existence cache. It should be shorter
// than fileCacheTimeout, as that gets invalidated if the branch timestamp has changed. That way, repo changes will be
// picked up faster, while still allowing the content to be cached longer if nothing changes.
branchExistenceCacheTimeout = 5 * time.Minute
// fileCacheTimeout specifies the timeout for the file content cache - you might want to make this quite long, depending
// on your available memory.
// TODO: move as option into cache interface
fileCacheTimeout = 5 * time.Minute
// ownerExistenceCacheTimeout specifies the timeout for the existence of a repo/org
ownerExistenceCacheTimeout = 5 * time.Minute
// fileCacheSizeLimit limits the maximum file size that will be cached, and is set to 1 MB by default.
fileCacheSizeLimit = int64(1000 * 1000)
)
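// FileResponse describes a cached file; its metadata is stored as JSON under "<key>|Metadata" and its body under "<key>|Body".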
type FileResponse struct {
Exists bool `json:"exists"`
IsSymlink bool `json:"isSymlink"`
ETag string `json:"eTag"`
MimeType string `json:"mimeType"` // uncompressed MIME type
RawMime string `json:"rawMime"` // raw MIME type (if compressed, type of compression)
Body []byte `json:"-"` // saved separately
}
func (f FileResponse) IsEmpty() bool {
return len(f.Body) == 0
}
func (f FileResponse) createHttpResponse(cacheKey string, decompress bool) (header http.Header, statusCode int) {
header = make(http.Header)
if f.Exists {
statusCode = http.StatusOK
} else {
statusCode = http.StatusNotFound
}
if f.IsSymlink {
header.Set(giteaObjectTypeHeader, objTypeSymlink)
}
header.Set(ETagHeader, f.ETag)
if decompress {
header.Set(ContentTypeHeader, f.MimeType)
} else {
header.Set(ContentTypeHeader, f.RawMime)
}
header.Set(ContentLengthHeader, fmt.Sprintf("%d", len(f.Body)))
header.Set(PagesCacheIndicatorHeader, "true")
log.Trace().Msgf("fileCache for %q used", cacheKey)
return header, statusCode
}
type BranchTimestamp struct {
NotFound bool `json:"notFound"`
Branch string `json:"branch,omitempty"`
Timestamp time.Time `json:"timestamp,omitempty"`
}
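// writeCacheReader wraps the upstream body reader and buffers everything that is read, so the response
// can be written to the cache once it has been read completely. Bodies larger than fileCacheSizeLimit
// and reads that fail are not cached.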
type writeCacheReader struct {
originalReader io.ReadCloser
buffer *bytes.Buffer
fileResponse *FileResponse
cacheKey string
cache cache.ICache
hasError bool
doNotCache bool
complete bool
log zerolog.Logger
}
func (t *writeCacheReader) Read(p []byte) (n int, err error) {
t.log.Trace().Msgf("[cache] read %q", t.cacheKey)
n, err = t.originalReader.Read(p)
if err == io.EOF {
t.complete = true
}
if err != nil && err != io.EOF {
t.log.Trace().Err(err).Msgf("[cache] original reader for %q has returned an error", t.cacheKey)
t.hasError = true
} else if n > 0 {
if t.buffer.Len()+n > int(fileCacheSizeLimit) {
t.doNotCache = true
t.buffer.Reset()
} else {
_, _ = t.buffer.Write(p[:n])
}
}
return
}
func (t *writeCacheReader) Close() error {
doWrite := !t.hasError && !t.doNotCache && t.complete
fc := *t.fileResponse
fc.Body = t.buffer.Bytes()
if doWrite {
jsonToCache, err := json.Marshal(fc)
if err != nil {
t.log.Trace().Err(err).Msgf("[cache] marshaling json for %q has returned an error", t.cacheKey+"|Metadata")
}
err = t.cache.Set(t.cacheKey+"|Metadata", jsonToCache, fileCacheTimeout)
if err != nil {
t.log.Trace().Err(err).Msgf("[cache] writer for %q has returned an error", t.cacheKey+"|Metadata")
}
err = t.cache.Set(t.cacheKey+"|Body", fc.Body, fileCacheTimeout)
if err != nil {
t.log.Trace().Err(err).Msgf("[cache] writer for %q has returned an error", t.cacheKey+"|Body")
}
}
t.log.Trace().Msgf("cacheReader for %q saved=%t closed", t.cacheKey, doWrite)
return t.originalReader.Close()
}
func (f FileResponse) CreateCacheReader(ctx *context.Context, r io.ReadCloser, cache cache.ICache, cacheKey string) io.ReadCloser {
log := log.With().Str("ReqId", ctx.ReqId).Logger()
if r == nil || cache == nil || cacheKey == "" {
log.Error().Msg("could not create CacheReader")
return nil
}
return &writeCacheReader{
originalReader: r,
buffer: bytes.NewBuffer(make([]byte, 0)),
fileResponse: &f,
cache: cache,
cacheKey: cacheKey,
log: log,
}
}

386
server/gitea/client.go Normal file
View file

@ -0,0 +1,386 @@
package gitea
import (
"bytes"
"encoding/json"
"errors"
"fmt"
"io"
"mime"
"net/http"
"net/url"
"path"
"strconv"
"strings"
"time"
"code.gitea.io/sdk/gitea"
"github.com/rs/zerolog/log"
"codeberg.org/codeberg/pages/config"
"codeberg.org/codeberg/pages/server/cache"
"codeberg.org/codeberg/pages/server/context"
"codeberg.org/codeberg/pages/server/version"
)
var ErrorNotFound = errors.New("not found")
const (
// cache key prefixes
branchTimestampCacheKeyPrefix = "branchTime"
defaultBranchCacheKeyPrefix = "defaultBranch"
rawContentCacheKeyPrefix = "rawContent"
ownerExistenceKeyPrefix = "ownerExist"
// pages server
PagesCacheIndicatorHeader = "X-Pages-Cache"
symlinkReadLimit = 10000
// gitea
giteaObjectTypeHeader = "X-Gitea-Object-Type"
objTypeSymlink = "symlink"
// std
ETagHeader = "ETag"
ContentTypeHeader = "Content-Type"
ContentLengthHeader = "Content-Length"
ContentEncodingHeader = "Content-Encoding"
)
type Client struct {
sdkClient *gitea.Client
sdkFileClient *gitea.Client
responseCache cache.ICache
giteaRoot string
followSymlinks bool
supportLFS bool
forbiddenMimeTypes map[string]bool
defaultMimeType string
}
func NewClient(cfg config.ForgeConfig, respCache cache.ICache) (*Client, error) {
// url.Parse accepts almost anything as valid, so use the stricter url.ParseRequestURI here
rootURL, err := url.ParseRequestURI(cfg.Root)
if err != nil {
return nil, fmt.Errorf("invalid forgejo/gitea root url: %w", err)
}
giteaRoot := strings.TrimSuffix(rootURL.String(), "/")
forbiddenMimeTypes := make(map[string]bool, len(cfg.ForbiddenMimeTypes))
for _, mimeType := range cfg.ForbiddenMimeTypes {
forbiddenMimeTypes[mimeType] = true
}
defaultMimeType := cfg.DefaultMimeType
if defaultMimeType == "" {
defaultMimeType = "application/octet-stream"
}
sdkClient, err := gitea.NewClient(
giteaRoot,
gitea.SetHTTPClient(&http.Client{Timeout: 10 * time.Second}),
gitea.SetToken(cfg.Token),
gitea.SetUserAgent("pages-server/"+version.Version),
)
if err != nil {
return nil, err
}
sdkFileClient, err := gitea.NewClient(
giteaRoot,
gitea.SetHTTPClient(&http.Client{Timeout: 1 * time.Hour}),
gitea.SetToken(cfg.Token),
gitea.SetUserAgent("pages-server/"+version.Version),
)
return &Client{
sdkClient: sdkClient,
sdkFileClient: sdkFileClient,
responseCache: respCache,
giteaRoot: giteaRoot,
followSymlinks: cfg.FollowSymlinks,
supportLFS: cfg.LFSEnabled,
forbiddenMimeTypes: forbiddenMimeTypes,
defaultMimeType: defaultMimeType,
}, err
}
func (client *Client) ContentWebLink(targetOwner, targetRepo, branch, resource string) string {
return path.Join(client.giteaRoot, targetOwner, targetRepo, "src/branch", branch, resource)
}
func (client *Client) GiteaRawContent(ctx *context.Context, targetOwner, targetRepo, ref, resource string) ([]byte, error) {
reader, _, _, err := client.ServeRawContent(ctx, targetOwner, targetRepo, ref, resource, false)
if err != nil {
return nil, err
}
defer reader.Close()
return io.ReadAll(reader)
}
func (client *Client) ServeRawContent(ctx *context.Context, targetOwner, targetRepo, ref, resource string, decompress bool) (io.ReadCloser, http.Header, int, error) {
cacheKey := fmt.Sprintf("%s/%s/%s|%s|%s", rawContentCacheKeyPrefix, targetOwner, targetRepo, ref, resource)
log := log.With().Str("ReqId", ctx.ReqId).Str("cache_key", cacheKey).Logger()
log.Trace().Msg("try file in cache")
// handle if cache entry exist
if cacheMetadata, ok := client.responseCache.Get(cacheKey + "|Metadata"); ok {
var cache FileResponse
err := json.Unmarshal(cacheMetadata.([]byte), &cache)
if err != nil {
log.Error().Err(err).Msgf("[cache] failed to unmarshal metadata for: %s", cacheKey)
return nil, nil, http.StatusNotFound, err
}
if !cache.Exists {
return nil, nil, http.StatusNotFound, ErrorNotFound
}
body, ok := client.responseCache.Get(cacheKey + "|Body")
if !ok {
log.Error().Msgf("[cache] failed to get body for: %s", cacheKey)
return nil, nil, http.StatusNotFound, ErrorNotFound
}
cache.Body = body.([]byte)
cachedHeader, cachedStatusCode := cache.createHttpResponse(cacheKey, decompress)
if cache.Exists {
if cache.IsSymlink {
linkDest := string(cache.Body)
log.Debug().Msgf("[cache] follow symlink from %q to %q", resource, linkDest)
return client.ServeRawContent(ctx, targetOwner, targetRepo, ref, linkDest, decompress)
} else {
log.Debug().Msgf("[cache] return %d bytes", len(cache.Body))
return io.NopCloser(bytes.NewReader(cache.Body)), cachedHeader, cachedStatusCode, nil
}
} else {
return nil, nil, http.StatusNotFound, ErrorNotFound
}
}
log.Trace().Msg("file not in cache")
// not in cache, open reader via gitea api
reader, resp, err := client.sdkFileClient.GetFileReader(targetOwner, targetRepo, ref, resource, client.supportLFS)
if resp != nil {
switch resp.StatusCode {
case http.StatusOK:
// first handle symlinks
{
objType := resp.Header.Get(giteaObjectTypeHeader)
log.Trace().Msgf("server raw content object %q", objType)
if client.followSymlinks && objType == objTypeSymlink {
defer reader.Close()
// read limited chars for symlink
linkDestBytes, err := io.ReadAll(io.LimitReader(reader, symlinkReadLimit))
if err != nil {
return nil, nil, http.StatusInternalServerError, err
}
linkDest := strings.TrimSpace(string(linkDestBytes))
// handle relative links: resolve the destination against the directory of the requested resource
// (path.Join also cleans parent references like "/../")
linkDest = path.Join(path.Dir(resource), linkDest)
// we store symlink not content to reduce duplicates in cache
fileResponse := FileResponse{
Exists: true,
IsSymlink: true,
Body: []byte(linkDest),
ETag: resp.Header.Get(ETagHeader),
}
log.Trace().Msgf("file response has %d bytes", len(fileResponse.Body))
jsonToCache, err := json.Marshal(fileResponse)
if err != nil {
log.Error().Err(err).Msgf("[cache] marshaling json metadata for %q has returned an error", cacheKey)
}
if err := client.responseCache.Set(cacheKey+"|Metadata", jsonToCache, fileCacheTimeout); err != nil {
log.Error().Err(err).Msg("[cache] error on cache write")
}
if err := client.responseCache.Set(cacheKey+"|Body", fileResponse.Body, fileCacheTimeout); err != nil {
log.Error().Err(err).Msg("[cache] error on cache write")
}
log.Debug().Msgf("follow symlink from %q to %q", resource, linkDest)
return client.ServeRawContent(ctx, targetOwner, targetRepo, ref, linkDest, decompress)
}
}
// now we are sure it's content so set the MIME type
mimeType, rawType := client.getMimeTypeByExtension(resource)
if decompress {
resp.Response.Header.Set(ContentTypeHeader, mimeType)
} else {
resp.Response.Header.Set(ContentTypeHeader, rawType)
}
// now we write to cache and respond at the same time
fileResp := FileResponse{
Exists: true,
ETag: resp.Header.Get(ETagHeader),
MimeType: mimeType,
RawMime: rawType,
}
return fileResp.CreateCacheReader(ctx, reader, client.responseCache, cacheKey), resp.Response.Header, resp.StatusCode, nil
case http.StatusNotFound:
jsonToCache, err := json.Marshal(FileResponse{ETag: resp.Header.Get(ETagHeader)})
if err != nil {
log.Error().Err(err).Msgf("[cache] marshaling json metadata for %q has returned an error", cacheKey)
}
if err := client.responseCache.Set(cacheKey+"|Metadata", jsonToCache, fileCacheTimeout); err != nil {
log.Error().Err(err).Msg("[cache] error on cache write")
}
return nil, resp.Response.Header, http.StatusNotFound, ErrorNotFound
default:
return nil, resp.Response.Header, resp.StatusCode, fmt.Errorf("unexpected status code '%d'", resp.StatusCode)
}
}
return nil, nil, http.StatusInternalServerError, err
}
func (client *Client) GiteaGetRepoBranchTimestamp(repoOwner, repoName, branchName string) (*BranchTimestamp, error) {
cacheKey := fmt.Sprintf("%s/%s/%s/%s", branchTimestampCacheKeyPrefix, repoOwner, repoName, branchName)
if stampRaw, ok := client.responseCache.Get(cacheKey); ok {
var stamp BranchTimestamp
err := json.Unmarshal(stampRaw.([]byte), &stamp)
if err != nil {
log.Error().Err(err).Bytes("stamp", stampRaw.([]byte)).Msgf("[cache] failed to unmarshal timestamp for: %s", cacheKey)
return &BranchTimestamp{}, ErrorNotFound
}
if stamp.NotFound {
log.Trace().Msgf("[cache] branch %q does not exist", branchName)
return &BranchTimestamp{}, ErrorNotFound
} else {
log.Trace().Msgf("[cache] use branch %q exist", branchName)
// This comes from the refactoring of the caching library.
// The branch as reported by the API was stored in the cache, and I'm not sure if there are
// situations where it differs from the name in the request, hence this is left here.
return &stamp, nil
}
}
branch, resp, err := client.sdkClient.GetRepoBranch(repoOwner, repoName, branchName)
if err != nil {
if resp != nil && resp.StatusCode == http.StatusNotFound {
log.Trace().Msgf("[cache] set cache branch %q not found", branchName)
jsonToCache, err := json.Marshal(BranchTimestamp{NotFound: true})
if err != nil {
log.Error().Err(err).Msgf("[cache] marshaling empty timestamp for '%s' has returned an error", cacheKey)
}
if err := client.responseCache.Set(cacheKey, jsonToCache, branchExistenceCacheTimeout); err != nil {
log.Error().Err(err).Msg("[cache] error on cache write")
}
return &BranchTimestamp{}, ErrorNotFound
}
return &BranchTimestamp{}, err
}
if resp.StatusCode != http.StatusOK {
return &BranchTimestamp{}, fmt.Errorf("unexpected status code '%d'", resp.StatusCode)
}
stamp := &BranchTimestamp{
Branch: branch.Name,
Timestamp: branch.Commit.Timestamp,
}
log.Trace().Msgf("set cache branch [%s] exist", branchName)
jsonToCache, err := json.Marshal(stamp)
if err != nil {
log.Error().Err(err).Msgf("[cache] marshaling timestamp for %q has returned an error", cacheKey)
}
if err := client.responseCache.Set(cacheKey, jsonToCache, branchExistenceCacheTimeout); err != nil {
log.Error().Err(err).Msg("[cache] error on cache write")
}
return stamp, nil
}
func (client *Client) GiteaGetRepoDefaultBranch(repoOwner, repoName string) (string, error) {
cacheKey := fmt.Sprintf("%s/%s/%s", defaultBranchCacheKeyPrefix, repoOwner, repoName)
if branch, ok := client.responseCache.Get(cacheKey); ok {
return string(branch.([]byte)), nil
}
repo, resp, err := client.sdkClient.GetRepo(repoOwner, repoName)
if err != nil {
return "", err
}
if resp.StatusCode != http.StatusOK {
return "", fmt.Errorf("unexpected status code '%d'", resp.StatusCode)
}
branch := repo.DefaultBranch
if err := client.responseCache.Set(cacheKey, []byte(branch), defaultBranchCacheTimeout); err != nil {
log.Error().Err(err).Msg("[cache] error on cache write")
}
return branch, nil
}
func (client *Client) GiteaCheckIfOwnerExists(owner string) (bool, error) {
cacheKey := fmt.Sprintf("%s/%s", ownerExistenceKeyPrefix, owner)
if existRaw, ok := client.responseCache.Get(cacheKey); ok && existRaw != nil {
exist, err := strconv.ParseBool(existRaw.(string))
return exist, err
}
_, resp, err := client.sdkClient.GetUserInfo(owner)
if resp.StatusCode == http.StatusOK && err == nil {
if err := client.responseCache.Set(cacheKey, []byte("true"), ownerExistenceCacheTimeout); err != nil {
log.Error().Err(err).Msg("[cache] error on cache write")
}
return true, nil
} else if resp.StatusCode != http.StatusNotFound {
return false, err
}
_, resp, err = client.sdkClient.GetOrg(owner)
if resp.StatusCode == http.StatusOK && err == nil {
if err := client.responseCache.Set(cacheKey, []byte("true"), ownerExistenceCacheTimeout); err != nil {
log.Error().Err(err).Msg("[cache] error on cache write")
}
return true, nil
} else if resp.StatusCode != http.StatusNotFound {
return false, err
}
if err := client.responseCache.Set(cacheKey, []byte("false"), ownerExistenceCacheTimeout); err != nil {
log.Error().Err(err).Msg("[cache] error on cache write")
}
return false, nil
}
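// extToMime resolves a file extension to its MIME type, substituting the configured default for unknown or forbidden types.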
func (client *Client) extToMime(ext string) string {
mimeType := mime.TypeByExtension(path.Ext(ext))
mimeTypeSplit := strings.SplitN(mimeType, ";", 2)
if client.forbiddenMimeTypes[mimeTypeSplit[0]] || mimeType == "" {
mimeType = client.defaultMimeType
}
log.Trace().Msgf("probe mime of extension '%q' is '%q'", ext, mimeType)
return mimeType
}
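// getMimeTypeByExtension returns both the MIME type of the underlying file and the raw type of the stored blob;
// for compressed resources like "style.css.gz" the former comes from the inner extension and the latter from the compression extension.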
func (client *Client) getMimeTypeByExtension(resource string) (mimeType, rawType string) {
rawExt := path.Ext(resource)
innerExt := rawExt
switch rawExt {
case ".gz", ".br", ".zst":
innerExt = path.Ext(resource[:len(resource)-len(rawExt)])
}
rawType = client.extToMime(rawExt)
mimeType = rawType
if innerExt != rawExt {
mimeType = client.extToMime(innerExt)
}
log.Trace().Msgf("probe mime of %q is (%q / raw %q)", resource, mimeType, rawType)
return mimeType, rawType
}

114
server/handler/handler.go Normal file
View file

@ -0,0 +1,114 @@
package handler
import (
"net/http"
"strings"
"github.com/rs/zerolog/log"
"codeberg.org/codeberg/pages/config"
"codeberg.org/codeberg/pages/html"
"codeberg.org/codeberg/pages/server/cache"
"codeberg.org/codeberg/pages/server/context"
"codeberg.org/codeberg/pages/server/gitea"
)
const (
headerAccessControlAllowOrigin = "Access-Control-Allow-Origin"
headerAccessControlAllowMethods = "Access-Control-Allow-Methods"
defaultPagesRepo = "pages"
)
// Handler handles a single HTTP request to the web server.
func Handler(
cfg config.ServerConfig,
giteaClient *gitea.Client,
canonicalDomainCache, redirectsCache cache.ICache,
) http.HandlerFunc {
return func(w http.ResponseWriter, req *http.Request) {
ctx := context.New(w, req)
log := log.With().Str("ReqId", ctx.ReqId).Strs("Handler", []string{req.Host, req.RequestURI}).Logger()
log.Debug().Msg("\n----------------------------------------------------------")
ctx.RespWriter.Header().Set("Server", "pages-server")
// Force new default from specification (since November 2020) - see https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Referrer-Policy#strict-origin-when-cross-origin
ctx.RespWriter.Header().Set("Referrer-Policy", "strict-origin-when-cross-origin")
// Enable browser caching for up to 10 minutes
ctx.RespWriter.Header().Set("Cache-Control", "public, max-age=600")
trimmedHost := ctx.TrimHostPort()
// Add HSTS for RawDomain and MainDomain
if hsts := getHSTSHeader(trimmedHost, cfg.MainDomain, cfg.RawDomain); hsts != "" {
ctx.RespWriter.Header().Set("Strict-Transport-Security", hsts)
}
// Handle all http methods
ctx.RespWriter.Header().Set("Allow", http.MethodGet+", "+http.MethodHead+", "+http.MethodOptions)
switch ctx.Req.Method {
case http.MethodOptions:
// return Allow header
ctx.RespWriter.WriteHeader(http.StatusNoContent)
return
case http.MethodGet,
http.MethodHead:
// end switch case and handle allowed requests
break
default:
// Block all methods not required for static pages
ctx.String("Method not allowed", http.StatusMethodNotAllowed)
return
}
// Block blacklisted paths (like ACME challenges)
for _, blacklistedPath := range cfg.BlacklistedPaths {
if strings.HasPrefix(ctx.Path(), blacklistedPath) {
html.ReturnErrorPage(ctx, "requested path is blacklisted", http.StatusForbidden)
return
}
}
// Allow CORS for specified domains
allowCors := false
for _, allowedCorsDomain := range cfg.AllowedCorsDomains {
if strings.EqualFold(trimmedHost, allowedCorsDomain) {
allowCors = true
break
}
}
if allowCors {
ctx.RespWriter.Header().Set(headerAccessControlAllowOrigin, "*")
ctx.RespWriter.Header().Set(headerAccessControlAllowMethods, http.MethodGet+", "+http.MethodHead)
}
// Prepare request information to Gitea
pathElements := strings.Split(strings.Trim(ctx.Path(), "/"), "/")
if cfg.RawDomain != "" && strings.EqualFold(trimmedHost, cfg.RawDomain) {
log.Debug().Msg("raw domain request detected")
handleRaw(log, ctx, giteaClient,
cfg.MainDomain,
trimmedHost,
pathElements,
canonicalDomainCache, redirectsCache)
} else if strings.HasSuffix(trimmedHost, cfg.MainDomain) {
log.Debug().Msg("subdomain request detected")
handleSubDomain(log, ctx, giteaClient,
cfg.MainDomain,
cfg.PagesBranches,
trimmedHost,
pathElements,
canonicalDomainCache, redirectsCache)
} else {
log.Debug().Msg("custom domain request detected")
handleCustomDomain(log, ctx, giteaClient,
cfg.MainDomain,
trimmedHost,
pathElements,
cfg.PagesBranches[0],
canonicalDomainCache, redirectsCache)
}
}
}

View file

@ -0,0 +1,72 @@
package handler
import (
"net/http"
"path"
"strings"
"codeberg.org/codeberg/pages/html"
"codeberg.org/codeberg/pages/server/cache"
"codeberg.org/codeberg/pages/server/context"
"codeberg.org/codeberg/pages/server/dns"
"codeberg.org/codeberg/pages/server/gitea"
"codeberg.org/codeberg/pages/server/upstream"
"github.com/rs/zerolog"
)
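// handleCustomDomain serves pages for custom domains: the target owner, repo and branch are resolved via DNS (CNAME/TXT),
// and the request is only served if the domain is allowed by the repo's .domains file.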
func handleCustomDomain(log zerolog.Logger, ctx *context.Context, giteaClient *gitea.Client,
mainDomainSuffix string,
trimmedHost string,
pathElements []string,
firstDefaultBranch string,
canonicalDomainCache, redirectsCache cache.ICache,
) {
// Serve pages from custom domains
targetOwner, targetRepo, targetBranch := dns.GetTargetFromDNS(trimmedHost, mainDomainSuffix, firstDefaultBranch)
if targetOwner == "" {
html.ReturnErrorPage(ctx,
"could not obtain repo owner from custom domain",
http.StatusFailedDependency)
return
}
pathParts := pathElements
canonicalLink := false
if strings.HasPrefix(pathElements[0], "@") {
targetBranch = pathElements[0][1:]
pathParts = pathElements[1:]
canonicalLink = true
}
// Try to use the given repo on the given branch or the default branch
log.Debug().Msg("custom domain preparations, now trying with details from DNS")
if targetOpt, works := tryBranch(log, ctx, giteaClient, &upstream.Options{
TryIndexPages: true,
TargetOwner: targetOwner,
TargetRepo: targetRepo,
TargetBranch: targetBranch,
TargetPath: path.Join(pathParts...),
}, canonicalLink); works {
canonicalDomain, valid := targetOpt.CheckCanonicalDomain(ctx, giteaClient, trimmedHost, mainDomainSuffix, canonicalDomainCache)
if !valid {
html.ReturnErrorPage(ctx, "domain not specified in <code>.domains</code> file", http.StatusMisdirectedRequest)
return
} else if canonicalDomain != trimmedHost {
// only redirect if the target is also a codeberg page!
targetOwner, _, _ = dns.GetTargetFromDNS(strings.SplitN(canonicalDomain, "/", 2)[0], mainDomainSuffix, firstDefaultBranch)
if targetOwner != "" {
ctx.Redirect("https://"+canonicalDomain+"/"+targetOpt.TargetPath, http.StatusTemporaryRedirect)
return
}
html.ReturnErrorPage(ctx, "target is no codeberg page", http.StatusFailedDependency)
return
}
log.Debug().Str("url", trimmedHost).Msg("tryBranch, now trying upstream")
tryUpstream(log, ctx, giteaClient, mainDomainSuffix, trimmedHost, targetOpt, canonicalDomainCache, redirectsCache)
return
}
html.ReturnErrorPage(ctx, "could not find target for custom domain", http.StatusFailedDependency)
}

View file

@ -0,0 +1,71 @@
package handler
import (
"fmt"
"net/http"
"path"
"strings"
"github.com/rs/zerolog"
"codeberg.org/codeberg/pages/html"
"codeberg.org/codeberg/pages/server/cache"
"codeberg.org/codeberg/pages/server/context"
"codeberg.org/codeberg/pages/server/gitea"
"codeberg.org/codeberg/pages/server/upstream"
)
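// handleRaw serves raw repository content from the raw domain, optionally from an explicit branch given as "/@{branch}/" in the path.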
func handleRaw(log zerolog.Logger, ctx *context.Context, giteaClient *gitea.Client,
mainDomainSuffix string,
trimmedHost string,
pathElements []string,
canonicalDomainCache, redirectsCache cache.ICache,
) {
// Serve raw content from RawDomain
log.Debug().Msg("raw domain")
if len(pathElements) < 2 {
html.ReturnErrorPage(
ctx,
"a url in the form of <code>https://{domain}/{owner}/{repo}[/@{branch}]/{path}</code> is required",
http.StatusBadRequest,
)
return
}
// raw.codeberg.org/example/myrepo/@main/index.html
if len(pathElements) > 2 && strings.HasPrefix(pathElements[2], "@") {
log.Debug().Msg("raw domain preparations, now trying with specified branch")
if targetOpt, works := tryBranch(log, ctx, giteaClient, &upstream.Options{
ServeRaw: true,
TargetOwner: pathElements[0],
TargetRepo: pathElements[1],
TargetBranch: pathElements[2][1:],
TargetPath: path.Join(pathElements[3:]...),
}, true); works {
log.Trace().Msg("tryUpstream: serve raw domain with specified branch")
tryUpstream(log, ctx, giteaClient, mainDomainSuffix, trimmedHost, targetOpt, canonicalDomainCache, redirectsCache)
return
}
log.Debug().Msg("missing branch info")
html.ReturnErrorPage(ctx, "missing branch info", http.StatusFailedDependency)
return
}
log.Debug().Msg("raw domain preparations, now trying with default branch")
if targetOpt, works := tryBranch(log, ctx, giteaClient, &upstream.Options{
TryIndexPages: false,
ServeRaw: true,
TargetOwner: pathElements[0],
TargetRepo: pathElements[1],
TargetPath: path.Join(pathElements[2:]...),
}, true); works {
log.Trace().Msg("tryUpstream: serve raw domain with default branch")
tryUpstream(log, ctx, giteaClient, mainDomainSuffix, trimmedHost, targetOpt, canonicalDomainCache, redirectsCache)
} else {
// tryBranch returns nil when it fails, so build the error message from the request path
html.ReturnErrorPage(ctx,
fmt.Sprintf("raw domain could not find repo <code>%s/%s</code> or repo is empty", pathElements[0], pathElements[1]),
http.StatusNotFound)
}
}

View file

@ -0,0 +1,156 @@
package handler
import (
"fmt"
"net/http"
"path"
"strings"
"github.com/rs/zerolog"
"golang.org/x/exp/slices"
"codeberg.org/codeberg/pages/html"
"codeberg.org/codeberg/pages/server/cache"
"codeberg.org/codeberg/pages/server/context"
"codeberg.org/codeberg/pages/server/gitea"
"codeberg.org/codeberg/pages/server/upstream"
)
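// handleSubDomain serves pages from subdomains of the main domain, trying in order: an explicit repo with an explicit branch,
// an explicit branch of the default pages repo, a repo served from a default pages branch, and finally the default pages repo on its default branch.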
func handleSubDomain(log zerolog.Logger, ctx *context.Context, giteaClient *gitea.Client,
mainDomainSuffix string,
defaultPagesBranches []string,
trimmedHost string,
pathElements []string,
canonicalDomainCache, redirectsCache cache.ICache,
) {
// Serve pages from subdomains of MainDomainSuffix
log.Debug().Msg("main domain suffix")
targetOwner := strings.TrimSuffix(trimmedHost, mainDomainSuffix)
targetRepo := pathElements[0]
if targetOwner == "www" {
// www.codeberg.page redirects to codeberg.page // TODO: rm hardcoded - use cname?
ctx.Redirect("https://"+mainDomainSuffix[1:]+ctx.Path(), http.StatusPermanentRedirect)
return
}
// Check if the first directory is a repo with the second directory as a branch
// example.codeberg.page/myrepo/@main/index.html
if len(pathElements) > 1 && strings.HasPrefix(pathElements[1], "@") {
if targetRepo == defaultPagesRepo {
// example.codeberg.org/pages/@... redirects to example.codeberg.org/@...
ctx.Redirect("/"+strings.Join(pathElements[1:], "/"), http.StatusTemporaryRedirect)
return
}
log.Debug().Msg("main domain preparations, now trying with specified repo & branch")
if targetOpt, works := tryBranch(log, ctx, giteaClient, &upstream.Options{
TryIndexPages: true,
TargetOwner: targetOwner,
TargetRepo: pathElements[0],
TargetBranch: pathElements[1][1:],
TargetPath: path.Join(pathElements[2:]...),
}, true); works {
log.Trace().Msg("tryUpstream: serve with specified repo and branch")
tryUpstream(log, ctx, giteaClient, mainDomainSuffix, trimmedHost, targetOpt, canonicalDomainCache, redirectsCache)
} else {
html.ReturnErrorPage(
ctx,
formatSetBranchNotFoundMessage(pathElements[1][1:], targetOwner, pathElements[0]),
http.StatusFailedDependency,
)
}
return
}
// Check if the first directory is a branch for the defaultPagesRepo
// example.codeberg.page/@main/index.html
if strings.HasPrefix(pathElements[0], "@") {
targetBranch := pathElements[0][1:]
// if the default pages branch can be determined exactly, it does not need to be set
if len(defaultPagesBranches) == 1 && slices.Contains(defaultPagesBranches, targetBranch) {
// example.codeberg.org/@pages/... redirects to example.codeberg.org/...
ctx.Redirect("/"+strings.Join(pathElements[1:], "/"), http.StatusTemporaryRedirect)
return
}
log.Debug().Msg("main domain preparations, now trying with specified branch")
if targetOpt, works := tryBranch(log, ctx, giteaClient, &upstream.Options{
TryIndexPages: true,
TargetOwner: targetOwner,
TargetRepo: defaultPagesRepo,
TargetBranch: targetBranch,
TargetPath: path.Join(pathElements[1:]...),
}, true); works {
log.Trace().Msg("tryUpstream: serve default pages repo with specified branch")
tryUpstream(log, ctx, giteaClient, mainDomainSuffix, trimmedHost, targetOpt, canonicalDomainCache, redirectsCache)
} else {
html.ReturnErrorPage(
ctx,
formatSetBranchNotFoundMessage(targetBranch, targetOwner, defaultPagesRepo),
http.StatusFailedDependency,
)
}
return
}
for _, defaultPagesBranch := range defaultPagesBranches {
// Check if the first directory is a repo with a default pages branch
// example.codeberg.page/myrepo/index.html
// example.codeberg.page/{defaultPagesBranch}/... is not allowed here.
log.Debug().Msg("main domain preparations, now trying with specified repo")
if pathElements[0] != defaultPagesBranch {
if targetOpt, works := tryBranch(log, ctx, giteaClient, &upstream.Options{
TryIndexPages: true,
TargetOwner: targetOwner,
TargetRepo: pathElements[0],
TargetBranch: defaultPagesBranch,
TargetPath: path.Join(pathElements[1:]...),
}, false); works {
log.Debug().Msg("tryBranch, now trying upstream 5")
tryUpstream(log, ctx, giteaClient, mainDomainSuffix, trimmedHost, targetOpt, canonicalDomainCache, redirectsCache)
return
}
}
// Try to use the defaultPagesRepo on a default pages branch
// example.codeberg.page/index.html
log.Debug().Msg("main domain preparations, now trying with default repo")
if targetOpt, works := tryBranch(log, ctx, giteaClient, &upstream.Options{
TryIndexPages: true,
TargetOwner: targetOwner,
TargetRepo: defaultPagesRepo,
TargetBranch: defaultPagesBranch,
TargetPath: path.Join(pathElements...),
}, false); works {
log.Debug().Msg("tryBranch, now trying upstream 6")
tryUpstream(log, ctx, giteaClient, mainDomainSuffix, trimmedHost, targetOpt, canonicalDomainCache, redirectsCache)
return
}
}
// Try to use the defaultPagesRepo on its default branch
// example.codeberg.page/index.html
log.Debug().Msg("main domain preparations, now trying with default repo/branch")
if targetOpt, works := tryBranch(log, ctx, giteaClient, &upstream.Options{
TryIndexPages: true,
TargetOwner: targetOwner,
TargetRepo: defaultPagesRepo,
TargetPath: path.Join(pathElements...),
}, false); works {
log.Debug().Msg("tryBranch, now trying upstream 6")
tryUpstream(log, ctx, giteaClient, mainDomainSuffix, trimmedHost, targetOpt, canonicalDomainCache, redirectsCache)
return
}
// Couldn't find a valid repo/branch
html.ReturnErrorPage(ctx,
fmt.Sprintf("could not find a valid repository or branch for repository: <code>%s</code>", targetRepo),
http.StatusNotFound)
}
func formatSetBranchNotFoundMessage(branch, owner, repo string) string {
return fmt.Sprintf("explicitly set branch <code>%q</code> does not exist at <code>%s/%s</code>", branch, owner, repo)
}

View file

@ -0,0 +1,58 @@
package handler
import (
"net/http"
"net/http/httptest"
"testing"
"time"
"codeberg.org/codeberg/pages/config"
"codeberg.org/codeberg/pages/server/cache"
"codeberg.org/codeberg/pages/server/gitea"
"github.com/rs/zerolog/log"
)
func TestHandlerPerformance(t *testing.T) {
cfg := config.ForgeConfig{
Root: "https://codeberg.org",
Token: "",
LFSEnabled: false,
FollowSymlinks: false,
}
giteaClient, _ := gitea.NewClient(cfg, cache.NewInMemoryCache())
serverCfg := config.ServerConfig{
MainDomain: "codeberg.page",
RawDomain: "raw.codeberg.page",
BlacklistedPaths: []string{
"/.well-known/acme-challenge/",
},
AllowedCorsDomains: []string{"raw.codeberg.org", "fonts.codeberg.org", "design.codeberg.org"},
PagesBranches: []string{"pages"},
}
testHandler := Handler(serverCfg, giteaClient, cache.NewInMemoryCache(), cache.NewInMemoryCache())
testCase := func(uri string, status int) {
t.Run(uri, func(t *testing.T) {
req := httptest.NewRequest("GET", uri, http.NoBody)
w := httptest.NewRecorder()
log.Printf("Start: %v\n", time.Now())
start := time.Now()
testHandler(w, req)
end := time.Now()
log.Printf("Done: %v\n", time.Now())
resp := w.Result()
if resp.StatusCode != status {
t.Errorf("request failed with status code %d", resp.StatusCode)
} else {
t.Logf("request took %d milliseconds", end.Sub(start).Milliseconds())
}
})
}
testCase("https://mondstern.codeberg.page/", 404) // TODO: expect 200
testCase("https://codeberg.page/", 404) // TODO: expect 200
testCase("https://example.momar.xyz/", 424)
}

15
server/handler/hsts.go Normal file
View file

@ -0,0 +1,15 @@
package handler
import (
"strings"
)
// getHSTSHeader returns a HSTS header with includeSubdomains & preload for MainDomainSuffix and RawDomain, or an empty
// string for custom domains.
func getHSTSHeader(host, mainDomainSuffix, rawDomain string) string {
if strings.HasSuffix(host, mainDomainSuffix) || strings.EqualFold(host, rawDomain) {
return "max-age=63072000; includeSubdomains; preload"
} else {
return ""
}
}

84
server/handler/try.go Normal file
View file

@ -0,0 +1,84 @@
package handler
import (
"fmt"
"net/http"
"strings"
"github.com/rs/zerolog"
"codeberg.org/codeberg/pages/html"
"codeberg.org/codeberg/pages/server/cache"
"codeberg.org/codeberg/pages/server/context"
"codeberg.org/codeberg/pages/server/gitea"
"codeberg.org/codeberg/pages/server/upstream"
)
// tryUpstream forwards the target request to the Gitea API, and shows an error page on failure.
func tryUpstream(log zerolog.Logger, ctx *context.Context, giteaClient *gitea.Client,
mainDomainSuffix, trimmedHost string,
options *upstream.Options,
canonicalDomainCache cache.ICache,
redirectsCache cache.ICache,
) {
// check if a canonical domain exists on a request on MainDomain
if strings.HasSuffix(trimmedHost, mainDomainSuffix) && !options.ServeRaw {
canonicalDomain, _ := options.CheckCanonicalDomain(ctx, giteaClient, "", mainDomainSuffix, canonicalDomainCache)
if !strings.HasSuffix(strings.SplitN(canonicalDomain, "/", 2)[0], mainDomainSuffix) {
canonicalPath := ctx.Req.RequestURI
if options.TargetRepo != defaultPagesRepo {
path := strings.SplitN(canonicalPath, "/", 3)
if len(path) >= 3 {
canonicalPath = "/" + path[2]
}
}
redirect_to := "https://" + canonicalDomain + canonicalPath
log.Debug().Str("to", redirect_to).Msg("redirecting")
ctx.Redirect(redirect_to, http.StatusTemporaryRedirect)
return
}
}
// Add host for debugging.
options.Host = trimmedHost
// Try to request the file from the Gitea API
log.Debug().Msg("requesting from upstream")
if !options.Upstream(ctx, giteaClient, redirectsCache) {
html.ReturnErrorPage(ctx, fmt.Sprintf("Forge returned %d %s", ctx.StatusCode, http.StatusText(ctx.StatusCode)), ctx.StatusCode)
}
}
// tryBranch checks if a branch exists and populates the target variables. If canonicalLink is non-empty,
// it will also disallow search indexing and add a Link header to the canonical URL.
func tryBranch(log zerolog.Logger, ctx *context.Context, giteaClient *gitea.Client,
targetOptions *upstream.Options, canonicalLink bool,
) (*upstream.Options, bool) {
if targetOptions.TargetOwner == "" || targetOptions.TargetRepo == "" {
log.Debug().Msg("tryBranch: owner or repo is empty")
return nil, false
}
// Replace "~" to "/" so we can access branch that contains slash character
// Branch name cannot contain "~" so doing this is okay
targetOptions.TargetBranch = strings.ReplaceAll(targetOptions.TargetBranch, "~", "/")
// Check if the branch exists, otherwise treat it as a file path
branchExist, _ := targetOptions.GetBranchTimestamp(giteaClient)
if !branchExist {
log.Debug().Msg("tryBranch: branch doesn't exist")
return nil, false
}
if canonicalLink {
// Hide from search machines & add canonical link
ctx.RespWriter.Header().Set("X-Robots-Tag", "noarchive, noindex")
ctx.RespWriter.Header().Set("Link", targetOptions.ContentWebLink(giteaClient)+"; rel=\"canonical\"")
}
log.Debug().Msg("tryBranch: true")
return targetOptions, true
}

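As a worked example of the "~" convention handled in tryBranch (branch name invented): a branch that contains a slash is addressed with "~" in the URL and mapped back before the forge lookup.

// Hypothetical: the URL segment "@docs~v2" selects the branch "docs/v2";
// this is unambiguous because "~" is not a legal character in Git branch names.
branch := strings.ReplaceAll("docs~v2", "~", "/") // "docs/v2"
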
21
server/profiling.go Normal file
View file

@ -0,0 +1,21 @@
package server
import (
"net/http"
_ "net/http/pprof"
"github.com/rs/zerolog/log"
)
func StartProfilingServer(listeningAddress string) {
server := &http.Server{
Addr: listeningAddress,
Handler: http.DefaultServeMux,
}
log.Info().Msgf("Starting debug server on %s", listeningAddress)
go func() {
log.Fatal().Err(server.ListenAndServe()).Msg("Failed to start debug server")
}()
}

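The blank import of net/http/pprof registers the standard profiling handlers on http.DefaultServeMux, so the debug server exposes the usual /debug/pprof/ endpoints. A minimal sketch of reading one of them, assuming the profiling address was set to localhost:6060 (use whatever address was passed to StartProfilingServer):

// Hypothetical address; substitute the configured profiling address.
resp, err := http.Get("http://localhost:6060/debug/pprof/goroutine?debug=1")
if err != nil {
	log.Error().Err(err).Msg("could not query pprof endpoint")
	return
}
defer resp.Body.Close()
_, _ = io.Copy(os.Stdout, resp.Body) // dump the goroutine profile as text
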
145
server/startup.go Normal file
View file

@ -0,0 +1,145 @@
package server
import (
"context"
"crypto/tls"
"fmt"
"net"
"net/http"
"os"
"strings"
"time"
"github.com/pires/go-proxyproto"
"github.com/rs/zerolog"
"github.com/rs/zerolog/log"
"github.com/urfave/cli/v2"
cmd "codeberg.org/codeberg/pages/cli"
"codeberg.org/codeberg/pages/config"
"codeberg.org/codeberg/pages/server/acme"
"codeberg.org/codeberg/pages/server/cache"
"codeberg.org/codeberg/pages/server/certificates"
"codeberg.org/codeberg/pages/server/gitea"
"codeberg.org/codeberg/pages/server/handler"
)
// Serve sets up and starts the web server.
func Serve(ctx *cli.Context) error {
// initialize logger with Trace, overridden later with actual level
log.Logger = zerolog.New(zerolog.ConsoleWriter{Out: os.Stderr}).With().Timestamp().Caller().Logger().Level(zerolog.TraceLevel)
cfg, err := config.ReadConfig(ctx)
if err != nil {
log.Error().Err(err).Msg("could not read config")
}
config.MergeConfig(ctx, cfg)
// Initialize the logger.
logLevel, err := zerolog.ParseLevel(cfg.LogLevel)
if err != nil {
return err
}
fmt.Printf("Setting log level to: %s\n", logLevel)
log.Logger = zerolog.New(zerolog.ConsoleWriter{Out: os.Stderr}).With().Timestamp().Caller().Logger().Level(logLevel)
listeningSSLAddress := fmt.Sprintf("%s:%d", cfg.Server.Host, cfg.Server.Port)
listeningHTTPAddress := fmt.Sprintf("%s:%d", cfg.Server.Host, cfg.Server.HttpPort)
if cfg.Server.RawDomain != "" {
cfg.Server.AllowedCorsDomains = append(cfg.Server.AllowedCorsDomains, cfg.Server.RawDomain)
}
// Make sure MainDomain has a leading dot
if !strings.HasPrefix(cfg.Server.MainDomain, ".") {
// TODO make this better
cfg.Server.MainDomain = "." + cfg.Server.MainDomain
}
if len(cfg.Server.PagesBranches) == 0 {
return fmt.Errorf("no default branches set (PAGES_BRANCHES)")
}
// Init ssl cert database
certDB, closeFn, err := cmd.OpenCertDB(ctx)
if err != nil {
return err
}
defer closeFn()
challengeCache := cache.NewInMemoryCache()
// canonicalDomainCache stores canonical domains
canonicalDomainCache := cache.NewInMemoryCache()
// redirectsCache stores redirects in _redirects files
redirectsCache := cache.NewInMemoryCache()
// clientResponseCache stores responses from the Gitea server
clientResponseCache := cache.NewInMemoryCache()
giteaClient, err := gitea.NewClient(cfg.Forge, clientResponseCache)
if err != nil {
return fmt.Errorf("could not create new gitea client: %v", err)
}
acmeClient, err := acme.CreateAcmeClient(cfg.ACME, cfg.Server.HttpServerEnabled, challengeCache)
if err != nil {
return err
}
if err := certificates.SetupMainDomainCertificates(log.Logger, cfg.Server.MainDomain, acmeClient, certDB); err != nil {
return err
}
// Create listener for SSL connections
log.Info().Msgf("Create TCP listener for SSL on %s", listeningSSLAddress)
listener, err := net.Listen("tcp", listeningSSLAddress)
if err != nil {
return fmt.Errorf("couldn't create listener: %v", err)
}
if cfg.Server.UseProxyProtocol {
listener = &proxyproto.Listener{Listener: listener}
}
// Wrap the listener with TLS
listener = tls.NewListener(listener, certificates.TLSConfig(
cfg.Server.MainDomain,
giteaClient,
acmeClient,
cfg.Server.PagesBranches[0],
challengeCache, canonicalDomainCache,
certDB,
cfg.ACME.NoDNS01,
cfg.Server.RawDomain,
))
interval := 12 * time.Hour
certMaintainCtx, cancelCertMaintain := context.WithCancel(context.Background())
defer cancelCertMaintain()
go certificates.MaintainCertDB(log.Logger, certMaintainCtx, interval, acmeClient, cfg.Server.MainDomain, certDB)
if cfg.Server.HttpServerEnabled {
// Create handler for http->https redirect and http acme challenges
httpHandler := certificates.SetupHTTPACMEChallengeServer(challengeCache, uint(cfg.Server.Port))
// Create listener for http and start listening
go func() {
log.Info().Msgf("Start HTTP server listening on %s", listeningHTTPAddress)
err := http.ListenAndServe(listeningHTTPAddress, httpHandler)
if err != nil {
log.Error().Err(err).Msg("Couldn't start HTTP server")
}
}()
}
if ctx.IsSet("enable-profiling") {
StartProfilingServer(ctx.String("profiling-address"))
}
// Create ssl handler based on settings
sslHandler := handler.Handler(cfg.Server, giteaClient, canonicalDomainCache, redirectsCache)
// Start the ssl listener
log.Info().Msgf("Start SSL server using TCP listener on %s", listener.Addr())
return http.Serve(listener, sslHandler)
}

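The leading dot forced onto MainDomain matters for the suffix checks used across the server: without it, any host that merely ends in the pages domain would be treated as a pages subdomain. A small illustration with invented hostnames:

// With the normalized suffix ".codeberg.page":
strings.HasSuffix("example.codeberg.page", ".codeberg.page") // true, a pages subdomain
strings.HasSuffix("not-codeberg.page", ".codeberg.page")     // false, although it ends in the bare "codeberg.page"
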
View file

@ -0,0 +1,71 @@
package upstream
import (
"errors"
"strings"
"time"
"github.com/rs/zerolog/log"
"codeberg.org/codeberg/pages/server/cache"
"codeberg.org/codeberg/pages/server/context"
"codeberg.org/codeberg/pages/server/gitea"
)
// canonicalDomainCacheTimeout specifies the timeout for the canonical domain cache.
var canonicalDomainCacheTimeout = 15 * time.Minute
const canonicalDomainConfig = ".domains"
// CheckCanonicalDomain returns the canonical domain specified in the repo (using the `.domains` file).
func (o *Options) CheckCanonicalDomain(ctx *context.Context, giteaClient *gitea.Client, actualDomain, mainDomainSuffix string, canonicalDomainCache cache.ICache) (domain string, valid bool) {
// Check if this request is cached.
if cachedValue, ok := canonicalDomainCache.Get(o.TargetOwner + "/" + o.TargetRepo + "/" + o.TargetBranch); ok {
domains := cachedValue.([]string)
for _, domain := range domains {
if domain == actualDomain {
valid = true
break
}
}
return domains[0], valid
}
body, err := giteaClient.GiteaRawContent(ctx, o.TargetOwner, o.TargetRepo, o.TargetBranch, canonicalDomainConfig)
if err != nil && !errors.Is(err, gitea.ErrorNotFound) {
log.Error().Err(err).Msgf("could not read %s of %s/%s", canonicalDomainConfig, o.TargetOwner, o.TargetRepo)
}
var domains []string
for _, domain := range strings.Split(string(body), "\n") {
domain = strings.ToLower(domain)
domain = strings.TrimSpace(domain)
domain = strings.TrimPrefix(domain, "http://")
domain = strings.TrimPrefix(domain, "https://")
if domain != "" && !strings.HasPrefix(domain, "#") && !strings.ContainsAny(domain, "\t /") && strings.ContainsRune(domain, '.') {
domains = append(domains, domain)
}
if domain == actualDomain {
valid = true
}
}
// Add [owner].[pages-domain] as valid domain.
domains = append(domains, o.TargetOwner+mainDomainSuffix)
if domains[len(domains)-1] == actualDomain {
valid = true
}
// If the target repository isn't called pages, add `/[repository]` to the
// previous valid domain.
if o.TargetRepo != "" && o.TargetRepo != "pages" {
domains[len(domains)-1] += "/" + o.TargetRepo
}
// Add result to cache.
_ = canonicalDomainCache.Set(o.TargetOwner+"/"+o.TargetRepo+"/"+o.TargetBranch, domains, canonicalDomainCacheTimeout)
// Return the first domain from the list and return if any of the domains
// matched the requested domain.
return domains[0], valid
}

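For illustration, a hypothetical .domains file and what the parser above keeps for the default pages repository (owner and domains invented): comments and entries containing whitespace or a path are dropped, protocol prefixes are stripped, and [owner].[pages-domain] is always appended as a fallback.

// Hypothetical .domains content for owner "alice" with main domain suffix ".codeberg.page":
domainsFile := `# the first entry becomes the canonical domain
www.example.org
https://blog.example.org`
// After parsing, the cached list would be:
//   ["www.example.org", "blog.example.org", "alice.codeberg.page"]
_ = domainsFile
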
31
server/upstream/header.go Normal file
View file

@ -0,0 +1,31 @@
package upstream
import (
"net/http"
"time"
"codeberg.org/codeberg/pages/server/context"
"codeberg.org/codeberg/pages/server/gitea"
)
// setHeader copies relevant headers from the forge response to the client response and sets Last-Modified.
func (o *Options) setHeader(ctx *context.Context, header http.Header) {
if eTag := header.Get(gitea.ETagHeader); eTag != "" {
ctx.RespWriter.Header().Set(gitea.ETagHeader, eTag)
}
if cacheIndicator := header.Get(gitea.PagesCacheIndicatorHeader); cacheIndicator != "" {
ctx.RespWriter.Header().Set(gitea.PagesCacheIndicatorHeader, cacheIndicator)
}
if length := header.Get(gitea.ContentLengthHeader); length != "" {
ctx.RespWriter.Header().Set(gitea.ContentLengthHeader, length)
}
if mime := header.Get(gitea.ContentTypeHeader); mime == "" || o.ServeRaw {
ctx.RespWriter.Header().Set(gitea.ContentTypeHeader, rawMime)
} else {
ctx.RespWriter.Header().Set(gitea.ContentTypeHeader, mime)
}
if encoding := header.Get(gitea.ContentEncodingHeader); encoding != "" && encoding != "identity" {
ctx.RespWriter.Header().Set(gitea.ContentEncodingHeader, encoding)
}
ctx.RespWriter.Header().Set(headerLastModified, o.BranchTimestamp.In(time.UTC).Format(http.TimeFormat))
}

47
server/upstream/helper.go Normal file
View file

@ -0,0 +1,47 @@
package upstream
import (
"errors"
"fmt"
"github.com/rs/zerolog/log"
"codeberg.org/codeberg/pages/server/gitea"
)
// GetBranchTimestamp finds the default branch (if branch is "") and saves the branch and its last modification time to Options.
func (o *Options) GetBranchTimestamp(giteaClient *gitea.Client) (bool, error) {
log := log.With().Strs("BranchInfo", []string{o.TargetOwner, o.TargetRepo, o.TargetBranch}).Logger()
if o.TargetBranch == "" {
// Get default branch
defaultBranch, err := giteaClient.GiteaGetRepoDefaultBranch(o.TargetOwner, o.TargetRepo)
if err != nil {
log.Err(err).Msg("Couldn't fetch default branch from repository")
return false, err
}
log.Debug().Msgf("Successfully fetched default branch %q from Gitea", defaultBranch)
o.TargetBranch = defaultBranch
}
timestamp, err := giteaClient.GiteaGetRepoBranchTimestamp(o.TargetOwner, o.TargetRepo, o.TargetBranch)
if err != nil {
if !errors.Is(err, gitea.ErrorNotFound) {
log.Error().Err(err).Msg("Could not get latest commit timestamp from branch")
}
return false, err
}
if timestamp == nil || timestamp.Branch == "" {
return false, fmt.Errorf("empty response")
}
log.Debug().Msgf("Successfully fetched latest commit timestamp from branch: %#v", timestamp)
o.BranchTimestamp = timestamp.Timestamp
o.TargetBranch = timestamp.Branch
return true, nil
}
// ContentWebLink returns the forge web link to the served content; the caller appends the rel="canonical" parameter.
func (o *Options) ContentWebLink(giteaClient *gitea.Client) string {
return giteaClient.ContentWebLink(o.TargetOwner, o.TargetRepo, o.TargetBranch, o.TargetPath)
}

View file

@ -0,0 +1,108 @@
package upstream
import (
"strconv"
"strings"
"time"
"codeberg.org/codeberg/pages/server/cache"
"codeberg.org/codeberg/pages/server/context"
"codeberg.org/codeberg/pages/server/gitea"
"github.com/rs/zerolog/log"
)
type Redirect struct {
From string
To string
StatusCode int
}
// rewriteURL returns the destination URL and true if r matches reqURL.
func (r *Redirect) rewriteURL(reqURL string) (dstURL string, ok bool) {
// check if from url matches request url
if strings.TrimSuffix(r.From, "/") == strings.TrimSuffix(reqURL, "/") {
return r.To, true
}
// handle wildcard redirects
if strings.HasSuffix(r.From, "/*") {
trimmedFromURL := strings.TrimSuffix(r.From, "/*")
if reqURL == trimmedFromURL || strings.HasPrefix(reqURL, trimmedFromURL+"/") {
if strings.Contains(r.To, ":splat") {
matched := strings.TrimPrefix(reqURL, trimmedFromURL)
matched = strings.TrimPrefix(matched, "/")
return strings.ReplaceAll(r.To, ":splat", matched), true
}
return r.To, true
}
}
return "", false
}
// redirectsCacheTimeout specifies the timeout for the redirects cache.
var redirectsCacheTimeout = 10 * time.Minute
const redirectsConfig = "_redirects"
// getRedirects returns redirects specified in the _redirects file.
func (o *Options) getRedirects(ctx *context.Context, giteaClient *gitea.Client, redirectsCache cache.ICache) []Redirect {
var redirects []Redirect
cacheKey := o.TargetOwner + "/" + o.TargetRepo + "/" + o.TargetBranch
// Check for cached redirects
if cachedValue, ok := redirectsCache.Get(cacheKey); ok {
redirects = cachedValue.([]Redirect)
} else {
// Get _redirects file and parse
body, err := giteaClient.GiteaRawContent(ctx, o.TargetOwner, o.TargetRepo, o.TargetBranch, redirectsConfig)
if err == nil {
for _, line := range strings.Split(string(body), "\n") {
redirectArr := strings.Fields(line)
// Ignore comments and invalid lines
if strings.HasPrefix(line, "#") || len(redirectArr) < 2 {
continue
}
// Get redirect status code
statusCode := 301
if len(redirectArr) == 3 {
statusCode, err = strconv.Atoi(redirectArr[2])
if err != nil {
log.Info().Err(err).Msgf("could not read %s of %s/%s", redirectsConfig, o.TargetOwner, o.TargetRepo)
}
}
redirects = append(redirects, Redirect{
From: redirectArr[0],
To: redirectArr[1],
StatusCode: statusCode,
})
}
}
_ = redirectsCache.Set(cacheKey, redirects, redirectsCacheTimeout)
}
return redirects
}
func (o *Options) matchRedirects(ctx *context.Context, giteaClient *gitea.Client, redirects []Redirect, redirectsCache cache.ICache) (final bool) {
reqURL := ctx.Req.RequestURI
// remove repo and branch from request url
reqURL = strings.TrimPrefix(reqURL, "/"+o.TargetRepo)
reqURL = strings.TrimPrefix(reqURL, "/@"+o.TargetBranch)
for _, redirect := range redirects {
if dstURL, ok := redirect.rewriteURL(reqURL); ok {
if o.TargetPath == dstURL { // recursion base case, rewrite directly when paths are the same
return true
} else if redirect.StatusCode == 200 { // do rewrite if status code is 200
o.TargetPath = dstURL
o.Upstream(ctx, giteaClient, redirectsCache)
} else {
ctx.Redirect(dstURL, redirect.StatusCode)
}
return true
}
}
return false
}

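For illustration, a hypothetical _redirects file in the format parsed above: one rule per line with from, to and an optional status code (301 if omitted), "#" starting a comment, status 200 meaning an internal rewrite, and "/*" together with ":splat" forwarding whole subtrees.

// Hypothetical _redirects content (paths and hosts invented):
redirectsFile := `# old entry point
/home     /                                301
/api/*    https://api.example.org/:splat   302
/app/*    /app/index.html                  200`
// "/home" answers with a 301, "/api/<anything>" redirects with the matched tail,
// and "/app/..." is rewritten internally (status 200) without changing the URL.
_ = redirectsFile
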
View file

@ -0,0 +1,36 @@
package upstream
import (
"testing"
)
func TestRedirect_rewriteURL(t *testing.T) {
for _, tc := range []struct {
redirect Redirect
reqURL string
wantDstURL string
wantOk bool
}{
{Redirect{"/", "/dst", 200}, "/", "/dst", true},
{Redirect{"/", "/dst", 200}, "/foo", "", false},
{Redirect{"/src", "/dst", 200}, "/src", "/dst", true},
{Redirect{"/src", "/dst", 200}, "/foo", "", false},
{Redirect{"/src", "/dst", 200}, "/src/foo", "", false},
{Redirect{"/*", "/dst", 200}, "/", "/dst", true},
{Redirect{"/*", "/dst", 200}, "/src", "/dst", true},
{Redirect{"/src/*", "/dst/:splat", 200}, "/src", "/dst/", true},
{Redirect{"/src/*", "/dst/:splat", 200}, "/src/", "/dst/", true},
{Redirect{"/src/*", "/dst/:splat", 200}, "/src/foo", "/dst/foo", true},
{Redirect{"/src/*", "/dst/:splat", 200}, "/src/foo/bar", "/dst/foo/bar", true},
{Redirect{"/src/*", "/dst/:splatsuffix", 200}, "/src/foo", "/dst/foosuffix", true},
{Redirect{"/src/*", "/dst:splat", 200}, "/src/foo", "/dstfoo", true},
{Redirect{"/src/*", "/dst", 200}, "/srcfoo", "", false},
// This is the example from FEATURES.md:
{Redirect{"/articles/*", "/posts/:splat", 302}, "/articles/2022/10/12/post-1/", "/posts/2022/10/12/post-1/", true},
} {
if dstURL, ok := tc.redirect.rewriteURL(tc.reqURL); dstURL != tc.wantDstURL || ok != tc.wantOk {
t.Errorf("%#v.rewriteURL(%q) = %q, %v; want %q, %v",
tc.redirect, tc.reqURL, dstURL, ok, tc.wantDstURL, tc.wantOk)
}
}
}

318
server/upstream/upstream.go Normal file
View file

@ -0,0 +1,318 @@
package upstream
import (
"cmp"
"errors"
"fmt"
"io"
"net/http"
"slices"
"strconv"
"strings"
"time"
"github.com/rs/zerolog/log"
"codeberg.org/codeberg/pages/html"
"codeberg.org/codeberg/pages/server/cache"
"codeberg.org/codeberg/pages/server/context"
"codeberg.org/codeberg/pages/server/gitea"
)
const (
headerLastModified = "Last-Modified"
headerIfModifiedSince = "If-Modified-Since"
headerAcceptEncoding = "Accept-Encoding"
headerContentEncoding = "Content-Encoding"
rawMime = "text/plain; charset=utf-8"
)
// upstreamIndexPages lists pages that may be considered as index pages for directories.
var upstreamIndexPages = []string{
"index.html",
}
// upstreamNotFoundPages lists pages that may be considered as custom 404 Not Found pages.
var upstreamNotFoundPages = []string{
"404.html",
}
// Options provides various options for the upstream request.
type Options struct {
TargetOwner string
TargetRepo string
TargetBranch string
TargetPath string
// Used for debugging purposes.
Host string
TryIndexPages bool
BranchTimestamp time.Time
// internal
appendTrailingSlash bool
redirectIfExists string
ServeRaw bool
}
// allowed encodings
var allowedEncodings = map[string]string{
"gzip": ".gz",
"br": ".br",
"zstd": ".zst",
"identity": "",
}
// AcceptEncodings parses the Accept-Encoding header into the list of acceptable encodings, ordered by client preference.
func AcceptEncodings(header string) []string {
log.Trace().Msgf("got accept-encoding: %s", header)
encodings := []string{}
globQuality := 0.0
qualities := make(map[string]float64)
for _, encoding := range strings.Split(header, ",") {
name, qualityStr, hasQuality := strings.Cut(encoding, ";q=")
quality := 1.0
if hasQuality {
var err error
quality, err = strconv.ParseFloat(qualityStr, 64)
if err != nil || quality < 0 {
continue
}
}
name = strings.TrimSpace(name)
if name == "*" {
globQuality = quality
} else {
_, allowed := allowedEncodings[name]
if allowed {
qualities[name] = quality
if quality > 0 {
encodings = append(encodings, name)
}
}
}
}
if globQuality > 0 {
for encoding := range allowedEncodings {
_, exists := qualities[encoding]
if !exists {
encodings = append(encodings, encoding)
qualities[encoding] = globQuality
}
}
} else {
_, exists := qualities["identity"]
if !exists {
encodings = append(encodings, "identity")
qualities["identity"] = -1
}
}
slices.SortStableFunc(encodings, func(x, y string) int {
// sort in reverse order; big quality comes first
return cmp.Compare(qualities[y], qualities[x])
})
log.Trace().Msgf("decided encoding order: %v", encodings)
return encodings
}
// Upstream requests a file from the Gitea API at GiteaRoot and writes it to the request context.
func (o *Options) Upstream(ctx *context.Context, giteaClient *gitea.Client, redirectsCache cache.ICache) bool {
log := log.With().Str("ReqId", ctx.ReqId).Strs("upstream", []string{o.TargetOwner, o.TargetRepo, o.TargetBranch, o.TargetPath}).Logger()
log.Debug().Msg("Start")
if o.TargetOwner == "" || o.TargetRepo == "" {
html.ReturnErrorPage(ctx, "forge client: either repo owner or name info is missing", http.StatusBadRequest)
return true
}
// Check if the branch exists and when it was modified
if o.BranchTimestamp.IsZero() {
branchExist, err := o.GetBranchTimestamp(giteaClient)
// handle 404
if err != nil && errors.Is(err, gitea.ErrorNotFound) || !branchExist {
html.ReturnErrorPage(ctx,
fmt.Sprintf("branch <code>%q</code> for <code>%s/%s</code> not found", o.TargetBranch, o.TargetOwner, o.TargetRepo),
http.StatusNotFound)
return true
}
// handle unexpected errors
if err != nil {
html.ReturnErrorPage(ctx,
fmt.Sprintf("could not get timestamp of branch <code>%q</code>: '%v'", o.TargetBranch, err),
http.StatusFailedDependency)
return true
}
}
// Check if the browser has a cached version
if ctx.Response() != nil {
if ifModifiedSince, err := time.Parse(time.RFC1123, ctx.Response().Header.Get(headerIfModifiedSince)); err == nil {
if ifModifiedSince.After(o.BranchTimestamp) {
ctx.RespWriter.WriteHeader(http.StatusNotModified)
log.Trace().Msg("check response against last modified: valid")
return true
}
}
log.Trace().Msg("check response against last modified: outdated")
}
log.Debug().Msg("Preparing")
var reader io.ReadCloser
var header http.Header
var statusCode int
var err error
// try the accepted encodings and pick the first non-404 response, but only when the path points to a file (directory paths fall through to index handling)
if o.TargetPath == "" || strings.HasSuffix(o.TargetPath, "/") {
err = gitea.ErrorNotFound
} else {
for _, encoding := range AcceptEncodings(ctx.Req.Header.Get(headerAcceptEncoding)) {
log.Trace().Msgf("try %s encoding", encoding)
// add extension for encoding
path := o.TargetPath + allowedEncodings[encoding]
reader, header, statusCode, err = giteaClient.ServeRawContent(ctx, o.TargetOwner, o.TargetRepo, o.TargetBranch, path, true)
if statusCode == http.StatusNotFound {
continue
}
if err != nil {
break
}
log.Debug().Msgf("using %s encoding", encoding)
if encoding != "identity" {
header.Set(headerContentEncoding, encoding)
}
break
}
if reader != nil {
defer reader.Close()
}
}
log.Debug().Msg("Aquisting")
// Handle not found error
if err != nil && errors.Is(err, gitea.ErrorNotFound) {
log.Debug().Msg("Handling not found error")
// Get and match redirects
redirects := o.getRedirects(ctx, giteaClient, redirectsCache)
if o.matchRedirects(ctx, giteaClient, redirects, redirectsCache) {
log.Trace().Msg("redirect")
return true
}
if o.TryIndexPages {
log.Trace().Msg("try index page")
// copy the o struct & try if an index page exists
optionsForIndexPages := *o
optionsForIndexPages.TryIndexPages = false
optionsForIndexPages.appendTrailingSlash = true
for _, indexPage := range upstreamIndexPages {
optionsForIndexPages.TargetPath = strings.TrimSuffix(o.TargetPath, "/") + "/" + indexPage
if optionsForIndexPages.Upstream(ctx, giteaClient, redirectsCache) {
return true
}
}
log.Trace().Msg("try html file with path name")
// compatibility fix for GitHub Pages (/example → /example.html)
optionsForIndexPages.appendTrailingSlash = false
optionsForIndexPages.redirectIfExists = strings.TrimSuffix(ctx.Path(), "/") + ".html"
optionsForIndexPages.TargetPath = o.TargetPath + ".html"
if optionsForIndexPages.Upstream(ctx, giteaClient, redirectsCache) {
return true
}
}
log.Debug().Msg("not found")
ctx.StatusCode = http.StatusNotFound
if o.TryIndexPages {
log.Trace().Msg("try not found page")
// copy the o struct & try if a not found page exists
optionsForNotFoundPages := *o
optionsForNotFoundPages.TryIndexPages = false
optionsForNotFoundPages.appendTrailingSlash = false
for _, notFoundPage := range upstreamNotFoundPages {
optionsForNotFoundPages.TargetPath = "/" + notFoundPage
if optionsForNotFoundPages.Upstream(ctx, giteaClient, redirectsCache) {
return true
}
}
log.Trace().Msg("not found page missing")
}
return false
}
// handle unexpected client errors
if err != nil || reader == nil || statusCode != http.StatusOK {
log.Debug().Msg("Handling error")
var msg string
if err != nil {
msg = "forge client: returned unexpected error"
log.Error().Err(err).Msg(msg)
msg = fmt.Sprintf("%s: '%v'", msg, err)
}
if reader == nil {
msg = "forge client: returned no reader"
log.Error().Msg(msg)
}
if statusCode != http.StatusOK {
msg = fmt.Sprintf("forge client: couldn't fetch contents: <code>%d - %s</code>", statusCode, http.StatusText(statusCode))
log.Error().Msg(msg)
}
html.ReturnErrorPage(ctx, msg, http.StatusInternalServerError)
return true
}
// Append trailing slash if missing (for index files), and redirect to fix filenames in general
// o.appendTrailingSlash is only true when looking for index pages
if o.appendTrailingSlash && !strings.HasSuffix(ctx.Path(), "/") {
log.Trace().Msg("append trailing slash and redirect")
ctx.Redirect(ctx.Path()+"/", http.StatusTemporaryRedirect)
return true
}
if strings.HasSuffix(ctx.Path(), "/index.html") && !o.ServeRaw {
log.Trace().Msg("remove index.html from path and redirect")
ctx.Redirect(strings.TrimSuffix(ctx.Path(), "index.html"), http.StatusTemporaryRedirect)
return true
}
if o.redirectIfExists != "" {
ctx.Redirect(o.redirectIfExists, http.StatusTemporaryRedirect)
return true
}
// Set ETag & MIME
o.setHeader(ctx, header)
log.Debug().Msg("Prepare response")
ctx.RespWriter.WriteHeader(ctx.StatusCode)
// Write the response body to the original request
if reader != nil {
_, err := io.Copy(ctx.RespWriter, reader)
if err != nil {
log.Error().Err(err).Msgf("Couldn't write body for %q", o.TargetPath)
html.ReturnErrorPage(ctx, "", http.StatusInternalServerError)
return true
}
}
log.Debug().Msg("Sending response")
return true
}

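As a worked example of the encoding negotiation in AcceptEncodings above (header value invented): qualities are honoured, unknown encodings are ignored, and identity is kept as a last resort unless explicitly excluded.

// Hypothetical header: brotli preferred, gzip acceptable, "compress" unsupported and ignored.
order := AcceptEncodings("br;q=1.0, gzip;q=0.5, compress;q=0.9")
// order is ["br", "gzip", "identity"], so the server tries "<path>.br",
// then "<path>.gz", and finally the plain file.
_ = order
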
27
server/utils/utils.go Normal file
View file

@ -0,0 +1,27 @@
package utils
import (
"net/url"
"path"
"strings"
)
func TrimHostPort(host string) string {
i := strings.IndexByte(host, ':')
if i >= 0 {
return host[:i]
}
return host
}
func CleanPath(uriPath string) string {
unescapedPath, _ := url.PathUnescape(uriPath)
cleanedPath := path.Join("/", unescapedPath)
// If the path refers to a directory, add a trailing slash.
if !strings.HasSuffix(cleanedPath, "/") && (strings.HasSuffix(unescapedPath, "/") || strings.HasSuffix(unescapedPath, "/.") || strings.HasSuffix(unescapedPath, "/..")) {
cleanedPath += "/"
}
return cleanedPath
}

View file

@ -0,0 +1,69 @@
package utils
import (
"testing"
"github.com/stretchr/testify/assert"
)
func TestTrimHostPort(t *testing.T) {
assert.EqualValues(t, "aa", TrimHostPort("aa"))
assert.EqualValues(t, "", TrimHostPort(":"))
assert.EqualValues(t, "example.com", TrimHostPort("example.com:80"))
}
// TestCleanPath is mostly copied from fasthttp, to keep the behaviour we had before migrating away from it.
// Source (MIT licensed): https://github.com/valyala/fasthttp/blob/v1.48.0/uri_test.go#L154
// Copyright (c) 2015-present Aliaksandr Valialkin, VertaMedia, Kirill Danshin, Erik Dubbelboer, FastHTTP Authors
func TestCleanPath(t *testing.T) {
// double slash
testURIPathNormalize(t, "/aa//bb", "/aa/bb")
// triple slash
testURIPathNormalize(t, "/x///y/", "/x/y/")
// multi slashes
testURIPathNormalize(t, "/abc//de///fg////", "/abc/de/fg/")
// encoded slashes
testURIPathNormalize(t, "/xxxx%2fyyy%2f%2F%2F", "/xxxx/yyy/")
// dotdot
testURIPathNormalize(t, "/aaa/..", "/")
// dotdot with trailing slash
testURIPathNormalize(t, "/xxx/yyy/../", "/xxx/")
// multi dotdots
testURIPathNormalize(t, "/aaa/bbb/ccc/../../ddd", "/aaa/ddd")
// dotdots separated by other data
testURIPathNormalize(t, "/a/b/../c/d/../e/..", "/a/c/")
// too many dotdots
testURIPathNormalize(t, "/aaa/../../../../xxx", "/xxx")
testURIPathNormalize(t, "/../../../../../..", "/")
testURIPathNormalize(t, "/../../../../../../", "/")
// encoded dotdots
testURIPathNormalize(t, "/aaa%2Fbbb%2F%2E.%2Fxxx", "/aaa/xxx")
// double slash with dotdots
testURIPathNormalize(t, "/aaa////..//b", "/b")
// fake dotdot
testURIPathNormalize(t, "/aaa/..bbb/ccc/..", "/aaa/..bbb/")
// single dot
testURIPathNormalize(t, "/a/./b/././c/./d.html", "/a/b/c/d.html")
testURIPathNormalize(t, "./foo/", "/foo/")
testURIPathNormalize(t, "./../.././../../aaa/bbb/../../../././../", "/")
testURIPathNormalize(t, "./a/./.././../b/./foo.html", "/b/foo.html")
}
func testURIPathNormalize(t *testing.T, requestURI, expectedPath string) {
cleanedPath := CleanPath(requestURI)
if cleanedPath != expectedPath {
t.Fatalf("Unexpected path %q. Expected %q. requestURI=%q", cleanedPath, expectedPath, requestURI)
}
}

View file

@ -0,0 +1,3 @@
package version
var Version string = "dev"

View file

@ -1,89 +0,0 @@
// This file is part of the "lamp" program.
// Copyright (C) 2022 crapStone
// You should have received a copy of the GNU General Public License
// along with this program. If not, see <https://www.gnu.org/licenses/>.
use clap::{App, Arg};
pub fn build_cli() -> App<'static> {
App::new("lamp")
.version(env!("CARGO_PKG_VERSION"))
.author("crapStone <crapstone01@gmail.com>")
.about("Utility to interact with backlight")
.global_setting(clap::AppSettings::ArgRequiredElseHelp)
.arg(
Arg::with_name("set")
.short('s')
.long("set")
.help("Sets brightness to given value")
.takes_value(true)
.value_name("VALUE"),
)
.arg(
Arg::with_name("inc")
.short('i')
.long("increase")
.help("Increases brightness")
.takes_value(true)
.value_name("PERCENT"),
)
.arg(
Arg::with_name("dec")
.short('d')
.long("decrease")
.help("Decreases brightness")
.takes_value(true)
.value_name("PERCENT"),
)
.arg(
Arg::with_name("get")
.short('g')
.long("get")
.help("Prints current brightness value"),
)
.arg(
Arg::with_name("zero")
.short('z')
.long("zero")
.help("Sets brightness to lowest value"),
)
.arg(
Arg::with_name("full")
.short('f')
.long("full")
.help("Sets brightness to highest value"),
)
.arg(
Arg::with_name("list")
.short('l')
.long("list")
.help("Lists all devices with controllable brightness and led values")
.exclusive(true),
)
.arg(
Arg::with_name("controller")
.short('c')
.long("controller")
.help("Select device to control")
.value_name("DEVICE")
.takes_value(true),
)
.arg(
Arg::with_name("ctrl_type")
.short('t')
.long("type")
.value_name("controller_type")
.takes_value(true)
.possible_values(&["raw", "lin", "log"])
.default_value("lin")
.help("choose controller type")
.long_help(
r#"You can choose between these controller types:
raw: uses the raw values found in the device files
lin: uses percentage values (0.0 - 1.0) with a linear curve
log: uses percentage values (0.0 - 1.0) with a logarithmic curve
the perceived brightness for the human eyes should be linear with this controller
"#,
),
)
}

View file

@ -1,198 +0,0 @@
// This file is part of the "lamp" program.
// Copyright (C) 2022 crapStone
// You should have received a copy of the GNU General Public License
// along with this program. If not, see <https://www.gnu.org/licenses/>.
use std::collections::HashMap;
use std::fs::{File, OpenOptions};
use std::io::prelude::*;
use std::path::{Path, PathBuf};
use std::process::exit;
const SYS_PATHS: [&str; 2] = ["/sys/class/backlight", "/sys/class/leds"];
pub trait Controller {
fn get_brightness(&self) -> u32;
fn get_max_brightness(&self) -> u32;
fn set_brightness(&self, value: u32);
fn check_brightness_value(&self, value: u32) {
let max = self.get_max_brightness();
if value > max {
eprintln!("brightness value too high: {value} > {max}",);
exit(exitcode::DATAERR);
}
}
}
pub struct RawController {
path: PathBuf,
}
impl RawController {
pub fn new(path: PathBuf) -> Self {
Self { path }
}
}
impl Controller for RawController {
fn get_brightness(&self) -> u32 {
read_file_to_int(self.path.join("brightness"))
}
fn get_max_brightness(&self) -> u32 {
read_file_to_int(self.path.join("max_brightness"))
}
fn set_brightness(&self, value: u32) {
self.check_brightness_value(value);
let path = self.path.join("brightness");
let mut file = match OpenOptions::new().write(true).read(true).open(&path) {
Err(why) => {
eprintln!("couldn't open '{}': {:?}", &path.display(), why.kind());
exit(exitcode::OSFILE);
}
Ok(file) => file,
};
match write!(file, "{}", value) {
Ok(_) => {}
Err(err) => {
eprintln!(
"could not write '{value}' to file '{}': {:?}",
&path.display(),
err.kind()
);
exit(exitcode::OSFILE);
}
};
}
}
pub struct LinController {
parent_controller: RawController,
}
impl LinController {
pub fn new(path: PathBuf) -> Self {
Self {
parent_controller: RawController::new(path),
}
}
}
impl Controller for LinController {
fn get_brightness(&self) -> u32 {
((self.parent_controller.get_brightness() as f64
/ self.parent_controller.get_max_brightness() as f64)
* self.get_max_brightness() as f64) as u32
}
fn get_max_brightness(&self) -> u32 {
100
}
fn set_brightness(&self, value: u32) {
self.check_brightness_value(value);
if value > self.get_max_brightness() {
eprintln!(
"brightness value too high! {value} > {}",
self.get_max_brightness()
);
exit(exitcode::DATAERR);
}
self.parent_controller.set_brightness(
(value * self.parent_controller.get_max_brightness()) / self.get_max_brightness(),
)
}
}
pub struct LogController {
parent_controller: RawController,
}
impl LogController {
pub fn new(path: PathBuf) -> Self {
Self {
parent_controller: RawController::new(path),
}
}
}
impl Controller for LogController {
fn get_brightness(&self) -> u32 {
((self.parent_controller.get_brightness() as f64).log10()
/ (self.parent_controller.get_max_brightness() as f64).log10()
* self.get_max_brightness() as f64) as u32
}
fn get_max_brightness(&self) -> u32 {
100
}
fn set_brightness(&self, value: u32) {
self.check_brightness_value(value);
if value > self.get_max_brightness() {
eprintln!(
"brightness value too high! {value} > {}",
self.get_max_brightness()
);
exit(exitcode::DATAERR);
}
self.parent_controller.set_brightness(10f64.powf(
(value as f64 / self.get_max_brightness() as f64)
* (self.parent_controller.get_max_brightness() as f64).log10(),
) as u32)
}
}
fn read_file_to_int(path: PathBuf) -> u32 {
let mut file = match File::open(&path) {
Err(why) => {
eprintln!("couldn't open {}: {:?}", path.display(), why.kind());
exit(exitcode::OSFILE);
}
Ok(file) => file,
};
let mut s = String::new();
match file.read_to_string(&mut s) {
Err(why) => {
eprintln!("couldn't read {}: {:?}", path.display(), why.kind());
exit(exitcode::OSFILE);
}
Ok(_) => return s.trim().parse().unwrap(),
}
}
/// Searches through all paths in `SYS_PATHS` and creates a `HashMap` with the name and absolute path.
///
/// It returns a `Tuple` of the default backlight name and the `HashMap`.
pub fn get_controllers() -> (String, HashMap<String, PathBuf>) {
let mut controllers: HashMap<String, PathBuf> = HashMap::new();
let mut default = None;
for path in SYS_PATHS {
if Path::new(path).exists() {
for name in Path::new(path).read_dir().unwrap() {
let name = name.unwrap().path();
let key = String::from(name.file_name().unwrap().to_str().unwrap());
if default.is_none() {
default = Some(key.clone());
}
controllers.insert(key, name);
}
}
}
(default.unwrap(), controllers)
}

View file

@ -1,83 +0,0 @@
// This file is part of the "lamp" program.
// Copyright (C) 2022 crapStone
// You should have received a copy of the GNU General Public License
// along with this program. If not, see <https://www.gnu.org/licenses/>.
mod cli;
mod controllers;
use std::process::exit;
use controllers::{Controller, LinController, LogController, RawController};
use crate::cli::build_cli;
fn main() {
let app = build_cli();
let matches = app.get_matches();
let (default_ctrl, ctrls) = controllers::get_controllers();
let p = match matches.value_of("controller") {
Some(ctrl) => {
let p = ctrls.get(ctrl);
if p == None {
eprintln!("no device with name '{ctrl}' found");
eprintln!("use --list to ge a list of all available devices");
exit(exitcode::DATAERR);
}
p.unwrap().to_owned()
}
None => ctrls.get(&default_ctrl).unwrap().to_owned(),
};
let controller: Box<dyn Controller> = match matches.value_of("ctrl_type") {
Some("raw") => Box::new(RawController::new(p)),
Some("lin") => Box::new(LinController::new(p)),
Some("log") => Box::new(LogController::new(p)),
Some(_) | None => panic!("{ERROR_MSG}"),
};
if matches.is_present("list") {
println!("{default_ctrl} [default]");
for ctrl in ctrls.keys() {
if ctrl != &default_ctrl {
println!("{ctrl}");
}
}
exit(exitcode::OK);
} else if let Some(value) = matches.value_of("set") {
let new_value = value.parse::<u32>().unwrap();
controller.set_brightness(new_value);
} else if let Some(value) = matches.value_of("inc") {
let new_value = controller.get_brightness() + value.parse::<u32>().unwrap();
controller.set_brightness(new_value.min(controller.get_max_brightness()));
} else if let Some(value) = matches.value_of("dec") {
let new_value = controller.get_brightness() - value.parse::<u32>().unwrap();
controller.set_brightness(new_value.max(0));
} else if matches.is_present("get") {
println!("{}", controller.get_brightness());
} else if matches.is_present("zero") {
controller.set_brightness(0);
} else if matches.is_present("full") {
controller.set_brightness(controller.get_max_brightness());
} else {
build_cli().print_long_help().unwrap();
}
exit(exitcode::OK);
}
// https://xkcd.com/2200/
const ERROR_MSG: &str = r#"
ERROR!
If you're seeing this, the code is in what I thought was an unreachable state.
I could give you advice for what to do. but honestly, why should you trust me?
I clearly screwed this up. I'm writing a message that should never appear,
yet I know it will probably appear someday.
On a deep level, I know I'm not up to this task. I'm so sorry.
"#;