+// ReturnErrorPage renders the error template to the client with the given
+// HTTP status code. msg is sanitized before being injected into the page
+// (only <code> elements survive, see createBlueMondayPolicy).
+func ReturnErrorPage(ctx *context.Context, msg string, statusCode int) {
+ ctx.RespWriter.Header().Set("Content-Type", "text/html; charset=utf-8")
+ ctx.RespWriter.WriteHeader(statusCode)
+
+ templateContext := TemplateContext{
+ StatusCode: statusCode,
+ StatusText: http.StatusText(statusCode),
+ Message: sanitizer.Sanitize(msg),
+ }
+
+ err := errorTemplate.Execute(ctx.RespWriter, templateContext)
+ if err != nil {
+ // status line and headers are already sent, so we can only log the failure
+ log.Err(err).Str("message", msg).Int("status", statusCode).Msg("could not write response")
+ }
+}
+
+// createBlueMondayPolicy builds the sanitizer policy for error-page
+// messages: every element is stripped except <code>.
+func createBlueMondayPolicy() *bluemonday.Policy {
+ p := bluemonday.NewPolicy()
+
+ p.AllowElements("code")
+
+ return p
+}
+
+// loadCustomTemplateOrDefault reads an operator-provided error page from
+// "custom/error.html" (relative to the working directory). A missing file is
+// the normal case and falls back silently to the embedded default
+// (errorPage); any other read error is logged before falling back as well.
+func loadCustomTemplateOrDefault() string {
+ contents, err := os.ReadFile("custom/error.html")
+ if err != nil {
+ // ErrNotExist just means no custom page is configured — not worth logging
+ if !os.IsNotExist(err) {
+ wd, wdErr := os.Getwd()
+ if wdErr != nil {
+ log.Err(err).Msg("could not load custom error page 'custom/error.html'")
+ } else {
+ // include the absolute path so the failing file is easy to locate
+ log.Err(err).Msgf("could not load custom error page '%v'", path.Join(wd, "custom/error.html"))
+ }
+ }
+ return errorPage
+ }
+ return string(contents)
+}
diff --git a/html/html_test.go b/html/html_test.go
new file mode 100644
index 0000000..b395bb2
--- /dev/null
+++ b/html/html_test.go
@@ -0,0 +1,54 @@
+package html
+
+import (
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+)
+
+// TestSanitizerSimpleString checks that plain text passes through the
+// sanitizer unchanged.
+func TestSanitizerSimpleString(t *testing.T) {
+ str := "simple text message without any html elements"
+
+ assert.Equal(t, str, sanitizer.Sanitize(str))
+}
+
+// NOTE(review): the string literals in the tests below appear to have been
+// corrupted by a text-extraction step (HTML tags stripped, quoted strings
+// split across lines) — as written they are not valid Go. They should be
+// restored from the original file before this patch is applied; the comments
+// describe the apparent intent of each case.
+
+// Expects a <code> element to survive sanitization unchanged.
+func TestSanitizerStringWithCodeTag(t *testing.T) {
+ str := "simple text message with html
tag"
+
+ assert.Equal(t, str, sanitizer.Sanitize(str))
+}
+
+// Expects attributes on <code> to be stripped while the element is kept.
+func TestSanitizerStringWithCodeTagWithAttribute(t *testing.T) {
+ str := "simple text message with html
tag"
+ expected := "simple text message with html
tag"
+
+ assert.Equal(t, expected, sanitizer.Sanitize(str))
+}
+
+// Expects an <a> element to be removed, keeping only its text content.
+func TestSanitizerStringWithATag(t *testing.T) {
+ str := "simple text message with a link to another page"
+ expected := "simple text message with a link to another page"
+
+ assert.Equal(t, expected, sanitizer.Sanitize(str))
+}
+
+// Expects an <a href=...> element to be removed, keeping only its text.
+func TestSanitizerStringWithATagAndHref(t *testing.T) {
+ str := "simple text message with a link to another page"
+ expected := "simple text message with a link to another page"
+
+ assert.Equal(t, expected, sanitizer.Sanitize(str))
+}
+
+// Expects an <img> element to be removed entirely.
+func TestSanitizerStringWithImgTag(t *testing.T) {
+ str := "simple text message with a
"
+ expected := "simple text message with a "
+
+ assert.Equal(t, expected, sanitizer.Sanitize(str))
+}
+
+// Expects an <img onerror=...> (XSS vector) to be removed entirely.
+func TestSanitizerStringWithImgTagAndOnerrorAttribute(t *testing.T) {
+ str := "simple text message with a
"
+ expected := "simple text message with a "
+
+ assert.Equal(t, expected, sanitizer.Sanitize(str))
+}
diff --git a/html/templates/error.html b/html/templates/error.html
new file mode 100644
index 0000000..ccaa682
--- /dev/null
+++ b/html/templates/error.html
@@ -0,0 +1,58 @@
+
+
+
+
+
+ {{.StatusText}}
+
+
+
+
+
+
+
+
+ {{.StatusText}} (Error {{.StatusCode}})!
+
+
Sorry, but this page couldn't be served:
+ "{{.Message}}"
+
+ The page you tried to reach is hosted on Codeberg Pages, which might currently be experiencing technical
+ difficulties. If that is the case, it could take a little while until this page is available again.
+
+
+ Otherwise, this page might also be unavailable due to a configuration error. If you are the owner of this
+ website, please make sure to check the
+ troubleshooting section in the Docs!
+
+
+
+
+ Static pages made easy -
+ Codeberg Pages
+
+
+
diff --git a/integration/get_test.go b/integration/get_test.go
new file mode 100644
index 0000000..cfb7188
--- /dev/null
+++ b/integration/get_test.go
@@ -0,0 +1,282 @@
+//go:build integration
+// +build integration
+
+package integration
+
+import (
+ "bytes"
+ "crypto/tls"
+ "io"
+ "log"
+ "net/http"
+ "net/http/cookiejar"
+ "strings"
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+)
+
+// TestGetRedirect verifies that a pages repo with a custom domain answers
+// with a temporary redirect to that domain.
+func TestGetRedirect(t *testing.T) {
+ log.Println("=== TestGetRedirect ===")
+ // test custom domain redirect
+ resp, err := getTestHTTPSClient().Get("https://calciumdibromid.localhost.mock.directory:4430")
+ if !assert.NoError(t, err) {
+ t.FailNow()
+ }
+ if !assert.EqualValues(t, http.StatusTemporaryRedirect, resp.StatusCode) {
+ t.FailNow()
+ }
+ assert.EqualValues(t, "https://www.cabr2.de/", resp.Header.Get("Location"))
+ assert.EqualValues(t, `Temporary Redirect.`, strings.TrimSpace(string(getBytes(resp.Body))))
+}
+
+// TestGetContent fetches static content (an image and HTML from several
+// branches, including one whose name contains '/') and checks status,
+// content type, size and ETag length.
+func TestGetContent(t *testing.T) {
+ log.Println("=== TestGetContent ===")
+ // test get image
+ resp, err := getTestHTTPSClient().Get("https://cb_pages_tests.localhost.mock.directory:4430/images/827679288a.jpg")
+ assert.NoError(t, err)
+ if !assert.EqualValues(t, http.StatusOK, resp.StatusCode) {
+ t.FailNow()
+ }
+ assert.EqualValues(t, "image/jpeg", resp.Header.Get("Content-Type"))
+ assert.EqualValues(t, "124635", resp.Header.Get("Content-Length"))
+ assert.EqualValues(t, 124635, getSize(resp.Body))
+ assert.Len(t, resp.Header.Get("ETag"), 42)
+
+ // specify branch
+ resp, err = getTestHTTPSClient().Get("https://cb_pages_tests.localhost.mock.directory:4430/pag/@master/")
+ assert.NoError(t, err)
+ if !assert.NotNil(t, resp) {
+ t.FailNow()
+ }
+ assert.EqualValues(t, http.StatusOK, resp.StatusCode)
+ assert.EqualValues(t, "text/html; charset=utf-8", resp.Header.Get("Content-Type"))
+ assert.True(t, getSize(resp.Body) > 1000)
+ assert.Len(t, resp.Header.Get("ETag"), 44)
+
+ // access branch name contains '/'
+ resp, err = getTestHTTPSClient().Get("https://cb_pages_tests.localhost.mock.directory:4430/blumia/@docs~main/")
+ assert.NoError(t, err)
+ if !assert.EqualValues(t, http.StatusOK, resp.StatusCode) {
+ t.FailNow()
+ }
+ assert.EqualValues(t, "text/html; charset=utf-8", resp.Header.Get("Content-Type"))
+ assert.True(t, getSize(resp.Body) > 100)
+ assert.Len(t, resp.Header.Get("ETag"), 44)
+
+ // TODO: test get of non cacheable content (content size > fileCacheSizeLimit)
+}
+
+// TestCustomDomain checks direct content delivery via a custom domain.
+func TestCustomDomain(t *testing.T) {
+ log.Println("=== TestCustomDomain ===")
+ resp, err := getTestHTTPSClient().Get("https://mock-pages.codeberg-test.org:4430/README.md")
+ assert.NoError(t, err)
+ if !assert.NotNil(t, resp) {
+ t.FailNow()
+ }
+ assert.EqualValues(t, http.StatusOK, resp.StatusCode)
+ assert.EqualValues(t, "text/markdown; charset=utf-8", resp.Header.Get("Content-Type"))
+ assert.EqualValues(t, "106", resp.Header.Get("Content-Length"))
+ assert.EqualValues(t, 106, getSize(resp.Body))
+}
+
+// TestCustomDomainRedirects checks both directions of custom-domain
+// canonicalization: pages domain → custom domain, and a secondary custom
+// domain → the primary one.
+func TestCustomDomainRedirects(t *testing.T) {
+ log.Println("=== TestCustomDomainRedirects ===")
+ // test redirect from default pages domain to custom domain
+ resp, err := getTestHTTPSClient().Get("https://6543.localhost.mock.directory:4430/test_pages-server_custom-mock-domain/@main/README.md")
+ assert.NoError(t, err)
+ if !assert.NotNil(t, resp) {
+ t.FailNow()
+ }
+ assert.EqualValues(t, http.StatusTemporaryRedirect, resp.StatusCode)
+ assert.EqualValues(t, "text/html; charset=utf-8", resp.Header.Get("Content-Type"))
+ // TODO: custom port is not evaluated (which hurts tests & the dev env only)
+ // assert.EqualValues(t, "https://mock-pages.codeberg-test.org:4430/@main/README.md", resp.Header.Get("Location"))
+ assert.EqualValues(t, "https://mock-pages.codeberg-test.org/@main/README.md", resp.Header.Get("Location"))
+ // NOTE(review): the expected Link value looks buggy — "https:/" has a
+ // single slash and `rel="canonical"` appears twice. The assertion pins
+ // current server behavior, but the server side deserves a look.
+ assert.EqualValues(t, `https:/codeberg.org/6543/test_pages-server_custom-mock-domain/src/branch/main/README.md; rel="canonical"; rel="canonical"`, resp.Header.Get("Link"))
+
+ // test redirect from a custom domain to the primary custom domain (www.example.com -> example.com)
+ // regression test to https://codeberg.org/Codeberg/pages-server/issues/153
+ resp, err = getTestHTTPSClient().Get("https://mock-pages-redirect.codeberg-test.org:4430/README.md")
+ assert.NoError(t, err)
+ if !assert.NotNil(t, resp) {
+ t.FailNow()
+ }
+ assert.EqualValues(t, http.StatusTemporaryRedirect, resp.StatusCode)
+ assert.EqualValues(t, "text/html; charset=utf-8", resp.Header.Get("Content-Type"))
+ // TODO: custom port is not evaluated (which hurts tests & the dev env only)
+ // assert.EqualValues(t, "https://mock-pages.codeberg-test.org:4430/README.md", resp.Header.Get("Location"))
+ assert.EqualValues(t, "https://mock-pages.codeberg-test.org/README.md", resp.Header.Get("Location"))
+}
+
+// TestRawCustomDomain checks plain-text delivery through the raw domain for
+// the custom-domain branch.
+func TestRawCustomDomain(t *testing.T) {
+ log.Println("=== TestRawCustomDomain ===")
+ // test raw domain response for custom domain branch
+ resp, err := getTestHTTPSClient().Get("https://raw.localhost.mock.directory:4430/cb_pages_tests/raw-test/example") // need cb_pages_tests fork
+ assert.NoError(t, err)
+ if !assert.NotNil(t, resp) {
+ t.FailNow()
+ }
+ assert.EqualValues(t, http.StatusOK, resp.StatusCode)
+ assert.EqualValues(t, "text/plain; charset=utf-8", resp.Header.Get("Content-Type"))
+ assert.EqualValues(t, "76", resp.Header.Get("Content-Length"))
+ assert.EqualValues(t, 76, getSize(resp.Body))
+}
+
+// TestRawIndex ensures index.html is served as plain text (not rendered as
+// HTML) on the raw domain.
+func TestRawIndex(t *testing.T) {
+ log.Println("=== TestRawIndex ===")
+ // test raw domain response for index.html
+ resp, err := getTestHTTPSClient().Get("https://raw.localhost.mock.directory:4430/cb_pages_tests/raw-test/@branch-test/index.html") // need cb_pages_tests fork
+ assert.NoError(t, err)
+ if !assert.NotNil(t, resp) {
+ t.FailNow()
+ }
+ assert.EqualValues(t, http.StatusOK, resp.StatusCode)
+ assert.EqualValues(t, "text/plain; charset=utf-8", resp.Header.Get("Content-Type"))
+ assert.EqualValues(t, "597", resp.Header.Get("Content-Length"))
+ assert.EqualValues(t, 597, getSize(resp.Body))
+}
+
+// TestGetNotFound exercises a repo's custom 404 page.
+func TestGetNotFound(t *testing.T) {
+ log.Println("=== TestGetNotFound ===")
+ // test custom not found pages
+ resp, err := getTestHTTPSClient().Get("https://cb_pages_tests.localhost.mock.directory:4430/pages-404-demo/blah")
+ assert.NoError(t, err)
+ if !assert.NotNil(t, resp) {
+ t.FailNow()
+ }
+ assert.EqualValues(t, http.StatusNotFound, resp.StatusCode)
+ assert.EqualValues(t, "text/html; charset=utf-8", resp.Header.Get("Content-Type"))
+ assert.EqualValues(t, "37", resp.Header.Get("Content-Length"))
+ assert.EqualValues(t, 37, getSize(resp.Body))
+}
+
+// TestRedirect checks a plain _redirects rule (301 to an external URL).
+func TestRedirect(t *testing.T) {
+ log.Println("=== TestRedirect ===")
+ // test redirects
+ resp, err := getTestHTTPSClient().Get("https://cb_pages_tests.localhost.mock.directory:4430/some_redirects/redirect")
+ assert.NoError(t, err)
+ if !assert.NotNil(t, resp) {
+ t.FailNow()
+ }
+ assert.EqualValues(t, http.StatusMovedPermanently, resp.StatusCode)
+ assert.EqualValues(t, "https://example.com/", resp.Header.Get("Location"))
+}
+
+// TestSPARedirect checks an SPA catch-all rule: an unknown path serves the
+// app shell with 200 while the requested URL stays unchanged.
+func TestSPARedirect(t *testing.T) {
+ log.Println("=== TestSPARedirect ===")
+ // test SPA redirects
+ url := "https://cb_pages_tests.localhost.mock.directory:4430/some_redirects/app/aqdjw"
+ resp, err := getTestHTTPSClient().Get(url)
+ assert.NoError(t, err)
+ if !assert.NotNil(t, resp) {
+ t.FailNow()
+ }
+ assert.EqualValues(t, http.StatusOK, resp.StatusCode)
+ assert.EqualValues(t, url, resp.Request.URL.String())
+ assert.EqualValues(t, "text/html; charset=utf-8", resp.Header.Get("Content-Type"))
+ assert.EqualValues(t, "258", resp.Header.Get("Content-Length"))
+ assert.EqualValues(t, 258, getSize(resp.Body))
+}
+
+// TestSplatRedirect checks a splat (wildcard) redirect rule.
+func TestSplatRedirect(t *testing.T) {
+ log.Println("=== TestSplatRedirect ===")
+ // test splat redirects
+ resp, err := getTestHTTPSClient().Get("https://cb_pages_tests.localhost.mock.directory:4430/some_redirects/articles/qfopefe")
+ assert.NoError(t, err)
+ if !assert.NotNil(t, resp) {
+ t.FailNow()
+ }
+ assert.EqualValues(t, http.StatusMovedPermanently, resp.StatusCode)
+ assert.EqualValues(t, "/posts/qfopefe", resp.Header.Get("Location"))
+}
+
+// TestFollowSymlink checks that a file symlink and a relative symlinked path
+// are resolved when symlink support is enabled.
+func TestFollowSymlink(t *testing.T) {
+ log.Printf("=== TestFollowSymlink ===\n")
+
+ // file symlink
+ resp, err := getTestHTTPSClient().Get("https://cb_pages_tests.localhost.mock.directory:4430/tests_for_pages-server/@main/link")
+ assert.NoError(t, err)
+ if !assert.NotNil(t, resp) {
+ t.FailNow()
+ }
+ assert.EqualValues(t, http.StatusOK, resp.StatusCode)
+ assert.EqualValues(t, "application/octet-stream", resp.Header.Get("Content-Type"))
+ assert.EqualValues(t, "4", resp.Header.Get("Content-Length"))
+ body := getBytes(resp.Body)
+ assert.EqualValues(t, 4, len(body))
+ assert.EqualValues(t, "abc\n", string(body))
+
+ // relative file links (../index.html file in this case)
+ resp, err = getTestHTTPSClient().Get("https://cb_pages_tests.localhost.mock.directory:4430/tests_for_pages-server/@main/dir_aim/some/")
+ assert.NoError(t, err)
+ if !assert.NotNil(t, resp) {
+ t.FailNow()
+ }
+ assert.EqualValues(t, http.StatusOK, resp.StatusCode)
+ assert.EqualValues(t, "text/html; charset=utf-8", resp.Header.Get("Content-Type"))
+ assert.EqualValues(t, "an index\n", string(getBytes(resp.Body)))
+}
+
+// TestLFSSupport verifies that an LFS pointer file is resolved to its
+// actual content.
+func TestLFSSupport(t *testing.T) {
+ log.Printf("=== TestLFSSupport ===\n")
+
+ resp, err := getTestHTTPSClient().Get("https://cb_pages_tests.localhost.mock.directory:4430/tests_for_pages-server/@main/lfs.txt")
+ assert.NoError(t, err)
+ if !assert.NotNil(t, resp) {
+ t.FailNow()
+ }
+ assert.EqualValues(t, http.StatusOK, resp.StatusCode)
+ body := strings.TrimSpace(string(getBytes(resp.Body)))
+ assert.EqualValues(t, 12, len(body))
+ assert.EqualValues(t, "actual value", body)
+}
+
+// TestGetOptions checks the allowed-methods answer to an OPTIONS request.
+func TestGetOptions(t *testing.T) {
+ log.Println("=== TestGetOptions ===")
+ req, _ := http.NewRequest(http.MethodOptions, "https://mock-pages.codeberg-test.org:4430/README.md", http.NoBody)
+ resp, err := getTestHTTPSClient().Do(req)
+ assert.NoError(t, err)
+ if !assert.NotNil(t, resp) {
+ t.FailNow()
+ }
+ assert.EqualValues(t, http.StatusNoContent, resp.StatusCode)
+ assert.EqualValues(t, "GET, HEAD, OPTIONS", resp.Header.Get("Allow"))
+}
+
+// TestHttpRedirect verifies that plain HTTP requests are upgraded to HTTPS
+// with a permanent redirect.
+func TestHttpRedirect(t *testing.T) {
+ log.Println("=== TestHttpRedirect ===")
+ resp, err := getTestHTTPSClient().Get("http://mock-pages.codeberg-test.org:8880/README.md")
+ assert.NoError(t, err)
+ if !assert.NotNil(t, resp) {
+ t.FailNow()
+ }
+ assert.EqualValues(t, http.StatusMovedPermanently, resp.StatusCode)
+ assert.EqualValues(t, "text/html; charset=utf-8", resp.Header.Get("Content-Type"))
+ assert.EqualValues(t, "https://mock-pages.codeberg-test.org:4430/README.md", resp.Header.Get("Location"))
+}
+
+// getTestHTTPSClient builds a client that keeps cookies, never follows
+// redirects (so tests can inspect them directly), and accepts the
+// self-signed integration certificates (InsecureSkipVerify is test-only).
+func getTestHTTPSClient() *http.Client {
+ cookieJar, _ := cookiejar.New(nil)
+ return &http.Client{
+ Jar: cookieJar,
+ CheckRedirect: func(_ *http.Request, _ []*http.Request) error {
+ return http.ErrUseLastResponse
+ },
+ Transport: &http.Transport{
+ TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
+ },
+ }
+}
+
+// getBytes drains the stream and returns its contents.
+func getBytes(stream io.Reader) []byte {
+ buf := new(bytes.Buffer)
+ _, _ = buf.ReadFrom(stream)
+ return buf.Bytes()
+}
+
+// getSize drains the stream and returns the number of bytes read.
+func getSize(stream io.Reader) int {
+ buf := new(bytes.Buffer)
+ _, _ = buf.ReadFrom(stream)
+ return buf.Len()
+}
diff --git a/integration/main_test.go b/integration/main_test.go
new file mode 100644
index 0000000..86fd9d3
--- /dev/null
+++ b/integration/main_test.go
@@ -0,0 +1,69 @@
+//go:build integration
+// +build integration
+
+package integration
+
+import (
+ "context"
+ "log"
+ "os"
+ "testing"
+ "time"
+
+ "github.com/urfave/cli/v2"
+
+ cmd "codeberg.org/codeberg/pages/cli"
+ "codeberg.org/codeberg/pages/server"
+)
+
+// TestMain boots the pages server once for the whole integration test binary
+// and runs all tests against it; the server is cancelled when tests finish.
+func TestMain(m *testing.M) {
+ log.Println("=== TestMain: START Server ===")
+ serverCtx, serverCancel := context.WithCancel(context.Background())
+ if err := startServer(serverCtx); err != nil {
+ log.Fatalf("could not start server: %v", err)
+ }
+ defer func() {
+ serverCancel()
+ log.Println("=== TestMain: Server STOPPED ===")
+ }()
+
+ // crude readiness wait: give the server time to start listening
+ // TODO(review): polling a health endpoint would be faster and more robust
+ time.Sleep(10 * time.Second)
+
+ m.Run()
+}
+
+// startServer configures the pages server via environment variables
+// (respecting any value the caller already set) and launches it on a
+// background goroutine. The returned error is currently always nil; a
+// server failure aborts the test binary via log.Fatalf.
+func startServer(ctx context.Context) error {
+ args := []string{"integration"}
+ setEnvIfNotSet("ACME_API", "https://acme.mock.directory")
+ setEnvIfNotSet("PAGES_DOMAIN", "localhost.mock.directory")
+ setEnvIfNotSet("RAW_DOMAIN", "raw.localhost.mock.directory")
+ setEnvIfNotSet("PAGES_BRANCHES", "pages,main,master")
+ setEnvIfNotSet("PORT", "4430")
+ setEnvIfNotSet("HTTP_PORT", "8880")
+ setEnvIfNotSet("ENABLE_HTTP_SERVER", "true")
+ setEnvIfNotSet("DB_TYPE", "sqlite3")
+ setEnvIfNotSet("GITEA_ROOT", "https://codeberg.org")
+ setEnvIfNotSet("LOG_LEVEL", "trace")
+ setEnvIfNotSet("ENABLE_LFS_SUPPORT", "true")
+ setEnvIfNotSet("ENABLE_SYMLINK_SUPPORT", "true")
+ setEnvIfNotSet("ACME_ACCOUNT_CONFIG", "integration/acme-account.json")
+
+ app := cli.NewApp()
+ app.Name = "pages-server"
+ app.Action = server.Serve
+ app.Flags = cmd.ServerFlags
+
+ // run in the background; ctx cancellation (from TestMain) stops the server
+ go func() {
+ if err := app.RunContext(ctx, args); err != nil {
+ log.Fatalf("run server error: %v", err)
+ }
+ }()
+
+ return nil
+}
+
+// setEnvIfNotSet sets key to value unless the environment already defines
+// it, letting CI or developers override the test defaults.
+func setEnvIfNotSet(key, value string) {
+ if _, set := os.LookupEnv(key); !set {
+ os.Setenv(key, value)
+ }
+}
diff --git a/main.go b/main.go
new file mode 100644
index 0000000..87e21f3
--- /dev/null
+++ b/main.go
@@ -0,0 +1,21 @@
+package main
+
+import (
+ "os"
+
+ _ "github.com/joho/godotenv/autoload"
+ "github.com/rs/zerolog/log"
+
+ "codeberg.org/codeberg/pages/cli"
+ "codeberg.org/codeberg/pages/server"
+)
+
+// main wires the CLI app to the server entry point and exits non-zero on a
+// fatal error. The godotenv autoload import (above) reads .env before this runs.
+func main() {
+ app := cli.CreatePagesApp()
+ app.Action = server.Serve
+
+ if err := app.Run(os.Args); err != nil {
+ log.Error().Err(err).Msg("A fatal error occurred")
+ os.Exit(1)
+ }
+}
diff --git a/renovate.json b/renovate.json
new file mode 100644
index 0000000..9dd1cd7
--- /dev/null
+++ b/renovate.json
@@ -0,0 +1,27 @@
+{
+ "$schema": "https://docs.renovatebot.com/renovate-schema.json",
+ "extends": [
+ "config:recommended",
+ ":maintainLockFilesWeekly",
+ ":enablePreCommit",
+ "schedule:automergeDaily",
+ "schedule:weekends"
+ ],
+ "automergeType": "branch",
+ "automergeMajor": false,
+ "automerge": true,
+ "prConcurrentLimit": 5,
+ "labels": ["dependencies"],
+ "packageRules": [
+ {
+ "matchManagers": ["gomod", "dockerfile"]
+ },
+ {
+ "groupName": "golang deps non-major",
+ "matchManagers": ["gomod"],
+ "matchUpdateTypes": ["minor", "patch"],
+ "extends": ["schedule:daily"]
+ }
+ ],
+ "postUpdateOptions": ["gomodTidy", "gomodUpdateImportPaths"]
+}
diff --git a/server/acme/client.go b/server/acme/client.go
new file mode 100644
index 0000000..d5c83d0
--- /dev/null
+++ b/server/acme/client.go
@@ -0,0 +1,26 @@
+package acme
+
+import (
+ "errors"
+ "fmt"
+
+ "codeberg.org/codeberg/pages/config"
+ "codeberg.org/codeberg/pages/server/cache"
+ "codeberg.org/codeberg/pages/server/certificates"
+)
+
+// ErrAcmeMissConfig is returned when the ACME configuration is inconsistent.
+var ErrAcmeMissConfig = errors.New("ACME client has wrong config")
+
+// CreateAcmeClient validates the ACME configuration and builds the client.
+// The mock directory endpoint is exempt from the accept-terms/DNS checks so
+// integration tests can run without real ACME credentials.
+func CreateAcmeClient(cfg config.ACMEConfig, enableHTTPServer bool, challengeCache cache.ICache) (*certificates.AcmeClient, error) {
+ // check config
+ if (!cfg.AcceptTerms || (cfg.DNSProvider == "" && !cfg.NoDNS01)) && cfg.APIEndpoint != "https://acme.mock.directory" {
+ return nil, fmt.Errorf("%w: you must set $ACME_ACCEPT_TERMS and $DNS_PROVIDER or $NO_DNS_01, unless $ACME_API is set to https://acme.mock.directory", ErrAcmeMissConfig)
+ }
+ // the EAB key ID and HMAC only make sense as a pair
+ if cfg.EAB_HMAC != "" && cfg.EAB_KID == "" {
+ return nil, fmt.Errorf("%w: ACME_EAB_HMAC also needs ACME_EAB_KID to be set", ErrAcmeMissConfig)
+ } else if cfg.EAB_HMAC == "" && cfg.EAB_KID != "" {
+ return nil, fmt.Errorf("%w: ACME_EAB_KID also needs ACME_EAB_HMAC to be set", ErrAcmeMissConfig)
+ }
+
+ return certificates.NewAcmeClient(cfg, enableHTTPServer, challengeCache)
+}
diff --git a/server/cache/interface.go b/server/cache/interface.go
new file mode 100644
index 0000000..b3412cc
--- /dev/null
+++ b/server/cache/interface.go
@@ -0,0 +1,10 @@
+package cache
+
+import "time"
+
+// ICache is an interface that defines how the pages server interacts with the cache.
+type ICache interface {
+ // Set stores value under key for at most ttl.
+ Set(key string, value interface{}, ttl time.Duration) error
+ // Get returns the value for key and whether it was present.
+ Get(key string) (interface{}, bool)
+ // Remove drops key from the cache.
+ Remove(key string)
+}
diff --git a/server/cache/memory.go b/server/cache/memory.go
new file mode 100644
index 0000000..093696f
--- /dev/null
+++ b/server/cache/memory.go
@@ -0,0 +1,7 @@
+package cache
+
+import "github.com/OrlovEvgeny/go-mcache"
+
+// NewInMemoryCache returns an in-process ICache backed by go-mcache.
+func NewInMemoryCache() ICache {
+ return mcache.New()
+}
diff --git a/server/certificates/acme_account.go b/server/certificates/acme_account.go
new file mode 100644
index 0000000..57f4d44
--- /dev/null
+++ b/server/certificates/acme_account.go
@@ -0,0 +1,29 @@
+package certificates
+
+import (
+ "crypto"
+
+ "github.com/go-acme/lego/v4/registration"
+)
+
+// AcmeAccount is the persisted ACME registration; it is serialized to the
+// account config file as JSON. The private key is stored PEM-encoded in
+// KeyPEM (JSON field "Key"), while the parsed Key is excluded from JSON.
+type AcmeAccount struct {
+ Email string
+ Registration *registration.Resource
+ Key crypto.PrivateKey `json:"-"`
+ KeyPEM string `json:"Key"`
+}
+
+// make sure AcmeAccount match User interface
+var _ registration.User = &AcmeAccount{}
+
+// GetEmail returns the account's contact email.
+func (u *AcmeAccount) GetEmail() string {
+ return u.Email
+}
+
+// GetRegistration returns the ACME registration resource.
+// NOTE(review): value receiver is inconsistent with the other two methods
+// (pointer receivers) — harmless here, but worth unifying.
+func (u AcmeAccount) GetRegistration() *registration.Resource {
+ return u.Registration
+}
+
+// GetPrivateKey returns the account's private key.
+func (u *AcmeAccount) GetPrivateKey() crypto.PrivateKey {
+ return u.Key
+}
diff --git a/server/certificates/acme_client.go b/server/certificates/acme_client.go
new file mode 100644
index 0000000..f42fd8f
--- /dev/null
+++ b/server/certificates/acme_client.go
@@ -0,0 +1,93 @@
+package certificates
+
+import (
+ "fmt"
+ "sync"
+ "time"
+
+ "github.com/go-acme/lego/v4/lego"
+ "github.com/go-acme/lego/v4/providers/dns"
+ "github.com/reugn/equalizer"
+ "github.com/rs/zerolog/log"
+
+ "codeberg.org/codeberg/pages/config"
+ "codeberg.org/codeberg/pages/server/cache"
+)
+
+// AcmeClient bundles the lego clients and rate limiters used to obtain
+// certificates.
+type AcmeClient struct {
+ legoClient *lego.Client
+ // dnsChallengerLegoClient serves the main domain via DNS-01; it is nil
+ // when no DNS provider is configured (mock wildcard certs are used then)
+ dnsChallengerLegoClient *lego.Client
+
+ // obtainLocks presumably deduplicates concurrent obtains per domain — see callers
+ obtainLocks sync.Map
+
+ acmeUseRateLimits bool
+
+ // limiter
+ acmeClientOrderLimit *equalizer.TokenBucket
+ acmeClientRequestLimit *equalizer.TokenBucket
+ acmeClientFailLimit *equalizer.TokenBucket
+ acmeClientCertificateLimitPerUser map[string]*equalizer.TokenBucket
+}
+
+// NewAcmeClient builds the two lego clients used by the server — one for
+// per-domain certificates (TLS-ALPN-01 and optionally HTTP-01, answered from
+// the challenge cache) and one for the main domain (DNS-01) — plus the token
+// buckets that keep requests under the CA's rate limits.
+func NewAcmeClient(cfg config.ACMEConfig, enableHTTPServer bool, challengeCache cache.ICache) (*AcmeClient, error) {
+ acmeConfig, err := setupAcmeConfig(cfg)
+ if err != nil {
+ return nil, err
+ }
+
+ acmeClient, err := lego.NewClient(acmeConfig)
+ if err != nil {
+ // NOTE(review): log.Fatal exits the process, contradicting the
+ // "continuing with mock certs only" message (and the Error-level
+ // handling of the identical failure below) — likely meant to be Error.
+ log.Fatal().Err(err).Msg("Can't create ACME client, continuing with mock certs only")
+ } else {
+ err = acmeClient.Challenge.SetTLSALPN01Provider(AcmeTLSChallengeProvider{challengeCache})
+ if err != nil {
+ log.Error().Err(err).Msg("Can't create TLS-ALPN-01 provider")
+ }
+ if enableHTTPServer {
+ err = acmeClient.Challenge.SetHTTP01Provider(AcmeHTTPChallengeProvider{challengeCache})
+ if err != nil {
+ log.Error().Err(err).Msg("Can't create HTTP-01 provider")
+ }
+ }
+ }
+
+ mainDomainAcmeClient, err := lego.NewClient(acmeConfig)
+ if err != nil {
+ log.Error().Err(err).Msg("Can't create ACME client, continuing with mock certs only")
+ } else {
+ if cfg.DNSProvider == "" {
+ // using mock wildcard certs
+ mainDomainAcmeClient = nil
+ } else {
+ // use DNS-Challenge https://go-acme.github.io/lego/dns/
+ provider, err := dns.NewDNSChallengeProviderByName(cfg.DNSProvider)
+ if err != nil {
+ return nil, fmt.Errorf("can not create DNS Challenge provider: %w", err)
+ }
+ if err := mainDomainAcmeClient.Challenge.SetDNS01Provider(provider); err != nil {
+ return nil, fmt.Errorf("can not create DNS-01 provider: %w", err)
+ }
+ }
+ }
+
+ return &AcmeClient{
+ legoClient: acmeClient,
+ dnsChallengerLegoClient: mainDomainAcmeClient,
+
+ acmeUseRateLimits: cfg.UseRateLimits,
+
+ obtainLocks: sync.Map{},
+
+ // limiter
+
+ // rate limit is 300 / 3 hours, we want 200 / 2 hours but to refill more often, so that's 25 new domains every 15 minutes
+ // TODO: when this is used a lot, we probably have to think of a somewhat better solution?
+ acmeClientOrderLimit: equalizer.NewTokenBucket(25, 15*time.Minute),
+ // rate limit is 20 / second, we want 5 / second (especially as one cert takes at least two requests)
+ acmeClientRequestLimit: equalizer.NewTokenBucket(5, 1*time.Second),
+ // rate limit is 5 / hour https://letsencrypt.org/docs/failed-validation-limit/
+ acmeClientFailLimit: equalizer.NewTokenBucket(5, 1*time.Hour),
+ // checkUserLimit() use this to rate also per user
+ acmeClientCertificateLimitPerUser: map[string]*equalizer.TokenBucket{},
+ }, nil
+}
diff --git a/server/certificates/acme_config.go b/server/certificates/acme_config.go
new file mode 100644
index 0000000..2b5151d
--- /dev/null
+++ b/server/certificates/acme_config.go
@@ -0,0 +1,110 @@
+package certificates
+
+import (
+ "crypto/ecdsa"
+ "crypto/elliptic"
+ "crypto/rand"
+ "encoding/json"
+ "fmt"
+ "os"
+
+ "codeberg.org/codeberg/pages/config"
+ "github.com/go-acme/lego/v4/certcrypto"
+ "github.com/go-acme/lego/v4/lego"
+ "github.com/go-acme/lego/v4/registration"
+ "github.com/rs/zerolog/log"
+)
+
+const challengePath = "/.well-known/acme-challenge/"
+
+// setupAcmeConfig loads the ACME account from cfg.AccountConfigFile or, if
+// that file does not exist yet, generates a fresh key, registers a new
+// account (optionally via External Account Binding) and persists it.
+func setupAcmeConfig(cfg config.ACMEConfig) (*lego.Config, error) {
+ var myAcmeAccount AcmeAccount
+ var myAcmeConfig *lego.Config
+
+ if cfg.AccountConfigFile == "" {
+ return nil, fmt.Errorf("invalid acme config file: '%s'", cfg.AccountConfigFile)
+ }
+
+ if account, err := os.ReadFile(cfg.AccountConfigFile); err == nil {
+ log.Info().Msgf("found existing acme account config file '%s'", cfg.AccountConfigFile)
+ if err := json.Unmarshal(account, &myAcmeAccount); err != nil {
+ return nil, err
+ }
+
+ // the private key is persisted PEM-encoded (see AcmeAccount.KeyPEM)
+ myAcmeAccount.Key, err = certcrypto.ParsePEMPrivateKey([]byte(myAcmeAccount.KeyPEM))
+ if err != nil {
+ return nil, err
+ }
+
+ myAcmeConfig = lego.NewConfig(&myAcmeAccount)
+ myAcmeConfig.CADirURL = cfg.APIEndpoint
+ myAcmeConfig.Certificate.KeyType = certcrypto.RSA2048
+
+ // Validate Config
+ _, err := lego.NewClient(myAcmeConfig)
+ if err != nil {
+ log.Info().Err(err).Msg("config validation failed, you might just delete the config file and let it recreate")
+ return nil, fmt.Errorf("acme config validation failed: %w", err)
+ }
+
+ return myAcmeConfig, nil
+ } else if !os.IsNotExist(err) {
+ // any read error other than "file missing" is fatal
+ return nil, err
+ }
+
+ log.Info().Msgf("no existing acme account config found, try to create a new one")
+
+ privateKey, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
+ if err != nil {
+ return nil, err
+ }
+ myAcmeAccount = AcmeAccount{
+ Email: cfg.Email,
+ Key: privateKey,
+ KeyPEM: string(certcrypto.PEMEncode(privateKey)),
+ }
+ myAcmeConfig = lego.NewConfig(&myAcmeAccount)
+ myAcmeConfig.CADirURL = cfg.APIEndpoint
+ myAcmeConfig.Certificate.KeyType = certcrypto.RSA2048
+ tempClient, err := lego.NewClient(myAcmeConfig)
+ if err != nil {
+ log.Error().Err(err).Msg("Can't create ACME client, continuing with mock certs only")
+ } else {
+ // accept terms & log in to EAB
+ if cfg.EAB_KID == "" || cfg.EAB_HMAC == "" {
+ reg, err := tempClient.Registration.Register(registration.RegisterOptions{TermsOfServiceAgreed: cfg.AcceptTerms})
+ if err != nil {
+ log.Error().Err(err).Msg("Can't register ACME account, continuing with mock certs only")
+ } else {
+ myAcmeAccount.Registration = reg
+ }
+ } else {
+ reg, err := tempClient.Registration.RegisterWithExternalAccountBinding(registration.RegisterEABOptions{
+ TermsOfServiceAgreed: cfg.AcceptTerms,
+ Kid: cfg.EAB_KID,
+ HmacEncoded: cfg.EAB_HMAC,
+ })
+ if err != nil {
+ log.Error().Err(err).Msg("Can't register ACME account, continuing with mock certs only")
+ } else {
+ myAcmeAccount.Registration = reg
+ }
+ }
+
+ if myAcmeAccount.Registration != nil {
+ acmeAccountJSON, err := json.Marshal(myAcmeAccount)
+ if err != nil {
+ // NOTE(review): log message is missing a space ("json.Marshalfailed");
+ // `select {}` deliberately blocks forever so the (unsaved) account is
+ // not re-registered in a restart loop, which would burn CA rate limits
+ log.Error().Err(err).Msg("json.Marshalfailed, waiting for manual restart to avoid rate limits")
+ select {}
+ }
+ log.Info().Msgf("new acme account created. write to config file '%s'", cfg.AccountConfigFile)
+ err = os.WriteFile(cfg.AccountConfigFile, acmeAccountJSON, 0o600)
+ if err != nil {
+ log.Error().Err(err).Msg("os.WriteFile failed, waiting for manual restart to avoid rate limits")
+ select {}
+ }
+ }
+ }
+
+ return myAcmeConfig, nil
+}
diff --git a/server/certificates/cached_challengers.go b/server/certificates/cached_challengers.go
new file mode 100644
index 0000000..39439fb
--- /dev/null
+++ b/server/certificates/cached_challengers.go
@@ -0,0 +1,83 @@
+package certificates
+
+import (
+ "fmt"
+ "net/http"
+ "net/url"
+ "strings"
+ "time"
+
+ "github.com/go-acme/lego/v4/challenge"
+ "github.com/rs/zerolog/log"
+
+ "codeberg.org/codeberg/pages/server/cache"
+ "codeberg.org/codeberg/pages/server/context"
+)
+
+// AcmeTLSChallengeProvider answers TLS-ALPN-01 challenges out of the shared
+// challenge cache, keyed by domain.
+type AcmeTLSChallengeProvider struct {
+ challengeCache cache.ICache
+}
+
+// make sure AcmeTLSChallengeProvider match Provider interface
+var _ challenge.Provider = AcmeTLSChallengeProvider{}
+
+// Present stores the key authorization for domain; entries expire after an hour.
+func (a AcmeTLSChallengeProvider) Present(domain, _, keyAuth string) error {
+ return a.challengeCache.Set(domain, keyAuth, 1*time.Hour)
+}
+
+// CleanUp removes the stored challenge for domain.
+func (a AcmeTLSChallengeProvider) CleanUp(domain, _, _ string) error {
+ a.challengeCache.Remove(domain)
+ return nil
+}
+
+// AcmeHTTPChallengeProvider answers HTTP-01 challenges out of the shared
+// challenge cache, keyed by "domain/token".
+type AcmeHTTPChallengeProvider struct {
+ challengeCache cache.ICache
+}
+
+// make sure AcmeHTTPChallengeProvider match Provider interface
+var _ challenge.Provider = AcmeHTTPChallengeProvider{}
+
+// Present stores the key authorization under domain/token; entries expire after an hour.
+func (a AcmeHTTPChallengeProvider) Present(domain, token, keyAuth string) error {
+ return a.challengeCache.Set(domain+"/"+token, keyAuth, 1*time.Hour)
+}
+
+// CleanUp removes the stored challenge for domain/token.
+func (a AcmeHTTPChallengeProvider) CleanUp(domain, token, _ string) error {
+ a.challengeCache.Remove(domain + "/" + token)
+ return nil
+}
+
+// SetupHTTPACMEChallengeServer returns the handler for the plain-HTTP port:
+// it answers HTTP-01 ACME challenges from the challenge cache and redirects
+// every other request to HTTPS (appending sslPort when it is non-standard).
+func SetupHTTPACMEChallengeServer(challengeCache cache.ICache, sslPort uint) http.HandlerFunc {
+ // handle custom-ssl-ports to be added on https redirects
+ portPart := ""
+ if sslPort != 443 {
+ portPart = fmt.Sprintf(":%d", sslPort)
+ }
+
+ return func(w http.ResponseWriter, req *http.Request) {
+ ctx := context.New(w, req)
+ domain := ctx.TrimHostPort()
+
+ // it's an acme request
+ if strings.HasPrefix(ctx.Path(), challengePath) {
+ challenge, ok := challengeCache.Get(domain + "/" + strings.TrimPrefix(ctx.Path(), challengePath))
+ if !ok || challenge == nil {
+ log.Info().Msgf("HTTP-ACME challenge for '%s' failed: token not found", domain)
+ ctx.String("no challenge for this token", http.StatusNotFound)
+ // BUGFIX: stop here — without this return we would log "succeeded"
+ // and panic on the nil challenge type assertion below
+ return
+ }
+ log.Info().Msgf("HTTP-ACME challenge for '%s' succeeded", domain)
+ ctx.String(challenge.(string))
+ return
+ }
+
+ // it's a normal http request that needs to be redirected
+ u, err := url.Parse(fmt.Sprintf("https://%s%s%s", domain, portPart, ctx.Path()))
+ if err != nil {
+ log.Error().Err(err).Msg("could not craft http to https redirect")
+ ctx.String("", http.StatusInternalServerError)
+ // BUGFIX: stop here — u is nil, so u.String() below would panic
+ return
+ }
+
+ newURL := u.String()
+ log.Debug().Msgf("redirect http to https: %s", newURL)
+ ctx.Redirect(newURL, http.StatusMovedPermanently)
+ }
+}
diff --git a/server/certificates/certificates.go b/server/certificates/certificates.go
new file mode 100644
index 0000000..aeb619f
--- /dev/null
+++ b/server/certificates/certificates.go
@@ -0,0 +1,416 @@
+package certificates
+
+import (
+ "context"
+ "crypto/tls"
+ "crypto/x509"
+ "errors"
+ "fmt"
+ "strconv"
+ "strings"
+ "time"
+
+ "github.com/go-acme/lego/v4/certcrypto"
+ "github.com/go-acme/lego/v4/certificate"
+ "github.com/go-acme/lego/v4/challenge/tlsalpn01"
+ "github.com/go-acme/lego/v4/lego"
+ "github.com/hashicorp/golang-lru/v2/expirable"
+ "github.com/reugn/equalizer"
+ "github.com/rs/zerolog"
+ "github.com/rs/zerolog/log"
+
+ "codeberg.org/codeberg/pages/server/cache"
+ psContext "codeberg.org/codeberg/pages/server/context"
+ "codeberg.org/codeberg/pages/server/database"
+ dnsutils "codeberg.org/codeberg/pages/server/dns"
+ "codeberg.org/codeberg/pages/server/gitea"
+ "codeberg.org/codeberg/pages/server/upstream"
+)
+
+// ErrUserRateLimitExceeded is the sentinel returned (wrapped) by
+// AcmeClient.checkUserLimit when a user has requested more than
+// 10 new certificates within 24 hours.
+var ErrUserRateLimitExceeded = errors.New("rate limit exceeded: 10 certificates per user per 24 hours")
+
+// TLSConfig returns the configuration for generating, serving and cleaning up Let's Encrypt certificates.
+func TLSConfig(mainDomainSuffix string,
+ giteaClient *gitea.Client,
+ acmeClient *AcmeClient,
+ firstDefaultBranch string,
+ challengeCache, canonicalDomainCache cache.ICache,
+ certDB database.CertDB,
+ noDNS01 bool,
+ rawDomain string,
+) *tls.Config {
+ // every cert is at most 24h in the cache and 7 days before expiry the cert is renewed
+ keyCache := expirable.NewLRU[string, *tls.Certificate](32, nil, 24*time.Hour)
+
+ return &tls.Config{
+ // check DNS name & get certificate from Let's Encrypt
+ GetCertificate: func(info *tls.ClientHelloInfo) (*tls.Certificate, error) {
+ ctx := psContext.New(nil, nil)
+ log := log.With().Str("ReqId", ctx.ReqId).Logger()
+
+ domain := strings.ToLower(strings.TrimSpace(info.ServerName))
+ log.Debug().Str("domain", domain).Msg("start: get tls certificate")
+ if len(domain) < 1 {
+ return nil, errors.New("missing domain info via SNI (RFC 4366, Section 3.1)")
+ }
+
+ // https request init is actually a acme challenge
+ if info.SupportedProtos != nil {
+ for _, proto := range info.SupportedProtos {
+ if proto != tlsalpn01.ACMETLS1Protocol {
+ continue
+ }
+ log.Info().Msgf("Detect ACME-TLS1 challenge for '%s'", domain)
+
+ challenge, ok := challengeCache.Get(domain)
+ if !ok {
+ return nil, errors.New("no challenge for this domain")
+ }
+ cert, err := tlsalpn01.ChallengeCert(domain, challenge.(string))
+ if err != nil {
+ return nil, err
+ }
+ return cert, nil
+ }
+ }
+
+ targetOwner := ""
+ mayObtainCert := true
+
+ if strings.HasSuffix(domain, mainDomainSuffix) || strings.EqualFold(domain, mainDomainSuffix[1:]) {
+ if noDNS01 {
+ // Limit the domains allowed to request a certificate to pages-server domains
+ // and domains for an existing user of org
+ if !strings.EqualFold(domain, mainDomainSuffix[1:]) && !strings.EqualFold(domain, rawDomain) {
+ targetOwner := strings.TrimSuffix(domain, mainDomainSuffix)
+ owner_exist, err := giteaClient.GiteaCheckIfOwnerExists(targetOwner)
+ mayObtainCert = owner_exist
+ if err != nil {
+ log.Error().Err(err).Msgf("Failed to check '%s' existence on the forge: %s", targetOwner, err)
+ mayObtainCert = false
+ }
+ }
+ } else {
+ // deliver default certificate for the main domain (*.codeberg.page)
+ domain = mainDomainSuffix
+ }
+ } else {
+ var targetRepo, targetBranch string
+ targetOwner, targetRepo, targetBranch = dnsutils.GetTargetFromDNS(domain, mainDomainSuffix, firstDefaultBranch)
+ if targetOwner == "" {
+ // DNS not set up, return main certificate to redirect to the docs
+ domain = mainDomainSuffix
+ } else {
+ targetOpt := &upstream.Options{
+ TargetOwner: targetOwner,
+ TargetRepo: targetRepo,
+ TargetBranch: targetBranch,
+ }
+ _, valid := targetOpt.CheckCanonicalDomain(ctx, giteaClient, domain, mainDomainSuffix, canonicalDomainCache)
+ if !valid {
+ // We shouldn't obtain a certificate when we cannot check if the
+ // repository has specified this domain in the `.domains` file.
+ mayObtainCert = false
+ }
+ }
+ }
+
+ if tlsCertificate, ok := keyCache.Get(domain); ok {
+ // we can use an existing certificate object
+ return tlsCertificate, nil
+ }
+
+ var tlsCertificate *tls.Certificate
+ var err error
+ if tlsCertificate, err = acmeClient.retrieveCertFromDB(log, domain, mainDomainSuffix, false, certDB); err != nil {
+ if !errors.Is(err, database.ErrNotFound) {
+ return nil, err
+ }
+ // we could not find a cert in db, request a new certificate
+
+ // first check if we are allowed to obtain a cert for this domain
+ if strings.EqualFold(domain, mainDomainSuffix) {
+ return nil, errors.New("won't request certificate for main domain, something really bad has happened")
+ }
+ if !mayObtainCert {
+ return nil, fmt.Errorf("won't request certificate for %q", domain)
+ }
+
+ tlsCertificate, err = acmeClient.obtainCert(log, acmeClient.legoClient, []string{domain}, nil, targetOwner, false, mainDomainSuffix, certDB)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ keyCache.Add(domain, tlsCertificate)
+
+ return tlsCertificate, nil
+ },
+ NextProtos: []string{
+ "h2",
+ "http/1.1",
+ tlsalpn01.ACMETLS1Protocol,
+ },
+
+ // generated 2021-07-13, Mozilla Guideline v5.6, Go 1.14.4, intermediate configuration
+ // https://ssl-config.mozilla.org/#server=go&version=1.14.4&config=intermediate&guideline=5.6
+ MinVersion: tls.VersionTLS12,
+ CipherSuites: []uint16{
+ tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,
+ tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,
+ tls.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,
+ tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,
+ tls.TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,
+ tls.TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,
+ },
+ }
+}
+
+// checkUserLimit enforces the per-user certificate rate limit (a token
+// bucket of 10 per 24 hours, created lazily per user). It returns a
+// wrapped ErrUserRateLimitExceeded once the bucket is empty.
+//
+// NOTE(review): the map below is read and written without visible locking
+// here; confirm AcmeClient serializes calls or guards this map elsewhere.
+func (c *AcmeClient) checkUserLimit(user string) error {
+	userLimit, ok := c.acmeClientCertificateLimitPerUser[user]
+	if !ok {
+		// Each user can only add 10 new domains per day.
+		userLimit = equalizer.NewTokenBucket(10, time.Hour*24)
+		c.acmeClientCertificateLimitPerUser[user] = userLimit
+	}
+	if !userLimit.Ask() {
+		return fmt.Errorf("user '%s' error: %w", user, ErrUserRateLimitExceeded)
+	}
+	return nil
+}
+
+// retrieveCertFromDB loads the certificate stored under sni from certDB and
+// parses it into a usable tls.Certificate. For non-main-domain certs it also
+// triggers an asynchronous renewal when the leaf expires within 7 days,
+// unless the CSR field stores a future "retry at" unix timestamp (a backoff
+// marker written by obtainCert after a failed renewal).
+// Returns database.ErrNotFound when no cert is stored for sni.
+func (c *AcmeClient) retrieveCertFromDB(log zerolog.Logger, sni, mainDomainSuffix string, useDnsProvider bool, certDB database.CertDB) (*tls.Certificate, error) {
+	// parse certificate from database
+	res, err := certDB.Get(sni)
+	if err != nil {
+		return nil, err
+	} else if res == nil {
+		return nil, database.ErrNotFound
+	}
+
+	tlsCertificate, err := tls.X509KeyPair(res.Certificate, res.PrivateKey)
+	if err != nil {
+		return nil, err
+	}
+
+	// TODO: document & put into own function
+	if !strings.EqualFold(sni, mainDomainSuffix) {
+		tlsCertificate.Leaf, err = leaf(&tlsCertificate)
+		if err != nil {
+			return nil, err
+		}
+		// renew certificates 7 days before they expire
+		if tlsCertificate.Leaf.NotAfter.Before(time.Now().Add(7 * 24 * time.Hour)) {
+			// TODO: use ValidTill of custom cert struct
+			if len(res.CSR) > 0 {
+				// CSR stores the time when the renewal shall be tried again
+				nextTryUnix, err := strconv.ParseInt(string(res.CSR), 10, 64)
+				if err == nil && time.Now().Before(time.Unix(nextTryUnix, 0)) {
+					return &tlsCertificate, nil
+				}
+			}
+			// TODO: make a queue ?
+			// NOTE(review): this goroutine is untracked (no ctx / WaitGroup);
+			// obtainCert's per-name lock prevents duplicate concurrent renewals.
+			go (func() {
+				res.CSR = nil // acme client doesn't like CSR to be set
+				if _, err := c.obtainCert(log, c.legoClient, []string{sni}, res, "", useDnsProvider, mainDomainSuffix, certDB); err != nil {
+					log.Error().Msgf("Couldn't renew certificate for %s: %v", sni, err)
+				}
+			})()
+		}
+	}
+
+	return &tlsCertificate, nil
+}
+
+// obtainCert requests (or renews, when renew is non-nil) a certificate for
+// domains via acmeClient and stores the result in keyDatabase under the
+// first domain (wildcard "*" prefix stripped).
+//
+// Concurrency: requests for the same name are serialized via obtainLocks;
+// losers poll until the winner finishes, then read the cert from the DB.
+// Failure handling: on ACME failure a still-valid old cert is reused (with
+// a 6h retry marker abused into the CSR field), otherwise a mock cert is
+// generated so clients still complete a TLS handshake.
+func (c *AcmeClient) obtainCert(log zerolog.Logger, acmeClient *lego.Client, domains []string, renew *certificate.Resource, user string, useDnsProvider bool, mainDomainSuffix string, keyDatabase database.CertDB) (*tls.Certificate, error) {
+	name := strings.TrimPrefix(domains[0], "*")
+
+	// lock to avoid simultaneous requests
+	_, working := c.obtainLocks.LoadOrStore(name, struct{}{})
+	if working {
+		for working {
+			time.Sleep(100 * time.Millisecond)
+			_, working = c.obtainLocks.Load(name)
+		}
+		cert, err := c.retrieveCertFromDB(log, name, mainDomainSuffix, useDnsProvider, keyDatabase)
+		if err != nil {
+			return nil, fmt.Errorf("certificate failed in synchronous request: %w", err)
+		}
+		return cert, nil
+	}
+	defer c.obtainLocks.Delete(name)
+
+	if acmeClient == nil {
+		if useDnsProvider {
+			return mockCert(domains[0], "DNS ACME client is not defined", mainDomainSuffix, keyDatabase)
+		} else {
+			return mockCert(domains[0], "ACME client uninitialized. This is a server error, please report!", mainDomainSuffix, keyDatabase)
+		}
+	}
+
+	// request actual cert
+	var res *certificate.Resource
+	var err error
+	if renew != nil && renew.CertURL != "" {
+		if c.acmeUseRateLimits {
+			c.acmeClientRequestLimit.Take()
+		}
+		log.Debug().Msgf("Renewing certificate for: %v", domains)
+		res, err = acmeClient.Certificate.Renew(*renew, true, false, "")
+		if err != nil {
+			log.Error().Err(err).Msgf("Couldn't renew certificate for %v, trying to request a new one", domains)
+			if c.acmeUseRateLimits {
+				c.acmeClientFailLimit.Take()
+			}
+			res = nil
+		}
+	}
+	if res == nil {
+		if user != "" {
+			if err := c.checkUserLimit(user); err != nil {
+				return nil, err
+			}
+		}
+
+		if c.acmeUseRateLimits {
+			c.acmeClientOrderLimit.Take()
+			c.acmeClientRequestLimit.Take()
+		}
+		log.Debug().Msgf("Re-requesting new certificate for %v", domains)
+		res, err = acmeClient.Certificate.Obtain(certificate.ObtainRequest{
+			Domains:    domains,
+			Bundle:     true,
+			MustStaple: false,
+		})
+		if c.acmeUseRateLimits && err != nil {
+			c.acmeClientFailLimit.Take()
+		}
+	}
+	if err != nil {
+		// BUGFIX: log message said "a certificate or %v" - typo for "for".
+		log.Error().Err(err).Msgf("Couldn't obtain again a certificate for %v", domains)
+		if renew != nil && renew.CertURL != "" {
+			// fall back to the previous cert if it is still valid
+			tlsCertificate, err := tls.X509KeyPair(renew.Certificate, renew.PrivateKey)
+			if err != nil {
+				mockC, err2 := mockCert(domains[0], err.Error(), mainDomainSuffix, keyDatabase)
+				if err2 != nil {
+					return nil, errors.Join(err, err2)
+				}
+				return mockC, err
+			}
+			leaf, err := leaf(&tlsCertificate)
+			if err == nil && leaf.NotAfter.After(time.Now()) {
+				tlsCertificate.Leaf = leaf
+				// avoid sending a mock cert instead of a still valid cert, instead abuse CSR field to store time to try again at
+				renew.CSR = []byte(strconv.FormatInt(time.Now().Add(6*time.Hour).Unix(), 10))
+				if err := keyDatabase.Put(name, renew); err != nil {
+					mockC, err2 := mockCert(domains[0], err.Error(), mainDomainSuffix, keyDatabase)
+					if err2 != nil {
+						return nil, errors.Join(err, err2)
+					}
+					return mockC, err
+				}
+				return &tlsCertificate, nil
+			}
+		}
+		return mockCert(domains[0], err.Error(), mainDomainSuffix, keyDatabase)
+	}
+	log.Debug().Msgf("Obtained certificate for %v", domains)
+
+	if err := keyDatabase.Put(name, res); err != nil {
+		return nil, err
+	}
+	tlsCertificate, err := tls.X509KeyPair(res.Certificate, res.PrivateKey)
+	if err != nil {
+		return nil, err
+	}
+	return &tlsCertificate, nil
+}
+
+// SetupMainDomainCertificates ensures a certificate for the wildcard main
+// domain exists in certDB, obtaining one via the DNS-challenge lego client
+// when missing. A failed obtain is only logged: the server keeps running
+// with mock certificates. Only a broken cert database is a hard error.
+func SetupMainDomainCertificates(log zerolog.Logger, mainDomainSuffix string, acmeClient *AcmeClient, certDB database.CertDB) error {
+	// getting main cert before ACME account so that we can fail here without hitting rate limits
+	mainCertBytes, err := certDB.Get(mainDomainSuffix)
+	if err != nil && !errors.Is(err, database.ErrNotFound) {
+		return fmt.Errorf("cert database is not working: %w", err)
+	}
+
+	if mainCertBytes == nil {
+		_, err = acmeClient.obtainCert(log, acmeClient.dnsChallengerLegoClient, []string{"*" + mainDomainSuffix, mainDomainSuffix[1:]}, nil, "", true, mainDomainSuffix, certDB)
+		if err != nil {
+			// BUGFIX: this path obtains the initial cert; it does not renew.
+			log.Error().Err(err).Msg("Couldn't obtain main domain certificate, continuing with mock certs only")
+		}
+	}
+
+	return nil
+}
+
+// MaintainCertDB loops until ctx is done, once per interval: it deletes
+// certificates that will have expired before the next run (except the main
+// domain cert) and kicks off an async renewal of the main-domain cert when
+// it expires within 30 days.
+func MaintainCertDB(log zerolog.Logger, ctx context.Context, interval time.Duration, acmeClient *AcmeClient, mainDomainSuffix string, certDB database.CertDB) {
+	for {
+		// delete expired certs that will be invalid until next clean up
+		threshold := time.Now().Add(interval)
+		expiredCertCount := 0
+
+		certs, err := certDB.Items(0, 0)
+		if err != nil {
+			log.Error().Err(err).Msg("could not get certs from list")
+		} else {
+			for _, cert := range certs {
+				if !strings.EqualFold(cert.Domain, strings.TrimPrefix(mainDomainSuffix, ".")) {
+					if time.Unix(cert.ValidTill, 0).Before(threshold) {
+						err := certDB.Delete(cert.Domain)
+						if err != nil {
+							log.Error().Err(err).Msgf("Deleting expired certificate for %q failed", cert.Domain)
+						} else {
+							expiredCertCount++
+						}
+					}
+				}
+			}
+			log.Debug().Msgf("Removed %d expired certificates from the database", expiredCertCount)
+		}
+
+		// update main cert
+		res, err := certDB.Get(mainDomainSuffix)
+		if err != nil {
+			log.Error().Msgf("Couldn't get cert for domain %q", mainDomainSuffix)
+		} else if res == nil {
+			log.Error().Msgf("Couldn't renew certificate for main domain %q expected main domain cert to exist, but it's missing - seems like the database is corrupted", mainDomainSuffix)
+		} else {
+			tlsCertificates, err := certcrypto.ParsePEMBundle(res.Certificate)
+			if err != nil {
+				// BUGFIX: zerolog events are discarded unless finished with a
+				// Msg/Send call; the previous `log.Error().Err(...)` chain
+				// (without .Msg) never emitted anything.
+				log.Error().Err(err).Msg("could not parse cert for mainDomainSuffix")
+			} else if tlsCertificates[0].NotAfter.Before(time.Now().Add(30 * 24 * time.Hour)) {
+				// renew main certificate 30 days before it expires
+				go (func() {
+					_, err = acmeClient.obtainCert(log, acmeClient.dnsChallengerLegoClient, []string{"*" + mainDomainSuffix, mainDomainSuffix[1:]}, res, "", true, mainDomainSuffix, certDB)
+					if err != nil {
+						log.Error().Err(err).Msg("Couldn't renew certificate for main domain")
+					}
+				})()
+			}
+		}
+
+		select {
+		case <-ctx.Done():
+			return
+		case <-time.After(interval):
+		}
+	}
+}
+
+// leaf returns the parsed leaf certificate of c, using the cached c.Leaf
+// when present and otherwise parsing c.Certificate[0]. A successful parse
+// is memoized back into c.Leaf.
+func leaf(c *tls.Certificate) (*x509.Certificate, error) {
+	if cached := c.Leaf; cached != nil {
+		return cached, nil
+	}
+
+	parsed, err := x509.ParseCertificate(c.Certificate[0])
+	if err != nil {
+		return nil, fmt.Errorf("tlsCert - failed to parse leaf: %w", err)
+	}
+	c.Leaf = parsed
+	return parsed, nil
+}
diff --git a/server/certificates/mock.go b/server/certificates/mock.go
new file mode 100644
index 0000000..a28d0f4
--- /dev/null
+++ b/server/certificates/mock.go
@@ -0,0 +1,87 @@
+package certificates
+
+import (
+ "bytes"
+ "crypto/rand"
+ "crypto/rsa"
+ "crypto/tls"
+ "crypto/x509"
+ "crypto/x509/pkix"
+ "encoding/pem"
+ "math/big"
+ "time"
+
+ "github.com/go-acme/lego/v4/certcrypto"
+ "github.com/go-acme/lego/v4/certificate"
+ "github.com/rs/zerolog/log"
+
+ "codeberg.org/codeberg/pages/server/database"
+)
+
+// mockCert generates, stores and returns a self-signed placeholder
+// certificate for domain, embedding msg in the subject so users can see why
+// the real ACME certificate could not be obtained. Its NotAfter is set so
+// the 7-days-before-expiry renewal logic waits ~6 hours before retrying.
+func mockCert(domain, msg, mainDomainSuffix string, keyDatabase database.CertDB) (*tls.Certificate, error) {
+	key, err := certcrypto.GeneratePrivateKey(certcrypto.RSA2048)
+	if err != nil {
+		return nil, err
+	}
+
+	template := x509.Certificate{
+		SerialNumber: big.NewInt(1),
+		Subject: pkix.Name{
+			CommonName:   domain,
+			Organization: []string{"Codeberg Pages Error Certificate (couldn't obtain ACME certificate)"},
+			OrganizationalUnit: []string{
+				"Will not try again for 6 hours to avoid hitting rate limits for your domain.",
+				"Check https://docs.codeberg.org/codeberg-pages/troubleshooting/ for troubleshooting tips, and feel " +
+					"free to create an issue at https://codeberg.org/Codeberg/pages-server if you can't solve it.\n",
+				"Error message: " + msg,
+			},
+		},
+
+		// certificates younger than 7 days are renewed, so this enforces the cert to not be renewed for a 6 hours
+		NotAfter:  time.Now().Add(time.Hour*24*7 + time.Hour*6),
+		NotBefore: time.Now(),
+
+		KeyUsage:              x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature,
+		ExtKeyUsage:           []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth},
+		BasicConstraintsValid: true,
+	}
+	// self-signed: template is both subject and issuer
+	certBytes, err := x509.CreateCertificate(
+		rand.Reader,
+		&template,
+		&template,
+		&key.(*rsa.PrivateKey).PublicKey,
+		key,
+	)
+	if err != nil {
+		return nil, err
+	}
+
+	out := &bytes.Buffer{}
+	err = pem.Encode(out, &pem.Block{
+		Bytes: certBytes,
+		Type:  "CERTIFICATE",
+	})
+	if err != nil {
+		return nil, err
+	}
+	outBytes := out.Bytes()
+	res := &certificate.Resource{
+		PrivateKey:        certcrypto.PEMEncode(key),
+		Certificate:       outBytes,
+		IssuerCertificate: outBytes,
+		Domain:            domain,
+	}
+	// wildcard (or apex) certs are stored under the ".suffix" key
+	databaseName := domain
+	if domain == "*"+mainDomainSuffix || domain == mainDomainSuffix[1:] {
+		databaseName = mainDomainSuffix
+	}
+	if err := keyDatabase.Put(databaseName, res); err != nil {
+		// BUGFIX: zerolog events are no-ops without a Msg/Send finisher, so
+		// the previous `log.Error().Err(err)` silently dropped this failure.
+		log.Error().Err(err).Msg("could not store mock certificate in database")
+	}
+
+	tlsCertificate, err := tls.X509KeyPair(res.Certificate, res.PrivateKey)
+	if err != nil {
+		return nil, err
+	}
+	return &tlsCertificate, nil
+}
diff --git a/server/certificates/mock_test.go b/server/certificates/mock_test.go
new file mode 100644
index 0000000..644e8a9
--- /dev/null
+++ b/server/certificates/mock_test.go
@@ -0,0 +1,21 @@
+package certificates
+
+import (
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/mock"
+
+ "codeberg.org/codeberg/pages/server/database"
+)
+
+// TestMockCert checks that mockCert returns a non-empty certificate and
+// persists it through the CertDB mock without error.
+func TestMockCert(t *testing.T) {
+	certDB := database.NewMockCertDB(t)
+	certDB.Mock.On("Put", mock.Anything, mock.Anything).Return(nil)
+
+	result, err := mockCert("example.com", "some error msg", "codeberg.page", certDB)
+	assert.NoError(t, err)
+	if assert.NotEmpty(t, result) {
+		assert.NotEmpty(t, result.Certificate)
+	}
+}
diff --git a/server/context/context.go b/server/context/context.go
new file mode 100644
index 0000000..e695ab7
--- /dev/null
+++ b/server/context/context.go
@@ -0,0 +1,72 @@
+package context
+
+import (
+ stdContext "context"
+ "net/http"
+
+ "codeberg.org/codeberg/pages/server/utils"
+ "github.com/hashicorp/go-uuid"
+ "github.com/rs/zerolog/log"
+)
+
+// Context bundles the ResponseWriter and Request of one HTTP request
+// together with a per-request id used to correlate log lines.
+type Context struct {
+	RespWriter http.ResponseWriter
+	Req        *http.Request
+	StatusCode int    // initialized to http.StatusOK by New
+	ReqId      string // random UUID, or "ERROR" if generation failed
+}
+
+// New wraps a response writer / request pair into a Context and assigns a
+// fresh request id ("ERROR" when UUID generation fails).
+func New(w http.ResponseWriter, r *http.Request) *Context {
+	reqID, err := uuid.GenerateUUID()
+	if err != nil {
+		log.Error().Err(err).Msg("Failed to generate request id, assigning error value")
+		reqID = "ERROR"
+	}
+
+	return &Context{
+		RespWriter: w,
+		Req:        r,
+		StatusCode: http.StatusOK,
+		ReqId:      reqID,
+	}
+}
+
+// Context returns the request's context, or a background context when no
+// request is attached (e.g. when New was called with nils).
+func (c *Context) Context() stdContext.Context {
+	if c.Req == nil {
+		return stdContext.Background()
+	}
+	return c.Req.Context()
+}
+
+// Response returns the http.Response attached to the request, if any.
+func (c *Context) Response() *http.Response {
+	if c.Req == nil {
+		return nil
+	}
+	return c.Req.Response
+}
+
+// String writes raw as the response body, using the optional status code
+// (defaults to 200 OK). Write errors are deliberately ignored.
+func (c *Context) String(raw string, status ...int) {
+	code := http.StatusOK
+	if len(status) > 0 {
+		code = status[0]
+	}
+	c.RespWriter.WriteHeader(code)
+	_, _ = c.RespWriter.Write([]byte(raw))
+}
+
+// Redirect sends an HTTP redirect to uri with the given status code.
+func (c *Context) Redirect(uri string, statusCode int) {
+	http.Redirect(c.RespWriter, c.Req, uri, statusCode)
+}
+
+// Path returns the requested path, normalized via utils.CleanPath.
+func (c *Context) Path() string {
+	return utils.CleanPath(c.Req.URL.Path)
+}
+
+// Host returns the host component of the request URL.
+// NOTE(review): for server-side requests the parsed URL's Host is usually
+// empty; callers wanting the Host header likely need c.Req.Host (as
+// TrimHostPort uses) — confirm intended usage.
+func (c *Context) Host() string {
+	return c.Req.URL.Host
+}
+
+// TrimHostPort returns the request's Host header with any ":port" removed.
+func (c *Context) TrimHostPort() string {
+	return utils.TrimHostPort(c.Req.Host)
+}
diff --git a/server/database/interface.go b/server/database/interface.go
new file mode 100644
index 0000000..7fdbae7
--- /dev/null
+++ b/server/database/interface.go
@@ -0,0 +1,78 @@
+package database
+
+import (
+ "fmt"
+
+ "github.com/go-acme/lego/v4/certcrypto"
+ "github.com/go-acme/lego/v4/certificate"
+ "github.com/rs/zerolog/log"
+)
+
+//go:generate go install github.com/vektra/mockery/v2@latest
+//go:generate mockery --name CertDB --output . --filename mock.go --inpackage --case underscore
+
+// CertDB is the storage backend for obtained TLS certificates.
+type CertDB interface {
+	Close() error
+	// Put stores cert under name; a leading "." denotes a wildcard key.
+	Put(name string, cert *certificate.Resource) error
+	// Get returns the stored cert or an ErrNotFound-wrapping error.
+	Get(name string) (*certificate.Resource, error)
+	Delete(key string) error
+	// Items lists stored certs; pageSize 0 disables pagination.
+	Items(page, pageSize int) ([]*Cert, error)
+}
+
+// Cert is the xorm row model for a stored certificate: a flattened
+// certificate.Resource plus bookkeeping timestamps.
+type Cert struct {
+	Domain  string `xorm:"pk NOT NULL UNIQUE 'domain'"`
+	Created int64  `xorm:"created NOT NULL DEFAULT 0 'created'"`
+	Updated int64  `xorm:"updated NOT NULL DEFAULT 0 'updated'"`
+	// ValidTill is the leaf certificate's NotAfter in unix seconds.
+	ValidTill int64 `xorm:" NOT NULL DEFAULT 0 'valid_till'"`
+	// certificate.Resource
+	CertURL           string `xorm:"'cert_url'"`
+	CertStableURL     string `xorm:"'cert_stable_url'"`
+	PrivateKey        []byte `xorm:"'private_key'"`
+	Certificate       []byte `xorm:"'certificate'"`
+	IssuerCertificate []byte `xorm:"'issuer_certificate'"`
+}
+
+// Raw converts the stored row back into a lego certificate.Resource.
+func (c Cert) Raw() *certificate.Resource {
+	res := certificate.Resource{
+		Domain:            c.Domain,
+		CertURL:           c.CertURL,
+		CertStableURL:     c.CertStableURL,
+		PrivateKey:        c.PrivateKey,
+		Certificate:       c.Certificate,
+		IssuerCertificate: c.IssuerCertificate,
+	}
+	return &res
+}
+
+// toCert converts a lego certificate.Resource into the DB Cert model,
+// deriving ValidTill from the first certificate of the PEM bundle. A key
+// name starting with "." is normalized to the "*." wildcard form. A
+// name/Domain mismatch is currently only logged (see TODO).
+func toCert(name string, c *certificate.Resource) (*Cert, error) {
+	tlsCertificates, err := certcrypto.ParsePEMBundle(c.Certificate)
+	if err != nil {
+		return nil, err
+	}
+	if len(tlsCertificates) == 0 || tlsCertificates[0] == nil {
+		err := fmt.Errorf("parsed cert resource has no cert")
+		log.Error().Err(err).Str("domain", c.Domain).Msgf("cert: %v", c)
+		return nil, err
+	}
+	validTill := tlsCertificates[0].NotAfter.Unix()
+
+	// handle wildcard certs
+	// BUGFIX: guard the index so an empty name cannot panic on name[:1]
+	if len(name) > 0 && name[:1] == "." {
+		name = "*" + name
+	}
+	if name != c.Domain {
+		err := fmt.Errorf("domain key '%s' and cert domain '%s' not equal", name, c.Domain)
+		log.Error().Err(err).Msg("toCert conversion did discover mismatch")
+		// TODO: fail hard: return nil, err
+	}
+
+	return &Cert{
+		Domain:    c.Domain,
+		ValidTill: validTill,
+
+		CertURL:           c.CertURL,
+		CertStableURL:     c.CertStableURL,
+		PrivateKey:        c.PrivateKey,
+		Certificate:       c.Certificate,
+		IssuerCertificate: c.IssuerCertificate,
+	}, nil
+}
diff --git a/server/database/mock.go b/server/database/mock.go
new file mode 100644
index 0000000..e7e2c38
--- /dev/null
+++ b/server/database/mock.go
@@ -0,0 +1,122 @@
+// Code generated by mockery v2.20.0. DO NOT EDIT.
+
+package database
+
+import (
+ certificate "github.com/go-acme/lego/v4/certificate"
+ mock "github.com/stretchr/testify/mock"
+)
+
+// MockCertDB is an autogenerated mock type for the CertDB type
+type MockCertDB struct {
+ mock.Mock
+}
+
+// Close provides a mock function with given fields:
+func (_m *MockCertDB) Close() error {
+ ret := _m.Called()
+
+ var r0 error
+ if rf, ok := ret.Get(0).(func() error); ok {
+ r0 = rf()
+ } else {
+ r0 = ret.Error(0)
+ }
+
+ return r0
+}
+
+// Delete provides a mock function with given fields: key
+func (_m *MockCertDB) Delete(key string) error {
+ ret := _m.Called(key)
+
+ var r0 error
+ if rf, ok := ret.Get(0).(func(string) error); ok {
+ r0 = rf(key)
+ } else {
+ r0 = ret.Error(0)
+ }
+
+ return r0
+}
+
+// Get provides a mock function with given fields: name
+func (_m *MockCertDB) Get(name string) (*certificate.Resource, error) {
+ ret := _m.Called(name)
+
+ var r0 *certificate.Resource
+ var r1 error
+ if rf, ok := ret.Get(0).(func(string) (*certificate.Resource, error)); ok {
+ return rf(name)
+ }
+ if rf, ok := ret.Get(0).(func(string) *certificate.Resource); ok {
+ r0 = rf(name)
+ } else {
+ if ret.Get(0) != nil {
+ r0 = ret.Get(0).(*certificate.Resource)
+ }
+ }
+
+ if rf, ok := ret.Get(1).(func(string) error); ok {
+ r1 = rf(name)
+ } else {
+ r1 = ret.Error(1)
+ }
+
+ return r0, r1
+}
+
+// Items provides a mock function with given fields: page, pageSize
+func (_m *MockCertDB) Items(page int, pageSize int) ([]*Cert, error) {
+ ret := _m.Called(page, pageSize)
+
+ var r0 []*Cert
+ var r1 error
+ if rf, ok := ret.Get(0).(func(int, int) ([]*Cert, error)); ok {
+ return rf(page, pageSize)
+ }
+ if rf, ok := ret.Get(0).(func(int, int) []*Cert); ok {
+ r0 = rf(page, pageSize)
+ } else {
+ if ret.Get(0) != nil {
+ r0 = ret.Get(0).([]*Cert)
+ }
+ }
+
+ if rf, ok := ret.Get(1).(func(int, int) error); ok {
+ r1 = rf(page, pageSize)
+ } else {
+ r1 = ret.Error(1)
+ }
+
+ return r0, r1
+}
+
+// Put provides a mock function with given fields: name, cert
+func (_m *MockCertDB) Put(name string, cert *certificate.Resource) error {
+ ret := _m.Called(name, cert)
+
+ var r0 error
+ if rf, ok := ret.Get(0).(func(string, *certificate.Resource) error); ok {
+ r0 = rf(name, cert)
+ } else {
+ r0 = ret.Error(0)
+ }
+
+ return r0
+}
+
+type mockConstructorTestingTNewMockCertDB interface {
+ mock.TestingT
+ Cleanup(func())
+}
+
+// NewMockCertDB creates a new instance of MockCertDB. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.
+func NewMockCertDB(t mockConstructorTestingTNewMockCertDB) *MockCertDB {
+ mock := &MockCertDB{}
+ mock.Mock.Test(t)
+
+ t.Cleanup(func() { mock.AssertExpectations(t) })
+
+ return mock
+}
diff --git a/server/database/xorm.go b/server/database/xorm.go
new file mode 100644
index 0000000..63fa39e
--- /dev/null
+++ b/server/database/xorm.go
@@ -0,0 +1,138 @@
+package database
+
+import (
+ "errors"
+ "fmt"
+
+ "github.com/rs/zerolog/log"
+
+ "github.com/go-acme/lego/v4/certificate"
+ "xorm.io/xorm"
+
+ // register sql driver
+ _ "github.com/go-sql-driver/mysql"
+ _ "github.com/lib/pq"
+ _ "github.com/mattn/go-sqlite3"
+)
+
+// compile-time check that xDB satisfies CertDB
+var _ CertDB = xDB{}
+
+// ErrNotFound is returned (wrapped) by Get when no row exists for a name.
+var ErrNotFound = errors.New("entry not found")
+
+// xDB implements CertDB on top of an xorm engine.
+type xDB struct {
+	engine *xorm.Engine
+}
+
+// NewXormDB opens a cert database for the given xorm driver name and
+// connection string and syncs the Cert schema.
+func NewXormDB(dbType, dbConn string) (CertDB, error) {
+	if !supportedDriver(dbType) {
+		return nil, fmt.Errorf("not supported db type '%s'", dbType)
+	}
+	if dbConn == "" {
+		return nil, fmt.Errorf("no db connection provided")
+	}
+
+	e, err := xorm.NewEngine(dbType, dbConn)
+	if err != nil {
+		return nil, err
+	}
+
+	if err := e.Sync2(new(Cert)); err != nil {
+		// BUGFIX: error message had a misplaced colon ("model :%w")
+		return nil, fmt.Errorf("could not sync db model: %w", err)
+	}
+
+	return &xDB{
+		engine: e,
+	}, nil
+}
+
+// Close releases the underlying xorm engine and its connections.
+func (x xDB) Close() error {
+	return x.engine.Close()
+}
+
+// Put upserts the certificate for domain inside a transaction: update when
+// a row with the same primary key exists, insert otherwise.
+//
+// NOTE(review): xorm's Update skips zero-valued struct fields by default;
+// confirm cleared fields (e.g. an emptied CSR) are persisted as intended.
+func (x xDB) Put(domain string, cert *certificate.Resource) error {
+	log.Trace().Str("domain", cert.Domain).Msg("inserting cert to db")
+
+	c, err := toCert(domain, cert)
+	if err != nil {
+		return err
+	}
+
+	sess := x.engine.NewSession()
+	if err := sess.Begin(); err != nil {
+		return err
+	}
+	defer sess.Close()
+
+	if exist, _ := sess.ID(c.Domain).Exist(new(Cert)); exist {
+		if _, err := sess.ID(c.Domain).Update(c); err != nil {
+			return err
+		}
+	} else {
+		if _, err = sess.Insert(c); err != nil {
+			return err
+		}
+	}
+
+	return sess.Commit()
+}
+
+// Get fetches the certificate resource stored under domain, normalizing a
+// leading "." (wildcard key) to the stored "*." form. Returns a wrapped
+// ErrNotFound when no row exists.
+func (x xDB) Get(domain string) (*certificate.Resource, error) {
+	// handle wildcard certs
+	// BUGFIX: guard the index so an empty domain cannot panic on domain[:1]
+	if len(domain) > 0 && domain[:1] == "." {
+		domain = "*" + domain
+	}
+
+	cert := new(Cert)
+	log.Trace().Str("domain", domain).Msg("get cert from db")
+	if found, err := x.engine.ID(domain).Get(cert); err != nil {
+		return nil, err
+	} else if !found {
+		return nil, fmt.Errorf("%w: name='%s'", ErrNotFound, domain)
+	}
+	return cert.Raw(), nil
+}
+
+// Delete removes the certificate row stored under domain, normalizing a
+// leading "." (wildcard key) to the stored "*." form. Deleting a missing
+// row is not an error.
+func (x xDB) Delete(domain string) error {
+	// handle wildcard certs
+	// BUGFIX: guard the index so an empty domain cannot panic on domain[:1]
+	if len(domain) > 0 && domain[:1] == "." {
+		domain = "*" + domain
+	}
+
+	log.Trace().Str("domain", domain).Msg("delete cert from db")
+	_, err := x.engine.ID(domain).Delete(new(Cert))
+	return err
+}
+
+// Items returns certs from the db; if pageSize is 0 no limit is applied and
+// all certs are returned. Pages are 1-based; page values below 1 are
+// clamped to the first page.
+func (x xDB) Items(page, pageSize int) ([]*Cert, error) {
+	// paginated return
+	if pageSize > 0 {
+		certs := make([]*Cert, 0, pageSize)
+		// BUGFIX: the condition was `page >= 0`, which reset every valid
+		// page to 1 (and let negative pages produce a negative offset).
+		if page < 1 {
+			page = 1
+		}
+		err := x.engine.Limit(pageSize, (page-1)*pageSize).Find(&certs)
+		return certs, err
+	}
+
+	// return all
+	certs := make([]*Cert, 0, 64)
+	err := x.engine.Find(&certs)
+	return certs, err
+}
+
+// Supported database drivers (xorm driver names accepted by NewXormDB)
+const (
+	DriverSqlite   = "sqlite3"
+	DriverMysql    = "mysql"
+	DriverPostgres = "postgres"
+)
+
+// supportedDriver reports whether driver is one of the sql driver names
+// this package registers.
+func supportedDriver(driver string) bool {
+	switch driver {
+	case DriverMysql, DriverPostgres, DriverSqlite:
+		return true
+	}
+	return false
+}
diff --git a/server/database/xorm_test.go b/server/database/xorm_test.go
new file mode 100644
index 0000000..50d8a7f
--- /dev/null
+++ b/server/database/xorm_test.go
@@ -0,0 +1,92 @@
+package database
+
+import (
+ "errors"
+ "testing"
+
+ "github.com/go-acme/lego/v4/certificate"
+ "github.com/stretchr/testify/assert"
+ "xorm.io/xorm"
+)
+
+// newTestDB creates an in-memory sqlite-backed cert database with the Cert
+// schema synced, for use in tests.
+func newTestDB(t *testing.T) *xDB {
+	e, err := xorm.NewEngine("sqlite3", ":memory:")
+	assert.NoError(t, err)
+	assert.NoError(t, e.Sync2(new(Cert)))
+	return &xDB{engine: e}
+}
+
+// TestSanitizeWildcardCerts verifies that a ".domain" key is normalized to
+// the stored "*.domain" wildcard form on both Put and Get, and that Get
+// wraps ErrNotFound for unknown names.
+func TestSanitizeWildcardCerts(t *testing.T) {
+	certDB := newTestDB(t)
+
+	_, err := certDB.Get(".not.found")
+	assert.True(t, errors.Is(err, ErrNotFound))
+
+	// TODO: cert key and cert domain mismatches do not fail hard yet
+	// https://codeberg.org/Codeberg/pages-server/src/commit/d8595cee882e53d7f44f1ddc4ef8a1f7b8f31d8d/server/database/interface.go#L64
+	//
+	// assert.Error(t, certDB.Put(".wildcard.de", &certificate.Resource{
+	// 	Domain:      "*.localhost.mock.directory",
+	// 	Certificate: localhost_mock_directory_certificate,
+	// }))
+
+	// insert new wildcard cert
+	assert.NoError(t, certDB.Put(".wildcard.de", &certificate.Resource{
+		Domain:      "*.wildcard.de",
+		Certificate: localhost_mock_directory_certificate,
+	}))
+
+	// update existing cert
+	assert.NoError(t, certDB.Put(".wildcard.de", &certificate.Resource{
+		Domain:      "*.wildcard.de",
+		Certificate: localhost_mock_directory_certificate,
+	}))
+
+	c1, err := certDB.Get(".wildcard.de")
+	assert.NoError(t, err)
+	c2, err := certDB.Get("*.wildcard.de")
+	assert.NoError(t, err)
+	assert.EqualValues(t, c1, c2)
+}
+
+var localhost_mock_directory_certificate = []byte(`-----BEGIN CERTIFICATE-----
+MIIDczCCAlugAwIBAgIIJyBaXHmLk6gwDQYJKoZIhvcNAQELBQAwKDEmMCQGA1UE
+AxMdUGViYmxlIEludGVybWVkaWF0ZSBDQSA0OWE0ZmIwHhcNMjMwMjEwMDEwOTA2
+WhcNMjgwMjEwMDEwOTA2WjAjMSEwHwYDVQQDExhsb2NhbGhvc3QubW9jay5kaXJl
+Y3RvcnkwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDIU/CjzS7t62Gj
+neEMqvP7sn99ULT7AEUzEfWL05fWG2z714qcUg1hXkZLgdVDgmsCpplyddip7+2t
+ZH/9rLPLMqJphzvOL4CF6jDLbeifETtKyjnt9vUZFnnNWcP3tu8lo8iYSl08qsUI
+Pp/hiEriAQzCDjTbR5m9xUPNPYqxzcS4ALzmmCX9Qfc4CuuhMkdv2G4TT7rylWrA
+SCSRPnGjeA7pCByfNrO/uXbxmzl3sMO3k5sqgMkx1QIHEN412V8+vtx88mt2sM6k
+xjzGZWWKXlRq+oufIKX9KPplhsCjMH6E3VNAzgOPYDqXagtUcGmLWghURltO8Mt2
+zwM6OgjjAgMBAAGjgaUwgaIwDgYDVR0PAQH/BAQDAgWgMB0GA1UdJQQWMBQGCCsG
+AQUFBwMBBggrBgEFBQcDAjAMBgNVHRMBAf8EAjAAMB0GA1UdDgQWBBSMQvlJ1755
+sarf8i1KNqj7s5o/aDAfBgNVHSMEGDAWgBTcZcxJMhWdP7MecHCCpNkFURC/YzAj
+BgNVHREEHDAaghhsb2NhbGhvc3QubW9jay5kaXJlY3RvcnkwDQYJKoZIhvcNAQEL
+BQADggEBACcd7TT28OWwzQN2PcH0aG38JX5Wp2iOS/unDCfWjNAztXHW7nBDMxza
+VtyebkJfccexpuVuOsjOX+bww0vtEYIvKX3/GbkhogksBrNkE0sJZtMnZWMR33wa
+YxAy/kJBTmLi02r8fX9ZhwjldStHKBav4USuP7DXZjrgX7LFQhR4LIDrPaYqQRZ8
+ltC3mM9LDQ9rQyIFP5cSBMO3RUAm4I8JyLoOdb/9G2uxjHr7r6eG1g8DmLYSKBsQ
+mWGQDOYgR3cGltDe2yMxM++yHY+b1uhxGOWMrDA1+1k7yI19LL8Ifi2FMovDfu/X
+JxYk1NNNtdctwaYJFenmGQvDaIq1KgE=
+-----END CERTIFICATE-----
+-----BEGIN CERTIFICATE-----
+MIIDUDCCAjigAwIBAgIIKBJ7IIA6W1swDQYJKoZIhvcNAQELBQAwIDEeMBwGA1UE
+AxMVUGViYmxlIFJvb3QgQ0EgNTdmZjE2MCAXDTIzMDIwOTA1MzMxMloYDzIwNTMw
+MjA5MDUzMzEyWjAoMSYwJAYDVQQDEx1QZWJibGUgSW50ZXJtZWRpYXRlIENBIDQ5
+YTRmYjCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBANOvlqRx8SXQFWo2
+gFCiXxls53eENcyr8+meFyjgnS853eEvplaPxoa2MREKd+ZYxM8EMMfj2XGvR3UI
+aqR5QyLQ9ihuRqvQo4fG91usBHgH+vDbGPdMX8gDmm9HgnmtOVhSKJU+M2jfE1SW
+UuWB9xOa3LMreTXbTNfZEMoXf+GcWZMbx5WPgEga3DvfmV+RsfNvB55eD7YAyZgF
+ZnQ3Dskmnxxlkz0EGgd7rqhFHHNB9jARlL22gITADwoWZidlr3ciM9DISymRKQ0c
+mRN15fQjNWdtuREgJlpXecbYQMGhdTOmFrqdHkveD1o63rGSC4z+s/APV6xIbcRp
+aNpO7L8CAwEAAaOBgzCBgDAOBgNVHQ8BAf8EBAMCAoQwHQYDVR0lBBYwFAYIKwYB
+BQUHAwEGCCsGAQUFBwMCMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFNxlzEky
+FZ0/sx5wcIKk2QVREL9jMB8GA1UdIwQYMBaAFOqfkm9rebIz4z0SDIKW5edLg5JM
+MA0GCSqGSIb3DQEBCwUAA4IBAQBRG9AHEnyj2fKzVDDbQaKHjAF5jh0gwyHoIeRK
+FkP9mQNSWxhvPWI0tK/E49LopzmVuzSbDd5kZsaii73rAs6f6Rf9W5veo3AFSEad
+stM+Zv0f2vWB38nuvkoCRLXMX+QUeuL65rKxdEpyArBju4L3/PqAZRgMLcrH+ak8
+nvw5RdAq+Km/ZWyJgGikK6cfMmh91YALCDFnoWUWrCjkBaBFKrG59ONV9f0IQX07
+aNfFXFCF5l466xw9dHjw5iaFib10cpY3iq4kyPYIMs6uaewkCtxWKKjiozM4g4w3
+HqwyUyZ52WUJOJ/6G9DJLDtN3fgGR+IAp8BhYd5CqOscnt3h
+-----END CERTIFICATE-----`)
diff --git a/server/dns/dns.go b/server/dns/dns.go
new file mode 100644
index 0000000..e29e42c
--- /dev/null
+++ b/server/dns/dns.go
@@ -0,0 +1,66 @@
+package dns
+
+import (
+ "net"
+ "strings"
+ "time"
+
+ "github.com/hashicorp/golang-lru/v2/expirable"
+)
+
+const (
+ lookupCacheValidity = 30 * time.Second
+ defaultPagesRepo = "pages"
+)
+
+// TODO(#316): refactor to not use global variables
+var lookupCache *expirable.LRU[string, string] = expirable.NewLRU[string, string](4096, nil, lookupCacheValidity)
+
+// GetTargetFromDNS searches for CNAME or TXT entries on the request domain ending with MainDomainSuffix.
+// If everything is fine, it returns the target data.
+func GetTargetFromDNS(domain, mainDomainSuffix, firstDefaultBranch string) (targetOwner, targetRepo, targetBranch string) {
+ // Get CNAME or TXT
+ var cname string
+ var err error
+
+ if entry, ok := lookupCache.Get(domain); ok {
+ cname = entry
+ } else {
+ cname, err = net.LookupCNAME(domain)
+ cname = strings.TrimSuffix(cname, ".")
+ if err != nil || !strings.HasSuffix(cname, mainDomainSuffix) {
+ cname = ""
+ // TODO: check if the A record matches!
+ names, err := net.LookupTXT(domain)
+ if err == nil {
+ for _, name := range names {
+ name = strings.TrimSuffix(strings.TrimSpace(name), ".")
+ if strings.HasSuffix(name, mainDomainSuffix) {
+ cname = name
+ break
+ }
+ }
+ }
+ }
+ _ = lookupCache.Add(domain, cname)
+ }
+ if cname == "" {
+ return
+ }
+ cnameParts := strings.Split(strings.TrimSuffix(cname, mainDomainSuffix), ".")
+ targetOwner = cnameParts[len(cnameParts)-1]
+ if len(cnameParts) > 1 {
+ targetRepo = cnameParts[len(cnameParts)-2]
+ }
+ if len(cnameParts) > 2 {
+ targetBranch = cnameParts[len(cnameParts)-3]
+ }
+ if targetRepo == "" {
+ targetRepo = defaultPagesRepo
+ }
+ if targetBranch == "" && targetRepo != defaultPagesRepo {
+ targetBranch = firstDefaultBranch
+ }
+ // if targetBranch is still empty, the caller must find the default branch
+ return
+}
diff --git a/server/gitea/cache.go b/server/gitea/cache.go
new file mode 100644
index 0000000..03f40a9
--- /dev/null
+++ b/server/gitea/cache.go
@@ -0,0 +1,154 @@
+package gitea
+
+import (
+ "bytes"
+ "encoding/json"
+ "fmt"
+ "io"
+ "net/http"
+ "time"
+
+ "github.com/rs/zerolog"
+ "github.com/rs/zerolog/log"
+
+ "codeberg.org/codeberg/pages/server/cache"
+ "codeberg.org/codeberg/pages/server/context"
+)
+
+const (
+ // defaultBranchCacheTimeout specifies the timeout for the default branch cache. It can be quite long.
+ defaultBranchCacheTimeout = 15 * time.Minute
+
+ // branchExistenceCacheTimeout specifies the timeout for the branch timestamp & existence cache. It should be shorter
+ // than fileCacheTimeout, as that gets invalidated if the branch timestamp has changed. That way, repo changes will be
+ // picked up faster, while still allowing the content to be cached longer if nothing changes.
+ branchExistenceCacheTimeout = 5 * time.Minute
+
+ // fileCacheTimeout specifies the timeout for the file content cache - you might want to make this quite long, depending
+ // on your available memory.
+ // TODO: move as option into cache interface
+ fileCacheTimeout = 5 * time.Minute
+
+ // ownerExistenceCacheTimeout specifies the timeout for the existence of a repo/org
+ ownerExistenceCacheTimeout = 5 * time.Minute
+
+ // fileCacheSizeLimit limits the maximum file size that will be cached, and is set to 1 MB by default.
+ fileCacheSizeLimit = int64(1000 * 1000)
+)
+
+// FileResponse is the cacheable result of a forge file lookup. The struct
+// (existence, symlink flag, ETag, MIME types) is stored as JSON metadata
+// under one cache key, while Body is stored under a separate "|Body" key.
+type FileResponse struct {
+	Exists    bool   `json:"exists"`
+	IsSymlink bool   `json:"isSymlink"`
+	ETag      string `json:"eTag"`
+	MimeType  string `json:"mimeType"` // uncompressed MIME type
+	RawMime   string `json:"rawMime"`  // raw MIME type (if compressed, type of compression)
+	Body      []byte `json:"-"`        // saved separately
+}
+
+// IsEmpty reports whether the response carries no body bytes.
+func (f FileResponse) IsEmpty() bool {
+	return len(f.Body) == 0
+}
+
+// createHttpResponse derives the HTTP headers and status code that a cached
+// FileResponse should be served with.
+func (f FileResponse) createHttpResponse(cacheKey string, decompress bool) (header http.Header, statusCode int) {
+	statusCode = http.StatusNotFound
+	if f.Exists {
+		statusCode = http.StatusOK
+	}
+
+	header = make(http.Header)
+	if f.IsSymlink {
+		header.Set(giteaObjectTypeHeader, objTypeSymlink)
+	}
+	header.Set(ETagHeader, f.ETag)
+
+	// pick the MIME type matching the representation the caller will send
+	contentType := f.RawMime
+	if decompress {
+		contentType = f.MimeType
+	}
+	header.Set(ContentTypeHeader, contentType)
+
+	header.Set(ContentLengthHeader, fmt.Sprintf("%d", len(f.Body)))
+	header.Set(PagesCacheIndicatorHeader, "true")
+
+	log.Trace().Msgf("fileCache for %q used", cacheKey)
+	return header, statusCode
+}
+
+// BranchTimestamp caches the existence of a branch together with the
+// timestamp of its latest commit. NotFound marks a cached negative lookup.
+type BranchTimestamp struct {
+	NotFound  bool      `json:"notFound"`
+	Branch    string    `json:"branch,omitempty"`
+	Timestamp time.Time `json:"timestamp,omitempty"`
+}
+
+// writeCacheReader tees a response body into an in-memory buffer while it is
+// being read, so the content can be written to the cache once fully consumed.
+type writeCacheReader struct {
+	originalReader io.ReadCloser // upstream body being proxied
+	buffer         *bytes.Buffer // bytes collected for the cache entry
+	fileResponse   *FileResponse // metadata template; Body is filled on Close
+	cacheKey       string
+	cache          cache.ICache
+	hasError       bool // upstream returned a non-EOF error; do not cache
+	doNotCache     bool // body exceeded fileCacheSizeLimit; do not cache
+	complete       bool // upstream reached EOF; result is safe to cache
+	log            zerolog.Logger
+}
+
+// Read forwards the read to the wrapped reader while tee-ing the bytes into
+// the buffer for later caching. Once the buffered size would exceed
+// fileCacheSizeLimit, buffering is abandoned for this response.
+func (t *writeCacheReader) Read(p []byte) (n int, err error) {
+	t.log.Trace().Msgf("[cache] read %q", t.cacheKey)
+	n, err = t.originalReader.Read(p)
+	// io.EOF marks a fully-read body; only a complete body may be cached.
+	if err == io.EOF {
+		t.complete = true
+	}
+	if err != nil && err != io.EOF {
+		t.log.Trace().Err(err).Msgf("[cache] original reader for %q has returned an error", t.cacheKey)
+		t.hasError = true
+	} else if n > 0 {
+		if t.buffer.Len()+n > int(fileCacheSizeLimit) {
+			// too large to cache: stop buffering and free what was collected
+			t.doNotCache = true
+			t.buffer.Reset()
+		} else {
+			// buffer write cannot fail; error deliberately ignored
+			_, _ = t.buffer.Write(p[:n])
+		}
+	}
+	return
+}
+
+// Close writes the buffered body and metadata to the cache — but only when
+// the upstream read completed (EOF) without error and stayed under the size
+// limit — and then closes the wrapped reader.
+func (t *writeCacheReader) Close() error {
+	doWrite := !t.hasError && !t.doNotCache && t.complete
+	fc := *t.fileResponse
+	fc.Body = t.buffer.Bytes()
+	if doWrite {
+		jsonToCache, err := json.Marshal(fc)
+		if err != nil {
+			// Previously the nil payload was still written to the cache,
+			// poisoning later metadata lookups; now we skip the write.
+			t.log.Trace().Err(err).Msgf("[cache] marshaling json for %q has returned an error", t.cacheKey+"|Metadata")
+		} else {
+			if err := t.cache.Set(t.cacheKey+"|Metadata", jsonToCache, fileCacheTimeout); err != nil {
+				t.log.Trace().Err(err).Msgf("[cache] writer for %q has returned an error", t.cacheKey+"|Metadata")
+			}
+			if err := t.cache.Set(t.cacheKey+"|Body", fc.Body, fileCacheTimeout); err != nil {
+				t.log.Trace().Err(err).Msgf("[cache] writer for %q has returned an error", t.cacheKey+"|Body")
+			}
+		}
+	}
+	t.log.Trace().Msgf("cacheReader for %q saved=%t closed", t.cacheKey, doWrite)
+	return t.originalReader.Close()
+}
+
+// CreateCacheReader wraps r so that everything read from it is buffered and,
+// on successful completion, persisted to the cache under cacheKey.
+// It returns nil when any required argument is missing.
+func (f FileResponse) CreateCacheReader(ctx *context.Context, r io.ReadCloser, cache cache.ICache, cacheKey string) io.ReadCloser {
+	log := log.With().Str("ReqId", ctx.ReqId).Logger()
+	if r == nil || cache == nil || cacheKey == "" {
+		log.Error().Msg("could not create CacheReader")
+		return nil
+	}
+
+	reader := &writeCacheReader{
+		originalReader: r,
+		buffer:         bytes.NewBuffer(nil),
+		fileResponse:   &f,
+		cache:          cache,
+		cacheKey:       cacheKey,
+		log:            log,
+	}
+	return reader
+}
diff --git a/server/gitea/client.go b/server/gitea/client.go
new file mode 100644
index 0000000..5633bf2
--- /dev/null
+++ b/server/gitea/client.go
@@ -0,0 +1,386 @@
+package gitea
+
+import (
+ "bytes"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "io"
+ "mime"
+ "net/http"
+ "net/url"
+ "path"
+ "strconv"
+ "strings"
+ "time"
+
+ "code.gitea.io/sdk/gitea"
+ "github.com/rs/zerolog/log"
+
+ "codeberg.org/codeberg/pages/config"
+ "codeberg.org/codeberg/pages/server/cache"
+ "codeberg.org/codeberg/pages/server/context"
+ "codeberg.org/codeberg/pages/server/version"
+)
+
+var ErrorNotFound = errors.New("not found")
+
+const (
+ // cache key prefixes
+ branchTimestampCacheKeyPrefix = "branchTime"
+ defaultBranchCacheKeyPrefix = "defaultBranch"
+ rawContentCacheKeyPrefix = "rawContent"
+ ownerExistenceKeyPrefix = "ownerExist"
+
+ // pages server
+ PagesCacheIndicatorHeader = "X-Pages-Cache"
+ symlinkReadLimit = 10000
+
+ // gitea
+ giteaObjectTypeHeader = "X-Gitea-Object-Type"
+ objTypeSymlink = "symlink"
+
+ // std
+ ETagHeader = "ETag"
+ ContentTypeHeader = "Content-Type"
+ ContentLengthHeader = "Content-Length"
+ ContentEncodingHeader = "Content-Encoding"
+)
+
+// Client wraps the forge SDK with response caching and MIME handling for the
+// pages server. sdkClient serves short API calls with a tight timeout;
+// sdkFileClient uses a long timeout for (potentially large) file downloads.
+type Client struct {
+	sdkClient     *gitea.Client
+	sdkFileClient *gitea.Client
+	responseCache cache.ICache
+
+	giteaRoot string // forge root URL without trailing slash
+
+	followSymlinks bool // resolve symlink objects server-side
+	supportLFS     bool // request LFS content when fetching files
+
+	forbiddenMimeTypes map[string]bool // MIME types replaced by defaultMimeType
+	defaultMimeType    string
+}
+
+// NewClient creates a Client for the configured forge (Forgejo/Gitea).
+// It builds two SDK clients: one with a 10s timeout for API calls and one
+// with a 1h timeout for file downloads.
+func NewClient(cfg config.ForgeConfig, respCache cache.ICache) (*Client, error) {
+	// url.Parse returns valid on almost anything...
+	rootURL, err := url.ParseRequestURI(cfg.Root)
+	if err != nil {
+		return nil, fmt.Errorf("invalid forgejo/gitea root url: %w", err)
+	}
+	giteaRoot := strings.TrimSuffix(rootURL.String(), "/")
+
+	forbiddenMimeTypes := make(map[string]bool, len(cfg.ForbiddenMimeTypes))
+	for _, mimeType := range cfg.ForbiddenMimeTypes {
+		forbiddenMimeTypes[mimeType] = true
+	}
+
+	defaultMimeType := cfg.DefaultMimeType
+	if defaultMimeType == "" {
+		defaultMimeType = "application/octet-stream"
+	}
+
+	sdkClient, err := gitea.NewClient(
+		giteaRoot,
+		gitea.SetHTTPClient(&http.Client{Timeout: 10 * time.Second}),
+		gitea.SetToken(cfg.Token),
+		gitea.SetUserAgent("pages-server/"+version.Version),
+	)
+	if err != nil {
+		return nil, err
+	}
+
+	sdkFileClient, err := gitea.NewClient(
+		giteaRoot,
+		gitea.SetHTTPClient(&http.Client{Timeout: 1 * time.Hour}),
+		gitea.SetToken(cfg.Token),
+		gitea.SetUserAgent("pages-server/"+version.Version),
+	)
+	if err != nil {
+		// previously unchecked: a failed file client was returned together
+		// with a non-nil *Client and a non-nil error
+		return nil, err
+	}
+
+	return &Client{
+		sdkClient:     sdkClient,
+		sdkFileClient: sdkFileClient,
+		responseCache: respCache,
+
+		giteaRoot: giteaRoot,
+
+		followSymlinks: cfg.FollowSymlinks,
+		supportLFS:     cfg.LFSEnabled,
+
+		forbiddenMimeTypes: forbiddenMimeTypes,
+		defaultMimeType:    defaultMimeType,
+	}, nil
+}
+
+// ContentWebLink returns the web URL of the resource on the forge, i.e.
+// {root}/{owner}/{repo}/src/branch/{branch}/{resource}.
+func (client *Client) ContentWebLink(targetOwner, targetRepo, branch, resource string) string {
+	// Do NOT pass the root URL through path.Join: path.Join collapses the
+	// double slash of the scheme, turning "https://host" into "https:/host".
+	return client.giteaRoot + "/" + path.Join(targetOwner, targetRepo, "src/branch", branch, resource)
+}
+
+// GiteaRawContent fetches the raw (non-decompressed) content of a file and
+// returns it fully read into memory.
+func (client *Client) GiteaRawContent(ctx *context.Context, targetOwner, targetRepo, ref, resource string) ([]byte, error) {
+	reader, _, _, err := client.ServeRawContent(ctx, targetOwner, targetRepo, ref, resource, false)
+	if err != nil {
+		return nil, err
+	}
+	defer reader.Close()
+
+	content, readErr := io.ReadAll(reader)
+	if readErr != nil {
+		return nil, readErr
+	}
+	return content, nil
+}
+
+// ServeRawContent serves a file from the forge, transparently caching both
+// metadata and body (bodies up to fileCacheSizeLimit) and following symlink
+// objects (when enabled) by recursing on the link destination. It returns a
+// reader for the content, the response headers, and an HTTP status code;
+// the caller must close the returned reader.
+func (client *Client) ServeRawContent(ctx *context.Context, targetOwner, targetRepo, ref, resource string, decompress bool) (io.ReadCloser, http.Header, int, error) {
+	cacheKey := fmt.Sprintf("%s/%s/%s|%s|%s", rawContentCacheKeyPrefix, targetOwner, targetRepo, ref, resource)
+	log := log.With().Str("ReqId", ctx.ReqId).Str("cache_key", cacheKey).Logger()
+	log.Trace().Msg("try file in cache")
+	// handle if cache entry exist
+	if cacheMetadata, ok := client.responseCache.Get(cacheKey + "|Metadata"); ok {
+		var cache FileResponse
+		err := json.Unmarshal(cacheMetadata.([]byte), &cache)
+		if err != nil {
+			log.Error().Err(err).Msgf("[cache] failed to unmarshal metadata for: %s", cacheKey)
+			return nil, nil, http.StatusNotFound, err
+		}
+
+		// cached negative lookup (file does not exist upstream)
+		if !cache.Exists {
+			return nil, nil, http.StatusNotFound, ErrorNotFound
+		}
+
+		// body is stored under a separate key (see writeCacheReader.Close)
+		body, ok := client.responseCache.Get(cacheKey + "|Body")
+		if !ok {
+			log.Error().Msgf("[cache] failed to get body for: %s", cacheKey)
+			return nil, nil, http.StatusNotFound, ErrorNotFound
+		}
+		cache.Body = body.([]byte)
+
+		cachedHeader, cachedStatusCode := cache.createHttpResponse(cacheKey, decompress)
+		if cache.Exists {
+			if cache.IsSymlink {
+				// symlinks cache only the destination path; recurse to serve it
+				linkDest := string(cache.Body)
+				log.Debug().Msgf("[cache] follow symlink from %q to %q", resource, linkDest)
+				return client.ServeRawContent(ctx, targetOwner, targetRepo, ref, linkDest, decompress)
+			} else {
+				log.Debug().Msgf("[cache] return %d bytes", len(cache.Body))
+				return io.NopCloser(bytes.NewReader(cache.Body)), cachedHeader, cachedStatusCode, nil
+			}
+		} else {
+			return nil, nil, http.StatusNotFound, ErrorNotFound
+		}
+	}
+	log.Trace().Msg("file not in cache")
+	// not in cache, open reader via gitea api
+	reader, resp, err := client.sdkFileClient.GetFileReader(targetOwner, targetRepo, ref, resource, client.supportLFS)
+	if resp != nil {
+		switch resp.StatusCode {
+		case http.StatusOK:
+			// first handle symlinks
+			{
+				objType := resp.Header.Get(giteaObjectTypeHeader)
+				log.Trace().Msgf("server raw content object %q", objType)
+				if client.followSymlinks && objType == objTypeSymlink {
+					defer reader.Close()
+					// read limited chars for symlink
+					linkDestBytes, err := io.ReadAll(io.LimitReader(reader, symlinkReadLimit))
+					if err != nil {
+						return nil, nil, http.StatusInternalServerError, err
+					}
+					linkDest := strings.TrimSpace(string(linkDestBytes))
+
+					// handle relative links
+					// we first remove the link from the path, and make a relative join (resolve parent paths like "/../" too)
+					linkDest = path.Join(path.Dir(resource), linkDest)
+
+					// we store symlink not content to reduce duplicates in cache
+					fileResponse := FileResponse{
+						Exists:    true,
+						IsSymlink: true,
+						Body:      []byte(linkDest),
+						ETag:      resp.Header.Get(ETagHeader),
+					}
+					log.Trace().Msgf("file response has %d bytes", len(fileResponse.Body))
+					jsonToCache, err := json.Marshal(fileResponse)
+					if err != nil {
+						log.Error().Err(err).Msgf("[cache] marshaling json metadata for %q has returned an error", cacheKey)
+					}
+					if err := client.responseCache.Set(cacheKey+"|Metadata", jsonToCache, fileCacheTimeout); err != nil {
+						log.Error().Err(err).Msg("[cache] error on cache write")
+					}
+					if err := client.responseCache.Set(cacheKey+"|Body", fileResponse.Body, fileCacheTimeout); err != nil {
+						log.Error().Err(err).Msg("[cache] error on cache write")
+					}
+
+					log.Debug().Msgf("follow symlink from %q to %q", resource, linkDest)
+					return client.ServeRawContent(ctx, targetOwner, targetRepo, ref, linkDest, decompress)
+				}
+			}
+
+			// now we are sure it's content so set the MIME type
+			mimeType, rawType := client.getMimeTypeByExtension(resource)
+			// NOTE(review): this first Set is redundant — the if/else below
+			// overwrites the header in both branches. Harmless but removable.
+			resp.Response.Header.Set(ContentTypeHeader, mimeType)
+			if decompress {
+				resp.Response.Header.Set(ContentTypeHeader, mimeType)
+			} else {
+				resp.Response.Header.Set(ContentTypeHeader, rawType)
+			}
+
+			// now we write to cache and respond at the same time
+			fileResp := FileResponse{
+				Exists:  true,
+				ETag:    resp.Header.Get(ETagHeader),
+				MimeType: mimeType,
+				RawMime:  rawType,
+			}
+			return fileResp.CreateCacheReader(ctx, reader, client.responseCache, cacheKey), resp.Response.Header, resp.StatusCode, nil
+
+		case http.StatusNotFound:
+			// cache the negative result (Exists defaults to false)
+			jsonToCache, err := json.Marshal(FileResponse{ETag: resp.Header.Get(ETagHeader)})
+			if err != nil {
+				log.Error().Err(err).Msgf("[cache] marshaling json metadata for %q has returned an error", cacheKey)
+			}
+			if err := client.responseCache.Set(cacheKey+"|Metadata", jsonToCache, fileCacheTimeout); err != nil {
+				log.Error().Err(err).Msg("[cache] error on cache write")
+			}
+
+			return nil, resp.Response.Header, http.StatusNotFound, ErrorNotFound
+		default:
+			return nil, resp.Response.Header, resp.StatusCode, fmt.Errorf("unexpected status code '%d'", resp.StatusCode)
+		}
+	}
+	// no HTTP response at all (e.g. transport failure)
+	return nil, nil, http.StatusInternalServerError, err
+}
+
+// GiteaGetRepoBranchTimestamp returns the branch name and last-commit
+// timestamp for repoOwner/repoName@branchName. Positive and negative
+// ("branch does not exist") results are both cached for
+// branchExistenceCacheTimeout; a missing branch is reported as ErrorNotFound.
+func (client *Client) GiteaGetRepoBranchTimestamp(repoOwner, repoName, branchName string) (*BranchTimestamp, error) {
+	cacheKey := fmt.Sprintf("%s/%s/%s/%s", branchTimestampCacheKeyPrefix, repoOwner, repoName, branchName)
+
+	if stampRaw, ok := client.responseCache.Get(cacheKey); ok {
+		var stamp BranchTimestamp
+		err := json.Unmarshal(stampRaw.([]byte), &stamp)
+		if err != nil {
+			// corrupt cache entry: report not-found rather than serving junk
+			log.Error().Err(err).Bytes("stamp", stampRaw.([]byte)).Msgf("[cache] failed to unmarshal timestamp for: %s", cacheKey)
+			return &BranchTimestamp{}, ErrorNotFound
+		}
+
+		if stamp.NotFound {
+			log.Trace().Msgf("[cache] branch %q does not exist", branchName)
+
+			return &BranchTimestamp{}, ErrorNotFound
+		} else {
+			log.Trace().Msgf("[cache] use branch %q exist", branchName)
+			// This comes from the refactoring of the caching library.
+			// The branch as reported by the API was stored in the cache, and I'm not sure if there are
+			// situations where it differs from the name in the request, hence this is left here.
+			return &stamp, nil
+		}
+	}
+
+	branch, resp, err := client.sdkClient.GetRepoBranch(repoOwner, repoName, branchName)
+	if err != nil {
+		if resp != nil && resp.StatusCode == http.StatusNotFound {
+			// cache the negative lookup so repeated misses skip the API
+			log.Trace().Msgf("[cache] set cache branch %q not found", branchName)
+			jsonToCache, err := json.Marshal(BranchTimestamp{NotFound: true})
+			if err != nil {
+				log.Error().Err(err).Msgf("[cache] marshaling empty timestamp for '%s' has returned an error", cacheKey)
+			}
+			if err := client.responseCache.Set(cacheKey, jsonToCache, branchExistenceCacheTimeout); err != nil {
+				log.Error().Err(err).Msg("[cache] error on cache write")
+			}
+			return &BranchTimestamp{}, ErrorNotFound
+		}
+		return &BranchTimestamp{}, err
+	}
+	if resp.StatusCode != http.StatusOK {
+		return &BranchTimestamp{}, fmt.Errorf("unexpected status code '%d'", resp.StatusCode)
+	}
+
+	stamp := &BranchTimestamp{
+		Branch:    branch.Name,
+		Timestamp: branch.Commit.Timestamp,
+	}
+
+	log.Trace().Msgf("set cache branch [%s] exist", branchName)
+	jsonToCache, err := json.Marshal(stamp)
+	if err != nil {
+		log.Error().Err(err).Msgf("[cache] marshaling timestamp for %q has returned an error", cacheKey)
+	}
+	if err := client.responseCache.Set(cacheKey, jsonToCache, branchExistenceCacheTimeout); err != nil {
+		log.Error().Err(err).Msg("[cache] error on cache write")
+	}
+	return stamp, nil
+}
+
+// GiteaGetRepoDefaultBranch returns the default branch of a repository,
+// cached for defaultBranchCacheTimeout.
+func (client *Client) GiteaGetRepoDefaultBranch(repoOwner, repoName string) (string, error) {
+	cacheKey := fmt.Sprintf("%s/%s/%s", defaultBranchCacheKeyPrefix, repoOwner, repoName)
+
+	if cached, hit := client.responseCache.Get(cacheKey); hit {
+		return string(cached.([]byte)), nil
+	}
+
+	repo, resp, err := client.sdkClient.GetRepo(repoOwner, repoName)
+	switch {
+	case err != nil:
+		return "", err
+	case resp.StatusCode != http.StatusOK:
+		return "", fmt.Errorf("unexpected status code '%d'", resp.StatusCode)
+	}
+
+	defaultBranch := repo.DefaultBranch
+	if cacheErr := client.responseCache.Set(cacheKey, []byte(defaultBranch), defaultBranchCacheTimeout); cacheErr != nil {
+		log.Error().Err(cacheErr).Msg("[cache] error on cache write")
+	}
+	return defaultBranch, nil
+}
+
+// GiteaCheckIfOwnerExists reports whether a user or organization with the
+// given name exists on the forge. Results are cached for
+// ownerExistenceCacheTimeout.
+func (client *Client) GiteaCheckIfOwnerExists(owner string) (bool, error) {
+	cacheKey := fmt.Sprintf("%s/%s", ownerExistenceKeyPrefix, owner)
+
+	if existRaw, ok := client.responseCache.Get(cacheKey); ok && existRaw != nil {
+		// Entries are stored below as []byte; the previous unchecked
+		// existRaw.(string) assertion panicked on every cache hit.
+		if raw, isBytes := existRaw.([]byte); isBytes {
+			exist, err := strconv.ParseBool(string(raw))
+			return exist, err
+		}
+		// unexpected cached type: fall through and re-query the forge
+	}
+
+	_, resp, err := client.sdkClient.GetUserInfo(owner)
+	if resp == nil && err != nil {
+		// e.g. transport failure: no HTTP status to inspect (the previous
+		// code dereferenced resp unconditionally and could panic here)
+		return false, err
+	}
+	if resp.StatusCode == http.StatusOK && err == nil {
+		if err := client.responseCache.Set(cacheKey, []byte("true"), ownerExistenceCacheTimeout); err != nil {
+			log.Error().Err(err).Msg("[cache] error on cache write")
+		}
+		return true, nil
+	} else if resp.StatusCode != http.StatusNotFound {
+		return false, err
+	}
+
+	// not a user: check for an organization of the same name
+	_, resp, err = client.sdkClient.GetOrg(owner)
+	if resp == nil && err != nil {
+		return false, err
+	}
+	if resp.StatusCode == http.StatusOK && err == nil {
+		if err := client.responseCache.Set(cacheKey, []byte("true"), ownerExistenceCacheTimeout); err != nil {
+			log.Error().Err(err).Msg("[cache] error on cache write")
+		}
+		return true, nil
+	} else if resp.StatusCode != http.StatusNotFound {
+		return false, err
+	}
+
+	// neither user nor org: cache the negative result too
+	if err := client.responseCache.Set(cacheKey, []byte("false"), ownerExistenceCacheTimeout); err != nil {
+		log.Error().Err(err).Msg("[cache] error on cache write")
+	}
+	return false, nil
+}
+
+// extToMime maps a file extension (or a name ending in one) to a MIME type,
+// substituting the configured default for unknown or forbidden types.
+func (client *Client) extToMime(ext string) string {
+	detected := mime.TypeByExtension(path.Ext(ext))
+	baseType := strings.SplitN(detected, ";", 2)[0]
+	if detected == "" || client.forbiddenMimeTypes[baseType] {
+		detected = client.defaultMimeType
+	}
+	log.Trace().Msgf("probe mime of extension '%q' is '%q'", ext, detected)
+
+	return detected
+}
+
+// getMimeTypeByExtension resolves both the raw MIME type of a resource and,
+// for the known compression suffixes (.gz/.br/.zst), the MIME type of the
+// payload underneath the compression layer.
+func (client *Client) getMimeTypeByExtension(resource string) (mimeType, rawType string) {
+	outerExt := path.Ext(resource)
+	rawType = client.extToMime(outerExt)
+	mimeType = rawType
+
+	switch outerExt {
+	case ".gz", ".br", ".zst":
+		// strip the compression suffix and probe the inner extension
+		innerExt := path.Ext(strings.TrimSuffix(resource, outerExt))
+		if innerExt != outerExt {
+			mimeType = client.extToMime(innerExt)
+		}
+	}
+	log.Trace().Msgf("probe mime of %q is (%q / raw %q)", resource, mimeType, rawType)
+	return mimeType, rawType
+}
diff --git a/server/handler/handler.go b/server/handler/handler.go
new file mode 100644
index 0000000..437697a
--- /dev/null
+++ b/server/handler/handler.go
@@ -0,0 +1,114 @@
+package handler
+
+import (
+ "net/http"
+ "strings"
+
+ "github.com/rs/zerolog/log"
+
+ "codeberg.org/codeberg/pages/config"
+ "codeberg.org/codeberg/pages/html"
+ "codeberg.org/codeberg/pages/server/cache"
+ "codeberg.org/codeberg/pages/server/context"
+ "codeberg.org/codeberg/pages/server/gitea"
+)
+
+const (
+ headerAccessControlAllowOrigin = "Access-Control-Allow-Origin"
+ headerAccessControlAllowMethods = "Access-Control-Allow-Methods"
+ defaultPagesRepo = "pages"
+)
+
+// Handler handles a single HTTP request to the web server. It sets common
+// security/caching headers, filters HTTP methods, blocks blacklisted paths,
+// applies CORS for configured domains, and then dispatches to the raw-domain,
+// subdomain, or custom-domain handler based on the request host.
+func Handler(
+	cfg config.ServerConfig,
+	giteaClient *gitea.Client,
+	canonicalDomainCache, redirectsCache cache.ICache,
+) http.HandlerFunc {
+	return func(w http.ResponseWriter, req *http.Request) {
+		ctx := context.New(w, req)
+		log := log.With().Str("ReqId", ctx.ReqId).Strs("Handler", []string{req.Host, req.RequestURI}).Logger()
+		log.Debug().Msg("\n----------------------------------------------------------")
+
+		ctx.RespWriter.Header().Set("Server", "pages-server")
+
+		// Force new default from specification (since November 2020) - see https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Referrer-Policy#strict-origin-when-cross-origin
+		ctx.RespWriter.Header().Set("Referrer-Policy", "strict-origin-when-cross-origin")
+
+		// Enable browser caching for up to 10 minutes
+		ctx.RespWriter.Header().Set("Cache-Control", "public, max-age=600")
+
+		trimmedHost := ctx.TrimHostPort()
+
+		// Add HSTS for RawDomain and MainDomain
+		if hsts := getHSTSHeader(trimmedHost, cfg.MainDomain, cfg.RawDomain); hsts != "" {
+			ctx.RespWriter.Header().Set("Strict-Transport-Security", hsts)
+		}
+
+		// Handle all http methods
+		ctx.RespWriter.Header().Set("Allow", http.MethodGet+", "+http.MethodHead+", "+http.MethodOptions)
+		switch ctx.Req.Method {
+		case http.MethodOptions:
+			// return Allow header
+			ctx.RespWriter.WriteHeader(http.StatusNoContent)
+			return
+		case http.MethodGet,
+			http.MethodHead:
+			// end switch case and handle allowed requests
+			break
+		default:
+			// Block all methods not required for static pages
+			ctx.String("Method not allowed", http.StatusMethodNotAllowed)
+			return
+		}
+
+		// Block blacklisted paths (like ACME challenges)
+		for _, blacklistedPath := range cfg.BlacklistedPaths {
+			if strings.HasPrefix(ctx.Path(), blacklistedPath) {
+				html.ReturnErrorPage(ctx, "requested path is blacklisted", http.StatusForbidden)
+				return
+			}
+		}
+
+		// Allow CORS for specified domains
+		allowCors := false
+		for _, allowedCorsDomain := range cfg.AllowedCorsDomains {
+			if strings.EqualFold(trimmedHost, allowedCorsDomain) {
+				allowCors = true
+				break
+			}
+		}
+		if allowCors {
+			ctx.RespWriter.Header().Set(headerAccessControlAllowOrigin, "*")
+			ctx.RespWriter.Header().Set(headerAccessControlAllowMethods, http.MethodGet+", "+http.MethodHead)
+		}
+
+		// Prepare request information to Gitea
+		pathElements := strings.Split(strings.Trim(ctx.Path(), "/"), "/")
+
+		if cfg.RawDomain != "" && strings.EqualFold(trimmedHost, cfg.RawDomain) {
+			log.Debug().Msg("raw domain request detected")
+			handleRaw(log, ctx, giteaClient,
+				cfg.MainDomain,
+				trimmedHost,
+				pathElements,
+				canonicalDomainCache, redirectsCache)
+		} else if strings.HasSuffix(trimmedHost, cfg.MainDomain) {
+			log.Debug().Msg("subdomain request detected")
+			handleSubDomain(log, ctx, giteaClient,
+				cfg.MainDomain,
+				cfg.PagesBranches,
+				trimmedHost,
+				pathElements,
+				canonicalDomainCache, redirectsCache)
+		} else {
+			log.Debug().Msg("custom domain request detected")
+			handleCustomDomain(log, ctx, giteaClient,
+				cfg.MainDomain,
+				trimmedHost,
+				pathElements,
+				// NOTE(review): indexes PagesBranches[0] — assumes config
+				// validation guarantees at least one entry; TODO confirm
+				cfg.PagesBranches[0],
+				canonicalDomainCache, redirectsCache)
+		}
+	}
+}
diff --git a/server/handler/handler_custom_domain.go b/server/handler/handler_custom_domain.go
new file mode 100644
index 0000000..8a5f9d7
--- /dev/null
+++ b/server/handler/handler_custom_domain.go
@@ -0,0 +1,72 @@
+package handler
+
+import (
+ "net/http"
+ "path"
+ "strings"
+
+ "codeberg.org/codeberg/pages/html"
+ "codeberg.org/codeberg/pages/server/cache"
+ "codeberg.org/codeberg/pages/server/context"
+ "codeberg.org/codeberg/pages/server/dns"
+ "codeberg.org/codeberg/pages/server/gitea"
+ "codeberg.org/codeberg/pages/server/upstream"
+ "github.com/rs/zerolog"
+)
+
+func handleCustomDomain(log zerolog.Logger, ctx *context.Context, giteaClient *gitea.Client,
+ mainDomainSuffix string,
+ trimmedHost string,
+ pathElements []string,
+ firstDefaultBranch string,
+ canonicalDomainCache, redirectsCache cache.ICache,
+) {
+ // Serve pages from custom domains
+ targetOwner, targetRepo, targetBranch := dns.GetTargetFromDNS(trimmedHost, mainDomainSuffix, firstDefaultBranch)
+ if targetOwner == "" {
+ html.ReturnErrorPage(ctx,
+ "could not obtain repo owner from custom domain",
+ http.StatusFailedDependency)
+ return
+ }
+
+ pathParts := pathElements
+ canonicalLink := false
+ if strings.HasPrefix(pathElements[0], "@") {
+ targetBranch = pathElements[0][1:]
+ pathParts = pathElements[1:]
+ canonicalLink = true
+ }
+
+ // Try to use the given repo on the given branch or the default branch
+ log.Debug().Msg("custom domain preparations, now trying with details from DNS")
+ if targetOpt, works := tryBranch(log, ctx, giteaClient, &upstream.Options{
+ TryIndexPages: true,
+ TargetOwner: targetOwner,
+ TargetRepo: targetRepo,
+ TargetBranch: targetBranch,
+ TargetPath: path.Join(pathParts...),
+ }, canonicalLink); works {
+ canonicalDomain, valid := targetOpt.CheckCanonicalDomain(ctx, giteaClient, trimmedHost, mainDomainSuffix, canonicalDomainCache)
+ if !valid {
+ html.ReturnErrorPage(ctx, "domain not specified in .domains
file", http.StatusMisdirectedRequest)
+ return
+ } else if canonicalDomain != trimmedHost {
+ // only redirect if the target is also a codeberg page!
+ targetOwner, _, _ = dns.GetTargetFromDNS(strings.SplitN(canonicalDomain, "/", 2)[0], mainDomainSuffix, firstDefaultBranch)
+ if targetOwner != "" {
+ ctx.Redirect("https://"+canonicalDomain+"/"+targetOpt.TargetPath, http.StatusTemporaryRedirect)
+ return
+ }
+
+ html.ReturnErrorPage(ctx, "target is no codeberg page", http.StatusFailedDependency)
+ return
+ }
+
+ log.Debug().Str("url", trimmedHost).Msg("tryBranch, now trying upstream")
+ tryUpstream(log, ctx, giteaClient, mainDomainSuffix, trimmedHost, targetOpt, canonicalDomainCache, redirectsCache)
+ return
+ }
+
+ html.ReturnErrorPage(ctx, "could not find target for custom domain", http.StatusFailedDependency)
+}
diff --git a/server/handler/handler_raw_domain.go b/server/handler/handler_raw_domain.go
new file mode 100644
index 0000000..bbbf7da
--- /dev/null
+++ b/server/handler/handler_raw_domain.go
@@ -0,0 +1,71 @@
+package handler
+
+import (
+ "fmt"
+ "net/http"
+ "path"
+ "strings"
+
+ "github.com/rs/zerolog"
+
+ "codeberg.org/codeberg/pages/html"
+ "codeberg.org/codeberg/pages/server/cache"
+ "codeberg.org/codeberg/pages/server/context"
+ "codeberg.org/codeberg/pages/server/gitea"
+ "codeberg.org/codeberg/pages/server/upstream"
+)
+
+func handleRaw(log zerolog.Logger, ctx *context.Context, giteaClient *gitea.Client,
+ mainDomainSuffix string,
+ trimmedHost string,
+ pathElements []string,
+ canonicalDomainCache, redirectsCache cache.ICache,
+) {
+ // Serve raw content from RawDomain
+ log.Debug().Msg("raw domain")
+
+ if len(pathElements) < 2 {
+ html.ReturnErrorPage(
+ ctx,
+ "a url in the form of https://{domain}/{owner}/{repo}[/@{branch}]/{path}
is required",
+ http.StatusBadRequest,
+ )
+
+ return
+ }
+
+ // raw.codeberg.org/example/myrepo/@main/index.html
+ if len(pathElements) > 2 && strings.HasPrefix(pathElements[2], "@") {
+ log.Debug().Msg("raw domain preparations, now trying with specified branch")
+ if targetOpt, works := tryBranch(log, ctx, giteaClient, &upstream.Options{
+ ServeRaw: true,
+ TargetOwner: pathElements[0],
+ TargetRepo: pathElements[1],
+ TargetBranch: pathElements[2][1:],
+ TargetPath: path.Join(pathElements[3:]...),
+ }, true); works {
+ log.Trace().Msg("tryUpstream: serve raw domain with specified branch")
+ tryUpstream(log, ctx, giteaClient, mainDomainSuffix, trimmedHost, targetOpt, canonicalDomainCache, redirectsCache)
+ return
+ }
+ log.Debug().Msg("missing branch info")
+ html.ReturnErrorPage(ctx, "missing branch info", http.StatusFailedDependency)
+ return
+ }
+
+ log.Debug().Msg("raw domain preparations, now trying with default branch")
+ if targetOpt, works := tryBranch(log, ctx, giteaClient, &upstream.Options{
+ TryIndexPages: false,
+ ServeRaw: true,
+ TargetOwner: pathElements[0],
+ TargetRepo: pathElements[1],
+ TargetPath: path.Join(pathElements[2:]...),
+ }, true); works {
+ log.Trace().Msg("tryUpstream: serve raw domain with default branch")
+ tryUpstream(log, ctx, giteaClient, mainDomainSuffix, trimmedHost, targetOpt, canonicalDomainCache, redirectsCache)
+ } else {
+ html.ReturnErrorPage(ctx,
+ fmt.Sprintf("raw domain could not find repo %s/%s
or repo is empty", targetOpt.TargetOwner, targetOpt.TargetRepo),
+ http.StatusNotFound)
+ }
+}
diff --git a/server/handler/handler_sub_domain.go b/server/handler/handler_sub_domain.go
new file mode 100644
index 0000000..e335019
--- /dev/null
+++ b/server/handler/handler_sub_domain.go
@@ -0,0 +1,156 @@
+package handler
+
+import (
+ "fmt"
+ "net/http"
+ "path"
+ "strings"
+
+ "github.com/rs/zerolog"
+ "golang.org/x/exp/slices"
+
+ "codeberg.org/codeberg/pages/html"
+ "codeberg.org/codeberg/pages/server/cache"
+ "codeberg.org/codeberg/pages/server/context"
+ "codeberg.org/codeberg/pages/server/gitea"
+ "codeberg.org/codeberg/pages/server/upstream"
+)
+
// handleSubDomain serves pages for subdomains of the main domain suffix
// (e.g. example.codeberg.page). Resolution order:
//  1. explicit repo and branch: /<repo>/@<branch>/...
//  2. explicit branch on the default pages repo: /@<branch>/...
//  3. a repo served from one of the default pages branches: /<repo>/...
//  4. the default pages repo on a default pages branch, then on its
//     default branch: /...
//
// NOTE(review): pathElements[0] is indexed unconditionally — assumes the
// caller always passes at least one element (possibly ""); confirm.
func handleSubDomain(log zerolog.Logger, ctx *context.Context, giteaClient *gitea.Client,
	mainDomainSuffix string,
	defaultPagesBranches []string,
	trimmedHost string,
	pathElements []string,
	canonicalDomainCache, redirectsCache cache.ICache,
) {
	// Serve pages from subdomains of MainDomainSuffix
	log.Debug().Msg("main domain suffix")

	// mainDomainSuffix carries a leading dot (normalized at startup), so
	// trimming it yields the bare owner name.
	targetOwner := strings.TrimSuffix(trimmedHost, mainDomainSuffix)
	targetRepo := pathElements[0]

	if targetOwner == "www" {
		// www.codeberg.page redirects to codeberg.page // TODO: rm hardcoded - use cname?
		ctx.Redirect("https://"+mainDomainSuffix[1:]+ctx.Path(), http.StatusPermanentRedirect)
		return
	}

	// Check if the first directory is a repo with the second directory as a branch
	// example.codeberg.page/myrepo/@main/index.html
	if len(pathElements) > 1 && strings.HasPrefix(pathElements[1], "@") {
		if targetRepo == defaultPagesRepo {
			// example.codeberg.org/pages/@... redirects to example.codeberg.org/@...
			ctx.Redirect("/"+strings.Join(pathElements[1:], "/"), http.StatusTemporaryRedirect)
			return
		}

		log.Debug().Msg("main domain preparations, now trying with specified repo & branch")
		if targetOpt, works := tryBranch(log, ctx, giteaClient, &upstream.Options{
			TryIndexPages: true,
			TargetOwner:   targetOwner,
			TargetRepo:    pathElements[0],
			TargetBranch:  pathElements[1][1:],
			TargetPath:    path.Join(pathElements[2:]...),
		}, true); works {
			log.Trace().Msg("tryUpstream: serve with specified repo and branch")
			tryUpstream(log, ctx, giteaClient, mainDomainSuffix, trimmedHost, targetOpt, canonicalDomainCache, redirectsCache)
		} else {
			html.ReturnErrorPage(
				ctx,
				formatSetBranchNotFoundMessage(pathElements[1][1:], targetOwner, pathElements[0]),
				http.StatusFailedDependency,
			)
		}
		return
	}

	// Check if the first directory is a branch for the defaultPagesRepo
	// example.codeberg.page/@main/index.html
	if strings.HasPrefix(pathElements[0], "@") {
		targetBranch := pathElements[0][1:]

		// if the default pages branch can be determined exactly, it does not need to be set
		if len(defaultPagesBranches) == 1 && slices.Contains(defaultPagesBranches, targetBranch) {
			// example.codeberg.org/@pages/... redirects to example.codeberg.org/...
			ctx.Redirect("/"+strings.Join(pathElements[1:], "/"), http.StatusTemporaryRedirect)
			return
		}

		log.Debug().Msg("main domain preparations, now trying with specified branch")
		if targetOpt, works := tryBranch(log, ctx, giteaClient, &upstream.Options{
			TryIndexPages: true,
			TargetOwner:   targetOwner,
			TargetRepo:    defaultPagesRepo,
			TargetBranch:  targetBranch,
			TargetPath:    path.Join(pathElements[1:]...),
		}, true); works {
			log.Trace().Msg("tryUpstream: serve default pages repo with specified branch")
			tryUpstream(log, ctx, giteaClient, mainDomainSuffix, trimmedHost, targetOpt, canonicalDomainCache, redirectsCache)
		} else {
			html.ReturnErrorPage(
				ctx,
				formatSetBranchNotFoundMessage(targetBranch, targetOwner, defaultPagesRepo),
				http.StatusFailedDependency,
			)
		}
		return
	}

	for _, defaultPagesBranch := range defaultPagesBranches {
		// Check if the first directory is a repo with a default pages branch
		// example.codeberg.page/myrepo/index.html
		// example.codeberg.page/{PAGES_BRANCHE}/... is not allowed here.
		log.Debug().Msg("main domain preparations, now trying with specified repo")
		if pathElements[0] != defaultPagesBranch {
			if targetOpt, works := tryBranch(log, ctx, giteaClient, &upstream.Options{
				TryIndexPages: true,
				TargetOwner:   targetOwner,
				TargetRepo:    pathElements[0],
				TargetBranch:  defaultPagesBranch,
				TargetPath:    path.Join(pathElements[1:]...),
			}, false); works {
				log.Debug().Msg("tryBranch, now trying upstream 5")
				tryUpstream(log, ctx, giteaClient, mainDomainSuffix, trimmedHost, targetOpt, canonicalDomainCache, redirectsCache)
				return
			}
		}

		// Try to use the defaultPagesRepo on an default pages branch
		// example.codeberg.page/index.html
		log.Debug().Msg("main domain preparations, now trying with default repo")
		if targetOpt, works := tryBranch(log, ctx, giteaClient, &upstream.Options{
			TryIndexPages: true,
			TargetOwner:   targetOwner,
			TargetRepo:    defaultPagesRepo,
			TargetBranch:  defaultPagesBranch,
			TargetPath:    path.Join(pathElements...),
		}, false); works {
			log.Debug().Msg("tryBranch, now trying upstream 6")
			tryUpstream(log, ctx, giteaClient, mainDomainSuffix, trimmedHost, targetOpt, canonicalDomainCache, redirectsCache)
			return
		}
	}

	// Try to use the defaultPagesRepo on its default branch
	// example.codeberg.page/index.html
	log.Debug().Msg("main domain preparations, now trying with default repo/branch")
	if targetOpt, works := tryBranch(log, ctx, giteaClient, &upstream.Options{
		TryIndexPages: true,
		TargetOwner:   targetOwner,
		TargetRepo:    defaultPagesRepo,
		TargetPath:    path.Join(pathElements...),
	}, false); works {
		log.Debug().Msg("tryBranch, now trying upstream 6")
		tryUpstream(log, ctx, giteaClient, mainDomainSuffix, trimmedHost, targetOpt, canonicalDomainCache, redirectsCache)
		return
	}

	// Couldn't find a valid repo/branch
	html.ReturnErrorPage(ctx,
		fmt.Sprintf("could not find a valid repository or branch for repository: %s
", targetRepo),
		http.StatusNotFound)
}
+
// formatSetBranchNotFoundMessage builds the HTML error message shown when an
// explicitly requested branch (the "@branch" path segment) does not exist on
// the given owner/repo. The <code> tags survive sanitization because the
// error-page sanitizer explicitly allows the "code" element.
func formatSetBranchNotFoundMessage(branch, owner, repo string) string {
	return fmt.Sprintf("explicitly set branch <code>%q</code> does not exist at <code>%s/%s</code>", branch, owner, repo)
}
diff --git a/server/handler/handler_test.go b/server/handler/handler_test.go
new file mode 100644
index 0000000..765b3b1
--- /dev/null
+++ b/server/handler/handler_test.go
@@ -0,0 +1,58 @@
+package handler
+
+import (
+ "net/http"
+ "net/http/httptest"
+ "testing"
+ "time"
+
+ "codeberg.org/codeberg/pages/config"
+ "codeberg.org/codeberg/pages/server/cache"
+ "codeberg.org/codeberg/pages/server/gitea"
+ "github.com/rs/zerolog/log"
+)
+
+func TestHandlerPerformance(t *testing.T) {
+ cfg := config.ForgeConfig{
+ Root: "https://codeberg.org",
+ Token: "",
+ LFSEnabled: false,
+ FollowSymlinks: false,
+ }
+ giteaClient, _ := gitea.NewClient(cfg, cache.NewInMemoryCache())
+ serverCfg := config.ServerConfig{
+ MainDomain: "codeberg.page",
+ RawDomain: "raw.codeberg.page",
+ BlacklistedPaths: []string{
+ "/.well-known/acme-challenge/",
+ },
+ AllowedCorsDomains: []string{"raw.codeberg.org", "fonts.codeberg.org", "design.codeberg.org"},
+ PagesBranches: []string{"pages"},
+ }
+ testHandler := Handler(serverCfg, giteaClient, cache.NewInMemoryCache(), cache.NewInMemoryCache())
+
+ testCase := func(uri string, status int) {
+ t.Run(uri, func(t *testing.T) {
+ req := httptest.NewRequest("GET", uri, http.NoBody)
+ w := httptest.NewRecorder()
+
+ log.Printf("Start: %v\n", time.Now())
+ start := time.Now()
+ testHandler(w, req)
+ end := time.Now()
+ log.Printf("Done: %v\n", time.Now())
+
+ resp := w.Result()
+
+ if resp.StatusCode != status {
+ t.Errorf("request failed with status code %d", resp.StatusCode)
+ } else {
+ t.Logf("request took %d milliseconds", end.Sub(start).Milliseconds())
+ }
+ })
+ }
+
+ testCase("https://mondstern.codeberg.page/", 404) // TODO: expect 200
+ testCase("https://codeberg.page/", 404) // TODO: expect 200
+ testCase("https://example.momar.xyz/", 424)
+}
diff --git a/server/handler/hsts.go b/server/handler/hsts.go
new file mode 100644
index 0000000..1ab73ae
--- /dev/null
+++ b/server/handler/hsts.go
@@ -0,0 +1,15 @@
+package handler
+
+import (
+ "strings"
+)
+
// getHSTSHeader returns a HSTS header value with includeSubdomains & preload
// for hosts under mainDomainSuffix and for rawDomain (compared
// case-insensitively), or an empty string for custom domains, whose TLS
// setup is not under our control.
func getHSTSHeader(host, mainDomainSuffix, rawDomain string) string {
	if strings.HasSuffix(host, mainDomainSuffix) || strings.EqualFold(host, rawDomain) {
		return "max-age=63072000; includeSubdomains; preload"
	}
	// Early return instead of an else branch keeps the happy path flat.
	return ""
}
diff --git a/server/handler/try.go b/server/handler/try.go
new file mode 100644
index 0000000..e5fc49b
--- /dev/null
+++ b/server/handler/try.go
@@ -0,0 +1,84 @@
+package handler
+
+import (
+ "fmt"
+ "net/http"
+ "strings"
+
+ "github.com/rs/zerolog"
+
+ "codeberg.org/codeberg/pages/html"
+ "codeberg.org/codeberg/pages/server/cache"
+ "codeberg.org/codeberg/pages/server/context"
+ "codeberg.org/codeberg/pages/server/gitea"
+ "codeberg.org/codeberg/pages/server/upstream"
+)
+
+// tryUpstream forwards the target request to the Gitea API, and shows an error page on failure.
+func tryUpstream(log zerolog.Logger, ctx *context.Context, giteaClient *gitea.Client,
+ mainDomainSuffix, trimmedHost string,
+ options *upstream.Options,
+ canonicalDomainCache cache.ICache,
+ redirectsCache cache.ICache,
+) {
+ // check if a canonical domain exists on a request on MainDomain
+ if strings.HasSuffix(trimmedHost, mainDomainSuffix) && !options.ServeRaw {
+ canonicalDomain, _ := options.CheckCanonicalDomain(ctx, giteaClient, "", mainDomainSuffix, canonicalDomainCache)
+ if !strings.HasSuffix(strings.SplitN(canonicalDomain, "/", 2)[0], mainDomainSuffix) {
+ canonicalPath := ctx.Req.RequestURI
+ if options.TargetRepo != defaultPagesRepo {
+ path := strings.SplitN(canonicalPath, "/", 3)
+ if len(path) >= 3 {
+ canonicalPath = "/" + path[2]
+ }
+ }
+
+ redirect_to := "https://" + canonicalDomain + canonicalPath
+
+ log.Debug().Str("to", redirect_to).Msg("redirecting")
+
+ ctx.Redirect(redirect_to, http.StatusTemporaryRedirect)
+ return
+ }
+ }
+
+ // Add host for debugging.
+ options.Host = trimmedHost
+
+ // Try to request the file from the Gitea API
+ log.Debug().Msg("requesting from upstream")
+ if !options.Upstream(ctx, giteaClient, redirectsCache) {
+ html.ReturnErrorPage(ctx, fmt.Sprintf("Forge returned %d %s", ctx.StatusCode, http.StatusText(ctx.StatusCode)), ctx.StatusCode)
+ }
+}
+
+// tryBranch checks if a branch exists and populates the target variables. If canonicalLink is non-empty,
+// it will also disallow search indexing and add a Link header to the canonical URL.
+func tryBranch(log zerolog.Logger, ctx *context.Context, giteaClient *gitea.Client,
+ targetOptions *upstream.Options, canonicalLink bool,
+) (*upstream.Options, bool) {
+ if targetOptions.TargetOwner == "" || targetOptions.TargetRepo == "" {
+ log.Debug().Msg("tryBranch: owner or repo is empty")
+ return nil, false
+ }
+
+ // Replace "~" to "/" so we can access branch that contains slash character
+ // Branch name cannot contain "~" so doing this is okay
+ targetOptions.TargetBranch = strings.ReplaceAll(targetOptions.TargetBranch, "~", "/")
+
+ // Check if the branch exists, otherwise treat it as a file path
+ branchExist, _ := targetOptions.GetBranchTimestamp(giteaClient)
+ if !branchExist {
+ log.Debug().Msg("tryBranch: branch doesn't exist")
+ return nil, false
+ }
+
+ if canonicalLink {
+ // Hide from search machines & add canonical link
+ ctx.RespWriter.Header().Set("X-Robots-Tag", "noarchive, noindex")
+ ctx.RespWriter.Header().Set("Link", targetOptions.ContentWebLink(giteaClient)+"; rel=\"canonical\"")
+ }
+
+ log.Debug().Msg("tryBranch: true")
+ return targetOptions, true
+}
diff --git a/server/profiling.go b/server/profiling.go
new file mode 100644
index 0000000..7d20926
--- /dev/null
+++ b/server/profiling.go
@@ -0,0 +1,21 @@
+package server
+
+import (
+ "net/http"
+ _ "net/http/pprof"
+
+ "github.com/rs/zerolog/log"
+)
+
+func StartProfilingServer(listeningAddress string) {
+ server := &http.Server{
+ Addr: listeningAddress,
+ Handler: http.DefaultServeMux,
+ }
+
+ log.Info().Msgf("Starting debug server on %s", listeningAddress)
+
+ go func() {
+ log.Fatal().Err(server.ListenAndServe()).Msg("Failed to start debug server")
+ }()
+}
diff --git a/server/startup.go b/server/startup.go
new file mode 100644
index 0000000..4ae26c1
--- /dev/null
+++ b/server/startup.go
@@ -0,0 +1,145 @@
+package server
+
+import (
+ "context"
+ "crypto/tls"
+ "fmt"
+ "net"
+ "net/http"
+ "os"
+ "strings"
+ "time"
+
+ "github.com/pires/go-proxyproto"
+ "github.com/rs/zerolog"
+ "github.com/rs/zerolog/log"
+ "github.com/urfave/cli/v2"
+
+ cmd "codeberg.org/codeberg/pages/cli"
+ "codeberg.org/codeberg/pages/config"
+ "codeberg.org/codeberg/pages/server/acme"
+ "codeberg.org/codeberg/pages/server/cache"
+ "codeberg.org/codeberg/pages/server/certificates"
+ "codeberg.org/codeberg/pages/server/gitea"
+ "codeberg.org/codeberg/pages/server/handler"
+)
+
// Serve sets up and starts the web server: it reads and merges the
// configuration, initializes logging, the in-memory caches, the forge client
// and ACME/TLS certificates, optionally starts the plain-HTTP
// (redirect + ACME challenge) and profiling servers, and finally blocks
// serving HTTPS connections.
func Serve(ctx *cli.Context) error {
	// initialize logger with Trace, overridden later with actual level
	log.Logger = zerolog.New(zerolog.ConsoleWriter{Out: os.Stderr}).With().Timestamp().Caller().Logger().Level(zerolog.TraceLevel)

	cfg, err := config.ReadConfig(ctx)
	if err != nil {
		// NOTE(review): the error is only logged, not returned — presumably a
		// missing config file is acceptable because CLI flags/env are merged
		// below; confirm ReadConfig returns a usable cfg alongside the error.
		log.Error().Err(err).Msg("could not read config")
	}

	config.MergeConfig(ctx, cfg)

	// Initialize the logger.
	logLevel, err := zerolog.ParseLevel(cfg.LogLevel)
	if err != nil {
		return err
	}
	fmt.Printf("Setting log level to: %s\n", logLevel)
	log.Logger = zerolog.New(zerolog.ConsoleWriter{Out: os.Stderr}).With().Timestamp().Caller().Logger().Level(logLevel)

	listeningSSLAddress := fmt.Sprintf("%s:%d", cfg.Server.Host, cfg.Server.Port)
	listeningHTTPAddress := fmt.Sprintf("%s:%d", cfg.Server.Host, cfg.Server.HttpPort)

	// The raw domain is also allowed for CORS.
	if cfg.Server.RawDomain != "" {
		cfg.Server.AllowedCorsDomains = append(cfg.Server.AllowedCorsDomains, cfg.Server.RawDomain)
	}

	// Make sure MainDomain has a leading dot
	if !strings.HasPrefix(cfg.Server.MainDomain, ".") {
		// TODO make this better
		cfg.Server.MainDomain = "." + cfg.Server.MainDomain
	}

	if len(cfg.Server.PagesBranches) == 0 {
		return fmt.Errorf("no default branches set (PAGES_BRANCHES)")
	}

	// Init ssl cert database
	certDB, closeFn, err := cmd.OpenCertDB(ctx)
	if err != nil {
		return err
	}
	defer closeFn()

	// challengeCache is shared between the ACME client and the HTTP
	// challenge server set up below.
	challengeCache := cache.NewInMemoryCache()
	// canonicalDomainCache stores canonical domains
	canonicalDomainCache := cache.NewInMemoryCache()
	// redirectsCache stores redirects in _redirects files
	redirectsCache := cache.NewInMemoryCache()
	// clientResponseCache stores responses from the Gitea server
	clientResponseCache := cache.NewInMemoryCache()

	giteaClient, err := gitea.NewClient(cfg.Forge, clientResponseCache)
	if err != nil {
		return fmt.Errorf("could not create new gitea client: %v", err)
	}

	acmeClient, err := acme.CreateAcmeClient(cfg.ACME, cfg.Server.HttpServerEnabled, challengeCache)
	if err != nil {
		return err
	}

	if err := certificates.SetupMainDomainCertificates(log.Logger, cfg.Server.MainDomain, acmeClient, certDB); err != nil {
		return err
	}

	// Create listener for SSL connections
	log.Info().Msgf("Create TCP listener for SSL on %s", listeningSSLAddress)
	listener, err := net.Listen("tcp", listeningSSLAddress)
	if err != nil {
		return fmt.Errorf("couldn't create listener: %v", err)
	}

	if cfg.Server.UseProxyProtocol {
		listener = &proxyproto.Listener{Listener: listener}
	}
	// Setup listener for SSL connections
	listener = tls.NewListener(listener, certificates.TLSConfig(
		cfg.Server.MainDomain,
		giteaClient,
		acmeClient,
		cfg.Server.PagesBranches[0],
		challengeCache, canonicalDomainCache,
		certDB,
		cfg.ACME.NoDNS01,
		cfg.Server.RawDomain,
	))

	// Periodically maintain the certificate database in the background until
	// Serve returns (the context is cancelled via the deferred cancel).
	interval := 12 * time.Hour
	certMaintainCtx, cancelCertMaintain := context.WithCancel(context.Background())
	defer cancelCertMaintain()
	go certificates.MaintainCertDB(log.Logger, certMaintainCtx, interval, acmeClient, cfg.Server.MainDomain, certDB)

	if cfg.Server.HttpServerEnabled {
		// Create handler for http->https redirect and http acme challenges
		httpHandler := certificates.SetupHTTPACMEChallengeServer(challengeCache, uint(cfg.Server.Port))

		// Create listener for http and start listening
		go func() {
			log.Info().Msgf("Start HTTP server listening on %s", listeningHTTPAddress)
			// NOTE(review): http.ListenAndServe has no timeouts; consider an
			// http.Server with ReadHeaderTimeout here.
			err := http.ListenAndServe(listeningHTTPAddress, httpHandler)
			if err != nil {
				log.Error().Err(err).Msg("Couldn't start HTTP server")
			}
		}()
	}

	if ctx.IsSet("enable-profiling") {
		StartProfilingServer(ctx.String("profiling-address"))
	}

	// Create ssl handler based on settings
	sslHandler := handler.Handler(cfg.Server, giteaClient, canonicalDomainCache, redirectsCache)

	// Start the ssl listener
	log.Info().Msgf("Start SSL server using TCP listener on %s", listener.Addr())

	return http.Serve(listener, sslHandler)
}
diff --git a/server/upstream/domains.go b/server/upstream/domains.go
new file mode 100644
index 0000000..f68a02b
--- /dev/null
+++ b/server/upstream/domains.go
@@ -0,0 +1,71 @@
+package upstream
+
+import (
+ "errors"
+ "strings"
+ "time"
+
+ "github.com/rs/zerolog/log"
+
+ "codeberg.org/codeberg/pages/server/cache"
+ "codeberg.org/codeberg/pages/server/context"
+ "codeberg.org/codeberg/pages/server/gitea"
+)
+
+// canonicalDomainCacheTimeout specifies the timeout for the canonical domain cache.
+var canonicalDomainCacheTimeout = 15 * time.Minute
+
+const canonicalDomainConfig = ".domains"
+
// CheckCanonicalDomain returns the canonical domain specified in the repo (using the `.domains` file).
//
// It returns the first domain in the list and whether actualDomain is among
// the valid ones. Results are cached per owner/repo/branch; passing an empty
// actualDomain simply retrieves the canonical domain.
func (o *Options) CheckCanonicalDomain(ctx *context.Context, giteaClient *gitea.Client, actualDomain, mainDomainSuffix string, canonicalDomainCache cache.ICache) (domain string, valid bool) {
	// Check if this request is cached.
	if cachedValue, ok := canonicalDomainCache.Get(o.TargetOwner + "/" + o.TargetRepo + "/" + o.TargetBranch); ok {
		domains := cachedValue.([]string)
		for _, domain := range domains {
			if domain == actualDomain {
				valid = true
				break
			}
		}
		// The cached list is never empty: the uncached path below always
		// appends the [owner].[pages-domain] fallback before caching.
		return domains[0], valid
	}

	// A missing `.domains` file is fine (body stays empty); other errors are
	// logged but still fall through to the default domain handling.
	body, err := giteaClient.GiteaRawContent(ctx, o.TargetOwner, o.TargetRepo, o.TargetBranch, canonicalDomainConfig)
	if err != nil && !errors.Is(err, gitea.ErrorNotFound) {
		log.Error().Err(err).Msgf("could not read %s of %s/%s", canonicalDomainConfig, o.TargetOwner, o.TargetRepo)
	}

	// Parse one domain per line: normalize case/whitespace/scheme, skip
	// comments and anything that doesn't look like a bare domain name.
	var domains []string
	for _, domain := range strings.Split(string(body), "\n") {
		domain = strings.ToLower(domain)
		domain = strings.TrimSpace(domain)
		domain = strings.TrimPrefix(domain, "http://")
		domain = strings.TrimPrefix(domain, "https://")
		if domain != "" && !strings.HasPrefix(domain, "#") && !strings.ContainsAny(domain, "\t /") && strings.ContainsRune(domain, '.') {
			domains = append(domains, domain)
		}
		if domain == actualDomain {
			valid = true
		}
	}

	// Add [owner].[pages-domain] as valid domain.
	domains = append(domains, o.TargetOwner+mainDomainSuffix)
	if domains[len(domains)-1] == actualDomain {
		valid = true
	}

	// If the target repository isn't called pages, add `/[repository]` to the
	// previous valid domain.
	// NOTE(review): the "/[repo]" suffix is appended *after* the validity
	// check above, but the suffixed form is what gets cached — so the cached
	// and uncached code paths compare actualDomain against slightly different
	// strings; confirm this asymmetry is intended.
	if o.TargetRepo != "" && o.TargetRepo != "pages" {
		domains[len(domains)-1] += "/" + o.TargetRepo
	}

	// Add result to cache.
	_ = canonicalDomainCache.Set(o.TargetOwner+"/"+o.TargetRepo+"/"+o.TargetBranch, domains, canonicalDomainCacheTimeout)

	// Return the first domain from the list and return if any of the domains
	// matched the requested domain.
	return domains[0], valid
}
diff --git a/server/upstream/header.go b/server/upstream/header.go
new file mode 100644
index 0000000..3a218a1
--- /dev/null
+++ b/server/upstream/header.go
@@ -0,0 +1,31 @@
+package upstream
+
+import (
+ "net/http"
+ "time"
+
+ "codeberg.org/codeberg/pages/server/context"
+ "codeberg.org/codeberg/pages/server/gitea"
+)
+
+// setHeader set values to response header
+func (o *Options) setHeader(ctx *context.Context, header http.Header) {
+ if eTag := header.Get(gitea.ETagHeader); eTag != "" {
+ ctx.RespWriter.Header().Set(gitea.ETagHeader, eTag)
+ }
+ if cacheIndicator := header.Get(gitea.PagesCacheIndicatorHeader); cacheIndicator != "" {
+ ctx.RespWriter.Header().Set(gitea.PagesCacheIndicatorHeader, cacheIndicator)
+ }
+ if length := header.Get(gitea.ContentLengthHeader); length != "" {
+ ctx.RespWriter.Header().Set(gitea.ContentLengthHeader, length)
+ }
+ if mime := header.Get(gitea.ContentTypeHeader); mime == "" || o.ServeRaw {
+ ctx.RespWriter.Header().Set(gitea.ContentTypeHeader, rawMime)
+ } else {
+ ctx.RespWriter.Header().Set(gitea.ContentTypeHeader, mime)
+ }
+ if encoding := header.Get(gitea.ContentEncodingHeader); encoding != "" && encoding != "identity" {
+ ctx.RespWriter.Header().Set(gitea.ContentEncodingHeader, encoding)
+ }
+ ctx.RespWriter.Header().Set(headerLastModified, o.BranchTimestamp.In(time.UTC).Format(http.TimeFormat))
+}
diff --git a/server/upstream/helper.go b/server/upstream/helper.go
new file mode 100644
index 0000000..ac0ab3f
--- /dev/null
+++ b/server/upstream/helper.go
@@ -0,0 +1,47 @@
+package upstream
+
+import (
+ "errors"
+ "fmt"
+
+ "github.com/rs/zerolog/log"
+
+ "codeberg.org/codeberg/pages/server/gitea"
+)
+
+// GetBranchTimestamp finds the default branch (if branch is "") and save branch and it's last modification time to Options
+func (o *Options) GetBranchTimestamp(giteaClient *gitea.Client) (bool, error) {
+ log := log.With().Strs("BranchInfo", []string{o.TargetOwner, o.TargetRepo, o.TargetBranch}).Logger()
+
+ if o.TargetBranch == "" {
+ // Get default branch
+ defaultBranch, err := giteaClient.GiteaGetRepoDefaultBranch(o.TargetOwner, o.TargetRepo)
+ if err != nil {
+ log.Err(err).Msg("Couldn't fetch default branch from repository")
+ return false, err
+ }
+ log.Debug().Msgf("Successfully fetched default branch %q from Gitea", defaultBranch)
+ o.TargetBranch = defaultBranch
+ }
+
+ timestamp, err := giteaClient.GiteaGetRepoBranchTimestamp(o.TargetOwner, o.TargetRepo, o.TargetBranch)
+ if err != nil {
+ if !errors.Is(err, gitea.ErrorNotFound) {
+ log.Error().Err(err).Msg("Could not get latest commit timestamp from branch")
+ }
+ return false, err
+ }
+
+ if timestamp == nil || timestamp.Branch == "" {
+ return false, fmt.Errorf("empty response")
+ }
+
+ log.Debug().Msgf("Successfully fetched latest commit timestamp from branch: %#v", timestamp)
+ o.BranchTimestamp = timestamp.Timestamp
+ o.TargetBranch = timestamp.Branch
+ return true, nil
+}
+
// ContentWebLink builds the forge's web URL for the target file and appends
// `; rel="canonical"`, i.e. the returned string is ready to be used directly
// as a Link header value.
func (o *Options) ContentWebLink(giteaClient *gitea.Client) string {
	return giteaClient.ContentWebLink(o.TargetOwner, o.TargetRepo, o.TargetBranch, o.TargetPath) + "; rel=\"canonical\""
}
diff --git a/server/upstream/redirects.go b/server/upstream/redirects.go
new file mode 100644
index 0000000..b0762d5
--- /dev/null
+++ b/server/upstream/redirects.go
@@ -0,0 +1,108 @@
+package upstream
+
+import (
+ "strconv"
+ "strings"
+ "time"
+
+ "codeberg.org/codeberg/pages/server/cache"
+ "codeberg.org/codeberg/pages/server/context"
+ "codeberg.org/codeberg/pages/server/gitea"
+ "github.com/rs/zerolog/log"
+)
+
// Redirect is a single rule parsed from a repo's "_redirects" file.
type Redirect struct {
	From       string // source path; may end in "/*" for a wildcard rule
	To         string // destination; may contain ":splat" for the wildcard remainder
	StatusCode int
}

// rewriteURL reports whether r matches reqURL and, if so, returns the
// destination URL with any ":splat" placeholder expanded for wildcard rules.
func (r *Redirect) rewriteURL(reqURL string) (dstURL string, ok bool) {
	// Exact match, ignoring a single trailing slash on either side.
	if strings.TrimSuffix(r.From, "/") == strings.TrimSuffix(reqURL, "/") {
		return r.To, true
	}

	// Wildcard rules: "/src/*" matches "/src" itself and everything below it.
	if !strings.HasSuffix(r.From, "/*") {
		return "", false
	}
	prefix := strings.TrimSuffix(r.From, "/*")
	if reqURL != prefix && !strings.HasPrefix(reqURL, prefix+"/") {
		return "", false
	}
	if !strings.Contains(r.To, ":splat") {
		return r.To, true
	}
	// Substitute the path remainder (without its leading slash) for ":splat".
	splat := strings.TrimPrefix(strings.TrimPrefix(reqURL, prefix), "/")
	return strings.ReplaceAll(r.To, ":splat", splat), true
}
+
+// redirectsCacheTimeout specifies the timeout for the redirects cache.
+var redirectsCacheTimeout = 10 * time.Minute
+
+const redirectsConfig = "_redirects"
+
+// getRedirects returns redirects specified in the _redirects file.
+func (o *Options) getRedirects(ctx *context.Context, giteaClient *gitea.Client, redirectsCache cache.ICache) []Redirect {
+ var redirects []Redirect
+ cacheKey := o.TargetOwner + "/" + o.TargetRepo + "/" + o.TargetBranch
+
+ // Check for cached redirects
+ if cachedValue, ok := redirectsCache.Get(cacheKey); ok {
+ redirects = cachedValue.([]Redirect)
+ } else {
+ // Get _redirects file and parse
+ body, err := giteaClient.GiteaRawContent(ctx, o.TargetOwner, o.TargetRepo, o.TargetBranch, redirectsConfig)
+ if err == nil {
+ for _, line := range strings.Split(string(body), "\n") {
+ redirectArr := strings.Fields(line)
+
+ // Ignore comments and invalid lines
+ if strings.HasPrefix(line, "#") || len(redirectArr) < 2 {
+ continue
+ }
+
+ // Get redirect status code
+ statusCode := 301
+ if len(redirectArr) == 3 {
+ statusCode, err = strconv.Atoi(redirectArr[2])
+ if err != nil {
+ log.Info().Err(err).Msgf("could not read %s of %s/%s", redirectsConfig, o.TargetOwner, o.TargetRepo)
+ }
+ }
+
+ redirects = append(redirects, Redirect{
+ From: redirectArr[0],
+ To: redirectArr[1],
+ StatusCode: statusCode,
+ })
+ }
+ }
+ _ = redirectsCache.Set(cacheKey, redirects, redirectsCacheTimeout)
+ }
+ return redirects
+}
+
// matchRedirects applies the first matching redirect rule to the current
// request. It returns true when a rule matched (and a response was produced
// or an internal rewrite served), false when the request should be handled
// normally.
func (o *Options) matchRedirects(ctx *context.Context, giteaClient *gitea.Client, redirects []Redirect, redirectsCache cache.ICache) (final bool) {
	reqURL := ctx.Req.RequestURI
	// remove repo and branch from request url
	reqURL = strings.TrimPrefix(reqURL, "/"+o.TargetRepo)
	reqURL = strings.TrimPrefix(reqURL, "/@"+o.TargetBranch)

	for _, redirect := range redirects {
		if dstURL, ok := redirect.rewriteURL(reqURL); ok {
			if o.TargetPath == dstURL { // recursion base case, rewrite directly when paths are the same
				return true
			} else if redirect.StatusCode == 200 { // do rewrite if status code is 200
				// Internal rewrite: serve dstURL in place of the requested
				// path by re-entering Upstream (recursion terminates at the
				// base case above).
				o.TargetPath = dstURL
				o.Upstream(ctx, giteaClient, redirectsCache)
			} else {
				ctx.Redirect(dstURL, redirect.StatusCode)
			}
			return true
		}
	}

	return false
}
diff --git a/server/upstream/redirects_test.go b/server/upstream/redirects_test.go
new file mode 100644
index 0000000..6118a70
--- /dev/null
+++ b/server/upstream/redirects_test.go
@@ -0,0 +1,36 @@
+package upstream
+
+import (
+ "testing"
+)
+
// TestRedirect_rewriteURL exercises Redirect.rewriteURL with a table of
// exact matches, wildcard matches, ":splat" substitutions and non-matches.
func TestRedirect_rewriteURL(t *testing.T) {
	for _, tc := range []struct {
		redirect   Redirect
		reqURL     string
		wantDstURL string
		wantOk     bool
	}{
		{Redirect{"/", "/dst", 200}, "/", "/dst", true},
		{Redirect{"/", "/dst", 200}, "/foo", "", false},
		{Redirect{"/src", "/dst", 200}, "/src", "/dst", true},
		{Redirect{"/src", "/dst", 200}, "/foo", "", false},
		{Redirect{"/src", "/dst", 200}, "/src/foo", "", false},
		{Redirect{"/*", "/dst", 200}, "/", "/dst", true},
		{Redirect{"/*", "/dst", 200}, "/src", "/dst", true},
		{Redirect{"/src/*", "/dst/:splat", 200}, "/src", "/dst/", true},
		{Redirect{"/src/*", "/dst/:splat", 200}, "/src/", "/dst/", true},
		{Redirect{"/src/*", "/dst/:splat", 200}, "/src/foo", "/dst/foo", true},
		{Redirect{"/src/*", "/dst/:splat", 200}, "/src/foo/bar", "/dst/foo/bar", true},
		{Redirect{"/src/*", "/dst/:splatsuffix", 200}, "/src/foo", "/dst/foosuffix", true},
		{Redirect{"/src/*", "/dst:splat", 200}, "/src/foo", "/dstfoo", true},
		{Redirect{"/src/*", "/dst", 200}, "/srcfoo", "", false},
		// This is the example from FEATURES.md:
		{Redirect{"/articles/*", "/posts/:splat", 302}, "/articles/2022/10/12/post-1/", "/posts/2022/10/12/post-1/", true},
	} {
		if dstURL, ok := tc.redirect.rewriteURL(tc.reqURL); dstURL != tc.wantDstURL || ok != tc.wantOk {
			t.Errorf("%#v.rewriteURL(%q) = %q, %v; want %q, %v",
				tc.redirect, tc.reqURL, dstURL, ok, tc.wantDstURL, tc.wantOk)
		}
	}
}
diff --git a/server/upstream/upstream.go b/server/upstream/upstream.go
new file mode 100644
index 0000000..9aac271
--- /dev/null
+++ b/server/upstream/upstream.go
@@ -0,0 +1,318 @@
+package upstream
+
+import (
+ "cmp"
+ "errors"
+ "fmt"
+ "io"
+ "net/http"
+ "slices"
+ "strconv"
+ "strings"
+ "time"
+
+ "github.com/rs/zerolog/log"
+
+ "codeberg.org/codeberg/pages/html"
+ "codeberg.org/codeberg/pages/server/cache"
+ "codeberg.org/codeberg/pages/server/context"
+ "codeberg.org/codeberg/pages/server/gitea"
+)
+
+const (
+ headerLastModified = "Last-Modified"
+ headerIfModifiedSince = "If-Modified-Since"
+ headerAcceptEncoding = "Accept-Encoding"
+ headerContentEncoding = "Content-Encoding"
+
+ rawMime = "text/plain; charset=utf-8"
+)
+
+// upstreamIndexPages lists pages that may be considered as index pages for directories.
+var upstreamIndexPages = []string{
+ "index.html",
+}
+
+// upstreamNotFoundPages lists pages that may be considered as custom 404 Not Found pages.
+var upstreamNotFoundPages = []string{
+ "404.html",
+}
+
+// Options provides various options for the upstream request.
+type Options struct {
+ TargetOwner string
+ TargetRepo string
+ TargetBranch string
+ TargetPath string
+
+ // Used for debugging purposes.
+ Host string
+
+ TryIndexPages bool
+ BranchTimestamp time.Time
+ // internal
+ appendTrailingSlash bool
+ redirectIfExists string
+
+ ServeRaw bool
+}
+
+// allowed encodings
+var allowedEncodings = map[string]string{
+ "gzip": ".gz",
+ "br": ".br",
+ "zstd": ".zst",
+ "identity": "",
+}
+
+// parses Accept-Encoding header into a list of acceptable encodings
+func AcceptEncodings(header string) []string {
+ log.Trace().Msgf("got accept-encoding: %s", header)
+ encodings := []string{}
+ globQuality := 0.0
+ qualities := make(map[string]float64)
+
+ for _, encoding := range strings.Split(header, ",") {
+ name, quality_str, has_quality := strings.Cut(encoding, ";q=")
+ quality := 1.0
+
+ if has_quality {
+ var err error
+ quality, err = strconv.ParseFloat(quality_str, 64)
+ if err != nil || quality < 0 {
+ continue
+ }
+ }
+
+ name = strings.TrimSpace(name)
+
+ if name == "*" {
+ globQuality = quality
+ } else {
+ _, allowed := allowedEncodings[name]
+ if allowed {
+ qualities[name] = quality
+ if quality > 0 {
+ encodings = append(encodings, name)
+ }
+ }
+ }
+ }
+
+ if globQuality > 0 {
+ for encoding := range allowedEncodings {
+ _, exists := qualities[encoding]
+ if !exists {
+ encodings = append(encodings, encoding)
+ qualities[encoding] = globQuality
+ }
+ }
+ } else {
+ _, exists := qualities["identity"]
+ if !exists {
+ encodings = append(encodings, "identity")
+ qualities["identity"] = -1
+ }
+ }
+
+ slices.SortStableFunc(encodings, func(x, y string) int {
+ // sort in reverse order; big quality comes first
+ return cmp.Compare(qualities[y], qualities[x])
+ })
+ log.Trace().Msgf("decided encoding order: %v", encodings)
+ return encodings
+}
+
+// Upstream requests a file from the Gitea API at GiteaRoot and writes it to the request context.
+func (o *Options) Upstream(ctx *context.Context, giteaClient *gitea.Client, redirectsCache cache.ICache) bool {
+ log := log.With().Str("ReqId", ctx.ReqId).Strs("upstream", []string{o.TargetOwner, o.TargetRepo, o.TargetBranch, o.TargetPath}).Logger()
+
+ log.Debug().Msg("Start")
+
+ if o.TargetOwner == "" || o.TargetRepo == "" {
+ html.ReturnErrorPage(ctx, "forge client: either repo owner or name info is missing", http.StatusBadRequest)
+ return true
+ }
+
+ // Check if the branch exists and when it was modified
+ if o.BranchTimestamp.IsZero() {
+ branchExist, err := o.GetBranchTimestamp(giteaClient)
+ // handle 404
+ if err != nil && errors.Is(err, gitea.ErrorNotFound) || !branchExist {
+ html.ReturnErrorPage(ctx,
+ fmt.Sprintf("branch <code>%q</code> for <code>%s/%s</code> not found", o.TargetBranch, o.TargetOwner, o.TargetRepo),
+ http.StatusNotFound)
+ return true
+ }
+
+ // handle unexpected errors
+ if err != nil {
+ html.ReturnErrorPage(ctx,
+ fmt.Sprintf("could not get timestamp of branch <code>%q</code>: '%v'", o.TargetBranch, err),
+ http.StatusFailedDependency)
+ return true
+ }
+ }
+
+ // Check if the browser has a cached version
+ if ctx.Response() != nil {
+ if ifModifiedSince, err := time.Parse(time.RFC1123, ctx.Response().Header.Get(headerIfModifiedSince)); err == nil {
+ if ifModifiedSince.After(o.BranchTimestamp) {
+ ctx.RespWriter.WriteHeader(http.StatusNotModified)
+ log.Trace().Msg("check response against last modified: valid")
+ return true
+ }
+ }
+ log.Trace().Msg("check response against last modified: outdated")
+ }
+
+ log.Debug().Msg("Preparing")
+
+ var reader io.ReadCloser
+ var header http.Header
+ var statusCode int
+ var err error
+
+ // pick first non-404 response for encoding, *only* if not root
+ if o.TargetPath == "" || strings.HasSuffix(o.TargetPath, "/") {
+ err = gitea.ErrorNotFound
+ } else {
+ for _, encoding := range AcceptEncodings(ctx.Req.Header.Get(headerAcceptEncoding)) {
+ log.Trace().Msgf("try %s encoding", encoding)
+
+ // add extension for encoding
+ path := o.TargetPath + allowedEncodings[encoding]
+ reader, header, statusCode, err = giteaClient.ServeRawContent(ctx, o.TargetOwner, o.TargetRepo, o.TargetBranch, path, true)
+ if statusCode == http.StatusNotFound {
+ continue
+ }
+ if err != nil {
+ break
+ }
+ log.Debug().Msgf("using %s encoding", encoding)
+ if encoding != "identity" {
+ header.Set(headerContentEncoding, encoding)
+ }
+ break
+ }
+ if reader != nil {
+ defer reader.Close()
+ }
+ }
+
+ log.Debug().Msg("Aquisting")
+
+ // Handle not found error
+ if err != nil && errors.Is(err, gitea.ErrorNotFound) {
+ log.Debug().Msg("Handling not found error")
+ // Get and match redirects
+ redirects := o.getRedirects(ctx, giteaClient, redirectsCache)
+ if o.matchRedirects(ctx, giteaClient, redirects, redirectsCache) {
+ log.Trace().Msg("redirect")
+ return true
+ }
+
+ if o.TryIndexPages {
+ log.Trace().Msg("try index page")
+ // copy the o struct & try if an index page exists
+ optionsForIndexPages := *o
+ optionsForIndexPages.TryIndexPages = false
+ optionsForIndexPages.appendTrailingSlash = true
+ for _, indexPage := range upstreamIndexPages {
+ optionsForIndexPages.TargetPath = strings.TrimSuffix(o.TargetPath, "/") + "/" + indexPage
+ if optionsForIndexPages.Upstream(ctx, giteaClient, redirectsCache) {
+ return true
+ }
+ }
+ log.Trace().Msg("try html file with path name")
+ // compatibility fix for GitHub Pages (/example → /example.html)
+ optionsForIndexPages.appendTrailingSlash = false
+ optionsForIndexPages.redirectIfExists = strings.TrimSuffix(ctx.Path(), "/") + ".html"
+ optionsForIndexPages.TargetPath = o.TargetPath + ".html"
+ if optionsForIndexPages.Upstream(ctx, giteaClient, redirectsCache) {
+ return true
+ }
+ }
+
+ log.Debug().Msg("not found")
+
+ ctx.StatusCode = http.StatusNotFound
+ if o.TryIndexPages {
+ log.Trace().Msg("try not found page")
+ // copy the o struct & try if a not found page exists
+ optionsForNotFoundPages := *o
+ optionsForNotFoundPages.TryIndexPages = false
+ optionsForNotFoundPages.appendTrailingSlash = false
+ for _, notFoundPage := range upstreamNotFoundPages {
+ optionsForNotFoundPages.TargetPath = "/" + notFoundPage
+ if optionsForNotFoundPages.Upstream(ctx, giteaClient, redirectsCache) {
+ return true
+ }
+ }
+ log.Trace().Msg("not found page missing")
+ }
+
+ return false
+ }
+
+ // handle unexpected client errors
+ if err != nil || reader == nil || statusCode != http.StatusOK {
+ log.Debug().Msg("Handling error")
+ var msg string
+
+ if err != nil {
+ msg = "forge client: returned unexpected error"
+ log.Error().Err(err).Msg(msg)
+ msg = fmt.Sprintf("%s: '%v'", msg, err)
+ }
+ if reader == nil {
+ msg = "forge client: returned no reader"
+ log.Error().Msg(msg)
+ }
+ if statusCode != http.StatusOK {
+ msg = fmt.Sprintf("forge client: couldn't fetch contents: <code>%d - %s</code>", statusCode, http.StatusText(statusCode))
+ log.Error().Msg(msg)
+ }
+
+ html.ReturnErrorPage(ctx, msg, http.StatusInternalServerError)
+ return true
+ }
+
+ // Append trailing slash if missing (for index files), and redirect to fix filenames in general
+ // o.appendTrailingSlash is only true when looking for index pages
+ if o.appendTrailingSlash && !strings.HasSuffix(ctx.Path(), "/") {
+ log.Trace().Msg("append trailing slash and redirect")
+ ctx.Redirect(ctx.Path()+"/", http.StatusTemporaryRedirect)
+ return true
+ }
+ if strings.HasSuffix(ctx.Path(), "/index.html") && !o.ServeRaw {
+ log.Trace().Msg("remove index.html from path and redirect")
+ ctx.Redirect(strings.TrimSuffix(ctx.Path(), "index.html"), http.StatusTemporaryRedirect)
+ return true
+ }
+ if o.redirectIfExists != "" {
+ ctx.Redirect(o.redirectIfExists, http.StatusTemporaryRedirect)
+ return true
+ }
+
+ // Set ETag & MIME
+ o.setHeader(ctx, header)
+
+ log.Debug().Msg("Prepare response")
+
+ ctx.RespWriter.WriteHeader(ctx.StatusCode)
+
+ // Write the response body to the original request
+ if reader != nil {
+ _, err := io.Copy(ctx.RespWriter, reader)
+ if err != nil {
+ log.Error().Err(err).Msgf("Couldn't write body for %q", o.TargetPath)
+ html.ReturnErrorPage(ctx, "", http.StatusInternalServerError)
+ return true
+ }
+ }
+
+ log.Debug().Msg("Sending response")
+
+ return true
+}
diff --git a/server/utils/utils.go b/server/utils/utils.go
new file mode 100644
index 0000000..91ed359
--- /dev/null
+++ b/server/utils/utils.go
@@ -0,0 +1,27 @@
+package utils
+
+import (
+ "net/url"
+ "path"
+ "strings"
+)
+
+func TrimHostPort(host string) string {
+ i := strings.IndexByte(host, ':')
+ if i >= 0 {
+ return host[:i]
+ }
+ return host
+}
+
+func CleanPath(uriPath string) string {
+ unescapedPath, _ := url.PathUnescape(uriPath)
+ cleanedPath := path.Join("/", unescapedPath)
+
+ // If the path refers to a directory, add a trailing slash.
+ if !strings.HasSuffix(cleanedPath, "/") && (strings.HasSuffix(unescapedPath, "/") || strings.HasSuffix(unescapedPath, "/.") || strings.HasSuffix(unescapedPath, "/..")) {
+ cleanedPath += "/"
+ }
+
+ return cleanedPath
+}
diff --git a/server/utils/utils_test.go b/server/utils/utils_test.go
new file mode 100644
index 0000000..b8fcea9
--- /dev/null
+++ b/server/utils/utils_test.go
@@ -0,0 +1,69 @@
+package utils
+
+import (
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+)
+
+func TestTrimHostPort(t *testing.T) {
+ assert.EqualValues(t, "aa", TrimHostPort("aa"))
+ assert.EqualValues(t, "", TrimHostPort(":"))
+ assert.EqualValues(t, "example.com", TrimHostPort("example.com:80"))
+}
+
+// TestCleanPath is mostly copied from fasthttp, to keep the behaviour we had before migrating away from it.
+// Source (MIT licensed): https://github.com/valyala/fasthttp/blob/v1.48.0/uri_test.go#L154
+// Copyright (c) 2015-present Aliaksandr Valialkin, VertaMedia, Kirill Danshin, Erik Dubbelboer, FastHTTP Authors
+func TestCleanPath(t *testing.T) {
+ // double slash
+ testURIPathNormalize(t, "/aa//bb", "/aa/bb")
+
+ // triple slash
+ testURIPathNormalize(t, "/x///y/", "/x/y/")
+
+ // multi slashes
+ testURIPathNormalize(t, "/abc//de///fg////", "/abc/de/fg/")
+
+ // encoded slashes
+ testURIPathNormalize(t, "/xxxx%2fyyy%2f%2F%2F", "/xxxx/yyy/")
+
+ // dotdot
+ testURIPathNormalize(t, "/aaa/..", "/")
+
+ // dotdot with trailing slash
+ testURIPathNormalize(t, "/xxx/yyy/../", "/xxx/")
+
+ // multi dotdots
+ testURIPathNormalize(t, "/aaa/bbb/ccc/../../ddd", "/aaa/ddd")
+
+ // dotdots separated by other data
+ testURIPathNormalize(t, "/a/b/../c/d/../e/..", "/a/c/")
+
+ // too many dotdots
+ testURIPathNormalize(t, "/aaa/../../../../xxx", "/xxx")
+ testURIPathNormalize(t, "/../../../../../..", "/")
+ testURIPathNormalize(t, "/../../../../../../", "/")
+
+ // encoded dotdots
+ testURIPathNormalize(t, "/aaa%2Fbbb%2F%2E.%2Fxxx", "/aaa/xxx")
+
+ // double slash with dotdots
+ testURIPathNormalize(t, "/aaa////..//b", "/b")
+
+ // fake dotdot
+ testURIPathNormalize(t, "/aaa/..bbb/ccc/..", "/aaa/..bbb/")
+
+ // single dot
+ testURIPathNormalize(t, "/a/./b/././c/./d.html", "/a/b/c/d.html")
+ testURIPathNormalize(t, "./foo/", "/foo/")
+ testURIPathNormalize(t, "./../.././../../aaa/bbb/../../../././../", "/")
+ testURIPathNormalize(t, "./a/./.././../b/./foo.html", "/b/foo.html")
+}
+
+func testURIPathNormalize(t *testing.T, requestURI, expectedPath string) {
+ cleanedPath := CleanPath(requestURI)
+ if cleanedPath != expectedPath {
+ t.Fatalf("Unexpected path %q. Expected %q. requestURI=%q", cleanedPath, expectedPath, requestURI)
+ }
+}
diff --git a/server/version/version.go b/server/version/version.go
new file mode 100644
index 0000000..aa2cbb5
--- /dev/null
+++ b/server/version/version.go
@@ -0,0 +1,3 @@
+package version
+
+var Version string = "dev"
diff --git a/src/cli.rs b/src/cli.rs
deleted file mode 100644
index 039bbbf..0000000
--- a/src/cli.rs
+++ /dev/null
@@ -1,89 +0,0 @@
-// This file is part of the "lamp" program.
-// Copyright (C) 2022 crapStone
-// You should have received a copy of the GNU General Public License
-// along with this program. If not, see <https://www.gnu.org/licenses/>.
-
-use clap::{App, Arg};
-
-pub fn build_cli() -> App<'static> {
- App::new("lamp")
- .version(env!("CARGO_PKG_VERSION"))
- .author("crapStone ")
- .about("Utility to interact with backlight")
- .global_setting(clap::AppSettings::ArgRequiredElseHelp)
- .arg(
- Arg::with_name("set")
- .short('s')
- .long("set")
- .help("Sets brightness to given value")
- .takes_value(true)
- .value_name("VALUE"),
- )
- .arg(
- Arg::with_name("inc")
- .short('i')
- .long("increase")
- .help("Increases brightness")
- .takes_value(true)
- .value_name("PERCENT"),
- )
- .arg(
- Arg::with_name("dec")
- .short('d')
- .long("decrease")
- .help("Decreases brightness")
- .takes_value(true)
- .value_name("PERCENT"),
- )
- .arg(
- Arg::with_name("get")
- .short('g')
- .long("get")
- .help("Prints current brightness value"),
- )
- .arg(
- Arg::with_name("zero")
- .short('z')
- .long("zero")
- .help("Sets brightness to lowest value"),
- )
- .arg(
- Arg::with_name("full")
- .short('f')
- .long("full")
- .help("Sets brightness to highest value"),
- )
- .arg(
- Arg::with_name("list")
- .short('l')
- .long("list")
- .help("Lists all devices with controllable brightness and led values")
- .exclusive(true),
- )
- .arg(
- Arg::with_name("controller")
- .short('c')
- .long("controller")
- .help("Select device to control, defaults to the first device found")
- .value_name("DEVICE")
- .takes_value(true),
- )
- .arg(
- Arg::with_name("ctrl_type")
- .short('t')
- .long("type")
- .value_name("controller_type")
- .takes_value(true)
- .possible_values(&["raw", "lin", "log"])
- .default_value("lin")
- .help("choose controller type")
- .long_help(
- r#"You can choose between these controller types:
-raw: uses the raw values found in the device files
-lin: uses percentage values (0.0 - 1.0) with a linear curve
-log: uses percentage values (0.0 - 1.0) with a logarithmic curve
- the perceived brightness for the human eyes should be linear with this controller
-"#,
- ),
- )
-}
diff --git a/src/controllers.rs b/src/controllers.rs
deleted file mode 100644
index d4c447d..0000000
--- a/src/controllers.rs
+++ /dev/null
@@ -1,204 +0,0 @@
-// This file is part of the "lamp" program.
-// Copyright (C) 2022 crapStone
-// You should have received a copy of the GNU General Public License
-// along with this program. If not, see <https://www.gnu.org/licenses/>.
-
-use std::collections::HashMap;
-use std::fs::{File, OpenOptions};
-use std::io::{self, prelude::*};
-use std::path::{Path, PathBuf};
-use std::process::exit;
-
-const SYS_PATHS: [&str; 2] = ["/sys/class/backlight", "/sys/class/leds"];
-
-pub trait Controller {
- fn get_brightness(&self) -> u32;
- fn get_max_brightness(&self) -> u32;
- fn set_brightness(&self, value: u32);
-
- fn check_brightness_value(&self, value: u32) {
- let max = self.get_max_brightness();
- if value > max {
- eprintln!("brightness value too high: {value} > {max}",);
- exit(exitcode::DATAERR);
- }
- }
-}
-
-pub struct RawController {
- path: PathBuf,
-}
-
-impl RawController {
- pub fn new(path: PathBuf) -> Self {
- Self { path }
- }
-}
-
-impl Controller for RawController {
- fn get_brightness(&self) -> u32 {
- read_file_to_int(self.path.join("brightness"))
- }
-
- fn get_max_brightness(&self) -> u32 {
- read_file_to_int(self.path.join("max_brightness"))
- }
-
- fn set_brightness(&self, value: u32) {
- self.check_brightness_value(value);
-
- let path = self.path.join("brightness");
-
- let mut file = match OpenOptions::new().write(true).read(true).open(&path) {
- Err(why) => {
- eprintln!("couldn't open '{}': {:?}", &path.display(), why.kind());
- exit(exitcode::OSFILE);
- }
- Ok(file) => file,
- };
-
- match write!(file, "{}", value) {
- Ok(_) => {}
- Err(err) => {
- eprintln!(
- "could not write '{value}' to file '{}': {:?}",
- &path.display(),
- err.kind()
- );
- exit(exitcode::OSFILE);
- }
- };
- }
-}
-
-pub struct LinController {
- parent_controller: RawController,
-}
-
-impl LinController {
- pub fn new(path: PathBuf) -> Self {
- Self {
- parent_controller: RawController::new(path),
- }
- }
-}
-
-impl Controller for LinController {
- fn get_brightness(&self) -> u32 {
- ((self.parent_controller.get_brightness() as f64
- / self.parent_controller.get_max_brightness() as f64)
- * self.get_max_brightness() as f64) as u32
- }
-
- fn get_max_brightness(&self) -> u32 {
- 100
- }
-
- fn set_brightness(&self, value: u32) {
- self.check_brightness_value(value);
-
- if value > self.get_max_brightness() {
- eprintln!(
- "brightness value too high! {value} > {}",
- self.get_max_brightness()
- );
- exit(exitcode::DATAERR);
- }
-
- self.parent_controller.set_brightness(
- (value * self.parent_controller.get_max_brightness()) / self.get_max_brightness(),
- )
- }
-}
-
-pub struct LogController {
- parent_controller: RawController,
-}
-
-impl LogController {
- pub fn new(path: PathBuf) -> Self {
- Self {
- parent_controller: RawController::new(path),
- }
- }
-}
-
-impl Controller for LogController {
- fn get_brightness(&self) -> u32 {
- ((self.parent_controller.get_brightness() as f64).log10()
- / (self.parent_controller.get_max_brightness() as f64).log10()
- * self.get_max_brightness() as f64) as u32
- }
-
- fn get_max_brightness(&self) -> u32 {
- 100
- }
-
- fn set_brightness(&self, value: u32) {
- self.check_brightness_value(value);
-
- if value > self.get_max_brightness() {
- eprintln!(
- "brightness value too high! {value} > {}",
- self.get_max_brightness()
- );
- exit(exitcode::DATAERR);
- }
-
- self.parent_controller.set_brightness(10f64.powf(
- (value as f64 / self.get_max_brightness() as f64)
- * (self.parent_controller.get_max_brightness() as f64).log10(),
- ) as u32)
- }
-}
-
-fn read_file_to_int(path: PathBuf) -> u32 {
- let mut file = match File::open(&path) {
- Err(why) => {
- eprintln!("couldn't open {}: {:?}", path.display(), why.kind());
- exit(exitcode::OSFILE);
- }
- Ok(file) => file,
- };
-
- let mut s = String::new();
- match file.read_to_string(&mut s) {
- Err(why) => {
- eprintln!("couldn't read {}: {:?}", path.display(), why.kind());
- exit(exitcode::OSFILE);
- }
- Ok(_) => return s.trim().parse().unwrap(),
- }
-}
-
-/// Searches through all paths in `SYS_PATHS` and creates a `HashMap` with the name and absolute path.
-///
-/// It returns a `Tuple` of the default backlight name and the `HashMap`.
-pub fn get_controllers() -> Result<(String, HashMap<String, PathBuf>), io::Error> {
-    let mut controllers: HashMap<String, PathBuf> = HashMap::new();
-
- let mut default = None;
-
- for path in SYS_PATHS {
- if Path::new(path).exists() {
- for name in Path::new(path).read_dir()? {
- let name = name?.path();
- let key = String::from(name.file_name().unwrap().to_str().unwrap());
-
- if default.is_none() {
- default = Some(key.clone());
- }
-
- controllers.insert(key, name);
- }
- }
- }
-
- Ok((
- default.unwrap_or_else(|| {
- eprintln!("no devices found");
- exit(exitcode::OSFILE)
- }),
- controllers,
- ))
-}
diff --git a/src/main.rs b/src/main.rs
deleted file mode 100644
index 52e3180..0000000
--- a/src/main.rs
+++ /dev/null
@@ -1,92 +0,0 @@
-// This file is part of the "lamp" program.
-// Copyright (C) 2022 crapStone
-// You should have received a copy of the GNU General Public License
-// along with this program. If not, see <https://www.gnu.org/licenses/>.
-
-mod cli;
-mod controllers;
-
-use std::process::exit;
-
-use controllers::{Controller, LinController, LogController, RawController};
-
-use crate::cli::build_cli;
-
-fn main() {
- let app = build_cli();
- let matches = app.get_matches();
-
- let (default_ctrl, ctrls) = controllers::get_controllers().unwrap_or_else(|why| {
- eprintln!("an error occured when reading devices: '{why}'");
- exit(exitcode::IOERR)
- });
-
- let p = match matches.value_of("controller") {
- Some(ctrl) => {
- let p = ctrls.get(ctrl);
- if p == None {
- eprintln!("no device with name '{ctrl}' found");
- eprintln!("use --list to ge a list of all available devices");
- exit(exitcode::DATAERR);
- }
-
- p.unwrap().to_owned()
- }
- None => ctrls.get(&default_ctrl).unwrap().to_owned(),
- };
-
- let controller: Box<dyn Controller> = match matches.value_of("ctrl_type") {
- Some("raw") => Box::new(RawController::new(p)),
- Some("lin") => Box::new(LinController::new(p)),
- Some("log") => Box::new(LogController::new(p)),
- Some(_) | None => panic!("{ERROR_MSG}"),
- };
-
- if matches.is_present("list") {
- println!("{default_ctrl} [default]");
- for ctrl in ctrls.keys() {
- if ctrl != &default_ctrl {
- println!("{ctrl}");
- }
- }
- exit(exitcode::OK);
- } else if let Some(value) = matches.value_of("set") {
- let new_value = str_to_int(value);
- controller.set_brightness(new_value);
- } else if let Some(value) = matches.value_of("inc") {
- let new_value = controller.get_brightness() + str_to_int(value);
- controller.set_brightness(new_value.min(controller.get_max_brightness()));
- } else if let Some(value) = matches.value_of("dec") {
- let new_value = controller.get_brightness() - str_to_int(value);
- controller.set_brightness(new_value);
- } else if matches.is_present("get") {
- println!("{}", controller.get_brightness());
- } else if matches.is_present("zero") {
- controller.set_brightness(0);
- } else if matches.is_present("full") {
- controller.set_brightness(controller.get_max_brightness());
- } else {
- build_cli().print_long_help().unwrap();
- }
-}
-
-#[inline(always)]
-fn str_to_int(value: &str) -> u32 {
- value.parse().unwrap_or_else(|_| {
- eprintln!("cannot parse '{value}' as positive integer");
- exit(exitcode::DATAERR);
- })
-}
-
-// https://xkcd.com/2200/
-const ERROR_MSG: &str = r#"
- ERROR!
-
- If you're seeing this, the code is in what I thought was an unreachable state.
-
- I could give you advice for what to do. but honestly, why should you trust me?
- I clearly screwed this up. I'm writing a message that should never appear,
- yet I know it will probably appear someday.
-
- On a deep level, I know I'm not up to this task. I'm so sorry.
-"#;