package gitea

import (
	"bytes"
	"fmt"
	"io"
	"net/http"
	"strconv"
	"strings"
	"time"

	"github.com/rs/zerolog/log"

	"codeberg.org/codeberg/pages/server/cache"
)

const (
	// defaultBranchCacheTimeout specifies the timeout for the default branch cache. It can be quite long.
	defaultBranchCacheTimeout = 15 * time.Minute

	// branchExistenceCacheTimeout specifies the timeout for the branch timestamp & existence cache. It should be shorter
	// than fileCacheTimeout, as that gets invalidated if the branch timestamp has changed. That way, repo changes will be
	// picked up faster, while still allowing the content to be cached longer if nothing changes.
	branchExistenceCacheTimeout = 5 * time.Minute

	// fileCacheTimeout specifies the timeout for the file content cache - you might want to make this quite long, depending
	// on your available memory.
	// TODO: move as option into cache interface
	fileCacheTimeout = 5 * time.Minute

	// ownerExistenceCacheTimeout specifies the timeout for the existence of a repo/org
	ownerExistenceCacheTimeout = 5 * time.Minute

	// fileCacheSizeLimit limits the maximum file size that will be cached, and is set to 1 MB by default.
	fileCacheSizeLimit = int64(1000 * 1000)
)
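
// FileResponse describes a file as served from Gitea: whether it exists,
// whether it is a symlink, its ETag, its MIME type and, when cached, its body.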
type FileResponse struct {
	Exists    bool
	IsSymlink bool
	ETag      string
	MimeType  string
	Body      []byte
}
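
// FileResponseFromMetadataString parses the newline-separated metadata produced
// by MetadataAsString back into a FileResponse (without the body). It assumes
// the input contains at least four fields.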
func FileResponseFromMetadataString(metadataString string) FileResponse {
	parts := strings.Split(metadataString, "\n")
	res := FileResponse{
		Exists:    parts[0] == "true",
		IsSymlink: parts[1] == "true",
		ETag:      parts[2],
		MimeType:  parts[3],
	}
	return res
}
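
// MetadataAsString serializes the response metadata (everything except the
// body) into a newline-separated string suitable for the metadata cache entry.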
func (f FileResponse) MetadataAsString() string {
	return strconv.FormatBool(f.Exists) + "\n" +
		strconv.FormatBool(f.IsSymlink) + "\n" +
		f.ETag + "\n" +
		f.MimeType + "\n"
}
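
// IsEmpty reports whether the response carries no body.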
func (f FileResponse) IsEmpty() bool {
	return len(f.Body) == 0
}
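
// createHttpResponse builds the headers and status code for serving a cached
// file; the header-name constants (ETagHeader, ContentTypeHeader, and so on)
// are defined elsewhere in this package.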
func (f FileResponse) createHttpResponse(cacheKey string) (header http.Header, statusCode int) {
	header = make(http.Header)

	if f.Exists {
		statusCode = http.StatusOK
	} else {
		statusCode = http.StatusNotFound
	}

	if f.IsSymlink {
		header.Set(giteaObjectTypeHeader, objTypeSymlink)
	}
	header.Set(ETagHeader, f.ETag)
	header.Set(ContentTypeHeader, f.MimeType)
	header.Set(ContentLengthHeader, fmt.Sprintf("%d", len(f.Body)))
	header.Set(PagesCacheIndicatorHeader, "true")

	log.Trace().Msgf("fileCache for %q used", cacheKey)
	return header, statusCode
}
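
// BranchTimestamp pairs a branch name with the timestamp used to detect changes
// (see branchExistenceCacheTimeout above); notFound marks a branch that could
// not be resolved.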
type BranchTimestamp struct {
	Branch    string
	Timestamp time.Time
	notFound  bool
}
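
// writeCacheReader wraps the original response body and copies everything that
// is read from it into an in-memory buffer, so the content can be stored in the
// cache when the reader is closed.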
type writeCacheReader struct {
	originalReader io.ReadCloser
	buffer         *bytes.Buffer
	fileResponse   *FileResponse
	cacheKey       string
	cache          cache.ICache
	hasError       bool
	doNotCache     bool
}
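
// Read forwards to the wrapped reader and buffers the bytes it returns. A read
// error (other than io.EOF) or a body larger than fileCacheSizeLimit marks the
// response as not cacheable.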
func (t *writeCacheReader) Read(p []byte) (n int, err error) {
	log.Trace().Msgf("[cache] read %q", t.cacheKey)
	n, err = t.originalReader.Read(p)
	if err != nil && err != io.EOF {
		log.Trace().Err(err).Msgf("[cache] original reader for %q has returned an error", t.cacheKey)
		t.hasError = true
	} else if n > 0 {
		// Stop buffering once the content exceeds the cache size limit.
		if t.buffer.Len()+n > int(fileCacheSizeLimit) {
			t.doNotCache = true
			t.buffer.Reset()
		} else {
			_, _ = t.buffer.Write(p[:n])
		}
	}
	return
}
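
// Close stores the buffered metadata and body in the cache under the
// "<cacheKey>|Metadata" and "<cacheKey>|Body" keys, unless an error occurred or
// the size limit was exceeded, and then closes the wrapped reader.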
func (t *writeCacheReader) Close() error {
	doWrite := !t.hasError && !t.doNotCache
	fc := *t.fileResponse
	fc.Body = t.buffer.Bytes()
	if doWrite {
		err := t.cache.Set(t.cacheKey+"|Metadata", []byte(fc.MetadataAsString()), fileCacheTimeout)
		if err != nil {
			log.Trace().Err(err).Msgf("[cache] writer for %q has returned an error", t.cacheKey+"|Metadata")
		}
		err = t.cache.Set(t.cacheKey+"|Body", fc.Body, fileCacheTimeout)
		if err != nil {
			log.Trace().Err(err).Msgf("[cache] writer for %q has returned an error", t.cacheKey+"|Body")
		}
	}
	log.Trace().Msgf("cacheReader for %q saved=%t closed", t.cacheKey, doWrite)
	return t.originalReader.Close()
}
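
// CreateCacheReader wraps r so that everything read from it is also written to
// the given cache under cacheKey once the reader is closed. It returns nil if
// any argument is missing.
//
// A minimal usage sketch (resp, fileCache, cacheKey and the FileResponse fields
// below are hypothetical, not part of this file):
//
//	fr := FileResponse{Exists: true, ETag: etag, MimeType: mime}
//	reader := fr.CreateCacheReader(resp.Body, fileCache, cacheKey)
//	// read from reader as usual; Close persists the content to the cache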
func (f FileResponse) CreateCacheReader(r io.ReadCloser, cache cache.ICache, cacheKey string) io.ReadCloser {
	if r == nil || cache == nil || cacheKey == "" {
		log.Error().Msg("could not create CacheReader")
		return nil
	}

	return &writeCacheReader{
		originalReader: r,
		buffer:         bytes.NewBuffer(make([]byte, 0)),
		fileResponse:   &f,
		cache:          cache,
		cacheKey:       cacheKey,
	}
}