This commit is contained in:
6543 2022-09-18 22:41:52 +02:00
parent 4a2a14272b
commit 94cb43508c
No known key found for this signature in database
GPG key ID: B8BE6D610E61C862
2 changed files with 57 additions and 30 deletions

View file

@ -1,8 +1,30 @@
package gitea
import (
"bytes"
"io"
"net/http"
"time"
"codeberg.org/codeberg/pages/server/cache"
)
// Cache tuning knobs for the Gitea-backed content cache.
const (
	// defaultBranchCacheTimeout specifies the timeout for the default branch cache. It can be quite long.
	defaultBranchCacheTimeout = 15 * time.Minute

	// branchExistenceCacheTimeout specifies the timeout for the branch timestamp & existence cache. It should be shorter
	// than fileCacheTimeout, as that gets invalidated if the branch timestamp has changed. That way, repo changes will be
	// picked up faster, while still allowing the content to be cached longer if nothing changes.
	branchExistenceCacheTimeout = 5 * time.Minute

	// fileCacheTimeout specifies the timeout for the file content cache - you might want to make this quite long, depending
	// on your available memory.
	// TODO: move as option into cache interface
	fileCacheTimeout = 5 * time.Minute

	// fileCacheSizeLimit limits the maximum file size that will be cached, and is set to 1 MB by default.
	fileCacheSizeLimit = int64(1024 * 1024)
)
type FileResponse struct {
@ -42,20 +64,39 @@ type BranchTimestamp struct {
Timestamp time.Time
}
const (
// defaultBranchCacheTimeout specifies the timeout for the default branch cache. It can be quite long.
defaultBranchCacheTimeout = 15 * time.Minute
// writeCacheReader tees every byte read from the wrapped reader into an
// in-memory buffer, so the complete body can be stored in the cache once
// the stream is closed (see Close).
type writeCacheReader struct {
	r        io.Reader       // underlying content stream being served
	buff     *bytes.Buffer   // accumulates everything read so far
	f        *FileResponse   // response template; Body is filled in on Close
	cacheKey string          // key the finished response is stored under
	cache    cache.SetGetKey // destination cache
	hasErr   bool            // set on read failure; suppresses caching in Close
}
// branchExistenceCacheTimeout specifies the timeout for the branch timestamp & existence cache. It should be shorter
// than fileCacheTimeout, as that gets invalidated if the branch timestamp has changed. That way, repo changes will be
// picked up faster, while still allowing the content to be cached longer if nothing changes.
branchExistenceCacheTimeout = 5 * time.Minute
// Read reads from the underlying reader and mirrors every successfully read
// byte into the internal buffer.
//
// io.EOF is the normal end of stream and must NOT mark the reader as failed:
// the original code set hasErr on any error, which meant every stream
// (all of which terminate with io.EOF) ended up flagged as broken and the
// cache was never written in Close. It also skipped buffering when err was
// non-nil, dropping final bytes — the io.Reader contract explicitly allows
// n > 0 together with a non-nil error, including io.EOF.
func (t *writeCacheReader) Read(p []byte) (n int, err error) {
	n, err = t.r.Read(p)
	if n > 0 {
		// Buffer the data first; a bytes.Buffer Write never returns an error.
		_, _ = t.buff.Write(p[:n])
	}
	if err != nil && err != io.EOF {
		t.hasErr = true
	}
	return
}
// fileCacheTimeout specifies the timeout for the file content cache - you might want to make this quite long, depending
// on your available memory.
// TODO: move as option into cache interface
fileCacheTimeout = 5 * time.Minute
// Close stores the fully buffered body in the cache (unless a read error was
// observed) and then closes the underlying reader.
//
// The original returned t.Close() — the method calling itself — which is
// unbounded recursion and overflows the stack on the first call. The struct
// only holds an io.Reader, so the wrapped reader is closed via a type
// assertion; CreateCacheReader always passes an io.ReadCloser.
func (t *writeCacheReader) Close() error {
	if !t.hasErr {
		t.f.Body = t.buff.Bytes()
		// best-effort: a failed cache write must not fail the request
		t.cache.Set(t.cacheKey, *t.f, fileCacheTimeout)
	}
	if c, ok := t.r.(io.Closer); ok {
		return c.Close()
	}
	return nil
}
// fileCacheSizeLimit limits the maximum file size that will be cached, and is set to 1 MB by default.
fileCacheSizeLimit = int64(1024 * 1024)
)
// CreateCacheReader wraps r so that everything read through the returned
// ReadCloser is mirrored into memory; on Close, the accumulated body is
// stored in cache under cacheKey together with a copy of f's metadata.
//
// Fix: the original struct literal never assigned the cache field, so the
// reader carried a nil cache and Close panicked with a nil-pointer
// dereference on the first cacheable response.
func (f FileResponse) CreateCacheReader(r io.ReadCloser, cache cache.SetGetKey, cacheKey string) io.ReadCloser {
	return &writeCacheReader{
		r:        r,
		buff:     &bytes.Buffer{}, // zero value is ready to use; no pre-made []byte needed
		f:        &f,              // f is a value receiver, so this copy is private to the reader
		cache:    cache,           // was missing in the original literal
		cacheKey: cacheKey,
	}
}

View file

@ -158,26 +158,12 @@ func (client *Client) ServeRawContent(targetOwner, targetRepo, ref, resource str
}
// now we write to cache and respond at the same time
// TODO: at the same time !!!
/*
we create a new type that implement an writer witch write to cache based on key etc ...
// TODO: cache is half-empty if request is cancelled - does the ctx.Err() below do the trick?
// err = res.BodyWriteTo(io.MultiWriter(ctx.Response().BodyWriter(), &cacheBodyWriter))
*/
body, err := io.ReadAll(io.LimitReader(reader, fileCacheSizeLimit))
if err != nil {
log.Error().Err(err).Msg("not expected")
}
if err := client.responseCache.Set(cacheKey, FileResponse{
fileResp := FileResponse{
Exists: true,
ETag: resp.Header.Get(eTagHeader),
MimeType: mimeType,
Body: body,
}, fileCacheTimeout); err != nil {
log.Error().Err(err).Msg("could not save content in cache")
}
return io.NopCloser(bytes.NewReader(body)), resp.Response, nil
return fileResp.CreateCacheReader(reader, client.responseCache, cacheKey), resp.Response, nil
}
case http.StatusNotFound: