wip: add fileCache back

This commit is contained in:
6543 2022-09-18 21:02:55 +02:00
parent 9626d3a8a0
commit 8dac935cd8
No known key found for this signature in database
GPG key ID: B8BE6D610E61C862
2 changed files with 127 additions and 52 deletions

View file

@ -1,10 +1,14 @@
package gitea package gitea
import "time" import (
"net/http"
"time"
)
type FileResponse struct { type FileResponse struct {
Exists bool Exists bool
ETag []byte IsSymlink bool
ETag string
MimeType string MimeType string
Body []byte Body []byte
} }
@ -13,12 +17,32 @@ func (f FileResponse) IsEmpty() bool {
return len(f.Body) != 0 return len(f.Body) != 0
} }
// createHttpResponse assembles an *http.Response that mirrors what Gitea
// would have sent for this cached file: 200 vs 404 from f.Exists, the
// symlink object-type header when f.IsSymlink is set, plus the stored ETag
// and MIME type. The body is not attached here; callers stream f.Body
// themselves.
func (f FileResponse) createHttpResponse() *http.Response {
	response := &http.Response{
		Header:     make(http.Header),
		StatusCode: http.StatusNotFound, // overridden below when the file exists
	}
	if f.Exists {
		response.StatusCode = http.StatusOK
	}
	if f.IsSymlink {
		response.Header.Set(giteaObjectTypeHeader, objTypeSymlink)
	}
	response.Header.Set(eTagHeader, f.ETag)
	response.Header.Set(contentTypeHeader, f.MimeType)
	return response
}
type BranchTimestamp struct { type BranchTimestamp struct {
Branch string Branch string
Timestamp time.Time Timestamp time.Time
} }
var ( const (
// defaultBranchCacheTimeout specifies the timeout for the default branch cache. It can be quite long. // defaultBranchCacheTimeout specifies the timeout for the default branch cache. It can be quite long.
defaultBranchCacheTimeout = 15 * time.Minute defaultBranchCacheTimeout = 15 * time.Minute
@ -30,8 +54,8 @@ var (
// fileCacheTimeout specifies the timeout for the file content cache - you might want to make this quite long, depending // fileCacheTimeout specifies the timeout for the file content cache - you might want to make this quite long, depending
// on your available memory. // on your available memory.
// TODO: move as option into cache interface // TODO: move as option into cache interface
// fileCacheTimeout = 5 * time.Minute fileCacheTimeout = 5 * time.Minute
// fileCacheSizeLimit limits the maximum file size that will be cached, and is set to 1 MB by default. // fileCacheSizeLimit limits the maximum file size that will be cached, and is set to 1 MB by default.
// fileCacheSizeLimit = 1024 * 1024 fileCacheSizeLimit = int64(1024 * 1024)
) )

View file

@ -1,11 +1,13 @@
package gitea package gitea
import ( import (
"bytes"
"errors" "errors"
"fmt" "fmt"
"io" "io"
"net/http" "net/http"
"net/url" "net/url"
"strconv"
"strings" "strings"
"time" "time"
@ -18,9 +20,19 @@ import (
var ErrorNotFound = errors.New("not found") var ErrorNotFound = errors.New("not found")
const ( const (
	// cache key prefixes
branchTimestampCacheKeyPrefix = "branchTime" branchTimestampCacheKeyPrefix = "branchTime"
defaultBranchCacheKeyPrefix = "defaultBranch" defaultBranchCacheKeyPrefix = "defaultBranch"
rawContentCacheKeyPrefix = "rawContent"
// gitea
giteaObjectTypeHeader = "X-Gitea-Object-Type" giteaObjectTypeHeader = "X-Gitea-Object-Type"
objTypeSymlink = "symlink"
// std
eTagHeader = "ETag"
contentTypeHeader = "Content-Type"
contentLengthHeader = "Content-Length"
) )
type Client struct { type Client struct {
@ -59,42 +71,38 @@ func (client *Client) GiteaRawContent(targetOwner, targetRepo, ref, resource str
} }
func (client *Client) ServeRawContent(targetOwner, targetRepo, ref, resource string) (io.ReadCloser, *http.Response, error) { func (client *Client) ServeRawContent(targetOwner, targetRepo, ref, resource string) (io.ReadCloser, *http.Response, error) {
cacheKey := fmt.Sprintf("%s/%s/%s|%s|%s", rawContentCacheKeyPrefix, targetOwner, targetRepo, ref, resource)
log := log.With().Str("cache_key", cacheKey).Logger()
	// handle the case where a cache entry already exists
if cache, ok := client.responseCache.Get(cacheKey); ok == true {
cache := cache.(FileResponse)
		// TODO: check against a possible timestamp mismatch?
if cache.Exists {
if cache.IsSymlink {
linkDest := string(cache.Body)
log.Debug().Msgf("[cache] follow symlink from'%s' to '%s'", resource, linkDest)
return client.ServeRawContent(targetOwner, targetRepo, ref, linkDest)
} else {
log.Debug().Msg("[cache] return bytes")
return io.NopCloser(bytes.NewReader(cache.Body)), cache.createHttpResponse(), nil
}
} else {
return nil, cache.createHttpResponse(), ErrorNotFound
}
}
// if cachedValue, ok := fileResponseCache.Get(uri + "?timestamp=" + o.timestamp()); ok && !cachedValue.(gitea.FileResponse).IsEmpty() { // if cachedValue, ok := fileResponseCache.Get(uri + "?timestamp=" + o.timestamp()); ok && !cachedValue.(gitea.FileResponse).IsEmpty() {
// cachedResponse = cachedValue.(gitea.FileResponse) // cachedResponse = cachedValue.(gitea.FileResponse)
reader, resp, err := client.sdkClient.GetFileReader(targetOwner, targetRepo, ref, resource, client.supportLFS) reader, resp, err := client.sdkClient.GetFileReader(targetOwner, targetRepo, ref, resource, client.supportLFS)
if resp != nil { if resp != nil {
switch resp.StatusCode { switch resp.StatusCode {
case http.StatusOK: case http.StatusOK:
// first handle symlinks
// add caching {
// Write the response body to the original request
// var cacheBodyWriter bytes.Buffer
// if res != nil {
// if res.Header.ContentLength() > fileCacheSizeLimit {
// // fasthttp else will set "Content-Length: 0"
// ctx.Response().SetBodyStream(&strings.Reader{}, -1)
//
// err = res.BodyWriteTo(ctx.Response.BodyWriter())
// } else {
// // TODO: cache is half-empty if request is cancelled - does the ctx.Err() below do the trick?
// err = res.BodyWriteTo(io.MultiWriter(ctx.Response().BodyWriter(), &cacheBodyWriter))
// }
// } else {
// _, err = ctx.Write(cachedResponse.Body)
// }
// if res != nil && res.Header.ContentLength() <= fileCacheSizeLimit && ctx.Err() == nil {
// cachedResponse.Exists = true
// cachedResponse.MimeType = mimeType
// cachedResponse.Body = cacheBodyWriter.Bytes()
// _ = fileResponseCache.Set(uri+"?timestamp="+o.timestamp(), cachedResponse, fileCacheTimeout)
// }
// store ETag in resp !!!!
objType := resp.Header.Get(giteaObjectTypeHeader) objType := resp.Header.Get(giteaObjectTypeHeader)
log.Trace().Msgf("server raw content object: %s", objType) log.Trace().Msgf("server raw content object: %s", objType)
if client.followSymlinks && objType == "symlink" { if client.followSymlinks && objType == objTypeSymlink {
// limit to 1000 chars // limit to 1000 chars
defer reader.Close() defer reader.Close()
linkDestBytes, err := io.ReadAll(io.LimitReader(reader, 10000)) linkDestBytes, err := io.ReadAll(io.LimitReader(reader, 10000))
@ -103,17 +111,60 @@ func (client *Client) ServeRawContent(targetOwner, targetRepo, ref, resource str
} }
linkDest := strings.TrimSpace(string(linkDestBytes)) linkDest := strings.TrimSpace(string(linkDestBytes))
if err := client.responseCache.Set(cacheKey, FileResponse{
Exists: true,
IsSymlink: true,
Body: []byte(linkDest),
ETag: resp.Header.Get(eTagHeader),
}, fileCacheTimeout); err != nil {
log.Error().Err(err).Msg("could not save symlink in cache")
}
log.Debug().Msgf("follow symlink from '%s' to '%s'", resource, linkDest) log.Debug().Msgf("follow symlink from '%s' to '%s'", resource, linkDest)
return client.ServeRawContent(targetOwner, targetRepo, ref, linkDest) return client.ServeRawContent(targetOwner, targetRepo, ref, linkDest)
} }
}
// now we are sure it's content
{
contentLeng, err2 := strconv.ParseInt(resp.Header.Get(contentLengthHeader), 20, 64)
if err2 != nil {
log.Error().Err(err2).Msg("could not parse content length")
}
if contentLeng <= 0 && contentLeng > fileCacheSizeLimit {
				// if the content is too big, or its size could not be determined, return now
return reader, resp.Response, err return reader, resp.Response, err
case http.StatusNotFound: }
// add not exist caching // now we write to cache and respond at the sime time
// _ = fileResponseCache.Set(uri+"?timestamp="+o.timestamp(), gitea.FileResponse{
// Exists: false, // TODO: at the sime time !!!
// }, fileCacheTimeout) /*
we need a "go"
// TODO: cache is half-empty if request is cancelled - does the ctx.Err() below do the trick?
// err = res.BodyWriteTo(io.MultiWriter(ctx.Response().BodyWriter(), &cacheBodyWriter))
*/
body, err := io.ReadAll(io.LimitReader(reader, contentLeng))
if err != nil {
if err := client.responseCache.Set(cacheKey, FileResponse{
Exists: true,
ETag: resp.Header.Get(eTagHeader),
MimeType: resp.Header.Get(contentTypeHeader),
Body: body,
}, fileCacheTimeout); err != nil {
log.Error().Err(err).Msg("could not save content in cache")
}
}
return io.NopCloser(bytes.NewReader(body)), resp.Response, nil
}
case http.StatusNotFound:
if err := client.responseCache.Set(cacheKey, FileResponse{
Exists: false,
ETag: resp.Header.Get(eTagHeader),
}, fileCacheTimeout); err != nil {
log.Error().Err(err).Msg("could not save 404 in cache")
}
return nil, resp.Response, ErrorNotFound return nil, resp.Response, ErrorNotFound
default: default: