Mirror of https://codeberg.org/Codeberg/pages-server.git (synced 2025-01-18 16:47:54 +00:00)
Cache empty files & fix #303 (missing content cache)
commit 48e919a7bf
parent 46c8daacba

2 changed files with 12 additions and 36 deletions
@@ -99,6 +99,7 @@ type writeCacheReader struct {
     cacheKey   string
     cache      cache.ICache
     hasError   bool
+    doNotCache bool
 }

 func (t *writeCacheReader) Read(p []byte) (n int, err error) {
@@ -108,19 +109,20 @@ func (t *writeCacheReader) Read(p []byte) (n int, err error) {
         log.Trace().Err(err).Msgf("[cache] original reader for %q has returned an error", t.cacheKey)
         t.hasError = true
     } else if n > 0 {
-        _, _ = t.buffer.Write(p[:n])
+        if t.buffer.Len()+n > int(fileCacheSizeLimit) {
+            t.doNotCache = true
+            t.buffer.Reset()
+        } else {
+            _, _ = t.buffer.Write(p[:n])
+        }
     }
     return
 }

 func (t *writeCacheReader) Close() error {
-    doWrite := !t.hasError
+    doWrite := !t.hasError && !t.doNotCache
     fc := *t.fileResponse
     fc.Body = t.buffer.Bytes()
-    if fc.IsEmpty() {
-        log.Trace().Msg("[cache] file response is empty")
-        doWrite = false
-    }
     if doWrite {
         err := t.cache.Set(t.cacheKey+"|Metadata", []byte(fc.MetadataAsString()), fileCacheTimeout)
         if err != nil {
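The wrapper above tees everything it reads into a buffer and, on Close, writes the buffer to the cache unless an error occurred or the size limit was exceeded; the removed fc.IsEmpty() guard is what previously kept empty files out of the cache. Below is a minimal, self-contained sketch of that pattern. The names hasError, doNotCache and the size-limit check echo the diff; the map-backed cache, the constant and everything else are illustrative stand-ins, not the project's code.

package main

import (
	"bytes"
	"fmt"
	"io"
	"strings"
)

// Tiny limit so the example can show both outcomes; the real fileCacheSizeLimit
// is defined elsewhere in the repository.
const exampleSizeLimit = 16

// cachingReader mirrors the pattern in the hunk above: buffer whatever is read,
// give up (doNotCache) once the body exceeds the limit, and only publish the
// buffer when Close is reached without an error. The map is a stand-in for the
// project's cache.ICache.
type cachingReader struct {
	src        io.Reader
	buf        bytes.Buffer
	cache      map[string][]byte
	cacheKey   string
	hasError   bool
	doNotCache bool
}

func (t *cachingReader) Read(p []byte) (n int, err error) {
	n, err = t.src.Read(p)
	if err != nil && err != io.EOF {
		t.hasError = true
	} else if n > 0 {
		if t.buf.Len()+n > exampleSizeLimit {
			t.doNotCache = true
			t.buf.Reset() // stop buffering a body that will never be cached
		} else {
			_, _ = t.buf.Write(p[:n])
		}
	}
	return
}

func (t *cachingReader) Close() error {
	if !t.hasError && !t.doNotCache {
		// An empty buffer is stored too, which is what "cache empty files" means here.
		t.cache[t.cacheKey] = t.buf.Bytes()
	}
	return nil
}

func main() {
	cache := map[string][]byte{}

	small := &cachingReader{src: strings.NewReader("tiny body"), cache: cache, cacheKey: "small"}
	_, _ = io.Copy(io.Discard, small)
	_ = small.Close()

	big := &cachingReader{src: strings.NewReader(strings.Repeat("x", 64)), cache: cache, cacheKey: "big"}
	_, _ = io.Copy(io.Discard, big)
	_ = big.Close()

	fmt.Printf("small cached: %d bytes, big cached: %v\n", len(cache["small"]), cache["big"] != nil)
	// Output: small cached: 9 bytes, big cached: false
}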
@@ -9,7 +9,6 @@ import (
     "net/http"
     "net/url"
     "path"
-    "strconv"
     "strings"
     "time"

@@ -123,19 +122,17 @@ func (client *Client) ServeRawContent(targetOwner, targetRepo, ref, resource str
         cachedHeader, cachedStatusCode := cache.createHttpResponse(cacheKey)
         // TODO: check against some timestamp mismatch?!?
         if cache.Exists {
             log.Debug().Msg("[cache] exists")
             if cache.IsSymlink {
                 linkDest := string(cache.Body)
                 log.Debug().Msgf("[cache] follow symlink from %q to %q", resource, linkDest)
                 return client.ServeRawContent(targetOwner, targetRepo, ref, linkDest)
-            } else if !cache.IsEmpty() {
+            } else {
                 log.Debug().Msgf("[cache] return %d bytes", len(cache.Body))
                 return io.NopCloser(bytes.NewReader(cache.Body)), cachedHeader, cachedStatusCode, nil
-            } else if cache.IsEmpty() {
-                log.Debug().Msg("[cache] is empty")
-                // TODO: empty files aren't cached anyways; but when closing the issue please make sure that a missing body cache key is also handled correctly.
             }
-        } // TODO: handle missing pages if they redirect to a index.html
+        } else {
+            return nil, nil, http.StatusNotFound, ErrorNotFound
+        }
     }
-    // TODO: metadata not written, is close ever called?
     log.Trace().Msg("file not in cache")
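On a cache hit the handler now serves straight from the stored FileResponse: symlinks are followed, anything else (including an empty body) is wrapped in io.NopCloser, and a cached negative lookup turns into a 404 without contacting Gitea. The following is a rough, runnable sketch of that branch under assumed types; only the Exists and Body field names come from the diff, the struct, error value and function here are invented for illustration, and symlink handling is left out.

package main

import (
	"bytes"
	"errors"
	"fmt"
	"io"
	"net/http"
)

// cachedFile is an illustrative stand-in for the diff's FileResponse.
type cachedFile struct {
	Exists bool
	Body   []byte
}

var errNotFound = errors.New("not found") // stand-in for the project's ErrorNotFound

// serveFromCache sketches the cache-hit branch: hand back the cached bytes as a
// ReadCloser, or a 404 when a negative lookup was cached.
func serveFromCache(c cachedFile) (io.ReadCloser, int, error) {
	if !c.Exists {
		return nil, http.StatusNotFound, errNotFound
	}
	return io.NopCloser(bytes.NewReader(c.Body)), http.StatusOK, nil
}

func main() {
	// An empty body is still a valid cache hit, which is the point of this commit.
	body, status, _ := serveFromCache(cachedFile{Exists: true, Body: []byte{}})
	b, _ := io.ReadAll(body)
	fmt.Println(status, len(b)) // 200 0

	_, status, err := serveFromCache(cachedFile{Exists: false})
	fmt.Println(status, err) // 404 not found
}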
@@ -186,10 +183,6 @@ func (client *Client) ServeRawContent(targetOwner, targetRepo, ref, resource str
     mimeType := client.getMimeTypeByExtension(resource)
     resp.Response.Header.Set(ContentTypeHeader, mimeType)

-    if !shouldRespBeSavedToCache(resp.Response) {
-        return reader, resp.Response.Header, resp.StatusCode, err
-    }
-
     // now we write to cache and respond at the same time
     fileResp := FileResponse{
         Exists: true,
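Just above the removed block, the Content-Type is still derived from the resource's file extension via client.getMimeTypeByExtension before the response is returned. A small standard-library illustration of that kind of lookup follows; the fallback value is an assumption for the example, not the value the pages server uses.

package main

import (
	"fmt"
	"mime"
	"path"
)

// mimeByExtension shows the general idea behind extension-based MIME probing:
// ask the mime package for the type of the resource's extension.
func mimeByExtension(resource string) string {
	mimeType := mime.TypeByExtension(path.Ext(resource))
	if mimeType == "" {
		mimeType = "application/octet-stream" // assumed fallback
	}
	return mimeType
}

func main() {
	fmt.Println(mimeByExtension("docs/index.html")) // text/html; charset=utf-8
	fmt.Println(mimeByExtension("logo.svg"))        // image/svg+xml
	fmt.Println(mimeByExtension("data.unknownext")) // application/octet-stream
}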
@@ -321,22 +314,3 @@ func (client *Client) getMimeTypeByExtension(resource string) string {
     log.Trace().Msgf("probe mime of %q is %q", resource, mimeType)
     return mimeType
 }
-
-func shouldRespBeSavedToCache(resp *http.Response) bool {
-    if resp == nil {
-        return false
-    }
-
-    contentLengthRaw := resp.Header.Get(ContentLengthHeader)
-    if contentLengthRaw == "" {
-        return false
-    }
-
-    contentLength, err := strconv.ParseInt(contentLengthRaw, 10, 64)
-    if err != nil {
-        log.Error().Err(err).Msg("could not parse content length")
-    }
-
-    // if content to big or could not be determined we not cache it
-    return contentLength > 0 && contentLength < fileCacheSizeLimit
-}
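The deleted helper gated caching on a parseable Content-Length header. After this commit that pre-check is unnecessary, since the size limit is enforced while reading (the doNotCache flag in the first file), so responses without a known length can still be considered for caching. For reference only, here is a compact equivalent of what the helper decided, using net/http's already-parsed ContentLength field (-1 when the length is unknown); the limit constant is assumed for the example.

package main

import (
	"fmt"
	"net/http"
)

// Assumed limit for this example; the real fileCacheSizeLimit is defined
// elsewhere in the repository.
const exampleSizeLimit = int64(1024 * 1024)

// respLooksCacheable is roughly what the deleted helper decided, but it relies
// on the stdlib's pre-parsed ContentLength instead of parsing the header by hand.
func respLooksCacheable(resp *http.Response) bool {
	if resp == nil {
		return false
	}
	return resp.ContentLength > 0 && resp.ContentLength < exampleSizeLimit
}

func main() {
	fmt.Println(respLooksCacheable(&http.Response{ContentLength: 512})) // true
	fmt.Println(respLooksCacheable(&http.Response{ContentLength: -1}))  // false, size unknown up front
	fmt.Println(respLooksCacheable(nil))                                // false
}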