Compare commits

...

9 Commits

Author SHA1 Message Date
Andrea Spacca
2a29083960 handle range with no limit 2023-02-11 11:47:51 +09:00
Andrea Spacca
158e5487ee refactor CloseCheck to avoid panic on nil 2023-02-11 11:47:01 +09:00
Andrea Spacca
806286ab35 refactor CloseCheck to avoid panic on nil, handle range in gdrive storage 2023-02-11 11:46:27 +09:00
Andrea Spacca
d49aee59ba refactor CloseCheck to avoid panic on nil, handle range with no limit 2023-02-11 11:45:57 +09:00
Andrea Spacca
e08225e5f8 refactor CloseCheck to avoid panic on nil, remove range/audio/video special handling on get handler 2023-02-11 11:45:17 +09:00
Andrea Spacca
8597f1d9eb bump gdrive dependencies 2023-02-11 11:44:12 +09:00
Andrea Spacca
9e8ce19cd1 proper param name in error 2023-02-11 11:43:41 +09:00
Vladislav Grubov
2bda0a1e55 Adds 'Accept-Ranges: bytes' header to handlers 2023-02-03 22:45:55 +03:00
Vladislav Grubov
d9369e8b39 Support Range header for GET 2023-01-28 22:23:57 +03:00
9 changed files with 910 additions and 71 deletions

View File

@@ -473,9 +473,9 @@ func New() *Cmd {
chunkSize := c.Int("gdrive-chunk-size") * 1024 * 1024 chunkSize := c.Int("gdrive-chunk-size") * 1024 * 1024
if clientJSONFilepath := c.String("gdrive-client-json-filepath"); clientJSONFilepath == "" { if clientJSONFilepath := c.String("gdrive-client-json-filepath"); clientJSONFilepath == "" {
panic("client-json-filepath not set.") panic("gdrive-client-json-filepath not set.")
} else if localConfigPath := c.String("gdrive-local-config-path"); localConfigPath == "" { } else if localConfigPath := c.String("gdrive-local-config-path"); localConfigPath == "" {
panic("local-config-path not set.") panic("gdrive-local-config-path not set.")
} else if basedir := c.String("basedir"); basedir == "" { } else if basedir := c.String("basedir"); basedir == "" {
panic("basedir not set.") panic("basedir not set.")
} else if store, err := storage.NewGDriveStorage(clientJSONFilepath, localConfigPath, basedir, chunkSize, logger); err != nil { } else if store, err := storage.NewGDriveStorage(clientJSONFilepath, localConfigPath, basedir, chunkSize, logger); err != nil {

16
go.mod
View File

@@ -3,7 +3,8 @@ module github.com/dutchcoders/transfer.sh
go 1.15 go 1.15
require ( require (
cloud.google.com/go v0.77.0 // indirect cloud.google.com/go/compute v1.18.0 // indirect
cloud.google.com/go/compute/metadata v0.2.3 // indirect
github.com/PuerkitoBio/ghost v0.0.0-20160324114900-206e6e460e14 github.com/PuerkitoBio/ghost v0.0.0-20160324114900-206e6e460e14
github.com/VojtechVitek/ratelimit v0.0.0-20160722140851-dc172bc0f6d2 github.com/VojtechVitek/ratelimit v0.0.0-20160722140851-dc172bc0f6d2
github.com/aws/aws-sdk-go v1.37.14 github.com/aws/aws-sdk-go v1.37.14
@@ -15,6 +16,8 @@ require (
github.com/fatih/color v1.10.0 github.com/fatih/color v1.10.0
github.com/garyburd/redigo v1.6.2 // indirect github.com/garyburd/redigo v1.6.2 // indirect
github.com/golang/gddo v0.0.0-20210115222349-20d68f94ee1f github.com/golang/gddo v0.0.0-20210115222349-20d68f94ee1f
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
github.com/googleapis/enterprise-certificate-proxy v0.2.2 // indirect
github.com/gorilla/handlers v1.5.1 github.com/gorilla/handlers v1.5.1
github.com/gorilla/mux v1.8.0 github.com/gorilla/mux v1.8.0
github.com/gorilla/securecookie v1.1.1 // indirect github.com/gorilla/securecookie v1.1.1 // indirect
@@ -24,12 +27,13 @@ require (
github.com/skip2/go-qrcode v0.0.0-20200617195104-da1b6568686e github.com/skip2/go-qrcode v0.0.0-20200617195104-da1b6568686e
github.com/tomasen/realip v0.0.0-20180522021738-f0c99a92ddce github.com/tomasen/realip v0.0.0-20180522021738-f0c99a92ddce
github.com/urfave/cli v1.22.5 github.com/urfave/cli v1.22.5
go.opencensus.io v0.22.6 // indirect
golang.org/x/crypto v0.0.0-20220131195533-30dcbda58838 golang.org/x/crypto v0.0.0-20220131195533-30dcbda58838
golang.org/x/net v0.0.0-20220513224357-95641704303c // indirect golang.org/x/net v0.6.0 // indirect
golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99 golang.org/x/oauth2 v0.5.0
google.golang.org/api v0.40.0 google.golang.org/api v0.109.0
google.golang.org/genproto v0.0.0-20210218151259-fe80b386bf06 // indirect google.golang.org/genproto v0.0.0-20230209215440-0dfe4f8abfcc // indirect
google.golang.org/grpc v1.53.0 // indirect
google.golang.org/protobuf v1.28.1 // indirect
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15
storj.io/common v0.0.0-20220405183405-ffdc3ab808c6 storj.io/common v0.0.0-20220405183405-ffdc3ab808c6
storj.io/uplink v1.8.2 storj.io/uplink v1.8.2

707
go.sum

File diff suppressed because it is too large Load Diff

View File

@@ -36,7 +36,6 @@ import (
"encoding/json" "encoding/json"
"errors" "errors"
"fmt" "fmt"
"github.com/dutchcoders/transfer.sh/server/storage"
"html" "html"
htmlTemplate "html/template" htmlTemplate "html/template"
"io" "io"
@@ -54,6 +53,8 @@ import (
textTemplate "text/template" textTemplate "text/template"
"time" "time"
"github.com/dutchcoders/transfer.sh/server/storage"
web "github.com/dutchcoders/transfer.sh-web" web "github.com/dutchcoders/transfer.sh-web"
"github.com/gorilla/mux" "github.com/gorilla/mux"
"github.com/microcosm-cc/bluemonday" "github.com/microcosm-cc/bluemonday"
@@ -151,7 +152,7 @@ func (s *Server) previewHandler(w http.ResponseWriter, r *http.Request) {
templatePath = "download.markdown.html" templatePath = "download.markdown.html"
var reader io.ReadCloser var reader io.ReadCloser
if reader, _, err = s.storage.Get(r.Context(), token, filename); err != nil { if reader, _, err = s.storage.Get(r.Context(), token, filename, nil); err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError) http.Error(w, err.Error(), http.StatusInternalServerError)
return return
} }
@@ -460,7 +461,7 @@ func (s *Server) putHandler(w http.ResponseWriter, r *http.Request) {
contentLength := r.ContentLength contentLength := r.ContentLength
defer storage.CloseCheck(r.Body.Close) defer storage.CloseCheck(r.Body)
file, err := ioutil.TempFile(s.tempPath, "transfer-") file, err := ioutil.TempFile(s.tempPath, "transfer-")
defer s.cleanTmpFile(file) defer s.cleanTmpFile(file)
@@ -692,8 +693,8 @@ func (s *Server) checkMetadata(ctx context.Context, token, filename string, incr
var metadata metadata var metadata metadata
r, _, err := s.storage.Get(ctx, token, fmt.Sprintf("%s.metadata", filename)) r, _, err := s.storage.Get(ctx, token, fmt.Sprintf("%s.metadata", filename), nil)
defer storage.CloseCheck(r.Close) defer storage.CloseCheck(r)
if err != nil { if err != nil {
return metadata, err return metadata, err
@@ -728,8 +729,8 @@ func (s *Server) checkDeletionToken(ctx context.Context, deletionToken, token, f
var metadata metadata var metadata metadata
r, _, err := s.storage.Get(ctx, token, fmt.Sprintf("%s.metadata", filename)) r, _, err := s.storage.Get(ctx, token, fmt.Sprintf("%s.metadata", filename), nil)
defer storage.CloseCheck(r.Close) defer storage.CloseCheck(r)
if s.storage.IsNotExist(err) { if s.storage.IsNotExist(err) {
return errors.New("metadata doesn't exist") return errors.New("metadata doesn't exist")
@@ -806,8 +807,8 @@ func (s *Server) zipHandler(w http.ResponseWriter, r *http.Request) {
continue continue
} }
reader, _, err := s.storage.Get(r.Context(), token, filename) reader, _, err := s.storage.Get(r.Context(), token, filename, nil)
defer storage.CloseCheck(reader.Close) defer storage.CloseCheck(reader)
if err != nil { if err != nil {
if s.storage.IsNotExist(err) { if s.storage.IsNotExist(err) {
@@ -860,10 +861,10 @@ func (s *Server) tarGzHandler(w http.ResponseWriter, r *http.Request) {
commonHeader(w, tarfilename) commonHeader(w, tarfilename)
gw := gzip.NewWriter(w) gw := gzip.NewWriter(w)
defer storage.CloseCheck(gw.Close) defer storage.CloseCheck(gw)
zw := tar.NewWriter(gw) zw := tar.NewWriter(gw)
defer storage.CloseCheck(zw.Close) defer storage.CloseCheck(zw)
for _, key := range strings.Split(files, ",") { for _, key := range strings.Split(files, ",") {
key = resolveKey(key, s.proxyPath) key = resolveKey(key, s.proxyPath)
@@ -876,8 +877,8 @@ func (s *Server) tarGzHandler(w http.ResponseWriter, r *http.Request) {
continue continue
} }
reader, contentLength, err := s.storage.Get(r.Context(), token, filename) reader, contentLength, err := s.storage.Get(r.Context(), token, filename, nil)
defer storage.CloseCheck(reader.Close) defer storage.CloseCheck(reader)
if err != nil { if err != nil {
if s.storage.IsNotExist(err) { if s.storage.IsNotExist(err) {
@@ -921,7 +922,7 @@ func (s *Server) tarHandler(w http.ResponseWriter, r *http.Request) {
commonHeader(w, tarfilename) commonHeader(w, tarfilename)
zw := tar.NewWriter(w) zw := tar.NewWriter(w)
defer storage.CloseCheck(zw.Close) defer storage.CloseCheck(zw)
for _, key := range strings.Split(files, ",") { for _, key := range strings.Split(files, ",") {
key = resolveKey(key, s.proxyPath) key = resolveKey(key, s.proxyPath)
@@ -934,8 +935,8 @@ func (s *Server) tarHandler(w http.ResponseWriter, r *http.Request) {
continue continue
} }
reader, contentLength, err := s.storage.Get(r.Context(), token, filename) reader, contentLength, err := s.storage.Get(r.Context(), token, filename, nil)
defer storage.CloseCheck(reader.Close) defer storage.CloseCheck(reader)
if err != nil { if err != nil {
if s.storage.IsNotExist(err) { if s.storage.IsNotExist(err) {
@@ -1000,6 +1001,10 @@ func (s *Server) headHandler(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Connection", "close") w.Header().Set("Connection", "close")
w.Header().Set("X-Remaining-Downloads", remainingDownloads) w.Header().Set("X-Remaining-Downloads", remainingDownloads)
w.Header().Set("X-Remaining-Days", remainingDays) w.Header().Set("X-Remaining-Days", remainingDays)
if s.storage.IsRangeSupported() {
w.Header().Set("Accept-Ranges", "bytes")
}
} }
func (s *Server) getHandler(w http.ResponseWriter, r *http.Request) { func (s *Server) getHandler(w http.ResponseWriter, r *http.Request) {
@@ -1017,9 +1022,16 @@ func (s *Server) getHandler(w http.ResponseWriter, r *http.Request) {
return return
} }
var rng *storage.Range
if r.Header.Get("Range") != "" {
rng = storage.ParseRange(r.Header.Get("Range"))
}
contentType := metadata.ContentType contentType := metadata.ContentType
reader, contentLength, err := s.storage.Get(r.Context(), token, filename) reader, contentLength, err := s.storage.Get(r.Context(), token, filename, rng)
defer storage.CloseCheck(reader.Close) defer storage.CloseCheck(reader)
rdr := io.Reader(reader)
if s.storage.IsNotExist(err) { if s.storage.IsNotExist(err) {
http.Error(w, http.StatusText(http.StatusNotFound), http.StatusNotFound) http.Error(w, http.StatusText(http.StatusNotFound), http.StatusNotFound)
@@ -1029,12 +1041,24 @@ func (s *Server) getHandler(w http.ResponseWriter, r *http.Request) {
http.Error(w, "Could not retrieve file.", http.StatusInternalServerError) http.Error(w, "Could not retrieve file.", http.StatusInternalServerError)
return return
} }
if rng != nil {
cr := rng.ContentRange()
if cr != "" {
w.Header().Set("Accept-Ranges", "bytes")
w.Header().Set("Content-Range", cr)
if rng.Limit > 0 {
rdr = io.LimitReader(reader, int64(rng.Limit))
}
}
}
var disposition string var disposition string
if action == "inline" { if action == "inline" {
disposition = "inline" disposition = "inline"
/* /*
metadata.ContentType is unable to determine the type of the content, metadata.ContentType is unable to determine the type of the content,
So add text/plain in this case to fix XSS related issues/ So add text/plain in this case to fix XSS related issues/
*/ */
@@ -1055,32 +1079,15 @@ func (s *Server) getHandler(w http.ResponseWriter, r *http.Request) {
w.Header().Set("X-Remaining-Downloads", remainingDownloads) w.Header().Set("X-Remaining-Downloads", remainingDownloads)
w.Header().Set("X-Remaining-Days", remainingDays) w.Header().Set("X-Remaining-Days", remainingDays)
if rng != nil && rng.ContentRange() != "" {
w.WriteHeader(http.StatusPartialContent)
}
if disposition == "inline" && canContainsXSS(contentType) { if disposition == "inline" && canContainsXSS(contentType) {
reader = ioutil.NopCloser(bluemonday.UGCPolicy().SanitizeReader(reader)) reader = ioutil.NopCloser(bluemonday.UGCPolicy().SanitizeReader(reader))
} }
if w.Header().Get("Range") != "" || strings.HasPrefix(metadata.ContentType, "video") || strings.HasPrefix(metadata.ContentType, "audio") { if _, err = io.Copy(w, rdr); err != nil {
file, err := ioutil.TempFile(s.tempPath, "range-")
defer s.cleanTmpFile(file)
if err != nil {
s.logger.Printf("%s", err.Error())
http.Error(w, "Error occurred copying to output stream", http.StatusInternalServerError)
return
}
_, err = io.Copy(file, reader)
if err != nil {
s.logger.Printf("%s", err.Error())
http.Error(w, "Error occurred copying to output stream", http.StatusInternalServerError)
return
}
http.ServeContent(w, r, filename, time.Now(), file)
return
}
if _, err = io.Copy(w, reader); err != nil {
s.logger.Printf("%s", err.Error()) s.logger.Printf("%s", err.Error())
http.Error(w, "Error occurred copying to output stream", http.StatusInternalServerError) http.Error(w, "Error occurred copying to output stream", http.StatusInternalServerError)
return return

View File

@@ -4,13 +4,95 @@ import (
"context" "context"
"fmt" "fmt"
"io" "io"
"strconv"
"time" "time"
"regexp"
) )
type Range struct {
Start uint64
Limit uint64
contentRange string
}
// Range Reconstructs Range header and returns it
func (r *Range) Range() string {
if r.Limit > 0 {
return fmt.Sprintf("bytes=%d-%d", r.Start, r.Start+r.Limit-1)
} else {
return fmt.Sprintf("bytes=%d-", r.Start)
}
}
// AcceptLength Tries to accept given range
// returns newContentLength if range was satisfied, otherwise returns given contentLength
func (r *Range) AcceptLength(contentLength uint64) (newContentLength uint64) {
newContentLength = contentLength
if r.Limit == 0 {
r.Limit = newContentLength - r.Start
}
if contentLength < r.Start {
return
}
if r.Limit > contentLength-r.Start {
return
}
r.contentRange = fmt.Sprintf("bytes %d-%d/%d", r.Start, r.Start+r.Limit-1, contentLength)
newContentLength = r.Limit
return
}
func (r *Range) SetContentRange(cr string) {
r.contentRange = cr
}
// Returns accepted Content-Range header. If range wasn't accepted empty string is returned
func (r *Range) ContentRange() string {
return r.contentRange
}
var rexp *regexp.Regexp = regexp.MustCompile(`^bytes=([0-9]+)-([0-9]*)$`)
// Parses HTTP Range header and returns struct on success
// only bytes=start-finish supported
func ParseRange(rng string) *Range {
if rng == "" {
return nil
}
matches := rexp.FindAllStringSubmatch(rng, -1)
if len(matches) != 1 || len(matches[0]) != 3 {
return nil
}
if len(matches[0][0]) != len(rng) || len(matches[0][1]) == 0 {
return nil
}
start, err := strconv.ParseUint(matches[0][1], 10, 64)
if err != nil {
return nil
}
if len(matches[0][2]) == 0 {
return &Range{Start: start, Limit: 0}
}
finish, err := strconv.ParseUint(matches[0][2], 10, 64)
if err != nil {
return nil
}
if finish < start || finish+1 < finish {
return nil
}
return &Range{Start: start, Limit: finish - start + 1}
}
// Storage is the interface for storage operation // Storage is the interface for storage operation
type Storage interface { type Storage interface {
// Get retrieves a file from storage // Get retrieves a file from storage
Get(ctx context.Context, token string, filename string) (reader io.ReadCloser, contentLength uint64, err error) Get(ctx context.Context, token string, filename string, rng *Range) (reader io.ReadCloser, contentLength uint64, err error)
// Head retrieves content length of a file from storage // Head retrieves content length of a file from storage
Head(ctx context.Context, token string, filename string) (contentLength uint64, err error) Head(ctx context.Context, token string, filename string) (contentLength uint64, err error)
// Put saves a file on storage // Put saves a file on storage
@@ -21,13 +103,18 @@ type Storage interface {
IsNotExist(err error) bool IsNotExist(err error) bool
// Purge cleans up the storage // Purge cleans up the storage
Purge(ctx context.Context, days time.Duration) error Purge(ctx context.Context, days time.Duration) error
// Whether storage supports Get with Range header
IsRangeSupported() bool
// Type returns the storage type // Type returns the storage type
Type() string Type() string
} }
func CloseCheck(f func() error) { func CloseCheck(c io.Closer) {
if err := f(); err != nil { if c == nil {
return
}
if err := c.Close(); err != nil {
fmt.Println("Received close error:", err) fmt.Println("Received close error:", err)
} }
} }

View File

@@ -194,7 +194,7 @@ func (s *GDrive) Head(ctx context.Context, token string, filename string) (conte
} }
// Get retrieves a file from storage // Get retrieves a file from storage
func (s *GDrive) Get(ctx context.Context, token string, filename string) (reader io.ReadCloser, contentLength uint64, err error) { func (s *GDrive) Get(ctx context.Context, token string, filename string, rng *Range) (reader io.ReadCloser, contentLength uint64, err error) {
var fileID string var fileID string
fileID, err = s.findID(filename, token) fileID, err = s.findID(filename, token)
if err != nil { if err != nil {
@@ -213,12 +213,24 @@ func (s *GDrive) Get(ctx context.Context, token string, filename string) (reader
contentLength = uint64(fi.Size) contentLength = uint64(fi.Size)
fileGetCall := s.service.Files.Get(fileID)
if rng != nil {
header := fileGetCall.Header()
header.Set("Range", rng.Range())
}
var res *http.Response var res *http.Response
res, err = s.service.Files.Get(fileID).Context(ctx).Download() res, err = fileGetCall.Context(ctx).Download()
if err != nil { if err != nil {
return return
} }
if rng != nil {
reader = res.Body
rng.AcceptLength(contentLength)
return
}
reader = res.Body reader = res.Body
return return
@@ -296,7 +308,6 @@ func (s *GDrive) Put(ctx context.Context, token string, filename string, reader
Name: token, Name: token,
Parents: []string{s.rootID}, Parents: []string{s.rootID},
MimeType: gDriveDirectoryMimeType, MimeType: gDriveDirectoryMimeType,
Size: int64(contentLength),
} }
di, err := s.service.Files.Create(dir).Fields("id").Do() di, err := s.service.Files.Create(dir).Fields("id").Do()
@@ -323,6 +334,8 @@ func (s *GDrive) Put(ctx context.Context, token string, filename string, reader
return nil return nil
} }
func (s *GDrive) IsRangeSupported() bool { return true }
// Retrieve a token, saves the token, then returns the generated client. // Retrieve a token, saves the token, then returns the generated client.
func getGDriveClient(ctx context.Context, config *oauth2.Config, localConfigPath string, logger *log.Logger) *http.Client { func getGDriveClient(ctx context.Context, config *oauth2.Config, localConfigPath string, logger *log.Logger) *http.Client {
tokenFile := filepath.Join(localConfigPath, gDriveTokenJSONFile) tokenFile := filepath.Join(localConfigPath, gDriveTokenJSONFile)
@@ -356,7 +369,7 @@ func getGDriveTokenFromWeb(ctx context.Context, config *oauth2.Config, logger *l
// Retrieves a token from a local file. // Retrieves a token from a local file.
func gDriveTokenFromFile(file string) (*oauth2.Token, error) { func gDriveTokenFromFile(file string) (*oauth2.Token, error) {
f, err := os.Open(file) f, err := os.Open(file)
defer CloseCheck(f.Close) defer CloseCheck(f)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@@ -369,7 +382,7 @@ func gDriveTokenFromFile(file string) (*oauth2.Token, error) {
func saveGDriveToken(path string, token *oauth2.Token, logger *log.Logger) { func saveGDriveToken(path string, token *oauth2.Token, logger *log.Logger) {
logger.Printf("Saving credential file to: %s\n", path) logger.Printf("Saving credential file to: %s\n", path)
f, err := os.OpenFile(path, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0600) f, err := os.OpenFile(path, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0600)
defer CloseCheck(f.Close) defer CloseCheck(f)
if err != nil { if err != nil {
logger.Fatalf("Unable to cache oauth token: %v", err) logger.Fatalf("Unable to cache oauth token: %v", err)
} }

View File

@@ -42,13 +42,16 @@ func (s *LocalStorage) Head(_ context.Context, token string, filename string) (c
} }
// Get retrieves a file from storage // Get retrieves a file from storage
func (s *LocalStorage) Get(_ context.Context, token string, filename string) (reader io.ReadCloser, contentLength uint64, err error) { func (s *LocalStorage) Get(_ context.Context, token string, filename string, rng *Range) (reader io.ReadCloser, contentLength uint64, err error) {
path := filepath.Join(s.basedir, token, filename) path := filepath.Join(s.basedir, token, filename)
var file *os.File
// content type , content length // content type , content length
if reader, err = os.Open(path); err != nil { if file, err = os.Open(path); err != nil {
return return
} }
reader = file
var fi os.FileInfo var fi os.FileInfo
if fi, err = os.Lstat(path); err != nil { if fi, err = os.Lstat(path); err != nil {
@@ -56,6 +59,12 @@ func (s *LocalStorage) Get(_ context.Context, token string, filename string) (re
} }
contentLength = uint64(fi.Size()) contentLength = uint64(fi.Size())
if rng != nil {
contentLength = rng.AcceptLength(contentLength)
if _, err = file.Seek(int64(rng.Start), 0); err != nil {
return
}
}
return return
} }
@@ -113,7 +122,7 @@ func (s *LocalStorage) Put(_ context.Context, token string, filename string, rea
} }
f, err = os.OpenFile(filepath.Join(path, filename), os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0600) f, err = os.OpenFile(filepath.Join(path, filename), os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0600)
defer CloseCheck(f.Close) defer CloseCheck(f)
if err != nil { if err != nil {
return err return err
@@ -125,3 +134,5 @@ func (s *LocalStorage) Put(_ context.Context, token string, filename string, rea
return nil return nil
} }
func (s *LocalStorage) IsRangeSupported() bool { return true }

View File

@@ -90,7 +90,7 @@ func (s *S3Storage) IsNotExist(err error) bool {
} }
// Get retrieves a file from storage // Get retrieves a file from storage
func (s *S3Storage) Get(ctx context.Context, token string, filename string) (reader io.ReadCloser, contentLength uint64, err error) { func (s *S3Storage) Get(ctx context.Context, token string, filename string, rng *Range) (reader io.ReadCloser, contentLength uint64, err error) {
key := fmt.Sprintf("%s/%s", token, filename) key := fmt.Sprintf("%s/%s", token, filename)
getRequest := &s3.GetObjectInput{ getRequest := &s3.GetObjectInput{
@@ -98,6 +98,10 @@ func (s *S3Storage) Get(ctx context.Context, token string, filename string) (rea
Key: aws.String(key), Key: aws.String(key),
} }
if rng != nil {
getRequest.Range = aws.String(rng.Range())
}
response, err := s.s3.GetObjectWithContext(ctx, getRequest) response, err := s.s3.GetObjectWithContext(ctx, getRequest)
if err != nil { if err != nil {
return return
@@ -106,6 +110,9 @@ func (s *S3Storage) Get(ctx context.Context, token string, filename string) (rea
if response.ContentLength != nil { if response.ContentLength != nil {
contentLength = uint64(*response.ContentLength) contentLength = uint64(*response.ContentLength)
} }
if rng != nil && response.ContentRange != nil {
rng.SetContentRange(*response.ContentRange)
}
reader = response.Body reader = response.Body
return return
@@ -169,6 +176,8 @@ func (s *S3Storage) Put(ctx context.Context, token string, filename string, read
return return
} }
func (s *S3Storage) IsRangeSupported() bool { return true }
func getAwsSession(accessKey, secretKey, region, endpoint string, forcePathStyle bool) *session.Session { func getAwsSession(accessKey, secretKey, region, endpoint string, forcePathStyle bool) *session.Session {
return session.Must(session.NewSession(&aws.Config{ return session.Must(session.NewSession(&aws.Config{
Region: aws.String(region), Region: aws.String(region),

View File

@@ -78,12 +78,18 @@ func (s *StorjStorage) Head(ctx context.Context, token string, filename string)
} }
// Get retrieves a file from storage // Get retrieves a file from storage
func (s *StorjStorage) Get(ctx context.Context, token string, filename string) (reader io.ReadCloser, contentLength uint64, err error) { func (s *StorjStorage) Get(ctx context.Context, token string, filename string, rng *Range) (reader io.ReadCloser, contentLength uint64, err error) {
key := storj.JoinPaths(token, filename) key := storj.JoinPaths(token, filename)
s.logger.Printf("Getting file %s from Storj Bucket", filename) s.logger.Printf("Getting file %s from Storj Bucket", filename)
options := uplink.DownloadOptions{} options := uplink.DownloadOptions{}
if rng != nil {
options.Offset = int64(rng.Start)
if rng.Limit > 0 {
options.Length = int64(rng.Limit)
}
}
download, err := s.project.DownloadObject(fpath.WithTempData(ctx, "", true), s.bucket.Name, key, &options) download, err := s.project.DownloadObject(fpath.WithTempData(ctx, "", true), s.bucket.Name, key, &options)
if err != nil { if err != nil {
@@ -91,6 +97,9 @@ func (s *StorjStorage) Get(ctx context.Context, token string, filename string) (
} }
contentLength = uint64(download.Info().System.ContentLength) contentLength = uint64(download.Info().System.ContentLength)
if rng != nil {
contentLength = rng.AcceptLength(contentLength)
}
reader = download reader = download
return return
@@ -146,6 +155,8 @@ func (s *StorjStorage) Put(ctx context.Context, token string, filename string, r
return err return err
} }
func (s *StorjStorage) IsRangeSupported() bool { return true }
// IsNotExist indicates if a file doesn't exist on storage // IsNotExist indicates if a file doesn't exist on storage
func (s *StorjStorage) IsNotExist(err error) bool { func (s *StorjStorage) IsNotExist(err error) bool {
return errors.Is(err, uplink.ErrObjectNotFound) return errors.Is(err, uplink.ErrObjectNotFound)