package s3remote

import (
	"bytes"
	"context"
	"crypto/tls"
	"fmt"
	"io"
	"net/http"
	"path"
	"strings"
	"time"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/aws/retry"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/feature/s3/manager"
	"github.com/aws/aws-sdk-go-v2/service/s3"
	s3types "github.com/aws/aws-sdk-go-v2/service/s3/types"

	"github.com/VictoriaMetrics/VictoriaMetrics/lib/backup/common"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/backup/fscommon"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
)

var (
	supportedStorageClasses = []s3types.StorageClass{
		s3types.StorageClassGlacier,
		s3types.StorageClassDeepArchive,
		s3types.StorageClassGlacierIr,
		s3types.StorageClassIntelligentTiering,
		s3types.StorageClassOnezoneIa,
		s3types.StorageClassOutposts,
		s3types.StorageClassReducedRedundancy,
		s3types.StorageClassStandard,
		s3types.StorageClassStandardIa,
	}
)

func validateStorageClass(storageClass s3types.StorageClass) error {
	// If no storageClass is set, there is no need to validate it against
	// the supported values. This preserves backwards compatibility.
	if len(storageClass) == 0 {
		return nil
	}

	for _, supported := range supportedStorageClasses {
		if supported == storageClass {
			return nil
		}
	}

	return fmt.Errorf("unsupported S3 storage class: %s. Supported values: %v", storageClass, supportedStorageClasses)
}

// StringToS3StorageClass converts the given string to the AWS S3 StorageClass type for value comparison.
func StringToS3StorageClass(sc string) s3types.StorageClass {
	return s3types.StorageClass(sc)
}
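
// A hypothetical flag value may be converted and validated as follows
// (a sketch; the flag wiring is not part of this package):
//
//	sc := StringToS3StorageClass("STANDARD_IA")
//	if err := validateStorageClass(sc); err != nil {
//		logger.Fatalf("invalid storage class: %s", err)
//	}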

// FS represents filesystem for backups in S3.
//
// Init must be called before calling other FS methods.
type FS struct {
	// Path to S3 credentials file.
	CredsFilePath string

	// Path to S3 configs file.
	ConfigFilePath string

	// S3 bucket to use.
	Bucket string

	// Directory in the bucket to write to.
	Dir string

	// Set to use an S3-compatible endpoint such as MinIO.
	CustomEndpoint string

	// Force path-style addressing for S3; true by default.
	S3ForcePathStyle bool

	// Object storage class: https://aws.amazon.com/s3/storage-classes/
	StorageClass s3types.StorageClass

	// The name of the S3 config profile to use.
	ProfileName string

	// Whether to use an HTTP client with tls.InsecureSkipVerify set.
	TLSInsecureSkipVerify bool

	s3       *s3.Client
	uploader *manager.Uploader
}

// Init initializes fs.
//
// The returned fs must be stopped when no longer needed with MustStop call.
func (fs *FS) Init() error {
	if fs.s3 != nil {
		logger.Panicf("BUG: Init is already called")
	}
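	// Normalize fs.Dir to the `some/dir/` form without a leading slash,
	// since it is used below as an S3 object key prefix.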
	for strings.HasPrefix(fs.Dir, "/") {
		fs.Dir = fs.Dir[1:]
	}
	if !strings.HasSuffix(fs.Dir, "/") {
		fs.Dir += "/"
	}
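	// Build the shared AWS config options: config profile, default region
	// and a standard retryer with exponential jitter backoff capped at
	// 3 minutes and at most 10 attempts per request.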
	configOpts := []func(*config.LoadOptions) error{
		config.WithSharedConfigProfile(fs.ProfileName),
		config.WithDefaultRegion("us-east-1"),
		config.WithRetryer(func() aws.Retryer {
			return retry.NewStandard(func(o *retry.StandardOptions) {
				o.Backoff = retry.NewExponentialJitterBackoff(3 * time.Minute)
				o.MaxAttempts = 10
			})
		}),
	}

	if len(fs.ConfigFilePath) > 0 {
		configOpts = append(configOpts, config.WithSharedConfigFiles([]string{
			fs.ConfigFilePath,
		}))
	}

	if len(fs.CredsFilePath) > 0 {
		configOpts = append(configOpts, config.WithSharedCredentialsFiles([]string{
			fs.CredsFilePath,
		}))
	}

	cfg, err := config.LoadDefaultConfig(context.TODO(),
		configOpts...,
	)
	if err != nil {
		return fmt.Errorf("cannot load S3 config: %w", err)
	}

	if err = validateStorageClass(fs.StorageClass); err != nil {
		return err
	}

	if fs.TLSInsecureSkipVerify {
		tr := &http.Transport{
			TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
		}
		cfg.HTTPClient = &http.Client{Transport: tr}
	}
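
	// The client options callback below cannot return an error, so failures
	// to detect the bucket region are propagated via outerErr.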
	var outerErr error
	fs.s3 = s3.NewFromConfig(cfg, func(o *s3.Options) {
		if len(fs.CustomEndpoint) > 0 {
			logger.Infof("Using provided custom S3 endpoint: %q", fs.CustomEndpoint)
			o.UsePathStyle = fs.S3ForcePathStyle
			o.BaseEndpoint = &fs.CustomEndpoint
		} else {
			region, err := manager.GetBucketRegion(context.Background(), s3.NewFromConfig(cfg), fs.Bucket)
			if err != nil {
				outerErr = fmt.Errorf("cannot determine region for bucket %q: %w", fs.Bucket, err)
				return
			}

			o.Region = region
			logger.Infof("bucket %q is stored at region %q; switching to this region", fs.Bucket, region)
		}
	})

	if outerErr != nil {
		return outerErr
	}

	fs.uploader = manager.NewUploader(fs.s3, func(u *manager.Uploader) {
		// We manage upload concurrency by ourselves.
		u.Concurrency = 1
	})
	return nil
}
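
// A minimal usage sketch (the bucket and directory names here are
// hypothetical, not part of this package):
//
//	fs := &FS{
//		Bucket: "my-backups",
//		Dir:    "victoria-metrics/daily",
//	}
//	if err := fs.Init(); err != nil {
//		logger.Fatalf("cannot initialize S3 remote FS: %s", err)
//	}
//	defer fs.MustStop()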

// MustStop stops fs.
func (fs *FS) MustStop() {
	fs.s3 = nil
	fs.uploader = nil
}

// String returns human-readable description for fs.
func (fs *FS) String() string {
	return fmt.Sprintf("S3{bucket: %q, dir: %q}", fs.Bucket, fs.Dir)
}

// ListParts returns all the parts for fs.
func (fs *FS) ListParts() ([]common.Part, error) {
	dir := fs.Dir

	var parts []common.Part
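
	// Iterate over all the objects under dir with the ListObjectsV2 paginator.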
	paginator := s3.NewListObjectsV2Paginator(fs.s3, &s3.ListObjectsV2Input{
		Bucket: aws.String(fs.Bucket),
		Prefix: aws.String(dir),
	})

	for paginator.HasMorePages() {
		page, err := paginator.NextPage(context.TODO())
		if err != nil {
			return nil, fmt.Errorf("unexpected pagination error: %w", err)
		}

		for _, o := range page.Contents {
			file := *o.Key
			if !strings.HasPrefix(file, dir) {
				return nil, fmt.Errorf("unexpected prefix for s3 key %q; want %q", file, dir)
			}
			if fscommon.IgnorePath(file) {
				continue
			}
			var p common.Part
			if !p.ParseFromRemotePath(file[len(dir):]) {
				logger.Infof("skipping unknown object %q", file)
				continue
			}

			p.ActualSize = uint64(*o.Size)
			parts = append(parts, p)
		}
	}

	return parts, nil
}

// DeletePart deletes part p from fs.
func (fs *FS) DeletePart(p common.Part) error {
	path := fs.path(p)
	return fs.delete(path)
}

// RemoveEmptyDirs recursively removes empty dirs in fs.
func (fs *FS) RemoveEmptyDirs() error {
	// S3 has no directories, so nothing to remove.
	return nil
}

// CopyPart copies p from srcFS to fs.
func (fs *FS) CopyPart(srcFS common.OriginFS, p common.Part) error {
	src, ok := srcFS.(*FS)
	if !ok {
		return fmt.Errorf("cannot perform server-side copying from %s to %s: both of them must be S3", srcFS, fs)
	}
	srcPath := src.path(p)
	dstPath := fs.path(p)
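	// CopyObject expects the source in the "/bucket/key" form; this enables
	// server-side copying without transferring the data through this host.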
	copySource := fmt.Sprintf("/%s/%s", src.Bucket, srcPath)

	input := &s3.CopyObjectInput{
		Bucket:       aws.String(fs.Bucket),
		CopySource:   aws.String(copySource),
		Key:          aws.String(dstPath),
		StorageClass: fs.StorageClass,
	}

	_, err := fs.s3.CopyObject(context.Background(), input)
	if err != nil {
		return fmt.Errorf("cannot copy %q from %s to %s (copySource %q): %w", p.Path, src, fs, copySource, err)
	}
	return nil
}

// DownloadPart downloads part p from fs to w.
func (fs *FS) DownloadPart(p common.Part, w io.Writer) error {
	path := fs.path(p)
	input := &s3.GetObjectInput{
		Bucket: aws.String(fs.Bucket),
		Key:    aws.String(path),
	}
	o, err := fs.s3.GetObject(context.Background(), input)
	if err != nil {
		return fmt.Errorf("cannot open %q at %s (remote path %q): %w", p.Path, fs, path, err)
	}
	r := o.Body
	n, err := io.Copy(w, r)
	if err1 := r.Close(); err1 != nil && err == nil {
		err = err1
	}
	if err != nil {
		return fmt.Errorf("cannot download %q from %s (remote path %q): %w", p.Path, fs, path, err)
	}
	if uint64(n) != p.Size {
		return fmt.Errorf("wrong data size downloaded from %q at %s; got %d bytes; want %d bytes", p.Path, fs, n, p.Size)
	}
	return nil
}

// UploadPart uploads part p from r to fs.
func (fs *FS) UploadPart(p common.Part, r io.Reader) error {
	path := fs.path(p)
	sr := &statReader{
		r: r,
	}
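	// sr counts the bytes consumed by the uploader, so the actual upload
	// size can be verified against p.Size below.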
	input := &s3.PutObjectInput{
		Bucket:       aws.String(fs.Bucket),
		Key:          aws.String(path),
		Body:         sr,
		StorageClass: fs.StorageClass,
	}

	_, err := fs.uploader.Upload(context.Background(), input)
	if err != nil {
		return fmt.Errorf("cannot upload data to %q at %s (remote path %q): %w", p.Path, fs, path, err)
	}
	if uint64(sr.size) != p.Size {
		return fmt.Errorf("wrong data size uploaded to %q at %s; got %d bytes; want %d bytes", p.Path, fs, sr.size, p.Size)
	}
	return nil
}

// DeleteFile deletes filePath from fs if it exists.
//
// The function does nothing if the file doesn't exist.
func (fs *FS) DeleteFile(filePath string) error {
	// It looks like S3 may return `AccessDenied: Access Denied` instead of
	// `s3.ErrCodeNoSuchKey` on an attempt to delete a non-existing file,
	// so check whether filePath exists before deleting it.
	// See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/284 for details.
	ok, err := fs.HasFile(filePath)
	if err != nil {
		return err
	}
	if !ok {
		// Missing file - nothing to delete.
		return nil
	}

	path := path.Join(fs.Dir, filePath)
	return fs.delete(path)
}

func (fs *FS) delete(path string) error {
	if *common.DeleteAllObjectVersions {
		return fs.deleteObjectWithVersions(path)
	}
	return fs.deleteObject(path)
}

// deleteObject deletes object at path.
// It does not specify a version ID, so it will delete the latest version of the object.
func (fs *FS) deleteObject(path string) error {
	input := &s3.DeleteObjectInput{
		Bucket: aws.String(fs.Bucket),
		Key:    aws.String(path),
	}
	if _, err := fs.s3.DeleteObject(context.Background(), input); err != nil {
		return fmt.Errorf("cannot delete %q at %s: %w", path, fs, err)
	}
	return nil
}

// deleteObjectWithVersions deletes object at path and all its versions.
func (fs *FS) deleteObjectWithVersions(path string) error {
	versions, err := fs.s3.ListObjectVersions(context.Background(), &s3.ListObjectVersionsInput{
		Bucket: aws.String(fs.Bucket),
		Prefix: aws.String(path),
	})
	if err != nil {
		return fmt.Errorf("cannot list versions for %q at %s: %w", path, fs, err)
	}
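
	// Delete every listed version explicitly; on a versioned bucket,
	// a DeleteObject call without VersionId would only add a delete marker.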
	for _, version := range versions.Versions {
		input := &s3.DeleteObjectInput{
			Bucket:    aws.String(fs.Bucket),
			Key:       version.Key,
			VersionId: version.VersionId,
		}
		if _, err := fs.s3.DeleteObject(context.Background(), input); err != nil {
			return fmt.Errorf("cannot delete %q at %s: %w", path, fs, err)
		}
	}

	return nil
}

// CreateFile creates filePath at fs and puts data into it.
//
// The file is overwritten if it already exists.
func (fs *FS) CreateFile(filePath string, data []byte) error {
	path := path.Join(fs.Dir, filePath)
	sr := &statReader{
		r: bytes.NewReader(data),
	}
	input := &s3.PutObjectInput{
		Bucket:       aws.String(fs.Bucket),
		Key:          aws.String(path),
		Body:         sr,
		StorageClass: fs.StorageClass,
	}
	_, err := fs.uploader.Upload(context.Background(), input)
	if err != nil {
		return fmt.Errorf("cannot upload data to %q at %s (remote path %q): %w", filePath, fs, path, err)
	}
	l := int64(len(data))
	if sr.size != l {
		return fmt.Errorf("wrong data size uploaded to %q at %s; got %d bytes; want %d bytes", filePath, fs, sr.size, l)
	}
	return nil
}

// HasFile returns true if filePath exists at fs.
func (fs *FS) HasFile(filePath string) (bool, error) {
	path := path.Join(fs.Dir, filePath)
	input := &s3.GetObjectInput{
		Bucket: aws.String(fs.Bucket),
		Key:    aws.String(path),
	}
	o, err := fs.s3.GetObject(context.Background(), input)
	if err != nil {
		if strings.Contains(err.Error(), "NoSuchKey") {
			return false, nil
		}
		return false, fmt.Errorf("cannot open %q at %s (remote path %q): %w", filePath, fs, path, err)
	}
	if err := o.Body.Close(); err != nil {
		return false, fmt.Errorf("cannot close %q at %s (remote path %q): %w", filePath, fs, path, err)
	}
	return true, nil
}

// ReadFile returns the content of filePath at fs.
func (fs *FS) ReadFile(filePath string) ([]byte, error) {
	p := path.Join(fs.Dir, filePath)
	input := &s3.GetObjectInput{
		Bucket: aws.String(fs.Bucket),
		Key:    aws.String(p),
	}
	o, err := fs.s3.GetObject(context.Background(), input)
	if err != nil {
		return nil, fmt.Errorf("cannot open %q at %s (remote path %q): %w", filePath, fs, p, err)
	}
	defer o.Body.Close()
	b, err := io.ReadAll(o.Body)
	if err != nil {
		return nil, fmt.Errorf("cannot read %q at %s (remote path %q): %w", filePath, fs, p, err)
	}
	return b, nil
}

func (fs *FS) path(p common.Part) string {
	return p.RemotePath(fs.Dir)
}
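
// statReader wraps r and counts the number of bytes passed through Read,
// so callers can verify the transferred size after an upload completes.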
type statReader struct {
	r    io.Reader
	size int64
}

func (sr *statReader) Read(p []byte) (int, error) {
	n, err := sr.r.Read(p)
	sr.size += int64(n)
	return n, err
}