From dd4f1e50dbc182e71b5d182d61777cc9fe8b61e6 Mon Sep 17 00:00:00 2001
From: Michael Filanov
Date: Thu, 11 Apr 2019 16:21:31 +0300
Subject: [PATCH 1/3] S3 manager upload check for empty part before checking
 the number of multiparts

---
 service/s3/s3manager/upload.go | 30 +++++++++++++++---------------
 1 file changed, 15 insertions(+), 15 deletions(-)

diff --git a/service/s3/s3manager/upload.go b/service/s3/s3manager/upload.go
index da1838c9db8..b7cb81d665c 100644
--- a/service/s3/s3manager/upload.go
+++ b/service/s3/s3manager/upload.go
@@ -542,21 +542,6 @@ func (u *multiuploader) upload(firstBuf io.ReadSeeker, firstPart []byte) (*Uploa
 
 	// Read and queue the rest of the parts
 	for u.geterr() == nil && err == nil {
-		num++
-		// This upload exceeded maximum number of supported parts, error now.
-		if num > int64(u.cfg.MaxUploadParts) || num > int64(MaxUploadParts) {
-			var msg string
-			if num > int64(u.cfg.MaxUploadParts) {
-				msg = fmt.Sprintf("exceeded total allowed configured MaxUploadParts (%d). Adjust PartSize to fit in this limit",
-					u.cfg.MaxUploadParts)
-			} else {
-				msg = fmt.Sprintf("exceeded total allowed S3 limit MaxUploadParts (%d). Adjust PartSize to fit in this limit",
-					MaxUploadParts)
-			}
-			u.seterr(awserr.New("TotalPartsExceeded", msg, nil))
-			break
-		}
-
 		var reader io.ReadSeeker
 		var nextChunkLen int
 		var part []byte
@@ -577,6 +562,21 @@ func (u *multiuploader) upload(firstBuf io.ReadSeeker, firstPart []byte) (*Uploa
 			break
 		}
 
+		num++
+		// This upload exceeded maximum number of supported parts, error now.
+		if num > int64(u.cfg.MaxUploadParts) || num > int64(MaxUploadParts) {
+			var msg string
+			if num > int64(u.cfg.MaxUploadParts) {
+				msg = fmt.Sprintf("exceeded total allowed configured MaxUploadParts (%d). Adjust PartSize to fit in this limit",
+					u.cfg.MaxUploadParts)
+			} else {
+				msg = fmt.Sprintf("exceeded total allowed S3 limit MaxUploadParts (%d). Adjust PartSize to fit in this limit",
+					MaxUploadParts)
+			}
+			u.seterr(awserr.New("TotalPartsExceeded", msg, nil))
+			break
+		}
+
 		ch <- chunk{buf: reader, part: part, num: num}
 	}
 

From 656a0619487fc927a4e854ead32d1e031461e04b Mon Sep 17 00:00:00 2001
From: Jason Del Ponte
Date: Wed, 15 May 2019 10:21:25 -0700
Subject: [PATCH 2/3] Add clarification to MaxUploadParts usage

Clarified that the S3 Upload Manager's MaxUploadParts parameter must not
be used to limit the total number of bytes uploaded. An io.LimitReader
should be used instead.
---
 service/s3/s3manager/upload.go | 9 +++++++--
 1 file changed, 7 insertions(+), 2 deletions(-)

diff --git a/service/s3/s3manager/upload.go b/service/s3/s3manager/upload.go
index b7cb81d665c..94f35b2b112 100644
--- a/service/s3/s3manager/upload.go
+++ b/service/s3/s3manager/upload.go
@@ -145,8 +145,13 @@ type Uploader struct {
 	// MaxUploadParts is the max number of parts which will be uploaded to S3.
 	// Will be used to calculate the partsize of the object to be uploaded.
 	// E.g: 5GB file, with MaxUploadParts set to 100, will upload the file
-	// as 100, 50MB parts.
-	// With a limited of s3.MaxUploadParts (10,000 parts).
+	// as 100, 50MB parts. With a limit of s3.MaxUploadParts (10,000 parts).
+	//
+	// MaxUploadParts must not be used to limit the total number of bytes uploaded.
+	// Use a type like io.LimitReader (https://golang.org/pkg/io/#LimitedReader)
+	// instead. An io.LimitReader is helpful when uploading an unbounded reader
+	// to S3 whose maximum size is known. Otherwise the io.EOF error returned by
+	// the reader must be used to signal the end of the stream.
 	//
 	// Defaults to package const's MaxUploadParts value.
 	MaxUploadParts int

From 5e5c8f0cb7bacddefdd6b438ddba648d964ac1d3 Mon Sep 17 00:00:00 2001
From: Jason Del Ponte
Date: Fri, 17 May 2019 15:40:42 -0700
Subject: [PATCH 3/3] Add testcase exercising MaxParts with EOF

---
 service/s3/s3manager/upload_test.go | 37 +++++++++++++++++++++++++++++
 1 file changed, 37 insertions(+)

diff --git a/service/s3/s3manager/upload_test.go b/service/s3/s3manager/upload_test.go
index e3693dd739d..7f729735639 100644
--- a/service/s3/s3manager/upload_test.go
+++ b/service/s3/s3manager/upload_test.go
@@ -993,3 +993,40 @@ func TestUploadWithContextCanceled(t *testing.T) {
 		t.Errorf("expected error message to contain %q, but did not %q", e, a)
 	}
 }
+
+// S3 Uploader incorrectly fails an upload if the content being uploaded
+// has a size of MinPartSize * MaxUploadParts.
+// Github: aws/aws-sdk-go#2557
+func TestUploadMaxPartsEOF(t *testing.T) {
+	s, ops, _ := loggingSvc(emptyList)
+	mgr := s3manager.NewUploaderWithClient(s, func(u *s3manager.Uploader) {
+		u.Concurrency = 1
+		u.PartSize = s3manager.DefaultUploadPartSize
+		u.MaxUploadParts = 2
+	})
+	f := bytes.NewReader(make([]byte, int(mgr.PartSize)*mgr.MaxUploadParts))
+
+	r1 := io.NewSectionReader(f, 0, s3manager.DefaultUploadPartSize)
+	r2 := io.NewSectionReader(f, s3manager.DefaultUploadPartSize, 2*s3manager.DefaultUploadPartSize)
+	body := io.MultiReader(r1, r2)
+
+	_, err := mgr.Upload(&s3manager.UploadInput{
+		Bucket: aws.String("Bucket"),
+		Key:    aws.String("Key"),
+		Body:   body,
+	})
+
+	if err != nil {
+		t.Fatalf("expect no error, got %v", err)
+	}
+
+	expectOps := []string{
+		"CreateMultipartUpload",
+		"UploadPart",
+		"UploadPart",
+		"CompleteMultipartUpload",
+	}
+	if e, a := expectOps, *ops; !reflect.DeepEqual(e, a) {
+		t.Errorf("expect %v ops, got %v", e, a)
+	}
+}
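
As context for the MaxUploadParts clarification in PATCH 2/3, the sketch below shows the io.LimitReader pattern the new doc comment recommends: cap the number of bytes read from an unbounded stream with io.LimitReader rather than treating MaxUploadParts as a byte limit. It is an illustrative example only, not part of the patch set; the session setup, the 64 MB cap, and the bucket and key names are assumptions made for demonstration.

package main

import (
	"fmt"
	"io"
	"os"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3/s3manager"
)

func main() {
	// Hypothetical setup: credentials/region come from the default chain.
	sess := session.Must(session.NewSession())
	uploader := s3manager.NewUploader(sess)

	// Cap the upload at 64 MB by wrapping the unbounded reader (stdin here)
	// in an io.LimitReader. MaxUploadParts only bounds the part count; it is
	// not a mechanism for limiting the total bytes uploaded.
	const maxBytes = 64 * 1024 * 1024
	body := io.LimitReader(os.Stdin, maxBytes)

	out, err := uploader.Upload(&s3manager.UploadInput{
		Bucket: aws.String("example-bucket"), // hypothetical bucket
		Key:    aws.String("example-key"),    // hypothetical key
		Body:   body,
	})
	if err != nil {
		fmt.Fprintln(os.Stderr, "upload failed:", err)
		os.Exit(1)
	}
	fmt.Println("uploaded to", out.Location)
}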