diff --git a/service/s3/s3manager/upload.go b/service/s3/s3manager/upload.go
index da1838c9db8..94f35b2b112 100644
--- a/service/s3/s3manager/upload.go
+++ b/service/s3/s3manager/upload.go
@@ -145,8 +145,13 @@ type Uploader struct {
 	// MaxUploadParts is the max number of parts which will be uploaded to S3.
 	// Will be used to calculate the partsize of the object to be uploaded.
 	// E.g: 5GB file, with MaxUploadParts set to 100, will upload the file
-	// as 100, 50MB parts.
-	// With a limited of s3.MaxUploadParts (10,000 parts).
+	// as 100, 50MB parts. With a limited of s3.MaxUploadParts (10,000 parts).
+	//
+	// MaxUploadParts must not be used to limit the total number of bytes uploaded.
+	// Use a type like to io.LimitReader (https://golang.org/pkg/io/#LimitedReader)
+	// instead. An io.LimitReader is helpful when uploading an unbounded reader
+	// to S3, and you know its maximum size. Otherwise the reader's io.EOF returned
+	// error must be used to signal end of stream.
 	//
 	// Defaults to package const's MaxUploadParts value.
 	MaxUploadParts int
@@ -542,21 +547,6 @@ func (u *multiuploader) upload(firstBuf io.ReadSeeker, firstPart []byte) (*Uploa
 
 	// Read and queue the rest of the parts
 	for u.geterr() == nil && err == nil {
-		num++
-		// This upload exceeded maximum number of supported parts, error now.
-		if num > int64(u.cfg.MaxUploadParts) || num > int64(MaxUploadParts) {
-			var msg string
-			if num > int64(u.cfg.MaxUploadParts) {
-				msg = fmt.Sprintf("exceeded total allowed configured MaxUploadParts (%d). Adjust PartSize to fit in this limit",
-					u.cfg.MaxUploadParts)
-			} else {
-				msg = fmt.Sprintf("exceeded total allowed S3 limit MaxUploadParts (%d). Adjust PartSize to fit in this limit",
-					MaxUploadParts)
-			}
-			u.seterr(awserr.New("TotalPartsExceeded", msg, nil))
-			break
-		}
-
 		var reader io.ReadSeeker
 		var nextChunkLen int
 		var part []byte
@@ -577,6 +567,21 @@ func (u *multiuploader) upload(firstBuf io.ReadSeeker, firstPart []byte) (*Uploa
 			break
 		}
 
+		num++
+		// This upload exceeded maximum number of supported parts, error now.
+		if num > int64(u.cfg.MaxUploadParts) || num > int64(MaxUploadParts) {
+			var msg string
+			if num > int64(u.cfg.MaxUploadParts) {
+				msg = fmt.Sprintf("exceeded total allowed configured MaxUploadParts (%d). Adjust PartSize to fit in this limit",
+					u.cfg.MaxUploadParts)
+			} else {
+				msg = fmt.Sprintf("exceeded total allowed S3 limit MaxUploadParts (%d). Adjust PartSize to fit in this limit",
+					MaxUploadParts)
+			}
+			u.seterr(awserr.New("TotalPartsExceeded", msg, nil))
+			break
+		}
+
 		ch <- chunk{buf: reader, part: part, num: num}
 	}
 
diff --git a/service/s3/s3manager/upload_test.go b/service/s3/s3manager/upload_test.go
index e3693dd739d..7f729735639 100644
--- a/service/s3/s3manager/upload_test.go
+++ b/service/s3/s3manager/upload_test.go
@@ -993,3 +993,40 @@ func TestUploadWithContextCanceled(t *testing.T) {
 		t.Errorf("expected error message to contain %q, but did not %q", e, a)
 	}
 }
+
+// S3 Uploader incorrectly fails an upload if the content being uploaded
+// has a size of MinPartSize * MaxUploadParts.
+// Github: aws/aws-sdk-go#2557
+func TestUploadMaxPartsEOF(t *testing.T) {
+	s, ops, _ := loggingSvc(emptyList)
+	mgr := s3manager.NewUploaderWithClient(s, func(u *s3manager.Uploader) {
+		u.Concurrency = 1
+		u.PartSize = s3manager.DefaultUploadPartSize
+		u.MaxUploadParts = 2
+	})
+	f := bytes.NewReader(make([]byte, int(mgr.PartSize)*mgr.MaxUploadParts))
+
+	r1 := io.NewSectionReader(f, 0, s3manager.DefaultUploadPartSize)
+	r2 := io.NewSectionReader(f, s3manager.DefaultUploadPartSize, 2*s3manager.DefaultUploadPartSize)
+	body := io.MultiReader(r1, r2)
+
+	_, err := mgr.Upload(&s3manager.UploadInput{
+		Bucket: aws.String("Bucket"),
+		Key:    aws.String("Key"),
+		Body:   body,
+	})
+
+	if err != nil {
+		t.Fatalf("expect no error, got %v", err)
+	}
+
+	expectOps := []string{
+		"CreateMultipartUpload",
+		"UploadPart",
+		"UploadPart",
+		"CompleteMultipartUpload",
+	}
+	if e, a := expectOps, *ops; !reflect.DeepEqual(e, a) {
+		t.Errorf("expect %v ops, got %v", e, a)
+	}
+}