Skip to content

Commit c7de38e

Browse files
michaelf-stratoscale authored and diehlaws
committed
service/s3/s3manager: Fix uploader to check for empty part before max parts check (aws#2556)
Fixes the S3 Upload manager's behavior for uploading exactly MaxUploadParts * PartSize to S3. The uploader would previously return an error after the full content was uploaded, because the assert on max upload parts was occurring before the check if there were any more parts to upload. This change fixes that logic so that the uploader first checks if there are any more chunks to upload before checking if the max number of parts to upload has been reached. Also clarifies the uploader's behavior of MaxUploadParts, and that it must not be used to limit the uploader's total upload size. An io.LimitReader can be used for that purpose. Fix aws#2557
1 parent 02ff0e6 commit c7de38e

File tree

2 files changed

+59
-17
lines changed

2 files changed

+59
-17
lines changed

service/s3/s3manager/upload.go

Lines changed: 22 additions & 17 deletions
Original file line numberDiff line numberDiff line change
@@ -145,8 +145,13 @@ type Uploader struct {
145145
// MaxUploadParts is the max number of parts which will be uploaded to S3.
146146
// Will be used to calculate the partsize of the object to be uploaded.
147147
// E.g: 5GB file, with MaxUploadParts set to 100, will upload the file
148-
// as 100, 50MB parts.
149-
// With a limited of s3.MaxUploadParts (10,000 parts).
148+
// as 100, 50MB parts. With a limited of s3.MaxUploadParts (10,000 parts).
149+
//
150+
// MaxUploadParts must not be used to limit the total number of bytes uploaded.
151+
// Use a type like to io.LimitReader (https://golang.org/pkg/io/#LimitedReader)
152+
// instead. An io.LimitReader is helpful when uploading an unbounded reader
153+
// to S3, and you know its maximum size. Otherwise the reader's io.EOF returned
154+
// error must be used to signal end of stream.
150155
//
151156
// Defaults to package const's MaxUploadParts value.
152157
MaxUploadParts int
@@ -542,21 +547,6 @@ func (u *multiuploader) upload(firstBuf io.ReadSeeker, firstPart []byte) (*Uploa
542547

543548
// Read and queue the rest of the parts
544549
for u.geterr() == nil && err == nil {
545-
num++
546-
// This upload exceeded maximum number of supported parts, error now.
547-
if num > int64(u.cfg.MaxUploadParts) || num > int64(MaxUploadParts) {
548-
var msg string
549-
if num > int64(u.cfg.MaxUploadParts) {
550-
msg = fmt.Sprintf("exceeded total allowed configured MaxUploadParts (%d). Adjust PartSize to fit in this limit",
551-
u.cfg.MaxUploadParts)
552-
} else {
553-
msg = fmt.Sprintf("exceeded total allowed S3 limit MaxUploadParts (%d). Adjust PartSize to fit in this limit",
554-
MaxUploadParts)
555-
}
556-
u.seterr(awserr.New("TotalPartsExceeded", msg, nil))
557-
break
558-
}
559-
560550
var reader io.ReadSeeker
561551
var nextChunkLen int
562552
var part []byte
@@ -577,6 +567,21 @@ func (u *multiuploader) upload(firstBuf io.ReadSeeker, firstPart []byte) (*Uploa
577567
break
578568
}
579569

570+
num++
571+
// This upload exceeded maximum number of supported parts, error now.
572+
if num > int64(u.cfg.MaxUploadParts) || num > int64(MaxUploadParts) {
573+
var msg string
574+
if num > int64(u.cfg.MaxUploadParts) {
575+
msg = fmt.Sprintf("exceeded total allowed configured MaxUploadParts (%d). Adjust PartSize to fit in this limit",
576+
u.cfg.MaxUploadParts)
577+
} else {
578+
msg = fmt.Sprintf("exceeded total allowed S3 limit MaxUploadParts (%d). Adjust PartSize to fit in this limit",
579+
MaxUploadParts)
580+
}
581+
u.seterr(awserr.New("TotalPartsExceeded", msg, nil))
582+
break
583+
}
584+
580585
ch <- chunk{buf: reader, part: part, num: num}
581586
}
582587

service/s3/s3manager/upload_test.go

Lines changed: 37 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -993,3 +993,40 @@ func TestUploadWithContextCanceled(t *testing.T) {
993993
t.Errorf("expected error message to contain %q, but did not %q", e, a)
994994
}
995995
}
996+
997+
// S3 Uploader incorrectly fails an upload if the content being uploaded
998+
// has a size of MinPartSize * MaxUploadParts.
999+
// Github: aws/aws-sdk-go#2557
1000+
func TestUploadMaxPartsEOF(t *testing.T) {
1001+
s, ops, _ := loggingSvc(emptyList)
1002+
mgr := s3manager.NewUploaderWithClient(s, func(u *s3manager.Uploader) {
1003+
u.Concurrency = 1
1004+
u.PartSize = s3manager.DefaultUploadPartSize
1005+
u.MaxUploadParts = 2
1006+
})
1007+
f := bytes.NewReader(make([]byte, int(mgr.PartSize)*mgr.MaxUploadParts))
1008+
1009+
r1 := io.NewSectionReader(f, 0, s3manager.DefaultUploadPartSize)
1010+
r2 := io.NewSectionReader(f, s3manager.DefaultUploadPartSize, 2*s3manager.DefaultUploadPartSize)
1011+
body := io.MultiReader(r1, r2)
1012+
1013+
_, err := mgr.Upload(&s3manager.UploadInput{
1014+
Bucket: aws.String("Bucket"),
1015+
Key: aws.String("Key"),
1016+
Body: body,
1017+
})
1018+
1019+
if err != nil {
1020+
t.Fatalf("expect no error, got %v", err)
1021+
}
1022+
1023+
expectOps := []string{
1024+
"CreateMultipartUpload",
1025+
"UploadPart",
1026+
"UploadPart",
1027+
"CompleteMultipartUpload",
1028+
}
1029+
if e, a := expectOps, *ops; !reflect.DeepEqual(e, a) {
1030+
t.Errorf("expect %v ops, got %v", e, a)
1031+
}
1032+
}

0 commit comments

Comments
 (0)