diff --git a/.gitignore b/.gitignore
index d566b8a..0c15d57 100755
--- a/.gitignore
+++ b/.gitignore
@@ -1,4 +1,5 @@
vendor
doc
.idea/
-.DS_Store
\ No newline at end of file
+.DS_Store
+go-sdk-test*
\ No newline at end of file
diff --git a/internal/protocol/query/unmarshal_error.go b/internal/protocol/query/unmarshal_error.go
index ee9a08e..dc2049c 100755
--- a/internal/protocol/query/unmarshal_error.go
+++ b/internal/protocol/query/unmarshal_error.go
@@ -2,36 +2,56 @@ package query
import (
"encoding/xml"
- "io"
- "io/ioutil"
- "log"
-
"github.com/ks3sdklib/aws-sdk-go/aws"
"github.com/ks3sdklib/aws-sdk-go/internal/apierr"
+ "io"
+ "regexp"
+ "strings"
)
type XmlErrorResponse struct {
XMLName xml.Name `xml:"Error"`
Code string `xml:"Code"`
- StatusCode int `"StatusCode"`
+ StatusCode int `xml:"StatusCode"`
Message string `xml:"Message"`
Resource string `xml:"Resource"`
RequestID string `xml:"RequestId"`
}
-// UnmarshalError unmarshals an error response for an AWS Query service.
+// UnmarshalError unmarshals an error response for an AWS Query service.
func UnmarshalError(r *aws.Request) {
defer r.HTTPResponse.Body.Close()
resp := &XmlErrorResponse{}
- body, err := ioutil.ReadAll(r.HTTPResponse.Body)
+ body, err := io.ReadAll(r.HTTPResponse.Body)
if err != nil {
- log.Printf("read body err, %v\n", err)
+ r.Error = apierr.New("Unmarshal", "failed to read body", err)
return
}
+
+ // 如果响应类型是html,则解析html文本
+ if strings.Contains(r.HTTPResponse.Header.Get("Content-Type"), "text/html") {
+ // 获取HTML文本中title标签的内容
+		re := regexp.MustCompile(`<title>(.*?)</title>`)
+ matches := re.FindStringSubmatch(string(body))
+
+ title := ""
+ if len(matches) > 1 {
+ title = matches[1]
+ }
+
+ r.Error = apierr.NewRequestError(apierr.New(title, "", nil), r.HTTPResponse.StatusCode, "")
+ return
+ }
+
err = xml.Unmarshal(body, &resp)
resp.StatusCode = r.HTTPResponse.StatusCode
+ // head请求无法从body中获取request id,如果是head请求,则从header中获取
+ if resp.RequestID == "" && r.HTTPRequest.Method == "HEAD" {
+ resp.RequestID = r.HTTPResponse.Header.Get("X-Kss-Request-Id")
+ }
+
if err != nil && err != io.EOF {
r.Error = apierr.New("Unmarshal", "failed to decode query XML error response", err)
} else {
diff --git a/internal/protocol/rest/build.go b/internal/protocol/rest/build.go
index 55189d9..022a45f 100755
--- a/internal/protocol/rest/build.go
+++ b/internal/protocol/rest/build.go
@@ -75,6 +75,8 @@ func buildLocationElements(r *aws.Request, v reflect.Value) {
buildURI(r, m, name)
case "querystring":
buildQueryString(r, m, name, query)
+ case "parameters":
+ buildParameters(r, m, query)
}
}
if r.Error != nil {
@@ -149,6 +151,19 @@ func buildQueryString(r *aws.Request, v reflect.Value, name string, query url.Va
r.Error = apierr.New("Marshal", "failed to encode REST request", err)
} else if str != nil {
query.Set(name, *str)
+ } else if str == nil {
+ query.Set(name, "")
+ }
+}
+
+func buildParameters(r *aws.Request, v reflect.Value, query url.Values) {
+ for _, key := range v.MapKeys() {
+ str, err := convertType(v.MapIndex(key))
+ if err != nil {
+ r.Error = apierr.New("Marshal", "failed to encode REST request", err)
+ } else {
+ buildQueryString(r, reflect.ValueOf(str), key.String(), query)
+ }
}
}
@@ -230,7 +245,7 @@ func convertType(v reflect.Value) (*string, error) {
case time.Time:
str = value.UTC().Format(RFC822)
default:
- err := fmt.Errorf("Unsupported value for param %v (%s)", v.Interface(), v.Type())
+ err := fmt.Errorf("unsupported value for param %v (%s)", v.Interface(), v.Type())
return nil, err
}
return &str, nil
diff --git a/internal/signer/v2/v2.go b/internal/signer/v2/v2.go
index 5d5d061..56daafd 100644
--- a/internal/signer/v2/v2.go
+++ b/internal/signer/v2/v2.go
@@ -59,6 +59,14 @@ var signQuerys = map[string]bool{
"append": true,
"position": true,
"decompresspolicy": true,
+ "retention": true,
+ "crr": true,
+ "inventory": true,
+ "recycle": true,
+ "recover": true,
+ "clear": true,
+ "id": true,
+ "continuation-token": true,
}
type signer struct {
diff --git a/service/s3/api.go b/service/s3/api.go
index 63f410a..e71d7c2 100755
--- a/service/s3/api.go
+++ b/service/s3/api.go
@@ -9,7 +9,6 @@ import (
"github.com/ks3sdklib/aws-sdk-go/aws/awserr"
"github.com/ks3sdklib/aws-sdk-go/internal/apierr"
"github.com/ks3sdklib/aws-sdk-go/internal/crc"
- "github.com/ks3sdklib/aws-sdk-go/service/s3/s3util"
"hash"
"io"
"net/http"
@@ -123,7 +122,7 @@ func (c *S3) CopyObjectRequest(input *CopyObjectInput) (req *aws.Request, output
// URL encode the copy source
if input.CopySource == nil {
- input.CopySource = aws.String(s3util.BuildCopySource(input.SourceBucket, input.SourceKey))
+ input.CopySource = aws.String(BuildCopySource(input.SourceBucket, input.SourceKey))
}
req = c.newRequest(opCopyObject, input, output)
output = &CopyObjectOutput{}
@@ -315,44 +314,6 @@ func (c *S3) DeleteBucketPolicyWithContext(ctx aws.Context, input *DeleteBucketP
var opDeleteBucketPolicy *aws.Operation
-// DeleteBucketReplicationRequest generates a request for the DeleteBucketReplication operation.
-func (c *S3) DeleteBucketReplicationRequest(input *DeleteBucketReplicationInput) (req *aws.Request, output *DeleteBucketReplicationOutput) {
- oprw.Lock()
- defer oprw.Unlock()
-
- if opDeleteBucketReplication == nil {
- opDeleteBucketReplication = &aws.Operation{
- Name: "DeleteBucketReplication",
- HTTPMethod: "DELETE",
- HTTPPath: "/{Bucket}?replication",
- }
- }
-
- if input == nil {
- input = &DeleteBucketReplicationInput{}
- }
-
- req = c.newRequest(opDeleteBucketReplication, input, output)
- output = &DeleteBucketReplicationOutput{}
- req.Data = output
- return
-}
-
-func (c *S3) DeleteBucketReplication(input *DeleteBucketReplicationInput) (*DeleteBucketReplicationOutput, error) {
- req, out := c.DeleteBucketReplicationRequest(input)
- err := req.Send()
- return out, err
-}
-
-func (c *S3) DeleteBucketReplicationWithContext(ctx aws.Context, input *DeleteBucketReplicationInput) (*DeleteBucketReplicationOutput, error) {
- req, out := c.DeleteBucketReplicationRequest(input)
- req.SetContext(ctx)
- err := req.Send()
- return out, err
-}
-
-var opDeleteBucketReplication *aws.Operation
-
// DeleteBucketTaggingRequest generates a request for the DeleteBucketTagging operation.
func (c *S3) DeleteBucketTaggingRequest(input *DeleteBucketTaggingInput) (req *aws.Request, output *DeleteBucketTaggingOutput) {
oprw.Lock()
@@ -651,6 +612,7 @@ func (c *S3) GetBucketLocationRequest(input *GetBucketLocationInput) (req *aws.R
}
req = c.newRequest(opGetBucketLocation, input, output)
+ req.Handlers.Unmarshal.PushFront(buildGetBucketLocation)
output = &GetBucketLocationOutput{}
req.Data = output
return
@@ -829,44 +791,6 @@ func (c *S3) GetBucketPolicyWithContext(ctx aws.Context, input *GetBucketPolicyI
var opGetBucketPolicy *aws.Operation
-// GetBucketReplicationRequest generates a request for the GetBucketReplication operation.
-func (c *S3) GetBucketReplicationRequest(input *GetBucketReplicationInput) (req *aws.Request, output *GetBucketReplicationOutput) {
- oprw.Lock()
- defer oprw.Unlock()
-
- if opGetBucketReplication == nil {
- opGetBucketReplication = &aws.Operation{
- Name: "GetBucketReplication",
- HTTPMethod: "GET",
- HTTPPath: "/{Bucket}?replication",
- }
- }
-
- if input == nil {
- input = &GetBucketReplicationInput{}
- }
-
- req = c.newRequest(opGetBucketReplication, input, output)
- output = &GetBucketReplicationOutput{}
- req.Data = output
- return
-}
-
-func (c *S3) GetBucketReplication(input *GetBucketReplicationInput) (*GetBucketReplicationOutput, error) {
- req, out := c.GetBucketReplicationRequest(input)
- err := req.Send()
- return out, err
-}
-
-func (c *S3) GetBucketReplicationWithContext(ctx aws.Context, input *GetBucketReplicationInput) (*GetBucketReplicationOutput, error) {
- req, out := c.GetBucketReplicationRequest(input)
- req.SetContext(ctx)
- err := req.Send()
- return out, err
-}
-
-var opGetBucketReplication *aws.Operation
-
// GetBucketRequestPaymentRequest generates a request for the GetBucketRequestPayment operation.
func (c *S3) GetBucketRequestPaymentRequest(input *GetBucketRequestPaymentInput) (req *aws.Request, output *GetBucketRequestPaymentOutput) {
oprw.Lock()
@@ -1750,45 +1674,6 @@ func (c *S3) PutBucketPolicyWithContext(ctx aws.Context, input *PutBucketPolicyI
var opPutBucketPolicy *aws.Operation
-// PutBucketReplicationRequest generates a request for the PutBucketReplication operation.
-func (c *S3) PutBucketReplicationRequest(input *PutBucketReplicationInput) (req *aws.Request, output *PutBucketReplicationOutput) {
- oprw.Lock()
- defer oprw.Unlock()
-
- if opPutBucketReplication == nil {
- opPutBucketReplication = &aws.Operation{
- Name: "PutBucketReplication",
- HTTPMethod: "PUT",
- HTTPPath: "/{Bucket}?replication",
- }
- }
-
- if input == nil {
- input = &PutBucketReplicationInput{}
- }
-
- req = c.newRequest(opPutBucketReplication, input, output)
- output = &PutBucketReplicationOutput{}
- req.Data = output
- return
-}
-
-// PutBucketReplication Creates a new replication configuration (or replaces an existing one, if present).
-func (c *S3) PutBucketReplication(input *PutBucketReplicationInput) (*PutBucketReplicationOutput, error) {
- req, out := c.PutBucketReplicationRequest(input)
- err := req.Send()
- return out, err
-}
-
-func (c *S3) PutBucketReplicationWithContext(ctx aws.Context, input *PutBucketReplicationInput) (*PutBucketReplicationOutput, error) {
- req, out := c.PutBucketReplicationRequest(input)
- req.SetContext(ctx)
- err := req.Send()
- return out, err
-}
-
-var opPutBucketReplication *aws.Operation
-
// PutBucketRequestPaymentRequest generates a request for the PutBucketRequestPayment operation.
func (c *S3) PutBucketRequestPaymentRequest(input *PutBucketRequestPaymentInput) (req *aws.Request, output *PutBucketRequestPaymentOutput) {
oprw.Lock()
@@ -1933,15 +1818,6 @@ func (c *S3) PutBucketWebsiteRequest(input *PutBucketWebsiteInput) (req *aws.Req
return
}
-type HTTPMethod string
-
-const (
- PUT HTTPMethod = "PUT"
- GET HTTPMethod = "GET"
- DELETE HTTPMethod = "DELETE"
- HEAD HTTPMethod = "HEAD"
-)
-
type metadataGeneratePresignedUrlInput struct {
SDKShapeTraits bool `type:"structure" payload:"GeneratePresignedUrlInput"`
}
@@ -1987,6 +1863,10 @@ type GeneratePresignedUrlInput struct {
// Sets the Expires header of the response.
ResponseExpires *time.Time `location:"querystring" locationName:"response-expires" type:"timestamp" timestampFormat:"iso8601"`
+ Headers map[string]*string `location:"headers" type:"map"`
+
+ Parameters map[string]*string `location:"parameters" type:"map"`
+
metadataGeneratePresignedUrlInput `json:"-" xml:"-"`
}
type GeneratePresignedUrlOutput struct {
@@ -2294,7 +2174,7 @@ func (c *S3) UploadPartCopyRequest(input *UploadPartCopyInput) (req *aws.Request
// URL encode the copy source
if input.CopySource == nil {
- input.CopySource = aws.String(s3util.BuildCopySource(input.SourceBucket, input.SourceKey))
+ input.CopySource = aws.String(BuildCopySource(input.SourceBucket, input.SourceKey))
}
req = c.newRequest(opUploadPartCopy, input, output)
output = &UploadPartCopyOutput{}
@@ -3006,30 +2886,6 @@ type metadataDeleteBucketPolicyOutput struct {
SDKShapeTraits bool `type:"structure"`
}
-type DeleteBucketReplicationInput struct {
- Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
-
- ContentType *string `location:"header" locationName:"Content-Type" type:"string"`
-
- metadataDeleteBucketReplicationInput `json:"-" xml:"-"`
-}
-
-type metadataDeleteBucketReplicationInput struct {
- SDKShapeTraits bool `type:"structure"`
-}
-
-type DeleteBucketReplicationOutput struct {
- metadataDeleteBucketReplicationOutput `json:"-" xml:"-"`
-
- Metadata map[string]*string `location:"headers" type:"map"`
-
- StatusCode *int64 `location:"statusCode" type:"integer"`
-}
-
-type metadataDeleteBucketReplicationOutput struct {
- SDKShapeTraits bool `type:"structure"`
-}
-
type DeleteBucketTaggingInput struct {
Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
@@ -3227,18 +3083,6 @@ type metadataDeletedObject struct {
SDKShapeTraits bool `type:"structure"`
}
-type Destination struct {
- // Amazon resource name (ARN) of the bucket where you want Amazon S3 to store
- // replicas of the object identified by the rule.
- Bucket *string `type:"string" required:"true"`
-
- metadataDestination `json:"-" xml:"-"`
-}
-
-type metadataDestination struct {
- SDKShapeTraits bool `type:"structure"`
-}
-
type Error struct {
Code *string `type:"string"`
@@ -3385,34 +3229,6 @@ type metadataGetBucketPolicyOutput struct {
SDKShapeTraits bool `type:"structure" payload:"Policy"`
}
-type GetBucketReplicationInput struct {
- Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
-
- ContentType *string `location:"header" locationName:"Content-Type" type:"string"`
-
- metadataGetBucketReplicationInput `json:"-" xml:"-"`
-}
-
-type metadataGetBucketReplicationInput struct {
- SDKShapeTraits bool `type:"structure"`
-}
-
-type GetBucketReplicationOutput struct {
- // Container for replication rules. You can add as many as 1,000 rules. Total
- // replication configuration size can be up to 2 MB.
- ReplicationConfiguration *ReplicationConfiguration `type:"structure"`
-
- metadataGetBucketReplicationOutput `json:"-" xml:"-"`
-
- Metadata map[string]*string `location:"headers" type:"map"`
-
- StatusCode *int64 `location:"statusCode" type:"integer"`
-}
-
-type metadataGetBucketReplicationOutput struct {
- SDKShapeTraits bool `type:"structure" payload:"ReplicationConfiguration"`
-}
-
type GetBucketRequestPaymentInput struct {
Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
@@ -4741,34 +4557,6 @@ type metadataPutBucketPolicyOutput struct {
SDKShapeTraits bool `type:"structure"`
}
-type PutBucketReplicationInput struct {
- Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
-
- // Container for replication rules. You can add as many as 1,000 rules. Total
- // replication configuration size can be up to 2 MB.
- ReplicationConfiguration *ReplicationConfiguration `locationName:"ReplicationConfiguration" type:"structure" required:"true"`
-
- ContentType *string `location:"header" locationName:"Content-Type" type:"string"`
-
- metadataPutBucketReplicationInput `json:"-" xml:"-"`
-}
-
-type metadataPutBucketReplicationInput struct {
- SDKShapeTraits bool `type:"structure" payload:"ReplicationConfiguration"`
-}
-
-type PutBucketReplicationOutput struct {
- metadataPutBucketReplicationOutput `json:"-" xml:"-"`
-
- Metadata map[string]*string `location:"headers" type:"map"`
-
- StatusCode *int64 `location:"statusCode" type:"integer"`
-}
-
-type metadataPutBucketReplicationOutput struct {
- SDKShapeTraits bool `type:"structure"`
-}
-
type PutBucketRequestPaymentInput struct {
Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
@@ -5270,45 +5058,6 @@ type metadataRedirectAllRequestsTo struct {
SDKShapeTraits bool `type:"structure"`
}
-// ReplicationConfiguration Container for replication rules. You can add as many as 1,000 rules. Total
-// replication configuration size can be up to 2 MB.
-type ReplicationConfiguration struct {
- // Amazon Resource Name (ARN) of an IAM role for Amazon S3 to assume when replicating
- // the objects.
- Role *string `type:"string" required:"true"`
-
- // Container for information about a particular replication rule. Replication
- // configuration must have at least one rule and can contain up to 1,000 rules.
- Rules []*ReplicationRule `locationName:"Rule" type:"list" flattened:"true" required:"true"`
-
- metadataReplicationConfiguration `json:"-" xml:"-"`
-}
-
-type metadataReplicationConfiguration struct {
- SDKShapeTraits bool `type:"structure"`
-}
-
-type ReplicationRule struct {
- Destination *Destination `type:"structure" required:"true"`
-
- // Unique identifier for the rule. The value cannot be longer than 255 characters.
- ID *string `type:"string"`
-
- // Object keyname prefix identifying one or more objects to which the rule applies.
- // Maximum prefix length can be up to 1,024 characters. Overlapping prefixes
- // are not supported.
- Prefix *string `type:"string" required:"true"`
-
- // The rule is ignored if status is not Enabled.
- Status *string `type:"string" required:"true"`
-
- metadataReplicationRule `json:"-" xml:"-"`
-}
-
-type metadataReplicationRule struct {
- SDKShapeTraits bool `type:"structure"`
-}
-
type RequestPaymentConfiguration struct {
// Specifies who pays for the download and request fees.
Payer *string `type:"string" required:"true"`
@@ -5729,54 +5478,6 @@ type metadataWebsiteConfiguration struct {
SDKShapeTraits bool `type:"structure"`
}
-const AllUsersUri = "http://acs.amazonaws.com/groups/global/AllUsers"
-
-type CannedAccessControlType int32
-
-const (
- PublicReadWrite CannedAccessControlType = 0
- PublicRead CannedAccessControlType = 1
- Private CannedAccessControlType = 2
-)
-
-func GetAcl(resp GetObjectACLOutput) CannedAccessControlType {
-
- allUsersPermissions := map[string]*string{}
- for _, value := range resp.Grants {
- if value.Grantee.URI != nil && *value.Grantee.URI == AllUsersUri {
- allUsersPermissions[*value.Permission] = value.Permission
- }
- }
- _, read := allUsersPermissions["READ"]
- _, write := allUsersPermissions["WRITE"]
- if read && write {
- return PublicReadWrite
- } else if read {
- return PublicRead
- } else {
- return Private
- }
-}
-
-func GetBucketAcl(resp GetBucketACLOutput) CannedAccessControlType {
-
- allUsersPermissions := map[string]*string{}
- for _, value := range resp.Grants {
- if value.Grantee.URI != nil && *value.Grantee.URI == AllUsersUri {
- allUsersPermissions[*value.Permission] = value.Permission
- }
- }
- _, read := allUsersPermissions["READ"]
- _, write := allUsersPermissions["WRITE"]
- if read && write {
- return PublicReadWrite
- } else if read {
- return PublicRead
- } else {
- return Private
- }
-}
-
func (c *S3) DeleteObjectTaggingRequest(input *DeleteObjectTaggingInput) (req *aws.Request, output *DeleteObjectTaggingOutput) {
oprw.Lock()
defer oprw.Unlock()
diff --git a/service/s3/const.go b/service/s3/const.go
index 03c8200..86f648e 100644
--- a/service/s3/const.go
+++ b/service/s3/const.go
@@ -17,9 +17,9 @@ const (
HTTPHeaderHost = "Host"
HTTPHeaderkssACL = "X-kss-Acl"
- ChannelBuf int = 1000
- MinPartSize5MB = 5*1024*1024 + 100 // part size, 5MB
- MinPartSize = 100 * 1024 // Min part size, 100KB
+ ChannelBuf int = 1000
+ PartSize5MB = 5 * 1024 * 1024 // part size, 5MB
+ MinPartSize = 100 * 1024 // Min part size, 100KB
)
// ACL
@@ -51,3 +51,23 @@ const (
BucketTypeDeepIA string = "DEEP_IA"
BucketTypeArchive string = "ARCHIVE"
)
+
+type HTTPMethod string
+
+const (
+ PUT HTTPMethod = "PUT"
+ GET HTTPMethod = "GET"
+ DELETE HTTPMethod = "DELETE"
+ HEAD HTTPMethod = "HEAD"
+ POST HTTPMethod = "POST"
+)
+
+const AllUsersUri = "http://acs.amazonaws.com/groups/global/AllUsers"
+
+type CannedAccessControlType int32
+
+const (
+ PublicReadWrite CannedAccessControlType = 0
+ PublicRead CannedAccessControlType = 1
+ Private CannedAccessControlType = 2
+)
diff --git a/service/s3/decompresspolicy.go b/service/s3/decompresspolicy.go
index a0f9479..1dd67c2 100644
--- a/service/s3/decompresspolicy.go
+++ b/service/s3/decompresspolicy.go
@@ -54,7 +54,7 @@ type metadataPutBucketDecompressPolicyInput struct {
}
type BucketDecompressPolicy struct {
- Rules []*DecompressPolicyRule `json:"rules,omitempty" type:"list" locationName:"rules" required:"true"`
+ Rules []*DecompressPolicyRule `json:"rules,omitempty" type:"list" locationName:"rules" required:"true"`
}
type DecompressPolicyRule struct {
@@ -105,7 +105,7 @@ type DecompressPolicyRule struct {
}
type PutBucketDecompressPolicyOutput struct {
- Metadata map[string]*string `location:"headers" type:"map"`
+ Metadata map[string]*string `location:"headers" type:"map"`
StatusCode *int64 `location:"statusCode" type:"integer"`
}
@@ -156,14 +156,14 @@ type GetBucketDecompressPolicyInput struct {
type GetBucketDecompressPolicyOutput struct {
BucketDecompressPolicy *BucketDecompressPolicy `locationName:"BucketDecompressPolicy" type:"structure"`
- Metadata map[string]*string `location:"headers" type:"map"`
+ Metadata map[string]*string `location:"headers" type:"map"`
StatusCode *int64 `location:"statusCode" type:"integer"`
- metadataGetBucketDecompressPolicyInput `json:"-" xml:"-"`
+ metadataGetBucketDecompressPolicyOutput `json:"-" xml:"-"`
}
-type metadataGetBucketDecompressPolicyInput struct {
+type metadataGetBucketDecompressPolicyOutput struct {
SDKShapeTraits bool `type:"structure" payload:"BucketDecompressPolicy"`
}
@@ -207,7 +207,7 @@ type DeleteBucketDecompressPolicyInput struct {
Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
}
type DeleteBucketDecompressPolicyOutput struct {
- Metadata map[string]*string `location:"headers" type:"map"`
+ Metadata map[string]*string `location:"headers" type:"map"`
StatusCode *int64 `location:"statusCode" type:"integer"`
}
diff --git a/service/s3/inventory.go b/service/s3/inventory.go
new file mode 100644
index 0000000..24e695f
--- /dev/null
+++ b/service/s3/inventory.go
@@ -0,0 +1,310 @@
+package s3
+
+import "github.com/ks3sdklib/aws-sdk-go/aws"
+
+// PutBucketInventoryRequest generates a request for the PutBucketInventory operation.
+func (c *S3) PutBucketInventoryRequest(input *PutBucketInventoryInput) (req *aws.Request, output *PutBucketInventoryOutput) {
+ oprw.Lock()
+ defer oprw.Unlock()
+
+ if opPutBucketInventory == nil {
+ opPutBucketInventory = &aws.Operation{
+ Name: "PutBucketInventory",
+ HTTPMethod: "PUT",
+ HTTPPath: "/{Bucket}?inventory",
+ }
+ }
+
+ if input == nil {
+ input = &PutBucketInventoryInput{}
+ }
+
+ input.AutoFillMD5 = true
+ req = c.newRequest(opPutBucketInventory, input, output)
+ output = &PutBucketInventoryOutput{}
+ req.Data = output
+ return
+}
+
+// PutBucketInventory creates a new inventory configuration.
+func (c *S3) PutBucketInventory(input *PutBucketInventoryInput) (*PutBucketInventoryOutput, error) {
+ req, out := c.PutBucketInventoryRequest(input)
+ err := req.Send()
+ return out, err
+}
+
+func (c *S3) PutBucketInventoryWithContext(ctx aws.Context, input *PutBucketInventoryInput) (*PutBucketInventoryOutput, error) {
+ req, out := c.PutBucketInventoryRequest(input)
+ req.SetContext(ctx)
+ err := req.Send()
+ return out, err
+}
+
+var opPutBucketInventory *aws.Operation
+
+type PutBucketInventoryInput struct {
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+ Id *string `location:"querystring" locationName:"id" type:"string" required:"true"`
+
+ InventoryConfiguration *InventoryConfiguration `locationName:"InventoryConfiguration" type:"structure" required:"true"`
+
+ ContentType *string `location:"header" locationName:"Content-Type" type:"string"`
+
+ metadataPutBucketInventoryInput `json:"-" xml:"-"`
+}
+
+type metadataPutBucketInventoryInput struct {
+ SDKShapeTraits bool `type:"structure" payload:"InventoryConfiguration"`
+
+ AutoFillMD5 bool
+}
+
+type PutBucketInventoryOutput struct {
+ Metadata map[string]*string `location:"headers" type:"map"`
+
+ StatusCode *int64 `location:"statusCode" type:"integer"`
+}
+
+type InventoryConfiguration struct {
+ // The list name specified by the user is unique within a single bucket.
+ Id *string `locationName:"Id" type:"string" required:"true"`
+
+ // Is the inventory function enabled.
+ IsEnabled *bool `locationName:"IsEnabled" type:"boolean" required:"true"`
+
+ // Specify scanning prefix information.
+ Filter *InventoryFilter `locationName:"Filter" type:"structure"`
+
+ // Storage inventory results.
+ Destination *Destination `locationName:"Destination" type:"structure" required:"true"`
+
+ // Container for storing inventory export cycle information.
+ Schedule *Schedule `locationName:"Schedule" type:"structure" required:"true"`
+
+ // Set the configuration items included in the inventory results.
+ OptionalFields *OptionalFields `locationName:"OptionalFields" type:"structure" required:"true"`
+}
+
+type InventoryFilter struct {
+ // The storage path prefix of the inventory file.
+ Prefix *string `locationName:"Prefix" type:"string" required:"true"`
+
+ // The starting timestamp of the last modification time of the filtered file, in seconds.
+ LastModifyBeginTimeStamp *string `locationName:"LastModifyBeginTimeStamp" type:"string"`
+
+ // End timestamp of the last modification time of the filtered file, in seconds.
+ LastModifyEndTimeStamp *string `locationName:"LastModifyEndTimeStamp" type:"string"`
+}
+
+type Destination struct {
+ // Bucket information stored after exporting the inventory results.
+ KS3BucketDestination *KS3BucketDestination `locationName:"KS3BucketDestination" type:"structure" required:"true"`
+}
+
+type KS3BucketDestination struct {
+ // The file format of the inventory file is a CSV file compressed using GZIP after exporting the manifest file.
+ Format *string `locationName:"Format" type:"string" required:"true"`
+
+ // Bucket owner's account ID.
+ AccountId *string `locationName:"AccountId" type:"string"`
+
+ // Bucket for storing exported inventory files.
+ Bucket *string `locationName:"Bucket" type:"string" required:"true"`
+
+ // The storage path prefix of the inventory file.
+ Prefix *string `locationName:"Prefix" type:"string"`
+}
+
+type Schedule struct {
+ // Cycle of exporting inventory files.
+ Frequency *string `locationName:"Frequency" type:"string" required:"true"`
+}
+
+type OptionalFields struct {
+ // Configuration items included in the inventory results.
+ // Valid values:
+ // Size: The size of the object.
+ // LastModifiedDate: The last modified time of an object.
+ // ETag: The ETag value of an object, used to identify its contents.
+ // StorageClass: The storage type of Object.
+ // IsMultipartUploaded: Is it an object uploaded through shard upload method.
+ // EncryptionStatus: Whether the object is encrypted. If the object is encrypted, the value of this field is True; otherwise, it is False.
+ Field []*string `locationName:"Field" type:"list" flattened:"true"`
+}
+
+// GetBucketInventoryRequest generates a request for the GetBucketInventory operation.
+func (c *S3) GetBucketInventoryRequest(input *GetBucketInventoryInput) (req *aws.Request, output *GetBucketInventoryOutput) {
+ oprw.Lock()
+ defer oprw.Unlock()
+
+ if opGetBucketInventory == nil {
+ opGetBucketInventory = &aws.Operation{
+ Name: "GetBucketInventory",
+ HTTPMethod: "GET",
+ HTTPPath: "/{Bucket}?inventory",
+ }
+ }
+
+ if input == nil {
+ input = &GetBucketInventoryInput{}
+ }
+
+ req = c.newRequest(opGetBucketInventory, input, output)
+ output = &GetBucketInventoryOutput{}
+ req.Data = output
+ return
+}
+
+// GetBucketInventory gets the inventory configuration for the bucket.
+func (c *S3) GetBucketInventory(input *GetBucketInventoryInput) (*GetBucketInventoryOutput, error) {
+ req, out := c.GetBucketInventoryRequest(input)
+ err := req.Send()
+ return out, err
+}
+
+func (c *S3) GetBucketInventoryWithContext(ctx aws.Context, input *GetBucketInventoryInput) (*GetBucketInventoryOutput, error) {
+ req, out := c.GetBucketInventoryRequest(input)
+ req.SetContext(ctx)
+ err := req.Send()
+ return out, err
+}
+
+var opGetBucketInventory *aws.Operation
+
+type GetBucketInventoryInput struct {
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+ Id *string `location:"querystring" locationName:"id" type:"string" required:"true"`
+}
+
+type GetBucketInventoryOutput struct {
+ InventoryConfiguration *InventoryConfiguration `locationName:"Inventory" type:"structure"`
+
+ Metadata map[string]*string `location:"headers" type:"map"`
+
+ StatusCode *int64 `location:"statusCode" type:"integer"`
+
+ metadataGetBucketInventoryOutput `json:"-" xml:"-"`
+}
+
+type metadataGetBucketInventoryOutput struct {
+ SDKShapeTraits bool `type:"structure" payload:"InventoryConfiguration"`
+}
+
+// DeleteBucketInventoryRequest generates a request for the DeleteBucketInventory operation.
+func (c *S3) DeleteBucketInventoryRequest(input *DeleteBucketInventoryInput) (req *aws.Request, output *DeleteBucketInventoryOutput) {
+ oprw.Lock()
+ defer oprw.Unlock()
+
+ if opDeleteBucketInventory == nil {
+ opDeleteBucketInventory = &aws.Operation{
+ Name: "DeleteBucketInventory",
+ HTTPMethod: "DELETE",
+ HTTPPath: "/{Bucket}?inventory",
+ }
+ }
+
+ if input == nil {
+ input = &DeleteBucketInventoryInput{}
+ }
+
+ req = c.newRequest(opDeleteBucketInventory, input, output)
+ output = &DeleteBucketInventoryOutput{}
+ req.Data = output
+ return
+}
+
+// DeleteBucketInventory deletes the inventory configuration for the bucket.
+func (c *S3) DeleteBucketInventory(input *DeleteBucketInventoryInput) (*DeleteBucketInventoryOutput, error) {
+ req, out := c.DeleteBucketInventoryRequest(input)
+ err := req.Send()
+ return out, err
+}
+
+func (c *S3) DeleteBucketInventoryWithContext(ctx aws.Context, input *DeleteBucketInventoryInput) (*DeleteBucketInventoryOutput, error) {
+ req, out := c.DeleteBucketInventoryRequest(input)
+ req.SetContext(ctx)
+ err := req.Send()
+ return out, err
+}
+
+var opDeleteBucketInventory *aws.Operation
+
+type DeleteBucketInventoryInput struct {
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+ Id *string `location:"querystring" locationName:"id" type:"string" required:"true"`
+}
+type DeleteBucketInventoryOutput struct {
+ Metadata map[string]*string `location:"headers" type:"map"`
+
+ StatusCode *int64 `location:"statusCode" type:"integer"`
+}
+
+// ListBucketInventoryRequest generates a request for the ListBucketInventory operation.
+func (c *S3) ListBucketInventoryRequest(input *ListBucketInventoryInput) (req *aws.Request, output *ListBucketInventoryOutput) {
+ oprw.Lock()
+ defer oprw.Unlock()
+
+ if opListBucketInventory == nil {
+ opListBucketInventory = &aws.Operation{
+ Name: "ListBucketInventory",
+ HTTPMethod: "GET",
+ HTTPPath: "/{Bucket}?inventory",
+ }
+ }
+
+ if input == nil {
+ input = &ListBucketInventoryInput{}
+ }
+
+ req = c.newRequest(opListBucketInventory, input, output)
+ output = &ListBucketInventoryOutput{}
+ req.Data = output
+ return
+}
+
+// ListBucketInventory lists the inventory configurations for the bucket.
+func (c *S3) ListBucketInventory(input *ListBucketInventoryInput) (*ListBucketInventoryOutput, error) {
+ req, out := c.ListBucketInventoryRequest(input)
+ err := req.Send()
+ return out, err
+}
+
+func (c *S3) ListBucketInventoryWithContext(ctx aws.Context, input *ListBucketInventoryInput) (*ListBucketInventoryOutput, error) {
+ req, out := c.ListBucketInventoryRequest(input)
+ req.SetContext(ctx)
+ err := req.Send()
+ return out, err
+}
+
+var opListBucketInventory *aws.Operation
+
+type ListBucketInventoryInput struct {
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+ ContinuationToken *string `location:"querystring" locationName:"continuation-token" type:"string"`
+}
+
+type ListInventoryConfigurationsResult struct {
+ InventoryConfigurations []*InventoryConfiguration `locationName:"InventoryConfiguration" type:"list" flattened:"true"`
+
+ IsTruncated *bool `locationName:"IsTruncated" type:"boolean"`
+
+ NextContinuationToken *string `locationName:"NextContinuationToken" type:"string"`
+}
+
+type ListBucketInventoryOutput struct {
+ InventoryConfigurationsResult *ListInventoryConfigurationsResult `locationName:"InventoryConfigurationsResult" type:"structure"`
+
+ Metadata map[string]*string `location:"headers" type:"map"`
+
+ StatusCode *int64 `location:"statusCode" type:"integer"`
+
+ metadataListBucketInventoryOutput `json:"-" xml:"-"`
+}
+
+type metadataListBucketInventoryOutput struct {
+ SDKShapeTraits bool `type:"structure" payload:"InventoryConfigurationsResult"`
+}
diff --git a/service/s3/replication.go b/service/s3/replication.go
new file mode 100644
index 0000000..40bc018
--- /dev/null
+++ b/service/s3/replication.go
@@ -0,0 +1,193 @@
+package s3
+
+import "github.com/ks3sdklib/aws-sdk-go/aws"
+
// PutBucketReplicationRequest generates a request for the PutBucketReplication operation.
func (c *S3) PutBucketReplicationRequest(input *PutBucketReplicationInput) (req *aws.Request, output *PutBucketReplicationOutput) {
	// Serialize lazy initialization of the package-level operation descriptor.
	oprw.Lock()
	defer oprw.Unlock()

	if opPutBucketReplication == nil {
		opPutBucketReplication = &aws.Operation{
			Name: "PutBucketReplication",
			HTTPMethod: "PUT",
			HTTPPath: "/{Bucket}?crr",
		}
	}

	if input == nil {
		input = &PutBucketReplicationInput{}
	}

	// Ask the SDK to compute a Content-MD5 header for the XML payload.
	input.AutoFillMD5 = true
	req = c.newRequest(opPutBucketReplication, input, output)
	// NOTE: output is still nil when passed to newRequest above; the value the
	// caller receives is the one assigned to req.Data below (SDK-wide pattern).
	output = &PutBucketReplicationOutput{}
	req.Data = output
	return
}
+
+// PutBucketReplication creates a new replication configuration.
+func (c *S3) PutBucketReplication(input *PutBucketReplicationInput) (*PutBucketReplicationOutput, error) {
+ req, out := c.PutBucketReplicationRequest(input)
+ err := req.Send()
+ return out, err
+}
+
+func (c *S3) PutBucketReplicationWithContext(ctx aws.Context, input *PutBucketReplicationInput) (*PutBucketReplicationOutput, error) {
+ req, out := c.PutBucketReplicationRequest(input)
+ req.SetContext(ctx)
+ err := req.Send()
+ return out, err
+}
+
// opPutBucketReplication is the lazily initialized operation descriptor for
// PutBucketReplication (see PutBucketReplicationRequest).
var opPutBucketReplication *aws.Operation

// PutBucketReplicationInput is the input for the PutBucketReplication operation.
type PutBucketReplicationInput struct {
	// The name of the bucket to configure replication on.
	Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`

	// The replication configuration, serialized as the XML request body.
	ReplicationConfiguration *ReplicationConfiguration `locationName:"Replication" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"`

	// Optional Content-Type header override for the request.
	ContentType *string `location:"header" locationName:"Content-Type" type:"string"`

	metadataPutBucketReplicationInput `json:"-" xml:"-"`
}

// metadataPutBucketReplicationInput marks ReplicationConfiguration as the payload
// field and carries the AutoFillMD5 flag set by PutBucketReplicationRequest.
type metadataPutBucketReplicationInput struct {
	SDKShapeTraits bool `type:"structure" payload:"ReplicationConfiguration"`

	// When true the SDK computes a Content-MD5 header for the payload.
	AutoFillMD5 bool
}

// PutBucketReplicationOutput is the output of the PutBucketReplication operation.
type PutBucketReplicationOutput struct {
	// Response headers returned by the service.
	Metadata map[string]*string `location:"headers" type:"map"`

	// HTTP status code of the response.
	StatusCode *int64 `location:"statusCode" type:"integer"`
}
+
// ReplicationConfiguration describes a bucket's cross-region replication rule.
type ReplicationConfiguration struct {
	// Prefix matching: only objects that match a prefix rule are copied. Each
	// replication rule can hold up to 10 prefixes, and prefixes must not overlap.
	Prefix []*string `locationName:"prefix" type:"list" flattened:"true"`

	// Whether delete replication is enabled. "Enabled" means enabled; "Disabled"
	// (or unset) means disabled. When enabled, deleting an object in the source
	// bucket also deletes its replica in the target bucket.
	DeleteMarkerStatus *string `locationName:"DeleteMarkerStatus" type:"string" required:"true"`

	// Target bucket that the rule replicates into.
	TargetBucket *string `locationName:"targetBucket" type:"string" required:"true"`

	// Whether to replicate historical data, i.e. objects that already existed in
	// the source bucket before replication was enabled.
	// Enabled: copy historical data to the target bucket (default).
	// Disabled: only replicate objects written after the rule is enabled.
	HistoricalObjectReplication *string `locationName:"HistoricalObjectReplication" type:"string"`

	// Region of the target bucket.
	Region *string `locationName:"region" type:"string"`
}
+
// GetBucketReplicationRequest generates a request for the GetBucketReplication operation.
func (c *S3) GetBucketReplicationRequest(input *GetBucketReplicationInput) (req *aws.Request, output *GetBucketReplicationOutput) {
	// Serialize lazy initialization of the package-level operation descriptor.
	oprw.Lock()
	defer oprw.Unlock()

	if opGetBucketReplication == nil {
		opGetBucketReplication = &aws.Operation{
			Name: "GetBucketReplication",
			HTTPMethod: "GET",
			HTTPPath: "/{Bucket}?crr",
		}
	}

	if input == nil {
		input = &GetBucketReplicationInput{}
	}

	req = c.newRequest(opGetBucketReplication, input, output)
	// NOTE: output is nil when passed to newRequest above; callers receive the
	// value assigned to req.Data below (SDK-wide pattern).
	output = &GetBucketReplicationOutput{}
	req.Data = output
	return
}
+
+// GetBucketReplication gets the replication configuration for the bucket.
+func (c *S3) GetBucketReplication(input *GetBucketReplicationInput) (*GetBucketReplicationOutput, error) {
+ req, out := c.GetBucketReplicationRequest(input)
+ err := req.Send()
+ return out, err
+}
+
+func (c *S3) GetBucketReplicationWithContext(ctx aws.Context, input *GetBucketReplicationInput) (*GetBucketReplicationOutput, error) {
+ req, out := c.GetBucketReplicationRequest(input)
+ req.SetContext(ctx)
+ err := req.Send()
+ return out, err
+}
+
// opGetBucketReplication is the lazily initialized operation descriptor for
// GetBucketReplication (see GetBucketReplicationRequest).
var opGetBucketReplication *aws.Operation

// GetBucketReplicationInput is the input for the GetBucketReplication operation.
type GetBucketReplicationInput struct {
	// The name of the bucket whose replication configuration is fetched.
	Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
}

// GetBucketReplicationOutput is the output of the GetBucketReplication operation.
type GetBucketReplicationOutput struct {
	// The bucket's replication configuration, parsed from the XML payload.
	ReplicationConfiguration *ReplicationConfiguration `locationName:"Replication" type:"structure"`

	// Response headers returned by the service.
	Metadata map[string]*string `location:"headers" type:"map"`

	// HTTP status code of the response.
	StatusCode *int64 `location:"statusCode" type:"integer"`

	metadataGetBucketReplicationOutput `json:"-" xml:"-"`
}

// metadataGetBucketReplicationOutput marks ReplicationConfiguration as the payload field.
type metadataGetBucketReplicationOutput struct {
	SDKShapeTraits bool `type:"structure" payload:"ReplicationConfiguration"`
}
+
// DeleteBucketReplicationRequest generates a request for the DeleteBucketReplication operation.
func (c *S3) DeleteBucketReplicationRequest(input *DeleteBucketReplicationInput) (req *aws.Request, output *DeleteBucketReplicationOutput) {
	// Serialize lazy initialization of the package-level operation descriptor.
	oprw.Lock()
	defer oprw.Unlock()

	if opDeleteBucketReplication == nil {
		opDeleteBucketReplication = &aws.Operation{
			Name: "DeleteBucketReplication",
			HTTPMethod: "DELETE",
			HTTPPath: "/{Bucket}?crr",
		}
	}

	if input == nil {
		input = &DeleteBucketReplicationInput{}
	}

	req = c.newRequest(opDeleteBucketReplication, input, output)
	// NOTE: output is nil when passed to newRequest above; callers receive the
	// value assigned to req.Data below (SDK-wide pattern).
	output = &DeleteBucketReplicationOutput{}
	req.Data = output
	return
}
+
+// DeleteBucketReplication deletes the replication configuration for the bucket.
+func (c *S3) DeleteBucketReplication(input *DeleteBucketReplicationInput) (*DeleteBucketReplicationOutput, error) {
+ req, out := c.DeleteBucketReplicationRequest(input)
+ err := req.Send()
+ return out, err
+}
+
+func (c *S3) DeleteBucketReplicationWithContext(ctx aws.Context, input *DeleteBucketReplicationInput) (*DeleteBucketReplicationOutput, error) {
+ req, out := c.DeleteBucketReplicationRequest(input)
+ req.SetContext(ctx)
+ err := req.Send()
+ return out, err
+}
+
// opDeleteBucketReplication is the lazily initialized operation descriptor for
// DeleteBucketReplication (see DeleteBucketReplicationRequest).
var opDeleteBucketReplication *aws.Operation

// DeleteBucketReplicationInput is the input for the DeleteBucketReplication operation.
type DeleteBucketReplicationInput struct {
	// The name of the bucket whose replication configuration is deleted.
	Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
}

// DeleteBucketReplicationOutput is the output of the DeleteBucketReplication operation.
type DeleteBucketReplicationOutput struct {
	// Response headers returned by the service.
	Metadata map[string]*string `location:"headers" type:"map"`

	// HTTP status code of the response.
	StatusCode *int64 `location:"statusCode" type:"integer"`
}
diff --git a/service/s3/retention.go b/service/s3/retention.go
new file mode 100644
index 0000000..490d980
--- /dev/null
+++ b/service/s3/retention.go
@@ -0,0 +1,387 @@
+package s3
+
+import (
+ "github.com/ks3sdklib/aws-sdk-go/aws"
+ "time"
+)
+
// PutBucketRetentionRequest generates a request for the PutBucketRetention operation.
func (c *S3) PutBucketRetentionRequest(input *PutBucketRetentionInput) (req *aws.Request, output *PutBucketRetentionOutput) {
	// Serialize lazy initialization of the package-level operation descriptor.
	oprw.Lock()
	defer oprw.Unlock()

	if opPutBucketRetention == nil {
		opPutBucketRetention = &aws.Operation{
			Name: "PutBucketRetention",
			HTTPMethod: "PUT",
			HTTPPath: "/{Bucket}?retention",
		}
	}

	if input == nil {
		input = &PutBucketRetentionInput{}
	}

	// Ask the SDK to compute a Content-MD5 header for the XML payload.
	input.AutoFillMD5 = true
	req = c.newRequest(opPutBucketRetention, input, output)
	// NOTE: output is nil when passed to newRequest above; callers receive the
	// value assigned to req.Data below (SDK-wide pattern).
	output = &PutBucketRetentionOutput{}
	req.Data = output
	return
}
+
+// PutBucketRetention sets the retention configuration on a bucket.
+func (c *S3) PutBucketRetention(input *PutBucketRetentionInput) (*PutBucketRetentionOutput, error) {
+ req, out := c.PutBucketRetentionRequest(input)
+ err := req.Send()
+ return out, err
+}
+
+func (c *S3) PutBucketRetentionWithContext(ctx aws.Context, input *PutBucketRetentionInput) (*PutBucketRetentionOutput, error) {
+ req, out := c.PutBucketRetentionRequest(input)
+ req.SetContext(ctx)
+ err := req.Send()
+ return out, err
+}
+
// opPutBucketRetention is the lazily initialized operation descriptor for
// PutBucketRetention (see PutBucketRetentionRequest).
var opPutBucketRetention *aws.Operation

// PutBucketRetentionInput is the input for the PutBucketRetention operation.
type PutBucketRetentionInput struct {
	// The name of the bucket to configure retention on.
	Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`

	// The retention configuration, serialized as the XML request body.
	RetentionConfiguration *BucketRetentionConfiguration `locationName:"RetentionConfiguration" type:"structure"`

	// Optional Content-Type header override for the request.
	ContentType *string `location:"header" locationName:"Content-Type" type:"string"`

	metadataPutBucketRetentionInput `json:"-" xml:"-"`
}

// metadataPutBucketRetentionInput marks RetentionConfiguration as the payload
// field and carries the AutoFillMD5 flag set by PutBucketRetentionRequest.
type metadataPutBucketRetentionInput struct {
	SDKShapeTraits bool `type:"structure" payload:"RetentionConfiguration"`

	// When true the SDK computes a Content-MD5 header for the payload.
	AutoFillMD5 bool
}

// BucketRetentionConfiguration is the retention (recycle bin) configuration of a bucket.
type BucketRetentionConfiguration struct {
	// A container that contains a specific rule for the recycle bin.
	Rule *RetentionRule `locationName:"Rule" type:"structure" required:"true"`
}

// RetentionRule describes a single recycle-bin rule.
type RetentionRule struct {
	// The open status of the recycle bin, not case-sensitive.
	// Valid values: Enabled, Disabled. Enabled turns the recycle bin on; Disabled turns it off.
	Status *string `locationName:"Status" type:"string" required:"true"`

	// How many days after an object enters the recycle bin it is completely deleted.
	// When Days is not set, deleted objects are retained in the recycle bin permanently.
	// Value range: 1-365.
	Days *int64 `locationName:"Days" type:"integer"`
}

// PutBucketRetentionOutput is the output of the PutBucketRetention operation.
type PutBucketRetentionOutput struct {
	// Response headers returned by the service.
	Metadata map[string]*string `location:"headers" type:"map"`

	// HTTP status code of the response.
	StatusCode *int64 `location:"statusCode" type:"integer"`
}
+
// GetBucketRetentionRequest generates a request for the GetBucketRetention operation.
func (c *S3) GetBucketRetentionRequest(input *GetBucketRetentionInput) (req *aws.Request, output *GetBucketRetentionOutput) {
	// Serialize lazy initialization of the package-level operation descriptor.
	oprw.Lock()
	defer oprw.Unlock()

	if opGetBucketRetention == nil {
		opGetBucketRetention = &aws.Operation{
			Name: "GetBucketRetention",
			HTTPMethod: "GET",
			HTTPPath: "/{Bucket}?retention",
		}
	}

	if input == nil {
		input = &GetBucketRetentionInput{}
	}

	req = c.newRequest(opGetBucketRetention, input, output)
	// Pre-populate the nested configuration so the XML unmarshaler has a
	// non-nil destination to decode into.
	output = &GetBucketRetentionOutput{
		RetentionConfiguration: &BucketRetentionConfiguration{},
	}
	req.Data = output
	return
}
+
+// GetBucketRetention gets the retention configuration for the bucket.
+func (c *S3) GetBucketRetention(input *GetBucketRetentionInput) (*GetBucketRetentionOutput, error) {
+ req, out := c.GetBucketRetentionRequest(input)
+ err := req.Send()
+ return out, err
+}
+
+func (c *S3) GetBucketRetentionWithContext(ctx aws.Context, input *GetBucketRetentionInput) (*GetBucketRetentionOutput, error) {
+ req, out := c.GetBucketRetentionRequest(input)
+ req.SetContext(ctx)
+ err := req.Send()
+ return out, err
+}
+
// opGetBucketRetention is the lazily initialized operation descriptor for
// GetBucketRetention (see GetBucketRetentionRequest).
var opGetBucketRetention *aws.Operation

// GetBucketRetentionInput is the input for the GetBucketRetention operation.
type GetBucketRetentionInput struct {
	// The name of the bucket whose retention configuration is fetched.
	Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
}
+
+type GetBucketRetentionOutput struct {
+ RetentionConfiguration *BucketRetentionConfiguration `locationName:"RetentionConfiguration" type:"structure"`
+
+ Metadata map[string]*string `location:"headers" type:"map"`
+
+ StatusCode *int64 `location:"statusCode" type:"integer"`
+
+ metadataGetBucketRetentionInput `json:"-" xml:"-"`
+}
+
+type metadataGetBucketRetentionInput struct {
+ SDKShapeTraits bool `type:"structure" payload:"RetentionConfiguration"`
+}
+
// ListRetentionRequest generates a request for the ListRetention operation.
func (c *S3) ListRetentionRequest(input *ListRetentionInput) (req *aws.Request, output *ListRetentionOutput) {
	// Serialize lazy initialization of the package-level operation descriptor.
	oprw.Lock()
	defer oprw.Unlock()

	if opListRetention == nil {
		opListRetention = &aws.Operation{
			Name: "ListRetention",
			HTTPMethod: "GET",
			HTTPPath: "/{Bucket}?recycle",
		}
	}

	if input == nil {
		input = &ListRetentionInput{}
	}

	req = c.newRequest(opListRetention, input, output)
	// NOTE: output is nil when passed to newRequest above; callers receive the
	// value assigned to req.Data below (SDK-wide pattern).
	output = &ListRetentionOutput{}
	req.Data = output
	return
}
+
+// ListRetention lists the objects in the recycle bin.
+func (c *S3) ListRetention(input *ListRetentionInput) (*ListRetentionOutput, error) {
+ req, out := c.ListRetentionRequest(input)
+ err := req.Send()
+ return out, err
+}
+
+func (c *S3) ListRetentionWithContext(ctx aws.Context, input *ListRetentionInput) (*ListRetentionOutput, error) {
+ req, out := c.ListRetentionRequest(input)
+ req.SetContext(ctx)
+ err := req.Send()
+ return out, err
+}
+
// opListRetention is the lazily initialized operation descriptor for
// ListRetention (see ListRetentionRequest).
var opListRetention *aws.Operation

// ListRetentionInput is the input for the ListRetention operation.
type ListRetentionInput struct {
	// The name of the bucket.
	Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`

	// Specifies the key to start with when listing objects in a bucket.
	Marker *string `location:"querystring" locationName:"marker" type:"string"`

	// Sets the maximum number of keys returned in the response. The response might
	// contain fewer keys but will never contain more.
	MaxKeys *int64 `location:"querystring" locationName:"max-keys" type:"integer"`

	// Limits the response to keys that begin with the specified prefix.
	Prefix *string `location:"querystring" locationName:"prefix" type:"string"`
}

// ListRetentionOutput is the output of the ListRetention operation.
type ListRetentionOutput struct {
	// A container that lists information about the list of objects in the recycle bin.
	ListRetentionResult *ListRetentionResult `locationName:"ListRetentionResult" type:"structure"`

	// Response headers returned by the service.
	Metadata map[string]*string `location:"headers" type:"map"`

	// HTTP status code of the response.
	StatusCode *int64 `location:"statusCode" type:"integer"`

	metadataListRetentionOutput `json:"-" xml:"-"`
}

// metadataListRetentionOutput marks ListRetentionResult as the payload field.
type metadataListRetentionOutput struct {
	SDKShapeTraits bool `type:"structure" payload:"ListRetentionResult"`
}
+
// ListRetentionResult is the XML payload returned when listing the recycle bin.
type ListRetentionResult struct {
	// The name of the bucket.
	Name *string `type:"string"`

	// The key prefix that was requested for this listing.
	Prefix *string `type:"string"`

	// The maximum number of objects returned; 1000 by default.
	MaxKeys *int64 `type:"integer"`

	// The starting position of the listing within the bucket.
	Marker *string `type:"string"`

	// The starting point for the next listing. Users can pass this value as the
	// marker parameter of the next ListRetention call.
	NextMarker *string `type:"string"`

	// Whether the listing was truncated. If the number of records exceeds the
	// configured maximum, the listing is truncated.
	IsTruncated *bool `type:"boolean"`

	// The encoding method applied to object names.
	EncodingType *string `type:"string"`

	// The objects listed from the recycle bin.
	Contents []*RetentionObject `type:"list" flattened:"true"`
}
+
// RetentionObject describes a single object entry in the recycle bin.
type RetentionObject struct {
	// The key of the object.
	Key *string `type:"string"`

	// The size of the object in bytes.
	Size *int64 `type:"integer"`

	// The entity tag (ETag) of the object, generated at upload time to identify its content.
	ETag *string `type:"string"`

	// The last time the object was modified.
	LastModified *time.Time `type:"timestamp" timestampFormat:"iso8601"`

	// The owner information of this bucket.
	Owner *Owner `type:"structure"`

	// The class of storage used to store the object.
	StorageClass *string `type:"string"`

	// The deletion (retention) ID assigned to this entry; used by
	// RecoverObject/ClearObject to target a specific deleted version.
	RetentionId *string `type:"string"`

	// The time when the object was moved to the recycle bin.
	RecycleTime *time.Time `type:"timestamp" timestampFormat:"iso8601"`

	// The estimated time when the object will be completely deleted from the recycle bin.
	EstimatedClearTime *time.Time `type:"timestamp" timestampFormat:"iso8601"`
}
+
// RecoverObjectRequest generates a request for the RecoverObject operation.
func (c *S3) RecoverObjectRequest(input *RecoverObjectInput) (req *aws.Request, output *RecoverObjectOutput) {
	// Serialize lazy initialization of the package-level operation descriptor.
	oprw.Lock()
	defer oprw.Unlock()

	if opRecoverObject == nil {
		opRecoverObject = &aws.Operation{
			Name: "RecoverObject",
			HTTPMethod: "POST",
			HTTPPath: "/{Bucket}/{Key+}?recover",
		}
	}

	if input == nil {
		input = &RecoverObjectInput{}
	}

	req = c.newRequest(opRecoverObject, input, output)
	// NOTE: output is nil when passed to newRequest above; callers receive the
	// value assigned to req.Data below (SDK-wide pattern).
	output = &RecoverObjectOutput{}
	req.Data = output
	return
}
+
+// RecoverObject recovers the object from the recycle bin.
+func (c *S3) RecoverObject(input *RecoverObjectInput) (*RecoverObjectOutput, error) {
+ req, out := c.RecoverObjectRequest(input)
+ err := req.Send()
+ return out, err
+}
+
+func (c *S3) RecoverObjectWithContext(ctx aws.Context, input *RecoverObjectInput) (*RecoverObjectOutput, error) {
+ req, out := c.RecoverObjectRequest(input)
+ req.SetContext(ctx)
+ err := req.Send()
+ return out, err
+}
+
// opRecoverObject is the lazily initialized operation descriptor for
// RecoverObject (see RecoverObjectRequest).
var opRecoverObject *aws.Operation

// RecoverObjectInput is the input for the RecoverObject operation.
type RecoverObjectInput struct {
	// The name of the bucket.
	Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`

	// The key of the object.
	Key *string `location:"uri" locationName:"Key" type:"string" required:"true"`

	// Whether to overwrite when an object with the same name already exists in the
	// bucket after recovery from the recycle bin. When true, the object is
	// overwritten and the overwritten object in the bucket enters the recycle bin.
	RetentionOverwrite *bool `location:"header" locationName:"x-kss-retention-overwrite" type:"boolean"`

	// The deletion ID of the object to recover. When this header is omitted,
	// only the latest version is restored by default.
	RetentionId *string `location:"header" locationName:"x-kss-retention-id" type:"string"`
}

// RecoverObjectOutput is the output of the RecoverObject operation.
type RecoverObjectOutput struct {
	// Response headers returned by the service.
	Metadata map[string]*string `location:"headers" type:"map"`

	// HTTP status code of the response.
	StatusCode *int64 `location:"statusCode" type:"integer"`
}
+
// ClearObjectRequest generates a request for the ClearObject operation.
func (c *S3) ClearObjectRequest(input *ClearObjectInput) (req *aws.Request, output *ClearObjectOutput) {
	// Serialize lazy initialization of the package-level operation descriptor.
	oprw.Lock()
	defer oprw.Unlock()

	if opClearObject == nil {
		opClearObject = &aws.Operation{
			Name: "ClearObject",
			HTTPMethod: "DELETE",
			HTTPPath: "/{Bucket}/{Key+}?clear",
		}
	}

	if input == nil {
		input = &ClearObjectInput{}
	}

	req = c.newRequest(opClearObject, input, output)
	// NOTE: output is nil when passed to newRequest above; callers receive the
	// value assigned to req.Data below (SDK-wide pattern).
	output = &ClearObjectOutput{}
	req.Data = output
	return
}
+
+// ClearObject clears the object from the recycle bin.
+func (c *S3) ClearObject(input *ClearObjectInput) (*ClearObjectOutput, error) {
+ req, out := c.ClearObjectRequest(input)
+ err := req.Send()
+ return out, err
+}
+
+func (c *S3) ClearObjectWithContext(ctx aws.Context, input *ClearObjectInput) (*ClearObjectOutput, error) {
+ req, out := c.ClearObjectRequest(input)
+ req.SetContext(ctx)
+ err := req.Send()
+ return out, err
+}
+
// opClearObject is the lazily initialized operation descriptor for
// ClearObject (see ClearObjectRequest).
var opClearObject *aws.Operation

// ClearObjectInput is the input for the ClearObject operation.
type ClearObjectInput struct {
	// The name of the bucket.
	Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`

	// The key of the object.
	Key *string `location:"uri" locationName:"Key" type:"string" required:"true"`

	// The deletion ID of the recycle-bin entry to permanently delete.
	RetentionId *string `location:"header" locationName:"x-kss-retention-id" type:"string" required:"true"`
}

// ClearObjectOutput is the output of the ClearObject operation.
type ClearObjectOutput struct {
	// Response headers returned by the service.
	Metadata map[string]*string `location:"headers" type:"map"`

	// HTTP status code of the response.
	StatusCode *int64 `location:"statusCode" type:"integer"`
}
diff --git a/service/s3/s3iface/interface.go b/service/s3/s3iface/interface.go
index d430bd1..ab0247d 100755
--- a/service/s3/s3iface/interface.go
+++ b/service/s3/s3iface/interface.go
@@ -21,6 +21,8 @@ type S3API interface {
CreateMultipartUpload(*s3.CreateMultipartUploadInput) (*s3.CreateMultipartUploadOutput, error)
+ ClearObject(*s3.ClearObjectInput) (*s3.ClearObjectOutput, error)
+
DeleteBucket(*s3.DeleteBucketInput) (*s3.DeleteBucketOutput, error)
DeleteBucketCORS(*s3.DeleteBucketCORSInput) (*s3.DeleteBucketCORSOutput, error)
@@ -35,6 +37,10 @@ type S3API interface {
DeleteBucketWebsite(*s3.DeleteBucketWebsiteInput) (*s3.DeleteBucketWebsiteOutput, error)
+ DeleteBucketDecompressPolicy(*s3.DeleteBucketDecompressPolicyInput) (*s3.DeleteBucketDecompressPolicyOutput, error)
+
+ DeleteBucketInventory(*s3.DeleteBucketInventoryInput) (*s3.DeleteBucketInventoryOutput, error)
+
DeleteObject(*s3.DeleteObjectInput) (*s3.DeleteObjectOutput, error)
DeleteObjects(*s3.DeleteObjectsInput) (*s3.DeleteObjectsOutput, error)
@@ -65,6 +71,12 @@ type S3API interface {
GetBucketWebsite(*s3.GetBucketWebsiteInput) (*s3.GetBucketWebsiteOutput, error)
+ GetBucketDecompressPolicy(*s3.GetBucketDecompressPolicyInput) (*s3.GetBucketDecompressPolicyOutput, error)
+
+ GetBucketRetention(*s3.GetBucketRetentionInput) (*s3.GetBucketRetentionOutput, error)
+
+ GetBucketInventory(*s3.GetBucketInventoryInput) (*s3.GetBucketInventoryOutput, error)
+
GetObject(*s3.GetObjectInput) (*s3.GetObjectOutput, error)
GetObjectACL(*s3.GetObjectACLInput) (*s3.GetObjectACLOutput, error)
@@ -85,6 +97,10 @@ type S3API interface {
ListParts(*s3.ListPartsInput) (*s3.ListPartsOutput, error)
+ ListRetention(*s3.ListRetentionInput) (*s3.ListRetentionOutput, error)
+
+ ListBucketInventory(*s3.ListBucketInventoryInput) (*s3.ListBucketInventoryOutput, error)
+
PutBucketACL(*s3.PutBucketACLInput) (*s3.PutBucketACLOutput, error)
PutBucketCORS(*s3.PutBucketCORSInput) (*s3.PutBucketCORSOutput, error)
@@ -109,12 +125,20 @@ type S3API interface {
PutBucketWebsite(*s3.PutBucketWebsiteInput) (*s3.PutBucketWebsiteOutput, error)
+ PutBucketDecompressPolicy(*s3.PutBucketDecompressPolicyInput) (*s3.PutBucketDecompressPolicyOutput, error)
+
+ PutBucketRetention(*s3.PutBucketRetentionInput) (*s3.PutBucketRetentionOutput, error)
+
+ PutBucketInventory(*s3.PutBucketInventoryInput) (*s3.PutBucketInventoryOutput, error)
+
PutObject(*s3.PutObjectInput) (*s3.PutObjectOutput, error)
PutObjectACL(*s3.PutObjectACLInput) (*s3.PutObjectACLOutput, error)
RestoreObject(*s3.RestoreObjectInput) (*s3.RestoreObjectOutput, error)
+ RecoverObject(*s3.RecoverObjectInput) (*s3.RecoverObjectOutput, error)
+
UploadPart(*s3.UploadPartInput) (*s3.UploadPartOutput, error)
UploadPartCopy(*s3.UploadPartCopyInput) (*s3.UploadPartCopyOutput, error)
diff --git a/service/s3/service.go b/service/s3/service.go
index baff037..8263edc 100755
--- a/service/s3/service.go
+++ b/service/s3/service.go
@@ -5,8 +5,8 @@ package s3
import (
"github.com/ks3sdklib/aws-sdk-go/aws"
"github.com/ks3sdklib/aws-sdk-go/internal/protocol/body"
- v2 "github.com/ks3sdklib/aws-sdk-go/internal/signer/v2"
- v4 "github.com/ks3sdklib/aws-sdk-go/internal/signer/v4"
+ "github.com/ks3sdklib/aws-sdk-go/internal/signer/v2"
+ "github.com/ks3sdklib/aws-sdk-go/internal/signer/v4"
"strings"
)
@@ -19,13 +19,7 @@ type S3 struct {
var initService func(*aws.Service)
// Used for custom request initialization logic
-var initRequest = func(r *aws.Request) {
- switch r.Operation.Name {
- case "GetBucketLocation":
- // GetBucketLocation has custom parsing logic
- r.Handlers.Unmarshal.PushFront(buildGetBucketLocation)
- }
-}
+var initRequest func(*aws.Request)
// New returns a new S3 client.
func New(config *aws.Config) *S3 {
diff --git a/service/s3/s3util/util.go b/service/s3/util.go
similarity index 53%
rename from service/s3/s3util/util.go
rename to service/s3/util.go
index f8d0aca..8dd52be 100644
--- a/service/s3/s3util/util.go
+++ b/service/s3/util.go
@@ -1,4 +1,4 @@
-package s3util
+package s3
import (
"crypto/md5"
@@ -56,3 +56,41 @@ func BuildCopySource(bucket *string, key *string) string {
}
return "/" + *bucket + "/" + url.QueryEscape(*key)
}
+
+// GetAcl 获取对象的访问控制权限
+func GetAcl(resp GetObjectACLOutput) CannedAccessControlType {
+ allUsersPermissions := map[string]*string{}
+ for _, value := range resp.Grants {
+ if value.Grantee.URI != nil && *value.Grantee.URI == AllUsersUri {
+ allUsersPermissions[*value.Permission] = value.Permission
+ }
+ }
+ _, read := allUsersPermissions["READ"]
+ _, write := allUsersPermissions["WRITE"]
+ if read && write {
+ return PublicReadWrite
+ } else if read {
+ return PublicRead
+ } else {
+ return Private
+ }
+}
+
+// GetBucketAcl 获取存储空间的访问控制权限
+func GetBucketAcl(resp GetBucketACLOutput) CannedAccessControlType {
+ allUsersPermissions := map[string]*string{}
+ for _, value := range resp.Grants {
+ if value.Grantee.URI != nil && *value.Grantee.URI == AllUsersUri {
+ allUsersPermissions[*value.Permission] = value.Permission
+ }
+ }
+ _, read := allUsersPermissions["READ"]
+ _, write := allUsersPermissions["WRITE"]
+ if read && write {
+ return PublicReadWrite
+ } else if read {
+ return PublicRead
+ } else {
+ return Private
+ }
+}
diff --git a/test/bucketsample_test.go b/test/bucketsample_test.go
index e21960c..671a34f 100644
--- a/test/bucketsample_test.go
+++ b/test/bucketsample_test.go
@@ -327,3 +327,123 @@ func (s *Ks3utilCommandSuite) TestBucketDecompressPolicy(c *C) {
})
c.Assert(err, IsNil)
}
+
// TestBucketRetention exercises the bucket retention (recycle bin) round trip:
// put a retention rule, read it back, list the recycle bin, then clean up.
func (s *Ks3utilCommandSuite) TestBucketRetention(c *C) {
	// Use a dedicated bucket so the retention rule does not affect other tests.
	retentionBucket := commonNamePrefix + randLowStr(10)
	s.CreateBucket(retentionBucket, c)
	_, err := client.PutBucketRetention(&s3.PutBucketRetentionInput{
		Bucket: aws.String(retentionBucket),
		RetentionConfiguration: &s3.BucketRetentionConfiguration{
			Rule: &s3.RetentionRule{
				Status: aws.String("Enabled"),
				Days: aws.Long(30),
			},
		},
	})
	c.Assert(err, IsNil)

	// The configuration read back must match what was written.
	resp, err := client.GetBucketRetention(&s3.GetBucketRetentionInput{
		Bucket: aws.String(retentionBucket),
	})
	c.Assert(err, IsNil)
	c.Assert(*resp.RetentionConfiguration.Rule.Status, Equals, "Enabled")
	c.Assert(*resp.RetentionConfiguration.Rule.Days, Equals, int64(30))

	// Listing the (empty) recycle bin must succeed once retention is enabled.
	_, err = client.ListRetention(&s3.ListRetentionInput{
		Bucket: aws.String(retentionBucket),
	})
	c.Assert(err, IsNil)
	s.DeleteBucket(retentionBucket, c)
}
+
// TestBucketReplication exercises the bucket replication round trip:
// put a replication configuration, read it back field by field, then delete it.
func (s *Ks3utilCommandSuite) TestBucketReplication(c *C) {
	_, err := client.PutBucketReplication(&s3.PutBucketReplicationInput{
		Bucket: aws.String(bucket),
		ReplicationConfiguration: &s3.ReplicationConfiguration{
			Prefix: []*string{aws.String("test/")},
			DeleteMarkerStatus: aws.String("Disabled"),
			// Replicating into the same bucket keeps the test self-contained.
			TargetBucket: aws.String(bucket),
			HistoricalObjectReplication: aws.String("Enabled"),
		},
	})
	c.Assert(err, IsNil)

	// The configuration read back must match what was written.
	resp, err := client.GetBucketReplication(&s3.GetBucketReplicationInput{
		Bucket: aws.String(bucket),
	})
	c.Assert(err, IsNil)
	c.Assert(len(resp.ReplicationConfiguration.Prefix), Equals, 1)
	c.Assert(*resp.ReplicationConfiguration.Prefix[0], Equals, "test/")
	c.Assert(*resp.ReplicationConfiguration.DeleteMarkerStatus, Equals, "Disabled")
	c.Assert(*resp.ReplicationConfiguration.TargetBucket, Equals, bucket)
	c.Assert(*resp.ReplicationConfiguration.HistoricalObjectReplication, Equals, "Enabled")

	// Clean up so the shared bucket carries no replication rule after the test.
	_, err = client.DeleteBucketReplication(&s3.DeleteBucketReplicationInput{
		Bucket: aws.String(bucket),
	})
	c.Assert(err, IsNil)
}
+
// TestBucketInventory exercises the bucket inventory round trip:
// put an inventory configuration, get it, list configurations, then delete it.
func (s *Ks3utilCommandSuite) TestBucketInventory(c *C) {
	// Random id keeps repeated runs from colliding on the shared bucket.
	id := randLowStr(8)
	_, err := client.PutBucketInventory(&s3.PutBucketInventoryInput{
		Bucket: aws.String(bucket),
		Id: aws.String(id),
		InventoryConfiguration: &s3.InventoryConfiguration{
			Id: aws.String(id),
			IsEnabled: aws.Boolean(true),
			Filter: &s3.InventoryFilter{
				Prefix: aws.String("abc/"),
			},
			Destination: &s3.Destination{
				KS3BucketDestination: &s3.KS3BucketDestination{
					Format: aws.String("CSV"),
					Bucket: aws.String(bucket),
					Prefix: aws.String("prefix/"),
				},
			},
			Schedule: &s3.Schedule{
				Frequency: aws.String("Once"),
			},
			OptionalFields: &s3.OptionalFields{
				Field: []*string{
					aws.String("Size"),
					aws.String("LastModifiedDate"),
					aws.String("ETag"),
					aws.String("StorageClass"),
					aws.String("IsMultipartUploaded"),
					aws.String("EncryptionStatus"),
				},
			},
		},
	})
	c.Assert(err, IsNil)

	// The configuration read back must match every field that was written.
	resp, err := client.GetBucketInventory(&s3.GetBucketInventoryInput{
		Bucket: aws.String(bucket),
		Id: aws.String(id),
	})
	c.Assert(err, IsNil)
	c.Assert(*resp.InventoryConfiguration.Id, Equals, id)
	c.Assert(*resp.InventoryConfiguration.IsEnabled, Equals, true)
	c.Assert(*resp.InventoryConfiguration.Filter.Prefix, Equals, "abc/")
	c.Assert(*resp.InventoryConfiguration.Destination.KS3BucketDestination.Format, Equals, "CSV")
	c.Assert(*resp.InventoryConfiguration.Destination.KS3BucketDestination.Bucket, Equals, bucket)
	c.Assert(*resp.InventoryConfiguration.Destination.KS3BucketDestination.Prefix, Equals, "prefix/")
	c.Assert(*resp.InventoryConfiguration.Schedule.Frequency, Equals, "Once")
	c.Assert(len(resp.InventoryConfiguration.OptionalFields.Field), Equals, 6)

	// Listing must show exactly the one configuration created above.
	listResp, err := client.ListBucketInventory(&s3.ListBucketInventoryInput{
		Bucket: aws.String(bucket),
	})
	c.Assert(err, IsNil)
	c.Assert(len(listResp.InventoryConfigurationsResult.InventoryConfigurations), Equals, 1)

	// Clean up the configuration so later runs start from a clean state.
	_, err = client.DeleteBucketInventory(&s3.DeleteBucketInventoryInput{
		Bucket: aws.String(bucket),
		Id: aws.String(id),
	})
	c.Assert(err, IsNil)
}
diff --git a/test/bucketwithcontext_test.go b/test/bucketwithcontext_test.go
index d06602a..0f1fcb1 100644
--- a/test/bucketwithcontext_test.go
+++ b/test/bucketwithcontext_test.go
@@ -910,6 +910,525 @@ func (s *Ks3utilCommandSuite) TestDeleteBucketDecompressPolicyWithContext(c *C)
c.Assert(err, IsNil)
}
+// PUT Bucket Replication
+func (s *Ks3utilCommandSuite) TestPutBucketReplicationWithContext(c *C) {
+ // put,不通过context取消
+ _, err := client.PutBucketReplicationWithContext(context.Background(), &s3.PutBucketReplicationInput{
+ Bucket: aws.String(bucket),
+ ReplicationConfiguration: &s3.ReplicationConfiguration{
+ Prefix: []*string{aws.String("test/")},
+ DeleteMarkerStatus: aws.String("Disabled"),
+ TargetBucket: aws.String(bucket),
+ HistoricalObjectReplication: aws.String("Enabled"),
+ },
+ })
+ c.Assert(err, IsNil)
+ // get
+ resp, err := client.GetBucketReplicationWithContext(context.Background(), &s3.GetBucketReplicationInput{
+ Bucket: aws.String(bucket),
+ })
+ c.Assert(err, IsNil)
+ c.Assert(len(resp.ReplicationConfiguration.Prefix), Equals, 1)
+ c.Assert(*resp.ReplicationConfiguration.Prefix[0], Equals, "test/")
+ c.Assert(*resp.ReplicationConfiguration.DeleteMarkerStatus, Equals, "Disabled")
+ c.Assert(*resp.ReplicationConfiguration.TargetBucket, Equals, bucket)
+ c.Assert(*resp.ReplicationConfiguration.HistoricalObjectReplication, Equals, "Enabled")
+ // put,通过context取消
+ ctx, cancelFunc := context.WithTimeout(context.Background(), bucketTimeout)
+ defer cancelFunc()
+ _, err = client.PutBucketReplicationWithContext(ctx, &s3.PutBucketReplicationInput{
+ Bucket: aws.String(bucket),
+ ReplicationConfiguration: &s3.ReplicationConfiguration{
+ Prefix: []*string{aws.String("test2/")},
+ DeleteMarkerStatus: aws.String("Enabled"),
+ TargetBucket: aws.String(bucket),
+ HistoricalObjectReplication: aws.String("Disabled"),
+ },
+ })
+ c.Assert(err, NotNil)
+ // get
+ resp, err = client.GetBucketReplicationWithContext(context.Background(), &s3.GetBucketReplicationInput{
+ Bucket: aws.String(bucket),
+ })
+ c.Assert(err, IsNil)
+ c.Assert(len(resp.ReplicationConfiguration.Prefix), Equals, 1)
+ c.Assert(*resp.ReplicationConfiguration.Prefix[0], Equals, "test/")
+ c.Assert(*resp.ReplicationConfiguration.DeleteMarkerStatus, Equals, "Disabled")
+ c.Assert(*resp.ReplicationConfiguration.TargetBucket, Equals, bucket)
+ c.Assert(*resp.ReplicationConfiguration.HistoricalObjectReplication, Equals, "Enabled")
+ // delete
+ _, err = client.DeleteBucketReplicationWithContext(context.Background(), &s3.DeleteBucketReplicationInput{
+ Bucket: aws.String(bucket),
+ })
+ c.Assert(err, IsNil)
+}
+
+// GET Bucket Replication
+func (s *Ks3utilCommandSuite) TestGetBucketReplicationWithContext(c *C) {
+ // put
+ _, err := client.PutBucketReplicationWithContext(context.Background(), &s3.PutBucketReplicationInput{
+ Bucket: aws.String(bucket),
+ ReplicationConfiguration: &s3.ReplicationConfiguration{
+ Prefix: []*string{aws.String("test/")},
+ DeleteMarkerStatus: aws.String("Disabled"),
+ TargetBucket: aws.String(bucket),
+ HistoricalObjectReplication: aws.String("Enabled"),
+ },
+ })
+ c.Assert(err, IsNil)
+ // get,不通过context取消
+ resp, err := client.GetBucketReplicationWithContext(context.Background(), &s3.GetBucketReplicationInput{
+ Bucket: aws.String(bucket),
+ })
+ c.Assert(err, IsNil)
+ c.Assert(len(resp.ReplicationConfiguration.Prefix), Equals, 1)
+ c.Assert(*resp.ReplicationConfiguration.Prefix[0], Equals, "test/")
+ c.Assert(*resp.ReplicationConfiguration.DeleteMarkerStatus, Equals, "Disabled")
+ c.Assert(*resp.ReplicationConfiguration.TargetBucket, Equals, bucket)
+ c.Assert(*resp.ReplicationConfiguration.HistoricalObjectReplication, Equals, "Enabled")
+ // get,通过context取消
+ ctx, cancelFunc := context.WithTimeout(context.Background(), bucketTimeout)
+ defer cancelFunc()
+ resp, err = client.GetBucketReplicationWithContext(ctx, &s3.GetBucketReplicationInput{
+ Bucket: aws.String(bucket),
+ })
+ c.Assert(err, NotNil)
+ // delete
+ _, err = client.DeleteBucketReplicationWithContext(context.Background(), &s3.DeleteBucketReplicationInput{
+ Bucket: aws.String(bucket),
+ })
+ c.Assert(err, IsNil)
+}
+
+// DELETE Bucket Replication
+func (s *Ks3utilCommandSuite) TestDeleteBucketReplicationWithContext(c *C) {
+ // put
+ _, err := client.PutBucketReplicationWithContext(context.Background(), &s3.PutBucketReplicationInput{
+ Bucket: aws.String(bucket),
+ ReplicationConfiguration: &s3.ReplicationConfiguration{
+ Prefix: []*string{aws.String("test/")},
+ DeleteMarkerStatus: aws.String("Disabled"),
+ TargetBucket: aws.String(bucket),
+ HistoricalObjectReplication: aws.String("Enabled"),
+ },
+ })
+ c.Assert(err, IsNil)
+ // get
+ resp, err := client.GetBucketReplicationWithContext(context.Background(), &s3.GetBucketReplicationInput{
+ Bucket: aws.String(bucket),
+ })
+ c.Assert(err, IsNil)
+ c.Assert(len(resp.ReplicationConfiguration.Prefix), Equals, 1)
+ c.Assert(*resp.ReplicationConfiguration.Prefix[0], Equals, "test/")
+ c.Assert(*resp.ReplicationConfiguration.DeleteMarkerStatus, Equals, "Disabled")
+ c.Assert(*resp.ReplicationConfiguration.TargetBucket, Equals, bucket)
+ c.Assert(*resp.ReplicationConfiguration.HistoricalObjectReplication, Equals, "Enabled")
+ // delete,通过context取消
+ ctx, cancelFunc := context.WithTimeout(context.Background(), bucketTimeout)
+ defer cancelFunc()
+ _, err = client.DeleteBucketReplicationWithContext(ctx, &s3.DeleteBucketReplicationInput{
+ Bucket: aws.String(bucket),
+ })
+ c.Assert(err, NotNil)
+ // delete,不通过context取消
+ _, err = client.DeleteBucketReplicationWithContext(context.Background(), &s3.DeleteBucketReplicationInput{
+ Bucket: aws.String(bucket),
+ })
+ c.Assert(err, IsNil)
+}
+
+// PUT Bucket Retention
+func (s *Ks3utilCommandSuite) TestPutBucketRetentionWithContext(c *C) {
+ retentionBucket := commonNamePrefix + randLowStr(10)
+ s.CreateBucket(retentionBucket, c)
+ // put,不通过context取消
+ _, err := client.PutBucketRetentionWithContext(context.Background(), &s3.PutBucketRetentionInput{
+ Bucket: aws.String(retentionBucket),
+ RetentionConfiguration: &s3.BucketRetentionConfiguration{
+ Rule: &s3.RetentionRule{
+ Status: aws.String("Enabled"),
+ Days: aws.Long(30),
+ },
+ },
+ })
+ c.Assert(err, IsNil)
+ // get
+ resp, err := client.GetBucketRetentionWithContext(context.Background(), &s3.GetBucketRetentionInput{
+ Bucket: aws.String(retentionBucket),
+ })
+ c.Assert(err, IsNil)
+ c.Assert(*resp.RetentionConfiguration.Rule.Status, Equals, "Enabled")
+ c.Assert(*resp.RetentionConfiguration.Rule.Days, Equals, int64(30))
+ // put,通过context取消
+ ctx, cancelFunc := context.WithTimeout(context.Background(), bucketTimeout)
+ defer cancelFunc()
+ _, err = client.PutBucketRetentionWithContext(ctx, &s3.PutBucketRetentionInput{
+ Bucket: aws.String(retentionBucket),
+ RetentionConfiguration: &s3.BucketRetentionConfiguration{
+ Rule: &s3.RetentionRule{
+ Status: aws.String("Enabled"),
+ Days: aws.Long(60),
+ },
+ },
+ })
+ c.Assert(err, NotNil)
+ // get
+ resp, err = client.GetBucketRetentionWithContext(context.Background(), &s3.GetBucketRetentionInput{
+ Bucket: aws.String(retentionBucket),
+ })
+ c.Assert(err, IsNil)
+ c.Assert(*resp.RetentionConfiguration.Rule.Status, Equals, "Enabled")
+ c.Assert(*resp.RetentionConfiguration.Rule.Days, Equals, int64(30))
+ s.DeleteBucket(retentionBucket, c)
+}
+
+// GET Bucket Retention
+func (s *Ks3utilCommandSuite) TestGetBucketRetentionWithContext(c *C) {
+ retentionBucket := commonNamePrefix + randLowStr(10)
+ s.CreateBucket(retentionBucket, c)
+ // put
+ _, err := client.PutBucketRetentionWithContext(context.Background(), &s3.PutBucketRetentionInput{
+ Bucket: aws.String(retentionBucket),
+ RetentionConfiguration: &s3.BucketRetentionConfiguration{
+ Rule: &s3.RetentionRule{
+ Status: aws.String("Enabled"),
+ Days: aws.Long(30),
+ },
+ },
+ })
+ c.Assert(err, IsNil)
+ // get,不通过context取消
+ resp, err := client.GetBucketRetentionWithContext(context.Background(), &s3.GetBucketRetentionInput{
+ Bucket: aws.String(retentionBucket),
+ })
+ c.Assert(err, IsNil)
+ c.Assert(*resp.RetentionConfiguration.Rule.Status, Equals, "Enabled")
+ c.Assert(*resp.RetentionConfiguration.Rule.Days, Equals, int64(30))
+ // get,通过context取消
+ ctx, cancelFunc := context.WithTimeout(context.Background(), bucketTimeout)
+ defer cancelFunc()
+ resp, err = client.GetBucketRetentionWithContext(ctx, &s3.GetBucketRetentionInput{
+ Bucket: aws.String(retentionBucket),
+ })
+ c.Assert(err, NotNil)
+ s.DeleteBucket(retentionBucket, c)
+}
+
+// List Retention
+func (s *Ks3utilCommandSuite) TestListBucketRetentionWithContext(c *C) {
+ retentionBucket := commonNamePrefix + randLowStr(10)
+ s.CreateBucket(retentionBucket, c)
+ // put
+ _, err := client.PutBucketRetentionWithContext(context.Background(), &s3.PutBucketRetentionInput{
+ Bucket: aws.String(retentionBucket),
+ RetentionConfiguration: &s3.BucketRetentionConfiguration{
+ Rule: &s3.RetentionRule{
+ Status: aws.String("Enabled"),
+ Days: aws.Long(30),
+ },
+ },
+ })
+ c.Assert(err, IsNil)
+ // list,不通过context取消
+ resp, err := client.ListRetentionWithContext(context.Background(), &s3.ListRetentionInput{
+ Bucket: aws.String(retentionBucket),
+ Prefix: aws.String("test/"),
+ })
+ c.Assert(err, IsNil)
+ c.Assert(*resp.ListRetentionResult.Prefix, Equals, "test/")
+ // list,通过context取消
+ ctx, cancelFunc := context.WithTimeout(context.Background(), bucketTimeout)
+ defer cancelFunc()
+ resp, err = client.ListRetentionWithContext(ctx, &s3.ListRetentionInput{
+ Bucket: aws.String(retentionBucket),
+ Prefix: aws.String("test/"),
+ })
+ c.Assert(err, NotNil)
+ s.DeleteBucket(retentionBucket, c)
+}
+
+// PUT Bucket Inventory
+func (s *Ks3utilCommandSuite) TestPutBucketInventoryWithContext(c *C) {
+ id := randLowStr(8)
+ // put,不通过context取消
+ _, err := client.PutBucketInventoryWithContext(context.Background(), &s3.PutBucketInventoryInput{
+ Bucket: aws.String(bucket),
+ Id: aws.String(id),
+ InventoryConfiguration: &s3.InventoryConfiguration{
+ Id: aws.String(id),
+ IsEnabled: aws.Boolean(true),
+ Filter: &s3.InventoryFilter{
+ Prefix: aws.String("abc/"),
+ },
+ Destination: &s3.Destination{
+ KS3BucketDestination: &s3.KS3BucketDestination{
+ Format: aws.String("CSV"),
+ Bucket: aws.String(bucket),
+ Prefix: aws.String("prefix/"),
+ },
+ },
+ Schedule: &s3.Schedule{
+ Frequency: aws.String("Once"),
+ },
+ OptionalFields: &s3.OptionalFields{
+ Field: []*string{
+ aws.String("Size"),
+ aws.String("LastModifiedDate"),
+ aws.String("ETag"),
+ aws.String("StorageClass"),
+ aws.String("IsMultipartUploaded"),
+ aws.String("EncryptionStatus"),
+ },
+ },
+ },
+ })
+ c.Assert(err, IsNil)
+ // get
+ resp, err := client.GetBucketInventoryWithContext(context.Background(), &s3.GetBucketInventoryInput{
+ Bucket: aws.String(bucket),
+ Id: aws.String(id),
+ })
+ c.Assert(err, IsNil)
+ c.Assert(*resp.InventoryConfiguration.Id, Equals, id)
+ c.Assert(*resp.InventoryConfiguration.IsEnabled, Equals, true)
+ c.Assert(*resp.InventoryConfiguration.Filter.Prefix, Equals, "abc/")
+ c.Assert(*resp.InventoryConfiguration.Destination.KS3BucketDestination.Format, Equals, "CSV")
+ c.Assert(*resp.InventoryConfiguration.Destination.KS3BucketDestination.Bucket, Equals, bucket)
+ c.Assert(*resp.InventoryConfiguration.Destination.KS3BucketDestination.Prefix, Equals, "prefix/")
+ c.Assert(*resp.InventoryConfiguration.Schedule.Frequency, Equals, "Once")
+ c.Assert(len(resp.InventoryConfiguration.OptionalFields.Field), Equals, 6)
+ // put,通过context取消
+ ctx, cancelFunc := context.WithTimeout(context.Background(), bucketTimeout)
+ defer cancelFunc()
+ _, err = client.PutBucketInventoryWithContext(ctx, &s3.PutBucketInventoryInput{
+ Bucket: aws.String(bucket),
+ Id: aws.String(id),
+ InventoryConfiguration: &s3.InventoryConfiguration{
+ Id: aws.String(id),
+ IsEnabled: aws.Boolean(true),
+ Filter: &s3.InventoryFilter{
+ Prefix: aws.String("abc/"),
+ },
+ Destination: &s3.Destination{
+ KS3BucketDestination: &s3.KS3BucketDestination{
+ Format: aws.String("CSV"),
+ Bucket: aws.String(bucket),
+ Prefix: aws.String("prefix/"),
+ },
+ },
+ Schedule: &s3.Schedule{
+ Frequency: aws.String("Once"),
+ },
+ OptionalFields: &s3.OptionalFields{
+ Field: []*string{
+ aws.String("Size"),
+ aws.String("LastModifiedDate"),
+ aws.String("ETag"),
+ aws.String("StorageClass"),
+ aws.String("IsMultipartUploaded"),
+ aws.String("EncryptionStatus"),
+ },
+ },
+ },
+ })
+ c.Assert(err, NotNil)
+ // delete
+ _, err = client.DeleteBucketInventoryWithContext(context.Background(), &s3.DeleteBucketInventoryInput{
+ Bucket: aws.String(bucket),
+ Id: aws.String(id),
+ })
+ c.Assert(err, IsNil)
+}
+
+// GET Bucket Inventory
+func (s *Ks3utilCommandSuite) TestGetBucketInventoryWithContext(c *C) {
+ id := randLowStr(8)
+ // put,不通过context取消
+ _, err := client.PutBucketInventoryWithContext(context.Background(), &s3.PutBucketInventoryInput{
+ Bucket: aws.String(bucket),
+ Id: aws.String(id),
+ InventoryConfiguration: &s3.InventoryConfiguration{
+ Id: aws.String(id),
+ IsEnabled: aws.Boolean(true),
+ Filter: &s3.InventoryFilter{
+ Prefix: aws.String("abc/"),
+ },
+ Destination: &s3.Destination{
+ KS3BucketDestination: &s3.KS3BucketDestination{
+ Format: aws.String("CSV"),
+ Bucket: aws.String(bucket),
+ Prefix: aws.String("prefix/"),
+ },
+ },
+ Schedule: &s3.Schedule{
+ Frequency: aws.String("Once"),
+ },
+ OptionalFields: &s3.OptionalFields{
+ Field: []*string{
+ aws.String("Size"),
+ aws.String("LastModifiedDate"),
+ aws.String("ETag"),
+ aws.String("StorageClass"),
+ aws.String("IsMultipartUploaded"),
+ aws.String("EncryptionStatus"),
+ },
+ },
+ },
+ })
+ c.Assert(err, IsNil)
+ // get
+ resp, err := client.GetBucketInventoryWithContext(context.Background(), &s3.GetBucketInventoryInput{
+ Bucket: aws.String(bucket),
+ Id: aws.String(id),
+ })
+ c.Assert(err, IsNil)
+ c.Assert(*resp.InventoryConfiguration.Id, Equals, id)
+ c.Assert(*resp.InventoryConfiguration.IsEnabled, Equals, true)
+ c.Assert(*resp.InventoryConfiguration.Filter.Prefix, Equals, "abc/")
+ c.Assert(*resp.InventoryConfiguration.Destination.KS3BucketDestination.Format, Equals, "CSV")
+ c.Assert(*resp.InventoryConfiguration.Destination.KS3BucketDestination.Bucket, Equals, bucket)
+ c.Assert(*resp.InventoryConfiguration.Destination.KS3BucketDestination.Prefix, Equals, "prefix/")
+ c.Assert(*resp.InventoryConfiguration.Schedule.Frequency, Equals, "Once")
+ c.Assert(len(resp.InventoryConfiguration.OptionalFields.Field), Equals, 6)
+	// get,通过context取消
+ ctx, cancelFunc := context.WithTimeout(context.Background(), bucketTimeout)
+ defer cancelFunc()
+ _, err = client.GetBucketInventoryWithContext(ctx, &s3.GetBucketInventoryInput{
+ Bucket: aws.String(bucket),
+ Id: aws.String(id),
+ })
+ c.Assert(err, NotNil)
+ // delete
+ _, err = client.DeleteBucketInventoryWithContext(context.Background(), &s3.DeleteBucketInventoryInput{
+ Bucket: aws.String(bucket),
+ Id: aws.String(id),
+ })
+ c.Assert(err, IsNil)
+}
+
+// DELETE Bucket Inventory
+func (s *Ks3utilCommandSuite) TestDeleteBucketInventoryWithContext(c *C) {
+ id := randLowStr(8)
+ // put,不通过context取消
+ _, err := client.PutBucketInventoryWithContext(context.Background(), &s3.PutBucketInventoryInput{
+ Bucket: aws.String(bucket),
+ Id: aws.String(id),
+ InventoryConfiguration: &s3.InventoryConfiguration{
+ Id: aws.String(id),
+ IsEnabled: aws.Boolean(true),
+ Filter: &s3.InventoryFilter{
+ Prefix: aws.String("abc/"),
+ },
+ Destination: &s3.Destination{
+ KS3BucketDestination: &s3.KS3BucketDestination{
+ Format: aws.String("CSV"),
+ Bucket: aws.String(bucket),
+ Prefix: aws.String("prefix/"),
+ },
+ },
+ Schedule: &s3.Schedule{
+ Frequency: aws.String("Once"),
+ },
+ OptionalFields: &s3.OptionalFields{
+ Field: []*string{
+ aws.String("Size"),
+ aws.String("LastModifiedDate"),
+ aws.String("ETag"),
+ aws.String("StorageClass"),
+ aws.String("IsMultipartUploaded"),
+ aws.String("EncryptionStatus"),
+ },
+ },
+ },
+ })
+ c.Assert(err, IsNil)
+ // get
+ resp, err := client.GetBucketInventoryWithContext(context.Background(), &s3.GetBucketInventoryInput{
+ Bucket: aws.String(bucket),
+ Id: aws.String(id),
+ })
+ c.Assert(err, IsNil)
+ c.Assert(*resp.InventoryConfiguration.Id, Equals, id)
+ c.Assert(*resp.InventoryConfiguration.IsEnabled, Equals, true)
+ c.Assert(*resp.InventoryConfiguration.Filter.Prefix, Equals, "abc/")
+ c.Assert(*resp.InventoryConfiguration.Destination.KS3BucketDestination.Format, Equals, "CSV")
+ c.Assert(*resp.InventoryConfiguration.Destination.KS3BucketDestination.Bucket, Equals, bucket)
+ c.Assert(*resp.InventoryConfiguration.Destination.KS3BucketDestination.Prefix, Equals, "prefix/")
+ c.Assert(*resp.InventoryConfiguration.Schedule.Frequency, Equals, "Once")
+ c.Assert(len(resp.InventoryConfiguration.OptionalFields.Field), Equals, 6)
+ // delete,通过context取消
+ ctx, cancelFunc := context.WithTimeout(context.Background(), bucketTimeout)
+ defer cancelFunc()
+ _, err = client.DeleteBucketInventoryWithContext(ctx, &s3.DeleteBucketInventoryInput{
+ Bucket: aws.String(bucket),
+ Id: aws.String(id),
+ })
+ c.Assert(err, NotNil)
+ // delete
+ _, err = client.DeleteBucketInventoryWithContext(context.Background(), &s3.DeleteBucketInventoryInput{
+ Bucket: aws.String(bucket),
+ Id: aws.String(id),
+ })
+ c.Assert(err, IsNil)
+}
+
+// List Bucket Inventory
+func (s *Ks3utilCommandSuite) TestListBucketInventoryWithContext(c *C) {
+ id := randLowStr(8)
+ // put,不通过context取消
+ _, err := client.PutBucketInventoryWithContext(context.Background(), &s3.PutBucketInventoryInput{
+ Bucket: aws.String(bucket),
+ Id: aws.String(id),
+ InventoryConfiguration: &s3.InventoryConfiguration{
+ Id: aws.String(id),
+ IsEnabled: aws.Boolean(true),
+ Filter: &s3.InventoryFilter{
+ Prefix: aws.String("abc/"),
+ },
+ Destination: &s3.Destination{
+ KS3BucketDestination: &s3.KS3BucketDestination{
+ Format: aws.String("CSV"),
+ Bucket: aws.String(bucket),
+ Prefix: aws.String("prefix/"),
+ },
+ },
+ Schedule: &s3.Schedule{
+ Frequency: aws.String("Once"),
+ },
+ OptionalFields: &s3.OptionalFields{
+ Field: []*string{
+ aws.String("Size"),
+ aws.String("LastModifiedDate"),
+ aws.String("ETag"),
+ aws.String("StorageClass"),
+ aws.String("IsMultipartUploaded"),
+ aws.String("EncryptionStatus"),
+ },
+ },
+ },
+ })
+ c.Assert(err, IsNil)
+ // list
+ resp, err := client.ListBucketInventoryWithContext(context.Background(), &s3.ListBucketInventoryInput{
+ Bucket: aws.String(bucket),
+ })
+ c.Assert(err, IsNil)
+ c.Assert(len(resp.InventoryConfigurationsResult.InventoryConfigurations), Equals, 1)
+ // list,通过context取消
+ ctx, cancelFunc := context.WithTimeout(context.Background(), bucketTimeout)
+ defer cancelFunc()
+ _, err = client.ListBucketInventoryWithContext(ctx, &s3.ListBucketInventoryInput{
+ Bucket: aws.String(bucket),
+ })
+ c.Assert(err, NotNil)
+ // delete
+ _, err = client.DeleteBucketInventoryWithContext(context.Background(), &s3.DeleteBucketInventoryInput{
+ Bucket: aws.String(bucket),
+ Id: aws.String(id),
+ })
+ c.Assert(err, IsNil)
+}
+
// PUT Bucket ACL
func (s *Ks3utilCommandSuite) TestPutBucketACLWithContext(c *C) {
// put,不通过context取消
diff --git a/test/common_test.go b/test/common_test.go
index 7008c8a..fa94595 100644
--- a/test/common_test.go
+++ b/test/common_test.go
@@ -227,3 +227,17 @@ func (s *Ks3utilCommandSuite) DeleteObject(key string, c *C) {
})
c.Assert(err, IsNil)
}
+
+func (s *Ks3utilCommandSuite) CreateBucket(bucketName string, c *C) {
+ _, err := client.CreateBucket(&s3.CreateBucketInput{
+ Bucket: aws.String(bucketName),
+ })
+ c.Assert(err, IsNil)
+}
+
+func (s *Ks3utilCommandSuite) DeleteBucket(bucketName string, c *C) {
+ _, err := client.DeleteBucket(&s3.DeleteBucketInput{
+ Bucket: aws.String(bucketName),
+ })
+ c.Assert(err, IsNil)
+}
diff --git a/test/object_encryption_test.go b/test/object_encryption_test.go
index c883199..772f945 100644
--- a/test/object_encryption_test.go
+++ b/test/object_encryption_test.go
@@ -5,7 +5,6 @@ import (
"context"
"github.com/ks3sdklib/aws-sdk-go/aws"
"github.com/ks3sdklib/aws-sdk-go/service/s3"
- "github.com/ks3sdklib/aws-sdk-go/service/s3/s3util"
. "gopkg.in/check.v1"
"io"
"os"
@@ -63,34 +62,34 @@ func (s *Ks3utilCommandSuite) TestPutObjectWithSSE_C(c *C) {
Key: aws.String(object),
Body: fd,
SSECustomerAlgorithm: aws.String("AES256"),
- SSECustomerKey: aws.String(s3util.GetBase64Str(customerKey)),
- SSECustomerKeyMD5: aws.String(s3util.GetBase64MD5Str(customerKey)),
+ SSECustomerKey: aws.String(s3.GetBase64Str(customerKey)),
+ SSECustomerKeyMD5: aws.String(s3.GetBase64MD5Str(customerKey)),
})
c.Assert(err, IsNil)
c.Assert(*sseResp.SSECustomerAlgorithm, Equals, "AES256")
- c.Assert(*sseResp.SSECustomerKeyMD5, Equals, s3util.GetBase64MD5Str(customerKey))
+ c.Assert(*sseResp.SSECustomerKeyMD5, Equals, s3.GetBase64MD5Str(customerKey))
// head
headResp, err := client.HeadObjectWithContext(context.Background(), &s3.HeadObjectInput{
Bucket: aws.String(bucket),
Key: aws.String(object),
SSECustomerAlgorithm: aws.String("AES256"),
- SSECustomerKey: aws.String(s3util.GetBase64Str(customerKey)),
- SSECustomerKeyMD5: aws.String(s3util.GetBase64MD5Str(customerKey)),
+ SSECustomerKey: aws.String(s3.GetBase64Str(customerKey)),
+ SSECustomerKeyMD5: aws.String(s3.GetBase64MD5Str(customerKey)),
})
c.Assert(err, IsNil)
c.Assert(*headResp.SSECustomerAlgorithm, Equals, "AES256")
- c.Assert(*sseResp.SSECustomerKeyMD5, Equals, s3util.GetBase64MD5Str(customerKey))
+ c.Assert(*sseResp.SSECustomerKeyMD5, Equals, s3.GetBase64MD5Str(customerKey))
// get
getResp, err := client.GetObjectWithContext(context.Background(), &s3.GetObjectInput{
Bucket: aws.String(bucket),
Key: aws.String(object),
SSECustomerAlgorithm: aws.String("AES256"),
- SSECustomerKey: aws.String(s3util.GetBase64Str(customerKey)),
- SSECustomerKeyMD5: aws.String(s3util.GetBase64MD5Str(customerKey)),
+ SSECustomerKey: aws.String(s3.GetBase64Str(customerKey)),
+ SSECustomerKeyMD5: aws.String(s3.GetBase64MD5Str(customerKey)),
})
c.Assert(err, IsNil)
c.Assert(*getResp.SSECustomerAlgorithm, Equals, "AES256")
- c.Assert(*sseResp.SSECustomerKeyMD5, Equals, s3util.GetBase64MD5Str(customerKey))
+ c.Assert(*sseResp.SSECustomerKeyMD5, Equals, s3.GetBase64MD5Str(customerKey))
// delete
_, err = client.DeleteObjectWithContext(context.Background(), &s3.DeleteObjectInput{
Bucket: aws.String(bucket),
@@ -159,12 +158,12 @@ func (s *Ks3utilCommandSuite) TestCopyObjectWithSSE_C(c *C) {
Key: aws.String(dstObject),
CopySource: aws.String("/" + bucket + "/" + object),
SSECustomerAlgorithm: aws.String("AES256"),
- SSECustomerKey: aws.String(s3util.GetBase64Str(customerKey)),
- SSECustomerKeyMD5: aws.String(s3util.GetBase64MD5Str(customerKey)),
+ SSECustomerKey: aws.String(s3.GetBase64Str(customerKey)),
+ SSECustomerKeyMD5: aws.String(s3.GetBase64MD5Str(customerKey)),
})
c.Assert(err, IsNil)
c.Assert(*copyResp.SSECustomerAlgorithm, Equals, "AES256")
- c.Assert(*copyResp.SSECustomerKeyMD5, Equals, s3util.GetBase64MD5Str(customerKey))
+ c.Assert(*copyResp.SSECustomerKeyMD5, Equals, s3.GetBase64MD5Str(customerKey))
// delete
_, err = client.DeleteObjectWithContext(context.Background(), &s3.DeleteObjectInput{
Bucket: aws.String(bucket),
@@ -196,10 +195,9 @@ func (s *Ks3utilCommandSuite) TestMultipartUploadWithSSE_S3(c *C) {
c.Assert(*initRet.ServerSideEncryption, Equals, "AES256")
// 获取分块上传Id
uploadId := *initRet.UploadID
- var i int64 = 1
+ var partNum int64 = 1
// 待合并分块
- compParts := []*s3.CompletedPart{}
- partsNum := []int64{0}
+ var compParts []*s3.CompletedPart
// 缓冲区,分块大小为5MB
buffer := make([]byte, 5*1024*1024)
for {
@@ -213,15 +211,14 @@ func (s *Ks3utilCommandSuite) TestMultipartUploadWithSSE_S3(c *C) {
resp, err := client.UploadPartWithContext(context.Background(), &s3.UploadPartInput{
Bucket: aws.String(bucket),
Key: aws.String(object),
- PartNumber: aws.Long(i),
+ PartNumber: aws.Long(partNum),
UploadID: aws.String(uploadId),
Body: bytes.NewReader(buffer[:n]),
})
c.Assert(err, IsNil)
c.Assert(*resp.ServerSideEncryption, Equals, "AES256")
- partsNum = append(partsNum, i)
- compParts = append(compParts, &s3.CompletedPart{PartNumber: &partsNum[i], ETag: resp.ETag})
- i++
+ compParts = append(compParts, &s3.CompletedPart{PartNumber: aws.Long(partNum), ETag: resp.ETag})
+ partNum++
}
}
// complete
@@ -256,18 +253,17 @@ func (s *Ks3utilCommandSuite) TestMultipartUploadWithSSE_C(c *C) {
Bucket: aws.String(bucket),
Key: aws.String(object),
SSECustomerAlgorithm: aws.String("AES256"),
- SSECustomerKey: aws.String(s3util.GetBase64Str(customerKey)),
- SSECustomerKeyMD5: aws.String(s3util.GetBase64MD5Str(customerKey)),
+ SSECustomerKey: aws.String(s3.GetBase64Str(customerKey)),
+ SSECustomerKeyMD5: aws.String(s3.GetBase64MD5Str(customerKey)),
})
c.Assert(err, IsNil)
c.Assert(*initRet.SSECustomerAlgorithm, Equals, "AES256")
- c.Assert(*initRet.SSECustomerKeyMD5, Equals, s3util.GetBase64MD5Str(customerKey))
+ c.Assert(*initRet.SSECustomerKeyMD5, Equals, s3.GetBase64MD5Str(customerKey))
// 获取分块上传Id
uploadId := *initRet.UploadID
- var i int64 = 1
+ var partNum int64 = 1
// 待合并分块
- compParts := []*s3.CompletedPart{}
- partsNum := []int64{0}
+ var compParts []*s3.CompletedPart
// 缓冲区,分块大小为5MB
buffer := make([]byte, 5*1024*1024)
for {
@@ -281,19 +277,18 @@ func (s *Ks3utilCommandSuite) TestMultipartUploadWithSSE_C(c *C) {
resp, err := client.UploadPartWithContext(context.Background(), &s3.UploadPartInput{
Bucket: aws.String(bucket),
Key: aws.String(object),
- PartNumber: aws.Long(i),
+ PartNumber: aws.Long(partNum),
UploadID: aws.String(uploadId),
Body: bytes.NewReader(buffer[:n]),
SSECustomerAlgorithm: aws.String("AES256"),
- SSECustomerKey: aws.String(s3util.GetBase64Str(customerKey)),
- SSECustomerKeyMD5: aws.String(s3util.GetBase64MD5Str(customerKey)),
+ SSECustomerKey: aws.String(s3.GetBase64Str(customerKey)),
+ SSECustomerKeyMD5: aws.String(s3.GetBase64MD5Str(customerKey)),
})
c.Assert(err, IsNil)
c.Assert(*resp.SSECustomerAlgorithm, Equals, "AES256")
- c.Assert(*resp.SSECustomerKeyMD5, Equals, s3util.GetBase64MD5Str(customerKey))
- partsNum = append(partsNum, i)
- compParts = append(compParts, &s3.CompletedPart{PartNumber: &partsNum[i], ETag: resp.ETag})
- i++
+ c.Assert(*resp.SSECustomerKeyMD5, Equals, s3.GetBase64MD5Str(customerKey))
+ compParts = append(compParts, &s3.CompletedPart{PartNumber: aws.Long(partNum), ETag: resp.ETag})
+ partNum++
}
}
// complete
@@ -307,7 +302,7 @@ func (s *Ks3utilCommandSuite) TestMultipartUploadWithSSE_C(c *C) {
})
c.Assert(err, IsNil)
c.Assert(*comResp.SSECustomerAlgorithm, Equals, "AES256")
- c.Assert(*comResp.SSECustomerKeyMD5, Equals, s3util.GetBase64MD5Str(customerKey))
+ c.Assert(*comResp.SSECustomerKeyMD5, Equals, s3.GetBase64MD5Str(customerKey))
// delete
_, err = client.DeleteObjectWithContext(context.Background(), &s3.DeleteObjectInput{
Bucket: aws.String(bucket),
@@ -369,34 +364,34 @@ func (s *Ks3utilCommandSuite) TestAppendObjectWithSSE_C(c *C) {
Position: aws.Long(0),
Body: fd,
SSECustomerAlgorithm: aws.String("AES256"),
- SSECustomerKey: aws.String(s3util.GetBase64Str(customerKey)),
- SSECustomerKeyMD5: aws.String(s3util.GetBase64MD5Str(customerKey)),
+ SSECustomerKey: aws.String(s3.GetBase64Str(customerKey)),
+ SSECustomerKeyMD5: aws.String(s3.GetBase64MD5Str(customerKey)),
})
c.Assert(err, IsNil)
c.Assert(*sseResp.SSECustomerAlgorithm, Equals, "AES256")
- c.Assert(*sseResp.SSECustomerKeyMD5, Equals, s3util.GetBase64MD5Str(customerKey))
+ c.Assert(*sseResp.SSECustomerKeyMD5, Equals, s3.GetBase64MD5Str(customerKey))
// head
headResp, err := client.HeadObjectWithContext(context.Background(), &s3.HeadObjectInput{
Bucket: aws.String(bucket),
Key: aws.String(object),
SSECustomerAlgorithm: aws.String("AES256"),
- SSECustomerKey: aws.String(s3util.GetBase64Str(customerKey)),
- SSECustomerKeyMD5: aws.String(s3util.GetBase64MD5Str(customerKey)),
+ SSECustomerKey: aws.String(s3.GetBase64Str(customerKey)),
+ SSECustomerKeyMD5: aws.String(s3.GetBase64MD5Str(customerKey)),
})
c.Assert(err, IsNil)
c.Assert(*headResp.SSECustomerAlgorithm, Equals, "AES256")
- c.Assert(*sseResp.SSECustomerKeyMD5, Equals, s3util.GetBase64MD5Str(customerKey))
+ c.Assert(*sseResp.SSECustomerKeyMD5, Equals, s3.GetBase64MD5Str(customerKey))
// get
getResp, err := client.GetObjectWithContext(context.Background(), &s3.GetObjectInput{
Bucket: aws.String(bucket),
Key: aws.String(object),
SSECustomerAlgorithm: aws.String("AES256"),
- SSECustomerKey: aws.String(s3util.GetBase64Str(customerKey)),
- SSECustomerKeyMD5: aws.String(s3util.GetBase64MD5Str(customerKey)),
+ SSECustomerKey: aws.String(s3.GetBase64Str(customerKey)),
+ SSECustomerKeyMD5: aws.String(s3.GetBase64MD5Str(customerKey)),
})
c.Assert(err, IsNil)
c.Assert(*getResp.SSECustomerAlgorithm, Equals, "AES256")
- c.Assert(*sseResp.SSECustomerKeyMD5, Equals, s3util.GetBase64MD5Str(customerKey))
+ c.Assert(*sseResp.SSECustomerKeyMD5, Equals, s3.GetBase64MD5Str(customerKey))
// delete
_, err = client.DeleteObjectWithContext(context.Background(), &s3.DeleteObjectInput{
Bucket: aws.String(bucket),
diff --git a/test/objectsample_test.go b/test/objectsample_test.go
index 65499aa..00099ff 100644
--- a/test/objectsample_test.go
+++ b/test/objectsample_test.go
@@ -2,13 +2,14 @@ package lib
import (
"bytes"
+ "encoding/xml"
"fmt"
"github.com/ks3sdklib/aws-sdk-go/aws"
"github.com/ks3sdklib/aws-sdk-go/aws/awserr"
"github.com/ks3sdklib/aws-sdk-go/service/s3"
"github.com/ks3sdklib/aws-sdk-go/service/s3/s3manager"
- "github.com/ks3sdklib/aws-sdk-go/service/s3/s3util"
. "gopkg.in/check.v1"
+ "io"
"net/http"
"net/url"
"os"
@@ -40,7 +41,7 @@ func (s *Ks3utilCommandSuite) TestPutObject(c *C) {
object := randLowStr(10)
createFile(object, 1024*1024*1)
fd, _ := os.Open(object)
- md5, _ := s3util.GetBase64FileMD5Str(object)
+ md5, _ := s3.GetBase64FileMD5Str(object)
_, err := client.PutObject(&s3.PutObjectInput{
Bucket: aws.String(bucket),
Key: aws.String(object),
@@ -56,18 +57,17 @@ func (s *Ks3utilCommandSuite) TestPutObject(c *C) {
// TestPutObjectByLimit 上传示例 -限速
func (s *Ks3utilCommandSuite) TestPutObjectByLimit(c *C) {
- MIN_BANDWIDTH := 1024 * 100 * 8 // 100KB/s
+ minBandwidth := 1024 * 100 * 8 // 100KB/s
object := randLowStr(10)
createFile(object, 1024*1024*1) // 1MB大小的文件
fd, _ := os.Open(object)
// 记录开始时间
startTime := time.Now()
_, err := client.PutObject(&s3.PutObjectInput{
- Bucket: aws.String(bucket),
- Key: aws.String(object),
- Body: fd,
- //设置上传速度
- TrafficLimit: aws.Long(int64(MIN_BANDWIDTH)),
+ Bucket: aws.String(bucket),
+ Key: aws.String(object),
+ Body: fd,
+ TrafficLimit: aws.Long(int64(minBandwidth)), //限制上传速度
})
c.Assert(err, IsNil)
// 计算上传耗时
@@ -79,7 +79,7 @@ func (s *Ks3utilCommandSuite) TestPutObjectByLimit(c *C) {
// TestGetObjectByLimit 下载限速示例
func (s *Ks3utilCommandSuite) TestGetObjectByLimit(c *C) {
- MIN_BANDWIDTH := 1024 * 100 * 8 // 100KB/s
+ minBandwidth := 1024 * 100 * 8 // 100KB/s
_, err := client.PutObject(&s3.PutObjectInput{
Bucket: aws.String(bucket),
Key: aws.String(key),
@@ -92,23 +92,15 @@ func (s *Ks3utilCommandSuite) TestGetObjectByLimit(c *C) {
_, err = client.GetObject(&s3.GetObjectInput{
Bucket: aws.String(bucket),
Key: aws.String(key),
- TrafficLimit: aws.Long(int64(MIN_BANDWIDTH)),
+ TrafficLimit: aws.Long(int64(minBandwidth)), //限制下载速度
})
c.Assert(err, IsNil)
}
// TestGetObject 下载示例
func (s *Ks3utilCommandSuite) TestGetObject(c *C) {
- _, err := client.PutObject(&s3.PutObjectInput{
- Bucket: aws.String(bucket),
- Key: aws.String(key),
- ACL: aws.String("public-read"),
- Body: strings.NewReader(content),
- })
- c.Assert(err, IsNil)
-
- //下载
- _, err = client.GetObject(&s3.GetObjectInput{
+ s.PutObject(key, c)
+ _, err := client.GetObject(&s3.GetObjectInput{
Bucket: aws.String(bucket),
Key: aws.String(key),
})
@@ -117,15 +109,8 @@ func (s *Ks3utilCommandSuite) TestGetObject(c *C) {
// TestDeleteObject 删除对象
func (s *Ks3utilCommandSuite) TestDeleteObject(c *C) {
- _, err := client.PutObject(&s3.PutObjectInput{
- Bucket: aws.String(bucket),
- Key: aws.String(key),
- ACL: aws.String("public-read"),
- Body: strings.NewReader(content),
- })
- c.Assert(err, IsNil)
-
- _, err = client.DeleteObject(&s3.DeleteObjectInput{
+ s.PutObject(key, c)
+ _, err := client.DeleteObject(&s3.DeleteObjectInput{
Bucket: aws.String(bucket),
Key: aws.String(key),
})
@@ -148,7 +133,7 @@ func (s *Ks3utilCommandSuite) TestGeneratePresignedUrl(c *C) {
// TestGeneratePUTPresignedUrl 根据外链PUT上传
func (s *Ks3utilCommandSuite) TestGeneratePUTPresignedUrl(c *C) {
text := "test content"
- md5 := s3util.GetBase64MD5Str(text)
+ md5 := s3.GetBase64MD5Str(text)
url, err := client.GeneratePresignedUrl(&s3.GeneratePresignedUrlInput{
Bucket: aws.String(bucket), // 设置 bucket 名称
Key: aws.String(key), // 设置 object key
@@ -233,7 +218,6 @@ func (s *Ks3utilCommandSuite) TestCopyObject(c *C) {
// TestUploadPartCopy 分块拷贝用例
func (s *Ks3utilCommandSuite) TestUploadPartCopy(c *C) {
s.PutObject(key, c)
-
dstKey := "xxx/copy/" + key
//初始化分块
initResp, err := client.CreateMultipartUpload(&s3.CreateMultipartUploadInput{
@@ -242,7 +226,7 @@ func (s *Ks3utilCommandSuite) TestUploadPartCopy(c *C) {
})
c.Assert(err, IsNil)
- uploadPartCopyresp, err := client.UploadPartCopy(&s3.UploadPartCopyInput{
+ uploadPartCopyResp, err := client.UploadPartCopy(&s3.UploadPartCopyInput{
Bucket: aws.String(bucket),
Key: aws.String(dstKey),
CopySource: aws.String("/" + bucket + "/" + key),
@@ -261,7 +245,7 @@ func (s *Ks3utilCommandSuite) TestUploadPartCopy(c *C) {
Parts: []*s3.CompletedPart{
{
PartNumber: aws.Long(1),
- ETag: uploadPartCopyresp.CopyPartResult.ETag,
+ ETag: uploadPartCopyResp.CopyPartResult.ETag,
},
},
},
@@ -331,14 +315,13 @@ func (s *Ks3utilCommandSuite) TestMultipartUpload(c *C) {
c.Assert(err, IsNil)
defer f.Close()
- var i int64 = 1
- //组装分块参数
+ var partNum int64 = 1
+ // 待合并分块
var compParts []*s3.CompletedPart
- partsNum := []int64{0}
- sc := make([]byte, 52428800)
-
+ // 缓冲区,分块大小为5MB
+ buffer := make([]byte, 5*1024*1024)
for {
- nr, err := f.Read(sc[:])
+ nr, err := f.Read(buffer)
if nr < 0 {
fmt.Fprintf(os.Stderr, "cat: error reading: %s\n", err.Error())
os.Exit(1)
@@ -351,21 +334,20 @@ func (s *Ks3utilCommandSuite) TestMultipartUpload(c *C) {
//块的数量可以是1到10,000中的任意一个(包含1和10,000)。块序号用于标识一个块以及其在对象创建时的位置。如果你上传一个新的块,使用之前已经使用的序列号,那么之前的那个块将会被覆盖。当所有块总大小大于5M时,除了最后一个块没有大小限制外,其余的块的大小均要求在5MB以上。当所有块总大小小于5M时,除了最后一个块没有大小限制外,其余的块的大小均要求在100K以上。如果不符合上述要求,会返回413状态码。
//为了保证数据在传输过程中没有损坏,请使用 Content-MD5 头部。当使用此头部时,KS3会自动计算出MD5,并根据用户提供的MD5进行校验,如果不匹配,将会返回错误信息。
//计算sc[:nr]的md5值
- md5 := s3util.GetBase64MD5Str(string(sc[0:nr]))
+ md5 := s3.GetBase64MD5Str(string(buffer[0:nr]))
resp, err := client.UploadPart(&s3.UploadPartInput{
Bucket: aws.String(bucket),
Key: aws.String(key),
- PartNumber: aws.Long(i),
+ PartNumber: aws.Long(partNum),
UploadID: aws.String(uploadId),
- Body: bytes.NewReader(sc[0:nr]),
- ContentLength: aws.Long(int64(len(sc[0:nr]))),
+ Body: bytes.NewReader(buffer[0:nr]),
+ ContentLength: aws.Long(int64(len(buffer[0:nr]))),
//TrafficLimit: aws.Long(int64(MIN_BANDWIDTH)),
ContentMD5: aws.String(md5),
})
c.Assert(err, IsNil)
- partsNum = append(partsNum, i)
- compParts = append(compParts, &s3.CompletedPart{PartNumber: &partsNum[i], ETag: resp.ETag})
- i++
+ compParts = append(compParts, &s3.CompletedPart{PartNumber: aws.Long(partNum), ETag: resp.ETag})
+ partNum++
}
}
@@ -393,9 +375,9 @@ func (s *Ks3utilCommandSuite) TestPutObjectWithSSEC(c *C) {
_, err := client.PutObject(&s3.PutObjectInput{
Bucket: aws.String(bucket),
Key: aws.String(key),
- SSECustomerAlgorithm: aws.String("AES256"), //加密类型
- SSECustomerKey: aws.String(s3util.GetBase64Str(SSECustomerKey)), // 客户端提供的加密密钥
- SSECustomerKeyMD5: aws.String(s3util.GetBase64MD5Str(SSECustomerKey)), // 客户端提供的通过BASE64编码的通过128位MD5加密的密钥的MD5值
+ SSECustomerAlgorithm: aws.String("AES256"), //加密类型
+ SSECustomerKey: aws.String(s3.GetBase64Str(SSECustomerKey)), // 客户端提供的加密密钥
+ SSECustomerKeyMD5: aws.String(s3.GetBase64MD5Str(SSECustomerKey)), // 客户端提供的通过BASE64编码的通过128位MD5加密的密钥的MD5值
})
c.Assert(err, IsNil)
}
@@ -742,3 +724,269 @@ func (s *Ks3utilCommandSuite) TestUploadPartProgress(c *C) {
c.Assert(err, IsNil)
os.Remove(object)
}
+
+// TestPutObject10GB 实际上传1MB文件但声明ContentLength为10GB,以触发413 Request Entity Too Large错误,错误响应类型为html
+func (s *Ks3utilCommandSuite) TestPutObject10GB(c *C) {
+ object := randLowStr(10)
+ createFile(object, 1024*1024*1)
+ fd, _ := os.Open(object)
+ _, err := client.PutObject(&s3.PutObjectInput{
+ Bucket: aws.String(bucket),
+ Key: aws.String(object),
+ Body: fd,
+ ContentLength: aws.Long(1024 * 1024 * 1024 * 10),
+ })
+ c.Assert(err, NotNil)
+ c.Assert(strings.Contains(err.Error(), "413 Request Entity Too Large"), Equals, true)
+ os.Remove(object)
+}
+
+// TestHeadNotExistsObject head不存在的对象,报404错误,request id不为空
+func (s *Ks3utilCommandSuite) TestHeadNotExistsObject(c *C) {
+ object := randLowStr(10)
+ _, err := client.HeadObject(&s3.HeadObjectInput{
+ Bucket: aws.String(bucket),
+ Key: aws.String(object),
+ })
+ c.Assert(err, NotNil)
+ c.Assert(strings.Index(err.Error(), "[")+1 != strings.Index(err.Error(), "]"), Equals, true)
+}
+
+// TestPresignedMultipartUpload 通过外链分块上传
+func (s *Ks3utilCommandSuite) TestPresignedMultipartUpload(c *C) {
+ object := randLowStr(10)
+ createFile(object, 1024*1024*1)
+ fd, _ := os.Open(object)
+ // 生成init外链
+ initUrl, err := client.GeneratePresignedUrl(&s3.GeneratePresignedUrlInput{
+ HTTPMethod: s3.POST,
+ Bucket: aws.String(bucket),
+ Key: aws.String(object),
+ Expires: 3600,
+ Parameters: map[string]*string{
+ "uploads": nil,
+ },
+ })
+
+ fmt.Println(initUrl)
+
+ initRequest, err := http.NewRequest("POST", initUrl, nil)
+ c.Assert(err, IsNil)
+
+ initResp, err := http.DefaultClient.Do(initRequest)
+ c.Assert(err, IsNil)
+
+ body, err := io.ReadAll(initResp.Body)
+ c.Assert(err, IsNil)
+
+ initXml := struct {
+ UploadId string `xml:"UploadId"`
+ }{}
+ err = xml.Unmarshal(body, &initXml)
+ c.Assert(err, IsNil)
+
+ fmt.Println(initXml.UploadId)
+
+ // 生成upload part外链
+ uploadPartUrl, err := client.GeneratePresignedUrl(&s3.GeneratePresignedUrlInput{
+ HTTPMethod: s3.PUT,
+ Bucket: aws.String(bucket),
+ Key: aws.String(object),
+ Expires: 3600,
+ Parameters: map[string]*string{
+ "partNumber": aws.String("1"),
+ "uploadId": aws.String(initXml.UploadId),
+ },
+ })
+ c.Assert(err, IsNil)
+ fmt.Println(uploadPartUrl)
+
+ uploadPartRequest, err := http.NewRequest("PUT", uploadPartUrl, fd)
+ c.Assert(err, IsNil)
+
+ uploadPartResp, err := http.DefaultClient.Do(uploadPartRequest)
+ c.Assert(err, IsNil)
+
+ etag := uploadPartResp.Header.Get("ETag")
+ fmt.Println(etag)
+
+ // 生成complete外链
+ completeUrl, err := client.GeneratePresignedUrl(&s3.GeneratePresignedUrlInput{
+ HTTPMethod: s3.POST,
+ Bucket: aws.String(bucket),
+ Key: aws.String(object),
+ Expires: 3600,
+ Parameters: map[string]*string{
+ "uploadId": aws.String(initXml.UploadId),
+ },
+ })
+ c.Assert(err, IsNil)
+ fmt.Println(completeUrl)
+
+ completeParts := `
+
+ 1
+ ` + etag + `
+
+ `
+ fmt.Println(completeParts)
+
+ completeRequest, err := http.NewRequest("POST", completeUrl, strings.NewReader(completeParts))
+ c.Assert(err, IsNil)
+
+ completeResp, err := http.DefaultClient.Do(completeRequest)
+ c.Assert(err, IsNil)
+
+ body, err = io.ReadAll(completeResp.Body)
+ c.Assert(err, IsNil)
+
+ fmt.Println(string(body))
+
+ // 获取head外链
+ headUrl, err := client.GeneratePresignedUrl(&s3.GeneratePresignedUrlInput{
+ HTTPMethod: s3.HEAD,
+ Bucket: aws.String(bucket),
+ Key: aws.String(object),
+ Expires: 3600,
+ })
+ c.Assert(err, IsNil)
+ fmt.Println(headUrl)
+
+ headRequest, err := http.NewRequest("HEAD", headUrl, nil)
+ c.Assert(err, IsNil)
+
+ headResp, err := http.DefaultClient.Do(headRequest)
+ c.Assert(err, IsNil)
+ c.Assert(headResp.StatusCode, Equals, 200)
+
+ os.Remove(object)
+}
+
+// TestPresignedMultipartCopy 通过外链分块复制
+func (s *Ks3utilCommandSuite) TestPresignedMultipartCopy(c *C) {
+ object := randLowStr(10)
+ createFile(object, 1024*1024*1)
+ s.PutObject(object, c)
+
+ // 生成init外链
+ initUrl, err := client.GeneratePresignedUrl(&s3.GeneratePresignedUrlInput{
+ HTTPMethod: s3.POST,
+ Bucket: aws.String(bucket),
+ Key: aws.String(object + "copy"),
+ Expires: 3600,
+ Parameters: map[string]*string{
+ "uploads": nil,
+ },
+ })
+ c.Assert(err, IsNil)
+ fmt.Println(initUrl)
+
+ initRequest, err := http.NewRequest("POST", initUrl, nil)
+ c.Assert(err, IsNil)
+
+ initResp, err := http.DefaultClient.Do(initRequest)
+ c.Assert(err, IsNil)
+
+ body, err := io.ReadAll(initResp.Body)
+ c.Assert(err, IsNil)
+
+ initXml := struct {
+ UploadId string `xml:"UploadId"`
+ }{}
+
+ err = xml.Unmarshal(body, &initXml)
+ c.Assert(err, IsNil)
+
+ fmt.Println(initXml.UploadId)
+
+ // 生成upload part外链
+ uploadPartUrl, err := client.GeneratePresignedUrl(&s3.GeneratePresignedUrlInput{
+ HTTPMethod: s3.PUT,
+ Bucket: aws.String(bucket),
+ Key: aws.String(object + "copy"),
+ Expires: 3600,
+ Parameters: map[string]*string{
+ "partNumber": aws.String("1"),
+ "uploadId": aws.String(initXml.UploadId),
+ },
+ Headers: map[string]*string{
+ "X-Amz-Copy-Source": aws.String("/" + bucket + "/" + object),
+ },
+ })
+ c.Assert(err, IsNil)
+ fmt.Println(uploadPartUrl)
+
+ uploadPartRequest, err := http.NewRequest("PUT", uploadPartUrl, nil)
+ c.Assert(err, IsNil)
+
+	// 设置拷贝源header,需与生成外链时签名的Headers保持一致
+ uploadPartRequest.Header.Set("X-Amz-Copy-Source", "/"+bucket+"/"+object)
+
+ uploadPartResp, err := http.DefaultClient.Do(uploadPartRequest)
+ c.Assert(err, IsNil)
+
+ body, err = io.ReadAll(uploadPartResp.Body)
+ c.Assert(err, IsNil)
+ fmt.Println(string(body))
+
+ uploadPartXml := struct {
+ ETag string `xml:"ETag"`
+ }{}
+
+ err = xml.Unmarshal(body, &uploadPartXml)
+ c.Assert(err, IsNil)
+
+ etag := uploadPartXml.ETag
+
+ // 生成complete外链
+ completeUrl, err := client.GeneratePresignedUrl(&s3.GeneratePresignedUrlInput{
+ HTTPMethod: s3.POST,
+ Bucket: aws.String(bucket),
+ Key: aws.String(object + "copy"),
+ Expires: 3600,
+ Parameters: map[string]*string{
+ "uploadId": aws.String(initXml.UploadId),
+ },
+ })
+ c.Assert(err, IsNil)
+ fmt.Println(completeUrl)
+
+ completeParts := `
+
+ 1
+ ` + etag + `
+
+ `
+
+ fmt.Println(completeParts)
+
+ completeRequest, err := http.NewRequest("POST", completeUrl, strings.NewReader(completeParts))
+ c.Assert(err, IsNil)
+
+ completeResp, err := http.DefaultClient.Do(completeRequest)
+ c.Assert(err, IsNil)
+
+ body, err = io.ReadAll(completeResp.Body)
+ c.Assert(err, IsNil)
+
+ fmt.Println(string(body))
+
+ // 获取head外链
+ headUrl, err := client.GeneratePresignedUrl(&s3.GeneratePresignedUrlInput{
+ HTTPMethod: s3.HEAD,
+ Bucket: aws.String(bucket),
+ Key: aws.String(object + "copy"),
+ Expires: 3600,
+ })
+ c.Assert(err, IsNil)
+ fmt.Println(headUrl)
+
+ headRequest, err := http.NewRequest("HEAD", headUrl, nil)
+ c.Assert(err, IsNil)
+
+ headResp, err := http.DefaultClient.Do(headRequest)
+ c.Assert(err, IsNil)
+ c.Assert(headResp.StatusCode, Equals, 200)
+
+ os.Remove(object)
+}
diff --git a/test/objectwithcontext_test.go b/test/objectwithcontext_test.go
index ca00570..66452c8 100644
--- a/test/objectwithcontext_test.go
+++ b/test/objectwithcontext_test.go
@@ -667,10 +667,9 @@ func (s *Ks3utilCommandSuite) TestCreateMultipartUploadWithContext(c *C) {
c.Assert(err, IsNil)
// 获取分块上传Id
uploadId := *initRet.UploadID
- var i int64 = 1
+ var partNum int64 = 1
// 待合并分块
- compParts := []*s3.CompletedPart{}
- partsNum := []int64{0}
+ var compParts []*s3.CompletedPart
// 缓冲区,分块大小为5MB
buffer := make([]byte, 5*1024*1024)
for {
@@ -684,14 +683,13 @@ func (s *Ks3utilCommandSuite) TestCreateMultipartUploadWithContext(c *C) {
resp, err := client.UploadPartWithContext(context.Background(), &s3.UploadPartInput{
Bucket: aws.String(bucket),
Key: aws.String(object),
- PartNumber: aws.Long(i),
+ PartNumber: aws.Long(partNum),
UploadID: aws.String(uploadId),
Body: bytes.NewReader(buffer[:n]),
})
c.Assert(err, IsNil)
- partsNum = append(partsNum, i)
- compParts = append(compParts, &s3.CompletedPart{PartNumber: &partsNum[i], ETag: resp.ETag})
- i++
+ compParts = append(compParts, &s3.CompletedPart{PartNumber: aws.Long(partNum), ETag: resp.ETag})
+ partNum++
}
}
// complete
@@ -726,10 +724,9 @@ func (s *Ks3utilCommandSuite) TestUploadPartWithContext(c *C) {
c.Assert(err, IsNil)
// 获取分块上传Id
uploadId := *initRet.UploadID
- var i int64 = 1
+ var partNum int64 = 1
// 待合并分块
- compParts := []*s3.CompletedPart{}
- partsNum := []int64{0}
+ var compParts []*s3.CompletedPart
// 缓冲区,分块大小为5MB
buffer := make([]byte, 5*1024*1024)
ctx, cancelFunc := context.WithTimeout(context.Background(), time.Millisecond*500)
@@ -745,7 +742,7 @@ func (s *Ks3utilCommandSuite) TestUploadPartWithContext(c *C) {
_, err := client.UploadPartWithContext(ctx, &s3.UploadPartInput{
Bucket: aws.String(bucket),
Key: aws.String(object),
- PartNumber: aws.Long(i),
+ PartNumber: aws.Long(partNum),
UploadID: aws.String(uploadId),
Body: bytes.NewReader(buffer[:n]),
})
@@ -765,14 +762,13 @@ func (s *Ks3utilCommandSuite) TestUploadPartWithContext(c *C) {
resp, err := client.UploadPartWithContext(context.Background(), &s3.UploadPartInput{
Bucket: aws.String(bucket),
Key: aws.String(object),
- PartNumber: aws.Long(i),
+ PartNumber: aws.Long(partNum),
UploadID: aws.String(uploadId),
Body: bytes.NewReader(buffer[:n]),
})
c.Assert(err, IsNil)
- partsNum = append(partsNum, i)
- compParts = append(compParts, &s3.CompletedPart{PartNumber: &partsNum[i], ETag: resp.ETag})
- i++
+ compParts = append(compParts, &s3.CompletedPart{PartNumber: aws.Long(partNum), ETag: resp.ETag})
+ partNum++
}
}
// complete
@@ -807,10 +803,9 @@ func (s *Ks3utilCommandSuite) TestCompleteMultipartUploadWithContext(c *C) {
c.Assert(err, IsNil)
// 获取分块上传Id
uploadId := *initRet.UploadID
- var i int64 = 1
+ var partNum int64 = 1
// 待合并分块
- compParts := []*s3.CompletedPart{}
- partsNum := []int64{0}
+ var compParts []*s3.CompletedPart
// 缓冲区,分块大小为5MB
buffer := make([]byte, 5*1024*1024)
for {
@@ -824,14 +819,13 @@ func (s *Ks3utilCommandSuite) TestCompleteMultipartUploadWithContext(c *C) {
resp, err := client.UploadPartWithContext(context.Background(), &s3.UploadPartInput{
Bucket: aws.String(bucket),
Key: aws.String(object),
- PartNumber: aws.Long(i),
+ PartNumber: aws.Long(partNum),
UploadID: aws.String(uploadId),
Body: bytes.NewReader(buffer[:n]),
})
c.Assert(err, IsNil)
- partsNum = append(partsNum, i)
- compParts = append(compParts, &s3.CompletedPart{PartNumber: &partsNum[i], ETag: resp.ETag})
- i++
+ compParts = append(compParts, &s3.CompletedPart{PartNumber: aws.Long(partNum), ETag: resp.ETag})
+ partNum++
}
}
// complete,通过context取消
@@ -889,10 +883,9 @@ func (s *Ks3utilCommandSuite) TestAbortMultipartUploadWithContext(c *C) {
c.Assert(err, IsNil)
// 获取分块上传Id
uploadId := *initRet.UploadID
- var i int64 = 1
+ var partNum int64 = 1
// 待合并分块
- compParts := []*s3.CompletedPart{}
- partsNum := []int64{0}
+ var compParts []*s3.CompletedPart
// 缓冲区,分块大小为5MB
buffer := make([]byte, 5*1024*1024)
for {
@@ -906,14 +899,13 @@ func (s *Ks3utilCommandSuite) TestAbortMultipartUploadWithContext(c *C) {
resp, err := client.UploadPartWithContext(context.Background(), &s3.UploadPartInput{
Bucket: aws.String(bucket),
Key: aws.String(object),
- PartNumber: aws.Long(i),
+ PartNumber: aws.Long(partNum),
UploadID: aws.String(uploadId),
Body: bytes.NewReader(buffer[:n]),
})
c.Assert(err, IsNil)
- partsNum = append(partsNum, i)
- compParts = append(compParts, &s3.CompletedPart{PartNumber: &partsNum[i], ETag: resp.ETag})
- i++
+ compParts = append(compParts, &s3.CompletedPart{PartNumber: aws.Long(partNum), ETag: resp.ETag})
+ partNum++
}
}
// abort,通过context取消
@@ -959,10 +951,9 @@ func (s *Ks3utilCommandSuite) TestListPartsWithContext(c *C) {
c.Assert(err, IsNil)
// 获取分块上传Id
uploadId := *initRet.UploadID
- var i int64 = 1
+ var partNum int64 = 1
// 待合并分块
- compParts := []*s3.CompletedPart{}
- partsNum := []int64{0}
+ var compParts []*s3.CompletedPart
// 缓冲区,分块大小为5MB
buffer := make([]byte, 5*1024*1024)
for {
@@ -976,14 +967,13 @@ func (s *Ks3utilCommandSuite) TestListPartsWithContext(c *C) {
resp, err := client.UploadPartWithContext(context.Background(), &s3.UploadPartInput{
Bucket: aws.String(bucket),
Key: aws.String(object),
- PartNumber: aws.Long(i),
+ PartNumber: aws.Long(partNum),
UploadID: aws.String(uploadId),
Body: bytes.NewReader(buffer[:n]),
})
c.Assert(err, IsNil)
- partsNum = append(partsNum, i)
- compParts = append(compParts, &s3.CompletedPart{PartNumber: &partsNum[i], ETag: resp.ETag})
- i++
+ compParts = append(compParts, &s3.CompletedPart{PartNumber: aws.Long(partNum), ETag: resp.ETag})
+ partNum++
}
}
// list,通过context取消
@@ -1026,10 +1016,9 @@ func (s *Ks3utilCommandSuite) TestListMultipartUploadsWithContext(c *C) {
c.Assert(err, IsNil)
// 获取分块上传Id
uploadId := *initRet.UploadID
- var i int64 = 1
+ var partNum int64 = 1
// 待合并分块
- compParts := []*s3.CompletedPart{}
- partsNum := []int64{0}
+ var compParts []*s3.CompletedPart
// 缓冲区,分块大小为5MB
buffer := make([]byte, 5*1024*1024)
for {
@@ -1043,14 +1032,13 @@ func (s *Ks3utilCommandSuite) TestListMultipartUploadsWithContext(c *C) {
resp, err := client.UploadPartWithContext(context.Background(), &s3.UploadPartInput{
Bucket: aws.String(bucket),
Key: aws.String(object),
- PartNumber: aws.Long(i),
+ PartNumber: aws.Long(partNum),
UploadID: aws.String(uploadId),
Body: bytes.NewReader(buffer[:n]),
})
c.Assert(err, IsNil)
- partsNum = append(partsNum, i)
- compParts = append(compParts, &s3.CompletedPart{PartNumber: &partsNum[i], ETag: resp.ETag})
- i++
+ compParts = append(compParts, &s3.CompletedPart{PartNumber: aws.Long(partNum), ETag: resp.ETag})
+ partNum++
}
}
// list mul,通过context取消
@@ -1101,11 +1089,10 @@ func (s *Ks3utilCommandSuite) TestPartWithContext(c *C) {
c.Assert(err, IsNil)
c.Assert(*headObjectResp.StatusCode, Equals, int64(200))
contentLength := *headObjectResp.ContentLength
- partSize := int64(5 * 1024 * 1024)
- var i int64 = 1
+ var partSize int64 = 5 * 1024 * 1024
+ var partNum int64 = 1
// 待合并分块
- compParts := []*s3.CompletedPart{}
- partsNum := []int64{0}
+ var compParts []*s3.CompletedPart
var start int64 = 0
var end int64 = 0
dstObject := randLowStr(10)
@@ -1134,14 +1121,14 @@ func (s *Ks3utilCommandSuite) TestPartWithContext(c *C) {
Key: aws.String(dstObject),
CopySource: aws.String("/" + bucket + "/" + srcObject),
UploadID: aws.String(uploadId),
- PartNumber: aws.Long(i),
+ PartNumber: aws.Long(partNum),
CopySourceRange: aws.String("bytes=" + strconv.FormatInt(start, 10) + "-" + strconv.FormatInt(end-1, 10)),
})
c.Assert(err, NotNil)
- i++
+ partNum++
start = end
}
- i = 1
+ partNum = 1
start = 0
end = 0
for {
@@ -1159,13 +1146,12 @@ func (s *Ks3utilCommandSuite) TestPartWithContext(c *C) {
Key: aws.String(dstObject),
CopySource: aws.String("/" + bucket + "/" + srcObject),
UploadID: aws.String(uploadId),
- PartNumber: aws.Long(i),
+ PartNumber: aws.Long(partNum),
CopySourceRange: aws.String("bytes=" + strconv.FormatInt(start, 10) + "-" + strconv.FormatInt(end-1, 10)),
})
c.Assert(err, IsNil)
- partsNum = append(partsNum, i)
- compParts = append(compParts, &s3.CompletedPart{PartNumber: &partsNum[i], ETag: resp.CopyPartResult.ETag})
- i++
+ compParts = append(compParts, &s3.CompletedPart{PartNumber: aws.Long(partNum), ETag: resp.CopyPartResult.ETag})
+ partNum++
start = end
}
// complete