Browse Source

CreateBucket: support DataRedundancyType

taowei.wtw 6 years ago
parent
commit
cee409f5b4
10 changed files with 124 additions and 34 deletions
  1. 5 0
      oss/bucket.go
  2. 27 10
      oss/client.go
  3. 60 7
      oss/client_test.go
  4. 0 4
      oss/conn.go
  5. 11 0
      oss/const.go
  6. 2 2
      oss/download.go
  7. 1 1
      oss/download_test.go
  8. 4 4
      oss/multicopy_test.go
  9. 6 0
      oss/option.go
  10. 8 6
      oss/type.go

+ 5 - 0
oss/bucket.go

@@ -1141,6 +1141,11 @@ func (bucket Bucket) do(method, objectName string, params map[string]interface{}
 		return nil, err
 		return nil, err
 	}
 	}
 
 
+	err = CheckBucketName(bucket.BucketName)
+	if len(bucket.BucketName) > 0 && err != nil {
+		return nil, err
+	}
+
 	resp, err := bucket.Client.Conn.Do(method, bucket.BucketName, objectName,
 	resp, err := bucket.Client.Conn.Do(method, bucket.BucketName, objectName,
 		params, headers, data, 0, listener)
 		params, headers, data, 0, listener)
 
 

+ 27 - 10
oss/client.go

@@ -82,6 +82,11 @@ func New(endpoint, accessKeyID, accessKeySecret string, options ...ClientOption)
 // error    it's nil if no error, otherwise it's an error object.
 // error    it's nil if no error, otherwise it's an error object.
 //
 //
 func (client Client) Bucket(bucketName string) (*Bucket, error) {
 func (client Client) Bucket(bucketName string) (*Bucket, error) {
+	err := CheckBucketName(bucketName)
+	if err != nil {
+		return nil, err
+	}
+
 	return &Bucket{
 	return &Bucket{
 		client,
 		client,
 		bucketName,
 		bucketName,
@@ -103,18 +108,26 @@ func (client Client) CreateBucket(bucketName string, options ...Option) error {
 
 
 	buffer := new(bytes.Buffer)
 	buffer := new(bytes.Buffer)
 
 
-	isOptSet, val, _ := isOptionSet(options, storageClass)
-	if isOptSet {
-		cbConfig := createBucketConfiguration{StorageClass: val.(StorageClassType)}
-		bs, err := xml.Marshal(cbConfig)
-		if err != nil {
-			return err
-		}
-		buffer.Write(bs)
+	var cbConfig createBucketConfiguration
+	cbConfig.StorageClass = StorageStandard
+
+	isStorageSet, valStroage, _ := isOptionSet(options, storageClass)
+	isRedundancySet, valRedundancy, _ := isOptionSet(options, redundancyType)
+	if isStorageSet {
+		cbConfig.StorageClass = valStroage.(StorageClassType)
+	}
+
+	if isRedundancySet {
+		cbConfig.DataRedundancyType = valRedundancy.(DataRedundancyType)
+	}
 
 
-		contentType := http.DetectContentType(buffer.Bytes())
-		headers[HTTPHeaderContentType] = contentType
+	bs, err := xml.Marshal(cbConfig)
+	if err != nil {
+		return err
 	}
 	}
+	buffer.Write(bs)
+	contentType := http.DetectContentType(buffer.Bytes())
+	headers[HTTPHeaderContentType] = contentType
 
 
 	params := map[string]interface{}{}
 	params := map[string]interface{}{}
 	resp, err := client.do("PUT", bucketName, params, headers, buffer, options...)
 	resp, err := client.do("PUT", bucketName, params, headers, buffer, options...)
@@ -1296,6 +1309,10 @@ func SetLocalAddr(localAddr net.Addr) ClientOption {
 // Private
 // Private
 func (client Client) do(method, bucketName string, params map[string]interface{},
 func (client Client) do(method, bucketName string, params map[string]interface{},
 	headers map[string]string, data io.Reader, options ...Option) (*Response, error) {
 	headers map[string]string, data io.Reader, options ...Option) (*Response, error) {
+	err := CheckBucketName(bucketName)
+	if len(bucketName) > 0 && err != nil {
+		return nil, err
+	}
 
 
 	resp, err := client.Conn.Do(method, bucketName, "", params, headers, data, 0, nil)
 	resp, err := client.Conn.Do(method, bucketName, "", params, headers, data, 0, nil)
 
 

+ 60 - 7
oss/client_test.go

@@ -334,6 +334,49 @@ func (s *OssClientSuite) TestCreateBucket(c *C) {
 	}
 	}
 }
 }
 
 
+func (s *OssClientSuite) TestCreateBucketRedundancyType(c *C) {
+	bucketNameTest := bucketNamePrefix + randLowStr(6)
+	client, err := New(endpoint, accessID, accessKey)
+	c.Assert(err, IsNil)
+
+	// CreateBucket creates without property
+	err = client.CreateBucket(bucketNameTest)
+	c.Assert(err, IsNil)
+	client.DeleteBucket(bucketNameTest)
+	time.Sleep(timeoutInOperation)
+
+	// CreateBucket creates with RedundancyZRS
+	err = client.CreateBucket(bucketNameTest, RedundancyType(RedundancyZRS))
+	c.Assert(err, IsNil)
+
+	res, err := client.GetBucketInfo(bucketNameTest)
+	c.Assert(err, IsNil)
+	c.Assert(res.BucketInfo.RedundancyType, Equals, string(RedundancyZRS))
+	client.DeleteBucket(bucketNameTest)
+	time.Sleep(timeoutInOperation)
+
+	// CreateBucket creates with RedundancyLRS
+	err = client.CreateBucket(bucketNameTest, RedundancyType(RedundancyLRS))
+	c.Assert(err, IsNil)
+
+	res, err = client.GetBucketInfo(bucketNameTest)
+	c.Assert(err, IsNil)
+	c.Assert(res.BucketInfo.RedundancyType, Equals, string(RedundancyLRS))
+	c.Assert(res.BucketInfo.StorageClass, Equals, string(StorageStandard))
+	client.DeleteBucket(bucketNameTest)
+	time.Sleep(timeoutInOperation)
+
+	// CreateBucket creates with ACLPublicRead RedundancyZRS
+	err = client.CreateBucket(bucketNameTest, ACL(ACLPublicRead), RedundancyType(RedundancyZRS))
+	c.Assert(err, IsNil)
+
+	res, err = client.GetBucketInfo(bucketNameTest)
+	c.Assert(err, IsNil)
+	c.Assert(res.BucketInfo.RedundancyType, Equals, string(RedundancyZRS))
+	c.Assert(res.BucketInfo.ACL, Equals, string(ACLPublicRead))
+	client.DeleteBucket(bucketNameTest)
+}
+
 // TestCreateBucketNegative
 // TestCreateBucketNegative
 func (s *OssClientSuite) TestCreateBucketNegative(c *C) {
 func (s *OssClientSuite) TestCreateBucketNegative(c *C) {
 	client, err := New(endpoint, accessID, accessKey)
 	client, err := New(endpoint, accessID, accessKey)
@@ -944,10 +987,10 @@ func (s *OssClientSuite) TestSetBucketLifecycleAboutVersionObject(c *C) {
 	}
 	}
 
 
 	rule := LifecycleRule{
 	rule := LifecycleRule{
-        Status:            "Enabled",
-		Expiration:        &expiration,
-		VersionExpiration: &versionExpiration,
-		VersionTransition: &versionTransition,
+		Status:               "Enabled",
+		Expiration:           &expiration,
+		NonVersionExpiration: &versionExpiration,
+		NonVersionTransition: &versionTransition,
 	}
 	}
 	rules := []LifecycleRule{rule}
 	rules := []LifecycleRule{rule}
 
 
@@ -962,9 +1005,9 @@ func (s *OssClientSuite) TestSetBucketLifecycleAboutVersionObject(c *C) {
 	c.Assert(res.Rules[0].Expiration.Date, Equals, "")
 	c.Assert(res.Rules[0].Expiration.Date, Equals, "")
 	c.Assert(*(res.Rules[0].Expiration.ExpiredObjectDeleteMarker), Equals, true)
 	c.Assert(*(res.Rules[0].Expiration.ExpiredObjectDeleteMarker), Equals, true)
 
 
-	c.Assert(res.Rules[0].VersionExpiration.NoncurrentDays, Equals, 20)
-	c.Assert(res.Rules[0].VersionTransition.NoncurrentDays, Equals, 10)
-	c.Assert(res.Rules[0].VersionTransition.StorageClass, Equals, StorageClassType("IA"))
+	c.Assert(res.Rules[0].NonVersionExpiration.NoncurrentDays, Equals, 20)
+	c.Assert(res.Rules[0].NonVersionTransition.NoncurrentDays, Equals, 10)
+	c.Assert(res.Rules[0].NonVersionTransition.StorageClass, Equals, StorageClassType("IA"))
 
 
 	err = client.DeleteBucket(bucketNameTest)
 	err = client.DeleteBucket(bucketNameTest)
 	c.Assert(err, IsNil)
 	c.Assert(err, IsNil)
@@ -2916,3 +2959,13 @@ func (s *OssClientSuite) TestClientProcessEndpointError(c *C) {
 	err = client.CreateBucket(bucketNameTest)
 	err = client.CreateBucket(bucketNameTest)
 	c.Assert(err, NotNil)
 	c.Assert(err, NotNil)
 }
 }
+
+// TestClientBucketError
+func (s *OssClientSuite) TestClientBucketError(c *C) {
+	client, err := New(endpoint, accessID, accessKey)
+	c.Assert(err, IsNil)
+
+	bucketName := "-" + randLowStr(5)
+	_, err = client.Bucket(bucketName)
+	c.Assert(err, NotNil)
+}

+ 0 - 4
oss/conn.go

@@ -78,10 +78,6 @@ func (conn *Conn) init(config *Config, urlMaker *urlMaker, client *http.Client)
 // Do sends request and returns the response
 // Do sends request and returns the response
 func (conn Conn) Do(method, bucketName, objectName string, params map[string]interface{}, headers map[string]string,
 func (conn Conn) Do(method, bucketName, objectName string, params map[string]interface{}, headers map[string]string,
 	data io.Reader, initCRC uint64, listener ProgressListener) (*Response, error) {
 	data io.Reader, initCRC uint64, listener ProgressListener) (*Response, error) {
-	err := CheckBucketName(bucketName)
-	if len(bucketName) > 0 && err != nil {
-		return nil, err
-	}
 	urlParams := conn.getURLParams(params)
 	urlParams := conn.getURLParams(params)
 	subResource := conn.getSubResource(params)
 	subResource := conn.getSubResource(params)
 	uri := conn.url.getURL(bucketName, objectName, urlParams)
 	uri := conn.url.getURL(bucketName, objectName, urlParams)

+ 11 - 0
oss/const.go

@@ -74,6 +74,17 @@ const (
 	StorageArchive StorageClassType = "Archive"
 	StorageArchive StorageClassType = "Archive"
 )
 )
 
 
+// RedundancyType bucket data Redundancy type
+type DataRedundancyType string
+
+const (
+	// RedundancyLRS Local redundancy, default value
+	RedundancyLRS DataRedundancyType = "LRS"
+
+	// RedundancyZRS Same city redundancy
+	RedundancyZRS DataRedundancyType = "ZRS"
+)
+
 // PayerType the type of request payer
 // PayerType the type of request payer
 type PayerType string
 type PayerType string
 
 

+ 2 - 2
oss/download.go

@@ -45,7 +45,7 @@ func (bucket Bucket) DownloadFile(objectKey, filePath string, partSize int64, op
 	}
 	}
 
 
 	if cpConf != nil && cpConf.IsEnable {
 	if cpConf != nil && cpConf.IsEnable {
-		cpFilePath := getDownloadCpFilePath(cpConf, bucket.BucketName, objectKey, filePath, strVersionId)
+		cpFilePath := getDownloadCpFilePath(cpConf, bucket.BucketName, objectKey, strVersionId, filePath)
 		if cpFilePath != "" {
 		if cpFilePath != "" {
 			return bucket.downloadFileWithCp(objectKey, filePath, partSize, options, cpFilePath, routines, uRange)
 			return bucket.downloadFileWithCp(objectKey, filePath, partSize, options, cpFilePath, routines, uRange)
 		}
 		}
@@ -54,7 +54,7 @@ func (bucket Bucket) DownloadFile(objectKey, filePath string, partSize int64, op
 	return bucket.downloadFile(objectKey, filePath, partSize, options, routines, uRange)
 	return bucket.downloadFile(objectKey, filePath, partSize, options, routines, uRange)
 }
 }
 
 
-func getDownloadCpFilePath(cpConf *cpConfig, srcBucket, srcObject, destFile, versionId string) string {
+func getDownloadCpFilePath(cpConf *cpConfig, srcBucket, srcObject, versionId, destFile string) string {
 	if cpConf.FilePath == "" && cpConf.DirPath != "" {
 	if cpConf.FilePath == "" && cpConf.DirPath != "" {
 		src := fmt.Sprintf("oss://%v/%v", srcBucket, srcObject)
 		src := fmt.Sprintf("oss://%v/%v", srcBucket, srcObject)
 		absPath, _ := filepath.Abs(destFile)
 		absPath, _ := filepath.Abs(destFile)

+ 1 - 1
oss/download_test.go

@@ -216,7 +216,7 @@ func (s *OssDownloadSuite) TestDownloadRoutineWithRecovery(c *C) {
 	// Check
 	// Check
 	dcp = downloadCheckpoint{}
 	dcp = downloadCheckpoint{}
 	cpConf := cpConfig{IsEnable: true, DirPath: "./"}
 	cpConf := cpConfig{IsEnable: true, DirPath: "./"}
-	cpFilePath := getDownloadCpFilePath(&cpConf, s.bucket.BucketName, objectName, newFile, "")
+	cpFilePath := getDownloadCpFilePath(&cpConf, s.bucket.BucketName, objectName, "",newFile)
 	err = dcp.load(cpFilePath)
 	err = dcp.load(cpFilePath)
 	c.Assert(err, IsNil)
 	c.Assert(err, IsNil)
 	c.Assert(dcp.Magic, Equals, downloadCpMagic)
 	c.Assert(dcp.Magic, Equals, downloadCpMagic)

+ 4 - 4
oss/multicopy_test.go

@@ -239,11 +239,11 @@ func (s *OssCopySuite) TestCopyRoutineWithoutRecoveryNegative(c *C) {
 	copyPartHooker = defaultCopyPartHook
 	copyPartHooker = defaultCopyPartHook
 
 
 	// Source bucket does not exist
 	// Source bucket does not exist
-	err = s.bucket.CopyFile("NotExist", srcObjectName, destObjectName, 100*1024, Routines(2))
+	err = s.bucket.CopyFile("notexist", srcObjectName, destObjectName, 100*1024, Routines(2))
 	c.Assert(err, NotNil)
 	c.Assert(err, NotNil)
 
 
 	// Target object does not exist
 	// Target object does not exist
-	err = s.bucket.CopyFile(bucketName, "NotExist", destObjectName, 100*1024, Routines(2))
+	err = s.bucket.CopyFile(bucketName, "notexist", destObjectName, 100*1024, Routines(2))
 
 
 	// The part size is invalid
 	// The part size is invalid
 	err = s.bucket.CopyFile(bucketName, srcObjectName, destObjectName, 1024, Routines(2))
 	err = s.bucket.CopyFile(bucketName, srcObjectName, destObjectName, 1024, Routines(2))
@@ -429,12 +429,12 @@ func (s *OssCopySuite) TestCopyRoutineWithRecoveryNegative(c *C) {
 	destObjectName := srcObjectName + "-dest"
 	destObjectName := srcObjectName + "-dest"
 
 
 	// Source bucket does not exist
 	// Source bucket does not exist
-	err := s.bucket.CopyFile("NotExist", srcObjectName, destObjectName, 100*1024, Checkpoint(true, destObjectName+".cp"))
+	err := s.bucket.CopyFile("notexist", srcObjectName, destObjectName, 100*1024, Checkpoint(true, destObjectName+".cp"))
 	c.Assert(err, NotNil)
 	c.Assert(err, NotNil)
 	c.Assert(err, NotNil)
 	c.Assert(err, NotNil)
 
 
 	// Source object does not exist
 	// Source object does not exist
-	err = s.bucket.CopyFile(bucketName, "NotExist", destObjectName, 100*1024, Routines(2), Checkpoint(true, destObjectName+".cp"))
+	err = s.bucket.CopyFile(bucketName, "notexist", destObjectName, 100*1024, Routines(2), Checkpoint(true, destObjectName+".cp"))
 	c.Assert(err, NotNil)
 	c.Assert(err, NotNil)
 
 
 	// Specify part size is invalid.
 	// Specify part size is invalid.

+ 6 - 0
oss/option.go

@@ -25,6 +25,7 @@ const (
 	progressListener   = "x-progress-listener"
 	progressListener   = "x-progress-listener"
 	storageClass       = "storage-class"
 	storageClass       = "storage-class"
 	responseHeader     = "x-response-header"
 	responseHeader     = "x-response-header"
+	redundancyType     = "redundancy-type"
 )
 )
 
 
 type (
 type (
@@ -325,6 +326,11 @@ func StorageClass(value StorageClassType) Option {
 	return addArg(storageClass, value)
 	return addArg(storageClass, value)
 }
 }
 
 
+// RedundancyType bucket data redundancy type
+func RedundancyType(value DataRedundancyType) Option {
+	return addArg(redundancyType, value)
+}
+
 // Checkpoint configuration
 // Checkpoint configuration
 type cpConfig struct {
 type cpConfig struct {
 	IsEnable bool
 	IsEnable bool

+ 8 - 6
oss/type.go

@@ -52,8 +52,8 @@ type LifecycleRule struct {
 	Expiration           *LifecycleExpiration           `xml:"Expiration,omitempty"`           // The expiration property
 	Expiration           *LifecycleExpiration           `xml:"Expiration,omitempty"`           // The expiration property
 	Transitions          []LifecycleTransition          `xml:"Transition,omitempty"`           // The transition property
 	Transitions          []LifecycleTransition          `xml:"Transition,omitempty"`           // The transition property
 	AbortMultipartUpload *LifecycleAbortMultipartUpload `xml:"AbortMultipartUpload,omitempty"` // The AbortMultipartUpload property
 	AbortMultipartUpload *LifecycleAbortMultipartUpload `xml:"AbortMultipartUpload,omitempty"` // The AbortMultipartUpload property
-	VersionExpiration    *LifecycleVersionExpiration    `xml:"NoncurrentVersionExpiration,omitempty"`
-	VersionTransition    *LifecycleVersionTransition    `xml:"NoncurrentVersionTransition,omitempty"`
+	NonVersionExpiration *LifecycleVersionExpiration    `xml:"NoncurrentVersionExpiration,omitempty"`
+	NonVersionTransition *LifecycleVersionTransition    `xml:"NoncurrentVersionTransition,omitempty"`
 }
 }
 
 
 // LifecycleExpiration defines the rule's expiration property
 // LifecycleExpiration defines the rule's expiration property
@@ -147,8 +147,8 @@ func verifyLifecycleRules(rules []LifecycleRule) error {
 					return fmt.Errorf("invalid transition lifecylce, the value of storage class must be IA or Archive")
 					return fmt.Errorf("invalid transition lifecylce, the value of storage class must be IA or Archive")
 				}
 				}
 			}
 			}
-		} else if rule.Expiration == nil && abortMPU == nil && rule.VersionExpiration == nil && rule.VersionTransition == nil {
-			return fmt.Errorf("invalid rule, must set one of Expiration, AbortMultipartUplaod, VersionExpiration, VersionTransition and Transitions")
+		} else if rule.Expiration == nil && abortMPU == nil && rule.NonVersionExpiration == nil && rule.NonVersionTransition == nil {
+			return fmt.Errorf("invalid rule, must set one of Expiration, AbortMultipartUplaod, NonVersionExpiration, NonVersionTransition and Transitions")
 		}
 		}
 	}
 	}
 
 
@@ -302,6 +302,7 @@ type BucketInfo struct {
 	ExtranetEndpoint string    `xml:"ExtranetEndpoint"`         // Bucket external endpoint
 	ExtranetEndpoint string    `xml:"ExtranetEndpoint"`         // Bucket external endpoint
 	IntranetEndpoint string    `xml:"IntranetEndpoint"`         // Bucket internal endpoint
 	IntranetEndpoint string    `xml:"IntranetEndpoint"`         // Bucket internal endpoint
 	ACL              string    `xml:"AccessControlList>Grant"`  // Bucket ACL
 	ACL              string    `xml:"AccessControlList>Grant"`  // Bucket ACL
+	RedundancyType   string    `xml:"DataRedundancyType"`       // Bucket DataRedundancyType
 	Owner            Owner     `xml:"Owner"`                    // Bucket owner
 	Owner            Owner     `xml:"Owner"`                    // Bucket owner
 	StorageClass     string    `xml:"StorageClass"`             // Bucket storage class
 	StorageClass     string    `xml:"StorageClass"`             // Bucket storage class
 	SseRule          SSERule   `xml:"ServerSideEncryptionRule"` // Bucket ServerSideEncryptionRule
 	SseRule          SSERule   `xml:"ServerSideEncryptionRule"` // Bucket ServerSideEncryptionRule
@@ -692,8 +693,9 @@ func decodeListMultipartUploadResult(result *ListMultipartUploadResult) error {
 
 
 // createBucketConfiguration defines the configuration for creating a bucket.
 // createBucketConfiguration defines the configuration for creating a bucket.
 type createBucketConfiguration struct {
 type createBucketConfiguration struct {
-	XMLName      xml.Name         `xml:"CreateBucketConfiguration"`
-	StorageClass StorageClassType `xml:"StorageClass,omitempty"`
+	XMLName            xml.Name           `xml:"CreateBucketConfiguration"`
+	StorageClass       StorageClassType   `xml:"StorageClass,omitempty"`
+	DataRedundancyType DataRedundancyType `xml:"DataRedundancyType,omitempty"`
 }
 }
 
 
 // LiveChannelConfiguration defines the configuration for live-channel
 // LiveChannelConfiguration defines the configuration for live-channel