hangzws, 6 years ago
Parent commit: 8c6b6db291

+ 2 - 5
oss/bucket_test.go

@@ -46,8 +46,6 @@ func (s *OssBucketSuite) SetUpSuite(c *C) {
 	err = s.client.CreateBucket(archiveBucketName, StorageClass(StorageArchive))
 	c.Assert(err, IsNil)
 
-	// time.Sleep(timeoutInOperation)
-
 	bucket, err := s.client.Bucket(bucketName)
 	c.Assert(err, IsNil)
 	s.bucket = bucket
@@ -605,7 +603,6 @@ func (s *OssBucketSuite) TestPutObjectType(c *C) {
 	c.Assert(err, IsNil)
 
 	// Check
-	// time.Sleep(timeoutInOperation)
 	body, err := s.bucket.GetObject(objectName)
 	c.Assert(err, IsNil)
 	str, err := readBody(body)
@@ -2754,7 +2751,7 @@ func (s *OssBucketSuite) TestUploadObjectWithWebpFormat(c *C) {
 	client, err := New(endpoint, accessID, accessKey)
 	c.Assert(err, IsNil)
 
-	bucketName := bucketNamePrefix + randLowStr(5)
+	bucketName := bucketNamePrefix + randLowStr(6)
 	err = client.CreateBucket(bucketName)
 	c.Assert(err, IsNil)
 
@@ -2763,7 +2760,7 @@ func (s *OssBucketSuite) TestUploadObjectWithWebpFormat(c *C) {
 
 	// create webp file
 	textBuffer := randStr(1024)
-	objectName := objectNamePrefix + getUuid()
+	objectName := objectNamePrefix + randStr(8)
 	fileName := "." + string(os.PathSeparator) + objectName + ".webp"
 	ioutil.WriteFile(fileName, []byte(textBuffer), 0644)
 	_, err = os.Stat(fileName)

+ 63 - 11
oss/client_test.go

@@ -179,7 +179,7 @@ func (s *OssClientSuite) TestCreateBucket(c *C) {
 	client.DeleteBucket(bucketNameTest)
 	err = client.CreateBucket(bucketNameTest)
 	c.Assert(err, IsNil)
-	//sleep 5 seconds after create bucket
+	//sleep 3 seconds after creating the bucket
 	time.Sleep(timeoutInOperation)
 
 	// verify bucket is exist
@@ -501,8 +501,6 @@ func (s *OssClientSuite) TestSetBucketAcl(c *C) {
 	// Set ACL_PUBLIC_RW
 	err = client.SetBucketACL(bucketNameTest, ACLPrivate)
 	c.Assert(err, IsNil)
-	err = client.SetBucketACL(bucketNameTest, ACLPrivate)
-	c.Assert(err, IsNil)
 	time.Sleep(timeoutInOperation)
 
 	res, err = client.GetBucketACL(bucketNameTest)
@@ -614,15 +612,14 @@ func (s *OssClientSuite) TestGetBucketLocationNegative(c *C) {
 // TestSetBucketLifecycle
 func (s *OssClientSuite) TestSetBucketLifecycle(c *C) {
 	var bucketNameTest = bucketNamePrefix + randLowStr(6)
-	var rule1 = BuildLifecycleRuleByDate("idone", "one", true, 2015, 11, 11)
-	var rule2 = BuildLifecycleRuleByDays("idtwo", "two", true, 3)
+	var rule1 = BuildLifecycleRuleByDate("rule1", "one", true, 2015, 11, 11)
+	var rule2 = BuildLifecycleRuleByDays("rule2", "two", true, 3)
 
 	client, err := New(endpoint, accessID, accessKey)
 	c.Assert(err, IsNil)
 
 	err = client.CreateBucket(bucketNameTest)
 	c.Assert(err, IsNil)
-	time.Sleep(timeoutInOperation)
 
 	// Set single rule
 	var rules = []LifecycleRule{rule1}
@@ -635,7 +632,7 @@ func (s *OssClientSuite) TestSetBucketLifecycle(c *C) {
 	res, err := client.GetBucketLifecycle(bucketNameTest)
 	c.Assert(err, IsNil)
 	c.Assert(len(res.Rules), Equals, 1)
-	c.Assert(res.Rules[0].ID, Equals, "idone")
+	c.Assert(res.Rules[0].ID, Equals, "rule1")
 
 	err = client.DeleteBucketLifecycle(bucketNameTest)
 	c.Assert(err, IsNil)
@@ -651,8 +648,63 @@ func (s *OssClientSuite) TestSetBucketLifecycle(c *C) {
 	res, err = client.GetBucketLifecycle(bucketNameTest)
 	c.Assert(err, IsNil)
 	c.Assert(len(res.Rules), Equals, 2)
-	c.Assert(res.Rules[0].ID, Equals, "idone")
-	c.Assert(res.Rules[1].ID, Equals, "idtwo")
+	c.Assert(res.Rules[0].ID, Equals, "rule1")
+	c.Assert(res.Rules[1].ID, Equals, "rule2")
+
+	err = client.DeleteBucket(bucketNameTest)
+	c.Assert(err, IsNil)
+}
+
+// TestSetBucketLifecycleNew
+func (s *OssClientSuite) TestSetBucketLifecycleNew(c *C) {
+	var bucketNameTest = bucketNamePrefix + randLowStr(6)
+	rule1, err := NewLifecycleRuleByCreateBeforeDate("rule1", "one", true, 2015, 11, 11, LRTExpriration)
+	c.Assert(err, IsNil)
+	rule2, err := NewLifecycleRuleByDays("rule2", "two", true, 3, LRTAbortMultiPartUpload)
+	c.Assert(err, IsNil)
+	rule3, err := NewLifecycleRuleByDays("rule3", "three", true, 3, LRTTransition, StorageIA)
+	c.Assert(err, IsNil)
+
+	client, err := New(endpoint, accessID, accessKey)
+	c.Assert(err, IsNil)
+
+	err = client.CreateBucket(bucketNameTest)
+	c.Assert(err, IsNil)
+
+	// Set single rule
+	var rules = []LifecycleRule{*rule1}
+	err = client.SetBucketLifecycle(bucketNameTest, rules)
+	c.Assert(err, IsNil)
+	// Double set rule
+	err = client.SetBucketLifecycle(bucketNameTest, rules)
+	c.Assert(err, IsNil)
+
+	res, err := client.GetBucketLifecycle(bucketNameTest)
+	c.Assert(err, IsNil)
+	c.Assert(len(res.Rules), Equals, 1)
+	c.Assert(res.Rules[0].ID, Equals, "rule1")
+	c.Assert(res.Rules[0].Expiration, NotNil)
+
+	err = client.DeleteBucketLifecycle(bucketNameTest)
+	c.Assert(err, IsNil)
+
+	// Set three rules
+	rules = []LifecycleRule{*rule1, *rule2, *rule3}
+	err = client.SetBucketLifecycle(bucketNameTest, rules)
+	c.Assert(err, IsNil)
+
+	// Eliminate the effect of caching
+	time.Sleep(timeoutInOperation)
+
+	res, err = client.GetBucketLifecycle(bucketNameTest)
+	c.Assert(err, IsNil)
+	c.Assert(len(res.Rules), Equals, 3)
+	c.Assert(res.Rules[0].ID, Equals, "rule1")
+	c.Assert(res.Rules[0].Expiration, NotNil)
+	c.Assert(res.Rules[1].ID, Equals, "rule2")
+	c.Assert(res.Rules[1].AbortMultipartUpload, NotNil)
+	c.Assert(res.Rules[2].ID, Equals, "rule3")
+	c.Assert(res.Rules[2].Transition, NotNil)
 
 	err = client.DeleteBucket(bucketNameTest)
 	c.Assert(err, IsNil)
@@ -662,8 +714,8 @@ func (s *OssClientSuite) TestSetBucketLifecycle(c *C) {
 func (s *OssClientSuite) TestDeleteBucketLifecycle(c *C) {
 	var bucketNameTest = bucketNamePrefix + randLowStr(6)
 
-	var rule1 = BuildLifecycleRuleByDate("idone", "one", true, 2015, 11, 11)
-	var rule2 = BuildLifecycleRuleByDays("idtwo", "two", true, 3)
+	var rule1 = BuildLifecycleRuleByDate("rule1", "one", true, 2015, 11, 11)
+	var rule2 = BuildLifecycleRuleByDays("rule2", "two", true, 3)
 	var rules = []LifecycleRule{rule1, rule2}
 
 	client, err := New(endpoint, accessID, accessKey)

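The new TestSetBucketLifecycleNew checks that each rule returned by GetBucketLifecycle carries exactly one action block (Expiration, AbortMultipartUpload, or Transition). Below is a minimal, hypothetical sketch, not part of this commit, that summarizes returned rules in the same way; the endpoint, credentials, and bucket name are placeholders, and the import path is assumed to be the SDK's published one.

	package main

	import (
		"fmt"

		"github.com/aliyun/aliyun-oss-go-sdk/oss"
	)

	// describeRules prints which lifecycle action each rule carries,
	// mirroring the NotNil assertions in TestSetBucketLifecycleNew.
	func describeRules(res oss.GetBucketLifecycleResult) {
		for _, r := range res.Rules {
			switch {
			case r.Expiration != nil:
				fmt.Println(r.ID, "=> expiration rule")
			case r.AbortMultipartUpload != nil:
				fmt.Println(r.ID, "=> abort multipart upload rule")
			case r.Transition != nil:
				fmt.Println(r.ID, "=> transition rule")
			}
		}
	}

	func main() {
		// Placeholder endpoint and credentials.
		client, err := oss.New("oss-cn-hangzhou.aliyuncs.com", "<accessKeyID>", "<accessKeySecret>")
		if err != nil {
			fmt.Println("Error:", err)
			return
		}
		res, err := client.GetBucketLifecycle("<bucket-name>")
		if err != nil {
			fmt.Println("Error:", err)
			return
		}
		describeRules(res)
	}
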
+ 0 - 1
oss/crc_test.go

@@ -27,7 +27,6 @@ func (s *OssCrcSuite) SetUpSuite(c *C) {
 	s.client = client
 
 	s.client.CreateBucket(bucketName)
-	// time.Sleep(timeoutInOperation)
 
 	bucket, err := s.client.Bucket(bucketName)
 	c.Assert(err, IsNil)

+ 0 - 1
oss/download_test.go

@@ -23,7 +23,6 @@ func (s *OssDownloadSuite) SetUpSuite(c *C) {
 	s.client = client
 
 	s.client.CreateBucket(bucketName)
-	// time.Sleep(timeoutInOperation)
 
 	bucket, err := s.client.Bucket(bucketName)
 	c.Assert(err, IsNil)

+ 0 - 1
oss/multicopy_test.go

@@ -22,7 +22,6 @@ func (s *OssCopySuite) SetUpSuite(c *C) {
 	s.client = client
 
 	s.client.CreateBucket(bucketName)
-	// time.Sleep(timeoutInOperation)
 
 	bucket, err := s.client.Bucket(bucketName)
 	c.Assert(err, IsNil)

+ 16 - 18
oss/multipart_test.go

@@ -7,7 +7,6 @@ import (
 	"net/http"
 	"os"
 	"strconv"
-	"time"
 
 	. "gopkg.in/check.v1"
 )
@@ -26,7 +25,6 @@ func (s *OssBucketMultipartSuite) SetUpSuite(c *C) {
 	s.client = client
 
 	s.client.CreateBucket(bucketName)
-	time.Sleep(timeoutInOperation)
 
 	bucket, err := s.client.Bucket(bucketName)
 	c.Assert(err, IsNil)
@@ -220,7 +218,7 @@ func (s *OssBucketMultipartSuite) TestMultipartUploadFromFile(c *C) {
 // TestUploadPartCopy
 func (s *OssBucketMultipartSuite) TestUploadPartCopy(c *C) {
 	objectSrc := objectNamePrefix + randStr(8) + "-src"
-	objectDest := objectNamePrefix + randStr(8) + "-desc"
+	objectDest := objectNamePrefix + randStr(8) + "-dest"
 	var fileName = "../sample/BingWallpaper-2015-11-07.jpg"
 
 	chunks, err := SplitFileByPartNum(fileName, 3)
@@ -265,7 +263,7 @@ func (s *OssBucketMultipartSuite) TestUploadPartCopy(c *C) {
 func (s *OssBucketMultipartSuite) TestListUploadedParts(c *C) {
 	objectName := objectNamePrefix + randStr(8)
 	objectSrc := objectName + "-src"
-	objectDesc := objectName + "-desc"
+	objectDest := objectName + "-dest"
 	var fileName = "../sample/BingWallpaper-2015-11-07.jpg"
 
 	chunks, err := SplitFileByPartSize(fileName, 100*1024)
@@ -285,7 +283,7 @@ func (s *OssBucketMultipartSuite) TestListUploadedParts(c *C) {
 	}
 
 	// Copy
-	imurCopy, err := s.bucket.InitiateMultipartUpload(objectDesc)
+	imurCopy, err := s.bucket.InitiateMultipartUpload(objectDest)
 	var partsCopy []UploadPart
 	for _, chunk := range chunks {
 		part, err := s.bucket.UploadPartCopy(imurCopy, bucketName, objectSrc, chunk.Offset, chunk.Size, (int)(chunk.Number))
@@ -315,14 +313,14 @@ func (s *OssBucketMultipartSuite) TestListUploadedParts(c *C) {
 	c.Assert(err, IsNil)
 
 	// Download
-	err = s.bucket.GetObjectToFile(objectDesc, "newpic3.jpg")
+	err = s.bucket.GetObjectToFile(objectDest, "newpic3.jpg")
 	c.Assert(err, IsNil)
 	err = s.bucket.GetObjectToFile(objectName, "newpic4.jpg")
 	c.Assert(err, IsNil)
 
 	err = s.bucket.DeleteObject(objectName)
 	c.Assert(err, IsNil)
-	err = s.bucket.DeleteObject(objectDesc)
+	err = s.bucket.DeleteObject(objectDest)
 	c.Assert(err, IsNil)
 	err = s.bucket.DeleteObject(objectSrc)
 	c.Assert(err, IsNil)
@@ -331,7 +329,7 @@ func (s *OssBucketMultipartSuite) TestListUploadedParts(c *C) {
 func (s *OssBucketMultipartSuite) TestAbortMultipartUpload(c *C) {
 	objectName := objectNamePrefix + randStr(8)
 	objectSrc := objectName + "-src"
-	objectDesc := objectName + "-desc"
+	objectDest := objectName + "-dest"
 	var fileName = "../sample/BingWallpaper-2015-11-07.jpg"
 
 	chunks, err := SplitFileByPartSize(fileName, 100*1024)
@@ -351,7 +349,7 @@ func (s *OssBucketMultipartSuite) TestAbortMultipartUpload(c *C) {
 	}
 
 	// Copy
-	imurCopy, err := s.bucket.InitiateMultipartUpload(objectDesc)
+	imurCopy, err := s.bucket.InitiateMultipartUpload(objectDest)
 	var partsCopy []UploadPart
 	for _, chunk := range chunks {
 		part, err := s.bucket.UploadPartCopy(imurCopy, bucketName, objectSrc, chunk.Offset, chunk.Size, (int)(chunk.Number))
@@ -387,7 +385,7 @@ func (s *OssBucketMultipartSuite) TestAbortMultipartUpload(c *C) {
 	c.Assert(len(lmur.Uploads), Equals, 0)
 
 	// Download
-	err = s.bucket.GetObjectToFile(objectDesc, "newpic3.jpg")
+	err = s.bucket.GetObjectToFile(objectDest, "newpic3.jpg")
 	c.Assert(err, NotNil)
 	err = s.bucket.GetObjectToFile(objectName, "newpic4.jpg")
 	c.Assert(err, NotNil)
@@ -396,7 +394,7 @@ func (s *OssBucketMultipartSuite) TestAbortMultipartUpload(c *C) {
 // TestUploadPartCopyWithConstraints
 func (s *OssBucketMultipartSuite) TestUploadPartCopyWithConstraints(c *C) {
 	objectSrc := objectNamePrefix + randStr(8) + "-src"
-	objectDesc := objectNamePrefix + randStr(8) + "-desc"
+	objectDest := objectNamePrefix + randStr(8) + "-dest"
 	var fileName = "../sample/BingWallpaper-2015-11-07.jpg"
 
 	chunks, err := SplitFileByPartNum(fileName, 3)
@@ -406,7 +404,7 @@ func (s *OssBucketMultipartSuite) TestUploadPartCopyWithConstraints(c *C) {
 	err = s.bucket.PutObjectFromFile(objectSrc, fileName)
 	c.Assert(err, IsNil)
 
-	imur, err := s.bucket.InitiateMultipartUpload(objectDesc)
+	imur, err := s.bucket.InitiateMultipartUpload(objectDest)
 	var parts []UploadPart
 	for _, chunk := range chunks {
 		_, err = s.bucket.UploadPartCopy(imur, bucketName, objectSrc, chunk.Offset, chunk.Size, (int)(chunk.Number),
@@ -441,12 +439,12 @@ func (s *OssBucketMultipartSuite) TestUploadPartCopyWithConstraints(c *C) {
 	c.Assert(err, IsNil)
 	testLogger.Println("cmur:", cmur)
 
-	err = s.bucket.GetObjectToFile(objectDesc, "newpic5.jpg")
+	err = s.bucket.GetObjectToFile(objectDest, "newpic5.jpg")
 	c.Assert(err, IsNil)
 
 	err = s.bucket.DeleteObject(objectSrc)
 	c.Assert(err, IsNil)
-	err = s.bucket.DeleteObject(objectDesc)
+	err = s.bucket.DeleteObject(objectDest)
 	c.Assert(err, IsNil)
 }
 
@@ -487,7 +485,7 @@ func (s *OssBucketMultipartSuite) TestMultipartUploadFromFileOutofOrder(c *C) {
 // TestUploadPartCopyOutofOrder
 func (s *OssBucketMultipartSuite) TestUploadPartCopyOutofOrder(c *C) {
 	objectSrc := objectNamePrefix + randStr(8) + "-src"
-	objectDesc := objectNamePrefix + randStr(8) + "-desc"
+	objectDest := objectNamePrefix + randStr(8) + "-dest"
 	var fileName = "../sample/BingWallpaper-2015-11-07.jpg"
 
 	chunks, err := SplitFileByPartSize(fileName, 1024*100)
@@ -498,7 +496,7 @@ func (s *OssBucketMultipartSuite) TestUploadPartCopyOutofOrder(c *C) {
 	err = s.bucket.PutObjectFromFile(objectSrc, fileName)
 	c.Assert(err, IsNil)
 
-	imur, err := s.bucket.InitiateMultipartUpload(objectDesc)
+	imur, err := s.bucket.InitiateMultipartUpload(objectDest)
 	var parts []UploadPart
 	for _, chunk := range chunks {
 		_, err := s.bucket.UploadPartCopy(imur, bucketName, objectSrc, chunk.Offset, chunk.Size, (int)(chunk.Number))
@@ -515,12 +513,12 @@ func (s *OssBucketMultipartSuite) TestUploadPartCopyOutofOrder(c *C) {
 	c.Assert(err, IsNil)
 	testLogger.Println("cmur:", cmur)
 
-	err = s.bucket.GetObjectToFile(objectDesc, "newpic7.jpg")
+	err = s.bucket.GetObjectToFile(objectDest, "newpic7.jpg")
 	c.Assert(err, IsNil)
 
 	err = s.bucket.DeleteObject(objectSrc)
 	c.Assert(err, IsNil)
-	err = s.bucket.DeleteObject(objectDesc)
+	err = s.bucket.DeleteObject(objectDest)
 	c.Assert(err, IsNil)
 }
 

+ 0 - 1
oss/progress_test.go

@@ -26,7 +26,6 @@ func (s *OssProgressSuite) SetUpSuite(c *C) {
 	s.client = client
 
 	s.client.CreateBucket(bucketName)
-	// time.Sleep(timeoutInOperation)
 
 	bucket, err := s.client.Bucket(bucketName)
 	c.Assert(err, IsNil)

+ 15 - 3
oss/type.go

@@ -102,10 +102,10 @@ func BuildLifecycleRuleByDate(id, prefix string, status bool, year, month, day i
 		Expiration: &LifecycleExpiration{Date: date}}
 }
 
-// NewLifecleRuleByDays builds a lifecycle rule objects will expiration in days after the last modified time
-func NewLifecleRuleByDays(id, prefix string, status bool, days int, lrt LifecycleRuleType, sc ...StorageClassType) (*LifecycleRule, error) {
+// NewLifecycleRuleByDays builds a lifecycle rule in which objects expire a given number of days after their last modified time
+func NewLifecycleRuleByDays(id, prefix string, status bool, days int, lrt LifecycleRuleType, sc ...StorageClassType) (*LifecycleRule, error) {
 	if len(sc) > 1 {
-		return nil, fmt.Errorf("invalid count of storage class type, the cound should be 0 or 1")
+		return nil, fmt.Errorf("invalid count of storage class type, the count should be 0 or 1")
 	}
 
 	var statusStr = "Enabled"
@@ -114,6 +114,9 @@ func NewLifecleRuleByDays(id, prefix string, status bool, days int, lrt Lifecycl
 	}
 	switch lrt {
 	case LRTExpriration:
+		if len(sc) == 1 {
+			return nil, fmt.Errorf("the count of storage class type should be 0")
+		}
 		return &LifecycleRule{ID: id, Prefix: prefix, Status: statusStr,
 			Expiration: &LifecycleExpiration{Days: days}}, nil
 	case LRTTransition:
@@ -126,6 +129,9 @@ func NewLifecleRuleByDays(id, prefix string, status bool, days int, lrt Lifecycl
 		return &LifecycleRule{ID: id, Prefix: prefix, Status: statusStr,
 			Transition: &LifecycleTransition{Days: days, StorageClass: sc[0]}}, nil
 	case LRTAbortMultiPartUpload:
+		if len(sc) == 1 {
+			return nil, fmt.Errorf("the count of storage class type should be 0")
+		}
 		return &LifecycleRule{ID: id, Prefix: prefix, Status: statusStr,
 			AbortMultipartUpload: &LifecycleAbortMultipartUpload{Days: days}}, nil
 	default:
@@ -147,6 +153,9 @@ func NewLifecycleRuleByCreateBeforeDate(id, prefix string, status bool, year, mo
 	date := time.Date(year, time.Month(month), day, 0, 0, 0, 0, time.UTC).Format(iso8601DateFormat)
 	switch lrt {
 	case LRTExpriration:
+		if len(sc) == 1 {
+			return nil, fmt.Errorf("the count of storage class type should be 0")
+		}
 		return &LifecycleRule{ID: id, Prefix: prefix, Status: statusStr,
 			Expiration: &LifecycleExpiration{CreatedBeforeDate: date}}, nil
 	case LRTTransition:
@@ -159,6 +168,9 @@ func NewLifecycleRuleByCreateBeforeDate(id, prefix string, status bool, year, mo
 		return &LifecycleRule{ID: id, Prefix: prefix, Status: statusStr,
 			Transition: &LifecycleTransition{CreatedBeforeDate: date, StorageClass: sc[0]}}, nil
 	case LRTAbortMultiPartUpload:
+		if len(sc) == 1 {
+			return nil, fmt.Errorf("the count of storage class type should be 0")
+		}
 		return &LifecycleRule{ID: id, Prefix: prefix, Status: statusStr,
 			AbortMultipartUpload: &LifecycleAbortMultipartUpload{CreatedBeforeDate: date}}, nil
 	default:

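Taken together, the new length checks in NewLifecycleRuleByDays and NewLifecycleRuleByCreateBeforeDate mean the variadic storage class argument is rejected for expiration and abort-multipart-upload rules, while a transition rule needs exactly one non-Standard storage class. The following short, hypothetical sketch of the resulting call patterns is not part of this commit; it mirrors the cases exercised in oss/type_test.go below, and the import path is assumed to be the SDK's published one.

	package main

	import (
		"fmt"

		"github.com/aliyun/aliyun-oss-go-sdk/oss"
	)

	func main() {
		// Expiration and abort-multipart-upload rules must not carry a storage class.
		if _, err := oss.NewLifecycleRuleByDays("r1", "logs/", true, 30, oss.LRTExpriration); err != nil {
			fmt.Println("unexpected:", err)
		}
		if _, err := oss.NewLifecycleRuleByDays("r2", "logs/", true, 30, oss.LRTExpriration, oss.StorageIA); err != nil {
			fmt.Println("rejected as expected:", err)
		}

		// Transition rules require exactly one storage class, and StorageStandard is not allowed.
		if _, err := oss.NewLifecycleRuleByDays("r3", "logs/", true, 30, oss.LRTTransition, oss.StorageArchive); err != nil {
			fmt.Println("unexpected:", err)
		}
		if _, err := oss.NewLifecycleRuleByDays("r4", "logs/", true, 30, oss.LRTTransition); err != nil {
			fmt.Println("rejected as expected:", err)
		}
	}
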
+ 68 - 21
oss/type_test.go

@@ -18,27 +18,6 @@ var (
 	chnURLStr = url.QueryEscape(chnStr)
 )
 
-/*
-func (s *OssTypeSuite) TestConvLifecycleRule(c *C) {
-	r1 := BuildLifecycleRuleByDate("id1", "one", true, 2015, 11, 11)
-	r2 := BuildLifecycleRuleByDays("id2", "two", false, 3)
-
-	rs := convLifecycleRule([]LifecycleRule{r1})
-	c.Assert(rs[0].ID, Equals, "id1")
-	c.Assert(rs[0].Prefix, Equals, "one")
-	c.Assert(rs[0].Status, Equals, "Enabled")
-	c.Assert(rs[0].Expiration.Date, Equals, "2015-11-11T00:00:00.000Z")
-	c.Assert(rs[0].Expiration.Days, Equals, 0)
-
-	rs = convLifecycleRule([]LifecycleRule{r2})
-	c.Assert(rs[0].ID, Equals, "id2")
-	c.Assert(rs[0].Prefix, Equals, "two")
-	c.Assert(rs[0].Status, Equals, "Disabled")
-	c.Assert(rs[0].Expiration.Date, Equals, "")
-	c.Assert(rs[0].Expiration.Days, Equals, 3)
-}
-*/
-
 func (s *OssTypeSuite) TestDecodeDeleteObjectsResult(c *C) {
 	var res DeleteObjectsResult
 	err := decodeDeleteObjectsResult(&res)
@@ -127,3 +106,71 @@ func (s *OssTypeSuite) TestSortUploadPart(c *C) {
 	c.Assert(parts[4].PartNumber, Equals, 5)
 	c.Assert(parts[4].ETag, Equals, "E5")
 }
+
+func (s *OssTypeSuite) TestNewLifecycleRuleByDays(c *C) {
+	_, err := NewLifecycleRuleByDays("rule1", "one", true, 30, LRTExpriration)
+	c.Assert(err, IsNil)
+
+	_, err = NewLifecycleRuleByDays("rule2", "two", true, 30, LRTAbortMultiPartUpload)
+	c.Assert(err, IsNil)
+
+	_, err = NewLifecycleRuleByDays("rule3", "three", true, 30, LRTTransition, StorageIA)
+	c.Assert(err, IsNil)
+
+	_, err = NewLifecycleRuleByDays("rule4", "four", true, 30, LRTTransition, StorageArchive)
+	c.Assert(err, IsNil)
+
+	// expiration lifecycle type, set storage class type
+	_, err = NewLifecycleRuleByDays("rule5", "five", true, 30, LRTExpriration, StorageIA)
+	c.Assert(err, NotNil)
+
+	// abort multipart upload lifecycle type, set storage class type
+	_, err = NewLifecycleRuleByDays("rule6", "six", true, 30, LRTAbortMultiPartUpload, StorageIA)
+	c.Assert(err, NotNil)
+
+	// transition lifecycle type, the value of storage class type is StorageStandard
+	_, err = NewLifecycleRuleByDays("rule7", "seven", true, 30, LRTTransition, StorageStandard)
+	c.Assert(err, NotNil)
+
+	// transition lifecycle type, do not set storage class type
+	_, err = NewLifecycleRuleByDays("rule8", "eight", true, 30, LRTTransition)
+	c.Assert(err, NotNil)
+
+	// transition lifecycle type, set two storage class types
+	_, err = NewLifecycleRuleByDays("rule9", "nine", true, 30, LRTTransition, StorageIA, StorageArchive)
+	c.Assert(err, NotNil)
+}
+
+func (s *OssTypeSuite) TestNewLifecycleRuleByCreateBeforeDate(c *C) {
+	_, err := NewLifecycleRuleByCreateBeforeDate("rule1", "one", true, 2019, 3, 30, LRTExpriration)
+	c.Assert(err, IsNil)
+
+	_, err = NewLifecycleRuleByCreateBeforeDate("rule2", "two", true, 2019, 3, 30, LRTAbortMultiPartUpload)
+	c.Assert(err, IsNil)
+
+	_, err = NewLifecycleRuleByCreateBeforeDate("rule3", "three", true, 2019, 3, 30, LRTTransition, StorageIA)
+	c.Assert(err, IsNil)
+
+	_, err = NewLifecycleRuleByCreateBeforeDate("rule4", "four", true, 2019, 3, 30, LRTTransition, StorageArchive)
+	c.Assert(err, IsNil)
+
+	// expiration lifecycle type, set storage class type
+	_, err = NewLifecycleRuleByCreateBeforeDate("rule5", "five", true, 2019, 3, 30, LRTExpriration, StorageIA)
+	c.Assert(err, NotNil)
+
+	// abort multipart upload lifecycle type, set storage class type
+	_, err = NewLifecycleRuleByCreateBeforeDate("rule6", "six", true, 2019, 3, 30, LRTAbortMultiPartUpload, StorageIA)
+	c.Assert(err, NotNil)
+
+	// transition lifecycle type, the value of storage class type is StorageStandard
+	_, err = NewLifecycleRuleByCreateBeforeDate("rule7", "seven", true, 2019, 3, 30, LRTTransition, StorageStandard)
+	c.Assert(err, NotNil)
+
+	// transition lifecycle type, do not set storage class type
+	_, err = NewLifecycleRuleByCreateBeforeDate("rule8", "eight", true, 2019, 3, 30, LRTTransition)
+	c.Assert(err, NotNil)
+
+	// transition lifecycle type, set two storage class types
+	_, err = NewLifecycleRuleByCreateBeforeDate("rule9", "nine", true, 2019, 3, 30, LRTTransition, StorageIA, StorageArchive)
+	c.Assert(err, NotNil)
+}

+ 0 - 1
oss/upload_test.go

@@ -23,7 +23,6 @@ func (s *OssUploadSuite) SetUpSuite(c *C) {
 	s.client = client
 
 	s.client.CreateBucket(bucketName)
-	// time.Sleep(timeoutInOperation)
 
 	bucket, err := s.client.Bucket(bucketName)
 	c.Assert(err, IsNil)

+ 2 - 2
sample/bucket_lifecycle.go

@@ -34,7 +34,7 @@ func BucketLifecycleSample() {
 
 	// Case 2: Set the lifecycle, The rule ID is id2 and the applied objects' prefix is two and the expired time is three days after the object created.
 	//var rule2 = oss.BuildLifecycleRuleByDays("id2", "two", true, 3)
-	rule2, err := oss.NewLifecleRuleByDays("id2", "two", true, 3, oss.LRTTransition, oss.StorageIA)
+	rule2, err := oss.NewLifecycleRuleByDays("id2", "two", true, 3, oss.LRTTransition, oss.StorageIA)
 	if err != nil {
 		HandleError(err)
 	}
@@ -51,7 +51,7 @@ func BucketLifecycleSample() {
 	}
 	fmt.Println("Bucket Lifecycle:", lc.Rules)
 
-	rule3, err := oss.NewLifecleRuleByDays("id3", "three", true, 3, oss.LRTAbortMultiPartUpload)
+	rule3, err := oss.NewLifecycleRuleByDays("id3", "three", true, 3, oss.LRTAbortMultiPartUpload)
 	if err != nil {
 		HandleError(err)
 	}