Selaa lähdekoodia

Merge pull request #167 from aliyun/preview_1.9.6

Preview 1.9.6
fengyu 6 vuotta sitten
vanhempi
commit
08079eb9f6

+ 12 - 0
CHANGELOG.md

@@ -1,4 +1,16 @@
 # ChangeLog - Aliyun OSS SDK for Go
+## 版本号:v1.9.6 日期:2019-04-15
+### 变更内容
+- 变更:扩展lifecycle功能,提供设置AbortMultipartUpload和Transitions两种规则的生命周期管理的处理
+- 修复:测试用例BucketName使用固定前缀+随机的字符串
+- 修复:测试用例ObjectName使用固定前缀+随机字符串
+- 修复:测试用例有关bucket相关的异步操作,统一定义sleep时间
+- 修复:测试集结束后,列出bucket内的所有对象并删除所有测试的对象
+- 修复:测试集结束后,列出bucket内的所有未上传完成的分片并删除所有测试过程中产生的未上传完成的分片
+- 修复:支持上传webp类型的对象时从对象的后缀名字自动解析对应的content-type并设置content-type
+- 变更:增加在put/copy/append等接口时设置对象的存储类型的sample
+- 修复:sample示例中的配置项的值改为直接从环境变量读取
+
 ## 版本号:1.9.5 日期:2019-03-08
 ### 变更内容
 - 变更:增加了限速上传功能

+ 0 - 3
README-CN.md

@@ -12,9 +12,6 @@
 > - OSS适合存放任意文件类型,适合各种网站、开发企业及开发者使用。
 > - 使用此SDK,用户可以方便地在任何应用、任何时间、任何地点上传,下载和管理数据。
 
-## 版本
-> - 当前版本:1.9.5
-
 ## 运行环境
 > - Go 1.5及以上。
 

+ 0 - 3
README.md

@@ -12,9 +12,6 @@
 > - The OSS can store any type of files and therefore applies to various websites, development enterprises and developers.
 > - With this SDK, you can upload, download and manage data on any app anytime and anywhere conveniently. 
 
-## Version
-> - Current version: 1.9.5
-
 ## Running Environment
 > - Go 1.5 or above. 
 

+ 134 - 95
oss/bucket_test.go

@@ -46,8 +46,6 @@ func (s *OssBucketSuite) SetUpSuite(c *C) {
 	err = s.client.CreateBucket(archiveBucketName, StorageClass(StorageArchive))
 	c.Assert(err, IsNil)
 
-	time.Sleep(5 * time.Second)
-
 	bucket, err := s.client.Bucket(bucketName)
 	c.Assert(err, IsNil)
 	s.bucket = bucket
@@ -63,23 +61,41 @@ func (s *OssBucketSuite) SetUpSuite(c *C) {
 func (s *OssBucketSuite) TearDownSuite(c *C) {
 	for _, bucket := range []*Bucket{s.bucket, s.archiveBucket} {
 		// Delete multipart
-		lmu, err := bucket.ListMultipartUploads()
-		c.Assert(err, IsNil)
-
-		for _, upload := range lmu.Uploads {
-			imur := InitiateMultipartUploadResult{Bucket: bucketName, Key: upload.Key, UploadID: upload.UploadID}
-			err = bucket.AbortMultipartUpload(imur)
+		keyMarker := KeyMarker("")
+		uploadIDMarker := UploadIDMarker("")
+		for {
+			lmu, err := bucket.ListMultipartUploads(keyMarker, uploadIDMarker)
 			c.Assert(err, IsNil)
+			for _, upload := range lmu.Uploads {
+				imur := InitiateMultipartUploadResult{Bucket: bucketName, Key: upload.Key, UploadID: upload.UploadID}
+				err = bucket.AbortMultipartUpload(imur)
+				c.Assert(err, IsNil)
+			}
+			keyMarker = KeyMarker(lmu.NextKeyMarker)
+			uploadIDMarker = UploadIDMarker(lmu.NextUploadIDMarker)
+			if !lmu.IsTruncated {
+				break
+			}
 		}
 
 		// Delete objects
-		lor, err := bucket.ListObjects()
-		c.Assert(err, IsNil)
-
-		for _, object := range lor.Objects {
-			err = bucket.DeleteObject(object.Key)
+		marker := Marker("")
+		for {
+			lor, err := bucket.ListObjects(marker)
 			c.Assert(err, IsNil)
+			for _, object := range lor.Objects {
+				err = bucket.DeleteObject(object.Key)
+				c.Assert(err, IsNil)
+			}
+			marker = Marker(lor.NextMarker)
+			if !lor.IsTruncated {
+				break
+			}
 		}
+
+		// Delete bucket
+		err := s.client.DeleteBucket(bucket.BucketName)
+		c.Assert(err, IsNil)
 	}
 
 	testLogger.Println("test bucket completed")
@@ -111,7 +127,7 @@ func (s *OssBucketSuite) TearDownTest(c *C) {
 
 // TestPutObject
 func (s *OssBucketSuite) TestPutObject(c *C) {
-	objectName := objectNamePrefix + "tpo"
+	objectName := objectNamePrefix + randStr(8)
 	objectValue := "大江东去,浪淘尽,千古风流人物。 故垒西边,人道是、三国周郎赤壁。 乱石穿空,惊涛拍岸,卷起千堆雪。 江山如画,一时多少豪杰。" +
 		"遥想公谨当年,小乔初嫁了,雄姿英发。 羽扇纶巾,谈笑间、樯橹灰飞烟灭。故国神游,多情应笑我,早生华发,人生如梦,一尊还酹江月。"
 
@@ -169,7 +185,7 @@ func (s *OssBucketSuite) TestPutObject(c *C) {
 	c.Assert(err, IsNil)
 
 	// Put with properties
-	objectName = objectNamePrefix + "tpox"
+	objectName = objectNamePrefix + randStr(8)
 	options := []Option{
 		Expires(futureDate),
 		ObjectACL(ACLPublicRead),
@@ -200,7 +216,7 @@ func (s *OssBucketSuite) TestPutObject(c *C) {
 }
 
 func (s *OssBucketSuite) TestSignURL(c *C) {
-	objectName := objectNamePrefix + randStr(5)
+	objectName := objectNamePrefix + randStr(8)
 	objectValue := randStr(20)
 
 	filePath := randLowStr(10)
@@ -579,7 +595,7 @@ func (s *OssBucketSuite) TestSignURLWithEscapedKeyAndPorxy(c *C) {
 
 // TestPutObjectType
 func (s *OssBucketSuite) TestPutObjectType(c *C) {
-	objectName := objectNamePrefix + "tptt"
+	objectName := objectNamePrefix + randStr(8)
 	objectValue := "乱石穿空,惊涛拍岸,卷起千堆雪。 江山如画,一时多少豪杰。"
 
 	// Put
@@ -587,7 +603,6 @@ func (s *OssBucketSuite) TestPutObjectType(c *C) {
 	c.Assert(err, IsNil)
 
 	// Check
-	time.Sleep(time.Second)
 	body, err := s.bucket.GetObject(objectName)
 	c.Assert(err, IsNil)
 	str, err := readBody(body)
@@ -626,7 +641,7 @@ func (s *OssBucketSuite) TestPutObjectType(c *C) {
 
 // TestPutObject
 func (s *OssBucketSuite) TestPutObjectKeyChars(c *C) {
-	objectName := objectNamePrefix + "tpokc"
+	objectName := objectNamePrefix + randStr(8)
 	objectValue := "白日依山尽,黄河入海流。欲穷千里目,更上一层楼。"
 
 	// Put
@@ -692,11 +707,11 @@ func (s *OssBucketSuite) TestPutObjectKeyChars(c *C) {
 
 // TestPutObjectNegative
 func (s *OssBucketSuite) TestPutObjectNegative(c *C) {
-	objectName := objectNamePrefix + "tpon"
+	objectName := objectNamePrefix + randStr(8)
 	objectValue := "大江东去,浪淘尽,千古风流人物。 "
 
 	// Put
-	objectName = objectNamePrefix + "tpox"
+	objectName = objectNamePrefix + randStr(8)
 	err := s.bucket.PutObject(objectName, strings.NewReader(objectValue),
 		Meta("meta-my", "myprop"))
 	c.Assert(err, IsNil)
@@ -730,7 +745,7 @@ func (s *OssBucketSuite) TestPutObjectNegative(c *C) {
 
 // TestPutObjectFromFile
 func (s *OssBucketSuite) TestPutObjectFromFile(c *C) {
-	objectName := objectNamePrefix + "tpoff"
+	objectName := objectNamePrefix + randStr(8)
 	localFile := "../sample/BingWallpaper-2015-11-07.jpg"
 	newFile := "newpic11.jpg"
 
@@ -787,9 +802,9 @@ func (s *OssBucketSuite) TestPutObjectFromFile(c *C) {
 
 // TestPutObjectFromFile
 func (s *OssBucketSuite) TestPutObjectFromFileType(c *C) {
-	objectName := objectNamePrefix + "tpoffwm"
+	objectName := objectNamePrefix + randStr(8)
 	localFile := "../sample/BingWallpaper-2015-11-07.jpg"
-	newFile := "newpic11.jpg"
+	newFile := randStr(8) + ".jpg"
 
 	// Put
 	err := s.bucket.PutObjectFromFile(objectName, localFile)
@@ -813,7 +828,7 @@ func (s *OssBucketSuite) TestPutObjectFromFileType(c *C) {
 
 // TestGetObject
 func (s *OssBucketSuite) TestGetObject(c *C) {
-	objectName := objectNamePrefix + "tgo"
+	objectName := objectNamePrefix + randStr(8)
 	objectValue := "长忆观潮,满郭人争江上望。来疑沧海尽成空,万面鼓声中。弄潮儿向涛头立,手把红旗旗不湿。别来几向梦中看,梦觉尚心寒。"
 
 	// Put
@@ -876,7 +891,7 @@ func (s *OssBucketSuite) TestGetObject(c *C) {
 
 // TestGetObjectNegative
 func (s *OssBucketSuite) TestGetObjectToWriterNegative(c *C) {
-	objectName := objectNamePrefix + "tgotwn"
+	objectName := objectNamePrefix + randStr(8)
 	objectValue := "长忆观潮,满郭人争江上望。"
 
 	// Object not exist
@@ -908,9 +923,9 @@ func (s *OssBucketSuite) TestGetObjectToWriterNegative(c *C) {
 
 // TestGetObjectToFile
 func (s *OssBucketSuite) TestGetObjectToFile(c *C) {
-	objectName := objectNamePrefix + "tgotf"
+	objectName := objectNamePrefix + randStr(8)
 	objectValue := "江南好,风景旧曾谙;日出江花红胜火,春来江水绿如蓝。能不忆江南?江南忆,最忆是杭州;山寺月中寻桂子,郡亭枕上看潮头。何日更重游!"
-	newFile := "newpic15.jpg"
+	newFile := randStr(8) + ".jpg"
 
 	// Put
 	var val = []byte(objectValue)
@@ -994,7 +1009,7 @@ func (s *OssBucketSuite) TestGetObjectToFile(c *C) {
 
 // TestListObjects
 func (s *OssBucketSuite) TestListObjects(c *C) {
-	objectName := objectNamePrefix + "tlo"
+	objectName := objectNamePrefix + randStr(8)
 
 	// List empty bucket
 	lor, err := s.bucket.ListObjects()
@@ -1043,10 +1058,10 @@ func (s *OssBucketSuite) TestListObjects(c *C) {
 
 // TestListObjects
 func (s *OssBucketSuite) TestListObjectsEncodingType(c *C) {
-	objectName := objectNamePrefix + "床前明月光,疑是地上霜。举头望明月,低头思故乡。" + "tloet"
+	prefix := objectNamePrefix + "床前明月光,疑是地上霜。举头望明月,低头思故乡。"
 
 	for i := 0; i < 10; i++ {
-		err := s.bucket.PutObject(objectName+strconv.Itoa(i), strings.NewReader(""))
+		err := s.bucket.PutObject(prefix+strconv.Itoa(i), strings.NewReader(""))
 		c.Assert(err, IsNil)
 	}
 
@@ -1054,10 +1069,6 @@ func (s *OssBucketSuite) TestListObjectsEncodingType(c *C) {
 	c.Assert(err, IsNil)
 	c.Assert(len(lor.Objects), Equals, 10)
 
-	lor, err = s.bucket.ListObjects(Prefix(objectNamePrefix + "床前明月光,"))
-	c.Assert(err, IsNil)
-	c.Assert(len(lor.Objects), Equals, 10)
-
 	lor, err = s.bucket.ListObjects(Marker(objectNamePrefix + "床前明月光,疑是地上霜。举头望明月,低头思故乡。"))
 	c.Assert(err, IsNil)
 	c.Assert(len(lor.Objects), Equals, 10)
@@ -1065,16 +1076,16 @@ func (s *OssBucketSuite) TestListObjectsEncodingType(c *C) {
 	lor, err = s.bucket.ListObjects(Prefix(objectNamePrefix + "床前明月光"))
 	c.Assert(err, IsNil)
 	for i, obj := range lor.Objects {
-		c.Assert(obj.Key, Equals, objectNamePrefix+"床前明月光,疑是地上霜。举头望明月,低头思故乡。tloet"+strconv.Itoa(i))
+		c.Assert(obj.Key, Equals, prefix+strconv.Itoa(i))
 	}
 
 	for i := 0; i < 10; i++ {
-		err = s.bucket.DeleteObject(objectName + strconv.Itoa(i))
+		err = s.bucket.DeleteObject(prefix + strconv.Itoa(i))
 		c.Assert(err, IsNil)
 	}
 
 	// Special characters
-	objectName = "go go ` ~ ! @ # $ % ^ & * () - _ + =[] {} \\ | < > , . ? / 0"
+	objectName := objectNamePrefix + "` ~ ! @ # $ % ^ & * () - _ + =[] {} \\ | < > , . ? / 0"
 	err = s.bucket.PutObject(objectName, strings.NewReader("明月几时有,把酒问青天"))
 	c.Assert(err, IsNil)
 
@@ -1085,7 +1096,7 @@ func (s *OssBucketSuite) TestListObjectsEncodingType(c *C) {
 	err = s.bucket.DeleteObject(objectName)
 	c.Assert(err, IsNil)
 
-	objectName = "go/中国  日本  +-#&=*"
+	objectName = objectNamePrefix + "中国  日本  +-#&=*"
 	err = s.bucket.PutObject(objectName, strings.NewReader("明月几时有,把酒问青天"))
 	c.Assert(err, IsNil)
 
@@ -1099,7 +1110,7 @@ func (s *OssBucketSuite) TestListObjectsEncodingType(c *C) {
 
 // TestIsBucketExist
 func (s *OssBucketSuite) TestIsObjectExist(c *C) {
-	objectName := objectNamePrefix + "tibe"
+	objectName := objectNamePrefix + randStr(8)
 
 	// Put three objects
 	err := s.bucket.PutObject(objectName+"1", strings.NewReader(""))
@@ -1141,7 +1152,7 @@ func (s *OssBucketSuite) TestIsObjectExist(c *C) {
 
 // TestDeleteObject
 func (s *OssBucketSuite) TestDeleteObject(c *C) {
-	objectName := objectNamePrefix + "tdo"
+	objectName := objectNamePrefix + randStr(8)
 
 	err := s.bucket.PutObject(objectName, strings.NewReader(""))
 	c.Assert(err, IsNil)
@@ -1165,7 +1176,7 @@ func (s *OssBucketSuite) TestDeleteObject(c *C) {
 
 // TestDeleteObjects
 func (s *OssBucketSuite) TestDeleteObjects(c *C) {
-	objectName := objectNamePrefix + "tdos"
+	objectName := objectNamePrefix + randStr(8)
 
 	// Delete objects
 	err := s.bucket.PutObject(objectName, strings.NewReader(""))
@@ -1275,7 +1286,7 @@ func (s *OssBucketSuite) TestDeleteObjects(c *C) {
 
 // TestSetObjectMeta
 func (s *OssBucketSuite) TestSetObjectMeta(c *C) {
-	objectName := objectNamePrefix + "tsom"
+	objectName := objectNamePrefix + randStr(8)
 
 	err := s.bucket.PutObject(objectName, strings.NewReader(""))
 	c.Assert(err, IsNil)
@@ -1313,7 +1324,7 @@ func (s *OssBucketSuite) TestSetObjectMeta(c *C) {
 
 // TestGetObjectMeta
 func (s *OssBucketSuite) TestGetObjectMeta(c *C) {
-	objectName := objectNamePrefix + "tgom"
+	objectName := objectNamePrefix + randStr(8)
 
 	// Put
 	err := s.bucket.PutObject(objectName, strings.NewReader(""))
@@ -1332,7 +1343,7 @@ func (s *OssBucketSuite) TestGetObjectMeta(c *C) {
 
 // TestGetObjectDetailedMeta
 func (s *OssBucketSuite) TestGetObjectDetailedMeta(c *C) {
-	objectName := objectNamePrefix + "tgodm"
+	objectName := objectNamePrefix + randStr(8)
 
 	// Put
 	err := s.bucket.PutObject(objectName, strings.NewReader(""),
@@ -1377,7 +1388,7 @@ func (s *OssBucketSuite) TestGetObjectDetailedMeta(c *C) {
 
 // TestSetAndGetObjectAcl
 func (s *OssBucketSuite) TestSetAndGetObjectAcl(c *C) {
-	objectName := objectNamePrefix + "tsgba"
+	objectName := objectNamePrefix + randStr(8)
 
 	err := s.bucket.PutObject(objectName, strings.NewReader(""))
 	c.Assert(err, IsNil)
@@ -1417,7 +1428,7 @@ func (s *OssBucketSuite) TestSetAndGetObjectAcl(c *C) {
 
 // TestSetAndGetObjectAclNegative
 func (s *OssBucketSuite) TestSetAndGetObjectAclNegative(c *C) {
-	objectName := objectNamePrefix + "tsgban"
+	objectName := objectNamePrefix + randStr(8)
 
 	// Object not exist
 	err := s.bucket.SetObjectACL(objectName, ACLPublicRead)
@@ -1426,7 +1437,7 @@ func (s *OssBucketSuite) TestSetAndGetObjectAclNegative(c *C) {
 
 // TestCopyObject
 func (s *OssBucketSuite) TestCopyObject(c *C) {
-	objectName := objectNamePrefix + "tco"
+	objectName := objectNamePrefix + randStr(8)
 	objectValue := "男儿何不带吴钩,收取关山五十州。请君暂上凌烟阁,若个书生万户侯?"
 
 	err := s.bucket.PutObject(objectName, strings.NewReader(objectValue),
@@ -1555,10 +1566,10 @@ func (s *OssBucketSuite) TestCopyObject(c *C) {
 
 // TestCopyObjectToOrFrom
 func (s *OssBucketSuite) TestCopyObjectToOrFrom(c *C) {
-	objectName := objectNamePrefix + "tcotof" + randLowStr(5)
+	objectName := objectNamePrefix + randStr(8)
 	objectValue := "男儿何不带吴钩,收取关山五十州。请君暂上凌烟阁,若个书生万户侯?"
-	destBucketName := bucketName + "-dest" + randLowStr(5)
-	objectNameDest := objectName + "dest"
+	destBucketName := bucketName + "-dest"
+	objectNameDest := objectName + "-dest"
 
 	err := s.client.CreateBucket(destBucketName)
 	c.Assert(err, IsNil)
@@ -1607,9 +1618,9 @@ func (s *OssBucketSuite) TestCopyObjectToOrFrom(c *C) {
 
 // TestCopyObjectToOrFromNegative
 func (s *OssBucketSuite) TestCopyObjectToOrFromNegative(c *C) {
-	objectName := objectNamePrefix + "tcotofn"
-	destBucket := bucketName + "-destn"
-	objectNameDest := objectName + "destn"
+	objectName := objectNamePrefix + randStr(8)
+	destBucket := bucketName + "-dest"
+	objectNameDest := objectName + "-dest"
 
 	// Object not exist
 	_, err := s.bucket.CopyObjectTo(bucketName, objectName, objectNameDest)
@@ -1622,10 +1633,10 @@ func (s *OssBucketSuite) TestCopyObjectToOrFromNegative(c *C) {
 
 // TestAppendObject
 func (s *OssBucketSuite) TestAppendObject(c *C) {
-	objectName := objectNamePrefix + "tao"
+	objectName := objectNamePrefix + randStr(8)
 	objectValue := "昨夜雨疏风骤,浓睡不消残酒。试问卷帘人,却道海棠依旧。知否?知否?应是绿肥红瘦。"
 	var val = []byte(objectValue)
-	var localFile = "testx.txt"
+	var localFile = randStr(8) + ".txt"
 	var nextPos int64
 	var midPos = 1 + rand.Intn(len(val)-1)
 
@@ -1727,7 +1738,7 @@ func (s *OssBucketSuite) TestAppendObject(c *C) {
 
 // TestAppendObjectNegative
 func (s *OssBucketSuite) TestAppendObjectNegative(c *C) {
-	objectName := objectNamePrefix + "taon"
+	objectName := objectNamePrefix + randStr(8)
 	nextPos := int64(0)
 
 	nextPos, err := s.bucket.AppendObject(objectName, strings.NewReader("ObjectValue"), nextPos)
@@ -1795,7 +1806,7 @@ func (s *OssBucketSuite) TestGetConfig(c *C) {
 }
 
 func (s *OssBucketSuite) TestSTSToken(c *C) {
-	objectName := objectNamePrefix + "tst"
+	objectName := objectNamePrefix + randStr(8)
 	objectValue := "红藕香残玉簟秋。轻解罗裳,独上兰舟。云中谁寄锦书来?雁字回时,月满西楼。"
 
 	stsClient := sts.NewClient(stsaccessID, stsaccessKey, stsARN, "oss_test_sess")
@@ -1849,7 +1860,7 @@ func (s *OssBucketSuite) TestSTSToken(c *C) {
 }
 
 func (s *OssBucketSuite) TestSTSTonekNegative(c *C) {
-	objectName := objectNamePrefix + "tstg"
+	objectName := objectNamePrefix + randStr(8)
 	localFile := objectName + ".jpg"
 
 	client, err := New(endpoint, accessID, accessKey, SecurityToken("Invalid"))
@@ -1905,7 +1916,7 @@ func (s *OssBucketSuite) TestSTSTonekNegative(c *C) {
 }
 
 func (s *OssBucketSuite) TestUploadBigFile(c *C) {
-	objectName := objectNamePrefix + "tubf"
+	objectName := objectNamePrefix + randStr(8)
 	bigFile := "D:\\tmp\\bigfile.zip"
 	newFile := "D:\\tmp\\newbigfile.zip"
 
@@ -1941,8 +1952,8 @@ func (s *OssBucketSuite) TestUploadBigFile(c *C) {
 }
 
 func (s *OssBucketSuite) TestSymlink(c *C) {
-	objectName := objectNamePrefix + "符号链接"
-	targetObjectName := objectNamePrefix + "符号链接目标文件"
+	objectName := objectNamePrefix + randStr(8)
+	targetObjectName := objectName + "target"
 
 	err := s.bucket.DeleteObject(objectName)
 	c.Assert(err, IsNil)
@@ -1999,8 +2010,8 @@ func (s *OssBucketSuite) TestSymlink(c *C) {
 	c.Assert(err, IsNil)
 
 	// Put symlink again
-	objectName = objectNamePrefix + "symlink"
-	targetObjectName = objectNamePrefix + "symlink-target"
+	objectName = objectNamePrefix + randStr(8)
+	targetObjectName = objectName + "-target"
 
 	err = s.bucket.PutSymlink(objectName, targetObjectName)
 	c.Assert(err, IsNil)
@@ -2027,7 +2038,7 @@ func (s *OssBucketSuite) TestSymlink(c *C) {
 
 // TestRestoreObject
 func (s *OssBucketSuite) TestRestoreObject(c *C) {
-	objectName := objectNamePrefix + "restore"
+	objectName := objectNamePrefix + randStr(8)
 
 	// List objects
 	lor, err := s.archiveBucket.ListObjects()
@@ -2071,12 +2082,12 @@ func (s *OssBucketSuite) TestRestoreObject(c *C) {
 
 // TestProcessObject
 func (s *OssBucketSuite) TestProcessObject(c *C) {
-	objectName := objectNamePrefix + "_process_src.jpg"
+	objectName := objectNamePrefix + randStr(8) + ".jpg"
 	err := s.bucket.PutObjectFromFile(objectName, "../sample/BingWallpaper-2015-11-07.jpg")
 	c.Assert(err, IsNil)
 
 	// If bucket-name not specified, it is saved to the current bucket by default.
-	destObjName := objectNamePrefix + "_process_dest_1.jpg"
+	destObjName := objectNamePrefix + randStr(8) + "-dest.jpg"
 	process := fmt.Sprintf("image/resize,w_100|sys/saveas,o_%v", base64.URLEncoding.EncodeToString([]byte(destObjName)))
 	result, err := s.bucket.ProcessObject(objectName, process)
 	c.Assert(err, IsNil)
@@ -2085,7 +2096,7 @@ func (s *OssBucketSuite) TestProcessObject(c *C) {
 	c.Assert(result.Bucket, Equals, "")
 	c.Assert(result.Object, Equals, destObjName)
 
-	destObjName = objectNamePrefix + "_process_dest_1.jpg"
+	destObjName = objectNamePrefix + randStr(8) + "-dest.jpg"
 	process = fmt.Sprintf("image/resize,w_100|sys/saveas,o_%v,b_%v", base64.URLEncoding.EncodeToString([]byte(destObjName)), base64.URLEncoding.EncodeToString([]byte(s.bucket.BucketName)))
 	result, err = s.bucket.ProcessObject(objectName, process)
 	c.Assert(err, IsNil)
@@ -2271,7 +2282,7 @@ func (s *OssBucketSuite) getObject(objects []ObjectProperties, object string) (b
 }
 
 func (s *OssBucketSuite) detectUploadSpeed(bucket *Bucket, c *C) (upSpeed int) {
-	objectName := objectNamePrefix + getUuid()
+	objectName := objectNamePrefix + randStr(8)
 
 	// 1M byte
 	textBuffer := randStr(1024 * 1024)
@@ -2301,12 +2312,11 @@ func (s *OssBucketSuite) TestPutSingleObjectLimitSpeed(c *C) {
 		// go version is less than go1.7,not support limit upload speed
 		// doesn't run this test
 		return
-	} else {
-		// set unlimited again
-		client.LimitUploadSpeed(0)
 	}
+	// set unlimited again
+	client.LimitUploadSpeed(0)
 
-	bucketName := bucketNamePrefix + randLowStr(5)
+	bucketName := bucketNamePrefix + randLowStr(6)
 	err = client.CreateBucket(bucketName)
 	c.Assert(err, IsNil)
 
@@ -2328,7 +2338,7 @@ func (s *OssBucketSuite) TestPutSingleObjectLimitSpeed(c *C) {
 	err = client.LimitUploadSpeed(limitSpeed / perTokenBandwidthSize)
 	c.Assert(err, IsNil)
 
-	objectName := objectNamePrefix + getUuid()
+	objectName := objectNamePrefix + randStr(8)
 
 	// 1M byte
 	textBuffer := randStr(1024 * 1024)
@@ -2384,12 +2394,11 @@ func (s *OssBucketSuite) TestPutManyObjectLimitSpeed(c *C) {
 		// go version is less than go1.7,not support limit upload speed
 		// doesn't run this test
 		return
-	} else {
-		// set unlimited
-		client.LimitUploadSpeed(0)
 	}
+	// set unlimited
+	client.LimitUploadSpeed(0)
 
-	bucketName := bucketNamePrefix + randLowStr(5)
+	bucketName := bucketNamePrefix + randLowStr(6)
 	err = client.CreateBucket(bucketName)
 	c.Assert(err, IsNil)
 
@@ -2410,8 +2419,8 @@ func (s *OssBucketSuite) TestPutManyObjectLimitSpeed(c *C) {
 	c.Assert(err, IsNil)
 
 	// object1
-	objectNameFirst := objectNamePrefix + getUuid()
-	objectNameSecond := objectNamePrefix + getUuid()
+	objectNameFirst := objectNamePrefix + randStr(8)
+	objectNameSecond := objectNamePrefix + randStr(8)
 
 	// 1M byte
 	textBuffer := randStr(1024 * 1024)
@@ -2475,12 +2484,11 @@ func (s *OssBucketSuite) TestPutMultipartObjectLimitSpeed(c *C) {
 		// go version is less than go1.7,not support limit upload speed
 		// doesn't run this test
 		return
-	} else {
-		// set unlimited
-		client.LimitUploadSpeed(0)
 	}
+	// set unlimited
+	client.LimitUploadSpeed(0)
 
-	bucketName := bucketNamePrefix + randLowStr(5)
+	bucketName := bucketNamePrefix + randLowStr(6)
 	err = client.CreateBucket(bucketName)
 	c.Assert(err, IsNil)
 
@@ -2502,7 +2510,7 @@ func (s *OssBucketSuite) TestPutMultipartObjectLimitSpeed(c *C) {
 	err = client.LimitUploadSpeed(limitSpeed / perTokenBandwidthSize)
 	c.Assert(err, IsNil)
 
-	objectName := objectNamePrefix + getUuid()
+	objectName := objectNamePrefix + randStr(8)
 	fileName := "." + string(os.PathSeparator) + objectName
 
 	// 1M byte
@@ -2578,12 +2586,11 @@ func (s *OssBucketSuite) TestPutObjectFromFileLimitSpeed(c *C) {
 		// go version is less than go1.7,not support limit upload speed
 		// doesn't run this test
 		return
-	} else {
-		// set unlimited
-		client.LimitUploadSpeed(0)
 	}
+	// set unlimited
+	client.LimitUploadSpeed(0)
 
-	bucketName := bucketNamePrefix + randLowStr(5)
+	bucketName := bucketNamePrefix + randLowStr(6)
 	err = client.CreateBucket(bucketName)
 	c.Assert(err, IsNil)
 
@@ -2605,7 +2612,7 @@ func (s *OssBucketSuite) TestPutObjectFromFileLimitSpeed(c *C) {
 	err = client.LimitUploadSpeed(limitSpeed / perTokenBandwidthSize)
 	c.Assert(err, IsNil)
 
-	objectName := objectNamePrefix + getUuid()
+	objectName := objectNamePrefix + randStr(8)
 	fileName := "." + string(os.PathSeparator) + objectName
 
 	// 1M byte
@@ -2683,12 +2690,11 @@ func (s *OssBucketSuite) TestUploadObjectLimitSpeed(c *C) {
 		// go version is less than go1.7,not support limit upload speed
 		// doesn't run this test
 		return
-	} else {
-		// set unlimited
-		client.LimitUploadSpeed(0)
 	}
+	// set unlimited
+	client.LimitUploadSpeed(0)
 
-	bucketName := bucketNamePrefix + randLowStr(5)
+	bucketName := bucketNamePrefix + randLowStr(6)
 	err = client.CreateBucket(bucketName)
 	c.Assert(err, IsNil)
 
@@ -2697,7 +2703,7 @@ func (s *OssBucketSuite) TestUploadObjectLimitSpeed(c *C) {
 
 	//first:upload a object
 	textBuffer := randStr(1024 * 100)
-	objectName := objectNamePrefix + getUuid()
+	objectName := objectNamePrefix + randStr(8)
 	err = bucket.PutObject(objectName, strings.NewReader(textBuffer))
 	c.Assert(err, IsNil)
 
@@ -2739,3 +2745,36 @@ func (s *OssBucketSuite) TestLimitUploadSpeedFail(c *C) {
 	err = client.LimitUploadSpeed(100)
 	c.Assert(err, NotNil)
 }
+
+// upload webp object
+func (s *OssBucketSuite) TestUploadObjectWithWebpFormat(c *C) {
+	client, err := New(endpoint, accessID, accessKey)
+	c.Assert(err, IsNil)
+
+	bucketName := bucketNamePrefix + randLowStr(6)
+	err = client.CreateBucket(bucketName)
+	c.Assert(err, IsNil)
+
+	bucket, err := client.Bucket(bucketName)
+	c.Assert(err, IsNil)
+
+	// create webp file
+	textBuffer := randStr(1024)
+	objectName := objectNamePrefix + randStr(8)
+	fileName := "." + string(os.PathSeparator) + objectName + ".webp"
+	ioutil.WriteFile(fileName, []byte(textBuffer), 0644)
+	_, err = os.Stat(fileName)
+	c.Assert(err, IsNil)
+
+	err = bucket.PutObjectFromFile(objectName, fileName)
+	c.Assert(err, IsNil)
+
+	// check object content-type
+	props, err := bucket.GetObjectDetailedMeta(objectName)
+	c.Assert(err, IsNil)
+	c.Assert(props["Content-Type"][0], Equals, "image/webp")
+
+	os.Remove(fileName)
+	bucket.DeleteObject(objectName)
+	client.DeleteBucket(bucketName)
+}

+ 10 - 6
oss/client.go

@@ -260,8 +260,12 @@ func (client Client) GetBucketACL(bucketName string) (GetBucketACLResult, error)
 // error    it's nil if no error, otherwise it's an error object.
 //
 func (client Client) SetBucketLifecycle(bucketName string, rules []LifecycleRule) error {
-	lxml := lifecycleXML{Rules: convLifecycleRule(rules)}
-	bs, err := xml.Marshal(lxml)
+	err := verifyLifecycleRules(rules)
+	if err != nil {
+		return err
+	}
+	lifecycleCfg := LifecycleConfiguration{Rules: rules}
+	bs, err := xml.Marshal(lifecycleCfg)
 	if err != nil {
 		return err
 	}
@@ -650,9 +654,9 @@ func (client Client) GetBucketInfo(bucketName string) (GetBucketInfoResult, erro
 	return out, err
 }
 
-// LimitUploadSpeed: set upload bandwidth limit speed,default is 0,unlimited
-// upSpeed: KB/s, 0 is unlimited,default is 0
-// error:it's nil if success, otherwise failure
+// LimitUploadSpeed set upload bandwidth limit speed,default is 0,unlimited
+// upSpeed KB/s, 0 is unlimited,default is 0
+// error it's nil if success, otherwise failure
 func (client Client) LimitUploadSpeed(upSpeed int) error {
 	if client.Config == nil {
 		return fmt.Errorf("client config is nil")
@@ -789,7 +793,7 @@ func SetLogLevel(LogLevel int) ClientOption {
 }
 
 //
-// SetLogLevel sets the oss sdk log level
+// SetLogger sets the oss sdk logger
 //
 func SetLogger(Logger *log.Logger) ClientOption {
 	return func(client *Client) {

Tiedoston diff-näkymää rajattu, sillä se on liian suuri
+ 334 - 113
oss/client_test.go


+ 6 - 3
oss/conf.go

@@ -8,6 +8,7 @@ import (
 	"time"
 )
 
+// Define the level of the output log
 const (
 	LogOff = iota
 	Error
@@ -16,6 +17,7 @@ const (
 	Debug
 )
 
+// LogTag Tag for each level of log
 var LogTag = []string{"[error]", "[warn]", "[info]", "[debug]"}
 
 // HTTPTimeout defines HTTP timeout.
@@ -27,6 +29,7 @@ type HTTPTimeout struct {
 	IdleConnTimeout  time.Duration
 }
 
+// HTTPMaxConns defines max idle connections and max idle connections per host
 type HTTPMaxConns struct {
 	MaxIdleConns        int
 	MaxIdleConnsPerHost int
@@ -59,10 +62,10 @@ type Config struct {
 	UploadLimiter    *OssLimiter  // Bandwidth limit reader for upload
 }
 
-// LimitUploadSpeed, uploadSpeed:KB/s, 0 is unlimited,default is 0
+// LimitUploadSpeed uploadSpeed:KB/s, 0 is unlimited,default is 0
 func (config *Config) LimitUploadSpeed(uploadSpeed int) error {
 	if uploadSpeed < 0 {
-		return fmt.Errorf("erro,speed is less than 0")
+		return fmt.Errorf("invalid argument, the value of uploadSpeed is less than 0")
 	} else if uploadSpeed == 0 {
 		config.UploadLimitSpeed = 0
 		config.UploadLimiter = nil
@@ -77,7 +80,7 @@ func (config *Config) LimitUploadSpeed(uploadSpeed int) error {
 	return err
 }
 
-// WriteLog
+// WriteLog output log function
 func (config *Config) WriteLog(LogLevel int, format string, a ...interface{}) {
 	if config.LogLevel < LogLevel || config.Logger == nil {
 		return

+ 9 - 7
oss/conn.go

@@ -27,7 +27,7 @@ type Conn struct {
 	client *http.Client
 }
 
-var signKeyList = []string{"acl", "uploads", "location", "cors", "logging", "website", "referer", "lifecycle", "delete", "append", "tagging", "objectMeta", "uploadId", "partNumber", "security-token", "position", "img", "style", "styleName", "replication", "replicationProgress", "replicationLocation", "cname", "bucketInfo", "comp", "qos", "live", "status", "vod", "startTime", "endTime", "symlink", "x-oss-process", "response-content-type", "response-content-language", "response-expires", "response-cache-control", "response-content-disposition", "response-content-encoding", "udf", "udfName", "udfImage", "udfId", "udfImageDesc", "udfApplication", "comp", "udfApplicationLog", "restore", "callback", "callback-var"}
+var signKeyList = []string{"acl", "uploads", "location", "cors", "logging", "website", "referer", "lifecycle", "delete", "append", "tagging", "objectMeta", "uploadId", "partNumber", "security-token", "position", "img", "style", "styleName", "replication", "replicationProgress", "replicationLocation", "cname", "bucketInfo", "comp", "qos", "live", "status", "vod", "startTime", "endTime", "symlink", "x-oss-process", "response-content-type", "response-content-language", "response-expires", "response-cache-control", "response-content-disposition", "response-content-encoding", "udf", "udfName", "udfImage", "udfId", "udfImageDesc", "udfApplication", "comp", "udfApplicationLog", "restore", "callback", "callback-var", "policy"}
 
 // init initializes Conn
 func (conn *Conn) init(config *Config, urlMaker *urlMaker, client *http.Client) error {
@@ -112,7 +112,7 @@ func (conn Conn) DoURL(method HTTPMethod, signedURL string, headers map[string]s
 	publishProgress(listener, event)
 
 	if conn.config.LogLevel >= Debug {
-		conn.LoggerHttpReq(req)
+		conn.LoggerHTTPReq(req)
 	}
 
 	resp, err := conn.client.Do(req)
@@ -125,7 +125,7 @@ func (conn Conn) DoURL(method HTTPMethod, signedURL string, headers map[string]s
 
 	if conn.config.LogLevel >= Debug {
 		//print out http resp
-		conn.LoggerHttpResp(req, resp)
+		conn.LoggerHTTPResp(req, resp)
 	}
 
 	// Transfer completed
@@ -241,7 +241,7 @@ func (conn Conn) doRequest(method string, uri *url.URL, canonicalizedResource st
 	publishProgress(listener, event)
 
 	if conn.config.LogLevel >= Debug {
-		conn.LoggerHttpReq(req)
+		conn.LoggerHTTPReq(req)
 	}
 
 	resp, err := conn.client.Do(req)
@@ -255,7 +255,7 @@ func (conn Conn) doRequest(method string, uri *url.URL, canonicalizedResource st
 
 	if conn.config.LogLevel >= Debug {
 		//print out http resp
-		conn.LoggerHttpResp(req, resp)
+		conn.LoggerHTTPResp(req, resp)
 	}
 
 	// Transfer completed
@@ -457,7 +457,8 @@ func (conn Conn) handleResponse(resp *http.Response, crc hash.Hash64) (*Response
 	}, nil
 }
 
-func (conn Conn) LoggerHttpReq(req *http.Request) {
+// LoggerHTTPReq Print the header information of the http request
+func (conn Conn) LoggerHTTPReq(req *http.Request) {
 	var logBuffer bytes.Buffer
 	logBuffer.WriteString(fmt.Sprintf("[Req:%p]Method:%s\t", req, req.Method))
 	logBuffer.WriteString(fmt.Sprintf("Host:%s\t", req.URL.Host))
@@ -478,7 +479,8 @@ func (conn Conn) LoggerHttpReq(req *http.Request) {
 	conn.config.WriteLog(Debug, "%s\n", logBuffer.String())
 }
 
-func (conn Conn) LoggerHttpResp(req *http.Request, resp *http.Response) {
+// LoggerHTTPResp Print Response to http request
+func (conn Conn) LoggerHTTPResp(req *http.Request, resp *http.Response) {
 	var logBuffer bytes.Buffer
 	logBuffer.WriteString(fmt.Sprintf("[Resp:%p]StatusCode:%d\t", req, resp.StatusCode))
 	logBuffer.WriteString(fmt.Sprintf("Header info:"))

+ 1 - 1
oss/const.go

@@ -142,5 +142,5 @@ const (
 
 	CheckpointFileSuffix = ".cp" // Checkpoint file suffix
 
-	Version = "1.9.5" // Go SDK version
+	Version = "v1.9.6" // Go SDK version
 )

+ 34 - 18
oss/crc_test.go

@@ -9,7 +9,6 @@ import (
 	"math/rand"
 	"os"
 	"strings"
-	"time"
 
 	. "gopkg.in/check.v1"
 )
@@ -28,7 +27,6 @@ func (s *OssCrcSuite) SetUpSuite(c *C) {
 	s.client = client
 
 	s.client.CreateBucket(bucketName)
-	time.Sleep(5 * time.Second)
 
 	bucket, err := s.client.Bucket(bucketName)
 	c.Assert(err, IsNil)
@@ -40,25 +38,43 @@ func (s *OssCrcSuite) SetUpSuite(c *C) {
 // TearDownSuite runs before each test or benchmark starts running
 func (s *OssCrcSuite) TearDownSuite(c *C) {
 	// Delete part
-	lmur, err := s.bucket.ListMultipartUploads()
-	c.Assert(err, IsNil)
-
-	for _, upload := range lmur.Uploads {
-		var imur = InitiateMultipartUploadResult{Bucket: s.bucket.BucketName,
-			Key: upload.Key, UploadID: upload.UploadID}
-		err = s.bucket.AbortMultipartUpload(imur)
+	keyMarker := KeyMarker("")
+	uploadIDMarker := UploadIDMarker("")
+	for {
+		lmur, err := s.bucket.ListMultipartUploads(keyMarker, uploadIDMarker)
 		c.Assert(err, IsNil)
+		for _, upload := range lmur.Uploads {
+			var imur = InitiateMultipartUploadResult{Bucket: s.bucket.BucketName,
+				Key: upload.Key, UploadID: upload.UploadID}
+			err = s.bucket.AbortMultipartUpload(imur)
+			c.Assert(err, IsNil)
+		}
+		keyMarker = KeyMarker(lmur.NextKeyMarker)
+		uploadIDMarker = UploadIDMarker(lmur.NextUploadIDMarker)
+		if !lmur.IsTruncated {
+			break
+		}
 	}
 
 	// Delete objects
-	lor, err := s.bucket.ListObjects()
-	c.Assert(err, IsNil)
-
-	for _, object := range lor.Objects {
-		err = s.bucket.DeleteObject(object.Key)
+	marker := Marker("")
+	for {
+		lor, err := s.bucket.ListObjects(marker)
 		c.Assert(err, IsNil)
+		for _, object := range lor.Objects {
+			err = s.bucket.DeleteObject(object.Key)
+			c.Assert(err, IsNil)
+		}
+		marker = Marker(lor.NextMarker)
+		if !lor.IsTruncated {
+			break
+		}
 	}
 
+	// Delete bucket
+	err := s.client.DeleteBucket(s.bucket.BucketName)
+	c.Assert(err, IsNil)
+
 	testLogger.Println("test crc completed")
 }
 
@@ -206,7 +222,7 @@ func (s *OssCrcSuite) TestCRCRandomCombine(c *C) {
 
 // TestEnableCRCAndMD5 tests MD5 and CRC check
 func (s *OssCrcSuite) TestEnableCRCAndMD5(c *C) {
-	objectName := objectNamePrefix + "tecam"
+	objectName := objectNamePrefix + randStr(8)
 	fileName := "../sample/BingWallpaper-2015-11-07.jpg"
 	newFileName := "BingWallpaper-2015-11-07-2.jpg"
 	objectValue := "空山新雨后,天气晚来秋。明月松间照,清泉石上流。竹喧归浣女,莲动下渔舟。随意春芳歇,王孙自可留。"
@@ -303,7 +319,7 @@ func (s *OssCrcSuite) TestEnableCRCAndMD5(c *C) {
 
 // TestDisableCRCAndMD5 disables MD5 and CRC
 func (s *OssCrcSuite) TestDisableCRCAndMD5(c *C) {
-	objectName := objectNamePrefix + "tdcam"
+	objectName := objectNamePrefix + randStr(8)
 	fileName := "../sample/BingWallpaper-2015-11-07.jpg"
 	newFileName := "BingWallpaper-2015-11-07-3.jpg"
 	objectValue := "中岁颇好道,晚家南山陲。兴来每独往,胜事空自知。行到水穷处,坐看云起时。偶然值林叟,谈笑无还期。"
@@ -399,7 +415,7 @@ func (s *OssCrcSuite) TestDisableCRCAndMD5(c *C) {
 
 // TestSpecifyContentMD5 specifies MD5
 func (s *OssCrcSuite) TestSpecifyContentMD5(c *C) {
-	objectName := objectNamePrefix + "tdcam"
+	objectName := objectNamePrefix + randStr(8)
 	fileName := "../sample/BingWallpaper-2015-11-07.jpg"
 	objectValue := "积雨空林烟火迟,蒸藜炊黍饷东菑。漠漠水田飞白鹭,阴阴夏木啭黄鹂。山中习静观朝槿,松下清斋折露葵。野老与人争席罢,海鸥何事更相疑。"
 
@@ -464,7 +480,7 @@ func (s *OssCrcSuite) TestSpecifyContentMD5(c *C) {
 
 // TestAppendObjectNegative
 func (s *OssCrcSuite) TestAppendObjectNegative(c *C) {
-	objectName := objectNamePrefix + "taoncrc"
+	objectName := objectNamePrefix + randStr(8)
 	objectValue := "空山不见人,但闻人语响。返影入深林,复照青苔上。"
 
 	nextPos, err := s.bucket.AppendObject(objectName, strings.NewReader(objectValue), 0, InitCRC(0))

+ 46 - 29
oss/download_test.go

@@ -23,7 +23,6 @@ func (s *OssDownloadSuite) SetUpSuite(c *C) {
 	s.client = client
 
 	s.client.CreateBucket(bucketName)
-	time.Sleep(5 * time.Second)
 
 	bucket, err := s.client.Bucket(bucketName)
 	c.Assert(err, IsNil)
@@ -35,25 +34,43 @@ func (s *OssDownloadSuite) SetUpSuite(c *C) {
 // TearDownSuite runs before each test or benchmark starts running
 func (s *OssDownloadSuite) TearDownSuite(c *C) {
 	// Delete part
-	lmur, err := s.bucket.ListMultipartUploads()
-	c.Assert(err, IsNil)
-
-	for _, upload := range lmur.Uploads {
-		var imur = InitiateMultipartUploadResult{Bucket: s.bucket.BucketName,
-			Key: upload.Key, UploadID: upload.UploadID}
-		err = s.bucket.AbortMultipartUpload(imur)
+	keyMarker := KeyMarker("")
+	uploadIDMarker := UploadIDMarker("")
+	for {
+		lmur, err := s.bucket.ListMultipartUploads(keyMarker, uploadIDMarker)
 		c.Assert(err, IsNil)
+		for _, upload := range lmur.Uploads {
+			var imur = InitiateMultipartUploadResult{Bucket: s.bucket.BucketName,
+				Key: upload.Key, UploadID: upload.UploadID}
+			err = s.bucket.AbortMultipartUpload(imur)
+			c.Assert(err, IsNil)
+		}
+		keyMarker = KeyMarker(lmur.NextKeyMarker)
+		uploadIDMarker = UploadIDMarker(lmur.NextUploadIDMarker)
+		if !lmur.IsTruncated {
+			break
+		}
 	}
 
 	// Delete objects
-	lor, err := s.bucket.ListObjects()
-	c.Assert(err, IsNil)
-
-	for _, object := range lor.Objects {
-		err = s.bucket.DeleteObject(object.Key)
+	marker := Marker("")
+	for {
+		lor, err := s.bucket.ListObjects(marker)
 		c.Assert(err, IsNil)
+		for _, object := range lor.Objects {
+			err = s.bucket.DeleteObject(object.Key)
+			c.Assert(err, IsNil)
+		}
+		marker = Marker(lor.NextMarker)
+		if !lor.IsTruncated {
+			break
+		}
 	}
 
+	// Delete bucket
+	err := s.client.DeleteBucket(s.bucket.BucketName)
+	c.Assert(err, IsNil)
+
 	testLogger.Println("test download completed")
 }
 
@@ -74,9 +91,9 @@ func (s *OssDownloadSuite) TearDownTest(c *C) {
 
 // TestDownloadRoutineWithoutRecovery multipart downloads without checkpoint
 func (s *OssDownloadSuite) TestDownloadRoutineWithoutRecovery(c *C) {
-	objectName := objectNamePrefix + "tdrwr"
+	objectName := objectNamePrefix + randStr(8)
 	fileName := "../sample/BingWallpaper-2015-11-07.jpg"
-	newFile := "down-new-file.jpg"
+	newFile := randStr(8) + ".jpg"
 
 	// Upload a file
 	err := s.bucket.UploadFile(objectName, fileName, 100*1024, Routines(3))
@@ -136,9 +153,9 @@ func DownErrorHooker(part downloadPart) error {
 
 // TestDownloadRoutineWithRecovery multi-routine resumable download
 func (s *OssDownloadSuite) TestDownloadRoutineWithRecovery(c *C) {
-	objectName := objectNamePrefix + "tdrtr"
+	objectName := objectNamePrefix + randStr(8)
 	fileName := "../sample/BingWallpaper-2015-11-07.jpg"
-	newFile := "down-new-file-2.jpg"
+	newFile := randStr(8) + ".jpg"
 
 	// Upload a file
 	err := s.bucket.UploadFile(objectName, fileName, 100*1024, Routines(3))
@@ -250,9 +267,9 @@ func (s *OssDownloadSuite) TestDownloadRoutineWithRecovery(c *C) {
 
 // TestDownloadOption options
 func (s *OssDownloadSuite) TestDownloadOption(c *C) {
-	objectName := objectNamePrefix + "tdmo"
+	objectName := objectNamePrefix + randStr(8)
 	fileName := "../sample/BingWallpaper-2015-11-07.jpg"
-	newFile := "down-new-file-3.jpg"
+	newFile := randStr(8) + ".jpg"
 
 	// Upload the file
 	err := s.bucket.UploadFile(objectName, fileName, 100*1024, Routines(3))
@@ -290,9 +307,9 @@ func (s *OssDownloadSuite) TestDownloadOption(c *C) {
 
 // TestDownloadObjectChange tests the file is updated during the upload
 func (s *OssDownloadSuite) TestDownloadObjectChange(c *C) {
-	objectName := objectNamePrefix + "tdloc"
+	objectName := objectNamePrefix + randStr(8)
 	fileName := "../sample/BingWallpaper-2015-11-07.jpg"
-	newFile := "down-new-file-4.jpg"
+	newFile := randStr(8) + ".jpg"
 
 	// Upload a file
 	err := s.bucket.UploadFile(objectName, fileName, 100*1024, Routines(3))
@@ -318,9 +335,9 @@ func (s *OssDownloadSuite) TestDownloadObjectChange(c *C) {
 
 // TestDownloadNegative tests downloading negative
 func (s *OssDownloadSuite) TestDownloadNegative(c *C) {
-	objectName := objectNamePrefix + "tdn"
+	objectName := objectNamePrefix + randStr(8)
 	fileName := "../sample/BingWallpaper-2015-11-07.jpg"
-	newFile := "down-new-file-3.jpg"
+	newFile := randStr(8) + ".jpg"
 
 	// Upload a file
 	err := s.bucket.UploadFile(objectName, fileName, 100*1024, Routines(3))
@@ -370,10 +387,10 @@ func (s *OssDownloadSuite) TestDownloadNegative(c *C) {
 
 // TestDownloadWithRange tests concurrent downloading with range specified and checkpoint enabled
 func (s *OssDownloadSuite) TestDownloadWithRange(c *C) {
-	objectName := objectNamePrefix + "tdwr"
+	objectName := objectNamePrefix + randStr(8)
 	fileName := "../sample/BingWallpaper-2015-11-07.jpg"
-	newFile := "down-new-file-tdwr.jpg"
-	newFileGet := "down-new-file-tdwr-2.jpg"
+	newFile := randStr(8) + ".jpg"
+	newFileGet := randStr(8) + "-.jpg"
 
 	// Upload a file
 	err := s.bucket.UploadFile(objectName, fileName, 100*1024, Routines(3))
@@ -464,10 +481,10 @@ func (s *OssDownloadSuite) TestDownloadWithRange(c *C) {
 
 // TestDownloadWithCheckoutAndRange tests concurrent downloading with range specified and checkpoint enabled
 func (s *OssDownloadSuite) TestDownloadWithCheckoutAndRange(c *C) {
-	objectName := objectNamePrefix + "tdwcr"
+	objectName := objectNamePrefix + randStr(8)
 	fileName := "../sample/BingWallpaper-2015-11-07.jpg"
-	newFile := "down-new-file-tdwcr.jpg"
-	newFileGet := "down-new-file-tdwcr-2.jpg"
+	newFile := randStr(8) + ".jpg"
+	newFileGet := randStr(8) + "-get.jpg"
 
 	// Upload a file
 	err := s.bucket.UploadFile(objectName, fileName, 100*1024, Routines(3), Checkpoint(true, fileName+".cp"))

+ 6 - 7
oss/limit_reader_1_7.go

@@ -15,13 +15,13 @@ const (
 	perTokenBandwidthSize int = 1024
 )
 
-// OssLimiter: wrapper rate.Limiter
+// OssLimiter wraps rate.Limiter.
 type OssLimiter struct {
 	limiter *rate.Limiter
 }
 
-// GetOssLimiter:create OssLimiter
-// uploadSpeed:KB/s
+// GetOssLimiter creates an OssLimiter.
+// uploadSpeed is expressed in KB/s.
 func GetOssLimiter(uploadSpeed int) (ossLimiter *OssLimiter, err error) {
 	limiter := rate.NewLimiter(rate.Limit(uploadSpeed), uploadSpeed)
 
@@ -33,7 +33,7 @@ func GetOssLimiter(uploadSpeed int) (ossLimiter *OssLimiter, err error) {
 	}, nil
 }
 
-// LimitSpeedReader: for limit bandwidth upload
+// LimitSpeedReader is a reader that limits upload bandwidth.
 type LimitSpeedReader struct {
 	io.ReadCloser
 	reader     io.Reader
@@ -73,10 +73,9 @@ func (r *LimitSpeedReader) Read(p []byte) (n int, err error) {
 			err = fmt.Errorf("LimitSpeedReader.Read() failure,ReserveN error,start:%d,end:%d,burst:%d,perTokenBandwidthSize:%d",
 				start, end, burst, perTokenBandwidthSize)
 			return
-		} else {
-			timeDelay := re.Delay()
-			time.Sleep(timeDelay)
 		}
+		timeDelay := re.Delay()
+		time.Sleep(timeDelay)
 	}
 	return
 }

+ 1 - 0
oss/mime.go

@@ -232,6 +232,7 @@ var extToMimeType = map[string]string{
 	".xsl":     "application/xml",
 	".xslt":    "application/xslt+xml",
 	".xul":     "application/vnd.mozilla.xul+xml",
+	".webp":    "image/webp",
 }
 
 // TypeByExtension returns the MIME type associated with the file extension ext.

+ 1 - 0
oss/model.go

@@ -19,6 +19,7 @@ func (r *Response) Read(p []byte) (n int, err error) {
 	return r.Body.Read(p)
 }
 
+// Close closes the HTTP response body.
 func (r *Response) Close() error {
 	return r.Body.Close()
 }

+ 43 - 26
oss/multicopy_test.go

@@ -22,7 +22,6 @@ func (s *OssCopySuite) SetUpSuite(c *C) {
 	s.client = client
 
 	s.client.CreateBucket(bucketName)
-	time.Sleep(5 * time.Second)
 
 	bucket, err := s.client.Bucket(bucketName)
 	c.Assert(err, IsNil)
@@ -34,25 +33,43 @@ func (s *OssCopySuite) SetUpSuite(c *C) {
 // TearDownSuite runs before each test or benchmark starts running
 func (s *OssCopySuite) TearDownSuite(c *C) {
 	// Delete Part
-	lmur, err := s.bucket.ListMultipartUploads()
-	c.Assert(err, IsNil)
-
-	for _, upload := range lmur.Uploads {
-		var imur = InitiateMultipartUploadResult{Bucket: bucketName,
-			Key: upload.Key, UploadID: upload.UploadID}
-		err = s.bucket.AbortMultipartUpload(imur)
+	keyMarker := KeyMarker("")
+	uploadIDMarker := UploadIDMarker("")
+	for {
+		lmur, err := s.bucket.ListMultipartUploads(keyMarker, uploadIDMarker)
 		c.Assert(err, IsNil)
+		for _, upload := range lmur.Uploads {
+			var imur = InitiateMultipartUploadResult{Bucket: bucketName,
+				Key: upload.Key, UploadID: upload.UploadID}
+			err = s.bucket.AbortMultipartUpload(imur)
+			c.Assert(err, IsNil)
+		}
+		keyMarker = KeyMarker(lmur.NextKeyMarker)
+		uploadIDMarker = UploadIDMarker(lmur.NextUploadIDMarker)
+		if !lmur.IsTruncated {
+			break
+		}
 	}
 
 	// Delete objects
-	lor, err := s.bucket.ListObjects()
-	c.Assert(err, IsNil)
-
-	for _, object := range lor.Objects {
-		err = s.bucket.DeleteObject(object.Key)
+	marker := Marker("")
+	for {
+		lor, err := s.bucket.ListObjects(marker)
 		c.Assert(err, IsNil)
+		for _, object := range lor.Objects {
+			err = s.bucket.DeleteObject(object.Key)
+			c.Assert(err, IsNil)
+		}
+		marker = Marker(lor.NextMarker)
+		if !lor.IsTruncated {
+			break
+		}
 	}
 
+	// Delete bucket
+	err := s.client.DeleteBucket(s.bucket.BucketName)
+	c.Assert(err, IsNil)
+
 	testLogger.Println("test copy completed")
 }
 
@@ -70,8 +87,8 @@ func (s *OssCopySuite) TearDownTest(c *C) {
 
 // TestCopyRoutineWithoutRecovery is multi-routine copy without resumable recovery
 func (s *OssCopySuite) TestCopyRoutineWithoutRecovery(c *C) {
-	srcObjectName := objectNamePrefix + "tcrwr"
-	destObjectName := srcObjectName + "-copy"
+	srcObjectName := objectNamePrefix + randStr(8)
+	destObjectName := srcObjectName + "-dest"
 	fileName := "../sample/BingWallpaper-2015-11-07.jpg"
 	newFile := "copy-new-file.jpg"
 
@@ -203,8 +220,8 @@ func CopyErrorHooker(part copyPart) error {
 
 // TestCopyRoutineWithoutRecoveryNegative is a multiple routines copy without checkpoint
 func (s *OssCopySuite) TestCopyRoutineWithoutRecoveryNegative(c *C) {
-	srcObjectName := objectNamePrefix + "tcrwrn"
-	destObjectName := srcObjectName + "-copy"
+	srcObjectName := objectNamePrefix + randStr(8)
+	destObjectName := srcObjectName + "-dest"
 	fileName := "../sample/BingWallpaper-2015-11-07.jpg"
 
 	// Upload source file
@@ -240,10 +257,10 @@ func (s *OssCopySuite) TestCopyRoutineWithoutRecoveryNegative(c *C) {
 
 // TestCopyRoutineWithRecovery is a multiple routines copy with resumable recovery
 func (s *OssCopySuite) TestCopyRoutineWithRecovery(c *C) {
-	srcObjectName := objectNamePrefix + "tcrtr"
-	destObjectName := srcObjectName + "-copy"
+	srcObjectName := objectNamePrefix + randStr(8)
+	destObjectName := srcObjectName + "-dest"
 	fileName := "../sample/BingWallpaper-2015-11-07.jpg"
-	newFile := "copy-new-file.jpg"
+	newFile := randStr(8) + ".jpg"
 
 	// Upload source file
 	err := s.bucket.UploadFile(srcObjectName, fileName, 100*1024, Routines(3))
@@ -406,8 +423,8 @@ func (s *OssCopySuite) TestCopyRoutineWithRecovery(c *C) {
 
 // TestCopyRoutineWithRecoveryNegative is a multiple routineed copy without checkpoint
 func (s *OssCopySuite) TestCopyRoutineWithRecoveryNegative(c *C) {
-	srcObjectName := objectNamePrefix + "tcrwrn"
-	destObjectName := srcObjectName + "-copy"
+	srcObjectName := objectNamePrefix + randStr(8)
+	destObjectName := srcObjectName + "-dest"
 
 	// Source bucket does not exist
 	err := s.bucket.CopyFile("NotExist", srcObjectName, destObjectName, 100*1024, Checkpoint(true, destObjectName+".cp"))
@@ -428,11 +445,11 @@ func (s *OssCopySuite) TestCopyRoutineWithRecoveryNegative(c *C) {
 
 // TestCopyFileCrossBucket is a cross bucket's direct copy.
 func (s *OssCopySuite) TestCopyFileCrossBucket(c *C) {
-	destBucketName := bucketName + "-cfcb-desc"
-	srcObjectName := objectNamePrefix + "tcrtr"
-	destObjectName := srcObjectName + "-copy"
+	destBucketName := bucketName + "-desc"
+	srcObjectName := objectNamePrefix + randStr(8)
+	destObjectName := srcObjectName + "-dest"
 	fileName := "../sample/BingWallpaper-2015-11-07.jpg"
-	newFile := "copy-new-file.jpg"
+	newFile := randStr(8) + ".jpg"
 
 	destBucket, err := s.client.Bucket(destBucketName)
 	c.Assert(err, IsNil)

+ 112 - 75
oss/multipart_test.go

@@ -7,7 +7,6 @@ import (
 	"net/http"
 	"os"
 	"strconv"
-	"time"
 
 	. "gopkg.in/check.v1"
 )
@@ -26,30 +25,43 @@ func (s *OssBucketMultipartSuite) SetUpSuite(c *C) {
 	s.client = client
 
 	s.client.CreateBucket(bucketName)
-	time.Sleep(5 * time.Second)
 
 	bucket, err := s.client.Bucket(bucketName)
 	c.Assert(err, IsNil)
 	s.bucket = bucket
 
 	// Delete part
-	lmur, err := s.bucket.ListMultipartUploads()
-	c.Assert(err, IsNil)
-
-	for _, upload := range lmur.Uploads {
-		var imur = InitiateMultipartUploadResult{Bucket: s.bucket.BucketName,
-			Key: upload.Key, UploadID: upload.UploadID}
-		err = s.bucket.AbortMultipartUpload(imur)
+	keyMarker := KeyMarker("")
+	uploadIDMarker := UploadIDMarker("")
+	for {
+		lmur, err := s.bucket.ListMultipartUploads(keyMarker, uploadIDMarker)
 		c.Assert(err, IsNil)
+		for _, upload := range lmur.Uploads {
+			var imur = InitiateMultipartUploadResult{Bucket: s.bucket.BucketName,
+				Key: upload.Key, UploadID: upload.UploadID}
+			err = s.bucket.AbortMultipartUpload(imur)
+			c.Assert(err, IsNil)
+		}
+		keyMarker = KeyMarker(lmur.NextKeyMarker)
+		uploadIDMarker = UploadIDMarker(lmur.NextUploadIDMarker)
+		if !lmur.IsTruncated {
+			break
+		}
 	}
 
 	// Delete objects
-	lor, err := s.bucket.ListObjects()
-	c.Assert(err, IsNil)
-
-	for _, object := range lor.Objects {
-		err = s.bucket.DeleteObject(object.Key)
+	marker := Marker("")
+	for {
+		lor, err := s.bucket.ListObjects(marker)
 		c.Assert(err, IsNil)
+		for _, object := range lor.Objects {
+			err = s.bucket.DeleteObject(object.Key)
+			c.Assert(err, IsNil)
+		}
+		marker = Marker(lor.NextMarker)
+		if !lor.IsTruncated {
+			break
+		}
 	}
 
 	testLogger.Println("test multipart started")
@@ -58,25 +70,43 @@ func (s *OssBucketMultipartSuite) SetUpSuite(c *C) {
 // TearDownSuite runs before each test or benchmark starts running
 func (s *OssBucketMultipartSuite) TearDownSuite(c *C) {
 	// Delete part
-	lmur, err := s.bucket.ListMultipartUploads()
-	c.Assert(err, IsNil)
-
-	for _, upload := range lmur.Uploads {
-		var imur = InitiateMultipartUploadResult{Bucket: s.bucket.BucketName,
-			Key: upload.Key, UploadID: upload.UploadID}
-		err = s.bucket.AbortMultipartUpload(imur)
+	keyMarker := KeyMarker("")
+	uploadIDMarker := UploadIDMarker("")
+	for {
+		lmur, err := s.bucket.ListMultipartUploads(keyMarker, uploadIDMarker)
 		c.Assert(err, IsNil)
+		for _, upload := range lmur.Uploads {
+			var imur = InitiateMultipartUploadResult{Bucket: s.bucket.BucketName,
+				Key: upload.Key, UploadID: upload.UploadID}
+			err = s.bucket.AbortMultipartUpload(imur)
+			c.Assert(err, IsNil)
+		}
+		keyMarker = KeyMarker(lmur.NextKeyMarker)
+		uploadIDMarker = UploadIDMarker(lmur.NextUploadIDMarker)
+		if !lmur.IsTruncated {
+			break
+		}
 	}
 
 	// Delete objects
-	lor, err := s.bucket.ListObjects()
-	c.Assert(err, IsNil)
-
-	for _, object := range lor.Objects {
-		err = s.bucket.DeleteObject(object.Key)
+	marker := Marker("")
+	for {
+		lor, err := s.bucket.ListObjects(marker)
 		c.Assert(err, IsNil)
+		for _, object := range lor.Objects {
+			err = s.bucket.DeleteObject(object.Key)
+			c.Assert(err, IsNil)
+		}
+		marker = Marker(lor.NextMarker)
+		if !lor.IsTruncated {
+			break
+		}
 	}
 
+	// Delete bucket
+	err := s.client.DeleteBucket(s.bucket.BucketName)
+	c.Assert(err, IsNil)
+
 	testLogger.Println("test multipart completed")
 }
 
@@ -103,7 +133,7 @@ func (s *OssBucketMultipartSuite) TearDownTest(c *C) {
 
 // TestMultipartUpload
 func (s *OssBucketMultipartSuite) TestMultipartUpload(c *C) {
-	objectName := objectNamePrefix + "tmu"
+	objectName := objectNamePrefix + randStr(8)
 	var fileName = "../sample/BingWallpaper-2015-11-07.jpg"
 
 	chunks, err := SplitFileByPartNum(fileName, 3)
@@ -148,7 +178,7 @@ func (s *OssBucketMultipartSuite) TestMultipartUpload(c *C) {
 
 // TestMultipartUploadFromFile
 func (s *OssBucketMultipartSuite) TestMultipartUploadFromFile(c *C) {
-	objectName := objectNamePrefix + "tmuff"
+	objectName := objectNamePrefix + randStr(8)
 	var fileName = "../sample/BingWallpaper-2015-11-07.jpg"
 
 	chunks, err := SplitFileByPartNum(fileName, 3)
@@ -187,8 +217,8 @@ func (s *OssBucketMultipartSuite) TestMultipartUploadFromFile(c *C) {
 
 // TestUploadPartCopy
 func (s *OssBucketMultipartSuite) TestUploadPartCopy(c *C) {
-	objectSrc := objectNamePrefix + "tupc" + "src"
-	objectDesc := objectNamePrefix + "tupc" + "desc"
+	objectSrc := objectNamePrefix + randStr(8) + "-src"
+	objectDest := objectNamePrefix + randStr(8) + "-dest"
 	var fileName = "../sample/BingWallpaper-2015-11-07.jpg"
 
 	chunks, err := SplitFileByPartNum(fileName, 3)
@@ -201,7 +231,7 @@ func (s *OssBucketMultipartSuite) TestUploadPartCopy(c *C) {
 	options := []Option{
 		Expires(futureDate), Meta("my", "myprop"),
 	}
-	imur, err := s.bucket.InitiateMultipartUpload(objectDesc, options...)
+	imur, err := s.bucket.InitiateMultipartUpload(objectDest, options...)
 	c.Assert(err, IsNil)
 	var parts []UploadPart
 	for _, chunk := range chunks {
@@ -214,26 +244,26 @@ func (s *OssBucketMultipartSuite) TestUploadPartCopy(c *C) {
 	c.Assert(err, IsNil)
 	testLogger.Println("cmur:", cmur)
 
-	meta, err := s.bucket.GetObjectDetailedMeta(objectDesc)
+	meta, err := s.bucket.GetObjectDetailedMeta(objectDest)
 	c.Assert(err, IsNil)
 	testLogger.Println("GetObjectDetailedMeta:", meta)
 	c.Assert(meta.Get("X-Oss-Meta-My"), Equals, "myprop")
 	c.Assert(meta.Get("Expires"), Equals, futureDate.Format(http.TimeFormat))
 	c.Assert(meta.Get("X-Oss-Object-Type"), Equals, "Multipart")
 
-	err = s.bucket.GetObjectToFile(objectDesc, "newpic2.jpg")
+	err = s.bucket.GetObjectToFile(objectDest, "newpic2.jpg")
 	c.Assert(err, IsNil)
 
 	err = s.bucket.DeleteObject(objectSrc)
 	c.Assert(err, IsNil)
-	err = s.bucket.DeleteObject(objectDesc)
+	err = s.bucket.DeleteObject(objectDest)
 	c.Assert(err, IsNil)
 }
 
 func (s *OssBucketMultipartSuite) TestListUploadedParts(c *C) {
-	objectName := objectNamePrefix + "tlup"
-	objectSrc := objectNamePrefix + "tlup" + "src"
-	objectDesc := objectNamePrefix + "tlup" + "desc"
+	objectName := objectNamePrefix + randStr(8)
+	objectSrc := objectName + "-src"
+	objectDest := objectName + "-dest"
 	var fileName = "../sample/BingWallpaper-2015-11-07.jpg"
 
 	chunks, err := SplitFileByPartSize(fileName, 100*1024)
@@ -253,7 +283,7 @@ func (s *OssBucketMultipartSuite) TestListUploadedParts(c *C) {
 	}
 
 	// Copy
-	imurCopy, err := s.bucket.InitiateMultipartUpload(objectDesc)
+	imurCopy, err := s.bucket.InitiateMultipartUpload(objectDest)
 	var partsCopy []UploadPart
 	for _, chunk := range chunks {
 		part, err := s.bucket.UploadPartCopy(imurCopy, bucketName, objectSrc, chunk.Offset, chunk.Size, (int)(chunk.Number))
@@ -283,23 +313,23 @@ func (s *OssBucketMultipartSuite) TestListUploadedParts(c *C) {
 	c.Assert(err, IsNil)
 
 	// Download
-	err = s.bucket.GetObjectToFile(objectDesc, "newpic3.jpg")
+	err = s.bucket.GetObjectToFile(objectDest, "newpic3.jpg")
 	c.Assert(err, IsNil)
 	err = s.bucket.GetObjectToFile(objectName, "newpic4.jpg")
 	c.Assert(err, IsNil)
 
 	err = s.bucket.DeleteObject(objectName)
 	c.Assert(err, IsNil)
-	err = s.bucket.DeleteObject(objectDesc)
+	err = s.bucket.DeleteObject(objectDest)
 	c.Assert(err, IsNil)
 	err = s.bucket.DeleteObject(objectSrc)
 	c.Assert(err, IsNil)
 }
 
 func (s *OssBucketMultipartSuite) TestAbortMultipartUpload(c *C) {
-	objectName := objectNamePrefix + "tamu"
-	objectSrc := objectNamePrefix + "tamu" + "src"
-	objectDesc := objectNamePrefix + "tamu" + "desc"
+	objectName := objectNamePrefix + randStr(8)
+	objectSrc := objectName + "-src"
+	objectDest := objectName + "-dest"
 	var fileName = "../sample/BingWallpaper-2015-11-07.jpg"
 
 	chunks, err := SplitFileByPartSize(fileName, 100*1024)
@@ -319,7 +349,7 @@ func (s *OssBucketMultipartSuite) TestAbortMultipartUpload(c *C) {
 	}
 
 	// Copy
-	imurCopy, err := s.bucket.InitiateMultipartUpload(objectDesc)
+	imurCopy, err := s.bucket.InitiateMultipartUpload(objectDest)
 	var partsCopy []UploadPart
 	for _, chunk := range chunks {
 		part, err := s.bucket.UploadPartCopy(imurCopy, bucketName, objectSrc, chunk.Offset, chunk.Size, (int)(chunk.Number))
@@ -355,7 +385,7 @@ func (s *OssBucketMultipartSuite) TestAbortMultipartUpload(c *C) {
 	c.Assert(len(lmur.Uploads), Equals, 0)
 
 	// Download
-	err = s.bucket.GetObjectToFile(objectDesc, "newpic3.jpg")
+	err = s.bucket.GetObjectToFile(objectDest, "newpic3.jpg")
 	c.Assert(err, NotNil)
 	err = s.bucket.GetObjectToFile(objectName, "newpic4.jpg")
 	c.Assert(err, NotNil)
@@ -363,8 +393,8 @@ func (s *OssBucketMultipartSuite) TestAbortMultipartUpload(c *C) {
 
 // TestUploadPartCopyWithConstraints
 func (s *OssBucketMultipartSuite) TestUploadPartCopyWithConstraints(c *C) {
-	objectSrc := objectNamePrefix + "tucwc" + "src"
-	objectDesc := objectNamePrefix + "tucwc" + "desc"
+	objectSrc := objectNamePrefix + randStr(8) + "-src"
+	objectDest := objectNamePrefix + randStr(8) + "-dest"
 	var fileName = "../sample/BingWallpaper-2015-11-07.jpg"
 
 	chunks, err := SplitFileByPartNum(fileName, 3)
@@ -374,7 +404,7 @@ func (s *OssBucketMultipartSuite) TestUploadPartCopyWithConstraints(c *C) {
 	err = s.bucket.PutObjectFromFile(objectSrc, fileName)
 	c.Assert(err, IsNil)
 
-	imur, err := s.bucket.InitiateMultipartUpload(objectDesc)
+	imur, err := s.bucket.InitiateMultipartUpload(objectDest)
 	var parts []UploadPart
 	for _, chunk := range chunks {
 		_, err = s.bucket.UploadPartCopy(imur, bucketName, objectSrc, chunk.Offset, chunk.Size, (int)(chunk.Number),
@@ -409,18 +439,18 @@ func (s *OssBucketMultipartSuite) TestUploadPartCopyWithConstraints(c *C) {
 	c.Assert(err, IsNil)
 	testLogger.Println("cmur:", cmur)
 
-	err = s.bucket.GetObjectToFile(objectDesc, "newpic5.jpg")
+	err = s.bucket.GetObjectToFile(objectDest, "newpic5.jpg")
 	c.Assert(err, IsNil)
 
 	err = s.bucket.DeleteObject(objectSrc)
 	c.Assert(err, IsNil)
-	err = s.bucket.DeleteObject(objectDesc)
+	err = s.bucket.DeleteObject(objectDest)
 	c.Assert(err, IsNil)
 }
 
 // TestMultipartUploadFromFileOutofOrder
 func (s *OssBucketMultipartSuite) TestMultipartUploadFromFileOutofOrder(c *C) {
-	objectName := objectNamePrefix + "tmuffoo"
+	objectName := objectNamePrefix + randStr(8)
 	var fileName = "../sample/BingWallpaper-2015-11-07.jpg"
 
 	chunks, err := SplitFileByPartSize(fileName, 1024*100)
@@ -454,8 +484,8 @@ func (s *OssBucketMultipartSuite) TestMultipartUploadFromFileOutofOrder(c *C) {
 
 // TestUploadPartCopyOutofOrder
 func (s *OssBucketMultipartSuite) TestUploadPartCopyOutofOrder(c *C) {
-	objectSrc := objectNamePrefix + "tupcoo" + "src"
-	objectDesc := objectNamePrefix + "tupcoo" + "desc"
+	objectSrc := objectNamePrefix + randStr(8) + "-src"
+	objectDest := objectNamePrefix + randStr(8) + "-dest"
 	var fileName = "../sample/BingWallpaper-2015-11-07.jpg"
 
 	chunks, err := SplitFileByPartSize(fileName, 1024*100)
@@ -466,7 +496,7 @@ func (s *OssBucketMultipartSuite) TestUploadPartCopyOutofOrder(c *C) {
 	err = s.bucket.PutObjectFromFile(objectSrc, fileName)
 	c.Assert(err, IsNil)
 
-	imur, err := s.bucket.InitiateMultipartUpload(objectDesc)
+	imur, err := s.bucket.InitiateMultipartUpload(objectDest)
 	var parts []UploadPart
 	for _, chunk := range chunks {
 		_, err := s.bucket.UploadPartCopy(imur, bucketName, objectSrc, chunk.Offset, chunk.Size, (int)(chunk.Number))
@@ -483,18 +513,18 @@ func (s *OssBucketMultipartSuite) TestUploadPartCopyOutofOrder(c *C) {
 	c.Assert(err, IsNil)
 	testLogger.Println("cmur:", cmur)
 
-	err = s.bucket.GetObjectToFile(objectDesc, "newpic7.jpg")
+	err = s.bucket.GetObjectToFile(objectDest, "newpic7.jpg")
 	c.Assert(err, IsNil)
 
 	err = s.bucket.DeleteObject(objectSrc)
 	c.Assert(err, IsNil)
-	err = s.bucket.DeleteObject(objectDesc)
+	err = s.bucket.DeleteObject(objectDest)
 	c.Assert(err, IsNil)
 }
 
 // TestMultipartUploadFromFileType
 func (s *OssBucketMultipartSuite) TestMultipartUploadFromFileType(c *C) {
-	objectName := objectNamePrefix + "tmuffwm" + ".jpg"
+	objectName := objectNamePrefix + randStr(8) + ".jpg"
 	var fileName = "../sample/BingWallpaper-2015-11-07.jpg"
 
 	chunks, err := SplitFileByPartNum(fileName, 4)
@@ -526,7 +556,7 @@ func (s *OssBucketMultipartSuite) TestMultipartUploadFromFileType(c *C) {
 }
 
 func (s *OssBucketMultipartSuite) TestListMultipartUploads(c *C) {
-	objectName := objectNamePrefix + "tlmu"
+	objectName := objectNamePrefix + randStr(8)
 
 	imurs := []InitiateMultipartUploadResult{}
 	for i := 0; i < 20; i++ {
@@ -568,10 +598,17 @@ func (s *OssBucketMultipartSuite) TestListMultipartUploads(c *C) {
 	c.Assert(len(lmpu.Uploads), Equals, 18)
 	c.Assert(len(lmpu.CommonPrefixes), Equals, 2)
 
-	// Upload-id-marker
-	lmpu, err = s.bucket.ListMultipartUploads(KeyMarker(objectName+"12"), UploadIDMarker("EEE"))
+	upLoadIDStr := randStr(3)
+	lmpu, err = s.bucket.ListMultipartUploads(KeyMarker(objectName+"12"), UploadIDMarker(upLoadIDStr))
 	c.Assert(err, IsNil)
-	c.Assert(len(lmpu.Uploads), Equals, 15)
+	checkNum := 15
+	for _, im := range imurs {
+		if im.Key == objectName+"12" && im.UploadID > upLoadIDStr {
+			checkNum = 16
+			break
+		}
+	}
+	c.Assert(len(lmpu.Uploads), Equals, checkNum)
 	//testLogger.Println("UploadIDMarker", lmpu.Uploads)
 
 	for _, imur := range imurs {
@@ -581,11 +618,11 @@ func (s *OssBucketMultipartSuite) TestListMultipartUploads(c *C) {
 }
 
 func (s *OssBucketMultipartSuite) TestListMultipartUploadsEncodingKey(c *C) {
-	objectName := objectNamePrefix + "让你任性让你狂" + "tlmuek"
+	prefix := objectNamePrefix + "让你任性让你狂" + randStr(8)
 
 	imurs := []InitiateMultipartUploadResult{}
 	for i := 0; i < 3; i++ {
-		imur, err := s.bucket.InitiateMultipartUpload(objectName + strconv.Itoa(i))
+		imur, err := s.bucket.InitiateMultipartUpload(prefix + strconv.Itoa(i))
 		c.Assert(err, IsNil)
 		imurs = append(imurs, imur)
 	}
@@ -594,18 +631,18 @@ func (s *OssBucketMultipartSuite) TestListMultipartUploadsEncodingKey(c *C) {
 	c.Assert(err, IsNil)
 	c.Assert(len(lmpu.Uploads), Equals, 3)
 
-	lmpu, err = s.bucket.ListMultipartUploads(Prefix(objectNamePrefix + "让你任性让你狂tlmuek1"))
+	lmpu, err = s.bucket.ListMultipartUploads(Prefix(prefix + "1"))
 	c.Assert(err, IsNil)
 	c.Assert(len(lmpu.Uploads), Equals, 1)
 
-	lmpu, err = s.bucket.ListMultipartUploads(KeyMarker(objectNamePrefix + "让你任性让你狂tlmuek1"))
+	lmpu, err = s.bucket.ListMultipartUploads(KeyMarker(prefix + "1"))
 	c.Assert(err, IsNil)
 	c.Assert(len(lmpu.Uploads), Equals, 1)
 
 	lmpu, err = s.bucket.ListMultipartUploads(EncodingType("url"))
 	c.Assert(err, IsNil)
 	for i, upload := range lmpu.Uploads {
-		c.Assert(upload.Key, Equals, objectNamePrefix+"让你任性让你狂tlmuek"+strconv.Itoa(i))
+		c.Assert(upload.Key, Equals, prefix+strconv.Itoa(i))
 	}
 
 	for _, imur := range imurs {
@@ -615,7 +652,7 @@ func (s *OssBucketMultipartSuite) TestListMultipartUploadsEncodingKey(c *C) {
 }
 
 func (s *OssBucketMultipartSuite) TestMultipartNegative(c *C) {
-	objectName := objectNamePrefix + "tmn"
+	objectName := objectNamePrefix + randStr(8)
 
 	// Key tool long
 	data := make([]byte, 100*1024)
@@ -674,7 +711,7 @@ func (s *OssBucketMultipartSuite) TestMultipartNegative(c *C) {
 }
 
 func (s *OssBucketMultipartSuite) TestMultipartUploadFromFileBigFile(c *C) {
-	objectName := objectNamePrefix + "tmuffbf"
+	objectName := objectNamePrefix + randStr(8)
 	bigFile := "D:\\tmp\\bigfile.zip"
 	newFile := "D:\\tmp\\newbigfile.zip"
 
@@ -722,9 +759,9 @@ func (s *OssBucketMultipartSuite) TestMultipartUploadFromFileBigFile(c *C) {
 
 // TestUploadFile
 func (s *OssBucketMultipartSuite) TestUploadFile(c *C) {
-	objectName := objectNamePrefix + "tuff"
+	objectName := objectNamePrefix + randStr(8)
 	var fileName = "../sample/BingWallpaper-2015-11-07.jpg"
-	newFile := "newfiletuff.jpg"
+	newFile := randStr(8) + ".jpg"
 
 	// Upload with 100K part size
 	err := s.bucket.UploadFile(objectName, fileName, 100*1024)
@@ -815,7 +852,7 @@ func (s *OssBucketMultipartSuite) TestUploadFile(c *C) {
 }
 
 func (s *OssBucketMultipartSuite) TestUploadFileNegative(c *C) {
-	objectName := objectNamePrefix + "tufn"
+	objectName := objectNamePrefix + randStr(8)
 	var fileName = "../sample/BingWallpaper-2015-11-07.jpg"
 
 	// Smaller than the required minimal part size (100KB)
@@ -837,9 +874,9 @@ func (s *OssBucketMultipartSuite) TestUploadFileNegative(c *C) {
 
 // TestDownloadFile
 func (s *OssBucketMultipartSuite) TestDownloadFile(c *C) {
-	objectName := objectNamePrefix + "tdff"
+	objectName := objectNamePrefix + randStr(8)
 	var fileName = "../sample/BingWallpaper-2015-11-07.jpg"
-	newFile := "newfiletdff.jpg"
+	newFile := randStr(8) + ".jpg"
 
 	err := s.bucket.UploadFile(objectName, fileName, 100*1024)
 	c.Assert(err, IsNil)
@@ -919,8 +956,8 @@ func (s *OssBucketMultipartSuite) TestDownloadFile(c *C) {
 }
 
 func (s *OssBucketMultipartSuite) TestDownloadFileNegative(c *C) {
-	objectName := objectNamePrefix + "tufn"
-	newFile := "newfiletudff.jpg"
+	objectName := objectNamePrefix + randStr(8)
+	newFile := randStr(8) + ".jpg"
 
 	// Smaller than the required minimal part size (100KB)
 	err := s.bucket.DownloadFile(objectName, newFile, 100*1024-1)

+ 40 - 24
oss/progress_test.go

@@ -8,7 +8,6 @@ import (
 	"math/rand"
 	"os"
 	"strings"
-	"time"
 
 	. "gopkg.in/check.v1"
 )
@@ -27,7 +26,6 @@ func (s *OssProgressSuite) SetUpSuite(c *C) {
 	s.client = client
 
 	s.client.CreateBucket(bucketName)
-	time.Sleep(5 * time.Second)
 
 	bucket, err := s.client.Bucket(bucketName)
 	c.Assert(err, IsNil)
@@ -39,24 +37,42 @@ func (s *OssProgressSuite) SetUpSuite(c *C) {
 // TearDownSuite runs before each test or benchmark starts running
 func (s *OssProgressSuite) TearDownSuite(c *C) {
 	// Abort multipart uploads
-	lmu, err := s.bucket.ListMultipartUploads()
-	c.Assert(err, IsNil)
-
-	for _, upload := range lmu.Uploads {
-		imur := InitiateMultipartUploadResult{Bucket: bucketName, Key: upload.Key, UploadID: upload.UploadID}
-		err = s.bucket.AbortMultipartUpload(imur)
+	keyMarker := KeyMarker("")
+	uploadIDMarker := UploadIDMarker("")
+	for {
+		lmu, err := s.bucket.ListMultipartUploads(keyMarker, uploadIDMarker)
 		c.Assert(err, IsNil)
+		for _, upload := range lmu.Uploads {
+			imur := InitiateMultipartUploadResult{Bucket: bucketName, Key: upload.Key, UploadID: upload.UploadID}
+			err = s.bucket.AbortMultipartUpload(imur)
+			c.Assert(err, IsNil)
+		}
+		keyMarker = KeyMarker(lmu.NextKeyMarker)
+		uploadIDMarker = UploadIDMarker(lmu.NextUploadIDMarker)
+		if !lmu.IsTruncated {
+			break
+		}
 	}
 
 	// Delete objects
-	lor, err := s.bucket.ListObjects()
-	c.Assert(err, IsNil)
-
-	for _, object := range lor.Objects {
-		err = s.bucket.DeleteObject(object.Key)
+	marker := Marker("")
+	for {
+		lor, err := s.bucket.ListObjects(marker)
 		c.Assert(err, IsNil)
+		for _, object := range lor.Objects {
+			err = s.bucket.DeleteObject(object.Key)
+			c.Assert(err, IsNil)
+		}
+		marker = Marker(lor.NextMarker)
+		if !lor.IsTruncated {
+			break
+		}
 	}
 
+	// Delete bucket
+	err := s.client.DeleteBucket(s.bucket.BucketName)
+	c.Assert(err, IsNil)
+
 	testLogger.Println("test progress completed")
 }
 
@@ -109,7 +125,7 @@ func (listener *OssProgressListener) ProgressChanged(event *ProgressEvent) {
 
 // TestPutObject
 func (s *OssProgressSuite) TestPutObject(c *C) {
-	objectName := objectNamePrefix + "tpo.html"
+	objectName := randStr(8) + ".jpg"
 	localFile := "../sample/The Go Programming Language.html"
 
 	// PutObject
@@ -147,7 +163,7 @@ func (s *OssProgressSuite) TestPutObject(c *C) {
 
 // TestSignURL
 func (s *OssProgressSuite) TestSignURL(c *C) {
-	objectName := objectNamePrefix + randStr(5)
+	objectName := objectNamePrefix + randStr(8)
 	filePath := randLowStr(10)
 	content := randStr(20)
 	createFile(filePath, content, c)
@@ -215,7 +231,7 @@ func (s *OssProgressSuite) TestSignURL(c *C) {
 }
 
 func (s *OssProgressSuite) TestPutObjectNegative(c *C) {
-	objectName := objectNamePrefix + "tpon.html"
+	objectName := objectNamePrefix + randStr(8)
 	localFile := "../sample/The Go Programming Language.html"
 
 	// Invalid endpoint
@@ -234,7 +250,7 @@ func (s *OssProgressSuite) TestPutObjectNegative(c *C) {
 
 // TestAppendObject
 func (s *OssProgressSuite) TestAppendObject(c *C) {
-	objectName := objectNamePrefix + "tao"
+	objectName := objectNamePrefix + randStr(8)
 	objectValue := "昨夜雨疏风骤,浓睡不消残酒。试问卷帘人,却道海棠依旧。知否?知否?应是绿肥红瘦。"
 	var val = []byte(objectValue)
 	var nextPos int64
@@ -259,7 +275,7 @@ func (s *OssProgressSuite) TestAppendObject(c *C) {
 
 // TestMultipartUpload
 func (s *OssProgressSuite) TestMultipartUpload(c *C) {
-	objectName := objectNamePrefix + "tmu.jpg"
+	objectName := objectNamePrefix + randStr(8)
 	var fileName = "../sample/BingWallpaper-2015-11-07.jpg"
 
 	chunks, err := SplitFileByPartNum(fileName, 3)
@@ -295,7 +311,7 @@ func (s *OssProgressSuite) TestMultipartUpload(c *C) {
 
 // TestMultipartUploadFromFile
 func (s *OssProgressSuite) TestMultipartUploadFromFile(c *C) {
-	objectName := objectNamePrefix + "tmuff.jpg"
+	objectName := objectNamePrefix + randStr(8)
 	var fileName = "../sample/BingWallpaper-2015-11-07.jpg"
 
 	chunks, err := SplitFileByPartNum(fileName, 3)
@@ -325,7 +341,7 @@ func (s *OssProgressSuite) TestMultipartUploadFromFile(c *C) {
 
 // TestGetObject
 func (s *OssProgressSuite) TestGetObject(c *C) {
-	objectName := objectNamePrefix + "tgo.jpg"
+	objectName := objectNamePrefix + randStr(8)
 	localFile := "../sample/BingWallpaper-2015-11-07.jpg"
 	newFile := "newpic-progress-1.jpg"
 
@@ -376,7 +392,7 @@ func (s *OssProgressSuite) TestGetObject(c *C) {
 
 // TestGetObjectNegative
 func (s *OssProgressSuite) TestGetObjectNegative(c *C) {
-	objectName := objectNamePrefix + "tgon.jpg"
+	objectName := objectNamePrefix + randStr(8)
 	localFile := "../sample/BingWallpaper-2015-11-07.jpg"
 
 	// PutObject
@@ -406,7 +422,7 @@ func (s *OssProgressSuite) TestGetObjectNegative(c *C) {
 
 // TestUploadFile
 func (s *OssProgressSuite) TestUploadFile(c *C) {
-	objectName := objectNamePrefix + "tuf.jpg"
+	objectName := objectNamePrefix + randStr(8)
 	fileName := "../sample/BingWallpaper-2015-11-07.jpg"
 
 	err := s.bucket.UploadFile(objectName, fileName, 100*1024, Routines(5), Progress(&OssProgressListener{}))
@@ -420,7 +436,7 @@ func (s *OssProgressSuite) TestUploadFile(c *C) {
 
 // TestDownloadFile
 func (s *OssProgressSuite) TestDownloadFile(c *C) {
-	objectName := objectNamePrefix + "tdf.jpg"
+	objectName := objectNamePrefix + randStr(8)
 	fileName := "../sample/BingWallpaper-2015-11-07.jpg"
 	newFile := "down-new-file-progress-2.jpg"
 
@@ -442,7 +458,7 @@ func (s *OssProgressSuite) TestDownloadFile(c *C) {
 
 // TestCopyFile
 func (s *OssProgressSuite) TestCopyFile(c *C) {
-	srcObjectName := objectNamePrefix + "tcf.jpg"
+	srcObjectName := objectNamePrefix + randStr(8)
 	destObjectName := srcObjectName + "-copy"
 	fileName := "../sample/BingWallpaper-2015-11-07.jpg"
 

+ 75 - 46
oss/type.go

@@ -2,6 +2,7 @@ package oss
 
 import (
 	"encoding/xml"
+	"fmt"
 	"net/url"
 	"time"
 )
@@ -42,77 +43,105 @@ type LifecycleConfiguration struct {
 
 // LifecycleRule defines Lifecycle rules
 type LifecycleRule struct {
-	XMLName    xml.Name            `xml:"Rule"`
-	ID         string              `xml:"ID"`         // The rule ID
-	Prefix     string              `xml:"Prefix"`     // The object key prefix
-	Status     string              `xml:"Status"`     // The rule status (enabled or not)
-	Expiration LifecycleExpiration `xml:"Expiration"` // The expiration property
+	XMLName              xml.Name                       `xml:"Rule"`
+	ID                   string                         `xml:"ID,omitempty"`                   // The rule ID
+	Prefix               string                         `xml:"Prefix"`                         // The object key prefix
+	Status               string                         `xml:"Status"`                         // The rule status (enabled or not)
+	Expiration           *LifecycleExpiration           `xml:"Expiration,omitempty"`           // The expiration property
+	Transitions          []LifecycleTransition          `xml:"Transition,omitempty"`           // The transition property
+	AbortMultipartUpload *LifecycleAbortMultipartUpload `xml:"AbortMultipartUpload,omitempty"` // The AbortMultipartUpload property
 }
 
 // LifecycleExpiration defines the rule's expiration property
 type LifecycleExpiration struct {
-	XMLName xml.Name  `xml:"Expiration"`
-	Days    int       `xml:"Days,omitempty"` // Relative expiration time: The expiration time in days after the last modified time
-	Date    time.Time `xml:"Date,omitempty"` // Absolute expiration time: The expiration time in date.
+	XMLName           xml.Name `xml:"Expiration"`
+	Days              int      `xml:"Days,omitempty"`              // Relative expiration time: The expiration time in days after the last modified time
+	Date              string   `xml:"Date,omitempty"`              // Absolute expiration time: The expiration time in date, not recommended
+	CreatedBeforeDate string   `xml:"CreatedBeforeDate,omitempty"` // objects created before the date will be expired
 }
 
-type lifecycleXML struct {
-	XMLName xml.Name        `xml:"LifecycleConfiguration"`
-	Rules   []lifecycleRule `xml:"Rule"`
-}
-
-type lifecycleRule struct {
-	XMLName    xml.Name            `xml:"Rule"`
-	ID         string              `xml:"ID"`
-	Prefix     string              `xml:"Prefix"`
-	Status     string              `xml:"Status"`
-	Expiration lifecycleExpiration `xml:"Expiration"`
+// LifecycleTransition defines the rule's transition property
+type LifecycleTransition struct {
+	XMLName           xml.Name         `xml:"Transition"`
+	Days              int              `xml:"Days,omitempty"`              // Relative transition time: The transition time in days after the last modified time
+	CreatedBeforeDate string           `xml:"CreatedBeforeDate,omitempty"` // objects created before the date will be transitioned to the target storage class
+	StorageClass      StorageClassType `xml:"StorageClass,omitempty"`      // Specifies the target storage type
 }
 
-type lifecycleExpiration struct {
-	XMLName xml.Name `xml:"Expiration"`
-	Days    int      `xml:"Days,omitempty"`
-	Date    string   `xml:"Date,omitempty"`
+// LifecycleAbortMultipartUpload defines the rule's abort multipart upload property
+type LifecycleAbortMultipartUpload struct {
+	XMLName           xml.Name `xml:"AbortMultipartUpload"`
+	Days              int      `xml:"Days,omitempty"`              // Relative abort time: incomplete multipart uploads are aborted this many days after initiation
+	CreatedBeforeDate string   `xml:"CreatedBeforeDate,omitempty"` // incomplete multipart uploads created before the date will be aborted
 }
 
-const expirationDateFormat = "2006-01-02T15:04:05.000Z"
+const iso8601DateFormat = "2006-01-02T15:04:05.000Z"
 
-func convLifecycleRule(rules []LifecycleRule) []lifecycleRule {
-	rs := []lifecycleRule{}
-	for _, rule := range rules {
-		r := lifecycleRule{}
-		r.ID = rule.ID
-		r.Prefix = rule.Prefix
-		r.Status = rule.Status
-		if rule.Expiration.Date.IsZero() {
-			r.Expiration.Days = rule.Expiration.Days
-		} else {
-			r.Expiration.Date = rule.Expiration.Date.Format(expirationDateFormat)
-		}
-		rs = append(rs, r)
-	}
-	return rs
-}
-
-// BuildLifecycleRuleByDays builds a lifecycle rule with specified expiration days
+// BuildLifecycleRuleByDays builds a lifecycle rule that expires objects the specified number of days after their last modified time
 func BuildLifecycleRuleByDays(id, prefix string, status bool, days int) LifecycleRule {
 	var statusStr = "Enabled"
 	if !status {
 		statusStr = "Disabled"
 	}
 	return LifecycleRule{ID: id, Prefix: prefix, Status: statusStr,
-		Expiration: LifecycleExpiration{Days: days}}
+		Expiration: &LifecycleExpiration{Days: days}}
 }
 
-// BuildLifecycleRuleByDate builds a lifecycle rule with specified expiration time.
+// BuildLifecycleRuleByDate builds a lifecycle rule that expires objects on the specified date
 func BuildLifecycleRuleByDate(id, prefix string, status bool, year, month, day int) LifecycleRule {
 	var statusStr = "Enabled"
 	if !status {
 		statusStr = "Disabled"
 	}
-	date := time.Date(year, time.Month(month), day, 0, 0, 0, 0, time.UTC)
+	date := time.Date(year, time.Month(month), day, 0, 0, 0, 0, time.UTC).Format(iso8601DateFormat)
 	return LifecycleRule{ID: id, Prefix: prefix, Status: statusStr,
-		Expiration: LifecycleExpiration{Date: date}}
+		Expiration: &LifecycleExpiration{Date: date}}
+}
+
+// verifyLifecycleRules checks whether the given lifecycle rules are valid; it returns an error describing the first invalid rule found.
+func verifyLifecycleRules(rules []LifecycleRule) error {
+	if len(rules) == 0 {
+		return fmt.Errorf("invalid rules, the length of rules is zero")
+	}
+	for _, rule := range rules {
+		if rule.Status != "Enabled" && rule.Status != "Disabled" {
+			return fmt.Errorf("invalid rule, the value of status must be Enabled or Disabled")
+		}
+
+		expiration := rule.Expiration
+		if expiration != nil {
+			if (expiration.Days != 0 && expiration.CreatedBeforeDate != "") || (expiration.Days != 0 && expiration.Date != "") || (expiration.CreatedBeforeDate != "" && expiration.Date != "") || (expiration.Days == 0 && expiration.CreatedBeforeDate == "" && expiration.Date == "") {
+				return fmt.Errorf("invalid expiration lifecycle, must be set one of CreatedBeforeDate, Days and Date")
+			}
+		}
+
+		abortMPU := rule.AbortMultipartUpload
+		if abortMPU != nil {
+			if (abortMPU.Days != 0 && abortMPU.CreatedBeforeDate != "") || (abortMPU.Days == 0 && abortMPU.CreatedBeforeDate == "") {
+				return fmt.Errorf("invalid abort multipart upload lifecycle, must be set one of CreatedBeforeDate and Days")
+			}
+		}
+
+		transitions := rule.Transitions
+		if len(transitions) > 0 {
+			if len(transitions) > 2 {
+				return fmt.Errorf("invalid count of transition lifecycles, the count must than less than 3")
+			}
+
+			for _, transition := range transitions {
+				if (transition.Days != 0 && transition.CreatedBeforeDate != "") || (transition.Days == 0 && transition.CreatedBeforeDate == "") {
+					return fmt.Errorf("invalid transition lifecycle, must be set one of CreatedBeforeDate and Days")
+				}
+				if transition.StorageClass != StorageIA && transition.StorageClass != StorageArchive {
+					return fmt.Errorf("invalid transition lifecylce, the value of storage class must be IA or Archive")
+				}
+			}
+		} else if expiration == nil && abortMPU == nil {
+			return fmt.Errorf("invalid rule, must set one of Expiration, AbortMultipartUplaod and Transitions")
+		}
+	}
+
+	return nil
 }
 
 // GetBucketLifecycleResult defines GetBucketLifecycle's result object

+ 283 - 19
oss/type_test.go

@@ -18,25 +18,6 @@ var (
 	chnURLStr = url.QueryEscape(chnStr)
 )
 
-func (s *OssTypeSuite) TestConvLifecycleRule(c *C) {
-	r1 := BuildLifecycleRuleByDate("id1", "one", true, 2015, 11, 11)
-	r2 := BuildLifecycleRuleByDays("id2", "two", false, 3)
-
-	rs := convLifecycleRule([]LifecycleRule{r1})
-	c.Assert(rs[0].ID, Equals, "id1")
-	c.Assert(rs[0].Prefix, Equals, "one")
-	c.Assert(rs[0].Status, Equals, "Enabled")
-	c.Assert(rs[0].Expiration.Date, Equals, "2015-11-11T00:00:00.000Z")
-	c.Assert(rs[0].Expiration.Days, Equals, 0)
-
-	rs = convLifecycleRule([]LifecycleRule{r2})
-	c.Assert(rs[0].ID, Equals, "id2")
-	c.Assert(rs[0].Prefix, Equals, "two")
-	c.Assert(rs[0].Status, Equals, "Disabled")
-	c.Assert(rs[0].Expiration.Date, Equals, "")
-	c.Assert(rs[0].Expiration.Days, Equals, 3)
-}
-
 func (s *OssTypeSuite) TestDecodeDeleteObjectsResult(c *C) {
 	var res DeleteObjectsResult
 	err := decodeDeleteObjectsResult(&res)
@@ -125,3 +106,286 @@ func (s *OssTypeSuite) TestSortUploadPart(c *C) {
 	c.Assert(parts[4].PartNumber, Equals, 5)
 	c.Assert(parts[4].ETag, Equals, "E5")
 }
+
+func (s *OssTypeSuite) TestValidateLifecleRules(c *C) {
+	expiration := LifecycleExpiration{
+		Days:              30,
+		CreatedBeforeDate: "2015-11-11T00:00:00.000Z",
+	}
+	rule := LifecycleRule{
+		ID:         "ruleID",
+		Prefix:     "prefix",
+		Status:     "Enabled",
+		Expiration: &expiration,
+	}
+	rules := []LifecycleRule{rule}
+	err := verifyLifecycleRules(rules)
+	c.Assert(err, NotNil)
+
+	expiration = LifecycleExpiration{
+		Date:              "2015-11-11T00:00:00.000Z",
+		CreatedBeforeDate: "2015-11-11T00:00:00.000Z",
+	}
+	rule = LifecycleRule{
+		ID:         "ruleID",
+		Prefix:     "prefix",
+		Status:     "Enabled",
+		Expiration: &expiration,
+	}
+	rules = []LifecycleRule{rule}
+	err = verifyLifecycleRules(rules)
+	c.Assert(err, NotNil)
+
+	expiration = LifecycleExpiration{
+		Days:              0,
+		CreatedBeforeDate: "",
+		Date:              "",
+	}
+	rule = LifecycleRule{
+		ID:         "ruleID",
+		Prefix:     "prefix",
+		Status:     "Enabled",
+		Expiration: &expiration,
+	}
+	rules = []LifecycleRule{rule}
+	err = verifyLifecycleRules(rules)
+	c.Assert(err, NotNil)
+
+	abortMPU := LifecycleAbortMultipartUpload{
+		Days:              30,
+		CreatedBeforeDate: "2015-11-11T00:00:00.000Z",
+	}
+	rule = LifecycleRule{
+		ID:                   "ruleID",
+		Prefix:               "prefix",
+		Status:               "Enabled",
+		AbortMultipartUpload: &abortMPU,
+	}
+	rules = []LifecycleRule{rule}
+	err = verifyLifecycleRules(rules)
+	c.Assert(err, NotNil)
+
+	abortMPU = LifecycleAbortMultipartUpload{
+		Days:              0,
+		CreatedBeforeDate: "",
+	}
+	rule = LifecycleRule{
+		ID:                   "ruleID",
+		Prefix:               "prefix",
+		Status:               "Enabled",
+		AbortMultipartUpload: &abortMPU,
+	}
+	rules = []LifecycleRule{rule}
+	err = verifyLifecycleRules(rules)
+	c.Assert(err, NotNil)
+
+	transition := LifecycleTransition{
+		Days:              30,
+		CreatedBeforeDate: "2015-11-11T00:00:00.000Z",
+		StorageClass:      StorageIA,
+	}
+	rule = LifecycleRule{
+		ID:          "ruleID",
+		Prefix:      "prefix",
+		Status:      "Enabled",
+		Transitions: []LifecycleTransition{transition},
+	}
+	rules = []LifecycleRule{rule}
+	err = verifyLifecycleRules(rules)
+	c.Assert(err, NotNil)
+
+	transition = LifecycleTransition{
+		Days:              0,
+		CreatedBeforeDate: "",
+		StorageClass:      StorageIA,
+	}
+	rule = LifecycleRule{
+		ID:          "ruleID",
+		Prefix:      "prefix",
+		Status:      "Enabled",
+		Transitions: []LifecycleTransition{transition},
+	}
+	rules = []LifecycleRule{rule}
+	err = verifyLifecycleRules(rules)
+	c.Assert(err, NotNil)
+
+	transition = LifecycleTransition{
+		Days:         30,
+		StorageClass: StorageStandard,
+	}
+	rule = LifecycleRule{
+		ID:          "ruleID",
+		Prefix:      "prefix",
+		Status:      "Enabled",
+		Transitions: []LifecycleTransition{transition},
+	}
+	rules = []LifecycleRule{rule}
+	err = verifyLifecycleRules(rules)
+	c.Assert(err, NotNil)
+
+	transition = LifecycleTransition{
+		CreatedBeforeDate: "2015-11-11T00:00:00.000Z",
+		StorageClass:      StorageStandard,
+	}
+	rule = LifecycleRule{
+		ID:          "ruleID",
+		Prefix:      "prefix",
+		Status:      "Enabled",
+		Transitions: []LifecycleTransition{transition},
+	}
+	rules = []LifecycleRule{rule}
+	err = verifyLifecycleRules(rules)
+	c.Assert(err, NotNil)
+
+	transition1 := LifecycleTransition{
+		Days:         30,
+		StorageClass: StorageIA,
+	}
+	transition2 := LifecycleTransition{
+		Days:         60,
+		StorageClass: StorageArchive,
+	}
+	transition3 := LifecycleTransition{
+		Days:         100,
+		StorageClass: StorageArchive,
+	}
+	rule = LifecycleRule{
+		ID:          "ruleID",
+		Prefix:      "prefix",
+		Status:      "Enabled",
+		Transitions: []LifecycleTransition{transition1, transition2, transition3},
+	}
+	rules = []LifecycleRule{rule}
+	err = verifyLifecycleRules(rules)
+	c.Assert(err, NotNil)
+
+	rule = LifecycleRule{
+		ID:     "ruleID",
+		Prefix: "prefix",
+		Status: "Enabled",
+	}
+	rules = []LifecycleRule{rule}
+	err = verifyLifecycleRules(rules)
+	c.Assert(err, NotNil)
+
+	rules = []LifecycleRule{}
+	err1 := verifyLifecycleRules(rules)
+	c.Assert(err1, NotNil)
+
+	expiration = LifecycleExpiration{
+		Days: 30,
+	}
+	rule = LifecycleRule{
+		ID:         "ruleID",
+		Prefix:     "prefix",
+		Status:     "Enabled",
+		Expiration: &expiration,
+	}
+	rules = []LifecycleRule{rule}
+	err = verifyLifecycleRules(rules)
+	c.Assert(err, IsNil)
+
+	expiration = LifecycleExpiration{
+		CreatedBeforeDate: "2015-11-11T00:00:00.000Z",
+	}
+	rule = LifecycleRule{
+		ID:         "ruleID",
+		Prefix:     "prefix",
+		Status:     "Enabled",
+		Expiration: &expiration,
+	}
+	rules = []LifecycleRule{rule}
+	err = verifyLifecycleRules(rules)
+	c.Assert(err, IsNil)
+
+	abortMPU = LifecycleAbortMultipartUpload{
+		Days: 30,
+	}
+	rule = LifecycleRule{
+		ID:                   "ruleID",
+		Prefix:               "prefix",
+		Status:               "Enabled",
+		AbortMultipartUpload: &abortMPU,
+	}
+	rules = []LifecycleRule{rule}
+	err = verifyLifecycleRules(rules)
+	c.Assert(err, IsNil)
+
+	abortMPU = LifecycleAbortMultipartUpload{
+		CreatedBeforeDate: "2015-11-11T00:00:00.000Z",
+	}
+	rule = LifecycleRule{
+		ID:                   "ruleID",
+		Prefix:               "prefix",
+		Status:               "Enabled",
+		AbortMultipartUpload: &abortMPU,
+	}
+	rules = []LifecycleRule{rule}
+	err = verifyLifecycleRules(rules)
+	c.Assert(err, IsNil)
+
+	expiration = LifecycleExpiration{
+		Days: 30,
+	}
+	abortMPU = LifecycleAbortMultipartUpload{
+		Days: 30,
+	}
+	rule = LifecycleRule{
+		ID:                   "ruleID",
+		Prefix:               "prefix",
+		Status:               "Enabled",
+		Expiration:           &expiration,
+		AbortMultipartUpload: &abortMPU,
+	}
+	rules = []LifecycleRule{rule}
+	err = verifyLifecycleRules(rules)
+	c.Assert(err, IsNil)
+
+	expiration = LifecycleExpiration{
+		CreatedBeforeDate: "2015-11-11T00:00:00.000Z",
+	}
+	abortMPU = LifecycleAbortMultipartUpload{
+		Days: 30,
+	}
+	transition = LifecycleTransition{
+		Days:         30,
+		StorageClass: StorageIA,
+	}
+	rule = LifecycleRule{
+		ID:                   "ruleID",
+		Prefix:               "prefix",
+		Status:               "Enabled",
+		Expiration:           &expiration,
+		AbortMultipartUpload: &abortMPU,
+		Transitions:          []LifecycleTransition{transition},
+	}
+	rules = []LifecycleRule{rule}
+	err = verifyLifecycleRules(rules)
+	c.Assert(err, IsNil)
+
+	expiration = LifecycleExpiration{
+		CreatedBeforeDate: "2015-11-11T00:00:00.000Z",
+	}
+	abortMPU = LifecycleAbortMultipartUpload{
+		Days: 30,
+	}
+	transition1 = LifecycleTransition{
+		Days:         30,
+		StorageClass: StorageIA,
+	}
+	transition2 = LifecycleTransition{
+		Days:         60,
+		StorageClass: StorageArchive,
+	}
+	rule = LifecycleRule{
+		ID:                   "ruleID",
+		Prefix:               "prefix",
+		Status:               "Enabled",
+		Expiration:           &expiration,
+		AbortMultipartUpload: &abortMPU,
+		Transitions:          []LifecycleTransition{transition1, transition2},
+	}
+	rules = []LifecycleRule{rule}
+	err = verifyLifecycleRules(rules)
+	c.Assert(err, IsNil)
+}

+ 38 - 21
oss/upload_test.go

@@ -23,7 +23,6 @@ func (s *OssUploadSuite) SetUpSuite(c *C) {
 	s.client = client
 
 	s.client.CreateBucket(bucketName)
-	time.Sleep(5 * time.Second)
 
 	bucket, err := s.client.Bucket(bucketName)
 	c.Assert(err, IsNil)
@@ -35,25 +34,43 @@ func (s *OssUploadSuite) SetUpSuite(c *C) {
 // TearDownSuite runs before each test or benchmark starts running
 func (s *OssUploadSuite) TearDownSuite(c *C) {
 	// Delete part
-	lmur, err := s.bucket.ListMultipartUploads()
-	c.Assert(err, IsNil)
-
-	for _, upload := range lmur.Uploads {
-		var imur = InitiateMultipartUploadResult{Bucket: s.bucket.BucketName,
-			Key: upload.Key, UploadID: upload.UploadID}
-		err = s.bucket.AbortMultipartUpload(imur)
+	keyMarker := KeyMarker("")
+	uploadIDMarker := UploadIDMarker("")
+	for {
+		lmur, err := s.bucket.ListMultipartUploads(keyMarker, uploadIDMarker)
 		c.Assert(err, IsNil)
+		for _, upload := range lmur.Uploads {
+			var imur = InitiateMultipartUploadResult{Bucket: s.bucket.BucketName,
+				Key: upload.Key, UploadID: upload.UploadID}
+			err = s.bucket.AbortMultipartUpload(imur)
+			c.Assert(err, IsNil)
+		}
+		keyMarker = KeyMarker(lmur.NextKeyMarker)
+		uploadIDMarker = UploadIDMarker(lmur.NextUploadIDMarker)
+		if !lmur.IsTruncated {
+			break
+		}
 	}
 
 	// Delete objects
-	lor, err := s.bucket.ListObjects()
-	c.Assert(err, IsNil)
-
-	for _, object := range lor.Objects {
-		err = s.bucket.DeleteObject(object.Key)
+	marker := Marker("")
+	for {
+		lor, err := s.bucket.ListObjects(marker)
 		c.Assert(err, IsNil)
+		for _, object := range lor.Objects {
+			err = s.bucket.DeleteObject(object.Key)
+			c.Assert(err, IsNil)
+		}
+		marker = Marker(lor.NextMarker)
+		if !lor.IsTruncated {
+			break
+		}
 	}
 
+	// Delete bucket
+	err := s.client.DeleteBucket(s.bucket.BucketName)
+	c.Assert(err, IsNil)
+
 	testLogger.Println("test upload completed")
 }
 
@@ -71,9 +88,9 @@ func (s *OssUploadSuite) TearDownTest(c *C) {
 
 // TestUploadRoutineWithoutRecovery tests multiroutineed upload without checkpoint
 func (s *OssUploadSuite) TestUploadRoutineWithoutRecovery(c *C) {
-	objectName := objectNamePrefix + "turwr"
+	objectName := objectNamePrefix + randStr(8)
 	fileName := "../sample/BingWallpaper-2015-11-07.jpg"
-	newFile := "upload-new-file.jpg"
+	newFile := randStr(8) + ".jpg"
 
 	// Routines is not specified, by default single routine
 	err := s.bucket.UploadFile(objectName, fileName, 100*1024)
@@ -206,7 +223,7 @@ func ErrorHooker(id int, chunk FileChunk) error {
 
 // TestUploadRoutineWithoutRecoveryNegative is multiroutineed upload without checkpoint
 func (s *OssUploadSuite) TestUploadRoutineWithoutRecoveryNegative(c *C) {
-	objectName := objectNamePrefix + "turwrn"
+	objectName := objectNamePrefix + randStr(8)
 	fileName := "../sample/BingWallpaper-2015-11-07.jpg"
 
 	uploadPartHooker = ErrorHooker
@@ -230,7 +247,7 @@ func (s *OssUploadSuite) TestUploadRoutineWithoutRecoveryNegative(c *C) {
 
 // TestUploadRoutineWithRecovery is multi-routine upload with resumable recovery
 func (s *OssUploadSuite) TestUploadRoutineWithRecovery(c *C) {
-	objectName := objectNamePrefix + "turtr"
+	objectName := objectNamePrefix + randStr(8)
 	fileName := "../sample/BingWallpaper-2015-11-07.jpg"
 	newFile := "upload-new-file-2.jpg"
 
@@ -379,7 +396,7 @@ func (s *OssUploadSuite) TestUploadRoutineWithRecovery(c *C) {
 
 // TestUploadRoutineWithRecoveryNegative is multiroutineed upload without checkpoint
 func (s *OssUploadSuite) TestUploadRoutineWithRecoveryNegative(c *C) {
-	objectName := objectNamePrefix + "turrn"
+	objectName := objectNamePrefix + randStr(8)
 	fileName := "../sample/BingWallpaper-2015-11-07.jpg"
 
 	// The local file does not exist
@@ -405,10 +422,10 @@ func (s *OssUploadSuite) TestUploadRoutineWithRecoveryNegative(c *C) {
 
 // TestUploadLocalFileChange tests the file is updated while being uploaded
 func (s *OssUploadSuite) TestUploadLocalFileChange(c *C) {
-	objectName := objectNamePrefix + "tulfc"
+	objectName := objectNamePrefix + randStr(8)
 	fileName := "../sample/BingWallpaper-2015-11-07.jpg"
-	localFile := "BingWallpaper-2015-11-07.jpg"
-	newFile := "upload-new-file-3.jpg"
+	localFile := randStr(8) + ".jpg"
+	newFile := randStr(8) + ".jpg"
 
 	os.Remove(localFile)
 	err := copyFile(fileName, localFile)

+ 8 - 0
sample/append_object.go

@@ -146,6 +146,14 @@ func AppendObjectSample() {
 	}
 	fmt.Println("Object ACL:", goar.ACL)
 
+	// Case 6: Set the storage class. OSS provides three storage classes: Standard, Infrequent Access, and Archive.
+	// Upload a string; more strings can be appended to the object later. Here the object is created with the 'Archive' storage class.
+	// An object created with the AppendObject operation is an appendable object. Set the object's storage class to 'Archive'.
+	nextPos, err = bucket.AppendObject(appendObjectKey, strings.NewReader("昨夜雨疏风骤,浓睡不消残酒。试问卷帘人,"), nextPos, oss.ObjectStorageClass("Archive"))
+	if err != nil {
+		HandleError(err)
+	}
+
 	// Delete the object and bucket
 	err = DeleteTestBucketAndObject(bucketName)
 	if err != nil {

+ 50 - 9
sample/bucket_lifecycle.go

@@ -20,37 +20,78 @@ func BucketLifecycleSample() {
 		HandleError(err)
 	}
 
-	// Case 1: Set the lifecycle. The rule ID is id1 and the applied objects' prefix is one and expired time is 11/11/2015
-	var rule1 = oss.BuildLifecycleRuleByDate("id1", "one", true, 2015, 11, 11)
+	// Case 1: Set the lifecycle. The rule ID is rule1 and the applied objects' prefix is one and expired time is 11/11/2015
+	expriation := oss.LifecycleExpiration{
+		CreatedBeforeDate: "2015-11-11T00:00:00.000Z",
+	}
+	rule1 := oss.LifecycleRule{
+		ID:         "rule1",
+		Prefix:     "one",
+		Status:     "Enabled",
+		Expiration: &expriation,
+	}
 	var rules = []oss.LifecycleRule{rule1}
 	err = client.SetBucketLifecycle(bucketName, rules)
 	if err != nil {
 		HandleError(err)
 	}
 
+	// Get the bucket's lifecycle
+	lc, err := client.GetBucketLifecycle(bucketName)
+	if err != nil {
+		HandleError(err)
+	}
+	fmt.Printf("Bucket Lifecycle:%v, %v\n", lc.Rules, *lc.Rules[0].Expiration)
+
 	// Case 2: Set the lifecycle, The rule ID is id2 and the applied objects' prefix is two and the expired time is three days after the object created.
-	var rule2 = oss.BuildLifecycleRuleByDays("id2", "two", true, 3)
+	transitionIA := oss.LifecycleTransition{
+		Days:         3,
+		StorageClass: oss.StorageIA,
+	}
+	transitionArch := oss.LifecycleTransition{
+		Days:         30,
+		StorageClass: oss.StorageArchive,
+	}
+	rule2 := oss.LifecycleRule{
+		ID:          "rule2",
+		Prefix:      "two",
+		Status:      "Enabled",
+		Transitions: []oss.LifecycleTransition{transitionIA, transitionArch},
+	}
 	rules = []oss.LifecycleRule{rule2}
 	err = client.SetBucketLifecycle(bucketName, rules)
 	if err != nil {
 		HandleError(err)
 	}
 
-	// Case 3: Create two rules in the bucket for different objects. The rule with the same ID will be overwritten.
-	var rule3 = oss.BuildLifecycleRuleByDays("id1", "two", true, 365)
-	var rule4 = oss.BuildLifecycleRuleByDate("id2", "one", true, 2016, 11, 11)
-	rules = []oss.LifecycleRule{rule3, rule4}
+	// Get the bucket's lifecycle
+	lc, err = client.GetBucketLifecycle(bucketName)
+	if err != nil {
+		HandleError(err)
+	}
+	fmt.Printf("Bucket Lifecycle:%v\n", lc.Rules)
+
+	abortMPU := oss.LifecycleAbortMultipartUpload{
+		Days: 3,
+	}
+	rule3 := oss.LifecycleRule{
+		ID:                   "rule3",
+		Prefix:               "three",
+		Status:               "Enabled",
+		AbortMultipartUpload: &abortMPU,
+	}
+	rules = append(lc.Rules, rule3)
 	err = client.SetBucketLifecycle(bucketName, rules)
 	if err != nil {
 		HandleError(err)
 	}
 
 	// Get the bucket's lifecycle
-	gbl, err := client.GetBucketLifecycle(bucketName)
+	lc, err = client.GetBucketLifecycle(bucketName)
 	if err != nil {
 		HandleError(err)
 	}
-	fmt.Println("Bucket Lifecycle:", gbl.Rules)
+	fmt.Printf("Bucket Lifecycle:%v, %v\n", lc.Rules, *lc.Rules[1].AbortMultipartUpload)
 
 	// Delete bucket's Lifecycle
 	err = client.DeleteBucketLifecycle(bucketName)

+ 6 - 7
sample/cname_sample.go

@@ -11,26 +11,25 @@ import (
 // CnameSample shows the cname usage
 func CnameSample() {
 	// New client
-	client, err := oss.New(endpoint4Cname, accessID4Cname, accessKey4Cname,
-		oss.UseCname(true))
+	client, err := oss.New(endpoint4Cname, accessID, accessKey, oss.UseCname(true))
 	if err != nil {
 		HandleError(err)
 	}
 
 	// Create bucket
-	err = client.CreateBucket(bucketName4Cname)
+	err = client.CreateBucket(bucketName)
 	if err != nil {
 		HandleError(err)
 	}
 
 	// Set bucket ACL
-	err = client.SetBucketACL(bucketName4Cname, oss.ACLPrivate)
+	err = client.SetBucketACL(bucketName, oss.ACLPrivate)
 	if err != nil {
 		HandleError(err)
 	}
 
 	// Look up bucket ACL
-	gbar, err := client.GetBucketACL(bucketName4Cname)
+	gbar, err := client.GetBucketACL(bucketName)
 	if err != nil {
 		HandleError(err)
 	}
@@ -42,7 +41,7 @@ func CnameSample() {
 		HandleError(err)
 	}
 
-	bucket, err := client.Bucket(bucketName4Cname)
+	bucket, err := client.Bucket(bucketName)
 	if err != nil {
 		HandleError(err)
 	}
@@ -74,7 +73,7 @@ func CnameSample() {
 	}
 
 	// Get object to file
-	err = bucket.GetObjectToFile(objectKey, newPicName)
+	err = bucket.GetObjectToFile(objectKey, localFile)
 	if err != nil {
 		HandleError(err)
 	}

+ 30 - 16
sample/comm.go

@@ -102,31 +102,45 @@ func DeleteTestBucketAndObject(bucketName string) error {
 	}
 
 	// Delete part
-	lmur, err := bucket.ListMultipartUploads()
-	if err != nil {
-		return err
-	}
-
-	for _, upload := range lmur.Uploads {
-		var imur = oss.InitiateMultipartUploadResult{Bucket: bucket.BucketName,
-			Key: upload.Key, UploadID: upload.UploadID}
-		err = bucket.AbortMultipartUpload(imur)
+	keyMarker := oss.KeyMarker("")
+	uploadIDMarker := oss.UploadIDMarker("")
+	for {
+		lmur, err := bucket.ListMultipartUploads(keyMarker, uploadIDMarker)
 		if err != nil {
 			return err
 		}
+		for _, upload := range lmur.Uploads {
+			var imur = oss.InitiateMultipartUploadResult{Bucket: bucket.BucketName,
+				Key: upload.Key, UploadID: upload.UploadID}
+			err = bucket.AbortMultipartUpload(imur)
+			if err != nil {
+				return err
+			}
+		}
+		keyMarker = oss.KeyMarker(lmur.NextKeyMarker)
+		uploadIDMarker = oss.UploadIDMarker(lmur.NextUploadIDMarker)
+		if !lmur.IsTruncated {
+			break
+		}
 	}
 
 	// Delete objects
-	lor, err := bucket.ListObjects()
-	if err != nil {
-		return err
-	}
-
-	for _, object := range lor.Objects {
-		err = bucket.DeleteObject(object.Key)
+	marker := oss.Marker("")
+	for {
+		lor, err := bucket.ListObjects(marker)
 		if err != nil {
 			return err
 		}
+		for _, object := range lor.Objects {
+			err = bucket.DeleteObject(object.Key)
+			if err != nil {
+				return err
+			}
+		}
+		marker = oss.Marker(lor.NextMarker)
+		if !lor.IsTruncated {
+			break
+		}
 	}
 
 	// Delete bucket

+ 16 - 15
sample/config.go

@@ -1,25 +1,26 @@
 package sample
 
-const (
+import "os"
+
+var (
 	// Sample code's env configuration. You need to specify them with the actual configuration if you want to run sample code
-	endpoint   string = "<endpoint>"
-	accessID   string = "<AccessKeyId>"
-	accessKey  string = "<AccessKeySecret>"
-	bucketName string = "<my-bucket>"
-	kmsID      string = "<KmsID>"
+	endpoint   = os.Getenv("OSS_TEST_ENDPOINT")
+	accessID   = os.Getenv("OSS_TEST_ACCESS_KEY_ID")
+	accessKey  = os.Getenv("OSS_TEST_ACCESS_KEY_SECRET")
+	bucketName = os.Getenv("OSS_TEST_BUCKET")
+	kmsID      = os.Getenv("OSS_TEST_KMS_ID")
 
 	// The cname endpoint
-	// These information are required to run sample/cname_sample
-	endpoint4Cname   string = "<endpoint>"
-	accessID4Cname   string = "<AccessKeyId>"
-	accessKey4Cname  string = "<AccessKeySecret>"
-	bucketName4Cname string = "<my-cname-bucket>"
+	endpoint4Cname = os.Getenv("OSS_TEST_CNAME_ENDPOINT")
+)
+
+const (
 
 	// The object name in the sample code
-	objectKey string = "my-object"
+	objectKey       string = "my-object"
+	appendObjectKey string = "my-object-append"
 
 	// The local files to run sample code.
-	localFile     string = "src/sample/BingWallpaper-2015-11-07.jpg"
-	htmlLocalFile string = "src/sample/The Go Programming Language.html"
-	newPicName    string = "src/sample/NewBingWallpaper-2015-11-07.jpg"
+	localFile     string = "sample/BingWallpaper-2015-11-07.jpg"
+	htmlLocalFile string = "sample/The Go Programming Language.html"
 )

+ 7 - 0
sample/copy_object.go

@@ -104,6 +104,13 @@ func CopyObjectSample() {
 		HandleError(err)
 	}
 
+	// Case 7: Set the storage classes. OSS provides three storage classes: Standard, Infrequent Access, and Archive.
+	// Copy an object in the same bucket, and set the object's storage-class to Archive.
+	_, rr := bucket.CopyObject(objectKey, objectKey+"DestArchive", oss.ObjectStorageClass("Archive"))
+	if rr != nil {
+		HandleError(err)
+	}
+
 	// Delete object and bucket
 	err = DeleteTestBucketAndObject(bucketName)
 	if err != nil {

+ 9 - 5
sample/get_object.go

@@ -10,7 +10,7 @@ import (
 	"github.com/aliyun/aliyun-oss-go-sdk/oss"
 )
 
-// GetObjectSample shows the streaming download, range download and resumable download. 
+// GetObjectSample shows the streaming download, range download and resumable download.
 func GetObjectSample() {
 	// Create bucket
 	bucket, err := GetTestBucket(bucketName)
@@ -29,12 +29,13 @@ func GetObjectSample() {
 	if err != nil {
 		HandleError(err)
 	}
+
 	data, err := ioutil.ReadAll(body)
 	body.Close()
 	if err != nil {
 		HandleError(err)
 	}
-	data = data // use data
+	fmt.Println("size of data is: ", len(data))
 
 	// Case 2: Download the object to byte array. This is for small object.
 	buf := new(bytes.Buffer)
@@ -72,11 +73,13 @@ func GetObjectSample() {
 		HandleError(err)
 	}
 	body.Close()
+
 	// Last modified time constraint is not met, do not download the file
-	_, err = bucket.GetObject(objectKey, oss.IfUnmodifiedSince(pastDate))
+	body, err = bucket.GetObject(objectKey, oss.IfUnmodifiedSince(pastDate))
 	if err == nil {
 		HandleError(err)
 	}
+	body.Close()
 
 	meta, err := bucket.GetObjectDetailedMeta(objectKey)
 	if err != nil {
@@ -95,6 +98,7 @@ func GetObjectSample() {
 	if err == nil {
 		HandleError(err)
 	}
+	body.Close()
 
 	// Case 6: Big file's multipart download, concurrent and resumable download is supported.
 	// multipart download with part size 100KB. By default single coroutine is used and no checkpoint
@@ -114,8 +118,8 @@ func GetObjectSample() {
 	if err != nil {
 		HandleError(err)
 	}
-	
-	// Specify the checkpoint file path to record which parts have been downloaded. 
+
+	// Specify the checkpoint file path to record which parts have been downloaded.
 	// This file path can be specified by the 2nd parameter of Checkpoint, it will be the download directory if the file path is empty.
 	err = bucket.DownloadFile(objectKey, "mynewfile-3.jpg", 100*1024, oss.Checkpoint(true, "mynewfile.cp"))
 	if err != nil {

+ 13 - 0
sample/put_object.go

@@ -122,6 +122,19 @@ func PutObjectSample() {
 		HandleError(err)
 	}
 
+	// Case 9: Set the storage classes. OSS provides three storage classes: Standard, Infrequent Access, and Archive.
+	// Supported APIs: PutObject, CopyObject, UploadFile, AppendObject...
+	err = bucket.PutObject(objectKey, strings.NewReader(val), oss.ObjectStorageClass("IA"))
+	if err != nil {
+		HandleError(err)
+	}
+
+	// Upload a local file, and set the object's storage-class to 'Archive'.
+	err = bucket.UploadFile(objectKey, localFile, 100*1024, oss.ObjectStorageClass("Archive"))
+	if err != nil {
+		HandleError(err)
+	}
+
 	// Delete object and bucket
 	err = DeleteTestBucketAndObject(bucketName)
 	if err != nil {

+ 8 - 2
sample/sign_url.go

@@ -3,6 +3,7 @@ package sample
 import (
 	"fmt"
 	"io/ioutil"
+	"os"
 	"strings"
 
 	"github.com/aliyun/aliyun-oss-go-sdk/oss"
@@ -54,10 +55,15 @@ func SignURLSample() {
 	if err != nil {
 		HandleError(err)
 	}
+	defer body.Close()
+
 	// Read content
 	data, err := ioutil.ReadAll(body)
-	body.Close()
-	data = data // use data
+	if err != nil {
+		fmt.Println("Error:", err)
+		os.Exit(-1)
+	}
+	fmt.Println("data:", string(data))
 
 	err = bucket.GetObjectToFileWithURL(signedURL, "mynewfile-1.jpg")
 	if err != nil {

Kaikkia tiedostoja ei voida näyttää, sillä liian monta tiedostoa muuttui tässä diffissä