فهرست منبع

Merge branch 'master' into livechannel

fengyu 7 سال پیش
والد
کامیت
0d81a1ee45
53 فایل تغییر یافته به همراه 1631 افزوده شده و 1704 حذف شده
  1. 4 0
      CHANGELOG.md
  2. 1 2
      README-CN.md
  3. 1 2
      README.md
  4. 12 12
      oss/auth.go
  5. 211 236
      oss/bucket.go
  6. 135 134
      oss/bucket_test.go
  7. 151 188
      oss/client.go
  8. 92 92
      oss/client_test.go
  9. 21 21
      oss/conf.go
  10. 39 39
      oss/conn.go
  11. 28 28
      oss/const.go
  12. 8 8
      oss/crc.go
  13. 20 20
      oss/crc_test.go
  14. 70 71
      oss/download.go
  15. 67 67
      oss/download_test.go
  16. 15 15
      oss/error.go
  17. 1 1
      oss/mime.go
  18. 8 8
      oss/model.go
  19. 61 62
      oss/multicopy.go
  20. 46 45
      oss/multicopy_test.go
  21. 58 71
      oss/multipart.go
  22. 46 46
      oss/multipart_test.go
  23. 12 12
      oss/option.go
  24. 8 8
      oss/progress.go
  25. 19 19
      oss/progress_test.go
  26. 1 1
      oss/transport_1_6.go
  27. 1 1
      oss/transport_1_7.go
  28. 144 144
      oss/type.go
  29. 61 62
      oss/upload.go
  30. 36 36
      oss/upload_test.go
  31. 29 29
      oss/utils.go
  32. 5 5
      oss/utils_test.go
  33. 15 15
      sample/append_object.go
  34. 9 9
      sample/archive.go
  35. 6 6
      sample/bucket_acl.go
  36. 8 8
      sample/bucket_cors.go
  37. 9 9
      sample/bucket_lifecycle.go
  38. 13 13
      sample/bucket_logging.go
  39. 8 8
      sample/bucket_referer.go
  40. 13 13
      sample/cname_sample.go
  41. 14 14
      sample/comm.go
  42. 5 6
      sample/config.go
  43. 16 16
      sample/copy_object.go
  44. 7 7
      sample/create_bucket.go
  45. 7 7
      sample/delete_object.go
  46. 21 20
      sample/get_object.go
  47. 12 12
      sample/list_buckets.go
  48. 14 14
      sample/list_objects.go
  49. 7 7
      sample/new_bucket.go
  50. 6 6
      sample/object_acl.go
  51. 9 9
      sample/object_meta.go
  52. 14 13
      sample/put_object.go
  53. 7 7
      sample/sign_url.go

+ 4 - 0
CHANGELOG.md

@@ -1,5 +1,9 @@
 # ChangeLog - Aliyun OSS SDK for Go
 
+## 版本号:1.9.0 日期:2018-06-15
+### 变更内容
+ - 变更:国际化
+
 ## 版本号:1.8.0 日期:2017-12-12
 ### 变更内容
  - 变更:空闲链接关闭时间调整为50秒

+ 1 - 2
README-CN.md

@@ -13,7 +13,7 @@
 > - 使用此SDK,用户可以方便地在任何应用、任何时间、任何地点上传,下载和管理数据。
 
 ## 版本
-> - 当前版本:1.8.0
+> - 当前版本:1.9.0
 
 ## 运行环境
 > - Go 1.5及以上。
@@ -162,7 +162,6 @@
 
 ## 作者
 > - Yubin Bai
-> - Hǎiliàng Wáng
 
 ## License
 > - Apache License 2.0

+ 1 - 2
README.md

@@ -13,7 +13,7 @@
 > - With this SDK, you can upload, download and manage data on any app anytime and anywhere conveniently. 
 
 ## Version
-> - Current version: 1.8.0. 
+> - Current version: 1.9.0. 
 
 ## Running Environment
 > - Go 1.5 or above. 
@@ -161,7 +161,6 @@ and copy the sample directory and sample.go to the src directory of your test pr
 
 ## Author
 > - Yubin Bai.
-> - Hǎiliàng Wáng.
 
 ## License
 > - Apache License 2.0.

+ 12 - 12
oss/auth.go

@@ -14,15 +14,15 @@ import (
 	"strings"
 )
 
-// 用于signHeader的字典排序存放容器。
+// headerSorter defines the key-value structure for storing the sorted data in signHeader.
 type headerSorter struct {
 	Keys []string
 	Vals []string
 }
 
-// 生成签名方法(直接设置请求的Header)。
+// signHeader signs the header and sets it as the authorization header.
 func (conn Conn) signHeader(req *http.Request, canonicalizedResource string) {
-	// Get the final Authorization' string
+	// Get the final authorization string
 	authorizationStr := "OSS " + conn.config.AccessKeyID + ":" + conn.getSignedStr(req, canonicalizedResource)
 
 	// Give the parameter "Authorization" value
@@ -30,7 +30,7 @@ func (conn Conn) signHeader(req *http.Request, canonicalizedResource string) {
 }
 
 func (conn Conn) getSignedStr(req *http.Request, canonicalizedResource string) string {
-	// Find out the "x-oss-"'s address in this request'header
+	// Find out the "x-oss-"'s address in header of the request
 	temp := make(map[string]string)
 
 	for k, v := range req.Header {
@@ -40,17 +40,17 @@ func (conn Conn) getSignedStr(req *http.Request, canonicalizedResource string) s
 	}
 	hs := newHeaderSorter(temp)
 
-	// Sort the temp by the Ascending Order
+	// Sort the temp by the ascending order
 	hs.Sort()
 
-	// Get the CanonicalizedOSSHeaders
+	// Get the canonicalizedOSSHeaders
 	canonicalizedOSSHeaders := ""
 	for i := range hs.Keys {
 		canonicalizedOSSHeaders += hs.Keys[i] + ":" + hs.Vals[i] + "\n"
 	}
 
 	// Give other parameters values
-	// when sign url, date is expires
+	// when sign URL, date is expires
 	date := req.Header.Get(HTTPHeaderDate)
 	contentType := req.Header.Get(HTTPHeaderContentType)
 	contentMd5 := req.Header.Get(HTTPHeaderContentMD5)
@@ -91,7 +91,7 @@ func (conn Conn) getRtmpSignedStr(bucketName, channelName, playlistName string,
 	return signedStr
 }
 
-// Additional function for function SignHeader.
+// newHeaderSorter is an additional function for function SignHeader.
 func newHeaderSorter(m map[string]string) *headerSorter {
 	hs := &headerSorter{
 		Keys: make([]string, 0, len(m)),
@@ -105,22 +105,22 @@ func newHeaderSorter(m map[string]string) *headerSorter {
 	return hs
 }
 
-// Additional function for function SignHeader.
+// Sort is an additional function for function SignHeader.
 func (hs *headerSorter) Sort() {
 	sort.Sort(hs)
 }
 
-// Additional function for function SignHeader.
+// Len is an additional function for function SignHeader.
 func (hs *headerSorter) Len() int {
 	return len(hs.Vals)
 }
 
-// Additional function for function SignHeader.
+// Less is an additional function for function SignHeader.
 func (hs *headerSorter) Less(i, j int) bool {
 	return bytes.Compare([]byte(hs.Keys[i]), []byte(hs.Keys[j])) < 0
 }
 
-// Additional function for function SignHeader.
+// Swap is an additional function for function SignHeader.
 func (hs *headerSorter) Swap(i, j int) {
 	hs.Vals[i], hs.Vals[j] = hs.Vals[j], hs.Vals[i]
 	hs.Keys[i], hs.Keys[j] = hs.Keys[j], hs.Keys[i]

+ 211 - 236
oss/bucket.go

@@ -23,16 +23,15 @@ type Bucket struct {
 	BucketName string
 }
 
+// PutObject creates a new object and it will overwrite the original one if it exists already.
 //
-// PutObject 新建Object,如果Object已存在,覆盖原有Object。
+// objectKey    the object key in UTF-8 encoding. The length must be between 1 and 1023, and cannot start with "/" or "\".
+// reader    io.Reader instance for reading the data for uploading
+// options    the options for uploading the object. The valid options here are CacheControl, ContentDisposition, ContentEncoding
+//            Expires, ServerSideEncryption, ObjectACL and Meta. Refer to the link below for more details.
+//            https://help.aliyun.com/document_detail/oss/api-reference/object/PutObject.html
 //
-// objectKey  上传对象的名称,使用UTF-8编码、长度必须在1-1023字节之间、不能以“/”或者“\”字符开头。
-// reader     io.Reader读取object的数据。
-// options    上传对象时可以指定对象的属性,可用选项有CacheControl、ContentDisposition、ContentEncoding、
-// Expires、ServerSideEncryption、ObjectACL、Meta,具体含义请参看
-// https://help.aliyun.com/document_detail/oss/api-reference/object/PutObject.html
-//
-// error  操作无错误为nil,非nil为错误信息。
+// error    it's nil if no error, otherwise it's an error object.
 //
 func (bucket Bucket) PutObject(objectKey string, reader io.Reader, options ...Option) error {
 	opts := addContentType(options, objectKey)
@@ -50,14 +49,13 @@ func (bucket Bucket) PutObject(objectKey string, reader io.Reader, options ...Op
 	return err
 }
 
+// PutObjectFromFile creates a new object from the local file.
 //
-// PutObjectFromFile 新建Object,内容从本地文件中读取。
-//
-// objectKey 上传对象的名称。
-// filePath  本地文件,上传对象的值为该文件内容。
-// options   上传对象时可以指定对象的属性。详见PutObject的options。
+// objectKey    object key.
+// filePath    the local file path to upload.
+// options    the options for uploading the object. Refer to the parameter options in PutObject for more details.
 //
-// error  操作无错误为nil,非nil为错误信息。
+// error    it's nil if no error, otherwise it's an error object.
 //
 func (bucket Bucket) PutObjectFromFile(objectKey, filePath string, options ...Option) error {
 	fd, err := os.Open(filePath)
@@ -81,14 +79,13 @@ func (bucket Bucket) PutObjectFromFile(objectKey, filePath string, options ...Op
 	return err
 }
 
+// DoPutObject does the actual upload work.
 //
-// DoPutObject 上传文件。
-//
-// request  上传请求。
-// options  上传选项。
+// request    the request instance for uploading an object.
+// options    the options for uploading an object.
 //
-// Response 上传请求返回值。
-// error  操作无错误为nil,非nil为错误信息。
+// Response    the response from OSS.
+// error    it's nil if no error, otherwise it's an error object.
 //
 func (bucket Bucket) DoPutObject(request *PutObjectRequest, options []Option) (*Response, error) {
 	isOptSet, _, _ := isOptionSet(options, HTTPHeaderContentType)
@@ -116,16 +113,15 @@ func (bucket Bucket) DoPutObject(request *PutObjectRequest, options []Option) (*
 	return resp, err
 }
 
+// GetObject downloads the object.
 //
-// GetObject 下载文件。
+// objectKey    the object key.
+// options    the options for downloading the object. The valid values are: Range, IfModifiedSince, IfUnmodifiedSince, IfMatch,
+//            IfNoneMatch, AcceptEncoding. For more details, please check out:
+//            https://help.aliyun.com/document_detail/oss/api-reference/object/GetObject.html
 //
-// objectKey 下载的文件名称。
-// options   对象的属性限制项,可选值有Range、IfModifiedSince、IfUnmodifiedSince、IfMatch、
-// IfNoneMatch、AcceptEncoding,详细请参考
-// https://help.aliyun.com/document_detail/oss/api-reference/object/GetObject.html
-//
-// io.ReadCloser  reader,读取数据后需要close。error为nil时有效。
-// error  操作无错误为nil,非nil为错误信息。
+// io.ReadCloser    reader instance for reading data from response. close() must be called after use; only valid when error is nil.
+// error    it's nil if no error, otherwise it's an error object.
 //
 func (bucket Bucket) GetObject(objectKey string, options ...Option) (io.ReadCloser, error) {
 	result, err := bucket.DoGetObject(&GetObjectRequest{objectKey}, options)
@@ -135,39 +131,38 @@ func (bucket Bucket) GetObject(objectKey string, options ...Option) (io.ReadClos
 	return result.Response.Body, nil
 }
 
+// GetObjectToFile downloads the data to a local file.
 //
-// GetObjectToFile 下载文件。
-//
-// objectKey  下载的文件名称。
-// filePath   下载对象的内容写到该本地文件。
-// options    对象的属性限制项。详见GetObject的options。
+// objectKey    the object key to download.
+// filePath    the local file to store the object data.
+// options    the options for downloading the object. Refer to the parameter options in method GetObject for more details.
 //
-// error  操作无错误时返回error为nil,非nil为错误说明。
+// error    it's nil if no error, otherwise it's an error object.
 //
 func (bucket Bucket) GetObjectToFile(objectKey, filePath string, options ...Option) error {
 	tempFilePath := filePath + TempFileSuffix
 
-	// 读取Object内容
+	// Calls the API to actually download the object. Returns the result instance.
 	result, err := bucket.DoGetObject(&GetObjectRequest{objectKey}, options)
 	if err != nil {
 		return err
 	}
 	defer result.Response.Body.Close()
 
-	// 如果文件不存在则创建,存在则清空
+	// If the local file does not exist, create a new one. If it exists, overwrite it.
 	fd, err := os.OpenFile(tempFilePath, os.O_CREATE|os.O_TRUNC|os.O_WRONLY, FilePermMode)
 	if err != nil {
 		return err
 	}
 
-	// 存储数据到文件
+	// Copy the data to the local file path.
 	_, err = io.Copy(fd, result.Response.Body)
 	fd.Close()
 	if err != nil {
 		return err
 	}
 
-	// 比较CRC值
+	// Compares the CRC value
 	hasRange, _, _ := isOptionSet(options, HTTPHeaderRange)
 	if bucket.getConfig().IsEnableCRC && !hasRange {
 		result.Response.ClientCRC = result.ClientCRC.Sum64()
@@ -181,14 +176,13 @@ func (bucket Bucket) GetObjectToFile(objectKey, filePath string, options ...Opti
 	return os.Rename(tempFilePath, filePath)
 }
 
+// DoGetObject is the actual API that gets the object. It's the internal function called by other public APIs.
 //
-// DoGetObject 下载文件
+// request    the request to download the object.
+// options    the options for downloading the file. Checks out the parameter options in method GetObject.
 //
-// request 下载请求
-// options    对象的属性限制项。详见GetObject的options。
-//
-// GetObjectResult 下载请求返回值。
-// error  操作无错误为nil,非nil为错误信息。
+// GetObjectResult    the result instance of getting the object.
+// error    it's nil if no error, otherwise it's an error object.
 //
 func (bucket Bucket) DoGetObject(request *GetObjectRequest, options []Option) (*GetObjectResult, error) {
 	params := map[string]interface{}{}
@@ -201,7 +195,7 @@ func (bucket Bucket) DoGetObject(request *GetObjectRequest, options []Option) (*
 		Response: resp,
 	}
 
-	// crc
+	// CRC
 	var crcCalc hash.Hash64
 	hasRange, _, _ := isOptionSet(options, HTTPHeaderRange)
 	if bucket.getConfig().IsEnableCRC && !hasRange {
@@ -210,7 +204,7 @@ func (bucket Bucket) DoGetObject(request *GetObjectRequest, options []Option) (*
 		result.ClientCRC = crcCalc
 	}
 
-	// progress
+	// Progress
 	listener := getProgressListener(options)
 
 	contentLen, _ := strconv.ParseInt(resp.Headers.Get(HTTPHeaderContentLength), 10, 64)
@@ -219,18 +213,17 @@ func (bucket Bucket) DoGetObject(request *GetObjectRequest, options []Option) (*
 	return result, nil
 }
 
+// CopyObject copies the object inside the bucket.
 //
-// CopyObject 同一个bucket内拷贝Object。
-//
-// srcObjectKey  Copy的源对象。
-// destObjectKey Copy的目标对象。
-// options  Copy对象时,您可以指定源对象的限制条件,满足限制条件时copy,不满足时返回错误,您可以选择如下选项CopySourceIfMatch、
-// CopySourceIfNoneMatch、CopySourceIfModifiedSince、CopySourceIfUnmodifiedSince、MetadataDirective。
-// Copy对象时,您可以指定目标对象的属性,如CacheControl、ContentDisposition、ContentEncoding、Expires、
-// ServerSideEncryption、ObjectACL、Meta,选项的含义请参看
-// https://help.aliyun.com/document_detail/oss/api-reference/object/CopyObject.html
+// srcObjectKey    the source object to copy.
+// destObjectKey    the target object to copy.
+// options    options for copying an object. You can specify the conditions of copy. The valid conditions are CopySourceIfMatch,
+//            CopySourceIfNoneMatch, CopySourceIfModifiedSince, CopySourceIfUnmodifiedSince, MetadataDirective.
+//            Also you can specify the target object's attributes, such as CacheControl, ContentDisposition, ContentEncoding, Expires, 
+//            ServerSideEncryption, ObjectACL, Meta. Refer to the link below for more details :
+//            https://help.aliyun.com/document_detail/oss/api-reference/object/CopyObject.html
 //
-// error 操作无错误为nil,非nil为错误信息。
+// error    it's nil if no error, otherwise it's an error object.
 //
 func (bucket Bucket) CopyObject(srcObjectKey, destObjectKey string, options ...Option) (CopyObjectResult, error) {
 	var out CopyObjectResult
@@ -246,29 +239,28 @@ func (bucket Bucket) CopyObject(srcObjectKey, destObjectKey string, options ...O
 	return out, err
 }
 
+// CopyObjectTo copies the object to another bucket.
 //
-// CopyObjectTo bucket间拷贝object。
+// srcObjectKey    source object key. The source bucket is Bucket.BucketName .
+// destBucketName    target bucket name.
+// destObjectKey    target object name.
+// options    copy options, check out parameter options in function CopyObject for more details.
 //
-// srcObjectKey   源Object名称。源Bucket名称为Bucket.BucketName。
-// destBucketName  目标Bucket名称。
-// destObjectKey  目标Object名称。
-// options        Copy选项,详见CopyObject的options。
-//
-// error  操作无错误为nil,非nil为错误信息。
+// error    it's nil if no error, otherwise it's an error object.
 //
 func (bucket Bucket) CopyObjectTo(destBucketName, destObjectKey, srcObjectKey string, options ...Option) (CopyObjectResult, error) {
 	return bucket.copy(srcObjectKey, destBucketName, destObjectKey, options...)
 }
 
 //
-// CopyObjectFrom bucket间拷贝object。
+// CopyObjectFrom copies the object to another bucket.
 //
-// srcBucketName  源Bucket名称。
-// srcObjectKey   源Object名称。
-// destObjectKey  目标Object名称。目标Bucket名称为Bucket.BucketName。
-// options        Copy选项,详见CopyObject的options。
+// srcBucketName    source bucket name.
+// srcObjectKey    source object name.
+// destObjectKey    target object name. The target bucket name is Bucket.BucketName.
+// options    copy options. Check out parameter options in function CopyObject.
 //
-// error  操作无错误为nil,非nil为错误信息。
+// error    it's nil if no error, otherwise it's an error object.
 //
 func (bucket Bucket) CopyObjectFrom(srcBucketName, srcObjectKey, destObjectKey string, options ...Option) (CopyObjectResult, error) {
 	destBucketName := bucket.BucketName
@@ -300,22 +292,21 @@ func (bucket Bucket) copy(srcObjectKey, destBucketName, destObjectKey string, op
 	return out, err
 }
 
+// AppendObject uploads the data in the way of appending an existing or new object.
 //
-// AppendObject 追加方式上传。
-//
-// AppendObject参数必须包含position,其值指定从何处进行追加。首次追加操作的position必须为0,
-// 后续追加操作的position是Object的当前长度。例如,第一次Append Object请求指定position值为0,
-// content-length是65536;那么,第二次Append Object需要指定position为65536。
-// 每次操作成功后,响应头部x-oss-next-append-position也会标明下一次追加的position。
+// AppendObject the parameter appendPosition specifies which position (in the target object) to append. For the first append (to a non-existing file),
+// the appendPosition should be 0. The appendPosition in the subsequent calls will be the current object length.
+// For example, the first appendObject's appendPosition is 0 and it uploaded 65536 bytes data, then the second call's position is 65536.
+// The response header x-oss-next-append-position after each successful request also specifies the next call's append position (so the caller does not need to maintain this information).
 //
-// objectKey  需要追加的Object。
-// reader     io.Reader,读取追的内容。
-// appendPosition  object追加的起始位置。
-// destObjectProperties  第一次追加时指定新对象的属性,如CacheControl、ContentDisposition、ContentEncoding、
-// Expires、ServerSideEncryption、ObjectACL。
+// objectKey    the target object to append to.
+// reader    io.Reader. The read instance for reading the data to append.
+// appendPosition    the start position to append.
+// destObjectProperties    the options for the first appending, such as CacheControl, ContentDisposition, ContentEncoding,
+//                         Expires, ServerSideEncryption, ObjectACL. 
 //
-// int64 下次追加的开始位置,error为nil空时有效。
-// error 操作无错误为nil,非nil为错误信息。
+// int64    the next append position, it's valid when error is nil.
+// error    it's nil if no error, otherwise it's an error object.
 //
 func (bucket Bucket) AppendObject(objectKey string, reader io.Reader, appendPosition int64, options ...Option) (int64, error) {
 	request := &AppendObjectRequest{
@@ -332,14 +323,13 @@ func (bucket Bucket) AppendObject(objectKey string, reader io.Reader, appendPosi
 	return result.NextPosition, err
 }
 
+// DoAppendObject is the actual API that does the object append.
 //
-// DoAppendObject 追加上传。
+// request    the request object for appending object.
+// options    the options for appending object.
 //
-// request 追加上传请求。
-// options 追加上传选项。
-//
-// AppendObjectResult 追加上传请求返回值。
-// error  操作无错误为nil,非nil为错误信息。
+// AppendObjectResult    the result object for appending object.
+// error    it's nil if no error, otherwise it's an error object.
 //
 func (bucket Bucket) DoAppendObject(request *AppendObjectRequest, options []Option) (*AppendObjectResult, error) {
 	params := map[string]interface{}{}
@@ -382,12 +372,11 @@ func (bucket Bucket) DoAppendObject(request *AppendObjectRequest, options []Opti
 	return result, nil
 }
 
+// DeleteObject deletes the object.
 //
-// DeleteObject 删除Object。
-//
-// objectKey 待删除Object。
+// objectKey    the object key to delete.
 //
-// error 操作无错误为nil,非nil为错误信息。
+// error    it's nil if no error, otherwise it's an error object.
 //
 func (bucket Bucket) DeleteObject(objectKey string) error {
 	params := map[string]interface{}{}
@@ -399,14 +388,14 @@ func (bucket Bucket) DeleteObject(objectKey string) error {
 	return checkRespCode(resp.StatusCode, []int{http.StatusNoContent})
 }
 
+// DeleteObjects deletes multiple objects.
 //
-// DeleteObjects 批量删除object。
+// objectKeys    the object keys to delete.
+// options    the options for deleting objects.
+//            Supported option is DeleteObjectsQuiet which means it will not return an error even if the deletion failed (not recommended). By default it's not used.
 //
-// objectKeys 待删除object类表。
-// options 删除选项,DeleteObjectsQuiet,是否是安静模式,默认不使用。
-//
-// DeleteObjectsResult 非安静模式的的返回值。
-// error 操作无错误为nil,非nil为错误信息。
+// DeleteObjectsResult    the result object.
+// error    it's nil if no error, otherwise it's an error object.
 //
 func (bucket Bucket) DeleteObjects(objectKeys []string, options ...Option) (DeleteObjectsResult, error) {
 	out := DeleteObjectsResult{}
@@ -448,12 +437,11 @@ func (bucket Bucket) DeleteObjects(objectKeys []string, options ...Option) (Dele
 	return out, err
 }
 
+// IsObjectExist checks if the object exists.
 //
-// IsObjectExist object是否存在。
-//
-// bool  object是否存在,true存在,false不存在。error为nil时有效。
+// bool    flag of object's existence (true:exists; false:non-exist) when error is nil.
 //
-// error 操作无错误为nil,非nil为错误信息。
+// error    it's nil if no error, otherwise it's an error object.
 //
 func (bucket Bucket) IsObjectExist(objectKey string) (bool, error) {
 	_, err := bucket.GetObjectMeta(objectKey)
@@ -471,24 +459,25 @@ func (bucket Bucket) IsObjectExist(objectKey string) (bool, error) {
 	return false, err
 }
 
-//
-// ListObjects 获得Bucket下筛选后所有的object的列表。
-//
-// options  ListObject的筛选行为。Prefix指定的前缀、MaxKeys最大数目、Marker第一个开始、Delimiter对Object名字进行分组的字符。
-//
-// 您有如下8个object,my-object-1, my-object-11, my-object-2, my-object-21,
-// my-object-22, my-object-3, my-object-31, my-object-32。如果您指定了Prefix为my-object-2,
-// 则返回my-object-2, my-object-21, my-object-22三个object。如果您指定了Marker为my-object-22,
-// 则返回my-object-3, my-object-31, my-object-32三个object。如果您指定MaxKeys则每次最多返回MaxKeys个,
-// 最后一次可能不足。这三个参数可以组合使用,实现分页等功能。如果把prefix设为某个文件夹名,就可以罗列以此prefix开头的文件,
-// 即该文件夹下递归的所有的文件和子文件夹。如果再把delimiter设置为"/"时,返回值就只罗列该文件夹下的文件,该文件夹下的子文件名
-// 返回在CommonPrefixes部分,子文件夹下递归的文件和文件夹不被显示。例如一个bucket存在三个object,fun/test.jpg、
-// fun/movie/001.avi、fun/movie/007.avi。若设定prefix为"fun/",则返回三个object;如果增加设定
-// delimiter为"/",则返回文件"fun/test.jpg"和前缀"fun/movie/",即实现了文件夹的逻辑。
-//
-// 常用场景,请参数示例sample/list_object.go。
-//
-// ListObjectsResponse  操作成功后的返回值,成员Objects为bucket中对象列表。error为nil时该返回值有效。
+// ListObjects lists the objects under the current bucket.
+//
+// options    it contains all the filters for listing objects.
+//            It could specify a prefix filter on object keys, the max keys count to return, the object key marker, and the delimiter for grouping object names.
+//            The key marker means the returned objects' key must be greater than it in lexicographic order.
+// 
+//            For example, if the bucket has 8 objects, my-object-1, my-object-11, my-object-2, my-object-21,
+//            my-object-22, my-object-3, my-object-31, my-object-32. If the prefix is my-object-2 (no other filters), then it returns
+//            my-object-2, my-object-21, my-object-22 three objects. If the marker is my-object-22 (no other filters), then it returns
+//            my-object-3, my-object-31, my-object-32 three objects. If the max keys is 5, then it returns 5 objects.
+//            The three filters could be used together to achieve filter and paging functionality.
+//            If the prefix is the folder name, then it could list all files under this folder (including the files under its subfolders).
+//            But if the delimiter is specified with '/', then it only returns that folder's files (no subfolder's files). The direct subfolders are in the commonPrefixes properties.
+//            For example, if the bucket has three objects fun/test.jpg, fun/movie/001.avi, fun/movie/007.avi. And if the prefix is "fun/", then it returns all three objects.
+//            But if the delimiter is '/', then only "fun/test.jpg" is returned as files and fun/movie/ is returned as common prefix.
+// 
+//            For common usage scenario, check out sample/list_object.go.
+// 
+// ListObjectsResponse    the return value after operation succeeds (only valid when error is nil).
 //
 func (bucket Bucket) ListObjects(options ...Option) (ListObjectsResult, error) {
 	var out ListObjectsResult
@@ -514,14 +503,13 @@ func (bucket Bucket) ListObjects(options ...Option) (ListObjectsResult, error) {
 	return out, err
 }
 
+// SetObjectMeta sets the metadata of the Object.
 //
-// SetObjectMeta 设置Object的Meta。
-//
-// objectKey object
-// options 指定对象的属性,有以下可选项CacheControl、ContentDisposition、ContentEncoding、Expires、
-// ServerSideEncryption、Meta。
+// objectKey    object
+// options    options for setting the metadata. The valid options are CacheControl, ContentDisposition, ContentEncoding, Expires,
+//            ServerSideEncryption, and custom metadata.
 //
-// error 操作无错误时error为nil,非nil为错误信息。
+// error    it's nil if no error, otherwise it's an error object.
 //
 func (bucket Bucket) SetObjectMeta(objectKey string, options ...Option) error {
 	options = append(options, MetadataDirective(MetaReplace))
@@ -529,15 +517,14 @@ func (bucket Bucket) SetObjectMeta(objectKey string, options ...Option) error {
 	return err
 }
 
+// GetObjectDetailedMeta gets the object's detailed metadata
 //
-// GetObjectDetailedMeta 查询Object的头信息。
-//
-// objectKey object名称。
-// objectPropertyConstraints 对象的属性限制项,满足时正常返回,不满足时返回错误。现在项有IfModifiedSince、IfUnmodifiedSince、
-// IfMatch、IfNoneMatch。具体含义请参看 https://help.aliyun.com/document_detail/oss/api-reference/object/HeadObject.html
+// objectKey    object key.
+// options    the constraints of the object. Only when the object meets the requirements this method will return the metadata. Otherwise returns error. Valid options are IfModifiedSince, IfUnmodifiedSince,
+//            IfMatch, IfNoneMatch. For more details check out https://help.aliyun.com/document_detail/oss/api-reference/object/HeadObject.html
 //
-// http.Header  对象的meta,error为nil时有效。
-// error  操作无错误为nil,非nil为错误信息。
+// http.Header    object meta when error is nil.
+// error    it's nil if no error, otherwise it's an error object.
 //
 func (bucket Bucket) GetObjectDetailedMeta(objectKey string, options ...Option) (http.Header, error) {
 	params := map[string]interface{}{}
@@ -550,16 +537,15 @@ func (bucket Bucket) GetObjectDetailedMeta(objectKey string, options ...Option)
 	return resp.Headers, nil
 }
 
+// GetObjectMeta gets object metadata.
 //
-// GetObjectMeta 查询Object的头信息。
+// GetObjectMeta is more lightweight than GetObjectDetailedMeta as it only returns basic metadata including ETag
+// size, LastModified. The size information is in the HTTP header Content-Length.
 //
-// GetObjectMeta相比GetObjectDetailedMeta更轻量,仅返回指定Object的少量基本meta信息,
-// 包括该Object的ETag、Size(对象大小)、LastModified,其中Size由响应头Content-Length的数值表示。
+// objectKey    object key
 //
-// objectKey object名称。
-//
-// http.Header 对象的meta,error为nil时有效。
-// error 操作无错误为nil,非nil为错误信息。
+// http.Header    the object's metadata, valid when error is nil.
+// error    it's nil if no error, otherwise it's an error object.
 //
 func (bucket Bucket) GetObjectMeta(objectKey string) (http.Header, error) {
 	params := map[string]interface{}{}
@@ -574,22 +560,21 @@ func (bucket Bucket) GetObjectMeta(objectKey string) (http.Header, error) {
 	return resp.Headers, nil
 }
 
+// SetObjectACL updates the object's ACL.
 //
-// SetObjectACL 修改Object的ACL权限。
-//
-// 只有Bucket Owner才有权限调用PutObjectACL来修改Object的ACL。Object ACL优先级高于Bucket ACL。
-// 例如Bucket ACL是private的,而Object ACL是public-read-write的,则访问这个Object时,
-// 先判断Object的ACL,所以所有用户都拥有这个Object的访问权限,即使这个Bucket是private bucket。
-// 如果某个Object从来没设置过ACL,则访问权限遵循Bucket ACL。
+// Only the bucket's owner can update the object's ACL, whose priority is higher than the bucket's ACL.
+// For example, if the bucket ACL is private and object's ACL is public-read-write.
+// Then object's ACL is used and it means all users could read or write that object.
+// When the object's ACL is not set, then bucket's ACL is used as the object's ACL.
 //
-// Object的读操作包括GetObject,HeadObject,CopyObject和UploadPartCopy中的对source object的读;
-// Object的写操作包括:PutObject,PostObject,AppendObject,DeleteObject,
-// DeleteMultipleObjects,CompleteMultipartUpload以及CopyObject对新的Object的写。
+// Object read operations include GetObject, HeadObject, CopyObject and UploadPartCopy on the source object;
+// Object write operations include PutObject, PostObject, AppendObject, DeleteObject, DeleteMultipleObjects,
+// CompleteMultipartUpload and CopyObject on target object.
 //
-// objectKey 设置权限的object。
-// objectAcl 对象权限。可选值PrivateACL(私有读写)、PublicReadACL(公共读私有写)、PublicReadWriteACL(公共读写)。
+// objectKey    the target object key (to set the ACL on)
+// objectAcl    object ACL. Valid options are PrivateACL, PublicReadACL, PublicReadWriteACL.
 //
-// error 操作无错误为nil,非nil为错误信息。
+// error    it's nil if no error, otherwise it's an error object.
 //
 func (bucket Bucket) SetObjectACL(objectKey string, objectACL ACLType) error {
 	options := []Option{ObjectACL(objectACL)}
@@ -603,13 +588,12 @@ func (bucket Bucket) SetObjectACL(objectKey string, objectACL ACLType) error {
 	return checkRespCode(resp.StatusCode, []int{http.StatusOK})
 }
 
+// GetObjectACL gets object's ACL
 //
-// GetObjectACL 获取对象的ACL权限。
+// objectKey    the object to get ACL from.
 //
-// objectKey 获取权限的object。
-//
-// GetObjectAclResponse 获取权限操作返回值,error为nil时有效。GetObjectAclResponse.Acl为对象的权限。
-// error 操作无错误为nil,非nil为错误信息。
+// GetObjectACLResult    the result object when error is nil. GetObjectACLResult.Acl is the object ACL.
+// error    it's nil if no error, otherwise it's an error object.
 //
 func (bucket Bucket) GetObjectACL(objectKey string) (GetObjectACLResult, error) {
 	var out GetObjectACLResult
@@ -625,19 +609,18 @@ func (bucket Bucket) GetObjectACL(objectKey string) (GetObjectACLResult, error)
 	return out, err
 }
 
+// PutSymlink creates a symlink (to point to an existing object)
 //
-// PutSymlink 创建符号链接。
-//
-// 符号链接的目标文件类型不能为符号链接。
-// 创建符号链接时: 不检查目标文件是否存在, 不检查目标文件类型是否合法, 不检查目标文件是否有权限访问。
-// 以上检查,都推迟到GetObject等需要访问目标文件的API。
-// 如果试图添加的文件已经存在,并且有访问权限。新添加的文件将覆盖原来的文件。
-// 如果在PutSymlink的时候,携带以x-oss-meta-为前缀的参数,则视为user meta。
+// Symlink cannot point to another symlink.
+// When creating a symlink, it does not check the existence of the target file, and does not check if the target file is symlink.
+// Neither it checks the caller's permission on the target file. All these checks are deferred to the actual GetObject call via this symlink.
+// If trying to add an existing file, as long as the caller has the write permission, the existing one will be overwritten.
+// If the x-oss-meta- is specified, it will be added as the metadata of the symlink file.
 //
-// symObjectKey 要创建的符号链接文件。
-// targetObjectKey 目标文件。
+// symObjectKey    the symlink object's key.
+// targetObjectKey    the target object key to point to.
 //
-// error 操作无错误为nil,非nil为错误信息。
+// error    it's nil if no error, otherwise it's an error object.
 //
 func (bucket Bucket) PutSymlink(symObjectKey string, targetObjectKey string, options ...Option) error {
 	options = append(options, symlinkTarget(url.QueryEscape(targetObjectKey)))
@@ -651,13 +634,13 @@ func (bucket Bucket) PutSymlink(symObjectKey string, targetObjectKey string, opt
 	return checkRespCode(resp.StatusCode, []int{http.StatusOK})
 }
 
+// GetSymlink gets the symlink object with the specified key.
+// If the symlink object does not exist, returns 404.
 //
-// GetSymlink 获取符号链接的目标文件。
-// 如果符号链接不存在返回404。
+// objectKey    the symlink object's key.
 //
-// objectKey 获取目标文件的符号链接object。
-//
-// error 操作无错误为nil,非nil为错误信息。当error为nil时,返回的string为目标文件,否则该值无效。
+// error    it's nil if no error, otherwise it's an error object.
+//          When error is nil, the target file key is in the X-Oss-Symlink-Target header of the returned object.
 //
 func (bucket Bucket) GetSymlink(objectKey string) (http.Header, error) {
 	params := map[string]interface{}{}
@@ -677,18 +660,17 @@ func (bucket Bucket) GetSymlink(objectKey string) (http.Header, error) {
 	return resp.Headers, err
 }
 
+// RestoreObject restores the object from the archive storage.
 //
-// RestoreObject 恢复处于冷冻状态的归档类型Object进入读就绪状态。
-//
-// 一个Archive类型的object初始时处于冷冻状态。
+// An archive object is in cold status by default and it cannot be accessed.
+// When restore is called on the cold object, it will become available for access after some time.
+// If multiple restores are called on the same file when the object is being restored, server side does nothing for additional calls but returns success.
+// By default, the restored object is available for access for one day. After that it will be unavailable again.
+// But if another RestoreObject is called after the file is restored, it will extend the object's available time by one day, up to 7 days.
 //
-// 针对处于冷冻状态的object调用restore命令,返回成功。object处于解冻中,服务端执行解冻,在此期间再次调用restore命令,同样成功,且不会延长object可读状态持续时间。
-// 待服务端执行完成解冻任务后,object就进入了解冻状态,此时用户可以读取object。
-// 解冻状态默认持续1天,对于解冻状态的object调用restore命令,会将object的解冻状态延长一天,最多可以延长到7天,之后object又回到初始时的冷冻状态。
+// objectKey    object key to restore.
 //
-// objectKey 需要恢复状态的object名称。
-//
-// error 操作无错误为nil,非nil为错误信息。
+// error    it's nil if no error, otherwise it's an error object.
 //
 func (bucket Bucket) RestoreObject(objectKey string) error {
 	params := map[string]interface{}{}
@@ -701,14 +683,13 @@ func (bucket Bucket) RestoreObject(objectKey string) error {
 	return checkRespCode(resp.StatusCode, []int{http.StatusOK, http.StatusAccepted})
 }
 
+// SignURL signs the URL. Users could access the object directly with this URL without getting the AK.
 //
-// SignURL 获取签名URL。
-//
-// objectKey 获取URL的object。
-// signURLConfig 获取URL的配置。
+// objectKey    the target object to sign.
+// signURLConfig    the configuration for the signed URL.
 //
-// 返回URL字符串,error为nil时有效。
-// error 操作无错误为nil,非nil为错误信息。
+// string    returns the signed URL, when error is nil.
+// error    it's nil if no error, otherwise it's an error object.
 //
 func (bucket Bucket) SignURL(objectKey string, method HTTPMethod, expiredInSec int64, options ...Option) (string, error) {
 	if expiredInSec < 0 {
@@ -730,17 +711,16 @@ func (bucket Bucket) SignURL(objectKey string, method HTTPMethod, expiredInSec i
 	return bucket.Client.Conn.signURL(method, bucket.BucketName, objectKey, expiration, params, headers), nil
 }
 
+// PutObjectWithURL uploads an object with the URL. If the object exists, it will be overwritten.
+// PutObjectWithURL will not generate mimetype according to the key name.
 //
-// PutObjectWithURL 新建Object,如果Object已存在,覆盖原有Object。
-// PutObjectWithURL 不会根据key生成minetype。
+// signedURL    signed URL.
+// reader    io.Reader the read instance for reading the data for the upload.
+// options    the options for uploading the data. The valid options are CacheControl, ContentDisposition, ContentEncoding,
+//            Expires, ServerSideEncryption, ObjectACL and custom metadata. Check out the following link for details:
+//            https://help.aliyun.com/document_detail/oss/api-reference/object/PutObject.html
 //
-// signedURL  签名的URL。
-// reader     io.Reader读取object的数据。
-// options    上传对象时可以指定对象的属性,可用选项有CacheControl、ContentDisposition、ContentEncoding、
-// Expires、ServerSideEncryption、ObjectACL、Meta,具体含义请参看
-// https://help.aliyun.com/document_detail/oss/api-reference/object/PutObject.html
-//
-// error  操作无错误为nil,非nil为错误信息。
+// error    it's nil if no error, otherwise it's an error object.
 //
 func (bucket Bucket) PutObjectWithURL(signedURL string, reader io.Reader, options ...Option) error {
 	resp, err := bucket.DoPutObjectWithURL(signedURL, reader, options)
@@ -752,15 +732,14 @@ func (bucket Bucket) PutObjectWithURL(signedURL string, reader io.Reader, option
 	return err
 }
 
+// PutObjectFromFileWithURL uploads an object from a local file with the signed URL.
+// PutObjectFromFileWithURL does not generate mimetype according to the object key's name or the local file name.
 //
-// PutObjectFromFileWithURL 新建Object,内容从本地文件中读取。
-// PutObjectFromFileWithURL 不会根据key、filePath生成minetype。
-//
-// signedURL  签名的URL。
-// filePath  本地文件,如 dir/file.txt,上传对象的值为该文件内容。
-// options   上传对象时可以指定对象的属性。详见PutObject的options。
+// signedURL    the signed URL.
+// filePath    the local file path, such as dir/file.txt, for uploading.
+// options    options for uploading, same as the options in PutObject function.
 //
-// error  操作无错误为nil,非nil为错误信息。
+// error    it's nil if no error, otherwise it's an error object.
 //
 func (bucket Bucket) PutObjectFromFileWithURL(signedURL, filePath string, options ...Option) error {
 	fd, err := os.Open(filePath)
@@ -778,15 +757,14 @@ func (bucket Bucket) PutObjectFromFileWithURL(signedURL, filePath string, option
 	return err
 }
 
+// DoPutObjectWithURL is the actual API that does the upload with URL work(internal for SDK)
 //
-// DoPutObjectWithURL 上传文件。
+// signedURL    the signed URL.
+// reader    io.Reader the read instance for getting the data to upload.
+// options    options for uploading.
 //
-// signedURL  签名的URL。
-// reader     io.Reader读取object的数据。
-// options  上传选项。
-//
-// Response 上传请求返回值。
-// error  操作无错误为nil,非nil为错误信息。
+// Response    the response object which contains the HTTP response.
+// error    it's nil if no error, otherwise it's an error object.
 //
 func (bucket Bucket) DoPutObjectWithURL(signedURL string, reader io.Reader, options []Option) (*Response, error) {
 	listener := getProgressListener(options)
@@ -809,16 +787,15 @@ func (bucket Bucket) DoPutObjectWithURL(signedURL string, reader io.Reader, opti
 	return resp, err
 }
 
+// GetObjectWithURL downloads the object and returns the reader instance, with the signed URL.
 //
-// GetObjectWithURL 下载文件。
-//
-// signedURL  签名的URL。
-// options   对象的属性限制项,可选值有Range、IfModifiedSince、IfUnmodifiedSince、IfMatch、
-// IfNoneMatch、AcceptEncoding,详细请参考
-// https://help.aliyun.com/document_detail/oss/api-reference/object/GetObject.html
+// signedURL    the signed URL.
+// options    options for downloading the object. Valid options are IfModifiedSince, IfUnmodifiedSince, IfMatch,
+//            IfNoneMatch, AcceptEncoding. For more information, check out the following link:
+//            https://help.aliyun.com/document_detail/oss/api-reference/object/GetObject.html
 //
-// io.ReadCloser  reader,读取数据后需要close。error为nil时有效。
-// error  操作无错误为nil,非nil为错误信息。
+// io.ReadCloser    the reader object for getting the data from response. It needs to be closed after the usage. It's only valid when error is nil.
+// error    it's nil if no error, otherwise it's an error object.
 //
 func (bucket Bucket) GetObjectWithURL(signedURL string, options ...Option) (io.ReadCloser, error) {
 	result, err := bucket.DoGetObjectWithURL(signedURL, options)
@@ -828,39 +805,38 @@ func (bucket Bucket) GetObjectWithURL(signedURL string, options ...Option) (io.R
 	return result.Response.Body, nil
 }
 
+// GetObjectToFileWithURL downloads the object into a local file with the signed URL.
 //
-// GetObjectToFile 下载文件。
+// signedURL    the signed URL
+// filePath    the local file path to download to.
+// options    the options for downloading object. Check out the parameter options in function GetObject for the reference.
 //
-// signedURL  签名的URL。
-// filePath   下载对象的内容写到该本地文件。
-// options    对象的属性限制项。详见GetObject的options。
-//
-// error  操作无错误时返回error为nil,非nil为错误说明。
+// error    it's nil if no error, otherwise it's an error object.
 //
 func (bucket Bucket) GetObjectToFileWithURL(signedURL, filePath string, options ...Option) error {
 	tempFilePath := filePath + TempFileSuffix
 
-	// 读取Object内容
+	// Get the object's content
 	result, err := bucket.DoGetObjectWithURL(signedURL, options)
 	if err != nil {
 		return err
 	}
 	defer result.Response.Body.Close()
 
-	// 如果文件不存在则创建,存在则清空
+	// If the file does not exist, create one. If exists, then overwrite it.
 	fd, err := os.OpenFile(tempFilePath, os.O_CREATE|os.O_TRUNC|os.O_WRONLY, FilePermMode)
 	if err != nil {
 		return err
 	}
 
-	// 存储数据到文件
+	// Save the data to the file.
 	_, err = io.Copy(fd, result.Response.Body)
 	fd.Close()
 	if err != nil {
 		return err
 	}
 
-	// 比较CRC值
+	// Compare the CRC value. If CRC values do not match, return error.
 	hasRange, _, _ := isOptionSet(options, HTTPHeaderRange)
 	if bucket.getConfig().IsEnableCRC && !hasRange {
 		result.Response.ClientCRC = result.ClientCRC.Sum64()
@@ -874,14 +850,13 @@ func (bucket Bucket) GetObjectToFileWithURL(signedURL, filePath string, options
 	return os.Rename(tempFilePath, filePath)
 }
 
+// DoGetObjectWithURL is the actual API that downloads the file with the signed URL.
 //
-// DoGetObjectWithURL 下载文件
-//
-// signedURL  签名的URL。
-// options    对象的属性限制项。详见GetObject的options。
+// signedURL    the signed URL.
+// options    the options for getting object. Check out parameter options in GetObject for the reference.
 //
-// GetObjectResult 下载请求返回值。
-// error  操作无错误为nil,非nil为错误信息。
+// GetObjectResult    the result object when the error is nil.
+// error    it's nil if no error, otherwise it's an error object.
 //
 func (bucket Bucket) DoGetObjectWithURL(signedURL string, options []Option) (*GetObjectResult, error) {
 	params := map[string]interface{}{}
@@ -894,7 +869,7 @@ func (bucket Bucket) DoGetObjectWithURL(signedURL string, options []Option) (*Ge
 		Response: resp,
 	}
 
-	// crc
+	// CRC
 	var crcCalc hash.Hash64
 	hasRange, _, _ := isOptionSet(options, HTTPHeaderRange)
 	if bucket.getConfig().IsEnableCRC && !hasRange {
@@ -903,7 +878,7 @@ func (bucket Bucket) DoGetObjectWithURL(signedURL string, options []Option) (*Ge
 		result.ClientCRC = crcCalc
 	}
 
-	// progress
+	// Progress
 	listener := getProgressListener(options)
 
 	contentLen, _ := strconv.ParseInt(resp.Headers.Get(HTTPHeaderContentLength), 10, 64)

+ 135 - 134
oss/bucket_test.go

@@ -1,4 +1,4 @@
-// bucket test
+// Bucket test
 
 package oss
 
@@ -34,7 +34,7 @@ var (
 	futureDate = time.Date(2049, time.January, 10, 23, 0, 0, 0, time.UTC)
 )
 
-// Run once when the suite starts running
+// SetUpSuite runs once when the suite starts running.
 func (s *OssBucketSuite) SetUpSuite(c *C) {
 	client, err := New(endpoint, accessID, accessKey)
 	c.Assert(err, IsNil)
@@ -58,10 +58,11 @@ func (s *OssBucketSuite) SetUpSuite(c *C) {
 	testLogger.Println("test bucket started")
 }
 
-// Run once after all tests or benckmarks have finished running
+
+// TearDownSuite runs once after all tests or benchmarks have finished running.
 func (s *OssBucketSuite) TearDownSuite(c *C) {
 	for _, bucket := range []*Bucket{s.bucket, s.archiveBucket} {
-		// Delete Multipart
+		// Delete multipart
 		lmu, err := bucket.ListMultipartUploads()
 		c.Assert(err, IsNil)
 
@@ -71,7 +72,7 @@ func (s *OssBucketSuite) TearDownSuite(c *C) {
 			c.Assert(err, IsNil)
 		}
 
-		// Delete Objects
+		// Delete objects
 		lor, err := bucket.ListObjects()
 		c.Assert(err, IsNil)
 
@@ -84,13 +85,13 @@ func (s *OssBucketSuite) TearDownSuite(c *C) {
 	testLogger.Println("test bucket completed")
 }
 
-// Run before each test or benchmark starts
+// SetUpTest runs before each test or benchmark starts.
 func (s *OssBucketSuite) SetUpTest(c *C) {
 	err := removeTempFiles("../oss", ".jpg")
 	c.Assert(err, IsNil)
 }
 
-// Run after each test or benchmark runs
+// TearDownTest runs after each test or benchmark runs.
 func (s *OssBucketSuite) TearDownTest(c *C) {
 	err := removeTempFiles("../oss", ".jpg")
 	c.Assert(err, IsNil)
@@ -114,7 +115,7 @@ func (s *OssBucketSuite) TestPutObject(c *C) {
 	objectValue := "大江东去,浪淘尽,千古风流人物。 故垒西边,人道是、三国周郎赤壁。 乱石穿空,惊涛拍岸,卷起千堆雪。 江山如画,一时多少豪杰。" +
 		"遥想公谨当年,小乔初嫁了,雄姿英发。 羽扇纶巾,谈笑间、樯橹灰飞烟灭。故国神游,多情应笑我,早生华发,人生如梦,一尊还酹江月。"
 
-	// string put
+	// Put string
 	err := s.bucket.PutObject(objectName, strings.NewReader(objectValue))
 	c.Assert(err, IsNil)
 
@@ -133,7 +134,7 @@ func (s *OssBucketSuite) TestPutObject(c *C) {
 	err = s.bucket.DeleteObject(objectName)
 	c.Assert(err, IsNil)
 
-	// bytes put
+	// Put bytes 
 	err = s.bucket.PutObject(objectName, bytes.NewReader([]byte(objectValue)))
 	c.Assert(err, IsNil)
 
@@ -147,7 +148,7 @@ func (s *OssBucketSuite) TestPutObject(c *C) {
 	err = s.bucket.DeleteObject(objectName)
 	c.Assert(err, IsNil)
 
-	// file put
+	// Put file
 	err = createFileAndWrite(objectName+".txt", []byte(objectValue))
 	c.Assert(err, IsNil)
 	fd, err := os.Open(objectName + ".txt")
@@ -209,14 +210,14 @@ func (s *OssBucketSuite) TestSignURL(c *C) {
 	notExistfilePath := randLowStr(10)
 	os.Remove(notExistfilePath)
 
-	// sign url for put
+	// Sign URL for put
 	str, err := s.bucket.SignURL(objectName, HTTPPut, 60)
 	c.Assert(err, IsNil)
 	c.Assert(strings.Contains(str, HTTPParamExpires+"="), Equals, true)
 	c.Assert(strings.Contains(str, HTTPParamAccessKeyID+"="), Equals, true)
 	c.Assert(strings.Contains(str, HTTPParamSignature+"="), Equals, true)
 
-	// error put object with url
+	// Error put object with URL
 	err = s.bucket.PutObjectWithURL(str, strings.NewReader(objectValue), ContentType("image/tiff"))
 	c.Assert(err, NotNil)
 	c.Assert(err.(ServiceError).Code, Equals, "SignatureDoesNotMatch")
@@ -225,7 +226,7 @@ func (s *OssBucketSuite) TestSignURL(c *C) {
 	c.Assert(err, NotNil)
 	c.Assert(err.(ServiceError).Code, Equals, "SignatureDoesNotMatch")
 
-	// put object with url
+	// Put object with URL
 	err = s.bucket.PutObjectWithURL(str, strings.NewReader(objectValue))
 	c.Assert(err, IsNil)
 
@@ -233,27 +234,27 @@ func (s *OssBucketSuite) TestSignURL(c *C) {
 	c.Assert(err, IsNil)
 	c.Assert(acl.ACL, Equals, "default")
 
-	// get object meta
+	// Get object meta
 	meta, err := s.bucket.GetObjectDetailedMeta(objectName)
 	c.Assert(err, IsNil)
 	c.Assert(meta.Get(HTTPHeaderContentType), Equals, "application/octet-stream")
 	c.Assert(meta.Get("X-Oss-Meta-Myprop"), Equals, "")
 
-	// sign url for get object
+	// Sign URL for function GetObjectWithURL
 	str, err = s.bucket.SignURL(objectName, HTTPGet, 60)
 	c.Assert(err, IsNil)
 	c.Assert(strings.Contains(str, HTTPParamExpires+"="), Equals, true)
 	c.Assert(strings.Contains(str, HTTPParamAccessKeyID+"="), Equals, true)
 	c.Assert(strings.Contains(str, HTTPParamSignature+"="), Equals, true)
 
-	// get object with url
+	// Get object with URL
 	body, err := s.bucket.GetObjectWithURL(str)
 	c.Assert(err, IsNil)
 	str, err = readBody(body)
 	c.Assert(err, IsNil)
 	c.Assert(str, Equals, objectValue)
 
-	// sign url for put with options
+	// Sign URL for function PutObjectWithURL
 	options := []Option{
 		ObjectACL(ACLPublicRead),
 		Meta("myprop", "mypropval"),
@@ -266,8 +267,8 @@ func (s *OssBucketSuite) TestSignURL(c *C) {
 	c.Assert(strings.Contains(str, HTTPParamAccessKeyID+"="), Equals, true)
 	c.Assert(strings.Contains(str, HTTPParamSignature+"="), Equals, true)
 
-	// put object with url from file
-	// without option, error
+	// Put object with URL from file
+	// Without option, error
 	err = s.bucket.PutObjectWithURL(str, strings.NewReader(objectValue))
 	c.Assert(err, NotNil)
 	c.Assert(err.(ServiceError).Code, Equals, "SignatureDoesNotMatch")
@@ -276,15 +277,15 @@ func (s *OssBucketSuite) TestSignURL(c *C) {
 	c.Assert(err, NotNil)
 	c.Assert(err.(ServiceError).Code, Equals, "SignatureDoesNotMatch")
 
-	// with option, error file
+	// With option, error file
 	err = s.bucket.PutObjectFromFileWithURL(str, notExistfilePath, options...)
 	c.Assert(err, NotNil)
 
-	// with option
+	// With option
 	err = s.bucket.PutObjectFromFileWithURL(str, filePath, options...)
 	c.Assert(err, IsNil)
 
-	// get object meta
+	// Get object meta
 	meta, err = s.bucket.GetObjectDetailedMeta(objectName)
 	c.Assert(err, IsNil)
 	c.Assert(meta.Get("X-Oss-Meta-Myprop"), Equals, "mypropval")
@@ -294,11 +295,11 @@ func (s *OssBucketSuite) TestSignURL(c *C) {
 	c.Assert(err, IsNil)
 	c.Assert(acl.ACL, Equals, string(ACLPublicRead))
 
-	// sign url for get object
+	// Sign URL for function GetObjectToFileWithURL
 	str, err = s.bucket.SignURL(objectName, HTTPGet, 60)
 	c.Assert(err, IsNil)
 
-	// get object to file with url
+	// Get object to file with URL
 	newFile := randStr(10)
 	err = s.bucket.GetObjectToFileWithURL(str, newFile)
 	c.Assert(err, IsNil)
@@ -307,20 +308,20 @@ func (s *OssBucketSuite) TestSignURL(c *C) {
 	c.Assert(eq, Equals, true)
 	os.Remove(newFile)
 
-	// get object to file error
+	// Get object to file error
 	err = s.bucket.GetObjectToFileWithURL(str, newFile, options...)
 	c.Assert(err, NotNil)
 	c.Assert(err.(ServiceError).Code, Equals, "SignatureDoesNotMatch")
 	_, err = os.Stat(newFile)
 	c.Assert(err, NotNil)
 
-	// get object error
+	// Get object error
 	body, err = s.bucket.GetObjectWithURL(str, options...)
 	c.Assert(err, NotNil)
 	c.Assert(err.(ServiceError).Code, Equals, "SignatureDoesNotMatch")
 	c.Assert(body, IsNil)
 
-	// sign url for get object with options
+	// Sign URL for function GetObjectToFileWithURL
 	options = []Option{
 		Expires(futureDate),
 		ObjectACL(ACLPublicRead),
@@ -331,7 +332,7 @@ func (s *OssBucketSuite) TestSignURL(c *C) {
 	str, err = s.bucket.SignURL(objectName, HTTPGet, 60, options...)
 	c.Assert(err, IsNil)
 
-	// get object to file with url and options
+	// Get object to file with URL and options
 	err = s.bucket.GetObjectToFileWithURL(str, newFile, options...)
 	c.Assert(err, IsNil)
 	eq, err = compareFiles(filePath, newFile)
@@ -339,14 +340,14 @@ func (s *OssBucketSuite) TestSignURL(c *C) {
 	c.Assert(eq, Equals, true)
 	os.Remove(newFile)
 
-	// get object to file error
+	// Get object to file error
 	err = s.bucket.GetObjectToFileWithURL(str, newFile)
 	c.Assert(err, NotNil)
 	c.Assert(err.(ServiceError).Code, Equals, "SignatureDoesNotMatch")
 	_, err = os.Stat(newFile)
 	c.Assert(err, NotNil)
 
-	// get object error
+	// Get object error
 	body, err = s.bucket.GetObjectWithURL(str)
 	c.Assert(err, NotNil)
 	c.Assert(err.(ServiceError).Code, Equals, "SignatureDoesNotMatch")
@@ -355,14 +356,14 @@ func (s *OssBucketSuite) TestSignURL(c *C) {
 	os.Remove(filePath)
 	os.Remove(newFile)
 
-	// sign url error
+	// Sign URL error
 	str, err = s.bucket.SignURL(objectName, HTTPGet, -1)
 	c.Assert(err, NotNil)
 
 	err = s.bucket.DeleteObject(objectName)
 	c.Assert(err, IsNil)
 
-	// invalid url parse
+	// Invalid URL parse
 	str = randStr(20)
 
 	err = s.bucket.PutObjectWithURL(str, strings.NewReader(objectValue))
@@ -373,132 +374,132 @@ func (s *OssBucketSuite) TestSignURL(c *C) {
 }
 
 func (s *OssBucketSuite) TestSignURLWithEscapedKey(c *C) {
-	// key with '/'
+	// Key with '/'
 	objectName := "zyimg/86/e8/653b5dc97bb0022051a84c632bc4"
 	objectValue := "弃我去者,昨日之日不可留;乱我心者,今日之日多烦忧。长风万里送秋雁,对此可以酣高楼。蓬莱文章建安骨,中间小谢又清发。" +
 		"俱怀逸兴壮思飞,欲上青天揽明月。抽刀断水水更流,举杯销愁愁更愁。人生在世不称意,明朝散发弄扁舟。"
 
-	// sign url for put
+	// Sign URL for function PutObjectWithURL
 	str, err := s.bucket.SignURL(objectName, HTTPPut, 60)
 	c.Assert(err, IsNil)
 	c.Assert(strings.Contains(str, HTTPParamExpires+"="), Equals, true)
 	c.Assert(strings.Contains(str, HTTPParamAccessKeyID+"="), Equals, true)
 	c.Assert(strings.Contains(str, HTTPParamSignature+"="), Equals, true)
 
-	// put object with url
+	// Put object with URL
 	err = s.bucket.PutObjectWithURL(str, strings.NewReader(objectValue))
 	c.Assert(err, IsNil)
 
-	// sign url for get object
+	// Sign URL for function GetObjectWithURL
 	str, err = s.bucket.SignURL(objectName, HTTPGet, 60)
 	c.Assert(err, IsNil)
 	c.Assert(strings.Contains(str, HTTPParamExpires+"="), Equals, true)
 	c.Assert(strings.Contains(str, HTTPParamAccessKeyID+"="), Equals, true)
 	c.Assert(strings.Contains(str, HTTPParamSignature+"="), Equals, true)
 
-	// get object with url
+	// Get object with URL
 	body, err := s.bucket.GetObjectWithURL(str)
 	c.Assert(err, IsNil)
 	str, err = readBody(body)
 	c.Assert(err, IsNil)
 	c.Assert(str, Equals, objectValue)
 
-	// key with escaped chars
+	// Key with escaped chars
 	objectName = "<>[]()`?.,!@#$%^&'/*-_=+~:;"
 
-	// sign url for put
+	// Sign URL for function PutObjectWithURL
 	str, err = s.bucket.SignURL(objectName, HTTPPut, 60)
 	c.Assert(err, IsNil)
 	c.Assert(strings.Contains(str, HTTPParamExpires+"="), Equals, true)
 	c.Assert(strings.Contains(str, HTTPParamAccessKeyID+"="), Equals, true)
 	c.Assert(strings.Contains(str, HTTPParamSignature+"="), Equals, true)
 
-	// put object with url
+	// Put object with URL
 	err = s.bucket.PutObjectWithURL(str, strings.NewReader(objectValue))
 	c.Assert(err, IsNil)
 
-	// sign url for get object
+	// Sign URL for function GetObjectWithURL
 	str, err = s.bucket.SignURL(objectName, HTTPGet, 60)
 	c.Assert(err, IsNil)
 	c.Assert(strings.Contains(str, HTTPParamExpires+"="), Equals, true)
 	c.Assert(strings.Contains(str, HTTPParamAccessKeyID+"="), Equals, true)
 	c.Assert(strings.Contains(str, HTTPParamSignature+"="), Equals, true)
 
-	// get object with url
+	// Get object with URL
 	body, err = s.bucket.GetObjectWithURL(str)
 	c.Assert(err, IsNil)
 	str, err = readBody(body)
 	c.Assert(err, IsNil)
 	c.Assert(str, Equals, objectValue)
 
-	// key with Chinese chars
+	// Key with Chinese chars
 	objectName = "风吹柳花满店香,吴姬压酒劝客尝。金陵子弟来相送,欲行不行各尽觞。请君试问东流水,别意与之谁短长。"
 
-	// sign url for put
+	// Sign URL for function PutObjectWithURL
 	str, err = s.bucket.SignURL(objectName, HTTPPut, 60)
 	c.Assert(err, IsNil)
 	c.Assert(strings.Contains(str, HTTPParamExpires+"="), Equals, true)
 	c.Assert(strings.Contains(str, HTTPParamAccessKeyID+"="), Equals, true)
 	c.Assert(strings.Contains(str, HTTPParamSignature+"="), Equals, true)
 
-	// put object with url
+	// Put object with URL
 	err = s.bucket.PutObjectWithURL(str, strings.NewReader(objectValue))
 	c.Assert(err, IsNil)
 
-	// sign url for get object
+	// Sign URL for function GetObjectWithURL
 	str, err = s.bucket.SignURL(objectName, HTTPGet, 60)
 	c.Assert(err, IsNil)
 	c.Assert(strings.Contains(str, HTTPParamExpires+"="), Equals, true)
 	c.Assert(strings.Contains(str, HTTPParamAccessKeyID+"="), Equals, true)
 	c.Assert(strings.Contains(str, HTTPParamSignature+"="), Equals, true)
 
-	// get object with url
+	// Get object with URL
 	body, err = s.bucket.GetObjectWithURL(str)
 	c.Assert(err, IsNil)
 	str, err = readBody(body)
 	c.Assert(err, IsNil)
 	c.Assert(str, Equals, objectValue)
 
-	// key
+	// Key
 	objectName = "test/此情无计可消除/才下眉头/却上 心头/。,;:‘’“”?()『』【】《》!@#¥%……&×/test+ =-_*&^%$#@!`~[]{}()<>|\\/?.,;.txt"
 
-	// sign url for put
+	// Sign URL for function PutObjectWithURL
 	str, err = s.bucket.SignURL(objectName, HTTPPut, 60)
 	c.Assert(err, IsNil)
 
-	// put object with url
+	// Put object with URL
 	err = s.bucket.PutObjectWithURL(str, strings.NewReader(objectValue))
 	c.Assert(err, IsNil)
 
-	// sign url for get object
+	// Sign URL for function GetObjectWithURL
 	str, err = s.bucket.SignURL(objectName, HTTPGet, 60)
 	c.Assert(err, IsNil)
 
-	// get object with url
+	// Get object with URL
 	body, err = s.bucket.GetObjectWithURL(str)
 	c.Assert(err, IsNil)
 	str, err = readBody(body)
 	c.Assert(err, IsNil)
 	c.Assert(str, Equals, objectValue)
 
-	// put object
+	// Put object
 	err = s.bucket.PutObject(objectName, bytes.NewReader([]byte(objectValue)))
 	c.Assert(err, IsNil)
 
-	// get object
+	// Get object
 	body, err = s.bucket.GetObject(objectName)
 	c.Assert(err, IsNil)
 	str, err = readBody(body)
 	c.Assert(err, IsNil)
 	c.Assert(str, Equals, objectValue)
 
-	// delete object
+	// Delete object
 	err = s.bucket.DeleteObject(objectName)
 	c.Assert(err, IsNil)
 }
 
 func (s *OssBucketSuite) TestSignURLWithEscapedKeyAndPorxy(c *C) {
-	// key with '/'
+	// Key with '/'
 	objectName := "zyimg/86/e8/653b5dc97bb0022051a84c632bc4"
 	objectValue := "弃我去者,昨日之日不可留;乱我心者,今日之日多烦忧。长风万里送秋雁,对此可以酣高楼。蓬莱文章建安骨,中间小谢又清发。" +
 		"俱怀逸兴壮思飞,欲上青天揽明月。抽刀断水水更流,举杯销愁愁更愁。人生在世不称意,明朝散发弄扁舟。"
@@ -506,65 +507,65 @@ func (s *OssBucketSuite) TestSignURLWithEscapedKeyAndPorxy(c *C) {
 	client, err := New(endpoint, accessID, accessKey, AuthProxy(proxyHost, proxyUser, proxyPasswd))
 	bucket, err := client.Bucket(bucketName)
 
-	// sign url for put
+	// Sign URL for put
 	str, err := bucket.SignURL(objectName, HTTPPut, 60)
 	c.Assert(err, IsNil)
 	c.Assert(strings.Contains(str, HTTPParamExpires+"="), Equals, true)
 	c.Assert(strings.Contains(str, HTTPParamAccessKeyID+"="), Equals, true)
 	c.Assert(strings.Contains(str, HTTPParamSignature+"="), Equals, true)
 
-	// put object with url
+	// Put object with URL
 	err = bucket.PutObjectWithURL(str, strings.NewReader(objectValue))
 	c.Assert(err, IsNil)
 
-	// sign url for get object
+	// Sign URL for function GetObjectWithURL
 	str, err = bucket.SignURL(objectName, HTTPGet, 60)
 	c.Assert(err, IsNil)
 	c.Assert(strings.Contains(str, HTTPParamExpires+"="), Equals, true)
 	c.Assert(strings.Contains(str, HTTPParamAccessKeyID+"="), Equals, true)
 	c.Assert(strings.Contains(str, HTTPParamSignature+"="), Equals, true)
 
-	// get object with url
+	// Get object with URL
 	body, err := bucket.GetObjectWithURL(str)
 	c.Assert(err, IsNil)
 	str, err = readBody(body)
 	c.Assert(err, IsNil)
 	c.Assert(str, Equals, objectValue)
 
-	// key with Chinese chars
+	// Key with Chinese chars
 	objectName = "test/此情无计可消除/才下眉头/却上 心头/。,;:‘’“”?()『』【】《》!@#¥%……&×/test+ =-_*&^%$#@!`~[]{}()<>|\\/?.,;.txt"
 
-	// sign url for put
+	// Sign URL for function PutObjectWithURL
 	str, err = bucket.SignURL(objectName, HTTPPut, 60)
 	c.Assert(err, IsNil)
 
-	// put object with url
+	// Put object with URL
 	err = bucket.PutObjectWithURL(str, strings.NewReader(objectValue))
 	c.Assert(err, IsNil)
 
-	// sign url for get object
+	// Sign URL for function GetObjectWithURL
 	str, err = bucket.SignURL(objectName, HTTPGet, 60)
 	c.Assert(err, IsNil)
 
-	// get object with url
+	// Get object with URL
 	body, err = bucket.GetObjectWithURL(str)
 	c.Assert(err, IsNil)
 	str, err = readBody(body)
 	c.Assert(err, IsNil)
 	c.Assert(str, Equals, objectValue)
 
-	// put object
+	// Put object
 	err = bucket.PutObject(objectName, bytes.NewReader([]byte(objectValue)))
 	c.Assert(err, IsNil)
 
-	// get object
+	// Get object
 	body, err = bucket.GetObject(objectName)
 	c.Assert(err, IsNil)
 	str, err = readBody(body)
 	c.Assert(err, IsNil)
 	c.Assert(str, Equals, objectValue)
 
-	// delete object
+	// Delete object
 	err = bucket.DeleteObject(objectName)
 	c.Assert(err, IsNil)
 }
@@ -708,7 +709,7 @@ func (s *OssBucketSuite) TestPutObjectNegative(c *C) {
 	err = s.bucket.DeleteObject(objectName)
 	c.Assert(err, IsNil)
 
-	// invalid option
+	// Invalid option
 	err = s.bucket.PutObject(objectName, strings.NewReader(objectValue),
 		IfModifiedSince(pastDate))
 	c.Assert(err, NotNil)
@@ -865,23 +866,23 @@ func (s *OssBucketSuite) TestGetObjectToWriterNegative(c *C) {
 	objectName := objectNamePrefix + "tgotwn"
 	objectValue := "长忆观潮,满郭人争江上望。"
 
-	// object not exist
+	// Object not exist
 	_, err := s.bucket.GetObject("NotExist")
 	c.Assert(err, NotNil)
 
-	// constraint invalid
+	// Constraint invalid
 	err = s.bucket.PutObject(objectName, strings.NewReader(objectValue))
 	c.Assert(err, IsNil)
 
-	// out of range
+	// Out of range
 	_, err = s.bucket.GetObject(objectName, Range(15, 1000))
 	c.Assert(err, IsNil)
 
-	// no exist
+	// Not exist
 	err = s.bucket.GetObjectToFile(objectName, "/root/123abc9874")
 	c.Assert(err, NotNil)
 
-	// invalid option
+	// Invalid option
 	_, err = s.bucket.GetObject(objectName, ACL(ACLPublicRead))
 	c.Assert(err, IsNil)
 
@@ -976,12 +977,12 @@ func (s *OssBucketSuite) TestGetObjectToFile(c *C) {
 func (s *OssBucketSuite) TestListObjects(c *C) {
 	objectName := objectNamePrefix + "tlo"
 
-	// list empty bucket
+	// List empty bucket
 	lor, err := s.bucket.ListObjects()
 	c.Assert(err, IsNil)
 	left := len(lor.Objects)
 
-	// Put three object
+	// Put three objects
 	err = s.bucket.PutObject(objectName+"1", strings.NewReader(""))
 	c.Assert(err, IsNil)
 	err = s.bucket.PutObject(objectName+"2", strings.NewReader(""))
@@ -989,12 +990,12 @@ func (s *OssBucketSuite) TestListObjects(c *C) {
 	err = s.bucket.PutObject(objectName+"3", strings.NewReader(""))
 	c.Assert(err, IsNil)
 
-	// list
+	// List
 	lor, err = s.bucket.ListObjects()
 	c.Assert(err, IsNil)
 	c.Assert(len(lor.Objects), Equals, left+3)
 
-	// list with prefix
+	// List with prefix
 	lor, err = s.bucket.ListObjects(Prefix(objectName + "2"))
 	c.Assert(err, IsNil)
 	c.Assert(len(lor.Objects), Equals, 1)
@@ -1003,12 +1004,12 @@ func (s *OssBucketSuite) TestListObjects(c *C) {
 	c.Assert(err, IsNil)
 	c.Assert(len(lor.Objects), Equals, 0)
 
-	// list with max keys
+	// List with max keys
 	lor, err = s.bucket.ListObjects(Prefix(objectName), MaxKeys(2))
 	c.Assert(err, IsNil)
 	c.Assert(len(lor.Objects), Equals, 2)
 
-	// list with marker
+	// List with marker
 	lor, err = s.bucket.ListObjects(Marker(objectName+"1"), MaxKeys(1))
 	c.Assert(err, IsNil)
 	c.Assert(len(lor.Objects), Equals, 1)
@@ -1053,7 +1054,7 @@ func (s *OssBucketSuite) TestListObjectsEncodingType(c *C) {
 		c.Assert(err, IsNil)
 	}
 
-	// 特殊字符
+	// Special characters
 	objectName = "go go ` ~ ! @ # $ % ^ & * () - _ + =[] {} \\ | < > , . ? / 0"
 	err = s.bucket.PutObject(objectName, strings.NewReader("明月几时有,把酒问青天"))
 	c.Assert(err, IsNil)
@@ -1081,7 +1082,7 @@ func (s *OssBucketSuite) TestListObjectsEncodingType(c *C) {
 func (s *OssBucketSuite) TestIsObjectExist(c *C) {
 	objectName := objectNamePrefix + "tibe"
 
-	// Put three object
+	// Put three objects
 	err := s.bucket.PutObject(objectName+"1", strings.NewReader(""))
 	c.Assert(err, IsNil)
 	err = s.bucket.PutObject(objectName+"11", strings.NewReader(""))
@@ -1089,7 +1090,7 @@ func (s *OssBucketSuite) TestIsObjectExist(c *C) {
 	err = s.bucket.PutObject(objectName+"111", strings.NewReader(""))
 	c.Assert(err, IsNil)
 
-	// exist
+	// Exist
 	exist, err := s.bucket.IsObjectExist(objectName + "11")
 	c.Assert(err, IsNil)
 	c.Assert(exist, Equals, true)
@@ -1102,7 +1103,7 @@ func (s *OssBucketSuite) TestIsObjectExist(c *C) {
 	c.Assert(err, IsNil)
 	c.Assert(exist, Equals, true)
 
-	// not exist
+	// Not exist
 	exist, err = s.bucket.IsObjectExist(objectName + "1111")
 	c.Assert(err, IsNil)
 	c.Assert(exist, Equals, false)
@@ -1130,11 +1131,11 @@ func (s *OssBucketSuite) TestDeleteObject(c *C) {
 	c.Assert(err, IsNil)
 	c.Assert(len(lor.Objects), Equals, 1)
 
-	// delete
+	// Delete
 	err = s.bucket.DeleteObject(objectName)
 	c.Assert(err, IsNil)
 
-	// duplicate delete
+	// Duplicate delete
 	err = s.bucket.DeleteObject(objectName)
 	c.Assert(err, IsNil)
 
@@ -1147,7 +1148,7 @@ func (s *OssBucketSuite) TestDeleteObject(c *C) {
 func (s *OssBucketSuite) TestDeleteObjects(c *C) {
 	objectName := objectNamePrefix + "tdos"
 
-	// delete object
+	// Delete objects
 	err := s.bucket.PutObject(objectName, strings.NewReader(""))
 	c.Assert(err, IsNil)
 
@@ -1159,7 +1160,7 @@ func (s *OssBucketSuite) TestDeleteObjects(c *C) {
 	c.Assert(err, IsNil)
 	c.Assert(len(lor.Objects), Equals, 0)
 
-	// delete objects
+	// Delete objects
 	err = s.bucket.PutObject(objectName+"1", strings.NewReader(""))
 	c.Assert(err, IsNil)
 
@@ -1174,7 +1175,7 @@ func (s *OssBucketSuite) TestDeleteObjects(c *C) {
 	c.Assert(err, IsNil)
 	c.Assert(len(lor.Objects), Equals, 0)
 
-	// delete 0
+	// Delete 0
 	_, err = s.bucket.DeleteObjects([]string{})
 	c.Assert(err, NotNil)
 
@@ -1236,7 +1237,7 @@ func (s *OssBucketSuite) TestDeleteObjects(c *C) {
 	c.Assert(err, IsNil)
 	c.Assert(len(res.DeletedObjects), Equals, 0)
 
-	// 特殊字符
+	// Special characters
 	key := "A ' < > \" & ~ ` ! @ # $ % ^ & * ( ) [] {} - _ + = / | \\ ? . , : ; A"
 	err = s.bucket.PutObject(key, strings.NewReader("value"))
 	c.Assert(err, IsNil)
@@ -1248,7 +1249,7 @@ func (s *OssBucketSuite) TestDeleteObjects(c *C) {
 	c.Assert(err, IsNil)
 	c.Assert(len(ress.Objects), Equals, 0)
 
-	// not exist
+	// Not exist
 	_, err = s.bucket.DeleteObjects([]string{"NotExistObject"})
 	c.Assert(err, IsNil)
 }
@@ -1275,18 +1276,18 @@ func (s *OssBucketSuite) TestSetObjectMeta(c *C) {
 	c.Assert(err, IsNil)
 	c.Assert(acl.ACL, Equals, "default")
 
-	// invalid option
+	// Invalid option
 	err = s.bucket.SetObjectMeta(objectName, AcceptEncoding("url"))
 	c.Assert(err, IsNil)
 
-	// invalid option value
+	// Invalid option value
 	err = s.bucket.SetObjectMeta(objectName, ServerSideEncryption("invalid"))
 	c.Assert(err, NotNil)
 
 	err = s.bucket.DeleteObject(objectName)
 	c.Assert(err, IsNil)
 
-	// no exist
+	// Not exist
 	err = s.bucket.SetObjectMeta(objectName, Expires(futureDate))
 	c.Assert(err, NotNil)
 }
@@ -1362,12 +1363,12 @@ func (s *OssBucketSuite) TestSetAndGetObjectAcl(c *C) {
 	err := s.bucket.PutObject(objectName, strings.NewReader(""))
 	c.Assert(err, IsNil)
 
-	// default
+	// Default
 	acl, err := s.bucket.GetObjectACL(objectName)
 	c.Assert(err, IsNil)
 	c.Assert(acl.ACL, Equals, "default")
 
-	// set ACL_PUBLIC_RW
+	// Set ACL_PUBLIC_RW
 	err = s.bucket.SetObjectACL(objectName, ACLPublicReadWrite)
 	c.Assert(err, IsNil)
 
@@ -1375,7 +1376,7 @@ func (s *OssBucketSuite) TestSetAndGetObjectAcl(c *C) {
 	c.Assert(err, IsNil)
 	c.Assert(acl.ACL, Equals, string(ACLPublicReadWrite))
 
-	// set ACL_PRIVATE
+	// Set ACL_PRIVATE
 	err = s.bucket.SetObjectACL(objectName, ACLPrivate)
 	c.Assert(err, IsNil)
 
@@ -1383,7 +1384,7 @@ func (s *OssBucketSuite) TestSetAndGetObjectAcl(c *C) {
 	c.Assert(err, IsNil)
 	c.Assert(acl.ACL, Equals, string(ACLPrivate))
 
-	// set ACL_PUBLIC_R
+	// Set ACL_PUBLIC_R
 	err = s.bucket.SetObjectACL(objectName, ACLPublicRead)
 	c.Assert(err, IsNil)
 
@@ -1399,7 +1400,7 @@ func (s *OssBucketSuite) TestSetAndGetObjectAcl(c *C) {
 func (s *OssBucketSuite) TestSetAndGetObjectAclNegative(c *C) {
 	objectName := objectNamePrefix + "tsgban"
 
-	// object not exist
+	// Object not exist
 	err := s.bucket.SetObjectACL(objectName, ACLPublicRead)
 	c.Assert(err, NotNil)
 }
@@ -1413,12 +1414,12 @@ func (s *OssBucketSuite) TestCopyObject(c *C) {
 		ACL(ACLPublicRead), Meta("my", "myprop"))
 	c.Assert(err, IsNil)
 
-	// copy
+	// Copy
 	var objectNameDest = objectName + "dest"
 	_, err = s.bucket.CopyObject(objectName, objectNameDest)
 	c.Assert(err, IsNil)
 
-	// check
+	// Check
 	lor, err := s.bucket.ListObjects(Prefix(objectName))
 	c.Assert(err, IsNil)
 	testLogger.Println("objects:", lor.Objects)
@@ -1433,16 +1434,16 @@ func (s *OssBucketSuite) TestCopyObject(c *C) {
 	err = s.bucket.DeleteObject(objectNameDest)
 	c.Assert(err, IsNil)
 
-	// copy with constraints x-oss-copy-source-if-modified-since
+	// Copy with constraints x-oss-copy-source-if-modified-since
 	_, err = s.bucket.CopyObject(objectName, objectNameDest, CopySourceIfModifiedSince(futureDate))
 	c.Assert(err, NotNil)
 	testLogger.Println("CopyObject:", err)
 
-	// copy with constraints x-oss-copy-source-if-unmodified-since
+	// Copy with constraints x-oss-copy-source-if-unmodified-since
 	_, err = s.bucket.CopyObject(objectName, objectNameDest, CopySourceIfUnmodifiedSince(futureDate))
 	c.Assert(err, IsNil)
 
-	// check
+	// Check
 	lor, err = s.bucket.ListObjects(Prefix(objectName))
 	c.Assert(err, IsNil)
 	testLogger.Println("objects:", lor.Objects)
@@ -1457,7 +1458,7 @@ func (s *OssBucketSuite) TestCopyObject(c *C) {
 	err = s.bucket.DeleteObject(objectNameDest)
 	c.Assert(err, IsNil)
 
-	// copy with constraints x-oss-copy-source-if-match
+	// Copy with constraints x-oss-copy-source-if-match
 	meta, err := s.bucket.GetObjectDetailedMeta(objectName)
 	c.Assert(err, IsNil)
 	testLogger.Println("GetObjectDetailedMeta:", meta)
@@ -1465,7 +1466,7 @@ func (s *OssBucketSuite) TestCopyObject(c *C) {
 	_, err = s.bucket.CopyObject(objectName, objectNameDest, CopySourceIfMatch(meta.Get("Etag")))
 	c.Assert(err, IsNil)
 
-	// check
+	// Check
 	body, err = s.bucket.GetObject(objectName)
 	c.Assert(err, IsNil)
 	str, err = readBody(body)
@@ -1475,16 +1476,16 @@ func (s *OssBucketSuite) TestCopyObject(c *C) {
 	err = s.bucket.DeleteObject(objectNameDest)
 	c.Assert(err, IsNil)
 
-	// copy with constraints x-oss-copy-source-if-none-match
+	// Copy with constraints x-oss-copy-source-if-none-match
 	_, err = s.bucket.CopyObject(objectName, objectNameDest, CopySourceIfNoneMatch(meta.Get("Etag")))
 	c.Assert(err, NotNil)
 
-	// copy with constraints x-oss-metadata-directive
+	// Copy with constraints x-oss-metadata-directive
 	_, err = s.bucket.CopyObject(objectName, objectNameDest, Meta("my", "mydestprop"),
 		MetadataDirective(MetaCopy))
 	c.Assert(err, IsNil)
 
-	// check
+	// Check
 	body, err = s.bucket.GetObject(objectName)
 	c.Assert(err, IsNil)
 	str, err = readBody(body)
@@ -1502,7 +1503,7 @@ func (s *OssBucketSuite) TestCopyObject(c *C) {
 	err = s.bucket.DeleteObject(objectNameDest)
 	c.Assert(err, IsNil)
 
-	// copy with constraints x-oss-metadata-directive and self defined dest object meta
+	// Copy with constraints x-oss-metadata-directive and self defined dest object meta
 	options := []Option{
 		ObjectACL(ACLPublicReadWrite),
 		Meta("my", "mydestprop"),
@@ -1511,7 +1512,7 @@ func (s *OssBucketSuite) TestCopyObject(c *C) {
 	_, err = s.bucket.CopyObject(objectName, objectNameDest, options...)
 	c.Assert(err, IsNil)
 
-	// check
+	// Check
 	body, err = s.bucket.GetObject(objectName)
 	c.Assert(err, IsNil)
 	str, err = readBody(body)
@@ -1548,11 +1549,11 @@ func (s *OssBucketSuite) TestCopyObjectToOrFrom(c *C) {
 	err = s.bucket.PutObject(objectName, strings.NewReader(objectValue))
 	c.Assert(err, IsNil)
 
-	// copy from
+	// Copy from
 	_, err = destBuck.CopyObjectFrom(bucketName, objectName, objectNameDest)
 	c.Assert(err, IsNil)
 
-	// check
+	// Check
 	body, err := destBuck.GetObject(objectNameDest)
 	c.Assert(err, IsNil)
 	str, err := readBody(body)
@@ -1562,18 +1563,18 @@ func (s *OssBucketSuite) TestCopyObjectToOrFrom(c *C) {
 	err = s.bucket.DeleteObject(objectName)
 	c.Assert(err, IsNil)
 
-	// copy to
+	// Copy to
 	_, err = destBuck.CopyObjectTo(bucketName, objectName, objectNameDest)
 	c.Assert(err, IsNil)
 
-	// check
+	// Check
 	body, err = s.bucket.GetObject(objectName)
 	c.Assert(err, IsNil)
 	str, err = readBody(body)
 	c.Assert(err, IsNil)
 	c.Assert(str, Equals, objectValue)
 
-	// clean
+	// Clean
 	err = destBuck.DeleteObject(objectNameDest)
 	c.Assert(err, IsNil)
 
@@ -1590,11 +1591,11 @@ func (s *OssBucketSuite) TestCopyObjectToOrFromNegative(c *C) {
 	destBucket := bucketName + "-destn"
 	objectNameDest := objectName + "destn"
 
-	// object no exist
+	// Object not exist
 	_, err := s.bucket.CopyObjectTo(bucketName, objectName, objectNameDest)
 	c.Assert(err, NotNil)
 
-	// bucket no exist
+	// Bucket not exist
 	_, err = s.bucket.CopyObjectFrom(destBucket, objectNameDest, objectName)
 	c.Assert(err, NotNil)
 }
@@ -1613,7 +1614,7 @@ func (s *OssBucketSuite) TestAppendObject(c *C) {
 	err = createFileAndWrite(localFile+"2", val[midPos:])
 	c.Assert(err, IsNil)
 
-	// string append
+	// String append
 	nextPos, err = s.bucket.AppendObject(objectName, strings.NewReader("昨夜雨疏风骤,浓睡不消残酒。试问卷帘人,"), nextPos)
 	c.Assert(err, IsNil)
 	nextPos, err = s.bucket.AppendObject(objectName, strings.NewReader("却道海棠依旧。知否?知否?应是绿肥红瘦。"), nextPos)
@@ -1628,7 +1629,7 @@ func (s *OssBucketSuite) TestAppendObject(c *C) {
 	err = s.bucket.DeleteObject(objectName)
 	c.Assert(err, IsNil)
 
-	// byte append
+	// Byte append
 	nextPos = 0
 	nextPos, err = s.bucket.AppendObject(objectName, bytes.NewReader(val[0:midPos]), nextPos)
 	c.Assert(err, IsNil)
@@ -1644,7 +1645,7 @@ func (s *OssBucketSuite) TestAppendObject(c *C) {
 	err = s.bucket.DeleteObject(objectName)
 	c.Assert(err, IsNil)
 
-	// file append
+	// File append
 	options := []Option{
 		ObjectACL(ACLPublicReadWrite),
 		Meta("my", "myprop"),
@@ -1670,7 +1671,7 @@ func (s *OssBucketSuite) TestAppendObject(c *C) {
 	testLogger.Println("GetObjectACL:", acl)
 	c.Assert(acl.ACL, Equals, string(ACLPublicReadWrite))
 
-	// second append
+	// Second append
 	options = []Option{
 		ObjectACL(ACLPublicRead),
 		Meta("my", "myproptwo"),
@@ -1805,14 +1806,14 @@ func (s *OssBucketSuite) TestSTSToken(c *C) {
 	c.Assert(err, IsNil)
 	testLogger.Println("Objects:", lor.Objects)
 
-	// Put with url
+	// Put with URL
 	signedURL, err := bucket.SignURL(objectName, HTTPPut, 3600)
 	c.Assert(err, IsNil)
 
 	err = bucket.PutObjectWithURL(signedURL, strings.NewReader(objectValue))
 	c.Assert(err, IsNil)
 
-	// Get with url
+	// Get with URL
 	signedURL, err = bucket.SignURL(objectName, HTTPGet, 3600)
 	c.Assert(err, IsNil)
 
@@ -1977,7 +1978,7 @@ func (s *OssBucketSuite) TestSymlink(c *C) {
 	err = s.bucket.DeleteObject(targetObjectName)
 	c.Assert(err, IsNil)
 
-	// put symlink again
+	// Put symlink again
 	objectName = objectNamePrefix + "symlink"
 	targetObjectName = objectNamePrefix + "symlink-target"
 
@@ -2008,12 +2009,12 @@ func (s *OssBucketSuite) TestSymlink(c *C) {
 func (s *OssBucketSuite) TestRestoreObject(c *C) {
 	objectName := objectNamePrefix + "restore"
 
-	// List Object
+	// List objects
 	lor, err := s.archiveBucket.ListObjects()
 	c.Assert(err, IsNil)
 	left := len(lor.Objects)
 
-	// Put three object
+	// Put object
 	err = s.archiveBucket.PutObject(objectName, strings.NewReader(""))
 	c.Assert(err, IsNil)
 
@@ -2026,29 +2027,29 @@ func (s *OssBucketSuite) TestRestoreObject(c *C) {
 		c.Assert(object.Type, Equals, "Normal")
 	}
 
-	// Head Object
+	// Head object
 	meta, err := s.archiveBucket.GetObjectDetailedMeta(objectName)
 	c.Assert(err, IsNil)
 	_, ok := meta["X-Oss-Restore"]
 	c.Assert(ok, Equals, false)
 	c.Assert(meta.Get("X-Oss-Storage-Class"), Equals, "Archive")
 
-	// Error Restore
+	// Error restore object
 	err = s.archiveBucket.RestoreObject("notexistobject")
 	c.Assert(err, NotNil)
 
-	// Restore Object
+	// Restore object
 	err = s.archiveBucket.RestoreObject(objectName)
 	c.Assert(err, IsNil)
 
-	// Head Object
+	// Head object
 	meta, err = s.archiveBucket.GetObjectDetailedMeta(objectName)
 	c.Assert(err, IsNil)
 	c.Assert(meta.Get("X-Oss-Restore"), Equals, "ongoing-request=\"true\"")
 	c.Assert(meta.Get("X-Oss-Storage-Class"), Equals, "Archive")
 }
 
-// private
+// Private
 func createFileAndWrite(fileName string, data []byte) error {
 	os.Remove(fileName)
 
@@ -2070,7 +2071,7 @@ func createFileAndWrite(fileName string, data []byte) error {
 	return nil
 }
 
-// compare the content between fileL and fileR
+// Compare the content between fileL and fileR
 func compareFiles(fileL string, fileR string) (bool, error) {
 	finL, err := os.Open(fileL)
 	if err != nil {
@@ -2124,7 +2125,7 @@ func compareFiles(fileL string, fileR string) (bool, error) {
 	return true, nil
 }
 
-// compare the content of file and data
+// Compare the content of file and data
 func compareFileData(file string, data []byte) (bool, error) {
 	fin, err := os.Open(file)
 	if err != nil {

+ 151 - 188
oss/client.go

@@ -11,70 +11,67 @@ import (
 	"time"
 )
 
-//
-// Client Sdk的入口,Client的方法可以完成bucket的各种操作,如create/delete bucket,
-// set/get acl/lifecycle/referer/logging/website等。文件(object)的上传下载通过Bucket完成。
-// 用户用oss.New创建Client。
+// Client SDK's entry point. It's for bucket related operations such as create/delete bucket and set/get ACL/lifecycle/referer/logging/website.
+// Object related operations are done by Bucket class.
+// Users use oss.New to create Client instance.
 //
 type (
-	// Client oss client
+	// Client OSS client
 	Client struct {
-		Config *Config // Oss Client configure
-		Conn   *Conn   // Send http request
+		Config *Config // OSS client configuration
+		Conn   *Conn   // Send HTTP request
 	}
 
 	// ClientOption client option such as UseCname, Timeout, SecurityToken.
 	ClientOption func(*Client)
 )
 
+// New creates a new client.
 //
-// New 生成一个新的Client。
-//
-// endpoint        用户Bucket所在数据中心的访问域名,如http://oss-cn-hangzhou.aliyuncs.com。
-// accessKeyId     用户标识。
-// accessKeySecret 用户密钥。
+// endpoint    the OSS datacenter endpoint such as http://oss-cn-hangzhou.aliyuncs.com .
+// accessKeyId    access key Id.
+// accessKeySecret    access key secret.
 //
-// Client 生成的新Client。error为nil时有效。
-// error  操作无错误时为nil,非nil时表示操作出错。
+// Client    creates the new client instance, the returned value is valid when error is nil.
+// error    it's nil if no error, otherwise it's an error object.
 //
 func New(endpoint, accessKeyID, accessKeySecret string, options ...ClientOption) (*Client, error) {
-	// configuration
+	// Configuration
 	config := getDefaultOssConfig()
 	config.Endpoint = endpoint
 	config.AccessKeyID = accessKeyID
 	config.AccessKeySecret = accessKeySecret
 
-	// url parse
+	// URL parse
 	url := &urlMaker{}
 	url.Init(config.Endpoint, config.IsCname, config.IsUseProxy)
 
-	// http connect
+	// HTTP connect
 	conn := &Conn{config: config, url: url}
 
-	// oss client
+	// OSS client
 	client := &Client{
 		config,
 		conn,
 	}
 
-	// client options parse
+	// Client options parse
 	for _, option := range options {
 		option(client)
 	}
 
-	// create http connect
+	// Create HTTP connection
 	err := conn.init(config, url)
 
 	return client, err
 }
 
+// Bucket gets the bucket instance.
 //
-// Bucket 取存储空间(Bucket)的对象实例。
-//
-// bucketName 存储空间名称。
-// Bucket     新的Bucket。error为nil时有效。
+// bucketName    the bucket name.
+// Bucket    the bucket object, when error is nil.
 //
-// error 操作无错误时返回nil,非nil为错误信息。
+// error    it's nil if no error, otherwise it's an error object.
 //
 func (client Client) Bucket(bucketName string) (*Bucket, error) {
 	return &Bucket{
@@ -83,15 +80,14 @@ func (client Client) Bucket(bucketName string) (*Bucket, error) {
 	}, nil
 }
 
+// CreateBucket creates a bucket.
 //
-// CreateBucket 创建Bucket。
+// bucketName    the bucket name, it's globally unique and immutable. The bucket name can only consist of lowercase letters, numbers and dash ('-').
+//               It must start with lowercase letter or number and the length can only be between 3 and 255.
+// options    options for creating the bucket, with optional ACL. The ACL could be ACLPrivate, ACLPublicRead, and ACLPublicReadWrite. By default it's ACLPrivate.
+//            It could also be specified with StorageClass option, which supports StorageStandard, StorageIA(infrequent access), StorageArchive.
 //
-// bucketName bucket名称,在整个OSS中具有全局唯一性,且不能修改。bucket名称的只能包括小写字母,数字和短横线-,
-// 必须以小写字母或者数字开头,长度必须在3-255字节之间。
-// options  创建bucket的选项。您可以使用选项ACL,指定bucket的访问权限。Bucket有以下三种访问权限,私有读写(ACLPrivate)、
-// 公共读私有写(ACLPublicRead),公共读公共写(ACLPublicReadWrite),默认访问权限是私有读写。可以使用StorageClass选项设置bucket的存储方式,目前支持:标准存储模式(StorageStandard)、 低频存储模式(StorageIA)、 归档存储模式(StorageArchive)。
-//
-// error 操作无错误时返回nil,非nil为错误信息。
+// error    it's nil if no error, otherwise it's an error object.
 //
 func (client Client) CreateBucket(bucketName string, options ...Option) error {
 	headers := make(map[string]string)
@@ -122,15 +118,15 @@ func (client Client) CreateBucket(bucketName string, options ...Option) error {
 	return checkRespCode(resp.StatusCode, []int{http.StatusOK})
 }
 
+// ListBuckets lists buckets of the current account under the given endpoint, with optional filters.
 //
-// ListBuckets 获取当前用户下的bucket。
-//
-// options 指定ListBuckets的筛选行为,Prefix、Marker、MaxKeys三个选项。Prefix限定前缀。
-// Marker设定从Marker之后的第一个开始返回。MaxKeys限定此次返回的最大数目,默认为100。
-// 常用使用场景的实现,参数示例程序list_bucket.go。
-// ListBucketsResponse 操作成功后的返回值,error为nil时该返回值有效。
+// options    specifies the filters such as Prefix, Marker and MaxKeys. Prefix is the bucket name's prefix filter.
+//            And marker makes sure the returned buckets' names are greater than it in lexicographic order.
+//            MaxKeys limits the max keys to return, and by default it's 100 and up to 1000.
+//            For the common usage scenario, please check out list_bucket.go in the sample.
+// ListBucketsResponse    the response object if error is nil.
 //
-// error 操作无错误时返回nil,非nil为错误信息。
+// error    it's nil if no error, otherwise it's an error object.
 //
 func (client Client) ListBuckets(options ...Option) (ListBucketsResult, error) {
 	var out ListBucketsResult
@@ -150,13 +146,12 @@ func (client Client) ListBuckets(options ...Option) (ListBucketsResult, error) {
 	return out, err
 }
 
+// IsBucketExist checks if the bucket exists.
 //
-// IsBucketExist Bucket是否存在。
-//
-// bucketName 存储空间名称。
-//
-// bool  存储空间是否存在。error为nil时有效。
-// error 操作无错误时返回nil,非nil为错误信息。
+// bucketName    the bucket name.
+// 
+// bool    true if it exists, and it's only valid when error is nil.
+// error    it's nil if no error, otherwise it's an error object.
 //
 func (client Client) IsBucketExist(bucketName string) (bool, error) {
 	listRes, err := client.ListBuckets(Prefix(bucketName), MaxKeys(1))
@@ -170,12 +165,11 @@ func (client Client) IsBucketExist(bucketName string) (bool, error) {
 	return false, nil
 }
 
+// DeleteBucket deletes the bucket. Only empty bucket can be deleted (no object and parts).
 //
-// DeleteBucket 删除空存储空间。非空时请先清理Object、Upload。
-//
-// bucketName 存储空间名称。
+// bucketName    the bucket name.
 //
-// error 操作无错误时返回nil,非nil为错误信息。
+// error    it's nil if no error, otherwise it's an error object.
 //
 func (client Client) DeleteBucket(bucketName string) error {
 	params := map[string]interface{}{}
@@ -188,16 +182,15 @@ func (client Client) DeleteBucket(bucketName string) error {
 	return checkRespCode(resp.StatusCode, []int{http.StatusNoContent})
 }
 
+// GetBucketLocation gets the bucket location.
 //
-// GetBucketLocation 查看Bucket所属数据中心位置的信息。
-//
-// 如果您想了解"访问域名和数据中心"详细信息,请参看
+// Check out the following link for more information:
 // https://help.aliyun.com/document_detail/oss/user_guide/oss_concept/endpoint.html
 //
-// bucketName 存储空间名称。
+// bucketName    the bucket name
 //
-// string Bucket所属的数据中心位置信息。
-// error  操作无错误时返回nil,非nil为错误信息。
+// string    bucket's datacenter location
+// error    it's nil if no error, otherwise it's an error object.
 //
 func (client Client) GetBucketLocation(bucketName string) (string, error) {
 	params := map[string]interface{}{}
@@ -213,14 +206,12 @@ func (client Client) GetBucketLocation(bucketName string) (string, error) {
 	return LocationConstraint, err
 }
 
+// SetBucketACL sets bucket's ACL.
 //
-// SetBucketACL 修改Bucket的访问权限。
-//
-// bucketName 存储空间名称。
-// bucketAcl  bucket的访问权限。Bucket有以下三种访问权限,Bucket有以下三种访问权限,私有读写(ACLPrivate)、
-// 公共读私有写(ACLPublicRead),公共读公共写(ACLPublicReadWrite)。
+// bucketName    the bucket name
+// bucketAcl    the bucket ACL: ACLPrivate, ACLPublicRead and ACLPublicReadWrite.
 //
-// error 操作无错误时返回nil,非nil为错误信息。
+// error    it's nil if no error, otherwise it's an error object.
 //
 func (client Client) SetBucketACL(bucketName string, bucketACL ACLType) error {
 	headers := map[string]string{HTTPHeaderOssACL: string(bucketACL)}
@@ -233,13 +224,12 @@ func (client Client) SetBucketACL(bucketName string, bucketACL ACLType) error {
 	return checkRespCode(resp.StatusCode, []int{http.StatusOK})
 }
 
+// GetBucketACL gets the bucket ACL.
 //
-// GetBucketACL 获得Bucket的访问权限。
+// bucketName    the bucket name.
 //
-// bucketName 存储空间名称。
-//
-// GetBucketAclResponse 操作成功后的返回值,error为nil时该返回值有效。
-// error 操作无错误时返回nil,非nil为错误信息。
+// GetBucketAclResponse    the result object, and it's only valid when error is nil.
+// error    it's nil if no error, otherwise it's an error object.
 //
 func (client Client) GetBucketACL(bucketName string) (GetBucketACLResult, error) {
 	var out GetBucketACLResult
@@ -255,19 +245,16 @@ func (client Client) GetBucketACL(bucketName string) (GetBucketACLResult, error)
 	return out, err
 }
 
+// SetBucketLifecycle sets the bucket's lifecycle.
 //
-// SetBucketLifecycle 修改Bucket的生命周期设置。
-//
-// OSS提供Object生命周期管理来为用户管理对象。用户可以为某个Bucket定义生命周期配置,来为该Bucket的Object定义各种规则。
-// Bucket的拥有者可以通过SetBucketLifecycle来设置Bucket的Lifecycle配置。Lifecycle开启后,OSS将按照配置,
-// 定期自动删除与Lifecycle规则相匹配的Object。如果您想了解更多的生命周期的信息,请参看
+// For more information, check out the following link:
 // https://help.aliyun.com/document_detail/oss/user_guide/manage_object/object_lifecycle.html
 //
-// bucketName 存储空间名称。
-// rules 生命周期规则列表。生命周期规则有两种格式,指定绝对和相对过期时间,分布由days和year/month/day控制。
-// 具体用法请参考示例程序sample/bucket_lifecycle.go。
-//
-// error 操作无错误时返回error为nil,非nil为错误信息。
+// bucketName    the bucket name.
+// rules    the lifecycle rules. There're two kind of rules: absolute time expiration and relative time expiration in days and day/month/year respectively.
+//          Check out sample/bucket_lifecycle.go for more details.
+// 
+// error    it's nil if no error, otherwise it's an error object.
 //
 func (client Client) SetBucketLifecycle(bucketName string, rules []LifecycleRule) error {
 	lxml := lifecycleXML{Rules: convLifecycleRule(rules)}
@@ -292,13 +279,12 @@ func (client Client) SetBucketLifecycle(bucketName string, rules []LifecycleRule
 	return checkRespCode(resp.StatusCode, []int{http.StatusOK})
 }
 
+// DeleteBucketLifecycle deletes the bucket's lifecycle.
 //
-// DeleteBucketLifecycle 删除Bucket的生命周期设置。
 //
+// bucketName    the bucket name.
 //
-// bucketName 存储空间名称。
-//
-// error 操作无错误为nil,非nil为错误信息。
+// error    it's nil if no error, otherwise it's an error object.
 //
 func (client Client) DeleteBucketLifecycle(bucketName string) error {
 	params := map[string]interface{}{}
@@ -311,13 +297,12 @@ func (client Client) DeleteBucketLifecycle(bucketName string) error {
 	return checkRespCode(resp.StatusCode, []int{http.StatusNoContent})
 }
 
+// GetBucketLifecycle gets the bucket's lifecycle settings.
 //
-// GetBucketLifecycle 查看Bucket的生命周期设置。
-//
-// bucketName 存储空间名称。
-//
-// GetBucketLifecycleResponse 操作成功的返回值,error为nil时该返回值有效。Rules为该bucket上的规则列表。
-// error 操作无错误时为nil,非nil为错误信息。
+// bucketName    the bucket name.
+// 
+// GetBucketLifecycleResponse    the result object upon successful request. It's only valid when error is nil.
+// error    it's nil if no error, otherwise it's an error object.
 //
 func (client Client) GetBucketLifecycle(bucketName string) (GetBucketLifecycleResult, error) {
 	var out GetBucketLifecycleResult
@@ -333,21 +318,20 @@ func (client Client) GetBucketLifecycle(bucketName string) (GetBucketLifecycleRe
 	return out, err
 }
 
+// SetBucketReferer sets the bucket's referer whitelist and the flag if allowing empty referrer.
 //
-// SetBucketReferer 设置bucket的referer访问白名单和是否允许referer字段为空的请求访问。
-//
-// 防止用户在OSS上的数据被其他人盗用,OSS支持基于HTTP header中表头字段referer的防盗链方法。可以通过OSS控制台或者API的方式对
-// 一个bucket设置referer字段的白名单和是否允许referer字段为空的请求访问。例如,对于一个名为oss-example的bucket,
-// 设置其referer白名单为http://www.aliyun.com。则所有referer为http://www.aliyun.com的请求才能访问oss-example
-// 这个bucket中的object。如果您还需要了解更多信息,请参看
+// To prevent the data on OSS from being hotlinked, OSS supports the HTTP referrer header. A referrer whitelist could be set either by API or web console, as well as
+// the allowing empty referrer flag. Note that this applies to requests from web browsers only.
+// For example, for a bucket oss-example and its referrer http://www.aliyun.com, all requests from this URL could access the bucket.
+// For more information, please check out this link :
 // https://help.aliyun.com/document_detail/oss/user_guide/security_management/referer.html
 //
-// bucketName  存储空间名称。
-// referers  访问白名单列表。一个bucket可以支持多个referer参数。referer参数支持通配符"*"和"?"。
-// 用法请参看示例sample/bucket_referer.go
-// allowEmptyReferer  指定是否允许referer字段为空的请求访问。 默认为true。
+// bucketName    the bucket name.
+// referers    the referrer white list. A bucket could have a referrer list and each referrer supports one '*' and multiple '?' as wildcards.
+//             The sample could be found in sample/bucket_referer.go
+// allowEmptyReferer    the flag of allowing empty referrer. By default it's true.
 //
-// error 操作无错误为nil,非nil为错误信息。
+// error    it's nil if no error, otherwise it's an error object.
 //
 func (client Client) SetBucketReferer(bucketName string, referers []string, allowEmptyReferer bool) error {
 	rxml := RefererXML{}
@@ -381,13 +365,12 @@ func (client Client) SetBucketReferer(bucketName string, referers []string, allo
 	return checkRespCode(resp.StatusCode, []int{http.StatusOK})
 }
 
+// GetBucketReferer gets the bucket's referrer white list.
 //
-// GetBucketReferer 获得Bucket的白名单地址。
+// bucketName    the bucket name.
 //
-// bucketName 存储空间名称。
-//
-// GetBucketRefererResponse 操作成功的返回值,error为nil时该返回值有效。
-// error 操作无错误时为nil,非nil为错误信息。
+// GetBucketRefererResponse    the result object upon successful request. It's only valid when error is nil.
+// error    it's nil if no error, otherwise it's an error object.
 //
 func (client Client) GetBucketReferer(bucketName string) (GetBucketRefererResult, error) {
 	var out GetBucketRefererResult
@@ -403,18 +386,17 @@ func (client Client) GetBucketReferer(bucketName string) (GetBucketRefererResult
 	return out, err
 }
 
+// SetBucketLogging sets the bucket logging settings.
 //
-// SetBucketLogging 修改Bucket的日志设置。
-//
-// OSS为您提供自动保存访问日志记录功能。Bucket的拥有者可以开启访问日志记录功能。当一个bucket开启访问日志记录功能后,
-// OSS自动将访问这个bucket的请求日志,以小时为单位,按照固定的命名规则,生成一个Object写入用户指定的bucket中。
-// 如果您需要更多,请参看 https://help.aliyun.com/document_detail/oss/user_guide/security_management/logging.html
+// OSS could automatically store the access log. Only the bucket owner could enable the logging.
+// Once enabled, OSS would save all the access log into hourly log files in a specified bucket.
+// For more information, please check out https://help.aliyun.com/document_detail/oss/user_guide/security_management/logging.html
 //
-// bucketName   需要记录访问日志的Bucket。
-// targetBucket 访问日志记录到的Bucket。
-// targetPrefix bucketName中需要存储访问日志记录的object前缀。为空记录所有object的访问日志。
+// bucketName    bucket name to enable the log.
+// targetBucket    the target bucket name to store the log files.
+// targetPrefix    the log files' prefix.
 //
-// error 操作无错误为nil,非nil为错误信息。
+// error    it's nil if no error, otherwise it's an error object.
 //
 func (client Client) SetBucketLogging(bucketName, targetBucket, targetPrefix string,
 	isEnable bool) error {
@@ -451,12 +433,11 @@ func (client Client) SetBucketLogging(bucketName, targetBucket, targetPrefix str
 	return checkRespCode(resp.StatusCode, []int{http.StatusOK})
 }
 
+// DeleteBucketLogging deletes the logging configuration to disable the logging on the bucket.
 //
-// DeleteBucketLogging 删除Bucket的日志设置。
+// bucketName    the bucket name to disable the logging.
 //
-// bucketName 需要删除访问日志的Bucket。
-//
-// error 操作无错误为nil,非nil为错误信息。
+// error    it's nil if no error, otherwise it's an error object.
 //
 func (client Client) DeleteBucketLogging(bucketName string) error {
 	params := map[string]interface{}{}
@@ -469,13 +450,12 @@ func (client Client) DeleteBucketLogging(bucketName string) error {
 	return checkRespCode(resp.StatusCode, []int{http.StatusNoContent})
 }
 
+// GetBucketLogging gets the bucket's logging settings
 //
-// GetBucketLogging 获得Bucket的日志设置。
-//
-// bucketName  需要删除访问日志的Bucket。
-// GetBucketLoggingResponse  操作成功的返回值,error为nil时该返回值有效。
+// bucketName    the bucket name
+// GetBucketLoggingResponse    the result object upon successful request. It's only valid when error is nil.
 //
-// error 操作无错误为nil,非nil为错误信息。
+// error    it's nil if no error, otherwise it's an error object.
 //
 func (client Client) GetBucketLogging(bucketName string) (GetBucketLoggingResult, error) {
 	var out GetBucketLoggingResult
@@ -491,17 +471,16 @@ func (client Client) GetBucketLogging(bucketName string) (GetBucketLoggingResult
 	return out, err
 }
 
+// SetBucketWebsite sets the bucket's static website's index and error page.
 //
-// SetBucketWebsite 设置/修改Bucket的默认首页以及错误页。
-//
-// OSS支持静态网站托管,Website操作可以将一个bucket设置成静态网站托管模式 。您可以将自己的Bucket配置成静态网站托管模式。
-// 如果您需要更多,请参看 https://help.aliyun.com/document_detail/oss/user_guide/static_host_website.html
+// OSS supports static web site hosting for the bucket data. When the bucket is enabled with that, you can access the file in the bucket like the way to access a static website.
+// For more information, please check out: https://help.aliyun.com/document_detail/oss/user_guide/static_host_website.html
 //
-// bucketName     需要设置Website的Bucket。
-// indexDocument  索引文档。
-// errorDocument  错误文档。
+// bucketName    the bucket name to enable static web site.
+// indexDocument    index page.
+// errorDocument    error page.
 //
-// error  操作无错误为nil,非nil为错误信息。
+// error    it's nil if no error, otherwise it's an error object.
 //
 func (client Client) SetBucketWebsite(bucketName, indexDocument, errorDocument string) error {
 	wxml := WebsiteXML{}
@@ -529,12 +508,11 @@ func (client Client) SetBucketWebsite(bucketName, indexDocument, errorDocument s
 	return checkRespCode(resp.StatusCode, []int{http.StatusOK})
 }
 
+// DeleteBucketWebsite deletes the bucket's static web site settings.
 //
-// DeleteBucketWebsite 删除Bucket的Website设置。
+// bucketName    the bucket name.
 //
-// bucketName  需要删除website设置的Bucket。
-//
-// error  操作无错误为nil,非nil为错误信息。
+// error    it's nil if no error, otherwise it's an error object.
 //
 func (client Client) DeleteBucketWebsite(bucketName string) error {
 	params := map[string]interface{}{}
@@ -547,13 +525,12 @@ func (client Client) DeleteBucketWebsite(bucketName string) error {
 	return checkRespCode(resp.StatusCode, []int{http.StatusNoContent})
 }
 
+// GetBucketWebsite gets the bucket's default page (index page) and the error page.
 //
-// GetBucketWebsite 获得Bucket的默认首页以及错误页。
-//
-// bucketName 存储空间名称。
+// bucketName    the bucket name
 //
-// GetBucketWebsiteResponse 操作成功的返回值,error为nil时该返回值有效。
-// error 操作无错误为nil,非nil为错误信息。
+// GetBucketWebsiteResponse    the result object upon successful request. It's only valid when error is nil.
+// error    it's nil if no error, otherwise it's an error object.
 //
 func (client Client) GetBucketWebsite(bucketName string) (GetBucketWebsiteResult, error) {
 	var out GetBucketWebsiteResult
@@ -569,15 +546,14 @@ func (client Client) GetBucketWebsite(bucketName string) (GetBucketWebsiteResult
 	return out, err
 }
 
+// SetBucketCORS sets the bucket's CORS rules
 //
-// SetBucketCORS 设置Bucket的跨域访问(CORS)规则。
+// For more information, please check out https://help.aliyun.com/document_detail/oss/user_guide/security_management/cors.html
 //
-// 跨域访问的更多信息,请参看 https://help.aliyun.com/document_detail/oss/user_guide/security_management/cors.html
+// bucketName    the bucket name
+// corsRules    the CORS rules to set. The related sample code is in sample/bucket_cors.go.
 //
-// bucketName 需要设置Website的Bucket。
-// corsRules  待设置的CORS规则。用法请参看示例代码sample/bucket_cors.go。
-//
-// error 操作无错误为nil,非nil为错误信息。
+// error    it's nil if no error, otherwise it's an error object.
 //
 func (client Client) SetBucketCORS(bucketName string, corsRules []CORSRule) error {
 	corsxml := CORSXML{}
@@ -612,12 +588,11 @@ func (client Client) SetBucketCORS(bucketName string, corsRules []CORSRule) erro
 	return checkRespCode(resp.StatusCode, []int{http.StatusOK})
 }
 
+// DeleteBucketCORS deletes the bucket's CORS settings.
 //
-// DeleteBucketCORS 删除Bucket的Website设置。
-//
-// bucketName 需要删除cors设置的Bucket。
+// bucketName    the bucket name.
 //
-// error 操作无错误为nil,非nil为错误信息。
+// error    it's nil if no error, otherwise it's an error object.
 //
 func (client Client) DeleteBucketCORS(bucketName string) error {
 	params := map[string]interface{}{}
@@ -630,14 +605,12 @@ func (client Client) DeleteBucketCORS(bucketName string) error {
 	return checkRespCode(resp.StatusCode, []int{http.StatusNoContent})
 }
 
+// GetBucketCORS gets the bucket's CORS settings.
 //
-// GetBucketCORS 获得Bucket的CORS设置。
-//
+// bucketName    the bucket name.
+// GetBucketCORSResult    the result object upon successful request. It's only valid when error is nil.
 //
-// bucketName  存储空间名称。
-// GetBucketCORSResult  操作成功的返回值,error为nil时该返回值有效。
-//
-// error 操作无错误为nil,非nil为错误信息。
+// error    it's nil if no error, otherwise it's an error object.
 //
 func (client Client) GetBucketCORS(bucketName string) (GetBucketCORSResult, error) {
 	var out GetBucketCORSResult
@@ -653,13 +626,12 @@ func (client Client) GetBucketCORS(bucketName string) (GetBucketCORSResult, erro
 	return out, err
 }
 
+// GetBucketInfo gets the bucket information.
 //
-// GetBucketInfo 获得Bucket的信息。
-//
-// bucketName  存储空间名称。
-// GetBucketInfoResult  操作成功的返回值,error为nil时该返回值有效。
+// bucketName    the bucket name.
+// GetBucketInfoResult    the result object upon successful request. It's only valid when error is nil.
 //
-// error 操作无错误为nil,非nil为错误信息。
+// error    it's nil if no error, otherwise it's an error object.
 //
 func (client Client) GetBucketInfo(bucketName string) (GetBucketInfoResult, error) {
 	var out GetBucketInfoResult
@@ -675,10 +647,9 @@ func (client Client) GetBucketInfo(bucketName string) (GetBucketInfoResult, erro
 	return out, err
 }
 
+// UseCname sets the flag of using CName. By default it's false.
 //
-// UseCname 设置是否使用CNAME,默认不使用。
-//
-// isUseCname true设置endpoint格式是cname格式,false为非cname格式,默认false
+// isUseCname    true: the endpoint is in the CNAME format, false: the endpoint is not in the CNAME format. Default is false.
 //
 func UseCname(isUseCname bool) ClientOption {
 	return func(client *Client) {
@@ -687,11 +658,10 @@ func UseCname(isUseCname bool) ClientOption {
 	}
 }
 
+// Timeout sets the HTTP timeout in seconds.
 //
-// Timeout 设置HTTP超时时间。
-//
-// connectTimeoutSec HTTP链接超时时间,单位是秒,默认10秒。0表示永不超时。
-// readWriteTimeout  HTTP发送接受数据超时时间,单位是秒,默认20秒。0表示永不超时。
+// connectTimeoutSec    HTTP connect timeout in seconds. Default is 10 seconds. 0 means infinite (not recommended)
+// readWriteTimeout    HTTP read or write's timeout in seconds. Default is 20 seconds. 0 means infinite.
 //
 func Timeout(connectTimeoutSec, readWriteTimeout int64) ClientOption {
 	return func(client *Client) {
@@ -708,10 +678,9 @@ func Timeout(connectTimeoutSec, readWriteTimeout int64) ClientOption {
 	}
 }
 
+// SecurityToken sets the temporary user's SecurityToken.
 //
-// SecurityToken 临时用户设置SecurityToken。
-//
-// token STS token
+// token    STS token
 //
 func SecurityToken(token string) ClientOption {
 	return func(client *Client) {
@@ -719,10 +688,9 @@ func SecurityToken(token string) ClientOption {
 	}
 }
 
+// EnableMD5 enables MD5 validation.
 //
-// EnableMD5 是否启用MD5校验,默认启用。
-//
-// isEnableMD5 true启用MD5校验,false不启用MD5校验
+// isEnableMD5    true: enable MD5 validation; false: disable MD5 validation.
 //
 func EnableMD5(isEnableMD5 bool) ClientOption {
 	return func(client *Client) {
@@ -730,10 +698,9 @@ func EnableMD5(isEnableMD5 bool) ClientOption {
 	}
 }
 
+// MD5ThresholdCalcInMemory sets the memory usage threshold for computing the MD5, default is 16MB.
 //
-// MD5ThresholdCalcInMemory 使用内存计算MD5值的上限,默认16MB。
-//
-// threshold 单位Byte。上传内容小于threshold在MD5在内存中计算,大于使用临时文件计算MD5
+// threshold    the memory threshold in bytes. When the uploaded content is larger than the threshold, the temp file is used for computing the MD5 (default threshold is 16MB).
 //
 func MD5ThresholdCalcInMemory(threshold int64) ClientOption {
 	return func(client *Client) {
@@ -741,10 +708,9 @@ func MD5ThresholdCalcInMemory(threshold int64) ClientOption {
 	}
 }
 
+// EnableCRC enables the CRC checksum. Default is true.
 //
-// EnableCRC 上传是否启用CRC校验,默认启用。
-//
-// isEnableCRC true启用CRC校验,false不启用CRC校验
+// isEnableCRC    true: enable CRC checksum; false: disable the CRC checksum.
 //
 func EnableCRC(isEnableCRC bool) ClientOption {
 	return func(client *Client) {
@@ -752,10 +718,9 @@ func EnableCRC(isEnableCRC bool) ClientOption {
 	}
 }
 
+// UserAgent specifies UserAgent. The default is aliyun-sdk-go/1.2.0 (windows/-/amd64;go1.5.2).
 //
-// UserAgent 指定UserAgent,默认如下aliyun-sdk-go/1.2.0 (windows/-/amd64;go1.5.2)。
-//
-// userAgent user agent字符串。
+// userAgent    the user agent string.
 //
 func UserAgent(userAgent string) ClientOption {
 	return func(client *Client) {
@@ -763,10 +728,9 @@ func UserAgent(userAgent string) ClientOption {
 	}
 }
 
+// Proxy sets the proxy (optional). The default is not using proxy.
 //
-// Proxy 设置代理服务器,默认不使用代理。
-//
-// proxyHost 代理服务器地址,格式是host或host:port
+// proxyHost    the proxy host in the format "host:port". For example, proxy.com:80 .
 //
 func Proxy(proxyHost string) ClientOption {
 	return func(client *Client) {
@@ -776,12 +740,11 @@ func Proxy(proxyHost string) ClientOption {
 	}
 }
 
+// AuthProxy sets the proxy information with user name and password.
 //
-// AuthProxy 设置需要认证的代理服务器,默认不使用代理。
-//
-// proxyHost 代理服务器地址,格式是host或host:port
-// proxyUser 代理服务器认证的用户名
-// proxyPassword 代理服务器认证的用户密码
+// proxyHost    the proxy host in the format "host:port". For example, proxy.com:80 .
+// proxyUser    the proxy user name.
+// proxyPassword    the proxy password.
 //
 func AuthProxy(proxyHost, proxyUser, proxyPassword string) ClientOption {
 	return func(client *Client) {

+ 92 - 92
oss/client_test.go

@@ -15,7 +15,7 @@ import (
 	. "gopkg.in/check.v1"
 )
 
-// Hook up gocheck into the "go test" runner.
+// Test hooks up gocheck into the "go test" runner.
 func Test(t *testing.T) {
 	TestingT(t)
 }
@@ -25,17 +25,17 @@ type OssClientSuite struct{}
 var _ = Suite(&OssClientSuite{})
 
 var (
-	// endpoint/id/key
+	// Endpoint/ID/Key
 	endpoint  = os.Getenv("OSS_TEST_ENDPOINT")
 	accessID  = os.Getenv("OSS_TEST_ACCESS_KEY_ID")
 	accessKey = os.Getenv("OSS_TEST_ACCESS_KEY_SECRET")
 
-	// proxy
+	// Proxy
 	proxyHost   = os.Getenv("OSS_TEST_PROXY_HOST")
 	proxyUser   = os.Getenv("OSS_TEST_PROXY_USER")
 	proxyPasswd = os.Getenv("OSS_TEST_PROXY_PASSWORD")
 
-	// sts
+	// STS
 	stsaccessID  = os.Getenv("OSS_TEST_STS_ID")
 	stsaccessKey = os.Getenv("OSS_TEST_STS_KEY")
 	stsARN       = os.Getenv("OSS_TEST_STS_ARN")
@@ -81,7 +81,7 @@ func randLowStr(n int) string {
 	return strings.ToLower(randStr(n))
 }
 
-// Run once when the suite starts running
+// SetUpSuite runs once when the suite starts running
 func (s *OssClientSuite) SetUpSuite(c *C) {
 	client, err := New(endpoint, accessID, accessKey)
 	c.Assert(err, IsNil)
@@ -96,7 +96,7 @@ func (s *OssClientSuite) SetUpSuite(c *C) {
 	testLogger.Println("test client started")
 }
 
-// Run before each test or benchmark starts running
+// TearDownSuite runs once after all tests or benchmarks have finished running
 func (s *OssClientSuite) TearDownSuite(c *C) {
 	client, err := New(endpoint, accessID, accessKey)
 	c.Assert(err, IsNil)
@@ -140,11 +140,11 @@ func (s *OssClientSuite) deleteBucket(client *Client, bucketName string, c *C) {
 	c.Assert(err, IsNil)
 }
 
-// Run after each test or benchmark runs
+// SetUpTest runs before each test or benchmark starts running
 func (s *OssClientSuite) SetUpTest(c *C) {
 }
 
-// Run once after all tests or benchmarks have finished running
+// TearDownTest runs after each test or benchmark runs
 func (s *OssClientSuite) TearDownTest(c *C) {
 }
 
@@ -176,7 +176,7 @@ func (s *OssClientSuite) TestCreateBucket(c *C) {
 	c.Assert(err, IsNil)
 	time.Sleep(5 * time.Second)
 
-	// Create with ACLPublicRead
+	// Create with ACLPublicRead
 	err = client.CreateBucket(bucketNameTest, ACL(ACLPublicRead))
 	c.Assert(err, IsNil)
 	time.Sleep(5 * time.Second)
@@ -213,7 +213,7 @@ func (s *OssClientSuite) TestCreateBucket(c *C) {
 	err = client.DeleteBucket(bucketNameTest)
 	c.Assert(err, IsNil)
 
-	// create bucket with config and test get bucket info
+	// Create bucket with configuration and test GetBucketInfo
 	for _, storage := range []StorageClassType{StorageStandard, StorageIA, StorageArchive} {
 		bucketNameTest := bucketNamePrefix + randLowStr(5)
 		err = client.CreateBucket(bucketNameTest, StorageClass(storage), ACL(ACLPublicRead))
@@ -230,11 +230,11 @@ func (s *OssClientSuite) TestCreateBucket(c *C) {
 		c.Assert(err, IsNil)
 	}
 
-	// error put bucket with config
+	// Error put bucket with configuration
 	err = client.CreateBucket("ERRORBUCKETNAME", StorageClass(StorageArchive))
 	c.Assert(err, NotNil)
 
-	// create bucket with config and test list bucket
+	// Create bucket with configuration and test ListBuckets
 	for _, storage := range []StorageClassType{StorageStandard, StorageIA, StorageArchive} {
 		bucketNameTest := bucketNamePrefix + randLowStr(5)
 		err = client.CreateBucket(bucketNameTest, StorageClass(storage))
@@ -258,7 +258,7 @@ func (s *OssClientSuite) TestCreateBucketNegative(c *C) {
 	client, err := New(endpoint, accessID, accessKey)
 	c.Assert(err, IsNil)
 
-	// BucketName invalid
+	// Bucket name invalid
 	err = client.CreateBucket("xx")
 	c.Assert(err, NotNil)
 
@@ -270,7 +270,7 @@ func (s *OssClientSuite) TestCreateBucketNegative(c *C) {
 	c.Assert(err, NotNil)
 	testLogger.Println(err)
 
-	// Acl invalid
+	// ACL invalid
 	err = client.CreateBucket(bucketNamePrefix+"tcbn", ACL("InvaldAcl"))
 	c.Assert(err, NotNil)
 	testLogger.Println(err)
@@ -319,7 +319,7 @@ func (s *OssClientSuite) TestDeleteBucketNegative(c *C) {
 	client, err := New(endpoint, accessID, accessKey)
 	c.Assert(err, IsNil)
 
-	// BucketName invalid
+	// Bucket name invalid
 	err = client.DeleteBucket("xx")
 	c.Assert(err, NotNil)
 
@@ -329,7 +329,7 @@ func (s *OssClientSuite) TestDeleteBucketNegative(c *C) {
 	err = client.DeleteBucket("_bucket")
 	c.Assert(err, NotNil)
 
-	// Delete no exist
+	// Delete no exist bucket
 	err = client.DeleteBucket("notexist")
 	c.Assert(err, NotNil)
 
@@ -412,7 +412,7 @@ func (s *OssClientSuite) TestIsBucketExist(c *C) {
 	err = client.CreateBucket(bucketNameLbThree)
 	c.Assert(err, IsNil)
 
-	// exist
+	// Exist
 	exist, err := client.IsBucketExist(bucketNameLbTwo)
 	c.Assert(err, IsNil)
 	c.Assert(exist, Equals, true)
@@ -425,7 +425,7 @@ func (s *OssClientSuite) TestIsBucketExist(c *C) {
 	c.Assert(err, IsNil)
 	c.Assert(exist, Equals, true)
 
-	// not exist
+	// Not exist
 	exist, err = client.IsBucketExist(bucketNamePrefix + "tibe")
 	c.Assert(err, IsNil)
 	c.Assert(exist, Equals, false)
@@ -434,7 +434,7 @@ func (s *OssClientSuite) TestIsBucketExist(c *C) {
 	c.Assert(err, IsNil)
 	c.Assert(exist, Equals, false)
 
-	// negative
+	// Negative
 	exist, err = client.IsBucketExist("BucketNameInvalid")
 	c.Assert(err, NotNil)
 
@@ -462,7 +462,7 @@ func (s *OssClientSuite) TestSetBucketAcl(c *C) {
 	c.Assert(err, IsNil)
 	c.Assert(res.ACL, Equals, string(ACLPrivate))
 
-	// set ACL_PUBLIC_R
+	// Set ACL_PUBLIC_R
 	err = client.SetBucketACL(bucketNameTest, ACLPublicRead)
 	c.Assert(err, IsNil)
 	time.Sleep(5 * time.Second)
@@ -471,7 +471,7 @@ func (s *OssClientSuite) TestSetBucketAcl(c *C) {
 	c.Assert(err, IsNil)
 	c.Assert(res.ACL, Equals, string(ACLPublicRead))
 
-	// set ACL_PUBLIC_RW
+	// Set ACL_PUBLIC_RW
 	err = client.SetBucketACL(bucketNameTest, ACLPublicReadWrite)
 	c.Assert(err, IsNil)
 	time.Sleep(5 * time.Second)
@@ -480,7 +480,7 @@ func (s *OssClientSuite) TestSetBucketAcl(c *C) {
 	c.Assert(err, IsNil)
 	c.Assert(res.ACL, Equals, string(ACLPublicReadWrite))
 
-	// set ACL_PUBLIC_RW
+	// Set ACL_PUBLIC_RW
 	err = client.SetBucketACL(bucketNameTest, ACLPrivate)
 	c.Assert(err, IsNil)
 	err = client.SetBucketACL(bucketNameTest, ACLPrivate)
@@ -583,11 +583,11 @@ func (s *OssClientSuite) TestGetBucketLocationNegative(c *C) {
 	client, err := New(endpoint, accessID, accessKey)
 	c.Assert(err, IsNil)
 
-	// not exist
+	// Not exist
 	_, err = client.GetBucketLocation(bucketNameTest)
 	c.Assert(err, NotNil)
 
-	// not exist
+	// Not exist
 	_, err = client.GetBucketLocation("InvalidBucketName_")
 	c.Assert(err, NotNil)
 }
@@ -604,11 +604,11 @@ func (s *OssClientSuite) TestSetBucketLifecycle(c *C) {
 	err = client.CreateBucket(bucketNameTest)
 	c.Assert(err, IsNil)
 
-	// set single rule
+	// Set single rule
 	var rules = []LifecycleRule{rule1}
 	err = client.SetBucketLifecycle(bucketNameTest, rules)
 	c.Assert(err, IsNil)
-	// double set rule
+	// Double set rule
 	err = client.SetBucketLifecycle(bucketNameTest, rules)
 	c.Assert(err, IsNil)
 
@@ -620,12 +620,12 @@ func (s *OssClientSuite) TestSetBucketLifecycle(c *C) {
 	err = client.DeleteBucketLifecycle(bucketNameTest)
 	c.Assert(err, IsNil)
 
-	// set two rules
+	// Set two rules
 	rules = []LifecycleRule{rule1, rule2}
 	err = client.SetBucketLifecycle(bucketNameTest, rules)
 	c.Assert(err, IsNil)
 
-	// eliminate effect of cache
+	// Eliminate effect of cache
 	time.Sleep(5 * time.Second)
 
 	res, err = client.GetBucketLifecycle(bucketNameTest)
@@ -663,7 +663,7 @@ func (s *OssClientSuite) TestDeleteBucketLifecycle(c *C) {
 	c.Assert(err, IsNil)
 	c.Assert(len(res.Rules), Equals, 2)
 
-	// delete
+	// Delete
 	err = client.DeleteBucketLifecycle(bucketNameTest)
 	c.Assert(err, IsNil)
 
@@ -671,10 +671,10 @@ func (s *OssClientSuite) TestDeleteBucketLifecycle(c *C) {
 	res, err = client.GetBucketLifecycle(bucketNameTest)
 	c.Assert(err, NotNil)
 
-	// eliminate effect of cache
+	// Eliminate effect of cache
 	time.Sleep(time.Second * 3)
 
-	// delete when not set
+	// Delete when not set
 	err = client.DeleteBucketLifecycle(bucketNameTest)
 	c.Assert(err, IsNil)
 
@@ -693,22 +693,22 @@ func (s *OssClientSuite) TestBucketLifecycleNegative(c *C) {
 	err = client.CreateBucket(bucketNameTest)
 	c.Assert(err, IsNil)
 
-	// set with no rule
+	// Set with no rule
 	err = client.SetBucketLifecycle(bucketNameTest, rules)
 	c.Assert(err, NotNil)
 
 	err = client.DeleteBucket(bucketNameTest)
 	c.Assert(err, IsNil)
 
-	// not exist
+	// Not exist
 	err = client.SetBucketLifecycle(bucketNameTest, rules)
 	c.Assert(err, NotNil)
 
-	// not exist
+	// Not exist
 	_, err = client.GetBucketLifecycle(bucketNameTest)
 	c.Assert(err, NotNil)
 
-	// not exist
+	// Not exist
 	err = client.DeleteBucketLifecycle(bucketNameTest)
 	c.Assert(err, NotNil)
 }
@@ -728,7 +728,7 @@ func (s *OssClientSuite) TestSetBucketReferer(c *C) {
 	c.Assert(res.AllowEmptyReferer, Equals, true)
 	c.Assert(len(res.RefererList), Equals, 0)
 
-	// set referers
+	// Set referers
 	err = client.SetBucketReferer(bucketNameTest, referers, false)
 	c.Assert(err, IsNil)
 	time.Sleep(5 * time.Second)
@@ -739,7 +739,7 @@ func (s *OssClientSuite) TestSetBucketReferer(c *C) {
 	c.Assert(res.RefererList[0], Equals, "http://www.aliyun.com")
 	c.Assert(res.RefererList[1], Equals, "https://www.aliyun.com")
 
-	// reset referer, referers empty
+	// Reset referer, referers empty
 	referers = []string{""}
 	err = client.SetBucketReferer(bucketNameTest, referers, true)
 	c.Assert(err, IsNil)
@@ -764,12 +764,12 @@ func (s *OssClientSuite) TestBucketRefererNegative(c *C) {
 	client, err := New(endpoint, accessID, accessKey)
 	c.Assert(err, IsNil)
 
-	// not exist
+	// Not exist
 	_, err = client.GetBucketReferer(bucketNameTest)
 	c.Assert(err, NotNil)
 	testLogger.Println(err)
 
-	// not exist
+	// Not exist
 	err = client.SetBucketReferer(bucketNameTest, referers, true)
 	c.Assert(err, NotNil)
 	testLogger.Println(err)
@@ -789,10 +789,10 @@ func (s *OssClientSuite) TestSetBucketLogging(c *C) {
 	c.Assert(err, IsNil)
 	time.Sleep(5 * time.Second)
 
-	// set logging
+	// Set logging
 	err = client.SetBucketLogging(bucketNameTest, bucketNameTarget, "prefix", true)
 	c.Assert(err, IsNil)
-	// reset
+	// Reset
 	err = client.SetBucketLogging(bucketNameTest, bucketNameTarget, "prefix", false)
 	c.Assert(err, IsNil)
 
@@ -805,7 +805,7 @@ func (s *OssClientSuite) TestSetBucketLogging(c *C) {
 	err = client.DeleteBucketLogging(bucketNameTest)
 	c.Assert(err, IsNil)
 
-	// set to self
+	// Set to self
 	err = client.SetBucketLogging(bucketNameTest, bucketNameTest, "prefix", true)
 	c.Assert(err, IsNil)
 
@@ -828,39 +828,39 @@ func (s *OssClientSuite) TestDeleteBucketLogging(c *C) {
 	err = client.CreateBucket(bucketNameTarget)
 	c.Assert(err, IsNil)
 
-	// get when not set
+	// Get when not set
 	res, err := client.GetBucketLogging(bucketNameTest)
 	c.Assert(err, IsNil)
 	c.Assert(res.LoggingEnabled.TargetBucket, Equals, "")
 	c.Assert(res.LoggingEnabled.TargetPrefix, Equals, "")
 
-	// set
+	// Set
 	err = client.SetBucketLogging(bucketNameTest, bucketNameTarget, "prefix", true)
 	c.Assert(err, IsNil)
 
-	// get
+	// Get
 	time.Sleep(5 * time.Second)
 	res, err = client.GetBucketLogging(bucketNameTest)
 	c.Assert(err, IsNil)
 	c.Assert(res.LoggingEnabled.TargetBucket, Equals, bucketNameTarget)
 	c.Assert(res.LoggingEnabled.TargetPrefix, Equals, "prefix")
 
-	// set
+	// Set
 	err = client.SetBucketLogging(bucketNameTest, bucketNameTarget, "prefix", false)
 	c.Assert(err, IsNil)
 
-	// get
+	// Get
 	time.Sleep(5 * time.Second)
 	res, err = client.GetBucketLogging(bucketNameTest)
 	c.Assert(err, IsNil)
 	c.Assert(res.LoggingEnabled.TargetBucket, Equals, "")
 	c.Assert(res.LoggingEnabled.TargetPrefix, Equals, "")
 
-	// delete
+	// Delete
 	err = client.DeleteBucketLogging(bucketNameTest)
 	c.Assert(err, IsNil)
 
-	// get after delete
+	// Get after delete
 	time.Sleep(5 * time.Second)
 	res, err = client.GetBucketLogging(bucketNameTest)
 	c.Assert(err, IsNil)
@@ -881,15 +881,15 @@ func (s *OssClientSuite) TestSetBucketLoggingNegative(c *C) {
 	client, err := New(endpoint, accessID, accessKey)
 	c.Assert(err, IsNil)
 
-	// not exist
+	// Not exist
 	_, err = client.GetBucketLogging(bucketNameTest)
 	c.Assert(err, NotNil)
 
-	// not exist
+	// Not exist
 	err = client.SetBucketLogging(bucketNameTest, "targetbucket", "prefix", true)
 	c.Assert(err, NotNil)
 
-	// not exist
+	// Not exist
 	err = client.DeleteBucketLogging(bucketNameTest)
 	c.Assert(err, NotNil)
 
@@ -897,11 +897,11 @@ func (s *OssClientSuite) TestSetBucketLoggingNegative(c *C) {
 	c.Assert(err, IsNil)
 	time.Sleep(5 * time.Second)
 
-	// target bucket not exist
+	// Target bucket not exist
 	err = client.SetBucketLogging(bucketNameTest, bucketNameTarget, "prefix", true)
 	c.Assert(err, NotNil)
 
-	// parameter invalid
+	// Parameter invalid
 	err = client.SetBucketLogging(bucketNameTest, "XXXX", "prefix", true)
 	c.Assert(err, NotNil)
 
@@ -924,11 +924,11 @@ func (s *OssClientSuite) TestSetBucketWebsite(c *C) {
 	err = client.CreateBucket(bucketNameTest)
 	c.Assert(err, IsNil)
 
-	// set
+	// Set
 	err = client.SetBucketWebsite(bucketNameTest, indexWebsite, errorWebsite)
 	c.Assert(err, IsNil)
 
-	// double set
+	// Double set
 	err = client.SetBucketWebsite(bucketNameTest, indexWebsite, errorWebsite)
 	c.Assert(err, IsNil)
 
@@ -937,7 +937,7 @@ func (s *OssClientSuite) TestSetBucketWebsite(c *C) {
 	c.Assert(res.IndexDocument.Suffix, Equals, indexWebsite)
 	c.Assert(res.ErrorDocument.Key, Equals, errorWebsite)
 
-	// reset
+	// Reset
 	err = client.SetBucketWebsite(bucketNameTest, "your"+indexWebsite, "your"+errorWebsite)
 	c.Assert(err, IsNil)
 
@@ -950,11 +950,11 @@ func (s *OssClientSuite) TestSetBucketWebsite(c *C) {
 	err = client.DeleteBucketWebsite(bucketNameTest)
 	c.Assert(err, IsNil)
 
-	// set after delete
+	// Set after delete
 	err = client.SetBucketWebsite(bucketNameTest, indexWebsite, errorWebsite)
 	c.Assert(err, IsNil)
 
-	// eliminate effect of cache
+	// Eliminate effect of cache
 	time.Sleep(5 * time.Second)
 
 	res, err = client.GetBucketWebsite(bucketNameTest)
@@ -978,15 +978,15 @@ func (s *OssClientSuite) TestDeleteBucketWebsite(c *C) {
 	err = client.CreateBucket(bucketNameTest)
 	c.Assert(err, IsNil)
 
-	// get
+	// Get
 	res, err := client.GetBucketWebsite(bucketNameTest)
 	c.Assert(err, NotNil)
 
-	// detele without set
+	// Delete without set
 	err = client.DeleteBucketWebsite(bucketNameTest)
 	c.Assert(err, IsNil)
 
-	// set
+	// Set
 	err = client.SetBucketWebsite(bucketNameTest, indexWebsite, errorWebsite)
 	c.Assert(err, IsNil)
 
@@ -996,7 +996,7 @@ func (s *OssClientSuite) TestDeleteBucketWebsite(c *C) {
 	c.Assert(res.IndexDocument.Suffix, Equals, indexWebsite)
 	c.Assert(res.ErrorDocument.Key, Equals, errorWebsite)
 
-	// detele
+	// Delete
 	time.Sleep(5 * time.Second)
 	err = client.DeleteBucketWebsite(bucketNameTest)
 	c.Assert(err, IsNil)
@@ -1005,7 +1005,7 @@ func (s *OssClientSuite) TestDeleteBucketWebsite(c *C) {
 	res, err = client.GetBucketWebsite(bucketNameTest)
 	c.Assert(err, NotNil)
 
-	// detele after delete
+	// Delete after delete
 	err = client.DeleteBucketWebsite(bucketNameTest)
 	c.Assert(err, IsNil)
 
@@ -1024,7 +1024,7 @@ func (s *OssClientSuite) TestSetBucketWebsiteNegative(c *C) {
 
 	err = client.DeleteBucket(bucketNameTest)
 
-	// not exist
+	// Not exist
 	_, err = client.GetBucketWebsite(bucketNameTest)
 	c.Assert(err, NotNil)
 
@@ -1037,7 +1037,7 @@ func (s *OssClientSuite) TestSetBucketWebsiteNegative(c *C) {
 	err = client.CreateBucket(bucketNameTest)
 	c.Assert(err, IsNil)
 
-	// set
+	// Set
 	time.Sleep(5 * time.Second)
 	err = client.SetBucketWebsite(bucketNameTest, "myindex", "myerror")
 	c.Assert(err, IsNil)
@@ -1047,7 +1047,7 @@ func (s *OssClientSuite) TestSetBucketWebsiteNegative(c *C) {
 	c.Assert(res.IndexDocument.Suffix, Equals, "myindex")
 	c.Assert(res.ErrorDocument.Key, Equals, "myerror")
 
-	// detele
+	// Delete
 	err = client.DeleteBucketWebsite(bucketNameTest)
 	c.Assert(err, IsNil)
 
@@ -1055,7 +1055,7 @@ func (s *OssClientSuite) TestSetBucketWebsiteNegative(c *C) {
 	_, err = client.GetBucketWebsite(bucketNameTest)
 	c.Assert(err, NotNil)
 
-	// detele after delete
+	// Delete after delete
 	err = client.DeleteBucketWebsite(bucketNameTest)
 	c.Assert(err, IsNil)
 
@@ -1089,7 +1089,7 @@ func (s *OssClientSuite) TestSetBucketCORS(c *C) {
 	c.Assert(err, IsNil)
 	time.Sleep(5 * time.Second)
 
-	// set
+	// Set
 	err = client.SetBucketCORS(bucketNameTest, []CORSRule{rule1})
 	c.Assert(err, IsNil)
 
@@ -1102,7 +1102,7 @@ func (s *OssClientSuite) TestSetBucketCORS(c *C) {
 	c.Assert(len(gbcr.CORSRules[0].ExposeHeader), Equals, 0)
 	c.Assert(gbcr.CORSRules[0].MaxAgeSeconds, Equals, 100)
 
-	// double set
+	// Double set
 	err = client.SetBucketCORS(bucketNameTest, []CORSRule{rule1})
 	c.Assert(err, IsNil)
 
@@ -1115,7 +1115,7 @@ func (s *OssClientSuite) TestSetBucketCORS(c *C) {
 	c.Assert(len(gbcr.CORSRules[0].ExposeHeader), Equals, 0)
 	c.Assert(gbcr.CORSRules[0].MaxAgeSeconds, Equals, 100)
 
-	// set rule2
+	// Set rule2
 	err = client.SetBucketCORS(bucketNameTest, []CORSRule{rule2})
 	c.Assert(err, IsNil)
 
@@ -1129,7 +1129,7 @@ func (s *OssClientSuite) TestSetBucketCORS(c *C) {
 	c.Assert(len(gbcr.CORSRules[0].ExposeHeader), Equals, 2)
 	c.Assert(gbcr.CORSRules[0].MaxAgeSeconds, Equals, 200)
 
-	// reset
+	// Reset
 	err = client.SetBucketCORS(bucketNameTest, []CORSRule{rule1, rule2})
 	c.Assert(err, IsNil)
 
@@ -1138,7 +1138,7 @@ func (s *OssClientSuite) TestSetBucketCORS(c *C) {
 	c.Assert(err, IsNil)
 	c.Assert(len(gbcr.CORSRules), Equals, 2)
 
-	// set after delete
+	// Set after delete
 	err = client.DeleteBucketCORS(bucketNameTest)
 	c.Assert(err, IsNil)
 
@@ -1174,11 +1174,11 @@ func (s *OssClientSuite) TestDeleteBucketCORS(c *C) {
 	err = client.CreateBucket(bucketNameTest)
 	c.Assert(err, IsNil)
 
-	// delete not set
+	// Delete not set
 	err = client.DeleteBucketCORS(bucketNameTest)
 	c.Assert(err, IsNil)
 
-	// set
+	// Set
 	err = client.SetBucketCORS(bucketNameTest, []CORSRule{rule})
 	c.Assert(err, IsNil)
 
@@ -1186,7 +1186,7 @@ func (s *OssClientSuite) TestDeleteBucketCORS(c *C) {
 	_, err = client.GetBucketCORS(bucketNameTest)
 	c.Assert(err, IsNil)
 
-	// detele
+	// Delete
 	err = client.DeleteBucketCORS(bucketNameTest)
 	c.Assert(err, IsNil)
 
@@ -1194,7 +1194,7 @@ func (s *OssClientSuite) TestDeleteBucketCORS(c *C) {
 	_, err = client.GetBucketCORS(bucketNameTest)
 	c.Assert(err, NotNil)
 
-	// detele after delete
+	// Delete after deleting
 	err = client.DeleteBucketCORS(bucketNameTest)
 	c.Assert(err, IsNil)
 
@@ -1218,7 +1218,7 @@ func (s *OssClientSuite) TestSetBucketCORSNegative(c *C) {
 
 	err = client.DeleteBucket(bucketNameTest)
 
-	// not exist
+	// Not exist
 	_, err = client.GetBucketCORS(bucketNameTest)
 	c.Assert(err, NotNil)
 
@@ -1235,7 +1235,7 @@ func (s *OssClientSuite) TestSetBucketCORSNegative(c *C) {
 	_, err = client.GetBucketCORS(bucketNameTest)
 	c.Assert(err, NotNil)
 
-	// set
+	// Set
 	err = client.SetBucketCORS(bucketNameTest, []CORSRule{rule})
 	c.Assert(err, IsNil)
 	time.Sleep(5 * time.Second)
@@ -1243,7 +1243,7 @@ func (s *OssClientSuite) TestSetBucketCORSNegative(c *C) {
 	_, err = client.GetBucketCORS(bucketNameTest)
 	c.Assert(err, IsNil)
 
-	// detele
+	// Delete
 	err = client.DeleteBucketCORS(bucketNameTest)
 	c.Assert(err, IsNil)
 
@@ -1251,7 +1251,7 @@ func (s *OssClientSuite) TestSetBucketCORSNegative(c *C) {
 	_, err = client.GetBucketCORS(bucketNameTest)
 	c.Assert(err, NotNil)
 
-	// detele after delete
+	// Delete after deleting
 	err = client.DeleteBucketCORS(bucketNameTest)
 	c.Assert(err, IsNil)
 
@@ -1289,11 +1289,11 @@ func (s *OssClientSuite) TestGetBucketInfoNegative(c *C) {
 	client, err := New(endpoint, accessID, accessKey)
 	c.Assert(err, IsNil)
 
-	// not exist
+	// Not exist
 	_, err = client.GetBucketInfo(bucketNameTest)
 	c.Assert(err, NotNil)
 
-	// bucket name invalid
+	// Bucket name invalid
 	_, err = client.GetBucketInfo("InvalidBucketName_")
 	c.Assert(err, NotNil)
 }
@@ -1394,7 +1394,7 @@ func (s *OssClientSuite) TestClientOption(c *C) {
 		Timeout(11, 12), SecurityToken("token"), Proxy(proxyHost))
 	c.Assert(err, IsNil)
 
-	// Create Bucket timeout
+	// CreateBucket timeout
 	err = client.CreateBucket(bucketNameTest)
 	c.Assert(err, NotNil)
 
@@ -1441,25 +1441,25 @@ func (s *OssClientSuite) TestProxy(c *C) {
 
 	bucket, err := client.Bucket(bucketNameTest)
 
-	// Sign url
+	// Sign URL
 	str, err := bucket.SignURL(objectName, HTTPPut, 60)
 	c.Assert(err, IsNil)
 	c.Assert(strings.Contains(str, HTTPParamExpires+"="), Equals, true)
 	c.Assert(strings.Contains(str, HTTPParamAccessKeyID+"="), Equals, true)
 	c.Assert(strings.Contains(str, HTTPParamSignature+"="), Equals, true)
 
-	// Put object with url
+	// Put object with URL
 	err = bucket.PutObjectWithURL(str, strings.NewReader(objectValue))
 	c.Assert(err, IsNil)
 
-	// sign url for get object
+	// Sign URL for get object
 	str, err = bucket.SignURL(objectName, HTTPGet, 60)
 	c.Assert(err, IsNil)
 	c.Assert(strings.Contains(str, HTTPParamExpires+"="), Equals, true)
 	c.Assert(strings.Contains(str, HTTPParamAccessKeyID+"="), Equals, true)
 	c.Assert(strings.Contains(str, HTTPParamSignature+"="), Equals, true)
 
-	// Get object with url
+	// Get object with URL
 	body, err := bucket.GetObjectWithURL(str)
 	c.Assert(err, IsNil)
 	str, err = readBody(body)
@@ -1470,15 +1470,15 @@ func (s *OssClientSuite) TestProxy(c *C) {
 	err = bucket.PutObject(objectName, strings.NewReader(objectValue))
 	c.Assert(err, IsNil)
 
-	// Get Object
+	// Get object
 	_, err = bucket.GetObject(objectName)
 	c.Assert(err, IsNil)
 
-	// List Objects
+	// List objects
 	_, err = bucket.ListObjects()
 	c.Assert(err, IsNil)
 
-	// Delete Object
+	// Delete object
 	err = bucket.DeleteObject(objectName)
 	c.Assert(err, IsNil)
 
@@ -1487,7 +1487,7 @@ func (s *OssClientSuite) TestProxy(c *C) {
 	c.Assert(err, IsNil)
 }
 
-// private
+// Private
 func (s *OssClientSuite) checkBucket(buckets []BucketProperties, bucket string) bool {
 	for _, v := range buckets {
 		if v.Name == bucket {

+ 21 - 21
oss/conf.go

@@ -4,7 +4,7 @@ import (
 	"time"
 )
 
-// HTTPTimeout http timeout
+// HTTPTimeout defines HTTP timeout.
 type HTTPTimeout struct {
 	ConnectTimeout   time.Duration
 	ReadWriteTimeout time.Duration
@@ -13,29 +13,29 @@ type HTTPTimeout struct {
 	IdleConnTimeout  time.Duration
 }
 
-// Config oss configure
+// Config defines oss configuration
 type Config struct {
-	Endpoint        string      // oss地址
-	AccessKeyID     string      // accessId
-	AccessKeySecret string      // accessKey
-	RetryTimes      uint        // 失败重试次数,默认5
-	UserAgent       string      // SDK名称/版本/系统信息
-	IsDebug         bool        // 是否开启调试模式,默认false
-	Timeout         uint        // 超时时间,默认60s
+	Endpoint        string      // OSS endpoint
+	AccessKeyID     string      // AccessId
+	AccessKeySecret string      // AccessKey
+	RetryTimes      uint        // Retry count by default it's 5.
+	UserAgent       string      // SDK name/version/system information
+	IsDebug         bool        // Enable debug mode. Default is false.
+	Timeout         uint        // Timeout in seconds. By default it's 60.
 	SecurityToken   string      // STS Token
-	IsCname         bool        // Endpoint是否是CNAME
-	HTTPTimeout     HTTPTimeout // HTTP的超时时间设置
-	IsUseProxy      bool        // 是否使用代理
-	ProxyHost       string      // 代理服务器地址
-	IsAuthProxy     bool        // 代理服务器是否使用用户认证
-	ProxyUser       string      // 代理服务器认证用户名
-	ProxyPassword   string      // 代理服务器认证密码
-	IsEnableMD5     bool        // 上传数据时是否启用MD5校验
-	MD5Threshold    int64       // 内存中计算MD5的上线大小,大于该值启用临时文件,单位Byte
-	IsEnableCRC     bool        // 上传数据时是否启用CRC64校验
+	IsCname         bool        // If cname is in the endpoint.
+	HTTPTimeout     HTTPTimeout // HTTP timeout
+	IsUseProxy      bool        // Flag of using proxy.
+	ProxyHost       string      // Proxy server host address.
+	IsAuthProxy     bool        // Flag of needing authentication.
+	ProxyUser       string      // Proxy user
+	ProxyPassword   string      // Proxy password
+	IsEnableMD5     bool        // Flag of enabling MD5 for upload.
+	MD5Threshold    int64       // Memory footprint threshold for each MD5 computation (16MB is the default), in byte. When the data is more than that, temp file is used.
+	IsEnableCRC     bool        // Flag of enabling CRC for upload.
 }
 
-// 获取默认配置
+// getDefaultOssConfig gets the default configuration.
 func getDefaultOssConfig() *Config {
 	config := Config{}
 
@@ -45,7 +45,7 @@ func getDefaultOssConfig() *Config {
 	config.RetryTimes = 5
 	config.IsDebug = false
 	config.UserAgent = userAgent
-	config.Timeout = 60 // seconds
+	config.Timeout = 60 // Seconds
 	config.SecurityToken = ""
 	config.IsCname = false
 

+ 39 - 39
oss/conn.go

@@ -19,7 +19,7 @@ import (
 	"time"
 )
 
-// Conn oss conn
+// Conn defines OSS Conn
 type Conn struct {
 	config *Config
 	url    *urlMaker
@@ -28,9 +28,9 @@ type Conn struct {
 
 var signKeyList = []string{"acl", "uploads", "location", "cors", "logging", "website", "referer", "lifecycle", "delete", "append", "tagging", "objectMeta", "uploadId", "partNumber", "security-token", "position", "img", "style", "styleName", "replication", "replicationProgress", "replicationLocation", "cname", "bucketInfo", "comp", "qos", "live", "status", "vod", "startTime", "endTime", "symlink", "x-oss-process", "response-content-type", "response-content-language", "response-expires", "response-cache-control", "response-content-disposition", "response-content-encoding", "udf", "udfName", "udfImage", "udfId", "udfImageDesc", "udfApplication", "comp", "udfApplicationLog", "restore"}
 
-// init 初始化Conn
+// init initializes Conn
 func (conn *Conn) init(config *Config, urlMaker *urlMaker) error {
-	// new Transport
+	// New transport
 	transport := newTransport(conn, config)
 
 	// Proxy
@@ -49,7 +49,7 @@ func (conn *Conn) init(config *Config, urlMaker *urlMaker) error {
 	return nil
 }
 
-// Do 处理请求,返回响应结果。
+// Do sends request and returns the response
 func (conn Conn) Do(method, bucketName, objectName string, params map[string]interface{}, headers map[string]string,
 	data io.Reader, initCRC uint64, listener ProgressListener) (*Response, error) {
 	urlParams := conn.getURLParams(params)
@@ -59,10 +59,10 @@ func (conn Conn) Do(method, bucketName, objectName string, params map[string]int
 	return conn.doRequest(method, uri, resource, headers, data, initCRC, listener)
 }
 
-// DoURL 根据已签名的URL处理请求,返回响应结果。
+// DoURL sends the request with signed URL and returns the response result.
 func (conn Conn) DoURL(method HTTPMethod, signedURL string, headers map[string]string,
 	data io.Reader, initCRC uint64, listener ProgressListener) (*Response, error) {
-	// get uri form signedURL
+	// Get URI from signedURL
 	uri, err := url.ParseRequestURI(signedURL)
 	if err != nil {
 		return nil, err
@@ -103,19 +103,19 @@ func (conn Conn) DoURL(method HTTPMethod, signedURL string, headers map[string]s
 		}
 	}
 
-	// transfer started
+	// Transfer started
 	event := newProgressEvent(TransferStartedEvent, 0, req.ContentLength)
 	publishProgress(listener, event)
 
 	resp, err := conn.client.Do(req)
 	if err != nil {
-		// transfer failed
+		// Transfer failed
 		event = newProgressEvent(TransferFailedEvent, tracker.completedBytes, req.ContentLength)
 		publishProgress(listener, event)
 		return nil, err
 	}
 
-	// transfer completed
+	// Transfer completed
 	event = newProgressEvent(TransferCompletedEvent, tracker.completedBytes, req.ContentLength)
 	publishProgress(listener, event)
 
@@ -123,14 +123,14 @@ func (conn Conn) DoURL(method HTTPMethod, signedURL string, headers map[string]s
 }
 
 func (conn Conn) getURLParams(params map[string]interface{}) string {
-	// sort
+	// Sort
 	keys := make([]string, 0, len(params))
 	for k := range params {
 		keys = append(keys, k)
 	}
 	sort.Strings(keys)
 
-	// serialize
+	// Serialize
 	var buf bytes.Buffer
 	for _, k := range keys {
 		if buf.Len() > 0 {
@@ -146,7 +146,7 @@ func (conn Conn) getURLParams(params map[string]interface{}) string {
 }
 
 func (conn Conn) getSubResource(params map[string]interface{}) string {
-	// sort
+	// Sort
 	keys := make([]string, 0, len(params))
 	for k := range params {
 		if conn.isParamSign(k) {
@@ -155,7 +155,7 @@ func (conn Conn) getSubResource(params map[string]interface{}) string {
 	}
 	sort.Strings(keys)
 
-	// serialize
+	// Serialize
 	var buf bytes.Buffer
 	for _, k := range keys {
 		if buf.Len() > 0 {
@@ -223,19 +223,19 @@ func (conn Conn) doRequest(method string, uri *url.URL, canonicalizedResource st
 
 	conn.signHeader(req, canonicalizedResource)
 
-	// transfer started
+	// Transfer started
 	event := newProgressEvent(TransferStartedEvent, 0, req.ContentLength)
 	publishProgress(listener, event)
 
 	resp, err := conn.client.Do(req)
 	if err != nil {
-		// transfer failed
+		// Transfer failed
 		event = newProgressEvent(TransferFailedEvent, tracker.completedBytes, req.ContentLength)
 		publishProgress(listener, event)
 		return nil, err
 	}
 
-	// transfer completed
+	// Transfer completed
 	event = newProgressEvent(TransferCompletedEvent, tracker.completedBytes, req.ContentLength)
 	publishProgress(listener, event)
 
@@ -302,14 +302,14 @@ func (conn Conn) signRtmpURL(bucketName, channelName, playlistName string, expir
 	return conn.url.getSignRtmpURL(bucketName, channelName, urlParams)
 }
 
-// handle request body
+// handleBody handles request body
 func (conn Conn) handleBody(req *http.Request, body io.Reader, initCRC uint64,
 	listener ProgressListener, tracker *readerTracker) (*os.File, hash.Hash64) {
 	var file *os.File
 	var crc hash.Hash64
 	reader := body
 
-	// length
+	// Length
 	switch v := body.(type) {
 	case *bytes.Buffer:
 		req.ContentLength = int64(v.Len())
@@ -324,20 +324,20 @@ func (conn Conn) handleBody(req *http.Request, body io.Reader, initCRC uint64,
 	}
 	req.Header.Set(HTTPHeaderContentLength, strconv.FormatInt(req.ContentLength, 10))
 
-	// md5
+	// MD5
 	if body != nil && conn.config.IsEnableMD5 && req.Header.Get(HTTPHeaderContentMD5) == "" {
 		md5 := ""
 		reader, md5, file, _ = calcMD5(body, req.ContentLength, conn.config.MD5Threshold)
 		req.Header.Set(HTTPHeaderContentMD5, md5)
 	}
 
-	// crc
+	// CRC
 	if reader != nil && conn.config.IsEnableCRC {
 		crc = NewCRC(crcTable(), initCRC)
 		reader = TeeReader(reader, crc, req.ContentLength, listener, tracker)
 	}
 
-	// http body
+	// HTTP body
 	rc, ok := reader.(io.ReadCloser)
 	if !ok && reader != nil {
 		rc = ioutil.NopCloser(reader)
@@ -352,7 +352,7 @@ func tryGetFileSize(f *os.File) int64 {
 	return fInfo.Size()
 }
 
-// handle response
+// handleResponse handles response
 func (conn Conn) handleResponse(resp *http.Response, crc hash.Hash64) (*Response, error) {
 	var cliCRC uint64
 	var srvCRC uint64
@@ -367,10 +367,10 @@ func (conn Conn) handleResponse(resp *http.Response, crc hash.Hash64) (*Response
 		}
 
 		if len(respBody) == 0 {
-			// no error in response body
+			// No error in response body
 			err = fmt.Errorf("oss: service returned without a response body (%s)", resp.Status)
 		} else {
-			// response contains storage service error object, unmarshal
+			// Response contains storage service error object, unmarshal
 			srvErr, errIn := serviceErrFromXML(respBody, resp.StatusCode,
 				resp.Header.Get(HTTPHeaderOssRequestID))
 			if err != nil { // error unmarshaling the error response
@@ -385,7 +385,7 @@ func (conn Conn) handleResponse(resp *http.Response, crc hash.Hash64) (*Response
 			Body:       ioutil.NopCloser(bytes.NewReader(respBody)), // restore the body
 		}, err
 	} else if statusCode >= 300 && statusCode <= 307 {
-		// oss use 3xx, but response has no body
+		// OSS uses 3xx, but the response has no body
 		err := fmt.Errorf("oss: service returned %d,%s", resp.StatusCode, resp.Status)
 		return &Response{
 			StatusCode: resp.StatusCode,
@@ -411,7 +411,7 @@ func (conn Conn) handleResponse(resp *http.Response, crc hash.Hash64) (*Response
 
 func calcMD5(body io.Reader, contentLen, md5Threshold int64) (reader io.Reader, b64 string, tempFile *os.File, err error) {
 	if contentLen == 0 || contentLen > md5Threshold {
-		// huge body, use temporary file
+		// Huge body, use temporary file
 		tempFile, err = ioutil.TempFile(os.TempDir(), TempFilePrefix)
 		if tempFile != nil {
 			io.Copy(tempFile, body)
@@ -424,7 +424,7 @@ func calcMD5(body io.Reader, contentLen, md5Threshold int64) (reader io.Reader,
 			reader = tempFile
 		}
 	} else {
-		// small body, use memory
+		// Small body, use memory
 		buf, _ := ioutil.ReadAll(body)
 		sum := md5.Sum(buf)
 		b64 = base64.StdEncoding.EncodeToString(sum[:])
@@ -461,7 +461,7 @@ func xmlUnmarshal(body io.Reader, v interface{}) error {
 	return xml.Unmarshal(data, v)
 }
 
-// Handle http timeout
+// timeoutConn handles HTTP timeout
 type timeoutConn struct {
 	conn        net.Conn
 	timeout     time.Duration
@@ -515,7 +515,7 @@ func (c *timeoutConn) SetWriteDeadline(t time.Time) error {
 	return c.conn.SetWriteDeadline(t)
 }
 
-// UrlMaker - build url and resource
+// UrlMaker builds URL and resource
 const (
 	urlTypeCname  = 1
 	urlTypeIP     = 2
@@ -523,13 +523,13 @@ const (
 )
 
 type urlMaker struct {
-	Scheme  string // http or https
-	NetLoc  string // host or ip
-	Type    int    // 1 CNAME 2 IP 3 ALIYUN
-	IsProxy bool   // proxy
+	Scheme  string // HTTP or HTTPS
+	NetLoc  string // Host or IP
+	Type    int    // 1 CNAME, 2 IP, 3 ALIYUN
+	IsProxy bool   // Proxy
 }
 
-// Parse endpoint
+// Init parses endpoint
 func (um *urlMaker) Init(endpoint string, isCname bool, isProxy bool) {
 	if strings.HasPrefix(endpoint, "http://") {
 		um.Scheme = "http"
@@ -557,7 +557,7 @@ func (um *urlMaker) Init(endpoint string, isCname bool, isProxy bool) {
 	um.IsProxy = isProxy
 }
 
-// Build URL
+// getURL gets URL
 func (um urlMaker) getURL(bucket, object, params string) *url.URL {
 	host, path := um.buildURL(bucket, object)
 	addr := ""
@@ -570,13 +570,13 @@ func (um urlMaker) getURL(bucket, object, params string) *url.URL {
 	return uri
 }
 
-// Build Sign URL
+// getSignURL gets sign URL
 func (um urlMaker) getSignURL(bucket, object, params string) string {
 	host, path := um.buildURL(bucket, object)
 	return fmt.Sprintf("%s://%s%s?%s", um.Scheme, host, path, params)
 }
 
-// Build Sign Rtmp URL
+// getSignRtmpURL builds a signed RTMP URL
 func (um urlMaker) getSignRtmpURL(bucket, channelName, params string) string {
 	host, path := um.buildURL(bucket, "live")
 
@@ -586,7 +586,7 @@ func (um urlMaker) getSignRtmpURL(bucket, channelName, params string) string {
 	return fmt.Sprintf("rtmp://%s%s/%s?%s", host, path, channelName, params)
 }
 
-// Build URL
+// buildURL builds URL
 func (um urlMaker) buildURL(bucket, object string) (string, string) {
 	var host = ""
 	var path = ""
@@ -618,7 +618,7 @@ func (um urlMaker) buildURL(bucket, object string) (string, string) {
 	return host, path
 }
 
-// Canonicalized Resource
+// getResource gets the canonicalized resource
 func (um urlMaker) getResource(bucketName, objectName, subResource string) string {
 	if subResource != "" {
 		subResource = "?" + subResource

+ 28 - 28
oss/const.go

@@ -2,69 +2,69 @@ package oss
 
 import "os"
 
-// ACLType Bucket/Object的访问控制
+// ACLType bucket/object ACL
 type ACLType string
 
 const (
-	// ACLPrivate 私有读写
+	// ACLPrivate definition : private read and write
 	ACLPrivate ACLType = "private"
 
-	// ACLPublicRead 公共读私有写
+	// ACLPublicRead definition : public read and private write
 	ACLPublicRead ACLType = "public-read"
 
-	// ACLPublicReadWrite 公共读写
+	// ACLPublicReadWrite definition : public read and public write
 	ACLPublicReadWrite ACLType = "public-read-write"
 
-	// ACLDefault Object默认权限,Bucket无此权限
+	// ACLDefault definition : the object inherits the bucket's ACL. It's only applicable for objects.
 	ACLDefault ACLType = "default"
 )
 
-// MetadataDirectiveType 对象COPY时新对象是否使用原对象的Meta
+// MetadataDirectiveType specifies whether to use the metadata of the source object when copying the object.
 type MetadataDirectiveType string
 
 const (
-	// MetaCopy 目标对象使用源对象的META
+	// MetaCopy the target object's metadata is copied from the source one
 	MetaCopy MetadataDirectiveType = "COPY"
 
-	// MetaReplace 目标对象使用自定义的META
+	// MetaReplace the target object's metadata is created as part of the copy request (not same as the source one)
 	MetaReplace MetadataDirectiveType = "REPLACE"
 )
 
-// StorageClassType Bucket的存储类型
+// StorageClassType bucket storage type
 type StorageClassType string
 
 const (
-	// StorageStandard 标准存储模式
+	// StorageStandard standard
 	StorageStandard StorageClassType = "Standard"
 
-	// StorageIA 低频存储模式
+	// StorageIA infrequent access
 	StorageIA StorageClassType = "IA"
 
-	// StorageArchive 归档存储模式
+	// StorageArchive archive
 	StorageArchive StorageClassType = "Archive"
 )
 
-// HTTPMethod HTTP请求方法
+// HTTPMethod HTTP request method
 type HTTPMethod string
 
 const (
-	// HTTPGet HTTP请求方法 GET
+	// HTTPGet HTTP GET
 	HTTPGet HTTPMethod = "GET"
 
-	// HTTPPut HTTP请求方法 PUT
+	// HTTPPut HTTP PUT
 	HTTPPut HTTPMethod = "PUT"
 
-	// HTTPHead HTTP请求方法 HEAD
+	// HTTPHead HTTP HEAD
 	HTTPHead HTTPMethod = "HEAD"
 
-	// HTTPPost HTTP请求方法 POST
+	// HTTPPost HTTP POST
 	HTTPPost HTTPMethod = "POST"
 
-	// HTTPDelete HTTP请求方法 DELETE
+	// HTTPDelete HTTP DELETE
 	HTTPDelete HTTPMethod = "DELETE"
 )
 
-// Http头标签
+// HTTP headers
 const (
 	HTTPHeaderAcceptEncoding     string = "Accept-Encoding"
 	HTTPHeaderAuthorization             = "Authorization"
@@ -108,7 +108,7 @@ const (
 	HTTPHeaderOssSymlinkTarget               = "X-Oss-Symlink-Target"
 )
 
-// Http Param
+// HTTP Param
 const (
 	HTTPParamExpires       = "Expires"
 	HTTPParamAccessKeyID   = "OSSAccessKeyId"
@@ -117,17 +117,17 @@ const (
 	HTTPParamPlaylistName  = "playlistName"
 )
 
-// 其它常量
+// Other constants
 const (
-	MaxPartSize = 5 * 1024 * 1024 * 1024 // 文件片最大值,5GB
-	MinPartSize = 100 * 1024             // 文件片最小值,100KBß
+	MaxPartSize = 5 * 1024 * 1024 * 1024 // Max part size, 5GB
+	MinPartSize = 100 * 1024             // Min part size, 100KB
 
-	FilePermMode = os.FileMode(0664) // 新建文件默认权限
+	FilePermMode = os.FileMode(0664) // Default file permission
 
-	TempFilePrefix = "oss-go-temp-" // 临时文件前缀
-	TempFileSuffix = ".temp"        // 临时文件后缀
+	TempFilePrefix = "oss-go-temp-"  // Temp file prefix
+	TempFileSuffix = ".temp"         // Temp file suffix
 
-	CheckpointFileSuffix = ".cp" // Checkpoint文件后缀
+	CheckpointFileSuffix = ".cp"     // Checkpoint file suffix
 
-	Version = "1.8.0" // Go sdk版本
+	Version = "1.9.0" // Go SDK version
 )

+ 8 - 8
oss/crc.go

@@ -11,11 +11,11 @@ type digest struct {
 	tab *crc64.Table
 }
 
-// NewCRC creates a new hash.Hash64 computing the CRC-64 checksum
+// NewCRC creates a new hash.Hash64 computing the CRC64 checksum
 // using the polynomial represented by the Table.
 func NewCRC(tab *crc64.Table, init uint64) hash.Hash64 { return &digest{init, tab} }
 
-// Size returns the number of bytes Sum will return.
+// Size returns the number of bytes Sum will return.
 func (d *digest) Size() int { return crc64.Size }
 
 // BlockSize returns the hash's underlying block size.
@@ -24,7 +24,7 @@ func (d *digest) Size() int { return crc64.Size }
 // are a multiple of the block size.
 func (d *digest) BlockSize() int { return 1 }
 
-// Reset resets the Hash to its initial state.
+// Reset resets the hash to its initial state.
 func (d *digest) Reset() { d.crc = 0 }
 
 // Write (via the embedded io.Writer interface) adds more data to the running hash.
@@ -34,7 +34,7 @@ func (d *digest) Write(p []byte) (n int, err error) {
 	return len(p), nil
 }
 
-// Sum64 returns crc64 value.
+// Sum64 returns CRC64 value.
 func (d *digest) Sum64() uint64 { return d.crc }
 
 // Sum returns hash value.
@@ -64,10 +64,10 @@ func gf2MatrixSquare(square []uint64, mat []uint64) {
 	}
 }
 
-// CRC64Combine combine crc64
+// CRC64Combine combines CRC64
 func CRC64Combine(crc1 uint64, crc2 uint64, len2 uint64) uint64 {
-	var even [gf2Dim]uint64 // even-power-of-two zeros operator
-	var odd [gf2Dim]uint64  // odd-power-of-two zeros operator
+	var even [gf2Dim]uint64 // Even-power-of-two zeros operator
+	var odd [gf2Dim]uint64  // Odd-power-of-two zeros operator
 
 	// Degenerate case
 	if len2 == 0 {
@@ -117,7 +117,7 @@ func CRC64Combine(crc1 uint64, crc2 uint64, len2 uint64) uint64 {
 		}
 	}
 
-	// Return combined crc
+	// Return combined CRC
 	crc1 ^= crc2
 	return crc1
 }

+ 20 - 20
oss/crc_test.go

@@ -21,7 +21,7 @@ type OssCrcSuite struct {
 
 var _ = Suite(&OssCrcSuite{})
 
-// Run once when the suite starts running
+// SetUpSuite runs once when the suite starts running
 func (s *OssCrcSuite) SetUpSuite(c *C) {
 	client, err := New(endpoint, accessID, accessKey)
 	c.Assert(err, IsNil)
@@ -37,9 +37,9 @@ func (s *OssCrcSuite) SetUpSuite(c *C) {
 	testLogger.Println("test crc started")
 }
 
-// Run before each test or benchmark starts running
+// TearDownSuite runs once after all tests or benchmarks have finished running
 func (s *OssCrcSuite) TearDownSuite(c *C) {
-	// Delete Part
+	// Delete part
 	lmur, err := s.bucket.ListMultipartUploads()
 	c.Assert(err, IsNil)
 
@@ -50,7 +50,7 @@ func (s *OssCrcSuite) TearDownSuite(c *C) {
 		c.Assert(err, IsNil)
 	}
 
-	// Delete Objects
+	// Delete objects
 	lor, err := s.bucket.ListObjects()
 	c.Assert(err, IsNil)
 
@@ -62,19 +62,19 @@ func (s *OssCrcSuite) TearDownSuite(c *C) {
 	testLogger.Println("test crc completed")
 }
 
-// Run after each test or benchmark runs
+// SetUpTest runs before each test or benchmark starts running
 func (s *OssCrcSuite) SetUpTest(c *C) {
 	err := removeTempFiles("../oss", ".jpg")
 	c.Assert(err, IsNil)
 }
 
-// Run once after all tests or benchmarks have finished running
+// TearDownTest runs after each test or benchmark finishes running
 func (s *OssCrcSuite) TearDownTest(c *C) {
 	err := removeTempFiles("../oss", ".jpg")
 	c.Assert(err, IsNil)
 }
 
-// TestCRCGolden 测试OSS实现的CRC64
+// TestCRCGolden tests OSS's CRC64
 func (s *OssCrcSuite) TestCRCGolden(c *C) {
 	type crcTest struct {
 		out uint64
@@ -127,18 +127,18 @@ func (s *OssCrcSuite) TestCRCGolden(c *C) {
 	}
 }
 
-// testCRC64Combine test crc64 on vector[0..pos] which should have CRC-64 crc.
+// testCRC64Combine tests CRC64 on vector[0..pos] which should have CRC64 crc.
 // Also test CRC64Combine on vector[] split in two.
 func testCRC64Combine(c *C, str string, pos int, crc uint64) {
 	tabECMA := crc64.MakeTable(crc64.ECMA)
 
-	// test crc64
+	// Test CRC64
 	hash := crc64.New(tabECMA)
 	io.WriteString(hash, str)
 	crc1 := hash.Sum64()
 	c.Assert(crc1, Equals, crc)
 
-	// test crc64 combine
+	// Test CRC64 combine
 	hash = crc64.New(tabECMA)
 	io.WriteString(hash, str[0:pos])
 	crc1 = hash.Sum64()
@@ -151,7 +151,7 @@ func testCRC64Combine(c *C, str string, pos int, crc uint64) {
 	c.Assert(crc1, Equals, crc)
 }
 
-// TestCRCGolden 测试CRC64Combine
+// TestCRCCombine tests CRC64Combine
 func (s *OssCrcSuite) TestCRCCombine(c *C) {
 	str := "123456789"
 	testCRC64Combine(c, str, (len(str)+1)>>1, 0x995DC9BBDF1939FA)
@@ -160,7 +160,7 @@ func (s *OssCrcSuite) TestCRCCombine(c *C) {
 	testCRC64Combine(c, str, (len(str)+1)>>1, 0x27DB187FC15BBC72)
 }
 
-// TestCRCGolden 测试CRC64Combine
+// TestCRCRepeatedCombine tests CRC64Combine
 func (s *OssCrcSuite) TestCRCRepeatedCombine(c *C) {
 	tab := crc64.MakeTable(crc64.ECMA)
 	str := "Even if I could be Shakespeare, I think I should still choose to be Faraday. - A. Huxley"
@@ -180,7 +180,7 @@ func (s *OssCrcSuite) TestCRCRepeatedCombine(c *C) {
 	}
 }
 
-// TestCRCGolden 测试CRC64Combine
+// TestCRCRandomCombine tests CRC64Combine
 func (s *OssCrcSuite) TestCRCRandomCombine(c *C) {
 	tab := crc64.MakeTable(crc64.ECMA)
 	fileName := "../sample/BingWallpaper-2015-11-07.jpg"
@@ -204,7 +204,7 @@ func (s *OssCrcSuite) TestCRCRandomCombine(c *C) {
 	}
 }
 
-// TestEnableCRCAndMD5 开启MD5和CRC校验
+// TestEnableCRCAndMD5 tests MD5 and CRC check
 func (s *OssCrcSuite) TestEnableCRCAndMD5(c *C) {
 	objectName := objectNamePrefix + "tecam"
 	fileName := "../sample/BingWallpaper-2015-11-07.jpg"
@@ -274,7 +274,7 @@ func (s *OssCrcSuite) TestEnableCRCAndMD5(c *C) {
 	err = s.bucket.DeleteObject(objectName)
 	c.Assert(err, IsNil)
 
-	//	MultipartUpload
+	// MultipartUpload
 	chunks, err := SplitFileByPartSize(fileName, 100*1024)
 	imurUpload, err := bucket.InitiateMultipartUpload(objectName)
 	c.Assert(err, IsNil)
@@ -301,7 +301,7 @@ func (s *OssCrcSuite) TestEnableCRCAndMD5(c *C) {
 	c.Assert(err, IsNil)
 }
 
-// TestDisableCRCAndMD5 关闭MD5和CRC校验
+// TestDisableCRCAndMD5 disables MD5 and CRC
 func (s *OssCrcSuite) TestDisableCRCAndMD5(c *C) {
 	objectName := objectNamePrefix + "tdcam"
 	fileName := "../sample/BingWallpaper-2015-11-07.jpg"
@@ -370,7 +370,7 @@ func (s *OssCrcSuite) TestDisableCRCAndMD5(c *C) {
 	err = s.bucket.DeleteObject(objectName)
 	c.Assert(err, IsNil)
 
-	//	MultipartUpload
+	// MultipartUpload
 	chunks, err := SplitFileByPartSize(fileName, 100*1024)
 	imurUpload, err := bucket.InitiateMultipartUpload(objectName)
 	c.Assert(err, IsNil)
@@ -397,7 +397,7 @@ func (s *OssCrcSuite) TestDisableCRCAndMD5(c *C) {
 	c.Assert(err, IsNil)
 }
 
-// TestSpecifyContentMD5 指定MD5
+// TestSpecifyContentMD5 specifies MD5
 func (s *OssCrcSuite) TestSpecifyContentMD5(c *C) {
 	objectName := objectNamePrefix + "tdcam"
 	fileName := "../sample/BingWallpaper-2015-11-07.jpg"
@@ -445,7 +445,7 @@ func (s *OssCrcSuite) TestSpecifyContentMD5(c *C) {
 	err = s.bucket.DeleteObject(objectName)
 	c.Assert(err, IsNil)
 
-	//	MultipartUpload
+	// MultipartUpload
 	imurUpload, err := s.bucket.InitiateMultipartUpload(objectName)
 	c.Assert(err, IsNil)
 
@@ -462,7 +462,7 @@ func (s *OssCrcSuite) TestSpecifyContentMD5(c *C) {
 	c.Assert(err, IsNil)
 }
 
-// TestCopyObjectToOrFromNegative
+// TestAppendObjectNegative
 func (s *OssCrcSuite) TestAppendObjectNegative(c *C) {
 	objectName := objectNamePrefix + "taoncrc"
 	objectValue := "空山不见人,但闻人语响。返影入深林,复照青苔上。"

+ 70 - 71
oss/download.go

@@ -13,15 +13,14 @@ import (
 	"strconv"
 )
 
+// DownloadFile downloads files with multipart download.
 //
-// DownloadFile 分片下载文件
+// objectKey    the object key.
+// filePath    the local file to download from objectKey in OSS.
+// partSize    the part size in bytes.
+// options    object's constraints, check out GetObject for the reference.
 //
-// objectKey  object key。
-// filePath   本地文件。objectKey下载到文件。
-// partSize   本次上传文件片的大小,字节数。比如100 * 1024为每片100KB。
-// options    Object的属性限制项。详见GetObject。
-//
-// error 操作成功error为nil,非nil为错误信息。
+// error    it's nil when the call succeeds, otherwise it's an error object.
 //
 func (bucket Bucket) DownloadFile(objectKey, filePath string, partSize int64, options ...Option) error {
 	if partSize < 1 {
@@ -47,7 +46,7 @@ func (bucket Bucket) DownloadFile(objectKey, filePath string, partSize int64, op
 	return bucket.downloadFile(objectKey, filePath, partSize, options, routines, uRange)
 }
 
-// 获取下载范围
+// getRangeConfig gets the download range from the options.
 func getRangeConfig(options []Option) (*unpackedRange, error) {
 	rangeOpt, err := findOption(options, HTTPHeaderRange, nil)
 	if err != nil || rangeOpt == nil {
@@ -56,9 +55,9 @@ func getRangeConfig(options []Option) (*unpackedRange, error) {
 	return parseRange(rangeOpt.(string))
 }
 
-// ----- 并发无断点的下载  -----
+// ----- concurrent download without checkpoint  -----
 
-// 工作协程参数
+// downloadWorkerArg is download worker's parameters
 type downloadWorkerArg struct {
 	bucket    *Bucket
 	key       string
@@ -68,7 +67,7 @@ type downloadWorkerArg struct {
 	enableCRC bool
 }
 
-// Hook用于测试
+// downloadPartHook is hook for test
 type downloadPartHook func(part downloadPart) error
 
 var downloadPartHooker downloadPartHook = defaultDownloadPartHook
@@ -77,15 +76,15 @@ func defaultDownloadPartHook(part downloadPart) error {
 	return nil
 }
 
-// 默认ProgressListener,屏蔽GetObject的Options中ProgressListener
+// defaultDownloadProgressListener defines the default ProgressListener; it shields the ProgressListener in the options of GetObject.
 type defaultDownloadProgressListener struct {
 }
 
-// ProgressChanged 静默处理
+// ProgressChanged no-ops
 func (listener *defaultDownloadProgressListener) ProgressChanged(event *ProgressEvent) {
 }
 
-// 工作协程
+// downloadWorker downloads parts received from the jobs channel and reports results or failures
 func downloadWorker(id int, arg downloadWorkerArg, jobs <-chan downloadPart, results chan<- downloadPart, failed chan<- error, die <-chan bool) {
 	for part := range jobs {
 		if err := arg.hook(part); err != nil {
@@ -93,11 +92,11 @@ func downloadWorker(id int, arg downloadWorkerArg, jobs <-chan downloadPart, res
 			break
 		}
 
-		// resolve options
+		// Resolve options
 		r := Range(part.Start, part.End)
 		p := Progress(&defaultDownloadProgressListener{})
 		opts := make([]Option, len(arg.options)+2)
-		// append orderly, can not be reversed!
+		// Append orderly, can not be reversed!
 		opts = append(opts, arg.options...)
 		opts = append(opts, r, p)
 
@@ -151,7 +150,7 @@ func downloadWorker(id int, arg downloadWorkerArg, jobs <-chan downloadPart, res
 	}
 }
 
-// 调度协程
+// downloadScheduler sends all parts to the jobs channel and then closes it
 func downloadScheduler(jobs chan downloadPart, parts []downloadPart) {
 	for _, part := range parts {
 		jobs <- part
@@ -159,16 +158,16 @@ func downloadScheduler(jobs chan downloadPart, parts []downloadPart) {
 	close(jobs)
 }
 
-// 下载片
+// downloadPart defines download part
 type downloadPart struct {
-	Index  int    // 片序号,从0开始编号
-	Start  int64  // 片起始位置
-	End    int64  // 片结束位置
-	Offset int64  // 文件中的偏移位置
-	CRC64  uint64 // 片的校验值
+	Index  int    // Part number, starting from 0
+	Start  int64  // Start index
+	End    int64  // End index
+	Offset int64  // Offset
+	CRC64  uint64 // CRC check value of part
 }
 
-// 文件分片
+// getDownloadParts gets download parts
 func getDownloadParts(bucket *Bucket, objectKey string, partSize int64, uRange *unpackedRange) ([]downloadPart, bool, uint64, error) {
 	meta, err := bucket.GetObjectDetailedMeta(objectKey)
 	if err != nil {
@@ -205,7 +204,7 @@ func getDownloadParts(bucket *Bucket, objectKey string, partSize int64, uRange *
 	return parts, enableCRC, crcVal, nil
 }
 
-// 文件大小
+// getObjectBytes gets object bytes length
 func getObjectBytes(parts []downloadPart) int64 {
 	var ob int64
 	for _, part := range parts {
@@ -214,7 +213,7 @@ func getObjectBytes(parts []downloadPart) int64 {
 	return ob
 }
 
-// 计算连续分片总的CRC
+// combineCRCInParts calculates the total CRC of continuous parts
 func combineCRCInParts(dps []downloadPart) uint64 {
 	if dps == nil || len(dps) == 0 {
 		return 0
@@ -228,19 +227,19 @@ func combineCRCInParts(dps []downloadPart) uint64 {
 	return crc
 }
 
-// 并发无断点续传的下载
+// downloadFile downloads file concurrently without checkpoint.
 func (bucket Bucket) downloadFile(objectKey, filePath string, partSize int64, options []Option, routines int, uRange *unpackedRange) error {
 	tempFilePath := filePath + TempFileSuffix
 	listener := getProgressListener(options)
 
-	// 如果文件不存在则创建,存在不清空,下载分片会重写文件内容
+	// If the file does not exist, create one. If exists, the download will overwrite it.
 	fd, err := os.OpenFile(tempFilePath, os.O_WRONLY|os.O_CREATE, FilePermMode)
 	if err != nil {
 		return err
 	}
 	fd.Close()
 
-	// 分割文件
+	// Get the parts of the file
 	parts, enableCRC, expectedCRC, err := getDownloadParts(&bucket, objectKey, partSize, uRange)
 	if err != nil {
 		return err
@@ -256,16 +255,16 @@ func (bucket Bucket) downloadFile(objectKey, filePath string, partSize int64, op
 	event := newProgressEvent(TransferStartedEvent, 0, totalBytes)
 	publishProgress(listener, event)
 
-	// 启动工作协程
+	// Start the download workers
 	arg := downloadWorkerArg{&bucket, objectKey, tempFilePath, options, downloadPartHooker, enableCRC}
 	for w := 1; w <= routines; w++ {
 		go downloadWorker(w, arg, jobs, results, failed, die)
 	}
 
-	// 并发上传分片
+	// Download parts concurrently
 	go downloadScheduler(jobs, parts)
 
-	// 等待分片下载完成
+	// Waiting for parts download finished
 	completed := 0
 	for completed < len(parts) {
 		select {
@@ -301,33 +300,33 @@ func (bucket Bucket) downloadFile(objectKey, filePath string, partSize int64, op
 	return os.Rename(tempFilePath, filePath)
 }
 
-// ----- 并发有断点的下载  -----
+// ----- Concurrent download with checkpoint -----
 
 const downloadCpMagic = "92611BED-89E2-46B6-89E5-72F273D4B0A3"
 
 type downloadCheckpoint struct {
-	Magic     string         // magic
-	MD5       string         // cp内容的MD5
-	FilePath  string         // 本地文件
-	Object    string         // key
-	ObjStat   objectStat     // 文件状态
-	Parts     []downloadPart // 全部分片
-	PartStat  []bool         // 分片下载是否完成
-	Start     int64          // 起点
-	End       int64          // 终点
-	enableCRC bool           // 是否有CRC校验
-	CRC       uint64         // CRC校验值
+	Magic     string         // Magic
+	MD5       string         // Checkpoint content MD5
+	FilePath  string         // Local file
+	Object    string         // Key
+	ObjStat   objectStat     // Object status
+	Parts     []downloadPart // All download parts
+	PartStat  []bool         // Parts' download status
+	Start     int64          // Start point of the file
+	End       int64          // End point of the file
+	enableCRC bool           // Whether has CRC check
+	CRC       uint64         // CRC check value
 }
 
 type objectStat struct {
-	Size         int64  // 大小
-	LastModified string // 最后修改时间
-	Etag         string // etag
+	Size         int64  // Object size
+	LastModified string // Last modified time
+	Etag         string // Etag
 }
 
-// CP数据是否有效,CP有效且Object没有更新时有效
+// isValid checks whether the checkpoint data is valid. It returns true when the data is valid and the object has not been updated.
 func (cp downloadCheckpoint) isValid(bucket *Bucket, objectKey string, uRange *unpackedRange) (bool, error) {
-	// 比较CP的Magic及MD5
+	// Compare the CP's Magic and the MD5
 	cpb := cp
 	cpb.MD5 = ""
 	js, _ := json.Marshal(cpb)
@@ -338,7 +337,7 @@ func (cp downloadCheckpoint) isValid(bucket *Bucket, objectKey string, uRange *u
 		return false, nil
 	}
 
-	// 确认object没有更新
+	// Ensure the object is not updated.
 	meta, err := bucket.GetObjectDetailedMeta(objectKey)
 	if err != nil {
 		return false, err
@@ -349,14 +348,14 @@ func (cp downloadCheckpoint) isValid(bucket *Bucket, objectKey string, uRange *u
 		return false, err
 	}
 
-	// 比较Object的大小/最后修改时间/etag
+	// Compare the object size, last modified time and etag
 	if cp.ObjStat.Size != objectSize ||
 		cp.ObjStat.LastModified != meta.Get(HTTPHeaderLastModified) ||
 		cp.ObjStat.Etag != meta.Get(HTTPHeaderEtag) {
 		return false, nil
 	}
 
-	// 确认下载范围是否变化
+	// Check the download range
 	if uRange != nil {
 		start, end := adjustRange(uRange, objectSize)
 		if start != cp.Start || end != cp.End {
@@ -367,7 +366,7 @@ func (cp downloadCheckpoint) isValid(bucket *Bucket, objectKey string, uRange *u
 	return true, nil
 }
 
-// 从文件中load
+// load checkpoint from local file
 func (cp *downloadCheckpoint) load(filePath string) error {
 	contents, err := ioutil.ReadFile(filePath)
 	if err != nil {
@@ -378,11 +377,11 @@ func (cp *downloadCheckpoint) load(filePath string) error {
 	return err
 }
 
-// dump到文件
+// dump dumps the checkpoint to the local file
 func (cp *downloadCheckpoint) dump(filePath string) error {
 	bcp := *cp
 
-	// 计算MD5
+	// Calculate MD5
 	bcp.MD5 = ""
 	js, err := json.Marshal(bcp)
 	if err != nil {
@@ -392,17 +391,17 @@ func (cp *downloadCheckpoint) dump(filePath string) error {
 	b64 := base64.StdEncoding.EncodeToString(sum[:])
 	bcp.MD5 = b64
 
-	// 序列化
+	// Serialize
 	js, err = json.Marshal(bcp)
 	if err != nil {
 		return err
 	}
 
-	// dump
+	// Dump
 	return ioutil.WriteFile(filePath, js, FilePermMode)
 }
 
-// 未完成的分片
+// todoParts gets unfinished parts
 func (cp downloadCheckpoint) todoParts() []downloadPart {
 	dps := []downloadPart{}
 	for i, ps := range cp.PartStat {
@@ -413,7 +412,7 @@ func (cp downloadCheckpoint) todoParts() []downloadPart {
 	return dps
 }
 
-// 完成的字节数
+// getCompletedBytes gets completed size
 func (cp downloadCheckpoint) getCompletedBytes() int64 {
 	var completedBytes int64
 	for i, part := range cp.Parts {
@@ -424,14 +423,14 @@ func (cp downloadCheckpoint) getCompletedBytes() int64 {
 	return completedBytes
 }
 
-// 初始化下载任务
+// prepare initiates download tasks
 func (cp *downloadCheckpoint) prepare(bucket *Bucket, objectKey, filePath string, partSize int64, uRange *unpackedRange) error {
-	// cp
+	// CP
 	cp.Magic = downloadCpMagic
 	cp.FilePath = filePath
 	cp.Object = objectKey
 
-	// object
+	// Object
 	meta, err := bucket.GetObjectDetailedMeta(objectKey)
 	if err != nil {
 		return err
@@ -446,7 +445,7 @@ func (cp *downloadCheckpoint) prepare(bucket *Bucket, objectKey, filePath string
 	cp.ObjStat.LastModified = meta.Get(HTTPHeaderLastModified)
 	cp.ObjStat.Etag = meta.Get(HTTPHeaderEtag)
 
-	// parts
+	// Parts
 	cp.Parts, cp.enableCRC, cp.CRC, err = getDownloadParts(bucket, objectKey, partSize, uRange)
 	if err != nil {
 		return err
@@ -464,19 +463,19 @@ func (cp *downloadCheckpoint) complete(cpFilePath, downFilepath string) error {
 	return os.Rename(downFilepath, cp.FilePath)
 }
 
-// 并发带断点的下载
+// downloadFileWithCp downloads files with checkpoint.
 func (bucket Bucket) downloadFileWithCp(objectKey, filePath string, partSize int64, options []Option, cpFilePath string, routines int, uRange *unpackedRange) error {
 	tempFilePath := filePath + TempFileSuffix
 	listener := getProgressListener(options)
 
-	// LOAD CP数据
+	// Load checkpoint data.
 	dcp := downloadCheckpoint{}
 	err := dcp.load(cpFilePath)
 	if err != nil {
 		os.Remove(cpFilePath)
 	}
 
-	// LOAD出错或数据无效重新初始化下载
+	// Load error or data invalid. Re-initialize the download.
 	valid, err := dcp.isValid(&bucket, objectKey, uRange)
 	if err != nil || !valid {
 		if err = dcp.prepare(&bucket, objectKey, filePath, partSize, uRange); err != nil {
@@ -485,14 +484,14 @@ func (bucket Bucket) downloadFileWithCp(objectKey, filePath string, partSize int
 		os.Remove(cpFilePath)
 	}
 
-	// 如果文件不存在则创建,存在不清空,下载分片会重写文件内容
+	// Create the file if not exists. Otherwise the parts download will overwrite it.
 	fd, err := os.OpenFile(tempFilePath, os.O_WRONLY|os.O_CREATE, FilePermMode)
 	if err != nil {
 		return err
 	}
 	fd.Close()
 
-	// 未完成的分片
+	// Unfinished parts
 	parts := dcp.todoParts()
 	jobs := make(chan downloadPart, len(parts))
 	results := make(chan downloadPart, len(parts))
@@ -503,16 +502,16 @@ func (bucket Bucket) downloadFileWithCp(objectKey, filePath string, partSize int
 	event := newProgressEvent(TransferStartedEvent, completedBytes, dcp.ObjStat.Size)
 	publishProgress(listener, event)
 
-	// 启动工作协程
+	// Start the download workers routine
 	arg := downloadWorkerArg{&bucket, objectKey, tempFilePath, options, downloadPartHooker, dcp.enableCRC}
 	for w := 1; w <= routines; w++ {
 		go downloadWorker(w, arg, jobs, results, failed, die)
 	}
 
-	// 并发下载分片
+	// Concurrently downloads parts
 	go downloadScheduler(jobs, parts)
 
-	// 等待分片下载完成
+	// Wait for the parts download finished
 	completed := 0
 	for completed < len(parts) {
 		select {

+ 67 - 67
oss/download_test.go

@@ -16,7 +16,7 @@ type OssDownloadSuite struct {
 
 var _ = Suite(&OssDownloadSuite{})
 
-// Run once when the suite starts running
+// SetUpSuite runs once when the suite starts running
 func (s *OssDownloadSuite) SetUpSuite(c *C) {
 	client, err := New(endpoint, accessID, accessKey)
 	c.Assert(err, IsNil)
@@ -32,9 +32,9 @@ func (s *OssDownloadSuite) SetUpSuite(c *C) {
 	testLogger.Println("test download started")
 }
 
-// Run before each test or benchmark starts running
+// TearDownSuite runs once after all tests or benchmarks have finished running
 func (s *OssDownloadSuite) TearDownSuite(c *C) {
-	// Delete Part
+	// Delete part
 	lmur, err := s.bucket.ListMultipartUploads()
 	c.Assert(err, IsNil)
 
@@ -45,7 +45,7 @@ func (s *OssDownloadSuite) TearDownSuite(c *C) {
 		c.Assert(err, IsNil)
 	}
 
-	// Delete Objects
+	// Delete objects
 	lor, err := s.bucket.ListObjects()
 	c.Assert(err, IsNil)
 
@@ -57,13 +57,13 @@ func (s *OssDownloadSuite) TearDownSuite(c *C) {
 	testLogger.Println("test download completed")
 }
 
-// Run after each test or benchmark runs
+// SetUpTest runs before each test or benchmark starts running
 func (s *OssDownloadSuite) SetUpTest(c *C) {
 	err := removeTempFiles("../oss", ".jpg")
 	c.Assert(err, IsNil)
 }
 
-// Run once after all tests or benchmarks have finished running
+// TearDownTest runs after each test or benchmark runs
 func (s *OssDownloadSuite) TearDownTest(c *C) {
 	err := removeTempFiles("../oss", ".jpg")
 	c.Assert(err, IsNil)
@@ -72,51 +72,51 @@ func (s *OssDownloadSuite) TearDownTest(c *C) {
 	c.Assert(err, IsNil)
 }
 
-// TestUploadRoutineWithoutRecovery 多线程无断点恢复的下载
+// TestDownloadRoutineWithoutRecovery multipart downloads without checkpoint
 func (s *OssDownloadSuite) TestDownloadRoutineWithoutRecovery(c *C) {
 	objectName := objectNamePrefix + "tdrwr"
 	fileName := "../sample/BingWallpaper-2015-11-07.jpg"
 	newFile := "down-new-file.jpg"
 
-	// 上传文件
+	// Upload a file
 	err := s.bucket.UploadFile(objectName, fileName, 100*1024, Routines(3))
 	c.Assert(err, IsNil)
 
-	// 使用默认值下载
+	// Download the file by default
 	err = s.bucket.DownloadFile(objectName, newFile, 100*1024)
 	c.Assert(err, IsNil)
 
-	// check
+	// Check
 	eq, err := compareFiles(fileName, newFile)
 	c.Assert(err, IsNil)
 	c.Assert(eq, Equals, true)
 
-	// 使用2个协程下载,小于总分片数5
+	// Use 2 coroutines to download the file and total parts count is 5
 	os.Remove(newFile)
 	err = s.bucket.DownloadFile(objectName, newFile, 100*1024, Routines(2))
 	c.Assert(err, IsNil)
 
-	// check
+	// Check
 	eq, err = compareFiles(fileName, newFile)
 	c.Assert(err, IsNil)
 	c.Assert(eq, Equals, true)
 
-	// 使用5个协程下载,等于总分片数5
+	// Use 5 coroutines to download the file and the total parts count is 5.
 	os.Remove(newFile)
 	err = s.bucket.DownloadFile(objectName, newFile, 100*1024, Routines(5))
 	c.Assert(err, IsNil)
 
-	// check
+	// Check
 	eq, err = compareFiles(fileName, newFile)
 	c.Assert(err, IsNil)
 	c.Assert(eq, Equals, true)
 
-	// 使用10个协程下载,大于总分片数5
+	// Use 10 coroutines to download the file and the total parts count is 5.
 	os.Remove(newFile)
 	err = s.bucket.DownloadFile(objectName, newFile, 100*1024, Routines(10))
 	c.Assert(err, IsNil)
 
-	// check
+	// Check
 	eq, err = compareFiles(fileName, newFile)
 	c.Assert(err, IsNil)
 	c.Assert(eq, Equals, true)
@@ -125,7 +125,7 @@ func (s *OssDownloadSuite) TestDownloadRoutineWithoutRecovery(c *C) {
 	c.Assert(err, IsNil)
 }
 
-// ErrorHooker DownloadPart请求Hook
+// DownErrorHooker is the hook for the downloadPart request, used for testing
 func DownErrorHooker(part downloadPart) error {
 	if part.Index == 4 {
 		time.Sleep(time.Second)
@@ -134,24 +134,24 @@ func DownErrorHooker(part downloadPart) error {
 	return nil
 }
 
-// TestDownloadRoutineWithRecovery 多线程有断点恢复的下载
+// TestDownloadRoutineWithRecovery multi-routine resumable download
 func (s *OssDownloadSuite) TestDownloadRoutineWithRecovery(c *C) {
 	objectName := objectNamePrefix + "tdrtr"
 	fileName := "../sample/BingWallpaper-2015-11-07.jpg"
 	newFile := "down-new-file-2.jpg"
 
-	// 上传文件
+	// Upload a file
 	err := s.bucket.UploadFile(objectName, fileName, 100*1024, Routines(3))
 	c.Assert(err, IsNil)
 
-	// 下载,CP使用默认值
+	// Download a file with default checkpoint
 	downloadPartHooker = DownErrorHooker
 	err = s.bucket.DownloadFile(objectName, newFile, 100*1024, Checkpoint(true, ""))
 	c.Assert(err, NotNil)
 	c.Assert(err.Error(), Equals, "ErrorHooker")
 	downloadPartHooker = defaultDownloadPartHook
 
-	// check
+	// Check
 	dcp := downloadCheckpoint{}
 	err = dcp.load(newFile + ".cp")
 	c.Assert(err, IsNil)
@@ -175,7 +175,7 @@ func (s *OssDownloadSuite) TestDownloadRoutineWithRecovery(c *C) {
 	c.Assert(err, IsNil)
 	c.Assert(eq, Equals, true)
 
-	// 下载,指定CP
+	// Resumable download with checkpoint
 	os.Remove(newFile)
 	downloadPartHooker = DownErrorHooker
 	err = s.bucket.DownloadFile(objectName, newFile, 100*1024, Checkpoint(true, objectName+".cp"))
@@ -183,7 +183,7 @@ func (s *OssDownloadSuite) TestDownloadRoutineWithRecovery(c *C) {
 	c.Assert(err.Error(), Equals, "ErrorHooker")
 	downloadPartHooker = defaultDownloadPartHook
 
-	// check
+	// Check
 	dcp = downloadCheckpoint{}
 	err = dcp.load(objectName + ".cp")
 	c.Assert(err, IsNil)
@@ -207,7 +207,7 @@ func (s *OssDownloadSuite) TestDownloadRoutineWithRecovery(c *C) {
 	c.Assert(err, IsNil)
 	c.Assert(eq, Equals, true)
 
-	// 一次完成下载,中间没有错误
+	// Download completed in one run with checkpoint enabled. No error is expected during the download.
 	os.Remove(newFile)
 	err = s.bucket.DownloadFile(objectName, newFile, 100*1024, Checkpoint(true, ""))
 	c.Assert(err, IsNil)
@@ -219,7 +219,7 @@ func (s *OssDownloadSuite) TestDownloadRoutineWithRecovery(c *C) {
 	c.Assert(err, IsNil)
 	c.Assert(eq, Equals, true)
 
-	// 一次完成下载,中间没有错误
+	// Resumable download with checkpoint at a time. No error is expected in the download procedure.
 	os.Remove(newFile)
 	err = s.bucket.DownloadFile(objectName, newFile, 100*1024, Routines(10), Checkpoint(true, ""))
 	c.Assert(err, IsNil)
@@ -235,13 +235,13 @@ func (s *OssDownloadSuite) TestDownloadRoutineWithRecovery(c *C) {
 	c.Assert(err, IsNil)
 }
 
-// TestDownloadOption 选项
+// TestDownloadOption options
 func (s *OssDownloadSuite) TestDownloadOption(c *C) {
 	objectName := objectNamePrefix + "tdmo"
 	fileName := "../sample/BingWallpaper-2015-11-07.jpg"
 	newFile := "down-new-file-3.jpg"
 
-	// 上传文件
+	// Upload the file
 	err := s.bucket.UploadFile(objectName, fileName, 100*1024, Routines(3))
 	c.Assert(err, IsNil)
 
@@ -275,17 +275,17 @@ func (s *OssDownloadSuite) TestDownloadOption(c *C) {
 	c.Assert(err, NotNil)
 }
 
-// TestDownloadObjectChange 上传过程中文件修改了
+// TestDownloadObjectChange tests the case that the object is changed during the resumable download
 func (s *OssDownloadSuite) TestDownloadObjectChange(c *C) {
 	objectName := objectNamePrefix + "tdloc"
 	fileName := "../sample/BingWallpaper-2015-11-07.jpg"
 	newFile := "down-new-file-4.jpg"
 
-	// 上传文件
+	// Upload a file
 	err := s.bucket.UploadFile(objectName, fileName, 100*1024, Routines(3))
 	c.Assert(err, IsNil)
 
-	// 下载,CP使用默认值
+	// Download with default checkpoint
 	downloadPartHooker = DownErrorHooker
 	err = s.bucket.DownloadFile(objectName, newFile, 100*1024, Checkpoint(true, ""))
 	c.Assert(err, NotNil)
@@ -303,28 +303,28 @@ func (s *OssDownloadSuite) TestDownloadObjectChange(c *C) {
 	c.Assert(eq, Equals, true)
 }
 
-// TestDownloadNegative Download Negative
+// TestDownloadNegative tests downloading negative cases
 func (s *OssDownloadSuite) TestDownloadNegative(c *C) {
 	objectName := objectNamePrefix + "tdn"
 	fileName := "../sample/BingWallpaper-2015-11-07.jpg"
 	newFile := "down-new-file-3.jpg"
 
-	// 上传文件
+	// Upload a file
 	err := s.bucket.UploadFile(objectName, fileName, 100*1024, Routines(3))
 	c.Assert(err, IsNil)
 
-	// worker线程错误
+	// Worker routine error
 	downloadPartHooker = DownErrorHooker
 	err = s.bucket.DownloadFile(objectName, newFile, 100*1024, Routines(2))
 	c.Assert(err, NotNil)
 	c.Assert(err.Error(), Equals, "ErrorHooker")
 	downloadPartHooker = defaultDownloadPartHook
 
-	// 本地文件不存在
+	// Local file does not exist
 	err = s.bucket.DownloadFile(objectName, "/tmp/", 100*1024, Routines(2))
 	c.Assert(err, NotNil)
 
-	// 指定的分片大小无效
+	// Invalid part size
 	err = s.bucket.DownloadFile(objectName, newFile, 0, Routines(2))
 	c.Assert(err, NotNil)
 
@@ -334,14 +334,14 @@ func (s *OssDownloadSuite) TestDownloadNegative(c *C) {
 	err = s.bucket.DeleteObject(objectName)
 	c.Assert(err, IsNil)
 
-	// 本地文件不存在
+	// Local file does not exist
 	err = s.bucket.DownloadFile(objectName, "/tmp/", 100*1024, Checkpoint(true, ""))
 	c.Assert(err, NotNil)
 
 	err = s.bucket.DownloadFile(objectName, "/tmp/", 100*1024, Routines(2), Checkpoint(true, ""))
 	c.Assert(err, NotNil)
 
-	// 指定的分片大小无效
+	// Invalid part size
 	err = s.bucket.DownloadFile(objectName, newFile, -1, Checkpoint(true, ""))
 	c.Assert(err, NotNil)
 
@@ -355,26 +355,26 @@ func (s *OssDownloadSuite) TestDownloadNegative(c *C) {
 	c.Assert(err, NotNil)
 }
 
-// TestDownloadWithRange 带范围的并发下载、断点下载测试
+// TestDownloadWithRange tests concurrent downloading with a specified range
 func (s *OssDownloadSuite) TestDownloadWithRange(c *C) {
 	objectName := objectNamePrefix + "tdwr"
 	fileName := "../sample/BingWallpaper-2015-11-07.jpg"
 	newFile := "down-new-file-tdwr.jpg"
 	newFileGet := "down-new-file-tdwr-2.jpg"
 
-	// 上传文件
+	// Upload a file
 	err := s.bucket.UploadFile(objectName, fileName, 100*1024, Routines(3))
 	c.Assert(err, IsNil)
 
 	fileSize, err := getFileSize(fileName)
 	c.Assert(err, IsNil)
 
-	// 范围下载,从1024到4096
+	// Download with range, from 1024 to 4096
 	os.Remove(newFile)
 	err = s.bucket.DownloadFile(objectName, newFile, 100*1024, Routines(3), Range(1024, 4095))
 	c.Assert(err, IsNil)
 
-	// check
+	// Check
 	eq, err := compareFilesWithRange(fileName, 1024, newFile, 0, 3072)
 	c.Assert(err, IsNil)
 	c.Assert(eq, Equals, true)
@@ -383,17 +383,17 @@ func (s *OssDownloadSuite) TestDownloadWithRange(c *C) {
 	err = s.bucket.GetObjectToFile(objectName, newFileGet, Range(1024, 4095))
 	c.Assert(err, IsNil)
 
-	// compare get and download
+	// Compare get and download
 	eq, err = compareFiles(newFile, newFileGet)
 	c.Assert(err, IsNil)
 	c.Assert(eq, Equals, true)
 
-	// 范围下载,从1024到4096
+	// Download with range, from 1024 to 4096
 	os.Remove(newFile)
 	err = s.bucket.DownloadFile(objectName, newFile, 1024, Routines(3), NormalizedRange("1024-4095"))
 	c.Assert(err, IsNil)
 
-	// check
+	// Check
 	eq, err = compareFilesWithRange(fileName, 1024, newFile, 0, 3072)
 	c.Assert(err, IsNil)
 	c.Assert(eq, Equals, true)
@@ -402,17 +402,17 @@ func (s *OssDownloadSuite) TestDownloadWithRange(c *C) {
 	err = s.bucket.GetObjectToFile(objectName, newFileGet, NormalizedRange("1024-4095"))
 	c.Assert(err, IsNil)
 
-	// compare get and download
+	// Compare get and download
 	eq, err = compareFiles(newFile, newFileGet)
 	c.Assert(err, IsNil)
 	c.Assert(eq, Equals, true)
 
-	// 范围下载,从2048到结束
+	// Download with range, from 2048 to the end
 	os.Remove(newFile)
 	err = s.bucket.DownloadFile(objectName, newFile, 1024*1024, Routines(3), NormalizedRange("2048-"))
 	c.Assert(err, IsNil)
 
-	// check
+	// Check
 	eq, err = compareFilesWithRange(fileName, 2048, newFile, 0, fileSize-2048)
 	c.Assert(err, IsNil)
 	c.Assert(eq, Equals, true)
@@ -421,17 +421,17 @@ func (s *OssDownloadSuite) TestDownloadWithRange(c *C) {
 	err = s.bucket.GetObjectToFile(objectName, newFileGet, NormalizedRange("2048-"))
 	c.Assert(err, IsNil)
 
-	// compare get and download
+	// Compare get and download
 	eq, err = compareFiles(newFile, newFileGet)
 	c.Assert(err, IsNil)
 	c.Assert(eq, Equals, true)
 
-	// 范围下载,最后4096个字节
+	// Download with range, the last 4096 bytes
 	os.Remove(newFile)
 	err = s.bucket.DownloadFile(objectName, newFile, 1024, Routines(3), NormalizedRange("-4096"))
 	c.Assert(err, IsNil)
 
-	// check
+	// Check
 	eq, err = compareFilesWithRange(fileName, fileSize-4096, newFile, 0, 4096)
 	c.Assert(err, IsNil)
 	c.Assert(eq, Equals, true)
@@ -440,7 +440,7 @@ func (s *OssDownloadSuite) TestDownloadWithRange(c *C) {
 	err = s.bucket.GetObjectToFile(objectName, newFileGet, NormalizedRange("-4096"))
 	c.Assert(err, IsNil)
 
-	// compare get and download
+	// Compare get and download
 	eq, err = compareFiles(newFile, newFileGet)
 	c.Assert(err, IsNil)
 	c.Assert(eq, Equals, true)
@@ -449,26 +449,26 @@ func (s *OssDownloadSuite) TestDownloadWithRange(c *C) {
 	c.Assert(err, IsNil)
 }
 
-// TestDownloadWithCheckoutAndRange 带范围的并发下载、断点下载测试
+// TestDownloadWithCheckoutAndRange tests concurrent downloading with range specified and checkpoint enabled
 func (s *OssDownloadSuite) TestDownloadWithCheckoutAndRange(c *C) {
 	objectName := objectNamePrefix + "tdwcr"
 	fileName := "../sample/BingWallpaper-2015-11-07.jpg"
 	newFile := "down-new-file-tdwcr.jpg"
 	newFileGet := "down-new-file-tdwcr-2.jpg"
 
-	// 上传文件
+	// Upload a file
 	err := s.bucket.UploadFile(objectName, fileName, 100*1024, Routines(3))
 	c.Assert(err, IsNil)
 
 	fileSize, err := getFileSize(fileName)
 	c.Assert(err, IsNil)
 
-	// 范围下载,从1024到4096
+	// Download with range, from 1024 to 4096
 	os.Remove(newFile)
 	err = s.bucket.DownloadFile(objectName, newFile, 100*1024, Routines(3), Checkpoint(true, ""), Range(1024, 4095))
 	c.Assert(err, IsNil)
 
-	// check
+	// Check
 	eq, err := compareFilesWithRange(fileName, 1024, newFile, 0, 3072)
 	c.Assert(err, IsNil)
 	c.Assert(eq, Equals, true)
@@ -477,17 +477,17 @@ func (s *OssDownloadSuite) TestDownloadWithCheckoutAndRange(c *C) {
 	err = s.bucket.GetObjectToFile(objectName, newFileGet, Range(1024, 4095))
 	c.Assert(err, IsNil)
 
-	// compare get and download
+	// Compare get and download
 	eq, err = compareFiles(newFile, newFileGet)
 	c.Assert(err, IsNil)
 	c.Assert(eq, Equals, true)
 
-	// 范围下载,从1024到4096
+	// Download with range, from 1024 to 4096
 	os.Remove(newFile)
 	err = s.bucket.DownloadFile(objectName, newFile, 1024, Routines(3), Checkpoint(true, ""), NormalizedRange("1024-4095"))
 	c.Assert(err, IsNil)
 
-	// check
+	// Check
 	eq, err = compareFilesWithRange(fileName, 1024, newFile, 0, 3072)
 	c.Assert(err, IsNil)
 	c.Assert(eq, Equals, true)
@@ -496,17 +496,17 @@ func (s *OssDownloadSuite) TestDownloadWithCheckoutAndRange(c *C) {
 	err = s.bucket.GetObjectToFile(objectName, newFileGet, NormalizedRange("1024-4095"))
 	c.Assert(err, IsNil)
 
-	// compare get and download
+	// Compare get and download
 	eq, err = compareFiles(newFile, newFileGet)
 	c.Assert(err, IsNil)
 	c.Assert(eq, Equals, true)
 
-	// 范围下载,从2048到结束
+	// Download with range, from 2048 to the end
 	os.Remove(newFile)
 	err = s.bucket.DownloadFile(objectName, newFile, 1024*1024, Routines(3), Checkpoint(true, ""), NormalizedRange("2048-"))
 	c.Assert(err, IsNil)
 
-	// check
+	// Check
 	eq, err = compareFilesWithRange(fileName, 2048, newFile, 0, fileSize-2048)
 	c.Assert(err, IsNil)
 	c.Assert(eq, Equals, true)
@@ -515,17 +515,17 @@ func (s *OssDownloadSuite) TestDownloadWithCheckoutAndRange(c *C) {
 	err = s.bucket.GetObjectToFile(objectName, newFileGet, NormalizedRange("2048-"))
 	c.Assert(err, IsNil)
 
-	// compare get and download
+	// Compare get and download
 	eq, err = compareFiles(newFile, newFileGet)
 	c.Assert(err, IsNil)
 	c.Assert(eq, Equals, true)
 
-	// 范围下载,最后4096个字节
+	// Download with range, the last 4096 bytes
 	os.Remove(newFile)
 	err = s.bucket.DownloadFile(objectName, newFile, 1024, Routines(3), Checkpoint(true, ""), NormalizedRange("-4096"))
 	c.Assert(err, IsNil)
 
-	// check
+	// Check
 	eq, err = compareFilesWithRange(fileName, fileSize-4096, newFile, 0, 4096)
 	c.Assert(err, IsNil)
 	c.Assert(eq, Equals, true)
@@ -534,7 +534,7 @@ func (s *OssDownloadSuite) TestDownloadWithCheckoutAndRange(c *C) {
 	err = s.bucket.GetObjectToFile(objectName, newFileGet, NormalizedRange("-4096"))
 	c.Assert(err, IsNil)
 
-	// compare get and download
+	// Compare get and download
 	eq, err = compareFiles(newFile, newFileGet)
 	c.Assert(err, IsNil)
 	c.Assert(eq, Equals, true)
@@ -543,7 +543,7 @@ func (s *OssDownloadSuite) TestDownloadWithCheckoutAndRange(c *C) {
 	c.Assert(err, IsNil)
 }
 
-// TestCombineCRCInParts 测试DownloadParts的CRC Combine
+// TestCombineCRCInDownloadParts tests combineCRCInParts
 func (s *OssDownloadSuite) TestCombineCRCInDownloadParts(c *C) {
 	crc := combineCRCInParts(nil)
 	c.Assert(crc == 0, Equals, true)
@@ -582,7 +582,7 @@ func getFileSize(fileName string) (int64, error) {
 	return stat.Size(), nil
 }
 
-// compare the content between fileL and fileR with specified range
+// compareFilesWithRange compares the content between fileL and fileR with specified range
 func compareFilesWithRange(fileL string, offsetL int64, fileR string, offsetR int64, size int64) (bool, error) {
 	finL, err := os.Open(fileL)
 	if err != nil {

+ 15 - 15
oss/error.go

@@ -10,15 +10,15 @@ import (
 // ServiceError contains fields of the error response from Oss Service REST API.
 type ServiceError struct {
 	XMLName    xml.Name `xml:"Error"`
-	Code       string   `xml:"Code"`      // OSS返回给用户的错误码
-	Message    string   `xml:"Message"`   // OSS给出的详细错误信息
-	RequestID  string   `xml:"RequestId"` // 用于唯一标识该次请求的UUID
-	HostID     string   `xml:"HostId"`    // 用于标识访问的OSS集群
-	RawMessage string   // OSS返回的原始消息内容
-	StatusCode int      // HTTP状态码
+	Code       string   `xml:"Code"`      // The error code returned from OSS to the caller
+	Message    string   `xml:"Message"`   // The detail error message from OSS
+	RequestID  string   `xml:"RequestId"` // The UUID used to uniquely identify the request
+	HostID     string   `xml:"HostId"`    // The OSS server cluster's Id
+	RawMessage string   // The raw messages from OSS
+	StatusCode int      // HTTP status code
 }
 
-// Implement interface error
+// Error implements interface error
 func (e ServiceError) Error() string {
 	return fmt.Sprintf("oss: service returned error: StatusCode=%d, ErrorCode=%s, ErrorMessage=%s, RequestId=%s",
 		e.StatusCode, e.Code, e.Message, e.RequestID)
@@ -27,11 +27,11 @@ func (e ServiceError) Error() string {
 // UnexpectedStatusCodeError is returned when a storage service responds with neither an error
 // nor with an HTTP status code indicating success.
 type UnexpectedStatusCodeError struct {
-	allowed []int // 预期OSS返回HTTP状态码
-	got     int   // OSS实际返回HTTP状态码
+	allowed []int // The expected HTTP status codes returned from OSS
+	got     int   // The actual HTTP status code from OSS
 }
 
-// Implement interface error
+// Error implements interface error
 func (e UnexpectedStatusCodeError) Error() string {
 	s := func(i int) string { return fmt.Sprintf("%d %s", i, http.StatusText(i)) }
 
@@ -62,13 +62,13 @@ func checkRespCode(respCode int, allowed []int) error {
 
 // CRCCheckError is returned when crc check is inconsistent between client and server
 type CRCCheckError struct {
-	clientCRC uint64 // 客户端计算的CRC64值
-	serverCRC uint64 // 服务端计算的CRC64值
-	operation string // 上传操作,如PutObject/AppendObject/UploadPart等
-	requestID string // 本次操作的RequestID
+	clientCRC uint64 // Calculated CRC64 in client
+	serverCRC uint64 // Calculated CRC64 in server
+	operation string // Upload operations such as PutObject/AppendObject/UploadPart, etc
+	requestID string // The request id of this operation
 }
 
-// Implement interface error
+// Error implements interface error
 func (e CRCCheckError) Error() string {
 	return fmt.Sprintf("oss: the crc of %s is inconsistent, client %d but server %d; request id is %s",
 		e.operation, e.clientCRC, e.serverCRC, e.requestID)

+ 1 - 1
oss/mime.go

@@ -235,7 +235,7 @@ var extToMimeType = map[string]string{
 }
 
 // TypeByExtension returns the MIME type associated with the file extension ext.
-// 获取文件类型,选项ContentType使用
+// gets the file's MIME type for HTTP header Content-Type
 func TypeByExtension(filePath string) string {
 	typ := mime.TypeByExtension(path.Ext(filePath))
 	if typ == "" {

+ 8 - 8
oss/model.go

@@ -6,7 +6,7 @@ import (
 	"net/http"
 )
 
-// Response Http response from oss
+// Response defines HTTP response from OSS
 type Response struct {
 	StatusCode int
 	Headers    http.Header
@@ -15,38 +15,38 @@ type Response struct {
 	ServerCRC  uint64
 }
 
-// PutObjectRequest The request of DoPutObject
+// PutObjectRequest is the request of DoPutObject
 type PutObjectRequest struct {
 	ObjectKey string
 	Reader    io.Reader
 }
 
-// GetObjectRequest The request of DoGetObject
+// GetObjectRequest is the request of DoGetObject
 type GetObjectRequest struct {
 	ObjectKey string
 }
 
-// GetObjectResult The result of DoGetObject
+// GetObjectResult is the result of DoGetObject
 type GetObjectResult struct {
 	Response  *Response
 	ClientCRC hash.Hash64
 	ServerCRC uint64
 }
 
-// AppendObjectRequest  The requtest of DoAppendObject
+// AppendObjectRequest is the request of DoAppendObject
 type AppendObjectRequest struct {
 	ObjectKey string
 	Reader    io.Reader
 	Position  int64
 }
 
-// AppendObjectResult The result of DoAppendObject
+// AppendObjectResult is the result of DoAppendObject
 type AppendObjectResult struct {
 	NextPosition int64
 	CRC          uint64
 }
 
-// UploadPartRequest The request of DoUploadPart
+// UploadPartRequest is the request of DoUploadPart
 type UploadPartRequest struct {
 	InitResult *InitiateMultipartUploadResult
 	Reader     io.Reader
@@ -54,7 +54,7 @@ type UploadPartRequest struct {
 	PartNumber int
 }
 
-// UploadPartResult The result of DoUploadPart
+// UploadPartResult is the result of DoUploadPart
 type UploadPartResult struct {
 	Part UploadPart
 }

+ 61 - 62
oss/multicopy.go

@@ -11,16 +11,15 @@ import (
 	"strconv"
 )
 
+// CopyFile copies an object in parts (multipart copy)
 //
-// CopyFile 分片复制文件
+// srcBucketName    source bucket name
+// srcObjectKey    source object name
+// destObjectKey    target object name. The target bucket name is Bucket.BucketName.
+// partSize    the part size in bytes, e.g. 100*1024 means 100KB per part.
+// options    object's constraints. Check out function InitiateMultipartUpload.
 //
-// srcBucketName  源Bucket名称。
-// srcObjectKey   源Object名称。
-// destObjectKey   目标Object名称。目标Bucket名称为Bucket.BucketName。
-// partSize   复制文件片的大小,字节数。比如100 * 1024为每片100KB。
-// options    Object的属性限制项。详见InitiateMultipartUpload。
-//
-// error 操作成功error为nil,非nil为错误信息。
+// error    it's nil if the operation succeeds, otherwise it's an error object.
 //
 func (bucket Bucket) CopyFile(srcBucketName, srcObjectKey, destObjectKey string, partSize int64, options ...Option) error {
 	destBucketName := bucket.BucketName
@@ -44,9 +43,9 @@ func (bucket Bucket) CopyFile(srcBucketName, srcObjectKey, destObjectKey string,
 		partSize, options, routines)
 }
 
-// ----- 并发无断点的下载  -----
+// ----- Concurrently copy without checkpoint ---------
 
-// 工作协程参数
+// copyWorkerArg defines the copy worker arguments
 type copyWorkerArg struct {
 	bucket        *Bucket
 	imur          InitiateMultipartUploadResult
@@ -56,7 +55,7 @@ type copyWorkerArg struct {
 	hook          copyPartHook
 }
 
-// Hook用于测试
+// copyPartHook is the hook for testing purpose
 type copyPartHook func(part copyPart) error
 
 var copyPartHooker copyPartHook = defaultCopyPartHook
@@ -65,7 +64,7 @@ func defaultCopyPartHook(part copyPart) error {
 	return nil
 }
 
-// 工作协程
+// copyWorker is the worker routine that copies the parts
 func copyWorker(id int, arg copyWorkerArg, jobs <-chan copyPart, results chan<- UploadPart, failed chan<- error, die <-chan bool) {
 	for chunk := range jobs {
 		if err := arg.hook(chunk); err != nil {
@@ -88,7 +87,7 @@ func copyWorker(id int, arg copyWorkerArg, jobs <-chan copyPart, results chan<-
 	}
 }
 
-// 调度协程
+// copyScheduler feeds the copy parts into the jobs channel
 func copyScheduler(jobs chan copyPart, parts []copyPart) {
 	for _, part := range parts {
 		jobs <- part
@@ -96,14 +95,14 @@ func copyScheduler(jobs chan copyPart, parts []copyPart) {
 	close(jobs)
 }
 
-// 分片
+// copyPart structure
 type copyPart struct {
-	Number int   // 片序号[1, 10000]
-	Start  int64 // 片起始位置
-	End    int64 // 片结束位置
+	Number int   // Part number (from 1 to 10,000)
+	Start  int64 // The start index in the source file.
+	End    int64 // The end index in the source file
 }
 
-// 文件分片
+// getCopyParts calculates copy parts
 func getCopyParts(bucket *Bucket, objectKey string, partSize int64) ([]copyPart, error) {
 	meta, err := bucket.GetObjectDetailedMeta(objectKey)
 	if err != nil {
@@ -128,7 +127,7 @@ func getCopyParts(bucket *Bucket, objectKey string, partSize int64) ([]copyPart,
 	return parts, nil
 }
 
-// 获取源文件大小
+// getSrcObjectBytes gets the source file size
 func getSrcObjectBytes(parts []copyPart) int64 {
 	var ob int64
 	for _, part := range parts {
@@ -137,20 +136,20 @@ func getSrcObjectBytes(parts []copyPart) int64 {
 	return ob
 }
 
-// 并发无断点续传的下载
+// copyFile is a concurrent copy operation without checkpoint
 func (bucket Bucket) copyFile(srcBucketName, srcObjectKey, destBucketName, destObjectKey string,
 	partSize int64, options []Option, routines int) error {
 	descBucket, err := bucket.Client.Bucket(destBucketName)
 	srcBucket, err := bucket.Client.Bucket(srcBucketName)
 	listener := getProgressListener(options)
 
-	// 分割文件
+	// Get copy parts
 	parts, err := getCopyParts(srcBucket, srcObjectKey, partSize)
 	if err != nil {
 		return err
 	}
 
-	// 初始化上传任务
+	// Initialize the multipart upload
 	imur, err := descBucket.InitiateMultipartUpload(destObjectKey, options...)
 	if err != nil {
 		return err
@@ -166,16 +165,16 @@ func (bucket Bucket) copyFile(srcBucketName, srcObjectKey, destBucketName, destO
 	event := newProgressEvent(TransferStartedEvent, 0, totalBytes)
 	publishProgress(listener, event)
 
-	// 启动工作协程
+	// Start to copy workers
 	arg := copyWorkerArg{descBucket, imur, srcBucketName, srcObjectKey, options, copyPartHooker}
 	for w := 1; w <= routines; w++ {
 		go copyWorker(w, arg, jobs, results, failed, die)
 	}
 
-	// 并发上传分片
+	// Start the scheduler
 	go copyScheduler(jobs, parts)
 
-	// 等待分片下载完成
+	// Wait for all parts to finish.
 	completed := 0
 	ups := make([]UploadPart, len(parts))
 	for completed < len(parts) {
@@ -202,7 +201,7 @@ func (bucket Bucket) copyFile(srcBucketName, srcObjectKey, destBucketName, destO
 	event = newProgressEvent(TransferCompletedEvent, completedBytes, totalBytes)
 	publishProgress(listener, event)
 
-	// 提交任务
+	// Complete the multipart upload
 	_, err = descBucket.CompleteMultipartUpload(imur, ups)
 	if err != nil {
 		bucket.AbortMultipartUpload(imur)
@@ -211,27 +210,27 @@ func (bucket Bucket) copyFile(srcBucketName, srcObjectKey, destBucketName, destO
 	return nil
 }
 
-// ----- 并发有断点的下载  -----
+// ----- Concurrently copy with checkpoint  -----
 
 const copyCpMagic = "84F1F18C-FF1D-403B-A1D8-9DEB5F65910A"
 
 type copyCheckpoint struct {
-	Magic          string       // magic
-	MD5            string       // cp内容的MD5
-	SrcBucketName  string       // 源Bucket
-	SrcObjectKey   string       // 源Object
-	DestBucketName string       // 目标Bucket
-	DestObjectKey  string       // 目标Bucket
-	CopyID         string       // copy id
-	ObjStat        objectStat   // 文件状态
-	Parts          []copyPart   // 全部分片
-	CopyParts      []UploadPart // 分片上传成功后的返回值
-	PartStat       []bool       // 分片下载是否完成
+	Magic          string       // Magic
+	MD5            string       // CP content MD5
+	SrcBucketName  string       // Source bucket
+	SrcObjectKey   string       // Source object
+	DestBucketName string       // Target bucket
+	DestObjectKey  string       // Target object
+	CopyID         string       // Copy ID
+	ObjStat        objectStat   // Object stat
+	Parts          []copyPart   // Copy parts
+	CopyParts      []UploadPart // The uploaded parts
+	PartStat       []bool       // The part status
 }
 
-// CP数据是否有效,CP有效且Object没有更新时有效
+// isValid checks if the data is valid which means CP is valid and object is not updated.
 func (cp copyCheckpoint) isValid(bucket *Bucket, objectKey string) (bool, error) {
-	// 比较CP的Magic及MD5
+	// Compare CP's magic number and the MD5.
 	cpb := cp
 	cpb.MD5 = ""
 	js, _ := json.Marshal(cpb)
@@ -242,7 +241,7 @@ func (cp copyCheckpoint) isValid(bucket *Bucket, objectKey string) (bool, error)
 		return false, nil
 	}
 
-	// 确认object没有更新
+	// Make sure the object is not updated.
 	meta, err := bucket.GetObjectDetailedMeta(objectKey)
 	if err != nil {
 		return false, err
@@ -253,7 +252,7 @@ func (cp copyCheckpoint) isValid(bucket *Bucket, objectKey string) (bool, error)
 		return false, err
 	}
 
-	// 比较Object的大小/最后修改时间/etag
+	// Compare the object size and last modified time and etag.
 	if cp.ObjStat.Size != objectSize ||
 		cp.ObjStat.LastModified != meta.Get(HTTPHeaderLastModified) ||
 		cp.ObjStat.Etag != meta.Get(HTTPHeaderEtag) {
@@ -263,7 +262,7 @@ func (cp copyCheckpoint) isValid(bucket *Bucket, objectKey string) (bool, error)
 	return true, nil
 }
 
-// 从文件中load
+// load loads from the checkpoint file
 func (cp *copyCheckpoint) load(filePath string) error {
 	contents, err := ioutil.ReadFile(filePath)
 	if err != nil {
@@ -274,17 +273,17 @@ func (cp *copyCheckpoint) load(filePath string) error {
 	return err
 }
 
-// 更新分片状态
+// update updates the parts status
 func (cp *copyCheckpoint) update(part UploadPart) {
 	cp.CopyParts[part.PartNumber-1] = part
 	cp.PartStat[part.PartNumber-1] = true
 }
 
-// dump到文件
+// dump dumps the CP to the file
 func (cp *copyCheckpoint) dump(filePath string) error {
 	bcp := *cp
 
-	// 计算MD5
+	// Calculate MD5
 	bcp.MD5 = ""
 	js, err := json.Marshal(bcp)
 	if err != nil {
@@ -294,17 +293,17 @@ func (cp *copyCheckpoint) dump(filePath string) error {
 	b64 := base64.StdEncoding.EncodeToString(sum[:])
 	bcp.MD5 = b64
 
-	// 序列化
+	// Serialization
 	js, err = json.Marshal(bcp)
 	if err != nil {
 		return err
 	}
 
-	// dump
+	// Dump
 	return ioutil.WriteFile(filePath, js, FilePermMode)
 }
 
-// 未完成的分片
+// todoParts returns unfinished parts
 func (cp copyCheckpoint) todoParts() []copyPart {
 	dps := []copyPart{}
 	for i, ps := range cp.PartStat {
@@ -315,7 +314,7 @@ func (cp copyCheckpoint) todoParts() []copyPart {
 	return dps
 }
 
-// 完成的字节数
+// getCompletedBytes returns finished bytes count
 func (cp copyCheckpoint) getCompletedBytes() int64 {
 	var completedBytes int64
 	for i, part := range cp.Parts {
@@ -326,17 +325,17 @@ func (cp copyCheckpoint) getCompletedBytes() int64 {
 	return completedBytes
 }
 
-// 初始化下载任务
+// prepare initializes the multipart upload
 func (cp *copyCheckpoint) prepare(srcBucket *Bucket, srcObjectKey string, destBucket *Bucket, destObjectKey string,
 	partSize int64, options []Option) error {
-	// cp
+	// CP
 	cp.Magic = copyCpMagic
 	cp.SrcBucketName = srcBucket.BucketName
 	cp.SrcObjectKey = srcObjectKey
 	cp.DestBucketName = destBucket.BucketName
 	cp.DestObjectKey = destObjectKey
 
-	// object
+	// Object
 	meta, err := srcBucket.GetObjectDetailedMeta(srcObjectKey)
 	if err != nil {
 		return err
@@ -351,7 +350,7 @@ func (cp *copyCheckpoint) prepare(srcBucket *Bucket, srcObjectKey string, destBu
 	cp.ObjStat.LastModified = meta.Get(HTTPHeaderLastModified)
 	cp.ObjStat.Etag = meta.Get(HTTPHeaderEtag)
 
-	// parts
+	// Parts
 	cp.Parts, err = getCopyParts(srcBucket, srcObjectKey, partSize)
 	if err != nil {
 		return err
@@ -362,7 +361,7 @@ func (cp *copyCheckpoint) prepare(srcBucket *Bucket, srcObjectKey string, destBu
 	}
 	cp.CopyParts = make([]UploadPart, len(cp.Parts))
 
-	// init copy
+	// Init copy
 	imur, err := destBucket.InitiateMultipartUpload(destObjectKey, options...)
 	if err != nil {
 		return err
@@ -383,21 +382,21 @@ func (cp *copyCheckpoint) complete(bucket *Bucket, parts []UploadPart, cpFilePat
 	return err
 }
 
-// 并发带断点的下载
+// copyFileWithCp is a concurrent copy operation with checkpoint
 func (bucket Bucket) copyFileWithCp(srcBucketName, srcObjectKey, destBucketName, destObjectKey string,
 	partSize int64, options []Option, cpFilePath string, routines int) error {
 	descBucket, err := bucket.Client.Bucket(destBucketName)
 	srcBucket, err := bucket.Client.Bucket(srcBucketName)
 	listener := getProgressListener(options)
 
-	// LOAD CP数据
+	// Load CP data
 	ccp := copyCheckpoint{}
 	err = ccp.load(cpFilePath)
 	if err != nil {
 		os.Remove(cpFilePath)
 	}
 
-	// LOAD出错或数据无效重新初始化下载
+	// Load error or the CP data is invalid---reinitialize
 	valid, err := ccp.isValid(srcBucket, srcObjectKey)
 	if err != nil || !valid {
 		if err = ccp.prepare(srcBucket, srcObjectKey, descBucket, destObjectKey, partSize, options); err != nil {
@@ -406,7 +405,7 @@ func (bucket Bucket) copyFileWithCp(srcBucketName, srcObjectKey, destBucketName,
 		os.Remove(cpFilePath)
 	}
 
-	// 未完成的分片
+	// Unfinished parts
 	parts := ccp.todoParts()
 	imur := InitiateMultipartUploadResult{
 		Bucket:   destBucketName,
@@ -422,16 +421,16 @@ func (bucket Bucket) copyFileWithCp(srcBucketName, srcObjectKey, destBucketName,
 	event := newProgressEvent(TransferStartedEvent, completedBytes, ccp.ObjStat.Size)
 	publishProgress(listener, event)
 
-	// 启动工作协程
+	// Start the worker coroutines
 	arg := copyWorkerArg{descBucket, imur, srcBucketName, srcObjectKey, options, copyPartHooker}
 	for w := 1; w <= routines; w++ {
 		go copyWorker(w, arg, jobs, results, failed, die)
 	}
 
-	// 并发下载分片
+	// Start the scheduler
 	go copyScheduler(jobs, parts)
 
-	// 等待分片下载完成
+	// Wait for the parts to be completed.
 	completed := 0
 	for completed < len(parts) {
 		select {

+ 46 - 45
oss/multicopy_test.go

@@ -15,7 +15,7 @@ type OssCopySuite struct {
 
 var _ = Suite(&OssCopySuite{})
 
-// Run once when the suite starts running
+// SetUpSuite runs once when the suite starts running
 func (s *OssCopySuite) SetUpSuite(c *C) {
 	client, err := New(endpoint, accessID, accessKey)
 	c.Assert(err, IsNil)
@@ -31,7 +31,7 @@ func (s *OssCopySuite) SetUpSuite(c *C) {
 	testLogger.Println("test copy started")
 }
 
-// Run before each test or benchmark starts running
+// TearDownSuite runs once after all tests or benchmarks have finished running
 func (s *OssCopySuite) TearDownSuite(c *C) {
 	// Delete Part
 	lmur, err := s.bucket.ListMultipartUploads()
@@ -44,7 +44,7 @@ func (s *OssCopySuite) TearDownSuite(c *C) {
 		c.Assert(err, IsNil)
 	}
 
-	//Delete Objects
+	// Delete objects
 	lor, err := s.bucket.ListObjects()
 	c.Assert(err, IsNil)
 
@@ -56,31 +56,31 @@ func (s *OssCopySuite) TearDownSuite(c *C) {
 	testLogger.Println("test copy completed")
 }
 
-// Run after each test or benchmark runs
+// SetUpTest runs before each test or benchmark starts running
 func (s *OssCopySuite) SetUpTest(c *C) {
 	err := removeTempFiles("../oss", ".jpg")
 	c.Assert(err, IsNil)
 }
 
-// Run once after all tests or benchmarks have finished running
+// TearDownTest runs after each test or benchmark finishes running
 func (s *OssCopySuite) TearDownTest(c *C) {
 	err := removeTempFiles("../oss", ".jpg")
 	c.Assert(err, IsNil)
 }
 
-// TestCopyRoutineWithoutRecovery 多线程无断点恢复的复制
+// TestCopyRoutineWithoutRecovery is multi-routine copy without resumable recovery
 func (s *OssCopySuite) TestCopyRoutineWithoutRecovery(c *C) {
 	srcObjectName := objectNamePrefix + "tcrwr"
 	destObjectName := srcObjectName + "-copy"
 	fileName := "../sample/BingWallpaper-2015-11-07.jpg"
 	newFile := "copy-new-file.jpg"
 
-	// 上传源文件
+	// Upload source file
 	err := s.bucket.UploadFile(srcObjectName, fileName, 100*1024, Routines(3))
 	c.Assert(err, IsNil)
 	os.Remove(newFile)
 
-	// 不指定Routines,默认单线程
+	// Does not specify parameter 'routines', by default it's single routine
 	err = s.bucket.CopyFile(bucketName, srcObjectName, destObjectName, 100*1024)
 	c.Assert(err, IsNil)
 
@@ -95,7 +95,7 @@ func (s *OssCopySuite) TestCopyRoutineWithoutRecovery(c *C) {
 	c.Assert(err, IsNil)
 	os.Remove(newFile)
 
-	// 指定线程数1
+	// Specify one routine.
 	err = s.bucket.CopyFile(bucketName, srcObjectName, destObjectName, 100*1024, Routines(1))
 	c.Assert(err, IsNil)
 
@@ -110,7 +110,7 @@ func (s *OssCopySuite) TestCopyRoutineWithoutRecovery(c *C) {
 	c.Assert(err, IsNil)
 	os.Remove(newFile)
 
-	// 指定线程数3,小于分片数5
+	// Specify three routines, which is less than parts count 5
 	err = s.bucket.CopyFile(bucketName, srcObjectName, destObjectName, 100*1024, Routines(3))
 	c.Assert(err, IsNil)
 
@@ -125,7 +125,7 @@ func (s *OssCopySuite) TestCopyRoutineWithoutRecovery(c *C) {
 	c.Assert(err, IsNil)
 	os.Remove(newFile)
 
-	// 指定线程数5,等于分片数
+	// Specify 5 routines which is the same as parts count
 	err = s.bucket.CopyFile(bucketName, srcObjectName, destObjectName, 100*1024, Routines(5))
 	c.Assert(err, IsNil)
 
@@ -140,7 +140,7 @@ func (s *OssCopySuite) TestCopyRoutineWithoutRecovery(c *C) {
 	c.Assert(err, IsNil)
 	os.Remove(newFile)
 
-	// 指定线程数10,大于分片数5
+	// Specify routine count 10, which is more than parts count
 	err = s.bucket.CopyFile(bucketName, srcObjectName, destObjectName, 100*1024, Routines(10))
 	c.Assert(err, IsNil)
 
@@ -155,7 +155,7 @@ func (s *OssCopySuite) TestCopyRoutineWithoutRecovery(c *C) {
 	c.Assert(err, IsNil)
 	os.Remove(newFile)
 
-	// 线程值无效自动变成1
+	// Invalid routine count, will use single routine
 	err = s.bucket.CopyFile(bucketName, srcObjectName, destObjectName, 100*1024, Routines(-1))
 	c.Assert(err, IsNil)
 
@@ -170,7 +170,7 @@ func (s *OssCopySuite) TestCopyRoutineWithoutRecovery(c *C) {
 	c.Assert(err, IsNil)
 	os.Remove(newFile)
 
-	// option
+	// Option
 	err = s.bucket.CopyFile(bucketName, srcObjectName, destObjectName, 100*1024, Routines(3), Meta("myprop", "mypropval"))
 
 	meta, err := s.bucket.GetObjectDetailedMeta(destObjectName)
@@ -192,7 +192,7 @@ func (s *OssCopySuite) TestCopyRoutineWithoutRecovery(c *C) {
 	c.Assert(err, IsNil)
 }
 
-// CopyErrorHooker CopyPart请求Hook
+// CopyErrorHooker is a copypart request hook
 func CopyErrorHooker(part copyPart) error {
 	if part.Number == 5 {
 		time.Sleep(time.Second)
@@ -201,64 +201,65 @@ func CopyErrorHooker(part copyPart) error {
 	return nil
 }
 
-// TestCopyRoutineWithoutRecoveryNegative 多线程无断点恢复的复制
+// TestCopyRoutineWithoutRecoveryNegative tests negative cases of multi-routine copy without checkpoint
 func (s *OssCopySuite) TestCopyRoutineWithoutRecoveryNegative(c *C) {
 	srcObjectName := objectNamePrefix + "tcrwrn"
 	destObjectName := srcObjectName + "-copy"
 	fileName := "../sample/BingWallpaper-2015-11-07.jpg"
 
-	// 上传源文件
+	// Upload source file
 	err := s.bucket.UploadFile(srcObjectName, fileName, 100*1024, Routines(3))
 	c.Assert(err, IsNil)
 
 	copyPartHooker = CopyErrorHooker
-	// worker线程错误
+	// Worker routine errors
 	err = s.bucket.CopyFile(bucketName, srcObjectName, destObjectName, 100*1024, Routines(2))
 
 	c.Assert(err, NotNil)
 	c.Assert(err.Error(), Equals, "ErrorHooker")
 	copyPartHooker = defaultCopyPartHook
 
-	// 源Bucket不存在
+	// Source bucket does not exist
 	err = s.bucket.CopyFile("NotExist", srcObjectName, destObjectName, 100*1024, Routines(2))
 	c.Assert(err, NotNil)
 
-	// 源Object不存在
+	// Source object does not exist
 	err = s.bucket.CopyFile(bucketName, "NotExist", destObjectName, 100*1024, Routines(2))
 
-	// 指定的分片大小无效
+	// The part size is invalid
 	err = s.bucket.CopyFile(bucketName, srcObjectName, destObjectName, 1024, Routines(2))
 	c.Assert(err, NotNil)
 
 	err = s.bucket.CopyFile(bucketName, srcObjectName, destObjectName, 1024*1024*1024*100, Routines(2))
 	c.Assert(err, NotNil)
 
-	// 删除源文件
+	// Delete the source file
 	err = s.bucket.DeleteObject(srcObjectName)
 	c.Assert(err, IsNil)
 }
 
-// TestCopyRoutineWithRecovery 多线程且有断点恢复的复制
+// TestCopyRoutineWithRecovery is a multiple routines copy with resumable recovery
 func (s *OssCopySuite) TestCopyRoutineWithRecovery(c *C) {
 	srcObjectName := objectNamePrefix + "tcrtr"
 	destObjectName := srcObjectName + "-copy"
 	fileName := "../sample/BingWallpaper-2015-11-07.jpg"
 	newFile := "copy-new-file.jpg"
 
-	// 上传源文件
+	// Upload source file
 	err := s.bucket.UploadFile(srcObjectName, fileName, 100*1024, Routines(3))
 	c.Assert(err, IsNil)
 	os.Remove(newFile)
 
-	// Routines默认值,CP开启默认路径是destObjectName+.cp
-	// 第一次上传,上传4片
+	// Routines default value, CP's default path is destObjectName+.cp
+	// Copy object with checkpoint enabled, single routine.
+	// Copy 4 parts---the CopyErrorHooker makes sure the copy of part 5 will fail.
 	copyPartHooker = CopyErrorHooker
 	err = s.bucket.CopyFile(bucketName, srcObjectName, destObjectName, 1024*100, Checkpoint(true, ""))
 	c.Assert(err, NotNil)
 	c.Assert(err.Error(), Equals, "ErrorHooker")
 	copyPartHooker = defaultCopyPartHook
 
-	// check cp
+	// Check CP
 	ccp := copyCheckpoint{}
 	err = ccp.load(destObjectName + ".cp")
 	c.Assert(err, IsNil)
@@ -276,7 +277,7 @@ func (s *OssCopySuite) TestCopyRoutineWithRecovery(c *C) {
 	c.Assert(len(ccp.todoParts()), Equals, 1)
 	c.Assert(ccp.PartStat[4], Equals, false)
 
-	// 第二次上传,完成剩余的一片
+	// Second copy, finish the last part
 	err = s.bucket.CopyFile(bucketName, srcObjectName, destObjectName, 1024*100, Checkpoint(true, ""))
 	c.Assert(err, IsNil)
 
@@ -294,14 +295,14 @@ func (s *OssCopySuite) TestCopyRoutineWithRecovery(c *C) {
 	err = ccp.load(fileName + ".cp")
 	c.Assert(err, NotNil)
 
-	// Routines指定,CP指定
+	// Specify Routine and CP's path
 	copyPartHooker = CopyErrorHooker
 	err = s.bucket.CopyFile(bucketName, srcObjectName, destObjectName, 1024*100, Routines(2), Checkpoint(true, srcObjectName+".cp"))
 	c.Assert(err, NotNil)
 	c.Assert(err.Error(), Equals, "ErrorHooker")
 	copyPartHooker = defaultCopyPartHook
 
-	// check cp
+	// Check CP
 	ccp = copyCheckpoint{}
 	err = ccp.load(srcObjectName + ".cp")
 	c.Assert(err, IsNil)
@@ -319,7 +320,7 @@ func (s *OssCopySuite) TestCopyRoutineWithRecovery(c *C) {
 	c.Assert(len(ccp.todoParts()), Equals, 1)
 	c.Assert(ccp.PartStat[4], Equals, false)
 
-	// 第二次上传,完成剩余的一片
+	// Second copy, finish the last part.
 	err = s.bucket.CopyFile(bucketName, srcObjectName, destObjectName, 1024*100, Routines(2), Checkpoint(true, srcObjectName+".cp"))
 	c.Assert(err, IsNil)
 
@@ -337,7 +338,7 @@ func (s *OssCopySuite) TestCopyRoutineWithRecovery(c *C) {
 	err = ccp.load(srcObjectName + ".cp")
 	c.Assert(err, NotNil)
 
-	// 一次完成上传,中间没有错误
+	// First copy without error.
 	err = s.bucket.CopyFile(bucketName, srcObjectName, destObjectName, 1024*100, Routines(3), Checkpoint(true, ""))
 	c.Assert(err, IsNil)
 
@@ -352,7 +353,7 @@ func (s *OssCopySuite) TestCopyRoutineWithRecovery(c *C) {
 	c.Assert(err, IsNil)
 	os.Remove(newFile)
 
-	// 用多协程下载,中间没有错误
+	// Copy with multiple coroutines, no errors.
 	err = s.bucket.CopyFile(bucketName, srcObjectName, destObjectName, 1024*100, Routines(10), Checkpoint(true, ""))
 	c.Assert(err, IsNil)
 
@@ -367,7 +368,7 @@ func (s *OssCopySuite) TestCopyRoutineWithRecovery(c *C) {
 	c.Assert(err, IsNil)
 	os.Remove(newFile)
 
-	// option
+	// Option
 	err = s.bucket.CopyFile(bucketName, srcObjectName, destObjectName, 1024*100, Routines(5), Checkpoint(true, ""), Meta("myprop", "mypropval"))
 	c.Assert(err, IsNil)
 
@@ -386,26 +387,26 @@ func (s *OssCopySuite) TestCopyRoutineWithRecovery(c *C) {
 	c.Assert(err, IsNil)
 	os.Remove(newFile)
 
-	// 删除源文件
+	// Delete the source file
 	err = s.bucket.DeleteObject(srcObjectName)
 	c.Assert(err, IsNil)
 }
 
-// TestCopyRoutineWithRecoveryNegative 多线程无断点恢复的复制
+// TestCopyRoutineWithRecoveryNegative tests negative cases of multi-routine copy with checkpoint
 func (s *OssCopySuite) TestCopyRoutineWithRecoveryNegative(c *C) {
 	srcObjectName := objectNamePrefix + "tcrwrn"
 	destObjectName := srcObjectName + "-copy"
 
-	// 源Bucket不存在
+	// Source bucket does not exist
 	err := s.bucket.CopyFile("NotExist", srcObjectName, destObjectName, 100*1024, Checkpoint(true, ""))
 	c.Assert(err, NotNil)
 	c.Assert(err, NotNil)
 
-	// 源Object不存在
+	// Source object does not exist
 	err = s.bucket.CopyFile(bucketName, "NotExist", destObjectName, 100*1024, Routines(2), Checkpoint(true, ""))
 	c.Assert(err, NotNil)
 
-	// 指定的分片大小无效
+	// The specified part size is invalid.
 	err = s.bucket.CopyFile(bucketName, srcObjectName, destObjectName, 1024, Checkpoint(true, ""))
 	c.Assert(err, NotNil)
 
@@ -413,7 +414,7 @@ func (s *OssCopySuite) TestCopyRoutineWithRecoveryNegative(c *C) {
 	c.Assert(err, NotNil)
 }
 
-// TestCopyFileCrossBucket 跨Bucket直接的复制
+// TestCopyFileCrossBucket tests a direct copy across buckets.
 func (s *OssCopySuite) TestCopyFileCrossBucket(c *C) {
 	destBucketName := bucketName + "-cfcb-desc"
 	srcObjectName := objectNamePrefix + "tcrtr"
@@ -424,15 +425,15 @@ func (s *OssCopySuite) TestCopyFileCrossBucket(c *C) {
 	destBucket, err := s.client.Bucket(destBucketName)
 	c.Assert(err, IsNil)
 
-	// 创建目标Bucket
+	// Create a target bucket
 	err = s.client.CreateBucket(destBucketName)
 
-	// 上传源文件
+	// Upload source file
 	err = s.bucket.UploadFile(srcObjectName, fileName, 100*1024, Routines(3))
 	c.Assert(err, IsNil)
 	os.Remove(newFile)
 
-	// 复制文件
+	// Copy files
 	err = destBucket.CopyFile(bucketName, srcObjectName, destObjectName, 1024*100, Routines(5), Checkpoint(true, ""))
 	c.Assert(err, IsNil)
 
@@ -447,7 +448,7 @@ func (s *OssCopySuite) TestCopyFileCrossBucket(c *C) {
 	c.Assert(err, IsNil)
 	os.Remove(newFile)
 
-	// 带option的复制
+	// Copy file with options
 	err = destBucket.CopyFile(bucketName, srcObjectName, destObjectName, 1024*100, Routines(10), Checkpoint(true, "copy.cp"), Meta("myprop", "mypropval"))
 	c.Assert(err, IsNil)
 
@@ -462,7 +463,7 @@ func (s *OssCopySuite) TestCopyFileCrossBucket(c *C) {
 	c.Assert(err, IsNil)
 	os.Remove(newFile)
 
-	// 删除目标Bucket
+	// Delete target bucket
 	err = s.client.DeleteBucket(destBucketName)
 	c.Assert(err, IsNil)
 }

+ 58 - 71
oss/multipart.go

@@ -10,16 +10,15 @@ import (
 	"strconv"
 )
 
+// InitiateMultipartUpload initializes multipart upload
 //
-// InitiateMultipartUpload 初始化分片上传任务。
+// objectKey    object name
+// options    the options for this upload. The valid options are CacheControl, ContentDisposition, ContentEncoding, Expires,
+//            ServerSideEncryption, Meta, check out the following link:
+//            https://help.aliyun.com/document_detail/oss/api-reference/multipart-upload/InitiateMultipartUpload.html
 //
-// objectKey  Object名称。
-// options    上传时可以指定Object的属性,可选属性有CacheControl、ContentDisposition、ContentEncoding、Expires、
-// ServerSideEncryption、Meta,具体含义请参考
-// https://help.aliyun.com/document_detail/oss/api-reference/multipart-upload/InitiateMultipartUpload.html
-//
-// InitiateMultipartUploadResult 初始化后操作成功的返回值,用于后面的UploadPartFromFile、UploadPartCopy等操作。error为nil时有效。
-// error  操作成功error为nil,非nil为错误信息。
+// InitiateMultipartUploadResult    the return value of the InitiateMultipartUpload, which is used for calls later on such as UploadPartFromFile, UploadPartCopy.
+// error    it's nil if the operation succeeds, otherwise it's an error object.
 //
 func (bucket Bucket) InitiateMultipartUpload(objectKey string, options ...Option) (InitiateMultipartUploadResult, error) {
 	var imur InitiateMultipartUploadResult
@@ -36,23 +35,20 @@ func (bucket Bucket) InitiateMultipartUpload(objectKey string, options ...Option
 	return imur, err
 }
 
+// UploadPart uploads parts
 //
-// UploadPart 上传分片。
-//
-// 初始化一个Multipart Upload之后,可以根据指定的Object名和Upload ID来分片(Part)上传数据。
-// 每一个上传的Part都有一个标识它的号码(part number,范围是1~10000)。对于同一个Upload ID,
-// 该号码不但唯一标识这一片数据,也标识了这片数据在整个文件内的相对位置。如果您用同一个part号码,上传了新的数据,
-// 那么OSS上已有的这个号码的Part数据将被覆盖。除了最后一片Part以外,其他的part最小为100KB;
-// 最后一片Part没有大小限制。
+// After initializing a Multipart Upload, the upload Id and object key could be used for uploading the parts.
+// Each part has its part number (ranges from 1 to 10,000). And for each upload Id, the part number identifies the position of the part in the whole file.
+// And thus with the same part number and upload Id, another part upload will overwrite the data.
+// Except the last one, minimal part size is 100KB. There's no limit on the last part size.
 //
-// imur        InitiateMultipartUpload成功后的返回值。
-// reader      io.Reader 需要分片上传的reader。
-// size        本次上传片Part的大小。
-// partNumber  本次上传片(Part)的编号,范围是1~10000。如果超出范围,OSS将返回InvalidArgument错误。
+// imur    the returned value of InitiateMultipartUpload.
+// reader    io.Reader the reader for the part's data.
+// size    the part size.
+// partNumber    the part number (ranges from 1 to 10,000). Invalid part number will lead to InvalidArgument error.
 //
-// UploadPart 上传成功的返回值,两个成员PartNumber、ETag。PartNumber片编号,即传入参数partNumber;
-// ETag及上传数据的MD5。error为nil时有效。
-// error 操作成功error为nil,非nil为错误信息。
+// UploadPart    the return value of the upload part. It consists of PartNumber and ETag. It's valid when error is nil.
+// error    it's nil if the operation succeeds, otherwise it's an error object.
 //
 func (bucket Bucket) UploadPart(imur InitiateMultipartUploadResult, reader io.Reader,
 	partSize int64, partNumber int, options ...Option) (UploadPart, error) {
@@ -68,18 +64,16 @@ func (bucket Bucket) UploadPart(imur InitiateMultipartUploadResult, reader io.Re
 	return result.Part, err
 }
 
+// UploadPartFromFile uploads part from the file.
 //
-// UploadPartFromFile 上传分片。
-//
-// imur           InitiateMultipartUpload成功后的返回值。
-// filePath       需要分片上传的本地文件。
-// startPosition  本次上传文件片的起始位置。
-// partSize       本次上传文件片的大小。
-// partNumber     本次上传文件片的编号,范围是1~10000。
+// imur    the return value of a successful InitiateMultipartUpload.
+// filePath    the local file path to upload.
+// startPosition    the start position in the local file.
+// partSize    the part size.
+// partNumber    the part number (from 1 to 10,000)
 //
-// UploadPart 上传成功的返回值,两个成员PartNumber、ETag。PartNumber片编号,传入参数partNumber;
-// ETag上传数据的MD5。error为nil时有效。
-// error 操作成功error为nil,非nil为错误信息。
+// UploadPart    the return value consists of PartNumber and ETag.
+// error    it's nil if the operation succeeds, otherwise it's an error object.
 //
 func (bucket Bucket) UploadPartFromFile(imur InitiateMultipartUploadResult, filePath string,
 	startPosition, partSize int64, partNumber int, options ...Option) (UploadPart, error) {
@@ -103,13 +97,12 @@ func (bucket Bucket) UploadPartFromFile(imur InitiateMultipartUploadResult, file
 	return result.Part, err
 }
 
+// DoUploadPart does the actual part upload.
 //
-// DoUploadPart 上传分片。
+// request    part upload request
 //
-// request 上传分片请求。
-//
-// UploadPartResult 上传分片请求返回值。
-// error  操作无错误为nil,非nil为错误信息。
+// UploadPartResult    the result of uploading part.
+// error    it's nil if the operation succeeds, otherwise it's an error object.
 //
 func (bucket Bucket) DoUploadPart(request *UploadPartRequest, options []Option) (*UploadPartResult, error) {
 	listener := getProgressListener(options)
@@ -139,21 +132,19 @@ func (bucket Bucket) DoUploadPart(request *UploadPartRequest, options []Option)
 	return &UploadPartResult{part}, nil
 }
 
+// UploadPartCopy uploads part copy
 //
-// UploadPartCopy 拷贝分片。
-//
-// imur           InitiateMultipartUpload成功后的返回值。
-// copySrc        源Object名称。
-// startPosition  本次拷贝片(Part)在源Object的起始位置。
-// partSize       本次拷贝片的大小。
-// partNumber     本次拷贝片的编号,范围是1~10000。如果超出范围,OSS将返回InvalidArgument错误。
-// options        copy时源Object的限制条件,满足限制条件时copy,不满足时返回错误。可选条件有CopySourceIfMatch、
-// CopySourceIfNoneMatch、CopySourceIfModifiedSince  CopySourceIfUnmodifiedSince,具体含义请参看
-// https://help.aliyun.com/document_detail/oss/api-reference/multipart-upload/UploadPartCopy.html
+// imur    the return value of InitiateMultipartUpload
+// copySrc    source Object name
+// startPosition    the part's start index in the source file
+// partSize    the part size
+// partNumber    the part number, ranges from 1 to 10,000. If it exceeds the range OSS returns InvalidArgument error.
+// options    the constraints of source object for the copy. The copy happens only when these constraints are met. Otherwise it returns error. The valid options are CopySourceIfMatch,
+//            CopySourceIfNoneMatch, CopySourceIfModifiedSince, CopySourceIfUnmodifiedSince, check out the following link for the detail
+//            https://help.aliyun.com/document_detail/oss/api-reference/multipart-upload/UploadPartCopy.html
 //
-// UploadPart 上传成功的返回值,两个成员PartNumber、ETag。PartNumber片(Part)编号,即传入参数partNumber;
-// ETag及上传数据的MD5。error为nil时有效。
-// error 操作成功error为nil,非nil为错误信息。
+// UploadPart    the return value consists of PartNumber and ETag.
+// error    it's nil if the operation succeeds, otherwise it's an error object.
 //
 func (bucket Bucket) UploadPartCopy(imur InitiateMultipartUploadResult, srcBucketName, srcObjectKey string,
 	startPosition, partSize int64, partNumber int, options ...Option) (UploadPart, error) {
@@ -182,14 +173,13 @@ func (bucket Bucket) UploadPartCopy(imur InitiateMultipartUploadResult, srcBucke
 	return part, nil
 }
 
+// CompleteMultipartUpload completes the multipart upload.
 //
-// CompleteMultipartUpload 提交分片上传任务。
+// imur    the return value of InitiateMultipartUpload.
+// parts    the array of return value of UploadPart/UploadPartFromFile/UploadPartCopy.
 //
-// imur   InitiateMultipartUpload的返回值。
-// parts  UploadPart/UploadPartFromFile/UploadPartCopy返回值组成的数组。
-//
-// CompleteMultipartUploadResponse  操作成功后的返回值。error为nil时有效。
-// error  操作成功error为nil,非nil为错误信息。
+// CompleteMultipartUploadResponse    the return value when the call succeeds. Only valid when the error is nil.
+// error    it's nil if the operation succeeds, otherwise it's an error object.
 //
 func (bucket Bucket) CompleteMultipartUpload(imur InitiateMultipartUploadResult,
 	parts []UploadPart) (CompleteMultipartUploadResult, error) {
@@ -217,12 +207,11 @@ func (bucket Bucket) CompleteMultipartUpload(imur InitiateMultipartUploadResult,
 	return out, err
 }
 
+// AbortMultipartUpload aborts the multipart upload.
 //
-// AbortMultipartUpload 取消分片上传任务。
-//
-// imur  InitiateMultipartUpload的返回值。
+// imur    the return value of InitiateMultipartUpload.
 //
-// error  操作成功error为nil,非nil为错误信息。
+// error    it's nil if the operation succeeds, otherwise it's an error object.
 //
 func (bucket Bucket) AbortMultipartUpload(imur InitiateMultipartUploadResult) error {
 	params := map[string]interface{}{}
@@ -235,13 +224,12 @@ func (bucket Bucket) AbortMultipartUpload(imur InitiateMultipartUploadResult) er
 	return checkRespCode(resp.StatusCode, []int{http.StatusNoContent})
 }
 
+// ListUploadedParts lists the uploaded parts.
 //
-// ListUploadedParts 列出指定上传任务已经上传的分片。
+// imur    the return value of InitiateMultipartUpload.
 //
-// imur  InitiateMultipartUpload的返回值。
-//
-// ListUploadedPartsResponse  操作成功后的返回值,成员UploadedParts已经上传/拷贝的片。error为nil时该返回值有效。
-// error  操作成功error为nil,非nil为错误信息。
+// ListUploadedPartsResponse    the return value if it succeeds, only valid when error is nil.
+// error    it's nil if the operation succeeds, otherwise it's an error object.
 //
 func (bucket Bucket) ListUploadedParts(imur InitiateMultipartUploadResult) (ListUploadedPartsResult, error) {
 	var out ListUploadedPartsResult
@@ -257,14 +245,13 @@ func (bucket Bucket) ListUploadedParts(imur InitiateMultipartUploadResult) (List
 	return out, err
 }
 
+// ListMultipartUploads lists all ongoing multipart upload tasks
 //
-// ListMultipartUploads 列出所有未上传完整的multipart任务列表。
-//
-// options  ListObject的筛选行为。Prefix返回object的前缀,KeyMarker返回object的起始位置,MaxUploads最大数目默认1000,
-// Delimiter用于对Object名字进行分组的字符,所有名字包含指定的前缀且第一次出现delimiter字符之间的object。
+// options    listObject's filter. Prefix specifies the returned object's prefix; KeyMarker specifies the returned object's start point in lexicographic order;
+//            MaxUploads specifies the max entries to return (default 1000); Delimiter is the character for grouping object keys.
 //
-// ListMultipartUploadResponse  操作成功后的返回值,error为nil时该返回值有效。
-// error  操作成功error为nil,非nil为错误信息。
+// ListMultipartUploadResponse    the return value if it succeeds, only valid when error is nil.
+// error    it's nil if the operation succeeds, otherwise it's an error object.
 //
 func (bucket Bucket) ListMultipartUploads(options ...Option) (ListMultipartUploadResult, error) {
 	var out ListMultipartUploadResult

+ 46 - 46
oss/multipart_test.go

@@ -19,7 +19,7 @@ type OssBucketMultipartSuite struct {
 
 var _ = Suite(&OssBucketMultipartSuite{})
 
-// Run once when the suite starts running
+// SetUpSuite runs once when the suite starts running
 func (s *OssBucketMultipartSuite) SetUpSuite(c *C) {
 	client, err := New(endpoint, accessID, accessKey)
 	c.Assert(err, IsNil)
@@ -32,7 +32,7 @@ func (s *OssBucketMultipartSuite) SetUpSuite(c *C) {
 	c.Assert(err, IsNil)
 	s.bucket = bucket
 
-	// Delete Part
+	// Delete part
 	lmur, err := s.bucket.ListMultipartUploads()
 	c.Assert(err, IsNil)
 
@@ -43,7 +43,7 @@ func (s *OssBucketMultipartSuite) SetUpSuite(c *C) {
 		c.Assert(err, IsNil)
 	}
 
-	// Delete Objects
+	// Delete objects
 	lor, err := s.bucket.ListObjects()
 	c.Assert(err, IsNil)
 
@@ -55,9 +55,9 @@ func (s *OssBucketMultipartSuite) SetUpSuite(c *C) {
 	testLogger.Println("test multipart started")
 }
 
-// Run before each test or benchmark starts running
+// TearDownSuite runs once after all tests or benchmarks have finished running
 func (s *OssBucketMultipartSuite) TearDownSuite(c *C) {
-	// Delete Part
+	// Delete part
 	lmur, err := s.bucket.ListMultipartUploads()
 	c.Assert(err, IsNil)
 
@@ -68,7 +68,7 @@ func (s *OssBucketMultipartSuite) TearDownSuite(c *C) {
 		c.Assert(err, IsNil)
 	}
 
-	// Delete Objects
+	// Delete objects
 	lor, err := s.bucket.ListObjects()
 	c.Assert(err, IsNil)
 
@@ -80,13 +80,13 @@ func (s *OssBucketMultipartSuite) TearDownSuite(c *C) {
 	testLogger.Println("test multipart completed")
 }
 
-// Run after each test or benchmark runs
+// SetUpTest runs before each test or benchmark starts running
 func (s *OssBucketMultipartSuite) SetUpTest(c *C) {
 	err := removeTempFiles("../oss", ".jpg")
 	c.Assert(err, IsNil)
 }
 
-// Run once after all tests or benchmarks have finished running
+// TearDownTest runs after each test or benchmark finishes running
 func (s *OssBucketMultipartSuite) TearDownTest(c *C) {
 	err := removeTempFiles("../oss", ".jpg")
 	c.Assert(err, IsNil)
@@ -146,7 +146,7 @@ func (s *OssBucketMultipartSuite) TestMultipartUpload(c *C) {
 	c.Assert(err, IsNil)
 }
 
-// TestMultipartUpload
+// TestMultipartUploadFromFile
 func (s *OssBucketMultipartSuite) TestMultipartUploadFromFile(c *C) {
 	objectName := objectNamePrefix + "tmuff"
 	var fileName = "../sample/BingWallpaper-2015-11-07.jpg"
@@ -243,7 +243,7 @@ func (s *OssBucketMultipartSuite) TestListUploadedParts(c *C) {
 	err = s.bucket.PutObjectFromFile(objectSrc, fileName)
 	c.Assert(err, IsNil)
 
-	// upload
+	// Upload
 	imurUpload, err := s.bucket.InitiateMultipartUpload(objectName)
 	var partsUpload []UploadPart
 	for _, chunk := range chunks {
@@ -252,7 +252,7 @@ func (s *OssBucketMultipartSuite) TestListUploadedParts(c *C) {
 		partsUpload = append(partsUpload, part)
 	}
 
-	// copy
+	// Copy
 	imurCopy, err := s.bucket.InitiateMultipartUpload(objectDesc)
 	var partsCopy []UploadPart
 	for _, chunk := range chunks {
@@ -261,7 +261,7 @@ func (s *OssBucketMultipartSuite) TestListUploadedParts(c *C) {
 		partsCopy = append(partsCopy, part)
 	}
 
-	// list
+	// List
 	lupr, err := s.bucket.ListUploadedParts(imurUpload)
 	c.Assert(err, IsNil)
 	testLogger.Println("lupr:", lupr)
@@ -276,13 +276,13 @@ func (s *OssBucketMultipartSuite) TestListUploadedParts(c *C) {
 	c.Assert(err, IsNil)
 	testLogger.Println("lmur:", lmur)
 
-	// complete
+	// Complete
 	_, err = s.bucket.CompleteMultipartUpload(imurUpload, partsUpload)
 	c.Assert(err, IsNil)
 	_, err = s.bucket.CompleteMultipartUpload(imurCopy, partsCopy)
 	c.Assert(err, IsNil)
 
-	// download
+	// Download
 	err = s.bucket.GetObjectToFile(objectDesc, "newpic3.jpg")
 	c.Assert(err, IsNil)
 	err = s.bucket.GetObjectToFile(objectName, "newpic4.jpg")
@@ -309,7 +309,7 @@ func (s *OssBucketMultipartSuite) TestAbortMultipartUpload(c *C) {
 	err = s.bucket.PutObjectFromFile(objectSrc, fileName)
 	c.Assert(err, IsNil)
 
-	// upload
+	// Upload
 	imurUpload, err := s.bucket.InitiateMultipartUpload(objectName)
 	var partsUpload []UploadPart
 	for _, chunk := range chunks {
@@ -318,7 +318,7 @@ func (s *OssBucketMultipartSuite) TestAbortMultipartUpload(c *C) {
 		partsUpload = append(partsUpload, part)
 	}
 
-	// copy
+	// Copy
 	imurCopy, err := s.bucket.InitiateMultipartUpload(objectDesc)
 	var partsCopy []UploadPart
 	for _, chunk := range chunks {
@@ -327,7 +327,7 @@ func (s *OssBucketMultipartSuite) TestAbortMultipartUpload(c *C) {
 		partsCopy = append(partsCopy, part)
 	}
 
-	// list
+	// List
 	lupr, err := s.bucket.ListUploadedParts(imurUpload)
 	c.Assert(err, IsNil)
 	testLogger.Println("lupr:", lupr)
@@ -343,7 +343,7 @@ func (s *OssBucketMultipartSuite) TestAbortMultipartUpload(c *C) {
 	testLogger.Println("lmur:", lmur)
 	c.Assert(len(lmur.Uploads), Equals, 2)
 
-	// abort
+	// Abort
 	err = s.bucket.AbortMultipartUpload(imurUpload)
 	c.Assert(err, IsNil)
 	err = s.bucket.AbortMultipartUpload(imurCopy)
@@ -354,7 +354,7 @@ func (s *OssBucketMultipartSuite) TestAbortMultipartUpload(c *C) {
 	testLogger.Println("lmur:", lmur)
 	c.Assert(len(lmur.Uploads), Equals, 0)
 
-	// download
+	// Download
 	err = s.bucket.GetObjectToFile(objectDesc, "newpic3.jpg")
 	c.Assert(err, NotNil)
 	err = s.bucket.GetObjectToFile(objectName, "newpic4.jpg")
@@ -434,7 +434,7 @@ func (s *OssBucketMultipartSuite) TestMultipartUploadFromFileOutofOrder(c *C) {
 		_, err := s.bucket.UploadPartFromFile(imur, fileName, chunk.Offset, chunk.Size, (int)(chunk.Number))
 		c.Assert(err, IsNil)
 	}
-	// double upload
+	// Double upload
 	for _, chunk := range chunks {
 		part, err := s.bucket.UploadPartFromFile(imur, fileName, chunk.Offset, chunk.Size, (int)(chunk.Number))
 		c.Assert(err, IsNil)
@@ -472,7 +472,7 @@ func (s *OssBucketMultipartSuite) TestUploadPartCopyOutofOrder(c *C) {
 		_, err := s.bucket.UploadPartCopy(imur, bucketName, objectSrc, chunk.Offset, chunk.Size, (int)(chunk.Number))
 		c.Assert(err, IsNil)
 	}
-	//double copy
+	// Double copy
 	for _, chunk := range chunks {
 		part, err := s.bucket.UploadPartCopy(imur, bucketName, objectSrc, chunk.Offset, chunk.Size, (int)(chunk.Number))
 		c.Assert(err, IsNil)
@@ -492,7 +492,7 @@ func (s *OssBucketMultipartSuite) TestUploadPartCopyOutofOrder(c *C) {
 	c.Assert(err, IsNil)
 }
 
-// TestMultipartUpload
+// TestMultipartUploadFromFileType
 func (s *OssBucketMultipartSuite) TestMultipartUploadFromFileType(c *C) {
 	objectName := objectNamePrefix + "tmuffwm" + ".jpg"
 	var fileName = "../sample/BingWallpaper-2015-11-07.jpg"
@@ -568,7 +568,7 @@ func (s *OssBucketMultipartSuite) TestListMultipartUploads(c *C) {
 	c.Assert(len(lmpu.Uploads), Equals, 18)
 	c.Assert(len(lmpu.CommonPrefixes), Equals, 2)
 
-	// upload-id-marker
+	// Upload-id-marker
 	lmpu, err = s.bucket.ListMultipartUploads(KeyMarker(objectName+"12"), UploadIDMarker("EEE"))
 	c.Assert(err, IsNil)
 	c.Assert(len(lmpu.Uploads), Equals, 15)
@@ -617,12 +617,12 @@ func (s *OssBucketMultipartSuite) TestListMultipartUploadsEncodingKey(c *C) {
 func (s *OssBucketMultipartSuite) TestMultipartNegative(c *C) {
 	objectName := objectNamePrefix + "tmn"
 
-	// key tool long
+	// Key too long
 	data := make([]byte, 100*1024)
 	imur, err := s.bucket.InitiateMultipartUpload(string(data))
 	c.Assert(err, NotNil)
 
-	// imur invalid
+	// Invalid imur 
 	fileName := "../sample/BingWallpaper-2015-11-07.jpg"
 	fd, err := os.Open(fileName)
 	c.Assert(err, IsNil)
@@ -643,7 +643,7 @@ func (s *OssBucketMultipartSuite) TestMultipartNegative(c *C) {
 	_, err = s.bucket.ListUploadedParts(imur)
 	c.Assert(err, NotNil)
 
-	// invalid exist
+	// Invalid exist
 	imur, err = s.bucket.InitiateMultipartUpload(objectName)
 	c.Assert(err, IsNil)
 
@@ -668,7 +668,7 @@ func (s *OssBucketMultipartSuite) TestMultipartNegative(c *C) {
 	err = s.bucket.AbortMultipartUpload(imur)
 	c.Assert(err, IsNil)
 
-	// option invalid
+	// Invalid option 
 	_, err = s.bucket.InitiateMultipartUpload(objectName, IfModifiedSince(futureDate))
 	c.Assert(err, IsNil)
 }
@@ -726,7 +726,7 @@ func (s *OssBucketMultipartSuite) TestUploadFile(c *C) {
 	var fileName = "../sample/BingWallpaper-2015-11-07.jpg"
 	newFile := "newfiletuff.jpg"
 
-	// 有余数
+	// Upload with 100K part size
 	err := s.bucket.UploadFile(objectName, fileName, 100*1024)
 	c.Assert(err, IsNil)
 
@@ -741,7 +741,7 @@ func (s *OssBucketMultipartSuite) TestUploadFile(c *C) {
 	err = s.bucket.DeleteObject(objectName)
 	c.Assert(err, IsNil)
 
-	// 整除
+	// Upload with part size equals to 1/4 of the file size
 	err = s.bucket.UploadFile(objectName, fileName, 482048/4)
 	c.Assert(err, IsNil)
 
@@ -756,7 +756,7 @@ func (s *OssBucketMultipartSuite) TestUploadFile(c *C) {
 	err = s.bucket.DeleteObject(objectName)
 	c.Assert(err, IsNil)
 
-	// 等于文件大小
+	// Upload with part size equals to the file size
 	err = s.bucket.UploadFile(objectName, fileName, 482048)
 	c.Assert(err, IsNil)
 
@@ -771,7 +771,7 @@ func (s *OssBucketMultipartSuite) TestUploadFile(c *C) {
 	err = s.bucket.DeleteObject(objectName)
 	c.Assert(err, IsNil)
 
-	// 大于文件大小
+	// Upload with part size is bigger than the file size
 	err = s.bucket.UploadFile(objectName, fileName, 482049)
 	c.Assert(err, IsNil)
 
@@ -786,7 +786,7 @@ func (s *OssBucketMultipartSuite) TestUploadFile(c *C) {
 	err = s.bucket.DeleteObject(objectName)
 	c.Assert(err, IsNil)
 
-	// option
+	// Option
 	options := []Option{
 		Expires(futureDate),
 		ObjectACL(ACLPublicRead),
@@ -818,19 +818,19 @@ func (s *OssBucketMultipartSuite) TestUploadFileNegative(c *C) {
 	objectName := objectNamePrefix + "tufn"
 	var fileName = "../sample/BingWallpaper-2015-11-07.jpg"
 
-	// 小于最小文件片
+	// Smaller than the required minimal part size (100KB)
 	err := s.bucket.UploadFile(objectName, fileName, 100*1024-1)
 	c.Assert(err, NotNil)
 
-	// 大于最大文件片
+	// Bigger than the max part size (5G)
 	err = s.bucket.UploadFile(objectName, fileName, 1024*1024*1024*5+1)
 	c.Assert(err, NotNil)
 
-	// 文件不存在
+	// File does not exist
 	err = s.bucket.UploadFile(objectName, "/root/123abc9874", 1024*1024*1024)
 	c.Assert(err, NotNil)
 
-	// Key无效
+	// Invalid key , key is empty.
 	err = s.bucket.UploadFile("", fileName, 100*1024)
 	c.Assert(err, NotNil)
 }
@@ -844,7 +844,7 @@ func (s *OssBucketMultipartSuite) TestDownloadFile(c *C) {
 	err := s.bucket.UploadFile(objectName, fileName, 100*1024)
 	c.Assert(err, IsNil)
 
-	// 有余数
+	// Download file with part size of 100K
 	err = s.bucket.DownloadFile(objectName, newFile, 100*1024)
 	c.Assert(err, IsNil)
 
@@ -856,7 +856,7 @@ func (s *OssBucketMultipartSuite) TestDownloadFile(c *C) {
 	c.Assert(err, IsNil)
 	c.Assert(eq, Equals, true)
 
-	// 整除
+	// Download the file with part size equals to 1/4 of the file size
 	err = s.bucket.DownloadFile(objectName, newFile, 482048/4)
 	c.Assert(err, IsNil)
 
@@ -868,7 +868,7 @@ func (s *OssBucketMultipartSuite) TestDownloadFile(c *C) {
 	c.Assert(err, IsNil)
 	c.Assert(eq, Equals, true)
 
-	// 等于文件大小
+	// Download the file with part size same as the file size
 	err = s.bucket.DownloadFile(objectName, newFile, 482048)
 	c.Assert(err, IsNil)
 
@@ -880,7 +880,7 @@ func (s *OssBucketMultipartSuite) TestDownloadFile(c *C) {
 	c.Assert(err, IsNil)
 	c.Assert(eq, Equals, true)
 
-	// 大于文件大小
+	// Download the file with part size bigger than the file size
 	err = s.bucket.DownloadFile(objectName, newFile, 482049)
 	c.Assert(err, IsNil)
 
@@ -892,7 +892,7 @@ func (s *OssBucketMultipartSuite) TestDownloadFile(c *C) {
 	c.Assert(err, IsNil)
 	c.Assert(eq, Equals, true)
 
-	// option
+	// Option
 	meta, err := s.bucket.GetObjectDetailedMeta(objectName)
 	c.Assert(err, IsNil)
 	testLogger.Println("GetObjectDetailedMeta:", meta)
@@ -922,24 +922,24 @@ func (s *OssBucketMultipartSuite) TestDownloadFileNegative(c *C) {
 	objectName := objectNamePrefix + "tufn"
 	newFile := "newfiletudff.jpg"
 
-	// 小于最小文件片
+	// Smaller than the required minimal part size (100KB)
 	err := s.bucket.DownloadFile(objectName, newFile, 100*1024-1)
 	c.Assert(err, NotNil)
 
-	// 大于最大文件片
+	// Bigger than the required max part size (5G)
 	err = s.bucket.DownloadFile(objectName, newFile, 1024*1024*1024+1)
 	c.Assert(err, NotNil)
 
-	// 文件不存在
+	// File does not exist
 	err = s.bucket.DownloadFile(objectName, "/OSS/TEMP/ZIBI/QUQU/BALA", 1024*1024*1024+1)
 	c.Assert(err, NotNil)
 
-	// Key不存在
+	// Key does not exist
 	err = s.bucket.DownloadFile(objectName, newFile, 100*1024)
 	c.Assert(err, NotNil)
 }
 
-// private
+// Private
 func shuffleArray(chunks []FileChunk) []FileChunk {
 	for i := range chunks {
 		j := rand.Intn(i + 1)

+ 12 - 12
oss/option.go

@@ -11,9 +11,9 @@ import (
 type optionType string
 
 const (
-	optionParam optionType = "HTTPParameter" // URL参数
-	optionHTTP  optionType = "HTTPHeader"    // HTTP
-	optionArg   optionType = "FuncArgument"  // 函数参数
+	optionParam optionType = "HTTPParameter" // URL parameter
+	optionHTTP  optionType = "HTTPHeader"    // HTTP header
+	optionArg   optionType = "FuncArgument"  // Function argument
 )
 
 const (
@@ -31,7 +31,7 @@ type (
 		Type  optionType
 	}
 
-	// Option http option
+	// Option HTTP option
 	Option func(map[string]optionValue) error
 )
 
@@ -212,33 +212,33 @@ func UploadIDMarker(value string) Option {
 	return addParam("upload-id-marker", value)
 }
 
-// DeleteObjectsQuiet DeleteObjects详细(verbose)模式或简单(quiet)模式,默认详细模式。
+// DeleteObjectsQuiet false: DeleteObjects in verbose mode; true: DeleteObjects in quiet mode. Default is false.
 func DeleteObjectsQuiet(isQuiet bool) Option {
 	return addArg(deleteObjectsQuiet, isQuiet)
 }
 
-// StorageClass bucket的存储方式
+// StorageClass bucket storage class
 func StorageClass(value StorageClassType) Option {
 	return addArg(storageClass, value)
 }
 
-// 断点续传配置,包括是否启用、cp文件
+// Checkpoint configuration
 type cpConfig struct {
 	IsEnable bool
 	FilePath string
 }
 
-// Checkpoint DownloadFile/UploadFile是否开启checkpoint及checkpoint文件路径
+// Checkpoint sets the isEnable flag and checkpoint file path for DownloadFile/UploadFile.
 func Checkpoint(isEnable bool, filePath string) Option {
 	return addArg(checkpointConfig, &cpConfig{isEnable, filePath})
 }
 
-// Routines DownloadFile/UploadFile并发数
+// Routines DownloadFile/UploadFile routine count
 func Routines(n int) Option {
 	return addArg(routineNum, n)
 }
 
-// InitCRC AppendObject CRC的校验的初始值
+// InitCRC Init AppendObject CRC
 func InitCRC(initCRC uint64) Option {
 	return addArg(initCRC64, initCRC)
 }
@@ -331,7 +331,7 @@ func handleOptions(headers map[string]string, options []Option) error {
 }
 
 func getRawParams(options []Option) (map[string]interface{}, error) {
-	// option
+	// Option
 	params := map[string]optionValue{}
 	for _, option := range options {
 		if option != nil {
@@ -342,7 +342,7 @@ func getRawParams(options []Option) (map[string]interface{}, error) {
 	}
 
 	paramsm := map[string]interface{}{}
-	// serialize
+	// Serialize
 	for k, v := range params {
 		if v.Type == optionParam {
 			vs := params[k]

+ 8 - 8
oss/progress.go

@@ -2,7 +2,7 @@ package oss
 
 import "io"
 
-// ProgressEventType transfer progress event type
+// ProgressEventType defines transfer progress event type
 type ProgressEventType int
 
 const (
@@ -16,19 +16,19 @@ const (
 	TransferFailedEvent
 )
 
-// ProgressEvent progress event
+// ProgressEvent defines progress event
 type ProgressEvent struct {
 	ConsumedBytes int64
 	TotalBytes    int64
 	EventType     ProgressEventType
 }
 
-// ProgressListener listen progress change
+// ProgressListener listens progress change
 type ProgressListener interface {
 	ProgressChanged(event *ProgressEvent)
 }
 
-// -------------------- private --------------------
+// -------------------- Private --------------------
 
 func newProgressEvent(eventType ProgressEventType, consumed, total int64) *ProgressEvent {
 	return &ProgressEvent{
@@ -76,7 +76,7 @@ func TeeReader(reader io.Reader, writer io.Writer, totalBytes int64, listener Pr
 func (t *teeReader) Read(p []byte) (n int, err error) {
 	n, err = t.reader.Read(p)
 
-	// read encountered error
+	// Read encountered error
 	if err != nil && err != io.EOF {
 		event := newProgressEvent(TransferFailedEvent, t.consumedBytes, t.totalBytes)
 		publishProgress(t.listener, event)
@@ -84,18 +84,18 @@ func (t *teeReader) Read(p []byte) (n int, err error) {
 
 	if n > 0 {
 		t.consumedBytes += int64(n)
-		// crc
+		// CRC
 		if t.writer != nil {
 			if n, err := t.writer.Write(p[:n]); err != nil {
 				return n, err
 			}
 		}
-		// progress
+		// Progress
 		if t.listener != nil {
 			event := newProgressEvent(TransferDataEvent, t.consumedBytes, t.totalBytes)
 			publishProgress(t.listener, event)
 		}
-		// track
+		// Track
 		if t.tracker != nil {
 			t.tracker.completedBytes = t.consumedBytes
 		}

+ 19 - 19
oss/progress_test.go

@@ -20,7 +20,7 @@ type OssProgressSuite struct {
 
 var _ = Suite(&OssProgressSuite{})
 
-// Run once when the suite starts running
+// SetUpSuite runs once when the suite starts running
 func (s *OssProgressSuite) SetUpSuite(c *C) {
 	client, err := New(endpoint, accessID, accessKey)
 	c.Assert(err, IsNil)
@@ -36,9 +36,9 @@ func (s *OssProgressSuite) SetUpSuite(c *C) {
 	testLogger.Println("test progress started")
 }
 
-// Run before each test or benchmark starts running
+// TearDownSuite runs once after all tests or benchmarks have finished running
 func (s *OssProgressSuite) TearDownSuite(c *C) {
-	// Delete Multipart
+	// Abort multipart uploads
 	lmu, err := s.bucket.ListMultipartUploads()
 	c.Assert(err, IsNil)
 
@@ -48,7 +48,7 @@ func (s *OssProgressSuite) TearDownSuite(c *C) {
 		c.Assert(err, IsNil)
 	}
 
-	// Delete Objects
+	// Delete objects
 	lor, err := s.bucket.ListObjects()
 	c.Assert(err, IsNil)
 
@@ -60,7 +60,7 @@ func (s *OssProgressSuite) TearDownSuite(c *C) {
 	testLogger.Println("test progress completed")
 }
 
-// Run after each test or benchmark runs
+// SetUpTest runs before each test or benchmark starts running
 func (s *OssProgressSuite) SetUpTest(c *C) {
 	err := removeTempFiles("../oss", ".jpg")
 	c.Assert(err, IsNil)
@@ -72,7 +72,7 @@ func (s *OssProgressSuite) SetUpTest(c *C) {
 	c.Assert(err, IsNil)
 }
 
-// Run once after all tests or benchmarks have finished running
+// TearDownTest runs after each test or benchmark runs
 func (s *OssProgressSuite) TearDownTest(c *C) {
 	err := removeTempFiles("../oss", ".jpg")
 	c.Assert(err, IsNil)
@@ -84,11 +84,11 @@ func (s *OssProgressSuite) TearDownTest(c *C) {
 	c.Assert(err, IsNil)
 }
 
-// OssProgressListener progress listener
+// OssProgressListener is the progress listener
 type OssProgressListener struct {
 }
 
-// ProgressChanged handle progress event
+// ProgressChanged handles progress event
 func (listener *OssProgressListener) ProgressChanged(event *ProgressEvent) {
 	switch event.EventType {
 	case TransferStartedEvent:
@@ -145,21 +145,21 @@ func (s *OssProgressSuite) TestPutObject(c *C) {
 	testLogger.Println("OssProgressSuite.TestPutObject")
 }
 
-// Test SignURL
+// TestSignURL
 func (s *OssProgressSuite) TestSignURL(c *C) {
 	objectName := objectNamePrefix + randStr(5)
 	filePath := randLowStr(10)
 	content := randStr(20)
 	createFile(filePath, content, c)
 
-	// sign url for put
+	// Sign URL for put
 	str, err := s.bucket.SignURL(objectName, HTTPPut, 60, Progress(&OssProgressListener{}))
 	c.Assert(err, IsNil)
 	c.Assert(strings.Contains(str, HTTPParamExpires+"="), Equals, true)
 	c.Assert(strings.Contains(str, HTTPParamAccessKeyID+"="), Equals, true)
 	c.Assert(strings.Contains(str, HTTPParamSignature+"="), Equals, true)
 
-	// put object with url
+	// Put object with URL
 	fd, err := os.Open(filePath)
 	c.Assert(err, IsNil)
 	defer fd.Close()
@@ -167,7 +167,7 @@ func (s *OssProgressSuite) TestSignURL(c *C) {
 	err = s.bucket.PutObjectWithURL(str, fd, Progress(&OssProgressListener{}))
 	c.Assert(err, IsNil)
 
-	// put object from file with url
+	// Put object from file with URL
 	err = s.bucket.PutObjectFromFileWithURL(str, filePath, Progress(&OssProgressListener{}))
 	c.Assert(err, IsNil)
 
@@ -180,21 +180,21 @@ func (s *OssProgressSuite) TestSignURL(c *C) {
 	_, err = s.bucket.DoPutObjectWithURL(str, fd, options)
 	c.Assert(err, IsNil)
 
-	// sign url for get
+	// Sign URL for get
 	str, err = s.bucket.SignURL(objectName, HTTPGet, 60, Progress(&OssProgressListener{}))
 	c.Assert(err, IsNil)
 	c.Assert(strings.Contains(str, HTTPParamExpires+"="), Equals, true)
 	c.Assert(strings.Contains(str, HTTPParamAccessKeyID+"="), Equals, true)
 	c.Assert(strings.Contains(str, HTTPParamSignature+"="), Equals, true)
 
-	// get object with url
+	// Get object with URL
 	body, err := s.bucket.GetObjectWithURL(str, Progress(&OssProgressListener{}))
 	c.Assert(err, IsNil)
 	str, err = readBody(body)
 	c.Assert(err, IsNil)
 	c.Assert(str, Equals, content)
 
-	// get object to file with url
+	// Get object to file with URL
 	str, err = s.bucket.SignURL(objectName, HTTPGet, 10, Progress(&OssProgressListener{}))
 	c.Assert(err, IsNil)
 
@@ -218,7 +218,7 @@ func (s *OssProgressSuite) TestPutObjectNegative(c *C) {
 	objectName := objectNamePrefix + "tpon.html"
 	localFile := "../sample/The Go Programming Language.html"
 
-	// invalid endpoint
+	// Invalid endpoint
 	client, err := New("http://oss-cn-taikang.aliyuncs.com", accessID, accessKey)
 	c.Assert(err, IsNil)
 
@@ -393,7 +393,7 @@ func (s *OssProgressSuite) TestGetObjectNegative(c *C) {
 
 	//time.Sleep(70 * time.Second) TODO
 
-	// read should fail
+	// Read should fail
 	for err == nil {
 		n, err = body.Read(buf)
 		n += n
@@ -424,7 +424,7 @@ func (s *OssProgressSuite) TestDownloadFile(c *C) {
 	fileName := "../sample/BingWallpaper-2015-11-07.jpg"
 	newFile := "down-new-file-progress-2.jpg"
 
-	// upload
+	// Upload
 	err := s.bucket.UploadFile(objectName, fileName, 100*1024, Routines(3))
 	c.Assert(err, IsNil)
 
@@ -446,7 +446,7 @@ func (s *OssProgressSuite) TestCopyFile(c *C) {
 	destObjectName := srcObjectName + "-copy"
 	fileName := "../sample/BingWallpaper-2015-11-07.jpg"
 
-	// upload
+	// Upload
 	err := s.bucket.UploadFile(srcObjectName, fileName, 100*1024, Routines(3))
 	c.Assert(err, IsNil)
 

+ 1 - 1
oss/transport_1_6.go

@@ -9,7 +9,7 @@ import (
 
 func newTransport(conn *Conn, config *Config) *http.Transport {
 	httpTimeOut := conn.config.HTTPTimeout
-	// new Transport
+	// New Transport
 	transport := &http.Transport{
 		Dial: func(netw, addr string) (net.Conn, error) {
 			conn, err := net.DialTimeout(netw, addr, httpTimeOut.ConnectTimeout)

+ 1 - 1
oss/transport_1_7.go

@@ -9,7 +9,7 @@ import (
 
 func newTransport(conn *Conn, config *Config) *http.Transport {
 	httpTimeOut := conn.config.HTTPTimeout
-	// new Transport
+	// New Transport
 	transport := &http.Transport{
 		Dial: func(netw, addr string) (net.Conn, error) {
 			conn, err := net.DialTimeout(netw, addr, httpTimeOut.ConnectTimeout)

+ 144 - 144
oss/type.go

@@ -6,54 +6,54 @@ import (
 	"time"
 )
 
-// ListBucketsResult ListBuckets请求返回的结果
+// ListBucketsResult defines the result object from ListBuckets request
 type ListBucketsResult struct {
 	XMLName     xml.Name           `xml:"ListAllMyBucketsResult"`
-	Prefix      string             `xml:"Prefix"`         // 本次查询结果的前缀
-	Marker      string             `xml:"Marker"`         // 标明查询的起点,未全部返回时有此节点
-	MaxKeys     int                `xml:"MaxKeys"`        // 返回结果的最大数目,未全部返回时有此节点
-	IsTruncated bool               `xml:"IsTruncated"`    // 所有的结果是否已经全部返回
-	NextMarker  string             `xml:"NextMarker"`     // 表示下一次查询的起点
-	Owner       Owner              `xml:"Owner"`          // 拥有者信息
-	Buckets     []BucketProperties `xml:"Buckets>Bucket"` // Bucket列表
+	Prefix      string             `xml:"Prefix"`         // The prefix in this query
+	Marker      string             `xml:"Marker"`         // The marker filter
+	MaxKeys     int                `xml:"MaxKeys"`        // The max entry count to return. This information is returned when IsTruncated is true.
+	IsTruncated bool               `xml:"IsTruncated"`    // Flag true means there are more buckets to return.
+	NextMarker  string             `xml:"NextMarker"`     // The marker filter for the next list call
+	Owner       Owner              `xml:"Owner"`          // The owner information
+	Buckets     []BucketProperties `xml:"Buckets>Bucket"` // The bucket list
 }
 
-// BucketProperties Bucket信息
+// BucketProperties defines bucket properties
 type BucketProperties struct {
 	XMLName      xml.Name  `xml:"Bucket"`
-	Name         string    `xml:"Name"`         // Bucket名称
-	Location     string    `xml:"Location"`     // Bucket所在的数据中心
-	CreationDate time.Time `xml:"CreationDate"` // Bucket创建时间
-	StorageClass string    `xml:"StorageClass"` // Bucket的存储方式
+	Name         string    `xml:"Name"`         // Bucket name
+	Location     string    `xml:"Location"`     // Bucket datacenter
+	CreationDate time.Time `xml:"CreationDate"` // Bucket create time
+	StorageClass string    `xml:"StorageClass"` // Bucket storage class
 }
 
-// GetBucketACLResult GetBucketACL请求返回的结果
+// GetBucketACLResult defines GetBucketACL request's result
 type GetBucketACLResult struct {
 	XMLName xml.Name `xml:"AccessControlPolicy"`
-	ACL     string   `xml:"AccessControlList>Grant"` // Bucket权限
-	Owner   Owner    `xml:"Owner"`                   // Bucket拥有者信息
+	ACL     string   `xml:"AccessControlList>Grant"` // Bucket ACL
+	Owner   Owner    `xml:"Owner"`                   // Bucket owner
 }
 
-// LifecycleConfiguration Bucket的Lifecycle配置
+// LifecycleConfiguration is the Bucket Lifecycle configuration
 type LifecycleConfiguration struct {
 	XMLName xml.Name        `xml:"LifecycleConfiguration"`
 	Rules   []LifecycleRule `xml:"Rule"`
 }
 
-// LifecycleRule Lifecycle规则
+// LifecycleRule defines Lifecycle rules
 type LifecycleRule struct {
 	XMLName    xml.Name            `xml:"Rule"`
-	ID         string              `xml:"ID"`         // 规则唯一的ID
-	Prefix     string              `xml:"Prefix"`     // 规则所适用Object的前缀
-	Status     string              `xml:"Status"`     // 规则是否生效
-	Expiration LifecycleExpiration `xml:"Expiration"` // 规则的过期属性
+	ID         string              `xml:"ID"`         // The rule ID
+	Prefix     string              `xml:"Prefix"`     // The object key prefix
+	Status     string              `xml:"Status"`     // The rule status (enabled or not)
+	Expiration LifecycleExpiration `xml:"Expiration"` // The expiration property
 }
 
-// LifecycleExpiration 规则的过期属性
+// LifecycleExpiration defines the rule's expiration property
 type LifecycleExpiration struct {
 	XMLName xml.Name  `xml:"Expiration"`
-	Days    int       `xml:"Days,omitempty"` // 最后修改时间过后多少天生效
-	Date    time.Time `xml:"Date,omitempty"` // 指定规则何时生效
+	Days    int       `xml:"Days,omitempty"` // Relative expiration time: The expiration time in days after the last modified time
+	Date    time.Time `xml:"Date,omitempty"` // Absolute expiration time: The expiration time in date.
 }
 
 type lifecycleXML struct {
@@ -94,7 +94,7 @@ func convLifecycleRule(rules []LifecycleRule) []lifecycleRule {
 	return rs
 }
 
-// BuildLifecycleRuleByDays 指定过期天数构建Lifecycle规则
+// BuildLifecycleRuleByDays builds a lifecycle rule with specified expiration days
 func BuildLifecycleRuleByDays(id, prefix string, status bool, days int) LifecycleRule {
 	var statusStr = "Enabled"
 	if !status {
@@ -104,7 +104,7 @@ func BuildLifecycleRuleByDays(id, prefix string, status bool, days int) Lifecycl
 		Expiration: LifecycleExpiration{Days: days}}
 }
 
-// BuildLifecycleRuleByDate 指定过期时间构建Lifecycle规则
+// BuildLifecycleRuleByDate builds a lifecycle rule with specified expiration time.
 func BuildLifecycleRuleByDate(id, prefix string, status bool, year, month, day int) LifecycleRule {
 	var statusStr = "Enabled"
 	if !status {
@@ -115,172 +115,172 @@ func BuildLifecycleRuleByDate(id, prefix string, status bool, year, month, day i
 		Expiration: LifecycleExpiration{Date: date}}
 }
 
-// GetBucketLifecycleResult GetBucketLifecycle请求请求结果
+// GetBucketLifecycleResult defines GetBucketLifecycle's result object
 type GetBucketLifecycleResult LifecycleConfiguration
 
-// RefererXML Referer配置
+// RefererXML defines Referer configuration
 type RefererXML struct {
 	XMLName           xml.Name `xml:"RefererConfiguration"`
-	AllowEmptyReferer bool     `xml:"AllowEmptyReferer"`   // 是否允许referer字段为空的请求访问
-	RefererList       []string `xml:"RefererList>Referer"` // referer访问白名单
+	AllowEmptyReferer bool     `xml:"AllowEmptyReferer"`   // Allow empty referrer
+	RefererList       []string `xml:"RefererList>Referer"` // Referer whitelist
 }
 
-// GetBucketRefererResult GetBucketReferer请教返回结果
+// GetBucketRefererResult defines result object for GetBucketReferer request
 type GetBucketRefererResult RefererXML
 
-// LoggingXML Logging配置
+// LoggingXML defines logging configuration
 type LoggingXML struct {
 	XMLName        xml.Name       `xml:"BucketLoggingStatus"`
-	LoggingEnabled LoggingEnabled `xml:"LoggingEnabled"` // 访问日志信息容器
+	LoggingEnabled LoggingEnabled `xml:"LoggingEnabled"` // The logging configuration information
 }
 
 type loggingXMLEmpty struct {
 	XMLName xml.Name `xml:"BucketLoggingStatus"`
 }
 
-// LoggingEnabled 访问日志信息容器
+// LoggingEnabled defines the logging configuration information
 type LoggingEnabled struct {
 	XMLName      xml.Name `xml:"LoggingEnabled"`
-	TargetBucket string   `xml:"TargetBucket"` //存放访问日志的Bucket
-	TargetPrefix string   `xml:"TargetPrefix"` //保存访问日志的文件前缀
+	TargetBucket string   `xml:"TargetBucket"` // The bucket name for storing the log files
+	TargetPrefix string   `xml:"TargetPrefix"` // The log file prefix
 }
 
-// GetBucketLoggingResult GetBucketLogging请求返回结果
+// GetBucketLoggingResult defines the result from GetBucketLogging request
 type GetBucketLoggingResult LoggingXML
 
-// WebsiteXML Website配置
+// WebsiteXML defines Website configuration
 type WebsiteXML struct {
 	XMLName       xml.Name      `xml:"WebsiteConfiguration"`
-	IndexDocument IndexDocument `xml:"IndexDocument"` // 目录URL时添加的索引文件
-	ErrorDocument ErrorDocument `xml:"ErrorDocument"` // 404错误时使用的文件
+	IndexDocument IndexDocument `xml:"IndexDocument"` // The index page
+	ErrorDocument ErrorDocument `xml:"ErrorDocument"` // The error page
 }
 
-// IndexDocument 目录URL时添加的索引文件
+// IndexDocument defines the index page info
 type IndexDocument struct {
 	XMLName xml.Name `xml:"IndexDocument"`
-	Suffix  string   `xml:"Suffix"` // 目录URL时添加的索引文件名
+	Suffix  string   `xml:"Suffix"` // The file name for the index page
 }
 
-// ErrorDocument 404错误时使用的文件
+// ErrorDocument defines the 404 error page info
 type ErrorDocument struct {
 	XMLName xml.Name `xml:"ErrorDocument"`
-	Key     string   `xml:"Key"` // 404错误时使用的文件名
+	Key     string   `xml:"Key"` // 404 error file name
 }
 
-// GetBucketWebsiteResult GetBucketWebsite请求返回结果
+// GetBucketWebsiteResult defines the result from GetBucketWebsite request.
 type GetBucketWebsiteResult WebsiteXML
 
-// CORSXML CORS配置
+// CORSXML defines CORS configuration
 type CORSXML struct {
 	XMLName   xml.Name   `xml:"CORSConfiguration"`
-	CORSRules []CORSRule `xml:"CORSRule"` // CORS规则列表
+	CORSRules []CORSRule `xml:"CORSRule"` // CORS rules
 }
 
-// CORSRule CORS规则
+// CORSRule defines CORS rules
 type CORSRule struct {
 	XMLName       xml.Name `xml:"CORSRule"`
-	AllowedOrigin []string `xml:"AllowedOrigin"` // 允许的来源,默认通配符"*"
-	AllowedMethod []string `xml:"AllowedMethod"` // 允许的方法
-	AllowedHeader []string `xml:"AllowedHeader"` // 允许的请求头
-	ExposeHeader  []string `xml:"ExposeHeader"`  // 允许的响应头
-	MaxAgeSeconds int      `xml:"MaxAgeSeconds"` // 最大的缓存时间
+	AllowedOrigin []string `xml:"AllowedOrigin"` // Allowed origins. By default it's wildcard '*'
+	AllowedMethod []string `xml:"AllowedMethod"` // Allowed methods
+	AllowedHeader []string `xml:"AllowedHeader"` // Allowed headers
+	ExposeHeader  []string `xml:"ExposeHeader"`  // Allowed response headers
+	MaxAgeSeconds int      `xml:"MaxAgeSeconds"` // Max cache ages in seconds
 }
 
-// GetBucketCORSResult GetBucketCORS请求返回的结果
+// GetBucketCORSResult defines the result from GetBucketCORS request.
 type GetBucketCORSResult CORSXML
 
-// GetBucketInfoResult GetBucketInfo请求返回结果
+// GetBucketInfoResult defines the result from GetBucketInfo request.
 type GetBucketInfoResult struct {
 	XMLName    xml.Name   `xml:"BucketInfo"`
 	BucketInfo BucketInfo `xml:"Bucket"`
 }
 
-// BucketInfo Bucket信息
+// BucketInfo defines Bucket information
 type BucketInfo struct {
 	XMLName          xml.Name  `xml:"Bucket"`
-	Name             string    `xml:"Name"`                    // Bucket名称
-	Location         string    `xml:"Location"`                // Bucket所在的数据中心
-	CreationDate     time.Time `xml:"CreationDate"`            // Bucket创建时间
-	ExtranetEndpoint string    `xml:"ExtranetEndpoint"`        // Bucket访问的外网域名
-	IntranetEndpoint string    `xml:"IntranetEndpoint"`        // Bucket访问的内网域名
-	ACL              string    `xml:"AccessControlList>Grant"` // Bucket权限
-	Owner            Owner     `xml:"Owner"`                   // Bucket拥有者信息
-	StorageClass     string    `xml:"StorageClass"`            // Bucket存储类型
+	Name             string    `xml:"Name"`                    // Bucket name
+	Location         string    `xml:"Location"`                // Bucket datacenter
+	CreationDate     time.Time `xml:"CreationDate"`            // Bucket creation time
+	ExtranetEndpoint string    `xml:"ExtranetEndpoint"`        // Bucket external endpoint
+	IntranetEndpoint string    `xml:"IntranetEndpoint"`        // Bucket internal endpoint
+	ACL              string    `xml:"AccessControlList>Grant"` // Bucket ACL
+	Owner            Owner     `xml:"Owner"`                   // Bucket owner
+	StorageClass     string    `xml:"StorageClass"`            // Bucket storage class
 }
 
-// ListObjectsResult ListObjects请求返回结果
+// ListObjectsResult defines the result from ListObjects request
 type ListObjectsResult struct {
 	XMLName        xml.Name           `xml:"ListBucketResult"`
-	Prefix         string             `xml:"Prefix"`                // 本次查询结果的开始前缀
-	Marker         string             `xml:"Marker"`                // 这次查询的起点
-	MaxKeys        int                `xml:"MaxKeys"`               // 请求返回结果的最大数目
-	Delimiter      string             `xml:"Delimiter"`             // 对Object名字进行分组的字符
-	IsTruncated    bool               `xml:"IsTruncated"`           // 是否所有的结果都已经返回
-	NextMarker     string             `xml:"NextMarker"`            // 下一次查询的起点
-	Objects        []ObjectProperties `xml:"Contents"`              // Object类别
-	CommonPrefixes []string           `xml:"CommonPrefixes>Prefix"` // 以delimiter结尾并有共同前缀的Object的集合
+	Prefix         string             `xml:"Prefix"`                // The object prefix
+	Marker         string             `xml:"Marker"`                // The marker filter.
+	MaxKeys        int                `xml:"MaxKeys"`               // Max keys to return
+	Delimiter      string             `xml:"Delimiter"`             // The delimiter for grouping objects' name
+	IsTruncated    bool               `xml:"IsTruncated"`           // Flag indicates if all results are returned (when it's false)
+	NextMarker     string             `xml:"NextMarker"`            // The start point of the next query
+	Objects        []ObjectProperties `xml:"Contents"`              // Object list
+	CommonPrefixes []string           `xml:"CommonPrefixes>Prefix"` // You can think of CommonPrefixes as "folders" whose names end with the delimiter
 }
 
-// ObjectProperties Objecct属性
+// ObjectProperties defines Object properties
 type ObjectProperties struct {
 	XMLName      xml.Name  `xml:"Contents"`
-	Key          string    `xml:"Key"`          // Object的Key
-	Type         string    `xml:"Type"`         // Object Type
-	Size         int64     `xml:"Size"`         // Object的长度字节数
-	ETag         string    `xml:"ETag"`         // 标示Object的内容
-	Owner        Owner     `xml:"Owner"`        // 保存Object拥有者信息的容器
-	LastModified time.Time `xml:"LastModified"` // Object最后修改时间
-	StorageClass string    `xml:"StorageClass"` // Object的存储类型
+	Key          string    `xml:"Key"`          // Object key
+	Type         string    `xml:"Type"`         // Object type
+	Size         int64     `xml:"Size"`         // Object size
+	ETag         string    `xml:"ETag"`         // Object ETag
+	Owner        Owner     `xml:"Owner"`        // Object owner information
+	LastModified time.Time `xml:"LastModified"` // Object last modified time
+	StorageClass string    `xml:"StorageClass"` // Object storage class (Standard, IA, Archive)
 }
 
-// Owner Bucket/Object的owner
+// Owner defines Bucket/Object's owner
 type Owner struct {
 	XMLName     xml.Name `xml:"Owner"`
-	ID          string   `xml:"ID"`          // 用户ID
-	DisplayName string   `xml:"DisplayName"` // Owner名字
+	ID          string   `xml:"ID"`          // Owner ID
+	DisplayName string   `xml:"DisplayName"` // Owner's display name
 }
 
-// CopyObjectResult CopyObject请求返回的结果
+// CopyObjectResult defines result object of CopyObject
 type CopyObjectResult struct {
 	XMLName      xml.Name  `xml:"CopyObjectResult"`
-	LastModified time.Time `xml:"LastModified"` // 新Object最后更新时间
-	ETag         string    `xml:"ETag"`         // 新Object的ETag值
+	LastModified time.Time `xml:"LastModified"` // New object's last modified time.
+	ETag         string    `xml:"ETag"`         // New object's ETag
 }
 
-// GetObjectACLResult GetObjectACL请求返回的结果
+// GetObjectACLResult defines result of GetObjectACL request
 type GetObjectACLResult GetBucketACLResult
 
 type deleteXML struct {
 	XMLName xml.Name       `xml:"Delete"`
-	Objects []DeleteObject `xml:"Object"` // 删除的所有Object
-	Quiet   bool           `xml:"Quiet"`  // 安静响应模式
+	Objects []DeleteObject `xml:"Object"` // Objects to delete
+	Quiet   bool           `xml:"Quiet"`  // Flag of quiet mode.
 }
 
-// DeleteObject 删除的Object
+// DeleteObject defines the struct for deleting object
 type DeleteObject struct {
 	XMLName xml.Name `xml:"Object"`
-	Key     string   `xml:"Key"` // Object名称
+	Key     string   `xml:"Key"` // Object name
 }
 
-// DeleteObjectsResult DeleteObjects请求返回结果
+// DeleteObjectsResult defines result of DeleteObjects request
 type DeleteObjectsResult struct {
 	XMLName        xml.Name `xml:"DeleteResult"`
-	DeletedObjects []string `xml:"Deleted>Key"` // 删除的Object列表
+	DeletedObjects []string `xml:"Deleted>Key"` // Deleted object list
 }
 
-// InitiateMultipartUploadResult InitiateMultipartUpload请求返回结果
+// InitiateMultipartUploadResult defines result of InitiateMultipartUpload request
 type InitiateMultipartUploadResult struct {
 	XMLName  xml.Name `xml:"InitiateMultipartUploadResult"`
-	Bucket   string   `xml:"Bucket"`   // Bucket名称
-	Key      string   `xml:"Key"`      // 上传Object名称
-	UploadID string   `xml:"UploadId"` // 生成的UploadId
+	Bucket   string   `xml:"Bucket"`   // Bucket name
+	Key      string   `xml:"Key"`      // Object name to upload
+	UploadID string   `xml:"UploadId"` // Generated UploadId
 }
 
-// UploadPart 上传/拷贝的分片
+// UploadPart defines the upload/copy part
 type UploadPart struct {
 	XMLName    xml.Name `xml:"Part"`
-	PartNumber int      `xml:"PartNumber"` // Part编号
-	ETag       string   `xml:"ETag"`       // ETag缓存码
+	PartNumber int      `xml:"PartNumber"` // Part number
+	ETag       string   `xml:"ETag"`       // ETag value of the part's data
 }
 
 type uploadParts []UploadPart
@@ -297,10 +297,10 @@ func (slice uploadParts) Swap(i, j int) {
 	slice[i], slice[j] = slice[j], slice[i]
 }
 
-// UploadPartCopyResult 拷贝分片请求返回的结果
+// UploadPartCopyResult defines result object of multipart copy request.
 type UploadPartCopyResult struct {
 	XMLName      xml.Name  `xml:"CopyPartResult"`
-	LastModified time.Time `xml:"LastModified"` // 最后修改时间
+	LastModified time.Time `xml:"LastModified"` // Last modified time
 	ETag         string    `xml:"ETag"`         // ETag
 }
 
@@ -309,61 +309,61 @@ type completeMultipartUploadXML struct {
 	Part    []UploadPart `xml:"Part"`
 }
 
-// CompleteMultipartUploadResult 提交分片上传任务返回结果
+// CompleteMultipartUploadResult defines result object of CompleteMultipartUploadRequest
 type CompleteMultipartUploadResult struct {
 	XMLName  xml.Name `xml:"CompleteMultipartUploadResult"`
-	Location string   `xml:"Location"` // ObjectURL
-	Bucket   string   `xml:"Bucket"`   // Bucket名称
-	ETag     string   `xml:"ETag"`     // ObjectETag
-	Key      string   `xml:"Key"`      // Object的名字
+	Location string   `xml:"Location"` // Object URL
+	Bucket   string   `xml:"Bucket"`   // Bucket name
+	ETag     string   `xml:"ETag"`     // Object ETag
+	Key      string   `xml:"Key"`      // Object name
 }
 
-// ListUploadedPartsResult ListUploadedParts请求返回结果
+// ListUploadedPartsResult defines result object of ListUploadedParts
 type ListUploadedPartsResult struct {
 	XMLName              xml.Name       `xml:"ListPartsResult"`
-	Bucket               string         `xml:"Bucket"`               // Bucket名称
-	Key                  string         `xml:"Key"`                  // Object名称
-	UploadID             string         `xml:"UploadId"`             // 上传Id
-	NextPartNumberMarker string         `xml:"NextPartNumberMarker"` // 下一个Part的位置
-	MaxParts             int            `xml:"MaxParts"`             // 最大Part个数
-	IsTruncated          bool           `xml:"IsTruncated"`          // 是否完全上传完成
-	UploadedParts        []UploadedPart `xml:"Part"`                 // 已完成的Part
+	Bucket               string         `xml:"Bucket"`               // Bucket name
+	Key                  string         `xml:"Key"`                  // Object name
+	UploadID             string         `xml:"UploadId"`             // Upload ID
+	NextPartNumberMarker string         `xml:"NextPartNumberMarker"` // Next part number
+	MaxParts             int            `xml:"MaxParts"`             // Max parts count
+	IsTruncated          bool           `xml:"IsTruncated"`          // Whether the results are truncated. true: more parts to list; false: all parts returned.
+	UploadedParts        []UploadedPart `xml:"Part"`                 // Uploaded parts
 }
 
-// UploadedPart 该任务已经上传的分片
+// UploadedPart defines uploaded part
 type UploadedPart struct {
 	XMLName      xml.Name  `xml:"Part"`
-	PartNumber   int       `xml:"PartNumber"`   // Part编号
-	LastModified time.Time `xml:"LastModified"` // 最后一次修改时间
-	ETag         string    `xml:"ETag"`         // ETag缓存码
-	Size         int       `xml:"Size"`         // Part大小
+	PartNumber   int       `xml:"PartNumber"`   // Part number
+	LastModified time.Time `xml:"LastModified"` // Last modified time
+	ETag         string    `xml:"ETag"`         // ETag value of the part's data
+	Size         int       `xml:"Size"`         // Part size
 }
 
-// ListMultipartUploadResult ListMultipartUpload请求返回结果
+// ListMultipartUploadResult defines result object of ListMultipartUpload
 type ListMultipartUploadResult struct {
 	XMLName            xml.Name            `xml:"ListMultipartUploadsResult"`
-	Bucket             string              `xml:"Bucket"`                // Bucket名称
-	Delimiter          string              `xml:"Delimiter"`             // 分组分割符
-	Prefix             string              `xml:"Prefix"`                // 筛选前缀
-	KeyMarker          string              `xml:"KeyMarker"`             // 起始Object位置
-	UploadIDMarker     string              `xml:"UploadIdMarker"`        // 起始UploadId位置
-	NextKeyMarker      string              `xml:"NextKeyMarker"`         // 如果没有全部返回,标明接下去的KeyMarker位置
-	NextUploadIDMarker string              `xml:"NextUploadIdMarker"`    // 如果没有全部返回,标明接下去的UploadId位置
-	MaxUploads         int                 `xml:"MaxUploads"`            // 返回最大Upload数目
-	IsTruncated        bool                `xml:"IsTruncated"`           // 是否完全返回
-	Uploads            []UncompletedUpload `xml:"Upload"`                // 未完成上传的MultipartUpload
-	CommonPrefixes     []string            `xml:"CommonPrefixes>Prefix"` // 所有名字包含指定的前缀且第一次出现delimiter字符之间的object作为一组的分组结果
-}
-
-// UncompletedUpload 未完成的Upload任务
+	Bucket             string              `xml:"Bucket"`                // Bucket name
+	Delimiter          string              `xml:"Delimiter"`             // Delimiter for grouping object.
+	Prefix             string              `xml:"Prefix"`                // Object prefix
+	KeyMarker          string              `xml:"KeyMarker"`             // Object key marker
+	UploadIDMarker     string              `xml:"UploadIdMarker"`        // UploadId marker
+	NextKeyMarker      string              `xml:"NextKeyMarker"`         // Next key marker, if not all entries returned.
+	NextUploadIDMarker string              `xml:"NextUploadIdMarker"`    // Next uploadId marker, if not all entries returned.
+	MaxUploads         int                 `xml:"MaxUploads"`            // Max uploads to return
+	IsTruncated        bool                `xml:"IsTruncated"`           // Flag indicates all entries are returned.
+	Uploads            []UncompletedUpload `xml:"Upload"`                // Ongoing uploads (not completed, not aborted)
+	CommonPrefixes     []string            `xml:"CommonPrefixes>Prefix"` // Common prefixes list.
+}
+
+// UncompletedUpload structure wraps an uncompleted upload task
 type UncompletedUpload struct {
 	XMLName   xml.Name  `xml:"Upload"`
-	Key       string    `xml:"Key"`       // Object名称
-	UploadID  string    `xml:"UploadId"`  // 对应UploadId
-	Initiated time.Time `xml:"Initiated"` // 初始化时间,格式2012-02-23T04:18:23.000Z
+	Key       string    `xml:"Key"`       // Object name
+	UploadID  string    `xml:"UploadId"`  // The UploadId
+	Initiated time.Time `xml:"Initiated"` // Initialization time in the format such as 2012-02-23T04:18:23.000Z
 }
 
-// 解析URL编码
+// decodeDeleteObjectsResult decodes deleting objects result in URL encoding
 func decodeDeleteObjectsResult(result *DeleteObjectsResult) error {
 	var err error
 	for i := 0; i < len(result.DeletedObjects); i++ {
@@ -375,7 +375,7 @@ func decodeDeleteObjectsResult(result *DeleteObjectsResult) error {
 	return nil
 }
 
-// 解析URL编码
+// decodeListObjectsResult decodes list objects result in URL encoding
 func decodeListObjectsResult(result *ListObjectsResult) error {
 	var err error
 	result.Prefix, err = url.QueryUnescape(result.Prefix)
@@ -409,7 +409,7 @@ func decodeListObjectsResult(result *ListObjectsResult) error {
 	return nil
 }
 
-// 解析URL编码
+// decodeListMultipartUploadResult decodes list multipart upload result in URL encoding
 func decodeListMultipartUploadResult(result *ListMultipartUploadResult) error {
 	var err error
 	result.Prefix, err = url.QueryUnescape(result.Prefix)
@@ -443,7 +443,7 @@ func decodeListMultipartUploadResult(result *ListMultipartUploadResult) error {
 	return nil
 }
 
-// createBucketConfiguration 规则的过期属性
+// createBucketConfiguration defines the configuration for creating a bucket.
 type createBucketConfiguration struct {
 	XMLName      xml.Name         `xml:"CreateBucketConfiguration"`
 	StorageClass StorageClassType `xml:"StorageClass,omitempty"`

+ 61 - 62
oss/upload.go

@@ -10,15 +10,14 @@ import (
 	"time"
 )
 
+// UploadFile is multipart file upload.
 //
-// UploadFile 分片上传文件
+// objectKey    the object name.
+// filePath    the local file path to upload.
+// partSize    the part size in byte.
+// options    the options for uploading object.
 //
-// objectKey  object名称。
-// filePath   本地文件。需要上传的文件。
-// partSize   本次上传文件片的大小,字节数。比如100 * 1024为每片100KB。
-// options    上传Object时可以指定Object的属性。详见InitiateMultipartUpload。
-//
-// error 操作成功为nil,非nil为错误信息。
+// error    it's nil if the operation succeeds, otherwise it's an error object.
 //
 func (bucket Bucket) UploadFile(objectKey, filePath string, partSize int64, options ...Option) error {
 	if partSize < MinPartSize || partSize > MaxPartSize {
@@ -39,9 +38,9 @@ func (bucket Bucket) UploadFile(objectKey, filePath string, partSize int64, opti
 	return bucket.uploadFile(objectKey, filePath, partSize, options, routines)
 }
 
-// ----- 并发无断点的上传  -----
+// ----- concurrent upload without checkpoint  -----
 
-// 获取Checkpoint配置
+// getCpConfig gets checkpoint configuration
 func getCpConfig(options []Option, filePath string) (*cpConfig, error) {
 	cpc := &cpConfig{}
 	cpcOpt, err := findOption(options, checkpointConfig, nil)
@@ -57,7 +56,7 @@ func getCpConfig(options []Option, filePath string) (*cpConfig, error) {
 	return cpc, nil
 }
 
-// 获取并发数,默认并发数1
+// getRoutines gets the routine count. by default it's 1.
 func getRoutines(options []Option) int {
 	rtnOpt, err := findOption(options, routineNum, nil)
 	if err != nil || rtnOpt == nil {
@@ -74,7 +73,7 @@ func getRoutines(options []Option) int {
 	return rs
 }
 
-// 获取进度回调
+// getProgressListener gets the progress callback
 func getProgressListener(options []Option) ProgressListener {
 	isSet, listener, _ := isOptionSet(options, progressListener)
 	if !isSet {
@@ -83,7 +82,7 @@ func getProgressListener(options []Option) ProgressListener {
 	return listener.(ProgressListener)
 }
 
-// 测试使用
+// uploadPartHook is for testing usage
 type uploadPartHook func(id int, chunk FileChunk) error
 
 var uploadPartHooker uploadPartHook = defaultUploadPart
@@ -92,7 +91,7 @@ func defaultUploadPart(id int, chunk FileChunk) error {
 	return nil
 }
 
-// 工作协程参数
+// workerArg defines worker argument structure
 type workerArg struct {
 	bucket   *Bucket
 	filePath string
@@ -100,7 +99,7 @@ type workerArg struct {
 	hook     uploadPartHook
 }
 
-// 工作协程
+// worker is the worker coroutine function
 func worker(id int, arg workerArg, jobs <-chan FileChunk, results chan<- UploadPart, failed chan<- error, die <-chan bool) {
 	for chunk := range jobs {
 		if err := arg.hook(id, chunk); err != nil {
@@ -121,7 +120,7 @@ func worker(id int, arg workerArg, jobs <-chan FileChunk, results chan<- UploadP
 	}
 }
 
-// 调度协程
+// scheduler function
 func scheduler(jobs chan FileChunk, chunks []FileChunk) {
 	for _, chunk := range chunks {
 		jobs <- chunk
@@ -137,7 +136,7 @@ func getTotalBytes(chunks []FileChunk) int64 {
 	return tb
 }
 
-// 并发上传,不带断点续传功能
+// uploadFile is a concurrent upload, without checkpoint
 func (bucket Bucket) uploadFile(objectKey, filePath string, partSize int64, options []Option, routines int) error {
 	listener := getProgressListener(options)
 
@@ -146,7 +145,7 @@ func (bucket Bucket) uploadFile(objectKey, filePath string, partSize int64, opti
 		return err
 	}
 
-	// 初始化上传任务
+	// Initialize the multipart upload
 	imur, err := bucket.InitiateMultipartUpload(objectKey, options...)
 	if err != nil {
 		return err
@@ -162,16 +161,16 @@ func (bucket Bucket) uploadFile(objectKey, filePath string, partSize int64, opti
 	event := newProgressEvent(TransferStartedEvent, 0, totalBytes)
 	publishProgress(listener, event)
 
-	// 启动工作协程
+	// Start the worker coroutine
 	arg := workerArg{&bucket, filePath, imur, uploadPartHooker}
 	for w := 1; w <= routines; w++ {
 		go worker(w, arg, jobs, results, failed, die)
 	}
 
-	// 并发上传分片
+	// Schedule the jobs
 	go scheduler(jobs, chunks)
 
-	// 等待分配分片上传完成
+	// Wait for the part uploads to finish
 	completed := 0
 	parts := make([]UploadPart, len(chunks))
 	for completed < len(chunks) {
@@ -198,7 +197,7 @@ func (bucket Bucket) uploadFile(objectKey, filePath string, partSize int64, opti
 	event = newProgressEvent(TransferStartedEvent, completedBytes, totalBytes)
 	publishProgress(listener, event)
 
-	// 提交任务
+	// Complete the multipart upload
 	_, err = bucket.CompleteMultipartUpload(imur, parts)
 	if err != nil {
 		bucket.AbortMultipartUpload(imur)
@@ -207,34 +206,34 @@ func (bucket Bucket) uploadFile(objectKey, filePath string, partSize int64, opti
 	return nil
 }
 
-// ----- 并发带断点的上传  -----
+// ----- concurrent upload with checkpoint  -----
 const uploadCpMagic = "FE8BB4EA-B593-4FAC-AD7A-2459A36E2E62"
 
 type uploadCheckpoint struct {
-	Magic     string   // magic
-	MD5       string   // cp内容的MD5
-	FilePath  string   // 本地文件
-	FileStat  cpStat   // 文件状态
-	ObjectKey string   // key
-	UploadID  string   // upload id
-	Parts     []cpPart // 本地文件的全部分片
+	Magic     string   // Magic
+	MD5       string   // Checkpoint file content's MD5
+	FilePath  string   // Local file path
+	FileStat  cpStat   // File state
+	ObjectKey string   // Key
+	UploadID  string   // Upload ID
+	Parts     []cpPart // All parts of the local file
 }
 
 type cpStat struct {
-	Size         int64     // 文件大小
-	LastModified time.Time // 本地文件最后修改时间
-	MD5          string    // 本地文件MD5
+	Size         int64     // File size
+	LastModified time.Time // File's last modified time
+	MD5          string    // Local file's MD5
 }
 
 type cpPart struct {
-	Chunk       FileChunk  // 分片
-	Part        UploadPart // 上传完成的分片
-	IsCompleted bool       // upload是否完成
+	Chunk       FileChunk  // File chunk
+	Part        UploadPart // Uploaded part
+	IsCompleted bool       // Upload complete flag
 }
 
-// CP数据是否有效,CP有效且文件没有更新时有效
+// isValid checks if the uploaded data is valid---it's valid when the file is not updated and the checkpoint data is valid.
 func (cp uploadCheckpoint) isValid(filePath string) (bool, error) {
-	// 比较CP的Magic及MD5
+	// Compare the CP's magic number and MD5.
 	cpb := cp
 	cpb.MD5 = ""
 	js, _ := json.Marshal(cpb)
@@ -245,7 +244,7 @@ func (cp uploadCheckpoint) isValid(filePath string) (bool, error) {
 		return false, nil
 	}
 
-	// 确认本地文件是否更新
+	// Make sure if the local file is updated.
 	fd, err := os.Open(filePath)
 	if err != nil {
 		return false, err
@@ -262,7 +261,7 @@ func (cp uploadCheckpoint) isValid(filePath string) (bool, error) {
 		return false, err
 	}
 
-	// 比较文件大小/文件最后更新时间/文件MD5
+	// Compare the file size, file's last modified time and file's MD5
 	if cp.FileStat.Size != st.Size() ||
 		cp.FileStat.LastModified != st.ModTime() ||
 		cp.FileStat.MD5 != md {
@@ -272,7 +271,7 @@ func (cp uploadCheckpoint) isValid(filePath string) (bool, error) {
 	return true, nil
 }
 
-// 从文件中load
+// load loads from the file
 func (cp *uploadCheckpoint) load(filePath string) error {
 	contents, err := ioutil.ReadFile(filePath)
 	if err != nil {
@@ -283,11 +282,11 @@ func (cp *uploadCheckpoint) load(filePath string) error {
 	return err
 }
 
-// dump到文件
+// dump dumps to the local file
 func (cp *uploadCheckpoint) dump(filePath string) error {
 	bcp := *cp
 
-	// 计算MD5
+	// Calculate MD5
 	bcp.MD5 = ""
 	js, err := json.Marshal(bcp)
 	if err != nil {
@@ -297,23 +296,23 @@ func (cp *uploadCheckpoint) dump(filePath string) error {
 	b64 := base64.StdEncoding.EncodeToString(sum[:])
 	bcp.MD5 = b64
 
-	// 序列化
+	// Serialization
 	js, err = json.Marshal(bcp)
 	if err != nil {
 		return err
 	}
 
-	// dump
+	// Dump
 	return ioutil.WriteFile(filePath, js, FilePermMode)
 }
 
-// 更新分片状态
+// updatePart updates the part status
 func (cp *uploadCheckpoint) updatePart(part UploadPart) {
 	cp.Parts[part.PartNumber-1].Part = part
 	cp.Parts[part.PartNumber-1].IsCompleted = true
 }
 
-// 未完成的分片
+// todoParts returns unfinished parts
 func (cp *uploadCheckpoint) todoParts() []FileChunk {
 	fcs := []FileChunk{}
 	for _, part := range cp.Parts {
@@ -324,7 +323,7 @@ func (cp *uploadCheckpoint) todoParts() []FileChunk {
 	return fcs
 }
 
-// 所有的分片
+// allParts returns all parts
 func (cp *uploadCheckpoint) allParts() []UploadPart {
 	ps := []UploadPart{}
 	for _, part := range cp.Parts {
@@ -333,7 +332,7 @@ func (cp *uploadCheckpoint) allParts() []UploadPart {
 	return ps
 }
 
-// 完成的字节数
+// getCompletedBytes returns completed bytes count
 func (cp *uploadCheckpoint) getCompletedBytes() int64 {
 	var completedBytes int64
 	for _, part := range cp.Parts {
@@ -344,19 +343,19 @@ func (cp *uploadCheckpoint) getCompletedBytes() int64 {
 	return completedBytes
 }
 
-// 计算文件文件MD5
+// calcFileMD5 calculates the MD5 for the specified local file
 func calcFileMD5(filePath string) (string, error) {
 	return "", nil
 }
 
-// 初始化分片上传
+// prepare initializes the multipart upload
 func prepare(cp *uploadCheckpoint, objectKey, filePath string, partSize int64, bucket *Bucket, options []Option) error {
-	// cp
+	// CP
 	cp.Magic = uploadCpMagic
 	cp.FilePath = filePath
 	cp.ObjectKey = objectKey
 
-	// localfile
+	// Local file
 	fd, err := os.Open(filePath)
 	if err != nil {
 		return err
@@ -375,7 +374,7 @@ func prepare(cp *uploadCheckpoint, objectKey, filePath string, partSize int64, b
 	}
 	cp.FileStat.MD5 = md
 
-	// chunks
+	// Chunks
 	parts, err := SplitFileByPartSize(filePath, partSize)
 	if err != nil {
 		return err
@@ -387,7 +386,7 @@ func prepare(cp *uploadCheckpoint, objectKey, filePath string, partSize int64, b
 		cp.Parts[i].IsCompleted = false
 	}
 
-	// init load
+	// Init load
 	imur, err := bucket.InitiateMultipartUpload(objectKey, options...)
 	if err != nil {
 		return err
@@ -397,7 +396,7 @@ func prepare(cp *uploadCheckpoint, objectKey, filePath string, partSize int64, b
 	return nil
 }
 
-// 提交分片上传,删除CP文件
+// complete completes the multipart upload and deletes the local CP files
 func complete(cp *uploadCheckpoint, bucket *Bucket, parts []UploadPart, cpFilePath string) error {
 	imur := InitiateMultipartUploadResult{Bucket: bucket.BucketName,
 		Key: cp.ObjectKey, UploadID: cp.UploadID}
@@ -409,18 +408,18 @@ func complete(cp *uploadCheckpoint, bucket *Bucket, parts []UploadPart, cpFilePa
 	return err
 }
 
-// 并发带断点的上传
+// uploadFileWithCp handles concurrent upload with checkpoint
 func (bucket Bucket) uploadFileWithCp(objectKey, filePath string, partSize int64, options []Option, cpFilePath string, routines int) error {
 	listener := getProgressListener(options)
 
-	// LOAD CP数据
+	// Load CP data
 	ucp := uploadCheckpoint{}
 	err := ucp.load(cpFilePath)
 	if err != nil {
 		os.Remove(cpFilePath)
 	}
 
-	// LOAD出错或数据无效重新初始化上传
+	// Load error or the CP data is invalid.
 	valid, err := ucp.isValid(filePath)
 	if err != nil || !valid {
 		if err = prepare(&ucp, objectKey, filePath, partSize, &bucket, options); err != nil {
@@ -444,16 +443,16 @@ func (bucket Bucket) uploadFileWithCp(objectKey, filePath string, partSize int64
 	event := newProgressEvent(TransferStartedEvent, completedBytes, ucp.FileStat.Size)
 	publishProgress(listener, event)
 
-	// 启动工作协程
+	// Start the workers
 	arg := workerArg{&bucket, filePath, imur, uploadPartHooker}
 	for w := 1; w <= routines; w++ {
 		go worker(w, arg, jobs, results, failed, die)
 	}
 
-	// 并发上传分片
+	// Schedule jobs
 	go scheduler(jobs, chunks)
 
-	// 等待分配分片上传完成
+	// Wait for the part upload jobs to finish
 	completed := 0
 	for completed < len(chunks) {
 		select {
@@ -479,7 +478,7 @@ func (bucket Bucket) uploadFileWithCp(objectKey, filePath string, partSize int64
 	event = newProgressEvent(TransferCompletedEvent, completedBytes, ucp.FileStat.Size)
 	publishProgress(listener, event)
 
-	// 提交分片上传
+	// Complete the multipart upload
 	err = complete(&ucp, &bucket, ucp.allParts(), cpFilePath)
 	return err
 }

+ 36 - 36
oss/upload_test.go

@@ -16,7 +16,7 @@ type OssUploadSuite struct {
 
 var _ = Suite(&OssUploadSuite{})
 
-// Run once when the suite starts running
+// SetUpSuite runs once when the suite starts running
 func (s *OssUploadSuite) SetUpSuite(c *C) {
 	client, err := New(endpoint, accessID, accessKey)
 	c.Assert(err, IsNil)
@@ -32,9 +32,9 @@ func (s *OssUploadSuite) SetUpSuite(c *C) {
 	testLogger.Println("test upload started")
 }
 
-// Run before each test or benchmark starts running
+// TearDownSuite runs once after all tests or benchmarks have finished running
 func (s *OssUploadSuite) TearDownSuite(c *C) {
-	// Delete Part
+	// Delete part
 	lmur, err := s.bucket.ListMultipartUploads()
 	c.Assert(err, IsNil)
 
@@ -45,7 +45,7 @@ func (s *OssUploadSuite) TearDownSuite(c *C) {
 		c.Assert(err, IsNil)
 	}
 
-	// Delete Objects
+	// Delete objects
 	lor, err := s.bucket.ListObjects()
 	c.Assert(err, IsNil)
 
@@ -57,25 +57,25 @@ func (s *OssUploadSuite) TearDownSuite(c *C) {
 	testLogger.Println("test upload completed")
 }
 
-// Run after each test or benchmark runs
+// SetUpTest runs before each test or benchmark starts running
 func (s *OssUploadSuite) SetUpTest(c *C) {
 	err := removeTempFiles("../oss", ".jpg")
 	c.Assert(err, IsNil)
 }
 
-// Run once after all tests or benchmarks have finished running
+// TearDownTest runs after each test or benchmark runs
 func (s *OssUploadSuite) TearDownTest(c *C) {
 	err := removeTempFiles("../oss", ".jpg")
 	c.Assert(err, IsNil)
 }
 
-// TestUploadRoutineWithoutRecovery 多线程无断点恢复的上传
+// TestUploadRoutineWithoutRecovery tests multi-routine upload without checkpoint
 func (s *OssUploadSuite) TestUploadRoutineWithoutRecovery(c *C) {
 	objectName := objectNamePrefix + "turwr"
 	fileName := "../sample/BingWallpaper-2015-11-07.jpg"
 	newFile := "upload-new-file.jpg"
 
-	// 不指定Routines,默认单线程
+	// Routines is not specified, by default single routine
 	err := s.bucket.UploadFile(objectName, fileName, 100*1024)
 	c.Assert(err, IsNil)
 
@@ -90,7 +90,7 @@ func (s *OssUploadSuite) TestUploadRoutineWithoutRecovery(c *C) {
 	err = s.bucket.DeleteObject(objectName)
 	c.Assert(err, IsNil)
 
-	// 指定线程数1
+	// Specify routine count as 1
 	err = s.bucket.UploadFile(objectName, fileName, 100*1024, Routines(1))
 	c.Assert(err, IsNil)
 
@@ -105,7 +105,7 @@ func (s *OssUploadSuite) TestUploadRoutineWithoutRecovery(c *C) {
 	err = s.bucket.DeleteObject(objectName)
 	c.Assert(err, IsNil)
 
-	// 指定线程数3,小于分片数5
+	// Specify routine count as 3, which is smaller than parts count 5
 	err = s.bucket.UploadFile(objectName, fileName, 100*1024, Routines(3))
 	c.Assert(err, IsNil)
 
@@ -120,7 +120,7 @@ func (s *OssUploadSuite) TestUploadRoutineWithoutRecovery(c *C) {
 	err = s.bucket.DeleteObject(objectName)
 	c.Assert(err, IsNil)
 
-	// 指定线程数5,等于分片数
+	// Specify routine count as 5, which is same as the part count 5
 	err = s.bucket.UploadFile(objectName, fileName, 100*1024, Routines(5))
 	c.Assert(err, IsNil)
 
@@ -135,7 +135,7 @@ func (s *OssUploadSuite) TestUploadRoutineWithoutRecovery(c *C) {
 	err = s.bucket.DeleteObject(objectName)
 	c.Assert(err, IsNil)
 
-	// 指定线程数10,大于分片数5
+	// Specify routine count as 10, which is bigger than the part count 5.
 	err = s.bucket.UploadFile(objectName, fileName, 100*1024, Routines(10))
 	c.Assert(err, IsNil)
 
@@ -150,7 +150,7 @@ func (s *OssUploadSuite) TestUploadRoutineWithoutRecovery(c *C) {
 	err = s.bucket.DeleteObject(objectName)
 	c.Assert(err, IsNil)
 
-	// 线程值无效自动变成1
+	// Invalid routine count, it will use 1 automatically.
 	err = s.bucket.UploadFile(objectName, fileName, 100*1024, Routines(0))
 	os.Remove(newFile)
 	err = s.bucket.GetObjectToFile(objectName, newFile)
@@ -163,7 +163,7 @@ func (s *OssUploadSuite) TestUploadRoutineWithoutRecovery(c *C) {
 	err = s.bucket.DeleteObject(objectName)
 	c.Assert(err, IsNil)
 
-	// 线程值无效自动变成1
+	// Invalid routine count, it will use 1 automatically
 	err = s.bucket.UploadFile(objectName, fileName, 100*1024, Routines(-1))
 	os.Remove(newFile)
 	err = s.bucket.GetObjectToFile(objectName, newFile)
@@ -176,7 +176,7 @@ func (s *OssUploadSuite) TestUploadRoutineWithoutRecovery(c *C) {
 	err = s.bucket.DeleteObject(objectName)
 	c.Assert(err, IsNil)
 
-	// option
+	// Option
 	err = s.bucket.UploadFile(objectName, fileName, 100*1024, Routines(3), Meta("myprop", "mypropval"))
 
 	meta, err := s.bucket.GetObjectDetailedMeta(objectName)
@@ -195,7 +195,7 @@ func (s *OssUploadSuite) TestUploadRoutineWithoutRecovery(c *C) {
 	c.Assert(err, IsNil)
 }
 
-// ErrorHooker UploadPart请求Hook
+// ErrorHooker is a UploadPart hook---it will fail the 5th part's upload.
 func ErrorHooker(id int, chunk FileChunk) error {
 	if chunk.Number == 5 {
 		time.Sleep(time.Second)
@@ -204,23 +204,23 @@ func ErrorHooker(id int, chunk FileChunk) error {
 	return nil
 }
 
-// TestUploadRoutineWithoutRecovery 多线程无断点恢复的上传
+// TestUploadRoutineWithoutRecoveryNegative tests error cases of multi-routine upload without checkpoint
 func (s *OssUploadSuite) TestUploadRoutineWithoutRecoveryNegative(c *C) {
 	objectName := objectNamePrefix + "turwrn"
 	fileName := "../sample/BingWallpaper-2015-11-07.jpg"
 
 	uploadPartHooker = ErrorHooker
-	// worker线程错误
+	// Worker routine error
 	err := s.bucket.UploadFile(objectName, fileName, 100*1024, Routines(2))
 	c.Assert(err, NotNil)
 	c.Assert(err.Error(), Equals, "ErrorHooker")
 	uploadPartHooker = defaultUploadPart
 
-	// 本地文件不存在
+	// Local file does not exist
 	err = s.bucket.UploadFile(objectName, "NotExist", 100*1024, Routines(2))
 	c.Assert(err, NotNil)
 
-	// 指定的分片大小无效
+	// The part size is invalid
 	err = s.bucket.UploadFile(objectName, fileName, 1024, Routines(2))
 	c.Assert(err, NotNil)
 
@@ -228,21 +228,21 @@ func (s *OssUploadSuite) TestUploadRoutineWithoutRecoveryNegative(c *C) {
 	c.Assert(err, NotNil)
 }
 
-// TestUploadRoutineWithRecovery 多线程且有断点恢复的上传
+// TestUploadRoutineWithRecovery is multi-routine upload with resumable recovery
 func (s *OssUploadSuite) TestUploadRoutineWithRecovery(c *C) {
 	objectName := objectNamePrefix + "turtr"
 	fileName := "../sample/BingWallpaper-2015-11-07.jpg"
 	newFile := "upload-new-file-2.jpg"
 
-	// Routines默认值,CP开启默认路径是fileName+.cp
-	// 第一次上传,上传4片
+	// Use default routines and default CP file path (fileName+.cp)
+	// First upload for 4 parts
 	uploadPartHooker = ErrorHooker
 	err := s.bucket.UploadFile(objectName, fileName, 100*1024, Checkpoint(true, ""))
 	c.Assert(err, NotNil)
 	c.Assert(err.Error(), Equals, "ErrorHooker")
 	uploadPartHooker = defaultUploadPart
 
-	// check cp
+	// Check CP
 	ucp := uploadCheckpoint{}
 	err = ucp.load(fileName + ".cp")
 	c.Assert(err, IsNil)
@@ -258,7 +258,7 @@ func (s *OssUploadSuite) TestUploadRoutineWithRecovery(c *C) {
 	c.Assert(len(ucp.todoParts()), Equals, 1)
 	c.Assert(len(ucp.allParts()), Equals, 5)
 
-	// 第二次上传,完成剩余的一片
+	// Second upload, finish the remaining part
 	err = s.bucket.UploadFile(objectName, fileName, 100*1024, Checkpoint(true, ""))
 	c.Assert(err, IsNil)
 
@@ -276,14 +276,14 @@ func (s *OssUploadSuite) TestUploadRoutineWithRecovery(c *C) {
 	err = ucp.load(fileName + ".cp")
 	c.Assert(err, NotNil)
 
-	// Routines指定,CP指定
+	// Specify routines and CP
 	uploadPartHooker = ErrorHooker
 	err = s.bucket.UploadFile(objectName, fileName, 100*1024, Routines(2), Checkpoint(true, objectName+".cp"))
 	c.Assert(err, NotNil)
 	c.Assert(err.Error(), Equals, "ErrorHooker")
 	uploadPartHooker = defaultUploadPart
 
-	// check cp
+	// Check CP
 	ucp = uploadCheckpoint{}
 	err = ucp.load(objectName + ".cp")
 	c.Assert(err, IsNil)
@@ -316,7 +316,7 @@ func (s *OssUploadSuite) TestUploadRoutineWithRecovery(c *C) {
 	err = ucp.load(objectName + ".cp")
 	c.Assert(err, NotNil)
 
-	// 一次完成上传,中间没有错误
+	// Upload all 5 parts without error
 	err = s.bucket.UploadFile(objectName, fileName, 100*1024, Routines(3), Checkpoint(true, ""))
 	c.Assert(err, IsNil)
 
@@ -331,7 +331,7 @@ func (s *OssUploadSuite) TestUploadRoutineWithRecovery(c *C) {
 	err = s.bucket.DeleteObject(objectName)
 	c.Assert(err, IsNil)
 
-	// 用多协程下载,中间没有错误
+	// Upload all 5 parts with 10 routines without error
 	err = s.bucket.UploadFile(objectName, fileName, 100*1024, Routines(10), Checkpoint(true, ""))
 	c.Assert(err, IsNil)
 
@@ -346,7 +346,7 @@ func (s *OssUploadSuite) TestUploadRoutineWithRecovery(c *C) {
 	err = s.bucket.DeleteObject(objectName)
 	c.Assert(err, IsNil)
 
-	// option
+	// Option
 	err = s.bucket.UploadFile(objectName, fileName, 100*1024, Routines(3), Checkpoint(true, ""), Meta("myprop", "mypropval"))
 
 	meta, err := s.bucket.GetObjectDetailedMeta(objectName)
@@ -365,19 +365,19 @@ func (s *OssUploadSuite) TestUploadRoutineWithRecovery(c *C) {
 	c.Assert(err, IsNil)
 }
 
-// TestUploadRoutineWithoutRecovery 多线程无断点恢复的上传
+// TestUploadRoutineWithRecoveryNegative tests error cases of multi-routine upload with checkpoint
 func (s *OssUploadSuite) TestUploadRoutineWithRecoveryNegative(c *C) {
 	objectName := objectNamePrefix + "turrn"
 	fileName := "../sample/BingWallpaper-2015-11-07.jpg"
 
-	// 本地文件不存在
+	// The local file does not exist
 	err := s.bucket.UploadFile(objectName, "NotExist", 100*1024, Checkpoint(true, ""))
 	c.Assert(err, NotNil)
 
 	err = s.bucket.UploadFile(objectName, "NotExist", 100*1024, Routines(2), Checkpoint(true, ""))
 	c.Assert(err, NotNil)
 
-	// 指定的分片大小无效
+	// Specified part size is invalid
 	err = s.bucket.UploadFile(objectName, fileName, 1024, Checkpoint(true, ""))
 	c.Assert(err, NotNil)
 
@@ -391,7 +391,7 @@ func (s *OssUploadSuite) TestUploadRoutineWithRecoveryNegative(c *C) {
 	c.Assert(err, NotNil)
 }
 
-// TestUploadLocalFileChange 上传过程中文件修改了
+// TestUploadLocalFileChange tests the file is updated while being uploaded
 func (s *OssUploadSuite) TestUploadLocalFileChange(c *C) {
 	objectName := objectNamePrefix + "tulfc"
 	fileName := "../sample/BingWallpaper-2015-11-07.jpg"
@@ -402,7 +402,7 @@ func (s *OssUploadSuite) TestUploadLocalFileChange(c *C) {
 	err := copyFile(fileName, localFile)
 	c.Assert(err, IsNil)
 
-	// 第一次上传,上传4片
+	// First upload for 4 parts
 	uploadPartHooker = ErrorHooker
 	err = s.bucket.UploadFile(objectName, localFile, 100*1024, Checkpoint(true, ""))
 	c.Assert(err, NotNil)
@@ -413,7 +413,7 @@ func (s *OssUploadSuite) TestUploadLocalFileChange(c *C) {
 	err = copyFile(fileName, localFile)
 	c.Assert(err, IsNil)
 
-	// 文件修改,第二次上传全部分片重新上传
+	// Updating the file. The second upload will re-upload all 5 parts.
 	err = s.bucket.UploadFile(objectName, localFile, 100*1024, Checkpoint(true, ""))
 	c.Assert(err, IsNil)
 

+ 29 - 29
oss/utils.go

@@ -14,8 +14,8 @@ import (
 	"time"
 )
 
-// Get User Agent
-// Go sdk相关信息,包括sdk版本,操作系统类型,GO版本
+// userAgent gets user agent
+// It has the SDK version information, OS information and GO version
 var userAgent = func() string {
 	sys := getSysInfo()
 	return fmt.Sprintf("aliyun-sdk-go/%s (%s/%s/%s;%s)", Version, sys.name,
@@ -23,13 +23,13 @@ var userAgent = func() string {
 }()
 
 type sysInfo struct {
-	name    string // 操作系统名称windows/Linux
-	release string // 操作系统版本 2.6.32-220.23.2.ali1089.el5.x86_64等
-	machine string // 机器类型amd64/x86_64
+	name    string // OS name such as windows/Linux
+	release string // OS version 2.6.32-220.23.2.ali1089.el5.x86_64 etc
+	machine string // CPU type amd64/x86_64
 }
 
-// Get system info
-// 获取操作系统信息、机器类型
+// getSysInfo gets system info
+// gets the OS information and CPU type
 func getSysInfo() sysInfo {
 	name := runtime.GOOS
 	release := "-"
@@ -48,13 +48,13 @@ func getSysInfo() sysInfo {
 
 // unpackedRange
 type unpackedRange struct {
-	hasStart bool  // 是否指定了起点
-	hasEnd   bool  // 是否指定了终点
-	start    int64 // 起点
-	end      int64 // 终点
+	hasStart bool  // Flag indicates if the start point is specified
+	hasEnd   bool  // Flag indicates if the end point is specified
+	start    int64 // Start point
+	end      int64 // End point
 }
 
-// invalid Range Error
+// invalidRangeError returns invalid range error
 func invalidRangeError(r string) error {
 	return fmt.Errorf("InvalidRange %s", r)
 }
@@ -67,13 +67,13 @@ func parseRange(normalizedRange string) (*unpackedRange, error) {
 	var start int64
 	var end int64
 
-	// bytes==M-N or ranges=M-N
+	// Bytes==M-N or ranges=M-N
 	nrSlice := strings.Split(normalizedRange, "=")
 	if len(nrSlice) != 2 || nrSlice[0] != "bytes" {
 		return nil, invalidRangeError(normalizedRange)
 	}
 
-	// bytes=M-N,X-Y
+	// Bytes=M-N,X-Y
 	rSlice := strings.Split(nrSlice[1], ",")
 	rStr := rSlice[0]
 
@@ -114,7 +114,7 @@ func parseRange(normalizedRange string) (*unpackedRange, error) {
 	return &unpackedRange{hasStart, hasEnd, start, end}, nil
 }
 
-// adjustRange return adjusted range, adjust the range according to the length of the file
+// adjustRange returns adjusted range, adjust the range according to the length of the file
 func adjustRange(ur *unpackedRange, size int64) (start, end int64) {
 	if ur == nil {
 		return 0, size
@@ -145,7 +145,7 @@ func adjustRange(ur *unpackedRange, size int64) (start, end int64) {
 }
 
 // GetNowSec returns Unix time, the number of seconds elapsed since January 1, 1970 UTC.
-// 获取当前时间,从UTC开始的秒数。
+// gets the current time in Unix time, in seconds.
 func GetNowSec() int64 {
 	return time.Now().Unix()
 }
@@ -154,25 +154,25 @@ func GetNowSec() int64 {
 // since January 1, 1970 UTC. The result is undefined if the Unix time
 // in nanoseconds cannot be represented by an int64. Note that this
 // means the result of calling UnixNano on the zero Time is undefined.
-// 获取当前时间,从UTC开始的纳秒。
+// gets the current time in Unix time, in nanoseconds.
 func GetNowNanoSec() int64 {
 	return time.Now().UnixNano()
 }
 
-// GetNowGMT 获取当前时间,格式形如"Mon, 02 Jan 2006 15:04:05 GMT",HTTP中使用的时间格式
+// GetNowGMT gets the current time in GMT format.
 func GetNowGMT() string {
 	return time.Now().UTC().Format(http.TimeFormat)
 }
 
-// FileChunk 文件片定义
+// FileChunk is the file chunk definition
 type FileChunk struct {
-	Number int   // 块序号
-	Offset int64 // 块在文件中的偏移量
-	Size   int64 // 块大小
+	Number int   // Chunk number
+	Offset int64 // Chunk offset
+	Size   int64 // Chunk size.
 }
 
-// SplitFileByPartNum Split big file to part by the num of part
-// 按指定的块数分割文件。返回值FileChunk为分割结果,error为nil时有效。
+// SplitFileByPartNum splits big file into parts by the num of parts.
+// Split the file with specified parts count, returns the split result when error is nil.
 func SplitFileByPartNum(fileName string, chunkNum int) ([]FileChunk, error) {
 	if chunkNum <= 0 || chunkNum > 10000 {
 		return nil, errors.New("chunkNum invalid")
@@ -210,8 +210,8 @@ func SplitFileByPartNum(fileName string, chunkNum int) ([]FileChunk, error) {
 	return chunks, nil
 }
 
-// SplitFileByPartSize Split big file to part by the size of part
-// 按块大小分割文件。返回值FileChunk为分割结果,error为nil时有效。
+// SplitFileByPartSize splits big file into parts by the size of parts.
+// Splits the file by the part size. Returns the FileChunk when error is nil.
 func SplitFileByPartSize(fileName string, chunkSize int64) ([]FileChunk, error) {
 	if chunkSize <= 0 {
 		return nil, errors.New("chunkSize invalid")
@@ -229,7 +229,7 @@ func SplitFileByPartSize(fileName string, chunkSize int64) ([]FileChunk, error)
 	}
 	var chunkN = stat.Size() / chunkSize
 	if chunkN >= 10000 {
-		return nil, errors.New("Too many parts, please increase part size.")
+		return nil, errors.New("Too many parts, please increase part size")
 	}
 
 	var chunks []FileChunk
@@ -251,7 +251,7 @@ func SplitFileByPartSize(fileName string, chunkSize int64) ([]FileChunk, error)
 	return chunks, nil
 }
 
-// GetPartEnd 计算结束位置
+// GetPartEnd calculates the end position
 func GetPartEnd(begin int64, total int64, per int64) int64 {
 	if begin+per > total {
 		return total - 1
@@ -259,7 +259,7 @@ func GetPartEnd(begin int64, total int64, per int64) int64 {
 	return begin + per - 1
 }
 
-// crcTable returns the Table constructed from the specified polynomial
+// crcTable returns the table constructed from the specified polynomial
 var crcTable = func() *crc64.Table {
 	return crc64.MakeTable(crc64.ECMA)
 }

+ 5 - 5
oss/utils_test.go

@@ -143,7 +143,7 @@ func (s *OssUtilsSuite) TestParseRange(c *C) {
 	c.Assert(err, NotNil)
 	c.Assert(err.Error(), Equals, "InvalidRange bytes=1-N")
 
-	// ranges=M-N
+	// Ranges=M-N
 	ur, err := parseRange("bytes=1024-4096")
 	c.Assert(err, IsNil)
 	c.Assert(ur.start, Equals, (int64)(1024))
@@ -151,7 +151,7 @@ func (s *OssUtilsSuite) TestParseRange(c *C) {
 	c.Assert(ur.hasStart, Equals, true)
 	c.Assert(ur.hasEnd, Equals, true)
 
-	// ranges=M-N,X-Y
+	// Ranges=M-N,X-Y
 	ur, err = parseRange("bytes=1024-4096,2048-4096")
 	c.Assert(err, IsNil)
 	c.Assert(ur.start, Equals, (int64)(1024))
@@ -159,7 +159,7 @@ func (s *OssUtilsSuite) TestParseRange(c *C) {
 	c.Assert(ur.hasStart, Equals, true)
 	c.Assert(ur.hasEnd, Equals, true)
 
-	// ranges=M-
+	// Ranges=M-
 	ur, err = parseRange("bytes=1024-")
 	c.Assert(err, IsNil)
 	c.Assert(ur.start, Equals, (int64)(1024))
@@ -167,7 +167,7 @@ func (s *OssUtilsSuite) TestParseRange(c *C) {
 	c.Assert(ur.hasStart, Equals, true)
 	c.Assert(ur.hasEnd, Equals, false)
 
-	// ranges=-N
+	// Ranges=-N
 	ur, err = parseRange("bytes=-4096")
 	c.Assert(err, IsNil)
 	c.Assert(ur.start, Equals, (int64)(0))
@@ -177,7 +177,7 @@ func (s *OssUtilsSuite) TestParseRange(c *C) {
 }
 
 func (s *OssUtilsSuite) TestAdjustRange(c *C) {
-	// nil
+	// Nil
 	start, end := adjustRange(nil, 8192)
 	c.Assert(start, Equals, (int64)(0))
 	c.Assert(end, Equals, (int64)(8192))

+ 15 - 15
sample/append_object.go

@@ -12,9 +12,9 @@ import (
 	"github.com/aliyun/aliyun-oss-go-sdk/oss"
 )
 
-// AppendObjectSample 展示了追加上传的用法
+// AppendObjectSample shows the append file's usage
 func AppendObjectSample() {
-	// 创建Bucket
+	// Create bucket
 	bucket, err := GetTestBucket(bucketName)
 	if err != nil {
 		HandleError(err)
@@ -25,20 +25,20 @@ func AppendObjectSample() {
 	var str = "弃我去者,昨日之日不可留。 乱我心者,今日之日多烦忧!"
 	var nextPos int64
 
-	// 场景1:追加字符串到object
-	// 第一次追加的位置是0,返回值为下一次追加的位置
+	// Case 1: Append a string to the object
+	// The first append position is 0 and the return value is for the next append's position.
 	nextPos, err = bucket.AppendObject(objectKey, strings.NewReader(str), nextPos)
 	if err != nil {
 		HandleError(err)
 	}
 
-	// 第二次追加
+	// Second append
 	nextPos, err = bucket.AppendObject(objectKey, strings.NewReader(str), nextPos)
 	if err != nil {
 		HandleError(err)
 	}
 
-	// 下载
+	// Download
 	body, err := bucket.GetObject(objectKey)
 	if err != nil {
 		HandleError(err)
@@ -55,21 +55,21 @@ func AppendObjectSample() {
 		HandleError(err)
 	}
 
-	// 场景2:追加[]byte到object
+	// Case 2: Append byte array to the object
 	nextPos = 0
-	// 第一次追加的位置是0,返回值为下一次追加的位置
+	// The first append position is 0, and the return value is for the next append's position.
 	nextPos, err = bucket.AppendObject(objectKey, bytes.NewReader([]byte(str)), nextPos)
 	if err != nil {
 		HandleError(err)
 	}
 
-	// 第二次追加
+	// Second append
 	nextPos, err = bucket.AppendObject(objectKey, bytes.NewReader([]byte(str)), nextPos)
 	if err != nil {
 		HandleError(err)
 	}
 
-	// 下载
+	// Download
 	body, err = bucket.GetObject(objectKey)
 	if err != nil {
 		HandleError(err)
@@ -86,7 +86,7 @@ func AppendObjectSample() {
 		HandleError(err)
 	}
 
-	//场景3:本地文件追加到Object
+	// Case 3: Append a local file to the object
 	fd, err := os.Open(localFile)
 	if err != nil {
 		HandleError(err)
@@ -99,7 +99,7 @@ func AppendObjectSample() {
 		HandleError(err)
 	}
 
-	// 场景4,您可以通过GetObjectDetailedMeta获取下次追加的位置
+	// Case 4: Get the next append position by GetObjectDetailedMeta
 	props, err := bucket.GetObjectDetailedMeta(objectKey)
 	nextPos, err = strconv.ParseInt(props.Get(oss.HTTPHeaderOssNextAppendPosition), 10, 0)
 	if err != nil {
@@ -116,7 +116,7 @@ func AppendObjectSample() {
 		HandleError(err)
 	}
 
-	// 场景5:第一次追加操作时,可以指定Object的Properties,包括以"x-oss-meta-my"为前缀的用户自定义属性
+	// Case 5: Specify the object properties for the first append, including the "x-oss-meta"'s custom metadata.
 	options := []oss.Option{
 		oss.Expires(futureDate),
 		oss.ObjectACL(oss.ACLPublicRead),
@@ -127,7 +127,7 @@ func AppendObjectSample() {
 	if err != nil {
 		HandleError(err)
 	}
-	// 第二次追加
+	// Second append
 	fd.Seek(0, os.SEEK_SET)
 	nextPos, err = bucket.AppendObject(objectKey, strings.NewReader(str), nextPos)
 	if err != nil {
@@ -146,7 +146,7 @@ func AppendObjectSample() {
 	}
 	fmt.Println("Object ACL:", goar.ACL)
 
-	// 删除object和bucket
+	// Delete the object and bucket
 	err = DeleteTestBucketAndObject(bucketName)
 	if err != nil {
 		HandleError(err)

+ 9 - 9
sample/archive.go

@@ -8,9 +8,9 @@ import (
 	"github.com/aliyun/aliyun-oss-go-sdk/oss"
 )
 
-// ArchiveSample Archive Sample
+// ArchiveSample archives sample
 func ArchiveSample() {
-	// create archive bucket
+	// Create archive bucket
 	client, err := oss.New(endpoint, accessID, accessKey)
 	if err != nil {
 		HandleError(err)
@@ -26,27 +26,27 @@ func ArchiveSample() {
 		HandleError(err)
 	}
 
-	// put archive object
+	// Put archive object
 	var val = "花间一壶酒,独酌无相亲。 举杯邀明月,对影成三人。"
 	err = archiveBucket.PutObject(objectKey, strings.NewReader(val))
 	if err != nil {
 		HandleError(err)
 	}
 
-	// check whether the object is archive class
+	// Check whether the object is archive class
 	meta, err := archiveBucket.GetObjectDetailedMeta(objectKey)
 	if err != nil {
 		HandleError(err)
 	}
 
 	if meta.Get("X-Oss-Storage-Class") == string(oss.StorageArchive) {
-		// restore object
+		// Restore object
 		err = archiveBucket.RestoreObject(objectKey)
 		if err != nil {
 			HandleError(err)
 		}
 
-		// wait for restore completed
+		// Wait for restore completed
 		meta, err = archiveBucket.GetObjectDetailedMeta(objectKey)
 		for meta.Get("X-Oss-Restore") == "ongoing-request=\"true\"" {
 			fmt.Println("x-oss-restore:" + meta.Get("X-Oss-Restore"))
@@ -55,16 +55,16 @@ func ArchiveSample() {
 		}
 	}
 
-	// get restored object
+	// Get restored object
 	err = archiveBucket.GetObjectToFile(objectKey, localFile)
 	if err != nil {
 		HandleError(err)
 	}
 
-	// restore repeatedly
+	// Restore repeatedly
 	err = archiveBucket.RestoreObject(objectKey)
 
-	// delete object and bucket
+	// Delete object and bucket
 	err = DeleteTestBucketAndObject(bucketName)
 	if err != nil {
 		HandleError(err)

+ 6 - 6
sample/bucket_acl.go

@@ -6,34 +6,34 @@ import (
 	"github.com/aliyun/aliyun-oss-go-sdk/oss"
 )
 
-// BucketACLSample 展示了如何读取/设置存储空间的权限(Bucket ACL)
+// BucketACLSample shows how to get and set the bucket ACL
 func BucketACLSample() {
-	// New Client
+	// New client
 	client, err := oss.New(endpoint, accessID, accessKey)
 	if err != nil {
 		HandleError(err)
 	}
 
-	// 使用默认参数创建bucket
+	// Create a bucket with default parameters
 	err = client.CreateBucket(bucketName)
 	if err != nil {
 		HandleError(err)
 	}
 
-	// 场景:设置Bucket ACL,可选权限有ACLPrivate、ACLPublicRead、ACLPublicReadWrite
+	// Set bucket ACL. The valid ACLs are ACLPrivate, ACLPublicRead and ACLPublicReadWrite
 	err = client.SetBucketACL(bucketName, oss.ACLPublicRead)
 	if err != nil {
 		HandleError(err)
 	}
 
-	// 查看Bucket ACL
+	// Get bucket ACL
 	gbar, err := client.GetBucketACL(bucketName)
 	if err != nil {
 		HandleError(err)
 	}
 	fmt.Println("Bucket ACL:", gbar.ACL)
 
-	// 删除bucket
+	// Delete the bucket
 	err = client.DeleteBucket(bucketName)
 	if err != nil {
 		HandleError(err)

+ 8 - 8
sample/bucket_cors.go

@@ -6,15 +6,15 @@ import (
 	"github.com/aliyun/aliyun-oss-go-sdk/oss"
 )
 
-// BucketCORSSample 展示了如何设置/读取/清除存储空间的跨域访问(Bucket CORS)
+// BucketCORSSample shows how to get or set the bucket CORS.
 func BucketCORSSample() {
-	// New Client
+	// New client
 	client, err := oss.New(endpoint, accessID, accessKey)
 	if err != nil {
 		HandleError(err)
 	}
 
-	// 使用默认参数创建bucket
+	// Create the bucket with default parameters
 	err = client.CreateBucket(bucketName)
 	if err != nil {
 		HandleError(err)
@@ -36,32 +36,32 @@ func BucketCORSSample() {
 		MaxAgeSeconds: 100,
 	}
 
-	// 场景1:设置Bucket的CORS规则
+	// Case 1: Set the bucket CORS rules
 	err = client.SetBucketCORS(bucketName, []oss.CORSRule{rule1})
 	if err != nil {
 		HandleError(err)
 	}
 
-	// 场景2:设置Bucket的CORS规则,如果该Bucket上已经设置了CORS规则,则会覆盖。
+	// Case 2: Set the bucket CORS rules. If CORS rules exist, they will be overwritten.
 	err = client.SetBucketCORS(bucketName, []oss.CORSRule{rule1, rule2})
 	if err != nil {
 		HandleError(err)
 	}
 
-	// 获取Bucket上设置的CORS
+	// Get the bucket's CORS
 	gbl, err := client.GetBucketCORS(bucketName)
 	if err != nil {
 		HandleError(err)
 	}
 	fmt.Println("Bucket CORS:", gbl.CORSRules)
 
-	// 删除Bucket上的CORS设置
+	// Delete bucket's CORS
 	err = client.DeleteBucketCORS(bucketName)
 	if err != nil {
 		HandleError(err)
 	}
 
-	// 删除bucket
+	// Delete bucket
 	err = client.DeleteBucket(bucketName)
 	if err != nil {
 		HandleError(err)

+ 9 - 9
sample/bucket_lifecycle.go

@@ -6,21 +6,21 @@ import (
 	"github.com/aliyun/aliyun-oss-go-sdk/oss"
 )
 
-// BucketLifecycleSample 展示了如何设置/读取/清除存储空间中文件的生命周期(Bucket Lifecycle)
+// BucketLifecycleSample shows how to set, get and delete bucket's lifecycle.
 func BucketLifecycleSample() {
-	// New Client
+	// New client
 	client, err := oss.New(endpoint, accessID, accessKey)
 	if err != nil {
 		HandleError(err)
 	}
 
-	// 使用默认参数创建bucket
+	// Create the bucket with default parameters
 	err = client.CreateBucket(bucketName)
 	if err != nil {
 		HandleError(err)
 	}
 
-	// 场景1:设置Lifecycle,其中规则的id是id1,规则生效的object前缀是one,符合的Object绝对过期时间2015/11/11
+	// Case 1: Set the lifecycle. The rule ID is id1 and the applied objects' prefix is one and expired time is 11/11/2015
 	var rule1 = oss.BuildLifecycleRuleByDate("id1", "one", true, 2015, 11, 11)
 	var rules = []oss.LifecycleRule{rule1}
 	err = client.SetBucketLifecycle(bucketName, rules)
@@ -28,7 +28,7 @@ func BucketLifecycleSample() {
 		HandleError(err)
 	}
 
-	// 场景2:设置Lifecycle,其中规则的id是id2,规则生效的object前缀是two,符合的Object相对过期时间是3天后
+	// Case 2: Set the lifecycle. The rule ID is id2, the applied objects' prefix is two and the expired time is three days after the object is created.
 	var rule2 = oss.BuildLifecycleRuleByDays("id2", "two", true, 3)
 	rules = []oss.LifecycleRule{rule2}
 	err = client.SetBucketLifecycle(bucketName, rules)
@@ -36,7 +36,7 @@ func BucketLifecycleSample() {
 		HandleError(err)
 	}
 
-	// 场景3:在Bucket上同时设置两条规格,两个规则分别作用与不同的对象。规则id相同是会覆盖老的规则。
+	// Case 3: Create two rules in the bucket for different objects. The rule with the same ID will be overwritten.
 	var rule3 = oss.BuildLifecycleRuleByDays("id1", "two", true, 365)
 	var rule4 = oss.BuildLifecycleRuleByDate("id2", "one", true, 2016, 11, 11)
 	rules = []oss.LifecycleRule{rule3, rule4}
@@ -45,20 +45,20 @@ func BucketLifecycleSample() {
 		HandleError(err)
 	}
 
-	// 获取Bucket上设置的Lifecycle
+	// Get the bucket's lifecycle
 	gbl, err := client.GetBucketLifecycle(bucketName)
 	if err != nil {
 		HandleError(err)
 	}
 	fmt.Println("Bucket Lifecycle:", gbl.Rules)
 
-	// 删除Bucket上的Lifecycle设置
+	// Delete bucket's Lifecycle
 	err = client.DeleteBucketLifecycle(bucketName)
 	if err != nil {
 		HandleError(err)
 	}
 
-	// 删除bucket
+	// Delete bucket
 	err = client.DeleteBucket(bucketName)
 	if err != nil {
 		HandleError(err)

+ 13 - 13
sample/bucket_logging.go

@@ -6,52 +6,52 @@ import (
 	"github.com/aliyun/aliyun-oss-go-sdk/oss"
 )
 
-// BucketLoggingSample 展示了如何设置/读取/清除存储空间的日志(Bucket Logging)
+// BucketLoggingSample shows how to set, get and delete the bucket logging configuration
 func BucketLoggingSample() {
-	// New Client
+	// New client
 	client, err := oss.New(endpoint, accessID, accessKey)
 	if err != nil {
 		HandleError(err)
 	}
 
-	// 创建bucket
+	// Create the bucket with default parameters
 	err = client.CreateBucket(bucketName)
 	if err != nil {
 		HandleError(err)
 	}
-	// 创建Target bucket,存储访问日志
+	// Create target bucket to store the logging files.
 	var targetBucketName = "target-bucket"
 	err = client.CreateBucket(targetBucketName)
 	if err != nil {
 		HandleError(err)
 	}
 
-	// 场景1:设置Logging,bucketName中以"prefix"为前缀的object的访问日志将被记录到targetBucketName
+	// Case 1: Set the logging for the object prefixed with "prefix-1" and save their access logs to the target bucket
 	err = client.SetBucketLogging(bucketName, targetBucketName, "prefix-1", true)
 	if err != nil {
 		HandleError(err)
 	}
 
-	// 场景2:设置Logging,bucketName中以"prefix"为前缀的object的访问日志将被记录到bucketName
-	// 注意:相同bucket,相同prefix,多次设置后者会覆盖前者
+	// Case 2: Set the logging for the object prefixed with "prefix-2" and save their logs to the same bucket
+	// Note: the rule will overwrite other rules if they have the same bucket and prefix
 	err = client.SetBucketLogging(bucketName, bucketName, "prefix-2", true)
 	if err != nil {
 		HandleError(err)
 	}
 
-	// 删除Bucket上的Logging设置
+	// Delete the bucket's logging configuration
 	err = client.DeleteBucketLogging(bucketName)
 	if err != nil {
 		HandleError(err)
 	}
 
-	// 场景3:设置但不生效
+	// Case 3: Set the logging without enabling it
 	err = client.SetBucketLogging(bucketName, targetBucketName, "prefix-3", false)
 	if err != nil {
 		HandleError(err)
 	}
 
-	// 获取Bucket上设置的Logging
+	// Get the bucket's logging configuration
 	gbl, err := client.GetBucketLogging(bucketName)
 	if err != nil {
 		HandleError(err)
@@ -63,20 +63,20 @@ func BucketLoggingSample() {
 		HandleError(err)
 	}
 
-	// 获取Bucket上设置的Logging
+	// Get the bucket's logging configuration
 	gbl, err = client.GetBucketLogging(bucketName)
 	if err != nil {
 		HandleError(err)
 	}
 	fmt.Println("Bucket Logging:", gbl.LoggingEnabled)
 
-	// 删除Bucket上的Logging设置
+	// Delete the bucket's logging configuration
 	err = client.DeleteBucketLogging(bucketName)
 	if err != nil {
 		HandleError(err)
 	}
 
-	// 删除bucket
+	// Delete bucket
 	err = client.DeleteBucket(bucketName)
 	if err != nil {
 		HandleError(err)

+ 8 - 8
sample/bucket_referer.go

@@ -6,15 +6,15 @@ import (
 	"github.com/aliyun/aliyun-oss-go-sdk/oss"
 )
 
-// BucketRefererSample 展示了如何设置/读取/清除存储空间的白名单(Bucket Referer)
+// BucketRefererSample shows how to set, get and delete the bucket referer.
 func BucketRefererSample() {
-	// New Client
+	// New client
 	client, err := oss.New(endpoint, accessID, accessKey)
 	if err != nil {
 		HandleError(err)
 	}
 
-	// 使用默认参数创建bucket
+	// Create the bucket with default parameters
 	err = client.CreateBucket(bucketName)
 	if err != nil {
 		HandleError(err)
@@ -26,28 +26,28 @@ func BucketRefererSample() {
 		"http://www.*.com",
 	}
 
-	// 场景1:设置referers,referer中支持?和*,分布代替一个或多个字符
+	// Case 1: Set referers. The referers support the wildcards ? and *, which match one character and zero or more characters respectively
 	err = client.SetBucketReferer(bucketName, referers, false)
 	if err != nil {
 		HandleError(err)
 	}
 
-	// 场景2:清空referers
+	// Case 2: Clear referers
 	referers = []string{}
 	err = client.SetBucketReferer(bucketName, referers, true)
 	if err != nil {
 		HandleError(err)
 	}
 
-	// 获取Bucket上设置的Lifecycle
-	gbr, err := client.GetBucketReferer(bucketName)
+	// Get bucket referer configuration
+	gbr, err := client.GetBucketReferer(bucketName)
 	if err != nil {
 		HandleError(err)
 	}
 	fmt.Println("Bucket Referers:", gbr.RefererList,
 		"AllowEmptyReferer:", gbr.AllowEmptyReferer)
 
-	// 删除bucket
+	// Delete bucket
 	err = client.DeleteBucket(bucketName)
 	if err != nil {
 		HandleError(err)

+ 13 - 13
sample/cname_sample.go

@@ -8,35 +8,35 @@ import (
 	"github.com/aliyun/aliyun-oss-go-sdk/oss"
 )
 
-// CnameSample 展示了Cname的用法
+// CnameSample shows the cname usage
 func CnameSample() {
-	// NewClient
+	// New client
 	client, err := oss.New(endpoint4Cname, accessID4Cname, accessKey4Cname,
 		oss.UseCname(true))
 	if err != nil {
 		HandleError(err)
 	}
 
-	// CreateBucket
+	// Create bucket
 	err = client.CreateBucket(bucketName4Cname)
 	if err != nil {
 		HandleError(err)
 	}
 
-	// SetBucketACL
+	// Set bucket ACL
 	err = client.SetBucketACL(bucketName4Cname, oss.ACLPrivate)
 	if err != nil {
 		HandleError(err)
 	}
 
-	// 查看Bucket ACL
+	// Look up bucket ACL
 	gbar, err := client.GetBucketACL(bucketName4Cname)
 	if err != nil {
 		HandleError(err)
 	}
 	fmt.Println("Bucket ACL:", gbar.ACL)
 
-	// ListBuckets, cname用户不能使用该操作
+	// List buckets, the list operation could not be done by cname's endpoint
 	_, err = client.ListBuckets()
 	if err == nil {
 		HandleError(err)
@@ -47,15 +47,15 @@ func CnameSample() {
 		HandleError(err)
 	}
 
-	objectValue := "长忆观潮,满郭人争江上望。来疑沧海尽成空,万面鼓声中。弄潮儿向涛头立,手把红旗旗不湿。别来几向梦中看,梦觉尚心寒。"
+	objectValue := "长忆观潮, 满郭人争江上望。来疑沧海尽成空, 万面鼓声中。弄潮儿向涛头立, 手把红旗旗不湿。别来几向梦中看, 梦觉尚心寒。"
 
-	// PutObject
+	// Put object
 	err = bucket.PutObject(objectKey, strings.NewReader(objectValue))
 	if err != nil {
 		HandleError(err)
 	}
 
-	// GetObject
+	// Get object
 	body, err := bucket.GetObject(objectKey)
 	if err != nil {
 		HandleError(err)
@@ -67,26 +67,26 @@ func CnameSample() {
 	}
 	fmt.Println(objectKey, ":", string(data))
 
-	// PutObjectFromFile
+	// Put object from file
 	err = bucket.PutObjectFromFile(objectKey, localFile)
 	if err != nil {
 		HandleError(err)
 	}
 
-	// GetObjectToFile
+	// Get object to file
 	err = bucket.GetObjectToFile(objectKey, newPicName)
 	if err != nil {
 		HandleError(err)
 	}
 
-	// ListObjects
+	// List objects
 	lor, err := bucket.ListObjects()
 	if err != nil {
 		HandleError(err)
 	}
 	fmt.Println("objects:", lor.Objects)
 
-	// DeleteObject
+	// Delete object
 	err = bucket.DeleteObject(objectKey)
 	if err != nil {
 		HandleError(err)

+ 14 - 14
sample/comm.go

@@ -14,27 +14,27 @@ var (
 	futureDate = time.Date(2049, time.January, 10, 23, 0, 0, 0, time.UTC)
 )
 
-// HandleError sample中的错误处理
+// HandleError is the error handling method in the sample code
 func HandleError(err error) {
 	fmt.Println("occurred error:", err)
 	os.Exit(-1)
 }
 
-// GetTestBucket 创建sample的Bucket并返回OssBucket对象,该函数为了简化sample,让sample代码更明了
+// GetTestBucket creates the test bucket
 func GetTestBucket(bucketName string) (*oss.Bucket, error) {
-	// New Client
+	// New client
 	client, err := oss.New(endpoint, accessID, accessKey)
 	if err != nil {
 		return nil, err
 	}
 
-	// Create Bucket
+	// Create bucket
 	err = client.CreateBucket(bucketName)
 	if err != nil {
 		return nil, err
 	}
 
-	// Get Bucket
+	// Get bucket
 	bucket, err := client.Bucket(bucketName)
 	if err != nil {
 		return nil, err
@@ -87,21 +87,21 @@ func DeleteTestBucketAndLiveChannel(bucketName string) error {
 	return nil
 }
 
-// DeleteTestBucketAndObject 删除sample的object和bucket,该函数为了简化sample,让sample代码更明了
+// DeleteTestBucketAndObject deletes the test bucket and its objects
 func DeleteTestBucketAndObject(bucketName string) error {
-	// New Client
+	// New client
 	client, err := oss.New(endpoint, accessID, accessKey)
 	if err != nil {
 		return err
 	}
 
-	// Get Bucket
+	// Get bucket
 	bucket, err := client.Bucket(bucketName)
 	if err != nil {
 		return err
 	}
 
-	// Delete Part
+	// Delete part
 	lmur, err := bucket.ListMultipartUploads()
 	if err != nil {
 		return err
@@ -116,7 +116,7 @@ func DeleteTestBucketAndObject(bucketName string) error {
 		}
 	}
 
-	// Delete Objects
+	// Delete objects
 	lor, err := bucket.ListObjects()
 	if err != nil {
 		return err
@@ -129,7 +129,7 @@ func DeleteTestBucketAndObject(bucketName string) error {
 		}
 	}
 
-	// Delete Bucket
+	// Delete bucket
 	err = client.DeleteBucket(bucketName)
 	if err != nil {
 		return err
@@ -138,13 +138,13 @@ func DeleteTestBucketAndObject(bucketName string) error {
 	return nil
 }
 
-// Object pair of key and value
+// Object defines pair of key and value
 type Object struct {
 	Key   string
 	Value string
 }
 
-// CreateObjects 创建一组对象,该函数为了简化sample,让sample代码更明了
+// CreateObjects creates some objects
 func CreateObjects(bucket *oss.Bucket, objects []Object) error {
 	for _, object := range objects {
 		err := bucket.PutObject(object.Key, strings.NewReader(object.Value))
@@ -155,7 +155,7 @@ func CreateObjects(bucket *oss.Bucket, objects []Object) error {
 	return nil
 }
 
-// DeleteObjects 删除sample的object和bucket,该函数为了简化sample,让sample代码更明了
+// DeleteObjects deletes some objects.
 func DeleteObjects(bucket *oss.Bucket, objects []Object) error {
 	for _, object := range objects {
 		err := bucket.DeleteObject(object.Key)

+ 5 - 6
sample/config.go

@@ -1,24 +1,23 @@
 package sample
 
 const (
-	// sample运行的环境配置。如果您需要运行sample,请先修成您的配置。
+	// Sample code's env configuration. You need to specify them with the actual configuration if you want to run sample code
 	endpoint   string = "<endpoint>"
 	accessID   string = "<AccessKeyId>"
 	accessKey  string = "<AccessKeySecret>"
 	bucketName string = "<my-bucket>"
 
-	// 运行cname的示例程序sample/cname_sample的示例程序的配置。
-	// 如果您需要运行sample/cname_sample,请先修成您的配置。
+	// The cname endpoint
+	// These information are required to run sample/cname_sample
 	endpoint4Cname   string = "<endpoint>"
 	accessID4Cname   string = "<AccessKeyId>"
 	accessKey4Cname  string = "<AccessKeySecret>"
 	bucketName4Cname string = "<my-cname-bucket>"
 
-	// 运行sample时的Object名称
+	// The object name in the sample code
 	objectKey string = "my-object"
 
-	// 运行sample需要的资源,即sample目录目录下的BingWallpaper-2015-11-07.jpg
-	// 和The Go Programming Language.html,请根据实际情况修改
+	// The local files to run sample code.
 	localFile     string = "src/sample/BingWallpaper-2015-11-07.jpg"
 	htmlLocalFile string = "src/sample/The Go Programming Language.html"
 	newPicName    string = "src/sample/NewBingWallpaper-2015-11-07.jpg"

+ 16 - 16
sample/copy_object.go

@@ -6,28 +6,28 @@ import (
 	"github.com/aliyun/aliyun-oss-go-sdk/oss"
 )
 
-// CopyObjectSample 展示了拷贝文件的用法
+// CopyObjectSample shows the copy files usage
 func CopyObjectSample() {
-	// 创建Bucket
+	// Create a bucket
 	bucket, err := GetTestBucket(bucketName)
 	if err != nil {
 		HandleError(err)
 	}
 
-	// 创建一个Object
+	// Create an object
 	err = bucket.PutObjectFromFile(objectKey, localFile)
 	if err != nil {
 		HandleError(err)
 	}
 
-	// 场景1:把已经存在的对象copy成一个新对象
+	// Case 1: Copy an existing object
 	var descObjectKey = "descobject"
 	_, err = bucket.CopyObject(objectKey, descObjectKey)
 	if err != nil {
 		HandleError(err)
 	}
 
-	// 场景2:把已经存在的对象copy成一个新对象,目标对象存在时,会覆盖
+	// Case 2: Copy an existing object to another existing object
 	_, err = bucket.CopyObject(objectKey, descObjectKey)
 	if err != nil {
 		HandleError(err)
@@ -38,20 +38,20 @@ func CopyObjectSample() {
 		HandleError(err)
 	}
 
-	// 场景3:对象copy时对源对象执行约束条件,满足时候copy,不满足时返回错误,不执行copy
-	// 约束条件不满足,copy没有执行
+	// Case 3: Copy file with constraints. When the constraints are met, the copy executes. Otherwise the copy does not execute.
+	// Constraints are not met, so the copy does not execute
 	_, err = bucket.CopyObject(objectKey, descObjectKey, oss.CopySourceIfModifiedSince(futureDate))
 	if err == nil {
 		HandleError(err)
 	}
 	fmt.Println("CopyObjectError:", err)
-	// 约束条件满足,copy执行
+	// Constraints are met, the copy executes
 	_, err = bucket.CopyObject(objectKey, descObjectKey, oss.CopySourceIfUnmodifiedSince(futureDate))
 	if err != nil {
 		HandleError(err)
 	}
 
-	// 场景4:对象copy时,可以指定目标对象的Properties,同时一定要指定MetadataDirective为MetaReplace
+	// Case 4: Specify the properties when copying. The MetadataDirective needs to be MetaReplace
 	options := []oss.Option{
 		oss.Expires(futureDate),
 		oss.Meta("myprop", "mypropval"),
@@ -67,7 +67,7 @@ func CopyObjectSample() {
 	}
 	fmt.Println("meta:", meta)
 
-	// 场景5:当源对象和目标对象相同时,目的是用来修改源对象的meta
+	// Case 5: When the source file is the same as the target file, the copy could be used to update metadata
 	options = []oss.Option{
 		oss.Expires(futureDate),
 		oss.Meta("myprop", "mypropval"),
@@ -79,32 +79,32 @@ func CopyObjectSample() {
 	}
 	fmt.Println("meta:", meta)
 
-	// 场景6:大文件分片拷贝,支持并发、断点续传功能。
-	// 分片上传,分片大小为100K。默认使用不使用并发上传,不使用断点续传。
+	// Case 6: Big file's multipart copy. It supports concurrent copy with resumable upload
+	// copy file with multipart. The part size is 100K. By default one routine is used without resumable upload
 	err = bucket.CopyFile(bucketName, objectKey, descObjectKey, 100*1024)
 	if err != nil {
 		HandleError(err)
 	}
 
-	// 分片大小为100K,3个协程并发拷贝。
+	// Part size is 100K and three coroutines for the concurrent copy
 	err = bucket.CopyFile(bucketName, objectKey, descObjectKey, 100*1024, oss.Routines(3))
 	if err != nil {
 		HandleError(err)
 	}
 
-	// 分片大小为100K,3个协程并发拷贝,使用断点续传拷贝文件。
+	// Part size is 100K and three coroutines for the concurrent copy with resumable upload
 	err = bucket.CopyFile(bucketName, objectKey, descObjectKey, 100*1024, oss.Routines(3), oss.Checkpoint(true, ""))
 	if err != nil {
 		HandleError(err)
 	}
 
-	// 断点续传功能需要使用本地文件,记录哪些分片已经上传。该文件路径可以Checkpoint的第二个参数指定,如果为空,则为当前目录下的{descObjectKey}.cp。
+	// Specify the checkpoint file path. If the checkpoint file path is not specified, the current folder is used.
 	err = bucket.CopyFile(bucketName, objectKey, descObjectKey, 100*1024, oss.Checkpoint(true, localFile+".cp"))
 	if err != nil {
 		HandleError(err)
 	}
 
-	// 删除object和bucket
+	// Delete object and bucket
 	err = DeleteTestBucketAndObject(bucketName)
 	if err != nil {
 		HandleError(err)

+ 7 - 7
sample/create_bucket.go

@@ -6,9 +6,9 @@ import (
 	"github.com/aliyun/aliyun-oss-go-sdk/oss"
 )
 
-// CreateBucketSample 展示了如何创建存储空间
+// CreateBucketSample shows how to create bucket
 func CreateBucketSample() {
-	// New Client
+	// New client
 	client, err := oss.New(endpoint, accessID, accessKey)
 	if err != nil {
 		HandleError(err)
@@ -16,31 +16,31 @@ func CreateBucketSample() {
 
 	DeleteTestBucketAndObject(bucketName)
 
-	// 场景1:使用默认参数创建bucket
+	// Case 1: Create a bucket with default parameters
 	err = client.CreateBucket(bucketName)
 	if err != nil {
 		HandleError(err)
 	}
 
-	// 删除bucket
+	// Delete bucket
 	err = client.DeleteBucket(bucketName)
 	if err != nil {
 		HandleError(err)
 	}
 
-	// 场景2:创建bucket时指定其权限
+	// Case 2: Create the bucket with ACL
 	err = client.CreateBucket(bucketName, oss.ACL(oss.ACLPublicRead))
 	if err != nil {
 		HandleError(err)
 	}
 
-	// 场景3:重复创建OSS不会报错,但是不做任何操作,指定的ACL无效
+	// Case 3: Create the same bucket again. OSS will not return an error, but performs no operation. The specified ACL is not applied.
 	err = client.CreateBucket(bucketName, oss.ACL(oss.ACLPublicReadWrite))
 	if err != nil {
 		HandleError(err)
 	}
 
-	// 删除bucket
+	// Delete bucket
 	err = client.DeleteBucket(bucketName)
 	if err != nil {
 		HandleError(err)

+ 7 - 7
sample/delete_object.go

@@ -7,9 +7,9 @@ import (
 	"github.com/aliyun/aliyun-oss-go-sdk/oss"
 )
 
-// DeleteObjectSample 展示了删除单个文件、批量删除文件的方法
+// DeleteObjectSample shows how to delete single file or multiple files
 func DeleteObjectSample() {
-	// 创建Bucket
+	// Create a bucket
 	bucket, err := GetTestBucket(bucketName)
 	if err != nil {
 		HandleError(err)
@@ -17,7 +17,7 @@ func DeleteObjectSample() {
 
 	var val = "抽刀断水水更流,举杯销愁愁更愁。 人生在世不称意,明朝散发弄扁舟。"
 
-	// 场景1:删除Object
+	// Case 1: Delete an object
 	err = bucket.PutObject(objectKey, strings.NewReader(val))
 	if err != nil {
 		HandleError(err)
@@ -28,7 +28,7 @@ func DeleteObjectSample() {
 		HandleError(err)
 	}
 
-	// 场景2:删除多个Object
+	// Case 2: Delete multiple Objects
 	err = bucket.PutObject(objectKey+"1", strings.NewReader(val))
 	if err != nil {
 		HandleError(err)
@@ -51,7 +51,7 @@ func DeleteObjectSample() {
 	}
 	fmt.Println("Objects:", getObjectsFormResponse(lsRes))
 
-	// 场景3:删除多个Object,详细模式时返回的结果中会包含成功删除的Object,默认该模式
+	// Case 3: Delete multiple objects and it will return deleted objects in detail mode which is by default.
 	err = bucket.PutObject(objectKey+"1", strings.NewReader(val))
 	if err != nil {
 		HandleError(err)
@@ -75,7 +75,7 @@ func DeleteObjectSample() {
 	}
 	fmt.Println("Objects:", getObjectsFormResponse(lsRes))
 
-	// 场景4:删除多个Object,简单模式返回的消息体中只包含删除出错的Object结果
+	// Case 4: Delete multiple objects and returns undeleted objects in quiet mode
 	err = bucket.PutObject(objectKey+"1", strings.NewReader(val))
 	if err != nil {
 		HandleError(err)
@@ -98,7 +98,7 @@ func DeleteObjectSample() {
 	}
 	fmt.Println("Objects:", getObjectsFormResponse(lsRes))
 
-	// 删除object和bucket
+	// Delete object and bucket
 	err = DeleteTestBucketAndObject(bucketName)
 	if err != nil {
 		HandleError(err)

+ 21 - 20
sample/get_object.go

@@ -10,21 +10,21 @@ import (
 	"github.com/aliyun/aliyun-oss-go-sdk/oss"
 )
 
-// GetObjectSample 展示了流式下载、范围下载、断点续传下载的用法
+// GetObjectSample shows the streaming download, range download and resumable download.
 func GetObjectSample() {
-	// 创建Bucket
+	// Create bucket
 	bucket, err := GetTestBucket(bucketName)
 	if err != nil {
 		HandleError(err)
 	}
 
-	// 上传对象
+	// Upload the object
 	err = bucket.PutObjectFromFile(objectKey, localFile)
 	if err != nil {
 		HandleError(err)
 	}
 
-	// 场景1:下载object存储到ReadCloser,注意需要Close。
+	// Case 1: Download the object into ReadCloser(). The body needs to be closed
 	body, err := bucket.GetObject(objectKey)
 	if err != nil {
 		HandleError(err)
@@ -36,7 +36,7 @@ func GetObjectSample() {
 	}
 	data = data // use data
 
-	// 场景2:下载object存储到bytes数组,适合小对象。
+	// Case 2: Download the object to byte array. This is for small object.
 	buf := new(bytes.Buffer)
 	body, err = bucket.GetObject(objectKey)
 	if err != nil {
@@ -45,7 +45,7 @@ func GetObjectSample() {
 	io.Copy(buf, body)
 	body.Close()
 
-	// 场景3:下载object存储到本地文件,用户打开文件传入句柄。
+	// Case 3: Download the object to local file. The file handle needs to be specified
 	fd, err := os.OpenFile("mynewfile-1.jpg", os.O_WRONLY|os.O_CREATE, 0660)
 	if err != nil {
 		HandleError(err)
@@ -59,20 +59,20 @@ func GetObjectSample() {
 	io.Copy(fd, body)
 	body.Close()
 
-	// 场景4:下载object存储到本地文件。
+	// Case 4: Download the object to local file with file name specified
 	err = bucket.GetObjectToFile(objectKey, "mynewfile-2.jpg")
 	if err != nil {
 		HandleError(err)
 	}
 
-	// 场景5:满足约束条件下载,否则返回错误。GetObject/GetObjectToFile/DownloadFile都支持该功能。
-	// 修改时间,约束条件满足,执行下载。
+	// Case 5: Get the object with constraints. When the constraints are met, download the file. Otherwise return a precondition error
+	// last modified time constraint is met, download the file
 	body, err = bucket.GetObject(objectKey, oss.IfModifiedSince(pastDate))
 	if err != nil {
 		HandleError(err)
 	}
 	body.Close()
-	// 修改时间,约束条件不满足,不执行下载。
+	// Last modified time constraint is not met, do not download the file
 	_, err = bucket.GetObject(objectKey, oss.IfUnmodifiedSince(pastDate))
 	if err == nil {
 		HandleError(err)
@@ -83,45 +83,46 @@ func GetObjectSample() {
 		HandleError(err)
 	}
 	etag := meta.Get(oss.HTTPHeaderEtag)
-	// 校验内容,约束条件满足,执行下载。
+	// Check the content, ETag constraint is met, download the file
 	body, err = bucket.GetObject(objectKey, oss.IfMatch(etag))
 	if err != nil {
 		HandleError(err)
 	}
 	body.Close()
 
-	// 校验内容,约束条件不满足,不执行下载。
+	// Check the content, ETag constraint is not met, do not download the file
 	body, err = bucket.GetObject(objectKey, oss.IfNoneMatch(etag))
 	if err == nil {
 		HandleError(err)
 	}
 
-	// 场景6:大文件分片下载,支持并发下载,断点续传功能。
-	// 分片下载,分片大小为100K。默认使用不使用并发下载,不使用断点续传。
+	// Case 6: Big file's multipart download, concurrent and resumable download is supported.
+	// multipart download with part size 100KB. By default single coroutine is used and no checkpoint
 	err = bucket.DownloadFile(objectKey, "mynewfile-3.jpg", 100*1024)
 	if err != nil {
 		HandleError(err)
 	}
 
-	// 分片大小为100K,3个协程并发下载。
+	// Part size is 100K and 3 coroutines are used
 	err = bucket.DownloadFile(objectKey, "mynewfile-3.jpg", 100*1024, oss.Routines(3))
 	if err != nil {
 		HandleError(err)
 	}
 
-	// 分片大小为100K,3个协程并发下载,使用断点续传下载文件。
+	// Part size is 100K and 3 coroutines with checkpoint
 	err = bucket.DownloadFile(objectKey, "mynewfile-3.jpg", 100*1024, oss.Routines(3), oss.Checkpoint(true, ""))
 	if err != nil {
 		HandleError(err)
 	}
-
-	// 断点续传功能需要使用本地文件,记录哪些分片已经下载。该文件路径可以Checkpoint的第二个参数指定,如果为空,则为下载文件目录。
+	
+	// Specify the checkpoint file path to record which parts have been downloaded.
+	// This file path can be specified by the 2nd parameter of Checkpoint; if the file path is empty, the checkpoint file is placed in the download file's directory.
 	err = bucket.DownloadFile(objectKey, "mynewfile-3.jpg", 100*1024, oss.Checkpoint(true, "mynewfile.cp"))
 	if err != nil {
 		HandleError(err)
 	}
 
-	// 场景7:内容进行 GZIP压缩传输的用户。GetObject/GetObjectToFile具有相同功能。
+	// Case 7: Use GZIP encoding for downloading the file, GetObject/GetObjectToFile are the same.
 	err = bucket.PutObjectFromFile(objectKey, htmlLocalFile)
 	if err != nil {
 		HandleError(err)
@@ -132,7 +133,7 @@ func GetObjectSample() {
 		HandleError(err)
 	}
 
-	// 删除object和bucket
+	// Delete the object and bucket
 	err = DeleteTestBucketAndObject(bucketName)
 	if err != nil {
 		HandleError(err)

+ 12 - 12
sample/list_buckets.go

@@ -6,7 +6,7 @@ import (
 	"github.com/aliyun/aliyun-oss-go-sdk/oss"
 )
 
-// ListBucketsSample 展示了列举存储空间的用法,包括默认参数列举、指定参数列举
+// ListBucketsSample shows the list bucket, including default and specified parameters.
 func ListBucketsSample() {
 	var myBuckets = []string{
 		"my-bucket-1",
@@ -18,13 +18,13 @@ func ListBucketsSample() {
 		"my-bucket-31",
 		"my-bucket-32"}
 
-	// New Client
+	// New client
 	client, err := oss.New(endpoint, accessID, accessKey)
 	if err != nil {
 		HandleError(err)
 	}
 
-	// remove other bucket
+	// Remove other bucket
 	lbr, err := client.ListBuckets()
 	if err != nil {
 		HandleError(err)
@@ -37,7 +37,7 @@ func ListBucketsSample() {
 		}
 	}
 
-	// 创建bucket
+	// Create bucket
 	for _, bucketName := range myBuckets {
 		err = client.CreateBucket(bucketName)
 		if err != nil {
@@ -45,35 +45,35 @@ func ListBucketsSample() {
 		}
 	}
 
-	// 场景1:使用默认参数参数
+	// Case 1: Use default parameter
 	lbr, err = client.ListBuckets()
 	if err != nil {
 		HandleError(err)
 	}
 	fmt.Println("my buckets:", lbr.Buckets)
 
-	// 场景2:指定最大返回数量
+	// Case 2: Specify the max keys : 3
 	lbr, err = client.ListBuckets(oss.MaxKeys(3))
 	if err != nil {
 		HandleError(err)
 	}
 	fmt.Println("my buckets max num:", lbr.Buckets)
 
-	// 场景3:返回指定前缀的Bucket
+	// Case 3: Specify the prefix of buckets.
 	lbr, err = client.ListBuckets(oss.Prefix("my-bucket-2"))
 	if err != nil {
 		HandleError(err)
 	}
 	fmt.Println("my buckets prefix :", lbr.Buckets)
 
-	// 场景4:指定从某个之后返回
+	// Case 4: Specify the marker to return from a certain one
 	lbr, err = client.ListBuckets(oss.Marker("my-bucket-22"))
 	if err != nil {
 		HandleError(err)
 	}
 	fmt.Println("my buckets marker :", lbr.Buckets)
 
-	// 场景5:分页获取所有bucket,每次返回3个
+	// Case 5: Specify max key and list all buckets with paging, return 3 items each time.
 	marker := oss.Marker("")
 	for {
 		lbr, err = client.ListBuckets(oss.MaxKeys(3), marker)
@@ -87,7 +87,7 @@ func ListBucketsSample() {
 		}
 	}
 
-	// 场景6:分页所有获取从某个之后的bucket,每次返回3个
+	// Case 6: List bucket with marker and max key; return 3 items each time.
 	marker = oss.Marker("my-bucket-22")
 	for {
 		lbr, err = client.ListBuckets(oss.MaxKeys(3), marker)
@@ -101,7 +101,7 @@ func ListBucketsSample() {
 		}
 	}
 
-	// 场景7:分页所有获取前缀的bucket,每次返回3个
+	// Case 7: List bucket with prefix and max key, return 3 items each time.
 	pre := oss.Prefix("my-bucket-2")
 	marker = oss.Marker("")
 	for {
@@ -117,7 +117,7 @@ func ListBucketsSample() {
 		}
 	}
 
-	// 删除bucket
+	// Delete bucket
 	for _, bucketName := range myBuckets {
 		err = client.DeleteBucket(bucketName)
 		if err != nil {

+ 14 - 14
sample/list_objects.go

@@ -6,7 +6,7 @@ import (
 	"github.com/aliyun/aliyun-oss-go-sdk/oss"
 )
 
-// ListObjectsSample 展示了列举文件的用法,包括默认参数列举、指定参数列举
+// ListObjectsSample shows the file list, including default and specified parameters.
 func ListObjectsSample() {
 	var myObjects = []Object{
 		{"my-object-1", ""},
@@ -18,47 +18,47 @@ func ListObjectsSample() {
 		{"my-object-31", ""},
 		{"my-object-32", ""}}
 
-	// 创建Bucket
+	// Create bucket
 	bucket, err := GetTestBucket(bucketName)
 	if err != nil {
 		HandleError(err)
 	}
 
-	// 创建object
+	// Create objects
 	err = CreateObjects(bucket, myObjects)
 	if err != nil {
 		HandleError(err)
 	}
 
-	// 场景1:使用默认参数参数
+	// Case 1: Use default parameters
 	lor, err := bucket.ListObjects()
 	if err != nil {
 		HandleError(err)
 	}
 	fmt.Println("my objects:", getObjectsFormResponse(lor))
 
-	// 场景2:指定最大返回数量
+	// Case 2: Specify max keys
 	lor, err = bucket.ListObjects(oss.MaxKeys(3))
 	if err != nil {
 		HandleError(err)
 	}
 	fmt.Println("my objects max num:", getObjectsFormResponse(lor))
 
-	// 场景3:返回指定前缀的Bucket
+	// Case 3: Specify prefix of objects
 	lor, err = bucket.ListObjects(oss.Prefix("my-object-2"))
 	if err != nil {
 		HandleError(err)
 	}
 	fmt.Println("my objects prefix :", getObjectsFormResponse(lor))
 
-	// 场景4:指定从某个之后返回
+	// Case 4: Specify the marker
 	lor, err = bucket.ListObjects(oss.Marker("my-object-22"))
 	if err != nil {
 		HandleError(err)
 	}
 	fmt.Println("my objects marker :", getObjectsFormResponse(lor))
 
-	// 场景5:分页获取所有object,每次返回3个
+	// Case 5: List object with paging. each page has 3 objects
 	marker := oss.Marker("")
 	for {
 		lor, err = bucket.ListObjects(oss.MaxKeys(3), marker)
@@ -72,7 +72,7 @@ func ListObjectsSample() {
 		}
 	}
 
-	// 场景6:分页所有获取从某个之后的object,每次返回3个
+	// Case 6: List object with paging , marker and max keys; return 3 items each time.
 	marker = oss.Marker("my-object-22")
 	for {
 		lor, err = bucket.ListObjects(oss.MaxKeys(3), marker)
@@ -86,7 +86,7 @@ func ListObjectsSample() {
 		}
 	}
 
-	// 场景7:分页所有获取前缀的object,每次返回2个
+	// Case 7: List object with paging , with prefix and max keys; return 2 items each time.
 	pre := oss.Prefix("my-object-2")
 	marker = oss.Marker("")
 	for {
@@ -107,8 +107,8 @@ func ListObjectsSample() {
 		HandleError(err)
 	}
 
-	// 场景8:prefix和delimiter结合,完成分组功能,ListObjectsResponse.Objects表示不再组中,
-	// ListObjectsResponse.CommonPrefixes分组结果
+	// Case 8: Combine the prefix and delimiter for grouping. ListObjectsResponse.Objects is the objects returned.
+	// ListObjectsResponse.CommonPrefixes is the common prefixes returned.
 	myObjects = []Object{
 		{"fun/test.txt", ""},
 		{"fun/test.jpg", ""},
@@ -117,7 +117,7 @@ func ListObjectsSample() {
 		{"fun/music/001.mp3", ""},
 		{"fun/music/001.mp3", ""}}
 
-	// 创建object
+	// Create object
 	err = CreateObjects(bucket, myObjects)
 	if err != nil {
 		HandleError(err)
@@ -130,7 +130,7 @@ func ListObjectsSample() {
 	fmt.Println("my objects prefix :", getObjectsFormResponse(lor),
 		"common prefixes:", lor.CommonPrefixes)
 
-	// 删除object和bucket
+	// Delete object and bucket
 	err = DeleteTestBucketAndObject(bucketName)
 	if err != nil {
 		HandleError(err)

+ 7 - 7
sample/new_bucket.go

@@ -7,40 +7,40 @@ import (
 	"github.com/aliyun/aliyun-oss-go-sdk/oss"
 )
 
-// NewBucketSample 展示了如何初始化Client、Bucket
+// NewBucketSample shows how to initialize client and bucket
 func NewBucketSample() {
-	// New Client
+	// New client
 	client, err := oss.New(endpoint, accessID, accessKey)
 	if err != nil {
 		HandleError(err)
 	}
 
-	// Create Bucket
+	// Create bucket
 	err = client.CreateBucket(bucketName)
 	if err != nil {
 		HandleError(err)
 	}
 
-	// New Bucket
+	// New bucket
 	bucket, err := client.Bucket(bucketName)
 	if err != nil {
 		HandleError(err)
 	}
 
-	// Put Object,上传一个Object
+	// Put object, uploads an object
 	var objectName = "myobject"
 	err = bucket.PutObject(objectName, strings.NewReader("MyObjectValue"))
 	if err != nil {
 		HandleError(err)
 	}
 
-	// Delete Object,删除Object
+	// Delete object, deletes an object
 	err = bucket.DeleteObject(objectName)
 	if err != nil {
 		HandleError(err)
 	}
 
-	// 删除bucket
+	// Delete bucket
 	err = client.DeleteBucket(bucketName)
 	if err != nil {
 		HandleError(err)

+ 6 - 6
sample/object_acl.go

@@ -7,34 +7,34 @@ import (
 	"github.com/aliyun/aliyun-oss-go-sdk/oss"
 )
 
-// ObjectACLSample 展示了如何设置、读取文件权限(object acl)
+// ObjectACLSample shows how to set and get object ACL
 func ObjectACLSample() {
-	// 创建Bucket
+	// Create bucket
 	bucket, err := GetTestBucket(bucketName)
 	if err != nil {
 		HandleError(err)
 	}
 
-	// 创建object
+	// Create object
 	err = bucket.PutObject(objectKey, strings.NewReader("YoursObjectValue"))
 	if err != nil {
 		HandleError(err)
 	}
 
-	// 场景:设置Bucket ACL,可选权限有ACLPrivate、ACLPublicRead、ACLPublicReadWrite
+	// Case 1: Set object ACL. Valid ACLs are ACLPrivate, ACLPublicRead and ACLPublicReadWrite
 	err = bucket.SetObjectACL(objectKey, oss.ACLPrivate)
 	if err != nil {
 		HandleError(err)
 	}
 
-	// 查看Object ACL,返回的权限标识为private、public-read、public-read-write其中之一
+	// Get object ACL. It returns one of the three values: private, public-read and public-read-write
 	goar, err := bucket.GetObjectACL(objectKey)
 	if err != nil {
 		HandleError(err)
 	}
 	fmt.Println("Object ACL:", goar.ACL)
 
-	// 删除object和bucket
+	// Delete object and bucket
 	err = DeleteTestBucketAndObject(bucketName)
 	if err != nil {
 		HandleError(err)

+ 9 - 9
sample/object_meta.go

@@ -7,22 +7,22 @@ import (
 	"github.com/aliyun/aliyun-oss-go-sdk/oss"
 )
 
-// ObjectMetaSample 展示了如何设置、读取文件元数据(object meta)
+// ObjectMetaSample shows how to get and set the object metadata
 func ObjectMetaSample() {
-	// 创建Bucket
+	// Create bucket
 	bucket, err := GetTestBucket(bucketName)
 	if err != nil {
 		HandleError(err)
 	}
 
-	// 创建object
+	// Create object
 	err = bucket.PutObject(objectKey, strings.NewReader("YoursObjectValue"))
 	if err != nil {
 		HandleError(err)
 	}
 
-	// 场景:设置Bucket Meta,可以设置一个或多个属性。
-	// 注意:Meta不区分大小写
+	// Case 0: Set the object metadata. One or more properties could be set
+	// Note: Meta is case insensitive
 	options := []oss.Option{
 		oss.Expires(futureDate),
 		oss.Meta("myprop", "mypropval")}
@@ -31,21 +31,21 @@ func ObjectMetaSample() {
 		HandleError(err)
 	}
 
-	// 场景1:查看Object的meta,只返回少量基本meta信息,如ETag、Size、LastModified。
+	// Case 1: Get the object metadata. Only return basic meta information includes ETag, size and last modified.
 	props, err := bucket.GetObjectMeta(objectKey)
 	if err != nil {
 		HandleError(err)
 	}
 	fmt.Println("Object Meta:", props)
 
-	// 场景2:查看Object的所有Meta,包括自定义的meta。
+	// Case 2: Get all the detailed object meta including custom meta
 	props, err = bucket.GetObjectDetailedMeta(objectKey)
 	if err != nil {
 		HandleError(err)
 	}
 	fmt.Println("Expires:", props.Get("Expires"))
 
-	// 场景3:查看Object的所有Meta,符合约束条件返回,不符合约束条件保存,包括自定义的meta。
+	// Case 3: Get all the object's metadata with constraints. When the constraints are met, return the metadata.
 	props, err = bucket.GetObjectDetailedMeta(objectKey, oss.IfUnmodifiedSince(futureDate))
 	if err != nil {
 		HandleError(err)
@@ -63,7 +63,7 @@ func ObjectMetaSample() {
 	}
 	fmt.Println("Object ACL:", goar.ACL)
 
-	// 删除object和bucket
+	// Delete object and bucket
 	err = DeleteTestBucketAndObject(bucketName)
 	if err != nil {
 		HandleError(err)

+ 14 - 13
sample/put_object.go

@@ -9,9 +9,9 @@ import (
 	"github.com/aliyun/aliyun-oss-go-sdk/oss"
 )
 
-// PutObjectSample 展示了简单上传、断点续传的使用方法
+// PutObjectSample illustrates two methods for uploading a file: simple upload and multipart upload.
 func PutObjectSample() {
-	// 创建Bucket
+	// Create bucket
 	bucket, err := GetTestBucket(bucketName)
 	if err != nil {
 		HandleError(err)
@@ -19,19 +19,19 @@ func PutObjectSample() {
 
 	var val = "花间一壶酒,独酌无相亲。 举杯邀明月,对影成三人。"
 
-	// 场景1:上传object,value是字符串。
+	// Case 1: Upload an object from a string
 	err = bucket.PutObject(objectKey, strings.NewReader(val))
 	if err != nil {
 		HandleError(err)
 	}
 
-	// 场景2:上传object,value是[]byte。
+	// Case 2: Upload an object whose value is a byte[]
 	err = bucket.PutObject(objectKey, bytes.NewReader([]byte(val)))
 	if err != nil {
 		HandleError(err)
 	}
 
-	// 场景3:上传本地文件,用户打开文件,传入句柄。
+	// Case 3: Upload the local file with a file handle; the user should open the file first.
 	fd, err := os.Open(localFile)
 	if err != nil {
 		HandleError(err)
@@ -43,13 +43,13 @@ func PutObjectSample() {
 		HandleError(err)
 	}
 
-	// 场景4:上传本地文件,不需要打开文件。
+	// Case 4: Upload an object with local file name, user need not open the file.
 	err = bucket.PutObjectFromFile(objectKey, localFile)
 	if err != nil {
 		HandleError(err)
 	}
 
-	// 场景5:上传object,上传时指定对象属性。PutObject/PutObjectFromFile/UploadFile都支持该功能。
+	// Case 5: Upload an object with specified properties, PutObject/PutObjectFromFile/UploadFile also support this feature.
 	options := []oss.Option{
 		oss.Expires(futureDate),
 		oss.ObjectACL(oss.ACLPublicRead),
@@ -66,32 +66,33 @@ func PutObjectSample() {
 	}
 	fmt.Println("Object Meta:", props)
 
-	// 场景6:大文件分片上传,支持并发上传,断点续传功能。
-	// 分片上传,分片大小为100K。默认使用不使用并发上传,不使用断点续传。
+	// Case 6: Big file's multipart upload. It supports concurrent upload with resumable upload.
+	// multipart upload with 100K as part size. By default 1 coroutine is used and no checkpoint is used.
 	err = bucket.UploadFile(objectKey, localFile, 100*1024)
 	if err != nil {
 		HandleError(err)
 	}
 
-	// 分片大小为100K,3个协程并发上传。
+	// Part size is 100K and 3 coroutines are used
 	err = bucket.UploadFile(objectKey, localFile, 100*1024, oss.Routines(3))
 	if err != nil {
 		HandleError(err)
 	}
 
-	// 分片大小为100K,3个协程并发下载,使用断点续传上传文件。
+	// Part size is 100K and 3 coroutines with checkpoint
 	err = bucket.UploadFile(objectKey, localFile, 100*1024, oss.Routines(3), oss.Checkpoint(true, ""))
 	if err != nil {
 		HandleError(err)
 	}
 
-	// 断点续传功能需要使用本地文件,记录哪些分片已经上传。该文件路径可以Checkpoint的第二个参数指定,如果为空,则为上传文件目录。
+	// Specify the local file path of the checkpoint file to record which parts have been uploaded.
+	// The 2nd parameter of Checkpoint specifies the file path; if it is empty, the checkpoint file is placed in the upload file's directory.
 	err = bucket.UploadFile(objectKey, localFile, 100*1024, oss.Checkpoint(true, localFile+".cp"))
 	if err != nil {
 		HandleError(err)
 	}
 
-	// 删除object和bucket
+	// Delete object and bucket
 	err = DeleteTestBucketAndObject(bucketName)
 	if err != nil {
 		HandleError(err)

+ 7 - 7
sample/sign_url.go

@@ -8,15 +8,15 @@ import (
 	"github.com/aliyun/aliyun-oss-go-sdk/oss"
 )
 
-// SignURLSample sign url sample
+// SignURLSample signs URL sample
 func SignURLSample() {
-	// 创建Bucket
+	// Create bucket
 	bucket, err := GetTestBucket(bucketName)
 	if err != nil {
 		HandleError(err)
 	}
 
-	// put object
+	// Put object
 	signedURL, err := bucket.SignURL(objectKey, oss.HTTPPut, 60)
 	if err != nil {
 		HandleError(err)
@@ -28,7 +28,7 @@ func SignURLSample() {
 		HandleError(err)
 	}
 
-	// put object with option
+	// Put object with option
 	options := []oss.Option{
 		oss.Meta("myprop", "mypropval"),
 		oss.ContentType("image/tiff"),
@@ -44,7 +44,7 @@ func SignURLSample() {
 		HandleError(err)
 	}
 
-	// get object
+	// Get object
 	signedURL, err = bucket.SignURL(objectKey, oss.HTTPGet, 60)
 	if err != nil {
 		HandleError(err)
@@ -54,7 +54,7 @@ func SignURLSample() {
 	if err != nil {
 		HandleError(err)
 	}
-	// read content
+	// Read content
 	data, err := ioutil.ReadAll(body)
 	body.Close()
 	data = data // use data
@@ -64,7 +64,7 @@ func SignURLSample() {
 		HandleError(err)
 	}
 
-	// 删除object和bucket
+	// Delete the object and bucket
 	err = DeleteTestBucketAndObject(bucketName)
 	if err != nil {
 		HandleError(err)