ソースを参照

Merge pull request #202 from aliyun/preview_v2.0.1

Preview v2.0.1
wangtaowei 6 年 前
コミット
a837721af4
23 ファイル変更1758 行追加275 行削除
  1. 3 0
      .travis.yml
  2. 20 6
      oss/auth.go
  3. 615 0
      oss/bucket_credential_test.go
  4. 179 0
      oss/bucket_test.go
  5. 195 119
      oss/client.go
  6. 210 4
      oss/client_test.go
  7. 68 23
      oss/conf.go
  8. 24 20
      oss/conn.go
  9. 4 1
      oss/conn_test.go
  10. 6 2
      oss/const.go
  11. 12 10
      oss/download.go
  12. 12 10
      oss/multicopy.go
  13. 15 1
      oss/option.go
  14. 5 3
      oss/progress.go
  15. 110 33
      oss/progress_test.go
  16. 27 0
      oss/type.go
  17. 14 8
      oss/upload.go
  18. 25 23
      sample.go
  19. 6 0
      sample/bucket_policy.go
  20. 65 0
      sample/bucket_qosInfo.go
  21. 117 0
      sample/bucket_requestpayment.go
  22. 5 0
      sample/config.go
  23. 21 12
      sample/get_object.go

+ 3 - 0
.travis.yml

@@ -40,3 +40,6 @@ env:
   - secure: IM9VEzf9MrtQnzK3VSD+YxmZWycxTgB5YbINDwE0LsAwWUWucDq95+2ZZRjKLrip5uYvoMohC5X2lxKuLhW8uDN3M6MxondHcCjgBCh8hWP7JociXWMxSIWA5ctU0oCgobm6rKvbarDuO8Bx+NE4QDDAWgjy9oOywE7mVi1G2//LY+2zC+iwXlkLs7VRdo9wtKgIG+FvswiJHhvFbU6GYxOeh1xclLnjwoWf66zzzbIW9Z4bFY0Zun/3u1ySQJWfF03/6ZzlCi3JPvfXDQ35T6PbD/5BvMDPmiXyWGqKUpZfRNus8VkG1G8TWwvHsVct9M47sZqLDuyzWVcwx613i8uxDsEIp9pVy4IxVl2UxvDJH1CPnNdvEhS3NeD9XfkMKMJtShHu1iVzB+j668W5SXsiv/N+psa2SIs+wuDY7FSirRg2Uwk9mlsXS+lSgV9B8BEcEIPEngA40MZCrEzNUdI0iYxaaqB6GNWeHiszOWz94rMkXk+FF3Ic75O4RyCmfXJmR8wYQs+rH4j7JJGzwTHXpYpV7H5V0OtMVIh6EUEvu3Veu/lkT4rsop55OHmcBmpKv8gGllX1P+g7H0+g35wn6Hz+FaSlkPvAuuSudgHFznOtfCowYhDUVp0TxTkcraLhkiROD4Qidw8r/HWh5CzrkfM9LWEBvziWFM77X/M=
   - secure: SfJARcGrIyoemZ1jUENyX272acqmDTqQjk0LBjuzWxR5gqWp0Bn5TJS6x7niD8qzVC/n2FwR7F9FG3ll0cmToBgb8vh9763aL+ciA8cfk8iIUdjjQHNkywdhiLLw1EiR4TuUGOB3pV03WxMXl5JJTopJHScHgvTHfyhHS4vnQdrU3j6PwFDvVLsECyFiTCflDUgZ6zHocbIT4fqVjoBxy+5R0t9vyb4eE3IE3Lqjnac5oYVXm4KCpGUYDdCL0W2OlwPYgaBtWGetoWj/v6Kg4cjUVbKJHGK1q2nKyrCGPRvoKTqw83Pkfw1zkSdC0kCozMKr/gYsYBpt5IFXHaON/OkjvgdLbVKrpAHoauQ5KkWcTg9ntuUNndtg5pMaitBmd/J2AaMIxEde8ZvgfDR3sZdv0umFzLXJeAu/tbLR6ozP/BwOFpj89Lw0jNyNxJjdkLHP/4M2W5Ka0W1s+F6xNTahTuXdV32laPGN8SSmBrjzXwFOLOQTCgwlbD+YbTqAXUs+DLq/zIT6ZTaEfOdas+HTsu6H+cQfsmv9JlMP7PoT/CbL2tYe8kA2tjmxsQwUlVTcE1nP5ni5kHX+yXuClTzLimY+BOqU9SLJBKv6umygFqt/9nXk0YU6+UqfeDFCVzrMN0vnzWkmB0SQ3ElDC6pbb6NE/2RgmIy1pAUMDzY=
   - secure: NPgEmEHVxJrLtaQ35IJnmQSbEHMqzSJt1l0OHX+a6VzfBiAP9Z4Qv1Ita/SQZpA4roLK2YkGr6TALndXgiY1EXU3e7ebcPnCgaQ6mCSJpAKtXhSGmArvysEmmxKSJjMxc8aBlJAeeEQlngXb0/pqA//kMw5KC+pPo+8O3UUbBs0qY9Q4FX3/cU+chv04tk5cSVR2P9nhOqY7LYm+FXjwF4vfjGKF7mWEcQUzTS8E2dRsertl/4law5O9/0gEW/9MaqyylPxq+0pS38sXP25zYotgEL3L8t0fupi58loPioDndardxVl35SXaatPuOxEdHr3Q0Q3dKVjbrTEx8bP7NgLY4nh7r5hqcEtvlM0xCAsfiOmGe0Dd0BFqo7kxzK/Fl6oI4MoEfvAYR+1QyBy9zOAJmjoj1hu/NEDAtT9NPxXJKl4A/iJPIYIt3lrMw34ewTSLl16ERojx6CZfSqg58eyUndmmOPBB8b4rIX0fQj5/ShsoLbM/rmOmDpPXN0QGu2EHhtR2eCbpdan84VfPifi5LjPH42PjdUxpJ+rOHZJHLesJ5JwQLFLrnDf7Bg96iucyz1QMpNFoqN+q4YDARot2zdw+1ISjgPx3bhHu8ly8oT1UYNl5mMjv7zfNDvsmBycpQEBQWGvIioRfx6dM/EtZy2E/og5Ci0j9v19YI2E=
+  - secure: f52LTJZfcwkCnlK0ig1/iskZxeTRyU5dGBjuF2f4CKXdV1ajlfjlNj/oX8V7eVYvSVIz1IkE9h2pi2x+P7Jp63ITdzNPAQlbaiqJqEG15g4JicRvTFgKba+dKfZL8lK+mHd/QZE6dd1QsjPuU2BbHuqKccS3NyrqhFkSFGD2eQtft6fXg7O7uFMEdEss8J2H9rYkmV/jH8H3xkaxH/EBeHa03NCMUBWampQ69SKstg+vSMXQCvrQWLbfpq+/K5SWnAW4UoWrvtlFT6lyFeWUTEXmHP8W8/DgubaMZCbMqrautsw4Qjf5VGucgitMVXEkT14MZIJtvhqjF+KPER8D0oPATDp5lb6k7iI100sZkpESGed2rnTwxP4rOrYpT1K3xju1iaTjUpVOZ55yCHCPyB5G1e5BYCEoXlnO2YzLpuGJJs02Mb/cSLL/JgyjPqI5HguN1mz1SsCG9yxknuEdjwwhyHDYudi3FZ8f8RooOE/SQBA7zycvSd5OqvlzgnN3hzG7cA1Bje3E7v1b4rBjF0sfkPpjadvleq2rex8THhFQeVFlrGvRS777/+xg5IA9C5wLaKqAvcbTMV4hzWO89L7W/+13vfH6JecdQGkbxmdr+Y/bDn1SwflfFcUiju9/p64T2rYFGDbwu5jDKV5KGhbMFbUJxds0ETyUKpGOCRs=
+  - secure: BDZ2Skv2cJU3mzK9fYISp3qWjJHXKUtQIUw6dYN0MYN78LTApjF1af3NLtjhPP92zcdAefAYNk0Dic7K+nEMS6fxoenVbeH3USUXWgIiu8iRt8wfMOOJiCzmoU8yoM+8HFA0/CgAfgkeqhW+zKTL3auIxwCGaVSElBmWB6tefyXEGF2fci/bXG3bDprQ4kZUAyvON8qABXQ+dsX225YwCP46CMTjRK+C1wj5zb6DDQC04sHRmFOMm3QATLXRXI7Lt5ju+Wkbg9JZqfL2Zo6GvZo0J2mpUijznGaCSSdy+r5edfnMmilPxeKUXODTyabSb33Lbinbp2ITUU3cIFGY76PVPYU7a3JN+vssPgpZm1M3kCDUUV71tipbl32//PjBfN0gYqCEGO6yvzklJIHjdgrd8zX2dDyY+vk7aZPDFySoVd5Dr2jGvCNOyE7bDD/9Ahis7VvkAMdU1S2NrLAUin9ZQaW4LjiiAWj3aXACI1i9FwA5SuGcQoPBnFsK7K3TkpgeSxG1YnaegOjm40QJJ8VqexMcBHDgMM2RECyJfhJbBM4JJpQngL8as0aO8HIbOlNSgyG3VCra4Lsa2GHWoOq1A7JI+HztdhCFICTEAcA6IqsFYsdQR/PC50FJawfqNuyDcXw2OtomYUe77/KsI5oOOL/cDYtNvasWW/brrCg=
+  - secure: aXiJ7R2Pe086+B5x5cuPmK0kG3f/G0vSryQEgmNvlU+Gpr0KaFsV76xzLyy0b8SOeVxA055O7/X7ZA4Pd/IKc5qz/98RX2gieozPDvwryfZ1weZbEYkjVBnWpzoIadN8jqTzN4LDf5nwTDcdEs45gU4uQgYBMWYvgPUBE20oD9iI7052qo/tknlQusPUisRiISuBpgnB1twnLk81ZZe3lRJdxmAOl6thaQj37UbqfvxCgyZVeVq3Y0tY6xl50/KNMyiecluoZuKu0h4EZGYk1T6UaheQ+MQRWNedJIsXeXhG8WoWrmCy5ZInzXWjsKwU4daLaaSgLzgcoNkYzyjAWwPNsepjiyAdXnVpzyifjdNm+LfB9Al5weAllFfzl580yAWVZlf/g0+X/dDxGxZME1jE53BcvUnvj/9cp0mox8JdjvB+ARHmxbPDUCfC9mcBPg0W24duk3brjT6BuR/xUMKEuydO0WDRAgOKopsAQalf/w+vEXQ+liKFs2CSwuo7CEUbtW+cTKz3XBQq6Ws5PAEeHmE3xy8wE5mafx0n6vv3oPGRQiTiko+CwUPrkqUmHzcHf+0N82RCcnT1CdALO7E+fowab5GVdOffUMKurQs5/pi8ux2YyNbS7qSQopAytV2rjiOei6ZDefExhWOWJWej9mPfDf9MGqlDkzbB8UE=

+ 20 - 6
oss/auth.go

@@ -22,14 +22,17 @@ type headerSorter struct {
 
 // signHeader signs the header and sets it as the authorization header.
 func (conn Conn) signHeader(req *http.Request, canonicalizedResource string) {
+
+	akIf := conn.config.GetCredentials()
+
 	// Get the final authorization string
-	authorizationStr := "OSS " + conn.config.AccessKeyID + ":" + conn.getSignedStr(req, canonicalizedResource)
+	authorizationStr := "OSS " + akIf.GetAccessKeyID() + ":" + conn.getSignedStr(req, canonicalizedResource, akIf.GetAccessKeySecret())
 
 	// Give the parameter "Authorization" value
 	req.Header.Set(HTTPHeaderAuthorization, authorizationStr)
 }
 
-func (conn Conn) getSignedStr(req *http.Request, canonicalizedResource string) string {
+func (conn Conn) getSignedStr(req *http.Request, canonicalizedResource string, keySecret string) string {
 	// Find out the "x-oss-"'s address in header of the request
 	temp := make(map[string]string)
 
@@ -57,16 +60,27 @@ func (conn Conn) getSignedStr(req *http.Request, canonicalizedResource string) s
 
 	signStr := req.Method + "\n" + contentMd5 + "\n" + contentType + "\n" + date + "\n" + canonicalizedOSSHeaders + canonicalizedResource
 
-	conn.config.WriteLog(Debug, "[Req:%p]signStr:%s.\n", req, signStr)
+	// convert sign to log for easy to view
+	if conn.config.LogLevel >= Debug {
+		var signBuf bytes.Buffer
+		for i := 0; i < len(signStr); i++ {
+			if signStr[i] != '\n' {
+				signBuf.WriteByte(signStr[i])
+			} else {
+				signBuf.WriteString("\\n")
+			}
+		}
+		conn.config.WriteLog(Debug, "[Req:%p]signStr:%s\n", req, signBuf.String())
+	}
 
-	h := hmac.New(func() hash.Hash { return sha1.New() }, []byte(conn.config.AccessKeySecret))
+	h := hmac.New(func() hash.Hash { return sha1.New() }, []byte(keySecret))
 	io.WriteString(h, signStr)
 	signedStr := base64.StdEncoding.EncodeToString(h.Sum(nil))
 
 	return signedStr
 }
 
-func (conn Conn) getRtmpSignedStr(bucketName, channelName, playlistName string, expiration int64, params map[string]interface{}) string {
+func (conn Conn) getRtmpSignedStr(bucketName, channelName, playlistName string, expiration int64, keySecret string, params map[string]interface{}) string {
 	if params[HTTPParamAccessKeyID] == nil {
 		return ""
 	}
@@ -88,7 +102,7 @@ func (conn Conn) getRtmpSignedStr(bucketName, channelName, playlistName string,
 	expireStr := strconv.FormatInt(expiration, 10)
 	signStr := expireStr + "\n" + canonParamsStr + canonResource
 
-	h := hmac.New(func() hash.Hash { return sha1.New() }, []byte(conn.config.AccessKeySecret))
+	h := hmac.New(func() hash.Hash { return sha1.New() }, []byte(keySecret))
 	io.WriteString(h, signStr)
 	signedStr := base64.StdEncoding.EncodeToString(h.Sum(nil))
 	return signedStr

+ 615 - 0
oss/bucket_credential_test.go

@@ -0,0 +1,615 @@
+// Credentials test
+package oss
+
+import (
+	"os"
+	"strings"
+	"io/ioutil"
+	"math/rand"
+	"bytes"
+	"strconv"
+	. "gopkg.in/check.v1"
+)
+
+type OssCredentialBucketSuite struct {
+	client        *Client
+	creClient     *Client
+	bucket        *Bucket
+	creBucket     *Bucket
+}
+
+var _ = Suite(&OssCredentialBucketSuite{})
+
+func (cs *OssCredentialBucketSuite)credentialSubUser(c *C) {
+	client, err := New(endpoint, accessID, accessKey)
+	c.Assert(err, IsNil)
+	err = client.CreateBucket(credentialBucketName)
+	c.Assert(err, IsNil)
+	cs.client = client
+	policyInfo := `
+	{
+		"Version":"1",
+		"Statement":[
+			{
+				"Action":[
+					"oss:*"
+				],
+				"Effect":"Allow",
+				"Principal":["`+ credentialUID + `"],
+				"Resource":["acs:oss:*:*:` + credentialBucketName + `", "acs:oss:*:*:` + credentialBucketName + `/*"]
+			}
+		]
+	}`
+
+	err = client.SetBucketPolicy(credentialBucketName, policyInfo)
+	c.Assert(err, IsNil)
+
+	bucket, err := cs.client.Bucket(credentialBucketName)
+	c.Assert(err, IsNil)
+	cs.bucket = bucket
+}
+
+// SetUpSuite runs once when the suite starts running.
+func (cs *OssCredentialBucketSuite) SetUpSuite(c *C) {
+	if credentialUID == ""{
+		testLogger.Println("the cerdential UID is NULL, skip the credential test")
+		c.Skip("the credential Uid is null")
+	}
+
+	cs.credentialSubUser(c)
+	client, err := New(endpoint, credentialAccessID, credentialAccessKey)
+	c.Assert(err, IsNil)
+	cs.creClient = client
+
+	bucket, err := cs.creClient.Bucket(credentialBucketName)
+	c.Assert(err, IsNil)
+	cs.creBucket = bucket
+
+	testLogger.Println("test credetial bucket started")
+}
+
+func (cs *OssCredentialBucketSuite) TearDownSuite(c *C) {
+	if credentialUID == ""{
+		c.Skip("the credential Uid is null")
+	}
+	for _, bucket := range []*Bucket{cs.bucket} {
+		// Delete multipart
+		keyMarker := KeyMarker("")
+		uploadIDMarker := UploadIDMarker("")
+		for {
+			lmu, err := bucket.ListMultipartUploads(keyMarker, uploadIDMarker)
+			c.Assert(err, IsNil)
+			for _, upload := range lmu.Uploads {
+				imur := InitiateMultipartUploadResult{Bucket: credentialBucketName, Key: upload.Key, UploadID: upload.UploadID}
+				err = bucket.AbortMultipartUpload(imur)
+				c.Assert(err, IsNil)
+			}
+			keyMarker = KeyMarker(lmu.NextKeyMarker)
+			uploadIDMarker = UploadIDMarker(lmu.NextUploadIDMarker)
+			if !lmu.IsTruncated {
+				break
+			}
+		}
+		// Delete objects
+		marker := Marker("")
+		for {
+			lor, err := bucket.ListObjects(marker)
+			c.Assert(err, IsNil)
+			for _, object := range lor.Objects {
+				err = bucket.DeleteObject(object.Key)
+				c.Assert(err, IsNil)
+			}
+			marker = Marker(lor.NextMarker)
+			if !lor.IsTruncated{
+				break
+			}
+		}
+	}
+	err := cs.client.DeleteBucket(credentialBucketName)
+	c.Assert(err, IsNil)
+	testLogger.Println("test credential bucket completed")
+}
+
+// Test put/get/list/delete object
+func (cs *OssCredentialBucketSuite) TestReqerPaymentNoRequester(c *C) {
+	// Set bucket is requester who send the request
+	reqPayConf := RequestPaymentConfiguration{
+		Payer:string(Requester),
+	}
+	err := cs.client.SetBucketRequestPayment(credentialBucketName, reqPayConf)
+	c.Assert(err, IsNil)
+
+	key := objectNamePrefix + randStr(8)
+	objectValue := randStr(18)
+
+	// Put object
+	err = cs.creBucket.PutObject(key, strings.NewReader(objectValue))
+	c.Assert(err, NotNil)
+
+	// Get object
+	_, err = cs.creBucket.GetObject(key)
+	c.Assert(err, NotNil)
+
+	// List object
+	_, err = cs.creBucket.ListObjects()
+	c.Assert(err, NotNil)
+
+	err = cs.creBucket.DeleteObject(key)
+	c.Assert(err, NotNil)
+
+	// Set bucket is BucketOwner
+	reqPayConf.Payer = string(BucketOwner)
+	err = cs.client.SetBucketRequestPayment(credentialBucketName, reqPayConf)
+	c.Assert(err, IsNil)
+}
+
+// Test put/get/list/delete object
+func (cs *OssCredentialBucketSuite) TestReqerPaymentWithRequester(c *C) {
+	// Set bucket is requester who send the request
+	reqPayConf := RequestPaymentConfiguration{
+		Payer:string(Requester),
+	}
+	err := cs.client.SetBucketRequestPayment(credentialBucketName, reqPayConf)
+	c.Assert(err, IsNil)
+
+	key := objectNamePrefix + randStr(8)
+	objectValue := randStr(18)
+
+	// Put object with a bucketowner
+	err = cs.creBucket.PutObject(key, strings.NewReader(objectValue), RequestPayer(BucketOwner))
+	c.Assert(err, NotNil)
+
+	// Put object
+	err = cs.creBucket.PutObject(key, strings.NewReader(objectValue), RequestPayer(Requester))
+	c.Assert(err, IsNil)
+
+	// Get object
+	body, err := cs.creBucket.GetObject(key, RequestPayer(Requester))
+	c.Assert(err, IsNil)
+	defer body.Close()
+
+	data, err := ioutil.ReadAll(body)
+	c.Assert(err, IsNil)
+	c.Assert(string(data), Equals, objectValue)
+
+	// List object
+	lor, err := cs.creBucket.ListObjects(RequestPayer(Requester))
+	c.Assert(err, IsNil)
+	c.Assert(len(lor.Objects), Equals, 1)
+
+	err = cs.creBucket.DeleteObject(key, RequestPayer(Requester))
+	c.Assert(err, IsNil)
+
+	// Set bucket is BucketOwner
+	reqPayConf.Payer = string(BucketOwner)
+	err = cs.client.SetBucketRequestPayment(credentialBucketName, reqPayConf)
+	c.Assert(err, IsNil)
+}
+
+// Test put/get/list/delete object
+func (cs *OssCredentialBucketSuite) TestOwnerPaymentNoRequester(c *C) {
+	// Set the bucket owner as the payer
+	reqPayConf := RequestPaymentConfiguration{
+		Payer:string(BucketOwner),
+	}
+	err := cs.client.SetBucketRequestPayment(credentialBucketName, reqPayConf)
+	c.Assert(err, IsNil)
+
+	key := objectNamePrefix + randStr(8)
+	objectValue := randStr(18)
+
+	// Put object
+	err = cs.creBucket.PutObject(key, strings.NewReader(objectValue))
+	c.Assert(err, IsNil)
+
+	// Get object
+	body, err := cs.creBucket.GetObject(key)
+	c.Assert(err, IsNil)
+	defer body.Close()
+
+	data, err := ioutil.ReadAll(body)
+	c.Assert(err, IsNil)
+	c.Assert(string(data), Equals, objectValue)
+
+	// List object
+	lor, err := cs.creBucket.ListObjects()
+	c.Assert(err, IsNil)
+	c.Assert(len(lor.Objects), Equals, 1)
+
+	err = cs.creBucket.DeleteObject(key)
+	c.Assert(err, IsNil)
+}
+
+// Test put/get/list/delete object
+func (cs *OssCredentialBucketSuite) TestOwnerPaymentWithRequester(c *C) {
+	// Set bucket is BucketOwner payer
+	reqPayConf := RequestPaymentConfiguration{
+		Payer:string(BucketOwner),
+	}
+
+	err := cs.client.SetBucketRequestPayment(credentialBucketName, reqPayConf)
+	c.Assert(err, IsNil)
+
+	key := objectNamePrefix + randStr(8)
+	objectValue := randStr(18)
+
+	// Put object
+	err = cs.creBucket.PutObject(key, strings.NewReader(objectValue), RequestPayer(BucketOwner))
+	c.Assert(err, IsNil)
+
+	// Put object
+	err = cs.creBucket.PutObject(key, strings.NewReader(objectValue), RequestPayer(Requester))
+	c.Assert(err, IsNil)
+
+	// Get object
+	body, err := cs.creBucket.GetObject(key, RequestPayer(Requester))
+	c.Assert(err, IsNil)
+	defer body.Close()
+
+	data, err := ioutil.ReadAll(body)
+	c.Assert(err, IsNil)
+	c.Assert(string(data), Equals, objectValue)
+
+	// List object
+	lor, err := cs.creBucket.ListObjects(RequestPayer(Requester))
+	c.Assert(err, IsNil)
+	c.Assert(len(lor.Objects), Equals, 1)
+
+	err = cs.creBucket.DeleteObject(key, RequestPayer(Requester))
+	c.Assert(err, IsNil)
+}
+
+// TestPutObjectFromFile
+func (cs *OssCredentialBucketSuite) TestPutObjectFromFile(c *C) {
+	objectName := objectNamePrefix + randStr(8)
+	localFile := "../sample/BingWallpaper-2015-11-07.jpg"
+	newFile := randStr(8) + ".jpg"
+
+	// Put
+	err := cs.creBucket.PutObjectFromFile(objectName, localFile, RequestPayer(Requester))
+	c.Assert(err, IsNil)
+
+	// Check
+	err = cs.creBucket.GetObjectToFile(objectName, newFile, RequestPayer(Requester))
+	c.Assert(err, IsNil)
+	eq, err := compareFiles(localFile, newFile)
+	c.Assert(err, IsNil)
+	c.Assert(eq, Equals, true)
+
+	meta, err := cs.creBucket.GetObjectDetailedMeta(objectName, RequestPayer(Requester))
+	c.Assert(err, IsNil)
+	c.Assert(meta.Get("Content-Type"), Equals, "image/jpeg")
+
+	acl, err := cs.creBucket.GetObjectACL(objectName, RequestPayer(Requester))
+	c.Assert(err, IsNil)
+	testLogger.Println("aclRes:", acl)
+	c.Assert(acl.ACL, Equals, "default")
+
+	err = cs.creBucket.DeleteObject(objectName, RequestPayer(Requester))
+	c.Assert(err, IsNil)
+
+	// Put with properties
+	options := []Option{
+		Expires(futureDate),
+		ObjectACL(ACLPublicRead),
+		Meta("myprop", "mypropval"),
+		RequestPayer(Requester),
+	}
+	err = cs.creBucket.PutObjectFromFile(objectName, localFile, options...)
+	c.Assert(err, IsNil)
+
+	// Check
+	err = cs.creBucket.GetObjectToFile(objectName, newFile, RequestPayer(Requester))
+	c.Assert(err, IsNil)
+	eq, err = compareFiles(localFile, newFile)
+	c.Assert(err, IsNil)
+	c.Assert(eq, Equals, true)
+
+	acl, err = cs.creBucket.GetObjectACL(objectName, RequestPayer(Requester))
+	c.Assert(err, IsNil)
+	testLogger.Println("GetObjectACL:", acl)
+	c.Assert(acl.ACL, Equals, string(ACLPublicRead))
+
+	meta, err = cs.creBucket.GetObjectDetailedMeta(objectName, RequestPayer(Requester))
+	c.Assert(err, IsNil)
+	testLogger.Println("GetObjectDetailedMeta:", meta)
+	c.Assert(meta.Get("X-Oss-Meta-Myprop"), Equals, "mypropval")
+
+	err = cs.creBucket.DeleteObject(objectName, RequestPayer(Requester))
+	c.Assert(err, IsNil)
+	os.Remove(newFile)
+}
+
+// TestCopyObject
+func (cs *OssCredentialBucketSuite) TestCopyObject(c *C) {
+	objectName := objectNamePrefix + randStr(8)
+	objectValue := randStr(18)
+
+	err := cs.creBucket.PutObject(objectName, strings.NewReader(objectValue),
+		ACL(ACLPublicRead), Meta("my", "myprop"), RequestPayer(Requester))
+	c.Assert(err, IsNil)
+
+	// Copy
+	var objectNameDest = objectName + "dest"
+	_, err = cs.creBucket.CopyObject(objectName, objectNameDest, RequestPayer(Requester))
+	c.Assert(err, IsNil)
+
+	// Check
+	lor, err := cs.creBucket.ListObjects(Prefix(objectName), RequestPayer(Requester))
+	c.Assert(err, IsNil)
+	testLogger.Println("objects:", lor.Objects)
+	c.Assert(len(lor.Objects), Equals, 2)
+
+	body, err := cs.creBucket.GetObject(objectName, RequestPayer(Requester))
+	c.Assert(err, IsNil)
+	str, err := readBody(body)
+	c.Assert(err, IsNil)
+	c.Assert(str, Equals, objectValue)
+
+	err = cs.creBucket.DeleteObject(objectNameDest, RequestPayer(Requester))
+	c.Assert(err, IsNil)
+
+	// Copy with constraints x-oss-copy-source-if-modified-since
+	_, err = cs.creBucket.CopyObject(objectName, objectNameDest, CopySourceIfModifiedSince(futureDate), RequestPayer(Requester))
+	c.Assert(err, NotNil)
+	testLogger.Println("CopyObject:", err)
+
+	// Copy with constraints x-oss-copy-source-if-unmodified-since
+	_, err = cs.creBucket.CopyObject(objectName, objectNameDest, CopySourceIfUnmodifiedSince(futureDate), RequestPayer(Requester))
+	c.Assert(err, IsNil)
+
+	// Check
+	lor, err = cs.creBucket.ListObjects(Prefix(objectName), RequestPayer(Requester))
+	c.Assert(err, IsNil)
+	testLogger.Println("objects:", lor.Objects)
+	c.Assert(len(lor.Objects), Equals, 2)
+
+	body, err = cs.creBucket.GetObject(objectName, RequestPayer(Requester))
+	c.Assert(err, IsNil)
+	str, err = readBody(body)
+	c.Assert(err, IsNil)
+	c.Assert(str, Equals, objectValue)
+
+	err = cs.creBucket.DeleteObject(objectNameDest, RequestPayer(Requester))
+	c.Assert(err, IsNil)
+
+	// Copy with constraints x-oss-copy-source-if-match
+	meta, err := cs.creBucket.GetObjectDetailedMeta(objectName, RequestPayer(Requester))
+	c.Assert(err, IsNil)
+	testLogger.Println("GetObjectDetailedMeta:", meta)
+
+	_, err = cs.creBucket.CopyObject(objectName, objectNameDest, CopySourceIfMatch(meta.Get("Etag")), RequestPayer(Requester))
+	c.Assert(err, IsNil)
+
+	// Check
+	body, err = cs.creBucket.GetObject(objectName, RequestPayer(Requester))
+	c.Assert(err, IsNil)
+	str, err = readBody(body)
+	c.Assert(err, IsNil)
+	c.Assert(str, Equals, objectValue)
+
+	err = cs.creBucket.DeleteObject(objectNameDest, RequestPayer(Requester))
+	c.Assert(err, IsNil)
+
+	// Copy with constraints x-oss-copy-source-if-none-match
+	_, err = cs.creBucket.CopyObject(objectName, objectNameDest, CopySourceIfNoneMatch(meta.Get("Etag")), RequestPayer(Requester))
+	c.Assert(err, NotNil)
+
+	// Copy with constraints x-oss-metadata-directive
+	_, err = cs.creBucket.CopyObject(objectName, objectNameDest, Meta("my", "mydestprop"),
+		MetadataDirective(MetaCopy), RequestPayer(Requester))
+	c.Assert(err, IsNil)
+
+	// Check
+	body, err = cs.creBucket.GetObject(objectName, RequestPayer(Requester))
+	c.Assert(err, IsNil)
+	str, err = readBody(body)
+	c.Assert(err, IsNil)
+	c.Assert(str, Equals, objectValue)
+
+	destMeta, err := cs.creBucket.GetObjectDetailedMeta(objectNameDest, RequestPayer(Requester))
+	c.Assert(err, IsNil)
+	c.Assert(meta.Get("X-Oss-Meta-My"), Equals, "myprop")
+
+	acl, err := cs.creBucket.GetObjectACL(objectNameDest, RequestPayer(Requester))
+	c.Assert(err, IsNil)
+	c.Assert(acl.ACL, Equals, "default")
+
+	err = cs.creBucket.DeleteObject(objectNameDest, RequestPayer(Requester))
+	c.Assert(err, IsNil)
+
+	// Copy with constraints x-oss-metadata-directive and self defined dest object meta
+	options := []Option{
+		ObjectACL(ACLPublicReadWrite),
+		Meta("my", "mydestprop"),
+		MetadataDirective(MetaReplace),
+		RequestPayer(Requester),
+	}
+	_, err = cs.creBucket.CopyObject(objectName, objectNameDest, options...)
+	c.Assert(err, IsNil)
+
+	// Check
+	body, err = cs.creBucket.GetObject(objectName, RequestPayer(Requester))
+	c.Assert(err, IsNil)
+	str, err = readBody(body)
+	c.Assert(err, IsNil)
+	c.Assert(str, Equals, objectValue)
+
+	destMeta, err = cs.creBucket.GetObjectDetailedMeta(objectNameDest, RequestPayer(Requester))
+	c.Assert(err, IsNil)
+	c.Assert(destMeta.Get("X-Oss-Meta-My"), Equals, "mydestprop")
+
+	acl, err = cs.creBucket.GetObjectACL(objectNameDest, RequestPayer(Requester))
+	c.Assert(err, IsNil)
+	c.Assert(acl.ACL, Equals, string(ACLPublicReadWrite))
+
+	err = cs.creBucket.DeleteObject(objectNameDest, RequestPayer(Requester))
+	c.Assert(err, IsNil)
+
+	err = cs.creBucket.DeleteObject(objectName, RequestPayer(Requester))
+	c.Assert(err, IsNil)
+}
+
+// TestCopyObjectToOrFrom
+func (cs *OssCredentialBucketSuite) TestCopyObjectToOrFrom(c *C) {
+	objectName := objectNamePrefix + randStr(8)
+	objectValue := randStr(18)
+	sorBucketName := credentialBucketName + "-sor"
+	objectNameDest := objectName + "-Dest"
+
+	err := cs.client.CreateBucket(sorBucketName)
+	c.Assert(err, IsNil)
+	// Set ACL_PUBLIC_R
+	err = cs.client.SetBucketACL(sorBucketName, ACLPublicRead)
+	c.Assert(err, IsNil)
+
+	sorBucket, err := cs.client.Bucket(sorBucketName)
+	c.Assert(err, IsNil)
+
+	err = sorBucket.PutObject(objectName, strings.NewReader(objectValue))
+	c.Assert(err, IsNil)
+
+	// Copy from
+	_, err = cs.creBucket.CopyObjectFrom(sorBucketName, objectName, objectNameDest, RequestPayer(Requester))
+	c.Assert(err, IsNil)
+
+	// Check
+	body, err := cs.creBucket.GetObject(objectNameDest, RequestPayer(Requester))
+	c.Assert(err, IsNil)
+	str, err := readBody(body)
+	c.Assert(err, IsNil)
+	c.Assert(str, Equals, objectValue)
+
+	err = cs.creBucket.DeleteObject(objectNameDest, RequestPayer(Requester))
+	c.Assert(err, IsNil)
+
+	// Copy to
+	_, err = sorBucket.CopyObjectTo(credentialBucketName, objectName, objectName)
+	c.Assert(err, IsNil)
+
+	// Check
+	body, err = cs.creBucket.GetObject(objectName, RequestPayer(Requester))
+	c.Assert(err, IsNil)
+	str, err = readBody(body)
+	c.Assert(err, IsNil)
+	c.Assert(str, Equals, objectValue)
+
+	// Clean
+	err = sorBucket.DeleteObject(objectName)
+	c.Assert(err, IsNil)
+
+	err = cs.creBucket.DeleteObject(objectName, RequestPayer(Requester))
+	c.Assert(err, IsNil)
+
+	err = cs.client.DeleteBucket(sorBucketName)
+	c.Assert(err, IsNil)
+}
+
+// TestAppendObject
+func (cs *OssCredentialBucketSuite) TestAppendObject(c *C) {
+	objectName := objectNamePrefix + randStr(8)
+	objectValue1 := randStr(18)
+	objectValue2 := randStr(18)
+	objectValue := objectValue1 + objectValue2
+	var val = []byte(objectValue)
+	var localFile = randStr(8) + ".txt"
+	var nextPos int64
+	var midPos = 1 + rand.Intn(len(val)-1)
+
+	var err = createFileAndWrite(localFile+"1", val[0:midPos])
+	c.Assert(err, IsNil)
+	err = createFileAndWrite(localFile+"2", val[midPos:])
+	c.Assert(err, IsNil)
+
+	// String append
+	nextPos, err = cs.creBucket.AppendObject(objectName, strings.NewReader(objectValue1), nextPos, RequestPayer(Requester))
+	c.Assert(err, IsNil)
+	nextPos, err = cs.creBucket.AppendObject(objectName, strings.NewReader(objectValue2), nextPos, RequestPayer(Requester))
+	c.Assert(err, IsNil)
+
+	body, err := cs.creBucket.GetObject(objectName, RequestPayer(Requester))
+	c.Assert(err, IsNil)
+	str, err := readBody(body)
+	c.Assert(err, IsNil)
+	c.Assert(str, Equals, objectValue)
+
+	err = cs.creBucket.DeleteObject(objectName, RequestPayer(Requester))
+	c.Assert(err, IsNil)
+
+	// Byte append
+	nextPos = 0
+	nextPos, err = cs.creBucket.AppendObject(objectName, bytes.NewReader(val[0:midPos]), nextPos, RequestPayer(Requester))
+	c.Assert(err, IsNil)
+	nextPos, err = cs.creBucket.AppendObject(objectName, bytes.NewReader(val[midPos:]), nextPos, RequestPayer(Requester))
+	c.Assert(err, IsNil)
+
+	body, err = cs.creBucket.GetObject(objectName, RequestPayer(Requester))
+	c.Assert(err, IsNil)
+	str, err = readBody(body)
+	c.Assert(err, IsNil)
+	c.Assert(str, Equals, objectValue)
+
+	err = cs.creBucket.DeleteObject(objectName, RequestPayer(Requester))
+	c.Assert(err, IsNil)
+
+	// File append
+	options := []Option{
+		ObjectACL(ACLPublicReadWrite),
+		Meta("my", "myprop"),
+		RequestPayer(Requester),
+	}
+
+	fd, err := os.Open(localFile + "1")
+	c.Assert(err, IsNil)
+	defer fd.Close()
+	nextPos = 0
+	nextPos, err = cs.creBucket.AppendObject(objectName, fd, nextPos, options...)
+	c.Assert(err, IsNil)
+
+	meta, err := cs.creBucket.GetObjectDetailedMeta(objectName, RequestPayer(Requester))
+	c.Assert(err, IsNil)
+	testLogger.Println("GetObjectDetailedMeta:", meta, ",", nextPos)
+	c.Assert(meta.Get("X-Oss-Object-Type"), Equals, "Appendable")
+	c.Assert(meta.Get("X-Oss-Meta-My"), Equals, "myprop")
+	c.Assert(meta.Get("x-oss-Meta-Mine"), Equals, "")
+	c.Assert(meta.Get("X-Oss-Next-Append-Position"), Equals, strconv.FormatInt(nextPos, 10))
+
+	acl, err := cs.creBucket.GetObjectACL(objectName, RequestPayer(Requester))
+	c.Assert(err, IsNil)
+	testLogger.Println("GetObjectACL:", acl)
+	c.Assert(acl.ACL, Equals, string(ACLPublicReadWrite))
+
+	// Second append
+	options = []Option{
+		ObjectACL(ACLPublicRead),
+		Meta("my", "myproptwo"),
+		Meta("mine", "mypropmine"),
+		RequestPayer(Requester),
+	}
+	fd, err = os.Open(localFile + "2")
+	c.Assert(err, IsNil)
+	defer fd.Close()
+	nextPos, err = cs.creBucket.AppendObject(objectName, fd, nextPos, options...)
+	c.Assert(err, IsNil)
+
+	body, err = cs.creBucket.GetObject(objectName, RequestPayer(Requester))
+	c.Assert(err, IsNil)
+	str, err = readBody(body)
+	c.Assert(err, IsNil)
+	c.Assert(str, Equals, objectValue)
+
+	meta, err = cs.creBucket.GetObjectDetailedMeta(objectName, RequestPayer(Requester))
+	c.Assert(err, IsNil)
+	testLogger.Println("GetObjectDetailedMeta xxx:", meta)
+	c.Assert(meta.Get("X-Oss-Object-Type"), Equals, "Appendable")
+	c.Assert(meta.Get("X-Oss-Meta-My"), Equals, "myprop")
+	c.Assert(meta.Get("x-Oss-Meta-Mine"), Equals, "")
+	c.Assert(meta.Get("X-Oss-Next-Append-Position"), Equals, strconv.FormatInt(nextPos, 10))
+
+	acl, err = cs.creBucket.GetObjectACL(objectName, RequestPayer(Requester))
+	c.Assert(err, IsNil)
+	c.Assert(acl.ACL, Equals, string(ACLPublicRead))
+
+	err = cs.creBucket.DeleteObject(objectName, RequestPayer(Requester))
+	c.Assert(err, IsNil)
+}

+ 179 - 0
oss/bucket_test.go

@@ -4476,3 +4476,182 @@ func (s *OssBucketSuite) TestOptionsMethod(c *C) {
 	bucket.DeleteObject(objectName)
 	forceDeleteBucket(client, bucketName, c)
 }
+
+func (s *OssBucketSuite) TestBucketTrafficLimitObject(c *C) {
+	// create a bucket with default property
+	client, err := New(endpoint, accessID, accessKey)
+	c.Assert(err, IsNil)
+
+	bucketName := bucketNamePrefix + randLowStr(6)
+	err = client.CreateBucket(bucketName)
+	c.Assert(err, IsNil)
+
+	bucket, err := client.Bucket(bucketName)
+
+	var respHeader http.Header
+	var qosDelayTime string
+	var traffic int64 = 819220 // 100KB
+	maxTraffic := traffic * 120 / 100
+
+	objectName := objectNamePrefix + randStr(8)
+	localFile := "../sample/BingWallpaper-2015-11-07.jpg"
+	fd, err := os.Open(localFile)
+	c.Assert(err, IsNil)
+	defer fd.Close()
+
+	tryGetFileSize := func(f *os.File) int64 {
+		fInfo, _ := f.Stat()
+		return fInfo.Size()
+	}
+	contentLength := tryGetFileSize(fd) * 8
+
+	// put object
+	start := time.Now().UnixNano() / 1000 / 1000
+	err = bucket.PutObject(objectName, fd, TrafficLimitHeader(traffic), GetResponseHeader(&respHeader))
+	c.Assert(err, IsNil)
+	endingTime := time.Now().UnixNano() / 1000 / 1000
+	costT := endingTime - start
+	costV := contentLength * 1000 / costT // bit * 1000 / Millisecond = bit/s
+	c.Assert((costV < maxTraffic), Equals, true)
+	qosDelayTime = GetQosDelayTime(respHeader)
+	c.Assert(len(qosDelayTime) > 0, Equals, true)
+
+	// putobject without TrafficLimit
+	//
+	// fd, err = os.Open(localFile)
+	// c.Assert(err, IsNil)
+	// defer fd.Close()
+	// start = time.Now().UnixNano() / 1000 / 1000
+	// err = bucket.PutObject(objectName, fd)
+	// c.Assert(err, IsNil)
+	// endingTime = time.Now().UnixNano() / 1000 / 1000
+	// costT = endingTime - start
+	// costV = contentLength * 1000 / costT  // bit * 1000 / Millisecond = bit/s
+	// testLogger.Println(traffic, maxTraffic, contentLength, costT, costV)
+	// c.Assert((costV < maxTraffic), Equals, true)
+
+	// get object to file
+	newFile := "test-file-" + randStr(10)
+	start = time.Now().UnixNano() / 1000 / 1000
+	err = bucket.GetObjectToFile(objectName, newFile, TrafficLimitHeader(traffic))
+	c.Assert(err, IsNil)
+	endingTime = time.Now().UnixNano() / 1000 / 1000
+	costT = endingTime - start
+	costV = contentLength * 1000 / costT // bit * 1000 / Millisecond = bit/s
+	c.Assert((costV < maxTraffic), Equals, true)
+	os.Remove(newFile)
+
+	// append object
+	newFile = "test-file-" + randStr(10)
+	objectKey := objectNamePrefix + randStr(8)
+	var nextPos int64
+	fd, err = os.Open(localFile)
+	c.Assert(err, IsNil)
+	defer fd.Close()
+	start = time.Now().UnixNano() / 1000 / 1000
+	nextPos, err = bucket.AppendObject(objectKey, strings.NewReader(randStr(18)), nextPos)
+	c.Assert(err, IsNil)
+
+	var respAppendHeader http.Header
+	nextPos, err = bucket.AppendObject(objectKey, fd, nextPos, TrafficLimitHeader(traffic), GetResponseHeader(&respAppendHeader))
+	c.Assert(err, IsNil)
+	endingTime = time.Now().UnixNano() / 1000 / 1000
+	costT = endingTime - start
+	costV = contentLength * 1000 / costT // bit * 1000 / Millisecond = bit/s
+	c.Assert((costV < maxTraffic), Equals, true)
+	qosDelayTime = GetQosDelayTime(respAppendHeader)
+	c.Assert(len(qosDelayTime) > 0, Equals, true)
+
+	err = bucket.GetObjectToFile(objectKey, newFile, TrafficLimitHeader(traffic))
+	c.Assert(err, IsNil)
+	err = bucket.DeleteObject(objectKey)
+	c.Assert(err, IsNil)
+	os.Remove(newFile)
+
+	// put object with url
+	fd, err = os.Open(localFile)
+	c.Assert(err, IsNil)
+	defer fd.Close()
+	strURL, err := bucket.SignURL(objectName, HTTPPut, 60, TrafficLimitParam(traffic))
+	start = time.Now().UnixNano() / 1000 / 1000
+	err = bucket.PutObjectWithURL(strURL, fd)
+	c.Assert(err, IsNil)
+	endingTime = time.Now().UnixNano() / 1000 / 1000
+	costT = endingTime - start
+	costV = contentLength * 1000 / costT // bit * 1000 / Millisecond = bit/s
+	c.Assert((costV < maxTraffic), Equals, true)
+
+	// get object with url
+	newFile = "test-file-" + randStr(10)
+	strURL, err = bucket.SignURL(objectName, HTTPGet, 60, TrafficLimitParam(traffic))
+	c.Assert(err, IsNil)
+	start = time.Now().UnixNano() / 1000 / 1000
+	err = bucket.GetObjectToFileWithURL(strURL, newFile)
+	c.Assert(err, IsNil)
+	endingTime = time.Now().UnixNano() / 1000 / 1000
+	costT = endingTime - start
+	costV = contentLength * 1000 / costT // bit * 1000 / Millisecond = bit/s
+	c.Assert((costV < maxTraffic), Equals, true)
+	os.Remove(newFile)
+
+	// copy object
+	destObjectName := objectNamePrefix + randStr(8)
+	_, err = bucket.CopyObject(objectName, destObjectName, TrafficLimitHeader(traffic))
+	c.Assert(err, IsNil)
+	err = bucket.DeleteObject(destObjectName)
+	c.Assert(err, IsNil)
+
+	forceDeleteBucket(client, bucketName, c)
+}
+
+func (s *OssBucketSuite) TestBucketTrafficLimitUpload(c *C) {
+	// create a bucket with default property
+	client, err := New(endpoint, accessID, accessKey)
+	c.Assert(err, IsNil)
+
+	bucketName := bucketNamePrefix + randLowStr(6)
+	err = client.CreateBucket(bucketName)
+	c.Assert(err, IsNil)
+
+	bucket, err := client.Bucket(bucketName)
+
+	var traffic int64 = 819220 // 100KB
+	maxTraffic := traffic * 120 / 100
+	contentLength := 500 * 1024
+
+	var fileName = "test-file-" + randStr(8)
+	objectName := objectNamePrefix + randStr(8)
+	content := randStr(contentLength)
+	createFile(fileName, content, c)
+
+	chunks, err := SplitFileByPartNum(fileName, 3)
+	c.Assert(err, IsNil)
+
+	options := []Option{
+		Expires(futureDate), Meta("my", "myprop"),
+	}
+
+	fd, err := os.Open(fileName)
+	c.Assert(err, IsNil)
+	defer fd.Close()
+
+	imur, err := bucket.InitiateMultipartUpload(objectName, options...)
+	c.Assert(err, IsNil)
+	var parts []UploadPart
+	start := time.Now().UnixNano() / 1000 / 1000
+	for _, chunk := range chunks {
+		fd.Seek(chunk.Offset, os.SEEK_SET)
+		part, err := bucket.UploadPart(imur, fd, chunk.Size, chunk.Number, TrafficLimitHeader(traffic))
+		c.Assert(err, IsNil)
+		parts = append(parts, part)
+	}
+	_, err = bucket.CompleteMultipartUpload(imur, parts)
+	c.Assert(err, IsNil)
+	endingTime := time.Now().UnixNano() / 1000 / 1000
+	costT := endingTime - start
+	costV := int64(contentLength) * 8 * 1000 / costT // B * 8 * 1000 / Millisecond = bit/s
+	c.Assert((costV < maxTraffic), Equals, true)
+	os.Remove(fileName)
+
+	forceDeleteBucket(client, bucketName, c)
+}

+ 195 - 119
oss/client.go

@@ -113,15 +113,7 @@ func (client Client) CreateBucket(bucketName string, options ...Option) error {
 	}
 
 	params := map[string]interface{}{}
-	resp, err := client.do("PUT", bucketName, params, headers, buffer)
-
-	// get response header
-	respHeader, _ := findOption(options, responseHeader, nil)
-	if respHeader != nil {
-		pRespHeader := respHeader.(*http.Header)
-		*pRespHeader = resp.Headers
-	}
-
+	resp, err := client.do("PUT", bucketName, params, headers, buffer, options...)
 	if err != nil {
 		return err
 	}
@@ -148,15 +140,7 @@ func (client Client) ListBuckets(options ...Option) (ListBucketsResult, error) {
 		return out, err
 	}
 
-	resp, err := client.do("GET", "", params, nil, nil)
-
-	// get response header
-	respHeader, _ := findOption(options, responseHeader, nil)
-	if respHeader != nil {
-		pRespHeader := respHeader.(*http.Header)
-		*pRespHeader = resp.Headers
-	}
-
+	resp, err := client.do("GET", "", params, nil, nil, options...)
 	if err != nil {
 		return out, err
 	}
@@ -558,14 +542,7 @@ func (client Client) SetBucketWebsiteDetail(bucketName string, wxml WebsiteXML,
 
 	params := map[string]interface{}{}
 	params["website"] = nil
-	resp, err := client.do("PUT", bucketName, params, headers, buffer)
-
-	// get response header
-	respHeader, _ := findOption(options, responseHeader, nil)
-	if respHeader != nil {
-		pRespHeader := respHeader.(*http.Header)
-		*pRespHeader = resp.Headers
-	}
+	resp, err := client.do("PUT", bucketName, params, headers, buffer, options...)
 	if err != nil {
 		return err
 	}
@@ -744,14 +721,7 @@ func (client Client) SetBucketVersioning(bucketName string, versioningConfig Ver
 
 	params := map[string]interface{}{}
 	params["versioning"] = nil
-	resp, err := client.do("PUT", bucketName, params, headers, buffer)
-
-	// get response header
-	respHeader, _ := findOption(options, responseHeader, nil)
-	if respHeader != nil {
-		pRespHeader := respHeader.(*http.Header)
-		*pRespHeader = resp.Headers
-	}
+	resp, err := client.do("PUT", bucketName, params, headers, buffer, options...)
 
 	if err != nil {
 		return err
@@ -767,14 +737,7 @@ func (client Client) GetBucketVersioning(bucketName string, options ...Option) (
 	var out GetBucketVersioningResult
 	params := map[string]interface{}{}
 	params["versioning"] = nil
-	resp, err := client.do("GET", bucketName, params, nil, nil)
-
-	// get response header
-	respHeader, _ := findOption(options, responseHeader, nil)
-	if respHeader != nil {
-		pRespHeader := respHeader.(*http.Header)
-		*pRespHeader = resp.Headers
-	}
+	resp, err := client.do("GET", bucketName, params, nil, nil, options...)
 
 	if err != nil {
 		return out, err
@@ -806,14 +769,7 @@ func (client Client) SetBucketEncryption(bucketName string, encryptionRule Serve
 
 	params := map[string]interface{}{}
 	params["encryption"] = nil
-	resp, err := client.do("PUT", bucketName, params, headers, buffer)
-
-	// get response header
-	respHeader, _ := findOption(options, responseHeader, nil)
-	if respHeader != nil {
-		pRespHeader := respHeader.(*http.Header)
-		*pRespHeader = resp.Headers
-	}
+	resp, err := client.do("PUT", bucketName, params, headers, buffer, options...)
 
 	if err != nil {
 		return err
@@ -829,14 +785,7 @@ func (client Client) GetBucketEncryption(bucketName string, options ...Option) (
 	var out GetBucketEncryptionResult
 	params := map[string]interface{}{}
 	params["encryption"] = nil
-	resp, err := client.do("GET", bucketName, params, nil, nil)
-
-	// get response header
-	respHeader, _ := findOption(options, responseHeader, nil)
-	if respHeader != nil {
-		pRespHeader := respHeader.(*http.Header)
-		*pRespHeader = resp.Headers
-	}
+	resp, err := client.do("GET", bucketName, params, nil, nil, options...)
 
 	if err != nil {
 		return out, err
@@ -853,14 +802,7 @@ func (client Client) GetBucketEncryption(bucketName string, options ...Option) (
 func (client Client) DeleteBucketEncryption(bucketName string, options ...Option) error {
 	params := map[string]interface{}{}
 	params["encryption"] = nil
-	resp, err := client.do("DELETE", bucketName, params, nil, nil)
-
-	// get response header
-	respHeader, _ := findOption(options, responseHeader, nil)
-	if respHeader != nil {
-		pRespHeader := respHeader.(*http.Header)
-		*pRespHeader = resp.Headers
-	}
+	resp, err := client.do("DELETE", bucketName, params, nil, nil, options...)
 
 	if err != nil {
 		return err
@@ -892,15 +834,7 @@ func (client Client) SetBucketTagging(bucketName string, tagging Tagging, option
 
 	params := map[string]interface{}{}
 	params["tagging"] = nil
-	resp, err := client.do("PUT", bucketName, params, headers, buffer)
-
-	// get response header
-	respHeader, _ := findOption(options, responseHeader, nil)
-	if respHeader != nil {
-		pRespHeader := respHeader.(*http.Header)
-		*pRespHeader = resp.Headers
-	}
-
+	resp, err := client.do("PUT", bucketName, params, headers, buffer, options...)
 	if err != nil {
 		return err
 	}
@@ -915,15 +849,7 @@ func (client Client) GetBucketTagging(bucketName string, options ...Option) (Get
 	var out GetBucketTaggingResult
 	params := map[string]interface{}{}
 	params["tagging"] = nil
-	resp, err := client.do("GET", bucketName, params, nil, nil)
-
-	// get response header
-	respHeader, _ := findOption(options, responseHeader, nil)
-	if respHeader != nil {
-		pRespHeader := respHeader.(*http.Header)
-		*pRespHeader = resp.Headers
-	}
-
+	resp, err := client.do("GET", bucketName, params, nil, nil, options...)
 	if err != nil {
 		return out, err
 	}
@@ -941,15 +867,7 @@ func (client Client) GetBucketTagging(bucketName string, options ...Option) (Get
 func (client Client) DeleteBucketTagging(bucketName string, options ...Option) error {
 	params := map[string]interface{}{}
 	params["tagging"] = nil
-	resp, err := client.do("DELETE", bucketName, params, nil, nil)
-
-	// get response header
-	respHeader, _ := findOption(options, responseHeader, nil)
-	if respHeader != nil {
-		pRespHeader := respHeader.(*http.Header)
-		*pRespHeader = resp.Headers
-	}
-
+	resp, err := client.do("DELETE", bucketName, params, nil, nil, options...)
 	if err != nil {
 		return err
 	}
@@ -988,15 +906,7 @@ func (client Client) GetBucketPolicy(bucketName string, options ...Option) (stri
 	params := map[string]interface{}{}
 	params["policy"] = nil
 
-	resp, err := client.do("GET", bucketName, params, nil, nil)
-
-	// get response header
-	respHeader, _ := findOption(options, responseHeader, nil)
-	if respHeader != nil {
-		pRespHeader := respHeader.(*http.Header)
-		*pRespHeader = resp.Headers
-	}
-
+	resp, err := client.do("GET", bucketName, params, nil, nil, options...)
 	if err != nil {
 		return "", err
 	}
@@ -1024,15 +934,7 @@ func (client Client) SetBucketPolicy(bucketName string, policy string, options .
 
 	buffer := strings.NewReader(policy)
 
-	resp, err := client.do("PUT", bucketName, params, nil, buffer)
-
-	// get response header
-	respHeader, _ := findOption(options, responseHeader, nil)
-	if respHeader != nil {
-		pRespHeader := respHeader.(*http.Header)
-		*pRespHeader = resp.Headers
-	}
-
+	resp, err := client.do("PUT", bucketName, params, nil, buffer, options...)
 	if err != nil {
 		return err
 	}
@@ -1052,20 +954,177 @@ func (client Client) SetBucketPolicy(bucketName string, policy string, options .
 func (client Client) DeleteBucketPolicy(bucketName string, options ...Option) error {
 	params := map[string]interface{}{}
 	params["policy"] = nil
-	resp, err := client.do("DELETE", bucketName, params, nil, nil)
+	resp, err := client.do("DELETE", bucketName, params, nil, nil, options...)
+	if err != nil {
+		return err
+	}
 
-	// get response header
-	respHeader, _ := findOption(options, responseHeader, nil)
-	if respHeader != nil {
-		pRespHeader := respHeader.(*http.Header)
-		*pRespHeader = resp.Headers
+	defer resp.Body.Close()
+	return checkRespCode(resp.StatusCode, []int{http.StatusNoContent})
+}
+
+// SetBucketRequestPayment API operation for Object Storage Service.
+//
+// Set the requestPayment of bucket
+//
+// bucketName the bucket name.
+//
+// paymentConfig the payment configuration
+//
+// error it's nil if no error, otherwise it's an error object.
+//
+func (client Client) SetBucketRequestPayment(bucketName string, paymentConfig RequestPaymentConfiguration, options ...Option) error {
+	params := map[string]interface{}{}
+	params["requestPayment"] = nil
+
+	var bs []byte
+	bs, err := xml.Marshal(paymentConfig)
+
+	if err != nil {
+		return err
 	}
 
+	buffer := new(bytes.Buffer)
+	buffer.Write(bs)
+
+	contentType := http.DetectContentType(buffer.Bytes())
+	headers := map[string]string{}
+	headers[HTTPHeaderContentType] = contentType
+
+	resp, err := client.do("PUT", bucketName, params, headers, buffer, options...)
 	if err != nil {
 		return err
 	}
+	defer resp.Body.Close()
+	return checkRespCode(resp.StatusCode, []int{http.StatusOK})
+}
 
+// GetBucketRequestPayment API operation for Object Storage Service.
+//
+// Get bucket requestPayment
+//
+// bucketName the bucket name.
+//
+// RequestPaymentConfiguration the payment configuration
+//
+// error it's nil if no error, otherwise it's an error object.
+//
+func (client Client) GetBucketRequestPayment(bucketName string, options ...Option) (RequestPaymentConfiguration, error) {
+	var out RequestPaymentConfiguration
+	params := map[string]interface{}{}
+	params["requestPayment"] = nil
+
+	resp, err := client.do("GET", bucketName, params, nil, nil, options...)
+	if err != nil {
+		return out, err
+	}
+	defer resp.Body.Close()
+
+	err = xmlUnmarshal(resp.Body, &out)
+	return out, err
+}
+
+// GetUserQoSInfo API operation for Object Storage Service.
+//
+// Get user qos.
+//
+// UserQoSConfiguration the user QoS and range information.
+//
+// error    it's nil if no error, otherwise it's an error object.
+//
+func (client Client) GetUserQoSInfo(options ...Option) (UserQoSConfiguration, error) {
+	var out UserQoSConfiguration
+	params := map[string]interface{}{}
+	params["qosInfo"] = nil
+
+	resp, err := client.do("GET", "", params, nil, nil, options...)
+	if err != nil {
+		return out, err
+	}
 	defer resp.Body.Close()
+
+	err = xmlUnmarshal(resp.Body, &out)
+	return out, err
+}
+
+// SetBucketQoSInfo API operation for Object Storage Service.
+//
+// Set Bucket Qos information.
+//
+// bucketName the bucket name.
+//
+// qosConf the qos configuration.
+//
+// error    it's nil if no error, otherwise it's an error object.
+//
+func (client Client) SetBucketQoSInfo(bucketName string, qosConf BucketQoSConfiguration, options ...Option) error {
+	params := map[string]interface{}{}
+	params["qosInfo"] = nil
+
+	var bs []byte
+	bs, err := xml.Marshal(qosConf)
+	if err != nil {
+		return err
+	}
+	buffer := new(bytes.Buffer)
+	buffer.Write(bs)
+
+	contentTpye := http.DetectContentType(buffer.Bytes())
+	headers := map[string]string{}
+	headers[HTTPHeaderContentType] = contentTpye
+
+	resp, err := client.do("PUT", bucketName, params, headers, buffer, options...)
+	if err != nil {
+		return err
+	}
+
+	defer resp.Body.Close()
+	return checkRespCode(resp.StatusCode, []int{http.StatusOK})
+}
+
+// GetBucketQosInfo API operation for Object Storage Service.
+//
+// Get Bucket Qos information.
+//
+// bucketName the bucket name.
+//
+// BucketQoSConfiguration the returned QoS configuration.
+//
+// error    it's nil if no error, otherwise it's an error object.
+//
+func (client Client) GetBucketQosInfo(bucketName string, options ...Option) (BucketQoSConfiguration, error) {
+	var out BucketQoSConfiguration
+	params := map[string]interface{}{}
+	params["qosInfo"] = nil
+
+	resp, err := client.do("GET", bucketName, params, nil, nil, options...)
+	if err != nil {
+		return out, err
+	}
+	defer resp.Body.Close()
+
+	err = xmlUnmarshal(resp.Body, &out)
+	return out, err
+}
+
+// DeleteBucketQosInfo API operation for Object Storage Service.
+//
+// Delete Bucket QoS information.
+//
+// bucketName the bucket name.
+//
+// error    it's nil if no error, otherwise it's an error object.
+//
+func (client Client) DeleteBucketQosInfo(bucketName string, options ...Option) error {
+	params := map[string]interface{}{}
+	params["qosInfo"] = nil
+
+	resp, err := client.do("DELETE", bucketName, params, nil, nil, options...)
+	if err != nil {
+		return err
+	}
+	defer resp.Body.Close()
+
 	return checkRespCode(resp.StatusCode, []int{http.StatusNoContent})
 }
 
@@ -1216,9 +1275,26 @@ func SetLogger(Logger *log.Logger) ClientOption {
 	}
 }
 
+// SetCredentialsProvider sets the function for getting the user's access credentials
+//
+func SetCredentialsProvider(provider CredentialsProvider) ClientOption {
+	return func(client *Client) {
+		client.Config.CredentialsProvider = provider
+	}
+}
+
 // Private
 func (client Client) do(method, bucketName string, params map[string]interface{},
-	headers map[string]string, data io.Reader) (*Response, error) {
-	return client.Conn.Do(method, bucketName, "", params,
+	headers map[string]string, data io.Reader, options ...Option) (*Response, error) {
+	resp, err := client.Conn.Do(method, bucketName, "", params,
 		headers, data, 0, nil)
+
+	// get response header
+	respHeader, _ := findOption(options, responseHeader, nil)
+	if respHeader != nil {
+		pRespHeader := respHeader.(*http.Header)
+		*pRespHeader = resp.Headers
+	}
+
+	return resp, err
 }

+ 210 - 4
oss/client_test.go

@@ -5,11 +5,13 @@
 package oss
 
 import (
+	"encoding/json"
 	"io/ioutil"
 	"log"
 	"math/rand"
 	"net/http"
 	"os"
+	"reflect"
 	"runtime"
 	"strconv"
 	"strings"
@@ -43,6 +45,11 @@ var (
 	stsaccessID  = os.Getenv("OSS_TEST_STS_ID")
 	stsaccessKey = os.Getenv("OSS_TEST_STS_KEY")
 	stsARN       = os.Getenv("OSS_TEST_STS_ARN")
+
+	// Credential
+	credentialAccessID  = os.Getenv("OSS_CREDENTIAL_KEY_ID")
+	credentialAccessKey = os.Getenv("OSS_CREDENTIAL_KEY_SECRET")
+	credentialUID       = os.Getenv("OSS_CREDENTIAL_UID")
 )
 
 var (
@@ -55,6 +62,8 @@ var (
 	objectNamePrefix = "go-sdk-test-object-"
 	// sts region is one and only hangzhou
 	stsRegion = "cn-hangzhou"
+	// Credentials
+	credentialBucketName = bucketNamePrefix + randLowStr(6)
 )
 
 var (
@@ -2441,14 +2450,14 @@ func (s *OssBucketSuite) TestGetBucketVersioning(c *C) {
 	forceDeleteBucket(client, bucketName, c)
 }
 
-func (s *OssClientSuite) TestBucketPolicy(c *C){
+func (s *OssClientSuite) TestBucketPolicy(c *C) {
 	client, err := New(endpoint, accessID, accessKey)
 	c.Assert(err, IsNil)
 
 	bucketName := bucketNamePrefix + randLowStr(5)
 	err = client.CreateBucket(bucketName)
 	c.Assert(err, IsNil)
-	
+
 	var responseHeader http.Header
 	ret, err := client.GetBucketPolicy(bucketName, GetResponseHeader(&responseHeader))
 	c.Assert(err, NotNil)
@@ -2522,7 +2531,7 @@ func (s *OssClientSuite) TestBucketPolicyNegative(c *C) {
 	}`
 	err = client.SetBucketPolicy(bucketName, errPolicy, GetResponseHeader(&responseHeader))
 	c.Assert(err, NotNil)
-	testLogger.Println("err:",err)
+	testLogger.Println("err:", err)
 	requestId = GetRequestId(responseHeader)
 	c.Assert(len(requestId) > 0, Equals, true)
 
@@ -2531,7 +2540,7 @@ func (s *OssClientSuite) TestBucketPolicyNegative(c *C) {
 
 	bucketNameEmpty := bucketNamePrefix + randLowStr(5)
 	client.DeleteBucket(bucketNameEmpty)
-	
+
 	err = client.DeleteBucketPolicy(bucketNameEmpty, GetResponseHeader(&responseHeader))
 	c.Assert(err, NotNil)
 	requestId = GetRequestId(responseHeader)
@@ -2539,3 +2548,200 @@ func (s *OssClientSuite) TestBucketPolicyNegative(c *C) {
 
 	client.DeleteBucket(bucketName)
 }
+
+func (s *OssClientSuite) TestSetBucketRequestPayment(c *C) {
+	client, err := New(endpoint, accessID, accessKey)
+	c.Assert(err, IsNil)
+
+	bucketName := bucketNamePrefix + randLowStr(5)
+	err = client.CreateBucket(bucketName)
+	c.Assert(err, IsNil)
+
+	reqPayConf := RequestPaymentConfiguration{
+		Payer: "Requester",
+	}
+	err = client.SetBucketRequestPayment(bucketName, reqPayConf)
+	c.Assert(err, IsNil)
+
+	ret, err := client.GetBucketRequestPayment(bucketName)
+	c.Assert(err, IsNil)
+	c.Assert(ret.Payer, Equals, "Requester")
+
+	client.DeleteBucket(bucketName)
+	c.Assert(err, IsNil)
+}
+
+func (s *OssClientSuite) TestSetBucketRequestPaymentNegative(c *C) {
+	client, err := New(endpoint, accessID, accessKey)
+	c.Assert(err, IsNil)
+
+	bucketName := bucketNamePrefix + randLowStr(5)
+	err = client.CreateBucket(bucketName)
+	c.Assert(err, IsNil)
+
+	reqPayConf := RequestPaymentConfiguration{
+		Payer: "Requesterttttt", // this is a error configuration
+	}
+	err = client.SetBucketRequestPayment(bucketName, reqPayConf)
+	c.Assert(err, NotNil)
+
+	ret, err := client.GetBucketRequestPayment(bucketName)
+	c.Assert(err, IsNil)
+	c.Assert(ret.Payer, Equals, "BucketOwner")
+
+	client.DeleteBucket(bucketName)
+	c.Assert(err, IsNil)
+}
+
+func (s *OssClientSuite) TestBucketQos(c *C) {
+	client, err := New(endpoint, accessID, accessKey)
+	c.Assert(err, IsNil)
+
+	ret, err := client.GetUserQoSInfo()
+	c.Assert(err, IsNil)
+	testLogger.Println("QosInfo:", ret)
+
+	bucketName := bucketNamePrefix + randLowStr(5)
+	_ = client.DeleteBucket(bucketName)
+
+	err = client.CreateBucket(bucketName)
+	c.Assert(err, IsNil)
+
+	_, err = client.GetBucketQosInfo(bucketName)
+	c.Assert(err, NotNil)
+
+	// case 1 set BucketQoSConfiguration every member
+	five := 5
+	four := 4
+	totalQps := 200
+	qosConf := BucketQoSConfiguration{
+		TotalUploadBandwidth:      &five,
+		IntranetUploadBandwidth:   &four,
+		ExtranetUploadBandwidth:   &four,
+		TotalDownloadBandwidth:    &four,
+		IntranetDownloadBandwidth: &four,
+		ExtranetDownloadBandwidth: &four,
+		TotalQPS:                  &totalQps,
+		IntranetQPS:               &totalQps,
+		ExtranetQPS:               &totalQps,
+	}
+	var responseHeader http.Header
+	err = client.SetBucketQoSInfo(bucketName, qosConf, GetResponseHeader(&responseHeader))
+	c.Assert(err, IsNil)
+	requestId := GetRequestId(responseHeader)
+	c.Assert(len(requestId) > 0, Equals, true)
+
+	// wait a moment for configuration effect
+	time.Sleep(time.Second)
+
+	retQos, err := client.GetBucketQosInfo(bucketName)
+	c.Assert(err, IsNil)
+
+	// set qosConf default value
+	qosConf.XMLName.Local = "QoSConfiguration"
+	c.Assert(struct2string(retQos, c), Equals, struct2string(qosConf, c))
+
+	// case 2 set BucketQoSConfiguration not every member
+	qosConfNo := BucketQoSConfiguration{
+		TotalUploadBandwidth:      &five,
+		IntranetUploadBandwidth:   &four,
+		ExtranetUploadBandwidth:   &four,
+		TotalDownloadBandwidth:    &four,
+		IntranetDownloadBandwidth: &four,
+		ExtranetDownloadBandwidth: &four,
+		TotalQPS:                  &totalQps,
+	}
+	err = client.SetBucketQoSInfo(bucketName, qosConfNo)
+	c.Assert(err, IsNil)
+
+	// wait a moment for configuration effect
+	time.Sleep(time.Second)
+
+	retQos, err = client.GetBucketQosInfo(bucketName)
+	c.Assert(err, IsNil)
+
+	// set qosConfNo default value
+	qosConfNo.XMLName.Local = "QoSConfiguration"
+	defNum := -1
+	qosConfNo.IntranetQPS = &defNum
+	qosConfNo.ExtranetQPS = &defNum
+	c.Assert(struct2string(retQos, c), Equals, struct2string(qosConfNo, c))
+
+	err = client.DeleteBucketQosInfo(bucketName)
+	c.Assert(err, IsNil)
+
+	// wait a moment for configuration effect
+	time.Sleep(time.Second)
+
+	_, err = client.GetBucketQosInfo(bucketName)
+	c.Assert(err, NotNil)
+
+	// this is a error qos configuration
+	to := *ret.TotalUploadBandwidth + 2
+	qosErrConf := BucketQoSConfiguration{
+		TotalUploadBandwidth:      &to, // this exceed user TotalUploadBandwidth
+		IntranetUploadBandwidth:   &four,
+		ExtranetUploadBandwidth:   &four,
+		TotalDownloadBandwidth:    &four,
+		IntranetDownloadBandwidth: &four,
+		ExtranetDownloadBandwidth: &four,
+		TotalQPS:                  &totalQps,
+		IntranetQPS:               &totalQps,
+		ExtranetQPS:               &totalQps,
+	}
+	err = client.SetBucketQoSInfo(bucketName, qosErrConf)
+	c.Assert(err, NotNil)
+
+	err = client.DeleteBucketQosInfo(bucketName)
+	c.Assert(err, IsNil)
+
+	err = client.DeleteBucket(bucketName)
+	c.Assert(err, IsNil)
+}
+
+// struct to string
+func struct2string(obj interface{}, c *C) string {
+	t := reflect.TypeOf(obj)
+	v := reflect.ValueOf(obj)
+
+	var data = make(map[string]interface{})
+	for i := 0; i < t.NumField(); i++ {
+		data[t.Field(i).Name] = v.Field(i).Interface()
+	}
+	str, err := json.Marshal(data)
+	c.Assert(err, IsNil)
+	return string(str)
+}
+
+type TestCredentials struct {
+}
+
+func (testCreInf *TestCredentials) GetAccessKeyID() string {
+	return os.Getenv("OSS_TEST_ACCESS_KEY_ID")
+}
+
+func (testCreInf *TestCredentials) GetAccessKeySecret() string {
+	return os.Getenv("OSS_TEST_ACCESS_KEY_SECRET")
+}
+
+func (testCreInf *TestCredentials) GetSecurityToken() string {
+	return ""
+}
+
+type TestCredentialsProvider struct {
+}
+
+func (testInfBuild *TestCredentialsProvider) GetCredentials() Credentials {
+	return &TestCredentials{}
+}
+
+func (s *OssClientSuite) TestClientCredentialInfBuild(c *C) {
+	var bucketNameTest = bucketNamePrefix + randLowStr(6)
+	var defaultBuild TestCredentialsProvider
+	client, err := New(endpoint, "", "", SetCredentialsProvider(&defaultBuild))
+	c.Assert(err, IsNil)
+	err = client.CreateBucket(bucketNameTest)
+	c.Assert(err, IsNil)
+	err = client.DeleteBucket(bucketNameTest)
+	c.Assert(err, IsNil)
+}

+ 68 - 23
oss/conf.go

@@ -35,31 +35,68 @@ type HTTPMaxConns struct {
 	MaxIdleConnsPerHost int
 }
 
+// Credentials is the interface for getting AccessKeyID, AccessKeySecret, SecurityToken
+type Credentials interface {
+	GetAccessKeyID() string
+	GetAccessKeySecret() string
+	GetSecurityToken() string
+}
+
+// CredentialsProvider is the interface for getting Credentials
+type CredentialsProvider interface {
+	GetCredentials() Credentials
+}
+
+type defaultCredentials struct {
+	config *Config
+}
+
+func (defCre *defaultCredentials) GetAccessKeyID() string {
+	return defCre.config.AccessKeyID
+}
+
+func (defCre *defaultCredentials) GetAccessKeySecret() string {
+	return defCre.config.AccessKeySecret
+}
+
+func (defCre *defaultCredentials) GetSecurityToken() string {
+	return defCre.config.SecurityToken
+}
+
+type defaultCredentialsProvider struct {
+	config *Config
+}
+
+func (defBuild *defaultCredentialsProvider) GetCredentials() Credentials {
+	return &defaultCredentials{config: defBuild.config}
+}
+
 // Config defines oss configuration
 type Config struct {
-	Endpoint         string       // OSS endpoint
-	AccessKeyID      string       // AccessId
-	AccessKeySecret  string       // AccessKey
-	RetryTimes       uint         // Retry count by default it's 5.
-	UserAgent        string       // SDK name/version/system information
-	IsDebug          bool         // Enable debug mode. Default is false.
-	Timeout          uint         // Timeout in seconds. By default it's 60.
-	SecurityToken    string       // STS Token
-	IsCname          bool         // If cname is in the endpoint.
-	HTTPTimeout      HTTPTimeout  // HTTP timeout
-	HTTPMaxConns     HTTPMaxConns // Http max connections
-	IsUseProxy       bool         // Flag of using proxy.
-	ProxyHost        string       // Flag of using proxy host.
-	IsAuthProxy      bool         // Flag of needing authentication.
-	ProxyUser        string       // Proxy user
-	ProxyPassword    string       // Proxy password
-	IsEnableMD5      bool         // Flag of enabling MD5 for upload.
-	MD5Threshold     int64        // Memory footprint threshold for each MD5 computation (16MB is the default), in byte. When the data is more than that, temp file is used.
-	IsEnableCRC      bool         // Flag of enabling CRC for upload.
-	LogLevel         int          // Log level
-	Logger           *log.Logger  // For write log
-	UploadLimitSpeed int          // Upload limit speed:KB/s, 0 is unlimited
-	UploadLimiter    *OssLimiter  // Bandwidth limit reader for upload
+	Endpoint            string              // OSS endpoint
+	AccessKeyID         string              // AccessId
+	AccessKeySecret     string              // AccessKey
+	RetryTimes          uint                // Retry count by default it's 5.
+	UserAgent           string              // SDK name/version/system information
+	IsDebug             bool                // Enable debug mode. Default is false.
+	Timeout             uint                // Timeout in seconds. By default it's 60.
+	SecurityToken       string              // STS Token
+	IsCname             bool                // If cname is in the endpoint.
+	HTTPTimeout         HTTPTimeout         // HTTP timeout
+	HTTPMaxConns        HTTPMaxConns        // Http max connections
+	IsUseProxy          bool                // Flag of using proxy.
+	ProxyHost           string              // Flag of using proxy host.
+	IsAuthProxy         bool                // Flag of needing authentication.
+	ProxyUser           string              // Proxy user
+	ProxyPassword       string              // Proxy password
+	IsEnableMD5         bool                // Flag of enabling MD5 for upload.
+	MD5Threshold        int64               // Memory footprint threshold for each MD5 computation (16MB is the default), in byte. When the data is more than that, temp file is used.
+	IsEnableCRC         bool                // Flag of enabling CRC for upload.
+	LogLevel            int                 // Log level
+	Logger              *log.Logger         // For write log
+	UploadLimitSpeed    int                 // Upload limit speed:KB/s, 0 is unlimited
+	UploadLimiter       *OssLimiter         // Bandwidth limit reader for upload
+	CredentialsProvider CredentialsProvider // User provides interface to get AccessKeyID, AccessKeySecret, SecurityToken
 }
 
 // LimitUploadSpeed uploadSpeed:KB/s, 0 is unlimited,default is 0
@@ -92,6 +129,11 @@ func (config *Config) WriteLog(LogLevel int, format string, a ...interface{}) {
 	config.Logger.Printf("%s", logBuffer.String())
 }
 
+// for get Credentials
+func (config *Config) GetCredentials() Credentials {
+	return config.CredentialsProvider.GetCredentials()
+}
+
 // getDefaultOssConfig gets the default configuration.
 func getDefaultOssConfig() *Config {
 	config := Config{}
@@ -127,5 +169,8 @@ func getDefaultOssConfig() *Config {
 	config.LogLevel = LogOff
 	config.Logger = log.New(os.Stdout, "", log.LstdFlags)
 
+	provider := &defaultCredentialsProvider{config: &config}
+	config.CredentialsProvider = provider
+
 	return &config
 }

+ 24 - 20
oss/conn.go

@@ -36,13 +36,13 @@ var signKeyList = []string{"acl", "uploads", "location", "cors",
 	"replicationLocation", "cname", "bucketInfo",
 	"comp", "qos", "live", "status", "vod",
 	"startTime", "endTime", "symlink",
-	"x-oss-process", "response-content-type",
+	"x-oss-process", "response-content-type", "x-oss-traffic-limit",
 	"response-content-language", "response-expires",
 	"response-cache-control", "response-content-disposition",
 	"response-content-encoding", "udf", "udfName", "udfImage",
 	"udfId", "udfImageDesc", "udfApplication", "comp",
-	"udfApplicationLog", "restore", "callback", "callback-var",
-	"policy", "stat", "encryption", "versions", "versioning", "versionId"}
+	"udfApplicationLog", "restore", "callback", "callback-var", "qosInfo",
+	"policy", "stat", "encryption", "versions", "versioning", "versionId", "requestPayment"}
 
 // init initializes Conn
 func (conn *Conn) init(config *Config, urlMaker *urlMaker, client *http.Client) error {
@@ -123,7 +123,7 @@ func (conn Conn) DoURL(method HTTPMethod, signedURL string, headers map[string]s
 	}
 
 	// Transfer started
-	event := newProgressEvent(TransferStartedEvent, 0, req.ContentLength)
+	event := newProgressEvent(TransferStartedEvent, 0, req.ContentLength, 0)
 	publishProgress(listener, event)
 
 	if conn.config.LogLevel >= Debug {
@@ -133,7 +133,7 @@ func (conn Conn) DoURL(method HTTPMethod, signedURL string, headers map[string]s
 	resp, err := conn.client.Do(req)
 	if err != nil {
 		// Transfer failed
-		event = newProgressEvent(TransferFailedEvent, tracker.completedBytes, req.ContentLength)
+		event = newProgressEvent(TransferFailedEvent, tracker.completedBytes, req.ContentLength, 0)
 		publishProgress(listener, event)
 		return nil, err
 	}
@@ -144,7 +144,7 @@ func (conn Conn) DoURL(method HTTPMethod, signedURL string, headers map[string]s
 	}
 
 	// Transfer completed
-	event = newProgressEvent(TransferCompletedEvent, tracker.completedBytes, req.ContentLength)
+	event = newProgressEvent(TransferCompletedEvent, tracker.completedBytes, req.ContentLength, 0)
 	publishProgress(listener, event)
 
 	return conn.handleResponse(resp, crc)
@@ -239,8 +239,10 @@ func (conn Conn) doRequest(method string, uri *url.URL, canonicalizedResource st
 	req.Header.Set(HTTPHeaderDate, date)
 	req.Header.Set(HTTPHeaderHost, conn.config.Endpoint)
 	req.Header.Set(HTTPHeaderUserAgent, conn.config.UserAgent)
-	if conn.config.SecurityToken != "" {
-		req.Header.Set(HTTPHeaderOssSecurityToken, conn.config.SecurityToken)
+
+	akIf := conn.config.GetCredentials()
+	if akIf.GetSecurityToken() != "" {
+		req.Header.Set(HTTPHeaderOssSecurityToken, akIf.GetSecurityToken())
 	}
 
 	if headers != nil {
@@ -252,7 +254,7 @@ func (conn Conn) doRequest(method string, uri *url.URL, canonicalizedResource st
 	conn.signHeader(req, canonicalizedResource)
 
 	// Transfer started
-	event := newProgressEvent(TransferStartedEvent, 0, req.ContentLength)
+	event := newProgressEvent(TransferStartedEvent, 0, req.ContentLength, 0)
 	publishProgress(listener, event)
 
 	if conn.config.LogLevel >= Debug {
@@ -263,7 +265,7 @@ func (conn Conn) doRequest(method string, uri *url.URL, canonicalizedResource st
 
 	if err != nil {
 		// Transfer failed
-		event = newProgressEvent(TransferFailedEvent, tracker.completedBytes, req.ContentLength)
+		event = newProgressEvent(TransferFailedEvent, tracker.completedBytes, req.ContentLength, 0)
 		publishProgress(listener, event)
 		return nil, err
 	}
@@ -274,15 +276,16 @@ func (conn Conn) doRequest(method string, uri *url.URL, canonicalizedResource st
 	}
 
 	// Transfer completed
-	event = newProgressEvent(TransferCompletedEvent, tracker.completedBytes, req.ContentLength)
+	event = newProgressEvent(TransferCompletedEvent, tracker.completedBytes, req.ContentLength, 0)
 	publishProgress(listener, event)
 
 	return conn.handleResponse(resp, crc)
 }
 
 func (conn Conn) signURL(method HTTPMethod, bucketName, objectName string, expiration int64, params map[string]interface{}, headers map[string]string) string {
-	if conn.config.SecurityToken != "" {
-		params[HTTPParamSecurityToken] = conn.config.SecurityToken
+	akIf := conn.config.GetCredentials()
+	if akIf.GetSecurityToken() != "" {
+		params[HTTPParamSecurityToken] = akIf.GetSecurityToken()
 	}
 	subResource := conn.getSubResource(params)
 	canonicalizedResource := conn.url.getResource(bucketName, objectName, subResource)
@@ -309,10 +312,10 @@ func (conn Conn) signURL(method HTTPMethod, bucketName, objectName string, expir
 		}
 	}
 
-	signedStr := conn.getSignedStr(req, canonicalizedResource)
+	signedStr := conn.getSignedStr(req, canonicalizedResource, akIf.GetAccessKeySecret())
 
 	params[HTTPParamExpires] = strconv.FormatInt(expiration, 10)
-	params[HTTPParamAccessKeyID] = conn.config.AccessKeyID
+	params[HTTPParamAccessKeyID] = akIf.GetAccessKeyID()
 	params[HTTPParamSignature] = signedStr
 
 	urlParams := conn.getURLParams(params)
@@ -327,12 +330,13 @@ func (conn Conn) signRtmpURL(bucketName, channelName, playlistName string, expir
 	expireStr := strconv.FormatInt(expiration, 10)
 	params[HTTPParamExpires] = expireStr
 
-	if conn.config.AccessKeyID != "" {
-		params[HTTPParamAccessKeyID] = conn.config.AccessKeyID
-		if conn.config.SecurityToken != "" {
-			params[HTTPParamSecurityToken] = conn.config.SecurityToken
+	akIf := conn.config.GetCredentials()
+	if akIf.GetAccessKeyID() != "" {
+		params[HTTPParamAccessKeyID] = akIf.GetAccessKeyID()
+		if akIf.GetSecurityToken() != "" {
+			params[HTTPParamSecurityToken] = akIf.GetSecurityToken()
 		}
-		signedStr := conn.getRtmpSignedStr(bucketName, channelName, playlistName, expiration, params)
+		signedStr := conn.getRtmpSignedStr(bucketName, channelName, playlistName, expiration, akIf.GetAccessKeySecret(), params)
 		params[HTTPParamSignature] = signedStr
 	}
 

+ 4 - 1
oss/conn_test.go

@@ -147,6 +147,7 @@ func (s *OssConnSuite) TestConnToolFunc(c *C) {
 
 func (s *OssConnSuite) TestSignRtmpURL(c *C) {
 	cfg := getDefaultOssConfig()
+
 	um := urlMaker{}
 	um.Init(endpoint, false, false)
 	conn := Conn{cfg, &um, nil}
@@ -174,11 +175,13 @@ func (s *OssConnSuite) TestGetRtmpSignedStr(c *C) {
 	um.Init(endpoint, false, false)
 	conn := Conn{cfg, &um, nil}
 
+	akIf := conn.config.GetCredentials()
+
 	//Anonymous
 	channelName := "test-get-rtmp-signed-str"
 	playlistName := "playlist.m3u8"
 	expiration := time.Now().Unix() + 3600
 	params := map[string]interface{}{}
-	signedStr := conn.getRtmpSignedStr(bucketName, channelName, playlistName, expiration, params)
+	signedStr := conn.getRtmpSignedStr(bucketName, channelName, playlistName, expiration, akIf.GetAccessKeySecret(), params)
 	c.Assert(signedStr, Equals, "")
 }

+ 6 - 2
oss/const.go

@@ -79,7 +79,10 @@ type PayerType string
 
 const (
 	// Requester the requester who sends the request
-	Requester PayerType = "requester"
+	Requester PayerType = "Requester"
+
+	// BucketOwner the owner of the bucket pays the cost of the request
+	BucketOwner PayerType = "BucketOwner"
 )
 
 // HTTPMethod HTTP request method
@@ -153,6 +156,7 @@ const (
 	HTTPHeaderOssRequester                   = "X-Oss-Request-Payer"
 	HTTPHeaderOssTagging                     = "X-Oss-Tagging"
 	HTTPHeaderOssTaggingDirective            = "X-Oss-Tagging-Directive"
+	HTTPHeaderOssTrafficLimit                = "X-Oss-Traffic-Limit"
 )
 
 // HTTP Param
@@ -178,5 +182,5 @@ const (
 
 	NullVersion = "null"
 
-	Version = "v2.0.0" // Go SDK version
+	Version = "v2.0.1" // Go SDK version
 )

+ 12 - 10
oss/download.go

@@ -263,7 +263,7 @@ func (bucket Bucket) downloadFile(objectKey, filePath string, partSize int64, op
 
 	var completedBytes int64
 	totalBytes := getObjectBytes(parts)
-	event := newProgressEvent(TransferStartedEvent, 0, totalBytes)
+	event := newProgressEvent(TransferStartedEvent, 0, totalBytes, 0)
 	publishProgress(listener, event)
 
 	// Start the download workers
@@ -281,13 +281,14 @@ func (bucket Bucket) downloadFile(objectKey, filePath string, partSize int64, op
 		select {
 		case part := <-results:
 			completed++
-			completedBytes += (part.End - part.Start + 1)
+			downBytes := (part.End - part.Start + 1)
+			completedBytes += downBytes
 			parts[part.Index].CRC64 = part.CRC64
-			event = newProgressEvent(TransferDataEvent, completedBytes, totalBytes)
+			event = newProgressEvent(TransferDataEvent, completedBytes, totalBytes, downBytes)
 			publishProgress(listener, event)
 		case err := <-failed:
 			close(die)
-			event = newProgressEvent(TransferFailedEvent, completedBytes, totalBytes)
+			event = newProgressEvent(TransferFailedEvent, completedBytes, totalBytes, 0)
 			publishProgress(listener, event)
 			return err
 		}
@@ -297,7 +298,7 @@ func (bucket Bucket) downloadFile(objectKey, filePath string, partSize int64, op
 		}
 	}
 
-	event = newProgressEvent(TransferCompletedEvent, completedBytes, totalBytes)
+	event = newProgressEvent(TransferCompletedEvent, completedBytes, totalBytes, 0)
 	publishProgress(listener, event)
 
 	if enableCRC {
@@ -510,7 +511,7 @@ func (bucket Bucket) downloadFileWithCp(objectKey, filePath string, partSize int
 	die := make(chan bool)
 
 	completedBytes := dcp.getCompletedBytes()
-	event := newProgressEvent(TransferStartedEvent, completedBytes, dcp.ObjStat.Size)
+	event := newProgressEvent(TransferStartedEvent, completedBytes, dcp.ObjStat.Size, 0)
 	publishProgress(listener, event)
 
 	// Start the download workers routine
@@ -531,12 +532,13 @@ func (bucket Bucket) downloadFileWithCp(objectKey, filePath string, partSize int
 			dcp.PartStat[part.Index] = true
 			dcp.Parts[part.Index].CRC64 = part.CRC64
 			dcp.dump(cpFilePath)
-			completedBytes += (part.End - part.Start + 1)
-			event = newProgressEvent(TransferDataEvent, completedBytes, dcp.ObjStat.Size)
+			downBytes := (part.End - part.Start + 1)
+			completedBytes += downBytes
+			event = newProgressEvent(TransferDataEvent, completedBytes, dcp.ObjStat.Size, downBytes)
 			publishProgress(listener, event)
 		case err := <-failed:
 			close(die)
-			event = newProgressEvent(TransferFailedEvent, completedBytes, dcp.ObjStat.Size)
+			event = newProgressEvent(TransferFailedEvent, completedBytes, dcp.ObjStat.Size, 0)
 			publishProgress(listener, event)
 			return err
 		}
@@ -546,7 +548,7 @@ func (bucket Bucket) downloadFileWithCp(objectKey, filePath string, partSize int
 		}
 	}
 
-	event = newProgressEvent(TransferCompletedEvent, completedBytes, dcp.ObjStat.Size)
+	event = newProgressEvent(TransferCompletedEvent, completedBytes, dcp.ObjStat.Size, 0)
 	publishProgress(listener, event)
 
 	if dcp.enableCRC {

+ 12 - 10
oss/multicopy.go

@@ -169,7 +169,7 @@ func (bucket Bucket) copyFile(srcBucketName, srcObjectKey, destBucketName, destO
 
 	var completedBytes int64
 	totalBytes := getSrcObjectBytes(parts)
-	event := newProgressEvent(TransferStartedEvent, 0, totalBytes)
+	event := newProgressEvent(TransferStartedEvent, 0, totalBytes, 0)
 	publishProgress(listener, event)
 
 	// Start to copy workers
@@ -189,13 +189,14 @@ func (bucket Bucket) copyFile(srcBucketName, srcObjectKey, destBucketName, destO
 		case part := <-results:
 			completed++
 			ups[part.PartNumber-1] = part
-			completedBytes += (parts[part.PartNumber-1].End - parts[part.PartNumber-1].Start + 1)
-			event = newProgressEvent(TransferDataEvent, completedBytes, totalBytes)
+			copyBytes := (parts[part.PartNumber-1].End - parts[part.PartNumber-1].Start + 1)
+			completedBytes += copyBytes
+			event = newProgressEvent(TransferDataEvent, completedBytes, totalBytes, copyBytes)
 			publishProgress(listener, event)
 		case err := <-failed:
 			close(die)
 			descBucket.AbortMultipartUpload(imur, options...)
-			event = newProgressEvent(TransferFailedEvent, completedBytes, totalBytes)
+			event = newProgressEvent(TransferFailedEvent, completedBytes, totalBytes, 0)
 			publishProgress(listener, event)
 			return err
 		}
@@ -205,7 +206,7 @@ func (bucket Bucket) copyFile(srcBucketName, srcObjectKey, destBucketName, destO
 		}
 	}
 
-	event = newProgressEvent(TransferCompletedEvent, completedBytes, totalBytes)
+	event = newProgressEvent(TransferCompletedEvent, completedBytes, totalBytes, 0)
 	publishProgress(listener, event)
 
 	// Complete the multipart upload
@@ -418,7 +419,7 @@ func (bucket Bucket) copyFileWithCp(srcBucketName, srcObjectKey, destBucketName,
 	die := make(chan bool)
 
 	completedBytes := ccp.getCompletedBytes()
-	event := newProgressEvent(TransferStartedEvent, completedBytes, ccp.ObjStat.Size)
+	event := newProgressEvent(TransferStartedEvent, completedBytes, ccp.ObjStat.Size, 0)
 	publishProgress(listener, event)
 
 	// Start the worker coroutines
@@ -438,12 +439,13 @@ func (bucket Bucket) copyFileWithCp(srcBucketName, srcObjectKey, destBucketName,
 			completed++
 			ccp.update(part)
 			ccp.dump(cpFilePath)
-			completedBytes += (parts[part.PartNumber-1].End - parts[part.PartNumber-1].Start + 1)
-			event = newProgressEvent(TransferDataEvent, completedBytes, ccp.ObjStat.Size)
+			copyBytes := (parts[part.PartNumber-1].End - parts[part.PartNumber-1].Start + 1)
+			completedBytes += copyBytes
+			event = newProgressEvent(TransferDataEvent, completedBytes, ccp.ObjStat.Size, copyBytes)
 			publishProgress(listener, event)
 		case err := <-failed:
 			close(die)
-			event = newProgressEvent(TransferFailedEvent, completedBytes, ccp.ObjStat.Size)
+			event = newProgressEvent(TransferFailedEvent, completedBytes, ccp.ObjStat.Size, 0)
 			publishProgress(listener, event)
 			return err
 		}
@@ -453,7 +455,7 @@ func (bucket Bucket) copyFileWithCp(srcBucketName, srcObjectKey, destBucketName,
 		}
 	}
 
-	event = newProgressEvent(TransferCompletedEvent, completedBytes, ccp.ObjStat.Size)
+	event = newProgressEvent(TransferCompletedEvent, completedBytes, ccp.ObjStat.Size,0)
 	publishProgress(listener, event)
 
 	return ccp.complete(descBucket, ccp.CopyParts, cpFilePath, options)

+ 15 - 1
oss/option.go

@@ -206,7 +206,7 @@ func CallbackVar(callbackVar string) Option {
 
 // RequestPayer is an option to set the payer who pays for the request
 func RequestPayer(payerType PayerType) Option {
-	return setHeader(HTTPHeaderOssRequester, string(payerType))
+	return setHeader(HTTPHeaderOssRequester, strings.ToLower(string(payerType)))
 }
 
 // SetTagging is an option to set object tagging
@@ -240,6 +240,11 @@ func ACReqHeaders(value string) Option {
 	return setHeader(HTTPHeaderACReqHeaders, value)
 }
 
+// TrafficLimitHeader is an option to set X-Oss-Traffic-Limit
+func TrafficLimitHeader(value int64) Option {
+	return setHeader(HTTPHeaderOssTrafficLimit, strconv.FormatInt(value, 10))
+}
+
 // Delimiter is an option to set delimiler parameter
 func Delimiter(value string) Option {
 	return addParam("delimiter", value)
@@ -392,6 +397,11 @@ func Process(value string) Option {
 	return addParam("x-oss-process", value)
 }
 
+// TrafficLimitParam is an option to set x-oss-traffic-limit
+func TrafficLimitParam(value int64) Option {
+	return addParam("x-oss-traffic-limit", strconv.FormatInt(value, 10))
+}
+
 func setHeader(key string, value interface{}) Option {
 	return func(params map[string]optionValue) error {
 		if value == nil {
@@ -531,3 +541,7 @@ func GetDeleteMark(header http.Header) bool {
 	}
 	return false
 }
+
+func GetQosDelayTime(header http.Header) string {
+	return header.Get("x-oss-qos-delay-time")
+}

+ 5 - 3
oss/progress.go

@@ -20,6 +20,7 @@ const (
 type ProgressEvent struct {
 	ConsumedBytes int64
 	TotalBytes    int64
+	RwBytes       int64
 	EventType     ProgressEventType
 }
 
@@ -30,10 +31,11 @@ type ProgressListener interface {
 
 // -------------------- Private --------------------
 
-func newProgressEvent(eventType ProgressEventType, consumed, total int64) *ProgressEvent {
+func newProgressEvent(eventType ProgressEventType, consumed, total int64, rwBytes int64) *ProgressEvent {
 	return &ProgressEvent{
 		ConsumedBytes: consumed,
 		TotalBytes:    total,
+		RwBytes:       rwBytes,
 		EventType:     eventType}
 }
 
@@ -78,7 +80,7 @@ func (t *teeReader) Read(p []byte) (n int, err error) {
 
 	// Read encountered error
 	if err != nil && err != io.EOF {
-		event := newProgressEvent(TransferFailedEvent, t.consumedBytes, t.totalBytes)
+		event := newProgressEvent(TransferFailedEvent, t.consumedBytes, t.totalBytes, 0)
 		publishProgress(t.listener, event)
 	}
 
@@ -92,7 +94,7 @@ func (t *teeReader) Read(p []byte) (n int, err error) {
 		}
 		// Progress
 		if t.listener != nil {
-			event := newProgressEvent(TransferDataEvent, t.consumedBytes, t.totalBytes)
+			event := newProgressEvent(TransferDataEvent, t.consumedBytes, t.totalBytes, int64(n))
 			publishProgress(t.listener, event)
 		}
 		// Track

+ 110 - 33
oss/progress_test.go

@@ -102,6 +102,7 @@ func (s *OssProgressSuite) TearDownTest(c *C) {
 
 // OssProgressListener is the progress listener
 type OssProgressListener struct {
+	TotalRwBytes int64
 }
 
 // ProgressChanged handles progress event
@@ -111,6 +112,7 @@ func (listener *OssProgressListener) ProgressChanged(event *ProgressEvent) {
 		testLogger.Printf("Transfer Started, ConsumedBytes: %d, TotalBytes %d.\n",
 			event.ConsumedBytes, event.TotalBytes)
 	case TransferDataEvent:
+		listener.TotalRwBytes += event.RwBytes
 		testLogger.Printf("Transfer Data, ConsumedBytes: %d, TotalBytes %d, %d%%.\n",
 			event.ConsumedBytes, event.TotalBytes, event.ConsumedBytes*100/event.TotalBytes)
 	case TransferCompletedEvent:
@@ -128,17 +130,24 @@ func (s *OssProgressSuite) TestPutObject(c *C) {
 	objectName := randStr(8) + ".jpg"
 	localFile := "../sample/The Go Programming Language.html"
 
+	fileInfo, err := os.Stat(localFile)
+	c.Assert(err, IsNil)
+
 	// PutObject
 	fd, err := os.Open(localFile)
 	c.Assert(err, IsNil)
 	defer fd.Close()
 
-	err = s.bucket.PutObject(objectName, fd, Progress(&OssProgressListener{}))
+	progressListener := OssProgressListener{}
+	err = s.bucket.PutObject(objectName, fd, Progress(&progressListener))
 	c.Assert(err, IsNil)
+	c.Assert(progressListener.TotalRwBytes, Equals, fileInfo.Size())
 
 	// PutObjectFromFile
-	err = s.bucket.PutObjectFromFile(objectName, localFile, Progress(&OssProgressListener{}))
+	progressListener.TotalRwBytes = 0
+	err = s.bucket.PutObjectFromFile(objectName, localFile, Progress(&progressListener))
 	c.Assert(err, IsNil)
+	c.Assert(progressListener.TotalRwBytes, Equals, fileInfo.Size())
 
 	// DoPutObject
 	fd, err = os.Open(localFile)
@@ -150,13 +159,17 @@ func (s *OssProgressSuite) TestPutObject(c *C) {
 		Reader:    fd,
 	}
 
-	options := []Option{Progress(&OssProgressListener{})}
+	progressListener.TotalRwBytes = 0
+	options := []Option{Progress(&progressListener)}
 	_, err = s.bucket.DoPutObject(request, options)
 	c.Assert(err, IsNil)
+	c.Assert(progressListener.TotalRwBytes, Equals, fileInfo.Size())
 
 	// PutObject size is 0
-	err = s.bucket.PutObject(objectName, strings.NewReader(""), Progress(&OssProgressListener{}))
+	progressListener.TotalRwBytes = 0
+	err = s.bucket.PutObject(objectName, strings.NewReader(""), Progress(&progressListener))
 	c.Assert(err, IsNil)
+	c.Assert(progressListener.TotalRwBytes, Equals, int64(0))
 
 	testLogger.Println("OssProgressSuite.TestPutObject")
 }
@@ -169,7 +182,8 @@ func (s *OssProgressSuite) TestSignURL(c *C) {
 	createFile(filePath, content, c)
 
 	// Sign URL for put
-	str, err := s.bucket.SignURL(objectName, HTTPPut, 60, Progress(&OssProgressListener{}))
+	progressListener := OssProgressListener{}
+	str, err := s.bucket.SignURL(objectName, HTTPPut, 60, Progress(&progressListener))
 	c.Assert(err, IsNil)
 	c.Assert(strings.Contains(str, HTTPParamExpires+"="), Equals, true)
 	c.Assert(strings.Contains(str, HTTPParamAccessKeyID+"="), Equals, true)
@@ -180,42 +194,52 @@ func (s *OssProgressSuite) TestSignURL(c *C) {
 	c.Assert(err, IsNil)
 	defer fd.Close()
 
-	err = s.bucket.PutObjectWithURL(str, fd, Progress(&OssProgressListener{}))
+	err = s.bucket.PutObjectWithURL(str, fd, Progress(&progressListener))
 	c.Assert(err, IsNil)
+	c.Assert(progressListener.TotalRwBytes, Equals, int64(len(content)))
 
 	// Put object from file with URL
-	err = s.bucket.PutObjectFromFileWithURL(str, filePath, Progress(&OssProgressListener{}))
+	progressListener.TotalRwBytes = 0
+	err = s.bucket.PutObjectFromFileWithURL(str, filePath, Progress(&progressListener))
 	c.Assert(err, IsNil)
+	c.Assert(progressListener.TotalRwBytes, Equals, int64(len(content)))
 
 	// DoPutObject
 	fd, err = os.Open(filePath)
 	c.Assert(err, IsNil)
 	defer fd.Close()
 
-	options := []Option{Progress(&OssProgressListener{})}
+	progressListener.TotalRwBytes = 0
+	options := []Option{Progress(&progressListener)}
 	_, err = s.bucket.DoPutObjectWithURL(str, fd, options)
 	c.Assert(err, IsNil)
+	c.Assert(progressListener.TotalRwBytes, Equals, int64(len(content)))
 
 	// Sign URL for get
-	str, err = s.bucket.SignURL(objectName, HTTPGet, 60, Progress(&OssProgressListener{}))
+	str, err = s.bucket.SignURL(objectName, HTTPGet, 60, Progress(&progressListener))
 	c.Assert(err, IsNil)
 	c.Assert(strings.Contains(str, HTTPParamExpires+"="), Equals, true)
 	c.Assert(strings.Contains(str, HTTPParamAccessKeyID+"="), Equals, true)
 	c.Assert(strings.Contains(str, HTTPParamSignature+"="), Equals, true)
 
 	// Get object with URL
-	body, err := s.bucket.GetObjectWithURL(str, Progress(&OssProgressListener{}))
+	progressListener.TotalRwBytes = 0
+	body, err := s.bucket.GetObjectWithURL(str, Progress(&progressListener))
 	c.Assert(err, IsNil)
 	str, err = readBody(body)
 	c.Assert(err, IsNil)
 	c.Assert(str, Equals, content)
+	c.Assert(progressListener.TotalRwBytes, Equals, int64(len(content)))
 
 	// Get object to file with URL
-	str, err = s.bucket.SignURL(objectName, HTTPGet, 10, Progress(&OssProgressListener{}))
+	progressListener.TotalRwBytes = 0
+	str, err = s.bucket.SignURL(objectName, HTTPGet, 10, Progress(&progressListener))
 	c.Assert(err, IsNil)
 
 	newFile := randStr(10)
-	err = s.bucket.GetObjectToFileWithURL(str, newFile, Progress(&OssProgressListener{}))
+	progressListener.TotalRwBytes = 0
+	err = s.bucket.GetObjectToFileWithURL(str, newFile, Progress(&progressListener))
+	c.Assert(progressListener.TotalRwBytes, Equals, int64(len(content)))
 	c.Assert(err, IsNil)
 	eq, err := compareFiles(filePath, newFile)
 	c.Assert(err, IsNil)
@@ -251,14 +275,16 @@ func (s *OssProgressSuite) TestPutObjectNegative(c *C) {
 // TestAppendObject
 func (s *OssProgressSuite) TestAppendObject(c *C) {
 	objectName := objectNamePrefix + randStr(8)
-	objectValue := "昨夜雨疏风骤,浓睡不消残酒。试问卷帘人,却道海棠依旧。知否?知否?应是绿肥红瘦。"
+	objectValue := randStr(100)
 	var val = []byte(objectValue)
 	var nextPos int64
 	var midPos = 1 + rand.Intn(len(val)-1)
 
 	// AppendObject
-	nextPos, err := s.bucket.AppendObject(objectName, bytes.NewReader(val[0:midPos]), nextPos, Progress(&OssProgressListener{}))
+	progressListener := OssProgressListener{}
+	nextPos, err := s.bucket.AppendObject(objectName, bytes.NewReader(val[0:midPos]), nextPos, Progress(&progressListener))
 	c.Assert(err, IsNil)
+	c.Assert(progressListener.TotalRwBytes, Equals, nextPos)
 
 	// DoAppendObject
 	request := &AppendObjectRequest{
@@ -278,6 +304,9 @@ func (s *OssProgressSuite) TestMultipartUpload(c *C) {
 	objectName := objectNamePrefix + randStr(8)
 	var fileName = "../sample/BingWallpaper-2015-11-07.jpg"
 
+	fileInfo, err := os.Stat(fileName)
+	c.Assert(err, IsNil)
+
 	chunks, err := SplitFileByPartNum(fileName, 3)
 	c.Assert(err, IsNil)
 	testLogger.Println("chunks:", chunks)
@@ -287,6 +316,7 @@ func (s *OssProgressSuite) TestMultipartUpload(c *C) {
 	defer fd.Close()
 
 	// Initiate
+	progressListener := OssProgressListener{}
 	imur, err := s.bucket.InitiateMultipartUpload(objectName)
 	c.Assert(err, IsNil)
 
@@ -294,7 +324,7 @@ func (s *OssProgressSuite) TestMultipartUpload(c *C) {
 	var parts []UploadPart
 	for _, chunk := range chunks {
 		fd.Seek(chunk.Offset, os.SEEK_SET)
-		part, err := s.bucket.UploadPart(imur, fd, chunk.Size, chunk.Number, Progress(&OssProgressListener{}))
+		part, err := s.bucket.UploadPart(imur, fd, chunk.Size, chunk.Number, Progress(&progressListener))
 		c.Assert(err, IsNil)
 		parts = append(parts, part)
 	}
@@ -302,6 +332,7 @@ func (s *OssProgressSuite) TestMultipartUpload(c *C) {
 	// Complete
 	_, err = s.bucket.CompleteMultipartUpload(imur, parts)
 	c.Assert(err, IsNil)
+	c.Assert(progressListener.TotalRwBytes, Equals, fileInfo.Size())
 
 	err = s.bucket.DeleteObject(objectName)
 	c.Assert(err, IsNil)
@@ -313,6 +344,8 @@ func (s *OssProgressSuite) TestMultipartUpload(c *C) {
 func (s *OssProgressSuite) TestMultipartUploadFromFile(c *C) {
 	objectName := objectNamePrefix + randStr(8)
 	var fileName = "../sample/BingWallpaper-2015-11-07.jpg"
+	fileInfo, err := os.Stat(fileName)
+	c.Assert(err, IsNil)
 
 	chunks, err := SplitFileByPartNum(fileName, 3)
 	c.Assert(err, IsNil)
@@ -322,9 +355,10 @@ func (s *OssProgressSuite) TestMultipartUploadFromFile(c *C) {
 	c.Assert(err, IsNil)
 
 	// UploadPart
+	progressListener := OssProgressListener{}
 	var parts []UploadPart
 	for _, chunk := range chunks {
-		part, err := s.bucket.UploadPartFromFile(imur, fileName, chunk.Offset, chunk.Size, chunk.Number, Progress(&OssProgressListener{}))
+		part, err := s.bucket.UploadPartFromFile(imur, fileName, chunk.Offset, chunk.Size, chunk.Number, Progress(&progressListener))
 		c.Assert(err, IsNil)
 		parts = append(parts, part)
 	}
@@ -332,6 +366,7 @@ func (s *OssProgressSuite) TestMultipartUploadFromFile(c *C) {
 	// Complete
 	_, err = s.bucket.CompleteMultipartUpload(imur, parts)
 	c.Assert(err, IsNil)
+	c.Assert(progressListener.TotalRwBytes, Equals, fileInfo.Size())
 
 	err = s.bucket.DeleteObject(objectName)
 	c.Assert(err, IsNil)
@@ -345,47 +380,64 @@ func (s *OssProgressSuite) TestGetObject(c *C) {
 	localFile := "../sample/BingWallpaper-2015-11-07.jpg"
 	newFile := "newpic-progress-1.jpg"
 
+	fileInfo, err := os.Stat(localFile)
+	c.Assert(err, IsNil)
+
+	progressListener := OssProgressListener{}
 	// PutObject
-	err := s.bucket.PutObjectFromFile(objectName, localFile, Progress(&OssProgressListener{}))
+	err = s.bucket.PutObjectFromFile(objectName, localFile, Progress(&progressListener))
 	c.Assert(err, IsNil)
+	c.Assert(progressListener.TotalRwBytes, Equals, fileInfo.Size())
 
 	// GetObject
-	body, err := s.bucket.GetObject(objectName, Progress(&OssProgressListener{}))
+	progressListener.TotalRwBytes = 0
+	body, err := s.bucket.GetObject(objectName, Progress(&progressListener))
 	c.Assert(err, IsNil)
 	_, err = ioutil.ReadAll(body)
 	c.Assert(err, IsNil)
 	body.Close()
+	c.Assert(progressListener.TotalRwBytes, Equals, fileInfo.Size())
 
 	// GetObjectToFile
-	err = s.bucket.GetObjectToFile(objectName, newFile, Progress(&OssProgressListener{}))
+	progressListener.TotalRwBytes = 0
+	err = s.bucket.GetObjectToFile(objectName, newFile, Progress(&progressListener))
 	c.Assert(err, IsNil)
+	c.Assert(progressListener.TotalRwBytes, Equals, fileInfo.Size())
 
 	// DoGetObject
+	progressListener.TotalRwBytes = 0
 	request := &GetObjectRequest{objectName}
-	options := []Option{Progress(&OssProgressListener{})}
+	options := []Option{Progress(&progressListener)}
 	result, err := s.bucket.DoGetObject(request, options)
 	c.Assert(err, IsNil)
 	_, err = ioutil.ReadAll(result.Response.Body)
 	c.Assert(err, IsNil)
 	result.Response.Body.Close()
+	c.Assert(progressListener.TotalRwBytes, Equals, fileInfo.Size())
 
 	// GetObject with range
-	body, err = s.bucket.GetObject(objectName, Range(1024, 4*1024), Progress(&OssProgressListener{}))
+	progressListener.TotalRwBytes = 0
+	body, err = s.bucket.GetObject(objectName, Range(1024, 4*1024), Progress(&progressListener))
 	c.Assert(err, IsNil)
-	_, err = ioutil.ReadAll(body)
+	text, err := ioutil.ReadAll(body)
 	c.Assert(err, IsNil)
 	body.Close()
+	c.Assert(progressListener.TotalRwBytes, Equals, int64(len(text)))
 
 	// PutObject size is 0
-	err = s.bucket.PutObject(objectName, strings.NewReader(""), Progress(&OssProgressListener{}))
+	progressListener.TotalRwBytes = 0
+	err = s.bucket.PutObject(objectName, strings.NewReader(""), Progress(&progressListener))
 	c.Assert(err, IsNil)
+	c.Assert(progressListener.TotalRwBytes, Equals, int64(0))
 
 	// GetObject size is 0
-	body, err = s.bucket.GetObject(objectName, Progress(&OssProgressListener{}))
+	progressListener.TotalRwBytes = 0
+	body, err = s.bucket.GetObject(objectName, Progress(&progressListener))
 	c.Assert(err, IsNil)
 	_, err = ioutil.ReadAll(body)
 	c.Assert(err, IsNil)
 	body.Close()
+	c.Assert(progressListener.TotalRwBytes, Equals, int64(0))
 
 	testLogger.Println("OssProgressSuite.TestGetObject")
 }
@@ -425,11 +477,18 @@ func (s *OssProgressSuite) TestUploadFile(c *C) {
 	objectName := objectNamePrefix + randStr(8)
 	fileName := "../sample/BingWallpaper-2015-11-07.jpg"
 
-	err := s.bucket.UploadFile(objectName, fileName, 100*1024, Routines(5), Progress(&OssProgressListener{}))
+	fileInfo, err := os.Stat(fileName)
+	c.Assert(err, IsNil)
+
+	progressListener := OssProgressListener{}
+	err = s.bucket.UploadFile(objectName, fileName, 100*1024, Routines(5), Progress(&progressListener))
 	c.Assert(err, IsNil)
+	c.Assert(progressListener.TotalRwBytes, Equals, fileInfo.Size())
 
-	err = s.bucket.UploadFile(objectName, fileName, 100*1024, Routines(3), Checkpoint(true, objectName+".cp"), Progress(&OssProgressListener{}))
+	progressListener.TotalRwBytes = 0
+	err = s.bucket.UploadFile(objectName, fileName, 100*1024, Routines(3), Checkpoint(true, objectName+".cp"), Progress(&progressListener))
 	c.Assert(err, IsNil)
+	c.Assert(progressListener.TotalRwBytes, Equals, fileInfo.Size())
 
 	testLogger.Println("OssProgressSuite.TestUploadFile")
 }
@@ -440,18 +499,27 @@ func (s *OssProgressSuite) TestDownloadFile(c *C) {
 	fileName := "../sample/BingWallpaper-2015-11-07.jpg"
 	newFile := "down-new-file-progress-2.jpg"
 
+	fileInfo, err := os.Stat(fileName)
+	c.Assert(err, IsNil)
+
 	// Upload
-	err := s.bucket.UploadFile(objectName, fileName, 100*1024, Routines(3))
+	err = s.bucket.UploadFile(objectName, fileName, 100*1024, Routines(3))
 	c.Assert(err, IsNil)
 
-	err = s.bucket.DownloadFile(objectName, newFile, 100*1024, Routines(5), Progress(&OssProgressListener{}))
+	progressListener := OssProgressListener{}
+	err = s.bucket.DownloadFile(objectName, newFile, 100*1024, Routines(5), Progress(&progressListener))
 	c.Assert(err, IsNil)
+	c.Assert(progressListener.TotalRwBytes, Equals, fileInfo.Size())
 
-	err = s.bucket.DownloadFile(objectName, newFile, 1024*1024, Routines(3), Progress(&OssProgressListener{}))
+	progressListener.TotalRwBytes = 0
+	err = s.bucket.DownloadFile(objectName, newFile, 1024*1024, Routines(3), Progress(&progressListener))
 	c.Assert(err, IsNil)
+	c.Assert(progressListener.TotalRwBytes, Equals, fileInfo.Size())
 
-	err = s.bucket.DownloadFile(objectName, newFile, 50*1024, Routines(3), Checkpoint(true, ""), Progress(&OssProgressListener{}))
+	progressListener.TotalRwBytes = 0
+	err = s.bucket.DownloadFile(objectName, newFile, 50*1024, Routines(3), Checkpoint(true, ""), Progress(&progressListener))
 	c.Assert(err, IsNil)
+	c.Assert(progressListener.TotalRwBytes, Equals, fileInfo.Size())
 
 	testLogger.Println("OssProgressSuite.TestDownloadFile")
 }
@@ -462,15 +530,24 @@ func (s *OssProgressSuite) TestCopyFile(c *C) {
 	destObjectName := srcObjectName + "-copy"
 	fileName := "../sample/BingWallpaper-2015-11-07.jpg"
 
+	fileInfo, err := os.Stat(fileName)
+	c.Assert(err, IsNil)
+
 	// Upload
-	err := s.bucket.UploadFile(srcObjectName, fileName, 100*1024, Routines(3))
+	progressListener := OssProgressListener{}
+	err = s.bucket.UploadFile(srcObjectName, fileName, 100*1024, Routines(3), Progress(&progressListener))
 	c.Assert(err, IsNil)
+	c.Assert(progressListener.TotalRwBytes, Equals, fileInfo.Size())
 
-	err = s.bucket.CopyFile(bucketName, srcObjectName, destObjectName, 100*1024, Routines(5), Progress(&OssProgressListener{}))
+	progressListener.TotalRwBytes = 0
+	err = s.bucket.CopyFile(bucketName, srcObjectName, destObjectName, 100*1024, Routines(5), Progress(&progressListener))
 	c.Assert(err, IsNil)
+	c.Assert(progressListener.TotalRwBytes, Equals, fileInfo.Size())
 
-	err = s.bucket.CopyFile(bucketName, srcObjectName, destObjectName, 1024*100, Routines(3), Checkpoint(true, ""), Progress(&OssProgressListener{}))
+	progressListener.TotalRwBytes = 0
+	err = s.bucket.CopyFile(bucketName, srcObjectName, destObjectName, 1024*100, Routines(3), Checkpoint(true, ""), Progress(&progressListener))
 	c.Assert(err, IsNil)
+	c.Assert(progressListener.TotalRwBytes, Equals, fileInfo.Size())
 
 	testLogger.Println("OssProgressSuite.TestCopyFile")
 }

+ 27 - 0
oss/type.go

@@ -831,3 +831,30 @@ type BucketStat struct {
 	MultipartUploadCount int64    `xml:"MultipartUploadCount"`
 }
 type GetBucketStatResult BucketStat
+
+// RequestPaymentConfiguration defines the request payment configuration
+type RequestPaymentConfiguration struct {
+	XMLName xml.Name `xml:"RequestPaymentConfiguration"`
+	Payer   string   `xml:"Payer,omitempty"`
+}
+
+// BucketQoSConfiguration defines the bucket QoS configuration
+type BucketQoSConfiguration struct {
+	XMLName                   xml.Name `xml:"QoSConfiguration"`	
+	TotalUploadBandwidth      *int      `xml:"TotalUploadBandwidth"`      // Total upload bandwidth
+	IntranetUploadBandwidth   *int      `xml:"IntranetUploadBandwidth"`   // Intranet upload bandwidth
+	ExtranetUploadBandwidth   *int      `xml:"ExtranetUploadBandwidth"`   // Extranet upload bandwidth
+	TotalDownloadBandwidth    *int      `xml:"TotalDownloadBandwidth"`    // Total download bandwidth
+	IntranetDownloadBandwidth *int      `xml:"IntranetDownloadBandwidth"` // Intranet download bandwidth
+	ExtranetDownloadBandwidth *int      `xml:"ExtranetDownloadBandwidth"` // Extranet download bandwidth
+	TotalQPS                  *int      `xml:"TotalQps"`                  // Total Qps
+	IntranetQPS               *int      `xml:"IntranetQps"`               // Intranet Qps
+	ExtranetQPS               *int      `xml:"ExtranetQps"`               // Extranet Qps
+}
+
+// UserQoSConfiguration defines the user-level QoS and region configuration
+type UserQoSConfiguration struct {
+	XMLName xml.Name `xml:"QoSConfiguration"`
+	Region  string   `xml:"Region,omitempty"` // Effective area of Qos configuration
+	BucketQoSConfiguration
+}

+ 14 - 8
oss/upload.go

@@ -188,7 +188,7 @@ func (bucket Bucket) uploadFile(objectKey, filePath string, partSize int64, opti
 
 	var completedBytes int64
 	totalBytes := getTotalBytes(chunks)
-	event := newProgressEvent(TransferStartedEvent, 0, totalBytes)
+	event := newProgressEvent(TransferStartedEvent, 0, totalBytes, 0)
 	publishProgress(listener, event)
 
 	// Start the worker coroutine
@@ -209,11 +209,14 @@ func (bucket Bucket) uploadFile(objectKey, filePath string, partSize int64, opti
 			completed++
 			parts[part.PartNumber-1] = part
 			completedBytes += chunks[part.PartNumber-1].Size
-			event = newProgressEvent(TransferDataEvent, completedBytes, totalBytes)
+
+	// RwBytes in this ProgressEvent is 0 because the per-chunk read/write
+	// progress has already been reported in teeReader.Read().
+			event = newProgressEvent(TransferDataEvent, completedBytes, totalBytes, 0)
 			publishProgress(listener, event)
 		case err := <-failed:
 			close(die)
-			event = newProgressEvent(TransferFailedEvent, completedBytes, totalBytes)
+			event = newProgressEvent(TransferFailedEvent, completedBytes, totalBytes, 0)
 			publishProgress(listener, event)
 			bucket.AbortMultipartUpload(imur, options...)
 			return err
@@ -224,7 +227,7 @@ func (bucket Bucket) uploadFile(objectKey, filePath string, partSize int64, opti
 		}
 	}
 
-	event = newProgressEvent(TransferStartedEvent, completedBytes, totalBytes)
+	event = newProgressEvent(TransferStartedEvent, completedBytes, totalBytes, 0)
 	publishProgress(listener, event)
 
 	// Complete the multpart upload
@@ -470,7 +473,10 @@ func (bucket Bucket) uploadFileWithCp(objectKey, filePath string, partSize int64
 	die := make(chan bool)
 
 	completedBytes := ucp.getCompletedBytes()
-	event := newProgressEvent(TransferStartedEvent, completedBytes, ucp.FileStat.Size)
+
+	// RwBytes in this ProgressEvent is 0 because the per-chunk read/write
+	// progress has already been reported in teeReader.Read().
+	event := newProgressEvent(TransferStartedEvent, completedBytes, ucp.FileStat.Size, 0)
 	publishProgress(listener, event)
 
 	// Start the workers
@@ -491,11 +497,11 @@ func (bucket Bucket) uploadFileWithCp(objectKey, filePath string, partSize int64
 			ucp.updatePart(part)
 			ucp.dump(cpFilePath)
 			completedBytes += ucp.Parts[part.PartNumber-1].Chunk.Size
-			event = newProgressEvent(TransferDataEvent, completedBytes, ucp.FileStat.Size)
+			event = newProgressEvent(TransferDataEvent, completedBytes, ucp.FileStat.Size, 0)
 			publishProgress(listener, event)
 		case err := <-failed:
 			close(die)
-			event = newProgressEvent(TransferFailedEvent, completedBytes, ucp.FileStat.Size)
+			event = newProgressEvent(TransferFailedEvent, completedBytes, ucp.FileStat.Size, 0)
 			publishProgress(listener, event)
 			return err
 		}
@@ -505,7 +511,7 @@ func (bucket Bucket) uploadFileWithCp(objectKey, filePath string, partSize int64
 		}
 	}
 
-	event = newProgressEvent(TransferCompletedEvent, completedBytes, ucp.FileStat.Size)
+	event = newProgressEvent(TransferCompletedEvent, completedBytes, ucp.FileStat.Size, 0)
 	publishProgress(listener, event)
 
 	// Complete the multipart upload

+ 25 - 23
sample.go

@@ -12,29 +12,31 @@ import (
 
 // sampleMap contains all samples
 var sampleMap = map[string]interface{}{
-	"CreateBucketSample":     sample.CreateBucketSample,
-	"NewBucketSample":        sample.NewBucketSample,
-	"ListBucketsSample":      sample.ListBucketsSample,
-	"BucketACLSample":        sample.BucketACLSample,
-	"BucketLifecycleSample":  sample.BucketLifecycleSample,
-	"BucketRefererSample":    sample.BucketRefererSample,
-	"BucketLoggingSample":    sample.BucketLoggingSample,
-	"BucketWebsiteSample":	  sample.BucketWebsiteSample,
-	"BucketCORSSample":       sample.BucketCORSSample,
-	"BucketPolicySample":	  sample.BucketPolicySample,
-	"ObjectACLSample":        sample.ObjectACLSample,
-	"ObjectMetaSample":       sample.ObjectMetaSample,
-	"ListObjectsSample":      sample.ListObjectsSample,
-	"DeleteObjectSample":     sample.DeleteObjectSample,
-	"AppendObjectSample":     sample.AppendObjectSample,
-	"CopyObjectSample":       sample.CopyObjectSample,
-	"PutObjectSample":        sample.PutObjectSample,
-	"GetObjectSample":        sample.GetObjectSample,
-	"CnameSample":            sample.CnameSample,
-	"SignURLSample":          sample.SignURLSample,
-	"ArchiveSample":          sample.ArchiveSample,
-	"ObjectTaggingSample":    sample.ObjectTaggingSample,
-	"BucketEncryptionSample": sample.BucketEncryptionSample,
+	"CreateBucketSample":          sample.CreateBucketSample,
+	"NewBucketSample":             sample.NewBucketSample,
+	"ListBucketsSample":           sample.ListBucketsSample,
+	"BucketACLSample":             sample.BucketACLSample,
+	"BucketLifecycleSample":       sample.BucketLifecycleSample,
+	"BucketRefererSample":         sample.BucketRefererSample,
+	"BucketLoggingSample":         sample.BucketLoggingSample,
+	"BucketWebsiteSample":	       sample.BucketWebsiteSample,
+	"BucketCORSSample":            sample.BucketCORSSample,
+	"BucketPolicySample":	       sample.BucketPolicySample,
+	"BucketrRequestPaymentSample": sample.BucketrRequestPaymentSample,
+	"BucketQoSInfoSample":	       sample.BucketQoSInfoSample,
+	"ObjectACLSample":             sample.ObjectACLSample,
+	"ObjectMetaSample":            sample.ObjectMetaSample,
+	"ListObjectsSample":           sample.ListObjectsSample,
+	"DeleteObjectSample":          sample.DeleteObjectSample,
+	"AppendObjectSample":          sample.AppendObjectSample,
+	"CopyObjectSample":            sample.CopyObjectSample,
+	"PutObjectSample":             sample.PutObjectSample,
+	"GetObjectSample":             sample.GetObjectSample,
+	"CnameSample":                 sample.CnameSample,
+	"SignURLSample":               sample.SignURLSample,
+	"ArchiveSample":               sample.ArchiveSample,
+	"ObjectTaggingSample":         sample.ObjectTaggingSample,
+	"BucketEncryptionSample":      sample.BucketEncryptionSample,
 }
 
 func main() {

+ 6 - 0
sample/bucket_policy.go

@@ -57,5 +57,11 @@ func BucketPolicySample() {
 		HandleError(err)
 	}
 
+	// Delete bucket
+	err = client.DeleteBucket(bucketName)
+	if err != nil {
+		HandleError(err)
+	}
+
 	fmt.Println("BucketPolicySample completed")
 }

+ 65 - 0
sample/bucket_qosInfo.go

@@ -0,0 +1,65 @@
+package sample
+
+import (
+	"fmt"
+
+	"github.com/aliyun/aliyun-oss-go-sdk/oss"
+)
+
+// BucketQoSInfoSample shows how to set, get and delete the bucket QoS configuration
+func BucketQoSInfoSample() {
+	// New client
+	client, err := oss.New(endpoint, accessID, accessKey)
+	if err != nil {
+		HandleError(err)
+	}
+
+	// Create the bucket with default parameters
+	err = client.CreateBucket(bucketName)
+	if err != nil {
+		HandleError(err)
+	}
+	// Initial QoS Configuration
+	five := 5
+	four := 4
+	totalQps := 200
+	qosConf := oss.BucketQoSConfiguration{
+		TotalUploadBandwidth:      &five,
+		IntranetUploadBandwidth:   &four,
+		ExtranetUploadBandwidth:   &four,
+		TotalDownloadBandwidth:    &four,
+		IntranetDownloadBandwidth: &four,
+		ExtranetDownloadBandwidth: &four,
+		TotalQPS:                  &totalQps,
+		IntranetQPS:               &totalQps,
+		ExtranetQPS:               &totalQps,
+	}
+
+	// Set Qos Info
+	err = client.SetBucketQoSInfo(bucketName, qosConf)
+	if err != nil {
+		HandleError(err)
+	}
+
+	// Get Qos Info
+	ret, err := client.GetBucketQosInfo(bucketName)
+	if err != nil {
+		HandleError(err)
+	}
+	fmt.Printf("Bucket QoSInfo\n  TotalUploadBandwidth: %d\n  IntranetUploadBandwidth: %d\n  ExtranetUploadBandwidth: %d\n",
+		*ret.TotalUploadBandwidth, *ret.IntranetUploadBandwidth, *ret.ExtranetUploadBandwidth)
+
+	// Delete QosInfo
+	err = client.DeleteBucketQosInfo(bucketName)
+	if err != nil {
+		HandleError(err)
+	}
+
+	// Delete bucket
+	err = client.DeleteBucket(bucketName)
+	if err != nil {
+		HandleError(err)
+	}
+
+	fmt.Println("BucketQoSInfoSample completed")
+}

+ 117 - 0
sample/bucket_requestpayment.go

@@ -0,0 +1,117 @@
+package sample
+
+import (
+	"fmt"
+	"strings"
+	"io/ioutil"
+	"github.com/aliyun/aliyun-oss-go-sdk/oss"
+)
+
+// BucketrRequestPaymentSample shows how to set and get the bucket request payment configuration.
+func BucketrRequestPaymentSample() {
+	// New client
+	client, err := oss.New(endpoint, accessID, accessKey)
+	if err != nil {
+		HandleError(err)
+	}
+
+	// Create the bucket with default parameters
+	err = client.CreateBucket(bucketName)
+	if err != nil {
+		HandleError(err)
+	}
+
+	reqPayConf := oss.RequestPaymentConfiguration{
+		Payer: string(oss.Requester),
+	}
+
+	// Case 1: Set bucket request payment.
+	err = client.SetBucketRequestPayment(bucketName, reqPayConf)
+	if err != nil {
+		HandleError(err)
+	}
+
+	// Get bucket request payment configuration
+	ret, err := client.GetBucketRequestPayment(bucketName)
+	if err != nil {
+		HandleError(err)
+	}
+	fmt.Println("Bucket request payer:", ret.Payer)
+
+	if credentialUID == "" {
+		fmt.Println("Please enter a credential User ID, if you want to test credential user.")
+		clearData(client, bucketName)
+		return
+	}
+	// Credential other User
+	policyInfo := `
+	{
+		"Version":"1",
+		"Statement":[
+			{
+				"Action":[
+					"oss:*"
+				],
+				"Effect":"Allow",
+				"Principal":["` + credentialUID + `"],
+				"Resource":["acs:oss:*:*:` + bucketName + `", "acs:oss:*:*:` + bucketName + `/*"]
+			}
+		]
+	}`
+
+	err = client.SetBucketPolicy(bucketName, policyInfo)
+	if err != nil {
+		HandleError(err)
+	}
+
+	// New a Credential client
+	creClient, err := oss.New(endpoint, credentialAccessID, credentialAccessKey)
+	if err != nil {
+		HandleError(err)
+	}
+
+	// Get credential bucket
+	creBucket, err := creClient.Bucket(bucketName)
+	if err != nil {
+		HandleError(err)
+	}
+
+	// Put object by credential User
+	key := "testCredentialObject"
+	objectValue := "this is a test string."
+	// Put object
+	err = creBucket.PutObject(key, strings.NewReader(objectValue), oss.RequestPayer(oss.Requester))
+	if err != nil {
+		HandleError(err)
+	}
+	// Get object
+	body, err := creBucket.GetObject(key, oss.RequestPayer(oss.Requester))
+	if err != nil {
+		HandleError(err)
+	}
+	defer body.Close()
+
+	data, err := ioutil.ReadAll(body)
+	if err != nil {
+		HandleError(err)
+	}
+	fmt.Println(string(data))
+	
+	// Delete object
+	err = creBucket.DeleteObject(key, oss.RequestPayer(oss.Requester))
+	if err != nil {
+		HandleError(err)
+	}
+
+	clearData(client, bucketName)
+}
+
+func clearData(client *oss.Client, bucketName string) {
+	// Delete bucket
+	err := client.DeleteBucket(bucketName)
+	if err != nil {
+		HandleError(err)
+	}
+
+	fmt.Println("BucketrRequestPaymentSample completed")
+}

+ 5 - 0
sample/config.go

@@ -10,6 +10,11 @@ var (
 	bucketName = os.Getenv("OSS_TEST_BUCKET")
 	kmsID      = os.Getenv("OSS_TEST_KMS_ID")
 
+	// Credential
+	credentialAccessID  = os.Getenv("OSS_CREDENTIAL_KEY_ID")
+	credentialAccessKey = os.Getenv("OSS_CREDENTIAL_KEY_SECRET")
+	credentialUID       = os.Getenv("OSS_CREDENTIAL_UID")
+
 	// The cname endpoint
 	endpoint4Cname = os.Getenv("OSS_TEST_CNAME_ENDPOINT")
 )

+ 21 - 12
sample/get_object.go

@@ -37,7 +37,16 @@ func GetObjectSample() {
 	}
 	fmt.Println("size of data is: ", len(data))
 
-	// Case 2: Download the object to byte array. This is for small object.
+	// Case 2: Download in the range of object.
+	body, err = bucket.GetObject(objectKey, oss.Range(15, 19))
+	if err != nil {
+		HandleError(err)
+	}
+	data, err = ioutil.ReadAll(body)
+	body.Close()
+	fmt.Println("the range of data is: ", string(data))
+
+	// Case 3: Download the object to byte array. This is for small object.
 	buf := new(bytes.Buffer)
 	body, err = bucket.GetObject(objectKey)
 	if err != nil {
@@ -46,7 +55,7 @@ func GetObjectSample() {
 	io.Copy(buf, body)
 	body.Close()
 
-	// Case 3: Download the object to local file. The file handle needs to be specified
+	// Case 4: Download the object to local file. The file handle needs to be specified
 	fd, err := os.OpenFile("mynewfile-1.jpg", os.O_WRONLY|os.O_CREATE, 0660)
 	if err != nil {
 		HandleError(err)
@@ -60,13 +69,13 @@ func GetObjectSample() {
 	io.Copy(fd, body)
 	body.Close()
 
-	// Case 4: Download the object to local file with file name specified
+	// Case 5: Download the object to local file with file name specified
 	err = bucket.GetObjectToFile(objectKey, "mynewfile-2.jpg")
 	if err != nil {
 		HandleError(err)
 	}
 
-	// Case 5: Get the object with contraints. When contraints are met, download the file. Otherwise return precondition error
+	// Case 6: Get the object with contraints. When contraints are met, download the file. Otherwise return precondition error
 	// last modified time constraint is met, download the file
 	body, err = bucket.GetObject(objectKey, oss.IfModifiedSince(pastDate))
 	if err != nil {
@@ -75,11 +84,11 @@ func GetObjectSample() {
 	body.Close()
 
 	// Last modified time constraint is not met, do not download the file
-	body, err = bucket.GetObject(objectKey, oss.IfUnmodifiedSince(pastDate))
+	_, err = bucket.GetObject(objectKey, oss.IfUnmodifiedSince(pastDate))
 	if err == nil {
-		HandleError(err)
+		HandleError(fmt.Errorf("This result is not the expected result"))
 	}
-	body.Close()
+	// NOTE(review): the discarded body is never closed; it would leak if the call unexpectedly succeeds
 
 	meta, err := bucket.GetObjectDetailedMeta(objectKey)
 	if err != nil {
@@ -94,13 +103,13 @@ func GetObjectSample() {
 	body.Close()
 
 	// Check the content, etag constraint is not met, do not download the file
-	body, err = bucket.GetObject(objectKey, oss.IfNoneMatch(etag))
+	_, err = bucket.GetObject(objectKey, oss.IfNoneMatch(etag))
 	if err == nil {
-		HandleError(err)
+		HandleError(fmt.Errorf("This result is not the expected result"))
 	}
-	body.Close()
+	// NOTE(review): the discarded body is never closed; it would leak if the call unexpectedly succeeds
 
-	// Case 6: Big file's multipart download, concurrent and resumable download is supported.
+	// Case 7: Big file's multipart download, concurrent and resumable download is supported.
 	// multipart download with part size 100KB. By default single coroutine is used and no checkpoint
 	err = bucket.DownloadFile(objectKey, "mynewfile-3.jpg", 100*1024)
 	if err != nil {
@@ -126,7 +135,7 @@ func GetObjectSample() {
 		HandleError(err)
 	}
 
-	// Case 7: Use GZIP encoding for downloading the file, GetObject/GetObjectToFile are the same.
+	// Case 8: Use GZIP encoding for downloading the file, GetObject/GetObjectToFile are the same.
 	err = bucket.PutObjectFromFile(objectKey, htmlLocalFile)
 	if err != nil {
 		HandleError(err)