hangzws 7 years ago
parent
commit
64ab8509a1
33 changed files with 999 additions and 293 deletions
  1. + 9 - 9      .travis.yml
  2. + 23 - 0     CHANGELOG.md
  3. + 1 - 1      README-CN.md
  4. + 1 - 1      README.md
  5. + 3 - 0      oss/auth.go
  6. + 60 - 20    oss/bucket.go
  7. + 53 - 2     oss/bucket_test.go
  8. + 38 - 9     oss/client.go
  9. + 90 - 1     oss/client_test.go
  10. + 59 - 20   oss/conf.go
  11. + 115 - 18  oss/conn.go
  12. + 17 - 0    oss/conn_test.go
  13. + 17 - 4    oss/const.go
  14. + 69 - 51   oss/download.go
  15. + 39 - 26   oss/download_test.go
  16. + 7 - 2     oss/error.go
  17. + 27 - 0    oss/error_test.go
  18. + 8 - 0     oss/model.go
  19. + 59 - 51   oss/multicopy.go
  20. + 26 - 14   oss/multicopy_test.go
  21. + 22 - 10   oss/multipart.go
  22. + 2 - 2     oss/multipart_test.go
  23. + 50 - 3    oss/option.go
  24. + 41 - 1    oss/option_test.go
  25. + 8 - 1     oss/progress.go
  26. + 2 - 0     oss/transport_1_6.go
  27. + 3 - 0     oss/transport_1_7.go
  28. + 18 - 0    oss/type.go
  29. + 67 - 25   oss/upload.go
  30. + 30 - 18   oss/upload_test.go
  31. + 2 - 2     oss/utils.go
  32. + 1 - 0     sample/config.go
  33. + 32 - 2    sample/put_object.go

+ 9 - 9
.travis.yml

@@ -16,12 +16,12 @@ script:
 - "$HOME/gopath/bin/goveralls -coverprofile=coverage.out -service=travis-ci"
 env:
   global:
-  - secure: RSQJ+EfldTf9PA4pCfxPEf3HMCDZ1jcf+6NU3uwjOxxpDhnnmW6cUwZydheUhjZawXmk+oYzI/6aqsrGR4fq/9w7ey1gpiDeNCUKFPAcJgwb9P3R6TUgQZvin09Mgp7xK3hTXtbdXHPSbaGrXH+mh49AxGw7e9ZCtQ/f0ENUYdfAWiGqPI4W6ojJCyxiWzV+pJ0TW7JM32So98KPLQZNrBwTa0+O6I7tJcYPq62kP2jaWIwkXIb3eTKLrACYW2nKTTWVY17KfJIw/BO4ZtPwk8/EgL8E+NFEczldPbkg81QXsPRDcuQOqNnBsSD78Ej+5pjF6o715BTPYVTOcDNY5ebJ37W6SAvuOIcANwAmcrkuAtdueaHtv6lPMnfqaBp+eDIm3r1cs2tMka67r9Z9K50GxrjNMkrlNLHIXx+TZAn1CfRMslTWzb6dcEy8ncBlKmFZE0h8yRoExwOTCaPb2BH6c92e7zRJPNb3jGwabnjT0YGmswfNx/Y5C+Xv/VptU9NYYHMkbc2VgDWasuqV95wKWsgD68P7Mrqei2wEKDqCnEjSKAsTGEx3FxgvTulNUv1KRRlqrxS8u0p6V96Fg4c3IUkziEMXjtmsAysHHDjwjZ/oDE79lnjIXAvTslVv6zTMBUepMaMQrN3hW8VZ3LPWtYAxJT3MvbNZEqb8FAw=
-  - secure: ACE5vbYP0GUp6I0aUVfKk9acqTZQdcHlVS/Zw69JIzuewxjisrT1D1/tQTtvoi/PNIG+NVbFiEvyzJGILQgVEEMJ3Bnjzg3M1oD1dGSFQQZX0VuyU16B7Dcc3qvXcwVg/hbYehBNFIMi01gXZBTsAhm9M1La6omnjWPK7zVs3x+CBw5hTTplxaIxXPlANfeOBBH5ziHUL72700yXaDOWifu5gkzCwXId6abte+4Y79XozkS7DJir6B8NB5D3XkH/TFIQ1vlRrmKpSOFigKhhAUWG2X4FT5TxZes2P3lZApU5NYHvDwXISc0sivn+hXVb8JlmEgSRzp0qQWd6BEoUG2gtV2nFE6Kc8TGCkt4d2v5e0Nmel7oMjqRe2df6TYGPu3h22tYSjXdn65+fxcfWQHorW7AmIL3bMZ9hDHHi2Fp+HaaI9vvHxJPUALSlVqCn7FLwLTC1B9MgEVlPD2g7FPGMdL6n/5CT0YTJk4/4uv/aNKllp4OAx3gOuOqT4Vw4VUTXREKvIym4vK4yWBCrPCq29WEUbSaMrBh2pctEpI9dyxJmjznUsjiJ8cGYvT1zUPVcKHgybWZrERK6zHKeIL+umobi8nAMRapSZ8WCkyIlRY3ThO8qSS2jgdjg4hqD59w2QZezzSyejsGjCwNhbInJkn2ixc+pvdoxsBVM9Yw=
-  - secure: j3GX9Cnx40e61nby/5UWmrBDHVnBP0OdKnk/PG5c5b8XPl3LH68xViI8Ifn36q8bOHDTFfiMaeKvoLHzkZsXqDJbgNY6PKP1Cd/6h+FErHwDdIc8XEaczYOCawwf/esjL0f082PIsk9MUGAc5TH0DDIVgEEXL6HibxMqbEh06JZ9urIVekQCw0woHkCSh8flyWYtJf+VYiR9+llXmhZEfxEe+BIUkL+caZUSkkC54lbbW2ENvzasLHsue6+jHEyL7NobfgodM3RvB4XZ2wdxUXUqDzkOTlOLODWpXI45afdSk+h2+co436PUVOb+eg0b0RccOWb25uueamLRv2NDqbrbYBNzrRMcFtzisMORB+NDDHAcWTuJ70q89eaFBpkB03X8y6LgM37IdYITp/T9YB3TnOfgWwG2r1luJ0q8Z1vEVX7+hGBlXWi5GVk4L+q6Os+acrT8HYxZKDa/EBjS4XCiiwhzsTloxmBUJNcxDVgzzBW9NE8pQ0ZNdjPH88Ca1oy6qdMRsuMz+WSAyZ3CF0TYPxiZ5r6Pnt8aSVSneClg+8GMwFUyO5rt7ySXJNruwhtrs7r45S8v74/pSRiI1EgN0BAb2CI8AhD2wzj2vv2MNvbI/ppLeaEFz2RCEbpARJpP6ihl2GxU6WHjrqchyAgFHMTp5YrGb0/cAVLnQ0k=
-  - secure: VScdIW+AGYKJYr8Z5U2A1xoYdHxVw1eZ2DNCk+E4r8YhyTfGup9V3hbGgngT6urkOU8qQnqI7vDjI7ZyXFdzEUXKQYZDcA3s61KkiVl7c+cZZMOkD8b8fNN8uAgVgag/f04VmR5HHU2yyzaK3BkU+QEWahRkNHdjW+pJYQpYJHEoYUzCecc5335a7j1ysJ8LGXbMYiCRL29pWrTQEpC0M1mPri9kMmY7pPSr4k/iNPrAm+OCHHlrPVmHmIOH5QEduHzPkSauiExuSt3ZzU5ZqA29EwLtkm6As20ttVCz32bW1zSRlYTmkwcBjASvS4duBvSdS65AWccnRVbOqIvZJTPByYHpUi2tzuwdLVYK68m4JD51LWfFM7PCGssva6mrDSY0MvWgMF+isGugfarhLinne+hYt/r+5JxUASzYXWBKFbce1lk4bT7wp2zO0h8Q2Cna+K+D35YROV77fDiDoxarURGsqAHvU/n7PSoEnfA8WOiugVkIPuOhYhlhui+46ahMYxL69gcu88y66AMPxEHuacxW4GpYgaF/xjV8180/c3ck02csECKOjv/BQU2bbV/ngNo+bDH5g8xiHknE9oWdVEB/dxzZmmcKroXcxz6zqbMJjn03mgqBZiEkPhRStL7cny6Juq/aDTLREooHtn7OA6zsttVOkgAdCJ1VxN8=
-  - secure: lHKgxwLtpq9ydgxQcEe8legRMq5eUL+vP7pIHnauYVcQAWmfVaEwmwVeKJubPiT43m3VaXvKUvYodeNMXj4bhe+8/jG0SSVRd9UlyWlcgFXBJUBOM5FLOhPrnc2zdaCcOg8g554VqM4Xmhu1tX93E0xzBS1B7N+cDwb4xD5D5iQ5rLIbCdK8MbdJwOZLyqKN2cQ9eZRAMy5kz2WnV0LMrm3MeyxJe+bytgf8rSCE6OchNqLRyOCikkSmMiZROzvBZH2RTS1reVJHNhKfDsCkIt+QA55M9Fjnjjvs94rMNgxZwIJhj7wIgkfA2ypAi/3ESRk/yxJqsDyiutfpq/8N4l0+VCV9KIdeLn+rqEiZz8WYYp6oCVHhe/eM8HWwGmMM1JTQfUu1W6ADb8WhUY/4e+AZMb8CRjh6bJ5wf43Z5EZNOKkXZgGQyE5je3ORuPfpG/PliuJS7hUAyMzhjPnp+DMBsOPNMS/glaZWL0GB3Fr7V2IxEQZ0MUPDEebEpuQm6VddQ2Lysw+raRZuC/c3kUBC9Rt7MIHDJjTdBQmbo3GO08ErhzKVgyCLqGKUx1WpApMXtsrb6ovk7VNXrjDsWCYyVtUbIgGCYsv7OGEhQAPMyAy9YMNT0OC/Y7n+5YDjWEjCPVW38TqtK1izbJ8mdbtY9jsy5uR9ET1aCMkXiG0=
-  - secure: gDu6CUQn7QT/9wR0wrfkYGm4dHFgpDUbsQ6SfwxCarP2waxlLpuXMdPO8TZZbHVgii+Md5JqGGJGIWzSlUwygH9WpRqyJnAfb6OWheJPusYpDyFWwiKwYMyjohKRrLk44qyvdNgVN9huBVpOHwks/vmvtFtJOfqfDwSpMplHKUEhtCeGpXzVwrH7xhiZgpylBoc3/9fvstpKfTiaGPBfDaX9NUSrl2ZGDOkixRWwisuCxNvk8+xVK3sxxJwfyuBZdzQ6S9hdGgCpUcty/mWjHtaAMtaE3sfCpuZrcZiyh4wnapWQ4P0oH5pzixOesm8ul6bGXHb7f3gj/xbX8cO9FoPESSqFWGnGugTPetvE1apuwlq1lNiv8SAHA2wLTRDkGZ8Go55ZqOHgdzlTWxQRroREzo904fERXU+txsH51AX3EakQuATXBb9KHrYfsDnHmY7YPAEjzdTPpcmny18loFvpTjzcq+TFPsQXXUA2VPO1lq/ZzGTBy26X8MshdFsc3LNIkcym6smzk1/fOJ5FT1OuH0m2AoHdUJserlWtcLyU1gPujybXRmSPQwV41CG/ANVT1Yq4+4WmQ0atdBaUvL2HkrzWjpWofpK6KAdWZ60GR86f4bSsSkuE0sgNmrfHULYqJimFFur1ATxjeCCLX5rS8mUqVHNKFHohOXfbrhg=
-  - secure: a3x1V86zVwlh9Cyf7d/jR/g7RfLEZjdIcn+jcKoJlcVMbgFcEddf7Stj7xAXe2cHiuk2cieiBCmqfOB2dzm8jL/D7xL+H6n/CXSXOy1j5xljKlTviqdg2lN0Ic/k3RuM9TCylc+kuDgsy/seKyKFp+zQvIEDpmbcLUqeblJNYYkUJ6Tugo+dmjU7/gCsxACVsYRINYr2B2ZCyJjU7AwGxBssBhmCEQqoKjhhpFkDixR243Oq8gp6zKKrnePkADXAnbQc/qCA5egsH8JCP/di/I7U6UVxf38Ks5SlRkwsNNEFhaa9hEsTlWJv0MOfTdRQCEI2+4yuxc2VAKXdw8GLzPIY4U049KrP2fiGf+q/wtJgW/JE6Ux7/qeB2xE9EpyrcspLndzHNSBYUPaa92va1r3VmjGmZLs0vV1ITR7IfMJSqg2vZhXR8GJ+uu0OiLq/CS8jLpGz3tncyQQ9P5LzCLVEv5Zy187/z7WaOGpZllecgfsr772pcM08IEMvWWrj1rc8WrOF1T/81uLSGUu6/7j8Nw8PX0778Phswb48n55qptEwF+32zmJiMxVNFhIax+BT13bjwIw6MBqZhjwGgsLgORnvSc9EBxNG3g5rOvdnaaGy5xM2El2uYk9buFui6oDgQpNXpb92IJ1giAPRCaOs2QF+G3jHHYmAcaNr24A=
-  - secure: O+kTgLPklEbq65FdltBo6lMhayc+nlw+MVPOENt5oTaMgBWhr6DzmIAXJ7UGZMl1CNv5lcg7VBn/jpsH06B3WK6IOx/SR5Fg2de7T1DjYCJmjgSuwwBNf+kB1xS6nlcbh7YXaQlD3WJnb48PToigucgTHjGPFV+dL8w9mxKRHwWEE9NAXLAiDJY0+GRz4rQrM5BOAhgFijQohrYKFDfn0r4rNYT5F7WPh1KJVQhJ6kiEXYlTIk9GP7xQ1sCjNzZmo9p1WxoXlpCDsvTcn467Vt8dBhxDyqlNfqj8kpk5dWQz56xAvQ2FKP7RAGqmjgWW4S/xAFOmw1llD7xHYQqw4oDPfUtPm9jXU+4LmWffLgZ8rpcyPOMAoWB6yHdqoNxDKh9Hh/88MBkEXCbUIPX/pG4OG/aOTdTzDOM7D+3AIXXOvXHPVdYcMgrkY6RZAgCtB4q1pd7I7Afn3xyOmUbgjVJjyO8OX7Th4SgzAMh2eOd9PqzaMEwrpR2JBMx5uC/6LxM96xNMKYqDdmztdJ71/2Xzjxh9t0vcS2QiTs83p/So0QvUBQ30Ej8Al805YpzjYxJyp+hbHYhoz8J0XIr67rtV+HLB7QLC99IJGq6ZBWaJPDgxPNliiACDk7/6DHFr6SKqbBG4JJ2t8wDey/Vra9cIO/55/izi8heeqg3TDAw=
-  - secure: fRZkr+JoUz5QMmX9D4CSCjG78FrWiUdYbRKzkGpF7MXyfvNOqES2uvH67ffgubXBoeO7IS/x+hV/UcwrPpOZKDOny8dY5bxeZVjGWKkGlioqzfgPa6DDW2t+bvdtcpoB9r0N0Kp6LVoOjjn4qdjeG+4tuPIWeaEGh02mHTe+45IrPF4jhUQYEOu5kEwF/ayuH8Svvxu/zG4cWnVJ61rooW42TCXJ6Bj1Sq7lgTrTXLFYsOiuIujQiB3hhlxVi5vfD/hwx4qZkh+S0NSVIesU1uU0SFJUZ2ZTK4NWMuGDypZ2ulB/tYAAJIrfknDXJc+D8w3YWYia7q+E1tWUtoBG91rFM3hyhK7kCB2MZrUXqif20JHYpwXne66K06xpmym0/pUvKGH2Kw/qEyZTOKGSu8sl29FfsTFDRuCc4AXoP1Zyv6gHIjllmX30aFTGe0HZOftVmgnOen109Dtz2u1qSg7JrvAaQljltvRVdVlfOQ+Ub0DWFEgfUisL5WdxMDzJZalYYqR+GQy0/Wn8F2fZz4zvQ7pR4OEQoS9bWAhwcFwKSJ2IqLtAjggrIRp+h3u1OQFXeFMdhcr415QFJaw3rKsdMzubXOgOzCipfYAQbqm4KJK7boDLT2paPIKLTN6Fkf655XorMw4TuRHsOFlRqS/DOlaM2VwZP8hjc5wT4ak=
+  - secure: ZCL5egxJmZA+o1ujsaJe//AIh1ag/exSUJ2FzoKPF3O9c84hMc2k5EYO2rGzTNn1ML6M89Mo5hAvHQhyJEHjRuMtjc1QrfxAaA3mqm4scGXIXesPMqYGuvuPSh++6/fkAwaVBAhrk5GaDG1/FuxHE5zusGx3SvGegnCwO7n/2YCfXco6DWgVCdrz4p1EpPkAM3JIdHFUzsDWiimVuiNAvJmAT8+IeOPTT+WgusCJj4ORS3X3LddTjttBP+hRrp/pGSoNqPMzfysWybtaL2SJ8URtvsxW0Mo5BwocHAxAhPP+M2OscQbDzthSAezCLngYvrfBplfIyWlahlgzNz/FjXz5pQwWdYVNoibyxLLMOH685n75LNONN/xVO/GFmVPx7DMGapkN5NzIWS62D4v8QrRkwtms42OUkyEUHjDh8Evui3K2MNJVXA3TI9zOAR+C0krD7OEyS37qrppodhRxJSqFUlgXnk//wLldMC7vleDd7L2UQSWjqyBHqFOgsVaiLU2KRTY3zvv7ke+dqb5VF31mH6qAr8lJTR9un8M1att0VwCEKxoIRT4cKJCpEtZd8ovXOVt1uE695ThVXE9I5e00GXdTzqXOuv6zT4hv/dgmbz9JN9MYeCwmokEoIUmJKNYERa/bNVVefdnJt7h+dm+KpyPAS+XvPLzjbnWdYNA=
+  - secure: GdrPX7nUoZhB1DYTVD6x/vgA7H9dOlQc4n7nCsqEDyif+Y1XdPT83Ic3gSOt+cfy0/Kjh0/TT5xmLqpSh7wr7eyTpBPZGjz4ZbxBOcSLTfrf/spacgzla9I1335CvaTmpvrnvGUlOuVS8rb3J/+19dHlN6dfxX+ucjdfShR504d2JEcCLpTc1CEXAl+HEt3hM9gztOX5ykxyrtibDr0OPkNF7QjZ485V6UJkfyVlBM6JL59ywgh2dhdZn6JwmexHjVPsw6V8Ka07GzbpOs1e5eis42RUJe8eSqRRToCcTUbA9HOgWXswuu5k7nAwErygX2ub3hZ+yIjc+9JLsiy6F69RaUPVFlxfw8s5NLeInTIt28+A6iaf3X2k4lOaVFytgTl7lkYGNWz4eV/vXf2H4wZmaZn5OI0WKd3WuEJ04rsm7xzx9rC8znnsI3R1BHfapU/y6z2QGjgJsHqZmgfvXNKgSOurM6O/nlDnEsYOwYLQLhpeXVFNmbo+M77HAVicKD4yL08+5uBZaeYYipzyC1O4DEHX5BNdl34NpNxUdMqUb4MfNEnYeqvmemvZkOO6BO+xucP1S2LSKNXfFxH5iVfKbz4+VJ/2kt5R77672lkG3bXPUJFk4t1CTHBOLizEJNTTD8uzRIsW7Io+XFk6oyoEqXF0sT6Lbp/4pUHJQQM=
+  - secure: DGIgOKinCvYcLdcaIOKcecidQe5q/K4aGAjTyl8/fCp3mRWwFTrlv5gPMy9sHQEsiRjzQehpubMO1d0XFVO+0LvqdGLnXyM6lSnMhN6voQMnF1GaIXmxxBfvP3BHwyN8kMyY+4oqgMROvpxDKvq0IH/GE8opWRJhQdNkRscbtfHUvnqSk62oNziqIBxXrYBIuezuNcgZkCwEoZQwPu674efIAwVr01BmRfd/8Q5W27dJbCJFF9ceyOQrsjG2QGmW6GWraaOpqYfQ82A5ROB2saVU8mEs/f+mGoJgOP2aH3ErFBkJWBCUNzajluAyGyU9VgHGRQ+GMbSr/HqRILd5aYpDCGA0oFer4/jP32CqeEt1Jdqs2lWar5mFX1sae6PIxFyl5lnENgjTfbOt4oiGGye6t0mgI4+psRdCCQV5FQ7WfobEZ2ryfQxbiVU1X219jMoHhHmFzC83e/T6V/mkHrh8OR67k9pieH+DqNGvWFtv7BBs/ihfoo2ONNgsHcofsPj85I6odWAhsBnsYm6FsR31N19nObnggeDyqiCyh5qUFvSGPkH4fXTKthKETIRdsdOEDOcbCD6kUpZqIWyuk6TKeOD8PxjgKzm3hZjlugU1x3amVv71EKjA5/FOVyIVuekLKoLn7pt+n3PVlT30IZfWorEfqjeVAKp2SglE8nA=
+  - secure: LnL0Hl9yZEie1aYngEO5QK332cn3W2i4f6R5+kxX3tncdqBDFhsp5tQfMvlKHIzFlK94DI/G0diAl8zJmYQfAYARe4uvW5FAINRCkOUz2jwIA/gtQDr+oONqHK0OLPWYuZ6KJM4t7dmuPUR2/frKe0/6r5XeFkeAz9l8r1Gw9o42jFDQPDkhBj7k5EkmB1DpuAY74vXy0tVBCJsd57/kuaRqbX4euhx3zFrDcr+xQEiDWHKzNHlJd6DZnruy0KDuWmIbUWhR2rd8YKAnzP7OHzpbTHsnbXvrVpaN1Mv8lXz2dpPTr7I2LMCrMEtfECu9U+LDIqhbHVMsp9rZ+fNQ048fREoj8HZrorIxmsRJzV0ZQzjdW9Q6EVaiYcLZPFOASsyuTNBbSJ2AIrE/izo4EUKme8BY+0mFzTJwMk7XwAtatItVhEUXb2wXWZm8GR8wIrNmbeSzle5NkwXpdpW3QzZ2EADL63/pP80aV1aMBmoAuLMIHxeHEnXOTAgQj6SOiloY+II/iJE4cE5vo9UNtZsqnJZqdd22s3kLdQV0kbFMWq8S3qmxtDFPeoZAy03xhTVnJUBkdjSL2UER5WAacZIr85M6Z2APc6dzMUlWEE+4QkkM1UAbwDBTXFrrmfDVYc0LrePRuoHQiOmSvTus5+WV9iIQF7rM4BcSLnOEW5U=
+  - secure: SgvbTYTbMEkmqDXP8MW6lbERkHUjBRwg477hUL11Ok1TiRdHCbEDrq3mfUP+Tl2sS1x5qQ2JFg2NyWS2ikCAd0zjO3QEfmhfQFRpmfgb5O67wY2oEAsbRDanjIXTwpDAZn87KFIPB6ohVsX4LEztfR8zqXKIfXrVFs6lyDHS1LIgbgQhJl+XfJRsfPlWRq7QydANTCY34raUXgXgBxtbv43b232LT8UusizUvZS4HJbrbo4oXhVfhkUH46B2o0ct2Xt6EIlxyOtxtZOmnai8O7kIFHoG+GcHxeZ7++X6FaHR3Cdv3rr7EEg3MwsOIZ6SmgeQ0gcs32RZf0giFMwo1LkgiB9KrJTBXkU0CSYysxfeLUCEd11q4h4lZyhxU8CMvgs+1m3s6A2/5uYDYSSqJuAHTHQgntMn7/baXKVXuWXrSSERxkUiqwlKjFFHz0kshj3ZXiTZ3EhjBZgXeeGzGEEbZBQCEJgXKUpl+C0D07PLKt6f2ya4TVTZ3WOjh7dKq14+0nC3w+5z0ZtGtv+IS1LFfajNs/LsT4fDmKsIoEQg2Kf5S//ckeuzaR4bkMGCm3qquJrNE6Uqq0MZzSUdUpnsILjfLiVlrtHk/9So7ulRc8XyhBIWDy9lTS7NxOfpw5cnVJRcjEwrZ4Q6iNwdsh4vPZLVgavwfFRW87DjF6A=
+  - secure: Jef94P0QfHuMT5GQNrzfMdtVUVvV9dEGsvOLFqPvIkPLJZWGqwaIFUG5t0mCPEgn8uct3XPOiIYivgxnOURa1JNegnTbjRLXBOAjhE5hw2x2IRWj2xT+ylYCNqh9jtIEJdAjUzJnXXj1iasZGCKa4DNwNLgsuQ8d5Yl3bY5l5YI4MtTsdzbBUT9WewDWgnO/MhZM52w18XLBx+Fsq80F0VwpzoStKRula8anOhL+Bvj1uAielMOUo3QcpYcV2XfTnM5n0ApwqUhmv/8YoJpHXjGTeKRM1Hem1jFtfWCjSRrlEKEFJALEJImf0iWbZN5Z0TOcfJqzPY09/8h60OOfi0TXcnwVnSX33Zp1oDLDlRnsN7HQg+yIub0N03OwHqmC2AO0ShkO/lBmEMsfqlEoc4o2GJ3YL+JpC5vPsy7fFMad+jNGXlg6jPAshvCJ2DfnmK1jYSSVdVNUUeP1Bk5rhQkFzFH3vgNgX3nFk0gEYrfDn3/Ea6tORybSJzaAkB9bU1n4U2e3OplvWr1Ll8O/t87ws8ctyY/Ah2hRmhSKEG9cdySnm7Uq8H7696MZEEw9aatj+bRJk5CbCVtSX8v49I3C0tERcUBO5M3U+/g0qeBW9hEhxnBeG3y253Bo1FhSxbaZhGwSGJ91htRXLlJlUs2QrOcSYMsCT6p35KdWaqA=
+  - secure: NMVS9EU+ahQXGiyTCHyZ44rf+8b3me3UXD1DozMm04lCvnWoBqJE4aXBGQsDAWuOL4NTTm0SaVu6sBY6ZTXOYYF59mwEbxt4qpmVjZ+vBrtMbMiqoxv145blquR9JKedkdP6IGSd7VSQwSba71f/RVv5VeGvxUSEhCwA04kKxToOPwmnORmT6qwb7PkPCMNHxz4VpsUIsKx8jRrY6Gmp6FvQJBHfKEHnDQohB1ReIYEYi39ijLvpbCZqrB5u1N9oF6WlpBiNIX3kQizn7ftUyewJgoZMnfpW/Lta6e91yzFInWg75bZdW3faa30Qy0yw0zlQIPLs89c8A/XH1fGVECH9At9VNmdYrb0fD9aWnH7zdX6Im+Bw7Ptph4x6tB7zPeFoZR5cVZT7L06/HbnW7NeQk4tg/N4I1tOaO7AQl+ofhCzesZ56bSxETiNFn9QiNwWFTzjlkG7jxN1iAAkdYsZEQHwtEK63R//NJtXpbbtNA831QqgDqBK+IxyKeLhmxmu17dWcUw9tm4jlZ7d6nPB9bzJcVM6K2uRJyW07SlBqd65WJTXPV1PFww8zh+chAC4ZkLDhupn+7ZSG2ylLYGgepmABoC/CXHkXEsNzdQ8wPX/pDIz2WNmwEXyC/Nv+WNpFS/tWIAryIPOLMuETIgbaOLbD5vZDSKxDZVGDvPE=
+  - secure: cNr4PiK6ZZADoRMacL4lvdMYWgM9H4lKN3u+nfMA/MrVrnSjeRonkO7RjMJWs9HicPanEks10R1w/T/6nWyFQV2CPkEBSNSLf3DAD+dlRekKfWogGXzYnvqeiki1HzsIPYTImiD5BtPn6SbJmO/ErJt3zuogspyBws/X7XfZ+u8FIpPsYEmvHslT2jARuput0yNfldUgxxyI0IvgkuCEcCTFwePspjbn6zR6Df67e+r5ibFqkdPYdXkQVpvfA90RPpfBUuuaEO7kkFlKbPK+Nl/jbUnjcfbe8zJRpSb8j/S2USAiBUjFsqsdvFqZ9WumjXJLrrNFt/UgIXaMyG3Y8xJl9kzCcx36wcNoaP2mx2qucYTdC0ey49g0uywvOVDdykmctQRF7uYQS+UkMqs5jRLgAjQ1/wJISrvtcpaK/4DyhLBUFrOf9chep2hzWBFaWPto1IUpWu9Sjtz5rtjsAm5LR7zvIxcorvRall5kRokAspHb9+TaQKSDOpYNF+PB77cb9H0OLZBLVPGo0WJHq5dv0NVZSH9JVq4AiJDmvMWI6weBis+tLbECMwbeTezo6hDOuII7VZt/KcHgzlt3KCDwv8krQq71q7ySDt7SxrvLeDjeiEFkjwA0lN7Cin1yqjON83LsIsWkKxbf+xNJRngSE4bPn95j3pHELdo3uqY=
+  - secure: iDkNjibPknbawHN+jobw1AEqhQDhqFvGPBUX7FkxlrIanNR71Tj8CPAFtDpJbYaBMdPt4IzOmD2JVo9w7E1TfNX4DsOpkb2MbOr55TxfXQ/+y7TBdJ9B/62BvhFWk8Hvq8TWVPTCgNIaVXNfqBAj6WQSX3AbUVHWSOM9Aey5joBZuhdWXSmKo46lLOradacDDPZYjrLEsMTS2CotzduKQ4C8dGCVcMEbBnS3O2WRj3oR0wiiP3X0jbnxJkOV2MCoadZxSu5B+gaaJ+Yv7EKT0vy6ASp6LYrWkjY0eKbTqy8NtCvCFlliND/iaq4LEv838hQfO/o0WeB2b7/2MH2EW1v8XLacV12ak5wJgb7b+L6fG+lMKMta5Re+vjdRYgoU5EVeWQNxrnX1chEdzFXb/q2+5DVp43qH5i1Tu4FR/kSBobQeSAbT7HTkWAVz3kg8HmubYZ3P0eXToZA/DlX0dphWxO9ShR3H+XTJhh3tSqzxMZxxhGqPcN4DPSfOTnJQ0v0NPz016lulCr9SuIOSM3f7HpeGXo5SeQbrY3yCnBG8Qxpx2kYaZZlT4J6fx3iFl77SY/lQu6H/Y8ZtufWEogPSkGEh+NLLWuwwBQFC3vH8l3J26vcqHZR3N9+GyqX13CSqWEUysMF4nBOi52ckhwJRF8hAeX+DIqxoLfjUkDc=

+ 23 - 0
CHANGELOG.md

@@ -1,4 +1,27 @@
 # ChangeLog - Aliyun OSS SDK for Go
+## Version: 1.9.2 Date: 2018-11-16
+### Changes
+- Change: added an option for setting the request payer
+- Change: added an option for setting the checkpoint directory
+- Change: GetObjectMeta now accepts an options parameter, so option values can be passed in
+- Change: ListObjects now accepts an options parameter, so option values can be passed in
+- Change: ListMultipartUploads now accepts an options parameter, so option values can be passed in
+- Fix: when an API call fails and the returned http body is empty, the printed error message now includes the "request_id"
+- Change: AbortMultipartUpload now accepts an options parameter, so option values can be passed in
+- Change: CompleteMultipartUpload now accepts an options parameter, so option values can be passed in
+
+## Version: 1.9.1 Date: 2018-09-17
+### Changes
+ - Change: IPv6 support
+ - Change: support changing an object's storage class
+ - Fix: corrected the misspelled GetBucketReferer method name in the sample
+ - Fix: fixed a memory leak where NopCloser did not release memory on close
+ - Change: added the ProcessObject API
+ - Fix: fixed a bug where a misspelled image-processing parameter made processing fail
+ - Fix: added the options parameter to ListUploadedParts
+ - Fix: added the Callback and CallbackVar options for callback support
+ - Fix: GetObject now returns the Response so callers can read the CRC and other return values
+ - Fix: GetObject skips CRC validation when the data is returned compressed
 
 ## Version: 1.9.0 Date: 2018-06-15
 ### Changes
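
A minimal sketch of the new 1.9.2 options listed in the hunk above (endpoint, keys, bucket and object names are placeholders; the CheckpointDir signature is an assumption based on the cpConfig.DirPath field added in oss/download.go, since oss/option.go is not shown here; Routines is the SDK's existing concurrency option):

    package main

    import (
        "fmt"

        "github.com/aliyun/aliyun-oss-go-sdk/oss"
    )

    func main() {
        client, err := oss.New("endpoint", "accessKeyID", "accessKeySecret")
        if err != nil {
            panic(err)
        }
        bucket, err := client.Bucket("my-bucket")
        if err != nil {
            panic(err)
        }

        // GetObjectMeta now takes options, e.g. the new request-payer option.
        meta, err := bucket.GetObjectMeta("my-object", oss.RequestPayer(oss.Requester))
        fmt.Println(meta, err)

        // Resumable download with a checkpoint directory (assumed signature).
        err = bucket.DownloadFile("my-object", "/tmp/my-object", 128*1024,
            oss.Routines(3), oss.CheckpointDir(true, "/tmp/oss-cp"))
        fmt.Println(err)
    }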

+ 1 - 1
README-CN.md

@@ -13,7 +13,7 @@
 > - 使用此SDK,用户可以方便地在任何应用、任何时间、任何地点上传,下载和管理数据。
 
 ## 版本
-> - 当前版本:1.9.0
+> - 当前版本:1.9.2
 
 ## 运行环境
 > - Go 1.5及以上。

+ 1 - 1
README.md

@@ -13,7 +13,7 @@
 > - With this SDK, you can upload, download and manage data on any app anytime and anywhere conveniently. 
 
 ## Version
-> - Current version: 1.9.0. 
+> - Current version: 1.9.2.
 
 ## Running Environment
 > - Go 1.5 or above. 

+ 3 - 0
oss/auth.go

@@ -56,6 +56,9 @@ func (conn Conn) getSignedStr(req *http.Request, canonicalizedResource string) s
 	contentMd5 := req.Header.Get(HTTPHeaderContentMD5)
 
 	signStr := req.Method + "\n" + contentMd5 + "\n" + contentType + "\n" + date + "\n" + canonicalizedOSSHeaders + canonicalizedResource
+
+	conn.config.WriteLog(Debug, "[Req:%p]signStr:%s.\n", req, signStr)
+
 	h := hmac.New(func() hash.Hash { return sha1.New() }, []byte(conn.config.AccessKeySecret))
 	io.WriteString(h, signStr)
 	signedStr := base64.StdEncoding.EncodeToString(h.Sum(nil))

+ 60 - 20
oss/bucket.go

@@ -9,11 +9,11 @@ import (
 	"hash"
 	"hash/crc64"
 	"io"
-	"io/ioutil"
 	"net/http"
 	"net/url"
 	"os"
 	"strconv"
+	"strings"
 	"time"
 )
 
@@ -128,7 +128,8 @@ func (bucket Bucket) GetObject(objectKey string, options ...Option) (io.ReadClos
 	if err != nil {
 		return nil, err
 	}
-	return result.Response.Body, nil
+
+	return result.Response, nil
 }
 
 // GetObjectToFile downloads the data to a local file.
@@ -147,7 +148,7 @@ func (bucket Bucket) GetObjectToFile(objectKey, filePath string, options ...Opti
 	if err != nil {
 		return err
 	}
-	defer result.Response.Body.Close()
+	defer result.Response.Close()
 
 	// If the local file does not exist, create a new one. If it exists, overwrite it.
 	fd, err := os.OpenFile(tempFilePath, os.O_CREATE|os.O_TRUNC|os.O_WRONLY, FilePermMode)
@@ -164,7 +165,12 @@ func (bucket Bucket) GetObjectToFile(objectKey, filePath string, options ...Opti
 
 	// Compares the CRC value
 	hasRange, _, _ := isOptionSet(options, HTTPHeaderRange)
-	if bucket.getConfig().IsEnableCRC && !hasRange {
+	encodeOpt, _ := findOption(options, HTTPHeaderAcceptEncoding, nil)
+	acceptEncoding := ""
+	if encodeOpt != nil {
+		acceptEncoding = encodeOpt.(string)
+	}
+	if bucket.getConfig().IsEnableCRC && !hasRange && acceptEncoding != "gzip" {
 		result.Response.ClientCRC = result.ClientCRC.Sum64()
 		err = checkCRC(result.Response, "GetObjectToFile")
 		if err != nil {
@@ -185,7 +191,7 @@ func (bucket Bucket) GetObjectToFile(objectKey, filePath string, options ...Opti
 // error    it's nil if no error, otherwise it's an error object.
 //
 func (bucket Bucket) DoGetObject(request *GetObjectRequest, options []Option) (*GetObjectResult, error) {
-	params := map[string]interface{}{}
+	params, _ := getRawParams(options)
 	resp, err := bucket.do("GET", request.ObjectKey, params, options, nil, nil)
 	if err != nil {
 		return nil, err
@@ -208,7 +214,7 @@ func (bucket Bucket) DoGetObject(request *GetObjectRequest, options []Option) (*
 	listener := getProgressListener(options)
 
 	contentLen, _ := strconv.ParseInt(resp.Headers.Get(HTTPHeaderContentLength), 10, 64)
-	resp.Body = ioutil.NopCloser(TeeReader(resp.Body, crcCalc, contentLen, listener, nil))
+	resp.Body = TeeReader(resp.Body, crcCalc, contentLen, listener, nil)
 
 	return result, nil
 }
@@ -219,7 +225,7 @@ func (bucket Bucket) DoGetObject(request *GetObjectRequest, options []Option) (*
 // destObjectKey    the target object to copy.
 // options    options for copying an object. You can specify the conditions of copy. The valid conditions are CopySourceIfMatch,
 //            CopySourceIfNoneMatch, CopySourceIfModifiedSince, CopySourceIfUnmodifiedSince, MetadataDirective.
-//            Also you can specify the target object's attributes, such as CacheControl, ContentDisposition, ContentEncoding, Expires, 
+//            Also you can specify the target object's attributes, such as CacheControl, ContentDisposition, ContentEncoding, Expires,
 //            ServerSideEncryption, ObjectACL, Meta. Refer to the link below for more details :
 //            https://help.aliyun.com/document_detail/oss/api-reference/object/CopyObject.html
 //
@@ -303,7 +309,7 @@ func (bucket Bucket) copy(srcObjectKey, destBucketName, destObjectKey string, op
 // reader    io.Reader. The read instance for reading the data to append.
 // appendPosition    the start position to append.
 // destObjectProperties    the options for the first appending, such as CacheControl, ContentDisposition, ContentEncoding,
-//                         Expires, ServerSideEncryption, ObjectACL. 
+//                         Expires, ServerSideEncryption, ObjectACL.
 //
 // int64    the next append position, it's valid when error is nil.
 // error    it's nil if no error, otherwise it's an error object.
@@ -451,7 +457,7 @@ func (bucket Bucket) IsObjectExist(objectKey string) (bool, error) {
 
 	switch err.(type) {
 	case ServiceError:
-		if err.(ServiceError).StatusCode == 404 && err.(ServiceError).Code == "NoSuchKey" {
+		if err.(ServiceError).StatusCode == 404 {
 			return false, nil
 		}
 	}
@@ -464,7 +470,7 @@ func (bucket Bucket) IsObjectExist(objectKey string) (bool, error) {
 // options    it contains all the filters for listing objects.
 //            It could specify a prefix filter on object keys,  the max keys count to return and the object key marker and the delimiter for grouping object names.
 //            The key marker means the returned objects' key must be greater than it in lexicographic order.
-// 
+//
 //            For example, if the bucket has 8 objects, my-object-1, my-object-11, my-object-2, my-object-21,
 //            my-object-22, my-object-3, my-object-31, my-object-32. If the prefix is my-object-2 (no other filters), then it returns
 //            my-object-2, my-object-21, my-object-22 three objects. If the marker is my-object-22 (no other filters), then it returns
@@ -474,9 +480,9 @@ func (bucket Bucket) IsObjectExist(objectKey string) (bool, error) {
 //            But if the delimiter is specified with '/', then it only returns that folder's files (no subfolder's files). The direct subfolders are in the commonPrefixes properties.
 //            For example, if the bucket has three objects fun/test.jpg, fun/movie/001.avi, fun/movie/007.avi. And if the prefix is "fun/", then it returns all three objects.
 //            But if the delimiter is '/', then only "fun/test.jpg" is returned as files and fun/movie/ is returned as common prefix.
-// 
+//
 //            For common usage scenario, check out sample/list_object.go.
-// 
+//
 // ListObjectsResponse    the return value after operation succeeds (only valid when error is nil).
 //
 func (bucket Bucket) ListObjects(options ...Option) (ListObjectsResult, error) {
@@ -488,7 +494,7 @@ func (bucket Bucket) ListObjects(options ...Option) (ListObjectsResult, error) {
 		return out, err
 	}
 
-	resp, err := bucket.do("GET", "", params, nil, nil, nil)
+	resp, err := bucket.do("GET", "", params, options, nil, nil)
 	if err != nil {
 		return out, err
 	}
@@ -547,11 +553,11 @@ func (bucket Bucket) GetObjectDetailedMeta(objectKey string, options ...Option)
 // http.Header    the object's metadata, valid when error is nil.
 // error    it's nil if no error, otherwise it's an error object.
 //
-func (bucket Bucket) GetObjectMeta(objectKey string) (http.Header, error) {
+func (bucket Bucket) GetObjectMeta(objectKey string, options ...Option) (http.Header, error) {
 	params := map[string]interface{}{}
 	params["objectMeta"] = nil
 	//resp, err := bucket.do("GET", objectKey, "?objectMeta", "", nil, nil, nil)
-	resp, err := bucket.do("GET", objectKey, params, nil, nil, nil)
+	resp, err := bucket.do("HEAD", objectKey, params, options, nil, nil)
 	if err != nil {
 		return nil, err
 	}
@@ -802,7 +808,7 @@ func (bucket Bucket) GetObjectWithURL(signedURL string, options ...Option) (io.R
 	if err != nil {
 		return nil, err
 	}
-	return result.Response.Body, nil
+	return result.Response, nil
 }
 
 // GetObjectToFileWithURL downloads the object into a local file with the signed URL.
@@ -821,7 +827,7 @@ func (bucket Bucket) GetObjectToFileWithURL(signedURL, filePath string, options
 	if err != nil {
 		return err
 	}
-	defer result.Response.Body.Close()
+	defer result.Response.Close()
 
 	// If the file does not exist, create one. If exists, then overwrite it.
 	fd, err := os.OpenFile(tempFilePath, os.O_CREATE|os.O_TRUNC|os.O_WRONLY, FilePermMode)
@@ -838,7 +844,13 @@ func (bucket Bucket) GetObjectToFileWithURL(signedURL, filePath string, options
 
 	// Compare the CRC value. If CRC values do not match, return error.
 	hasRange, _, _ := isOptionSet(options, HTTPHeaderRange)
-	if bucket.getConfig().IsEnableCRC && !hasRange {
+	encodeOpt, _ := findOption(options, HTTPHeaderAcceptEncoding, nil)
+	acceptEncoding := ""
+	if encodeOpt != nil {
+		acceptEncoding = encodeOpt.(string)
+	}
+
+	if bucket.getConfig().IsEnableCRC && !hasRange && acceptEncoding != "gzip" {
 		result.Response.ClientCRC = result.ClientCRC.Sum64()
 		err = checkCRC(result.Response, "GetObjectToFileWithURL")
 		if err != nil {
@@ -859,7 +871,7 @@ func (bucket Bucket) GetObjectToFileWithURL(signedURL, filePath string, options
 // error    it's nil if no error, otherwise it's an error object.
 //
 func (bucket Bucket) DoGetObjectWithURL(signedURL string, options []Option) (*GetObjectResult, error) {
-	params := map[string]interface{}{}
+	params, _ := getRawParams(options)
 	resp, err := bucket.doURL("GET", signedURL, params, options, nil, nil)
 	if err != nil {
 		return nil, err
@@ -882,11 +894,39 @@ func (bucket Bucket) DoGetObjectWithURL(signedURL string, options []Option) (*Ge
 	listener := getProgressListener(options)
 
 	contentLen, _ := strconv.ParseInt(resp.Headers.Get(HTTPHeaderContentLength), 10, 64)
-	resp.Body = ioutil.NopCloser(TeeReader(resp.Body, crcCalc, contentLen, listener, nil))
+	resp.Body = TeeReader(resp.Body, crcCalc, contentLen, listener, nil)
 
 	return result, nil
 }
 
+//
+// ProcessObject applies a process to the specified image file.
+//
+// The supported processes include resize, rotate, crop, watermark, format,
+// udf, customized style, etc.
+//
+//
+// objectKey	object key to process.
+// process	process string, such as "image/resize,w_100|sys/saveas,o_dGVzdC5qcGc,b_dGVzdA"
+//
+// error    it's nil if no error, otherwise it's an error object.
+//
+func (bucket Bucket) ProcessObject(objectKey string, process string) (ProcessObjectResult, error) {
+	var out ProcessObjectResult
+	params := map[string]interface{}{}
+	params["x-oss-process"] = nil
+	processData := fmt.Sprintf("%v=%v", "x-oss-process", process)
+	data := strings.NewReader(processData)
+	resp, err := bucket.do("POST", objectKey, params, nil, data, nil)
+	if err != nil {
+		return out, err
+	}
+	defer resp.Body.Close()
+
+	err = jsonUnmarshal(resp.Body, &out)
+	return out, err
+}
+
 // Private
 func (bucket Bucket) do(method, objectName string, params map[string]interface{}, options []Option,
 	data io.Reader, listener ProgressListener) (*Response, error) {
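
For reference, a hedged sketch of calling the new ProcessObject API, mirroring TestProcessObject in the next file's diff (assumes a bucket obtained via client.Bucket and imports of encoding/base64 and fmt; "src.jpg", "dest.jpg" and "my-bucket" are placeholders):

    destObj := base64.URLEncoding.EncodeToString([]byte("dest.jpg"))
    destBkt := base64.URLEncoding.EncodeToString([]byte("my-bucket"))
    process := fmt.Sprintf("image/resize,w_100|sys/saveas,o_%s,b_%s", destObj, destBkt)
    result, err := bucket.ProcessObject("src.jpg", process)
    if err == nil {
        // Bucket and Object report where the processed image was saved.
        fmt.Println(result.Bucket, result.Object)
    }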

+ 53 - 2
oss/bucket_test.go

@@ -4,6 +4,7 @@ package oss
 
 import (
 	"bytes"
+	"encoding/base64"
 	"errors"
 	"fmt"
 	"io"
@@ -134,7 +135,7 @@ func (s *OssBucketSuite) TestPutObject(c *C) {
 	err = s.bucket.DeleteObject(objectName)
 	c.Assert(err, IsNil)
 
-	// Put bytes 
+	// Put bytes
 	err = s.bucket.PutObject(objectName, bytes.NewReader([]byte(objectValue)))
 	c.Assert(err, IsNil)
 
@@ -353,6 +354,13 @@ func (s *OssBucketSuite) TestSignURL(c *C) {
 	c.Assert(err.(ServiceError).Code, Equals, "SignatureDoesNotMatch")
 	c.Assert(body, IsNil)
 
+	err = s.bucket.PutObjectFromFile(objectName, "../sample/The Go Programming Language.html")
+	c.Assert(err, IsNil)
+	str, err = s.bucket.SignURL(objectName, HTTPGet, 3600, AcceptEncoding("gzip"))
+	c.Assert(err, IsNil)
+	s.bucket.GetObjectToFileWithURL(str, newFile)
+	c.Assert(err, IsNil)
+
 	os.Remove(filePath)
 	os.Remove(newFile)
 
@@ -857,6 +865,12 @@ func (s *OssBucketSuite) TestGetObject(c *C) {
 	_, err = s.bucket.GetObject(objectName, IfNoneMatch(meta.Get("Etag")))
 	c.Assert(err, NotNil)
 
+	// process
+	err = s.bucket.PutObjectFromFile(objectName, "../sample/BingWallpaper-2015-11-07.jpg")
+	c.Assert(err, IsNil)
+	_, err = s.bucket.GetObject(objectName, Process("image/format,png"))
+	c.Assert(err, IsNil)
+
 	err = s.bucket.DeleteObject(objectName)
 	c.Assert(err, IsNil)
 }
@@ -963,12 +977,18 @@ func (s *OssBucketSuite) TestGetObjectToFile(c *C) {
 	eq, err = compareFileData(newFile, val)
 	c.Assert(err, IsNil)
 	c.Assert(eq, Equals, true)
-	os.Remove(newFile)
 
 	// If-None-Match
 	err = s.bucket.GetObjectToFile(objectName, newFile, IfNoneMatch(meta.Get("Etag")))
 	c.Assert(err, NotNil)
 
+	// Accept-Encoding:gzip
+	err = s.bucket.PutObjectFromFile(objectName, "../sample/The Go Programming Language.html")
+	c.Assert(err, IsNil)
+	err = s.bucket.GetObjectToFile(objectName, newFile, AcceptEncoding("gzip"))
+	c.Assert(err, IsNil)
+
+	os.Remove(newFile)
 	err = s.bucket.DeleteObject(objectName)
 	c.Assert(err, IsNil)
 }
@@ -2049,6 +2069,37 @@ func (s *OssBucketSuite) TestRestoreObject(c *C) {
 	c.Assert(meta.Get("X-Oss-Storage-Class"), Equals, "Archive")
 }
 
+// TestProcessObject
+func (s *OssBucketSuite) TestProcessObject(c *C) {
+	objectName := objectNamePrefix + "_process_src.jpg"
+	err := s.bucket.PutObjectFromFile(objectName, "../sample/BingWallpaper-2015-11-07.jpg")
+	c.Assert(err, IsNil)
+
+	// If bucket-name not specified, it is saved to the current bucket by default.
+	destObjName := objectNamePrefix + "_process_dest_1.jpg"
+	process := fmt.Sprintf("image/resize,w_100|sys/saveas,o_%v", base64.URLEncoding.EncodeToString([]byte(destObjName)))
+	result, err := s.bucket.ProcessObject(objectName, process)
+	c.Assert(err, IsNil)
+	exist, _ := s.bucket.IsObjectExist(destObjName)
+	c.Assert(exist, Equals, true)
+	c.Assert(result.Bucket, Equals, "")
+	c.Assert(result.Object, Equals, destObjName)
+
+	destObjName = objectNamePrefix + "_process_dest_1.jpg"
+	process = fmt.Sprintf("image/resize,w_100|sys/saveas,o_%v,b_%v", base64.URLEncoding.EncodeToString([]byte(destObjName)), base64.URLEncoding.EncodeToString([]byte(s.bucket.BucketName)))
+	result, err = s.bucket.ProcessObject(objectName, process)
+	c.Assert(err, IsNil)
+	exist, _ = s.bucket.IsObjectExist(destObjName)
+	c.Assert(exist, Equals, true)
+	c.Assert(result.Bucket, Equals, s.bucket.BucketName)
+	c.Assert(result.Object, Equals, destObjName)
+
+	// unsupported process string
+	process = fmt.Sprintf("image/resize,w_100|saveas,o_%v,b_%v", base64.URLEncoding.EncodeToString([]byte(destObjName)), base64.URLEncoding.EncodeToString([]byte(s.bucket.BucketName)))
+	result, err = s.bucket.ProcessObject(objectName, process)
+	c.Assert(err, NotNil)
+}
+
 // Private
 func createFileAndWrite(fileName string, data []byte) error {
 	os.Remove(fileName)

+ 38 - 9
oss/client.go

@@ -6,6 +6,7 @@ import (
 	"bytes"
 	"encoding/xml"
 	"io"
+	"log"
 	"net/http"
 	"strings"
 	"time"
@@ -18,8 +19,9 @@ import (
 type (
 	// Client OSS client
 	Client struct {
-		Config *Config // OSS client configuration
-		Conn   *Conn   // Send HTTP request
+		Config     *Config      // OSS client configuration
+		Conn       *Conn        // Send HTTP request
+		HTTPClient *http.Client // http.Client to use; if nil, the SDK creates its own
 	}
 
 	// ClientOption client option such as UseCname, Timeout, SecurityToken.
@@ -51,8 +53,8 @@ func New(endpoint, accessKeyID, accessKeySecret string, options ...ClientOption)
 
 	// OSS client
 	client := &Client{
-		config,
-		conn,
+		Config: config,
+		Conn:   conn,
 	}
 
 	// Client options parse
@@ -61,7 +63,7 @@ func New(endpoint, accessKeyID, accessKeySecret string, options ...ClientOption)
 	}
 
 	// Create HTTP connection
-	err := conn.init(config, url)
+	err := conn.init(config, url, client.HTTPClient)
 
 	return client, err
 }
@@ -149,7 +151,7 @@ func (client Client) ListBuckets(options ...Option) (ListBucketsResult, error) {
 // IsBucketExist checks if the bucket exists
 //
 // bucketName    the bucket name.
-// 
+//
 // bool    true if it exists, and it's only valid when error is nil.
 // error    it's nil if no error, otherwise it's an error object.
 //
@@ -184,7 +186,7 @@ func (client Client) DeleteBucket(bucketName string) error {
 
 // GetBucketLocation gets the bucket location.
 //
-// Checks out the following link for more information : 
+// Checks out the following link for more information :
 // https://help.aliyun.com/document_detail/oss/user_guide/oss_concept/endpoint.html
 //
 // bucketName    the bucket name
@@ -253,7 +255,7 @@ func (client Client) GetBucketACL(bucketName string) (GetBucketACLResult, error)
 // bucketName    the bucket name.
 // rules    the lifecycle rules. There're two kind of rules: absolute time expiration and relative time expiration in days and day/month/year respectively.
 //          Check out sample/bucket_lifecycle.go for more details.
-// 
+//
 // error    it's nil if no error, otherwise it's an error object.
 //
 func (client Client) SetBucketLifecycle(bucketName string, rules []LifecycleRule) error {
@@ -300,7 +302,7 @@ func (client Client) DeleteBucketLifecycle(bucketName string) error {
 // GetBucketLifecycle gets the bucket's lifecycle settings.
 //
 // bucketName    the bucket name.
-// 
+//
 // GetBucketLifecycleResponse    the result object upon successful request. It's only valid when error is nil.
 // error    it's nil if no error, otherwise it's an error object.
 //
@@ -757,6 +759,33 @@ func AuthProxy(proxyHost, proxyUser, proxyPassword string) ClientOption {
 	}
 }
 
+//
+// HTTPClient sets the http.Client in use to the one passed in
+//
+func HTTPClient(HTTPClient *http.Client) ClientOption {
+	return func(client *Client) {
+		client.HTTPClient = HTTPClient
+	}
+}
+
+//
+// SetLogLevel sets the oss sdk log level
+//
+func SetLogLevel(LogLevel int) ClientOption {
+	return func(client *Client) {
+		client.Config.LogLevel = LogLevel
+	}
+}
+
+//
+// SetLogger sets the logger used by the oss sdk
+//
+func SetLogger(Logger *log.Logger) ClientOption {
+	return func(client *Client) {
+		client.Config.Logger = Logger
+	}
+}
+
 // Private
 func (client Client) do(method, bucketName string, params map[string]interface{},
 	headers map[string]string, data io.Reader) (*Response, error) {
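
Taken together, the new client options above can be combined at construction time. A minimal sketch (endpoint and keys are placeholders; the level constants come from oss/conf.go in this commit):

    package main

    import (
        "log"
        "net/http"
        "os"
        "time"

        "github.com/aliyun/aliyun-oss-go-sdk/oss"
    )

    func main() {
        httpClient := &http.Client{Timeout: 30 * time.Second}
        logger := log.New(os.Stderr, "oss ", log.LstdFlags)

        client, err := oss.New("endpoint", "accessKeyID", "accessKeySecret",
            oss.HTTPClient(httpClient), // reuse a caller-supplied http.Client
            oss.SetLogLevel(oss.Debug), // enable debug-level SDK logging
            oss.SetLogger(logger))      // route SDK logs to a custom logger
        if err != nil {
            panic(err)
        }
        _ = client
    }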

+ 90 - 1
oss/client_test.go

@@ -5,8 +5,10 @@
 package oss
 
 import (
+	"io/ioutil"
 	"log"
 	"math/rand"
+	"net/http"
 	"os"
 	"strings"
 	"testing"
@@ -1411,8 +1413,15 @@ func (s *OssClientSuite) TestClientOption(c *C) {
 	c.Assert(client.Conn.config.ProxyPassword, Equals, proxyPasswd)
 
 	client, err = New(endpoint, accessID, accessKey, UserAgent("go sdk user agent"))
-
 	c.Assert(client.Conn.config.UserAgent, Equals, "go sdk user agent")
+
+	// Check we can override the http.Client
+	httpClient := new(http.Client)
+	client, err = New(endpoint, accessID, accessKey, HTTPClient(httpClient))
+	c.Assert(client.HTTPClient, Equals, httpClient)
+	c.Assert(client.Conn.client, Equals, httpClient)
+	client, err = New(endpoint, accessID, accessKey)
+	c.Assert(client.HTTPClient, IsNil)
 }
 
 // TestProxy
@@ -1497,3 +1506,83 @@ func (s *OssClientSuite) getBucket(buckets []BucketProperties, bucket string) (b
 	}
 	return false, BucketProperties{}
 }
+
+func (s *OssClientSuite) TestHttpLogNotSignUrl(c *C) {
+	logName := "." + string(os.PathSeparator) + "test-go-sdk-httpdebug.log" + randStr(5)
+	f, err := os.OpenFile(logName, os.O_CREATE|os.O_TRUNC|os.O_RDWR, 0660)
+	c.Assert(err, IsNil)
+
+	client, err := New(endpoint, accessID, accessKey)
+	client.Config.LogLevel = Debug
+
+	client.Config.Logger = log.New(f, "", log.LstdFlags)
+
+	var testBucketName = bucketNamePrefix + strings.ToLower(randStr(5))
+
+	// CreateBucket
+	err = client.CreateBucket(testBucketName)
+	f.Close()
+
+	// read the log file to get the http info
+	contents, err := ioutil.ReadFile(logName)
+	c.Assert(err, IsNil)
+
+	httpContent := string(contents)
+	//fmt.Println(httpContent)
+
+	c.Assert(strings.Contains(httpContent, "signStr"), Equals, true)
+	c.Assert(strings.Contains(httpContent, "Method:"), Equals, true)
+
+	// delete test bucket and log
+	os.Remove(logName)
+	client.DeleteBucket(testBucketName)
+}
+
+func (s *OssClientSuite) TestHttpLogSignUrl(c *C) {
+	logName := "." + string(os.PathSeparator) + "test-go-sdk-httpdebug-signurl.log" + randStr(5)
+	f, err := os.OpenFile(logName, os.O_CREATE|os.O_TRUNC|os.O_RDWR, 0660)
+	c.Assert(err, IsNil)
+
+	client, err := New(endpoint, accessID, accessKey)
+	client.Config.LogLevel = Debug
+	client.Config.Logger = log.New(f, "", log.LstdFlags)
+
+	var testBucketName = bucketNamePrefix + strings.ToLower(randStr(5))
+
+	// CreateBucket
+	err = client.CreateBucket(testBucketName)
+	f.Close()
+
+	// clear log
+	f, err = os.OpenFile(logName, os.O_CREATE|os.O_TRUNC|os.O_RDWR, 0660)
+	client.Config.Logger = log.New(f, "", log.LstdFlags)
+
+	bucket, _ := client.Bucket(testBucketName)
+	objectName := objectNamePrefix + randStr(5)
+	objectValue := randStr(20)
+
+	// Sign URL for put
+	str, err := bucket.SignURL(objectName, HTTPPut, 60)
+	c.Assert(err, IsNil)
+	c.Assert(strings.Contains(str, HTTPParamExpires+"="), Equals, true)
+	c.Assert(strings.Contains(str, HTTPParamAccessKeyID+"="), Equals, true)
+	c.Assert(strings.Contains(str, HTTPParamSignature+"="), Equals, true)
+
+	// Error put object with URL
+	err = bucket.PutObjectWithURL(str, strings.NewReader(objectValue), ContentType("image/tiff"))
+	f.Close()
+
+	// read the log file to get the http info
+	contents, err := ioutil.ReadFile(logName)
+	c.Assert(err, IsNil)
+
+	httpContent := string(contents)
+	//fmt.Println(httpContent)
+
+	c.Assert(strings.Contains(httpContent, "signStr"), Equals, true)
+	c.Assert(strings.Contains(httpContent, "Method:"), Equals, true)
+
+	// delete test bucket and log
+	os.Remove(logName)
+	client.DeleteBucket(testBucketName)
+}

+ 59 - 20
oss/conf.go

@@ -1,9 +1,23 @@
 package oss
 
 import (
+	"bytes"
+	"fmt"
+	"log"
+	"os"
 	"time"
 )
 
+const (
+	LogOff = iota
+	Error
+	Warn
+	Info
+	Debug
+)
+
+var LogTag = []string{"[error]", "[warn]", "[info]", "[debug]"}
+
 // HTTPTimeout defines HTTP timeout.
 type HTTPTimeout struct {
 	ConnectTimeout   time.Duration
@@ -13,26 +27,46 @@ type HTTPTimeout struct {
 	IdleConnTimeout  time.Duration
 }
 
+type HTTPMaxConns struct {
+	MaxIdleConns        int
+	MaxIdleConnsPerHost int
+}
+
 // Config defines oss configuration
 type Config struct {
-	Endpoint        string      // OSS endpoint
-	AccessKeyID     string      // AccessId
-	AccessKeySecret string      // AccessKey
-	RetryTimes      uint        // Retry count by default it's 5.
-	UserAgent       string      // SDK name/version/system information
-	IsDebug         bool        // Enable debug mode. Default is false.
-	Timeout         uint        // Timeout in seconds. By default it's 60.
-	SecurityToken   string      // STS Token
-	IsCname         bool        // If cname is in the endpoint.
-	HTTPTimeout     HTTPTimeout // HTTP timeout
-	IsUseProxy      bool        // Flag of using proxy.
-	ProxyHost       string      // Flag of using proxy host.
-	IsAuthProxy     bool        // Flag of needing authentication.
-	ProxyUser       string      // Proxy user
-	ProxyPassword   string      // Proxy password
-	IsEnableMD5     bool        // Flag of enabling MD5 for upload.
-	MD5Threshold    int64       // Memory footprint threshold for each MD5 computation (16MB is the default), in byte. When the data is more than that, temp file is used.
-	IsEnableCRC     bool        // Flag of enabling CRC for upload.
+	Endpoint        string       // OSS endpoint
+	AccessKeyID     string       // AccessId
+	AccessKeySecret string       // AccessKey
+	RetryTimes      uint         // Retry count by default it's 5.
+	UserAgent       string       // SDK name/version/system information
+	IsDebug         bool         // Enable debug mode. Default is false.
+	Timeout         uint         // Timeout in seconds. By default it's 60.
+	SecurityToken   string       // STS Token
+	IsCname         bool         // If cname is in the endpoint.
+	HTTPTimeout     HTTPTimeout  // HTTP timeout
+	HTTPMaxConns    HTTPMaxConns // Http max connections
+	IsUseProxy      bool         // Flag of using proxy.
+	ProxyHost       string       // Flag of using proxy host.
+	IsAuthProxy     bool         // Flag of needing authentication.
+	ProxyUser       string       // Proxy user
+	ProxyPassword   string       // Proxy password
+	IsEnableMD5     bool         // Flag of enabling MD5 for upload.
+	MD5Threshold    int64        // Memory footprint threshold for each MD5 computation (16MB is the default), in byte. When the data is more than that, temp file is used.
+	IsEnableCRC     bool         // Flag of enabling CRC for upload.
+	LogLevel        int          // log level
+	Logger          *log.Logger  // For writing logs
+}
+
+// WriteLog writes a log message at the given level to the configured Logger
+func (config *Config) WriteLog(LogLevel int, format string, a ...interface{}) {
+	if config.LogLevel < LogLevel || config.Logger == nil {
+		return
+	}
+
+	var logBuffer bytes.Buffer
+	logBuffer.WriteString(LogTag[LogLevel-1])
+	logBuffer.WriteString(fmt.Sprintf(format, a...))
+	config.Logger.Printf("%s", logBuffer.String())
 }
 
 // getDefaultOssConfig gets the default configuration.
@@ -44,8 +78,8 @@ func getDefaultOssConfig() *Config {
 	config.AccessKeySecret = ""
 	config.RetryTimes = 5
 	config.IsDebug = false
-	config.UserAgent = userAgent
-	config.Timeout = 60  // Seconds
+	config.UserAgent = userAgent()
+	config.Timeout = 60 // Seconds
 	config.SecurityToken = ""
 	config.IsCname = false
 
@@ -54,6 +88,8 @@ func getDefaultOssConfig() *Config {
 	config.HTTPTimeout.HeaderTimeout = time.Second * 60    // 60s
 	config.HTTPTimeout.LongTimeout = time.Second * 300     // 300s
 	config.HTTPTimeout.IdleConnTimeout = time.Second * 50  // 50s
+	config.HTTPMaxConns.MaxIdleConns = 100
+	config.HTTPMaxConns.MaxIdleConnsPerHost = 100
 
 	config.IsUseProxy = false
 	config.ProxyHost = ""
@@ -65,5 +101,8 @@ func getDefaultOssConfig() *Config {
 	config.IsEnableMD5 = false
 	config.IsEnableCRC = true
 
+	config.LogLevel = LogOff
+	config.Logger = log.New(os.Stdout, "", log.LstdFlags)
+
 	return &config
 }
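
To illustrate the filtering in WriteLog above, a package-internal sketch (getDefaultOssConfig is unexported, so this only compiles inside package oss): with LogOff=0 through Debug=4, a message is emitted only when its level is at or below config.LogLevel, prefixed with LogTag[level-1].

    cfg := getDefaultOssConfig()
    cfg.LogLevel = Info
    cfg.WriteLog(Debug, "dropped: %s\n", "Debug is above the configured level")
    cfg.WriteLog(Warn, "emitted: %s\n", "printed with the \"[warn]\" tag")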

+ 115 - 18
oss/conn.go

@@ -4,6 +4,7 @@ import (
 	"bytes"
 	"crypto/md5"
 	"encoding/base64"
+	"encoding/json"
 	"encoding/xml"
 	"fmt"
 	"hash"
@@ -26,25 +27,28 @@ type Conn struct {
 	client *http.Client
 }
 
-var signKeyList = []string{"acl", "uploads", "location", "cors", "logging", "website", "referer", "lifecycle", "delete", "append", "tagging", "objectMeta", "uploadId", "partNumber", "security-token", "position", "img", "style", "styleName", "replication", "replicationProgress", "replicationLocation", "cname", "bucketInfo", "comp", "qos", "live", "status", "vod", "startTime", "endTime", "symlink", "x-oss-process", "response-content-type", "response-content-language", "response-expires", "response-cache-control", "response-content-disposition", "response-content-encoding", "udf", "udfName", "udfImage", "udfId", "udfImageDesc", "udfApplication", "comp", "udfApplicationLog", "restore"}
+var signKeyList = []string{"acl", "uploads", "location", "cors", "logging", "website", "referer", "lifecycle", "delete", "append", "tagging", "objectMeta", "uploadId", "partNumber", "security-token", "position", "img", "style", "styleName", "replication", "replicationProgress", "replicationLocation", "cname", "bucketInfo", "comp", "qos", "live", "status", "vod", "startTime", "endTime", "symlink", "x-oss-process", "response-content-type", "response-content-language", "response-expires", "response-cache-control", "response-content-disposition", "response-content-encoding", "udf", "udfName", "udfImage", "udfId", "udfImageDesc", "udfApplication", "comp", "udfApplicationLog", "restore", "callback", "callback-var"}
 
 // init initializes Conn
-func (conn *Conn) init(config *Config, urlMaker *urlMaker) error {
-	// New transport
-	transport := newTransport(conn, config)
-
-	// Proxy
-	if conn.config.IsUseProxy {
-		proxyURL, err := url.Parse(config.ProxyHost)
-		if err != nil {
-			return err
+func (conn *Conn) init(config *Config, urlMaker *urlMaker, client *http.Client) error {
+	if client == nil {
+		// New transport
+		transport := newTransport(conn, config)
+
+		// Proxy
+		if conn.config.IsUseProxy {
+			proxyURL, err := url.Parse(config.ProxyHost)
+			if err != nil {
+				return err
+			}
+			transport.Proxy = http.ProxyURL(proxyURL)
 		}
-		transport.Proxy = http.ProxyURL(proxyURL)
+		client = &http.Client{Transport: transport}
 	}
 
 	conn.config = config
 	conn.url = urlMaker
-	conn.client = &http.Client{Transport: transport}
+	conn.client = client
 
 	return nil
 }
@@ -107,6 +111,10 @@ func (conn Conn) DoURL(method HTTPMethod, signedURL string, headers map[string]s
 	event := newProgressEvent(TransferStartedEvent, 0, req.ContentLength)
 	publishProgress(listener, event)
 
+	if conn.config.LogLevel >= Debug {
+		conn.LoggerHttpReq(req)
+	}
+
 	resp, err := conn.client.Do(req)
 	if err != nil {
 		// Transfer failed
@@ -115,6 +123,11 @@ func (conn Conn) DoURL(method HTTPMethod, signedURL string, headers map[string]s
 		return nil, err
 	}
 
+	if conn.config.LogLevel >= Debug {
+		// print out the http response
+		conn.LoggerHttpResp(req, resp)
+	}
+
 	// Transfer completed
 	event = newProgressEvent(TransferCompletedEvent, tracker.completedBytes, req.ContentLength)
 	publishProgress(listener, event)
@@ -227,7 +240,12 @@ func (conn Conn) doRequest(method string, uri *url.URL, canonicalizedResource st
 	event := newProgressEvent(TransferStartedEvent, 0, req.ContentLength)
 	publishProgress(listener, event)
 
+	if conn.config.LogLevel >= Debug {
+		conn.LoggerHttpReq(req)
+	}
+
 	resp, err := conn.client.Do(req)
+
 	if err != nil {
 		// Transfer failed
 		event = newProgressEvent(TransferFailedEvent, tracker.completedBytes, req.ContentLength)
@@ -235,6 +253,11 @@ func (conn Conn) doRequest(method string, uri *url.URL, canonicalizedResource st
 		return nil, err
 	}
 
+	if conn.config.LogLevel >= Debug {
+		// print out the http response
+		conn.LoggerHttpResp(req, resp)
+	}
+
 	// Transfer completed
 	event = newProgressEvent(TransferCompletedEvent, tracker.completedBytes, req.ContentLength)
 	publishProgress(listener, event)
@@ -367,16 +390,19 @@ func (conn Conn) handleResponse(resp *http.Response, crc hash.Hash64) (*Response
 		}
 
 		if len(respBody) == 0 {
-			// No error in response body
-			err = fmt.Errorf("oss: service returned without a response body (%s)", resp.Status)
+			err = ServiceError{
+				StatusCode: statusCode,
+				RequestID:  resp.Header.Get(HTTPHeaderOssRequestID),
+			}
 		} else {
 			// Response contains storage service error object, unmarshal
 			srvErr, errIn := serviceErrFromXML(respBody, resp.StatusCode,
 				resp.Header.Get(HTTPHeaderOssRequestID))
-			if err != nil { // error unmarshaling the error response
-				err = errIn
+			if errIn != nil { // error unmarshaling the error response
+				err = fmt.Errorf("oss: service returned invalid response body, status = %s, RequestId = %s", resp.Status, resp.Header.Get(HTTPHeaderOssRequestID))
+			} else {
+				err = srvErr
 			}
-			err = srvErr
 		}
 
 		return &Response{
@@ -409,6 +435,63 @@ func (conn Conn) handleResponse(resp *http.Response, crc hash.Hash64) (*Response
 	}, nil
 }
 
+func (conn Conn) LoggerHttpReq(req *http.Request) {
+	var logBuffer bytes.Buffer
+	logBuffer.WriteString(fmt.Sprintf("[Req:%p]Method:%s,", req, req.Method))
+	logBuffer.WriteString(fmt.Sprintf("Host:%s,", req.URL.Host))
+	logBuffer.WriteString(fmt.Sprintf("Path:%s,", req.URL.Path))
+	logBuffer.WriteString(fmt.Sprintf("Query:%s,", req.URL.RawQuery))
+	logBuffer.WriteString(fmt.Sprintf("Header info:"))
+
+	for k, v := range req.Header {
+		var valueBuffer bytes.Buffer
+		for j := 0; j < len(v); j++ {
+			if j > 0 {
+				valueBuffer.WriteString(" ")
+			}
+			valueBuffer.WriteString(v[j])
+		}
+		logBuffer.WriteString(fmt.Sprintf("%s:%s,", k, valueBuffer.String()))
+	}
+	conn.config.WriteLog(Debug, "%s.\n", logBuffer.String())
+}
+
+func (conn Conn) LoggerHttpResp(req *http.Request, resp *http.Response) {
+	var logBuffer bytes.Buffer
+	logBuffer.WriteString(fmt.Sprintf("[Resp:%p]StatusCode:%d,", req, resp.StatusCode))
+	logBuffer.WriteString(fmt.Sprintf("Header info:"))
+	for k, v := range resp.Header {
+		var valueBuffer bytes.Buffer
+		for j := 0; j < len(v); j++ {
+			if j > 0 {
+				valueBuffer.WriteString(" ")
+			}
+			valueBuffer.WriteString(v[j])
+		}
+		logBuffer.WriteString(fmt.Sprintf("%s:%s,", k, valueBuffer.String()))
+	}
+
+	statusCode := resp.StatusCode
+	if statusCode >= 400 && statusCode <= 505 {
+		// 4xx and 5xx indicate that an error occurred during the operation
+		var respBody []byte
+		respBody, err := readResponseBody(resp)
+		if err != nil {
+			return
+		}
+
+		if len(respBody) == 0 {
+			// No error in response body
+		} else {
+			// Response contains storage service error object, unmarshal
+			logBuffer.WriteString(fmt.Sprintf("Body:%s", string(respBody)))
+		}
+	} else if statusCode >= 300 && statusCode <= 307 {
+		// OSS use 3xx, but response has no body
+	}
+	conn.config.WriteLog(Debug, "%s.\n", logBuffer.String())
+}
+
 func calcMD5(body io.Reader, contentLen, md5Threshold int64) (reader io.Reader, b64 string, tempFile *os.File, err error) {
 	if contentLen == 0 || contentLen > md5Threshold {
 		// Huge body, use temporary file
@@ -444,9 +527,11 @@ func readResponseBody(resp *http.Response) ([]byte, error) {
 
 func serviceErrFromXML(body []byte, statusCode int, requestID string) (ServiceError, error) {
 	var storageErr ServiceError
+
 	if err := xml.Unmarshal(body, &storageErr); err != nil {
 		return storageErr, err
 	}
+
 	storageErr.StatusCode = statusCode
 	storageErr.RequestID = requestID
 	storageErr.RawMessage = string(body)
@@ -461,6 +546,14 @@ func xmlUnmarshal(body io.Reader, v interface{}) error {
 	return xml.Unmarshal(data, v)
 }
 
+func jsonUnmarshal(body io.Reader, v interface{}) error {
+	data, err := ioutil.ReadAll(body)
+	if err != nil {
+		return err
+	}
+	return json.Unmarshal(data, v)
+}
+
 // timeoutConn handles HTTP timeout
 type timeoutConn struct {
 	conn        net.Conn
@@ -545,7 +638,11 @@ func (um *urlMaker) Init(endpoint string, isCname bool, isProxy bool) {
 	host, _, err := net.SplitHostPort(um.NetLoc)
 	if err != nil {
 		host = um.NetLoc
+		if host[0] == '[' && host[len(host)-1] == ']' {
+			host = host[1 : len(host)-1]
+		}
 	}
+
 	ip := net.ParseIP(host)
 	if ip != nil {
 		um.Type = urlTypeIP
@@ -618,7 +715,7 @@ func (um urlMaker) buildURL(bucket, object string) (string, string) {
 	return host, path
 }
 
-// getResource gets canonicalized resource 
+// getResource gets canonicalized resource
 func (um urlMaker) getResource(bucketName, objectName, subResource string) string {
 	if subResource != "" {
 		subResource = "?" + subResource
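
The handleResponse change in this file means an error response with an empty body now surfaces as a typed ServiceError carrying the status code and request ID, instead of a bare fmt error. A sketch of inspecting it (assumes a bucket from client.Bucket and an fmt import; "missing-object" is a placeholder):

    _, err := bucket.GetObject("missing-object")
    if err != nil {
        if serr, ok := err.(oss.ServiceError); ok {
            // StatusCode and RequestID are set even when the body was empty.
            fmt.Printf("status=%d code=%q request_id=%s\n",
                serr.StatusCode, serr.Code, serr.RequestID)
        }
    }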

+ 17 - 0
oss/conn_test.go

@@ -69,6 +69,16 @@ func (s *OssConnSuite) TestURLMarker(c *C) {
 	c.Assert(um.Type, Equals, urlTypeIP)
 	c.Assert(um.Scheme, Equals, "https")
 	c.Assert(um.NetLoc, Equals, "127.0.0.1:8080")
+
+	um.Init("http://[2401:b180::dc]", false, false)
+	c.Assert(um.Type, Equals, urlTypeIP)
+	c.Assert(um.Scheme, Equals, "http")
+	c.Assert(um.NetLoc, Equals, "[2401:b180::dc]")
+
+	um.Init("https://[2401:b180::dc]:8080", false, false)
+	c.Assert(um.Type, Equals, urlTypeIP)
+	c.Assert(um.Scheme, Equals, "https")
+	c.Assert(um.NetLoc, Equals, "[2401:b180::dc]:8080")
 }
 
 func (s *OssConnSuite) TestAuth(c *C) {
@@ -125,6 +135,13 @@ func (s *OssConnSuite) TestConnToolFunc(c *C) {
 	unexpect := UnexpectedStatusCodeError{[]int{200}, 202}
 	c.Assert(len(unexpect.Error()) > 0, Equals, true)
 	c.Assert(unexpect.Got(), Equals, 202)
+
+	fd, err := os.Open("../sample/BingWallpaper-2015-11-07.jpg")
+	c.Assert(err, IsNil)
+	fd.Close()
+	var out ProcessObjectResult
+	err = jsonUnmarshal(fd, &out)
+	c.Assert(err, NotNil)
 }
 
 func (s *OssConnSuite) TestSignRtmpURL(c *C) {

+ 17 - 4
oss/const.go

@@ -44,6 +44,14 @@ const (
 	StorageArchive StorageClassType = "Archive"
 )
 
+// PayerType the type of request payer
+type PayerType string
+
+const (
+	// Requester the requester who sends the request
+	Requester PayerType = "requester"
+)
+
 // HTTPMethod HTTP request method
 type HTTPMethod string
 
@@ -95,6 +103,7 @@ const (
 	HTTPHeaderOssObjectACL                   = "X-Oss-Object-Acl"
 	HTTPHeaderOssSecurityToken               = "X-Oss-Security-Token"
 	HTTPHeaderOssServerSideEncryption        = "X-Oss-Server-Side-Encryption"
+	HTTPHeaderOssServerSideEncryptionKeyID   = "X-Oss-Server-Side-Encryption-Key-Id"
 	HTTPHeaderOssCopySource                  = "X-Oss-Copy-Source"
 	HTTPHeaderOssCopySourceRange             = "X-Oss-Copy-Source-Range"
 	HTTPHeaderOssCopySourceIfMatch           = "X-Oss-Copy-Source-If-Match"
@@ -106,6 +115,10 @@ const (
 	HTTPHeaderOssRequestID                   = "X-Oss-Request-Id"
 	HTTPHeaderOssCRC64                       = "X-Oss-Hash-Crc64ecma"
 	HTTPHeaderOssSymlinkTarget               = "X-Oss-Symlink-Target"
+	HTTPHeaderOssStorageClass                = "X-Oss-Storage-Class"
+	HTTPHeaderOssCallback                    = "X-Oss-Callback"
+	HTTPHeaderOssCallbackVar                 = "X-Oss-Callback-Var"
+	HTTPHeaderOSSRequester                   = "X-Oss-Request-Payer"
 )
 
 // HTTP Param
@@ -124,10 +137,10 @@ const (
 
 	FilePermMode = os.FileMode(0664) // Default file permission
 
-	TempFilePrefix = "oss-go-temp-"  // Temp file prefix
-	TempFileSuffix = ".temp"         // Temp file suffix
+	TempFilePrefix = "oss-go-temp-" // Temp file prefix
+	TempFileSuffix = ".temp"        // Temp file suffix
 
-	CheckpointFileSuffix = ".cp"     // Checkpoint file suffix
+	CheckpointFileSuffix = ".cp" // Checkpoint file suffix
 
-	Version = "1.9.0" // Go SDK version
+	Version = "1.9.2" // Go SDK version
 )

+ 69 - 51
oss/download.go

@@ -5,11 +5,14 @@ import (
 	"encoding/base64"
 	"encoding/json"
 	"errors"
+	"fmt"
 	"hash"
 	"hash/crc64"
 	"io"
 	"io/ioutil"
+	"net/http"
 	"os"
+	"path/filepath"
 	"strconv"
 )
 
@@ -27,25 +30,34 @@ func (bucket Bucket) DownloadFile(objectKey, filePath string, partSize int64, op
 		return errors.New("oss: part size smaller than 1")
 	}
 
-	cpConf, err := getCpConfig(options, filePath)
-	if err != nil {
-		return err
-	}
-
 	uRange, err := getRangeConfig(options)
 	if err != nil {
 		return err
 	}
 
+	cpConf := getCpConfig(options)
 	routines := getRoutines(options)
 
-	if cpConf.IsEnable {
-		return bucket.downloadFileWithCp(objectKey, filePath, partSize, options, cpConf.FilePath, routines, uRange)
+	if cpConf != nil && cpConf.IsEnable {
+		cpFilePath := getDownloadCpFilePath(cpConf, bucket.BucketName, objectKey, filePath)
+		if cpFilePath != "" {
+			return bucket.downloadFileWithCp(objectKey, filePath, partSize, options, cpFilePath, routines, uRange)
+		}
 	}
 
 	return bucket.downloadFile(objectKey, filePath, partSize, options, routines, uRange)
 }
 
+func getDownloadCpFilePath(cpConf *cpConfig, srcBucket, srcObject, destFile string) string {
+	if cpConf.FilePath == "" && cpConf.DirPath != "" {
+		src := fmt.Sprintf("oss://%v/%v", srcBucket, srcObject)
+		absPath, _ := filepath.Abs(destFile)
+		cpFileName := getCpFileName(src, absPath)
+		cpConf.FilePath = cpConf.DirPath + string(os.PathSeparator) + cpFileName
+	}
+	return cpConf.FilePath
+}
+
 // getRangeConfig gets the download range from the options.
 func getRangeConfig(options []Option) (*unpackedRange, error) {
 	rangeOpt, err := findOption(options, HTTPHeaderRange, nil)
@@ -76,7 +88,7 @@ func defaultDownloadPartHook(part downloadPart) error {
 	return nil
 }
 
-// defaultDownloadProgressListener defines default ProgressListener, shields the ProgressListener in options of GetObject. 
+// defaultDownloadProgressListener defines default ProgressListener, shields the ProgressListener in options of GetObject.
 type defaultDownloadProgressListener struct {
 }
 
@@ -168,27 +180,8 @@ type downloadPart struct {
 }
 
 // getDownloadParts gets download parts
-func getDownloadParts(bucket *Bucket, objectKey string, partSize int64, uRange *unpackedRange) ([]downloadPart, bool, uint64, error) {
-	meta, err := bucket.GetObjectDetailedMeta(objectKey)
-	if err != nil {
-		return nil, false, 0, err
-	}
-
+func getDownloadParts(objectSize, partSize int64, uRange *unpackedRange) []downloadPart {
 	parts := []downloadPart{}
-	objectSize, err := strconv.ParseInt(meta.Get(HTTPHeaderContentLength), 10, 0)
-	if err != nil {
-		return nil, false, 0, err
-	}
-
-	enableCRC := false
-	crcVal := (uint64)(0)
-	if bucket.getConfig().IsEnableCRC && meta.Get(HTTPHeaderOssCRC64) != "" {
-		if uRange == nil || (!uRange.hasStart && !uRange.hasEnd) {
-			enableCRC = true
-			crcVal, _ = strconv.ParseUint(meta.Get(HTTPHeaderOssCRC64), 10, 0)
-		}
-	}
-
 	part := downloadPart{}
 	i := 0
 	start, end := adjustRange(uRange, objectSize)
@@ -201,7 +194,7 @@ func getDownloadParts(bucket *Bucket, objectKey string, partSize int64, uRange *
 		parts = append(parts, part)
 		i++
 	}
-	return parts, enableCRC, crcVal, nil
+	return parts
 }
 
 // getObjectBytes gets object bytes length
@@ -232,6 +225,12 @@ func (bucket Bucket) downloadFile(objectKey, filePath string, partSize int64, op
 	tempFilePath := filePath + TempFileSuffix
 	listener := getProgressListener(options)
 
+	payerOptions := []Option{}
+	payer := getPayer(options)
+	if payer != "" {
+		payerOptions = append(payerOptions, RequestPayer(PayerType(payer)))
+	}
+
 	// If the file does not exist, create one. If exists, the download will overwrite it.
 	fd, err := os.OpenFile(tempFilePath, os.O_WRONLY|os.O_CREATE, FilePermMode)
 	if err != nil {
@@ -239,12 +238,27 @@ func (bucket Bucket) downloadFile(objectKey, filePath string, partSize int64, op
 	}
 	fd.Close()
 
-	// Get the parts of the file
-	parts, enableCRC, expectedCRC, err := getDownloadParts(&bucket, objectKey, partSize, uRange)
+	meta, err := bucket.GetObjectDetailedMeta(objectKey, payerOptions...)
+	if err != nil {
+		return err
+	}
+
+	objectSize, err := strconv.ParseInt(meta.Get(HTTPHeaderContentLength), 10, 0)
 	if err != nil {
 		return err
 	}
 
+	enableCRC := false
+	expectedCRC := (uint64)(0)
+	if bucket.getConfig().IsEnableCRC && meta.Get(HTTPHeaderOssCRC64) != "" {
+		if uRange == nil || (!uRange.hasStart && !uRange.hasEnd) {
+			enableCRC = true
+			expectedCRC, _ = strconv.ParseUint(meta.Get(HTTPHeaderOssCRC64), 10, 0)
+		}
+	}
+
+	// Get the parts of the file
+	parts := getDownloadParts(objectSize, partSize, uRange)
 	jobs := make(chan downloadPart, len(parts))
 	results := make(chan downloadPart, len(parts))
 	failed := make(chan error)
@@ -325,7 +339,7 @@ type objectStat struct {
 }
 
 // isValid checks whether the checkpoint data is valid: it returns true when the checkpoint itself is intact and the object has not been updated.
-func (cp downloadCheckpoint) isValid(bucket *Bucket, objectKey string, uRange *unpackedRange) (bool, error) {
+func (cp downloadCheckpoint) isValid(meta http.Header, uRange *unpackedRange) (bool, error) {
 	// Compare the CP's Magic and the MD5
 	cpb := cp
 	cpb.MD5 = ""
@@ -337,12 +351,6 @@ func (cp downloadCheckpoint) isValid(bucket *Bucket, objectKey string, uRange *u
 		return false, nil
 	}
 
-	// Ensure the object is not updated.
-	meta, err := bucket.GetObjectDetailedMeta(objectKey)
-	if err != nil {
-		return false, err
-	}
-
 	objectSize, err := strconv.ParseInt(meta.Get(HTTPHeaderContentLength), 10, 0)
 	if err != nil {
 		return false, err
@@ -424,18 +432,12 @@ func (cp downloadCheckpoint) getCompletedBytes() int64 {
 }
 
 // prepare initiates download tasks
-func (cp *downloadCheckpoint) prepare(bucket *Bucket, objectKey, filePath string, partSize int64, uRange *unpackedRange) error {
+func (cp *downloadCheckpoint) prepare(meta http.Header, bucket *Bucket, objectKey, filePath string, partSize int64, uRange *unpackedRange) error {
 	// CP
 	cp.Magic = downloadCpMagic
 	cp.FilePath = filePath
 	cp.Object = objectKey
 
-	// Object
-	meta, err := bucket.GetObjectDetailedMeta(objectKey)
-	if err != nil {
-		return err
-	}
-
 	objectSize, err := strconv.ParseInt(meta.Get(HTTPHeaderContentLength), 10, 0)
 	if err != nil {
 		return err
@@ -445,11 +447,15 @@ func (cp *downloadCheckpoint) prepare(bucket *Bucket, objectKey, filePath string
 	cp.ObjStat.LastModified = meta.Get(HTTPHeaderLastModified)
 	cp.ObjStat.Etag = meta.Get(HTTPHeaderEtag)
 
-	// Parts
-	cp.Parts, cp.enableCRC, cp.CRC, err = getDownloadParts(bucket, objectKey, partSize, uRange)
-	if err != nil {
-		return err
+	if bucket.getConfig().IsEnableCRC && meta.Get(HTTPHeaderOssCRC64) != "" {
+		if uRange == nil || (!uRange.hasStart && !uRange.hasEnd) {
+			cp.enableCRC = true
+			cp.CRC, _ = strconv.ParseUint(meta.Get(HTTPHeaderOssCRC64), 10, 0)
+		}
 	}
+
+	// Parts
+	cp.Parts = getDownloadParts(objectSize, partSize, uRange)
 	cp.PartStat = make([]bool, len(cp.Parts))
 	for i := range cp.PartStat {
 		cp.PartStat[i] = false
@@ -468,6 +474,12 @@ func (bucket Bucket) downloadFileWithCp(objectKey, filePath string, partSize int
 	tempFilePath := filePath + TempFileSuffix
 	listener := getProgressListener(options)
 
+	payerOptions := []Option{}
+	payer := getPayer(options)
+	if payer != "" {
+		payerOptions = append(payerOptions, RequestPayer(PayerType(payer)))
+	}
+
 	// Load checkpoint data.
 	dcp := downloadCheckpoint{}
 	err := dcp.load(cpFilePath)
@@ -475,10 +487,16 @@ func (bucket Bucket) downloadFileWithCp(objectKey, filePath string, partSize int
 		os.Remove(cpFilePath)
 	}
 
+	// Get the object detailed meta.
+	meta, err := bucket.GetObjectDetailedMeta(objectKey, payerOptions...)
+	if err != nil {
+		return err
+	}
+
 	// Load error or data invalid. Re-initialize the download.
-	valid, err := dcp.isValid(&bucket, objectKey, uRange)
+	valid, err := dcp.isValid(meta, uRange)
 	if err != nil || !valid {
-		if err = dcp.prepare(&bucket, objectKey, filePath, partSize, uRange); err != nil {
+		if err = dcp.prepare(meta, &bucket, objectKey, filePath, partSize, uRange); err != nil {
 			return err
 		}
 		os.Remove(cpFilePath)
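
Taken together, DownloadFile now resolves the checkpoint file itself: an explicit Checkpoint path wins, and otherwise CheckpointDir derives a file name from the oss:// source URL and the absolute destination path, so distinct downloads never collide. A caller-side sketch (object, paths, and the /tmp/cp directory are placeholders):

    // Assumes: client, _ := oss.New(endpoint, accessID, accessKey)
    //          bucket, _ := client.Bucket(bucketName)
    // Resumable download whose checkpoint file is kept under /tmp/cp
    // instead of next to the destination file.
    err := bucket.DownloadFile("my-object", "local-file.jpg", 100*1024,
        oss.Routines(3), oss.CheckpointDir(true, "/tmp/cp"))
    if err != nil {
        // A failed download leaves the checkpoint file in place;
        // rerunning the same call resumes from the finished parts.
        log.Fatal(err)
    }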

+ 39 - 26
oss/download_test.go

@@ -146,7 +146,7 @@ func (s *OssDownloadSuite) TestDownloadRoutineWithRecovery(c *C) {
 
 	// Download a file with default checkpoint
 	downloadPartHooker = DownErrorHooker
-	err = s.bucket.DownloadFile(objectName, newFile, 100*1024, Checkpoint(true, ""))
+	err = s.bucket.DownloadFile(objectName, newFile, 100*1024, Checkpoint(true, newFile+".cp"))
 	c.Assert(err, NotNil)
 	c.Assert(err.Error(), Equals, "ErrorHooker")
 	downloadPartHooker = defaultDownloadPartHook
@@ -165,9 +165,9 @@ func (s *OssDownloadSuite) TestDownloadRoutineWithRecovery(c *C) {
 	c.Assert(len(dcp.Parts), Equals, 5)
 	c.Assert(len(dcp.todoParts()), Equals, 1)
 
-	err = s.bucket.DownloadFile(objectName, newFile, 100*1024, Checkpoint(true, ""))
+	err = s.bucket.DownloadFile(objectName, newFile, 100*1024, Checkpoint(true, newFile+".cp"))
 	c.Assert(err, IsNil)
-
+	// Download succeeded, so the checkpoint file has been deleted
 	err = dcp.load(newFile + ".cp")
 	c.Assert(err, NotNil)
 
@@ -175,17 +175,30 @@ func (s *OssDownloadSuite) TestDownloadRoutineWithRecovery(c *C) {
 	c.Assert(err, IsNil)
 	c.Assert(eq, Equals, true)
 
-	// Resumable download with checkpoint
+	// Resumable download with empty checkpoint file path
+	downloadPartHooker = DownErrorHooker
+	err = s.bucket.DownloadFile(objectName, newFile, 100*1024, Checkpoint(true, ""))
+	c.Assert(err, NotNil)
+	c.Assert(err.Error(), Equals, "ErrorHooker")
+	downloadPartHooker = defaultDownloadPartHook
+
+	dcp = downloadCheckpoint{}
+	err = dcp.load(newFile + ".cp")
+	c.Assert(err, NotNil)
+
+	// Resumable download with checkpoint dir
 	os.Remove(newFile)
 	downloadPartHooker = DownErrorHooker
-	err = s.bucket.DownloadFile(objectName, newFile, 100*1024, Checkpoint(true, objectName+".cp"))
+	err = s.bucket.DownloadFile(objectName, newFile, 100*1024, CheckpointDir(true, "./"))
 	c.Assert(err, NotNil)
 	c.Assert(err.Error(), Equals, "ErrorHooker")
 	downloadPartHooker = defaultDownloadPartHook
 
 	// Check
 	dcp = downloadCheckpoint{}
-	err = dcp.load(objectName + ".cp")
+	cpConf := cpConfig{IsEnable: true, DirPath: "./"}
+	cpFilePath := getDownloadCpFilePath(&cpConf, s.bucket.BucketName, objectName, newFile)
+	err = dcp.load(cpFilePath)
 	c.Assert(err, IsNil)
 	c.Assert(dcp.Magic, Equals, downloadCpMagic)
 	c.Assert(len(dcp.MD5), Equals, len("LC34jZU5xK4hlxi3Qn3XGQ=="))
@@ -197,10 +210,10 @@ func (s *OssDownloadSuite) TestDownloadRoutineWithRecovery(c *C) {
 	c.Assert(len(dcp.Parts), Equals, 5)
 	c.Assert(len(dcp.todoParts()), Equals, 1)
 
-	err = s.bucket.DownloadFile(objectName, newFile, 100*1024, Checkpoint(true, objectName+".cp"))
+	err = s.bucket.DownloadFile(objectName, newFile, 100*1024, CheckpointDir(true, "./"))
 	c.Assert(err, IsNil)
-
-	err = dcp.load(objectName + ".cp")
+	// Download succeeded, so the checkpoint file has been deleted
+	err = dcp.load(cpFilePath)
 	c.Assert(err, NotNil)
 
 	eq, err = compareFiles(fileName, newFile)
@@ -209,7 +222,7 @@ func (s *OssDownloadSuite) TestDownloadRoutineWithRecovery(c *C) {
 
 	// Resumable download with checkpoint, in a single pass. No error is expected during the download.
 	os.Remove(newFile)
-	err = s.bucket.DownloadFile(objectName, newFile, 100*1024, Checkpoint(true, ""))
+	err = s.bucket.DownloadFile(objectName, newFile, 100*1024, Checkpoint(true, newFile+".cp"))
 	c.Assert(err, IsNil)
 
 	err = dcp.load(newFile + ".cp")
@@ -221,7 +234,7 @@ func (s *OssDownloadSuite) TestDownloadRoutineWithRecovery(c *C) {
 
 	// Resumable download with checkpoint, in a single pass. No error is expected during the download.
 	os.Remove(newFile)
-	err = s.bucket.DownloadFile(objectName, newFile, 100*1024, Routines(10), Checkpoint(true, ""))
+	err = s.bucket.DownloadFile(objectName, newFile, 100*1024, Routines(10), Checkpoint(true, newFile+".cp"))
 	c.Assert(err, IsNil)
 
 	err = dcp.load(newFile + ".cp")
@@ -263,7 +276,7 @@ func (s *OssDownloadSuite) TestDownloadOption(c *C) {
 	c.Assert(err, NotNil)
 
 	// IfMatch
-	err = s.bucket.DownloadFile(objectName, newFile, 100*1024, Routines(3), Checkpoint(true, ""), IfMatch(meta.Get("Etag")))
+	err = s.bucket.DownloadFile(objectName, newFile, 100*1024, Routines(3), IfMatch(meta.Get("Etag")))
 	c.Assert(err, IsNil)
 
 	eq, err = compareFiles(fileName, newFile)
@@ -271,7 +284,7 @@ func (s *OssDownloadSuite) TestDownloadOption(c *C) {
 	c.Assert(eq, Equals, true)
 
 	// IfNoneMatch
-	err = s.bucket.DownloadFile(objectName, newFile, 100*1024, Routines(3), Checkpoint(true, ""), IfNoneMatch(meta.Get("Etag")))
+	err = s.bucket.DownloadFile(objectName, newFile, 100*1024, Routines(3), IfNoneMatch(meta.Get("Etag")))
 	c.Assert(err, NotNil)
 }
 
@@ -287,7 +300,7 @@ func (s *OssDownloadSuite) TestDownloadObjectChange(c *C) {
 
 	// Download with default checkpoint
 	downloadPartHooker = DownErrorHooker
-	err = s.bucket.DownloadFile(objectName, newFile, 100*1024, Checkpoint(true, ""))
+	err = s.bucket.DownloadFile(objectName, newFile, 100*1024, Checkpoint(true, newFile+".cp"))
 	c.Assert(err, NotNil)
 	c.Assert(err.Error(), Equals, "ErrorHooker")
 	downloadPartHooker = defaultDownloadPartHook
@@ -295,7 +308,7 @@ func (s *OssDownloadSuite) TestDownloadObjectChange(c *C) {
 	err = s.bucket.UploadFile(objectName, fileName, 100*1024, Routines(3))
 	c.Assert(err, IsNil)
 
-	err = s.bucket.DownloadFile(objectName, newFile, 100*1024, Checkpoint(true, ""))
+	err = s.bucket.DownloadFile(objectName, newFile, 100*1024, Checkpoint(true, newFile+".cp"))
 	c.Assert(err, IsNil)
 
 	eq, err := compareFiles(fileName, newFile)
@@ -335,23 +348,23 @@ func (s *OssDownloadSuite) TestDownloadNegative(c *C) {
 	c.Assert(err, IsNil)
 
 	// Local file does not exist
-	err = s.bucket.DownloadFile(objectName, "/tmp/", 100*1024, Checkpoint(true, ""))
+	err = s.bucket.DownloadFile(objectName, "/tmp/", 100*1024)
 	c.Assert(err, NotNil)
 
-	err = s.bucket.DownloadFile(objectName, "/tmp/", 100*1024, Routines(2), Checkpoint(true, ""))
+	err = s.bucket.DownloadFile(objectName, "/tmp/", 100*1024, Routines(2))
 	c.Assert(err, NotNil)
 
 	// Invalid part size
-	err = s.bucket.DownloadFile(objectName, newFile, -1, Checkpoint(true, ""))
+	err = s.bucket.DownloadFile(objectName, newFile, -1)
 	c.Assert(err, NotNil)
 
-	err = s.bucket.DownloadFile(objectName, newFile, 0, Routines(2), Checkpoint(true, ""))
+	err = s.bucket.DownloadFile(objectName, newFile, 0, Routines(2))
 	c.Assert(err, NotNil)
 
-	err = s.bucket.DownloadFile(objectName, newFile, 1024*1024*1024*100, Checkpoint(true, ""))
+	err = s.bucket.DownloadFile(objectName, newFile, 1024*1024*1024*100)
 	c.Assert(err, NotNil)
 
-	err = s.bucket.DownloadFile(objectName, newFile, 1024*1024*1024*100, Routines(2), Checkpoint(true, ""))
+	err = s.bucket.DownloadFile(objectName, newFile, 1024*1024*1024*100, Routines(2))
 	c.Assert(err, NotNil)
 }
 
@@ -457,7 +470,7 @@ func (s *OssDownloadSuite) TestDownloadWithCheckoutAndRange(c *C) {
 	newFileGet := "down-new-file-tdwcr-2.jpg"
 
 	// Upload a file
-	err := s.bucket.UploadFile(objectName, fileName, 100*1024, Routines(3))
+	err := s.bucket.UploadFile(objectName, fileName, 100*1024, Routines(3), Checkpoint(true, fileName+".cp"))
 	c.Assert(err, IsNil)
 
 	fileSize, err := getFileSize(fileName)
@@ -465,7 +478,7 @@ func (s *OssDownloadSuite) TestDownloadWithCheckoutAndRange(c *C) {
 
 	// Download with range, from 1024 to 4096
 	os.Remove(newFile)
-	err = s.bucket.DownloadFile(objectName, newFile, 100*1024, Routines(3), Checkpoint(true, ""), Range(1024, 4095))
+	err = s.bucket.DownloadFile(objectName, newFile, 100*1024, Routines(3), Checkpoint(true, newFile+".cp"), Range(1024, 4095))
 	c.Assert(err, IsNil)
 
 	// Check
@@ -484,7 +497,7 @@ func (s *OssDownloadSuite) TestDownloadWithCheckoutAndRange(c *C) {
 
 	// Download with range, from 1024 to 4096
 	os.Remove(newFile)
-	err = s.bucket.DownloadFile(objectName, newFile, 1024, Routines(3), Checkpoint(true, ""), NormalizedRange("1024-4095"))
+	err = s.bucket.DownloadFile(objectName, newFile, 1024, Routines(3), Checkpoint(true, newFile+".cp"), NormalizedRange("1024-4095"))
 	c.Assert(err, IsNil)
 
 	// Check
@@ -503,7 +516,7 @@ func (s *OssDownloadSuite) TestDownloadWithCheckoutAndRange(c *C) {
 
 	// Download with range, from 2048 to the end
 	os.Remove(newFile)
-	err = s.bucket.DownloadFile(objectName, newFile, 1024*1024, Routines(3), Checkpoint(true, ""), NormalizedRange("2048-"))
+	err = s.bucket.DownloadFile(objectName, newFile, 1024*1024, Routines(3), Checkpoint(true, newFile+".cp"), NormalizedRange("2048-"))
 	c.Assert(err, IsNil)
 
 	// Check
@@ -522,7 +535,7 @@ func (s *OssDownloadSuite) TestDownloadWithCheckoutAndRange(c *C) {
 
 	// Download with range, the last 4096 bytes
 	os.Remove(newFile)
-	err = s.bucket.DownloadFile(objectName, newFile, 1024, Routines(3), Checkpoint(true, ""), NormalizedRange("-4096"))
+	err = s.bucket.DownloadFile(objectName, newFile, 1024, Routines(3), Checkpoint(true, newFile+".cp"), NormalizedRange("-4096"))
 	c.Assert(err, IsNil)
 
 	// Check

+ 7 - 2
oss/error.go

@@ -14,14 +14,19 @@ type ServiceError struct {
 	Message    string   `xml:"Message"`   // The detail error message from OSS
 	RequestID  string   `xml:"RequestId"` // The UUID used to uniquely identify the request
 	HostID     string   `xml:"HostId"`    // The OSS server cluster's Id
+	Endpoint   string   `xml:"Endpoint"`  // The endpoint that should be accessed, returned on wrong-endpoint errors
 	RawMessage string   // The raw messages from OSS
 	StatusCode int      // HTTP status code
 }
 
 // Error implements interface error
 func (e ServiceError) Error() string {
-	return fmt.Sprintf("oss: service returned error: StatusCode=%d, ErrorCode=%s, ErrorMessage=%s, RequestId=%s",
-		e.StatusCode, e.Code, e.Message, e.RequestID)
+	if e.Endpoint == "" {
+		return fmt.Sprintf("oss: service returned error: StatusCode=%d, ErrorCode=%s, ErrorMessage=\"%s\", RequestId=%s",
+			e.StatusCode, e.Code, e.Message, e.RequestID)
+	}
+	return fmt.Sprintf("oss: service returned error: StatusCode=%d, ErrorCode=%s, ErrorMessage=\"%s\", RequestId=%s, Endpoint=%s",
+		e.StatusCode, e.Code, e.Message, e.RequestID, e.Endpoint)
 }
 
 // UnexpectedStatusCodeError is returned when a storage service responds with neither an error
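
The new Endpoint field surfaces the <Endpoint> element OSS returns when a bucket is addressed through the wrong endpoint, so callers can recover the correct endpoint without parsing RawMessage. A hedged sketch of the caller side (object and file names are placeholders; the SDK returns ServiceError by value, so a plain type assertion works):

    // Assumes bucket was obtained via oss.New(...).Bucket(...) as in the samples.
    err := bucket.PutObjectFromFile("my-object", "local.txt")
    if serviceErr, ok := err.(oss.ServiceError); ok && serviceErr.Endpoint != "" {
        log.Printf("retry against %s (code=%s, request=%s)",
            serviceErr.Endpoint, serviceErr.Code, serviceErr.RequestID)
    }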

+ 27 - 0
oss/error_test.go

@@ -3,6 +3,7 @@ package oss
 import (
 	"math"
 	"net/http"
+	"strings"
 
 	. "gopkg.in/check.v1"
 )
@@ -82,3 +83,29 @@ func (s *OssErrorSuite) TestCheckDownloadCRC(c *C) {
 	c.Assert(err, NotNil)
 	testLogger.Println("error:", err)
 }
+
+func (s *OssErrorSuite) TestServiceErrorEndPoint(c *C) {
+	xmlBody := `<?xml version="1.0" encoding="UTF-8"?>
+	<Error>
+	  <Code>AccessDenied</Code>
+	  <Message>The bucket you visit is not belong to you.</Message>
+	  <RequestId>5C1B5E9BD79A6B9B6466166E</RequestId>
+	  <HostId>oss-c-sdk-test-verify-b.oss-cn-shenzhen.aliyuncs.com</HostId>
+	</Error>`
+	serverError, _ := serviceErrFromXML([]byte(xmlBody), 403, "5C1B5E9BD79A6B9B6466166E")
+	errMsg := serverError.Error()
+	c.Assert(strings.Contains(errMsg, "Endpoint="), Equals, false)
+
+	xmlBodyWithEndPoint := `<?xml version="1.0" encoding="UTF-8"?>
+	<Error>
+      <Code>AccessDenied</Code>
+	  <Message>The bucket you are attempting to access must be addressed using the specified endpoint. Please send all future requests to this endpoint.</Message>
+	  <RequestId>5C1B595ED51820B569C6A12F</RequestId>
+	  <HostId>hello-hangzws.oss-cn-qingdao.aliyuncs.com</HostId>
+	  <Bucket>hello-hangzws</Bucket>
+	  <Endpoint>oss-cn-shenzhen.aliyuncs.com</Endpoint>
+	</Error>`
+	serverError, _ = serviceErrFromXML([]byte(xmlBodyWithEndPoint), 406, "5C1B595ED51820B569C6A12F")
+	errMsg = serverError.Error()
+	c.Assert(strings.Contains(errMsg, "Endpoint=oss-cn-shenzhen.aliyuncs.com"), Equals, true)
+}

+ 8 - 0
oss/model.go

@@ -15,6 +15,14 @@ type Response struct {
 	ServerCRC  uint64
 }
 
+func (r *Response) Read(p []byte) (n int, err error) {
+	return r.Body.Read(p)
+}
+
+func (r *Response) Close() error {
+	return r.Body.Close()
+}
+
 // PutObjectRequest is the request of DoPutObject
 type PutObjectRequest struct {
 	ObjectKey string
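
With Read and Close delegating to Body, *Response now satisfies io.ReadCloser and can be handed to any code that consumes a stream. A minimal sketch; the compile-time check holds by construction, and save is an illustrative consumer:

    var _ io.ReadCloser = (*oss.Response)(nil) // delegation makes this hold

    // save drains any ReadCloser (for example a *oss.Response) into a local file.
    func save(r io.ReadCloser, path string) error {
        defer r.Close()
        f, err := os.Create(path)
        if err != nil {
            return err
        }
        defer f.Close()
        _, err = io.Copy(f, r)
        return err
    }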

+ 59 - 51
oss/multicopy.go

@@ -5,9 +5,10 @@ import (
 	"encoding/base64"
 	"encoding/json"
 	"errors"
+	"fmt"
 	"io/ioutil"
+	"net/http"
 	"os"
-	"path/filepath"
 	"strconv"
 )
 
@@ -27,22 +28,30 @@ func (bucket Bucket) CopyFile(srcBucketName, srcObjectKey, destObjectKey string,
 		return errors.New("oss: part size invalid range (1024KB, 5GB]")
 	}
 
-	cpConf, err := getCpConfig(options, filepath.Base(destObjectKey))
-	if err != nil {
-		return err
-	}
-
+	cpConf := getCpConfig(options)
 	routines := getRoutines(options)
 
-	if cpConf.IsEnable {
-		return bucket.copyFileWithCp(srcBucketName, srcObjectKey, destBucketName, destObjectKey,
-			partSize, options, cpConf.FilePath, routines)
+	if cpConf != nil && cpConf.IsEnable {
+		cpFilePath := getCopyCpFilePath(cpConf, srcBucketName, srcObjectKey, destBucketName, destObjectKey)
+		if cpFilePath != "" {
+			return bucket.copyFileWithCp(srcBucketName, srcObjectKey, destBucketName, destObjectKey, partSize, options, cpFilePath, routines)
+		}
 	}
 
 	return bucket.copyFile(srcBucketName, srcObjectKey, destBucketName, destObjectKey,
 		partSize, options, routines)
 }
 
+func getCopyCpFilePath(cpConf *cpConfig, srcBucket, srcObject, destBucket, destObject string) string {
+	if cpConf.FilePath == "" && cpConf.DirPath != "" {
+		dest := fmt.Sprintf("oss://%v/%v", destBucket, destObject)
+		src := fmt.Sprintf("oss://%v/%v", srcBucket, srcObject)
+		cpFileName := getCpFileName(src, dest)
+		cpConf.FilePath = cpConf.DirPath + string(os.PathSeparator) + cpFileName
+	}
+	return cpConf.FilePath
+}
+
 // ----- Concurrently copy without checkpoint ---------
 
 // copyWorkerArg defines the copy worker arguments
@@ -103,18 +112,8 @@ type copyPart struct {
 }
 
 // getCopyParts calculates copy parts
-func getCopyParts(bucket *Bucket, objectKey string, partSize int64) ([]copyPart, error) {
-	meta, err := bucket.GetObjectDetailedMeta(objectKey)
-	if err != nil {
-		return nil, err
-	}
-
+func getCopyParts(objectSize, partSize int64) []copyPart {
 	parts := []copyPart{}
-	objectSize, err := strconv.ParseInt(meta.Get(HTTPHeaderContentLength), 10, 0)
-	if err != nil {
-		return nil, err
-	}
-
 	part := copyPart{}
 	i := 0
 	for offset := int64(0); offset < objectSize; offset += partSize {
@@ -124,7 +123,7 @@ func getCopyParts(bucket *Bucket, objectKey string, partSize int64) ([]copyPart,
 		parts = append(parts, part)
 		i++
 	}
-	return parts, nil
+	return parts
 }
 
 // getSrcObjectBytes gets the source file size
@@ -143,12 +142,24 @@ func (bucket Bucket) copyFile(srcBucketName, srcObjectKey, destBucketName, destO
 	srcBucket, err := bucket.Client.Bucket(srcBucketName)
 	listener := getProgressListener(options)
 
-	// Get copy parts
-	parts, err := getCopyParts(srcBucket, srcObjectKey, partSize)
+	payerOptions := []Option{}
+	payer := getPayer(options)
+	if payer != "" {
+		payerOptions = append(payerOptions, RequestPayer(PayerType(payer)))
+	}
+
+	meta, err := srcBucket.GetObjectDetailedMeta(srcObjectKey, payerOptions...)
 	if err != nil {
 		return err
 	}
 
+	objectSize, err := strconv.ParseInt(meta.Get(HTTPHeaderContentLength), 10, 0)
+	if err != nil {
+		return err
+	}
+
+	// Get copy parts
+	parts := getCopyParts(objectSize, partSize)
 	// Initialize the multipart upload
 	imur, err := descBucket.InitiateMultipartUpload(destObjectKey, options...)
 	if err != nil {
@@ -166,7 +177,7 @@ func (bucket Bucket) copyFile(srcBucketName, srcObjectKey, destBucketName, destO
 	publishProgress(listener, event)
 
 	// Start to copy workers
-	arg := copyWorkerArg{descBucket, imur, srcBucketName, srcObjectKey, options, copyPartHooker}
+	arg := copyWorkerArg{descBucket, imur, srcBucketName, srcObjectKey, payerOptions, copyPartHooker}
 	for w := 1; w <= routines; w++ {
 		go copyWorker(w, arg, jobs, results, failed, die)
 	}
@@ -187,7 +198,7 @@ func (bucket Bucket) copyFile(srcBucketName, srcObjectKey, destBucketName, destO
 			publishProgress(listener, event)
 		case err := <-failed:
 			close(die)
-			descBucket.AbortMultipartUpload(imur)
+			descBucket.AbortMultipartUpload(imur, payerOptions...)
 			event = newProgressEvent(TransferFailedEvent, completedBytes, totalBytes)
 			publishProgress(listener, event)
 			return err
@@ -202,9 +213,9 @@ func (bucket Bucket) copyFile(srcBucketName, srcObjectKey, destBucketName, destO
 	publishProgress(listener, event)
 
 	// Complete the multipart upload
-	_, err = descBucket.CompleteMultipartUpload(imur, ups)
+	_, err = descBucket.CompleteMultipartUpload(imur, ups, payerOptions...)
 	if err != nil {
-		bucket.AbortMultipartUpload(imur)
+		bucket.AbortMultipartUpload(imur, payerOptions...)
 		return err
 	}
 	return nil
@@ -229,7 +240,7 @@ type copyCheckpoint struct {
 }
 
 // isValid checks if the data is valid which means CP is valid and object is not updated.
-func (cp copyCheckpoint) isValid(bucket *Bucket, objectKey string) (bool, error) {
+func (cp copyCheckpoint) isValid(meta http.Header) (bool, error) {
 	// Compare CP's magic number and the MD5.
 	cpb := cp
 	cpb.MD5 = ""
@@ -241,12 +252,6 @@ func (cp copyCheckpoint) isValid(bucket *Bucket, objectKey string) (bool, error)
 		return false, nil
 	}
 
-	// Make sure the object is not updated.
-	meta, err := bucket.GetObjectDetailedMeta(objectKey)
-	if err != nil {
-		return false, err
-	}
-
 	objectSize, err := strconv.ParseInt(meta.Get(HTTPHeaderContentLength), 10, 0)
 	if err != nil {
 		return false, err
@@ -326,7 +331,7 @@ func (cp copyCheckpoint) getCompletedBytes() int64 {
 }
 
 // prepare initializes the multipart upload
-func (cp *copyCheckpoint) prepare(srcBucket *Bucket, srcObjectKey string, destBucket *Bucket, destObjectKey string,
+func (cp *copyCheckpoint) prepare(meta http.Header, srcBucket *Bucket, srcObjectKey string, destBucket *Bucket, destObjectKey string,
 	partSize int64, options []Option) error {
 	// CP
 	cp.Magic = copyCpMagic
@@ -335,12 +340,6 @@ func (cp *copyCheckpoint) prepare(srcBucket *Bucket, srcObjectKey string, destBu
 	cp.DestBucketName = destBucket.BucketName
 	cp.DestObjectKey = destObjectKey
 
-	// Object
-	meta, err := srcBucket.GetObjectDetailedMeta(srcObjectKey)
-	if err != nil {
-		return err
-	}
-
 	objectSize, err := strconv.ParseInt(meta.Get(HTTPHeaderContentLength), 10, 0)
 	if err != nil {
 		return err
@@ -351,10 +350,7 @@ func (cp *copyCheckpoint) prepare(srcBucket *Bucket, srcObjectKey string, destBu
 	cp.ObjStat.Etag = meta.Get(HTTPHeaderEtag)
 
 	// Parts
-	cp.Parts, err = getCopyParts(srcBucket, srcObjectKey, partSize)
-	if err != nil {
-		return err
-	}
+	cp.Parts = getCopyParts(objectSize, partSize)
 	cp.PartStat = make([]bool, len(cp.Parts))
 	for i := range cp.PartStat {
 		cp.PartStat[i] = false
@@ -371,10 +367,10 @@ func (cp *copyCheckpoint) prepare(srcBucket *Bucket, srcObjectKey string, destBu
 	return nil
 }
 
-func (cp *copyCheckpoint) complete(bucket *Bucket, parts []UploadPart, cpFilePath string) error {
+func (cp *copyCheckpoint) complete(bucket *Bucket, parts []UploadPart, cpFilePath string, options []Option) error {
 	imur := InitiateMultipartUploadResult{Bucket: cp.DestBucketName,
 		Key: cp.DestObjectKey, UploadID: cp.CopyID}
-	_, err := bucket.CompleteMultipartUpload(imur, parts)
+	_, err := bucket.CompleteMultipartUpload(imur, parts, options...)
 	if err != nil {
 		return err
 	}
@@ -389,6 +385,12 @@ func (bucket Bucket) copyFileWithCp(srcBucketName, srcObjectKey, destBucketName,
 	srcBucket, err := bucket.Client.Bucket(srcBucketName)
 	listener := getProgressListener(options)
 
+	payerOptions := []Option{}
+	payer := getPayer(options)
+	if payer != "" {
+		payerOptions = append(payerOptions, RequestPayer(PayerType(payer)))
+	}
+
 	// Load CP data
 	ccp := copyCheckpoint{}
 	err = ccp.load(cpFilePath)
@@ -396,10 +398,16 @@ func (bucket Bucket) copyFileWithCp(srcBucketName, srcObjectKey, destBucketName,
 		os.Remove(cpFilePath)
 	}
 
+	// Make sure the object is not updated.
+	meta, err := srcBucket.GetObjectDetailedMeta(srcObjectKey, payerOptions...)
+	if err != nil {
+		return err
+	}
+
 	// Load error or the CP data is invalid---reinitialize
-	valid, err := ccp.isValid(srcBucket, srcObjectKey)
+	valid, err := ccp.isValid(meta)
 	if err != nil || !valid {
-		if err = ccp.prepare(srcBucket, srcObjectKey, descBucket, destObjectKey, partSize, options); err != nil {
+		if err = ccp.prepare(meta, srcBucket, srcObjectKey, descBucket, destObjectKey, partSize, options); err != nil {
 			return err
 		}
 		os.Remove(cpFilePath)
@@ -422,7 +430,7 @@ func (bucket Bucket) copyFileWithCp(srcBucketName, srcObjectKey, destBucketName,
 	publishProgress(listener, event)
 
 	// Start the worker coroutines
-	arg := copyWorkerArg{descBucket, imur, srcBucketName, srcObjectKey, options, copyPartHooker}
+	arg := copyWorkerArg{descBucket, imur, srcBucketName, srcObjectKey, payerOptions, copyPartHooker}
 	for w := 1; w <= routines; w++ {
 		go copyWorker(w, arg, jobs, results, failed, die)
 	}
@@ -456,5 +464,5 @@ func (bucket Bucket) copyFileWithCp(srcBucketName, srcObjectKey, destBucketName,
 	event = newProgressEvent(TransferCompletedEvent, completedBytes, ccp.ObjStat.Size)
 	publishProgress(listener, event)
 
-	return ccp.complete(descBucket, ccp.CopyParts, cpFilePath)
+	return ccp.complete(descBucket, ccp.CopyParts, cpFilePath, payerOptions)
 }
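
CopyFile now follows the same pattern as DownloadFile: the source object's meta is fetched once up front (with the payer option, if any) and threaded into getCopyParts, isValid, and prepare, and only the payer option is forwarded to the per-part requests. Caller-side, the checkpoint can live in a directory; a sketch with placeholder names:

    // Assumes destBucket was obtained via client.Bucket(destBucketName).
    // Resumable copy; the checkpoint file name is derived from the oss://
    // source and destination URLs, so concurrent copies do not collide.
    err := destBucket.CopyFile("src-bucket", "src-object", "dest-object",
        10*1024*1024, oss.Routines(3), oss.CheckpointDir(true, "./cp"))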

+ 26 - 14
oss/multicopy_test.go

@@ -254,7 +254,7 @@ func (s *OssCopySuite) TestCopyRoutineWithRecovery(c *C) {
 	// Copy object with checkpoint enabled, single routine.
 	// Copy 4 parts---the CopyErrorHooker makes sure the copy of part 5 will fail.
 	copyPartHooker = CopyErrorHooker
-	err = s.bucket.CopyFile(bucketName, srcObjectName, destObjectName, 1024*100, Checkpoint(true, ""))
+	err = s.bucket.CopyFile(bucketName, srcObjectName, destObjectName, 1024*100, Checkpoint(true, destObjectName+".cp"))
 	c.Assert(err, NotNil)
 	c.Assert(err.Error(), Equals, "ErrorHooker")
 	copyPartHooker = defaultCopyPartHook
@@ -278,7 +278,7 @@ func (s *OssCopySuite) TestCopyRoutineWithRecovery(c *C) {
 	c.Assert(ccp.PartStat[4], Equals, false)
 
 	// Second copy, finish the last part
-	err = s.bucket.CopyFile(bucketName, srcObjectName, destObjectName, 1024*100, Checkpoint(true, ""))
+	err = s.bucket.CopyFile(bucketName, srcObjectName, destObjectName, 1024*100, Checkpoint(true, destObjectName+".cp"))
 	c.Assert(err, IsNil)
 
 	err = s.bucket.GetObjectToFile(destObjectName, newFile)
@@ -295,16 +295,28 @@ func (s *OssCopySuite) TestCopyRoutineWithRecovery(c *C) {
 	err = ccp.load(fileName + ".cp")
 	c.Assert(err, NotNil)
 
-	// Specify Routine and CP's path
+	// Multicopy with an empty checkpoint file path
+	copyPartHooker = CopyErrorHooker
+	err = s.bucket.CopyFile(bucketName, srcObjectName, destObjectName, 1024*100, Checkpoint(true, ""))
+	c.Assert(err, NotNil)
+	c.Assert(err.Error(), Equals, "ErrorHooker")
+	copyPartHooker = defaultCopyPartHook
+	ccp = copyCheckpoint{}
+	err = ccp.load(destObjectName + ".cp")
+	c.Assert(err, NotNil)
+
+	// Multicopy with a checkpoint dir
 	copyPartHooker = CopyErrorHooker
-	err = s.bucket.CopyFile(bucketName, srcObjectName, destObjectName, 1024*100, Routines(2), Checkpoint(true, srcObjectName+".cp"))
+	err = s.bucket.CopyFile(bucketName, srcObjectName, destObjectName, 1024*100, Routines(2), CheckpointDir(true, "./"))
 	c.Assert(err, NotNil)
 	c.Assert(err.Error(), Equals, "ErrorHooker")
 	copyPartHooker = defaultCopyPartHook
 
 	// Check CP
 	ccp = copyCheckpoint{}
-	err = ccp.load(srcObjectName + ".cp")
+	cpConf := cpConfig{IsEnable: true, DirPath: "./"}
+	cpFilePath := getCopyCpFilePath(&cpConf, bucketName, srcObjectName, s.bucket.BucketName, destObjectName)
+	err = ccp.load(cpFilePath)
 	c.Assert(err, IsNil)
 	c.Assert(ccp.Magic, Equals, copyCpMagic)
 	c.Assert(len(ccp.MD5), Equals, len("LC34jZU5xK4hlxi3Qn3XGQ=="))
@@ -321,7 +333,7 @@ func (s *OssCopySuite) TestCopyRoutineWithRecovery(c *C) {
 	c.Assert(ccp.PartStat[4], Equals, false)
 
 	// Second copy, finish the last part.
-	err = s.bucket.CopyFile(bucketName, srcObjectName, destObjectName, 1024*100, Routines(2), Checkpoint(true, srcObjectName+".cp"))
+	err = s.bucket.CopyFile(bucketName, srcObjectName, destObjectName, 1024*100, Routines(2), CheckpointDir(true, "./"))
 	c.Assert(err, IsNil)
 
 	err = s.bucket.GetObjectToFile(destObjectName, newFile)
@@ -339,7 +351,7 @@ func (s *OssCopySuite) TestCopyRoutineWithRecovery(c *C) {
 	c.Assert(err, NotNil)
 
 	// First copy without error.
-	err = s.bucket.CopyFile(bucketName, srcObjectName, destObjectName, 1024*100, Routines(3), Checkpoint(true, ""))
+	err = s.bucket.CopyFile(bucketName, srcObjectName, destObjectName, 1024*100, Routines(3), Checkpoint(true, destObjectName+".cp"))
 	c.Assert(err, IsNil)
 
 	err = s.bucket.GetObjectToFile(destObjectName, newFile)
@@ -354,7 +366,7 @@ func (s *OssCopySuite) TestCopyRoutineWithRecovery(c *C) {
 	os.Remove(newFile)
 
 	// Copy with multiple coroutines, no errors.
-	err = s.bucket.CopyFile(bucketName, srcObjectName, destObjectName, 1024*100, Routines(10), Checkpoint(true, ""))
+	err = s.bucket.CopyFile(bucketName, srcObjectName, destObjectName, 1024*100, Routines(10), Checkpoint(true, destObjectName+".cp"))
 	c.Assert(err, IsNil)
 
 	err = s.bucket.GetObjectToFile(destObjectName, newFile)
@@ -369,7 +381,7 @@ func (s *OssCopySuite) TestCopyRoutineWithRecovery(c *C) {
 	os.Remove(newFile)
 
 	// Option
-	err = s.bucket.CopyFile(bucketName, srcObjectName, destObjectName, 1024*100, Routines(5), Checkpoint(true, ""), Meta("myprop", "mypropval"))
+	err = s.bucket.CopyFile(bucketName, srcObjectName, destObjectName, 1024*100, Routines(5), Checkpoint(true, destObjectName+".cp"), Meta("myprop", "mypropval"))
 	c.Assert(err, IsNil)
 
 	meta, err := s.bucket.GetObjectDetailedMeta(destObjectName)
@@ -398,19 +410,19 @@ func (s *OssCopySuite) TestCopyRoutineWithRecoveryNegative(c *C) {
 	destObjectName := srcObjectName + "-copy"
 
 	// Source bucket does not exist
-	err := s.bucket.CopyFile("NotExist", srcObjectName, destObjectName, 100*1024, Checkpoint(true, ""))
+	err := s.bucket.CopyFile("NotExist", srcObjectName, destObjectName, 100*1024, Checkpoint(true, destObjectName+".cp"))
 	c.Assert(err, NotNil)
 	c.Assert(err, NotNil)
 
 	// Source object does not exist
-	err = s.bucket.CopyFile(bucketName, "NotExist", destObjectName, 100*1024, Routines(2), Checkpoint(true, ""))
+	err = s.bucket.CopyFile(bucketName, "NotExist", destObjectName, 100*1024, Routines(2), Checkpoint(true, destObjectName+".cp"))
 	c.Assert(err, NotNil)
 
 	// Specify part size is invalid.
-	err = s.bucket.CopyFile(bucketName, srcObjectName, destObjectName, 1024, Checkpoint(true, ""))
+	err = s.bucket.CopyFile(bucketName, srcObjectName, destObjectName, 1024, Checkpoint(true, destObjectName+".cp"))
 	c.Assert(err, NotNil)
 
-	err = s.bucket.CopyFile(bucketName, srcObjectName, destObjectName, 1024*1024*1024*100, Routines(2), Checkpoint(true, ""))
+	err = s.bucket.CopyFile(bucketName, srcObjectName, destObjectName, 1024*1024*1024*100, Routines(2), Checkpoint(true, destObjectName+".cp"))
 	c.Assert(err, NotNil)
 }
 
@@ -434,7 +446,7 @@ func (s *OssCopySuite) TestCopyFileCrossBucket(c *C) {
 	os.Remove(newFile)
 
 	// Copy files
-	err = destBucket.CopyFile(bucketName, srcObjectName, destObjectName, 1024*100, Routines(5), Checkpoint(true, ""))
+	err = destBucket.CopyFile(bucketName, srcObjectName, destObjectName, 1024*100, Routines(5), Checkpoint(true, destObjectName+".cp"))
 	c.Assert(err, IsNil)
 
 	err = destBucket.GetObjectToFile(destObjectName, newFile)

+ 22 - 10
oss/multipart.go

@@ -5,6 +5,7 @@ import (
 	"encoding/xml"
 	"io"
 	"net/http"
+	"net/url"
 	"os"
 	"sort"
 	"strconv"
@@ -13,7 +14,7 @@ import (
 // InitiateMultipartUpload initializes multipart upload
 //
 // objectKey    object name
-// options    the object constricts for upload. The valid options are CacheControl, ContentDisposition, ContentEncoding, Expires, 
+// options    the object constraints for upload. The valid options are CacheControl, ContentDisposition, ContentEncoding, Expires,
 //            ServerSideEncryption, Meta, check out the following link:
 //            https://help.aliyun.com/document_detail/oss/api-reference/multipart-upload/InitiateMultipartUpload.html
 //
@@ -106,11 +107,11 @@ func (bucket Bucket) UploadPartFromFile(imur InitiateMultipartUploadResult, file
 //
 func (bucket Bucket) DoUploadPart(request *UploadPartRequest, options []Option) (*UploadPartResult, error) {
 	listener := getProgressListener(options)
-	opts := []Option{ContentLength(request.PartSize)}
+	options = append(options, ContentLength(request.PartSize))
 	params := map[string]interface{}{}
 	params["partNumber"] = strconv.Itoa(request.PartNumber)
 	params["uploadId"] = request.InitResult.UploadID
-	resp, err := bucket.do("PUT", request.InitResult.Key, params, opts,
+	resp, err := bucket.do("PUT", request.InitResult.Key, params, options,
 		&io.LimitedReader{R: request.Reader, N: request.PartSize}, listener)
 	if err != nil {
 		return &UploadPartResult{}, err
@@ -151,7 +152,7 @@ func (bucket Bucket) UploadPartCopy(imur InitiateMultipartUploadResult, srcBucke
 	var out UploadPartCopyResult
 	var part UploadPart
 
-	opts := []Option{CopySource(srcBucketName, srcObjectKey),
+	opts := []Option{CopySource(srcBucketName, url.QueryEscape(srcObjectKey)),
 		CopySourceRange(startPosition, partSize)}
 	opts = append(opts, options...)
 	params := map[string]interface{}{}
@@ -182,7 +183,7 @@ func (bucket Bucket) UploadPartCopy(imur InitiateMultipartUploadResult, srcBucke
 // error    it's nil if the operation succeeds, otherwise it's an error object.
 //
 func (bucket Bucket) CompleteMultipartUpload(imur InitiateMultipartUploadResult,
-	parts []UploadPart) (CompleteMultipartUploadResult, error) {
+	parts []UploadPart, options ...Option) (CompleteMultipartUploadResult, error) {
 	var out CompleteMultipartUploadResult
 
 	sort.Sort(uploadParts(parts))
@@ -197,7 +198,7 @@ func (bucket Bucket) CompleteMultipartUpload(imur InitiateMultipartUploadResult,
 
 	params := map[string]interface{}{}
 	params["uploadId"] = imur.UploadID
-	resp, err := bucket.do("POST", imur.Key, params, nil, buffer, nil)
+	resp, err := bucket.do("POST", imur.Key, params, options, buffer, nil)
 	if err != nil {
 		return out, err
 	}
@@ -213,10 +214,10 @@ func (bucket Bucket) CompleteMultipartUpload(imur InitiateMultipartUploadResult,
 //
 // error    it's nil if the operation succeeds, otherwise it's an error object.
 //
-func (bucket Bucket) AbortMultipartUpload(imur InitiateMultipartUploadResult) error {
+func (bucket Bucket) AbortMultipartUpload(imur InitiateMultipartUploadResult, options ...Option) error {
 	params := map[string]interface{}{}
 	params["uploadId"] = imur.UploadID
-	resp, err := bucket.do("DELETE", imur.Key, params, nil, nil, nil)
+	resp, err := bucket.do("DELETE", imur.Key, params, options, nil, nil)
 	if err != nil {
 		return err
 	}
@@ -231,9 +232,16 @@ func (bucket Bucket) AbortMultipartUpload(imur InitiateMultipartUploadResult) er
 // ListUploadedPartsResponse    the return value if it succeeds, only valid when error is nil.
 // error    it's nil if the operation succeeds, otherwise it's an error object.
 //
-func (bucket Bucket) ListUploadedParts(imur InitiateMultipartUploadResult) (ListUploadedPartsResult, error) {
+func (bucket Bucket) ListUploadedParts(imur InitiateMultipartUploadResult, options ...Option) (ListUploadedPartsResult, error) {
 	var out ListUploadedPartsResult
+	options = append(options, EncodingType("url"))
+
 	params := map[string]interface{}{}
+	params, err := getRawParams(options)
+	if err != nil {
+		return out, err
+	}
+
 	params["uploadId"] = imur.UploadID
 	resp, err := bucket.do("GET", imur.Key, params, nil, nil, nil)
 	if err != nil {
@@ -242,6 +250,10 @@ func (bucket Bucket) ListUploadedParts(imur InitiateMultipartUploadResult) (List
 	defer resp.Body.Close()
 
 	err = xmlUnmarshal(resp.Body, &out)
+	if err != nil {
+		return out, err
+	}
+	err = decodeListUploadedPartsResult(&out)
 	return out, err
 }
 
@@ -263,7 +275,7 @@ func (bucket Bucket) ListMultipartUploads(options ...Option) (ListMultipartUploa
 	}
 	params["uploads"] = nil
 
-	resp, err := bucket.do("GET", "", params, nil, nil, nil)
+	resp, err := bucket.do("GET", "", params, options, nil, nil)
 	if err != nil {
 		return out, err
 	}
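
ListUploadedParts now accepts options, and combined with the new MaxParts and PartNumberMarker params (see oss/option.go below) the part listing can be paged. A sketch, assuming imur came from an earlier InitiateMultipartUpload:

    // List parts 100 at a time, starting after part number 0.
    lsRes, err := bucket.ListUploadedParts(imur,
        oss.MaxParts(100), oss.PartNumberMarker(0))
    if err == nil {
        for _, part := range lsRes.UploadedParts {
            fmt.Println("part:", part.PartNumber, part.Size)
        }
    }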

+ 2 - 2
oss/multipart_test.go

@@ -622,7 +622,7 @@ func (s *OssBucketMultipartSuite) TestMultipartNegative(c *C) {
 	imur, err := s.bucket.InitiateMultipartUpload(string(data))
 	c.Assert(err, NotNil)
 
-	// Invalid imur 
+	// Invalid imur
 	fileName := "../sample/BingWallpaper-2015-11-07.jpg"
 	fd, err := os.Open(fileName)
 	c.Assert(err, IsNil)
@@ -668,7 +668,7 @@ func (s *OssBucketMultipartSuite) TestMultipartNegative(c *C) {
 	err = s.bucket.AbortMultipartUpload(imur)
 	c.Assert(err, IsNil)
 
-	// Invalid option 
+	// Invalid option
 	_, err = s.bucket.InitiateMultipartUpload(objectName, IfModifiedSince(futureDate))
 	c.Assert(err, IsNil)
 }

+ 50 - 3
oss/option.go

@@ -65,6 +65,11 @@ func ContentEncoding(value string) Option {
 	return setHeader(HTTPHeaderContentEncoding, value)
 }
 
+// ContentLanguage is an option to set Content-Language header
+func ContentLanguage(value string) Option {
+	return setHeader(HTTPHeaderContentLanguage, value)
+}
+
 // ContentMD5 is an option to set Content-MD5 header
 func ContentMD5(value string) Option {
 	return setHeader(HTTPHeaderContentMD5, value)
@@ -157,6 +162,11 @@ func ServerSideEncryption(value string) Option {
 	return setHeader(HTTPHeaderOssServerSideEncryption, value)
 }
 
+// ServerSideEncryptionKeyID is an option to set X-Oss-Server-Side-Encryption-Key-Id header
+func ServerSideEncryptionKeyID(value string) Option {
+	return setHeader(HTTPHeaderOssServerSideEncryptionKeyID, value)
+}
+
 // ObjectACL is an option to set X-Oss-Object-Acl header
 func ObjectACL(acl ACLType) Option {
 	return setHeader(HTTPHeaderOssObjectACL, string(acl))
@@ -172,6 +182,26 @@ func Origin(value string) Option {
 	return setHeader(HTTPHeaderOrigin, value)
 }
 
+// ObjectStorageClass is an option to set the storage class of an object
+func ObjectStorageClass(storageClass StorageClassType) Option {
+	return setHeader(HTTPHeaderOssStorageClass, string(storageClass))
+}
+
+// Callback is an option to set the callback value (Base64-encoded JSON)
+func Callback(callback string) Option {
+	return setHeader(HTTPHeaderOssCallback, callback)
+}
+
+// CallbackVar is an option to set user-defined callback variables (Base64-encoded JSON)
+func CallbackVar(callbackVar string) Option {
+	return setHeader(HTTPHeaderOssCallbackVar, callbackVar)
+}
+
+// RequestPayer is an option to set the payer who pays for the request
+func RequestPayer(payerType PayerType) Option {
+	return setHeader(HTTPHeaderOSSRequester, string(payerType))
+}
+
 // Delimiter is an option to set delimiler parameter
 func Delimiter(value string) Option {
 	return addParam("delimiter", value)
@@ -212,6 +242,16 @@ func UploadIDMarker(value string) Option {
 	return addParam("upload-id-marker", value)
 }
 
+// MaxParts is an option to set max-parts parameter
+func MaxParts(value int) Option {
+	return addParam("max-parts", strconv.Itoa(value))
+}
+
+// PartNumberMarker is an option to set part-number-marker parameter
+func PartNumberMarker(value int) Option {
+	return addParam("part-number-marker", strconv.Itoa(value))
+}
+
 // DeleteObjectsQuiet false:DeleteObjects in verbose mode; true:DeleteObjects in quiet mode. Default is false.
 func DeleteObjectsQuiet(isQuiet bool) Option {
 	return addArg(deleteObjectsQuiet, isQuiet)
@@ -226,11 +266,17 @@ func StorageClass(value StorageClassType) Option {
 type cpConfig struct {
 	IsEnable bool
 	FilePath string
+	DirPath  string
 }
 
 // Checkpoint sets the isEnable flag and checkpoint file path for DownloadFile/UploadFile.
 func Checkpoint(isEnable bool, filePath string) Option {
-	return addArg(checkpointConfig, &cpConfig{isEnable, filePath})
+	return addArg(checkpointConfig, &cpConfig{IsEnable: isEnable, FilePath: filePath})
+}
+
+// CheckpointDir sets the isEnable flag and checkpoint dir path for DownloadFile/UploadFile.
+func CheckpointDir(isEnable bool, dirPath string) Option {
+	return addArg(checkpointConfig, &cpConfig{IsEnable: isEnable, DirPath: dirPath})
 }
 
 // Routines DownloadFile/UploadFile routine count
@@ -278,10 +324,11 @@ func ResponseContentEncoding(value string) Option {
 	return addParam("response-content-encoding", value)
 }
 
-// Process is an option to set X-Oss-Process param
+// Process is an option to set x-oss-process param
 func Process(value string) Option {
-	return addParam("X-Oss-Process", value)
+	return addParam("x-oss-process", value)
 }
+
 func setHeader(key string, value interface{}) Option {
 	return func(params map[string]optionValue) error {
 		if value == nil {
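
The callback options carry Base64-encoded JSON, matching the values exercised in the tests below. A hedged sketch of building one (the URL, body template, and object are placeholders; callbackUrl/callbackBody follow the OSS callback conventions):

    // Assumes bucket was obtained via oss.New(...).Bucket(...) as in the samples.
    // Ask OSS to call back to an application server after the upload completes.
    cb := map[string]string{
        "callbackUrl":  "http://example.com/oss-callback",
        "callbackBody": "bucket=${bucket}&object=${object}",
    }
    raw, _ := json.Marshal(cb)
    err := bucket.PutObject("my-object", strings.NewReader("hello"),
        oss.Callback(base64.StdEncoding.EncodeToString(raw)))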

+ 41 - 1
oss/option_test.go

@@ -127,6 +127,31 @@ var headerTestcases = []optionTestCase{
 		key:    "X-Oss-Object-Acl",
 		value:  "private",
 	},
+	{
+		option: ObjectStorageClass(StorageStandard),
+		key:    "X-Oss-Storage-Class",
+		value:  "Standard",
+	},
+	{
+		option: Callback("JTdCJTIyY2FsbGJhY2tVcmwlMjIlM0ElMjJleGFtcGxlLmNvbS9pbmRleC5odG1sJTIyJTdE"),
+		key:    "X-Oss-Callback",
+		value:  "JTdCJTIyY2FsbGJhY2tVcmwlMjIlM0ElMjJleGFtcGxlLmNvbS9pbmRleC5odG1sJTIyJTdE",
+	},
+	{
+		option: CallbackVar("JTdCJTIyeCUzQXZhcjElMjIlM0ElMjJ2YWx1ZTElMjIlMkMlMjJ4JTNBdmFyMiUyMiUzQSUyMnZhbHVlMiUyMiU3RA=="),
+		key:    "X-Oss-Callback-Var",
+		value:  "JTdCJTIyeCUzQXZhcjElMjIlM0ElMjJ2YWx1ZTElMjIlMkMlMjJ4JTNBdmFyMiUyMiUzQSUyMnZhbHVlMiUyMiU3RA==",
+	},
+	{
+		option: ContentLanguage("zh-CN"),
+		key:    "Content-Language",
+		value:  "zh-CN",
+	},
+	{
+		option: ServerSideEncryptionKeyID("xossekid"),
+		key:    "X-Oss-Server-Side-Encryption-Key-Id",
+		value:  "xossekid",
+	},
 }
 
 func (s *OssOptionSuite) TestHeaderOptions(c *C) {
@@ -181,6 +206,21 @@ var paramTestCases = []optionTestCase{
 		key:    "upload-id-marker",
 		value:  "xyz",
 	},
+	{
+		option: MaxParts(1000),
+		key:    "max-parts",
+		value:  "1000",
+	},
+	{
+		option: PartNumberMarker(1),
+		key:    "part-number-marker",
+		value:  "1",
+	},
+	{
+		option: Process("image/format,png"),
+		key:    "x-oss-process",
+		value:  "image/format,png",
+	},
 }
 
 func (s *OssOptionSuite) TestParamOptions(c *C) {
@@ -231,7 +271,7 @@ func (s *OssOptionSuite) TestHandleParams(c *C) {
 	c.Assert(err, IsNil)
 
 	out := client.Conn.getURLParams(params)
-	c.Assert(len(out), Equals, 120)
+	c.Assert(len(out), Equals, 191)
 
 	options = []Option{KeyMarker(""), nil}
 

+ 8 - 1
oss/progress.go

@@ -62,7 +62,7 @@ type teeReader struct {
 // corresponding writes to w.  There is no internal buffering -
 // the write must complete before the read completes.
 // Any error encountered while writing is reported as a read error.
-func TeeReader(reader io.Reader, writer io.Writer, totalBytes int64, listener ProgressListener, tracker *readerTracker) io.Reader {
+func TeeReader(reader io.Reader, writer io.Writer, totalBytes int64, listener ProgressListener, tracker *readerTracker) io.ReadCloser {
 	return &teeReader{
 		reader:        reader,
 		writer:        writer,
@@ -103,3 +103,10 @@ func (t *teeReader) Read(p []byte) (n int, err error) {
 
 	return
 }
+
+func (t *teeReader) Close() error {
+	if rc, ok := t.reader.(io.ReadCloser); ok {
+		return rc.Close()
+	}
+	return nil
+}
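
Because GetObject wraps the response body in a TeeReader when a progress listener is set, returning io.ReadCloser and forwarding Close keeps the caller's usual defer body.Close() working and releases the underlying HTTP body. A sketch of a listener-driven read; the logListener type is illustrative:

    type logListener struct{}

    func (l *logListener) ProgressChanged(event *oss.ProgressEvent) {
        log.Printf("event=%v consumed=%d total=%d",
            event.EventType, event.ConsumedBytes, event.TotalBytes)
    }

    // Assumes bucket was obtained via oss.New(...).Bucket(...).
    body, err := bucket.GetObject("my-object", oss.Progress(&logListener{}))
    if err == nil {
        defer body.Close() // closes through teeReader to the HTTP body
        io.Copy(ioutil.Discard, body)
    }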

+ 2 - 0
oss/transport_1_6.go

@@ -9,6 +9,7 @@ import (
 
 func newTransport(conn *Conn, config *Config) *http.Transport {
 	httpTimeOut := conn.config.HTTPTimeout
+	httpMaxConns := conn.config.HTTPMaxConns
 	// New Transport
 	transport := &http.Transport{
 		Dial: func(netw, addr string) (net.Conn, error) {
@@ -18,6 +19,7 @@ func newTransport(conn *Conn, config *Config) *http.Transport {
 			}
 			return newTimeoutConn(conn, httpTimeOut.ReadWriteTimeout, httpTimeOut.LongTimeout), nil
 		},
+		MaxIdleConnsPerHost:   httpMaxConns.MaxIdleConnsPerHost,
 		ResponseHeaderTimeout: httpTimeOut.HeaderTimeout,
 	}
 	return transport

+ 3 - 0
oss/transport_1_7.go

@@ -9,6 +9,7 @@ import (
 
 func newTransport(conn *Conn, config *Config) *http.Transport {
 	httpTimeOut := conn.config.HTTPTimeout
+	httpMaxConns := conn.config.HTTPMaxConns
 	// New Transport
 	transport := &http.Transport{
 		Dial: func(netw, addr string) (net.Conn, error) {
@@ -18,6 +19,8 @@ func newTransport(conn *Conn, config *Config) *http.Transport {
 			}
 			return newTimeoutConn(conn, httpTimeOut.ReadWriteTimeout, httpTimeOut.LongTimeout), nil
 		},
+		MaxIdleConns:          httpMaxConns.MaxIdleConns,
+		MaxIdleConnsPerHost:   httpMaxConns.MaxIdleConnsPerHost,
 		IdleConnTimeout:       httpTimeOut.IdleConnTimeout,
 		ResponseHeaderTimeout: httpTimeOut.HeaderTimeout,
 	}
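
The two build-tagged transports exist because MaxIdleConns and IdleConnTimeout only appeared in net/http in Go 1.7; the 1.6 variant can set MaxIdleConnsPerHost alone. In stdlib terms, the 1.7+ variant amounts to the following (the values are placeholders, not the SDK defaults):

    tr := &http.Transport{
        MaxIdleConns:        100,              // idle pool across all hosts (Go >= 1.7)
        MaxIdleConnsPerHost: 16,               // stdlib default is only 2
        IdleConnTimeout:     50 * time.Second, // Go >= 1.7
    }
    httpClient := &http.Client{Transport: tr}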

+ 18 - 0
oss/type.go

@@ -363,6 +363,14 @@ type UncompletedUpload struct {
 	Initiated time.Time `xml:"Initiated"` // Initialization time in the format such as 2012-02-23T04:18:23.000Z
 }
 
+// ProcessObjectResult defines result object of ProcessObject
+type ProcessObjectResult struct {
+	Bucket   string `json:"bucket"`
+	FileSize int    `json:"fileSize"`
+	Object   string `json:"object"`
+	Status   string `json:"status"`
+}
+
 // decodeDeleteObjectsResult decodes deleting objects result in URL encoding
 func decodeDeleteObjectsResult(result *DeleteObjectsResult) error {
 	var err error
@@ -409,6 +417,16 @@ func decodeListObjectsResult(result *ListObjectsResult) error {
 	return nil
 }
 
+// decodeListUploadedPartsResult decodes the object key of the uploaded parts result in URL encoding
+func decodeListUploadedPartsResult(result *ListUploadedPartsResult) error {
+	var err error
+	result.Key, err = url.QueryUnescape(result.Key)
+	if err != nil {
+		return err
+	}
+	return nil
+}
+
 // decodeListMultipartUploadResult decodes list multipart upload result in URL encoding
 func decodeListMultipartUploadResult(result *ListMultipartUploadResult) error {
 	var err error
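
ProcessObjectResult is decoded from a JSON body rather than XML (ProcessObject itself lands in the bucket.go hunk, which is not shown here), so decoding is plain encoding/json. A sketch with an illustrative payload:

    var res oss.ProcessObjectResult
    payload := []byte(`{"bucket":"my-bucket","fileSize":3267,"object":"dest.png","status":"OK"}`)
    if err := json.Unmarshal(payload, &res); err == nil {
        fmt.Println(res.Object, res.FileSize, res.Status)
    }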

+ 67 - 25
oss/upload.go

@@ -3,10 +3,13 @@ package oss
 import (
 	"crypto/md5"
 	"encoding/base64"
+	"encoding/hex"
 	"encoding/json"
 	"errors"
+	"fmt"
 	"io/ioutil"
 	"os"
+	"path/filepath"
 	"time"
 )
 
@@ -21,39 +24,55 @@ import (
 //
 func (bucket Bucket) UploadFile(objectKey, filePath string, partSize int64, options ...Option) error {
 	if partSize < MinPartSize || partSize > MaxPartSize {
-		return errors.New("oss: part size invalid range (1024KB, 5GB]")
-	}
-
-	cpConf, err := getCpConfig(options, filePath)
-	if err != nil {
-		return err
+		return errors.New("oss: part size invalid range (100KB, 5GB]")
 	}
 
+	cpConf := getCpConfig(options)
 	routines := getRoutines(options)
 
-	if cpConf.IsEnable {
-		return bucket.uploadFileWithCp(objectKey, filePath, partSize, options, cpConf.FilePath, routines)
+	if cpConf != nil && cpConf.IsEnable {
+		cpFilePath := getUploadCpFilePath(cpConf, filePath, bucket.BucketName, objectKey)
+		if cpFilePath != "" {
+			return bucket.uploadFileWithCp(objectKey, filePath, partSize, options, cpFilePath, routines)
+		}
 	}
 
 	return bucket.uploadFile(objectKey, filePath, partSize, options, routines)
 }
 
+func getUploadCpFilePath(cpConf *cpConfig, srcFile, destBucket, destObject string) string {
+	if cpConf.FilePath == "" && cpConf.DirPath != "" {
+		dest := fmt.Sprintf("oss://%v/%v", destBucket, destObject)
+		absPath, _ := filepath.Abs(srcFile)
+		cpFileName := getCpFileName(absPath, dest)
+		cpConf.FilePath = cpConf.DirPath + string(os.PathSeparator) + cpFileName
+	}
+	return cpConf.FilePath
+}
+
 // ----- concurrent upload without checkpoint  -----
 
 // getCpConfig gets checkpoint configuration
-func getCpConfig(options []Option, filePath string) (*cpConfig, error) {
-	cpc := &cpConfig{}
+func getCpConfig(options []Option) *cpConfig {
 	cpcOpt, err := findOption(options, checkpointConfig, nil)
 	if err != nil || cpcOpt == nil {
-		return cpc, err
+		return nil
 	}
 
-	cpc = cpcOpt.(*cpConfig)
-	if cpc.IsEnable && cpc.FilePath == "" {
-		cpc.FilePath = filePath + CheckpointFileSuffix
-	}
+	return cpcOpt.(*cpConfig)
+}
 
-	return cpc, nil
+// getCpFileName returns the name of the checkpoint file
+func getCpFileName(src, dest string) string {
+	md5Ctx := md5.New()
+	md5Ctx.Write([]byte(src))
+	srcCheckSum := hex.EncodeToString(md5Ctx.Sum(nil))
+
+	md5Ctx.Reset()
+	md5Ctx.Write([]byte(dest))
+	destCheckSum := hex.EncodeToString(md5Ctx.Sum(nil))
+
+	return fmt.Sprintf("%v-%v.cp", srcCheckSum, destCheckSum)
 }
 
 // getRoutines gets the routine count. by default it's 1.
@@ -73,6 +92,16 @@ func getRoutines(options []Option) int {
 	return rs
 }
 
+// getPayer returns the payer of the request
+func getPayer(options []Option) string {
+	payerOpt, err := findOption(options, HTTPHeaderOSSRequester, nil)
+	if err != nil || payerOpt == nil {
+		return ""
+	}
+
+	return payerOpt.(string)
+}
+
 // getProgressListener gets the progress callback
 func getProgressListener(options []Option) ProgressListener {
 	isSet, listener, _ := isOptionSet(options, progressListener)
@@ -96,6 +125,7 @@ type workerArg struct {
 	bucket   *Bucket
 	filePath string
 	imur     InitiateMultipartUploadResult
+	options  []Option
 	hook     uploadPartHook
 }
 
@@ -106,7 +136,7 @@ func worker(id int, arg workerArg, jobs <-chan FileChunk, results chan<- UploadP
 			failed <- err
 			break
 		}
-		part, err := arg.bucket.UploadPartFromFile(arg.imur, arg.filePath, chunk.Offset, chunk.Size, chunk.Number)
+		part, err := arg.bucket.UploadPartFromFile(arg.imur, arg.filePath, chunk.Offset, chunk.Size, chunk.Number, arg.options...)
 		if err != nil {
 			failed <- err
 			break
@@ -145,6 +175,12 @@ func (bucket Bucket) uploadFile(objectKey, filePath string, partSize int64, opti
 		return err
 	}
 
+	payerOptions := []Option{}
+	payer := getPayer(options)
+	if payer != "" {
+		payerOptions = append(payerOptions, RequestPayer(PayerType(payer)))
+	}
+
 	// Initialize the multipart upload
 	imur, err := bucket.InitiateMultipartUpload(objectKey, options...)
 	if err != nil {
@@ -162,7 +198,7 @@ func (bucket Bucket) uploadFile(objectKey, filePath string, partSize int64, opti
 	publishProgress(listener, event)
 
 	// Start the worker coroutine
-	arg := workerArg{&bucket, filePath, imur, uploadPartHooker}
+	arg := workerArg{&bucket, filePath, imur, payerOptions, uploadPartHooker}
 	for w := 1; w <= routines; w++ {
 		go worker(w, arg, jobs, results, failed, die)
 	}
@@ -185,7 +221,7 @@ func (bucket Bucket) uploadFile(objectKey, filePath string, partSize int64, opti
 			close(die)
 			event = newProgressEvent(TransferFailedEvent, completedBytes, totalBytes)
 			publishProgress(listener, event)
-			bucket.AbortMultipartUpload(imur)
+			bucket.AbortMultipartUpload(imur, payerOptions...)
 			return err
 		}
 
@@ -198,9 +234,9 @@ func (bucket Bucket) uploadFile(objectKey, filePath string, partSize int64, opti
 	publishProgress(listener, event)
 
 	// Complete the multipart upload
-	_, err = bucket.CompleteMultipartUpload(imur, parts)
+	_, err = bucket.CompleteMultipartUpload(imur, parts, payerOptions...)
 	if err != nil {
-		bucket.AbortMultipartUpload(imur)
+		bucket.AbortMultipartUpload(imur, payerOptions...)
 		return err
 	}
 	return nil
@@ -397,10 +433,10 @@ func prepare(cp *uploadCheckpoint, objectKey, filePath string, partSize int64, b
 }
 
 // complete completes the multipart upload and deletes the local CP files
-func complete(cp *uploadCheckpoint, bucket *Bucket, parts []UploadPart, cpFilePath string) error {
+func complete(cp *uploadCheckpoint, bucket *Bucket, parts []UploadPart, cpFilePath string, options []Option) error {
 	imur := InitiateMultipartUploadResult{Bucket: bucket.BucketName,
 		Key: cp.ObjectKey, UploadID: cp.UploadID}
-	_, err := bucket.CompleteMultipartUpload(imur, parts)
+	_, err := bucket.CompleteMultipartUpload(imur, parts, options...)
 	if err != nil {
 		return err
 	}
@@ -412,6 +448,12 @@ func complete(cp *uploadCheckpoint, bucket *Bucket, parts []UploadPart, cpFilePa
 func (bucket Bucket) uploadFileWithCp(objectKey, filePath string, partSize int64, options []Option, cpFilePath string, routines int) error {
 	listener := getProgressListener(options)
 
+	payerOptions := []Option{}
+	payer := getPayer(options)
+	if payer != "" {
+		payerOptions = append(payerOptions, RequestPayer(PayerType(payer)))
+	}
+
 	// Load CP data
 	ucp := uploadCheckpoint{}
 	err := ucp.load(cpFilePath)
@@ -444,7 +486,7 @@ func (bucket Bucket) uploadFileWithCp(objectKey, filePath string, partSize int64
 	publishProgress(listener, event)
 
 	// Start the workers
-	arg := workerArg{&bucket, filePath, imur, uploadPartHooker}
+	arg := workerArg{&bucket, filePath, imur, payerOptions, uploadPartHooker}
 	for w := 1; w <= routines; w++ {
 		go worker(w, arg, jobs, results, failed, die)
 	}
@@ -479,6 +521,6 @@ func (bucket Bucket) uploadFileWithCp(objectKey, filePath string, partSize int64
 	publishProgress(listener, event)
 
 	// Complete the multipart upload
-	err = complete(&ucp, &bucket, ucp.allParts(), cpFilePath)
+	err = complete(&ucp, &bucket, ucp.allParts(), cpFilePath, payerOptions)
 	return err
 }
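
getPayer is the common hook here: UploadFile, DownloadFile, and CopyFile all extract the request-payer header from the caller's options and re-apply it (as payerOptions) to the internal meta, part, abort, and complete requests, so requester-pays buckets work across every sub-request of a resumable transfer. Caller-side this is a single option; a sketch ("requester" is the value the x-oss-request-payer header expects, and the PayerType constants live in the oss/const.go hunk not shown here):

    // Assumes bucket was obtained via oss.New(...).Bucket(...) as in the samples.
    err := bucket.UploadFile("my-object", "local-file.zip", 500*1024,
        oss.Routines(3), oss.CheckpointDir(true, "/tmp/cp"),
        oss.RequestPayer(oss.PayerType("requester")))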

+ 30 - 18
oss/upload_test.go

@@ -237,7 +237,7 @@ func (s *OssUploadSuite) TestUploadRoutineWithRecovery(c *C) {
 	// Use default routines and default CP file path (fileName+.cp)
 	// First upload for 4 parts
 	uploadPartHooker = ErrorHooker
-	err := s.bucket.UploadFile(objectName, fileName, 100*1024, Checkpoint(true, ""))
+	err := s.bucket.UploadFile(objectName, fileName, 100*1024, Checkpoint(true, fileName+".cp"))
 	c.Assert(err, NotNil)
 	c.Assert(err.Error(), Equals, "ErrorHooker")
 	uploadPartHooker = defaultUploadPart
@@ -259,7 +259,7 @@ func (s *OssUploadSuite) TestUploadRoutineWithRecovery(c *C) {
 	c.Assert(len(ucp.allParts()), Equals, 5)
 
 	// Second upload, finish the remaining part
-	err = s.bucket.UploadFile(objectName, fileName, 100*1024, Checkpoint(true, ""))
+	err = s.bucket.UploadFile(objectName, fileName, 100*1024, Checkpoint(true, fileName+".cp"))
 	c.Assert(err, IsNil)
 
 	os.Remove(newFile)
@@ -276,16 +276,28 @@ func (s *OssUploadSuite) TestUploadRoutineWithRecovery(c *C) {
 	err = ucp.load(fileName + ".cp")
 	c.Assert(err, NotNil)
 
-	// Specify routines and CP
+	// Resumable upload with empty checkpoint path
 	uploadPartHooker = ErrorHooker
-	err = s.bucket.UploadFile(objectName, fileName, 100*1024, Routines(2), Checkpoint(true, objectName+".cp"))
+	err = s.bucket.UploadFile(objectName, fileName, 100*1024, Routines(3), CheckpointDir(true, ""))
+	c.Assert(err, NotNil)
+	c.Assert(err.Error(), Equals, "ErrorHooker")
+	uploadPartHooker = defaultUploadPart
+	ucp = uploadCheckpoint{}
+	err = ucp.load(fileName + ".cp")
+	c.Assert(err, NotNil)
+
+	// Resumable upload with checkpoint dir
+	uploadPartHooker = ErrorHooker
+	err = s.bucket.UploadFile(objectName, fileName, 100*1024, Routines(3), CheckpointDir(true, "./"))
 	c.Assert(err, NotNil)
 	c.Assert(err.Error(), Equals, "ErrorHooker")
 	uploadPartHooker = defaultUploadPart
 
 	// Check CP
 	ucp = uploadCheckpoint{}
-	err = ucp.load(objectName + ".cp")
+	cpConf := cpConfig{IsEnable: true, DirPath: "./"}
+	cpFilePath := getUploadCpFilePath(&cpConf, fileName, s.bucket.BucketName, objectName)
+	err = ucp.load(cpFilePath)
 	c.Assert(err, IsNil)
 	c.Assert(ucp.Magic, Equals, uploadCpMagic)
 	c.Assert(len(ucp.MD5), Equals, len("LC34jZU5xK4hlxi3Qn3XGQ=="))
@@ -299,7 +311,7 @@ func (s *OssUploadSuite) TestUploadRoutineWithRecovery(c *C) {
 	c.Assert(len(ucp.todoParts()), Equals, 1)
 	c.Assert(len(ucp.allParts()), Equals, 5)
 
-	err = s.bucket.UploadFile(objectName, fileName, 100*1024, Routines(3), Checkpoint(true, objectName+".cp"))
+	err = s.bucket.UploadFile(objectName, fileName, 100*1024, Routines(3), CheckpointDir(true, "./"))
 	c.Assert(err, IsNil)
 
 	os.Remove(newFile)
@@ -313,11 +325,11 @@ func (s *OssUploadSuite) TestUploadRoutineWithRecovery(c *C) {
 	err = s.bucket.DeleteObject(objectName)
 	c.Assert(err, IsNil)
 
-	err = ucp.load(objectName + ".cp")
+	err = ucp.load(cpFilePath)
 	c.Assert(err, NotNil)
 
 	// Upload all 5 parts without error
-	err = s.bucket.UploadFile(objectName, fileName, 100*1024, Routines(3), Checkpoint(true, ""))
+	err = s.bucket.UploadFile(objectName, fileName, 100*1024, Routines(3), Checkpoint(true, objectName+".cp"))
 	c.Assert(err, IsNil)
 
 	os.Remove(newFile)
@@ -332,7 +344,7 @@ func (s *OssUploadSuite) TestUploadRoutineWithRecovery(c *C) {
 	c.Assert(err, IsNil)
 
 	// Upload all 5 parts with 10 routines without error
-	err = s.bucket.UploadFile(objectName, fileName, 100*1024, Routines(10), Checkpoint(true, ""))
+	err = s.bucket.UploadFile(objectName, fileName, 100*1024, Routines(10), Checkpoint(true, objectName+".cp"))
 	c.Assert(err, IsNil)
 
 	os.Remove(newFile)
@@ -347,7 +359,7 @@ func (s *OssUploadSuite) TestUploadRoutineWithRecovery(c *C) {
 	c.Assert(err, IsNil)
 
 	// Option
-	err = s.bucket.UploadFile(objectName, fileName, 100*1024, Routines(3), Checkpoint(true, ""), Meta("myprop", "mypropval"))
+	err = s.bucket.UploadFile(objectName, fileName, 100*1024, Routines(3), Checkpoint(true, objectName+".cp"), Meta("myprop", "mypropval"))
 
 	meta, err := s.bucket.GetObjectDetailedMeta(objectName)
 	c.Assert(err, IsNil)
@@ -371,23 +383,23 @@ func (s *OssUploadSuite) TestUploadRoutineWithRecoveryNegative(c *C) {
 	fileName := "../sample/BingWallpaper-2015-11-07.jpg"
 
 	// The local file does not exist
-	err := s.bucket.UploadFile(objectName, "NotExist", 100*1024, Checkpoint(true, ""))
+	err := s.bucket.UploadFile(objectName, "NotExist", 100*1024, Checkpoint(true, "NotExist.cp"))
 	c.Assert(err, NotNil)
 
-	err = s.bucket.UploadFile(objectName, "NotExist", 100*1024, Routines(2), Checkpoint(true, ""))
+	err = s.bucket.UploadFile(objectName, "NotExist", 100*1024, Routines(2), Checkpoint(true, "NotExist.cp"))
 	c.Assert(err, NotNil)
 
 	// Specified part size is invalid
-	err = s.bucket.UploadFile(objectName, fileName, 1024, Checkpoint(true, ""))
+	err = s.bucket.UploadFile(objectName, fileName, 1024, Checkpoint(true, fileName+".cp"))
 	c.Assert(err, NotNil)
 
-	err = s.bucket.UploadFile(objectName, fileName, 1024, Routines(2), Checkpoint(true, ""))
+	err = s.bucket.UploadFile(objectName, fileName, 1024, Routines(2), Checkpoint(true, fileName+".cp"))
 	c.Assert(err, NotNil)
 
-	err = s.bucket.UploadFile(objectName, fileName, 1024*1024*1024*100, Checkpoint(true, ""))
+	err = s.bucket.UploadFile(objectName, fileName, 1024*1024*1024*100, Checkpoint(true, fileName+".cp"))
 	c.Assert(err, NotNil)
 
-	err = s.bucket.UploadFile(objectName, fileName, 1024*1024*1024*100, Routines(2), Checkpoint(true, ""))
+	err = s.bucket.UploadFile(objectName, fileName, 1024*1024*1024*100, Routines(2), Checkpoint(true, fileName+".cp"))
 	c.Assert(err, NotNil)
 }
 
@@ -404,7 +416,7 @@ func (s *OssUploadSuite) TestUploadLocalFileChange(c *C) {
 
 	// First upload for 4 parts
 	uploadPartHooker = ErrorHooker
-	err = s.bucket.UploadFile(objectName, localFile, 100*1024, Checkpoint(true, ""))
+	err = s.bucket.UploadFile(objectName, localFile, 100*1024, Checkpoint(true, localFile+".cp"))
 	c.Assert(err, NotNil)
 	c.Assert(err.Error(), Equals, "ErrorHooker")
 	uploadPartHooker = defaultUploadPart
@@ -414,7 +426,7 @@ func (s *OssUploadSuite) TestUploadLocalFileChange(c *C) {
 	c.Assert(err, IsNil)
 
 	// Updating the file. The second upload will re-upload all 5 parts.
-	err = s.bucket.UploadFile(objectName, localFile, 100*1024, Checkpoint(true, ""))
+	err = s.bucket.UploadFile(objectName, localFile, 100*1024, Checkpoint(true, localFile+".cp"))
 	c.Assert(err, IsNil)
 
 	os.Remove(newFile)
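
For reference, a minimal sketch of the two resumable-upload options this test now exercises: Checkpoint with an explicit checkpoint file path, and the CheckpointDir option introduced in this change, where the SDK derives the checkpoint file name from the local file, bucket, and object names (endpoint and credentials below are placeholders, as in sample/config.go):

package main

import (
	"log"

	"github.com/aliyun/aliyun-oss-go-sdk/oss"
)

func main() {
	client, err := oss.New("<endpoint>", "<AccessKeyId>", "<AccessKeySecret>")
	if err != nil {
		log.Fatal(err)
	}
	bucket, err := client.Bucket("<my-bucket>")
	if err != nil {
		log.Fatal(err)
	}

	// Explicit checkpoint file: resumes from local.file.cp if it exists.
	err = bucket.UploadFile("my-object", "local.file", 100*1024,
		oss.Routines(3), oss.Checkpoint(true, "local.file.cp"))
	if err != nil {
		log.Fatal(err)
	}

	// Checkpoint directory: the SDK derives the checkpoint file name
	// (see getUploadCpFilePath in the test above).
	err = bucket.UploadFile("my-object", "local.file", 100*1024,
		oss.Routines(3), oss.CheckpointDir(true, "./"))
	if err != nil {
		log.Fatal(err)
	}
}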

+ 2 - 2
oss/utils.go

@@ -16,11 +16,11 @@ import (
 
 // userAgent gets the user agent string
 // It contains the SDK version, OS information and Go version
-var userAgent = func() string {
+func userAgent() string {
 	sys := getSysInfo()
 	return fmt.Sprintf("aliyun-sdk-go/%s (%s/%s/%s;%s)", Version, sys.name,
 		sys.release, sys.machine, runtime.Version())
-}()
+}
 
 type sysInfo struct {
 	name    string // OS name such as windows/Linux
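
The hunk above turns userAgent from a package-level variable, initialized once by an immediately-invoked function at package load, into a plain function evaluated per call, so the getSysInfo work is deferred until the string is actually needed. A self-contained sketch of the before/after shape (the version string here is a placeholder, not the SDK's real value):

package main

import (
	"fmt"
	"runtime"
)

// Before this change (conceptually): computed once at package init.
var userAgentVar = func() string {
	return fmt.Sprintf("aliyun-sdk-go/dev (%s)", runtime.Version())
}()

// After this change: computed on each call.
func userAgent() string {
	return fmt.Sprintf("aliyun-sdk-go/dev (%s)", runtime.Version())
}

func main() {
	fmt.Println(userAgentVar)
	fmt.Println(userAgent())
}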

+ 1 - 0
sample/config.go

@@ -6,6 +6,7 @@ const (
 	accessID   string = "<AccessKeyId>"
 	accessKey  string = "<AccessKeySecret>"
 	bucketName string = "<my-bucket>"
+	kmsID      string = "<KmsID>"
 
 	// The cname endpoint
 	// This information is required to run sample/cname_sample

+ 32 - 2
sample/put_object.go

@@ -2,6 +2,8 @@ package sample
 
 import (
 	"bytes"
+	"encoding/base64"
+	"encoding/json"
 	"fmt"
 	"os"
 	"strings"
@@ -66,7 +68,35 @@ func PutObjectSample() {
 	}
 	fmt.Println("Object Meta:", props)
 
-	// Case 6: Big file's multipart upload. It supports concurrent upload with resumable upload.
+	// Case 6: Upload an object with server-side encryption (KMS) and the KMS key ID specified
+	err = bucket.PutObject(objectKey, strings.NewReader(val), oss.ServerSideEncryption("KMS"), oss.ServerSideEncryptionKeyID(kmsID))
+	if err != nil {
+		HandleError(err)
+	}
+
+	// Case 7: Upload an object with callback
+	callbackMap := map[string]string{}
+	callbackMap["callbackUrl"] = "http://oss-demo.aliyuncs.com:23450"
+	callbackMap["callbackHost"] = "oss-cn-hangzhou.aliyuncs.com"
+	callbackMap["callbackBody"] = "filename=${object}&size=${size}&mimeType=${mimeType}"
+	callbackMap["callbackBodyType"] = "application/x-www-form-urlencoded"
+
+	callbackBuffer := bytes.NewBuffer([]byte{})
+	callbackEncoder := json.NewEncoder(callbackBuffer)
+	// Do not encode '&' as "\u0026"
+	callbackEncoder.SetEscapeHTML(false)
+	err = callbackEncoder.Encode(callbackMap)
+	if err != nil {
+		HandleError(err)
+	}
+
+	callbackVal := base64.StdEncoding.EncodeToString(callbackBuffer.Bytes())
+	err = bucket.PutObject(objectKey, strings.NewReader(val), oss.Callback(callbackVal))
+	if err != nil {
+		HandleError(err)
+	}
+
+	// Case 8: Multipart upload of a big file, with support for concurrent and resumable upload.
 	// Multipart upload with a 100KB part size. By default one goroutine is used and no checkpoint file is used.
 	err = bucket.UploadFile(objectKey, localFile, 100*1024)
 	if err != nil {
@@ -85,7 +115,7 @@ func PutObjectSample() {
 		HandleError(err)
 	}
 
-	// Specify the local file path for checkpoint files. 
+	// Specify the local file path for checkpoint files.
 	// The second parameter of Checkpoint specifies the checkpoint file path; when it is empty, the checkpoint file is placed in the upload file's directory.
 	err = bucket.UploadFile(objectKey, localFile, 100*1024, oss.Checkpoint(true, localFile+".cp"))
 	if err != nil {