
Merge pull request #130 from aliyun/preview_1.9.2

Preview 1.9.2
fengyu committed 7 years ago
commit cab2ffdaf4
12 changed files with 336 additions and 208 deletions
  1. .travis.yml (+9 -9)
  2. oss/bucket.go (+3 -3)
  3. oss/conn.go (+7 -4)
  4. oss/const.go (+9 -0)
  5. oss/download.go (+69 -51)
  6. oss/download_test.go (+39 -26)
  7. oss/multicopy.go (+59 -51)
  8. oss/multicopy_test.go (+26 -14)
  9. oss/multipart.go (+7 -7)
  10. oss/option.go (+12 -1)
  11. oss/upload.go (+66 -24)
  12. oss/upload_test.go (+30 -18)

+ 9 - 9
.travis.yml

@@ -16,12 +16,12 @@ script:
 - "$HOME/gopath/bin/goveralls -coverprofile=coverage.out -service=travis-ci"
 env:
   global:
-  - secure: RSQJ+EfldTf9PA4pCfxPEf3HMCDZ1jcf+6NU3uwjOxxpDhnnmW6cUwZydheUhjZawXmk+oYzI/6aqsrGR4fq/9w7ey1gpiDeNCUKFPAcJgwb9P3R6TUgQZvin09Mgp7xK3hTXtbdXHPSbaGrXH+mh49AxGw7e9ZCtQ/f0ENUYdfAWiGqPI4W6ojJCyxiWzV+pJ0TW7JM32So98KPLQZNrBwTa0+O6I7tJcYPq62kP2jaWIwkXIb3eTKLrACYW2nKTTWVY17KfJIw/BO4ZtPwk8/EgL8E+NFEczldPbkg81QXsPRDcuQOqNnBsSD78Ej+5pjF6o715BTPYVTOcDNY5ebJ37W6SAvuOIcANwAmcrkuAtdueaHtv6lPMnfqaBp+eDIm3r1cs2tMka67r9Z9K50GxrjNMkrlNLHIXx+TZAn1CfRMslTWzb6dcEy8ncBlKmFZE0h8yRoExwOTCaPb2BH6c92e7zRJPNb3jGwabnjT0YGmswfNx/Y5C+Xv/VptU9NYYHMkbc2VgDWasuqV95wKWsgD68P7Mrqei2wEKDqCnEjSKAsTGEx3FxgvTulNUv1KRRlqrxS8u0p6V96Fg4c3IUkziEMXjtmsAysHHDjwjZ/oDE79lnjIXAvTslVv6zTMBUepMaMQrN3hW8VZ3LPWtYAxJT3MvbNZEqb8FAw=
-  - secure: ACE5vbYP0GUp6I0aUVfKk9acqTZQdcHlVS/Zw69JIzuewxjisrT1D1/tQTtvoi/PNIG+NVbFiEvyzJGILQgVEEMJ3Bnjzg3M1oD1dGSFQQZX0VuyU16B7Dcc3qvXcwVg/hbYehBNFIMi01gXZBTsAhm9M1La6omnjWPK7zVs3x+CBw5hTTplxaIxXPlANfeOBBH5ziHUL72700yXaDOWifu5gkzCwXId6abte+4Y79XozkS7DJir6B8NB5D3XkH/TFIQ1vlRrmKpSOFigKhhAUWG2X4FT5TxZes2P3lZApU5NYHvDwXISc0sivn+hXVb8JlmEgSRzp0qQWd6BEoUG2gtV2nFE6Kc8TGCkt4d2v5e0Nmel7oMjqRe2df6TYGPu3h22tYSjXdn65+fxcfWQHorW7AmIL3bMZ9hDHHi2Fp+HaaI9vvHxJPUALSlVqCn7FLwLTC1B9MgEVlPD2g7FPGMdL6n/5CT0YTJk4/4uv/aNKllp4OAx3gOuOqT4Vw4VUTXREKvIym4vK4yWBCrPCq29WEUbSaMrBh2pctEpI9dyxJmjznUsjiJ8cGYvT1zUPVcKHgybWZrERK6zHKeIL+umobi8nAMRapSZ8WCkyIlRY3ThO8qSS2jgdjg4hqD59w2QZezzSyejsGjCwNhbInJkn2ixc+pvdoxsBVM9Yw=
-  - secure: j3GX9Cnx40e61nby/5UWmrBDHVnBP0OdKnk/PG5c5b8XPl3LH68xViI8Ifn36q8bOHDTFfiMaeKvoLHzkZsXqDJbgNY6PKP1Cd/6h+FErHwDdIc8XEaczYOCawwf/esjL0f082PIsk9MUGAc5TH0DDIVgEEXL6HibxMqbEh06JZ9urIVekQCw0woHkCSh8flyWYtJf+VYiR9+llXmhZEfxEe+BIUkL+caZUSkkC54lbbW2ENvzasLHsue6+jHEyL7NobfgodM3RvB4XZ2wdxUXUqDzkOTlOLODWpXI45afdSk+h2+co436PUVOb+eg0b0RccOWb25uueamLRv2NDqbrbYBNzrRMcFtzisMORB+NDDHAcWTuJ70q89eaFBpkB03X8y6LgM37IdYITp/T9YB3TnOfgWwG2r1luJ0q8Z1vEVX7+hGBlXWi5GVk4L+q6Os+acrT8HYxZKDa/EBjS4XCiiwhzsTloxmBUJNcxDVgzzBW9NE8pQ0ZNdjPH88Ca1oy6qdMRsuMz+WSAyZ3CF0TYPxiZ5r6Pnt8aSVSneClg+8GMwFUyO5rt7ySXJNruwhtrs7r45S8v74/pSRiI1EgN0BAb2CI8AhD2wzj2vv2MNvbI/ppLeaEFz2RCEbpARJpP6ihl2GxU6WHjrqchyAgFHMTp5YrGb0/cAVLnQ0k=
-  - secure: VScdIW+AGYKJYr8Z5U2A1xoYdHxVw1eZ2DNCk+E4r8YhyTfGup9V3hbGgngT6urkOU8qQnqI7vDjI7ZyXFdzEUXKQYZDcA3s61KkiVl7c+cZZMOkD8b8fNN8uAgVgag/f04VmR5HHU2yyzaK3BkU+QEWahRkNHdjW+pJYQpYJHEoYUzCecc5335a7j1ysJ8LGXbMYiCRL29pWrTQEpC0M1mPri9kMmY7pPSr4k/iNPrAm+OCHHlrPVmHmIOH5QEduHzPkSauiExuSt3ZzU5ZqA29EwLtkm6As20ttVCz32bW1zSRlYTmkwcBjASvS4duBvSdS65AWccnRVbOqIvZJTPByYHpUi2tzuwdLVYK68m4JD51LWfFM7PCGssva6mrDSY0MvWgMF+isGugfarhLinne+hYt/r+5JxUASzYXWBKFbce1lk4bT7wp2zO0h8Q2Cna+K+D35YROV77fDiDoxarURGsqAHvU/n7PSoEnfA8WOiugVkIPuOhYhlhui+46ahMYxL69gcu88y66AMPxEHuacxW4GpYgaF/xjV8180/c3ck02csECKOjv/BQU2bbV/ngNo+bDH5g8xiHknE9oWdVEB/dxzZmmcKroXcxz6zqbMJjn03mgqBZiEkPhRStL7cny6Juq/aDTLREooHtn7OA6zsttVOkgAdCJ1VxN8=
-  - secure: lHKgxwLtpq9ydgxQcEe8legRMq5eUL+vP7pIHnauYVcQAWmfVaEwmwVeKJubPiT43m3VaXvKUvYodeNMXj4bhe+8/jG0SSVRd9UlyWlcgFXBJUBOM5FLOhPrnc2zdaCcOg8g554VqM4Xmhu1tX93E0xzBS1B7N+cDwb4xD5D5iQ5rLIbCdK8MbdJwOZLyqKN2cQ9eZRAMy5kz2WnV0LMrm3MeyxJe+bytgf8rSCE6OchNqLRyOCikkSmMiZROzvBZH2RTS1reVJHNhKfDsCkIt+QA55M9Fjnjjvs94rMNgxZwIJhj7wIgkfA2ypAi/3ESRk/yxJqsDyiutfpq/8N4l0+VCV9KIdeLn+rqEiZz8WYYp6oCVHhe/eM8HWwGmMM1JTQfUu1W6ADb8WhUY/4e+AZMb8CRjh6bJ5wf43Z5EZNOKkXZgGQyE5je3ORuPfpG/PliuJS7hUAyMzhjPnp+DMBsOPNMS/glaZWL0GB3Fr7V2IxEQZ0MUPDEebEpuQm6VddQ2Lysw+raRZuC/c3kUBC9Rt7MIHDJjTdBQmbo3GO08ErhzKVgyCLqGKUx1WpApMXtsrb6ovk7VNXrjDsWCYyVtUbIgGCYsv7OGEhQAPMyAy9YMNT0OC/Y7n+5YDjWEjCPVW38TqtK1izbJ8mdbtY9jsy5uR9ET1aCMkXiG0=
-  - secure: gDu6CUQn7QT/9wR0wrfkYGm4dHFgpDUbsQ6SfwxCarP2waxlLpuXMdPO8TZZbHVgii+Md5JqGGJGIWzSlUwygH9WpRqyJnAfb6OWheJPusYpDyFWwiKwYMyjohKRrLk44qyvdNgVN9huBVpOHwks/vmvtFtJOfqfDwSpMplHKUEhtCeGpXzVwrH7xhiZgpylBoc3/9fvstpKfTiaGPBfDaX9NUSrl2ZGDOkixRWwisuCxNvk8+xVK3sxxJwfyuBZdzQ6S9hdGgCpUcty/mWjHtaAMtaE3sfCpuZrcZiyh4wnapWQ4P0oH5pzixOesm8ul6bGXHb7f3gj/xbX8cO9FoPESSqFWGnGugTPetvE1apuwlq1lNiv8SAHA2wLTRDkGZ8Go55ZqOHgdzlTWxQRroREzo904fERXU+txsH51AX3EakQuATXBb9KHrYfsDnHmY7YPAEjzdTPpcmny18loFvpTjzcq+TFPsQXXUA2VPO1lq/ZzGTBy26X8MshdFsc3LNIkcym6smzk1/fOJ5FT1OuH0m2AoHdUJserlWtcLyU1gPujybXRmSPQwV41CG/ANVT1Yq4+4WmQ0atdBaUvL2HkrzWjpWofpK6KAdWZ60GR86f4bSsSkuE0sgNmrfHULYqJimFFur1ATxjeCCLX5rS8mUqVHNKFHohOXfbrhg=
-  - secure: a3x1V86zVwlh9Cyf7d/jR/g7RfLEZjdIcn+jcKoJlcVMbgFcEddf7Stj7xAXe2cHiuk2cieiBCmqfOB2dzm8jL/D7xL+H6n/CXSXOy1j5xljKlTviqdg2lN0Ic/k3RuM9TCylc+kuDgsy/seKyKFp+zQvIEDpmbcLUqeblJNYYkUJ6Tugo+dmjU7/gCsxACVsYRINYr2B2ZCyJjU7AwGxBssBhmCEQqoKjhhpFkDixR243Oq8gp6zKKrnePkADXAnbQc/qCA5egsH8JCP/di/I7U6UVxf38Ks5SlRkwsNNEFhaa9hEsTlWJv0MOfTdRQCEI2+4yuxc2VAKXdw8GLzPIY4U049KrP2fiGf+q/wtJgW/JE6Ux7/qeB2xE9EpyrcspLndzHNSBYUPaa92va1r3VmjGmZLs0vV1ITR7IfMJSqg2vZhXR8GJ+uu0OiLq/CS8jLpGz3tncyQQ9P5LzCLVEv5Zy187/z7WaOGpZllecgfsr772pcM08IEMvWWrj1rc8WrOF1T/81uLSGUu6/7j8Nw8PX0778Phswb48n55qptEwF+32zmJiMxVNFhIax+BT13bjwIw6MBqZhjwGgsLgORnvSc9EBxNG3g5rOvdnaaGy5xM2El2uYk9buFui6oDgQpNXpb92IJ1giAPRCaOs2QF+G3jHHYmAcaNr24A=
-  - secure: O+kTgLPklEbq65FdltBo6lMhayc+nlw+MVPOENt5oTaMgBWhr6DzmIAXJ7UGZMl1CNv5lcg7VBn/jpsH06B3WK6IOx/SR5Fg2de7T1DjYCJmjgSuwwBNf+kB1xS6nlcbh7YXaQlD3WJnb48PToigucgTHjGPFV+dL8w9mxKRHwWEE9NAXLAiDJY0+GRz4rQrM5BOAhgFijQohrYKFDfn0r4rNYT5F7WPh1KJVQhJ6kiEXYlTIk9GP7xQ1sCjNzZmo9p1WxoXlpCDsvTcn467Vt8dBhxDyqlNfqj8kpk5dWQz56xAvQ2FKP7RAGqmjgWW4S/xAFOmw1llD7xHYQqw4oDPfUtPm9jXU+4LmWffLgZ8rpcyPOMAoWB6yHdqoNxDKh9Hh/88MBkEXCbUIPX/pG4OG/aOTdTzDOM7D+3AIXXOvXHPVdYcMgrkY6RZAgCtB4q1pd7I7Afn3xyOmUbgjVJjyO8OX7Th4SgzAMh2eOd9PqzaMEwrpR2JBMx5uC/6LxM96xNMKYqDdmztdJ71/2Xzjxh9t0vcS2QiTs83p/So0QvUBQ30Ej8Al805YpzjYxJyp+hbHYhoz8J0XIr67rtV+HLB7QLC99IJGq6ZBWaJPDgxPNliiACDk7/6DHFr6SKqbBG4JJ2t8wDey/Vra9cIO/55/izi8heeqg3TDAw=
-  - secure: fRZkr+JoUz5QMmX9D4CSCjG78FrWiUdYbRKzkGpF7MXyfvNOqES2uvH67ffgubXBoeO7IS/x+hV/UcwrPpOZKDOny8dY5bxeZVjGWKkGlioqzfgPa6DDW2t+bvdtcpoB9r0N0Kp6LVoOjjn4qdjeG+4tuPIWeaEGh02mHTe+45IrPF4jhUQYEOu5kEwF/ayuH8Svvxu/zG4cWnVJ61rooW42TCXJ6Bj1Sq7lgTrTXLFYsOiuIujQiB3hhlxVi5vfD/hwx4qZkh+S0NSVIesU1uU0SFJUZ2ZTK4NWMuGDypZ2ulB/tYAAJIrfknDXJc+D8w3YWYia7q+E1tWUtoBG91rFM3hyhK7kCB2MZrUXqif20JHYpwXne66K06xpmym0/pUvKGH2Kw/qEyZTOKGSu8sl29FfsTFDRuCc4AXoP1Zyv6gHIjllmX30aFTGe0HZOftVmgnOen109Dtz2u1qSg7JrvAaQljltvRVdVlfOQ+Ub0DWFEgfUisL5WdxMDzJZalYYqR+GQy0/Wn8F2fZz4zvQ7pR4OEQoS9bWAhwcFwKSJ2IqLtAjggrIRp+h3u1OQFXeFMdhcr415QFJaw3rKsdMzubXOgOzCipfYAQbqm4KJK7boDLT2paPIKLTN6Fkf655XorMw4TuRHsOFlRqS/DOlaM2VwZP8hjc5wT4ak=
+  - secure: ZCL5egxJmZA+o1ujsaJe//AIh1ag/exSUJ2FzoKPF3O9c84hMc2k5EYO2rGzTNn1ML6M89Mo5hAvHQhyJEHjRuMtjc1QrfxAaA3mqm4scGXIXesPMqYGuvuPSh++6/fkAwaVBAhrk5GaDG1/FuxHE5zusGx3SvGegnCwO7n/2YCfXco6DWgVCdrz4p1EpPkAM3JIdHFUzsDWiimVuiNAvJmAT8+IeOPTT+WgusCJj4ORS3X3LddTjttBP+hRrp/pGSoNqPMzfysWybtaL2SJ8URtvsxW0Mo5BwocHAxAhPP+M2OscQbDzthSAezCLngYvrfBplfIyWlahlgzNz/FjXz5pQwWdYVNoibyxLLMOH685n75LNONN/xVO/GFmVPx7DMGapkN5NzIWS62D4v8QrRkwtms42OUkyEUHjDh8Evui3K2MNJVXA3TI9zOAR+C0krD7OEyS37qrppodhRxJSqFUlgXnk//wLldMC7vleDd7L2UQSWjqyBHqFOgsVaiLU2KRTY3zvv7ke+dqb5VF31mH6qAr8lJTR9un8M1att0VwCEKxoIRT4cKJCpEtZd8ovXOVt1uE695ThVXE9I5e00GXdTzqXOuv6zT4hv/dgmbz9JN9MYeCwmokEoIUmJKNYERa/bNVVefdnJt7h+dm+KpyPAS+XvPLzjbnWdYNA=
+  - secure: GdrPX7nUoZhB1DYTVD6x/vgA7H9dOlQc4n7nCsqEDyif+Y1XdPT83Ic3gSOt+cfy0/Kjh0/TT5xmLqpSh7wr7eyTpBPZGjz4ZbxBOcSLTfrf/spacgzla9I1335CvaTmpvrnvGUlOuVS8rb3J/+19dHlN6dfxX+ucjdfShR504d2JEcCLpTc1CEXAl+HEt3hM9gztOX5ykxyrtibDr0OPkNF7QjZ485V6UJkfyVlBM6JL59ywgh2dhdZn6JwmexHjVPsw6V8Ka07GzbpOs1e5eis42RUJe8eSqRRToCcTUbA9HOgWXswuu5k7nAwErygX2ub3hZ+yIjc+9JLsiy6F69RaUPVFlxfw8s5NLeInTIt28+A6iaf3X2k4lOaVFytgTl7lkYGNWz4eV/vXf2H4wZmaZn5OI0WKd3WuEJ04rsm7xzx9rC8znnsI3R1BHfapU/y6z2QGjgJsHqZmgfvXNKgSOurM6O/nlDnEsYOwYLQLhpeXVFNmbo+M77HAVicKD4yL08+5uBZaeYYipzyC1O4DEHX5BNdl34NpNxUdMqUb4MfNEnYeqvmemvZkOO6BO+xucP1S2LSKNXfFxH5iVfKbz4+VJ/2kt5R77672lkG3bXPUJFk4t1CTHBOLizEJNTTD8uzRIsW7Io+XFk6oyoEqXF0sT6Lbp/4pUHJQQM=
+  - secure: DGIgOKinCvYcLdcaIOKcecidQe5q/K4aGAjTyl8/fCp3mRWwFTrlv5gPMy9sHQEsiRjzQehpubMO1d0XFVO+0LvqdGLnXyM6lSnMhN6voQMnF1GaIXmxxBfvP3BHwyN8kMyY+4oqgMROvpxDKvq0IH/GE8opWRJhQdNkRscbtfHUvnqSk62oNziqIBxXrYBIuezuNcgZkCwEoZQwPu674efIAwVr01BmRfd/8Q5W27dJbCJFF9ceyOQrsjG2QGmW6GWraaOpqYfQ82A5ROB2saVU8mEs/f+mGoJgOP2aH3ErFBkJWBCUNzajluAyGyU9VgHGRQ+GMbSr/HqRILd5aYpDCGA0oFer4/jP32CqeEt1Jdqs2lWar5mFX1sae6PIxFyl5lnENgjTfbOt4oiGGye6t0mgI4+psRdCCQV5FQ7WfobEZ2ryfQxbiVU1X219jMoHhHmFzC83e/T6V/mkHrh8OR67k9pieH+DqNGvWFtv7BBs/ihfoo2ONNgsHcofsPj85I6odWAhsBnsYm6FsR31N19nObnggeDyqiCyh5qUFvSGPkH4fXTKthKETIRdsdOEDOcbCD6kUpZqIWyuk6TKeOD8PxjgKzm3hZjlugU1x3amVv71EKjA5/FOVyIVuekLKoLn7pt+n3PVlT30IZfWorEfqjeVAKp2SglE8nA=
+  - secure: LnL0Hl9yZEie1aYngEO5QK332cn3W2i4f6R5+kxX3tncdqBDFhsp5tQfMvlKHIzFlK94DI/G0diAl8zJmYQfAYARe4uvW5FAINRCkOUz2jwIA/gtQDr+oONqHK0OLPWYuZ6KJM4t7dmuPUR2/frKe0/6r5XeFkeAz9l8r1Gw9o42jFDQPDkhBj7k5EkmB1DpuAY74vXy0tVBCJsd57/kuaRqbX4euhx3zFrDcr+xQEiDWHKzNHlJd6DZnruy0KDuWmIbUWhR2rd8YKAnzP7OHzpbTHsnbXvrVpaN1Mv8lXz2dpPTr7I2LMCrMEtfECu9U+LDIqhbHVMsp9rZ+fNQ048fREoj8HZrorIxmsRJzV0ZQzjdW9Q6EVaiYcLZPFOASsyuTNBbSJ2AIrE/izo4EUKme8BY+0mFzTJwMk7XwAtatItVhEUXb2wXWZm8GR8wIrNmbeSzle5NkwXpdpW3QzZ2EADL63/pP80aV1aMBmoAuLMIHxeHEnXOTAgQj6SOiloY+II/iJE4cE5vo9UNtZsqnJZqdd22s3kLdQV0kbFMWq8S3qmxtDFPeoZAy03xhTVnJUBkdjSL2UER5WAacZIr85M6Z2APc6dzMUlWEE+4QkkM1UAbwDBTXFrrmfDVYc0LrePRuoHQiOmSvTus5+WV9iIQF7rM4BcSLnOEW5U=
+  - secure: SgvbTYTbMEkmqDXP8MW6lbERkHUjBRwg477hUL11Ok1TiRdHCbEDrq3mfUP+Tl2sS1x5qQ2JFg2NyWS2ikCAd0zjO3QEfmhfQFRpmfgb5O67wY2oEAsbRDanjIXTwpDAZn87KFIPB6ohVsX4LEztfR8zqXKIfXrVFs6lyDHS1LIgbgQhJl+XfJRsfPlWRq7QydANTCY34raUXgXgBxtbv43b232LT8UusizUvZS4HJbrbo4oXhVfhkUH46B2o0ct2Xt6EIlxyOtxtZOmnai8O7kIFHoG+GcHxeZ7++X6FaHR3Cdv3rr7EEg3MwsOIZ6SmgeQ0gcs32RZf0giFMwo1LkgiB9KrJTBXkU0CSYysxfeLUCEd11q4h4lZyhxU8CMvgs+1m3s6A2/5uYDYSSqJuAHTHQgntMn7/baXKVXuWXrSSERxkUiqwlKjFFHz0kshj3ZXiTZ3EhjBZgXeeGzGEEbZBQCEJgXKUpl+C0D07PLKt6f2ya4TVTZ3WOjh7dKq14+0nC3w+5z0ZtGtv+IS1LFfajNs/LsT4fDmKsIoEQg2Kf5S//ckeuzaR4bkMGCm3qquJrNE6Uqq0MZzSUdUpnsILjfLiVlrtHk/9So7ulRc8XyhBIWDy9lTS7NxOfpw5cnVJRcjEwrZ4Q6iNwdsh4vPZLVgavwfFRW87DjF6A=
+  - secure: Jef94P0QfHuMT5GQNrzfMdtVUVvV9dEGsvOLFqPvIkPLJZWGqwaIFUG5t0mCPEgn8uct3XPOiIYivgxnOURa1JNegnTbjRLXBOAjhE5hw2x2IRWj2xT+ylYCNqh9jtIEJdAjUzJnXXj1iasZGCKa4DNwNLgsuQ8d5Yl3bY5l5YI4MtTsdzbBUT9WewDWgnO/MhZM52w18XLBx+Fsq80F0VwpzoStKRula8anOhL+Bvj1uAielMOUo3QcpYcV2XfTnM5n0ApwqUhmv/8YoJpHXjGTeKRM1Hem1jFtfWCjSRrlEKEFJALEJImf0iWbZN5Z0TOcfJqzPY09/8h60OOfi0TXcnwVnSX33Zp1oDLDlRnsN7HQg+yIub0N03OwHqmC2AO0ShkO/lBmEMsfqlEoc4o2GJ3YL+JpC5vPsy7fFMad+jNGXlg6jPAshvCJ2DfnmK1jYSSVdVNUUeP1Bk5rhQkFzFH3vgNgX3nFk0gEYrfDn3/Ea6tORybSJzaAkB9bU1n4U2e3OplvWr1Ll8O/t87ws8ctyY/Ah2hRmhSKEG9cdySnm7Uq8H7696MZEEw9aatj+bRJk5CbCVtSX8v49I3C0tERcUBO5M3U+/g0qeBW9hEhxnBeG3y253Bo1FhSxbaZhGwSGJ91htRXLlJlUs2QrOcSYMsCT6p35KdWaqA=
+  - secure: NMVS9EU+ahQXGiyTCHyZ44rf+8b3me3UXD1DozMm04lCvnWoBqJE4aXBGQsDAWuOL4NTTm0SaVu6sBY6ZTXOYYF59mwEbxt4qpmVjZ+vBrtMbMiqoxv145blquR9JKedkdP6IGSd7VSQwSba71f/RVv5VeGvxUSEhCwA04kKxToOPwmnORmT6qwb7PkPCMNHxz4VpsUIsKx8jRrY6Gmp6FvQJBHfKEHnDQohB1ReIYEYi39ijLvpbCZqrB5u1N9oF6WlpBiNIX3kQizn7ftUyewJgoZMnfpW/Lta6e91yzFInWg75bZdW3faa30Qy0yw0zlQIPLs89c8A/XH1fGVECH9At9VNmdYrb0fD9aWnH7zdX6Im+Bw7Ptph4x6tB7zPeFoZR5cVZT7L06/HbnW7NeQk4tg/N4I1tOaO7AQl+ofhCzesZ56bSxETiNFn9QiNwWFTzjlkG7jxN1iAAkdYsZEQHwtEK63R//NJtXpbbtNA831QqgDqBK+IxyKeLhmxmu17dWcUw9tm4jlZ7d6nPB9bzJcVM6K2uRJyW07SlBqd65WJTXPV1PFww8zh+chAC4ZkLDhupn+7ZSG2ylLYGgepmABoC/CXHkXEsNzdQ8wPX/pDIz2WNmwEXyC/Nv+WNpFS/tWIAryIPOLMuETIgbaOLbD5vZDSKxDZVGDvPE=
+  - secure: cNr4PiK6ZZADoRMacL4lvdMYWgM9H4lKN3u+nfMA/MrVrnSjeRonkO7RjMJWs9HicPanEks10R1w/T/6nWyFQV2CPkEBSNSLf3DAD+dlRekKfWogGXzYnvqeiki1HzsIPYTImiD5BtPn6SbJmO/ErJt3zuogspyBws/X7XfZ+u8FIpPsYEmvHslT2jARuput0yNfldUgxxyI0IvgkuCEcCTFwePspjbn6zR6Df67e+r5ibFqkdPYdXkQVpvfA90RPpfBUuuaEO7kkFlKbPK+Nl/jbUnjcfbe8zJRpSb8j/S2USAiBUjFsqsdvFqZ9WumjXJLrrNFt/UgIXaMyG3Y8xJl9kzCcx36wcNoaP2mx2qucYTdC0ey49g0uywvOVDdykmctQRF7uYQS+UkMqs5jRLgAjQ1/wJISrvtcpaK/4DyhLBUFrOf9chep2hzWBFaWPto1IUpWu9Sjtz5rtjsAm5LR7zvIxcorvRall5kRokAspHb9+TaQKSDOpYNF+PB77cb9H0OLZBLVPGo0WJHq5dv0NVZSH9JVq4AiJDmvMWI6weBis+tLbECMwbeTezo6hDOuII7VZt/KcHgzlt3KCDwv8krQq71q7ySDt7SxrvLeDjeiEFkjwA0lN7Cin1yqjON83LsIsWkKxbf+xNJRngSE4bPn95j3pHELdo3uqY=
+  - secure: iDkNjibPknbawHN+jobw1AEqhQDhqFvGPBUX7FkxlrIanNR71Tj8CPAFtDpJbYaBMdPt4IzOmD2JVo9w7E1TfNX4DsOpkb2MbOr55TxfXQ/+y7TBdJ9B/62BvhFWk8Hvq8TWVPTCgNIaVXNfqBAj6WQSX3AbUVHWSOM9Aey5joBZuhdWXSmKo46lLOradacDDPZYjrLEsMTS2CotzduKQ4C8dGCVcMEbBnS3O2WRj3oR0wiiP3X0jbnxJkOV2MCoadZxSu5B+gaaJ+Yv7EKT0vy6ASp6LYrWkjY0eKbTqy8NtCvCFlliND/iaq4LEv838hQfO/o0WeB2b7/2MH2EW1v8XLacV12ak5wJgb7b+L6fG+lMKMta5Re+vjdRYgoU5EVeWQNxrnX1chEdzFXb/q2+5DVp43qH5i1Tu4FR/kSBobQeSAbT7HTkWAVz3kg8HmubYZ3P0eXToZA/DlX0dphWxO9ShR3H+XTJhh3tSqzxMZxxhGqPcN4DPSfOTnJQ0v0NPz016lulCr9SuIOSM3f7HpeGXo5SeQbrY3yCnBG8Qxpx2kYaZZlT4J6fx3iFl77SY/lQu6H/Y8ZtufWEogPSkGEh+NLLWuwwBQFC3vH8l3J26vcqHZR3N9+GyqX13CSqWEUysMF4nBOi52ckhwJRF8hAeX+DIqxoLfjUkDc=

+ 3 - 3
oss/bucket.go

@@ -494,7 +494,7 @@ func (bucket Bucket) ListObjects(options ...Option) (ListObjectsResult, error) {
 		return out, err
 	}
 
-	resp, err := bucket.do("GET", "", params, nil, nil, nil)
+	resp, err := bucket.do("GET", "", params, options, nil, nil)
 	if err != nil {
 		return out, err
 	}
@@ -553,11 +553,11 @@ func (bucket Bucket) GetObjectDetailedMeta(objectKey string, options ...Option)
 // http.Header    the object's metadata, valid when error is nil.
 // error    it's nil if no error, otherwise it's an error object.
 //
-func (bucket Bucket) GetObjectMeta(objectKey string) (http.Header, error) {
+func (bucket Bucket) GetObjectMeta(objectKey string, options ...Option) (http.Header, error) {
 	params := map[string]interface{}{}
 	params["objectMeta"] = nil
 	//resp, err := bucket.do("GET", objectKey, "?objectMeta", "", nil, nil, nil)
-	resp, err := bucket.do("GET", objectKey, params, nil, nil, nil)
+	resp, err := bucket.do("GET", objectKey, params, options, nil, nil)
 	if err != nil {
 		return nil, err
 	}

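With GetObjectMeta now taking variadic options, callers can forward per-request options such as the requester-pays header added later in this diff. A minimal sketch, not the SDK's documentation; the endpoint, credentials, and bucket/object names are placeholders:

	package main

	import (
		"fmt"

		"github.com/aliyun/aliyun-oss-go-sdk/oss"
	)

	func main() {
		client, err := oss.New("endpoint", "accessKeyID", "accessKeySecret")
		if err != nil {
			panic(err)
		}
		bucket, err := client.Bucket("my-bucket")
		if err != nil {
			panic(err)
		}
		// GetObjectMeta now accepts options, e.g. the requester-pays header.
		meta, err := bucket.GetObjectMeta("my-object", oss.RequestPayer(oss.Requester))
		if err != nil {
			panic(err)
		}
		fmt.Println(meta.Get("Content-Length"))
	}
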
+ 7 - 4
oss/conn.go

@@ -348,15 +348,16 @@ func (conn Conn) handleResponse(resp *http.Response, crc hash.Hash64) (*Response
 
 		if len(respBody) == 0 {
 			// No error in response body
-			err = fmt.Errorf("oss: service returned without a response body (%s)", resp.Status)
+			err = fmt.Errorf("oss: service returned empty response body, status = %s, RequestId = %s", resp.Status, resp.Header.Get(HTTPHeaderOssRequestID))
 		} else {
 			// Response contains storage service error object, unmarshal
 			srvErr, errIn := serviceErrFromXML(respBody, resp.StatusCode,
 				resp.Header.Get(HTTPHeaderOssRequestID))
-			if err != nil { // error unmarshaling the error response
-				err = errIn
+			if errIn != nil { // error unmarshaling the error response
+				err = fmt.Errorf("oss: service returned invalid response body, status = %s, RequestId = %s", resp.Status, resp.Header.Get(HTTPHeaderOssRequestID))
+			} else {
+				err = srvErr
 			}
-			err = srvErr
 		}
 
 		return &Response{
@@ -424,9 +425,11 @@ func readResponseBody(resp *http.Response) ([]byte, error) {
 
 func serviceErrFromXML(body []byte, statusCode int, requestID string) (ServiceError, error) {
 	var storageErr ServiceError
+
 	if err := xml.Unmarshal(body, &storageErr); err != nil {
 		return storageErr, err
 	}
+
 	storageErr.StatusCode = statusCode
 	storageErr.RequestID = requestID
 	storageErr.RawMessage = string(body)

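The fix above corrects a shadowed error check: the old code tested err instead of errIn and then unconditionally overwrote err with srvErr, so unmarshal failures were masked and an empty ServiceError could be returned. After the change, callers can tell the two failure modes apart. A sketch, assuming the exported oss.ServiceError type with the StatusCode and RequestID fields set in this diff, and reusing the client/bucket setup from the earlier sketch:

	body, err := bucket.GetObject("my-object")
	if err != nil {
		if srvErr, ok := err.(oss.ServiceError); ok {
			// The service returned a parsable XML error body.
			fmt.Println(srvErr.StatusCode, srvErr.RequestID)
		} else {
			// Empty or unparsable body: a plain error carrying status and RequestId.
			fmt.Println(err)
		}
	} else {
		body.Close()
	}
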
+ 9 - 0
oss/const.go

@@ -44,6 +44,14 @@ const (
 	StorageArchive StorageClassType = "Archive"
 )
 
+// PayerType the type of request payer
+type PayerType string
+
+const (
+	// Requester the requester who sends the request
+	Requester PayerType = "requester"
+)
+
 // HTTPMethod HTTP request method
 type HTTPMethod string
 
@@ -109,6 +117,7 @@ const (
 	HTTPHeaderOssStorageClass                = "X-Oss-Storage-Class"
 	HTTPHeaderOssCallback                    = "X-Oss-Callback"
 	HTTPHeaderOssCallbackVar                 = "X-Oss-Callback-Var"
+	HTTPHeaderOSSRequester                   = "X-Oss-Request-Payer"
 )
 
 // HTTP Param

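The new PayerType and the X-Oss-Request-Payer header back the requester-pays support threaded through the rest of this diff, exposed as the RequestPayer option in oss/option.go below. A rough usage sketch against a requester-pays bucket (object name is a placeholder; bucket as in the earlier sketch):

	// The requester, not the bucket owner, is billed for this request.
	body, err := bucket.GetObject("my-object", oss.RequestPayer(oss.Requester))
	if err != nil {
		// handle error and return
	}
	defer body.Close()
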
+ 69 - 51
oss/download.go

@@ -5,11 +5,14 @@ import (
 	"encoding/base64"
 	"encoding/json"
 	"errors"
+	"fmt"
 	"hash"
 	"hash/crc64"
 	"io"
 	"io/ioutil"
+	"net/http"
 	"os"
+	"path/filepath"
 	"strconv"
 )
 
@@ -27,25 +30,34 @@ func (bucket Bucket) DownloadFile(objectKey, filePath string, partSize int64, op
 		return errors.New("oss: part size smaller than 1")
 	}
 
-	cpConf, err := getCpConfig(options, filePath)
-	if err != nil {
-		return err
-	}
-
 	uRange, err := getRangeConfig(options)
 	if err != nil {
 		return err
 	}
 
+	cpConf := getCpConfig(options)
 	routines := getRoutines(options)
 
-	if cpConf.IsEnable {
-		return bucket.downloadFileWithCp(objectKey, filePath, partSize, options, cpConf.FilePath, routines, uRange)
+	if cpConf != nil && cpConf.IsEnable {
+		cpFilePath := getDownloadCpFilePath(cpConf, bucket.BucketName, objectKey, filePath)
+		if cpFilePath != "" {
+			return bucket.downloadFileWithCp(objectKey, filePath, partSize, options, cpFilePath, routines, uRange)
+		}
 	}
 
 	return bucket.downloadFile(objectKey, filePath, partSize, options, routines, uRange)
 }
 
+func getDownloadCpFilePath(cpConf *cpConfig, srcBucket, srcObject, destFile string) string {
+	if cpConf.FilePath == "" && cpConf.DirPath != "" {
+		src := fmt.Sprintf("oss://%v/%v", srcBucket, srcObject)
+		absPath, _ := filepath.Abs(destFile)
+		cpFileName := getCpFileName(src, absPath)
+		cpConf.FilePath = cpConf.DirPath + string(os.PathSeparator) + cpFileName
+	}
+	return cpConf.FilePath
+}
+
 // getRangeConfig gets the download range from the options.
 func getRangeConfig(options []Option) (*unpackedRange, error) {
 	rangeOpt, err := findOption(options, HTTPHeaderRange, nil)
@@ -76,7 +88,7 @@ func defaultDownloadPartHook(part downloadPart) error {
 	return nil
 }
 
-// defaultDownloadProgressListener defines default ProgressListener, shields the ProgressListener in options of GetObject. 
+// defaultDownloadProgressListener defines default ProgressListener, shields the ProgressListener in options of GetObject.
 type defaultDownloadProgressListener struct {
 }
 
@@ -168,27 +180,8 @@ type downloadPart struct {
 }
 
 // getDownloadParts gets download parts
-func getDownloadParts(bucket *Bucket, objectKey string, partSize int64, uRange *unpackedRange) ([]downloadPart, bool, uint64, error) {
-	meta, err := bucket.GetObjectDetailedMeta(objectKey)
-	if err != nil {
-		return nil, false, 0, err
-	}
-
+func getDownloadParts(objectSize, partSize int64, uRange *unpackedRange) []downloadPart {
 	parts := []downloadPart{}
-	objectSize, err := strconv.ParseInt(meta.Get(HTTPHeaderContentLength), 10, 0)
-	if err != nil {
-		return nil, false, 0, err
-	}
-
-	enableCRC := false
-	crcVal := (uint64)(0)
-	if bucket.getConfig().IsEnableCRC && meta.Get(HTTPHeaderOssCRC64) != "" {
-		if uRange == nil || (!uRange.hasStart && !uRange.hasEnd) {
-			enableCRC = true
-			crcVal, _ = strconv.ParseUint(meta.Get(HTTPHeaderOssCRC64), 10, 0)
-		}
-	}
-
 	part := downloadPart{}
 	i := 0
 	start, end := adjustRange(uRange, objectSize)
@@ -201,7 +194,7 @@ func getDownloadParts(bucket *Bucket, objectKey string, partSize int64, uRange *
 		parts = append(parts, part)
 		i++
 	}
-	return parts, enableCRC, crcVal, nil
+	return parts
 }
 
 // getObjectBytes gets object bytes length
@@ -232,6 +225,12 @@ func (bucket Bucket) downloadFile(objectKey, filePath string, partSize int64, op
 	tempFilePath := filePath + TempFileSuffix
 	listener := getProgressListener(options)
 
+	payerOptions := []Option{}
+	payer := getPayer(options)
+	if payer != "" {
+		payerOptions = append(payerOptions, RequestPayer(PayerType(payer)))
+	}
+
 	// If the file does not exist, create one. If it exists, the download will overwrite it.
 	fd, err := os.OpenFile(tempFilePath, os.O_WRONLY|os.O_CREATE, FilePermMode)
 	if err != nil {
@@ -239,12 +238,27 @@ func (bucket Bucket) downloadFile(objectKey, filePath string, partSize int64, op
 	}
 	fd.Close()
 
-	// Get the parts of the file
-	parts, enableCRC, expectedCRC, err := getDownloadParts(&bucket, objectKey, partSize, uRange)
+	meta, err := bucket.GetObjectDetailedMeta(objectKey, payerOptions...)
+	if err != nil {
+		return err
+	}
+
+	objectSize, err := strconv.ParseInt(meta.Get(HTTPHeaderContentLength), 10, 0)
 	if err != nil {
 		return err
 	}
 
+	enableCRC := false
+	expectedCRC := (uint64)(0)
+	if bucket.getConfig().IsEnableCRC && meta.Get(HTTPHeaderOssCRC64) != "" {
+		if uRange == nil || (!uRange.hasStart && !uRange.hasEnd) {
+			enableCRC = true
+			expectedCRC, _ = strconv.ParseUint(meta.Get(HTTPHeaderOssCRC64), 10, 0)
+		}
+	}
+
+	// Get the parts of the file
+	parts := getDownloadParts(objectSize, partSize, uRange)
 	jobs := make(chan downloadPart, len(parts))
 	results := make(chan downloadPart, len(parts))
 	failed := make(chan error)
@@ -325,7 +339,7 @@ type objectStat struct {
 }
 
 // isValid checks whether the checkpoint data is valid: it returns true when the checkpoint is intact and the object has not been updated.
-func (cp downloadCheckpoint) isValid(bucket *Bucket, objectKey string, uRange *unpackedRange) (bool, error) {
+func (cp downloadCheckpoint) isValid(meta http.Header, uRange *unpackedRange) (bool, error) {
 	// Compare the CP's Magic and the MD5
 	cpb := cp
 	cpb.MD5 = ""
@@ -337,12 +351,6 @@ func (cp downloadCheckpoint) isValid(bucket *Bucket, objectKey string, uRange *u
 		return false, nil
 	}
 
-	// Ensure the object is not updated.
-	meta, err := bucket.GetObjectDetailedMeta(objectKey)
-	if err != nil {
-		return false, err
-	}
-
 	objectSize, err := strconv.ParseInt(meta.Get(HTTPHeaderContentLength), 10, 0)
 	if err != nil {
 		return false, err
@@ -424,18 +432,12 @@ func (cp downloadCheckpoint) getCompletedBytes() int64 {
 }
 
 // prepare initiates download tasks
-func (cp *downloadCheckpoint) prepare(bucket *Bucket, objectKey, filePath string, partSize int64, uRange *unpackedRange) error {
+func (cp *downloadCheckpoint) prepare(meta http.Header, bucket *Bucket, objectKey, filePath string, partSize int64, uRange *unpackedRange) error {
 	// CP
 	cp.Magic = downloadCpMagic
 	cp.FilePath = filePath
 	cp.Object = objectKey
 
-	// Object
-	meta, err := bucket.GetObjectDetailedMeta(objectKey)
-	if err != nil {
-		return err
-	}
-
 	objectSize, err := strconv.ParseInt(meta.Get(HTTPHeaderContentLength), 10, 0)
 	if err != nil {
 		return err
@@ -445,11 +447,15 @@ func (cp *downloadCheckpoint) prepare(bucket *Bucket, objectKey, filePath string
 	cp.ObjStat.LastModified = meta.Get(HTTPHeaderLastModified)
 	cp.ObjStat.Etag = meta.Get(HTTPHeaderEtag)
 
-	// Parts
-	cp.Parts, cp.enableCRC, cp.CRC, err = getDownloadParts(bucket, objectKey, partSize, uRange)
-	if err != nil {
-		return err
+	if bucket.getConfig().IsEnableCRC && meta.Get(HTTPHeaderOssCRC64) != "" {
+		if uRange == nil || (!uRange.hasStart && !uRange.hasEnd) {
+			cp.enableCRC = true
+			cp.CRC, _ = strconv.ParseUint(meta.Get(HTTPHeaderOssCRC64), 10, 0)
+		}
 	}
+
+	// Parts
+	cp.Parts = getDownloadParts(objectSize, partSize, uRange)
 	cp.PartStat = make([]bool, len(cp.Parts))
 	for i := range cp.PartStat {
 		cp.PartStat[i] = false
@@ -468,6 +474,12 @@ func (bucket Bucket) downloadFileWithCp(objectKey, filePath string, partSize int
 	tempFilePath := filePath + TempFileSuffix
 	listener := getProgressListener(options)
 
+	payerOptions := []Option{}
+	payer := getPayer(options)
+	if payer != "" {
+		payerOptions = append(payerOptions, RequestPayer(PayerType(payer)))
+	}
+
 	// Load checkpoint data.
 	dcp := downloadCheckpoint{}
 	err := dcp.load(cpFilePath)
@@ -475,10 +487,16 @@ func (bucket Bucket) downloadFileWithCp(objectKey, filePath string, partSize int
 		os.Remove(cpFilePath)
 	}
 
+	// Get the object detailed meta.
+	meta, err := bucket.GetObjectDetailedMeta(objectKey, payerOptions...)
+	if err != nil {
+		return err
+	}
+
 	// Load error or data invalid. Re-initialize the download.
-	valid, err := dcp.isValid(&bucket, objectKey, uRange)
+	valid, err := dcp.isValid(meta, uRange)
 	if err != nil || !valid {
-		if err = dcp.prepare(&bucket, objectKey, filePath, partSize, uRange); err != nil {
+		if err = dcp.prepare(meta, &bucket, objectKey, filePath, partSize, uRange); err != nil {
 			return err
 		}
 		os.Remove(cpFilePath)

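DownloadFile now resolves the checkpoint file either from Checkpoint(true, path) or from the new CheckpointDir(true, dir); with a directory, the file name is derived from MD5 sums of the oss:// source URL and the absolute destination path (getCpFileName, defined in oss/upload.go below). A hedged sketch with placeholder paths:

	// Resumable download; "./cp" must be an existing directory, and the
	// checkpoint file name inside it is derived automatically.
	err := bucket.DownloadFile("my-object", "local.file", 100*1024,
		oss.Routines(3), oss.CheckpointDir(true, "./cp"))
	if err != nil {
		// handle error
	}
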
+ 39 - 26
oss/download_test.go

@@ -146,7 +146,7 @@ func (s *OssDownloadSuite) TestDownloadRoutineWithRecovery(c *C) {
 
 	// Download a file with default checkpoint
 	downloadPartHooker = DownErrorHooker
-	err = s.bucket.DownloadFile(objectName, newFile, 100*1024, Checkpoint(true, ""))
+	err = s.bucket.DownloadFile(objectName, newFile, 100*1024, Checkpoint(true, newFile+".cp"))
 	c.Assert(err, NotNil)
 	c.Assert(err.Error(), Equals, "ErrorHooker")
 	downloadPartHooker = defaultDownloadPartHook
@@ -165,9 +165,9 @@ func (s *OssDownloadSuite) TestDownloadRoutineWithRecovery(c *C) {
 	c.Assert(len(dcp.Parts), Equals, 5)
 	c.Assert(len(dcp.todoParts()), Equals, 1)
 
-	err = s.bucket.DownloadFile(objectName, newFile, 100*1024, Checkpoint(true, ""))
+	err = s.bucket.DownloadFile(objectName, newFile, 100*1024, Checkpoint(true, newFile+".cp"))
 	c.Assert(err, IsNil)
-
 	// Download succeeded; the checkpoint file has been deleted
 	err = dcp.load(newFile + ".cp")
 	c.Assert(err, NotNil)
 
@@ -175,17 +175,30 @@ func (s *OssDownloadSuite) TestDownloadRoutineWithRecovery(c *C) {
 	c.Assert(err, IsNil)
 	c.Assert(eq, Equals, true)
 
-	// Resumable download with checkpoint
+	// Resumable download with empty checkpoint file path
+	downloadPartHooker = DownErrorHooker
+	err = s.bucket.DownloadFile(objectName, newFile, 100*1024, Checkpoint(true, ""))
+	c.Assert(err, NotNil)
+	c.Assert(err.Error(), Equals, "ErrorHooker")
+	downloadPartHooker = defaultDownloadPartHook
+
+	dcp = downloadCheckpoint{}
+	err = dcp.load(newFile + ".cp")
+	c.Assert(err, NotNil)
+
+	// Resumable download with checkpoint dir
 	os.Remove(newFile)
 	downloadPartHooker = DownErrorHooker
-	err = s.bucket.DownloadFile(objectName, newFile, 100*1024, Checkpoint(true, objectName+".cp"))
+	err = s.bucket.DownloadFile(objectName, newFile, 100*1024, CheckpointDir(true, "./"))
 	c.Assert(err, NotNil)
 	c.Assert(err.Error(), Equals, "ErrorHooker")
 	downloadPartHooker = defaultDownloadPartHook
 
 	// Check
 	dcp = downloadCheckpoint{}
-	err = dcp.load(objectName + ".cp")
+	cpConf := cpConfig{IsEnable: true, DirPath: "./"}
+	cpFilePath := getDownloadCpFilePath(&cpConf, s.bucket.BucketName, objectName, newFile)
+	err = dcp.load(cpFilePath)
 	c.Assert(err, IsNil)
 	c.Assert(dcp.Magic, Equals, downloadCpMagic)
 	c.Assert(len(dcp.MD5), Equals, len("LC34jZU5xK4hlxi3Qn3XGQ=="))
@@ -197,10 +210,10 @@ func (s *OssDownloadSuite) TestDownloadRoutineWithRecovery(c *C) {
 	c.Assert(len(dcp.Parts), Equals, 5)
 	c.Assert(len(dcp.todoParts()), Equals, 1)
 
-	err = s.bucket.DownloadFile(objectName, newFile, 100*1024, Checkpoint(true, objectName+".cp"))
+	err = s.bucket.DownloadFile(objectName, newFile, 100*1024, CheckpointDir(true, "./"))
 	c.Assert(err, IsNil)
-
-	err = dcp.load(objectName + ".cp")
+	// Download succeeded; the checkpoint file has been deleted
+	err = dcp.load(cpFilePath)
 	c.Assert(err, NotNil)
 
 	eq, err = compareFiles(fileName, newFile)
@@ -209,7 +222,7 @@ func (s *OssDownloadSuite) TestDownloadRoutineWithRecovery(c *C) {
 
 	// Resumable download with checkpoint, completed in a single run. No error is expected in the download procedure.
 	os.Remove(newFile)
-	err = s.bucket.DownloadFile(objectName, newFile, 100*1024, Checkpoint(true, ""))
+	err = s.bucket.DownloadFile(objectName, newFile, 100*1024, Checkpoint(true, newFile+".cp"))
 	c.Assert(err, IsNil)
 
 	err = dcp.load(newFile + ".cp")
@@ -221,7 +234,7 @@ func (s *OssDownloadSuite) TestDownloadRoutineWithRecovery(c *C) {
 
 	// Resumable download with checkpoint, completed in a single run. No error is expected in the download procedure.
 	os.Remove(newFile)
-	err = s.bucket.DownloadFile(objectName, newFile, 100*1024, Routines(10), Checkpoint(true, ""))
+	err = s.bucket.DownloadFile(objectName, newFile, 100*1024, Routines(10), Checkpoint(true, newFile+".cp"))
 	c.Assert(err, IsNil)
 
 	err = dcp.load(newFile + ".cp")
@@ -263,7 +276,7 @@ func (s *OssDownloadSuite) TestDownloadOption(c *C) {
 	c.Assert(err, NotNil)
 
 	// IfMatch
-	err = s.bucket.DownloadFile(objectName, newFile, 100*1024, Routines(3), Checkpoint(true, ""), IfMatch(meta.Get("Etag")))
+	err = s.bucket.DownloadFile(objectName, newFile, 100*1024, Routines(3), IfMatch(meta.Get("Etag")))
 	c.Assert(err, IsNil)
 
 	eq, err = compareFiles(fileName, newFile)
@@ -271,7 +284,7 @@ func (s *OssDownloadSuite) TestDownloadOption(c *C) {
 	c.Assert(eq, Equals, true)
 
 	// IfNoneMatch
-	err = s.bucket.DownloadFile(objectName, newFile, 100*1024, Routines(3), Checkpoint(true, ""), IfNoneMatch(meta.Get("Etag")))
+	err = s.bucket.DownloadFile(objectName, newFile, 100*1024, Routines(3), IfNoneMatch(meta.Get("Etag")))
 	c.Assert(err, NotNil)
 }
 
@@ -287,7 +300,7 @@ func (s *OssDownloadSuite) TestDownloadObjectChange(c *C) {
 
 	// Download with default checkpoint
 	downloadPartHooker = DownErrorHooker
-	err = s.bucket.DownloadFile(objectName, newFile, 100*1024, Checkpoint(true, ""))
+	err = s.bucket.DownloadFile(objectName, newFile, 100*1024, Checkpoint(true, newFile+".cp"))
 	c.Assert(err, NotNil)
 	c.Assert(err.Error(), Equals, "ErrorHooker")
 	downloadPartHooker = defaultDownloadPartHook
@@ -295,7 +308,7 @@ func (s *OssDownloadSuite) TestDownloadObjectChange(c *C) {
 	err = s.bucket.UploadFile(objectName, fileName, 100*1024, Routines(3))
 	c.Assert(err, IsNil)
 
-	err = s.bucket.DownloadFile(objectName, newFile, 100*1024, Checkpoint(true, ""))
+	err = s.bucket.DownloadFile(objectName, newFile, 100*1024, Checkpoint(true, newFile+".cp"))
 	c.Assert(err, IsNil)
 
 	eq, err := compareFiles(fileName, newFile)
@@ -335,23 +348,23 @@ func (s *OssDownloadSuite) TestDownloadNegative(c *C) {
 	c.Assert(err, IsNil)
 
 	// Local file does not exist
-	err = s.bucket.DownloadFile(objectName, "/tmp/", 100*1024, Checkpoint(true, ""))
+	err = s.bucket.DownloadFile(objectName, "/tmp/", 100*1024)
 	c.Assert(err, NotNil)
 
-	err = s.bucket.DownloadFile(objectName, "/tmp/", 100*1024, Routines(2), Checkpoint(true, ""))
+	err = s.bucket.DownloadFile(objectName, "/tmp/", 100*1024, Routines(2))
 	c.Assert(err, NotNil)
 
 	// Invalid part size
-	err = s.bucket.DownloadFile(objectName, newFile, -1, Checkpoint(true, ""))
+	err = s.bucket.DownloadFile(objectName, newFile, -1)
 	c.Assert(err, NotNil)
 
-	err = s.bucket.DownloadFile(objectName, newFile, 0, Routines(2), Checkpoint(true, ""))
+	err = s.bucket.DownloadFile(objectName, newFile, 0, Routines(2))
 	c.Assert(err, NotNil)
 
-	err = s.bucket.DownloadFile(objectName, newFile, 1024*1024*1024*100, Checkpoint(true, ""))
+	err = s.bucket.DownloadFile(objectName, newFile, 1024*1024*1024*100)
 	c.Assert(err, NotNil)
 
-	err = s.bucket.DownloadFile(objectName, newFile, 1024*1024*1024*100, Routines(2), Checkpoint(true, ""))
+	err = s.bucket.DownloadFile(objectName, newFile, 1024*1024*1024*100, Routines(2))
 	c.Assert(err, NotNil)
 }
 
@@ -457,7 +470,7 @@ func (s *OssDownloadSuite) TestDownloadWithCheckoutAndRange(c *C) {
 	newFileGet := "down-new-file-tdwcr-2.jpg"
 
 	// Upload a file
-	err := s.bucket.UploadFile(objectName, fileName, 100*1024, Routines(3))
+	err := s.bucket.UploadFile(objectName, fileName, 100*1024, Routines(3), Checkpoint(true, fileName+".cp"))
 	c.Assert(err, IsNil)
 
 	fileSize, err := getFileSize(fileName)
@@ -465,7 +478,7 @@ func (s *OssDownloadSuite) TestDownloadWithCheckoutAndRange(c *C) {
 
 	// Download with range, from 1024 to 4096
 	os.Remove(newFile)
-	err = s.bucket.DownloadFile(objectName, newFile, 100*1024, Routines(3), Checkpoint(true, ""), Range(1024, 4095))
+	err = s.bucket.DownloadFile(objectName, newFile, 100*1024, Routines(3), Checkpoint(true, newFile+".cp"), Range(1024, 4095))
 	c.Assert(err, IsNil)
 
 	// Check
@@ -484,7 +497,7 @@ func (s *OssDownloadSuite) TestDownloadWithCheckoutAndRange(c *C) {
 
 	// Download with range, from 1024 to 4096
 	os.Remove(newFile)
-	err = s.bucket.DownloadFile(objectName, newFile, 1024, Routines(3), Checkpoint(true, ""), NormalizedRange("1024-4095"))
+	err = s.bucket.DownloadFile(objectName, newFile, 1024, Routines(3), Checkpoint(true, newFile+".cp"), NormalizedRange("1024-4095"))
 	c.Assert(err, IsNil)
 
 	// Check
@@ -503,7 +516,7 @@ func (s *OssDownloadSuite) TestDownloadWithCheckoutAndRange(c *C) {
 
 	// Download with range, from 2048 to the end
 	os.Remove(newFile)
-	err = s.bucket.DownloadFile(objectName, newFile, 1024*1024, Routines(3), Checkpoint(true, ""), NormalizedRange("2048-"))
+	err = s.bucket.DownloadFile(objectName, newFile, 1024*1024, Routines(3), Checkpoint(true, newFile+".cp"), NormalizedRange("2048-"))
 	c.Assert(err, IsNil)
 
 	// Check
@@ -522,7 +535,7 @@ func (s *OssDownloadSuite) TestDownloadWithCheckoutAndRange(c *C) {
 
 	// Download with range, the last 4096 bytes
 	os.Remove(newFile)
-	err = s.bucket.DownloadFile(objectName, newFile, 1024, Routines(3), Checkpoint(true, ""), NormalizedRange("-4096"))
+	err = s.bucket.DownloadFile(objectName, newFile, 1024, Routines(3), Checkpoint(true, newFile+".cp"), NormalizedRange("-4096"))
 	c.Assert(err, IsNil)
 
 	// Check

+ 59 - 51
oss/multicopy.go

@@ -5,9 +5,10 @@ import (
 	"encoding/base64"
 	"encoding/json"
 	"errors"
+	"fmt"
 	"io/ioutil"
+	"net/http"
 	"os"
-	"path/filepath"
 	"strconv"
 )
 
@@ -27,22 +28,30 @@ func (bucket Bucket) CopyFile(srcBucketName, srcObjectKey, destObjectKey string,
 		return errors.New("oss: part size invalid range (1024KB, 5GB]")
 	}
 
-	cpConf, err := getCpConfig(options, filepath.Base(destObjectKey))
-	if err != nil {
-		return err
-	}
-
+	cpConf := getCpConfig(options)
 	routines := getRoutines(options)
 
-	if cpConf.IsEnable {
-		return bucket.copyFileWithCp(srcBucketName, srcObjectKey, destBucketName, destObjectKey,
-			partSize, options, cpConf.FilePath, routines)
+	if cpConf != nil && cpConf.IsEnable {
+		cpFilePath := getCopyCpFilePath(cpConf, srcBucketName, srcObjectKey, destBucketName, destObjectKey)
+		if cpFilePath != "" {
+			return bucket.copyFileWithCp(srcBucketName, srcObjectKey, destBucketName, destObjectKey, partSize, options, cpFilePath, routines)
+		}
 	}
 
 	return bucket.copyFile(srcBucketName, srcObjectKey, destBucketName, destObjectKey,
 		partSize, options, routines)
 }
 
+func getCopyCpFilePath(cpConf *cpConfig, srcBucket, srcObject, destBucket, destObject string) string {
+	if cpConf.FilePath == "" && cpConf.DirPath != "" {
+		dest := fmt.Sprintf("oss://%v/%v", destBucket, destObject)
+		src := fmt.Sprintf("oss://%v/%v", srcBucket, srcObject)
+		cpFileName := getCpFileName(src, dest)
+		cpConf.FilePath = cpConf.DirPath + string(os.PathSeparator) + cpFileName
+	}
+	return cpConf.FilePath
+}
+
 // ----- Concurrently copy without checkpoint ---------
 
 // copyWorkerArg defines the copy worker arguments
@@ -103,18 +112,8 @@ type copyPart struct {
 }
 
 // getCopyParts calculates copy parts
-func getCopyParts(bucket *Bucket, objectKey string, partSize int64) ([]copyPart, error) {
-	meta, err := bucket.GetObjectDetailedMeta(objectKey)
-	if err != nil {
-		return nil, err
-	}
-
+func getCopyParts(objectSize, partSize int64) []copyPart {
 	parts := []copyPart{}
-	objectSize, err := strconv.ParseInt(meta.Get(HTTPHeaderContentLength), 10, 0)
-	if err != nil {
-		return nil, err
-	}
-
 	part := copyPart{}
 	i := 0
 	for offset := int64(0); offset < objectSize; offset += partSize {
@@ -124,7 +123,7 @@ func getCopyParts(bucket *Bucket, objectKey string, partSize int64) ([]copyPart,
 		parts = append(parts, part)
 		i++
 	}
-	return parts, nil
+	return parts
 }
 
 // getSrcObjectBytes gets the source file size
@@ -143,12 +142,24 @@ func (bucket Bucket) copyFile(srcBucketName, srcObjectKey, destBucketName, destO
 	srcBucket, err := bucket.Client.Bucket(srcBucketName)
 	listener := getProgressListener(options)
 
-	// Get copy parts
-	parts, err := getCopyParts(srcBucket, srcObjectKey, partSize)
+	payerOptions := []Option{}
+	payer := getPayer(options)
+	if payer != "" {
+		payerOptions = append(payerOptions, RequestPayer(PayerType(payer)))
+	}
+
+	meta, err := srcBucket.GetObjectDetailedMeta(srcObjectKey, payerOptions...)
 	if err != nil {
 		return err
 	}
 
+	objectSize, err := strconv.ParseInt(meta.Get(HTTPHeaderContentLength), 10, 0)
+	if err != nil {
+		return err
+	}
+
+	// Get copy parts
+	parts := getCopyParts(objectSize, partSize)
 	// Initialize the multipart upload
 	imur, err := descBucket.InitiateMultipartUpload(destObjectKey, options...)
 	if err != nil {
@@ -166,7 +177,7 @@ func (bucket Bucket) copyFile(srcBucketName, srcObjectKey, destBucketName, destO
 	publishProgress(listener, event)
 
 	// Start to copy workers
-	arg := copyWorkerArg{descBucket, imur, srcBucketName, srcObjectKey, options, copyPartHooker}
+	arg := copyWorkerArg{descBucket, imur, srcBucketName, srcObjectKey, payerOptions, copyPartHooker}
 	for w := 1; w <= routines; w++ {
 		go copyWorker(w, arg, jobs, results, failed, die)
 	}
@@ -187,7 +198,7 @@ func (bucket Bucket) copyFile(srcBucketName, srcObjectKey, destBucketName, destO
 			publishProgress(listener, event)
 		case err := <-failed:
 			close(die)
-			descBucket.AbortMultipartUpload(imur)
+			descBucket.AbortMultipartUpload(imur, payerOptions...)
 			event = newProgressEvent(TransferFailedEvent, completedBytes, totalBytes)
 			publishProgress(listener, event)
 			return err
@@ -202,9 +213,9 @@ func (bucket Bucket) copyFile(srcBucketName, srcObjectKey, destBucketName, destO
 	publishProgress(listener, event)
 
 	// Complete the multipart upload
-	_, err = descBucket.CompleteMultipartUpload(imur, ups)
+	_, err = descBucket.CompleteMultipartUpload(imur, ups, payerOptions...)
 	if err != nil {
-		bucket.AbortMultipartUpload(imur)
+		bucket.AbortMultipartUpload(imur, payerOptions...)
 		return err
 	}
 	return nil
@@ -229,7 +240,7 @@ type copyCheckpoint struct {
 }
 
 // isValid checks if the data is valid, which means the CP is valid and the object is not updated.
-func (cp copyCheckpoint) isValid(bucket *Bucket, objectKey string) (bool, error) {
+func (cp copyCheckpoint) isValid(meta http.Header) (bool, error) {
 	// Compare CP's magic number and the MD5.
 	cpb := cp
 	cpb.MD5 = ""
@@ -241,12 +252,6 @@ func (cp copyCheckpoint) isValid(bucket *Bucket, objectKey string) (bool, error)
 		return false, nil
 	}
 
-	// Make sure the object is not updated.
-	meta, err := bucket.GetObjectDetailedMeta(objectKey)
-	if err != nil {
-		return false, err
-	}
-
 	objectSize, err := strconv.ParseInt(meta.Get(HTTPHeaderContentLength), 10, 0)
 	if err != nil {
 		return false, err
@@ -326,7 +331,7 @@ func (cp copyCheckpoint) getCompletedBytes() int64 {
 }
 
 // prepare initializes the multipart upload
-func (cp *copyCheckpoint) prepare(srcBucket *Bucket, srcObjectKey string, destBucket *Bucket, destObjectKey string,
+func (cp *copyCheckpoint) prepare(meta http.Header, srcBucket *Bucket, srcObjectKey string, destBucket *Bucket, destObjectKey string,
 	partSize int64, options []Option) error {
 	// CP
 	cp.Magic = copyCpMagic
@@ -335,12 +340,6 @@ func (cp *copyCheckpoint) prepare(srcBucket *Bucket, srcObjectKey string, destBu
 	cp.DestBucketName = destBucket.BucketName
 	cp.DestObjectKey = destObjectKey
 
-	// Object
-	meta, err := srcBucket.GetObjectDetailedMeta(srcObjectKey)
-	if err != nil {
-		return err
-	}
-
 	objectSize, err := strconv.ParseInt(meta.Get(HTTPHeaderContentLength), 10, 0)
 	if err != nil {
 		return err
@@ -351,10 +350,7 @@ func (cp *copyCheckpoint) prepare(srcBucket *Bucket, srcObjectKey string, destBu
 	cp.ObjStat.Etag = meta.Get(HTTPHeaderEtag)
 
 	// Parts
-	cp.Parts, err = getCopyParts(srcBucket, srcObjectKey, partSize)
-	if err != nil {
-		return err
-	}
+	cp.Parts = getCopyParts(objectSize, partSize)
 	cp.PartStat = make([]bool, len(cp.Parts))
 	for i := range cp.PartStat {
 		cp.PartStat[i] = false
@@ -371,10 +367,10 @@ func (cp *copyCheckpoint) prepare(srcBucket *Bucket, srcObjectKey string, destBu
 	return nil
 }
 
-func (cp *copyCheckpoint) complete(bucket *Bucket, parts []UploadPart, cpFilePath string) error {
+func (cp *copyCheckpoint) complete(bucket *Bucket, parts []UploadPart, cpFilePath string, options []Option) error {
 	imur := InitiateMultipartUploadResult{Bucket: cp.DestBucketName,
 		Key: cp.DestObjectKey, UploadID: cp.CopyID}
-	_, err := bucket.CompleteMultipartUpload(imur, parts)
+	_, err := bucket.CompleteMultipartUpload(imur, parts, options...)
 	if err != nil {
 		return err
 	}
@@ -389,6 +385,12 @@ func (bucket Bucket) copyFileWithCp(srcBucketName, srcObjectKey, destBucketName,
 	srcBucket, err := bucket.Client.Bucket(srcBucketName)
 	listener := getProgressListener(options)
 
+	payerOptions := []Option{}
+	payer := getPayer(options)
+	if payer != "" {
+		payerOptions = append(payerOptions, RequestPayer(PayerType(payer)))
+	}
+
 	// Load CP data
 	ccp := copyCheckpoint{}
 	err = ccp.load(cpFilePath)
@@ -396,10 +398,16 @@ func (bucket Bucket) copyFileWithCp(srcBucketName, srcObjectKey, destBucketName,
 		os.Remove(cpFilePath)
 	}
 
+	// Make sure the object is not updated.
+	meta, err := srcBucket.GetObjectDetailedMeta(srcObjectKey, payerOptions...)
+	if err != nil {
+		return err
+	}
+
 	// Load error or the CP data is invalid---reinitialize
-	valid, err := ccp.isValid(srcBucket, srcObjectKey)
+	valid, err := ccp.isValid(meta)
 	if err != nil || !valid {
-		if err = ccp.prepare(srcBucket, srcObjectKey, descBucket, destObjectKey, partSize, options); err != nil {
+		if err = ccp.prepare(meta, srcBucket, srcObjectKey, descBucket, destObjectKey, partSize, options); err != nil {
 			return err
 		}
 		os.Remove(cpFilePath)
@@ -422,7 +430,7 @@ func (bucket Bucket) copyFileWithCp(srcBucketName, srcObjectKey, destBucketName,
 	publishProgress(listener, event)
 
 	// Start the worker coroutines
-	arg := copyWorkerArg{descBucket, imur, srcBucketName, srcObjectKey, options, copyPartHooker}
+	arg := copyWorkerArg{descBucket, imur, srcBucketName, srcObjectKey, payerOptions, copyPartHooker}
 	for w := 1; w <= routines; w++ {
 		go copyWorker(w, arg, jobs, results, failed, die)
 	}
@@ -456,5 +464,5 @@ func (bucket Bucket) copyFileWithCp(srcBucketName, srcObjectKey, destBucketName,
 	event = newProgressEvent(TransferCompletedEvent, completedBytes, ccp.ObjStat.Size)
 	publishProgress(listener, event)
 
-	return ccp.complete(descBucket, ccp.CopyParts, cpFilePath)
+	return ccp.complete(descBucket, ccp.CopyParts, cpFilePath, payerOptions)
 }

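CopyFile follows the same pattern: the checkpoint file can be pinned explicitly or derived from the oss:// URLs of source and destination. A sketch of a cross-bucket resumable copy; bucket and object names are placeholders, and destBucket is a handle on the destination bucket:

	// Server-side copy issued against the destination bucket.
	err := destBucket.CopyFile("src-bucket", "src-object", "dest-object", 100*1024,
		oss.Routines(3), oss.CheckpointDir(true, "./cp"))
	if err != nil {
		// handle error
	}
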
+ 26 - 14
oss/multicopy_test.go

@@ -254,7 +254,7 @@ func (s *OssCopySuite) TestCopyRoutineWithRecovery(c *C) {
 	// Copy object with checkpoint enabled, single routine.
 	// Copy 4 parts---the CopyErrorHooker makes sure the copy of part 5 will fail.
 	copyPartHooker = CopyErrorHooker
-	err = s.bucket.CopyFile(bucketName, srcObjectName, destObjectName, 1024*100, Checkpoint(true, ""))
+	err = s.bucket.CopyFile(bucketName, srcObjectName, destObjectName, 1024*100, Checkpoint(true, destObjectName+".cp"))
 	c.Assert(err, NotNil)
 	c.Assert(err.Error(), Equals, "ErrorHooker")
 	copyPartHooker = defaultCopyPartHook
@@ -278,7 +278,7 @@ func (s *OssCopySuite) TestCopyRoutineWithRecovery(c *C) {
 	c.Assert(ccp.PartStat[4], Equals, false)
 
 	// Second copy, finish the last part
-	err = s.bucket.CopyFile(bucketName, srcObjectName, destObjectName, 1024*100, Checkpoint(true, ""))
+	err = s.bucket.CopyFile(bucketName, srcObjectName, destObjectName, 1024*100, Checkpoint(true, destObjectName+".cp"))
 	c.Assert(err, IsNil)
 
 	err = s.bucket.GetObjectToFile(destObjectName, newFile)
@@ -295,16 +295,28 @@ func (s *OssCopySuite) TestCopyRoutineWithRecovery(c *C) {
 	err = ccp.load(fileName + ".cp")
 	c.Assert(err, NotNil)
 
-	// Specify Routine and CP's path
+	// Multicopy with an empty checkpoint file path
+	copyPartHooker = CopyErrorHooker
+	err = s.bucket.CopyFile(bucketName, srcObjectName, destObjectName, 1024*100, Checkpoint(true, ""))
+	c.Assert(err, NotNil)
+	c.Assert(err.Error(), Equals, "ErrorHooker")
+	copyPartHooker = defaultCopyPartHook
+	ccp = copyCheckpoint{}
+	err = ccp.load(destObjectName + ".cp")
+	c.Assert(err, NotNil)
+
+	// Multicopy with a checkpoint directory
 	copyPartHooker = CopyErrorHooker
-	err = s.bucket.CopyFile(bucketName, srcObjectName, destObjectName, 1024*100, Routines(2), Checkpoint(true, srcObjectName+".cp"))
+	err = s.bucket.CopyFile(bucketName, srcObjectName, destObjectName, 1024*100, Routines(2), CheckpointDir(true, "./"))
 	c.Assert(err, NotNil)
 	c.Assert(err.Error(), Equals, "ErrorHooker")
 	copyPartHooker = defaultCopyPartHook
 
 	// Check CP
 	ccp = copyCheckpoint{}
-	err = ccp.load(srcObjectName + ".cp")
+	cpConf := cpConfig{IsEnable: true, DirPath: "./"}
+	cpFilePath := getCopyCpFilePath(&cpConf, bucketName, srcObjectName, s.bucket.BucketName, destObjectName)
+	err = ccp.load(cpFilePath)
 	c.Assert(err, IsNil)
 	c.Assert(ccp.Magic, Equals, copyCpMagic)
 	c.Assert(len(ccp.MD5), Equals, len("LC34jZU5xK4hlxi3Qn3XGQ=="))
@@ -321,7 +333,7 @@ func (s *OssCopySuite) TestCopyRoutineWithRecovery(c *C) {
 	c.Assert(ccp.PartStat[4], Equals, false)
 
 	// Second copy, finish the last part.
-	err = s.bucket.CopyFile(bucketName, srcObjectName, destObjectName, 1024*100, Routines(2), Checkpoint(true, srcObjectName+".cp"))
+	err = s.bucket.CopyFile(bucketName, srcObjectName, destObjectName, 1024*100, Routines(2), CheckpointDir(true, "./"))
 	c.Assert(err, IsNil)
 
 	err = s.bucket.GetObjectToFile(destObjectName, newFile)
@@ -339,7 +351,7 @@ func (s *OssCopySuite) TestCopyRoutineWithRecovery(c *C) {
 	c.Assert(err, NotNil)
 
 	// First copy without error.
-	err = s.bucket.CopyFile(bucketName, srcObjectName, destObjectName, 1024*100, Routines(3), Checkpoint(true, ""))
+	err = s.bucket.CopyFile(bucketName, srcObjectName, destObjectName, 1024*100, Routines(3), Checkpoint(true, destObjectName+".cp"))
 	c.Assert(err, IsNil)
 
 	err = s.bucket.GetObjectToFile(destObjectName, newFile)
@@ -354,7 +366,7 @@ func (s *OssCopySuite) TestCopyRoutineWithRecovery(c *C) {
 	os.Remove(newFile)
 
 	// Copy with multiple coroutines, no errors.
-	err = s.bucket.CopyFile(bucketName, srcObjectName, destObjectName, 1024*100, Routines(10), Checkpoint(true, ""))
+	err = s.bucket.CopyFile(bucketName, srcObjectName, destObjectName, 1024*100, Routines(10), Checkpoint(true, destObjectName+".cp"))
 	c.Assert(err, IsNil)
 
 	err = s.bucket.GetObjectToFile(destObjectName, newFile)
@@ -369,7 +381,7 @@ func (s *OssCopySuite) TestCopyRoutineWithRecovery(c *C) {
 	os.Remove(newFile)
 
 	// Option
-	err = s.bucket.CopyFile(bucketName, srcObjectName, destObjectName, 1024*100, Routines(5), Checkpoint(true, ""), Meta("myprop", "mypropval"))
+	err = s.bucket.CopyFile(bucketName, srcObjectName, destObjectName, 1024*100, Routines(5), Checkpoint(true, destObjectName+".cp"), Meta("myprop", "mypropval"))
 	c.Assert(err, IsNil)
 
 	meta, err := s.bucket.GetObjectDetailedMeta(destObjectName)
@@ -398,19 +410,19 @@ func (s *OssCopySuite) TestCopyRoutineWithRecoveryNegative(c *C) {
 	destObjectName := srcObjectName + "-copy"
 
 	// Source bucket does not exist
-	err := s.bucket.CopyFile("NotExist", srcObjectName, destObjectName, 100*1024, Checkpoint(true, ""))
+	err := s.bucket.CopyFile("NotExist", srcObjectName, destObjectName, 100*1024, Checkpoint(true, destObjectName+".cp"))
 	c.Assert(err, NotNil)
 	c.Assert(err, NotNil)
 
 	// Source object does not exist
-	err = s.bucket.CopyFile(bucketName, "NotExist", destObjectName, 100*1024, Routines(2), Checkpoint(true, ""))
+	err = s.bucket.CopyFile(bucketName, "NotExist", destObjectName, 100*1024, Routines(2), Checkpoint(true, destObjectName+".cp"))
 	c.Assert(err, NotNil)
 
 	// Specify part size is invalid.
-	err = s.bucket.CopyFile(bucketName, srcObjectName, destObjectName, 1024, Checkpoint(true, ""))
+	err = s.bucket.CopyFile(bucketName, srcObjectName, destObjectName, 1024, Checkpoint(true, destObjectName+".cp"))
 	c.Assert(err, NotNil)
 
-	err = s.bucket.CopyFile(bucketName, srcObjectName, destObjectName, 1024*1024*1024*100, Routines(2), Checkpoint(true, ""))
+	err = s.bucket.CopyFile(bucketName, srcObjectName, destObjectName, 1024*1024*1024*100, Routines(2), Checkpoint(true, destObjectName+".cp"))
 	c.Assert(err, NotNil)
 }
 
@@ -434,7 +446,7 @@ func (s *OssCopySuite) TestCopyFileCrossBucket(c *C) {
 	os.Remove(newFile)
 
 	// Copy files
-	err = destBucket.CopyFile(bucketName, srcObjectName, destObjectName, 1024*100, Routines(5), Checkpoint(true, ""))
+	err = destBucket.CopyFile(bucketName, srcObjectName, destObjectName, 1024*100, Routines(5), Checkpoint(true, destObjectName+".cp"))
 	c.Assert(err, IsNil)
 
 	err = destBucket.GetObjectToFile(destObjectName, newFile)

+ 7 - 7
oss/multipart.go

@@ -107,11 +107,11 @@ func (bucket Bucket) UploadPartFromFile(imur InitiateMultipartUploadResult, file
 //
 func (bucket Bucket) DoUploadPart(request *UploadPartRequest, options []Option) (*UploadPartResult, error) {
 	listener := getProgressListener(options)
-	opts := []Option{ContentLength(request.PartSize)}
+	options = append(options, ContentLength(request.PartSize))
 	params := map[string]interface{}{}
 	params["partNumber"] = strconv.Itoa(request.PartNumber)
 	params["uploadId"] = request.InitResult.UploadID
-	resp, err := bucket.do("PUT", request.InitResult.Key, params, opts,
+	resp, err := bucket.do("PUT", request.InitResult.Key, params, options,
 		&io.LimitedReader{R: request.Reader, N: request.PartSize}, listener)
 	if err != nil {
 		return &UploadPartResult{}, err
@@ -183,7 +183,7 @@ func (bucket Bucket) UploadPartCopy(imur InitiateMultipartUploadResult, srcBucke
 // error    it's nil if the operation succeeds, otherwise it's an error object.
 //
 func (bucket Bucket) CompleteMultipartUpload(imur InitiateMultipartUploadResult,
-	parts []UploadPart) (CompleteMultipartUploadResult, error) {
+	parts []UploadPart, options ...Option) (CompleteMultipartUploadResult, error) {
 	var out CompleteMultipartUploadResult
 
 	sort.Sort(uploadParts(parts))
@@ -198,7 +198,7 @@ func (bucket Bucket) CompleteMultipartUpload(imur InitiateMultipartUploadResult,
 
 	params := map[string]interface{}{}
 	params["uploadId"] = imur.UploadID
-	resp, err := bucket.do("POST", imur.Key, params, nil, buffer, nil)
+	resp, err := bucket.do("POST", imur.Key, params, options, buffer, nil)
 	if err != nil {
 		return out, err
 	}
@@ -214,10 +214,10 @@ func (bucket Bucket) CompleteMultipartUpload(imur InitiateMultipartUploadResult,
 //
 // error    it's nil if the operation succeeds, otherwise it's an error object.
 //
-func (bucket Bucket) AbortMultipartUpload(imur InitiateMultipartUploadResult) error {
+func (bucket Bucket) AbortMultipartUpload(imur InitiateMultipartUploadResult, options ...Option) error {
 	params := map[string]interface{}{}
 	params["uploadId"] = imur.UploadID
-	resp, err := bucket.do("DELETE", imur.Key, params, nil, nil, nil)
+	resp, err := bucket.do("DELETE", imur.Key, params, options, nil, nil)
 	if err != nil {
 		return err
 	}
@@ -275,7 +275,7 @@ func (bucket Bucket) ListMultipartUploads(options ...Option) (ListMultipartUploa
 	}
 	params["uploads"] = nil
 
-	resp, err := bucket.do("GET", "", params, nil, nil, nil)
+	resp, err := bucket.do("GET", "", params, options, nil, nil)
 	if err != nil {
 		return out, err
 	}

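With CompleteMultipartUpload and AbortMultipartUpload now accepting variadic options, a hand-rolled multipart flow can carry the payer header through every request. A sketch with placeholder names, reusing the bucket handle from the earlier sketch:

	payer := oss.RequestPayer(oss.Requester)
	partSize := int64(100 * 1024)
	imur, err := bucket.InitiateMultipartUpload("my-object", payer)
	if err != nil {
		// handle error
	}
	part, err := bucket.UploadPartFromFile(imur, "local.file", 0, partSize, 1, payer)
	if err != nil {
		bucket.AbortMultipartUpload(imur, payer) // clean up the pending upload
	}
	_, err = bucket.CompleteMultipartUpload(imur, []oss.UploadPart{part}, payer)
	if err != nil {
		// handle error
	}
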
+ 12 - 1
oss/option.go

@@ -187,6 +187,11 @@ func CallbackVar(callbackVar string) Option {
 	return setHeader(HTTPHeaderOssCallbackVar, callbackVar)
 }
 
+// RequestPayer is an option to set the payer who pays for the request
+func RequestPayer(payerType PayerType) Option {
+	return setHeader(HTTPHeaderOSSRequester, string(payerType))
+}
+
 // Delimiter is an option to set the delimiter parameter
 func Delimiter(value string) Option {
 	return addParam("delimiter", value)
@@ -251,11 +256,17 @@ func StorageClass(value StorageClassType) Option {
 type cpConfig struct {
 	IsEnable bool
 	FilePath string
+	DirPath  string
 }
 
 // Checkpoint sets the isEnable flag and checkpoint file path for DownloadFile/UploadFile.
 func Checkpoint(isEnable bool, filePath string) Option {
-	return addArg(checkpointConfig, &cpConfig{isEnable, filePath})
+	return addArg(checkpointConfig, &cpConfig{IsEnable: isEnable, FilePath: filePath})
+}
+
+// CheckpointDir sets the isEnable flag and checkpoint dir path for DownloadFile/UploadFile.
+func CheckpointDir(isEnable bool, dirPath string) Option {
+	return addArg(checkpointConfig, &cpConfig{IsEnable: isEnable, DirPath: dirPath})
 }
 
 // Routines DownloadFile/UploadFile routine count

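Checkpoint and CheckpointDir are two ways of filling the same cpConfig: the first pins an exact checkpoint file, the second names only a directory and lets the SDK derive the file name. A sketch contrasting the two (paths and names are placeholders):

	// Explicit checkpoint file:
	err := bucket.UploadFile("my-object", "local.file", 100*1024,
		oss.Checkpoint(true, "local.file.cp"))
	if err != nil {
		// handle error
	}
	// Checkpoint directory; the .cp file name inside it is derived from
	// MD5 sums of the source path and the destination URL:
	err = bucket.UploadFile("my-object", "local.file", 100*1024,
		oss.CheckpointDir(true, "./cp"))
	if err != nil {
		// handle error
	}
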
+ 66 - 24
oss/upload.go

@@ -3,10 +3,13 @@ package oss
 import (
 	"crypto/md5"
 	"encoding/base64"
+	"encoding/hex"
 	"encoding/json"
 	"errors"
+	"fmt"
 	"io/ioutil"
 	"os"
+	"path/filepath"
 	"time"
 )
 
@@ -24,36 +27,52 @@ func (bucket Bucket) UploadFile(objectKey, filePath string, partSize int64, opti
 		return errors.New("oss: part size invalid range (1024KB, 5GB]")
 	}
 
-	cpConf, err := getCpConfig(options, filePath)
-	if err != nil {
-		return err
-	}
-
+	cpConf := getCpConfig(options)
 	routines := getRoutines(options)
 
-	if cpConf.IsEnable {
-		return bucket.uploadFileWithCp(objectKey, filePath, partSize, options, cpConf.FilePath, routines)
+	if cpConf != nil && cpConf.IsEnable {
+		cpFilePath := getUploadCpFilePath(cpConf, filePath, bucket.BucketName, objectKey)
+		if cpFilePath != "" {
+			return bucket.uploadFileWithCp(objectKey, filePath, partSize, options, cpFilePath, routines)
+		}
 	}
 
 	return bucket.uploadFile(objectKey, filePath, partSize, options, routines)
 }
 
+func getUploadCpFilePath(cpConf *cpConfig, srcFile, destBucket, destObject string) string {
+	if cpConf.FilePath == "" && cpConf.DirPath != "" {
+		dest := fmt.Sprintf("oss://%v/%v", destBucket, destObject)
+		absPath, _ := filepath.Abs(srcFile)
+		cpFileName := getCpFileName(absPath, dest)
+		cpConf.FilePath = cpConf.DirPath + string(os.PathSeparator) + cpFileName
+	}
+	return cpConf.FilePath
+}
+
 // ----- concurrent upload without checkpoint  -----
 
 // getCpConfig gets checkpoint configuration
-func getCpConfig(options []Option, filePath string) (*cpConfig, error) {
-	cpc := &cpConfig{}
+func getCpConfig(options []Option) *cpConfig {
 	cpcOpt, err := findOption(options, checkpointConfig, nil)
 	if err != nil || cpcOpt == nil {
-		return cpc, err
+		return nil
 	}
 
-	cpc = cpcOpt.(*cpConfig)
-	if cpc.IsEnable && cpc.FilePath == "" {
-		cpc.FilePath = filePath + CheckpointFileSuffix
-	}
+	return cpcOpt.(*cpConfig)
+}
 
-	return cpc, nil
+// getCpFileName returns the name of the checkpoint file
+func getCpFileName(src, dest string) string {
+	md5Ctx := md5.New()
+	md5Ctx.Write([]byte(src))
+	srcCheckSum := hex.EncodeToString(md5Ctx.Sum(nil))
+
+	md5Ctx.Reset()
+	md5Ctx.Write([]byte(dest))
+	destCheckSum := hex.EncodeToString(md5Ctx.Sum(nil))
+
+	return fmt.Sprintf("%v-%v.cp", srcCheckSum, destCheckSum)
 }
 
 // getRoutines gets the routine count. By default it's 1.
@@ -73,6 +92,16 @@ func getRoutines(options []Option) int {
 	return rs
 }
 
+// getPayer returns the payer of the request
+func getPayer(options []Option) string {
+	payerOpt, err := findOption(options, HTTPHeaderOSSRequester, nil)
+	if err != nil || payerOpt == nil {
+		return ""
+	}
+
+	return payerOpt.(string)
+}
+
 // getProgressListener gets the progress callback
 func getProgressListener(options []Option) ProgressListener {
 	isSet, listener, _ := isOptionSet(options, progressListener)
@@ -96,6 +125,7 @@ type workerArg struct {
 	bucket   *Bucket
 	filePath string
 	imur     InitiateMultipartUploadResult
+	options  []Option
 	hook     uploadPartHook
 }
 
@@ -106,7 +136,7 @@ func worker(id int, arg workerArg, jobs <-chan FileChunk, results chan<- UploadP
 			failed <- err
 			break
 		}
-		part, err := arg.bucket.UploadPartFromFile(arg.imur, arg.filePath, chunk.Offset, chunk.Size, chunk.Number)
+		part, err := arg.bucket.UploadPartFromFile(arg.imur, arg.filePath, chunk.Offset, chunk.Size, chunk.Number, arg.options...)
 		if err != nil {
 			failed <- err
 			break
@@ -145,6 +175,12 @@ func (bucket Bucket) uploadFile(objectKey, filePath string, partSize int64, opti
 		return err
 	}
 
+	payerOptions := []Option{}
+	payer := getPayer(options)
+	if payer != "" {
+		payerOptions = append(payerOptions, RequestPayer(PayerType(payer)))
+	}
+
 	// Initialize the multipart upload
 	imur, err := bucket.InitiateMultipartUpload(objectKey, options...)
 	if err != nil {
@@ -162,7 +198,7 @@ func (bucket Bucket) uploadFile(objectKey, filePath string, partSize int64, opti
 	publishProgress(listener, event)
 
 	// Start the worker coroutine
-	arg := workerArg{&bucket, filePath, imur, uploadPartHooker}
+	arg := workerArg{&bucket, filePath, imur, payerOptions, uploadPartHooker}
 	for w := 1; w <= routines; w++ {
 		go worker(w, arg, jobs, results, failed, die)
 	}
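
Each worker now receives the payer options through workerArg, so UploadPartFromFile signs every part request with the requester-pays header when one was supplied. A hedged caller-side sketch (bucket, key, and file path are placeholders; Requester is the PayerType constant this preview is expected to add in oss/const.go):

	// Requester-pays resumable upload: the payer option set here reaches
	// InitiateMultipartUpload via options and every part upload via payerOptions.
	err := bucket.UploadFile("photo.jpg", "/data/photo.jpg", 100*1024,
		oss.Routines(3), oss.RequestPayer(oss.Requester))
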
@@ -185,7 +221,7 @@ func (bucket Bucket) uploadFile(objectKey, filePath string, partSize int64, opti
 			close(die)
 			event = newProgressEvent(TransferFailedEvent, completedBytes, totalBytes)
 			publishProgress(listener, event)
-			bucket.AbortMultipartUpload(imur)
+			bucket.AbortMultipartUpload(imur, payerOptions...)
 			return err
 		}
 
@@ -198,9 +234,9 @@ func (bucket Bucket) uploadFile(objectKey, filePath string, partSize int64, opti
 	publishProgress(listener, event)
 
 	// Complete the multipart upload
-	_, err = bucket.CompleteMultipartUpload(imur, parts)
+	_, err = bucket.CompleteMultipartUpload(imur, parts, payerOptions...)
 	if err != nil {
-		bucket.AbortMultipartUpload(imur)
+		bucket.AbortMultipartUpload(imur, payerOptions...)
 		return err
 	}
 	return nil
@@ -397,10 +433,10 @@ func prepare(cp *uploadCheckpoint, objectKey, filePath string, partSize int64, b
 }
 
 // complete completes the multipart upload and deletes the local CP files
-func complete(cp *uploadCheckpoint, bucket *Bucket, parts []UploadPart, cpFilePath string) error {
+func complete(cp *uploadCheckpoint, bucket *Bucket, parts []UploadPart, cpFilePath string, options []Option) error {
 	imur := InitiateMultipartUploadResult{Bucket: bucket.BucketName,
 		Key: cp.ObjectKey, UploadID: cp.UploadID}
-	_, err := bucket.CompleteMultipartUpload(imur, parts)
+	_, err := bucket.CompleteMultipartUpload(imur, parts, options...)
 	if err != nil {
 		return err
 	}
@@ -412,6 +448,12 @@ func complete(cp *uploadCheckpoint, bucket *Bucket, parts []UploadPart, cpFilePa
 func (bucket Bucket) uploadFileWithCp(objectKey, filePath string, partSize int64, options []Option, cpFilePath string, routines int) error {
 	listener := getProgressListener(options)
 
+	payerOptions := []Option{}
+	payer := getPayer(options)
+	if payer != "" {
+		payerOptions = append(payerOptions, RequestPayer(PayerType(payer)))
+	}
+
 	// Load CP data
 	ucp := uploadCheckpoint{}
 	err := ucp.load(cpFilePath)
@@ -444,7 +486,7 @@ func (bucket Bucket) uploadFileWithCp(objectKey, filePath string, partSize int64
 	publishProgress(listener, event)
 
 	// Start the workers
-	arg := workerArg{&bucket, filePath, imur, uploadPartHooker}
+	arg := workerArg{&bucket, filePath, imur, payerOptions, uploadPartHooker}
 	for w := 1; w <= routines; w++ {
 		go worker(w, arg, jobs, results, failed, die)
 	}
@@ -479,6 +521,6 @@ func (bucket Bucket) uploadFileWithCp(objectKey, filePath string, partSize int64
 	publishProgress(listener, event)
 
 	// Complete the multipart upload
-	err = complete(&ucp, &bucket, ucp.allParts(), cpFilePath)
+	err = complete(&ucp, &bucket, ucp.allParts(), cpFilePath, payerOptions)
 	return err
 }
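
Taken together, the upload.go changes give UploadFile three checkpoint modes: disabled, an explicit checkpoint file, or a checkpoint directory with a derived file name. A short usage sketch (object key and paths are placeholders; Checkpoint and CheckpointDir are the option constructors exercised by the tests below):

	// Explicit checkpoint file:
	err := bucket.UploadFile("photo.jpg", "/data/photo.jpg", 100*1024,
		oss.Checkpoint(true, "/data/photo.jpg.cp"))
	// Derived checkpoint file under a directory:
	err = bucket.UploadFile("photo.jpg", "/data/photo.jpg", 100*1024,
		oss.CheckpointDir(true, "/tmp/cp"))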

+ 30 - 18
oss/upload_test.go

@@ -237,7 +237,7 @@ func (s *OssUploadSuite) TestUploadRoutineWithRecovery(c *C) {
 	// Use default routines and specify the CP file path (fileName + ".cp")
 	// First upload for 4 parts
 	uploadPartHooker = ErrorHooker
-	err := s.bucket.UploadFile(objectName, fileName, 100*1024, Checkpoint(true, ""))
+	err := s.bucket.UploadFile(objectName, fileName, 100*1024, Checkpoint(true, fileName+".cp"))
 	c.Assert(err, NotNil)
 	c.Assert(err.Error(), Equals, "ErrorHooker")
 	uploadPartHooker = defaultUploadPart
@@ -259,7 +259,7 @@ func (s *OssUploadSuite) TestUploadRoutineWithRecovery(c *C) {
 	c.Assert(len(ucp.allParts()), Equals, 5)
 
 	// Second upload, finish the remaining part
-	err = s.bucket.UploadFile(objectName, fileName, 100*1024, Checkpoint(true, ""))
+	err = s.bucket.UploadFile(objectName, fileName, 100*1024, Checkpoint(true, fileName+".cp"))
 	c.Assert(err, IsNil)
 
 	os.Remove(newFile)
@@ -276,16 +276,28 @@ func (s *OssUploadSuite) TestUploadRoutineWithRecovery(c *C) {
 	err = ucp.load(fileName + ".cp")
 	c.Assert(err, NotNil)
 
-	// Specify routines and CP
+	// Resumable upload with empty checkpoint path
 	uploadPartHooker = ErrorHooker
-	err = s.bucket.UploadFile(objectName, fileName, 100*1024, Routines(2), Checkpoint(true, objectName+".cp"))
+	err = s.bucket.UploadFile(objectName, fileName, 100*1024, Routines(3), CheckpointDir(true, ""))
+	c.Assert(err, NotNil)
+	c.Assert(err.Error(), Equals, "ErrorHooker")
+	uploadPartHooker = defaultUploadPart
+	ucp = uploadCheckpoint{}
+	err = ucp.load(fileName + ".cp")
+	c.Assert(err, NotNil)
+
+	// Resumable upload with checkpoint dir
+	uploadPartHooker = ErrorHooker
+	err = s.bucket.UploadFile(objectName, fileName, 100*1024, Routines(3), CheckpointDir(true, "./"))
 	c.Assert(err, NotNil)
 	c.Assert(err.Error(), Equals, "ErrorHooker")
 	uploadPartHooker = defaultUploadPart
 
 	// Check CP
 	ucp = uploadCheckpoint{}
-	err = ucp.load(objectName + ".cp")
+	cpConf := cpConfig{IsEnable: true, DirPath: "./"}
+	cpFilePath := getUploadCpFilePath(&cpConf, fileName, s.bucket.BucketName, objectName)
+	err = ucp.load(cpFilePath)
 	c.Assert(err, IsNil)
 	c.Assert(ucp.Magic, Equals, uploadCpMagic)
 	c.Assert(len(ucp.MD5), Equals, len("LC34jZU5xK4hlxi3Qn3XGQ=="))
@@ -299,7 +311,7 @@ func (s *OssUploadSuite) TestUploadRoutineWithRecovery(c *C) {
 	c.Assert(len(ucp.todoParts()), Equals, 1)
 	c.Assert(len(ucp.allParts()), Equals, 5)
 
-	err = s.bucket.UploadFile(objectName, fileName, 100*1024, Routines(3), Checkpoint(true, objectName+".cp"))
+	err = s.bucket.UploadFile(objectName, fileName, 100*1024, Routines(3), CheckpointDir(true, "./"))
 	c.Assert(err, IsNil)
 
 	os.Remove(newFile)
@@ -313,11 +325,11 @@ func (s *OssUploadSuite) TestUploadRoutineWithRecovery(c *C) {
 	err = s.bucket.DeleteObject(objectName)
 	c.Assert(err, IsNil)
 
-	err = ucp.load(objectName + ".cp")
+	err = ucp.load(cpFilePath)
 	c.Assert(err, NotNil)
 
 	// Upload all 5 parts without error
-	err = s.bucket.UploadFile(objectName, fileName, 100*1024, Routines(3), Checkpoint(true, ""))
+	err = s.bucket.UploadFile(objectName, fileName, 100*1024, Routines(3), Checkpoint(true, objectName+".cp"))
 	c.Assert(err, IsNil)
 
 	os.Remove(newFile)
@@ -332,7 +344,7 @@ func (s *OssUploadSuite) TestUploadRoutineWithRecovery(c *C) {
 	c.Assert(err, IsNil)
 
 	// Upload all 5 parts with 10 routines without error
-	err = s.bucket.UploadFile(objectName, fileName, 100*1024, Routines(10), Checkpoint(true, ""))
+	err = s.bucket.UploadFile(objectName, fileName, 100*1024, Routines(10), Checkpoint(true, objectName+".cp"))
 	c.Assert(err, IsNil)
 
 	os.Remove(newFile)
@@ -347,7 +359,7 @@ func (s *OssUploadSuite) TestUploadRoutineWithRecovery(c *C) {
 	c.Assert(err, IsNil)
 
 	// Upload with the Meta option
-	err = s.bucket.UploadFile(objectName, fileName, 100*1024, Routines(3), Checkpoint(true, ""), Meta("myprop", "mypropval"))
+	err = s.bucket.UploadFile(objectName, fileName, 100*1024, Routines(3), Checkpoint(true, objectName+".cp"), Meta("myprop", "mypropval"))
 
 	meta, err := s.bucket.GetObjectDetailedMeta(objectName)
 	c.Assert(err, IsNil)
@@ -371,23 +383,23 @@ func (s *OssUploadSuite) TestUploadRoutineWithRecoveryNegative(c *C) {
 	fileName := "../sample/BingWallpaper-2015-11-07.jpg"
 
 	// The local file does not exist
-	err := s.bucket.UploadFile(objectName, "NotExist", 100*1024, Checkpoint(true, ""))
+	err := s.bucket.UploadFile(objectName, "NotExist", 100*1024, Checkpoint(true, "NotExist.cp"))
 	c.Assert(err, NotNil)
 
-	err = s.bucket.UploadFile(objectName, "NotExist", 100*1024, Routines(2), Checkpoint(true, ""))
+	err = s.bucket.UploadFile(objectName, "NotExist", 100*1024, Routines(2), Checkpoint(true, "NotExist.cp"))
 	c.Assert(err, NotNil)
 
 	// Specified part size is invalid
-	err = s.bucket.UploadFile(objectName, fileName, 1024, Checkpoint(true, ""))
+	err = s.bucket.UploadFile(objectName, fileName, 1024, Checkpoint(true, fileName+".cp"))
 	c.Assert(err, NotNil)
 
-	err = s.bucket.UploadFile(objectName, fileName, 1024, Routines(2), Checkpoint(true, ""))
+	err = s.bucket.UploadFile(objectName, fileName, 1024, Routines(2), Checkpoint(true, fileName+".cp"))
 	c.Assert(err, NotNil)
 
-	err = s.bucket.UploadFile(objectName, fileName, 1024*1024*1024*100, Checkpoint(true, ""))
+	err = s.bucket.UploadFile(objectName, fileName, 1024*1024*1024*100, Checkpoint(true, fileName+".cp"))
 	c.Assert(err, NotNil)
 
-	err = s.bucket.UploadFile(objectName, fileName, 1024*1024*1024*100, Routines(2), Checkpoint(true, ""))
+	err = s.bucket.UploadFile(objectName, fileName, 1024*1024*1024*100, Routines(2), Checkpoint(true, fileName+".cp"))
 	c.Assert(err, NotNil)
 }
 
@@ -404,7 +416,7 @@ func (s *OssUploadSuite) TestUploadLocalFileChange(c *C) {
 
 	// First upload for 4 parts
 	uploadPartHooker = ErrorHooker
-	err = s.bucket.UploadFile(objectName, localFile, 100*1024, Checkpoint(true, ""))
+	err = s.bucket.UploadFile(objectName, localFile, 100*1024, Checkpoint(true, localFile+".cp"))
 	c.Assert(err, NotNil)
 	c.Assert(err.Error(), Equals, "ErrorHooker")
 	uploadPartHooker = defaultUploadPart
@@ -414,7 +426,7 @@ func (s *OssUploadSuite) TestUploadLocalFileChange(c *C) {
 	c.Assert(err, IsNil)
 
 	// The file has been updated, so the second upload will re-upload all 5 parts.
-	err = s.bucket.UploadFile(objectName, localFile, 100*1024, Checkpoint(true, ""))
+	err = s.bucket.UploadFile(objectName, localFile, 100*1024, Checkpoint(true, localFile+".cp"))
 	c.Assert(err, IsNil)
 
 	os.Remove(newFile)
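
The recovery tests above all follow one pattern: force a failure partway through with ErrorHooker, then rerun with the same checkpoint option so only the unfinished parts are re-sent. A condensed sketch of that flow, reusing the suite's hooks:

	uploadPartHooker = ErrorHooker // fail before the final part completes
	_ = s.bucket.UploadFile(objectName, fileName, 100*1024, Checkpoint(true, fileName+".cp"))
	uploadPartHooker = defaultUploadPart
	// Same checkpoint option: the retry uploads only the missing parts.
	err := s.bucket.UploadFile(objectName, fileName, 100*1024, Checkpoint(true, fileName+".cp"))
	c.Assert(err, IsNil)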