Fail to upload a file to RadosGW via Python + S3

My RadosGW has 100-continue disabled:

[global]
fsid = 075f1aae-48de-412e-b024-b0f014dbc8cf
mon_initial_members = ceph01-vm, ceph02-vm, ceph04-vm
mon_host = 192.168.123.251,192.168.123.252,192.168.123.250
auth_cluster_required = cephx
auth_service_required = cephx
auth_client_required = cephx
filestore_xattr_use_omap = true

rgw print continue = false
rgw dns name = ceph-radosgw
osd pool default pg num = 128
osd pool default pgp num = 128

#debug rgw = 20
[client.radosgw.gateway]
host = ceph-radosgw
keyring = /etc/ceph/ceph.client.radosgw.keyring
rgw socket path = /var/run/ceph/ceph.radosgw.gateway.fastcgi.sock
log file = /var/log/ceph/client.radosgw.gateway.log
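
To confirm what the client actually puts on the wire before changing
anything, boto 2.x can log the outgoing request headers; the sketch below
only adds logging around the same connection the script further down
already builds (the keys and host are copied from that script, not new
values):

#!/usr/bin/env python
# Sketch: enable boto's wire-level logging so the headers of the PUT
# (including any "Expect: 100-continue") are printed to stdout.
import boto
import boto.s3.connection

boto.set_stream_logger('boto')  # boto's own request/response logging

conn = boto.connect_s3(
    aws_access_key_id='VC8R6C193WDVKNTDCRKA',
    aws_secret_access_key='ASUWdUTx6PwVXEf/oJRRmDnvKEWp509o3rl1Xt+h',
    host='ceph-radosgw.lab.com',
    port=80,
    is_secure=False,
    debug=2,  # also turns on httplib's send()/reply() tracing
    calling_format=boto.s3.connection.OrdinaryCallingFormat(),
)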



2014-08-21 22:42 GMT+07:00 debian Only <onlydebian@gmail.com>:

> When I use DragonDisk and deselect the Expect "100-continue" header
> option, the file uploads successfully. When that option is selected, the
> upload hangs.
>
> Maybe the Python script cannot upload files because of 100-continue? My
> RadosGW's Apache2 does not use 100-continue.
>
> If my guess is true, how do I disable this in the Python S3 connection so
> the script can upload files? (One possible workaround is sketched after
> the quoted thread below.)
>
>
>
> 2014-08-21 20:57 GMT+07:00 debian Only <onlydebian@gmail.com>:
>
>> I can upload files to RadosGW with s3cmd and with DragonDisk.
>>
>> The script below can list all buckets and all files in a bucket, but it
>> cannot upload a file from Python via S3.
>> ###########
>> #!/usr/bin/env python
>> #coding=utf-8
>>
>> __author__ = 'Administrator'
>> import fnmatch
>> import os, sys
>> import boto
>> import boto.s3.connection
>>
>> access_key = 'VC8R6C193WDVKNTDCRKA'
>> secret_key = 'ASUWdUTx6PwVXEf/oJRRmDnvKEWp509o3rl1Xt+h'
>>
>> pidfile = "copytoceph.pid"
>>
>>
>> def check_pid(pid):
>>     try:
>>         os.kill(pid, 0)
>>     except OSError:
>>         return False
>>     else:
>>         return True
>>
>>
>> if os.path.isfile(pidfile):
>>     pid = long(open(pidfile, 'r').read())
>>     if check_pid(pid):
>>         print "%s already exists, doing nothing" % pidfile
>>         sys.exit()
>>
>> pid = str(os.getpid())
>> file(pidfile, 'w').write(pid)
>>
>> conn = boto.connect_s3(
>>     aws_access_key_id=access_key,
>>     aws_secret_access_key=secret_key,
>>     host='ceph-radosgw.lab.com',
>>     port=80,
>>     is_secure=False,
>>     calling_format=boto.s3.connection.OrdinaryCallingFormat(),
>> )
>>
>> print conn
>> mybucket = conn.get_bucket('foo')
>> print mybucket
>> mylist = mybucket.list()
>> print mylist
>> buckets = conn.get_all_buckets()
>> for bucket in buckets:
>>     print "{name}\t{created}".format(
>>         name=bucket.name,
>>         created=bucket.creation_date,
>>     )
>>
>>     for key in bucket.list():
>>         print "{name}\t{size}\t{modified}".format(
>>             name=(key.name).encode('utf8'),
>>             size=key.size,
>>             modified=key.last_modified,
>>             )
>>
>>
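>> # Everything above (listing buckets and keys) works; per the output
>> # below, the script hangs on the upload that follows and then gets a 500.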
>> key = mybucket.new_key('hello.txt')
>> print key
>> key.set_contents_from_string('Hello World!')
>>
>> ###############
>>
root@ceph-radosgw:~# python rgwupload.py
>> S3Connection:ceph-radosgw.lab.com
>> <Bucket: foo>
>> <boto.s3.bucketlistresultset.BucketListResultSet object at 0x1d6ae10>
>> backup  2014-08-21T10:23:08.000Z
>> add volume for vms.png  23890   2014-08-21T10:53:43.000Z
>> foo     2014-08-20T16:11:19.000Z
>> file0001.txt    29      2014-08-21T04:22:25.000Z
>> galley/DSC_0005.JPG     2142126 2014-08-21T04:24:29.000Z
>> galley/DSC_0006.JPG     2005662 2014-08-21T04:24:29.000Z
>> galley/DSC_0009.JPG     1922686 2014-08-21T04:24:29.000Z
>> galley/DSC_0010.JPG     2067713 2014-08-21T04:24:29.000Z
>> galley/DSC_0011.JPG     2027689 2014-08-21T04:24:30.000Z
>> galley/DSC_0012.JPG     2853358 2014-08-21T04:24:30.000Z
>> galley/DSC_0013.JPG     2844746 2014-08-21T04:24:30.000Z
>> iso     2014-08-21T04:43:16.000Z
>> pdf     2014-08-21T09:36:15.000Z
>> <Key: foo,hello.txt>
>>
>> It hangs at this point.
>>
>> I get the same error when I run the script on the RadosGW host itself.
>>
>> Traceback (most recent call last):
>>   File "D:/Workspace/S3-Ceph/test.py", line 65, in <module>
>>     key.set_contents_from_string('Hello World!')
>>   File "c:\Python27\lib\site-packages\boto\s3\key.py", line 1419, in set_contents_from_string
>>     encrypt_key=encrypt_key)
>>   File "c:\Python27\lib\site-packages\boto\s3\key.py", line 1286, in set_contents_from_file
>>     chunked_transfer=chunked_transfer, size=size)
>>   File "c:\Python27\lib\site-packages\boto\s3\key.py", line 746, in send_file
>>     chunked_transfer=chunked_transfer, size=size)
>>   File "c:\Python27\lib\site-packages\boto\s3\key.py", line 944, in _send_file_internal
>>     query_args=query_args
>>   File "c:\Python27\lib\site-packages\boto\s3\connection.py", line 664, in make_request
>>     retry_handler=retry_handler
>>   File "c:\Python27\lib\site-packages\boto\connection.py", line 1053, in make_request
>>     retry_handler=retry_handler)
>>   File "c:\Python27\lib\site-packages\boto\connection.py", line 1009, in _mexe
>>     raise BotoServerError(response.status, response.reason, body)
>> boto.exception.BotoServerError: BotoServerError: 500 Internal Server Error
>> None
>>
>
>
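
If the 100-continue guess is right, one possible workaround (untested, and
not something this thread confirms) is to strip the Expect header below
boto, at the httplib layer, so the gateway never sees a 100-continue
handshake at all. As far as I can tell boto 2.x injects this header itself
when uploading keys, but that is my assumption; the patch below drops the
header wherever it comes from:

#!/usr/bin/env python
# Sketch for Python 2 / boto 2.x: monkey-patch httplib so any "Expect"
# header is dropped before the request goes out. Apply the patch before
# creating the boto connection in the script above.
import httplib

_orig_putheader = httplib.HTTPConnection.putheader

def _putheader_no_expect(self, header, *values):
    # Drop the Expect header; pass every other header through untouched.
    if str(header).lower() == 'expect':
        return
    return _orig_putheader(self, header, *values)

httplib.HTTPConnection.putheader = _putheader_no_expect

With the patch in place, key.set_contents_from_string('Hello World!')
should produce a PUT with no Expect header. If the upload still fails with
a 500, the 100-continue theory is probably wrong; the next step would be
to uncomment debug rgw = 20 in ceph.conf and check
/var/log/ceph/client.radosgw.gateway.log for the failing request.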

