I'm having trouble deleting files from S3 using Fine Uploader with Django/boto. I can upload files to S3 with Fine Uploader and retrieve and display the image URL successfully, but deletes are not going through.
Looking at the boto debug log, it appears that boto is not sending a token to S3 as part of the request, and I suspect this may be my problem.
I've put the boto debug output first, since I suspect someone more familiar with it can help after seeing it; my setup follows below (it sticks as closely as possible to the example at https://github.com/Widen/fine-uploader-server/blob/master/python/django-fine-uploader-s3/).
Terminal output
bucket_name: XXXXXXXX
key_name: b45069b8-dc44-45fe-8b67-b25fc088bdea.jpg
aws_bucket: <Bucket: XXXXXXXXX>
aws_key: <Key: XXXXXXXX,b45069b8-dc44-45fe-8b67-b25fc088bdea.jpg>
2014-04-17 15:01:56,576 boto [DEBUG]:path=/b45069b8-dc44-45fe-8b67-b25fc088bdea.jpg
2014-04-17 15:01:56,577 boto [DEBUG]:auth_path=/thisorthis/b45069b8-dc44-45fe-8b67-b25fc088bdea.jpg
2014-04-17 15:01:56,577 boto [DEBUG]:Method: DELETE
2014-04-17 15:01:56,577 boto [DEBUG]:Path: /b45069b8-dc44-45fe-8b67-b25fc088bdea.jpg
2014-04-17 15:01:56,577 boto [DEBUG]:Data:
2014-04-17 15:01:56,577 boto [DEBUG]:Headers: {}
2014-04-17 15:01:56,577 boto [DEBUG]:Host: XXXXXXX.s3.amazonaws.com
2014-04-17 15:01:56,578 boto [DEBUG]:Port: 443
2014-04-17 15:01:56,578 boto [DEBUG]:Params: {}
2014-04-17 15:01:56,578 boto [DEBUG]:establishing HTTPS connection: host=thisorthis.s3.amazonaws.com, kwargs={'port': 443, 'timeout': 70}
2014-04-17 15:01:56,578 boto [DEBUG]:Token: None
2014-04-17 15:01:56,578 boto [DEBUG]:StringToSign:
DELETE
Thu, 17 Apr 2014 15:01:56 GMT
/XXXXXXXX/b45069b8-dc44-45fe-8b67-b25fc088bdea.jpg
2014-04-17 15:01:56,579 boto [DEBUG]:Signature:
AWS AKIAJYS27FQSNHPH3CXQ:dVKlBpulsY9LrOtHOa+xQmurIEM=
[17/Apr/2014 15:01:57] "DELETE /s3/delete/b45069b8-dc44-45fe-8b67-b25fc088bdea?key=b45069b8-dc44-45fe-8b67-b25fc088bdea.jpg&bucket=XXXXXXXX HTTP/1.1" 500 15975
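For reference, the StringToSign/Signature pair in the log is the standard S3 signature v2 calculation; a rough sketch of how boto derives it, using placeholder credentials rather than my real keys:

import base64
import hashlib
import hmac

# Signature v2: Base64(HMAC-SHA1(secret_key, StringToSign)). The StringToSign is
# METHOD, Content-MD5, Content-Type, Date and the canonicalized resource, one per
# line; the two blank lines are the empty Content-MD5 and Content-Type of a DELETE.
string_to_sign = ("DELETE\n"
                  "\n"
                  "\n"
                  "Thu, 17 Apr 2014 15:01:56 GMT\n"
                  "/XXXXXXXX/b45069b8-dc44-45fe-8b67-b25fc088bdea.jpg")
signature = base64.b64encode(hmac.new('<secret key>', string_to_sign, hashlib.sha1).digest())
print "AWS <access key id>:" + signature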
settings.py:
AWS_CLIENT_SECRET_KEY = os.getenv("AWS_CLIENT_SECRET_KEY")
AWS_SERVER_PUBLIC_KEY = os.getenv("AWS_SERVER_PUBLIC_KEY")
AWS_SERVER_SECRET_KEY = os.getenv("AWS_SERVER_SECRET_KEY")
AWS_EXPECTED_BUCKET = 'mybucketname'
AWS_MAX_SIZE = 15000000
Obviously my actual bucket name goes in there; as I said, uploads are working, so I don't think the problem is in the settings.
Fine Uploader instance
$("#fine-uploader").fineUploaderS3({
    debug: true,
    request: {
        endpoint: 'XXXXX',
        accessKey: 'XXXXXXXX'
    },
    template: "simple-previews-template",
    signature: {
        endpoint: '/s3/signature/'
    },
    uploadSuccess: {
        endpoint: '/s3/success/'
    },
    iframeSupport: {
        localBlankPagePath: '/success.html'
    },
    deleteFile: {
        enabled: true,
        endpoint: '/s3/delete/'
    },
    classes: {
        dropActive: "cssClassToAddToDropZoneOnEnter"
    }
});
urls.py
url(r'^s3/signature/', views.handle_s3, name="s3_signee"),
url(r'^s3/delete/', views.handle_s3, name='s3_delete'),
url(r'^s3/success/', views.success_redirect_endpoint, name="s3_succes_endpoint")
views.py
# Imports implied by the views below (not shown in the original snippet);
# `development` is the settings module that defines the AWS_* values above.
import base64
import hashlib
import hmac
import json

from django.http import HttpResponse
from django.views.decorators.csrf import csrf_exempt

try:
    import boto
    from boto.s3.connection import Key, S3Connection
    boto.set_stream_logger('boto')
    S3 = S3Connection(development.AWS_SERVER_PUBLIC_KEY, development.AWS_SERVER_SECRET_KEY)
except ImportError, e:
    print("Could not import boto, the Amazon SDK for Python.")
    print("Deleting files will not work.")
    print("Install boto with")
    print("$ pip install boto")


@csrf_exempt
def success_redirect_endpoint(request):
    """ This is where the uploader will send a POST request after the
    file has been stored in S3.
    """
    key = request.POST.get('key')
    response = {}
    response['url'] = key
    return HttpResponse(json.dumps(response), content_type="application/json")


@csrf_exempt
def handle_s3(request):
    """ View which handles all POST and DELETE requests sent by Fine Uploader
    S3. You will need to adjust these paths/conditions based on your setup.
    """
    if request.method == "POST":
        return handle_POST(request)
    elif request.method == "DELETE":
        return handle_DELETE(request)
    else:
        return HttpResponse(status=405)


def handle_POST(request):
    """ Handle S3 uploader POST requests here. For files <=5MiB this is a simple
    request to sign the policy document. For files >5MiB this is a request
    to sign the headers to start a multipart encoded request.
    """
    if request.POST.get('success', None):
        return make_response(200)
    else:
        request_payload = json.loads(request.body)
        headers = request_payload.get('headers', None)
        if headers:
            print "headers"
            # The presence of the 'headers' property in the request payload
            # means this is a request to sign a REST/multipart request
            # and NOT a policy document
            response_data = sign_headers(headers)
        else:
            print "no headers"
            if not is_valid_policy(request_payload):
                print "is not valid"
                return make_response(400, {'invalid': True})
            response_data = sign_policy_document(request_payload)
        response_payload = json.dumps(response_data)
        return make_response(200, response_payload)


def handle_DELETE(request):
    """ Handle file deletion requests. For this, we use the Amazon Python SDK,
    boto.
    """
    print "handle delete"
    if boto:
        bucket_name = request.REQUEST.get('bucket')
        print "bucket_name: ", bucket_name
        key_name = request.REQUEST.get('key')
        print "key_name:", key_name
        aws_bucket = S3.get_bucket(bucket_name, validate=False)
        print "aws_bucket: ", aws_bucket
        aws_key = Key(aws_bucket, key_name)
        print "aws_key: ", aws_key
        aws_key.delete()
        print "after aws_key.delete()"
        return make_response(200)
    else:
        return make_response(500)


def make_response(status=200, content=None):
    """ Construct an HTTP response. Fine Uploader expects 'application/json'.
    """
    response = HttpResponse()
    response.status_code = status
    response['Content-Type'] = "application/json"
    response.content = content
    return response


def is_valid_policy(policy_document):
    """ Verify the policy document has not been tampered with client-side
    before sending it off.
    """
    bucket = development.AWS_EXPECTED_BUCKET
    parsed_max_size = development.AWS_MAX_SIZE
    print "check validity"
    # bucket = ''
    # parsed_max_size = 0
    for condition in policy_document['conditions']:
        if isinstance(condition, list) and condition[0] == 'content-length-range':
            parsed_max_size = condition[2]
        else:
            if condition.get('bucket', None):
                bucket = condition['bucket']
    return bucket == development.AWS_EXPECTED_BUCKET and parsed_max_size == development.AWS_MAX_SIZE


def sign_policy_document(policy_document):
    """ Sign and return the policy document for a simple upload.
    http://aws.amazon.com/articles/1434/#signyours3postform
    """
    policy = base64.b64encode(json.dumps(policy_document))
    signature = base64.b64encode(hmac.new(development.AWS_CLIENT_SECRET_KEY, policy, hashlib.sha1).digest())
    return {
        'policy': policy,
        'signature': signature
    }


def sign_headers(headers):
    """ Sign and return the headers for a chunked upload. """
    print "sign headers"
    return {
        'signature': base64.b64encode(hmac.new(development.AWS_CLIENT_SECRET_KEY, headers, hashlib.sha1).digest())
    }
Answer (score: 2)
It turns out I had not configured my S3 bucket policy to allow DELETE requests from my server, so the bucket logs were showing error 204. I had allowed PUT and GET requests, which is why uploading and retrieving worked but deleting did not. I changed my bucket policy to:
{
    "Version": "2012-10-17",
    "Statement": [
        {
            "Effect": "Allow",
            "Action": ["s3:ListAllMyBuckets"],
            "Resource": "arn:aws:s3:::*"
        },
        {
            "Effect": "Allow",
            "Action": [
                "s3:ListBucket",
                "s3:GetBucketLocation"
            ],
            "Resource": "arn:aws:s3:::xxxxx"
        },
        {
            "Effect": "Allow",
            "Action": [
                "s3:PutObject",
                "s3:GetObject",
                "s3:DeleteObject"
            ],
            "Resource": "arn:aws:s3:::xxxxx/*"
        }
    ]
}
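To double-check the policy change outside of Django, the same delete can be issued directly from a Python shell with boto; the credentials, bucket and key names below are placeholders:

from boto.s3.connection import Key, S3Connection

# Use the same credentials the Django view uses.
conn = S3Connection('<access key id>', '<secret key>')
bucket = conn.get_bucket('xxxxx', validate=False)
key = Key(bucket, 'b45069b8-dc44-45fe-8b67-b25fc088bdea.jpg')
key.delete()   # raises S3ResponseError if s3:DeleteObject is still denied
print bucket.get_key('b45069b8-dc44-45fe-8b67-b25fc088bdea.jpg')   # None once the object is gone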