我创建了一个小Django应用程序并将其部署在Ubuntu上, views.py
import csv
import json
import os

import psycopg2
from django.http import HttpResponse, StreamingHttpResponse
from django.shortcuts import render
from ratelimit.decorators import ratelimit
def create_connection():
    """Open a new psycopg2 connection to the local ``data`` database.

    Credentials may be overridden via the standard PGDATABASE / PGUSER /
    PGHOST / PGPORT / PGPASSWORD environment variables; the original
    hard-coded values are kept as defaults for backward compatibility.

    Returns:
        An open psycopg2 connection. The caller is responsible for
        closing it.
    """
    # SECURITY: avoid committing real credentials to source control --
    # prefer environment variables (or a secrets manager) in production.
    return psycopg2.connect(
        database=os.environ.get("PGDATABASE", "data"),
        user=os.environ.get("PGUSER", "ag"),
        host=os.environ.get("PGHOST", "localhost"),
        port=int(os.environ.get("PGPORT", "5432")),
        password=os.environ.get("PGPASSWORD", "pass"),
    )
def execute_query(query, parameters):
    """Run *query* with *parameters* and return all rows as lists.

    SQL NULLs are replaced with ``""`` so the rows can be written
    straight to CSV. Note the fix: the original used
    ``value if value else ""``, which silently converted every falsy
    value (``0``, ``False``, ``""``) to the empty string; only ``None``
    (SQL NULL) should be replaced.

    Args:
        query: SQL text containing psycopg2 ``%s`` placeholders.
        parameters: Sequence/mapping of values bound by psycopg2
            (safe for the *values*; the query text itself must still
            come from a trusted source).

    Returns:
        list[list]: every result row, with NULLs replaced by ``""``.
    """
    # NOTE(review): psycopg2's "with connection" commits/rolls back the
    # transaction on exit but does NOT close the connection -- consider
    # an explicit close() or a connection pool in production.
    with create_connection() as connection:
        with connection.cursor() as cursor:
            cursor.execute(query, parameters)
            return [
                ["" if value is None else value for value in row]
                for row in cursor
            ]
@ratelimit(key='ip', rate='10/m')
def to_csv(request):
    """Return the result of a client-supplied SQL query as a CSV download.

    Expects a ``data`` GET parameter holding a JSON object with keys
    ``query`` (SQL text), ``params`` (bind values) and ``headers``
    (the CSV header row).

    WARNING: executing SQL text taken directly from the request is a
    severe SQL-injection risk -- never expose this endpoint to
    untrusted clients.
    """
    j_body = json.loads(request.GET.get('data', ''))
    query = j_body['query']
    parameters = j_body['params']
    headers = j_body['headers']
    rows = execute_query(query, parameters)
    # Fix: the original used the invalid MIME type 'text_csv';
    # browsers/proxies expect 'text/csv' (RFC 4180).
    response = HttpResponse(content_type='text/csv')
    response['Content-Disposition'] = 'attachment; filename="result.csv"'
    writer = csv.writer(response)
    writer.writerow(headers)
    writer.writerows(rows)
    return response
这个应用程序的主要目标 - 获取sql查询并将csv响应作为文件返回,其大小可以超过200 MB。我有一个问题 - 我的响应总是被截断为696 kb(无论cursor.execute结果的行数是多少)。有例子。您可以看到最后一个截断的行。
服务器通过 `python3 manage.py runserver 0.0.0.0:8000` 命令运行。请帮我解决这个问题。
已更新:我用 Nginx 和 Gunicorn 重新部署了应用,并尝试了"nginx + gunicorn throws truncated response body"中的解决方案,但仍然得到同样的错误。
我按照这篇文章修改了我的代码 - https://docs.djangoproject.com/en/1.10/howto/outputting-csv/#streaming-large-csv-files
# Also changed the execute_query method so it doesn't create a redundant copy of the database response
def execute_query(query, parameters):
    """Execute *query* and return the open cursor for lazy iteration.

    Unlike the list-building variant, no in-memory copy of the result
    set is made; the caller streams rows directly from the cursor.
    (The dead ``result = []`` from the original was removed.)

    NOTE(review): psycopg2's ``with connection`` block commits on exit
    but does NOT close the connection, so the returned cursor remains
    usable -- however the connection is never explicitly closed (a
    leak under sustained load). A server-side (named) cursor would
    additionally avoid fetching the whole result set into client
    memory at execute() time.

    Args:
        query: SQL text with psycopg2 ``%s`` placeholders.
        parameters: Values bound to the placeholders.

    Returns:
        The open psycopg2 cursor, ready to be iterated row by row.
    """
    with create_connection() as connection:
        cursor = connection.cursor()
        cursor.execute(query, parameters)
        return cursor
class Echo(object):
    """A minimal file-like shim exposing only ``write``.

    ``csv.writer`` only needs an object with a ``write`` method. This
    one hands each formatted CSV line straight back to the caller
    instead of buffering it, which allows rows to be streamed one at a
    time through a generator.
    """

    def write(self, value):
        """Return *value* unchanged instead of storing it anywhere."""
        return value
@ratelimit(key='ip', rate='10/m')
def to_csv(request):
    """Stream a client-supplied SQL query's result as a CSV download.

    Uses ``StreamingHttpResponse`` so arbitrarily large result sets
    (200 MB+) are sent row by row instead of being buffered/truncated.

    Fixes vs. the original:
      * ``StreamingHttpResponse`` must actually be imported (the
        original referenced it without an import -- NameError at
        runtime);
      * the header row is now part of the streamed generator -- before,
        ``writer.writerow(headers)`` was called eagerly and its return
        value (the formatted line) was discarded, so the downloaded
        CSV had no header row;
      * the bare ``except:`` is narrowed to the errors this view can
        actually produce instead of swallowing everything (including
        SystemExit / KeyboardInterrupt).

    WARNING: executing SQL text taken directly from the request is a
    severe SQL-injection risk; do not expose this endpoint to
    untrusted users.
    """
    try:
        j_body = json.loads(request.GET.get('data', ''))
        query = j_body['query']
        parameters = j_body['params']
        headers = j_body['headers']
        cursor = execute_query(query, parameters)
        writer = csv.writer(Echo())

        def stream():
            # Yield the header first, then each data row as it is
            # fetched from the cursor -- nothing is accumulated.
            yield writer.writerow(headers)
            for row in cursor:
                yield writer.writerow(row)

        response = StreamingHttpResponse(stream(), content_type="text/csv")
        response['Content-Disposition'] = 'attachment; filename="result.csv"'
        return response
    except (ValueError, KeyError, psycopg2.Error):
        # Malformed JSON, missing keys, or a database failure.
        return HttpResponse('Error')
现在一切正常。
答案 0(得分:0)
听起来你可能需要StreamingHttpResponse
。你试过这个吗?您似乎遇到了缓冲/分块响应的某种问题。像gunicorn或nginx这样的东西可以用来增加这个缓冲区大小,但它不是一个真正的解决方案。