我需要获取原始(压缩)内容。我的目标是将其原样保存到S3。使用requests
很容易:
import requests
response = requests.get('http://google.com', stream=True)
content = response.raw.read() # b'\x1f\x8b\x08\x00\x00\x00\x00...'
但是使用aiohttp
时,我总是会获得未压缩的内容:
import asyncio
import aiohttp
async def download(url):
async with aiohttp.ClientSession() as session:
async with session.get(url) as response:
content = await response.content.read() # b'<!doctype html><html...'
if __name__ == '__main__':
loop = asyncio.get_event_loop()
loop.run_until_complete(download('http://google.com'))
答案 0 :(得分:2)
class aiohttp.ClientSession(
*,
connector=None,
loop=None,
cookies=None,
headers=None,
skip_auto_headers=None,
auth=None,
json_serialize=json.dumps,
version=aiohttp.HttpVersion11,
cookie_jar=None,
read_timeout=None,
conn_timeout=None,
timeout=sentinel,
raise_for_status=False,
connector_owner=True,
auto_decompress=True,
proxies=None
)
尝试设置auto_decompress = false docs