I'm writing a script that currently runs in an IPython notebook.
import pandas as pd
import pyhs2
import os
import datetime
q1= "set hive.query.max.partition = 3000 ;
select 'Device_id' as key,
'All Time' as type,
count(distinct a.dev_id) as count
from (select distinct dev_id from DevID
where dev_type = '*****'
union all
select distinct
key_value_lookup(raw_url, '*****', '&', '=') as dev_id
from actions
where raw_url like '%*****%'
and raw_url like '%*****%'
and data_date >= '20150901' and data_date <= '20151231') a"""
def read_hive(query):
    conn = pyhs2.connect(host='*****',
                         port=*****,
                         authMechanism="*****",
                         user='*****',
                         password='*****',
                         database='*****')
    cur = conn.cursor()
    cur.execute(query)
    # Return None if the query produced no result schema
    if cur.getSchema() is None:
        cur.close()
        conn.close()
        return None
    # Column names from the query's result schema
    columnNames = [a['columnName'] for a in cur.getSchema()]
    print columnNames
    columnNamesStrings = [a['columnName'] for a in cur.getSchema() if a['type'] == 'STRING_TYPE']
    # Fetch all rows into a DataFrame
    output = pd.DataFrame(cur.fetch(), columns=columnNames)
    cur.close()
    conn.close()
    return output
When I call read_hive(q1), I get the following error:
FAILED because hive.query.max.partition requires an INT value
I think this is because I'm storing the query in a string, but I'm not entirely sure. The query runs perfectly from Hue.
Does anyone have an idea of the best way to change the maximum number of partitions? Can this be done inside my function?
Answer 0 (score: 0)
Hive configuration settings should be passed to the pyhs2 Connection object as a dictionary, not as part of the query string you execute.
In your case:
conn = pyhs2.connect(host='*****',
                     port=*****,
                     authMechanism="*****",
                     user='*****',
                     password='*****',
                     database='*****',
                     configuration={'hive.query.max.partition': '3000'})
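With the setting moved onto the connection, the set statement can be dropped from the query string entirely. A minimal sketch of what the call might then look like (hypothetical, reusing the read_hive function and eliding the same subquery from the question):

# hive.query.max.partition is now set via the connection's configuration
# dictionary, so the query string contains only the SELECT statement
q1 = """select 'Device_id' as key,
'All Time' as type,
count(distinct a.dev_id) as count
from (...) a"""  # (...) stands for the same subquery as in the question

output = read_hive(q1)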