I'm trying to follow a tutorial to make a Reddit and Twitter bot in Python. I'm using Python 2.7.10 because I believe that is the version used in the tutorial, but I get the following error:
Traceback (most recent call last):
  File "C:\Python27\twitterbot.py", line 82, in <module>
    main()
  File "C:\Python27\twitterbot.py", line 63, in main
    post_dict, post_ids = tweet_creator(subreddit)
  File "C:\Python27\twitterbot.py", line 30, in tweet_creator
    short_link = shorten(post_link)
  File "C:\Python27\twitterbot.py", line 46, in shorten
    link = json.loads(r.text)['id']
KeyError: 'id'
The full script can be seen below (tokens and keys removed):
import praw
import json
import requests
import tweepy
import time

access_token = 'REMOVED'
access_token_secret = 'REMOVED'
consumer_key = 'REMOVED'
consumer_secret = 'REMOVED'

def strip_title(title):
    if len(title) < 94:
        return title
    else:
        return title[:93] + "..."

def tweet_creator(subreddit_info):
    post_dict = {}
    post_ids = []
    print "[Computer] Getting posts from Reddit"
    for submission in subreddit_info.get_hot(limit=20):
        post_dict[strip_title(submission.title)] = submission.url
        post_ids.append(submission.id)
    print "[Computer] Generating short link using goo.gl"
    mini_post_dict = {}
    for post in post_dict:
        post_title = post
        post_link = post_dict[post]
        short_link = shorten(post_link)
        mini_post_dict[post_title] = short_link
    return mini_post_dict, post_ids

def setup_connection_reddit(subreddit):
    print "[Computer] setting up connection with Reddit"
    r = praw.Reddit('yasoob_python reddit twitter Computer '
                    'monitoring %s' %(subreddit))
    subreddit = r.get_subreddit(subreddit)
    return subreddit

def shorten(url):
    headers = {'content-type': 'application/json'}
    payload = {"longUrl": url}
    url = "https://www.googleapis.com/urlshortener/v1/url"
    r = requests.post(url, data=json.dumps(payload), headers=headers)
    link = json.loads(r.text)['id']
    return link

def duplicate_check(id):
    found = 0
    with open('posted_posts.txt', 'r') as file:
        for line in file:
            if id in line:
                found = 1
    return found

def add_id_to_file(id):
    with open('posted_posts.txt', 'a') as file:
        file.write(str(id) + "\n")

def main():
    subreddit = setup_connection_reddit('showerthoughts')
    post_dict, post_ids = tweet_creator(subreddit)
    tweeter(post_dict, post_ids)

def tweeter(post_dict, post_ids):
    auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
    auth.set_access_token(access_token, access_token_secret)
    api = tweepy.API(auth)
    for post, post_id in zip(post_dict, post_ids):
        found = duplicate_check(post_id)
        if found == 0:
            print "[Computer] Posting this link on twitter"
            print post+" "+post_dict[post]+" #Python #reddit #Computer"
            api.update_status(post+" "+post_dict[post]+" #Python #reddit #Computer")
            add_id_to_file(post_id)
            time.sleep(30)
        else:
            print "[Computer] Already posted"

if __name__ == '__main__':
    main()
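To see why the 'id' key is missing before the KeyError is raised, one option is to dump the raw goo.gl response instead of indexing it directly. This is a minimal debugging sketch, assuming the same endpoint and payload as shorten() above; debug_shorten is just an illustrative name, not part of the original script:

import json
import requests

def debug_shorten(url):
    # Same request as shorten(), but inspect the response before trusting 'id'.
    headers = {'content-type': 'application/json'}
    payload = {"longUrl": url}
    api_url = "https://www.googleapis.com/urlshortener/v1/url"
    r = requests.post(api_url, data=json.dumps(payload), headers=headers)
    data = json.loads(r.text)
    if 'id' not in data:
        # On failure the API usually returns an 'error' object instead of 'id',
        # so printing the body shows the actual reason for the KeyError.
        print "[Debug] status code:", r.status_code
        print "[Debug] response body:", r.text
        return None
    return data['id']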
Answer 0 (score: 0):
I ran into some similar problems, but I'm not sure it's the same issue. Starting with PRAW 3.0, the Redditor class uses the lazy-load feature that was used for the Subreddit class in PRAW 2.x. You can check whether the object has been loaded with assert(redditor.has_fetched).
Specific to the Redditor class, both 'id' and 'name' are lazy-loaded attributes, and the same is true for some other attributes such as 'link_karma'. I used to query them directly with vars(redditor)['id']; that worked in PRAW 2.x but raised the error in PRAW 3.0. My fix was to call redditor.link_karma, which loads everything.
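If it helps, here is a minimal sketch of that workaround for PRAW 3.x; the user agent string and username are placeholders, not taken from the original post:

import praw

r = praw.Reddit('lazyload example script')
redditor = r.get_redditor('someuser')

# 'id' is lazy-loaded in PRAW 3.0, so it may not be present right after creation.
print redditor.has_fetched

# Accessing a lazy attribute such as link_karma forces the object to load...
karma = redditor.link_karma

# ...after which the attribute dictionary contains 'id' again, as it did in PRAW 2.x.
print vars(redditor)['id']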