Python 爬虫抓取数据,保存到 csv 文件。但是,并非所有变量都写入了 csv

时间:2017-07-31 10:41:09

标签: python csv selenium web-scraping beautifulsoup

将抓取的数据写入 csv 文件时遇到问题。页面能够正常加载,脚本的第一部分也工作正常,但写入 csv 时出现了问题。

我的问题是:如何将名称(Name)、Home State 和 Backer State 这些数据写入 csv 文件?以下代码仅把类别(Category)正确写入了 csv 文件。

代码:

import urllib.request
from bs4 import BeautifulSoup
from selenium import webdriver
import pandas as pd
import time 
from datetime import datetime
from collections import OrderedDict
import re

# Open Kickstarter's discovery page in Firefox and harvest a (name, url)
# pair for every category tile shown there.
browser = webdriver.Firefox()
browser.get('https://www.kickstarter.com/discover?ref=nav')
categories = browser.find_elements_by_class_name('category-container')

# One (category name, category link) tuple per tile.
category_links = [
    (str(tile.find_element_by_class_name('f3').text),
     tile.find_element_by_class_name('bg-white').get_attribute('href'))
    for tile in categories
]

scraped_data = []
now = datetime.now()

# BUG FIX: in the original code the sub-category clicks, the project-link
# collection, and the per-project scraping were dedented OUT of this loop,
# so they ran only once after the loop had finished — using only the last
# category's page and the stale `category` variable. That is why only the
# Category column looked right in the csv. Nesting everything inside the
# loop makes Name / Home State / Backer State get scraped per category.
for category_name, category_url in category_links:
    browser.get(category_url)
    browser.find_element_by_class_name('sentence-open').click()
    time.sleep(2)
    browser.find_element_by_id('category_filter').click()
    time.sleep(2)

    # Expand each sub-category filter; ids are not contiguous, so a missing
    # id is skipped instead of aborting the run.
    for i in range(27):
        try:
            time.sleep(2)
            browser.find_element_by_id('category_' + str(i)).click()
            time.sleep(2)
        except Exception:
            # Narrowed from a bare `except:`, which would also swallow
            # KeyboardInterrupt/SystemExit and hide real bugs.
            pass

    # Collect the project links visible for this category.
    projects = []
    for project_link in browser.find_elements_by_class_name('clamp-3'):
        projects.append(project_link.find_element_by_tag_name('a').get_attribute('href'))

    for counter, project in enumerate(projects):
        # `enumerate` already supplies the counter; the original's redundant
        # manual `counter += 1` was dropped.
        page1 = urllib.request.urlopen(project)
        soup1 = BeautifulSoup(page1, "lxml")
        page2 = urllib.request.urlopen(project.split('?')[0] + '/community')
        soup2 = BeautifulSoup(page2, "lxml")
        time.sleep(2)
        print(str(counter) + ': ' + project + '\nStatus: Started.')

        project_dict = OrderedDict()
        project_dict['Category'] = category_name
        browser.get(project)
        project_dict['Name'] = soup1.find(class_='type-24 type-28-sm type-38-md navy-700 medium mb3').text
        project_dict['Home State'] = soup1.find(class_='nowrap navy-700 flex items-center medium type-12').text

        try:
            project_dict['Backer State'] = soup2.find(class_='location-list-wrapper js-location-list-wrapper').text
        except AttributeError:
            # soup2.find() returns None when the element is absent, and
            # `.text` then raises AttributeError — only that failure mode
            # should be tolerated, not every exception.
            pass

        print('Status: Done.')
        scraped_data.append(project_dict)

        later = datetime.now()
        diff = later - now

        print('The scraping took '+str(round(diff.seconds/60.0,2))+' minutes,         and                         scraped '+str(len(scraped_data))+' projects.')

        # Checkpoint the full accumulated table after every project so a
        # crash mid-run does not lose the data gathered so far.
        df = pd.DataFrame(scraped_data)
        df.to_csv('kickstarter-data1.csv')

0 个答案:

没有答案