I'm new to Python and programming in general; all I know is how to write simple scripts for everyday office work. However, I've run into a scenario where I need to use Python to access a particular web page, which is the search output of a specific bioinformatics web server.
On that page there is a table whose second column is a hyperlink that opens a pop-up box containing a FASTA file of the protein sequence.
I'd like to write a script that systematically clicks these links, copies each link's FASTA sequence one after another, and pastes them into a text file.
Is this kind of automation possible with Python? If so, where do I start in terms of modules for accessing Internet Explorer / web pages, etc.? If you could point me in the right direction or give me an example script, I could try to work it out myself!
Thank you very much!
I would post what I've tried, but I honestly have no idea where to start!
Answer (score: 1)
This takes about a minute and a half to run, after which it opens the text file containing the sequences. You will, of course, need to fill in your own credentials etc. at the bottom.
import os
import mechanize
import cookielib
from bs4 import BeautifulSoup
from urlparse import urljoin


class SequenceDownloader(object):

    def __init__(self, base_url, analyzes_page, email, password, result_path):
        self.base_url = base_url
        self.login_page = urljoin(self.base_url, 'login')
        self.analyzes_page = urljoin(self.base_url, analyzes_page)
        self.email = email
        self.password = password
        self.result_path = result_path
        self.browser = mechanize.Browser()
        self.browser.set_handle_robots(False)
        # set cookie
        cj = cookielib.CookieJar()
        self.browser.set_cookiejar(cj)

    def login(self):
        self.browser.open(self.login_page)
        # select the first (and only) form and log in
        self.browser.select_form(nr=0)
        self.browser.form['email'] = self.email
        self.browser.form['password'] = self.password
        self.browser.submit()

    def get_html(self, url):
        self.browser.open(url)
        return self.browser.response().read()

    def scrape_overview_page(self, html):
        sequences = []
        soup = BeautifulSoup(html, 'html.parser')
        table = soup.find('table', {'class': 'styled data-table'})
        table_body = table.find('tbody')
        rows = table_body.find_all('tr', {'class': 'search_result'})
        for row in rows:
            cols = row.find_all('td')
            # the second column holds the link to the pop-up with the FASTA sequence
            sequence_url = cols[1].a.get('href')
            sequence_html = self.get_html(sequence_url)
            sequence_soup = BeautifulSoup(sequence_html, 'html.parser')
            sequence = sequence_soup.find('pre').text
            sequences.append(sequence)
        return sequences

    def save(self, sequences):
        with open(self.result_path, 'w') as f:
            for sequence in sequences:
                f.write(sequence + '\n')

    def get_sequences(self):
        self.login()
        overview_html = self.get_html(self.analyzes_page)
        sequences = self.scrape_overview_page(overview_html)
        self.save(sequences)


if __name__ == '__main__':
    base_url = r'https://usgene.sequencebase.com'
    analyzes_page = 'user/reports/123/analyzes/9876'
    email = 'user1998510@gmail.com'
    password = 'YourPassword'
    result_path = r'C:\path\to\result.fasta'

    sd = SequenceDownloader(base_url, analyzes_page, email, password, result_path)
    sd.get_sequences()
    os.startfile(result_path)
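Note that mechanize, cookielib and urlparse as used above are Python 2 libraries. If you are on Python 3, a rough equivalent can be put together with requests and BeautifulSoup. The sketch below makes the same assumptions as the script above (a login form posted to a 'login' URL with 'email' and 'password' fields, a results table of class 'styled data-table' with rows of class 'search_result', and the FASTA text inside a <pre> tag), so the field names and selectors may need to be adapted to the actual server:

import os
from urllib.parse import urljoin

import requests
from bs4 import BeautifulSoup

BASE_URL = 'https://usgene.sequencebase.com'
ANALYZES_PAGE = 'user/reports/123/analyzes/9876'
EMAIL = 'user1998510@gmail.com'
PASSWORD = 'YourPassword'
RESULT_PATH = r'C:\path\to\result.fasta'


def get_sequences():
    # a Session keeps the login cookie between requests
    session = requests.Session()

    # assumed login endpoint and form field names; check the real login form
    session.post(urljoin(BASE_URL, 'login'),
                 data={'email': EMAIL, 'password': PASSWORD})

    # fetch the overview page and pick out the result rows
    overview = session.get(urljoin(BASE_URL, ANALYZES_PAGE))
    soup = BeautifulSoup(overview.text, 'html.parser')
    table = soup.find('table', {'class': 'styled data-table'})
    rows = table.find('tbody').find_all('tr', {'class': 'search_result'})

    sequences = []
    for row in rows:
        # the second column holds the link to the pop-up with the FASTA sequence;
        # urljoin copes with both relative and absolute hrefs
        href = row.find_all('td')[1].a.get('href')
        sequence_page = session.get(urljoin(BASE_URL, href))
        sequence_soup = BeautifulSoup(sequence_page.text, 'html.parser')
        sequences.append(sequence_soup.find('pre').text)
    return sequences


if __name__ == '__main__':
    sequences = get_sequences()
    with open(RESULT_PATH, 'w') as f:
        for sequence in sequences:
            f.write(sequence + '\n')
    os.startfile(RESULT_PATH)  # Windows only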