# Reference:
# coding: utf-8
import json
import os
from collections import OrderedDict
from datetime import datetime

import requests
from lxml import etree
# save_data(path, OrderedDict-of-sheets) matches the pyexcel_xlsx API, which is
# assumed to be where the save_data() call at the bottom of this script came from.
from pyexcel_xlsx import save_data
cnt_now = datetime.now()
# Zero-padded timestamp such as "202406051430"; plain "{}{}{}{}{}" formatting
# drops leading zeros, so filenames would not sort reliably.
cnt_time = cnt_now.strftime("%Y%m%d%H%M")
url = "http://www.ricedata.cn/gene/accessions_switch.aspx?p={}&cloned=true"
user_agent = "Mozilla/4.0 (compatible; MSIE 5.5; Windows NT)"
headers = {"User-Agent": user_agent}
# Make sure the output directories exist before writing into them
os.makedirs("./doc", exist_ok=True)
os.makedirs("./json_data", exist_ok=True)
result_path = "./doc/{}.xlsx".format(cnt_time)
xls_data = OrderedDict()
result_biao = []
# Target container for the scraped rows, keyed by page
result_data = dict()
# Titles for each output column
result_data['title'] = ["GeneID", "基因名称或注释", "基因符号", "RAP_Locus", "MSU_Locus", "cDNAs", "RefSeq_Locus", "Uniprots"]
for page in range(1, 36):
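    # The list page is an ASP.NET WebForms app, so paging is a simulated
    # postback rather than a query-string parameter: __VIEWSTATE and
    # __VIEWSTATEGENERATOR replay server-side state captured from a real
    # browser request, while __EVENTTARGET/__EVENTARGUMENT name the pager
    # control and the requested page index. If the site rotates its view
    # state, the token below has to be re-captured from the browser's
    # network panel.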
    files = {
        "__VIEWSTATE": "/wEPDwUKMTU1NzczODAwMA9kFgICAw9kFgQCAQ8WAh4LXyFJdGVtQ291bnQCDxYeZg9kFgJmDxUIIOS4ueaxnyAgICAgICAgICAgICAgICAgICAgICAgICAgIei/h+mjjualvCAgICAgICAgICAgICAgICAgICAgICAgIAsxOOaXpSAwN+aXtgU5My42NgQ0Ny40A+W5swAAZAIBD2QWAmYPFQgg6buE5rKzICAgICAgICAgICAgICAgICAgICAgICAgICAg6b6Z6ZeoICAgICAgICAgICAgICAgICAgICAgICAgICALMTjml6UgMDbml7YGMzgwLjA3BDI5MDAD5raoCDUwMDAuMDAwCDc2MDAuMDAwZAICD2QWAmYPFQgh5YyX5rSb5rKzICAgICAgICAgICAgICAgICAgICAgICAgIeWNl+iNo+WNjiAgICAgICAgICAgICAgICAgICAgICAgIAsxOOaXpSAwNuaXtgUzNDEuOAMxMjcD5raoBzUwMC4wMDAIMTUwMC4wMDBkAgMPZBYCZg8VCCDkuLnmsZ8gICAgICAgICAgICAgICAgICAgICAgICAgICDkuLnlh6QgICAgICAgICAgICAgICAgICAgICAgICAgIAsxOOaXpSAwNuaXtgY1NDQuNDkDNy4zA+iQvQc4MDAuMDAwCDE1MDAuMDAwZAIED2QWAmYPFQgg5Li55rGfICAgICAgICAgICAgICAgICAgICAgICAgICAg5Li55YekICAgICAgICAgICAgICAgICAgICAgICAgICALMTjml6UgMDbml7YGNTQ0LjQ1BDYuNzgD5raoBzgwMC4wMDAIMTUwMC4wMDBkAgUPZBYCZg8VCCDmsYnmsZ8gICAgICAgICAgICAgICAgICAgICAgICAgICDnmb3msrMgICAgICAgICAgICAgICAgICAgICAgICAgIAsxOOaXpSAwNuaXtgYxNzcuNDQEMjIwMAPokL0JMTMwMDAuMDAwCTIyNTAwLjAwMGQCBg9kFgJmDxUIIOaxieaxnyAgICAgICAgICAgICAgICAgICAgICAgICAgIOWuieW6tyAgICAgICAgICAgICAgICAgICAgICAgICAgCzE45pelIDA25pe2BjIzOC41OAQyMzAwA+a2qAkxMDAwMC4wMDAJMTYwMDAuMDAwZAIHD2QWAmYPFQgg5rGJ5rGfICAgICAgICAgICAgICAgICAgICAgICAgICAg5rSL5Y6/ICAgICAgICAgICAgICAgICAgICAgICAgICALMTjml6UgMDbml7YGNDU3LjQ2BDEzMTAD5raoCDcwMDAuMDAwCDgwMDAuMDAwZAIID2QWAmYPFQgg5rGJ5rGfICAgICAgICAgICAgICAgICAgICAgICAgICAg55+z5rOJICAgICAgICAgICAgICAgICAgICAgICAgICALMTjml6UgMDbml7YGMzYzLjAyBDMwNjAD6JC9CTEyMDAwLjAwMAkxNTAwMC4wMDBkAgkPZBYCZg8VCCHljJfmtJvmsrMgICAgICAgICAgICAgICAgICAgICAgICAg54q2IOWktCAgICAgICAgICAgICAgICAgICAgICAgICALMTjml6UgMDbml7YGMzYyLjEyAzE5MwPokL0IMjUwMC4wMDAIMzMwMC4wMDBkAgoPZBYCZg8VCCHljJfmtJvmsrMgICAgICAgICAgICAgICAgICAgICAgICAh5Lqk5Y+j5rKzICAgICAgICAgICAgICAgICAgICAgICAgCzE45pelIDA25pe2Bjc4NS42OAI1NwPmtqgIMjAwMC4wMDAIMzAwMC4wMDBkAgsPZBYCZg8VCCHljJfmtJvmsrMgICAgICAgICAgICAgICAgICAgICAgICAh5YiY5a625rKzICAgICAgICAgICAgICAgICAgICAgICAgCzE45pelIDA25pe2BjExMTYuOQQyMy40A+W5swgzMDAwLjAwMAg0MDAwLjAwMGQCDA9kFgJmDxUIIeWYiemZteaxnyAgICAgICAgICAgICAgICAgICAgICAgICDnlaXpmLMgICAgICAgICAgICAgICAgICAgICAgICAgIAsxOOaXpSAwNuaXtgY2MzguNTkENDEzMAPokL0IMzAwMC4wMDAINDAwMC4wMDBkAg0PZBYCZg8VCCDmsYnmsZ8gICAgICAgICAgICAgICAgICAgICAgICAgICHmrabkvq/plYcgICAgICAgICAgICAgICAgICAgICAgICALMTjml6UgMDbml7YGNTU3LjYzAzM2OAPokL0IMjUwMC4wMDAIMzAwMC4wMDBkAg4PZBYCZg8VCCDpu4TmsrMgICAgICAgICAgICAgICAgICAgICAgICAgICDlkLTloKEgICAgICAgICAgICAgICAgICAgICAgICAgIAsxOOaXpSAwNuaXtgY2MzguMTkEMjg0MAPmtqgINTAwMC4wMDAJMTgwMDAuMDAwZAIDDw8WBB4QQ3VycmVudFBhZ2VJbmRleAIEHgtSZWNvcmRjb3VudAKTBGRkZDQ1F1G8R0ahKi0b16nKosVMYaT1",
        "__VIEWSTATEGENERATOR": "961FD43C",
        "__EVENTTARGET": "pager",
        "__EVENTARGUMENT": page,
        "pager_input": 3,
    }
url = "http://www.shxsw.com.cn/iframe/hdsqxx_list.aspx"
response = requests.post(url, data=files)
# response=requests.post(url,data=json.dumps(files),headers={'Content-Type':'application/x-www-form-urlencoded'})
ret_str = ""
if response.status_code != 200:
print('sadffffffffffffffffffffffffffffffffff')
break
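    # A more resilient variant (a commented sketch; HTTPAdapter/Retry are
    # standard requests/urllib3 APIs, but the retry tuning is an assumption):
    # from requests.adapters import HTTPAdapter
    # from urllib3.util.retry import Retry
    # session = requests.Session()
    # session.mount("http://", HTTPAdapter(max_retries=Retry(total=3, backoff_factor=1)))
    # response = session.post(url, data=files, headers=headers)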
    try:
        new_html = etree.HTML(response.content.decode("utf-8"))
        tr_elements = new_html.xpath('//tr')
    except Exception as e:
        print("\n\n\n", url, "page {} failed to parse, reason: {}".format(page, e), "\n\n\n")
        continue
    # The first two <tr> are header rows and the last <tr> is the pager; only
    # the rows in between carry data, hence the 1-based XPath indices
    # 3 .. len(tr_elements) - 1.
    page_data = []
    for i in range(3, len(tr_elements)):
        # Note: an XPath copied from Chrome via inspect -> Copy -> Copy XPath
        # has the form //*[@id="tb"]/tbody/tr[3]/td[1] .. td[8]; the '/tbody'
        # segment must be stripped before handing it to lxml.
        try:
            # Collect the eight <td> cells of this row so it lines up with
            # the eight column titles declared above.
            tr_data = []
            for col in range(1, 9):
                td = new_html.xpath('//*[@id="tb"]/tr[{}]/td[{}]'.format(i, col))
                if col <= 2:
                    # The first two columns are mandatory; a missing cell here
                    # raises IndexError and the row is skipped below.
                    tr_data.append(td[0].text)
                else:
                    # Optional columns fall back to an empty string.
                    tr_data.append(td[0].text if td else '')
            print(tr_data)
        except Exception as e:
            print("\n\n\n", url, "page {}, tr {} failed, reason: {}".format(page, i, e), "\n\n\n")
            continue
        page_data.append(tr_data)
    result_data["page_{}".format(page)] = page_data
    print("crawling page {} of {} success.".format(page, url))
# Persist the crawl result to a JSON file
with open("./json_data/{}.json".format(cnt_time), "w+", encoding="utf-8") as fp:
    fp.write(json.dumps(result_data, ensure_ascii=False))
print("\n\n\nwrite json success.")
# Read the JSON file back
with open("./json_data/{}.json".format(cnt_time), "r", encoding="utf-8") as fp:
    json_data = json.loads(fp.read())
print("\n\n\nread json success.")
result_biao.append(json_data['title'])
for page in range(1, 36):
    # .get() guards against pages that failed to download above
    for tr in json_data.get("page_{}".format(page), []):
        result_biao.append(tr)
        print("page_{}: ".format(page), tr)
    print("\n")
xls_data.update({u"sheet1": result_biao})
save_data(result_path, xls_data)
print("\n\n\nsave {} success.".format(result_path))