Crawler in Action 14: Scraping the Jiangsu Province Environmental Monitoring Project

The script below logs in to the Jiangsu environmental monitoring site (http://58.213.159.173) with one account per city, walks the station tree in the left panel, and appends each station's history table to a per-station text file.

import requests
from bs4 import BeautifulSoup
import time
import re
import os

headers = {
    "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8",
    # Accept header: content types the client can handle
    "Accept-Encoding": "gzip, deflate",
    "Accept-Language": "en-US,en;q=0.5",
    "Connection": "keep-alive",
    "User-Agent": "Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:39.0) Gecko/20100101 Firefox/39.0"}


def login(username, passwd):
    """Log in to the monitoring site and return an authenticated session."""
    data = {
        'username': username,
        'password': passwd,
        'btnSubmit': ""
    }
    session = requests.Session()
    # Fetch the login page first to read the ASP.NET hidden form fields,
    # which must be echoed back for the POST to be accepted.
    html = session.get(
        'http://58.213.159.173/Login.aspx',
        headers=headers).text
    soup = BeautifulSoup(html, 'lxml')
    data['__VIEWSTATEGENERATOR'] = soup.find(
        'input', id='__VIEWSTATEGENERATOR').get('value')
    data['__EVENTVALIDATION'] = soup.find(
        'input', id='__EVENTVALIDATION').get('value')
    data['__VIEWSTATE'] = soup.find('input', id='__VIEWSTATE').get('value')
    session.post(
        'http://58.213.159.173/Login.aspx',
        data=data,
        headers=headers)
    return session


def get_data(session, filedir, site, sites):
    """Fetch one station's history table and append it to filedir/site.txt."""
    # The station to query is selected via the amdb_Js_station_id cookie.
    html = session.get(
        'http://58.213.159.173/Atmosphere/view/HistoryDataList.aspx',
        cookies={'amdb_Js_station_id': sites[site]},
        headers=headers).text
    soup = BeautifulSoup(html, 'lxml')
    # Collect every form field on the page, keyed by its id; skip inputs
    # without an id so the key checks below never see None.
    inputs = soup.find_all('input')
    data = {}
    for item in inputs:
        if item.get('id') is None:
            continue
        data[item.get('id')] = item.get('value')
    data['btnCancel'] = ''
    data['hid_Ctrl'] = ''
    data['btn_Ctrl'] = ''
    data['hidPageSize'] = 15
    data['ScriptManager1'] = "UpdatePanel1|btnSubmit"
    data['AspNetPager1$DropDownList1'] = "20"
    data['AspNetPager1$AspNetPager1_input'] = "1"
    data['__ASYNCPOST'] = "true"
    # Form fields that must be echoed back in the async POST.
    keys = [
        '__LASTFOCUS',
        'btnSubmit',
        'AspNetPager1$AspNetPager1_input',
        'start_time',
        '__VIEWSTATEGENERATOR',
        '__ASYNCPOST',
        '__EVENTARGUMENT',
        'AspNetPager1$DropDownList1',
        '__EVENTTARGET',
        'end_time',
        'hidPageSize',
        '__EVENTVALIDATION',
        'ScriptManager1',
        'hid_Ctrl',
        '__VIEWSTATE']
    # Keep only the whitelisted fields; checkbox ids use '_' in the HTML
    # but must be posted with '$' as ASP.NET control names.
    postdata = {}
    for key in data:
        if key in keys:
            postdata[key] = data[key]
        if 'cblChannelList' in key:
            postdata[key.replace('_', '$')] = data[key]
    html = session.post(
        'http://58.213.159.173/Atmosphere/view/HistoryDataList.aspx',
        data=postdata,
        cookies={'amdb_Js_station_id': sites[site]},
        headers=headers).text
    # The result table lives inside div.tbodyContainer > div#div_print.
    table = (BeautifulSoup(html, 'lxml')
             .find('div', {'class': 'tbodyContainer'})
             .find('div', id='div_print')
             .find_all('tr'))
    # Row 0 is the header; rows 1-15 hold one page of data.
    with open(filedir + '/' + site + '.txt', 'a', encoding='utf-8') as f:
        for tr in table[1:16]:
            line = '\t'.join(td.get_text() for td in tr.find_all('td'))
            f.write(line + '\n')


def crawl(session, filedir):
    """Enumerate the stations in the left-hand tree and download each one."""
    os.makedirs(filedir, exist_ok=True)
    html = session.get(
        'http://58.213.159.173/Atmosphere/left.aspx',
        headers=headers).text
    table = BeautifulSoup(html, 'lxml').find(
        'div', id='TreeView1n0Nodes').find_all(
        'td', {'class': 'TreeView1_3'})
    # Map station name -> station id, parsed from the doGet('','id') onclick.
    sites = {}
    for item in table:
        try:
            name = item.find('a').get_text()
            amdb_Js_station_id = re.findall(r"doGet\('','(\d+)'\)", str(item))[0]
        except (AttributeError, IndexError):
            continue
        sites[name] = amdb_Js_station_id
    # Retry each station up to three times before moving on.
    for site in sites:
        count = 0
        while True:
            try:
                get_data(session, filedir, site, sites)
                break
            except Exception:
                count += 1
                if count == 3:
                    break


def main():
    # One 'username-password' pair per city account.
    users = ['nj-nj',
             'sz-sz',
             'wx-wx',
             'cz-cz',
             'yz-yz',
             'zj-zj',
             'nt-nt',
             'xz-xz',
             'tz-tz',
             'yc-yc',
             'ha-ha',
             'lyg-lyg',
             'sq-sq']
    for item in users:
        user = item.split('-')
        try:
            session = login(user[0], user[1])
            crawl(session, user[0])
        except Exception:
            timenow = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime())
            print(timenow, user[0], 'failed')


if __name__ == '__main__':
    main()

Scraping historical daily air quality data for selected cities from the air quality monitoring site

Approach
-----------------------------------------
From a city's air quality page, collect the link for each month, then scrape that month's table data.
Lianyungang: https://www.aqistudy.cn/historydata/daydata.php?city=连云港
Lianyungang, May 2014: https://www.aqistudy.cn/historydata/daydata.php?city=连云港&month=2014-05

Problem encountered
-----------------------------------------
The table data in the fetched page is hidden, and requests cannot retrieve it. The page is most likely rendered dynamically.

Attempts
-----------------------------------------
1. Searched the XHR and JS requests for the endpoint that loads the hidden data; found nothing.
2. Used phantomjs .get() together with result = pd.read_html(...). This can return the hidden table, but only intermittently, so it cannot be used at scale.

Solution
-----------------------------------------
This site keeps the table data in the JavaScript variable items, visible in the browser Console. With selenium's webdriver.Firefox(), driver.execute_script("return items") retrieves the data.

Remaining problems
-----------------------------------------
A single page works, but fetching pages back to back raises two errors:
1. Message: ReferenceError: items is not defined
2. connection refused

Fixes:
1. The "connection refused" error is probably caused by too many open browser windows; call driver.quit() after each page.
2. If execute_script still fails, fall back to pd.read_html. phantomjs previously returned empty tables, likely because the page had not finished loading, so wait explicitly until the table appears before reading the page:
   element = wait.until(EC.element_to_be_clickable((By.XPATH, "/html/body/div[3]/div[1]/div[1]/table/tbody")))
3. The result is still occasionally empty; loop and refetch whenever the table comes back empty:
   if len(result) > 1:
       filename = str(month) + '.xls'
       result.to_excel('E:\python\案例程序\data\\' + filename)
       print('Saved ' + filename)
       driver.quit()
   else:
       driver.quit()
       return getdata(monthhref, month)
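Putting these fixes together, a minimal sketch of the whole getdata routine could look like the following. The getdata(monthhref, month) signature, the XPath, and the output directory come from the snippets above; the 20-second timeout and the DataFrame/first-table fallback are illustrative assumptions, not the original code.

import pandas as pd
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.ui import WebDriverWait


def getdata(monthhref, month):
    driver = webdriver.Firefox()
    driver.get(monthhref)
    try:
        # Wait until the data table has actually rendered (fix 2).
        wait = WebDriverWait(driver, 20)  # timeout is an assumed value
        wait.until(EC.element_to_be_clickable(
            (By.XPATH, "/html/body/div[3]/div[1]/div[1]/table/tbody")))
        # The site stores the table rows in the JS variable `items`.
        result = pd.DataFrame(driver.execute_script("return items"))
    except Exception:
        # Fallback: parse the rendered HTML and take the first table (fix 2).
        result = pd.read_html(driver.page_source)[0]
    if len(result) > 1:
        filename = str(month) + '.xls'
        result.to_excel('E:\\python\\案例程序\\data\\' + filename)
        print('Saved ' + filename)
        driver.quit()
    else:
        # Empty table: close the browser (fix 1) and retry this month (fix 3).
        driver.quit()
        return getdata(monthhref, month)

Quitting the driver before every retry keeps the number of open browser windows bounded, which is what resolves the "connection refused" error described above.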