Implementing a Web Scraper in Python

A small piece of a research project: scraping information from public government websites.

Target site: Guiyang Municipal People's Government, Science & Technology Project Management (guiyang.gov.cn)

Postscript: the code is fairly redundant, and the only anti-scraping measure is a randomized request header; next time I'll try an IP proxy pool (a rough sketch follows below).
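As a starting point for that follow-up, here is a minimal sketch of routing requests through a randomly chosen proxy. The pool entries below are placeholders rather than working proxies, and a real pool would also need health checks and retry logic:

import random
import requests

# Hypothetical proxy pool: replace the placeholder addresses with real proxies.
proxy_pool = [
    "http://127.0.0.1:8001",
    "http://127.0.0.1:8002",
]

def get_with_proxy(url, headers):
    proxy = random.choice(proxy_pool)
    # Route both http and https traffic through the chosen proxy
    return requests.get(url, headers=headers,
                        proxies={"http": proxy, "https": proxy},
                        timeout=10)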

# -*- coding: utf-8 -*-
"""
Created on Sat Apr 16 11:33:42 2022

@author: wangxingmei

"""
import requests  # HTTP client library, widely used for writing crawlers and testing server responses
import re        # regular expressions (to extract content from specific tags in the page source)
import random
from lxml import etree
import pymysql

def create():
    db = pymysql.connect(host="localhost",user="root",password="123456",db="guiyangshi")  # connect to the database

    cursor = db.cursor()
    #cursor.execute("DROP TABLE IF EXISTS GY_xiangmu")
 
    sql = """CREATE TABLE GY_xiangmu (
            ID INT PRIMARY KEY AUTO_INCREMENT,
            categ VARCHAR(50) DEFAULT '项目',
            title CHAR(100),
            date CHAR(20),
            source CHAR(50),
            article TEXT,
            appendix TEXT
            )"""
 
    cursor.execute(sql)
 
    db.close()
 
 
create()  # create the table
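# Note (a sketch, not applied above): create() raises an error if GY_xiangmu
# already exists; MySQL's CREATE TABLE IF NOT EXISTS makes it safe to rerun:
#   sql = """CREATE TABLE IF NOT EXISTS GY_xiangmu ( ... )"""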
def insert(title, date, source, article, appendix):
    db = pymysql.connect(host="localhost",user="root",password="123456",db="guiyangshi")  # connect to the database

    cursor = db.cursor()
    sql = "INSERT INTO GY_xiangmu(title,date,source,article,appendix) VALUES (%s,%s,%s,%s,%s)"
    data = (title, date, source, article, appendix)
    try:
        cursor.execute(sql, data)  # parameterized query: pymysql escapes the values
        db.commit()
        print('Insert succeeded')
    except Exception as e:
        db.rollback()
        print("Insert failed")
        print(e)
    db.close()
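# A tidier variant of insert() (a sketch, assuming pymysql >= 1.0, where the
# connection object is a context manager that closes itself on exit):
# def insert(title, date, source, article, appendix):
#     sql = "INSERT INTO GY_xiangmu(title,date,source,article,appendix) VALUES (%s,%s,%s,%s,%s)"
#     with pymysql.connect(host="localhost", user="root", password="123456", db="guiyangshi") as db:
#         with db.cursor() as cursor:
#             cursor.execute(sql, (title, date, source, article, appendix))
#         db.commit()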


user_agent = [
    "Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10_6_8; en-us) AppleWebKit/534.50 (KHTML, like Gecko) Version/5.1 Safari/534.50",
    "Mozilla/5.0 (Windows; U; Windows NT 6.1; en-us) AppleWebKit/534.50 (KHTML, like Gecko) Version/5.1 Safari/534.50",
    "Mozilla/5.0 (Windows NT 10.0; WOW64; rv:38.0) Gecko/20100101 Firefox/38.0",
    "Mozilla/5.0 (Windows NT 10.0; WOW64; Trident/7.0; .NET4.0C; .NET4.0E; .NET CLR 2.0.50727; .NET CLR 3.0.30729; .NET CLR 3.5.30729; InfoPath.3; rv:11.0) like Gecko",
    "Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Trident/5.0)",
    "Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0)",
    "Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 6.0)",
    "Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1)",
    "Mozilla/5.0 (Macintosh; Intel Mac OS X 10.6; rv:2.0.1) Gecko/20100101 Firefox/4.0.1",
    "Mozilla/5.0 (Windows NT 6.1; rv:2.0.1) Gecko/20100101 Firefox/4.0.1",
    "Opera/9.80 (Macintosh; Intel Mac OS X 10.6.8; U; en) Presto/2.8.131 Version/11.11",
    "Opera/9.80 (Windows NT 6.1; U; en) Presto/2.8.131 Version/11.11",
    "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_7_0) AppleWebKit/535.11 (KHTML, like Gecko) Chrome/17.0.963.56 Safari/535.11",
    "Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; Maxthon 2.0)",
    "Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; TencentTraveler 4.0)",
    "Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1)",
    "Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; The World)",
    "Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; Trident/4.0; SE 2.X MetaSr 1.0; SE 2.X MetaSr 1.0; .NET CLR 2.0.50727; SE 2.X MetaSr 1.0)",
    "Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; 360SE)",
    "Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; Avant Browser)",
    "Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1)",
    "Mozilla/5.0 (iPhone; U; CPU iPhone OS 4_3_3 like Mac OS X; en-us) AppleWebKit/533.17.9 (KHTML, like Gecko) Version/5.0.2 Mobile/8J2 Safari/6533.18.5",
    "Mozilla/5.0 (iPod; U; CPU iPhone OS 4_3_3 like Mac OS X; en-us) AppleWebKit/533.17.9 (KHTML, like Gecko) Version/5.0.2 Mobile/8J2 Safari/6533.18.5",
    "Mozilla/5.0 (iPad; U; CPU OS 4_3_3 like Mac OS X; en-us) AppleWebKit/533.17.9 (KHTML, like Gecko) Version/5.0.2 Mobile/8J2 Safari/6533.18.5",
    "Mozilla/5.0 (Linux; U; Android 2.3.7; en-us; Nexus One Build/FRF91) AppleWebKit/533.1 (KHTML, like Gecko) Version/4.0 Mobile Safari/533.1",
    "MQQBrowser/26 Mozilla/5.0 (Linux; U; Android 2.3.7; zh-cn; MB200 Build/GRJ22; CyanogenMod-7) AppleWebKit/533.1 (KHTML, like Gecko) Version/4.0 Mobile Safari/533.1",
    "Opera/9.80 (Android 2.3.4; Linux; Opera Mobi/build-1107180945; U; en-GB) Presto/2.8.149 Version/11.10",
    "Mozilla/5.0 (Linux; U; Android 3.0; en-us; Xoom Build/HRI39) AppleWebKit/534.13 (KHTML, like Gecko) Version/4.0 Safari/534.13",
    "Mozilla/5.0 (BlackBerry; U; BlackBerry 9800; en) AppleWebKit/534.1+ (KHTML, like Gecko) Version/6.0.0.337 Mobile Safari/534.1+",
    "Mozilla/5.0 (hp-tablet; Linux; hpwOS/3.0.0; U; en-US) AppleWebKit/534.6 (KHTML, like Gecko) wOSBrowser/233.70 Safari/534.6 TouchPad/1.0",
    "Mozilla/5.0 (SymbianOS/9.4; Series60/5.0 NokiaN97-1/20.0.019; Profile/MIDP-2.1 Configuration/CLDC-1.1) AppleWebKit/525 (KHTML, like Gecko) BrowserNG/7.1.18124",
    "Mozilla/5.0 (compatible; MSIE 9.0; Windows Phone OS 7.5; Trident/5.0; IEMobile/9.0; HTC; Titan)",
    "UCWEB7.0.2.37/28/999",
    "NOKIA5700/ UCWEB7.0.2.37/28/999",
    "Openwave/ UCWEB7.0.2.37/28/999",
    "Mozilla/4.0 (compatible; MSIE 6.0; ) Opera/UCWEB7.0.2.37/28/999",
	"Mozilla/6.0 (iPhone; CPU iPhone OS 8_0 like Mac OS X) AppleWebKit/536.26 (KHTML, like Gecko) Version/8.0 Mobile/10A5376e Safari/8536.25",

]

headers = {'User-Agent': random.choice(user_agent)}

def get_user_agent():
    return random.choice(user_agent)
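# Note (sketch): `headers` above is chosen once at import time, so every request
# reuses the same User-Agent. To rotate it per request, rebuild the header just
# before each call instead:
#   headers = {'User-Agent': get_user_agent()}
#   res = requests.get(url, headers=headers)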


#Get the listing-page links for pages 1..n (range(1, 2) fetches only page 1; widen it for more pages)
for i in range(1,2):
    #Guiyang Municipal People's Government: science & technology project management
    if i==1:
        url="http://www.guiyang.gov.cn/zwgk/zdlyxxgkx/kjcx_5617899/kjxmgl/index.html#%E7%A7%91%E6%8A%80%E5%88%9B%E6%96%B0_5617899"
    else:
        url="http://www.guiyang.gov.cn/zwgk/zdlyxxgkx/kjcx_5617899/kjxmgl/index_"+str(i-1)+".html"+"#%E7%A7%91%E6%8A%80%E5%88%9B%E6%96%B0_5617899"
    print(url)
    
    
    res = requests.get(url,headers=headers)  # GET request
    res.encoding = res.apparent_encoding  # set the encoding before reading res.text
    selector = etree.HTML(res.text)  # parse the response string into an HTML element tree

    href = selector.xpath('//ul[@class="zwgk-ul clearFix"]/li/a/@href')
    #new_list = ['https://www.guiyang.gov.cn/jdhy/jdhyhygq1/'+ x for x in href]
    
    #Get the link to each article's content page
    for link in href[0:17]:
        content_url = link
        #content_url=content_url.replace("./", "")
        print(content_url)  # URL of the article page
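        # Optional (sketch): a short random pause between article requests is
        # gentler on the server and less likely to trigger rate limiting.
        # time.sleep(random.uniform(1, 3))  # requires: import time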
        
        res = requests.get(content_url, headers=headers)  # GET request
        res.encoding = res.apparent_encoding  # guess the encoding (could also hard-code "GB2312")
        selector = etree.HTML(res.text)  # parse the string into an HTML element tree
        
        #Article title
        title = selector.xpath('//div[@class="main_321"]/h4/text()')[0]  # h4 text inside the div with class "main_321"
        print(title)
        
        
        # Publication date; the space after (.*) in the pattern keeps the match from running past the date
        date = re.findall('发布日期:(.*) ', selector.xpath('//p[@class="main_321p1"]/span/text()')[0])[0]
        print(date)
        
        
        # Source: the value sits inside a <script> tag, so pull the script text and match with a regex
        # (check the page source; e.g. s = selector.xpath('//div[@class="main_321"]/p/script/text()')[0])
        source = re.findall('来源:(.*)<', selector.xpath('//div[@class="main_321"]/p/script/text()')[0])[0].strip()
        print(source)
        
         
        # Article body: join the paragraph texts with newlines; [:-2] drops the last two entries
        article = '\n'.join(selector.xpath('//*[@class="main_3211 llx3 "]//following::p[1]/text()')[:-2])
        print(article)
        
        
        download_link = selector.xpath('//div[@class="main_3211 llx3 "]/div/p/a/@href')  # appendix hrefs (relative)
        print(download_link)
        
        links = []  # empty list for the full appendix URLs
        for h in download_link:
            links.append(re.sub('^.', 'https://www.guiyang.gov.cn/zwgk/zdlyxxgkx/kjcx_5617899/kjxmgl', str(h)))
        print(links)
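        # A more robust alternative (sketch): resolve each relative href against
        # the article page's own URL rather than a hard-coded base; this matches
        # Variant 2 below, which derives the base from res.url.
        # from urllib.parse import urljoin
        # links = [urljoin(res.url, h) for h in download_link]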
        
        #https://www.guiyang.gov.cn/zwgk/zdlyxxgkx/kjcx_5617899/kjxmgl/202203/P020220325601229019197.doc
        #https://www.guiyang.gov.cn/zwgk/zdlyxxgkx/kjcx_5617899/kjxmgl/202111/P020211104804867974068.xls
        
        # Variant 1: build appendix URLs by prepending the column's base URL
        var = []
        if len(download_link) == 0:
            var.append('无')  # no appendix
        for j in download_link:
            appendix1 = re.sub('^.', 'https://www.guiyang.gov.cn/zwgk/zdlyxxgkx/kjcx_5617899/kjxmgl', str(j))
            var.append(appendix1)
        print(var[:])
        
        
        # Variant 2: build appendix URLs from the directory of the article page
        var = []
        if len(download_link) == 0:
            var.append('无')  # no appendix
        for j in download_link:
            # '^http(.*)/' is greedy, so it matches res.url up to its last '/'
            appendix1 = str(re.search('^http(.*)/', res.url)[0]) + str(j).replace('./', '')  # full appendix link
            var.append(appendix1)
        print(var[:])
        
                
        appendix = '\n'.join(var)
        #print(appendix)
 
        #insert(title, date, source, article, appendix)  # uncomment to write this record to MySQL


def get_link(url):  # get all the article links published on one listing page
    res = requests.get(url, headers=headers)  # GET request
    selector = etree.HTML(res.text)  # parse the string into an HTML element tree
    href = selector.xpath('//div[@class="main_3211 llx3 "]/div/p/a/@href')
    # XPath is a language for locating elements and attributes in XML/HTML documents.
    # The expression collects the href attribute of every <a> tag under the target div;
    # in the page source a value looks like "./201911/t20191114_489608.html".
    links = []  # empty list
    for i in href:
        links.append(re.sub('^.', 'https://www.guiyang.gov.cn/zwgk/zdlyxxgkx/kjcx_5617899/kjxmgl', str(i)))
        '''
        Prepend https://www.guiyang.gov.cn/zwgk/zdlyxxgkx/kjcx_5617899/kjxmgl to the
        relative url above, which yields a full link such as
        https://www.guiyang.gov.cn/zwgk/zdlyxxgkx/kjcx_5617899/kjxmgl/201911/t20191114_489608.html
        re.sub() is the regex substitution function:
        the first argument is the pattern to be replaced,
        the second is the replacement string,
        the third is the input string.
        '''
    return links
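For completeness, a minimal usage sketch of get_link (the script as posted never calls it); the argument is the same first listing page used above:

if __name__ == "__main__":
    first_page = "http://www.guiyang.gov.cn/zwgk/zdlyxxgkx/kjcx_5617899/kjxmgl/index.html"
    for link in get_link(first_page):
        print(link)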





