A Simple Python Web Scraping Tutorial

urllib is a set of modules that ships with Python 3 for working with URLs; it can simulate a user visiting a page in a browser.
Beautiful Soup is a Python library for parsing the web pages you scrape.

1. Prerequisites

Set up a Python environment
Install PyCharm
Install pymysql: pip install pymysql

2. Using urllib

2.1 Open PyCharm, create a new .py file, and fetch the Baidu homepage

# Import the request module from urllib
from urllib import request

# Open the URL
resp = request.urlopen('http://www.baidu.com/')

# Print the response body; read() returns bytes, decode('utf-8') turns them into text
print(resp.read().decode('utf-8'))

Running it prints the raw HTML of the Baidu homepage.
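The object returned by urlopen also carries response metadata; a small sketch using the same URL as above:

# Inspect the HTTP status code and a response header
from urllib import request

resp = request.urlopen('http://www.baidu.com/')
print(resp.status)                     # e.g. 200
print(resp.getheader('Content-Type'))  # e.g. text/html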

2.2 To send request headers such as Origin or User-Agent, create a Request object and use its add_header(key, value) method.

# Import the request module from urllib
from urllib import request

# Build a Request object for the URL
req = request.Request('http://www.baidu.com/')

# Add a request header
req.add_header("User-Agent","Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/83.0.4103.97 Safari/537.36")

# Open the URL via the Request object
resp = request.urlopen(req)

# Print the response body; read() returns bytes, decode('utf-8') turns them into text
print(resp.read().decode('utf-8'))
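Equivalently, the headers can be passed to the Request constructor as a dict, which avoids the separate add_header call; a minimal sketch:

# Same request, headers supplied up front
from urllib import request

headers = {"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/83.0.4103.97 Safari/537.36"}
req = request.Request('http://www.baidu.com/', headers=headers)
print(request.urlopen(req).read().decode('utf-8'))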

2.3 Sending a POST request

# Requires: from urllib import parse
postData = parse.urlencode({
    key1: value1,
    key2: value2,
    ...
})
# Pass the form data as UTF-8 encoded bytes; req is the Request object from 2.2
resp = request.urlopen(req, data=postData.encode('utf-8'))
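A self-contained, runnable version of the same idea; httpbin.org is a public echo service used here only for illustration, and the form fields are made up:

# POST two form fields and print the echoed response
from urllib import request, parse

postData = parse.urlencode({'name': 'test', 'page': '1'})
req = request.Request('https://httpbin.org/post')
resp = request.urlopen(req, data=postData.encode('utf-8'))
print(resp.read().decode('utf-8'))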

3. Installing Beautiful Soup

Read the Beautiful Soup 4 documentation
Download Beautiful Soup 4
or install it with pip install beautifulsoup4
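To confirm the installation, import the package and print its version:

import bs4
print(bs4.__version__)  # e.g. 4.9.1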

3.1 Parsing an HTML snippet

# Import BeautifulSoup from the bs4 package
from bs4 import BeautifulSoup as bs

# A sample HTML document
doc = """
<html><head><title>The Dormouse's story</title></head>
<body>
<p class="title"><b>The Dormouse's story</b></p>

<p class="story">Once upon a time there were three little sisters; and their names were
<a href="http://example.com/elsie" class="sister" id="link1">Elsie</a>,
<a href="http://example.com/lacie" class="sister" id="link2">Lacie</a> and
<a href="http://example.com/tillie" class="sister" id="link3">Tillie</a>;
and they lived at the bottom of a well.</p>

<p class="story">...</p>
"""

# Parse doc with the built-in html.parser
soup = bs(doc,"html.parser")

# Print soup; prettify() renders the parse tree with standard indentation
print(soup.prettify())

The output is the same document re-indented, one tag per line.
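html.parser is the parser built into Python's standard library. Beautiful Soup can also delegate to third-party parsers such as lxml, which is generally faster but must be installed separately (pip install lxml):

# Same call, different parser backend
soup = bs(doc, "lxml")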
3.2 Methods for extracting data

soup.title
# <title>The Dormouse's story</title>

soup.title.name
# 'title'

soup.title.string
# "The Dormouse's story"
# extracts the text of a single tag

soup.get_text()
# The Dormouse's story
#
# The Dormouse's story
#
# Once upon a time there were three little sisters; and their names were
# Elsie,
# Lacie and
# Tillie;
# and they lived at the bottom of a well.
#
# ...
# extracts the text of the entire document

soup.title.parent.name
# 'head'

soup.p
# <p class="title"><b>The Dormouse's story</b></p>

soup.p['class']
# ['title']

soup.a
# <a class="sister" href="http://example.com/elsie" id="link1">Elsie</a>

soup.find_all('a')
# [<a class="sister" href="http://example.com/elsie" id="link1">Elsie</a>,
#  <a class="sister" href="http://example.com/lacie" id="link2">Lacie</a>,
#  <a class="sister" href="http://example.com/tillie" id="link3">Tillie</a>]

soup.find_all("a",href=re.compile(r"^http://example\.com/e"))
# [<a class="sister" href="http://example.com/elsie" id="link1">Elsie</a>]
# re.compile builds a regular expression; requires import re

soup.find(id="link3")
# <a class="sister" href="http://example.com/tillie" id="link3">Tillie</a>

soup.find("p",{"class":"story"})
# <p class="story">Once upon a time there were three little sisters; and their names were
# <a class="sister" href="http://example.com/elsie" id="link1">Elsie</a>,
# <a class="sister" href="http://example.com/lacie" id="link2">Lacie</a> and
# <a class="sister" href="http://example.com/tillie" id="link3">Tillie</a>;
# and they lived at the bottom of a well.</p>

for link in soup.find_all('a'):
    print(link.get('href'))
    # http://example.com/elsie
    # http://example.com/lacie
    # http://example.com/tillie
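Beautiful Soup also supports CSS selectors through select() and select_one(); two equivalents of the lookups above:

soup.select("p.story > a")
# the same three <a> tags that soup.find_all('a') returns

soup.select_one("a#link3")
# <a class="sister" href="http://example.com/tillie" id="link3">Tillie</a>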

3.3 Parsing a live page: the industry service links on CNKI (China National Knowledge Infrastructure)

# Imports
from urllib import request
from bs4 import BeautifulSoup as bs
import re

# Build a Request object for the URL
req = request.Request('https://www.cnki.net/')

# Add a request header
req.add_header("User-Agent","Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/83.0.4103.97 Safari/537.36")

# Fetch and decode the page
resp = request.urlopen(req).read().decode('utf-8')

# Parse the page with Beautiful Soup
soup = bs(resp,"html.parser")
# print(soup)

# Find all <a> tags whose href contains /r.cnki.net/
lists = soup.find_all("a",href=re.compile("/r.cnki.net/"))
# print(lists)

# Print each service name with its full URL
for url in lists:
    # if not re.search("\.(jpg|JPG)$",url["href"]):
    print(url.get_text(),"------","https:" + url['href'])
    

Each service name is printed alongside its full URL.
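For the next section it is handy to collect the pairs instead of only printing them; a sketch, assuming the soup object from the code above:

# Build a deduplicated list of (name, url) tuples
links = []
for url in soup.find_all("a", href=re.compile("/r.cnki.net/")):
    pair = (url.get_text(), "https:" + url['href'])
    if pair not in links:
        links.append(pair)
print(len(links))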

4. Persisting the data

4.1 Create a table cnki with columns id, name, and url (id is the primary key and auto-increments)
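The table can also be created from Python; a sketch of the DDL implied by the description above (the column lengths are assumptions):

# Create the cnki table if it does not exist yet
import pymysql

conn = pymysql.connect(host="localhost", port=3306, user='root',
                       passwd="123456", db='py_fecth', charset='utf8')
try:
    data = conn.cursor()
    data.execute("""
        CREATE TABLE IF NOT EXISTS `cnki` (
            `id`   INT AUTO_INCREMENT PRIMARY KEY,
            `name` VARCHAR(255),
            `url`  VARCHAR(255)
        )
    """)
    conn.commit()
finally:
    conn.close()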

4.2 Storing the data in the cnki table

# Imports
from urllib import request
from bs4 import BeautifulSoup as bs
import re
# Import pymysql
import pymysql

# Build a Request object for the URL
req = request.Request('https://www.cnki.net/')

# Add a request header
req.add_header("User-Agent","Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/83.0.4103.97 Safari/537.36")

# Fetch and decode the page
resp = request.urlopen(req).read().decode('utf-8')

# Parse the page with Beautiful Soup
soup = bs(resp,"html.parser")
# print(soup)

# Find all <a> tags whose href contains /r.cnki.net/
lists = soup.find_all("a",href=re.compile("/r.cnki.net/"))
# print(lists)

# Loop over every matching link
for url in lists:
    # if not re.search("\.(jpg|JPG)$",url["href"]):
    print(url.get_text(),"------","https:" + url['href'])

    # Connect to the database
    conn = pymysql.connect(host = "localhost",port = 3306,
                    user = 'root',passwd = "123456",
                    db = 'py_fecth',charset = 'utf8')

    # Ensure the connection is closed even if an error occurs
    try:
        # with conn.cursor() as data:
        # Get a cursor
        data = conn.cursor()
        # Define the parameterized SQL statement
        sql = "insert into `cnki` (`name`,`url`) values (%s,%s)"
        # Execute it with the scraped name and URL
        data.execute(sql,(url.get_text(),"https:" + url['href']))
        # Commit the insert
        conn.commit()
        # Close the cursor
        data.close()
    finally:
        # Close the connection
        conn.close()
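The loop above opens and closes a new connection for every link, which works but is wasteful. A sketch that connects once and inserts all rows in a single call, assuming the links list from the sketch in section 3.3:

# Insert every (name, url) pair with one executemany call
conn = pymysql.connect(host="localhost", port=3306, user='root',
                       passwd="123456", db='py_fecth', charset='utf8')
try:
    data = conn.cursor()
    sql = "insert into `cnki` (`name`,`url`) values (%s,%s)"
    data.executemany(sql, links)
    conn.commit()
    data.close()
finally:
    conn.close()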


4.3 The cnki table now contains 39 rows

4.4 Querying the database

# Import pymysql
import pymysql

# Connect to the database
conn = pymysql.connect(host = "localhost",port = 3306,
                user = 'root',passwd = "123456",
                db = 'py_fecth',charset = 'utf8')

# Ensure the connection is closed even if an error occurs
try:
    # with conn.cursor() as data:
    # Get a cursor
    data = conn.cursor()
    # Define the SQL statement
    sql = "select `name`,`url` from `cnki` where `id` is not null"

    # execute() returns the number of matching rows
    count = data.execute(sql)
    print(count)

    # Fetch a batch of rows (here, one)
    res = data.fetchmany(size=1)
    print(res)

    # Fetch the next row
    print(data.fetchone())

    # Fetch all remaining rows
    result = data.fetchall()
    # for i in result:
    #     print(i)

    # Close the cursor
    data.close()
finally:
    # Close the connection
    conn.close()

The output shows the row count, then the fetched rows.
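By default each row comes back as a tuple; pymysql can return dicts keyed by column name instead, via its DictCursor:

# Fetch rows as dicts instead of tuples
import pymysql

conn = pymysql.connect(host="localhost", port=3306, user='root',
                       passwd="123456", db='py_fecth', charset='utf8',
                       cursorclass=pymysql.cursors.DictCursor)
try:
    data = conn.cursor()
    data.execute("select `name`,`url` from `cnki` where `id` is not null")
    print(data.fetchone())  # e.g. {'name': ..., 'url': ...}
finally:
    conn.close()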
