Web Crawler Study Notes - day02


Crawler workflow

Import the module
Rewrite the UA (UA spoofing)
Specify the URL
Package the parameters
Send the request
Handle the response
Process the data
Persist the data to storage

Crawlers come in the two flavors covered below: general crawlers, which fetch whole pages, and focused crawlers, which extract specific pieces of data from a page.

Getting-started code example

# The simplest crawler

# Import the module
import requests

# Specify the URL
url = "https://www.sogou.com/web"

# Send the request and get the response
rep = requests.get(url=url)

# Get the page's original encoding
repEncode = rep.encoding

# Persist the page to a file, writing with that encoding
filename = "sogou.html"
with open(filename, "w", encoding=repEncode) as f:
    f.write(rep.text)


About the response object

Attribute               Meaning
response.status_code    status code; 200 means the request succeeded
response.encoding       the page's original character encoding
response.text           response body as a str
response.content        response body as bytes
response.url            the URL that was requested
response.headers        the response headers
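
A quick way to inspect these attributes (a minimal sketch, reusing the Sogou URL from the example above):

import requests

rep = requests.get("https://www.sogou.com/web")

print(rep.status_code)    # e.g. 200 on success
print(rep.encoding)       # encoding guessed from the response headers
print(rep.url)            # final URL after any redirects
print(rep.headers)        # response headers (case-insensitive dict)
print(type(rep.text))     # <class 'str'>
print(type(rep.content))  # <class 'bytes'>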

General crawler - the requests module

Code examples

GET request

"""

Send a GET request with parameters; all code from here on uses UA spoofing.

"""
# Import the module
import requests

# Rewrite the UA (UA spoofing)
headers = {
    "User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.142 Safari/537.36"
}

# Specify the URL
url = "https://www.sogou.com/web"

# Package the parameters
params = {
    "query": "美女图片",
}

# Send the request and get the response
rep = requests.get(url=url, params=params, headers=headers)

# Get the page's original encoding and persist the page with it
repEncode = rep.encoding
filename = "sogou_search.html"
with open(filename, "w", encoding=repEncode) as f:
    f.write(rep.text)


POST request

import requests

# Specify the URL (placeholder; fill in the real endpoint)
url = "xxx"

# UA spoofing
headers = {
    "User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.142 Safari/537.36"
}

# Set the form parameters
data = {
    "keyA": "valueA",
    "keyB": "valueB",
}

# Send the POST request
rep = requests.post(url=url, data=data, headers=headers)


# Get the data
repText = rep.text

# Simulate persistence by printing
print(repText)


AJAX GET request

import requests

# URL determined by capturing the request in the browser's dev tools
# (the query parameters are supplied separately via params below)
url = "http://image.so.com/zjl"

# Build the parameters
param = {
    "ch": "beauty",
    "sn": "60",
    "listtype": "new",
    "temp": "1",
}

# UA spoofing
headers = {
    "User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.142 Safari/537.36"
}

# Send the request
rep = requests.get(url=url, params=param, headers=headers)

repEncode = rep.encoding
repText = rep.text

print(repText)

# Persist the result
with open("360pic美女.html", "w", encoding=repEncode) as f:
    f.write(repText)
    

AJAX POST request

import requests

# URL determined via packet capture
url = "http://www.kfc.com.cn/kfccda/ashx/GetStoreList.ashx?op=keyword"

data = {
    "cname": "",
    "pid": "",
    "keyword": "北京",
    "pageindex": "3",
    "pageSize": "10",
}

headers = {
    "User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.142 Safari/537.36"
}

rep = requests.post(url=url, data=data, headers=headers)

print(rep.text)


Exercise - basic crawler

# GET requests: scrape data for a range of page numbers
#
# Covers:
#   1. user interaction
#   2. proxy IPs

import requests

# Specify the URL
url = "https://www.sogou.com/sogou"

# Read the keyword and page range from the user
keyword = input("input key word: ")
startPage = int(input("input start page: "))
endPage = int(input("input end page: "))

# UA spoofing
headers = {
    "User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.142 Safari/537.36"
}

# Loop over the page range, build the parameters for each page and request it;
# a proxy IP is also configured
for i in range(startPage, endPage+1):
    params = {
        "query": keyword,
        "page": str(i),
    }

    # Proxy IP settings: pick an IP from a proxy site such as www.goubanjia.com
    # and pass the protocol plus ip:port in the proxies parameter
    rep = requests.get(url=url, params=params, headers=headers, proxies={"http": "117.127.16.207:8080"})

    # Get the page encoding
    repEncode = rep.encoding

    # Build the file name
    fileName = "{}_{}.html".format(keyword, i)

    # Write the file using the page's encoding
    with open(fileName, "w", encoding=repEncode) as f:
        f.write(rep.text)


Focused crawler

Regex

Introduction

Regular expressions: write a pattern, then pull the matching fragments out of the page text with re.findall, typically using a non-greedy pattern together with re.S so it can span multiple lines.
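
A minimal sketch of that combination, non-greedy matching plus re.S, run on a small made-up HTML snippet (the snippet and URL are invented for illustration):

import re

html = '<div class="thumb">\n<img src="//pic.example.com/a.jpg" alt="">\n</div>'

# .*? is non-greedy; re.S lets "." match newlines as well,
# so the pattern can span several lines of HTML
rule = '<div class="thumb">.*?<img src="//(.*?)" .*?</div>'

print(re.findall(rule, html, re.S))  # ['pic.example.com/a.jpg']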


Code example

import requests
import re
import os

# Specify the URL
url = "https://www.qiushibaike.com/pic/"

# UA spoofing
headers = {
    "User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.142 Safari/537.36"
}

# Send the request and get the data
rep = requests.get(url=url, headers=headers)

# Page text
repText = rep.text

# Define the regex rule (non-greedy matching)
re_rule = '<div class="thumb">.*?<img src="//(.*?)" .*?</div>'

# Extract the image URLs with the rule
# re.S: "." also matches newlines (DOTALL); re.M: multi-line mode (MULTILINE); re.I: ignore case (IGNORECASE)
img_url = re.findall(re_rule, repText, re.S)

# Make sure the image directory exists
if not os.path.exists("./qiutu"):
    os.mkdir("./qiutu")

# Loop over the matched results
for i in img_url:
    # Complete the URL
    url = "http://" + i

    # Request the image
    r = requests.get(url=url, headers=headers)

    # Binary data
    rCont = r.content

    # Build the file name from the last path segment
    filename = i.split("/")[-1]

    # Storage path
    file = "./qiutu/" + filename

    # Persist to disk
    with open(file, "wb") as f:
        f.write(rCont)
        print("{} downloaded".format(file))


xpath

Instantiating an xpath tree

# the etree module comes from lxml
from lxml import etree

# from a local file
tree = etree.parse(filename)
tree.xpath("xpath expression")

# from network data (an HTML string)
tree = etree.HTML(page_source_string)
tree.xpath("xpath expression")


xpath expressions

Locate by attribute:
    # find the div tag whose class attribute is "song"
    //div[@class="song"]

Locate by hierarchy & index:
    # find the direct child a under the second li under the direct child ul
    # of the div whose class is "tang"
    //div[@class="tang"]/ul/li[2]/a

Logical operators:
    # find the a tag whose href is empty and whose class is "du"
    //a[@href="" and @class="du"]

Fuzzy matching:
    //div[contains(@class, "ng")]
    //div[starts-with(@class, "ta")]

Getting text:
    # /text() gets the text directly inside a tag
    # //text() gets the text inside a tag and inside all of its descendants

    //div[@class="song"]/p[1]/text()
    //div[@class="tang"]//text()

Getting attributes:
    //div[@class="tang"]//li[2]/a/@href


Code example

# Scrape the "upcoming tests" list from 17173's mobile game site
import requests
from lxml import etree
import xlwt


def write_excel(data):
    # Create a workbook and set the encoding
    workbook = xlwt.Workbook(encoding='utf-8')
    # Create a worksheet
    worksheet = workbook.add_sheet('My Worksheet')

    # Write to the sheet
    # arguments are row, column, value
    for i in range(len(data)):
        for j in range(len(data[i])):
            # xpath results are lists; join them into a single string
            if isinstance(data[i][j], list):
                data[i][j] = ", ".join(data[i][j])
            worksheet.write(i, j, data[i][j])

    # Save
    workbook.save('Excel_test.xls')

def spider():
    """
    Scrape the data from the given URL,
    process it and return it as a list.
    :return:
    """
    url = "http://newgame.17173.com/shouyou/ceshi"

    # UA spoofing
    headers = {
        "User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.142 Safari/537.36"
    }

    # Send the request
    rep = requests.get(url=url, headers=headers)
    rep_text = rep.text

    tree = etree.HTML(rep_text)  # tree is an Element object

    # Get the list of entries
    div_list = tree.xpath('//div[@class="g-box4 box"][1]//ul[2]/li')

    ret = [["游戏名", "上线时间", "测试类型", "游戏类型", "平台", "工作室"]]

    for i in div_list:
        name = i.xpath('.//h6[@class="c1"]/a/text()')
        time = i.xpath('.//p[@class="c2"]/text()')
        qa_type = i.xpath('.//p[@class="c3"]/text()')
        game_type = i.xpath('.//i[@class="c4"]/text()')
        plate = i.xpath('./p[@class="c5"]/span//text()')
        auth = i.xpath('.//span[@class="c7"]/text()')

        data = [name, time, qa_type, game_type, plate, auth]
        ret.append(data)

    return ret


if __name__ == "__main__":
    data = spider()
    # print(data)
    write_excel(data)
	

bs4

Usage

# Wrap the content in a bs4 object
from bs4 import BeautifulSoup

# from a local file
soup = BeautifulSoup(open('localfile'), 'lxml')


# from network data
soup = BeautifulSoup(bytes_data, 'lxml')

Then call the bs4 object's methods.

Methods

# Find by tag name
soup.a  # the first a tag

# Get attributes
soup.a.attrs  # all attributes and values of that a tag, as a dict
soup.a.attrs["href"]  # the href attribute
soup.a["href"]  # same as above

# Get content
soup.a.string  # similar to /text() in xpath
soup.a.text  # similar to //text() in xpath
soup.a.get_text()  # same as above

# find: the first tag that matches
soup.find("a")  # the first a tag
soup.find("a", title="xxx")  # the first a tag whose title attribute is xxx
soup.find("a", class_="xxx")  # the first a tag whose class attribute is xxx; note the trailing underscore
soup.find("a", id="xxx")  # the first a tag whose id attribute is xxx

# find_all: all tags that match
soup.find_all("a")  # all a tags, returned as a list
soup.find_all(["a", "b"])  # all a tags and b tags
soup.find_all("a", limit=2)  # only the first 2 matches

# Selectors
soup.select("#xxx")  # tags whose id is xxx
soup.select(".book-mulu > ul > li")  # li children of ul children of elements with class book-mulu


Selectors

  • Basic selectors
    • tag selector (div)
    • class selector (.)
    • id selector (#)
  • Hierarchy selectors (see the sketch after this list)
    • .a #b .c #d --> // (descendant selector)
    • .a>#b>.c>#d --> / (child selector)
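
A short sketch of the descendant vs child distinction with soup.select, again on an invented snippet:

from bs4 import BeautifulSoup

html = """
<div class="book-mulu">
  <ul>
    <li><a href="/ch1">Chapter 1</a></li>
  </ul>
</div>
"""

soup = BeautifulSoup(html, 'lxml')

# descendant selector: any a inside .book-mulu
print(soup.select(".book-mulu a")[0].text)               # Chapter 1

# child selector: the full parent chain must match
print(soup.select(".book-mulu > ul > li > a")[0].text)   # Chapter 1

# a child selector that skips a level matches nothing
print(soup.select(".book-mulu > a"))                     # []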

Code example

# Scrape "Romance of the Three Kingdoms"

import requests
from bs4 import BeautifulSoup

# Specify the URL
url = "http://www.shicimingju.com/book/sanguoyanyi.html"

# Get the site root
url_root = "http://" + url.split('/')[2]

# UA spoofing
headers = {
    "User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.142 Safari/537.36"
}

# Send the request and get the result
rep = requests.get(url=url, headers=headers)
rep_text = rep.text

# Instantiate the bs4 object
soup = BeautifulSoup(rep_text, 'lxml')

# Open a file handle
fp = open('./sanguo.txt', 'w', encoding="utf-8")

# Use a bs4 selector to find the chapter tags
li_list = soup.select(".book-mulu > ul > li")

# Loop over the tags to get each title and chapter link
for i in li_list:
    # Title
    title = i.a.text

    # Chapter link, joined into a full URL
    content = url_root + i.a["href"]

    # Request the chapter page and get its text
    content_page_text = requests.get(url=content, headers=headers).text

    # Instantiate a bs4 object for the chapter page
    soup = BeautifulSoup(content_page_text, "lxml")

    # Use find to get the unique content div
    content_text = soup.find('div', class_="chapter_content").text

    # Persist to disk
    fp.write(title + "\n" + content_text + "\n")

# Close the file handle
fp.close()


Using cookies

# Instantiate a session
session = requests.session()

# Used the same way as requests.get / requests.post
session.post(url=url, data=data, headers=headers)  # cookies returned by the server are stored in the session automatically

session.get(url=url, params=params, headers=headers)  # subsequent requests carry those cookies


Code example

# Scrape a renren.com page that requires login
import requests

url = "http://www.renren.com/ajaxLogin/login?1=1&uniqueTimestamp=2019731514156"

# UA spoofing
headers = {
    "User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.142 Safari/537.36"
}

# Package the login parameters
data = {
    "email": "15810020949",
    "icode": "",
    "origURL": "http://www.renren.com/home",
    "domain": "renren.com",
    "key_id": "1",
    "captcha_type": "web_login",
    "password": "xxxxxxxxxxxxxxxxxxxxxxxxx",
    "rkey": "386db1871dce1b360f18ae81a91aabdf",
    "f": "http%3A%2F%2Fwww.renren.com%2F266632989",
}

session = requests.session()

# Send the login request through the session; the returned cookies are stored in the session automatically
session.post(url=url, headers=headers, data=data)

url = "http://www.renren.com/266632989/newsfeed/photo"
rep = session.get(url=url, headers=headers)
with open('./renren.html','w',encoding='utf-8') as f:
    f.write(rep.text)
