Solving Python crawler problems

1. Scraping Jinjiang (jjwxc.net) pages

(1) Error: UnicodeDecodeError: 'utf-8' codec can't decode byte 0x8b in position 1: invalid start byte

(2) Cause: the site sends the response gzip-compressed (the request asks for it with headers['Accept-Encoding'] = "gzip"), so decoding the raw bytes as text produces garbage.
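A quick way to see where that byte comes from (a minimal sketch, not from the original post): gzip output always begins with the magic bytes 0x1f 0x8b, and 0x8b can never start a UTF-8 sequence.

import gzip

compressed = gzip.compress("测试".encode("gbk"))
print(compressed[:2].hex())   # 1f8b -- the gzip magic number
compressed.decode("utf-8")    # raises UnicodeDecodeError ... byte 0x8b in position 1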

(3) Solution

Decompress the returned page with the gzip module.

Normal (non-gzip) version:

def askURL(url):
    head = {
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/87.0.4280.141 Safari/537.36 Edg/87.0.664.75"
    }

    request = urllib.request.Request(url, headers=head)
    html = ""
    try:
        response = urllib.request.urlopen(request)
        html = response.read().decode('GBK')
        # html = response.read()
        # buff = BytesIO(html)
        # f = gzip.GzipFile(fileobj=buff)
        # html = f.read().decode("GBK")
        # Other approaches tried earlier while the encoding issue was unresolved
    except urllib.error.URLError as e:
        if hasattr(e, "code"):
            print(e.code)
        if hasattr(e, "reason"):
            print(e.reason)
    return html

gzip version:

import gzip

from io import BytesIO

def askURL_novel(url):
    head = {
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/87.0.4280.141 Safari/537.36 Edg/87.0.664.75",
        "Accept-encoding": "gzip"
    }
    print(url)
    request = urllib.request.Request(url, headers=head)
    html = ""
    try:
        response = urllib.request.urlopen(request)
        #html = response.read().decode('GBK')
        html = response.read()
        buff = BytesIO(html)
        f = gzip.GzipFile(fileobj=buff)
        html = f.read().decode("gbk","ignore")
        # Other approaches tried earlier while the encoding issue was unresolved
    except urllib.error.URLError as e:
        if hasattr(e, "code"):
            print(e.code)
        if hasattr(e, "reason"):
            print(e.reason)
    return html
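A side note: since Python 3.2, gzip.decompress works directly on bytes, so the BytesIO/GzipFile pair above can be shortened to a single line with the same behavior:

        html = gzip.decompress(response.read()).decode("gbk", "ignore")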

2. Decoding problems still remained

Some bytes in the pages are not valid GBK, so change f.read().decode("gbk") to

f.read().decode("gbk", "ignore")

so that undecodable bytes are skipped instead of raising an error.
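A minimal sketch of what the second argument does, using a made-up byte sequence: "ignore" silently drops bytes that are not valid GBK, while "replace" turns them into the U+FFFD replacement character.

raw = "晋江".encode("gbk") + b"\xff"   # 0xff can never start a GBK character
print(raw.decode("gbk", "ignore"))    # 晋江      (the bad byte is dropped)
print(raw.decode("gbk", "replace"))   # 晋江�     (the bad byte becomes U+FFFD)
# raw.decode("gbk") would raise UnicodeDecodeError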

3. Some pages come back gzip-compressed and others do not, so use the request header to ask the server to send every page gzip-compressed (a more defensive variant that checks the response header is sketched after the snippet below):

head = {
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/87.0.4280.141 Safari/537.36 Edg/87.0.664.75",
        "Accept-encoding": "gzip"
    }
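Even with Accept-Encoding set, a server is not required to compress its response, so a more defensive variant (my own sketch, reusing the same urllib setup; the name askURL_any is hypothetical) checks the Content-Encoding response header before decompressing:

import gzip
import urllib.request
from io import BytesIO

def askURL_any(url, head):
    # Decompress only when the server actually answered with gzip
    request = urllib.request.Request(url, headers=head)
    response = urllib.request.urlopen(request)
    raw = response.read()
    if response.headers.get("Content-Encoding") == "gzip":
        raw = gzip.GzipFile(fileobj=BytesIO(raw)).read()
    return raw.decode("gbk", "ignore")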

4. Wrote a crawler that, for every novel returned by one Jinjiang search query, scrapes the title, author, word count and link, then follows each link to scrape the novel's synopsis.

Reference article: https://blog.csdn.net/qq_45886782/article/details/114708628. The main addition is the part that scrapes the synopsis.

Remaining shortcoming: the characters in the synopsis are not cleaned up properly; the Chinese text should be extracted to form the synopsis (a sketch of this follows the full script below).

# -*- coding: utf-8 -*-

import urllib
import urllib.request
from bs4 import BeautifulSoup
import re
import xlwt
import sqlite3
import gzip
from io import BytesIO


def main():
    baseurl="http://www.jjwxc.net/bookbase.php?fw0=0&fbsj6=6&ycx1=1&xx1=1&mainview0=0&sd2=2&sd3=4&lx0=0&fg0=0&collectiontypes=ors&notlikecollectiontypes=ors&bq=&removebq=&searchkeywords=&page="
    # 1. Scrape the pages
    datalist = getData(baseurl)
    print(len(datalist))
    savepath = ".\\晋江小说.xls"
    # dbpath = "book.db"
    # 3. Save the data
    saveData(savepath, datalist)
    # saveDataDb(datalist, dbpath)


# Regular expressions prepared in advance for extracting the fields
findName = re.compile(r'">(.*?)</a>', re.U)
findLink = re.compile(r'<a href="(.*?)"')
findSize = re.compile(r'<td align="right">(\d+)</td>')


# 1. Scrape the pages (done)
def getData(baseurl):
    datalist = []
    for i in range(1, 6):  # number of search-result pages to crawl
        url = baseurl + str(i)
        html = askURL(url)
        # 2. Parse each page
        soup = BeautifulSoup(html, "html.parser")
        first_row = True  # skip the first <tr>, which has no book data to index
        for item in soup.findAll('tr'):
            data = []  # all information for one book
            item = str(item)
            # Extract the title
            Name = re.findall(findName, item)
            # Extract the link to the novel's page
            Link = re.findall(findLink, item)
            # Extract the word count
            Size = re.findall(findSize, item)
            if not first_row:
                Link[1] = 'http://www.jjwxc.net/' + Link[1]
                intro = load_intro(Link[1])
                data.append(Name[1])
                print(Name[1])
                print(intro)
                data.append(Link[1])
                data.append(Size[0])
                data.append(intro)
                datalist.append(data)

            first_row = False
    return datalist


# 2. Fetch the content of the page at a given URL
def askURL(url):
    head = {
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/87.0.4280.141 Safari/537.36 Edg/87.0.664.75"
    }

    request = urllib.request.Request(url, headers=head)
    html = ""
    try:
        response = urllib.request.urlopen(request)
        html = response.read().decode('GBK')
        # html = response.read()
        # buff = BytesIO(html)
        # f = gzip.GzipFile(fileobj=buff)
        # html = f.read().decode("GBK")
        # Other approaches tried earlier while the encoding issue was unresolved
    except urllib.error.URLError as e:
        if hasattr(e, "code"):
            print(e.code)
        if hasattr(e, "reason"):
            print(e.reason)
    return html

def askURL_novel(url):
    head = {
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/87.0.4280.141 Safari/537.36 Edg/87.0.664.75",
        "Accept-encoding": "gzip"
    }
    print(url)
    request = urllib.request.Request(url, headers=head)
    html = ""
    try:
        response = urllib.request.urlopen(request)
        #html = response.read().decode('GBK')
        html = response.read()
        buff = BytesIO(html)
        f = gzip.GzipFile(fileobj=buff)
        html = f.read().decode("gbk","ignore")
        # Other approaches tried earlier while the encoding issue was unresolved
    except urllib.error.URLError as e:
        if hasattr(e, "code"):
            print(e.code)
        if hasattr(e, "reason"):
            print(e.reason)
    return html

def load_intro(url_this):
    html_novel = askURL_novel(url_this)
    soup = BeautifulSoup(html_novel, "html.parser")
    intro_pre = soup.findAll("div", id="novelintro")
    # Crude cleanup: cut off the first 44 characters (the opening tag text)
    # and strip the <br> tags
    intro = str(intro_pre)[44:]
    intro = re.sub("<br/>", "", intro)
    intro = re.sub("<br>", "", intro)
    return intro

# 3. Save the data
# Write it to an Excel file
def saveData(savepath, datalist):
    workbook = xlwt.Workbook(encoding='utf-8', style_compression=0)
    worksheet = workbook.add_sheet('晋江1', cell_overwrite_ok=True)
    col = ("书名", "链接", "字数","文案")
    for i in range(0, len(col)):
        worksheet.write(0, i, col[i])
    for i in range(0, len(datalist)):
        data = datalist[i]
        for j in range(0, len(data)):
            worksheet.write(i + 1, j, data[j])
    workbook.save(savepath)

if __name__ == '__main__':
    # Run the crawler
    main()
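On the shortcoming noted above: one way to clean the synopsis would be to keep only the Chinese text. A minimal sketch, assuming a hypothetical helper named keep_chinese that is not part of the script:

import re

def keep_chinese(text):
    # Keep CJK characters, digits and common Chinese punctuation; drop leftover
    # HTML fragments and other debris.
    return "".join(re.findall(r"[\u4e00-\u9fff0-9,。!?:;、“”‘’…《》()]+", text))

# e.g. replace data.append(intro) with data.append(keep_chinese(intro))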
