(Douban Top 250) ~ My first web scraper: a classic, fun project with a real sense of accomplishment ~

Source code


# coding: utf-8  (the encoding declaration only takes effect on line 1 or 2)
# @Time   : 2021/8/30 15:01
# @Author : DongGu

import re               # regex extraction of fields from each item's HTML
import urllib.request   # fetching pages
import urllib.error     # URLError handling (imported explicitly rather than relying on urllib.request)
import bs4              # BeautifulSoup for HTML parsing
import xlwt             # writing the legacy .xls workbook

# Regex patterns for field extraction
findlink = re.compile(r'<a href="(.*?)">')                                              # detail-page link
findimg = re.compile(r'<img.*src="(.*?)"',re.S)                                         # poster image URL
findname = re.compile(r'<span class="title">(.*)</span>')                               # title (Chinese and/or original)
findrating = re.compile(r'<span class="rating_num" property="v:average">(.*)</span>')   # score
findjudge = re.compile(r'<span>(\d*)人评价</span>')                                      # number of ratings
findinq = re.compile(r'<span class="inq">(.*)</span>')                                  # one-line quote
findbd = re.compile(r'<p class="">(.*?)</p>',re.S)                                      # details paragraph
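
# A quick sanity check for findname (a minimal sketch; the sample below is a
# simplified stand-in for one real list item, where the two title spans sit
# on separate lines so the non-DOTALL pattern matches each one separately):
#   sample = '<span class="title">肖申克的救赎</span>\n<span class="title"> / The Shawshank Redemption</span>'
#   re.findall(findname, sample)  ->  ['肖申克的救赎', ' / The Shawshank Redemption']
# This is why getdata() below checks len(name) == 2: entries with an original
# title yield two matches, Chinese-only entries yield one.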

# Fetch one page and return its HTML
def askURL(url):
    head = {
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/92.0.4515.159 Safari/537.36"
    }
    request = urllib.request.Request(url,headers=head)
    html = ""

    try:
        response = urllib.request.urlopen(request)
        html = response.read().decode("utf-8")
        #print(html)
    except urllib.error.URLError as e:
        if hasattr(e,"code"):
            print(e.code)
        if hasattr(e,"reason"):
            print(e.reason)
    return html
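
# An equivalent fetch using the requests library (a sketch under the
# assumption that requests is installed; the script itself sticks to
# urllib and never calls this function):
def askURL_requests(url):
    import requests
    head = {
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/92.0.4515.159 Safari/537.36"
    }
    try:
        response = requests.get(url, headers=head, timeout=10)
        response.raise_for_status()          # raise on 4xx/5xx instead of returning an error page
        response.encoding = "utf-8"
        return response.text
    except requests.RequestException as e:   # covers connection errors, timeouts, bad status
        print(e)
        return ""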

# Crawl all 10 listing pages and parse every movie entry
def getdata(baseurl):
    datalist = []

    for i in range(0,10):              # 10 pages, 25 movies per page
        url = baseurl + str(i * 25)    # the start parameter pages through the list
        html = askURL(url)             # fetch the page source

        # Parse the page
        soup = bs4.BeautifulSoup(html,"html.parser")
        for item in soup.find_all('div',class_="item"):   # one <div class="item"> per movie
            data = []
            item = str(item)  # back to a string so the regex patterns can run on it
            link = re.findall(findlink,item)[0]
            data.append(link)

            img = re.findall(findimg,item)[0]
            data.append(img)

            name = re.findall(findname,item)   # one or two titles: Chinese, plus the original when present
            if len(name) == 2:
                cname = name[0]
                data.append(cname)
                oname = name[1].replace("/","")   # drop the " / " separator before the original title
                data.append(oname)
            else:
                data.append(name[0])
                data.append(' ')                  # no original title: keep the columns aligned

            rating = re.findall(findrating,item)[0]
            data.append(rating)

            judgenum = re.findall(findjudge,item)[0]
            data.append(judgenum)

            inq = re.findall(findinq,item)  # some entries have no one-line quote
            if len(inq) != 0:
                inq = inq[0].replace("。","")   # drop the trailing full stop
                data.append(inq)
            else:
                data.append(" ")

            bd = re.findall(findbd,item)[0]
            bd = re.sub(r'<br(\s+)?/>(\s+)?'," ",bd)   # raw string, and match the full <br/> tag
            bd = re.sub('/'," ",bd)
            data.append(bd.strip())     # strip leading/trailing whitespace

            datalist.append(data)
    return datalist
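
# The same fields can also be read straight off the BeautifulSoup tree,
# skipping the str()-plus-regex step. A sketch of the idea for one item
# (parse_item_bs4 is a hypothetical helper the script does not call; pass
# it the tag before it is converted to a string):
def parse_item_bs4(item):
    link = item.find('a')['href']                                         # first <a> carries the detail URL
    img = item.find('img')['src']                                         # poster URL
    titles = [s.get_text() for s in item.find_all('span', class_='title')]
    rating = item.find('span', class_='rating_num').get_text()
    return link, img, titles, rating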

# Save the scraped rows into an .xls workbook
def savedata(datalist,savepath):
    book = xlwt.Workbook(encoding="utf-8",style_compression=0)
    sheet = book.add_sheet("豆瓣电影Top250",cell_overwrite_ok=True)
    col = ("电影链接","图片链接","影片中文名","影片外国名","评分","评价数","概述","相关信息")
    for i in range(0,8):
        sheet.write(0,i,col[i])

    for i in range(0,250):
        print("writing row %d" % (i+1))
        data = datalist[i]
        for j in range(0,8):
            sheet.write(i+1,j,data[j])

    book.save(savepath)
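
# xlwt can only write the legacy .xls format. For a dependency-free
# fallback the standard csv module works too (a sketch; savedata_csv is
# a hypothetical helper the script does not call):
def savedata_csv(datalist, savepath):
    import csv
    with open(savepath, "w", newline="", encoding="utf-8-sig") as f:   # utf-8-sig so Excel displays Chinese correctly
        writer = csv.writer(f)
        writer.writerow(("电影链接","图片链接","影片中文名","影片外国名","评分","评价数","概述","相关信息"))
        writer.writerows(datalist)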

def main():
    baseurl = "https://movie.douban.com/top250?start="  # the listing pages, paged via the start parameter
    datalist = getdata(baseurl)
    savepath = "top.250.xls"
    savedata(datalist,savepath)

# script entry point
if __name__ == "__main__":
    main()

The scraped result: a top.250.xls workbook with one header row and 250 rows of movie data (screenshot omitted).
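
A quick way to spot-check the workbook without opening Excel (a minimal sketch; it assumes the xlrd package is installed, and note that xlrd 2.x still reads legacy .xls files):

import xlrd

sheet = xlrd.open_workbook("top.250.xls").sheet_by_index(0)
print(sheet.nrows)          # expect 251: one header row plus 250 movies
print(sheet.row_values(1))  # the eight fields of the top-ranked movie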
