Crawling Douban Top250 Movie Information with Python and Storing It in Excel/SQLite

1. Code

# -*- coding: utf-8 -*-
# @Time: 2020/5/13 12:40
# @Author: evating
# @File: Travel.py
# @Software: PyCharm

import re
import sqlite3
import urllib.request
import urllib.error

import xlwt
from bs4 import BeautifulSoup

def main():
    baseurl = "https://movie.douban.com/top250?start="
    # 1. Crawl the pages and parse the data
    datalist = getData(baseurl)
    savepath = ".\\豆瓣电影Top250.xls"
    dbpath = "movie.db"
    # 2. Save the data (to Excel or SQLite)
    # saveData(datalist, savepath)
    saveData2DB(datalist, dbpath)


def saveData(datalist, savepath):
    print("save...")
    book = xlwt.Workbook(encoding='utf-8', style_compression=0)
    sheet = book.add_sheet('豆瓣电影Top250', cell_overwrite_ok=True)
    col = ("电影详情链接", "图片链接", "影片中文名", "影片外国名", "评分", "评价数", "概况", "相关信息")
    for i in range(0, 8):                    # header row
        sheet.write(0, i, col[i])
    for i in range(len(datalist)):           # one row per movie (250 when the crawl succeeds)
        print("saving record %d" % (i + 1))
        data = datalist[i]
        for j in range(0, 8):
            sheet.write(i + 1, j, data[j])
    book.save(savepath)


def saveData2DB(datalist, dbpath):
    init_db(dbpath)
    conn = sqlite3.connect(dbpath)
    cur = conn.cursor()
    for data in datalist:
        for index in range(len(data)):
            if index == 4 or index == 5:     # score and rating count are numeric, no quoting
                continue
            data[index] = '"' + data[index] + '"'
        sql = '''
            insert into movie250(
            info_link, pic_link, cname, ename, score, rate, introduction, info)
            values (%s)''' % ",".join(data)
        print(sql)
        cur.execute(sql)
        conn.commit()
    cur.close()
    conn.close()
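

# The insert statement above is built by string concatenation, so a field that itself
# contains a double quote would break the SQL. A safer variant (a sketch that is not
# part of the original tutorial; the function name is made up here) uses sqlite3's
# parameterized queries instead:
def saveData2DB_safe(datalist, dbpath):
    init_db(dbpath)
    conn = sqlite3.connect(dbpath)
    cur = conn.cursor()
    sql = '''
        insert into movie250(
        info_link, pic_link, cname, ename, score, rate, introduction, info)
        values (?, ?, ?, ?, ?, ?, ?, ?)'''
    cur.executemany(sql, datalist)   # each row already holds exactly 8 fields, no manual quoting
    conn.commit()
    cur.close()
    conn.close()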


def init_db(dbpath):
    # CREATE TABLE statement for the movie250 table
    sql = '''
        create table if not exists movie250
        (
        id integer primary key autoincrement,
        info_link text,
        pic_link text,
        cname varchar,
        ename varchar,
        score numeric,
        rate numeric,
        introduction text,
        info text
        )
    '''
    conn = sqlite3.connect(dbpath)   # connect to (or create) the database file
    cursor = conn.cursor()           # get a cursor
    cursor.execute(sql)              # create the table
    conn.commit()                    # commit the change
    conn.close()                     # close the connection


# Regex for the movie detail-page link
findLink = re.compile(r'<a href="(.*?)">')
# Regex for the poster image link
findImgSrc = re.compile(r'<img.*src="(.*?)"', re.S)   # re.S lets . match newlines as well
# Regex for the movie title
findTitle = re.compile(r'<span class="title">(.*)</span>')
# Regex for the rating
findRating = re.compile(r'<span class="rating_num" property="v:average">(.*)</span>')
# Regex for the number of ratings
findJudge = re.compile(r'<span>(\d*)人评价</span>')
# Regex for the one-sentence summary
findInq = re.compile(r'<span class="inq">(.*)</span>')
# Regex for the remaining details (director, year, genre, ...)
findBd = re.compile(r'<p class="">(.*?)</p>', re.S)   # re.S needed because this block spans lines
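
# A quick sanity check of these patterns on tiny hypothetical snippets (made-up
# strings, not real page content), just to show what each one captures:
#   re.findall(findTitle, '<span class="title">肖申克的救赎</span>')                      -> ['肖申克的救赎']
#   re.findall(findJudge, '<span>2500000人评价</span>')                                   -> ['2500000']
#   re.findall(findRating, '<span class="rating_num" property="v:average">9.7</span>')    -> ['9.7']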


def getData(baseurl):
    datalist = []

    for i in range(0, 10):                   # 10 pages, 25 movies per page
        url = baseurl + str(i * 25)
        html = askURL(url)

        # 2. Parse each page
        soup = BeautifulSoup(html, "html.parser")
        for item in soup.find_all('div', class_="item"):    # each movie sits in a div.item
            data = []
            item = str(item)

            link = re.findall(findLink, item)[0]             # detail-page link
            data.append(link)
            imgSrc = re.findall(findImgSrc, item)[0]         # poster link
            data.append(imgSrc)
            titles = re.findall(findTitle, item)             # Chinese title plus foreign title, if any
            if len(titles) == 2:
                ctitle = titles[0]
                data.append(ctitle)
                otitle = titles[1].replace("/", "")
                data.append(otitle)
            else:
                data.append(titles[0])
                data.append('')                              # no foreign title
            rating = re.findall(findRating, item)[0]
            data.append(rating)
            judgeNum = re.findall(findJudge, item)[0]
            data.append(judgeNum)
            inq = re.findall(findInq, item)
            if len(inq) != 0:
                inq = inq[0].replace("。", "")                # drop the trailing full stop
                data.append(inq)
            else:
                data.append("")

            bd = re.findall(findBd, item)[0]
            bd = re.sub(r'<br(\s+)?/>(\s+)?', "", bd)        # strip <br/> tags
            bd = re.sub('/', "", bd)
            data.append(bd.strip())                          # strip leading/trailing whitespace

            datalist.append(data)
    print(datalist)
    return datalist


# Fetch the HTML of a given URL
def askURL(url):
    head = {
        # Pretend to be a normal browser so that Douban does not reject the request
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/81.0.4044.138 Safari/537.36"
    }
    request = urllib.request.Request(url, headers=head)
    html = ""
    try:
        response = urllib.request.urlopen(request)
        html = response.read().decode("utf-8")
    except urllib.error.URLError as e:
        if hasattr(e, "code"):
            print(e.code)
        if hasattr(e, "reason"):
            print(e.reason)

    return html
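

# For reference only (not part of the original tutorial): the same fetch written with
# the third-party requests library, assuming it is installed via "pip install requests".
def askURL_requests(url):
    import requests
    headers = {"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 "
                             "(KHTML, like Gecko) Chrome/81.0.4044.138 Safari/537.36"}
    resp = requests.get(url, headers=headers, timeout=10)
    resp.raise_for_status()          # raise an exception on HTTP errors instead of returning ""
    return resp.text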


if __name__ == '__main__':
    main()
    # init_db("movietest.db")
    print("Crawling finished")

2. Run Results
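While running, the script prints each generated INSERT statement and finishes with "Crawling finished". To confirm that all 250 records actually landed in movie.db, a quick check along these lines can be run afterwards (a minimal sketch, assuming the crawl above completed and created the movie250 table):

import sqlite3

conn = sqlite3.connect("movie.db")
cur = conn.cursor()
cur.execute("select count(*) from movie250")
print(cur.fetchone()[0], "rows stored")                      # expected: 250 if all ten pages parsed
for row in cur.execute("select cname, score, rate from movie250 limit 5"):
    print(row)                                               # peek at the first five movies
conn.close()

To get the Excel output instead, uncomment the saveData(datalist, savepath) call in main(); it writes the same eight columns to 豆瓣电影Top250.xls.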
