A Python web-scraping case study: fetching the Douban Top 250 movie data

Scrape the Douban Top 250 movie data into an Excel file

from bs4 import BeautifulSoup  # HTML parsing / data extraction
import re  # regular expressions for text matching
import urllib.request, urllib.error  # building requests and fetching pages
import xlwt  # writing .xls Excel files


def main():
    baseUrl = "https://movie.douban.com/top250?start="
    savePath = '.\\豆瓣电影Top250.xls'
    # 1. Fetch the pages and 2. parse the data
    dataList = getData(baseUrl)
    # 3. Save to Excel
    saveData(dataList, savePath)

## Regular-expression patterns used for extraction
findLink = re.compile(r'<a href="(.*?)">')  # movie detail-page link
# Poster image URL; re.S lets '.' also match newlines
findImgSrc = re.compile(r'<img.*src="(.*?)"', re.S)
findTitle = re.compile(r'<span class="title">(.*?)</span>')  # title(s)
findRating = re.compile(r'<span class="rating_num" property="v:average">(.*?)</span>')  # score
findJudge = re.compile(r'<span>(\d*)人评价</span>')  # number of ratings
findInq = re.compile(r'<span class="inq">(.*)</span>', re.S)  # one-line summary
findBd = re.compile(r'<p class="">(.*?)</p>', re.S)  # "related info" block
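# A quick, illustrative sanity check of how these patterns behave (the sample
# HTML below is made up for the demo, not taken from Douban):
#   >>> re.findall(findTitle, '<span class="title">Movie</span>')
#   ['Movie']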


# Fetch and parse all ten result pages
def getData(baseUrl):
    dataList = []
    for i in range(0, 10):  # 10 pages x 25 movies each = 250 entries
        url = baseUrl + str(i * 25)
        # Request the page from the server and get its HTML
        html = askURL(url)
        # Parse with the built-in html.parser
        soup = BeautifulSoup(html, "html.parser")
        for item in soup.find_all('div', class_="item"):  # one div per movie
            data = []  # all fields of one movie
            item = str(item)  # back to a string so the regexes can run on it
            link = re.findall(findLink, item)[0]
            data.append(link)
            imgSrc = re.findall(findImgSrc, item)[0]
            data.append(imgSrc)
            titles = re.findall(findTitle, item)
            if len(titles) == 2:
                ctitle = titles[0]  # Chinese title
                data.append(ctitle)
                otitle = titles[1].replace("/", "")  # foreign title, separator slash stripped
                data.append(otitle)
            else:
                data.append(titles[0])
                data.append(' ')  # no foreign title on the page

            rating = re.findall(findRating, item)[0]
            data.append(rating)
            judge = re.findall(findJudge, item)[0]
            data.append(judge)
            inq = re.findall(findInq, item)  # the summary is sometimes missing
            if len(inq) != 0:
                inq = inq[0].replace(".", "")
                data.append(inq)
            else:
                data.append("")

            bd = re.findall(findBd, item)[0]
            bd = re.sub(r'<br(\s+)?/>', '', bd)  # raw string avoids an invalid-escape warning
            bd = re.sub('/', '', bd)
            data.append(bd.strip())

            dataList.append(data)
    return dataList


# Save the data to an Excel file
def saveData(dataList, savePath):
    book = xlwt.Workbook(encoding="utf-8", style_compression=0)
    sheet = book.add_sheet('豆瓣电影Top250', cell_overwrite_ok=True)
    col = ("电影详情链接", "图片链接", "影片中文名", "影片外国名", "评分", "评价数", "概况", "相关信息")
    for i in range(0, 8):
        # Write the header row
        sheet.write(0, i, col[i])
    for i in range(len(dataList)):  # iterate over what was actually scraped
        print("Row {0}".format(i + 1))
        data = dataList[i]
        for j in range(0, 8):
            # Write one cell
            sheet.write(i + 1, j, data[j])
    book.save(savePath)


# Fetch the HTML content of a single URL
def askURL(url):
    head = {
        # Spoof a browser User-Agent so the request is not rejected
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/99.0.4844.84 Safari/537.36"
    }
    request = urllib.request.Request(url, headers=head)
    html = ""
    try:
        response = urllib.request.urlopen(request)
        # Read the response body and decode it as UTF-8
        html = response.read().decode('utf-8')
    except urllib.error.URLError as e:
        if hasattr(e, "code"):
            print(e.code)
        if hasattr(e, "reason"):
            print(e.reason)
    return html
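# Note: if the third-party requests library is available, the same fetch
# could be written more compactly (an assumption, not part of this script):
#   import requests
#   html = requests.get(url, headers=head, timeout=10).text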


if __name__ == "__main__":
    main()
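To sanity-check the generated spreadsheet, a minimal read-back sketch using the xlrd package (an extra dependency, assumed installed; it must be a build that still reads .xls, e.g. xlrd 1.2.x) could look like this:

import xlrd  # assumed installed

book = xlrd.open_workbook('豆瓣电影Top250.xls')
sheet = book.sheet_by_index(0)
print(sheet.nrows - 1, "movies saved")  # minus the header row
print(sheet.row_values(1))  # the first data row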

Scrape the Douban Top 250 data and save it to SQLite

from bs4 import BeautifulSoup  # HTML parsing / data extraction
import re  # regular expressions for text matching
import urllib.request, urllib.error  # building requests and fetching pages
import xlwt  # writing .xls Excel files (kept for the optional Excel path)
import sqlite3  # SQLite database access (the public module, not _sqlite3)


def main():
    baseUrl = "https://movie.douban.com/top250?start="
    savePath = '.\\豆瓣电影Top250.xls'
    dbPath = "movie.db"
    # 1. Fetch the pages and 2. parse the data
    dataList = getData(baseUrl)
    # 3. Save to SQLite (the Excel path is kept but disabled)
    # saveData(dataList, savePath)
    saveDBData(dataList, dbPath)


## Regular-expression patterns used for extraction
findLink = re.compile(r'<a href="(.*?)">')  # movie detail-page link
# Poster image URL; re.S lets '.' also match newlines
findImgSrc = re.compile(r'<img.*src="(.*?)"', re.S)
findTitle = re.compile(r'<span class="title">(.*?)</span>')  # title(s)
findRating = re.compile(r'<span class="rating_num" property="v:average">(.*?)</span>')  # score
findJudge = re.compile(r'<span>(\d*)人评价</span>')  # number of ratings
findInq = re.compile(r'<span class="inq">(.*)</span>', re.S)  # one-line summary
findBd = re.compile(r'<p class="">(.*?)</p>', re.S)  # "related info" block


# Fetch and parse all ten result pages
def getData(baseUrl):
    dataList = []
    for i in range(0, 10):  # 10 pages x 25 movies each = 250 entries
        url = baseUrl + str(i * 25)
        # Request the page from the server and get its HTML
        html = askURL(url)
        # Parse with the built-in html.parser
        soup = BeautifulSoup(html, "html.parser")
        for item in soup.find_all('div', class_="item"):  # one div per movie
            data = []  # all fields of one movie
            item = str(item)  # back to a string so the regexes can run on it
            link = re.findall(findLink, item)[0]
            data.append(link)
            imgSrc = re.findall(findImgSrc, item)[0]
            data.append(imgSrc)
            titles = re.findall(findTitle, item)
            if len(titles) == 2:
                ctitle = titles[0]  # Chinese title
                data.append(ctitle)
                otitle = titles[1].replace("/", "")  # foreign title, separator slash stripped
                data.append(otitle)
            else:
                data.append(titles[0])
                data.append(' ')  # no foreign title on the page

            rating = re.findall(findRating, item)[0]
            data.append(rating)
            judge = re.findall(findJudge, item)[0]
            data.append(judge)
            inq = re.findall(findInq, item)  # the summary is sometimes missing
            if len(inq) != 0:
                inq = inq[0].replace(".", "")
                data.append(inq)
            else:
                data.append("")

            bd = re.findall(findBd, item)[0]
            bd = re.sub(r'<br(\s+)?/>', '', bd)  # raw string avoids an invalid-escape warning
            bd = re.sub('/', '', bd)
            data.append(bd.strip())

            dataList.append(data)
    return dataList


# Save the data to an Excel file
def saveData(dataList, savePath):
    book = xlwt.Workbook(encoding="utf-8", style_compression=0)
    sheet = book.add_sheet('豆瓣电影Top250', cell_overwrite_ok=True)
    col = ("电影详情链接", "图片链接", "影片中文名", "影片外国名", "评分", "评价数", "概况", "相关信息")
    for i in range(0, 8):
        # Write the header row
        sheet.write(0, i, col[i])
    for i in range(len(dataList)):  # iterate over what was actually scraped
        print("Row {0}".format(i + 1))
        data = dataList[i]
        for j in range(0, 8):
            # Write one cell
            sheet.write(i + 1, j, data[j])
    book.save(savePath)


# Save the data to SQLite
def saveDBData(dataList, dbPath):
    init_db(dbPath)  # make sure the table exists before inserting
    conn = sqlite3.connect(dbPath)
    cur = conn.cursor()
    sql = """
        insert into movie250 (
        info_link,pic_link,cname,ename,score,rated,instrodouction,info
        )
        values(?,?,?,?,?,?,?,?)
        """
    for data in dataList:
        # Parameterized query: no manual quoting, and robust against
        # quotes or other special characters in the scraped text
        cur.execute(sql, data)
    conn.commit()  # one commit for the whole batch

    cur.close()
    conn.close()


def init_db(dbPath):
    # Create the table on first run; "if not exists" makes repeat runs safe
    sql = """
    create table if not exists movie250(
    id integer primary key autoincrement,
    info_link text,
    pic_link text,
    cname varchar,
    ename varchar,
    score numeric,
    rated numeric,
    instrodouction text,
    info text)
    """
    conn = sqlite3.connect(dbPath)
    cursor = conn.cursor()
    cursor.execute(sql)
    conn.commit()
    conn.close()


# Fetch the HTML content of a single URL
def askURL(url):
    head = {
        # Spoof a browser User-Agent so the request is not rejected
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/99.0.4844.84 Safari/537.36"
    }
    request = urllib.request.Request(url, headers=head)
    html = ""
    try:
        response = urllib.request.urlopen(request)
        # Read the response body and decode it as UTF-8
        html = response.read().decode('utf-8')
    except urllib.error.URLError as e:
        if hasattr(e, "code"):
            print(e.code)
        if hasattr(e, "reason"):
            print(e.reason)
    return html


if __name__ == "__main__":
    main()
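Once the run finishes, the table can be eyeballed with a short query sketch like the one below (it assumes the movie.db path and column names from init_db above):

import sqlite3

conn = sqlite3.connect("movie.db")
cur = conn.cursor()
# Top 10 entries by score, just to verify the data landed
for cname, score in cur.execute(
        "select cname, score from movie250 order by score desc limit 10"):
    print(cname, score)
conn.close()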