[Web Scraping][Python][Beginner][Page Source][Baidu Images][Douban Top 250]

Robots protocol: check a site's crawling rules, and comply with applicable laws and regulations
  • The Robots protocol (also called the crawler protocol or robot protocol) is formally the "Robots Exclusion Protocol". Through it, a website tells crawlers which pages may be fetched and which may not.
  • robots.txt is the first file a search engine checks when it visits a website. It tells spider programs which files on the server may be viewed. Bilibili, for example, publishes its rules at https://www.bilibili.com/robots.txt; a minimal programmatic check is sketched after this list.
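The standard library can parse these rules for you. Below is a minimal sketch using urllib.robotparser; the user-agent name 'MySpider' and the target path are placeholders of my own, not taken from the original post.

from urllib.robotparser import RobotFileParser

rp = RobotFileParser()
rp.set_url('https://www.bilibili.com/robots.txt')  # robots.txt always lives at the site root
rp.read()  # fetch and parse the file

# can_fetch() returns True if the given user agent is allowed to fetch the URL
print(rp.can_fetch('MySpider', 'https://www.bilibili.com/video/'))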
Fetching a page's source
  • If the request fails after you enter the URL, the site does not allow crawlers (or at least blocks your client); a minimal fetch is sketched below.
  • If entering the URL only opens the page in the browser, move the cursor back to the end of the line, press the space bar, and then press Enter.
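A minimal sketch of fetching a page's source with requests; the example.com URL is a placeholder, and the User-Agent header is optional, though many sites reject the library's default one.

import requests

headers = {'User-Agent': 'Mozilla/5.0'}
response = requests.get('https://www.example.com', headers=headers, timeout=10)
response.raise_for_status()  # raises an exception on 4xx/5xx status codes
print(response.text[:200])   # first 200 characters of the HTML source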
import requests  # Python HTTP client library, commonly used for crawlers and for testing server responses
import re        # regular expression module, used to extract the content we need
import random    # used here to pick a random integer for the output file name


def spiderPic(html, keyword):
    print('Searching for: ' + keyword + ' — downloading matching files from Baidu Images, please wait .....')
    for addr in re.findall('"objURL":"(.*?)"', html, re.S):
        print('Now crawling URL: ' + str(addr)[0:30] + ' ....')
        try:
            pics = requests.get(addr, timeout=10)  # request the image URL (10 s timeout)
        except requests.exceptions.ConnectionError:
            print('The request for this URL failed!')
            continue

        # note: random.randrange(0, 1000, 4) can repeat, so later images may overwrite earlier ones
        fq = open('S:\\python\\search\\img\\' + str(random.randrange(0, 1000, 4)) + '.jpg', 'wb')
        fq.write(pics.content)
        fq.close()


# Python entry point
if __name__ == '__main__':
    print('Great!')

    word = input('Enter the keyword you want to search for: ')
    result = requests.get('http://image.baidu.com/search/flip?tn=baiduimage&ie=utf-8&word=' + word)

    # call the spider
    spiderPic(result.text, word)
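One caveat: random.randrange(0, 1000, 4) yields only 250 possible names, so downloads can overwrite each other. A hedged alternative (the output directory is the same placeholder path as above) numbers the files with enumerate instead:

import os
import re
import requests

def spiderPic_safe(html, out_dir='S:\\python\\search\\img'):
    os.makedirs(out_dir, exist_ok=True)  # create the directory if it does not exist
    for i, addr in enumerate(re.findall('"objURL":"(.*?)"', html, re.S)):
        try:
            pics = requests.get(addr, timeout=10)
        except requests.exceptions.RequestException:
            continue
        # the sequence number i guarantees a unique file name per image
        with open(os.path.join(out_dir, str(i) + '.jpg'), 'wb') as fq:
            fq.write(pics.content)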

Hands-on practice
Douban Top 250 movies

Original post: https://blog.csdn.net/qq_36759224/article/details/101572275

  • Runs successfully
import requests
from lxml import etree
import csv
import re
import time
import os

headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.142 Safari/537.36'}


def index_pages(number):
    # each Top 250 list page holds 25 movies; `number` is the start offset (0, 25, ..., 225)
    url = 'https://movie.douban.com/top250?start=%s&filter=' % number
    index_response = requests.get(url=url, headers=headers)
    tree = etree.HTML(index_response.text)
    m_urls = tree.xpath("//li/div/div/a/@href")  # links to each movie's detail page
    return m_urls


def parse_pages(url):
    movie_pages = requests.get(url=url, headers=headers)
    parse_movie = etree.HTML(movie_pages.text)

    # ranking
    ranking = parse_movie.xpath("//span[@class='top250-no']/text()")

    # movie title
    name = parse_movie.xpath("//h1/span[1]/text()")

    # rating score
    score = parse_movie.xpath("//div[@class='rating_self clearfix']/strong/text()")

    # number of raters
    value = parse_movie.xpath("//span[@property='v:votes']/text()")
    number = [" ".join(['Votes:'] + value)]
    # value = parse_movie.xpath("//a[@class='rating_people']")
    # string = [value[0].xpath('string(.)')]
    # number = [a.strip() for a in string]
    # print(number)

    # genres
    value = parse_movie.xpath("//span[@property='v:genre']/text()")
    types = [" ".join(['Genres:'] + value)]

    # production country/region (the Chinese text in the pattern matches the page's own HTML labels)
    value = re.findall('<span class="pl">制片国家/地区:</span>(.*?)<br/>', movie_pages.text)
    country = [" ".join(['Country:'] + value)]

    # language
    value = re.findall('<span class="pl">语言:</span>(.*?)<br/>', movie_pages.text)
    language = [" ".join(['Language:'] + value)]

    # release date
    value = parse_movie.xpath("//span[@property='v:initialReleaseDate']/text()")
    date = [" ".join(['Release date:'] + value)]

    # runtime (named runtime rather than time so the time module is not shadowed)
    value = parse_movie.xpath("//span[@property='v:runtime']/text()")
    runtime = [" ".join(['Runtime:'] + value)]

    # alternative titles
    value = re.findall('<span class="pl">又名:</span>(.*?)<br/>', movie_pages.text)
    other_name = [" ".join(['Also known as:'] + value)]

    # director
    value = parse_movie.xpath("//div[@id='info']/span[1]/span[@class='attrs']/a/text()")
    director = [" ".join(['Director:'] + value)]

    # screenwriter
    value = parse_movie.xpath("//div[@id='info']/span[2]/span[@class='attrs']/a/text()")
    screenwriter = [" ".join(['Screenwriter:'] + value)]

    # cast
    value = parse_movie.xpath("//div[@id='info']/span[3]")
    performer = [value[0].xpath('string(.)')]

    # URL (use the function's url argument instead of relying on a global variable)
    m_url = ['Douban link: ' + url]

    # IMDb link
    value = parse_movie.xpath("//div[@id='info']/a/@href")
    imdb_url = [" ".join(['IMDb link:'] + value)]

    # save the movie poster
    poster = parse_movie.xpath("//div[@id='mainpic']/a/img/@src")
    response = requests.get(poster[0])
    name2 = re.sub(r'[A-Za-z\:\s]', '', name[0])  # strip Latin letters, colons and whitespace for the file name
    poster_name = str(ranking[0]) + ' - ' + name2 + '.jpg'
    dir_name = 'douban_poster'
    if not os.path.exists(dir_name):
        os.mkdir(dir_name)
    poster_path = dir_name + '/' + poster_name
    with open(poster_path, "wb") as f:
        f.write(response.content)

    return zip(ranking, name, score, number, types, country, language, date, runtime, other_name, director, screenwriter, performer, m_url, imdb_url)


def save_results(data):
    # newline='' stops the csv module from inserting blank rows on Windows
    with open('douban.csv', 'a', encoding="utf-8-sig", newline='') as fp:
        writer = csv.writer(fp)
        writer.writerow(data)


if __name__ == '__main__':
    num = 0
    for i in range(0, 250, 25):  # list pages step through start=0, 25, ..., 225
        movie_urls = index_pages(i)
        for movie_url in movie_urls:
            results = parse_pages(movie_url)
            for result in results:
                num += 1
                save_results(result)
                print('Movie record #' + str(num) + ' saved!')
                time.sleep(3)  # polite delay between detail pages
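The CSV is written without a header row. If you want one, a hedged sketch (the column names are my own labels, not from the original post) is to write it once before the crawl starts:

import csv

def write_header():
    with open('douban.csv', 'w', encoding='utf-8-sig', newline='') as fp:
        writer = csv.writer(fp)
        writer.writerow(['ranking', 'title', 'score', 'votes', 'genres',
                         'country', 'language', 'release date', 'runtime',
                         'also known as', 'director', 'screenwriter',
                         'cast', 'douban url', 'imdb url'])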

