爬虫入门实例(六)

import requests
import re
import json
from bs4 import BeautifulSoup

def get_html(url):
    """Fetch *url* and return the response body as text.

    Returns None when the request raises, or when the server answers
    with anything other than HTTP 200.
    """
    try:
        # The original passed the UA string via ``params=`` under the key
        # 'User Agent', which appends it to the query string instead of
        # setting a request header. Send it as a real 'User-Agent' header.
        headers = {'User-Agent': "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.36 SE 2.X MetaSr 1.0"}
        # timeout keeps a dead server from hanging the script forever
        r = requests.get(url, headers=headers, timeout=10)
        if r.status_code == 200:
            return r.text
    except requests.RequestException:
        # narrow catch: only network/HTTP errors, not programming bugs
        print("爬取失败")

def parse_html(html):
    """Extract movie text and poster URLs from the Douban chart page.

    Appends the text of each matching <table> to gg.txt (one JSON string
    per line) and returns the list of poster-image URLs found in the page.
    """
    soup = BeautifulSoup(html, "html.parser")
    # Movie entries sit in width="100%" tables with an empty class attribute.
    data = soup.body.find_all('table', attrs={'width': "100%", 'class': ""})
    with open('C:/Users/root/Desktop/py/gg.txt', 'a', encoding='utf-8') as f:
        for table in data:
            people = table.text.replace('\n', "").strip()
            f.write(json.dumps(people, ensure_ascii=False) + '\n')
    # (the ``with`` block already closed the file — no explicit close needed)
    print("文本写入完成")
    # Poster thumbnails are the <img width="75"> elements; findall runs over
    # their repr() so the regex sees the raw src attributes.
    photo = repr(soup.select('img[width="75"]'))
    # Dots in the host and extension are escaped so '.' cannot match
    # arbitrary characters (the original pattern left them bare).
    p = re.compile(r'https://img3\.doubanio\.com/view/photo/s_ratio_poster/public/p25\S*\.jpg')
    return p.findall(photo)

def save_photo(list):
    """Download every image URL in *list* into the local folder.

    Each file is named after the last path segment of its URL.
    NOTE(review): the parameter shadows the builtin ``list``; the name is
    kept unchanged so existing callers are unaffected.
    """
    # Same fix as get_html: proper 'User-Agent' header, not a query param
    # under a misspelled key.
    headers = {'User-Agent': "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.36 SE 2.X MetaSr 1.0"}
    root = "C:/Users/root/Desktop/py/"
    for url in list:
        path = root + url.split('/')[-1]
        resp = requests.get(url, headers=headers, timeout=10)
        with open(path, 'wb') as f:
            f.write(resp.content)
        # (``with`` closes the file; the original's extra close() was a no-op)
    print("图片存储完成")

def main():
    """Crawl the Douban movie chart: save its text, then its posters."""
    chart_url = "https://movie.douban.com/chart"
    page = get_html(chart_url)
    poster_urls = parse_html(page)
    save_photo(poster_urls)

# Guard the entry point so importing this module does not trigger a crawl.
if __name__ == "__main__":
    main()
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值