Scraping a simple web page and saving it as JSON or CSV, extracting data with BeautifulSoup, parsing with lxml, downloading with urllib.request.urlretrieve, and sending email via SMTP

Saving as JSON

Key steps:
1. requests.get() sends a GET request for the page
2. BeautifulSoup() parses the response
3. find_all(class_=...) locates the data we need
4. json.dump() encodes it and writes it to j.txt
5. json.load(fp) reads the file back

import requests
from bs4 import BeautifulSoup
import json
# Fetch the HTML (the User-Agent spoofs an old IE browser)
u = "Mozilla/4.0 (compatible; MSIE 5.5; Windows NT)"
h = {'User-Agent': u}
r = requests.get('http://seputu.com/', headers=h)
# r.text is already decoded, so no from_encoding argument is needed
s = BeautifulSoup(r.text, 'html.parser')
txt = []
for m in s.find_all(class_='mulu'):  # each section block
    h2 = m.find('h2')  # section title
    if h2 is not None:
        h2 = h2.string
        links = []
        for a in m.find(class_='box').find_all('a'):
            hr = a.get('href')   # chapter link
            b = a.get('title')   # chapter title
            links.append({b: hr})
        txt.append(h2)
        txt.append(links)
# Encode: ensure_ascii=False keeps Chinese characters readable,
# so the file must be opened as utf-8
print(txt)
with open("j.txt", "w", encoding="utf-8") as fp:
    json.dump(txt, fp=fp, ensure_ascii=False)
# Decode
with open('j.txt', 'r', encoding='utf-8') as fp:
    print(json.load(fp))
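
The file then holds a flat list that alternates a section title with a list of {title: href} dicts. A minimal round-trip sketch with placeholder data (the titles and URLs below are made up, not real scrape results):

import json

# Hypothetical sample mirroring the structure built above
sample = [
    "Section One",
    [{"Chapter 1": "http://example.com/1.html"},
     {"Chapter 2": "http://example.com/2.html"}],
]
text = json.dumps(sample, ensure_ascii=False, indent=2)
print(text)  # human-readable JSON
assert json.loads(text) == sample  # decoding restores the same object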

Saving as CSV

Key steps:
1. requests.get() sends a GET request for the page
2. BeautifulSoup() parses the response
3. find_all(class_=...) locates the data we need
4. f = csv.writer(fp)
   f.writerow(csv_h)
   f.writerows(csv_r) write the data
5. f = csv.reader(fp)
   fh = next(f)
   Row = namedtuple('Row', fh)  # use a named tuple for column access

import requests
from bs4 import BeautifulSoup
import csv
from collections import namedtuple
# Fetch the HTML
u = "Mozilla/4.0 (compatible; MSIE 5.5; Windows NT)"
h = {'User-Agent': u}
r = requests.get('http://seputu.com/', headers=h)
s = BeautifulSoup(r.text, 'html.parser')
csv_h = ['Section', 'Title', 'Href']  # header row matching the data collected below
csv_r = []
for m in s.find_all(class_='mulu'):
    h2 = m.find('h2')  # section title
    if h2 is not None:
        h2 = h2.string
        for a in m.find(class_='box').find_all('a'):
            hr = a.get('href')
            b = a.get('title')
            csv_r.append([h2, b, hr])
# print(csv_r)
with open("c.csv","w") as fp:
    f=csv.writer(fp)
    f.writerow(csv_h)
    f.writerows(csv_r)

with open('c.csv', 'r', newline='', encoding='utf-8') as fp:
    f = csv.reader(fp)
    fh = next(f)  # first row is the header
    Row = namedtuple('Row', fh)  # use a named tuple for column access
    for r in f:
        if r:  # skip any blank rows
            row = Row(*r)
            print(row.Section)
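
If dict access is more convenient than a named tuple, csv.DictReader maps each row onto the header automatically. A short sketch reading the same c.csv written above:

import csv

# DictReader takes field names from the first row, so each row becomes a dict
with open('c.csv', 'r', newline='', encoding='utf-8') as fp:
    for row in csv.DictReader(fp):
        print(row['Section'], row['Href'])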

Parsing with lxml: XPath and re

Key steps:
1. requests.get()
2. etree.HTML() parses the fetched page
3. html.xpath() finds the matching child nodes
4. re.compile() regex-matches the pieces we need inside that data

from lxml import etree
import requests
import re

r = requests.get('http://seputu.com/',
                 headers={'User-Agent': "Mozilla/4.0 (compatible; MSIE 5.5; Windows NT)"})
html = etree.HTML(r.text)
d = html.xpath('.//*[@class="mulu"]')  # all elements with class="mulu"
for dm in d:
    h2 = dm.xpath('./div[@class="mulu-title"]/center/h2/text()')  # section title
    if len(h2) > 0:
        h2_title = h2[0]
        a_s = dm.xpath('./div[@class="box"]/ul/li/a')
        for a in a_s:
            href = a.xpath('./@href')[0]
            # lxml already returns str in Python 3; no encode/decode round-trip needed
            title = a.xpath('./@title')[0]
            tre = re.compile(r'\s*\[(.*)\]\s+(.*)')
            m = tre.search(title)
            if m:  # the title may not match the "[...] ..." pattern
                print(m.group(2), href)
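
To see what that regex captures, here is a quick check against a made-up title string (the sample text is hypothetical; on seputu.com the bracketed part is the volume name):

import re

tre = re.compile(r'\s*\[(.*)\]\s+(.*)')
m = tre.search('[Volume 1] Chapter Title')
if m:
    print(m.group(1))  # 'Volume 1'      - the text inside the brackets
    print(m.group(2))  # 'Chapter Title' - the text after the brackets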

Extracting media files and tracking download progress

Key steps:
1. requests.get()
2. etree.HTML() parses the fetched page
3. html.xpath() finds the matching child nodes
4. urllib.request.urlretrieve() saves the file and reports the download progress

import urllib.request
from lxml import etree
import requests
import os

def Sc(bnum, bsize, tsize):
    '''
    Reporthook for urlretrieve.
    :param bnum: number of blocks transferred so far
    :param bsize: block size in bytes
    :param tsize: total size of the remote file
    '''
    per = 100.0 * bnum * bsize / tsize
    if per > 100:
        per = 100
    print('Download progress: %d%%' % per)

u = "Mozilla/4.0 (compatible; MSIE 5.5; Windows NT)"
h = {'User-Agent': u}
r = requests.get('https://www.rouding.com/roudingtuku/shishangzhaopian/', headers=h)
html = etree.HTML(r.text)
d = html.xpath('.//img/@src')  # the src attribute of every img node, i.e. the image URLs
file_path = './img00002'
if not os.path.exists(file_path):
    os.mkdir(file_path)
i = 0
for img in d:
    img = 'https:' + img  # the page uses protocol-relative URLs
    urllib.request.urlretrieve(img, os.path.join(file_path, 'img%d.jpg' % i), Sc)
    i += 1
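
If you would rather stay with requests instead of urllib, a streamed download exposes the same progress information chunk by chunk. A sketch under that assumption (the timeout and chunk size are arbitrary choices):

import requests

def download(url, path, chunk_size=8192):
    # stream=True defers the body so it can be read in chunks
    with requests.get(url, stream=True, timeout=10) as r:
        r.raise_for_status()
        total = int(r.headers.get('Content-Length', 0))
        done = 0
        with open(path, 'wb') as f:
            for chunk in r.iter_content(chunk_size=chunk_size):
                f.write(chunk)
                done += len(chunk)
                if total:  # Content-Length may be missing
                    print('Download progress: %d%%' % (100 * done // total))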


Sending email via SMTP

from email.header import Header
from email.mime.text import MIMEText
from email.utils import parseaddr, formataddr
import smtplib

def _format_addr(s):
    # Encode the display name so non-ASCII names survive transport
    name, addr = parseaddr(s)
    return formataddr((Header(name, 'utf-8').encode(), addr))

# Sender address
from_addr = '*****@163.com'
# Mailbox password
password = '********'
# Recipient address
to_addr = '*****@qq.com'
# 163 (NetEase) mail server address
smtp_server = 'smtp.163.com'

# Build the message
# msg = MIMEText(html, 'html', 'utf-8')  # to send an HTML page instead
msg = MIMEText('Python run error: encountered HTTP 403', 'plain', 'utf-8')
msg['From'] = _format_addr('Crawler No.1 <%s>' % from_addr)
msg['To'] = _format_addr('Admin <%s>' % to_addr)
msg['Subject'] = Header('Crawler No.1 status', 'utf-8').encode()

# Send the mail
server = smtplib.SMTP(smtp_server, 25)
server.login(from_addr, password)
server.sendmail(from_addr, [to_addr], msg.as_string())
server.quit()
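
Note that plain port 25 is blocked on many networks; 163 Mail also accepts an implicit-TLS connection on port 465 via SMTP_SSL (an assumption worth verifying against the provider's current docs). The sending step then becomes:

import smtplib

# Same login/sendmail flow, but encrypted from the first byte
server = smtplib.SMTP_SSL(smtp_server, 465)
server.login(from_addr, password)
server.sendmail(from_addr, [to_addr], msg.as_string())
server.quit()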