Data Storage (Local)


For more crawler examples, see https://blog.csdn.net/weixin_39777626/article/details/81564819

HTML Body Extraction

JSON

import requests
from bs4 import BeautifulSoup

user_agent='Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/56.0.2924.90 Safari/537.36 2345Explorer/9.3.2.17331'
headers={'User-Agent':user_agent}
r=requests.get('http://seputu.com/',headers=headers)
soup=BeautifulSoup(r.text,'html.parser')   # r.text is already a decoded str, so from_encoding is not needed
# Walk each chapter box and print every link's title and URL
for mulu in soup.find_all('div','box'):
    for ml in mulu.find_all('a'):
        href=ml.get('href')
        title=ml.get('title')
        print(title,href)
Python              JSON
dict                object
list, tuple         array
str, unicode        string
int, long, float    number
True                true
False               false
None                null
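
The mapping above can be checked directly with json.dumps; a minimal sketch (the sample values are illustrative, not from the original):

import json

# dict -> object, tuple -> array, str -> string, int/float -> number,
# True -> true, None -> null
print(json.dumps({'name':'qiye','tags':('a','b'),'age':24,'score':1.5,
                  'active':True,'extra':None},ensure_ascii=False))
# {"name": "qiye", "tags": ["a", "b"], "age": 24, "score": 1.5, "active": true, "extra": null}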
import json

# Encoding: Python objects ==> JSON string / file
data=[{'username':'七夜','age':24},(2,3),1]   # renamed from str to avoid shadowing the builtin
json_str=json.dumps(data,ensure_ascii=False)
print(json_str)
with open('/home/as/文档/爬虫/开发与项目实战/基础篇/save/qiye.txt','w') as fp:
    json.dump(data,fp=fp,ensure_ascii=False)
# Decoding: JSON string / file ==> Python objects
new_str=json.loads(json_str)
print(new_str)
with open('/home/as/文档/爬虫/开发与项目实战/基础篇/save/qiye.txt','r') as fp:
    print(json.load(fp))
# Title & chapter & link ==> stored as JSON
import json
from bs4 import BeautifulSoup
import requests

user_agent='Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/56.0.2924.90 Safari/537.36 2345Explorer/9.3.2.17331'
headers={'User-Agent':user_agent}
r=requests.get('http://seputu.com/',headers=headers)
soup=BeautifulSoup(r.text,'html.parser')
content=[]
for mulu in soup.find_all('div','mulu'):
    h2=mulu.find('h2')
    if h2 is not None:
        h2_title=h2.string
        List=[]
        for a in mulu.find('div','box').find_all('a'):
            href=a.get('href')
            box_title=a.get('title')
            List.append({'href':href,'box_title':box_title})
        content.append({'title':h2_title,'content':List})

with open('/home/as/文档/爬虫/开发与项目实战/基础篇/save/qiye.json','w') as fp:
    # dump the list directly; dumping an already-serialized string would double-encode it
    json.dump(content,fp,ensure_ascii=False,indent=4)
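
To check what was written, the file can be loaded back with json.load; a short sketch reusing the path and keys from the code above:

import json

with open('/home/as/文档/爬虫/开发与项目实战/基础篇/save/qiye.json','r') as fp:
    data=json.load(fp)
for chapter in data:
    # each entry has a 'title' and a 'content' list of {'href','box_title'} dicts
    print(chapter['title'],len(chapter['content']))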

CSV

Writing

# Rows as tuples
import csv

headers=['ID','UserName','Password','Age','Country']
rows=[
    (1001,'qiye','qiye_pass',24,'China'),
    (1002,'Mary','Mary_pass',20,'USA'),
    (1003,'Jack','Jack_pass',20,'USA'),
    ]
with open('qiye.csv','w',newline='') as f:   # newline='' avoids extra blank lines on Windows
    f_csv=csv.writer(f)
    f_csv.writerow(headers)
    f_csv.writerows(rows)
# Rows as dictionaries
import csv

headers=['ID','UserName','Password','Age','Country']
rows=[
    {'ID':1001,'UserName':'qiye','Password':'qiye_pass','Age':24,'Country':'China'},
    {'ID':1002,'UserName':'Mary','Password':'Mary_pass','Age':20,'Country':'USA'},
    {'ID':1003,'UserName':'Jack','Password':'Jack_pass','Age':20,'Country':'USA'},
    ]
with open('qiye.csv','w',newline='') as f:
    f_csv=csv.DictWriter(f,headers)
    f_csv.writeheader()
    f_csv.writerows(rows)

Reading

# Access rows as lists
import csv

with open('qiye.csv') as f:
    f_csv=csv.reader(f)
    headers=next(f_csv)
    print(headers)
    for row in f_csv:
        print(row)
# Access rows as namedtuples
from collections import namedtuple
import csv

with open('qiye.csv') as f:
    f_csv=csv.reader(f)
    headings=next(f_csv)
    Row=namedtuple('Row',headings)   # build a record type from the header row
    for r in f_csv:
        row=Row(*r)
        print(row.UserName,row.Password)
        print(row)
# Access rows as dictionaries
import csv

with open('qiye.csv') as f:
    f_csv=csv.DictReader(f)
    for row in f_csv:
        print(row.get('UserName'),row.get('Password'))
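
If rows need to be appended to qiye.csv later, DictWriter also works with the file opened in append mode; a minimal sketch (the extra row is made up for illustration):

import csv

headers=['ID','UserName','Password','Age','Country']
with open('qiye.csv','a',newline='') as f:
    f_csv=csv.DictWriter(f,headers)
    # no writeheader() here: the header row already exists in the file
    f_csv.writerow({'ID':1004,'UserName':'Tom','Password':'Tom_pass','Age':22,'Country':'USA'})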
# Title, chapter, link and date ==> stored as CSV
from lxml import etree
import requests
import re
import csv   # needed for the CSV write at the end

user_agent='Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/56.0.2924.90 Safari/537.36 2345Explorer/9.3.2.17331'
headers={'User-Agent':user_agent}
r=requests.get('http://seputu.com/',headers=headers)
html=etree.HTML(r.content)
div_mulus=html.xpath('/html/body/div/div/div')
rows=[]
for div_mulu in div_mulus:
    div_h2=div_mulu.xpath('./div[1]/center/h2/text()')
    if len(div_h2)>0:
        h2_title=div_h2[0]
        a_s=div_mulu.xpath('./div/ul/li/a')
        for a in a_s:
            href=a.xpath('./@href')[0]   # xpath returns a list; take the single attribute value
            box_title=a.xpath('./@title')[0]
            # The title attribute starts with a bracketed date; split it into the date and the real title
            pattern=re.compile(r'\s*\[(.*)\]\s+(.*)')
            match=pattern.search(box_title)
            if match is not None:
                date=match.group(1)   # keep as str; encoding to bytes would write b'...' into the CSV
                real_title=match.group(2)
                content=(h2_title,real_title,href,date)
                print(content)
                rows.append(content)
                
headers=['title','real_title','href','date']
with open('/home/as/文档/爬虫/开发与项目实战/基础篇/save/qiye.csv','w',newline='') as f:
    f_csv=csv.writer(f)
    f_csv.writerow(headers)
    f_csv.writerows(rows)
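
Reading the result back with DictReader confirms the four columns written above (same path as in the writing code):

import csv

with open('/home/as/文档/爬虫/开发与项目实战/基础篇/save/qiye.csv') as f:
    for row in csv.DictReader(f):
        print(row['title'],row['real_title'],row['href'],row['date'])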

Multimedia File Extraction

import urllib.request
from lxml import etree
import requests

def Schedule(blocknum,blocksize,totalsize):
    """
    blocknum:已下载的数据块
    blocksize:数据块大小
    totalsize:远程文件大小
    """
    per=100.0*blocknum*blocksize/totalsize
    if per>100:
        per=100
    print('Download progress: %.2f%%'%per)
user_agent='Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/56.0.2924.90 Safari/537.36 2345Explorer/9.3.2.17331'
headers={'User-Agent':user_agent}
r=requests.get('http://www.ivsky.com/tupian/ziranfengguang/',headers=headers)
html=etree.HTML(r.text)
img_urls=html.xpath('/html/body/div[3]/div[2]/ul/li/div/a/img/@src')
# Download each image, reporting progress via the Schedule callback
for i,img_url in enumerate(img_urls):
    urllib.request.urlretrieve(img_url,'/home/as/文档/爬虫/开发与项目实战/基础篇/save/img%s.jpg'%i,Schedule)
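
urlretrieve is kept in Python 3 only as a legacy interface; the same progress idea can also be written with requests and stream=True. A rough sketch, with the chunk size chosen arbitrarily (not part of the original example):

import requests

def download(url,path,chunk_size=8192):
    # Stream the response and report progress from the Content-Length header (if present)
    with requests.get(url,stream=True) as r:
        total=int(r.headers.get('Content-Length',0))
        done=0
        with open(path,'wb') as f:
            for chunk in r.iter_content(chunk_size=chunk_size):
                f.write(chunk)
                done+=len(chunk)
                if total:
                    print('Download progress: %.2f%%'%(100.0*done/total))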

Email Alerts

from email.header import Header
from email.mime.text import MIMEText
from email.utils import parseaddr,formataddr
import smtplib

def _format_addr(s):
    # Encode the display name so non-ASCII names survive in the mail header
    name,addr=parseaddr(s)
    return formataddr((Header(name,'utf-8').encode(),addr))

from_addr='15xxxxxxxxxxxx@163.com'
password='3xxxxxxxxxxxx'
to_addr='13xxxxxxxxxxxxx@163.com'

smtp_server='smtp.163.com'
msg=MIMEText('The Python crawler hit an exception: HTTP 403','plain','utf-8')
msg['From']=_format_addr('Crawler No.1 <%s>'%from_addr)
msg['To']=_format_addr('Admin <%s>'%to_addr)
msg['Subject']=Header('Crawler No.1 status','utf-8').encode()

server=smtplib.SMTP(smtp_server,25)
server.login(from_addr,password)
server.sendmail(from_addr,[to_addr],msg.as_string())
server.quit()
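
163 Mail often requires SSL and an SMTP authorization code rather than the account password; if the plain port-25 login is rejected, a variant of the connection step looks like this (the rest of the script is unchanged):

import smtplib

# Assumes from_addr, password (the SMTP authorization code), to_addr and msg
# are defined exactly as in the script above.
server=smtplib.SMTP_SSL('smtp.163.com',465)
server.login(from_addr,password)
server.sendmail(from_addr,[to_addr],msg.as_string())
server.quit()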

For more crawler examples, see https://blog.csdn.net/weixin_39777626/article/details/81564819
