更多爬虫实例请见 https://blog.csdn.net/weixin_39777626/article/details/81564819
HTML正文抽取
JSON
# Fetch the seputu.com index page and print every chapter link found in the
# "box" divs (link title + href).
import requests
from bs4 import BeautifulSoup

user_agent='Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/56.0.2924.90 Safari/537.36 2345Explorer/9.3.2.17331'
headers={'User-Agent':user_agent}
r=requests.get('http://seputu.com/',headers=headers)
# Parse the raw bytes: from_encoding is only honoured for bytes input.
# Passing it together with the already-decoded r.text makes BeautifulSoup
# ignore it (and emit a UserWarning).
soup=BeautifulSoup(r.content,'html.parser',from_encoding='utf-8')
for mulu in soup.find_all('div','box'):      # each chapter container
    for ml in mulu.find_all('a'):            # each link inside it
        href=ml.get('href')
        title=ml.get('title')
        print(title,href)
| Python | JSON |
|---|---|
| dict | object |
| list, tuple | array |
| str, unicode | string |
| int, long, float | number |
| True | true |
| False | false |
| None | null |
# JSON encode/decode demo: serialize a mixed Python structure to a string and
# a file, then load both back.
import json

# Encode: Python objects -> JSON text.
# Named `data` instead of `str` so the builtin str() is not shadowed.
data=[{'username':'七夜','age':24},(2,3),1]
json_str=json.dumps(data,ensure_ascii=False)
print(json_str)
# ensure_ascii=False writes raw non-ASCII characters, so the file must be
# opened with an explicit utf-8 encoding (otherwise the locale encoding is
# used and may fail).
with open('/home/as/文档/爬虫/开发与项目实战/基础篇/save/qiye.txt','w',encoding='utf-8') as fp:
    json.dump(data,fp=fp,ensure_ascii=False)

# Decode: JSON text -> Python objects (tuples come back as lists).
new_str=json.loads(json_str)
print(new_str)
with open('/home/as/文档/爬虫/开发与项目实战/基础篇/save/qiye.txt','r',encoding='utf-8') as fp:
    print(json.load(fp))
# Title & chapters & links ==> stored as JSON.
import json
from bs4 import BeautifulSoup
import requests

user_agent='Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/56.0.2924.90 Safari/537.36 2345Explorer/9.3.2.17331'
headers={'User-Agent':user_agent}
r=requests.get('http://seputu.com/',headers=headers)
# Parse raw bytes so from_encoding actually takes effect (it is ignored for
# str input such as r.text).
soup=BeautifulSoup(r.content,'html.parser',from_encoding='utf-8')
content=[]
for mulu in soup.find_all('div','mulu'):
    h2=mulu.find('h2')
    if h2 is not None:
        h2_title=h2.string
        chapter_list=[]
        for a in mulu.find('div','box').find_all('a'):
            href=a.get('href')
            box_title=a.get('title')
            chapter_list.append({'href':href,'box_title':box_title})
        content.append({'title':h2_title,'content':chapter_list})
with open('/home/as/文档/爬虫/开发与项目实战/基础篇/save/qiye.json','w',encoding='utf-8') as fp:
    # Dump the structure directly. Dumping the output of json.dumps() would
    # double-encode it: the file would contain one escaped JSON *string*
    # instead of the array of objects.
    json.dump(content,fp,ensure_ascii=False,indent=4)
CSV
写入
# CSV write: rows as tuples.
import csv
headers=['ID','UserName','Password','Age','Country']
rows=[
    (1001,'qiye','qiye_pass',24,'China'),
    (1002,'Mary','Mary_pass',20,'USA'),
    (1003,'Jack','Jack_pass',20,'USA'),
]
# newline='' is required by the csv module: it writes its own '\r\n' line
# terminators, and without it the text layer translates them again (blank
# rows on Windows).
with open('qiye.csv','w',newline='') as f:
    f_csv=csv.writer(f)
    f_csv.writerow(headers)
    f_csv.writerows(rows)
# CSV write: rows as dicts keyed by the header names.
import csv
headers=['ID','UserName','Password','Age','Country']
rows=[
    {'ID':1001,'UserName':'qiye','Password':'qiye_pass','Age':24,'Country':'China'},
    {'ID':1002,'UserName':'Mary','Password':'Mary_pass','Age':20,'Country':'USA'},
    {'ID':1003,'UserName':'Jack','Password':'Jack_pass','Age':20,'Country':'USA'},
]
# newline='' prevents the csv module's '\r\n' endings from being translated
# a second time by the text layer (blank rows on Windows otherwise).
with open('qiye.csv','w',newline='') as f:
    f_csv=csv.DictWriter(f,headers)
    f_csv.writeheader()
    f_csv.writerows(rows)
读取
# CSV read: each row as a list of strings.
import csv
# newline='' is the csv-module convention on read as well, so embedded
# newlines inside quoted fields survive intact.
with open('qiye.csv',newline='') as f:
    f_csv=csv.reader(f)
    headers=next(f_csv)   # first row is the header
    print(headers)
    for row in f_csv:
        print(row)
# CSV read: each row as a namedtuple, so fields are accessible by name.
# NOTE: requires every header cell to be a valid Python identifier.
from collections import namedtuple
import csv
with open('qiye.csv',newline='') as f:
    f_csv=csv.reader(f)
    headings=next(f_csv)
    Row=namedtuple('Row',headings)
    for r in f_csv:
        row=Row(*r)
        print(row.UserName,row.Password)
        print(row)
# CSV read: each row as a dict keyed by the header row.
import csv
with open('qiye.csv',newline='') as f:
    f_csv=csv.DictReader(f)
    for row in f_csv:
        print(row.get('UserName'),row.get('Password'))
# Scrape (section title, chapter title, link, date) with XPath and save to CSV.
import csv   # this snippet uses csv.writer below; must be imported here
import re

import requests
from lxml import etree

user_agent='Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/56.0.2924.90 Safari/537.36 2345Explorer/9.3.2.17331'
headers={'User-Agent':user_agent}
r=requests.get('http://seputu.com/',headers=headers)
html=etree.HTML(r.content)
div_mulus=html.xpath('/html/body/div/div/div')
rows=[]
# Compile once, outside the loops, instead of on every link.
# The title looks like "[date] chapter name"; group(1)=date, group(2)=name.
pattern=re.compile(r'\s*\[(.*)\]\s+(.*)')
for div_mulu in div_mulus:
    div_h2=div_mulu.xpath('./div[1]/center/h2/text()')
    if len(div_h2)>0:
        h2_title=div_h2[0]
        a_s=div_mulu.xpath('./div/ul/li/a')
        for a in a_s:
            href=a.xpath('./@href')
            box_title=a.xpath('./@title')[0]
            match=pattern.search(box_title)
            if match is not None:
                # Keep the date as str: encoding it to bytes would make the
                # csv writer emit the literal "b'...'" representation.
                date=match.group(1)
                real_title=match.group(2)
                content=(h2_title,real_title,href,date)
                print(content)
                rows.append(content)
# Separate name so the HTTP `headers` dict above is not clobbered.
csv_headers=['title','real_title','href','date']
with open('/home/as/文档/爬虫/开发与项目实战/基础篇/save/qiye.csv','w',newline='',encoding='utf-8') as f:
    f_csv=csv.writer(f)
    f_csv.writerow(csv_headers)
    f_csv.writerows(rows)
多媒体文件抽取
# Download gallery images, reporting progress via a urlretrieve hook.
# `import urllib` alone does NOT make urllib.request usable in Python 3;
# the submodule must be imported explicitly.
import urllib.request

from lxml import etree
import requests


def Schedule(blocknum,blocksize,totalsize):
    """urlretrieve reporthook: print the download progress percentage.

    blocknum: number of blocks transferred so far
    blocksize: size of one block, in bytes
    totalsize: total size of the remote file, in bytes
    """
    per=100.0*blocknum*blocksize/totalsize
    # The last block is usually partial, so the raw ratio can exceed 100%.
    if per>100:
        per=100
    print('当前下载进度:%.2f%%'%per)


user_agent='Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/56.0.2924.90 Safari/537.36 2345Explorer/9.3.2.17331'
headers={'User-Agent':user_agent}
r=requests.get('http://www.ivsky.com/tupian/ziranfengguang/',headers=headers)
html=etree.HTML(r.text)
img_urls=html.xpath('/html/body/div[3]/div[2]/ul/li/div/a/img/@src')
for i,img_url in enumerate(img_urls):   # enumerate replaces the manual counter
    urllib.request.urlretrieve(img_url,'/home/as/文档/爬虫/开发与项目实战/基础篇/save/img%s.jpg'%i,Schedule)
Email提醒
# Email alert: notify the administrator that the crawler hit an error.
from email.header import Header
from email.mime.text import MIMEText
from email.utils import parseaddr,formataddr
import smtplib


def _format_addr(s):
    """Split 'Display Name<addr>' and re-encode the display name as UTF-8."""
    name,addr=parseaddr(s)
    return formataddr((Header(name,'utf-8').encode(),addr))


from_addr='15xxxxxxxxxxxx@163.com'
password='3xxxxxxxxxxxx'
to_addr='13xxxxxxxxxxxxx@163.com'
smtp_server='smtp.163.com'

msg=MIMEText('Python爬虫运行异常,异常信息为:HTTP 403','plain','utf-8')
msg['From']=_format_addr('一号爬虫<%s>'%from_addr)
msg['To']=_format_addr('管理员<%s>'%to_addr)
msg['Subject']=Header('一号爬虫状态','utf-8').encode()

# SMTP as a context manager issues QUIT on exit, so the connection is
# closed even when login()/sendmail() raises (the original leaked it).
with smtplib.SMTP(smtp_server,25) as server:
    server.login(from_addr,password)
    server.sendmail(from_addr,[to_addr],msg.as_string())
更多爬虫实例请见 https://blog.csdn.net/weixin_39777626/article/details/81564819