原文链接: 简单爬虫 爬知乎日报
上一篇: 爬虫下载文章 BeautifulSoup
下一篇: Python 学生管理
# coding=utf-8
import urllib
import urllib2
import urlparse
import re
import bs4
import requests
import sys
# Python 2 hack: sys.setdefaultencoding is deleted at interpreter startup;
# reload(sys) restores it so implicit str<->unicode conversions use UTF-8
# instead of ASCII (avoids UnicodeDecodeError on the Chinese page content).
reload(sys)
sys.setdefaultencoding('utf-8')
# Front page of Zhihu Daily; story links are scraped from this URL.
url = 'https://daily.zhihu.com/'
# 拿到网页源码
# Fetch the raw HTML of a page.
def getHtml(url):
    """Download *url* and return the response body as a byte string.

    A desktop Chrome User-Agent header is sent so the request does not
    look like a headless bot.
    """
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/60.0.3112.101 Safari/537.36'
    }
    req = urllib2.Request(url, headers=headers)
    return urllib2.urlopen(req).read()
# 拿到超链接
# Extract the story hyperlinks from the front-page HTML.
def getUrls(html):
    """Return the absolute story URLs found in *html*, in page order.

    Story links appear as <a href="/story/NNNN">; re.S lets the
    non-greedy group match even if an id were split across newlines.
    """
    pattern = re.compile('<a href="/story/(.*?)"', re.S)
    story_ids = re.findall(pattern, html)
    return ['https://daily.zhihu.com/story/' + sid for sid in story_ids]
# 解析标题和内容
# Parse the title and body out of a single story page.
def getContent(url):
    """Download the story at *url* and return it as an HTML fragment.

    Returns '<h1>title</h1>\n' followed by the raw story body, i.e.
    everything between <div class="content"> and <div class="view-more">.

    Raises IndexError if the page has no content div, and IndexError on
    titles[0] if no <title> tag is present.
    """
    html = getHtml(url)
    # e.g. <title>在一件事情上付出越多,你对它就越喜欢,真怪</title>
    title_pattern = re.compile('<title>(.*?)</title>', re.S)
    titles = re.findall(title_pattern, html)
    # NOTE: titles[0] is a unicode-capable str thanks to setdefaultencoding.
    content_pattern = re.compile('<div class="content">(.*?)<div class="view-more">', re.S)
    content = re.findall(content_pattern, html)[0]
    # BUG FIX: the closing tag was written as '/h1' (missing angle
    # brackets), which produced invalid HTML in the saved file.
    return '<h1>' + titles[0] + '</h1>' + '\n' + content
#去除不需要的标签
# Strip unneeded tags from story HTML.
def clear(html):
    # NOTE(review): this function appears unfinished — it collects the
    # regex matches and an empty result list but never processes or
    # returns them, so it always returns None; no code in this file
    # calls it. The pattern captures the bodies of <p>...</p> or
    # <li>...</li> elements; the trailing space inside '</li> ' is
    # presumably unintended — verify before completing the function.
    pattern= re.compile('<p>(.*?)</p>|<li>(.*?)</li> ')
    items = re.findall(pattern,html)
    result = []
# Script entry point: fetch the Zhihu Daily front page, collect the
# story links, and save the first story's title+body to out.html.
front_page = getHtml(url)
story_urls = getUrls(front_page)
with open('out.html', 'w+') as out_file:
    out_file.write(getContent(story_urls[0]))