内容来源:雨敲窗python教程
爬取糗事百科的段子
#_*_ coding: utf-8 _*_
import re
import requests
import html
import time
def crawl_joke_list(page=1, out_path="c:/Users/47864/Desktop/1.txt"):
    """Crawl one listing page of qiushibaike jokes and append them to a text file.

    Each joke is written as one line: "<username>\t<content>\n".

    Args:
        page: 1-based page number of the joke listing to fetch.
        out_path: file the jokes are appended to (kept as a parameter so the
            hard-coded desktop path is only a default, not a requirement).
    """
    url = "https://www.qiushibaike.com/8hr/page/" + str(page)
    # timeout so a stalled connection cannot hang the crawler forever
    res = requests.get(url, timeout=10)
    # regex matching one whole joke block (article div through its content div)
    pattern = re.compile("<div class=\"article block untagged mb15.*?<div class=\"content\">.*?</div>", re.S)
    # unescape HTML entities and turn <br/> into real newlines before matching
    body = html.unescape(res.text).replace("<br/>", "\n")
    m = pattern.findall(body)
    # regex extracting the author name from a joke block
    user_pattern = re.compile("<div class=\"author clearfix\">.*?<h2>(.*?)</h2>", re.S)
    # regex extracting the joke text from a joke block
    content_pattern = re.compile("<div class=\"content\">.*?<span>(.*?)</span>.*?</div>", re.S)
    # "with" guarantees the file is closed even if a request/parse step raises;
    # the original leaked the handle on any exception before f.close()
    with open(out_path, "a", encoding="utf-8") as f:
        for joke in m:
            output = []
            user = user_pattern.findall(joke)
            if len(user) > 0:
                output.append(user[0])
            content = content_pattern.findall(joke)
            if len(content) > 0:
                output.append(content[0].replace("\n", ""))
            line = "\t".join(output)
            print(line)
            # BUG FIX: the original wrote no newline, so every joke in the
            # output file ran together into one giant line
            f.write(line + "\n")
    # be polite to the server: pause once per page, not once per joke
    time.sleep(2)
if __name__ == '__main__':
    # Crawl listing pages 1 through 9, one request batch per page.
    for page_no in range(1, 10):
        crawl_joke_list(page_no)
爬取糗事百科图片
#_*_coding: utf-8_*_
import requests
import re
def crawl_img(img_url, img_local_path):
    """Download a single image to a local file, streaming it in chunks.

    Args:
        img_url: full image URL including scheme (e.g. "https://...").
        img_local_path: destination path on disk.

    Raises:
        requests.HTTPError: if the server answers with an error status.
    """
    # timeout prevents an unresponsive host from hanging the crawl
    r = requests.get(img_url, stream=True, timeout=10)
    # fail loudly on 404/500 instead of silently saving an error page
    r.raise_for_status()
    with open(img_local_path, "wb") as f:
        # BUG FIX: the original used r.content, which buffers the whole image
        # in memory and defeats stream=True; iterate chunks instead
        for chunk in r.iter_content(chunk_size=8192):
            f.write(chunk)
def crawl(page):
    """Crawl one page of the qiushibaike image ranking and save all images.

    Args:
        page: 1-based page number of the image ranking listing.
    """
    import os  # local import: keeps this tutorial snippet self-contained

    img_dir = "c:/Users/47864/Desktop/img/"
    # ROBUSTNESS FIX: create the target directory if missing; the original
    # raised FileNotFoundError on the first image when img/ did not exist
    os.makedirs(img_dir, exist_ok=True)
    url = "http://www.qiushibaike.com/imgrank/page/" + str(page)
    res = requests.get(url, timeout=10)
    # decode explicitly as UTF-8 (the site's encoding) and use res.text
    res.encoding = "utf-8"
    thumb_blocks = re.findall("<div class=\"thumb\">(.*?)</div>", res.text, re.S)
    for block in thumb_blocks:
        for img_url in re.findall("<img src=\"(.*?)\"", block):
            img_url = img_url.strip()
            # the page serves scheme-less URLs ("//pic.qiushibaike.com/...");
            # prepend https: unless a scheme is already present, otherwise
            # requests raises MissingSchema
            full_url = img_url if img_url.startswith("http") else "https:" + img_url
            crawl_img(full_url, img_dir + img_url.split('/')[-1])
if __name__ == '__main__':
    # Demo run: fetch the images from the first ranking page only.
    crawl(page=1)
需要注意的是:爬取图片时,糗事百科网页源代码中图片的 URL 是没有协议前缀的(形如 //pic.qiushibaike.com/...),需要我们自己补上 https:,否则会报错 requests.exceptions.MissingSchema: Invalid URL。