# encoding: utf-8
import urllib.request
import urllib.parse
import time
import random
def load_page(url):
    """
    Fetch the content of a web page.

    :param url: the URL of the page to fetch
    :return: the page body decoded as a UTF-8 string
    """
    # Spoof a desktop browser UA — Baidu blocks the default urllib UA.
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.110 Safari/537.36'
    }
    request = urllib.request.Request(url=url, headers=headers)
    # Use a context manager so the HTTP response/connection is always
    # closed, even if read() raises (the original leaked the response).
    with urllib.request.urlopen(request) as response:
        content = response.read()
    return content.decode("utf-8")
def write_page(html, filename):
    """
    Save page content to a local file encoded as UTF-8.

    :param html: the page content to save
    :param filename: the name of the file to save the content to
    :return: None
    """
    print("正在保存文件:" + filename)
    # Context manager guarantees the file handle is flushed and closed.
    with open(filename, "w", encoding="utf-8") as fp:
        fp.write(html)
    print("保持文件完毕:" + filename)
def tieba_spider(keyword, start, end):
    """
    Crawl Baidu Tieba pages for the given forum and save each page to disk.

    :param keyword: the tieba (forum) to crawl
    :param start: first page number (1-based, inclusive)
    :param end: last page number (inclusive)
    :return: None
    """
    query = urllib.parse.urlencode({'kw': keyword})
    # kw = urllib.parse.quote(keyword)
    base = "http://tieba.baidu.com/f?" + query + "&ie=utf-8"
    # Tieba paginates by post offset: page N starts at pn = (N-1) * 50.
    urls = [base + "&pn=" + str(offset * 50) for offset in range(start - 1, end)]
    # print(urls)
    for page, page_url in enumerate(urls, start=1):
        html = load_page(page_url)
        write_page(html, keyword + str(page) + ".html")
        # Random pause between requests to avoid hammering the server.
        time.sleep(random.randrange(3, 15))
if __name__ == "__main__":
    # Example of fetching and saving a single page directly:
    # content = load_page(url)
    # write_page(content, "baidutieba.html")
    # Prompt (Chinese): "enter the tieba forum to crawl"; crawl pages 1-5.
    keyword = input("请输入要爬取的贴吧")
    tieba_spider(keyword, 1, 5)