# 【声明:仅供新手学习使用,请勿用作非法用途,如有非法情况发生,与本文无关。】
import re
import urllib.error
import urllib.parse
import urllib.request

from bs4 import BeautifulSoup
def build_onepage_crawl_function(keyword, number_of_page):
    """Crawl *number_of_page* search-result pages for *keyword* and print titles.

    For each result page the HTML is fetched with a custom User-Agent and
    every link matching the ``.tts-title a`` selector has its text printed.

    Args:
        keyword: search term (plain text; URL-encoded internally).
        number_of_page: how many result pages to crawl (int, or a numeric
            string as supplied by ``input()``).
    """
    # Build an opener that carries a browser-like User-Agent header.
    user_agent_header = ("User-Agent", "替换成自己的请求头")
    opener_build = urllib.request.build_opener()
    opener_build.addheaders = [user_agent_header]
    # URL-encode the keyword so non-ASCII search terms form a valid URL.
    encoded_keyword = urllib.parse.quote(keyword)
    # Crawl each result page in turn.
    for i in range(int(number_of_page)):
        # Each page is addressed by a result offset of 10 (&pn= parameter).
        # NOTE: the original code overwrote the `number_of_page` parameter
        # here; a dedicated name avoids the confusing shadowing.
        page_offset = i * 10
        print("搜索页面链接:", page_offset)
        # Assemble the full GET URL: base + keyword + page offset.
        url = ("https://www.*******.com/s?wd=" + encoded_keyword
               + "&pn=" + str(page_offset))
        print(url)
        try:
            original_html = (opener_build.open(url, timeout=2)
                             .read().decode("utf-8", "ignore"))
        except urllib.error.URLError as exc:
            # A single failed/timed-out page should not abort the whole
            # crawl; report it and move on to the next page.
            print("抓取失败:", exc)
            continue
        soup = BeautifulSoup(original_html, 'html.parser')
        for title in soup.select('.tts-title a'):
            print(title.text)
# Script entry point: prompt for the keyword and page count, then crawl.
# Guarded so importing this module does not trigger interactive input().
if __name__ == "__main__":
    keyword_input = input("请输入关键词:")
    number_of_page_input = input("请输入页数:")
    build_onepage_crawl_function(keyword_input, number_of_page_input)