1. Code
import requests
import os
from re import findall,DOTALL,search
from bs4 import BeautifulSoup
from urllib import parse
# 1. Use a keyword to fetch the result URLs from the first 5 pages of Baidu search
#    Parameter: keyword; returns a list of URLs
# 2. Crawl each URL and collect the hrefs on that page that carry the required suffix
#    Parameters: url, extension_word; returns the list of matching URLs found on that page
# 3. Check whether each URL is accessible
# 4. Write the results to a txt file, one URL per line
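# --- Hedged sketch (not part of the original script): step 1 might look roughly like
# this, assuming Baidu's usual query parameters, where wd carries the keyword and pn
# is the result offset (10 results per page, so pn = 0,10,...,40 covers the first 5
# pages). The helper name build_baidu_page_urls is illustrative.
def build_baidu_page_urls(keyword, pages=5):
    base = "https://www.baidu.com/s?wd={}&pn={}"
    return [base.format(parse.quote(keyword), page * 10) for page in range(pages)]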
headers = {
    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.100 Safari/537.36"
}
num = 0
# Parse a Baidu search-results page (built from the search-bar base_url) and extract the result links
def parse_baidu_url(url):
    global headers, num  # num appears to be a counter used later in the original script
    url_list = []
    response = requests.get(url=url, headers=headers)
    html = response.content.decode("utf-8")
    soup = BeautifulSoup(html, "lxml")
    # Each organic Baidu result title sits in an <h3 class="t"> that wraps an <a> link
    h3_labels = soup.find_all("h3", attrs={"class": "t"})
    for h3_label in h3_labels:
        # The original snippet is cut off here; the rest of the loop is an assumed
        # completion that simply collects each result's href.
        a_labels = h3_label.find_all("a")
        for a_label in a_labels:
            href = a_label.get("href")
            if href:
                url_list.append(href)
    return url_list
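# --- Hedged sketch (not part of the original script): one possible way to chain
# steps 3 and 4 around parse_baidu_url. The function name save_reachable_urls and
# the output file name result.txt are illustrative assumptions.
def save_reachable_urls(urls, out_path="result.txt"):
    reachable = []
    for link in urls:
        try:
            # Step 3: treat a link as accessible if it answers the GET with HTTP 200
            if requests.get(link, headers=headers, timeout=5).status_code == 200:
                reachable.append(link)
        except requests.RequestException:
            continue
    # Step 4: one URL per line
    with open(out_path, "w", encoding="utf-8") as f:
        f.write("\n".join(reachable) + "\n")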