# Python, step 1:
# import the requests package and BeautifulSoup for parsing the scraped HTML
from bs4 import BeautifulSoup
import requests
# Next, define the URL of the site to scrape
# Site we are going to scrape.
url = 'https://duanziwang.com/'

# Anti-scraping measure: present a regular desktop-browser User-Agent
# instead of the default requests one.
header = {
    "User-Agent": (
        "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:61.0) "
        "Gecko/20100101 Firefox/61.0"
    ),
}
# Full implementation:
"""
Created on Thu Dec 10 20:17:10 2020

Scrape the post snippets from duanziwang.com and save them into an
Excel workbook, one snippet per row in column A.
"""
from bs4 import BeautifulSoup
import requests
import xlsxwriter

# Target site; the header mimics a desktop Firefox browser so the
# request is not rejected by trivial anti-scraping checks.
url = 'https://duanziwang.com/'
header = {"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:61.0) Gecko/20100101 Firefox/61.0"}

# Fetch the page and force UTF-8 so Chinese text decodes correctly.
response = requests.get(url=url, headers=header)
response.encoding = 'utf-8'
data = response.text

# Output workbook; path is hard-coded for this tutorial script.
workbook = xlsxwriter.Workbook(r'D:\study\first1.xlsx')
worksheet = workbook.add_worksheet()

# Each joke lives in a <div class="post-content"> wrapping a <code> tag.
soup = BeautifulSoup(data, 'lxml')
posts = soup.find_all(name='div', attrs={'class': 'post-content'})

# Write each post's <code> text into column A (A1, A2, ...).
for row, post in enumerate(posts, start=1):
    snippet = post.find(name='code')
    if snippet is not None:  # some posts may lack a <code> element; skip them
        worksheet.write('A' + str(row), snippet.text)

# Must close the workbook or the .xlsx file is not written out.
workbook.close()