import requests  # HTTP request library
import re  # regular expression library
import csv

def html_save(s):  # save one scraped record
    with open('save3.csv', 'a', newline='') as f:  # append mode; newline='' controls the end-of-line character in text mode
        writer = csv.writer(f)
        writer.writerow(s)  # write the record into the CSV file

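# Optional hardening (an assumption about the desired output, not in the
# original): write a header row once before scraping so the CSV is
# self-describing, with an explicit encoding. write_header is a hypothetical
# helper; its column order mirrors the rows built in get_detail below.
def write_header():
    with open('save3.csv', 'w', newline='', encoding='utf-8') as f:
        csv.writer(f).writerow(['name', 'gender', 'meaning', 'info'])
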
def get_url(n):  # build the list of page URLs
    urls = []
    for i in range(1, n + 1):  # page range found by testing
        urls.append('http://www.nymbler.com/nymbler/more/%s' % i)
    return urls

def get_detail(url):  # fetch one page and extract its fields
    headers = {'Cookie': "heroku-session-affinity=AECDaANoA24IAaj0sYj+//8HYgAH2hNiAAsB42EDbAAAAANtAAAABXdlYi4zbQAAAAV3ZWIuMm0AAAAFd2ViLjFqTiF9lGfQyz4HBcluZEIivsLibgo_; PLAY_SESSION=e625836109d6e09af14be41657c35e808ca31e72-session_id=240bcff7-ebb5-49ee-8fa4-ffcc5ba32e48; _ga=GA1.2.408125030.1575511582; _gid=GA1.2.1377013858.1575511582; td_cookie=18446744071831041204; _gat_gtag_UA_1763772_1=1"}  # request header to get past the site's anti-scraping check
    response = requests.post(url, headers=headers)  # request the page, sending the cookie header
    docx = response.text  # page text to parse
    name = re.findall(r'"name":"([^"]+)"', docx)  # regex-match each "name" value
    gender = re.findall(r'"gender":"([^"]+)"', docx)  # regex-match each "gender" value
    info = re.findall(r'"info":"([^"]+)"', docx)
    meaning = re.findall(r'"meaning":"([^"]+)"', docx)
    rows = []
    for i in range(len(meaning)):  # assemble the extracted fields into ordered records
        tmp = [name[i], gender[i], meaning[i], info[i]]
        html_save(tmp)  # persist the record
        rows.append(tmp)
    return rows
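
# Aside: the regex patterns above suggest the endpoint returns JSON. If that
# holds (an assumption, not confirmed by the original), response.json() is a
# more robust parser than regexes. get_detail_json is a hypothetical variant
# written under that assumption (a JSON array of objects with "name",
# "gender", "info", and "meaning" keys):
def get_detail_json(url):
    response = requests.post(url)
    rows = []
    for item in response.json():
        row = [item['name'], item['gender'], item['meaning'], item['info']]
        html_save(row)  # reuse the same CSV writer
        rows.append(row)
    return rows
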
def get_all(n):  # collect the records from every page
    alldata = []
    for url in get_url(n):
        alldata.extend(get_detail(url))  # parse and save each page listed by get_url(n)
    return alldata

get_all(100)  # entry point: scrape pages 1 through 100
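
# If politeness toward the server matters (a suggestion, not in the original),
# pause briefly between pages instead of calling get_all directly, e.g.:
#
#   import time
#   for url in get_url(100):
#       get_detail(url)
#       time.sleep(1)  # arbitrary one-second delay between requests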