import requests
if __name__ == "__main__":
    # Scrape the KFC China store-locator API (paginated POST endpoint) for
    # every store matching a user-supplied keyword, then dump the results
    # to ./<keyword>.txt, one "店名:...\t地址:..." line per store.
    headers = {
        # The original sent an empty UA; fill in a real one if the server
        # starts rejecting requests.
        "User-Agent": ""
    }
    # NOTE: the original URL had a stray leading space ("  http://...");
    # requests strips it, but it is removed here for correctness.
    url = "http://www.kfc.com.cn/kfccda/ashx/GetStoreList.ashx?op=keyword"
    keyword = input("请输入你要查询的地址信息:")  # input() already returns str
    data = {
        'cname': '',
        'pid': '',
        'keyword': keyword,
        'pageIndex': '',   # overwritten with the page number below
        "pageSize": '10',
    }

    results = []  # renamed from `list`, which shadowed the builtin
    page = 0
    # Walk pages until the API returns an empty 'Table1', collecting
    # store name + address for each entry.
    while True:
        page += 1
        data['pageIndex'] = page
        response = requests.post(url=url, headers=headers, data=data, timeout=10)
        response.raise_for_status()  # fail loudly on HTTP errors instead of a JSON decode crash
        payload = response.json()
        stores = payload['Table1']
        if not stores:  # past the last page
            page -= 1  # last increment fetched an empty page; don't count it
            break
        for store in stores:
            results.append(f"店名:{store['storeName']}\t地址:{store['addressDetail']}")

    with open(f"./{keyword}.txt", 'w', encoding='utf-8') as f:
        for line in results:
            f.write(line + '\n')
    # "finsh" typo fixed in the user-facing message.
    print(f"finish!一共爬取{page}页")
# 基于肯德基地区爬取餐厅(爬虫) — crawl KFC restaurants by area (web scraper)
# 于 2023-09-05 11:55:41 首次发布 — first published 2023-09-05 11:55:41