python- 数据接口和selenium
1.数据接口获取网页数据
# 1) Fetch data through a JSON endpoint: the hero-list ".js" URL actually
#    returns JSON, so Response.json() parses it directly.
response = requests.get('https://game.gtimg.cn/images/lol/act/img/js/heroList/hero_list.js')
res = response.json()
# 练习:通过数据接口获取每个皮肤的名称和图片地址,下载图片并用皮肤名称命名保存
def download_image(url: str, name: str, name1: str = 'misc') -> None:
    """Download one skin image and save it as all_hero/<name1>/<name>.jpg.

    :param url:   direct URL of the image to download
    :param name:  file name for the saved image (without extension)
    :param name1: hero sub-folder under all_hero/; defaults to 'misc' so the
                  function also works for callers that pass only url and name
    """
    response = requests.get(url)
    data = response.content
    # makedirs with exist_ok creates all_hero/ and the hero folder in one
    # call and is safe if the directory already exists.
    os.makedirs(f'all_hero/{name1}', exist_ok=True)
    with open(f'all_hero/{name1}/{name}.jpg', 'wb') as f:
        # The original block was missing this write, leaving the file empty.
        f.write(data)
if __name__ == '__main__':
    # Fetch the skin list for a single hero (id 1) and download every skin.
    response1 = requests.get('https://game.gtimg.cn/images/lol/act/img/js/hero/1.js')
    res1 = response1.json()
    # Use the hero's name as the sub-folder; the original passed only two
    # arguments to the three-parameter download_image (TypeError).
    hero_name = res1['hero']['name']
    for x in res1['skins']:
        name = x['name']
        img = x['mainImg']
        if not img:
            # Chroma variants have no mainImg; fall back to the chroma image.
            img = x['chromaImg']
        download_image(img, name, hero_name)
# Download the skins of several heroes, one sub-folder per hero.
if not os.path.exists('all_hero'):
    os.mkdir('all_hero')
for x in range(103, 104):
    response = requests.get(f'https://game.gtimg.cn/images/lol/act/img/js/hero/{x}.js')
    # A Response is falsy for non-success status codes; hero ids are not
    # contiguous, so just skip the missing ones.
    if not response:
        continue
    res = response.json()
    name1 = res['hero']['name']
    for i in res['skins']:
        # '/' in a skin name would be treated as a path separator.
        name = i['name'].replace('/', '')
        img = i['mainImg']
        if not img:
            continue
        download_image(img, name, name1)
# 2. selenium basics
# 1) Create the browser object. NOTE: if it is a global variable it will NOT
#    be closed automatically when the script ends.
b = Chrome()
# 2) Open the page that holds the data you want to scrape.
b.get('https://movie.douban.com/top250')
# 3) page_source is the HTML the browser has actually rendered (including
#    JS-generated content), not the raw server response.
print(b.page_source)
# 4) Close the browser.
b.close()
# 3. selenium + parsing
b = Chrome()
b.get('https://www.jd.com')

# 1) Type into the search box: locate it by its id ('key').
#    NOTE(review): find_element_by_* was removed in Selenium 4; the modern
#    form is b.find_element(By.ID, 'key') — confirm the installed version.
input_tag = b.find_element_by_id('key')
# The trailing '\n' submits the search like pressing Enter.
input_tag.send_keys('电脑\n')
sleep(2)
print(b.page_source)

# 2) Click a button/link: first locate the element, then .click() it.
btn = b.find_element_by_css_selector('#navitems-group2 .b')
btn.click()
b = Chrome()
b.get('https://www.jd.com')
input_tag = b.find_element_by_id('key')  # 'key' is the search box id
input_tag.send_keys('裤子\n')
sleep(2)

all_1 = []
# Parse page 1; b.page_source is the rendered HTML of the current page.
soup = BeautifulSoup(b.page_source, 'lxml')
# Select one wrapper div per product card, then read name/price from it.
all_price = soup.select('#J_goodsList>ul>li>div.gl-i-wrap')
for x in all_price:
    name = x.select_one('.p-name em').text
    price = x.select_one('.p-price i').text
    all_1.append([name, price])

# Go to page 2; sleep so the new page has time to load before parsing.
btn = b.find_element_by_css_selector('.pn-next')
btn.click()
sleep(2)
soup = BeautifulSoup(b.page_source, 'lxml')
all_goods_div = soup.select('#J_goodsList>ul>li>div.gl-i-wrap')
for x in all_goods_div:
    name = x.select_one('.p-name em').text
    price = x.select_one('.p-price i').text
    all_1.append([name, price])

# Use a context manager so the CSV file is flushed and closed; the original
# passed an open() result straight to csv.writer and leaked the handle.
with open('files/毛线.csv', 'w', encoding='utf-8', newline='') as f:
    csv.writer(f).writerows(all_1)

input('结束:')
b.close()
# 4. Switching tabs
b = Chrome()                                      # create the browser
b.get('https://www.cnki.net/')                    # open CNKI
search_tag = b.find_element_by_id('txt_SearchText')
search_tag.send_keys('数据分析\n')                 # type the query; '\n' = Enter
sleep(1)                                          # let the results page load

# Elements you need to click or type into MUST be fetched through the
# browser (selenium), not through BeautifulSoup.
all_result = b.find_elements_by_css_selector('.result-table-list .name>a')
# Clicking the first result opens a NEW tab.
all_result[0].click()
sleep(1)

# NOTE: the browser object keeps pointing at the ORIGINAL tab until you
# switch explicitly. b.window_handles lists all open tabs/windows, so the
# newest tab is window_handles[-1].
b.switch_to.window(b.window_handles[-1])
soup = BeautifulSoup(b.page_source, 'lxml')
result = soup.select_one('#ChDivSummary').text
print(result)
# Closing the current window does NOT re-point the browser object.
b.close()

# Back to the first tab; click the next search result.
b.switch_to.window(b.window_handles[0])
all_result[1].click()
sleep(1)
b.switch_to.window(b.window_handles[-1])
soup = BeautifulSoup(b.page_source, 'lxml')
result = soup.select_one('#ChDivSummary').text
print(result)
b.close()

input('结束:')
b.close()