# -*- coding:utf-8 -*-
import os
import time
import requests
import urllib
import uuid
from pyquery import PyQuery
# Request headers: present a desktop Chrome user agent so the site serves
# the normal HTML page instead of blocking the script as a bot.
headers = {
    "User-Agent": (
        "Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) "
        "Chrome/75.0.3770.142 Safari/537.36"
    ),
}
# Read search keywords from list.txt and, for each keyword, fetch Pixabay
# search-result pages for later image extraction.
# NOTE(review): this block is incomplete in the source — the `try:` at the end
# has no visible `except`, and the original indentation was lost when the code
# was pasted/scraped, so the block is not runnable as-is. Code left byte-identical;
# only comments were translated/added.
def href_url_download():
# 1. Keyword list file: one search keyword per line in list.txt.
keyword_list = open("list.txt", 'r', encoding='utf-8')
lines = keyword_list.readlines()
keyword_list.close()
for keyword in lines:
keyword = keyword.strip()
print(keyword)
# 2. Pages to crawl — the author notes range(1, 101) crawls the default 100
# pages; currently limited to 2 pages (1 and 2).
for pages in range(1, 3):
page = str(pages)
url = "https://pixabay.com/zh/images/search/" + keyword + "/?pagi=" + page # build the search-results URL for this keyword/page ("pagi" is Pixabay's page parameter)
# print(url)
try:
txt = requests.get(url, headers=headers).text # fetch the result page HTML with the spoofed browser headers
# (Scrape residue, not code) Article title: "pixabay网站图片爬虫" — Pixabay image crawler.
# Latest recommended-article timestamp: published 2024-05-01 14:20:05.