Scraping Website Images, v1.0

2023-5-15
1. Scraped images from https://sc.chinaz.com/tupian/shuaigetupian.html.
2. Downloaded them into the local folder D:\spider_image.

import os
import random
import time

import requests
from lxml import etree
from selenium import webdriver

user_agent = [
    "Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10_6_8; en-us) AppleWebKit/534.50 (KHTML, like Gecko) Version/5.1 Safari/534.50",
    "Mozilla/5.0 (Windows; U; Windows NT 6.1; en-us) AppleWebKit/534.50 (KHTML, like Gecko) Version/5.1 Safari/534.50",
    "Mozilla/5.0 (Windows NT 10.0; WOW64; rv:38.0) Gecko/20100101 Firefox/38.0",
    "Mozilla/5.0 (Windows NT 10.0; WOW64; Trident/7.0; .NET4.0C; .NET4.0E; .NET CLR 2.0.50727; .NET CLR 3.0.30729; .NET CLR 3.5.30729; InfoPath.3; rv:11.0) like Gecko",
    "Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Trident/5.0)",
    "Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0)",
    "Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 6.0)",
    "Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1)",
    "Mozilla/5.0 (Macintosh; Intel Mac OS X 10.6; rv:2.0.1) Gecko/20100101 Firefox/4.0.1",
    "Mozilla/5.0 (Windows NT 6.1; rv:2.0.1) Gecko/20100101 Firefox/4.0.1",
    "Opera/9.80 (Macintosh; Intel Mac OS X 10.6.8; U; en) Presto/2.8.131 Version/11.11",
    "Opera/9.80 (Windows NT 6.1; U; en) Presto/2.8.131 Version/11.11",
    "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_7_0) AppleWebKit/535.11 (KHTML, like Gecko) Chrome/17.0.963.56 Safari/535.11",
    "Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; Maxthon 2.0)",
    "Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; TencentTraveler 4.0)",
    "Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1)",
    "Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; The World)",
    "Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; Trident/4.0; SE 2.X MetaSr 1.0; SE 2.X MetaSr 1.0; .NET CLR 2.0.50727; SE 2.X MetaSr 1.0)",
    "Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; 360SE)",
    "Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; Avant Browser)",
    "Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1)",
    "Mozilla/5.0 (iPhone; U; CPU iPhone OS 4_3_3 like Mac OS X; en-us) AppleWebKit/533.17.9 (KHTML, like Gecko) Version/5.0.2 Mobile/8J2 Safari/6533.18.5",
    "Mozilla/5.0 (iPod; U; CPU iPhone OS 4_3_3 like Mac OS X; en-us) AppleWebKit/533.17.9 (KHTML, like Gecko) Version/5.0.2 Mobile/8J2 Safari/6533.18.5",
    "Mozilla/5.0 (iPad; U; CPU OS 4_3_3 like Mac OS X; en-us) AppleWebKit/533.17.9 (KHTML, like Gecko) Version/5.0.2 Mobile/8J2 Safari/6533.18.5",
    "Mozilla/5.0 (Linux; U; Android 2.3.7; en-us; Nexus One Build/FRF91) AppleWebKit/533.1 (KHTML, like Gecko) Version/4.0 Mobile Safari/533.1",
    "MQQBrowser/26 Mozilla/5.0 (Linux; U; Android 2.3.7; zh-cn; MB200 Build/GRJ22; CyanogenMod-7) AppleWebKit/533.1 (KHTML, like Gecko) Version/4.0 Mobile Safari/533.1",
    "Opera/9.80 (Android 2.3.4; Linux; Opera Mobi/build-1107180945; U; en-GB) Presto/2.8.149 Version/11.10",
    "Mozilla/5.0 (Linux; U; Android 3.0; en-us; Xoom Build/HRI39) AppleWebKit/534.13 (KHTML, like Gecko) Version/4.0 Safari/534.13",
    "Mozilla/5.0 (BlackBerry; U; BlackBerry 9800; en) AppleWebKit/534.1+ (KHTML, like Gecko) Version/6.0.0.337 Mobile Safari/534.1+",
    "Mozilla/5.0 (hp-tablet; Linux; hpwOS/3.0.0; U; en-US) AppleWebKit/534.6 (KHTML, like Gecko) wOSBrowser/233.70 Safari/534.6 TouchPad/1.0",
    "Mozilla/5.0 (SymbianOS/9.4; Series60/5.0 NokiaN97-1/20.0.019; Profile/MIDP-2.1 Configuration/CLDC-1.1) AppleWebKit/525 (KHTML, like Gecko) BrowserNG/7.1.18124",
    "Mozilla/5.0 (compatible; MSIE 9.0; Windows Phone OS 7.5; Trident/5.0; IEMobile/9.0; HTC; Titan)",
    "UCWEB7.0.2.37/28/999",
    "NOKIA5700/ UCWEB7.0.2.37/28/999",
    "Openwave/ UCWEB7.0.2.37/28/999",
    "Mozilla/4.0 (compatible; MSIE 6.0; ) Opera/UCWEB7.0.2.37/28/999",
    # iPhone 6:
    "Mozilla/6.0 (iPhone; CPU iPhone OS 8_0 like Mac OS X) AppleWebKit/536.26 (KHTML, like Gecko) Version/8.0 Mobile/10A5376e Safari/8536.25",
]
headers = {'User-Agent': random.choice(user_agent)}
url = 'https://sc.chinaz.com/tupian/shuaigetupian.html'


if __name__ == '__main__':
    # The images on the page are loaded dynamically, so parsing the raw HTML with
    # lxml alone would not give the real image URLs; use Selenium to render the page
    # (requires a Chrome driver that Selenium can find).
    driver = webdriver.Chrome()
    # Open the target page
    driver.get(url)
    # Wait 2 seconds for the page to load
    time.sleep(2)

    driver.maximize_window()
    time.sleep(2)

    # Scroll down in 500-pixel steps so the lazy-loaded images are actually fetched
    time.sleep(5)
    height = 0
    for i in range(10):
        driver.execute_script("window.scrollTo({},{});".format(height, height + 500))
        height = height + 500
        time.sleep(1)

    # Give the last batch of images time to finish loading
    time.sleep(5)


    # Grab the rendered page source, then close the browser
    source_result = driver.page_source
    driver.quit()

    # Parse the rendered HTML and extract the src attribute of every image
    html = etree.HTML(source_result)
    img_urls = html.xpath('/html/body/div[3]/div[2]/div/img/@src')
    print(img_urls)

    # The extracted src values have no scheme, so prepend "http:" to get full URLs
    url_list = []
    for i in range(len(img_urls)):
        full_url = "http:" + img_urls[i]
        url_list.append(full_url)
        print(full_url + '\n')

    # Download every image into the local folder
    save_dir = "D:/spider_image"
    os.makedirs(save_dir, exist_ok=True)  # create the folder if it does not exist

    for ur in url_list:
        # Use the last segment of the URL as the file name
        file_name = ur.split('/')[-1]
        path = save_dir + '/' + file_name
        resp = requests.get(ur, headers=headers)
        with open(path, 'wb') as h:
            h.write(resp.content)

Scraping approach:
1. After opening the target URL, the page shows many images; to download them, we first need to get each image's download link.
2. We locate the links with XPath. While doing so, we found that some images are lazy-loaded: until they load, the extracted links look like Figure 1. So we use Selenium to render the page, grab the rendered source, and build the full URLs by concatenation, as shown in Figure 2 (see the sketch after the figures).
Figure 1: [screenshot: the placeholder src values before the images have loaded]

Figure 2: [screenshot: the real image URLs after rendering the page with Selenium]
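
To see the lazy-loading problem for yourself, here is a minimal sketch that fetches the page twice, once statically with requests + lxml and once through Selenium, and prints the first few src values from each. It assumes the same URL and XPath as the script above; check_url, xpath_expr and check_headers are just illustrative names, and exactly how the placeholder values look before loading depends on the site, so the static output may vary.

# Sketch: compare the static HTML with the Selenium-rendered HTML.
import time
import requests
from lxml import etree
from selenium import webdriver

check_url = 'https://sc.chinaz.com/tupian/shuaigetupian.html'
xpath_expr = '/html/body/div[3]/div[2]/div/img/@src'
check_headers = {'User-Agent': 'Mozilla/5.0'}

# 1) Static fetch: lazy-loaded images typically still carry placeholder src values here
static_html = etree.HTML(requests.get(check_url, headers=check_headers).text)
print('static  :', static_html.xpath(xpath_expr)[:5])

# 2) Rendered fetch: after Selenium loads and scrolls the page, the real URLs should appear
drv = webdriver.Chrome()
drv.get(check_url)
drv.execute_script("window.scrollTo(0, document.body.scrollHeight);")
time.sleep(2)
rendered_html = etree.HTML(drv.page_source)
drv.quit()
print('rendered:', rendered_html.xpath(xpath_expr)[:5])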

3. Once we have the image download links, the rest is easy: basic file operations save each image into a local folder. The final result is shown in Figure 3. An optional, more defensive version of the download loop is sketched after the figure.

Figure 3: [screenshot: the downloaded images in D:\spider_image]
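
The download loop in the script writes whatever requests returns straight to disk. As an optional hardening step, not part of the original script, the sketch below adds a timeout, an HTTP status check, and skips files that already exist; download_one and save_dir are illustrative names.

# Sketch: a more defensive download loop (optional).
import os
import requests

def download_one(img_url, save_dir, headers, timeout=10):
    os.makedirs(save_dir, exist_ok=True)
    path = os.path.join(save_dir, img_url.split('/')[-1])
    if os.path.exists(path):       # skip files we already downloaded
        return path
    resp = requests.get(img_url, headers=headers, timeout=timeout)
    resp.raise_for_status()        # fail loudly on HTTP errors
    with open(path, 'wb') as fh:
        fh.write(resp.content)
    return path

# Usage with the url_list and headers built in the script above:
# for ur in url_list:
#     download_one(ur, "D:/spider_image", headers)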
