Collecting every URL from a page, fetching URLs by keyword, and downloading images

# coding: utf-8
# Web page URL crawler: given a URL and an output file, collect every link on the
# page and write it out; the file open mode can be specified.
import requests,time
from lxml import etree
"""
    url:给定的url
    save_file_name:为url存储文件
"""
def Redirect(url):
    try:
        res = requests.get(url,timeout=10)
        url = res.url
    except Exception as e:
        print("4",e)
        time.sleep(1)
    return url

def requests_for_url(url, save_file_name, file_model):
    """Collect every <a href> on `url`, write the links to save_file_name
    (opened with mode file_model) and return them as a de-duplicated set."""
    headers = {
        'pragma': "no-cache",
        'accept-encoding': "gzip, deflate, br",
        'accept-language': "zh-CN,zh;q=0.8",
        'upgrade-insecure-requests': "1",
        'user-agent': "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/59.0.3071.115 Safari/537.36",
        'accept': "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8",
        'cache-control': "no-cache",
        'connection': "keep-alive",
        }
    try:
        response = requests.request("GET", url, headers=headers)
        selector = etree.HTML(response.text, parser=etree.HTMLParser(encoding='utf-8'))
    except Exception as e:
        print("Failed to load page:", e)
        return set()

    return_set = set()
    with open(save_file_name, file_model) as f:
        try:
            context = selector.xpath('//a/@href')
            for i in context:
                try:
                    if i.startswith("javascript"):
                        # skip javascript: pseudo-links
                        continue
                    if i.startswith("/"):
                        # relative link: join it onto the base url
                        i = url + i.lstrip("/")
                    f.write(i)
                    f.write("\n")
                    return_set.add(i)
                    print(len(context), context[0], i)
                except Exception as e:
                    print("Failed to handle link:", e)
        except Exception as e:
            print("Failed to extract links:", e)
    return return_set

if __name__ == '__main__':
    # Collect every link on the given page into save_url_2.txt ("a" = append mode)
    url = "http://news.baidu.com/"
    save_file_name = "save_url_2.txt"
    return_set = requests_for_url(url, save_file_name, "a")
    print(len(return_set))

Collecting every URL from a given page
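
The relative-link handling in requests_for_url (prepending the base url by hand) only covers hrefs that start with "/". Below is a minimal sketch, not part of the original post, that resolves relative links with urllib.parse.urljoin instead, assuming the same requests/lxml stack; collect_links is a hypothetical name:

import requests
from lxml import etree
from urllib.parse import urljoin

def collect_links(base_url):
    """Hypothetical variant of requests_for_url that resolves relative hrefs with urljoin."""
    resp = requests.get(base_url, timeout=10)
    selector = etree.HTML(resp.text)
    links = set()
    for href in selector.xpath('//a/@href'):
        if href.startswith(("javascript:", "#")):
            continue                        # skip pseudo-links and in-page anchors
        links.add(urljoin(base_url, href))  # resolve "/x", "../x", "page.html" against the base URL
    return links

# print(len(collect_links("http://news.baidu.com/")))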


# -*- coding: utf-8 -*-
# Baidu search crawler: given a keyword, a page count and an output file, collect
# the result URLs and return them as a de-duplicated set.
import requests,time
from lxml import etree

def Redirect(url):
    """Follow url and return the final URL after any redirects."""
    try:
        res = requests.get(url, timeout=10)
        url = res.url
    except Exception as e:
        print("Redirect failed:", e)
        time.sleep(1)
    return url

def baidu_search(wd, pn_max, save_file_name):
    """Baidu search crawler: given a keyword, a number of result pages and an
    output file, collect the result URLs and return them as a de-duplicated set."""
    url = "https://www.baidu.com/s"
    return_set = set()
    for page in range(pn_max):
        pn = page * 10
        querystring = {"wd":wd,"pn":pn}
        headers = {
            'pragma': "no-cache",
            'accept-encoding': "gzip, deflate, br",
            'accept-language': "zh-CN,zh;q=0.8",
            'upgrade-insecure-requests': "1",
            'user-agent': "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/59.0.3071.115 Safari/537.36",
            'accept': "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8",
            'cache-control': "no-cache",
            'connection': "keep-alive",
            }
        try:
            response = requests.request("GET", url, headers=headers, params=querystring)
            print("Result page:", response.url)
            # parse the HTML
            selector = etree.HTML(response.text, parser=etree.HTMLParser(encoding='utf-8'))
        except Exception as e:
            print("Failed to load page:", e)
            continue

        with open(save_file_name, "a") as f:
            # Baidu numbers the results on each page pn+1 .. pn+10
            for i in range(1, 11):
                try:
                    # select the result link by its numeric id attribute
                    context = selector.xpath('//*[@id="' + str(pn + i) + '"]/h3/a[1]/@href')
                    print(len(context), context[0])
                    # follow the Baidu redirect link; Redirect returns the real target URL
                    i = Redirect(context[0])
                    f.write(i)
                    f.write("\n")
                    return_set.add(i)
                except Exception as e:
                    print("Failed to handle a result:", e)
    return return_set

if __name__ == '__main__':

    wd = "阿里巴巴 双十一"
    pn = 3
    save_file_name = "save_url.txt"
    return_set = baidu_search(wd,pn,save_file_name)

Fetching result URLs for a given keyword and page count
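
The Redirect() helper above resolves each Baidu result link by downloading the whole target page just to read res.url. A lighter variant, sketched below and not part of the original post, resolves the final URL with a HEAD request and falls back to GET; it assumes the target server answers HEAD at all, and resolve_final_url is a hypothetical name:

import requests

def resolve_final_url(link, timeout=10):
    """Hypothetical helper: return the final URL behind a redirecting link."""
    try:
        # HEAD skips the response body; allow_redirects=True makes requests follow the chain
        res = requests.head(link, allow_redirects=True, timeout=timeout)
        if res.status_code >= 400:
            # some servers reject HEAD; retry with a normal GET
            res = requests.get(link, timeout=timeout)
        return res.url
    except requests.RequestException:
        return link  # on failure, keep the original redirect link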

# -*- coding: utf-8 -*-
# Baidu image crawler: collect thumbnail URLs for each keyword and download the images.
import requests, json

"""
获取图片的所有urls,存入set集合中去重,存在urls.txt中
"""
def get_urls(key,sum):
    #请求头
    url = "https://image.baidu.com/search/index"
    headers = {
        'cookie': "td_cookie=2373937907; BDqhfp=%E6%B5%B7%E8%BE%B9%26%260-10-1undefined%26%260%26%261; BAIDUID=E26F8B2E16E037DF58FED1FDEAD8A636:FG=1; BIDUPSID=E26F8B2E16E037DF58FED1FDEAD8A636; PSTM=1506000312; pgv_pvi=229598208; td_cookie=2463294905; BDRCVFR[X_XKQks0S63]=mk3SLVN4HKm; BDRCVFR[-pGxjrCMryR]=mk3SLVN4HKm; firstShowTip=1; indexPageSugList=%5B%22%E6%B5%B7%E8%BE%B9%22%2C%22%E6%B5%B7%E5%B2%B8%22%5D; cleanHistoryStatus=0; BDRCVFR[dG2JNJb_ajR]=mk3SLVN4HKm; userFrom=null",
        'accept-encoding': "gzip, deflate, br",
        'accept-language': "zh-CN,zh;q=0.8",
        'user-agent': "Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/61.0.3163.79 Safari/537.36",
        'accept': "text/plain, */*; q=0.01",
        'referer': "https://image.baidu.com/search/index?tn=baiduimage&ipn=r&ct=201326592&cl=2&lm=-1&st=-1&fm=index&fr=&hs=0&xthttps=111111&sf=1&fmq=&pv=&ic=0&nc=1&z=&se=1&showtab=0&fb=0&width=&height=&face=0&istype=2&ie=utf-8&word=%E6%B5%B7%E8%BE%B9&oq=%E6%B5%B7%E8%BE%B9&rsp=-1",
        'x-requested-with': "XMLHttpRequest",
        'connection': "keep-alive",
        'cache-control': "no-cache",
        'postman-token': "c1717c49-7d6f-b452-0005-026e525e7b43"
        }
    # pn controls the result-page offset (30 images per page)
    n = 0
    s = set()

    # request each result page and collect the thumbnail URLs
    for pn in range(0, 21):
        print("pn=" + str(pn * 30))
        url = "https://image.baidu.com/search/acjson?tn=resultjson_com&ipn=rj&ct=201326592&is=&fp=result&queryWord=" + key + "&cl=2&lm=-1&ie=utf-8&oe=utf-8&adpicid=&st=-1&z=&ic=0&word=" + key + "&s=&se=&tab=&width=&height=&face=0&istype=2&qc=&nc=1&fr=&pn=" + str(pn * 30) + "&rn=30"
        r = requests.get(url, headers=headers).text
        try:
            dictinfo = json.loads(r)
            # 30 thumbnails per result page
            for i in range(0, 30):
                if n >= sum:
                    break
                temp = dictinfo["data"][i]["thumbURL"]
                n = n + 1
                s.add(str(temp) + "\n")
        except Exception as e:
            print("Request failed, skipping this page:", e)
        if n >= sum:
            break

    print(len(s))
    with open("urls.txt", "w") as f:
        for url in s:
            f.write(url)
    print("get_urls done")


# download every URL in urls.txt and save the images under SavePath
def write_pics(SavePath, sum):
    m = 1
    with open("./urls.txt", "r") as f:
        for url in f:
            Path = SavePath + str(m) + ".jpg"
            r = requests.get(url.strip())
            with open(Path, "wb") as f1:
                f1.write(r.content)
            m = m + 1
            if m > sum:
                break
    print("write_pics done")

def get_class(name):
    # split the "|"-separated keyword list and crawl each keyword in turn;
    # `sum` is the module-level download limit set in __main__
    name = name.split("|")
    print(len(name))

    for index in range(0, len(name)):
        print("keyword=" + name[index])
        SavePath = "./snow/" + str(name[index]) + "_"
        print(SavePath)
        get_urls(name[index], sum)
        write_pics(SavePath, sum)


if __name__=="__main__":
    sea = "海边|海岸|沿海"
    tree = "山林|树林|森林"
    road = "公路|道路|小路|马路"
    snow ="雪景|雪地|冬天景色"
    town ="小镇|小镇风光|小镇建筑风景"
    sum = 600
    get_class(snow)

Multi-keyword image search
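
For reference, the hand-built acjson query string in get_urls can also be expressed with requests' params argument, which URL-encodes the Chinese keyword automatically. The sketch below is not part of the original post; the parameter names are copied from the URL above, and fetch_image_page is a hypothetical helper:

import requests

def fetch_image_page(key, page, headers):
    """Hypothetical helper: fetch one 30-image result page as parsed JSON."""
    params = {
        "tn": "resultjson_com", "ipn": "rj", "ct": "201326592", "fp": "result",
        "queryWord": key, "word": key, "cl": "2", "lm": "-1",
        "ie": "utf-8", "oe": "utf-8", "st": "-1", "ic": "0",
        "face": "0", "istype": "2", "nc": "1",
        "pn": str(page * 30), "rn": "30",
    }
    resp = requests.get("https://image.baidu.com/search/acjson",
                        headers=headers, params=params, timeout=10)
    return resp.json()   # equivalent to json.loads(r) in get_urls

# thumbs = [item["thumbURL"] for item in fetch_image_page("雪景", 0, headers)["data"]
#           if "thumbURL" in item]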


The above is reposted from xunalove's blog.
