Downloading Sentinel-1 Data with the ASF API in Python

Overview: this script queries Sentinel-1 data through the ASF API, then builds the download links and fetches the products in the style of the bulk-download script from the ASF website. A VPN is recommended while downloading: a slow connection or a network interruption can end a download before it completes, or leave an archive that cannot be extracted, so it is best to test each archive with 7z after the download finishes.
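
Since an interrupted transfer typically leaves a corrupt archive, the 7z check can also be scripted. Below is a minimal sketch using Python's standard zipfile module (Sentinel-1 products are delivered as .zip archives; the function name is illustrative):

import zipfile

def verify_archive(path):
    # testzip() returns the name of the first corrupt member, or None if every member passes its CRC check
    try:
        with zipfile.ZipFile(path) as zf:
            return zf.testzip() is None
    except zipfile.BadZipFile:
        return False

The full query-and-download script: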

import os, sys
import time
import base64
import pandas as pd
import geopandas as gpd
from selenium import webdriver
from selenium.webdriver.chrome.service import Service
from urllib.request import build_opener, Request
from urllib.request import HTTPHandler, HTTPSHandler, HTTPCookieProcessor
from http.cookiejar import CookieJar


def search_ASF_API(start_time, end_time, search_file):
    # ASF API Search
    asf_baseurl = 'https://api.daac.asf.alaska.edu/services/search/param?'
    platform = "platform=Sentinel-1A,Sentinel-1B&processingLevel=SLC&beamMode=IW"
    st = "start=" + start_time + "T00:00:00UTC"
    ed = "end=" + end_time + "T23:59:59UTC"
    # Read the AOI file and use its bounding box as the search extent
    wrs = gpd.GeoDataFrame.from_file(search_file)
    bounds = (wrs.bounds.values)[0]
    bbox = "bbox=" + ",".join(str(b) for b in bounds)
    # Assemble the query string
    arg_list = [platform, st, ed, bbox, "output=csv"]
    arg_str = '&'.join(arg_list)
    url = asf_baseurl + arg_str
    # A fresh download directory named after the current timestamp (drive path elided in the original)
    FileDir = 'E:\\**\\' + str(time.time()).replace('.', '')
    os.mkdir(FileDir)
    # Configure Chrome to save downloads into FileDir without prompting
    options = webdriver.ChromeOptions()
    prefs = {'profile.default_content_settings.popups': 0, 'download.default_directory': FileDir}
    options.add_experimental_option('prefs', prefs)
    chromedriver = r"C:\***\Chrome\Application\chromedriver.exe"
    # Selenium 4 passes the driver path via a Service object (older versions used executable_path)
    driver = webdriver.Chrome(service=Service(chromedriver), options=options)
    # Open the search URL; Chrome downloads the returned CSV automatically
    driver.get(url)
    # Wait for the CSV download to complete
    time.sleep(50)
    driver.quit()

    # Name of the downloaded CSV (the only file in the new directory)
    csvfilename = os.listdir(FileDir)[0]
    # Full path to the CSV
    csvfilepath = os.path.join(FileDir, csvfilename)
    # Extract the download links
    if os.path.isfile(csvfilepath):
        csvvalue = (pd.read_csv(csvfilepath)).values
        # Column 25 of the ASF CSV holds the product download URL
        download_list = [cv[25] for cv in csvvalue]
        return download_list
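
# A browser-free alternative (a sketch, not part of the original post): the query
# URL returns the CSV directly over HTTP, so it can be fetched with the
# third-party requests library, skipping Selenium and the fixed 50-second wait.
# It assumes the CSV's download-link column is named "URL".
def search_ASF_API_with_requests(url):
    import io
    import requests
    resp = requests.get(url, timeout=120)
    resp.raise_for_status()
    return pd.read_csv(io.StringIO(resp.text))["URL"].tolist()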

def download_Sentinel1_from_ASF(download_files):
    file_count = 0
    for url in download_files:
        file_count += 1
        filename = url.split("/")[-1]
        # Field 5 of the Sentinel-1 product name is the acquisition start time;
        # its first four characters (the year) name the output folder
        outdir = 'E:/Temp/' + (filename.split("_")[5])[0:4]
        if not os.path.isdir(outdir):
            os.makedirs(outdir)
        path = outdir + '/' + filename
        if not os.path.exists(path):
            print("({0}/{1}) Downloading {2}".format(file_count, len(download_files), filename))
            with open(path, 'w+b') as out:
                geturl(url, out)

def geturl(url, out):
    auth_cookie_url = 'https://urs.earthdata.nasa.gov/oauth/authorize?client_id=BO_n7nTIlMljdvU6kRRB3g&redirect_uri=https://auth.asf.alaska.edu/login&response_type=code&state='
    username = "username"  # Earthdata account username and password
    password = "password"
    context = {}  # extra HTTPSHandler kwargs (e.g. an SSL context); empty here
    user_pass = base64.b64encode(bytes(username + ":" + password, "utf-8"))
    user_pass = user_pass.decode("utf-8")
    cookie_jar = CookieJar()
    # Opener that carries cookies through the URS redirect chain
    opener = build_opener(HTTPCookieProcessor(cookie_jar), HTTPHandler(), HTTPSHandler(**context))
    # Log in to Earthdata with HTTP Basic auth
    request = Request(auth_cookie_url, headers={"Authorization": "Basic {0}".format(user_pass)})
    # Opening the request performs the login and stores the session cookie
    response = opener.open(request)
    # Request the file itself with a plain GET. (The original overrode the method
    # to HEAD; a HEAD response carries no body, which can leave an empty file.)
    download_request = Request(url)
    download_response = opener.open(download_request)
    start = time.time()
    # Stream the response body into the local file, reporting progress
    chunk_read(download_response, out, report_hook=chunk_report)
    elapsed = max(time.time() - start, 1.0)
    # Average download rate
    size = get_total_size(download_response)
    rate = (size / 1024 ** 2) / elapsed
    print("Downloaded {0} bytes in {1:.2f} secs, Average Rate: {2:.2f} MB/sec".format(size, elapsed, rate))

def chunk_read(response, local_file, chunk_size=10240, report_hook=None):
    # Total file size, if the server reported one
    file_size = get_total_size(response)
    bytes_so_far = 0
    # Stream the response to disk in fixed-size chunks
    while True:
        try:
            # Read the next chunk from the response
            chunk = response.read(chunk_size)
        except Exception:
            sys.stdout.write("\n > There was an error reading data. \n")
            break
        # An empty chunk means the download is complete
        if not chunk:
            break
        # Write the chunk to the local file (opened in binary mode) and update the running total
        local_file.write(chunk)
        bytes_so_far += len(chunk)
        if report_hook:
            # Report progress so far
            report_hook(bytes_so_far, file_size)

    return bytes_so_far

def chunk_report(bytes_so_far, file_size):
    if file_size is not None:
        # Progress as a percentage of the total size
        percent = float(bytes_so_far) / file_size
        percent = round(percent * 100, 2)
        sys.stdout.write(" > Downloaded %d of %d bytes (%0.2f%%)\r" %
                         (bytes_so_far, file_size, percent))
    else:
        # We couldn't figure out the size.
        sys.stdout.write(" > Downloaded %d of unknown size\r" % bytes_so_far)

# Read the file size from the response's Content-Length header
def get_total_size(response):
    try:
        # Python 2 style header access
        file_size = response.info().getheader('Content-Length').strip()
    except AttributeError:
        try:
            # Python 3 style header access
            file_size = response.getheader('Content-Length').strip()
        except AttributeError:
            print("> Problem getting size")
            return None
    return int(file_size)



if __name__ == "__main__":

    start_time = sys.argv[1]   # e.g. "2021-04-21"
    end_time = sys.argv[2]     # e.g. "2021-05-01"
    search_file = sys.argv[3]  # e.g. r'E:\***\北京市.shp'
    download_files = search_ASF_API(start_time, end_time, search_file)
    download_Sentinel1_from_ASF(download_files)
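
The script takes a start date, an end date, and an AOI shapefile on the command line; a hypothetical invocation (the script and shapefile names are illustrative):

python asf_s1_download.py 2021-04-21 2021-05-01 beijing.shp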