Tutorial: Downloading Sentinel-1 Precise Orbit Data

Updated 2022-03-11

1 What changed

The difference in this update is that the download request now sends a set of browser headers (including the login cookie) through urllib.request.Request:

request = urllib.request.Request(url,headers=headers)
response = urllib.request.urlopen(request)
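
To make this concrete, here is a minimal sketch that downloads a single orbit file with such a cookie-carrying request. The file name is only an example, and whether the server accepts this reduced header set is an assumption; the full script in section 3 sends the complete set of browser headers.

import urllib.request

# Example only: substitute any .EOF name listed on https://s1qc.asf.alaska.edu/aux_poeorb/
eof_name = "S1A_OPER_AUX_POEORB_OPOD_20220301T081557_V20220208T225942_20220210T005942.EOF"
url = "https://s1qc.asf.alaska.edu/aux_poeorb/" + eof_name

headers = {
    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64)",
    "Cookie": "****",  # paste the cookie obtained in section 2.1
}

request = urllib.request.Request(url, headers=headers)
with urllib.request.urlopen(request) as response, open(eof_name, "wb") as f:
    f.write(response.read())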

2 Preparation

2.1 Get the cookie

1) Go to https://s1qc.asf.alaska.edu/aux_poeorb/ and log in with your Earthdata user name and password.

(screenshot)
2) Click one of the precise-orbit files to get the cookie (copy the Cookie value that the browser sends with the request); a quick check that the copied cookie works is sketched after the screenshot.
(screenshot)
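
A quick sanity check (a sketch, assuming the requests package is installed; see section 2.2) is to request the listing page with the copied Cookie header: if the cookie is accepted, the orbit file listing with its .EOF links comes back, otherwise the request ends up on the Earthdata login page.

import requests

headers = {"Cookie": "****"}  # paste the value copied from the browser
r = requests.get("https://s1qc.asf.alaska.edu/aux_poeorb/", headers=headers)
print(r.url)              # stays on s1qc.asf.alaska.edu if the cookie was accepted
print(".EOF" in r.text)   # True when the orbit file listing was returned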

2.2 Install virtualenv and lxml

Open cmd --> pip install virtualenv --> pip install lxml. The script below also imports requests and bs4 (BeautifulSoup), so run pip install requests beautifulsoup4 as well if they are missing.

3 Code

Lines you need to modify:

out_path = r'F:\精轨数据' # folder where the downloaded orbit files will be saved
"Cookie": "****"

The full script (downloads all the precise orbit files):

from urllib.parse import urlparse
import urllib.request
from bs4 import BeautifulSoup
import re
import os
import datetime
import requests
import time
import sys


def download(dest_dir, url):
    # download one .EOF file from `url` and save it to `dest_dir`
    print(url)
    print(dest_dir)
    headers={
        "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9",
	"Accept-Encoding": "gzip, deflate, br",
	"Accept-Language": "zh-CN,zh;q=0.9",
	"Connection": "keep-alive",
	"Cookie": "_gid=GA1.2.1113423179.1646831231; _ga_XXXXXXXXXX=GS1.1.1646831244949.3sfnbwdp.1.0.1646831244.0; asf-urs=eyJ0eXAiOiJKV1QiLCJhbGciOiJSUzI1NiJ9.eyJmaXJzdF9uYW1lIjoiWkFITkciLCJsYXN0X25hbWUiOiJ0ZW5naGZlaSIsInVycy11c2VyLWlkIjoiemFobmd5ZW5nZmVpIiwidXJzLWFjY2Vzcy10b2tlbiI6ImV5SjBlWEFpT2lKS1YxUWlMQ0p2Y21sbmFXNGlPaUpGWVhKMGFHUmhkR0VnVEc5bmFXNGlMQ0poYkdjaU9pSklVekkxTmlKOS5leUowZVhCbElqb2lUMEYxZEdnaUxDSmpiR2xsYm5SZmFXUWlPaUpDVDE5dU4yNVVTV3hOYkdwa2RsVTJhMUpTUWpObklpd2laWGh3SWpveE5qUTVOREl6TlRVekxDSnBZWFFpT2pFMk5EWTRNekUxTlRNc0ltbHpjeUk2SWtWaGNuUm9aR0YwWVNCTWIyZHBiaUlzSW5WcFpDSTZJbnBoYUc1bmVXVnVaMlpsYVNKOS5CaEZ2RFhuMl9kQ1Iydk5Va0ExZHFFeHU4cjIzRnQ2cnZybV9ZcUNicG5JIiwidXJzLWdyb3VwcyI6W10sImlhdCI6MTY0NjgzMTU1MywiZXhwIjoxNjQ3NDM2MzUzfQ.Xnun9zN0fo9UYUVw2Ei4JtjSbp_p_zCfB-BV4b1oARMmFPB8qLsyLzl631mOMOLjKP13DR0YQr9sVU3KrXPdeyP2ClI5PagCtS8Vif1s892spRhVpGritLpWNo2gEBojp4DcmE4rLVlZfjAOgSI5fHqP2vN7wT7hIE6MIvIhaXezpxQlXCQSUkysx8BFzqOsDSVj78RIDdzTUZwi-CytUB31tdHYld4pUj-m5FSfUwo8K3UpB7vOXqHsfNqhU58c7pnkOdiT9ObpOeaQ-qNCReT_meFRN1UF_q1zIbVygcgTw2xsUdzrEYoVByVEoj8yRtegadsOuqFlcJJ6qlyj3Si1yF3MFqGuVMdEwNJz3KBK-4Kb5vvTP-FDrxp2a6Sfa1lvMkYxNiBa6xZz5cbz7vMaTNqD4QTjjReB5xIuj_2xQWXXW-fxEQX9YNiQyLYA7pVhjpa3W53Szse4Yq_vxdv8sjV5t-S1XRm0xBFXiUJ1b4LOJRvGNkeu9ppEJXjbDhXuEzAnpmuY-oa7i5yYWAdD54fp0q8-8T_10UZT0W5Y80Z-1_HpW2kKdVpgsO2HMD_GS9OggVKS5ri5GTPmA10c8G4YtvXDDfVW--6wBj8DvsKfAQvzJhyirWZnlP1Uj5r7DtAfcForPRlnn_pXW5SwmsHtTQc7-y82eRtG45g; urs-user-id=zahngyengfei; urs-access-token=eyJ0eXAiOiJKV1QiLCJvcmlnaW4iOiJFYXJ0aGRhdGEgTG9naW4iLCJhbGciOiJIUzI1NiJ9.eyJ0eXBlIjoiT0F1dGgiLCJjbGllbnRfaWQiOiJCT19uN25USWxNbGpkdlU2a1JSQjNnIiwiZXhwIjoxNjQ5NDIzNTUzLCJpYXQiOjE2NDY4MzE1NTMsImlzcyI6IkVhcnRoZGF0YSBMb2dpbiIsInVpZCI6InphaG5neWVuZ2ZlaSJ9.BhFvDXn2_dCR2vNUkA1dqExu8r23Ft6rvrm_YqCbpnI; _ga=GA1.2.581701792.1646831231",
	"Host": "s1qc.asf.alaska.edu",
	"Referer": "https://s1qc.asf.alaska.edu/aux_poeorb/?sentinel1__mission=S1A&validity_start=2015-02-19",
	"sec-ch-ua": "\" Not A;Brand\";v=\"99\", \"Chromium\";v=\"96\", \"Google Chrome\";v=\"96\"",
	"sec-ch-ua-mobile": "?0",
	"sec-ch-ua-platform": "\"Windows \"",
	"Sec-Fetch-Dest": "document",
	"Sec-Fetch-Mode": "navigate",
	"Sec-Fetch-Site": "same-origin",
	"Sec-Fetch-User": "?1",
	"Upgrade-Insecure-Requests": "1",
	"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/96.0.4664.110 Safari/537.36",
     }    
    try:
        request = urllib.request.Request(url, headers=headers)
        response = urllib.request.urlopen(request)
        f = open(dest_dir, "w")
        lines = response.readlines()
        for line in lines:
            f.write(line.decode())
        f.close()
    except Exception:
        if url not in error_url:  # remember the URL so it can be retried later
            error_url.append(url)
        print("\tError retrieving the URL:", dest_dir)
    else:  # no exception occurred
        if url in error_url:  # the URL was in the error list; the retry succeeded
            error_url.remove(url)
   
def get_yestoday(mytime):
    # return the day before `mytime` ('YYYYMMDD' in, 'YYYYMMDD' out)
    myday = datetime.datetime(int(mytime[0:4]), int(mytime[4:6]), int(mytime[6:8]))
    delta = datetime.timedelta(days=-1)
    my_yestoday = myday + delta
    my_yes_time = my_yestoday.strftime('%Y%m%d')
    return my_yes_time


def usage():
    print("""
    usage: DownloadSentinalPreciseOrbit.py <data path> <orbit path>

    data path      (input)  folder containing the raw Sentinel-1 data
    orbit path     (output) folder to save the precise orbit files
    """)
    sys.exit()


if __name__ == '__main__':
    error_url = []
    out_path = r'F:\精轨数据'  # folder where the orbit files will be saved
	
    url_param_json = {}
    #url_param_json['sentinel1__mission'] = param[0:3]
    url_param_json['sentinel1__mission'] = 'S1A'
    #date = re.findall(r"\d{8}",param)[0]
    #date = get_yestoday(date)
    #tmp = list(date)
    #tmp.insert(4,'-');tmp.insert(7,'-')
    #date = "".join(tmp)
    date = '2015-01-01'
    url_param_json['validity_start'] = date

    # build the URL of the precise-orbit listing page and collect the .EOF links
    url_param = urllib.parse.urlencode(url_param_json)  # URL query parameters
    url = 'https://s1qc.asf.alaska.edu/aux_poeorb/?%s' % url_param  # assemble the full URL
    html = requests.get(url).content
    dom = BeautifulSoup(html, "lxml")  # parse the HTML document
    a_list = dom.findAll("a")  # collect all <a> tags
    eof_lists = [a['href'] for a in a_list if a['href'].endswith('.EOF')]  # keep only the .EOF links
    for eof in eof_lists:
        savefile = os.path.join(out_path,eof)
        download(savefile,'https://s1qc.asf.alaska.edu/aux_poeorb/'+eof)

    print ("------------------------------------")
    print("精密轨道数据下载完成")
    print("------------------------------------")
    print ("开始下载出错的数据")
    # 下载出错的数据重新下载
    while len(error_url)!=0:
        print("出错的数据有")
        print(error_url)
        for eof in error_url:
            savefile = os.path.join(out_path, eof[50:])
            download(savefile, eof)
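
The commented-out lines in __main__ (param[0:3], re.findall(r"\d{8}", param), get_yestoday and the dash insertion) suggest a variant that derives the query from a raw Sentinel-1 file name instead of downloading everything. A rough sketch of that logic, with a hypothetical file name, is:

import re
import datetime

# Hypothetical raw Sentinel-1 file name; substitute one of your own scenes.
param = "S1A_IW_SLC__1SDV_20220301T101010_20220301T101037_042123_05069A_1A2B.zip"

mission = param[0:3]                   # 'S1A' or 'S1B'
date = re.findall(r"\d{8}", param)[0]  # acquisition date, e.g. '20220301'
# the matching precise orbit file starts its validity the day before the acquisition
day_before = (datetime.datetime.strptime(date, '%Y%m%d')
              - datetime.timedelta(days=1)).strftime('%Y-%m-%d')

url_param_json = {'sentinel1__mission': mission, 'validity_start': day_before}
print(url_param_json)  # e.g. {'sentinel1__mission': 'S1A', 'validity_start': '2022-02-28'}

The result can be fed into the same urlencode / download logic as in the script above; in the published version, mission and validity_start are simply hardcoded to 'S1A' and '2015-01-01'.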