Python获取高德地图POI

原创 2016年08月28日 20:28:59
# -*- encoding: utf-8 -*-

# coding:utf-8
import xlrd
from xlwt import Workbook
from tempfile import TemporaryFile
import urllib.request
import xml.dom.minidom as minidom
import xlwt

# Output directory for the generated Excel result file.
inforst = '/home/yuhz/PycharmProjects/untitled2/POI/'

# Input workbook: each row is expected to hold an owner label (col 0)
# and an amap city code (col 1) — see the __main__ block below.
data = xlrd.open_workbook('/home/yuhz/PycharmProjects/untitled2/POI/city1.xls')
sheet=data.sheets()[0]

file_name = 'result.txt'  # opened (append) by parseXML; result text file
#keyword = '汽车修理' urllib.parse.quote(keyword)
# amap place/text search URL template; `city=` is overwritten per row in
# __main__. NOTE(review): `key=` is empty — a valid amap API key must be
# supplied for the request to succeed.
url_amap = 'http://restapi.amap.com/v3/place/text?&keywords=&types=010800&city=370602&citylimit=true&&output=xml&offset=20&page=1&key=&extensions=base'
#facility_type = r'types=170300'  # factory facilities
#region = r'city=120113'  # beichen of tianjin
each_page_rec = 20  # number of results the API returns per page (offset=20)
which_pach = r'page=1'  # which page to display
xml_file = 'tmp.xml'  # scratch file where getHtml stores each XML response

# Output workbook; per-sheet headers are written in the __main__ block.
book = Workbook()
#sheet1 = book.add_sheet('Sheet 0')
# Cell style: enable text wrapping in written cells.
style = xlwt.easyxf('align: wrap on')
# get html by url and save the data to xml file
# Fetch `url` and persist the raw response body to the module-level
# `xml_file` path for parseXML to consume.
#
# Returns 0 on success, -1 when the file cannot be written to disk.
# Network errors raised by urlopen propagate to the caller unchanged.
def getHtml(url):
    # FIX: close the HTTP response deterministically via a context
    # manager; the original left it to be reclaimed by garbage
    # collection, leaking the socket on every call.
    with urllib.request.urlopen(url) as page:
        html = page.read()

    try:
        # Save the XML payload to the scratch file.
        with open(xml_file, 'wb') as xml_file_handle:
            xml_file_handle.write(html)
    except IOError as err:
        print("IO error: " + str(err))
        return -1

    return 0


# phrase data from xml
# Parse the amap XML response saved in `xml_file` and append one row per
# POI to the module-level worksheet `sheet1`, starting at row `index + 1`.
#
# Returns the total record count reported by the API's <count> element
# (as a string), or the initial value 1 when no <response> node exists.
def parseXML(index):
    total_rec = 1  # record count reported by the API; 1 = "not found yet"

    def _poi_text(poi, tag):
        # Text of the first <tag> child, or "" when amap omits the
        # optional element (indexing the empty NodeList raises IndexError).
        try:
            return poi.getElementsByTagName(tag)[0].childNodes[0].nodeValue
        except IndexError:
            return ""

    try:
        # NOTE(review): `file_name` is opened for append but never written
        # to; the open is kept only to preserve the original side effect
        # of creating the file.
        with open(file_name, 'a') as file_handle:
            dom = minidom.parse(xml_file)
            # getElementsByTagName returns a NodeList of <response> nodes.
            root = dom.getElementsByTagName("response")

            for node in root:
                total_rec = node.getElementsByTagName("count")[0].childNodes[0].nodeValue

                pois = node.getElementsByTagName("pois")
                for poi in pois[0].getElementsByTagName('poi'):
                    # <name> and <location> are treated as mandatory (an
                    # IndexError would propagate); the rest fall back to "".
                    name = poi.getElementsByTagName("name")[0].childNodes[0].nodeValue
                    address = _poi_text(poi, "address")
                    tel = _poi_text(poi, "tel")
                    pname = _poi_text(poi, "pname")
                    cityname = _poi_text(poi, "cityname")
                    adname = _poi_text(poi, "adname")
                    location = poi.getElementsByTagName("location")[0].childNodes[0].nodeValue
                    # Write this POI into the next Excel row.
                    index = index + 1
                    row1 = sheet1.row(index)
                    row1.write(0, name, style)
                    row1.write(1, address, style)
                    row1.write(2, tel, style)
                    row1.write(3, pname, style)
                    row1.write(4, cityname, style)
                    row1.write(5, adname, style)
                    row1.write(6, location, style)

    except IOError as err:
        # BUG FIX: the original used a Python 2 style bare `print`
        # statement split over two lines, which in Python 3 evaluates to
        # two no-op expressions and silently discarded the message.
        print("IO error: " + str(err))

    return total_rec


if __name__ == '__main__':
    nrows = sheet.nrows
    # Summary sheet: one row per input city with owner label, city code
    # and the total POI count reported by the API.
    sheet2 = book.add_sheet('All Data')
    row2 = sheet2.row(0)
    row2.write(0, 'Owner')
    row2.write(1, 'city')
    row2.write(2, 'count')
    sheet2.col(0).width = 10000
    sheet2.col(1).width = 10000
    sheet2.col(2).width = 10000
    sheet2.col(3).width = 5000
    total_record = 0
    for i in range(nrows):
        # Column 1 holds the amap city code, column 0 the owner label.
        keywords = sheet.row(i)[1].value
        owner = sheet.row(i)[0].value
        # One detail sheet per city; parseXML appends POI rows to it via
        # the module-level name `sheet1`.
        sheet1 = book.add_sheet(keywords)
        row1 = sheet1.row(0)
        row1.write(0, 'CORPNAME')
        row1.write(1, 'ADDRESS')
        row1.write(2, 'TEL')
        row1.write(3, 'pname')
        row1.write(4, 'cityname')
        row1.write(5, 'adname')
        row1.write(6, 'location')
        sheet1.col(0).width = 10000
        sheet1.col(1).width = 10000
        sheet1.col(2).width = 10000
        sheet1.col(3).width = 5000
        sheet1.col(4).width = 5000
        sheet1.col(5).width = 5000
        sheet1.col(6).width = 5000
        index = 0  # row cursor for sheet1; advanced by 20 per extra page
        # NOTE(review): `key=` is empty — a valid amap API key is required.
        url_amap = 'http://restapi.amap.com/v3/place/text?&keywords=&types=010800&city='+ urllib.parse.quote(
            keywords) +'&citylimit=true&&output=xml&offset=20&page=1&key=&extensions=base'
        if getHtml(url_amap) == 0:
            print('parsing page 1 ... ...')
            # Parse page 1 and learn the total record count.
            total_record = int(parseXML(index))
            # FIX: floor division makes page_number an int directly (the
            # original relied on float division plus int()). The +2 / +1
            # offsets exist because range() below starts at 2 and
            # excludes its upper bound.
            if (total_record % each_page_rec) != 0:
                page_number = total_record // each_page_rec + 2
            else:
                page_number = total_record // each_page_rec + 1

            # Retrieve the remaining pages.
            for each_page in range(2, page_number):
                index = index + 20
                print('parsing page ' + str(each_page) + ' ... ...')
                url_amap = url_amap.replace('page=' + str(each_page - 1), 'page=' + str(each_page))
                getHtml(url_amap)
                total_record = int(parseXML(index))
                if total_record == 0:
                    break

        else:
            # BUG FIX: the original Python 2 style bare `print` statement
            # was a silent no-op under Python 3; the failure was never
            # reported.
            print('error: fail to get xml from amap')
        # Summary row for this city.
        row2 = sheet2.row(i + 1)
        row2.write(0, owner)
        row2.write(1, keywords)
        row2.write(2, total_record)
        sheet2.col(0).width = 10000
        sheet2.col(1).width = 10000
        sheet2.col(2).width = 10000
    book.save(inforst + 'result.xls')
    book.save(TemporaryFile())

Version 1.0:支持跨sheet页获取数据,并且按照原sheet页顺序写入。

# -*- encoding: utf-8 -*-

# coding:utf-8
import xlrd
from xlwt import Workbook
from tempfile import TemporaryFile
import urllib.request
import xml.dom.minidom as minidom
import xlwt

# Output directory for the generated Excel result file.
# NOTE(review): unlike the first version, this path has no trailing
# slash — check how it is concatenated when saving.
inforst = 'C:/Users/玲玲/PycharmProjects/untitled/POI'

# Input workbook; every sheet is processed by the __main__ block below.
data = xlrd.open_workbook('C:/Users/玲玲/PycharmProjects/untitled/POI/city.xls')
#sheet=data.sheets()[0]

file_name = 'result.txt'  # opened (append) by parseXML; result text file
#keyword = '汽车修理' urllib.parse.quote(keyword)
# amap place/text search URL template; `city=` is overwritten per row in
# __main__. NOTE(review): `key=¥¥` looks like a garbled/redacted
# placeholder — a valid amap API key must be substituted here.
url_amap = 'http://restapi.amap.com/v3/place/text?&keywords=&types=010800&city=370602&citylimit=true&&output=xml&offset=20&page=1&key=¥¥&extensions=base'
#facility_type = r'types=170300'  # factory facilities
#region = r'city=120113'  # beichen of tianjin
each_page_rec = 20  # number of results the API returns per page (offset=20)
which_pach = r'page=1'  # which page to display
xml_file = 'tmp.xml'  # scratch file where getHtml stores each XML response

# Output workbook; per-sheet headers are written in the __main__ block.
book = Workbook()
# Global row cursor for the current detail sheet; advanced by parseXML
# across pages and reset per input sheet in __main__.
all_index = 0
#sheet1 = book.add_sheet('Sheet 0')
# Cell style: enable text wrapping in written cells.
style = xlwt.easyxf('align: wrap on')


# get html by url and save the data to xml file
# Fetch `url` and persist the raw response body to the module-level
# `xml_file` path for parseXML to consume.
#
# Returns 0 on success, -1 when the file cannot be written to disk.
# Network errors raised by urlopen propagate to the caller unchanged.
def getHtml(url):
    # FIX: close the HTTP response deterministically via a context
    # manager; the original left it to be reclaimed by garbage
    # collection, leaking the socket on every call.
    with urllib.request.urlopen(url) as page:
        html = page.read()

    try:
        # Save the XML payload to the scratch file.
        with open(xml_file, 'wb') as xml_file_handle:
            xml_file_handle.write(html)
    except IOError as err:
        print("IO error: " + str(err))
        return -1

    return 0


# phrase data from xml
# Parse the amap XML response saved in `xml_file` and append one row per
# POI to the module-level worksheet `sheet1`, positioning rows with the
# global cursor `all_index` (so numbering continues across pages and
# input rows). `owner` is written into column 7 of every POI row.
#
# Returns the total record count reported by the API's <count> element
# (as a string), or the initial value 1 when no <response> node exists.
def parseXML(owner):
    total_rec = 1  # record count reported by the API; 1 = "not found yet"

    def _poi_text(poi, tag):
        # Text of the first <tag> child, or "" when amap omits the
        # optional element (indexing the empty NodeList raises IndexError).
        try:
            return poi.getElementsByTagName(tag)[0].childNodes[0].nodeValue
        except IndexError:
            return ""

    try:
        # NOTE(review): `file_name` is opened for append but never written
        # to; the open is kept only to preserve the original side effect
        # of creating the file.
        with open(file_name, 'a') as file_handle:
            dom = minidom.parse(xml_file)
            # getElementsByTagName returns a NodeList of <response> nodes.
            root = dom.getElementsByTagName("response")
            for node in root:
                total_rec = node.getElementsByTagName("count")[0].childNodes[0].nodeValue

                pois = node.getElementsByTagName("pois")
                for poi in pois[0].getElementsByTagName('poi'):
                    # <name> and <location> are treated as mandatory (an
                    # IndexError would propagate); the rest fall back to "".
                    name = poi.getElementsByTagName("name")[0].childNodes[0].nodeValue
                    address = _poi_text(poi, "address")
                    tel = _poi_text(poi, "tel")
                    pname = _poi_text(poi, "pname")
                    cityname = _poi_text(poi, "cityname")
                    adname = _poi_text(poi, "adname")
                    location = poi.getElementsByTagName("location")[0].childNodes[0].nodeValue
                    # Write this POI into the next Excel row, advancing
                    # the shared module-level cursor.
                    global all_index
                    all_index = all_index + 1
                    row1 = sheet1.row(all_index)
                    row1.write(0, name, style)
                    row1.write(1, address, style)
                    row1.write(2, tel, style)
                    row1.write(3, pname, style)
                    row1.write(4, cityname, style)
                    row1.write(5, adname, style)
                    row1.write(6, location, style)
                    row1.write(7, owner, style)

    except IOError as err:
        # BUG FIX: the original used a Python 2 style bare `print`
        # statement split over two lines, which in Python 3 evaluates to
        # two no-op expressions and silently discarded the message.
        print("IO error: " + str(err))

    return total_rec


if __name__ == '__main__':
    import os  # local import, used only here to build the output path

    # Process every sheet of the input workbook, in original sheet order.
    worksheets = data.sheet_names()
    # Summary sheet: one row per input row across all sheets, with owner,
    # city code, POI count and the source sheet name.
    sheet2 = book.add_sheet('All Data')
    row2 = sheet2.row(0)
    row2.write(0, 'Owner')
    row2.write(1, 'city')
    row2.write(2, 'count')
    row2.write(3, 'area')
    sheet2.col(0).width = 10000
    sheet2.col(1).width = 10000
    sheet2.col(2).width = 10000
    sheet2.col(3).width = 5000
    summary_index = 1  # next free row in the summary sheet
    total_record = 0
    for worksheet_name in worksheets:
        sheet = data.sheet_by_name(worksheet_name)
        nrows = sheet.nrows
        owner = ''
        pre_owner = ''  # last non-blank owner, carried over merged cells
        # One detail sheet per input sheet; parseXML appends POI rows to
        # it via the module-level name `sheet1`.
        sheet1 = book.add_sheet(worksheet_name)
        row1 = sheet1.row(0)
        row1.write(0, 'CORPNAME')
        row1.write(1, 'ADDRESS')
        row1.write(2, 'TEL')
        row1.write(3, 'pname')
        row1.write(4, 'cityname')
        row1.write(5, 'adname')
        row1.write(6, 'location')
        row1.write(7, 'owner')
        sheet1.col(0).width = 10000
        sheet1.col(1).width = 10000
        sheet1.col(2).width = 10000
        sheet1.col(3).width = 5000
        sheet1.col(4).width = 5000
        sheet1.col(5).width = 5000
        sheet1.col(6).width = 5000
        sheet1.col(7).width = 2000
        all_index = 0  # reset the global row cursor for this new sheet
        for i in range(nrows):
            # Column 1: amap city code; column 0: owner label, which may
            # be blank when the source column uses merged cells.
            keywords = sheet.row(i)[1].value
            # Carry the previous non-blank owner forward so merged cells
            # inherit it; `temp_owner` is the merged-cell-aware value.
            if owner.strip():
                pre_owner = owner
            temp_owner = owner if owner.strip() else pre_owner
            owner = sheet.row(i)[0].value
            # NOTE(review): `key=*****` is a redacted placeholder — a
            # valid amap API key is required.
            url_amap = 'http://restapi.amap.com/v3/place/text?&keywords=&types=010800&city='+ urllib.parse.quote(
                keywords) +'&citylimit=true&&output=xml&offset=20&page=1&key=*****&extensions=base'
            if getHtml(url_amap) == 0:
                print('parsing page 1 ... ...')
                # Pass temp_owner instead of owner here when the owner
                # column contains merged cells.
                total_record = int(parseXML(owner))
                # FIX: floor division makes page_number an int directly
                # (the original relied on float division plus int()). The
                # +2 / +1 offsets exist because range() below starts at 2
                # and excludes its upper bound.
                if (total_record % each_page_rec) != 0:
                    page_number = total_record // each_page_rec + 2
                else:
                    page_number = total_record // each_page_rec + 1
                # Retrieve the remaining pages.
                for each_page in range(2, page_number):
                    print('parsing page ' + str(each_page) + ' ... ...')
                    url_amap = url_amap.replace('page=' + str(each_page - 1), 'page=' + str(each_page))
                    getHtml(url_amap)
                    total_record = int(parseXML(owner))
                    if total_record == 0:
                        break

            else:
                # BUG FIX: the original Python 2 style bare `print`
                # statement was a silent no-op under Python 3; the
                # failure was never reported.
                print('error: fail to get xml from amap')
            # Summary row for this input row.
            row2 = sheet2.row(summary_index)
            # Use temp_owner instead of owner when the column has merged cells.
            row2.write(0, owner)
            row2.write(1, keywords)
            row2.write(2, total_record)
            row2.write(3, worksheet_name)
            sheet2.col(0).width = 10000
            sheet2.col(1).width = 10000
            sheet2.col(2).width = 10000
            sheet2.col(3).width = 5000
            summary_index = summary_index + 1
    # BUG FIX: `inforst` has no trailing slash, so the original
    # `inforst + 'result.xls'` produced '...POIresult.xls'; join the
    # path components instead.
    book.save(os.path.join(inforst, 'result.xls'))
    book.save(TemporaryFile())
版权声明:本文为博主原创文章,未经博主允许不得转载。

Python微博地点签到大数据实战(三)大数据利器:爬虫

很多情况下你想要的东西就在网上,比如现在我想获取获得高德地图POI点的火星坐标,随便搜索一下就找到了这个网站:http://www.poi86.com...

高德API+Python解决租房问题

高德API+Python解决租房问题 一、课程介绍 1. 课程背景 课程来自一段租房血泪史(夸张):事情是这样的,笔者是接着念大四准备考研,而室友是应届毕业在找工作,说白了就是都没有...

基于高德地图api和Python的区县地理边界坐标提取

在工作中,经常想用到类似于地热图的方式进行数据展示,奈何工作环境是内网,无法在线进行地图关联,没办法,只好自己想办法上网找边界坐标。 查了很多文档和费心以后,最终发现高德地图api的方法和方式最简单,...

基于python和amap(高德地图)web api的爬虫,用于搜索某POI点

目的: 通过Python实现的爬虫技术,及高德地图提供的web api,来获取地图上的POI点及其相关信息 方法: 1.通过Python的urllib模块来处理网络请求和响应,向高德地图发送请求,并接...

python利用basemap叠加地图

# -*- coding: utf-8 -*- ''' Created on Sat Sep 19 16:22:59 2015 @author: liangxw ''' from __future_...

[Python]利用高德地图api实现经纬度与地址的批量转换

我们都知道,可以使用高德地图api实现经纬度与地址的转换。那么,当我们有很多个地址与经纬度,需要批量转换的时候,应该怎么办呢? 在这里,选用高德Web服务的API,其中的地址/逆地址编码,可以实现经纬...

从高德地图抓取数据

老板是搞交通的,要我从高德上抓数据放到自己的数据库中。说做就做! 一,工具 1,VisualStudio2010 或其他的HTML编辑器 2,访问数据库 ...

百度地图POI抓取——python

之前网上有看到用javascript写的抓取百度地图POI的程序,效果还不错。作为python初学者,尝试用python写了下面代码,如有错误,还望大神们提供建议。 # -*- coding: utf...

python在大量地图poi数据中进行位置查找:来源于Rtree的思想

被给予了一个腾讯的在北京的poi数据,里面有具体地理位置的用户打分数据格式如下 10003977431176793344 北京东方美服装租赁公司 北京市朝阳区广渠路66号双井桥百环家园18...

02.21获取高德地图API返回的数据

2017.02.21申请高德地图开发者,然后接入它的地图服务。做毕业设计需要其中的地理数据,不知道从哪些下手,然后看高德的JS API文档以及Web API。1.以前用过最土的一种方法,就是使用高德的...
内容举报
返回顶部
收藏助手
不良信息举报
您举报文章:Python获取高德地图POI
举报原因:
原因补充:

(最多只允许输入30个字)