Get the machine's public IP address
import requests, re

# Method 1: fetch the Sohu IP lookup page and pull out every IPv4-looking string
text = requests.get("http://txt.go.sohu.com/ip/soip").text
ip_address = re.findall(r'\d+\.\d+\.\d+\.\d+', text)
print("Public IP ===>", ip_address)

# Method 2: same idea against the Sohu cityjson endpoint, taking the first match
text = requests.get("http://pv.sohu.com/cityjson?ie=utf-8").text
ip_address = re.search(r'\d+\.\d+\.\d+\.\d+', text).group(0)
print("Public IP ===>", ip_address)
# Other public IP lookup services:
# http://ip.360.cn/IPShare/info, http://myip.com.tw/, http://ip.xianhua.com.cn/,
# http://www.ip.cn/, http://www.123cha.com/ip,
# http://www.ip38.com/, http://ip.chinaz.com
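These services return the caller's IP embedded in pages with different layouts. A minimal sketch (the service list and the assumption that each page contains the IP as plain text are mine, not from the original) is to try each URL in turn and take the first IPv4-looking match:

import re
import requests

# Assumed candidate services; any of the URLs listed above could be swapped in.
CANDIDATE_SERVICES = [
    "http://pv.sohu.com/cityjson?ie=utf-8",
    "http://ip.chinaz.com",
]

def my_public_ip(urls=CANDIDATE_SERVICES, timeout=3):
    """Return the first IPv4-looking string found on any lookup page, or None."""
    for url in urls:
        try:
            text = requests.get(url, timeout=timeout).text
        except requests.RequestException:
            continue  # service unreachable, try the next one
        match = re.search(r'\d{1,3}(?:\.\d{1,3}){3}', text)
        if match:
            return match.group(0)
    return None

print(my_public_ip())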
Convert a hostname to an IP address
import socket
hostname = 'www.baidu.com'
addr = socket.gethostbyname(hostname)
print("addr===>", addr)   # result: addr===> 14.215.177.39
# Get the local computer name
hostname = socket.gethostname()
print(hostname)           # result: DESKTOP-D6K8STR
# Get the local LAN IP
ip = socket.gethostbyname(hostname)
print(ip)                 # result: 192.168.1.38
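Note that socket.gethostbyname(socket.gethostname()) can return 127.0.0.1 on machines whose hostname resolves to the loopback entry in the hosts file. A common workaround (a sketch, not part of the original snippet) is to open a UDP socket toward any routable address and read back the local address the OS picked; no packet is actually sent:

import socket

def local_ip():
    """Return the LAN IP the OS would use for outbound traffic (no data is sent)."""
    s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    try:
        s.connect(("8.8.8.8", 80))   # any routable address works; nothing is transmitted
        return s.getsockname()[0]
    finally:
        s.close()

print(local_ip())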
# coding:utf-8
# Batch-resolve a file of URLs (one "http://..." per line) into IPs.
import socket

def URL2IP():
    for oneurl in urllist.readlines():
        url = str(oneurl.strip())[7:]   # drop the leading "http://"
        print(url)
        try:
            ip = socket.gethostbyname(url)
            print(ip)
            iplist.write(str(ip) + "\n")
        except socket.gaierror:
            print("this URL 2 IP ERROR")

try:
    urllist = open(r"D:\urllist.txt", "r")
    iplist = open(r"D:\iplist.txt", "w")
    URL2IP()
    urllist.close()
    iplist.close()
    print("complete !")
except IOError:
    print("ERROR !")
Get IP information (country, city, etc.)
# Database download links
# http://geolite.maxmind.com/download/geoip/database/GeoLite2-Country.tar.gz
# http://geolite.maxmind.com/download/geoip/database/GeoLite2-City.tar.gz
import geoip2.database

reader = geoip2.database.Reader('./GeoLite2-Country.mmdb')
c = reader.country('14.215.177.39')
print(c.country.names)               # result: {'ru': 'Китай', 'fr': 'Chine', 'en': 'China', 'de': 'China', 'zh-CN': '中国', 'pt-BR': 'China', 'ja': '中国', 'es': 'China'}
print(c.country.names.get('zh-CN'))  # result: 中国

reader = geoip2.database.Reader('GeoLite2-City.mmdb')
c = reader.city('14.215.177.39')
print(c.city.names)                  # result: {'ru': 'Шэньчжэнь', 'fr': 'Shenzhen', 'en': 'Shenzhen', 'de': 'Shenzhen', 'zh-CN': '深圳市', 'pt-BR': 'Shenzhen', 'ja': '深セン市', 'es': 'Shenzhen'}
print(c.city.names.get('zh-CN'))     # result: 深圳市
print(c.country.name)                # result: China
print(c.city.name)                   # result: Shenzhen
print("Subdivisions:", c.subdivisions.most_specific.name)  # result: Subdivisions: Guangdong
print("Latitude:", c.location.latitude)                    # result: Latitude: 22.5333
print("Longitude:", c.location.longitude)                  # result: Longitude: 114.1333
# -*- coding: utf-8 -*-
# Query Taobao's IP lookup API for geographic / ISP info about an IP.
import requests

def checkip(ip):
    URL = 'http://ip.taobao.com/service/getIpInfo.php'
    try:
        r = requests.get(URL, params=ip, timeout=3)
    except requests.RequestException as e:
        print(e)
    else:
        json_data = r.json()
        if json_data['code'] == 0:
            print('Country: ' + json_data['data']['country'])
            print('Area: ' + json_data['data']['area'])
            print('Province: ' + json_data['data']['region'])
            print('City: ' + json_data['data']['city'])
            print('ISP: ' + json_data['data']['isp'])
        else:
            print('Lookup failed, please try again later!')

ip = {'ip': '202.102.193.68'}
checkip(ip)
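If several addresses need to be looked up, a variant that returns the parsed data instead of printing it is easier to reuse (a sketch; the response layout is assumed to match the one above, and the helper name is mine):

import requests

def ip_info(ip_str, timeout=3):
    """Return the 'data' dict from the Taobao IP API, or None on any failure."""
    try:
        r = requests.get('http://ip.taobao.com/service/getIpInfo.php',
                         params={'ip': ip_str}, timeout=timeout)
        payload = r.json()
    except (requests.RequestException, ValueError):
        return None
    return payload['data'] if payload.get('code') == 0 else None

for addr in ['202.102.193.68', '14.215.177.39']:
    info = ip_info(addr)
    if info:
        print(addr, info.get('country'), info.get('city'), info.get('isp'))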
Fetch every province, city, county, town, and village in China with Python
#!/usr/bin/python3
# -*- coding: utf-8 -*-
# Reposted from: https://blog.csdn.net/qq_26656329/article/details/78182535
"""
Build the full list of Chinese administrative divisions
from the National Bureau of Statistics website.
"""
import sys
import os
import re
# from urllib import request
import requests
from bs4 import BeautifulSoup

sys.path.insert(0, os.path.dirname(os.path.abspath(__file__)))

url = 'http://www.stats.gov.cn/tjsj/tjbz/tjyqhdmhcxhfdm/2016/'
header = {
    'Cookie': 'AD_RS_COOKIE=20080917',
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) '
                  'Chrome/58.0.3029.110 Safari/537.36'}
class GetHttp:
    # Thin wrapper around requests.get that swallows errors and returns '' on failure.
    def __init__(self, url, headers=None, charset='utf8'):
        if headers is None:
            headers = {}
        self._response = ''
        try:
            print(url)
            # self._response = request.urlopen(request.Request(url=url, headers=headers))
            self._response = requests.get(url=url, headers=headers)
        except Exception as e:
            print(e)
        self._c = charset

    @property
    def text(self):
        try:
            # return self._response.read().decode(self._c)
            self._response.encoding = self._c   # honour the charset the caller passed (the stats pages are GBK)
            return self._response.text
        except Exception as e:
            print(e)
            return ''
def provincetr(u, he, lists):
    # Provinces and municipalities (top level)
    t = GetHttp(u, he, 'gbk').text
    if t:
        soup = BeautifulSoup(t, 'html.parser')
        for i in soup.find_all(attrs={'class': 'provincetr'}):
            for a in i.find_all('a'):
                id = re.sub(r"\D", "", a.get('href'))
                lists[id] = {'id': id, 'name': a.text, 'pid': '0', 'pid1': '0', 'pid2': '0',
                             'pid3': '0', 'pid4': '0', 'code': id}
                # time.sleep(1 / 10)
    return lists
def citytr(u, he, lists):
    # Cities directly under each province
    l = lists.copy()
    for i in l:
        t = GetHttp(u + i + '.html', he, 'gbk').text
        if not t:
            continue
        soup = BeautifulSoup(t, 'html.parser')
        for v in soup.find_all(attrs={'class': 'citytr'}):
            id = str(v.find_all('td')[0].text)
            if id[0:4] not in lists.keys():
                lists[id[0:4]] = {'id': id[0:4], 'name': str(v.find_all('td')[1].text),
                                  'pid': '0', 'pid1': i, 'pid2': '0', 'pid3': '0', 'pid4': '0', 'code': id}
    return lists
def countytr(u, he, lists):
    # Counties / districts under each city
    l = lists.copy()
    for i in l:
        if len(i) != 4:          # only city-level (4-digit) ids have a county page
            continue
        t = GetHttp(u + i[0:2] + '/' + i + '.html', he, 'gbk').text
        if not t:
            continue
        soup = BeautifulSoup(t, 'html.parser')
        for v in soup.find_all(attrs={'class': 'countytr'}):
            id = str(v.find_all('td')[0].text)
            if id[0:6] not in lists.keys():
                lists[id[0:6]] = {'id': id[0:6], 'name': str(v.find_all('td')[1].text),
                                  'pid': '0', 'pid1': l[i]['pid1'], 'pid2': i, 'pid3': '0', 'pid4': '0',
                                  'code': id}
    return lists
def towntr(u, he, lists):
    # Towns / townships under each county
    l = lists.copy()
    for i in l:
        if len(i) != 6:          # only county-level (6-digit) ids have a town page
            continue
        t = GetHttp(u + i[0:2] + '/' + i[2:4] + '/' + i + '.html', he, 'gbk').text
        if not t:
            continue
        soup = BeautifulSoup(t, 'html.parser')
        for v in soup.find_all(attrs={'class': 'towntr'}):
            id = str(v.find_all('td')[0].text)
            if id[0:9] not in lists.keys():
                lists[id[0:9]] = {'id': id[0:9], 'name': str(v.find_all('td')[1].text), 'pid': '0',
                                  'pid1': l[i]['pid1'], 'pid2': l[i]['pid2'], 'pid3': i, 'pid4': '0',
                                  'code': id}
    return lists
def villagetr(u, he, lists):
    # Villages / neighbourhoods under each town
    l = lists.copy()
    for i in l:
        if len(i) != 9:          # only town-level (9-digit) ids have a village page
            continue
        t = GetHttp(u + i[0:2] + '/' + i[2:4] + '/' + i[4:6] + '/' + i + '.html', he, 'gbk').text
        if not t:
            continue
        soup = BeautifulSoup(t, 'html.parser')
        for v in soup.find_all(attrs={'class': 'villagetr'}):
            # village rows have three cells: code, urban-rural classification code, name
            id = str(v.find_all('td')[0].text)
            if id[0:12] not in lists.keys():
                lists[id[0:12]] = {'id': id[0:12], 'name': str(v.find_all('td')[2].text), 'pid': '0',
                                   'pid1': l[i]['pid1'], 'pid2': l[i]['pid2'], 'pid3': l[i]['pid3'],
                                   'pid4': i, 'code': id}
    return lists
p = provincetr(u=url, he=header, lists={})
print('provinces done')
c = citytr(u=url, he=header, lists=p)
print('cities done')
o = countytr(u=url, he=header, lists=c)
print('counties done')
t = towntr(u=url, he=header, lists=o)
print('towns done')
v = villagetr(u=url, he=header, lists=t)
print('villages done')
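The final v dict holds every division keyed by its code, with the name and parent ids as values. A minimal follow-up sketch (the output file name is an assumption) dumps it to JSON so the crawl does not have to be repeated:

import json

with open('china_divisions.json', 'w', encoding='utf-8') as f:
    json.dump(v, f, ensure_ascii=False, indent=2)   # keep Chinese names readable in the file
print('saved', len(v), 'divisions')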