Python crawler: scraping recruitment data from Lagou (拉勾网) and 51job (前程无忧网) (multithreading, database storage, anti-crawling countermeasures)

The code below is a single crawler that combines scraping of job postings from both Lagou and 51job. A full line-by-line walkthrough would be fairly involved, so the complete source is posted directly:
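
Before running it, the third-party packages the script imports need to be installed. A minimal setup sketch (package names inferred from the import block below; DBUtils is kept on the 1.x series because the code uses the old DBUtils.PooledDB import path):

pip install requests lxml beautifulsoup4 pymysql "DBUtils<2"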

"""
Crawlers for Lagou (拉勾网) and 51job (前程无忧网)
Author: jc
Date: 2020-07-17
"""
import time
import configparser
import bs4
import csv
import requests
from lxml import etree
import threading
import random
import datetime
import pymysql
from queue import Queue
from threading import Thread
from bs4 import BeautifulSoup
from requests.packages.urllib3.exceptions import InsecureRequestWarning
from DBUtils.PooledDB import PooledDB

# Suppress insecure-request warnings (the spiders call requests with verify=False)
requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
i=1

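# Job51Spider: walks 51job search results city by city and keyword by keyword,
# spawns one thread per job-detail link, and appends the parsed fields (title,
# location, salary, experience, education, company size/type, industry,
# description) to a CSV file on the desktop.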
class Job51Spider:
    headers = {
        "User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_6) AppleWebKit/537.36 (KHTML, like Gecko) "
                      "Chrome/65.0.3325.181 Safari/537.36",
            }
    jobs = ['软件', '图像', '自然语言处理', '人工智能', '学习', '前端', '后端', '数据', '算法', '测试', '网络安全', '运维', 'UI', '区块链', '网络', '全栈',
            '硬件', 'Java', 'C++', 'PHP', 'C#', '.NET', 'Hadoop', 'Python', 'Perl', 'Ruby', 'Nodejs', 'Go',
            'Javascript',
            'Delphi', 'jsp', 'sql']

    citys = ['广州','上海','北京', '深圳', '成都', '南宁','合肥','杭州', '南京', '苏州', '西安', '长沙', '重庆','东莞', '无锡', '福州', '大连', '宁波','武汉',
             '郑州', '济南', '天津', '佛山', '昆山', '沈阳', '青岛', '珠海', '厦门', '昆明', '南昌', '常州', '中山', '惠州', '长春', '哈尔滨',
             '嘉兴', '石家庄', '贵阳', '南通', '张家港', '兰州', '海口', '江门', '温州', '徐州', '扬州', '太原', '烟台', '镇江', '泉州', '唐山', '绵阳',
             '太仓', '洛阳', '金华', '台州', '湖州', '柳州', '威海', '芜湖', '义乌', '保定', '泰州', '秦皇岛', '咸阳', '株洲', '韶关', '常熟', '澳门',
             '湘潭', '宜昌', '香港', '盐城', '潍坊', '襄阳', '绍兴', '马鞍山', '三亚', '汕头', '宿迁', '鹰潭', '乌鲁木齐', '连云港', '呼和浩特', '德阳',
             '岳阳',
             '靖江', '延安', '莆田', '新乡', '桂林', '盘锦', '鄂州', '滁州', '玉林', '黄石', '邢台', '云浮', '大理', '九江', '自贡', '济宁', '漳州',
             '揭阳',
             '银川', '梅州', '鄂尔多斯', '宜春', '上饶', '鞍山', '枣庄', '六安', '荆门', '赣州', '龙岩', '西宁', '孝感', '德州', '南平', '泰安', '菏泽',
             '阜阳', '拉萨', '清远', '宿州', '丽水', '铜陵', '湛江', '沧州', '黄山', '阿克苏', '舟山', '安庆', '临沂', '衢州', '南阳', '肇庆', '随州',
             '吉安', '兴安盟', '萍乡', '攀枝花', '承德', '上海']





    def run(self):
        print("开始爬取")
        conf = configparser.ConfigParser()
        conf.read('C:/Users/Administrator/source/repos/51job爬虫/51job爬虫/Spider51/conf.ini')
        for city in self.citys:
            for job in self.jobs:
                citycode = conf['citycode'][city]
                print("获取到的城市代码为", citycode)
                page = 1
                # Get the total number of result pages first
                url = "https://search.51job.com/list/{},000000,0000,00,9,99,{},2," \
                      "{}.html?lang=c&stype=&postchannel=0000&workyear=99&cotype=99&degreefrom=99&jobterm=99&companysize=99" \
                      "&providesalary=99&lonlat=0%2C0&radius=-1&ord_field=0&confirmdate=9&fromType=&dibiaoid=0&address=&line" \
                      "=&specialarea=00&from=&welfare=".format(citycode, job, page)
                a = requests.get(url=url, headers=self.headers)
                a.encoding = 'gbk'
                try:
                    html = etree.HTML(a.text)
                    maxpage = html.xpath('//*[@id="resultList"]/div[2]/div[5]/text()')[2].replace('/', '').strip()
                    maxpage = int(maxpage)  # total page count (int() instead of eval() on scraped text)
                    # Walk through every result page
                    while True:
                        url = "https://search.51job.com/list/{},000000,0000,00,9,99,{},2," \
                              "{}.html?lang=c&stype=&postchannel=0000&workyear=99&cotype=99&degreefrom=99&jobterm=99&companysize=99" \
                              "&providesalary=99&lonlat=0%2C0&radius=-1&ord_field=0&confirmdate=9&fromType=&dibiaoid=0&address=&line" \
                              "=&specialarea=00&from=&welfare=".format(citycode, job, page)
                        self.get_urls(url)
                        print('多线程+' + str(page) + '页完成--' + city + job)
                        page = page + 1
                        if page == maxpage + 1:
                            break
                except Exception:
                    # Skip this city/keyword pair if the search page cannot be parsed
                    pass

    def get_urls(self, url):
        try:
            a = requests.get(url=url, headers=self.headers)
            a.encoding = 'gbk'
            html = etree.HTML(a.content)
            urls = html.xpath('//*[@id="resultList"]/div[@class="el"]/p/span/a')
            for i in urls:
                # Spawn one worker thread per job-detail link
                t = threading.Thread(target=self.get_job_detail, args=(i.get('href'),))
                t.start()
                time.sleep(0.03)  # brief pause between thread launches
        except:
            time.sleep(2)
            self.get_urls(url)

    def load(self, result):  # persist a single record to CSV (helper, not called in run())
        with open('C:/Users/Administrator/Desktop/前程无忧数据.csv', 'a', newline='') as csv_file:
            text=[]
            text.append(result["title"])
            text.append(result["salary"])
            text.append(result["experience"])
            text.append(result["education"])
            text.append(result["companytype"])
            text.append(result["industry"])
            text.append(result["description"])
            writer = csv.writer(csv_file)
            writer.writerow(text)

    def get_job_detail(self, url):
        if 'jobs' not in url:
            return
        try:
            while True:
                try:
                    a = requests.get(url=url, headers=self.headers)
                    a.encoding = 'gbk'
                    htmls = bs4.BeautifulSoup(a.text, 'lxml')
                    number = htmls.find(class_="com_tag").select("p")[1]["title"]  # company size
                    place = htmls.find(class_="msg ltype")["title"][0:5].split("|")[0]  # work location
                    industry = htmls.find(class_="com_tag").select("p")[2]["title"]  # company industry
                    html = etree.HTML(a.text)
                    break
                except:
                    time.sleep(1)
            try:
                pay = html.xpath('/html/body/div[3]/div[2]/div[2]/div/div[1]/strong/text()')[0].strip()
            except:
                pay = ''
            list1 = html.xpath('/html/body/div[3]/div[2]/div[2]/div/div[1]/p[2]/@title')[0].split("|")

            list1 = [i.strip() for i in list1]
            education = None
            experience = None
            # Only keep the fields that look like a degree / experience requirement
            if '科' in list1[2] or '专' in list1[2] or '硕' in list1[2]:
                education = list1[2]
            if '验' in list1[1] or '在' in list1[1]:
                experience = list1[1]


            result = {
                'provider': '前程无忧网',
                'title': html.xpath('/html/body/div[3]/div[2]/div[2]/div/div[1]/h1/text()')[0].strip(),
                'salary': pay,
                'experience': experience,
                'education': education,
                'companytype': html.xpath('/html/body/div[3]/div[2]/div[4]/div[1]/div[2]/p[1]/text()')[0].strip(),
                'industry': html.xpath('/html/body/div[3]/div[2]/div[4]/div[1]/div[2]/p[3]/text()')[0].strip(),
                'description': html.xpath('/html/body/div[3]/div[2]/div[3]/div[1]/div')[0].xpath(
                    'string(.)').strip().replace('"', '').strip().replace('\t', '').replace('\r', '').replace('\n', '')
            }
            print("爬取的一项工作信息:",industry)

            with open('C:/Users/Administrator/Desktop/前程无忧数据.csv', 'a', newline='') as csv_file:
                text = []
                text.append(result["title"])
                text.append(place)
                text.append(result["salary"])
                text.append(result["experience"])
                text.append(result["education"])
                text.append(number)
                text.append(result["companytype"])
                text.append(industry)
                text.append(result["description"])
                writer = csv.writer(csv_file)
                writer.writerow(text)
            return
        except:
            time.sleep(3)
            return


# Decorator that reports how long the wrapped function takes to run
def run_time(func):
    def wrapper(*args, **kw):
        start = time.time()
        func(*args, **kw)
        end = time.time()
        print('running', end - start, 's')
    return wrapper


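# LagouSpider: crawls Lagou category pages through free proxies pulled from
# kuaidaili (the anti-crawling part), parses each listing page with
# BeautifulSoup, and bulk-inserts the records into the MySQL table
# lagou_shanghai through a PooledDB connection pool using several worker threads.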
class LagouSpider:
    def __init__(self):
        self.target_url = "https://www.lagou.com/wuhan/"  # entry page to crawl (Wuhan listings here)
        self.proxy_url = "https://www.kuaidaili.com/free/inha/"
        self.target_file = "../lagou/job_info.txt"
        self.thread_num = 6
        self.db_pool = self.mysql_connection()
        self.user_agent = [
            "Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; AcooBrowser; .NET CLR 1.1.4322; .NET CLR 2.0.50727)",
            "Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 6.0; Acoo Browser; SLCC1; .NET CLR 2.0.50727; Media Center PC 5.0; .NET CLR 3.0.04506)",
            "Mozilla/4.0 (compatible; MSIE 7.0; AOL 9.5; AOLBuild 4337.35; Windows NT 5.1; .NET CLR 1.1.4322; .NET CLR 2.0.50727)",
            "Mozilla/5.0 (Windows; U; MSIE 9.0; Windows NT 9.0; en-US)",
            "Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Win64; x64; Trident/5.0; .NET CLR 3.5.30729; .NET CLR 3.0.30729; .NET CLR 2.0.50727; Media Center PC 6.0)",
            "Mozilla/5.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0; WOW64; Trident/4.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; .NET CLR 1.0.3705; .NET CLR 1.1.4322)",
            "Mozilla/4.0 (compatible; MSIE 7.0b; Windows NT 5.2; .NET CLR 1.1.4322; .NET CLR 2.0.50727; InfoPath.2; .NET CLR 3.0.04506.30)",
            "Mozilla/5.0 (Windows; U; Windows NT 5.1; zh-CN) AppleWebKit/523.15 (KHTML, like Gecko, Safari/419.3) Arora/0.3 (Change: 287 c9dfb30)",
            "Mozilla/5.0 (X11; U; Linux; en-US) AppleWebKit/527+ (KHTML, like Gecko, Safari/419.3) Arora/0.6",
            "Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.8.1.2pre) Gecko/20070215 K-Ninja/2.1.1",
            "Mozilla/5.0 (Windows; U; Windows NT 5.1; zh-CN; rv:1.9) Gecko/20080705 Firefox/3.0 Kapiko/3.0",
            "Mozilla/5.0 (X11; Linux i686; U;) Gecko/20070322 Kazehakase/0.4.5",
            "Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.0.8) Gecko Fedora/1.9.0.8-1.fc10 Kazehakase/0.5.6",
            "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/535.11 (KHTML, like Gecko) Chrome/17.0.963.56 Safari/535.11",
            "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_7_3) AppleWebKit/535.20 (KHTML, like Gecko) Chrome/19.0.1036.7 Safari/535.20",
            "Opera/9.80 (Macintosh; Intel Mac OS X 10.6.8; U; fr) Presto/2.9.168 Version/11.52",
        ]
        self.pool = Queue()
        self.head_urls = Queue()
        self.jobs = []

    def set_proxy(self):
        """
        Scrape the kuaidaili free-proxy list and fill the proxy pool with ip:port entries
        :return:
        """
        proxy_net = self.proxy_url + str(random.randint(1, 100))
        try:
            r = requests.get(proxy_net, headers={"User-Agent": random.choice(self.user_agent)}, verify=False, timeout=1)
        except Exception as e:
            print("代理网站访问失败!")
        else:
            r.encoding = "UTF-8"
            if r.status_code == 200:
                content = BeautifulSoup(r.text, "html.parser")
                tr_divs = content.select("table.table tbody tr")
                tr_divs = [tr_div.text.split("\n") for tr_div in tr_divs]
                for tr_div in tr_divs:
                    ip = tr_div[1]
                    port = tr_div[2]
                    http = tr_div[4].lower()
                    ip_url_next = '://' + ip + ':' + port
                    proxy = {http: http + ip_url_next}
                    self.pool.put(proxy)

    def get_proxy(self):
        while self.pool.empty():
            self.set_proxy()
        proxy = self.pool.get()
        return proxy

    def send_request(self, url):
        """
        Send the request through a rotating proxy, retrying until an HTTP 200 response is returned
        :param url:
        :return:
        """
        requests.adapters.DEFAULT_RETRIES = 5  # raise the connection retry limit
        code = 0
        while code != 200:
            try:
                req = requests.get(url, headers={"User-Agent": random.choice(self.user_agent)}, proxies=self.get_proxy()
                                   , verify=False, timeout=15)
            except Exception:
                print("抓取网页{}异常".format(url))
            else:
                code = req.status_code
        req.encoding = "UTF-8"
        html = req.text

        return html

    def parse_head_page(self):
        """
        Parse the Lagou home page and collect the listing URL of every job-keyword category
        :return:
        """
        t = threading.currentThread()
        print("当前线程:Thread id=%s,Thread name=%s,抓取url:%s" % (t.ident, t.getName(), self.target_url))
        content = self.send_request(self.target_url)
        soup = BeautifulSoup(content, "html.parser")
        tags = soup.find_all("div", class_="mainNavs")
        for tag in tags:
            div_as = tag.find_all("a", class_="", attrs={"data-lg-tj-cid": "idnull"})
            for div_a in div_as:
                job = div_a.text
                url = div_a['href']
                # print("job=", job, "url=", url)
                self.head_urls.put(url)
        print("拉勾网主要工作岗位类别抓取完毕!")

    def parse_details_page(self):
        while not self.head_urls.empty():
            self.parse_details(self.head_urls.get())

    def parse_details(self, url):
        """
        Parse every job posting on one keyword listing page, then follow its pagination links
        :param url: keyword listing URL, e.g. https://www.lagou.com/shanghai-zhaopin/Python/
        :return:
        """
        t = threading.currentThread()
        print("当前线程:Thread id=%s,Thread name=%s,抓取url:%s" % (t.ident, t.getName(), url))

        job_details = []
        res = self.send_request(url)
        url_bs = BeautifulSoup(res, "html.parser")
        empty_judge = url_bs.select("ul.item_con_list div.empty_position div.txt")
        job_list = url_bs.select("ul.item_con_list li.con_list_item a.position_link h3")

        if empty_judge:
            print("暂时没有符合该搜索条件的职位,跳过")
        elif job_list:
            # job titles (jobs)
            jobs = [job_h3.text for job_h3 in job_list]

            # search keyword for this page (keyword)
            keyword = url_bs.select("div.keyword-wrapper input")
            keyword = keyword[0]["value"]

            # job locations (addrs)
            addrs = url_bs.select("span.add em")
            addrs = [addr.text for addr in addrs]

            # company names (companys)
            companys = url_bs.select("div.company_name a")
            companys = [c.text for c in companys]

            # job tags (true_tags)
            tagss = url_bs.select("div.list_item_bot div.li_b_l")
            true_tags = []
            for tags in tagss:
                spans = tags.select("span")
                tag_content = ",".join([span.text for span in spans])  # join this posting's tag texts with commas
                true_tags.append(tag_content)

            # salaries (moneys)
            moneys = url_bs.select("span.money")
            moneys = [money.text for money in moneys]

            # experience requirements (exps) and education requirements (edus)
            exps_and_edus = url_bs.select("div.p_bot div.li_b_l")
            exps_and_edus = [ee.text.strip() for ee in exps_and_edus]  # strip surrounding whitespace
            edus = [edu.split("/")[1].strip() for edu in exps_and_edus]
            expss = [edu.split("/")[0].strip() for edu in exps_and_edus]
            exps = [exp.split("\n")[1].strip() for exp in expss]

            # company type/tags (types), funding stage (levels) and company size (csizes)
            types_and_levels = url_bs.select("div.industry")
            tl = [tal.text.strip() for tal in types_and_levels]
            types = []  # company type / tags
            levels = []  # funding stage
            csizes = []  # company size
            for tals in tl:
                t_a_l = tals.split(" / ")
                if len(t_a_l) != 3:
                    types.append("无")
                    levels.append("无")
                    csizes.append("无")
                else:
                    types.append(t_a_l[0])
                    levels.append(t_a_l[1])
                    csizes.append(t_a_l[2])

            # benefits (benefitss)
            benefitss = url_bs.select("div.list_item_bot div.li_b_r")
            benefitss = ["/".join(benefits.text.strip('“').strip('”').splitlines()) for benefits in benefitss]

            # publication dates (pub_dates), normalized to YYYY-MM-DD below
            pub_dates = url_bs.select("span.format-time")
            pub_dates_format = []
            for pub_date in pub_dates:
                if pub_date.text.find("天前") > 0:
                    time_delta = int(pub_date.text.split("天前")[0])
                    format_time = (datetime.datetime.now() - datetime.timedelta(days=time_delta)).strftime("%Y-%m-%d")
                elif pub_date.text.find(":") > 0:
                    format_time = (datetime.datetime.now()).strftime("%Y-%m-%d")
                else:
                    format_time = pub_date.text
                pub_dates_format.append(format_time)

            # Fields collected per record: keyword, jobs, addrs, companys, true_tags, moneys, edus, exps, types, levels, csizes, benefitss, pub_dates_format
            for i in range(len(jobs)):
                record = (keyword, jobs[i], addrs[i], companys[i], true_tags[i], moneys[i], edus[i], exps[i], types[i],
                          levels[i], csizes[i], benefitss[i], pub_dates_format[i])
                # self.jobs.append("^".join(record) + "\n")
                job_details.append(record)
            self.insert_table(job_details)

            div_as = url_bs.select("div.pager_container a")
            for a in div_as:
                if a.text == "下一页" and a["href"].startswith("http"):
                    next_page = a['href']
                    self.parse_details(next_page)
        else:
            print("本次没有获得job信息, 重试!")
            time.sleep(15)
            self.parse_details(url)

    def create_table(self):
        con = self.db_pool.connection()
        cur = con.cursor()
        # sql1 = "DROP TABLE IF EXISTS lagou_shanghai"
        sql2 = """
            CREATE TABLE IF NOT EXISTS lagou_shanghai (
                id int(11) NOT NULL AUTO_INCREMENT,
                t_keyword varchar(255) DEFAULT NULL COMMENT 'job keyword',
                t_job varchar(255) DEFAULT NULL COMMENT 'job title',
                t_addr varchar(255) DEFAULT NULL COMMENT 'work location',
                t_com varchar(255) DEFAULT NULL COMMENT 'company name',
                t_tag varchar(255) DEFAULT NULL COMMENT 'job tags',
                t_money varchar(255) DEFAULT NULL COMMENT 'salary',
                t_edu varchar(255) DEFAULT NULL COMMENT 'education requirement',
                t_exp varchar(255) DEFAULT NULL COMMENT 'experience requirement',
                t_type varchar(255) DEFAULT NULL COMMENT 'company type',
                t_level varchar(255) DEFAULT NULL COMMENT 'funding stage',
                t_csize varchar(255) DEFAULT NULL COMMENT 'company size',
                t_benefit varchar(255) DEFAULT NULL COMMENT 'benefits',
                t_pubdate varchar(255) DEFAULT NULL COMMENT 'publish date',
                PRIMARY KEY (id)
        ) ENGINE=InnoDB DEFAULT CHARSET=utf8;
        """
        try:
            # cur.execute(sql1)
            cur.execute(sql2)
            con.commit()
        except Exception as e:
            con.rollback()
            print("Error:写入数据库异常!", e)
        finally:
            cur.close()
            con.close()

    def insert_table(self, records):
        sql = "insert into lagou_shanghai(t_keyword,t_job,t_addr,t_com,t_tag,t_money,t_edu,t_exp,t_type,t_level,t_csize,t_benefit,t_pubdate) VALUES (%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s) "
        con = self.db_pool.connection()
        cur = con.cursor()
        try:
            cur.executemany(sql, records)
            con.commit()
        except Exception as e:
            con.rollback()
            print("Error:写入数据库异常!", e)
        finally:
            cur.close()
            con.close()

    def mysql_connection(self):
        """
        Create the MySQL connection pool shared by the worker threads
        :return:
        """
        pool = PooledDB(
            pymysql,
            self.thread_num,  # passed positionally this is mincached (connections opened up front), not a hard connection cap
            host='localhost',
            user='root',
            port=3306,
            passwd='123456',
            db='spider',
            use_unicode=True)
        return pool

    @run_time
    def main(self):
        self.create_table()

        ths = []
        th1 = Thread(target=self.parse_head_page)
        # th1.setDaemon(True)
        th1.start()
        ths.append(th1)
        time.sleep(5)

        for _ in range(self.thread_num):
            th = Thread(target=self.parse_details_page)
            th.setDaemon(True)
            th.start()
            ths.append(th)

        for th in ths:
            # wait for every thread to finish
            th.join()

        # with open(self.target_file, "a+", encoding="utf-8") as fo:
        #     fo.write("".join(self.jobs))


if __name__ == "__main__":
    spider = LagouSpider()
    spider.main()
    spider51 = Job51Spider()
    spider51.run()
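
The LagouSpider part assumes a local MySQL database named spider already exists; create_table only creates the lagou_shanghai table inside it. A minimal one-off setup sketch, assuming MySQL runs on localhost with the same credentials that mysql_connection hard-codes:

import pymysql

# Create the database the connection pool points at (db='spider')
con = pymysql.connect(host='localhost', port=3306, user='root', password='123456')
cur = con.cursor()
cur.execute("CREATE DATABASE IF NOT EXISTS spider DEFAULT CHARACTER SET utf8")
con.commit()
cur.close()
con.close()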


The conf.ini file referenced above holds the 51job city codes; its contents are as follows (a short lookup example follows the listing):
[citycode]
泰州 = 071800
北京 = 010000
上海 = 020000
广州 = 030200
深圳 = 040000
武汉 = 180200
西安 = 200200
杭州 = 080200
南京 = 070200
成都 = 090200
重庆 = 060000
东莞 = 030800
大连 = 230300
沈阳 = 230200
苏州 = 070300
昆明 = 250200
长沙 = 190200
合肥 = 150200
宁波 = 080300
郑州 = 170200
天津 = 050000
青岛 = 120300
济南 = 120200
哈尔滨 = 220200
长春 = 240200
福州 = 110200
三门峡 = 171800
三明 = 110700
三沙 = 101500
三亚 = 100300
山南 = 300500
汕头 = 030400
汕尾 = 032400
商洛 = 201100
商丘 = 171300
上饶 = 131200
韶关 = 031400
邵阳 = 191000
绍兴 = 080500
神农架 = 181700
十堰 = 180600
石河子 = 310800
石家庄 = 160200
石嘴山 = 290500
双鸭山 = 221100
朔州 = 210900
四平 = 240600
松原 = 240700
宿迁 = 072000
宿州 = 151600
随州 = 181200
绥化 = 220400
遂宁 = 091500
塔城 = 311500
台州 = 080800
泰安 = 121100
泰兴 = 072300
太仓 = 071600
太原 = 210200
唐山 = 160500
天门 = 181600
天水 = 270600
铁岭 = 231200
通化 = 240500
通辽 = 280700
铜川 = 200500
铜陵 = 150800
铜仁 = 260600
图木舒克 = 311100
吐鲁番 = 311400
屯昌 = 101200
乐东 = 102000
雅安 = 091800
烟台 = 120400
盐城 = 071300
延安 = 200600
延边 = 241100
延吉 = 240800
燕郊开发区 = 161300
杨凌 = 201200
扬州 = 070800
洋浦经济开发区 = 100400
阳江 = 032800
阳泉 = 210800
伊春 = 220300
伊犁 = 310500
宜宾 = 090700
宜昌 = 180300
宜春 = 131000
义乌 = 081400
益阳 = 190800
银川 = 290200
鹰潭 = 130700
营口 = 230500
永州 = 191300
榆林 = 200800
玉林 = 140600
玉树 = 320900
玉溪 = 250400
岳阳 = 190600
云浮 = 032900
运城 = 210300
枣庄 = 121600
湛江 = 031700
漳州 = 110500
张家港 = 071400
张家界 = 191400
张家口 = 160900
张掖 = 270900
昭通 = 251300
肇庆 = 031800
镇江 = 071000
中山 = 030700
中卫 = 290400
舟山 = 081100
周口 = 170800
珠海 = 030500
株洲 = 190300
驻马店 = 171400
资阳 = 091400
淄博 = 120700
自贡 = 090800
遵义 = 260300
万宁 = 100700
威海 = 120600
潍坊 = 120500
渭南 = 200700
温州 = 080400
文昌 = 100500
文山 = 251400
乌海 = 281000
乌兰察布 = 281200
乌鲁木齐 = 310200
无锡 = 070400
芜湖 = 150300
梧州 = 140700
吴忠 = 290300
武威 = 270700
五家渠 = 311000
五指山 = 101000
西昌 = 091900
西宁 = 320200
西双版纳 = 251500
锡林郭勒盟 = 281400
厦门 = 110300
仙桃 = 181400
咸宁 = 181300
咸阳 = 200300
襄阳 = 180500
湘潭 = 190400
湘西 = 191500
孝感 = 180900
新乡 = 170700
新余 = 130600
忻州 = 211100
信阳 = 171200
兴安盟 = 281300
邢台 = 161100
雄安新区 = 160100
徐州 = 071100
许昌 = 171100
宣城 = 151400
攀枝花 = 091000
盘锦 = 231300
萍乡 = 130500
平顶山 = 171000
平凉 = 271000
莆田 = 110600
普洱 = 251100
濮阳 = 171600
七台河 = 221300
齐齐哈尔 = 220600
黔东南 = 260900
黔南 = 261000
黔西南 = 260800
潜江 = 181500
钦州 = 140900
秦皇岛 = 160600
清远 = 031900
庆阳 = 271300
琼海 = 100600
琼中 = 101600
曲靖 = 250300
泉州 = 110400
衢州 = 081200
日喀则 = 300300
日照 = 121200
拉萨 = 300200
莱芜 = 121800
来宾 = 141300
兰州 = 270200
廊坊 = 160300
乐山 = 090400
丽江 = 250600
丽水 = 081000
连云港 = 071200
凉山 = 092300
聊城 = 121700
辽阳 = 231100
辽源 = 240400
林芝 = 300400
临沧 = 251800
临汾 = 210500
临高 = 101400
临夏 = 271400
临沂 = 120800
陵水 = 102100
柳州 = 140400
六安 = 151200
六盘水 = 260400
龙岩 = 111000
陇南 = 271200
娄底 = 191200
吕梁 = 211200
洛阳 = 170300
泸州 = 090500
漯河 = 171500
马鞍山 = 150500
茂名 = 032300
梅州 = 032600
眉山 = 091200
绵阳 = 090300
牡丹江 = 220700
那曲 = 300700
南昌 = 130200
南充 = 091100
南宁 = 140200
南平 = 110800
南通 = 070900
南阳 = 170600
内江 = 090900
宁德 = 110900
怒江 = 251900
鸡西 = 220900
吉安 = 130900
吉林 = 240300
济宁 = 120900
济源 = 171900
嘉兴 = 080700
嘉峪关 = 270400
佳木斯 = 220800
江门 = 031500
焦作 = 170500
揭阳 = 032200
金昌 = 270300
金华 = 080600
锦州 = 230700
晋城 = 210700
晋中 = 211000
荆门 = 180800
荆州 = 180700
景德镇 = 130400
靖江 = 072500
九江 = 130300
酒泉 = 270500
喀什地区 = 310400
开封 = 170400
开平 = 032700
克拉玛依 = 310300
克孜勒苏柯尔克孜 = 311700
昆山 = 070600
哈密 = 310700
海北 = 320500
海东 = 320300
海口 = 100200
海南 = 320700
海宁 = 081600
海西 = 320400
邯郸 = 160700
汉中 = 200900
菏泽 = 121400
和田 = 311600
河池 = 141200
河源 = 032100
鹤壁 = 171700
鹤岗 = 221000
贺州 = 141500
黑河 = 221200
衡水 = 161200
衡阳 = 190500
红河州 = 251000
呼和浩特 = 280200
呼伦贝尔 = 281100
葫芦岛 = 230900
湖州 = 080900
怀化 = 191100
淮安 = 071900
淮北 = 151700
淮南 = 151100
黄冈 = 181100
黄南 = 320600
黄山 = 151000
黄石 = 180400
惠州 = 030300
达州 = 091700
大理 = 250500
大庆 = 220500
大同 = 210400
大兴安岭 = 221400
丹东 = 230800
丹阳 = 072100
德宏 = 251600
德阳 = 090600
德州 = 121300
邓州 = 172000
迪庆 = 252000
定安 = 101100
定西 = 271100
东方 = 100900
东营 = 121000
儋州 = 100800
鄂尔多斯 = 280800
鄂州 = 181000
恩施 = 181800
防城港 = 140800
佛山 = 030600
抚顺 = 230600
抚州 = 131100
阜新 = 231500
阜阳 = 150700
甘南 = 271500
甘孜 = 092100
赣州 = 130800
固原 = 290600
广安 = 091300
广元 = 091600
桂林 = 140300
贵港 = 141000
贵阳 = 260200
果洛 = 320800
阿坝 = 092200
阿克苏 = 310600
阿拉尔 = 310900
阿拉善盟 = 281500
阿勒泰 = 311300
阿里 = 300800
鞍山 = 230400
安康 = 201000
安庆 = 150400
安顺 = 260500
安阳 = 170900
巴彦淖尔 = 280900
巴音郭楞 = 311800
巴中 = 092000
白城 = 241000
白沙 = 101800
白山 = 240900
白银 = 270800
百色 = 141100
蚌埠 = 150600
包头 = 280400
保定 = 160400
保山 = 251200
保亭 = 101700
宝鸡 = 200400
北海 = 140500
本溪 = 231000
毕节 = 260700
滨州 = 121500
博尔塔拉 = 311900
亳州 = 151800
沧州 = 160800
昌都 = 300600
昌吉 = 311200
昌江 = 101900
常德 = 190700
常熟 = 070700
常州 = 070500
长治 = 210600
朝阳 = 231400
潮州 = 032000
郴州 = 190900
澄迈 = 101300
承德 = 161000
池州 = 151500
赤峰 = 280300
崇左 = 141400
滁州 = 150900
楚雄 = 251700
珠三角 = 01
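
For reference, a minimal sketch of how the [citycode] section is looked up, mirroring Job51Spider.run but assuming conf.ini sits next to the script and is saved as UTF-8:

import configparser

conf = configparser.ConfigParser()
conf.read('conf.ini', encoding='utf-8')  # the script itself uses an absolute Windows path
print(conf['citycode']['北京'])  # prints 010000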

