Scraping China's national social organization registry with Python

Two small scrapers against the social organization search at chinanpo.gov.cn, both querying for organizations whose names contain 学会 ("academic society"): the first pulls records registered with the Ministry of Civil Affairs (民政部), the second pulls records registered with local civil affairs bureaus. Each writes its results to a CSV file.

# -*- coding: utf-8 -*-

import requests
from bs4 import BeautifulSoup
import time
import pandas as pd


# 民政部 (Ministry of Civil Affairs) registrations

def acquire_minzhengbu(to_page):
    headers = {
        "User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/72.0.3626.109 Safari/537.36"
    }
    url = "http://www.chinanpo.gov.cn/search/orgcx.html"

    # Form payload captured from the browser's search request. The encoded
    # orgName decodes to "学会"; note that requests percent-encodes form
    # values again, so if the query returns nothing, try the raw "学会".
    data_m = {
        "t": 2,  # 2 = Ministry-level list
        "orgName": "%E5%AD%A6%E4%BC%9A",  # URL-encoded "学会"
        "corporateType": 1,
        "status": -1,
        "regNumB": 1,
        "regNumD": 2,
        "tabIndex": 1,
        "regNum": -1,
        "isHyxh": 2,
        "page_flag": "true",
        "pagesize_key": "macList",
        "goto_page": to_page,
        "current_page": 1,
        "total_count": 487,  # total matching records at crawl time
        "page_size": 20,  # 20 records per page
        "to_page": to_page,
    }

    response = requests.post(url, data=data_m, headers=headers)
    time.sleep(5)  # pause between page requests to avoid hammering the site
    html = response.text

    soup = BeautifulSoup(html, 'html.parser')

    # Each record in the results table spans six consecutive <a> tags:
    # name, credit code, type, legal representative, registration date, status.
    # A full page holds 20 records (120 links); the last page may hold fewer.
    links = soup.find(id='mac-data').find_all('a')

    total = []
    for i in range(0, len(links) - len(links) % 6, 6):
        total.append([a.get_text().strip() for a in links[i:i + 6]])

    return total

total_minzheng = []
for i in range(1, 26):  # 487 records / 20 per page -> 25 pages
    print(i)
    try:
        total_minzheng += acquire_minzhengbu(i)
    except Exception as exc:
        print('error:', i, exc)

data11 = pd.DataFrame(total_minzheng)
# columns: organization name, unified social credit code, organization type,
# legal representative, registration date, status
data11.columns = ['社会组织名称', '统一社会信用编码', '社会组织类型', '法定代表人', '成立登记日期', '状态']

data11.to_csv('./学会_民政部登记.csv', encoding='utf-8', index=False)
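
The 25-page bound in the loop above comes straight from the payload's total_count (487) and page_size (20). A one-function sketch of deriving it instead of hardcoding it (the page_count helper is mine, not part of the site's API):

import math

def page_count(total_count, page_size=20):
    """Number of result pages for a search returning total_count records."""
    return math.ceil(total_count / page_size)

print(page_count(487))    # -> 25 pages for the Ministry-level list
print(page_count(30640))  # -> 1532 pages for the local list below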
 

# -*- coding: utf-8 -*-

import requests
from bs4 import BeautifulSoup
import time
import pandas as pd  # needed for the DataFrame/CSV step below

# 地方 (local civil affairs bureaus) registrations

def acquire_difang(to_page):
    headers = {
        "User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/72.0.3626.109 Safari/537.36"
    }
    url = "http://www.chinanpo.gov.cn/search/orgcx.html"

    # Same payload as above, with the local-list values for t, tabIndex,
    # pagesize_key, and total_count.
    data_difang = {
        "t": 3,  # 3 = local-level list
        "orgName": "%E5%AD%A6%E4%BC%9A",  # URL-encoded "学会"
        "corporateType": 1,
        "status": -1,
        "regNumB": 1,
        "regNumD": 2,
        "tabIndex": 2,
        "regNum": -1,
        "isHyxh": 2,
        "page_flag": "true",
        "pagesize_key": "usciList",
        "goto_page": to_page,
        "current_page": 1,
        "total_count": 30640,  # total matching records at crawl time
        "page_size": 20,
        "to_page": to_page,
    }
    response = requests.post(url, data=data_difang, headers=headers)
    # time.sleep(1)  # uncomment to throttle; there are 1532 pages to fetch
    html = response.text

    soup = BeautifulSoup(html, 'html.parser')

    # Same layout as the Ministry-level table: six consecutive <a> tags per
    # record, 20 records per full page, possibly fewer on the last page.
    links = soup.find(id='local-data').find_all('a')

    total = []
    for i in range(0, len(links) - len(links) % 6, 6):
        total.append([a.get_text().strip() for a in links[i:i + 6]])

    return total
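
Stepping back: acquire_difang is acquire_minzhengbu with four values swapped (t, tabIndex, pagesize_key, and the id of the results table). A hedged sketch of a single parameterized function, my refactor rather than the original post's code:

import time

import requests
from bs4 import BeautifulSoup

URL = "http://www.chinanpo.gov.cn/search/orgcx.html"
HEADERS = {
    "User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/72.0.3626.109 Safari/537.36"
}

def acquire_orgs(to_page, t, tab_index, pagesize_key, table_id, total_count):
    """Fetch one result page of organizations whose names contain 学会."""
    data = {
        "t": t,
        "orgName": "学会",  # raw string; requests percent-encodes form values itself
        "corporateType": 1, "status": -1, "regNumB": 1, "regNumD": 2,
        "tabIndex": tab_index, "regNum": -1, "isHyxh": 2,
        "page_flag": "true", "pagesize_key": pagesize_key,
        "goto_page": to_page, "current_page": 1,
        "total_count": total_count, "page_size": 20, "to_page": to_page,
    }
    response = requests.post(URL, data=data, headers=HEADERS)
    time.sleep(1)  # throttle between pages
    soup = BeautifulSoup(response.text, "html.parser")
    links = soup.find(id=table_id).find_all("a")
    # six consecutive <a> tags per record, as in both originals
    return [[a.get_text().strip() for a in links[i:i + 6]]
            for i in range(0, len(links) - len(links) % 6, 6)]

Calling acquire_orgs(1, t=2, tab_index=1, pagesize_key="macList", table_id="mac-data", total_count=487) reproduces acquire_minzhengbu(1), and t=3, tab_index=2, pagesize_key="usciList", table_id="local-data", total_count=30640 reproduces acquire_difang(1).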

total_difang = []

for i in range(1, 1533):  # 30640 records / 20 per page -> 1532 pages
    print(i)
    try:
        total_difang += acquire_difang(i)
    except Exception as exc:
        print('error:', i, exc)

data12 = pd.DataFrame(total_difang)
data12.columns = ['社会组织名称', '统一社会信用编码', '社会组织类型', '法定代表人', '成立登记日期', '状态']

data12.to_csv('./学会_地方登记.csv', encoding='utf-8', index=False)
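
With both CSVs on disk, a natural follow-up (my addition; the output path is hypothetical) is to stack the two lists and drop organizations that appear in both, keyed on the unified social credit code column:

import pandas as pd

ministry = pd.read_csv('./学会_民政部登记.csv')
local = pd.read_csv('./学会_地方登记.csv')

# Stack both lists, then de-duplicate on the credit code column.
combined = pd.concat([ministry, local], ignore_index=True)
combined = combined.drop_duplicates(subset='统一社会信用编码')

combined.to_csv('./学会_全部登记.csv', encoding='utf-8', index=False)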

 
