Extracting information with regular expressions into MySQL

This article walks through using regular expressions to extract the required fields from 200 web pages and, with a multithreading optimization, storing the data efficiently in a SQLite3 database; it also covers optimization strategies for the agency (代理机构) and contractor (乙方单位) fields.

Extracting from 200 web pages

import re
from bs4 import BeautifulSoup
import os
import pandas as pd

root = r'C:\Users\hp\Desktop\20190923HTML'
dirs = os.listdir(root)      # list of file and folder names inside the directory

#================================= Tenderer (招标人) ===========================
tenderer_all = []

for file in dirs:
    path = os.path.join(root, file)   # join the root directory and the file name
    with open(path, 'r') as f:        # relies on the platform default encoding
        content = f.read()

    # To debug a single page instead:
    # with open('C:/Users/hp/Desktop/20190923HTML/1000.html', 'r') as f:
    #     content = f.read()

    soup = BeautifulSoup(content, 'lxml')
    result = soup.get_text()

    try:
        # A label (招标人, 采购人, ...), an optional colon, then the shortest span
        # ending in a plausible organization suffix; \n? tolerates labels that the
        # page breaks across lines, and [^为] skips phrasings like "招标人为...".
        tenderer = re.findall(r'(?:招\n?标\n?人|招标单位|单位|采购人|采购方|项目单位|建设单位|比选人|采购单位)(?::|:)?\n?[^为].+?(?:办事处|有限公司|事业部|管理局|资源局|商城|体育馆|小学|管委会|大队|学院|大学|学校|执法局|医院|电视台|支队|政府|办公室|建设局|管理所|小组|中学|运输局|技术馆|图书馆|检查站|基地|税务局|兽医局|监理总站|宣传部|林业局|联合会|质监局|事务局|消防总队|信息化局|公证处|财政厅|养护站|研究室|检察院|幼儿园|民政局|分局|教育组|基建处|管理站|管理处|总站)', result)[0]
    except IndexError:                # findall found nothing on this page
        tenderer = 'None'
    # Drop any label, newline, or stray whitespace still attached to the match
    tenderer = tenderer.split(':')[-1].split('\n')[-1].split()[-1]
    tenderer_all.append(tenderer)

sum(item.count('None') for item in tenderer_all)   # how many pages failed to match

tenderer_all
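
As a quick sanity check, the pattern family can be exercised on a synthetic snippet; the sample text and company name below are made up for illustration:

import re

pattern = r'(?:招标人|采购人)(?::|:)?\n?[^为].+?(?:有限公司|医院)'

sample = '项目名称:某某工程 招标人:阳光建设工程有限公司 联系人:张工'   # fabricated text
match = re.findall(pattern, sample)[0]
print(match)                                             # 招标人:阳光建设工程有限公司
print(match.split(':')[-1].split('\n')[-1].split()[-1])  # 阳光建设工程有限公司

The lazy `.+?` stops at the first organization suffix, and the post-processing splits strip the label and colon, leaving only the organization name.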


#================================= Agency (代理机构) ===========================
daili_all = []

for file in dirs:
    path = os.path.join(root, file)
    with open(path, 'r') as f:
        content = f.read()

    soup = BeautifulSoup(content, 'lxml')
    result = soup.get_text()

    try:
        # Same shape as above: an agency label, an optional colon, then the
        # shortest span ending in an agency-style suffix
        daili = re.findall(r'(?:招标机构|代理机构|磋商代理机构|集中采购机构|招标代理机构|采购代理机构|集采机构|招标代理|招标代理单位|招标代理人)(?::|:)?\n?[^为].+?(?:有限公司|有限责任公司|商城|办公室)', result)[0]
    except IndexError:
        daili = 'None'
    daili = daili.split(':')[-1].split('\n')[-1].split()[-1]
    daili_all.append(daili)

sum(item.count('None') for item in daili_all)   # failure count

daili_all



#================================= Contact person (联系人) ===========================
lianxi_all = []

for file in dirs:
    path = os.path.join(root, file)
    with open(path, 'r') as f:
        content = f.read()

    soup = BeautifulSoup(content, 'lxml')
    result = soup.get_text()

    try:
        # The 联系人 label (possibly broken across lines), an optional colon,
        # then at least one CJK character for the name
        lianxi = re.findall(r'(?:联\n?系\n?人)(?::|:)?\n?[^。].+?[\u4e00-\u9fa5]+', result)[0]
    except IndexError:
        lianxi = 'None'
    lianxi = lianxi.split(':')[-1].split('\n')[-1].split()[-1]
    lianxi_all.append(lianxi)

sum(item.count('None') for item in lianxi_all)   # failure count

lianxi_all


#================================= Phone (电话) ===========================
phone_all = []

for file in dirs:
    path = os.path.join(root, file)
    with open(path, 'r') as f:
        content = f.read()

    soup = BeautifulSoup(content, 'lxml')
    result = soup.get_text()

    try:
        # The capturing group lists 11-digit mobile numbers before the shorter
        # landline forms so the longest form is tried first; one variant uses an
        # em dash (—) because some pages format area codes that way
        phone = re.findall(r'(?:联\n?系\n?电\n?话|联系人|电话|话)(?::|:)?(\d{11}|0\d{3}-\d{8}|0\d{3}-\d{7}|0\d{2}-\d{8}|0\d{3}—\d{7}|\d{8}|\d{7})', result)[0]
    except IndexError:
        phone = 'None'
    phone_all.append(phone)   # the group already excludes the label, so no cleanup is needed

sum(item.count('None') for item in phone_all)   # failure count

phone_all
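
Alternation order matters here: re tries branches left to right at each position, so the 11-digit mobile form has to precede the bare 7- and 8-digit forms or a mobile number would be cut short. A quick illustration with a fabricated number:

import re

good = re.compile(r'电话(?::|:)?(\d{11}|\d{8}|\d{7})')
bad  = re.compile(r'电话(?::|:)?(\d{7}|\d{8}|\d{11})')   # shorter forms first: wrong

s = '电话:13912345678'        # fabricated number
print(good.findall(s))         # ['13912345678']
print(bad.findall(s))          # ['1391234'] (the 7-digit branch wins too early)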

#================================== Save to CSV ===============
dataframe = pd.DataFrame({
    '招标人': tenderer_all, '代理机构': daili_all,
    '联系人': lianxi_all, '电话': phone_all})

# GB2312 so the file opens cleanly in Excel on a Chinese-locale Windows machine
dataframe.to_csv(r"C:\Users\hp\Desktop\data.csv", encoding='GB2312', sep=',')
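
The same DataFrame can also go straight into a database, as the summary mentions. A minimal sketch using the standard-library sqlite3 module and pandas' to_sql; the database path and the table name 'tenders' are placeholders:

import sqlite3

conn = sqlite3.connect(r"C:\Users\hp\Desktop\tenders.db")   # hypothetical path
dataframe.to_sql('tenders', conn, if_exists='replace', index=False)
print(conn.execute('SELECT COUNT(*) FROM tenders').fetchone())   # spot-check the row count
conn.close()

For MySQL, as in the title, the identical to_sql call works against an SQLAlchemy engine built from a mysql+pymysql:// URL instead of the sqlite3 connection.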

Optimizing the agency extraction

# 10008 pages in total; 'None' results: 5768 before optimization, 5258 after

import re
from bs4 import BeautifulSoup
import os
import pandas as pd

root = r'C:\Users\Administrator\Desktop\100G网页'
dirs = os.listdir(root)
      
###################################### Agency (代理机构)
proxy_name_all = []

for file in dirs:
    path = os.path.join(root, file)   # join the root directory and the file name
    with open(path, 'r', encoding='utf_8_sig') as f:   # utf_8_sig also swallows a BOM
        content = f.read()

    soup = BeautifulSoup(content, 'lxml')
    result = soup.get_text()
    # Collapse every whitespace variant (plain, non-breaking, and escaped \r \n \t
    # sequences) so later patterns no longer need \n? between individual characters
    text = re.sub('\s| |\xa0| |\\r|\n|\\n|\r|\t|\\t', '', result)
    text = re.sub(
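
The summary also mentions a multithreading optimization. Reading and parsing thousands of HTML files is I/O-bound, so a thread pool fits naturally; a minimal sketch using concurrent.futures, where the worker count of 8 is an arbitrary choice and read_page is a helper introduced here for illustration:

from concurrent.futures import ThreadPoolExecutor
import os

def read_page(path):
    # utf_8_sig tolerates a BOM, matching the optimized loop above
    with open(path, 'r', encoding='utf_8_sig') as f:
        return f.read()

paths = [os.path.join(root, name) for name in dirs]
with ThreadPoolExecutor(max_workers=8) as pool:
    pages = list(pool.map(read_page, paths))   # file contents, in input order

The regex extraction itself stays single-threaded; only the file I/O is parallelized.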