# 1: Packages that need to be installed
import fitz
from tqdm import tqdm
from bs4 import BeautifulSoup
import re
# 2: Approach: the PDF is first parsed into HTML, then plain text is extracted
# from that HTML. So far extraction quality is good for plain-text documents,
# but poor for two-column academic papers: they contain figures, tables, images
# and author blocks, so even though content can be pulled out following the
# layout, the extracted text ends up messy.
import fitz
from tqdm import tqdm
from bs4 import BeautifulSoup
import re
# Convert a PDF into an HTML file.
def pdf2html(input_path, html_path):
    """Dump every page of the PDF at *input_path* as HTML into *html_path*.

    Each page is rendered with PyMuPDF's ``page.get_text('html')`` and the
    per-page fragments are concatenated in page order.

    Parameters:
        input_path: path of the source .pdf file.
        html_path:  path the concatenated HTML is written to (UTF-8).
    """
    doc = fitz.open(input_path)
    try:
        # ''.join over a generator avoids the quadratic += string build;
        # tqdm keeps the per-page progress bar of the original.
        html_content = ''.join(page.get_text('html') for page in tqdm(doc))
    finally:
        # Fix: the original never closed the document handle.
        doc.close()
    print('开始输出html文件')
    # Fix: the original appended a stray "</body></html>" even though no
    # opening <html><body> tags were ever emitted; the per-page fragments
    # are written as-is instead.
    with open(html_path, 'w', encoding='utf-8', newline='') as fp:
        fp.write(html_content)
# Parse the local HTML file with BeautifulSoup and extract its text.
def html2txt(html_path):
    """Extract the text of every ``<span>`` under each ``<div>`` in *html_path*
    and append it, one line per paragraph-level child, to the sibling .txt file
    (same path with ``.html`` replaced by ``.txt``).

    Each extracted line is also echoed to stdout, as in the original.

    Parameters:
        html_path: path of the HTML file produced by ``pdf2html``.
    """
    # Fix: close the HTML file deterministically (the original leaked it).
    with open(html_path, 'r', encoding='utf-8') as html_file:
        soup = BeautifulSoup(html_file.read(), "html.parser")

    # Hoisted out of the loops: compile the span-extraction regex once and
    # open the output file once instead of reopening it per paragraph
    # (append mode, so the resulting file content is identical).
    span_pattern = re.compile(r'<span .*?>(.*?)</span>')
    txt_path = html_path.replace('.html', '.txt')
    with open(txt_path, 'a', encoding='utf-8') as text_file:
        for div in soup.find_all('div'):
            for p in div:
                text = ''
                for span in p:
                    matches = span_pattern.findall(str(span))
                    if matches:
                        # Only the first match per child is kept, as before.
                        text += matches[0]
                print(text)
                text_file.write(text)
                text_file.write('\n')
def pdf2txt(filepath):
    """Convert *filepath* (a .pdf) to a .txt file via an intermediate HTML dump.

    Step 1 writes ``<name>.html`` next to the PDF; step 2 derives
    ``<name>.txt`` from it.
    """
    intermediate_html = filepath.replace('.pdf', '.html')
    pdf2html(filepath, intermediate_html)  # step 1: PDF -> HTML
    html2txt(intermediate_html)            # step 2: HTML -> TXT
    print('PDF转换完成!')
if __name__ == '__main__':
    # NOTE(review): the argument is a placeholder ("the PDF file to convert",
    # with a typo 抓换→转换 in the original) — replace it with a real .pdf path.
    pdf2txt('需要抓换的PDF文件')
# 3: Other conversion methods will be investigated later, aiming to improve
# extraction quality.