python之四种方式读取文档
第一种:读取纯文本
1:代码
# coding=utf-8
"""
@author: jiajiknag
Program purpose: read a remote plain-text (txt) file and print it as UTF-8.
"""
# Imports
from bs4 import BeautifulSoup  # NOTE(review): unused in this snippet; kept from the original
from urllib.request import urlopen

# Fetch the remote text document.
texPage = urlopen("http://www.pythonscraping.com/pages/warandpeace/chapter1-ru.txt")
try:
    # The response body is raw bytes; decode explicitly as UTF-8 before printing.
    print(str(texPage.read(), 'utf-8'))
finally:
    # BUG FIX: close the HTTP response so the socket is released
    # (the original never closed it).
    texPage.close()
2:结果
第二种:读取csv文件
1:代码
# coding=utf-8
"""
@author: jiajiknag
Program purpose: read a remote CSV file and print its header plus every row.
"""
from urllib.request import urlopen
from io import StringIO
import csv

# Download the CSV and decode it, silently dropping any non-ASCII bytes.
data = urlopen("http://pythonscraping.com/files/MontyPythonAlbums.csv").read().decode('ascii', 'ignore')
# Wrap the decoded string in a file-like object so the csv module can consume it.
dataFile = StringIO(data)
# DictReader maps each data row onto the names from the header row.
dictReader = csv.DictReader(dataFile)
# Show the header, then each record.
print(dictReader.fieldnames)
for row in dictReader:
    print(row)
2:结果
第三种:读取PDF文件
1:代码
# coding=utf-8
"""
@author: jiajiknag
Program purpose: read a PDF file and print its extracted text.

Note:
readPDF accepts any binary file-like object, so a local file works just as
well as a urlopen response:
    pdfFile = open("../pages/warandpeace/chapter1.pdf", 'rb')
"""
from urllib.request import urlopen
from pdfminer.pdfinterp import PDFResourceManager, process_pdf
from pdfminer.converter import TextConverter
from pdfminer.layout import LAParams
from io import StringIO
from io import open  # NOTE(review): io.open is an alias of the builtin open; kept from the original


def readPDF(pdfFile):
    """Extract and return all text from a PDF.

    Args:
        pdfFile: a binary file-like object (a urlopen response or a file
            opened with 'rb').

    Returns:
        str: the text content extracted by pdfminer.
    """
    # Resource manager shares fonts/images across pages during parsing.
    rsrcmgr = PDFResourceManager()
    # Collect the extracted text into an in-memory string buffer.
    retstr = StringIO()
    laparams = LAParams()
    device = TextConverter(rsrcmgr, retstr, laparams=laparams)
    try:
        process_pdf(rsrcmgr, device, pdfFile)
    finally:
        # Release converter resources even if parsing fails.
        device.close()
    content = retstr.getvalue()
    retstr.close()
    # BUG FIX: the original never returned the text, so callers got None.
    return content


pdfFile = urlopen("http://pythonscraping.com/pages/warandpeace/chapter1.pdf")
try:
    # Extract and print the document text.
    outputString = readPDF(pdfFile)
    print(outputString)
finally:
    pdfFile.close()
2:结果
第四种:读取Word(.docx)文件
1:代码
# coding=utf-8
"""
@author: jiajiknag
Program purpose: read a remote Word (.docx) file and print its text,
wrapping the document title in <h1> tags.

A .docx file is a zip archive: download it as bytes, wrap the bytes in a
BytesIO object (the binary counterpart of StringIO used earlier), unzip it
with the standard-library zipfile module, and read word/document.xml —
the document text is plain XML inside the archive.
"""
from zipfile import ZipFile
from urllib.request import urlopen
from io import BytesIO
from bs4 import BeautifulSoup

# Download the Word document and make the bytes seekable for ZipFile.
wordFile = urlopen("http://pythonscraping.com/pages/AWordDocument.docx").read()
wordFile = BytesIO(wordFile)
document = ZipFile(wordFile)
# The document body lives at word/document.xml inside the archive.
xml_content = document.read('word/document.xml')

# NOTE(review): no parser is specified, so bs4 picks the "best available"
# one and emits a warning; consider BeautifulSoup(..., 'xml') — confirm
# lxml is installed before changing this.
wordobj = BeautifulSoup(xml_content.decode('utf-8'))
# <w:t> elements hold the runs of visible text in OOXML.
textStrings = wordobj.findAll("w:t")
# BUG FIX: the original paste lost all indentation, making the loop and
# try/except a SyntaxError; structure restored below.
for textElem in textStrings:
    closeTag = ""
    try:
        # A paragraph styled "Title" gets wrapped in <h1>...</h1>.
        style = textElem.parent.previousSibling.find("w:pstyle")
        if style is not None and style["w:val"] == "Title":
            print("<h1>")
            closeTag = "</h1>"
    except AttributeError:
        # Element has no previous sibling / style info — print it untagged.
        pass
    print(textElem.text)
    print(closeTag)
2:结果