When we've obtained some data, for example by scraping it from the web with a crawler, how do we save it to files in different formats? Below, we'll go through saving data with Python as txt, csv, and Excel files, and even storing it in a MongoDB database.
Saving as a txt file
First, our sample data is scraped from the web with a crawler; each record consists of a course title and its link, taken from the Tencent Classroom course list page.
The following code saves it as a txt file:
import requests
from lxml import etree

url = 'https://ke.qq.com/course/list/?mt=1001'
headers = {"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) "
                         "AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3865.90 Safari/537.36"}
# get the returned HTML source as a string
response = requests.get(url, headers=headers).text

def save_txt():  # save as a txt file
    f = open("./ke.txt", "w", encoding="utf8")
    # extract the course titles and links
    html = etree.HTML(response)
    li_list = html.xpath('//ul[@class="course-card-list"]/li')
    for li in li_list:
        title = li.xpath('.//a[@class="item-tt-link"]/@title')[0]
        href = li.xpath('.//a[@class="item-tt-link"]/@href')[0]
        href = "https:" + href
        f.write(f'{title}-->{href}\n')
    f.close()

save_txt()
Run the program and a ke.txt file appears in the current directory, with one title and link per line.
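As a small improvement (not in the original code), the write step can use a with statement so the file is closed automatically even if an exception is raised mid-loop. A minimal sketch, reusing li_list as built inside save_txt above:

with open("./ke.txt", "w", encoding="utf8") as f:  # file closes automatically on exit
    for li in li_list:
        title = li.xpath('.//a[@class="item-tt-link"]/@title')[0]
        href = "https:" + li.xpath('.//a[@class="item-tt-link"]/@href')[0]
        f.write(f'{title}-->{href}\n')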
Saving as a csv file
The code is as follows:
import requests
from lxml import etree
import csv

url = 'https://ke.qq.com/course/list/?mt=1001'
headers = {"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) "
                         "AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3865.90 Safari/537.36"}
# get the returned HTML source as a string
response = requests.get(url, headers=headers).text

def save_csv():  # save as a csv file
    with open("ke.csv", "w", encoding="utf8", newline='') as f:
        header = ["title", "href"]
        writer = csv.DictWriter(f, header)  # create a dict-based writer
        writer.writeheader()
        # extract the course titles and links
        html = etree.HTML(response)
        li_list = html.xpath('//ul[@class="course-card-list"]/li')
        for li in li_list:
            title = li.xpath('.//a[@class="item-tt-link"]/@title')[0]
            href = li.xpath('.//a[@class="item-tt-link"]/@href')[0]
            href = "https:" + href
            item = {"title": title, "href": href}
            print(item)
            writer.writerow(item)
        print("Saved successfully...")

save_csv()
Run the program and the file is saved in the current directory.
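One caveat worth knowing (a note about Excel's behavior, not something the original covers): if you open this csv in Excel, the Chinese titles may appear garbled, because Excel expects a byte-order mark in UTF-8 files; writing with encoding="utf-8-sig" instead of "utf8" avoids that. To verify the file programmatically, csv.DictReader reads each row back as a dict; a quick sketch:

import csv

with open("ke.csv", "r", encoding="utf8", newline='') as f:
    for row in csv.DictReader(f):  # one dict per data row, keyed by the header
        print(row["title"], row["href"])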
Saving as an Excel file
The code is as follows:
import requests
from lxml import etree
import openpyxl

url = 'https://ke.qq.com/course/list/?mt=1001'
headers = {"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) "
                         "AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3865.90 Safari/537.36"}
# get the returned HTML source as a string
response = requests.get(url, headers=headers).text

def save_excel():  # save as an Excel file
    wb = openpyxl.Workbook()  # create a workbook
    ws = wb.active  # get the active worksheet
    # write the header row
    ws["A1"] = "Course Title"
    ws["B1"] = "Course Link"
    # extract the course titles and links
    html = etree.HTML(response)
    li_list = html.xpath('//ul[@class="course-card-list"]/li')
    for i, li in enumerate(li_list):  # the index gives us the row number
        title = li.xpath('.//a[@class="item-tt-link"]/@title')[0]
        href = li.xpath('.//a[@class="item-tt-link"]/@href')[0]
        href = "https:" + href
        ws.cell(row=i+2, column=1, value=title)  # write the value at this row and column
        ws.cell(row=i+2, column=2, value=href)
    wb.save("./QQ课堂.xlsx")

save_excel()
Run the program and open the file: the titles sit in column A and the links in column B.
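To check the result without opening Excel, openpyxl can also read the workbook back; a short sketch (values_only=True yields plain cell values instead of Cell objects):

import openpyxl

wb = openpyxl.load_workbook("./QQ课堂.xlsx")
ws = wb.active  # the sheet we wrote to
for title, href in ws.iter_rows(min_row=2, values_only=True):  # skip the header row
    print(title, href)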
Saving to a MongoDB database
The code is as follows (note that, as in the earlier examples, we first fetch the page so that response is defined):
import requests
import pymongo
from lxml import etree

url = 'https://ke.qq.com/course/list/?mt=1001'
headers = {"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) "
                         "AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3865.90 Safari/537.36"}
# get the returned HTML source as a string
response = requests.get(url, headers=headers).text

def save_mongo():  # store the data in a MongoDB database
    client = pymongo.MongoClient()  # connect to MongoDB (localhost:27017 by default)
    db = client["ke"]  # the database is created implicitly on first write
    collection = db["ke_content"]
    items = []
    html = etree.HTML(response)
    li_list = html.xpath('//ul[@class="course-card-list"]/li')
    for li in li_list:
        title = li.xpath('.//a[@class="item-tt-link"]/@title')[0]
        href = li.xpath('.//a[@class="item-tt-link"]/@href')[0]
        href = "https:" + href
        item = {"title": title, "href": href}
        items.append(item)  # collect every item into the items list
    collection.insert_many(items)  # insert many documents at once
    for content in collection.find():  # read the data back from the database
        print(content)

save_mongo()
Run the code and the documents stored in the database are printed to the terminal.
You can also inspect the data directly: open a terminal, connect to the database, and query the collection.
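For example, with the MongoDB shell (the mongo command, or mongosh on newer installations) the check looks roughly like this, where ke and ke_content are the names used in the code above:

mongo                      # start the MongoDB shell
> use ke                   # switch to the ke database
> db.ke_content.find()     # list the inserted documents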
Summary
Finally, here is all the code in one place; pick whichever function you need and adjust a few lines to suit your own target site.
import pymongo
import requests
from lxml import etree
import openpyxl  # for saving as an Excel file
import csv  # for saving as a csv file

url = 'https://ke.qq.com/course/list/?mt=1001'
headers = {"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) "
                         "AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3865.90 Safari/537.36"}
# get the returned HTML source as a string
response = requests.get(url, headers=headers).text

def save_txt():  # save as a txt file
    f = open("./ke.txt", "w", encoding="utf8")
    # extract the course titles and links
    html = etree.HTML(response)
    li_list = html.xpath('//ul[@class="course-card-list"]/li')
    for li in li_list:
        title = li.xpath('.//a[@class="item-tt-link"]/@title')[0]
        href = li.xpath('.//a[@class="item-tt-link"]/@href')[0]
        href = "https:" + href
        f.write(f'{title}-->{href}\n')
    f.close()

def save_csv():  # save as a csv file
    with open("ke.csv", "w", encoding="utf8", newline='') as f:
        header = ["title", "href"]
        writer = csv.DictWriter(f, header)
        writer.writeheader()
        # extract the course titles and links
        html = etree.HTML(response)
        li_list = html.xpath('//ul[@class="course-card-list"]/li')
        for li in li_list:
            title = li.xpath('.//a[@class="item-tt-link"]/@title')[0]
            href = li.xpath('.//a[@class="item-tt-link"]/@href')[0]
            href = "https:" + href
            item = {"title": title, "href": href}
            print(item)
            writer.writerow(item)
        print("Saved successfully...")

def save_excel():  # save as an Excel file
    wb = openpyxl.Workbook()  # create a workbook
    ws = wb.active  # get the active worksheet
    # write the header row
    ws["A1"] = "Course Title"
    ws["B1"] = "Course Link"
    # extract the course titles and links
    html = etree.HTML(response)
    li_list = html.xpath('//ul[@class="course-card-list"]/li')
    for i, li in enumerate(li_list):  # the index gives us the row number
        title = li.xpath('.//a[@class="item-tt-link"]/@title')[0]
        href = li.xpath('.//a[@class="item-tt-link"]/@href')[0]
        href = "https:" + href
        ws.cell(row=i+2, column=1, value=title)  # write the value at this row and column
        ws.cell(row=i+2, column=2, value=href)
    wb.save("./QQ课堂.xlsx")
    print("Saved successfully")

def save_mongo():  # store the data in a MongoDB database
    client = pymongo.MongoClient()  # connect to MongoDB (localhost:27017 by default)
    db = client["ke"]  # the database is created implicitly on first write
    collection = db["ke_content"]
    items = []
    html = etree.HTML(response)
    li_list = html.xpath('//ul[@class="course-card-list"]/li')
    for li in li_list:
        title = li.xpath('.//a[@class="item-tt-link"]/@title')[0]
        href = li.xpath('.//a[@class="item-tt-link"]/@href')[0]
        href = "https:" + href
        item = {"title": title, "href": href}
        items.append(item)
    collection.insert_many(items)  # insert many documents at once
    for content in collection.find():  # iterate over the documents in this collection
        print(content)

if __name__ == '__main__':
    save_mongo()  # call whichever function you need
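One optional cleanup, since all four functions repeat the same XPath parsing: pull it into a shared helper and have each save function iterate over its result. This is just a sketch; parse_items is a name introduced here, not part of the original code.

def parse_items(html_text):  # hypothetical shared parsing helper
    html = etree.HTML(html_text)
    items = []
    for li in html.xpath('//ul[@class="course-card-list"]/li'):
        title = li.xpath('.//a[@class="item-tt-link"]/@title')[0]
        href = "https:" + li.xpath('.//a[@class="item-tt-link"]/@href')[0]
        items.append({"title": title, "href": href})
    return items

Each save function then reduces to a loop over parse_items(response), and a change to the page layout only has to be fixed in one place.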
If you spot any shortcomings, please point them out; I'll keep improving, and we can make progress together!