# python爬虫小样 + csv文件的存储 (A small Python crawler sample with CSV storage)

from bs4 import BeautifulSoup as bsp
import requests
import re
import csv
# Scrape news titles from pages 549 down to 539 (inclusive) of the
# SDU news site ("xyxw" = campus news listing pages).
START_PAGE = 549
LAST_PAGE = 539

titles3 = []
for page in range(START_PAGE, LAST_PAGE - 1, -1):
    urls = "http://www.view.sdu.edu.cn/xyxw/" + str(page) + ".htm"
    page_response = requests.get(urls, timeout=5)
    page_content = bsp(page_response.content, "html.parser")
    # BUG FIX: collect the <li> items of EVERY "sublist" container on the
    # page. The original assigned `titles` inside this loop but consumed it
    # outside, so only the last container was kept — and a page with no
    # container silently re-used the previous page's items (or raised
    # NameError on the very first page).
    for sublist in page_content.find_all(class_="sublist"):
        titles3 += [li.text for li in sublist.find_all("li")]

# Crude serialisation kept from the original pipeline: render the list via
# its repr and turn the comma separators into newlines. NOTE(review): a
# title that itself contains a comma would be split in two here.
titles4 = str(titles3)
titles5 = titles4.replace(",", "\n")
temp = []    # title lines with the date blanked out
time_t = []  # extracted publication dates, "YYYY-M-D" strings

# Persist the raw scraped text so it can be re-read line by line.
# (No explicit close needed: `with` already closes the file.)
with open("viewsdu5.txt", mode="w", encoding="utf-8") as f:
    f.write(titles5)

# Split each line into (title, date): pull the date out with a regex and
# blank it from the title text. The pattern is compiled once, outside the
# loop.
DATE_RE = re.compile(r"(\d{4}-\d{1,2}-\d{1,2})")
with open("viewsdu5.txt", mode="r", encoding="utf-8") as f:
    for line in f:
        # BUG FIX: the original called .group(0) on the raw search result
        # (stored in a variable that shadowed the builtin `str`), so any
        # line without a date crashed with AttributeError. Skip such lines
        # instead.
        match = DATE_RE.search(line)
        if match is None:
            continue
        date_str = match.group(0)
        time_t.append(date_str)
        temp.append(line.replace(date_str, " "))
# Re-write the text file with the dates stripped out of each line.
# (`with` closes the file; the original's extra f.close() was redundant.)
with open("viewsdu5.txt", mode="w", encoding="utf-8") as f:
    f.writelines(temp)

headers = ["内容", "时间"]
# Pair each cleaned title with its date: one 2-column row per article.
rows = [[tem, tim] for tem, tim in zip(temp, time_t)]

# Write the final table as CSV. newline='' is required by the csv module;
# the encoding is pinned to UTF-8 so the non-ASCII header row cannot raise
# UnicodeEncodeError under a non-UTF-8 default locale.
with open("csv01.csv", "w", newline="", encoding="utf-8") as f:
    f_csv = csv.writer(f)
    f_csv.writerow(headers)
    f_csv.writerows(rows)
#summary:1、先按照需求信息爬取网站内容 
# 2、对所有的信息进行字符串处理,也就是所谓数据清洗的工作。
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值