csv存储

import csv

# Demo: write a small table of user records to qiye.csv.
headers = ['ID', 'UserName', 'Password', 'Age', 'Country']
rows = [(1001, 'qiye', 'qiye_pass', 24, 'China'),
        (1002, '刘小强', 'Mary_pass', 20, "USA"),
        (1003, "Jack", "Jack_pass", 20, "USA")]
# newline='' is required by the csv module — without it every row is
# followed by a blank line on Windows.  encoding='utf-8' makes the
# Chinese names portable instead of depending on the locale default.
with open('qiye.csv', 'w', newline='', encoding='utf-8') as f:
    f_csv = csv.writer(f)
    f_csv.writerow(headers)   # header line first
    f_csv.writerows(rows)     # then all data rows at once
import csv

# Demo: read qiye.csv back and print each row.
# newline='' lets the csv module handle line endings itself, and the
# encoding must match the utf-8 the file was written with.
with open('qiye.csv', newline='', encoding='utf-8') as f:
    f_csv = csv.reader(f)
    headers = next(f_csv)  # first row is the header line
    print(headers)
    for row in f_csv:      # note: every field comes back as a string
        print(row)
['ID', 'UserName', 'Password', 'Age', 'Country']
['1001', 'qiye', 'qiye_pass', '24', 'China']
['1002', '刘小强', 'Mary_pass', '20', 'USA']
['1003', 'Jack', 'Jack_pass', '20', 'USA']
from collections import namedtuple
import csv

# Demo: wrap each CSV row in a namedtuple so columns can be accessed
# by name (row.UserName) instead of by numeric index.
with open('qiye.csv', newline='', encoding='utf-8') as f:
    f_csv = csv.reader(f)
    headers = next(f_csv)             # header row supplies the field names
    Row = namedtuple('Row', headers)  # e.g. Row(ID, UserName, Password, Age, Country)
    for r in f_csv:
        row = Row(*r)
        print(row.UserName, row.Password)
        print(row)
qiye qiye_pass
Row(ID='1001', UserName='qiye', Password='qiye_pass', Age='24', Country='China')
刘小强 Mary_pass
Row(ID='1002', UserName='刘小强', Password='Mary_pass', Age='20', Country='USA')
Jack Jack_pass
Row(ID='1003', UserName='Jack', Password='Jack_pass', Age='20', Country='USA')
from lxml import etree
import requests
import re
import csv

# Scrape the chapter index of seputu.com and store it as CSV.
# HTTP header names use hyphens: the key must be 'User-Agent'.
# (With an underscore, requests sends a bogus 'User_Agent' header that
# the server ignores, so the browser spoof never takes effect.)
user_agent = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:64.0) Gecko/20100101 Firefox/64.0'
headers = {'User-Agent': user_agent}
r = requests.get('http://seputu.com/', headers=headers).content.decode('utf-8')

# Parse the page with lxml.
html = etree.HTML(r)
# Find every <div class="mulu"> — one per chapter group.
div_mulus = html.xpath('.//*[@class="mulu"]')

# Compile once, outside the loops: extracts "[date] title" from the
# <a title="..."> attribute.
pattern = re.compile(r'\s*\[(.*)\]\s+(.*)')

rows = []
for div_mulu in div_mulus:
    # The group heading lives in <div class="mulu-title"><center><h2>.
    div_h2 = div_mulu.xpath('./div[@class="mulu-title"]/center/h2/text()')
    if len(div_h2) > 0:
        h2_title = div_h2[0]
        a_s = div_mulu.xpath('./div[@class="box"]/ul/li/a')
        for a in a_s:
            href = a.xpath('./@href')[0]        # chapter link
            box_title = a.xpath('./@title')[0]  # raw "[date] title" string
            # Data cleaning: split the title attribute into date + title.
            match = pattern.search(box_title)
            if match is not None:
                date = match.group(1)
                real_title = match.group(2)
                rows.append((h2_title, real_title, href, date))

# Persist the scraped rows.  newline='' avoids blank lines on Windows;
# utf-8 handles the Chinese chapter titles.
headers = ['title', 'real_title', 'href', 'date']
with open('qiye.csv', 'w', newline='', encoding='utf-8') as f:
    f_csv = csv.writer(f)
    f_csv.writerow(headers)
    f_csv.writerows(rows)
    

在这里插入图片描述

  • 0
    点赞
  • 0
    收藏
    觉得还不错? 一键收藏
  • 0
    评论
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值