爬虫python写入csv_新手求教python3如何把dict循环写入csv文件(在进行爬虫时遇到的问题)?...

问 题

爬虫生成dict后,想将其写入csv文件,却出错

使用jupyter notebook,window环境。

具体代码如下

import requests

from multiprocessing.dummy import Pool as ThreadPool

from lxml import etree

import sys

import time

import random

import csv

def spider(url):

header={

'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/56.0.2924.87 Safari/537.36'

}

timeout=random.choice(range(31,50))

html = requests.get(url,header,timeout=timeout)

time.sleep(random.choice(range(8,16)))

selector = etree.HTML(html.text)

content_field = selector.xpath('//*[@class="inner"]/div[3]/div[2]/ul/li')

item ={}

for each in content_field:

g = each.xpath('a/div[1]/div[1]/h3/span/text()')

go = each.xpath('a/div[1]/div[2]/div/h3/text()')

h = each.xpath('a/div[1]/div[2]/div/p/text()[1]')

j= each.xpath('a/div[1]/div[1]/p/text()[2]')

ge = each.xpath('a/div[1]/div[2]/div/p/text()[3]')

x = each.xpath('a/div[1]/div[1]/p/text()[3]')

city = each.xpath('a/div[1]/div[1]/p/text()[1]')

gg = each.xpath('a/div[2]/span/text()')

item['city']="".join(city)

item['hangye']="".join(hangye)

item['guimo']="".join(guimo)

item['gongsi']="".join(gongsi)

item['gongzi']="".join(gongzi)

item['jingyan']="".join(jingyan)

item['xueli']="".join(xueli)

item['gongzuoneirong']="".join(gongzuoneirong)

fieldnames =['city','hangye','guimo','gongsi','gongzi','jingyan','xueli','gongzuoneirong']

with open('bj.csv','a',newline='',errors='ignore')as f:

f_csv=csv.DictWriter(f,fieldnames=fieldnames)

f_csv.writeheader()

f_csv.writerow(item)

if __name__ == '__main__':

pool = ThreadPool(4)

f=open('bj.csv','w')

page = []

for i in range(1,100):

newpage = 'https://www.zhipin.com/c101010100/h_101010100/?query=%E6%95%B0%E6%8D%AE%E8%BF%90%E8%90%A5&page='+str(i) + '&ka=page-' + str(i)

page.append(newpage)

results = pool.map(spider,page)

pool.close()

pool.join()

f.close()

运行上面代码,提示错误为

ValueError: too many values to unpack (expected 2)

通过查询原因是要将dict遍历,需要dict.items()的形式。但在上述代码中如何实现,一直没有理顺,求教各位

解决方案

不好意思哈,现在才有时间来回答你的问题,看到你根据我的建议把代码改过来了,下面我把改过的代码贴出来,我运行过,是没问题的

import requests

from multiprocessing.dummy import Pool

from lxml import etree

import time

import random

import csv

def spider(url):

header = {

'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/56.0.2924.87 Safari/537.36'

}

timeout = random.choice(range(31, 50))

html = requests.get(url, headers=header, timeout=timeout)

time.sleep(random.choice(range(8, 16)))

selector = etree.HTML(html.text)

content_field = selector.xpath('//*[@class="inner"]/div[3]/div[2]/ul/li')

item = {}

for each in content_field:

g = each.xpath('a/div[1]/div[1]/h3/span/text()')

go = each.xpath('a/div[1]/div[2]/div/h3/text()')

h = each.xpath('a/div[1]/div[2]/div/p/text()[1]')

j = each.xpath('a/div[1]/div[1]/p/text()[2]')

ge = each.xpath('a/div[1]/div[2]/div/p/text()[3]')

x = each.xpath('a/div[1]/div[1]/p/text()[3]')

city = each.xpath('a/div[1]/div[1]/p/text()[1]')

gg = each.xpath('a/div[2]/span/text()')

item['city'] = "".join(city)

item['hangye'] = "".join(g)

item['guimo'] = "".join(go)

item['gongsi'] = "".join(h)

item['gongzi'] = "".join(j)

item['jingyan'] = "".join(ge)

item['xueli'] = "".join(x)

item['gongzuoneirong'] = "".join(gg)

fieldnames = ['city', 'hangye', 'guimo', 'gongsi', 'gongzi', 'jingyan', 'xueli', 'gongzuoneirong']

with open('bj.csv', 'a', newline='', errors='ignore')as f:

f_csv = csv.DictWriter(f, fieldnames=fieldnames)

f_csv.writeheader()

f_csv.writerow(item)

if __name__ == '__main__':

f = open('bj.csv', 'w')

page = []

for i in range(1, 100):

newpage = 'https://www.zhipin.com/c101010100/h_101010100/?query=%E6%95%B0%E6%8D%AE%E8%BF%90%E8%90%A5&page=' + str(

i) + '&ka=page-' + str(i)

page.append(newpage)

print(page)

pool = Pool(4)

results = pool.map(spider, page)

pool.close()

pool.join()

f.close()

这里主要是header,你原来是set类型,我修改后是dict类型

这里还需要给你一些建议

你的代码是放到ide还是文本编辑器中运行的?有的东西在ide下明显会报错啊

建议新手从开始学的时候就遵守PEP8规范,别养成了坏习惯,你看看你的命名

扫一扫关注IT屋

微信公众号搜索 “ IT屋 ” ,选择关注与百万开发者在一起

  • 0
    点赞
  • 0
    收藏
    觉得还不错? 一键收藏
  • 0
    评论

“相关推荐”对你有帮助么?

  • 非常没帮助
  • 没帮助
  • 一般
  • 有帮助
  • 非常有帮助
提交
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值