# pandaSpider.py
# -*- coding: utf-8 -*-
import scrapy,json
from crawl_pandatv.items import PandatvItem
import logging
class PandaspiderSpider(scrapy.Spider):
    """Fetch the xingyan.panda.tv room-list API and yield one PandatvItem
    per live room found in the JSON payload."""

    name = 'panda'
    # Fix: must match the API host; the original 'baidu.com' was unrelated
    # to the crawled site, so OffsiteMiddleware would have filtered any
    # follow-up request this spider ever scheduled.
    allowed_domains = ['panda.tv']
    start_urls = ['http://m.api.xingyan.panda.tv/room/list/?pageno=1&pagenum=299',]

    def parse(self, response):
        """Parse the room-list JSON and yield one PandatvItem per room.

        Raises json.JSONDecodeError / KeyError if the API payload shape
        changes — letting the crawl fail loudly is intentional here.
        """
        jsondata = json.loads(response.text)
        # Diagnostic output belongs on the spider's own logger at DEBUG,
        # not the root logger at WARNING (original dumped the full payload).
        self.logger.debug("room list payload type: %s", type(jsondata))
        data_list = jsondata['data']['items']
        for each in data_list:
            item = PandatvItem()
            # Copy exactly the fields the item declares, one per API key.
            for field in ('xid', 'nickName', 'avatar', 'city',
                          'personnum', 'name'):
                item[field] = each[field]
            yield item
# pipelines.py
# -*- coding: utf-8 -*-
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://doc.scrapy.org/en/latest/topics/item-pipeline.html
import json,scrapy,os
from scrapy.utils.project import get_project_settings
from scrapy.pipelines.images import ImagesPipeline
from scrapy.exceptions import DropItem
import logging
class CrawlPandatvPipeline(object):
    """Append every scraped item to panda.json, one JSON object per line
    (each line terminated with ',\\n' as in the original output format)."""

    def __init__(self):
        # Open with an explicit encoding; together with ensure_ascii=False
        # below this keeps Chinese nicknames/cities readable in the file
        # instead of being mangled into \uXXXX escapes.
        self.output = open('panda.json', 'w', encoding='utf-8')

    def process_item(self, item, spider):
        """Serialize the item to one line of panda.json.

        Returns the item unchanged so later pipelines still receive it.
        """
        jsontext = json.dumps(dict(item), ensure_ascii=False) + ',\n'
        self.output.write(jsontext)
        return item

    def close_spider(self, spider):
        # Close the handle so buffered lines are flushed when the crawl ends.
        self.output.close()
class ImgPipeline(ImagesPipeline):
    """Download each item's avatar image and rename the stored file to
    <nickName>.jpg inside the configured IMAGES_STORE directory."""

    # Storage directory configured in settings.py (IMAGES_STORE).
    IMAGES_STORE = get_project_settings().get("IMAGES_STORE")

    def get_media_requests(self, item, info):
        """Schedule the download of this item's avatar URL."""
        image_url = item["avatar"]
        logging.debug("downloading avatar %s", image_url)
        yield scrapy.Request(image_url)

    def item_completed(self, results, item, info):
        """Rename the downloaded image to the streamer's nickname.

        Raises DropItem when the avatar download failed — the original
        crashed with an IndexError on the empty results list instead.
        """
        images_path = [x["path"] for ok, x in results if ok]
        if not images_path:
            raise DropItem("avatar download failed for %s" % item.get("nickName"))
        # os.path.join is portable; the original hard-coded "\\" and so
        # only worked on Windows.
        src = os.path.join(self.IMAGES_STORE, images_path[0])
        dst = os.path.join(self.IMAGES_STORE, item["nickName"] + ".jpg")
        os.rename(src, dst)
        return item