Scrapy爬取新闻
源码:

import scrapy
import json
class DgcSpider(scrapy.Spider):
    """Spider that fetches one news article page and saves its title and
    body text to ``data.json``.

    The target page layout is table-based: the title sits in row 4 and the
    article paragraphs in row 12 of the second table inside
    ``div.article_leftleft``.
    """

    name = 'dgc'

    def start_requests(self):
        """Yield one request per configured URL.

        Returns an iterator of ``scrapy.Request`` objects handled by
        :meth:`parse`.
        """
        dictions = [
            {'url': 'http://nh.cnnb.com.cn/system/2013/07/02/010647240.shtml'},
        ]
        for diction in dictions:
            yield scrapy.Request(
                url=diction['url'],
                callback=self.parse,
                # bypass the dupe filter so re-runs re-fetch the same URL
                dont_filter=True,
                meta={'1': 1},
            )

    def parse(self, response):
        """Extract the article title and body and write them to ``data.json``.

        :param response: the downloaded page (``scrapy.http.Response``).
        """
        res = response.xpath('//div[@class="article_leftleft"]/table[2]')
        title = res.xpath('.//tr[4]/td/b/text()').get()
        # Collect every text fragment of the paragraphs, then rejoin with
        # newlines to reconstruct the article body.
        content = res.xpath('.//tr[12]/td//p//text()').getall()
        article = '\n'.join(content)
        data = json.dumps(
            {'title': title, 'article': article},
            # FIX: without ensure_ascii=False the Chinese text would be
            # written as \uXXXX escapes despite the utf-8 file encoding.
            ensure_ascii=False,
        )
        # FIX: the original opened the file without ever closing it (leaked
        # handle, possibly unflushed data); a context manager guarantees both.
        with open('data.json', 'w', encoding='utf-8') as ff:
            ff.write(data)

    def parsePage(self, response):
        """Placeholder follow-up callback; reads the value passed via meta."""
        meta_ = response.meta['1']
第一步:
找到要爬取页面的 URL,以及要爬取的属性,比如标题和正文。