Study Notes: Python Web Crawler (Part 2)

1. Fetching all news on a specified page number

import requests
import json
import re
from bs4 import BeautifulSoup
from datetime import datetime


# Comment-count API endpoint; '{}' is filled in with the news id
commentURL = 'http://comment5.news.sina.com.cn/page/info?version=1&format=js&channel=gn&\
newsid=comos-{}&group=&compress=0&ie=utf-8&oe=utf-8&page=1&\
page_size=20&jsvar=loader_1509032030204_38862404'


def getCommentCounts(newsurl):
    m = re.search(r'doc-i(.*)\.shtml', newsurl)
    newsid = m.group(1)  # capture the group inside the parentheses, i.e. the news id
    comments = requests.get(commentURL.format(newsid))
    # The response is 'var loader_...=<json>'; str.strip() removes a character
    # *set*, not a prefix, so split on the first '=' instead
    jd = json.loads(comments.text.split('=', 1)[1])
    return jd['result']['count']['total']


def getNewsDetail(newsurl):
    result = {}
    res = requests.get(newsurl)
    res.encoding = 'utf-8'
    soup = BeautifulSoup(res.text, 'html.parser')
    result['title'] = soup.select('#artibodyTitle')[0].text
    result['newssource'] = soup.select('.time-source span a')[0].text
    timesource = soup.select('.time-source')[0].contents[0].strip()
    result['dt'] = datetime.strptime(timesource, '%Y年%m月%d日%H:%M')
    # drop the last <p>, which holds the editor's name
    result['article'] = ' '.join([p.text.strip() for p in soup.select('#artibody p')[:-1]])
    # lstrip() removes a character *set*, not a prefix; replace() drops the label safely
    result['editor'] = soup.select('.article-editor')[0].text.replace('责任编辑:', '').strip()
    result['comments'] = getCommentCounts(newsurl)
    return result
    
def parseListLink(url):
    newsdetails = []
    res = requests.get(url)
    # lstrip/rstrip take character *sets*; this works here only because the
    # JSON body starts with '{' and ends with '}'
    jd = json.loads(res.text.lstrip('  newsloadercallback(').rstrip(');'))
    for ent in jd['result']['data']:
        newsdetails.append(getNewsDetail(ent['url']))
    return newsdetails
    
url = 'http://api.roll.news.sina.com.cn/zt_list?channel=news&cat_1=gnxw&cat_2==gdxw1||=gatxw||=zs-pl||=mtjj&level==1||=2&show_ext=1&show_all=1&show_num=22&tag=1&format=json&page=1&callback=newsloadercallback&_=1509170858142'
parseListLink(url)
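
In a notebook the call above simply echoes the returned list. A quick sketch for inspecting it programmatically (the variable name is illustrative; the keys are the ones set in getNewsDetail):

news = parseListLink(url)
print(len(news))                    # up to 22 items, per show_num=22 in the URL
print(news[0]['title'], news[0]['dt'], news[0]['comments'])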

2. Fetching article bodies across multiple pages

# Crawl article bodies from multiple list pages
url = 'http://api.roll.news.sina.com.cn/zt_list?channel=news&cat_1=gnxw&cat_2==gdxw1||=gatxw||=zs-pl||=mtjj&level==1||=2&show_ext=1&show_all=1&show_num=22&tag=1&format=json&page={}&callback=newsloadercallback&_=1509170858142'
news_total = []
for i in range(1, 3):  # pages 1 and 2; widen the range to crawl more pages
    newsurl = url.format(i)
    newsary = parseListLink(newsurl)
    news_total.extend(newsary)
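
The loop above fires requests back to back. A small variation, not in the original notes, that pauses between list pages to be gentler on the server:

import time

news_total = []
for i in range(1, 3):
    news_total.extend(parseListLink(url.format(i)))
    time.sleep(1)  # pause between pages; the one-second interval is an arbitrary choice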

3. Saving the data to an Excel spreadsheet

import pandas
df = pandas.DataFrame(news_total)
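
The heading promises an Excel file, but the snippet stops at building the DataFrame. A minimal sketch of the missing export step (the filename is an assumption, and .xlsx output needs the openpyxl package installed):

df.to_excel('news.xlsx')  # assumed filename; requires openpyxl for .xlsx output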

4. Saving the data to a news.sqlite database

# Save the data into the news.sqlite database
import sqlite3
with sqlite3.connect('news.sqlite') as db:
    df.to_sql('news', con=db, if_exists='replace')  # replace the table on re-runs

# Read the data back out of the news.sqlite database
with sqlite3.connect('news.sqlite') as db:
    df2 = pandas.read_sql_query('SELECT * FROM news', con = db)
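
A quick way to confirm the round trip (the column names follow the dict keys set in getNewsDetail):

print(df2.shape)                                # row count should equal len(news_total)
print(df2[['title', 'dt', 'comments']].head())  # spot-check a few columns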
