Source code for scraping Sina society news

The accompanying video is here:

https://edu.hellobi.com/course/81/play/lesson/1761

import requests
from bs4 import BeautifulSoup
import re
import json
from datetime import datetime
import pandas

# Function that returns the comment count for a single article
def getCommentCounts(newsurl):
    m = re.search('doc-i(.+).shtml', newsurl)
    newsid = m.group(1)
    commentURL = 'http://comment5.news.sina.com.cn/page/info?version=1&format=js&channel=sh&newsid=comos-{}&group=&compress=0&ie=utf-8&oe=utf-8&page=1&page_size=20'
    comments = requests.get(commentURL.format(newsid))
    comments.encoding = 'utf-8'
    # the API returns "var data={...}", so strip the JavaScript wrapper before parsing the JSON
    jd = json.loads(comments.text.strip('var data='))
    return jd['result']['count']['total']
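
A quick way to sanity-check the function is to call it on one article. The URL below is only a placeholder that follows the doc-iXXXXXXXXX.shtml pattern the regex expects; substitute any current article from the society channel.

# hypothetical example URL -- replace with a real article from http://news.sina.com.cn/society/
sample_url = 'http://news.sina.com.cn/c/nd/2017-03-31/doc-ifycwymx3569764.shtml'
print(getCommentCounts(sample_url))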

# From an article page, build a dict `result` containing the title, news source, publish time, body text, editor name, and comment count
def getNewsDetail(newsurl):
    result = {}
    res = requests.get(newsurl)
    res.encoding = 'utf-8'
    soup = BeautifulSoup(res.text, 'html.parser')
    result['title'] = soup.select('#artibodyTitle')[0].text
    result['newssource'] = soup.select('.time-source a')[0].text
    # the first child of .time-source is the publish time, e.g. "2017年03月31日10:50"
    timesource = soup.select('.time-source')[0].contents[0].strip()
    result['dt'] = datetime.strptime(timesource, '%Y年%m月%d日%H:%M')
    # join every paragraph of the body except the last one, which holds the editor line
    result['article'] = ' '.join([p.text.strip() for p in soup.select('#artibody p')[:-1]])
    # drop the leading "责任编辑:" label, keeping only the editor's name
    result['editor'] = soup.select('.article-editor')[0].text.lstrip('责任编辑:')
    result['comments'] = getCommentCounts(newsurl)
    return result
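
getNewsDetail can be tried on a single article in the same way; it returns a plain dict, so printing a few keys shows what was extracted. The URL is again just an illustrative placeholder.

# hypothetical example URL -- any article matching the doc-i... pattern will do
detail = getNewsDetail('http://news.sina.com.cn/c/nd/2017-03-31/doc-ifycwymx3569764.shtml')
print(detail['title'], detail['dt'], detail['editor'], detail['comments'])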

'''
# How to get the time, title, and link of the news items in the first page load of the list page
res_1 = requests.get('http://news.sina.com.cn/society/')
res_1.encoding = 'utf-8'
soup = BeautifulSoup(res_1.text, 'html.parser')

for news in soup.select('.news-item'):
    if len(news.select('h2')) > 0:
        h2 = news.select('h2')[0].text
        time = news.select('.time')[0].text
        a = news.select('a')[0]['href']
        print(time, h2, a)
'''

# From the list page, collect the links of all the news items loaded on scroll, then call getNewsDetail(newsurl) on each of them
def parseListLinks(url):
    res = requests.get(url)
    # the rolling-news API returns JSONP: "   newsloadercallback({...});" -- strip the wrapper and parse the JSON inside
    jd = json.loads(res.text.lstrip('   newsloadercallback(').rstrip(');'))
    newsdetails = []
    for each in jd['result']['data']:
        newsdetails.append(getNewsDetail(each['url']))  # reuse getNewsDetail(newsurl) defined above
    return newsdetails



url = 'http://api.roll.news.sina.com.cn/zt_list?channel=news&cat_1=shxw&cat_2==zqsk||=qwys||=shwx||=fz-shyf&level==1||=2&show_ext=1&show_all=1&show_num=22&tag=1&format=json&page={}'
news_total = []
# crawl pages 1 to 5 of the rolling list and collect the details of every article
for i in range(1,6):
    newsurl = url.format(i)
    newsary = parseListLinks(newsurl)
    news_total.extend(newsary)
df = pandas.DataFrame(news_total)

df.to_excel('news.xlsx')
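
Before moving on, a short check of the assembled DataFrame helps confirm that the five pages were actually collected (an optional step, not part of the original script).

print(df.shape)   # expect roughly 5 pages x 22 articles per page
print(df.head())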

import sqlite3
# persist the DataFrame into a SQLite database
with sqlite3.connect('news.sqlite') as db:
    df.to_sql('news', con=db)
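
Note that to_sql raises an error if the script is run again and the news table already exists; pandas' if_exists parameter covers that case. A minimal variant, assuming the same database file:

with sqlite3.connect('news.sqlite') as db:
    df.to_sql('news', con=db, if_exists='replace')   # overwrite the table on repeated runs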

# read the table back to confirm the data was written
with sqlite3.connect('news.sqlite') as db:
    df2 = pandas.read_sql_query('SELECT * FROM news', con=db)
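
Finally, comparing the frame read back from SQLite with the one built in memory is a simple round-trip check.

print(len(df), len(df2))   # the two row counts should match
print(df2.head())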