1. Fetch all news items for a given page number
import requests
import json
import re
from bs4 import BeautifulSoup
from datetime import datetime

commentURL = 'http://comment5.news.sina.com.cn/page/info?version=1&format=js&channel=gn&\
newsid=comos-{}&group=&compress=0&ie=utf-8&oe=utf-8&page=1&\
page_size=20&jsvar=loader_1509032030204_38862404'

def getCommentCounts(newsurl):
    m = re.search('doc-i(.*).shtml', newsurl)
    newsid = m.group(1)  # extract the article id captured by the "()" group
    comments = requests.get(commentURL.format(newsid))
    # peel off the "var loader_...=" JavaScript wrapper to leave pure JSON
    jd = json.loads(comments.text.strip('var loader_1509032030204_38862404='))
    return jd['result']['count']['total']

def getNewsDetail(newsurl):
    result = {}
    res = requests.get(newsurl)
    res.encoding = 'utf-8'
    soup = BeautifulSoup(res.text, 'html.parser')
    result['title'] = soup.select('#artibodyTitle')[0].text
    result['newssource'] = soup.select('.time-source span a')[0].text
    timesource = soup.select('.time-source')[0].contents[0].strip()
    result['dt'] = datetime.strptime(timesource, '%Y年%m月%d日%H:%M')
    # skip the last <p>, which holds the editor byline rather than body text
    result['article'] = ' '.join([p.text.strip() for p in soup.select('#artibody p')[:-1]])
    result['editor'] = soup.select('.article-editor')[0].text.lstrip('责任编辑:')
    result['comments'] = getCommentCounts(newsurl)
    return result

def parseListLink(url):
    newsdetails = []
    res = requests.get(url)
    # drop the JSONP wrapper "newsloadercallback(...);" to leave pure JSON
    jd = json.loads(res.text.lstrip(' newsloadercallback(').rstrip(');'))
    for ent in jd['result']['data']:
        newsdetails.append(getNewsDetail(ent['url']))
    return newsdetails
url = 'http://api.roll.news.sina.com.cn/zt_list?channel=news&cat_1=gnxw&cat_2==gdxw1||=gatxw||=zs-pl||=mtjj&level==1||=2&show_ext=1&show_all=1&show_num=22&tag=1&format=json&page=1&callback=newsloadercallback&_=1509170858142'
parseListLink(url)
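As a quick sanity check before crawling a whole listing page, getNewsDetail can also be run against a single article; the URL below is only a placeholder for any Sina article whose address matches the doc-i....shtml pattern:
sample = 'http://news.sina.com.cn/c/nd/2017-10-28/doc-ifynfvar5081663.shtml'  # placeholder article URL
detail = getNewsDetail(sample)
print(detail['title'], detail['comments'])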
2. Fetch article content across multiple pages
# scrape article content across multiple list pages
url = 'http://api.roll.news.sina.com.cn/zt_list?channel=news&cat_1=gnxw&cat_2==gdxw1||=gatxw||=zs-pl||=mtjj&level==1||=2&show_ext=1&show_all=1&show_num=22&tag=1&format=json&page={}&callback=newsloadercallback&_=1509170858142'
news_total = []
for i in range(1, 3):  # pages 1 and 2; widen the range to crawl more pages
    newsurl = url.format(i)
    newsary = parseListLink(newsurl)
    news_total.extend(newsary)
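Since show_num=22 asks for 22 entries per list page, a quick length check confirms both pages were collected:
print(len(news_total))  # roughly 44 items for two pages of 22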
3. Save the data to an Excel spreadsheet
import pandas
df = pandas.DataFrame(news_total)
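Building the DataFrame alone does not yet produce the Excel file this section promises; a minimal sketch of writing it out, with 'news.xlsx' as a hypothetical filename and assuming an Excel engine such as openpyxl is installed:
df.head()                 # preview the first few rows
df.to_excel('news.xlsx')  # hypothetical filename; requires openpyxl or xlsxwriter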
4. Save the data to the news.sqlite database
# save the data into the news.sqlite database
import sqlite3
with sqlite3.connect('news.sqlite') as db:
    df.to_sql('news', con = db)
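Note that to_sql raises a ValueError if the news table already exists, so a re-runnable variant passes if_exists explicitly:
with sqlite3.connect('news.sqlite') as db:
    df.to_sql('news', con = db, if_exists = 'replace')  # or 'append' to keep earlier rows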
# read the data back out of the news.sqlite database
with sqlite3.connect('news.sqlite') as db:
    df2 = pandas.read_sql_query('SELECT * FROM news', con = db)
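A quick round-trip check: the row count should match len(news_total), and there is one extra column because to_sql also stores the DataFrame index by default:
print(df2.shape)  # rows should equal len(news_total); the extra 'index' column comes from to_sql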