# Scrape campus-network news articles with Python (py爬取校园网新闻信息)

import requests
from bs4 import BeautifulSoup
from datetime import datetime
import re


def gzcc_content_clicks(content_url):
    """Return the click (hit) count for a GZCC news article.

    The click count is not embedded in the article page; it is served by a
    separate counter API, keyed by the numeric article id taken from the
    article URL (e.g. '.../9183.html' -> id '9183').

    Parameters:
        content_url: URL of the article page, ending in '<digits>.html'.

    Returns:
        int: the click count.

    Raises:
        AttributeError: if the URL or the API response does not match the
            expected patterns (re.search returns None).
    """
    # Article id: a run of 2+ digits immediately before the trailing '.html'.
    content_id = re.search(r'(\d{2,})\.html', content_url).group(1)
    click_url = 'http://oa.gzcc.cn/api.php?op=count&id={}&modelid=80'.format(content_id)
    resp = requests.get(click_url)
    resp.encoding = 'utf-8'
    # The API returns a JS snippet like $('#hits').html('123'); extract the number.
    click_number = re.search(r'\(\'#hits\'\)\.html\(\'(\d+)\'\)', resp.text).group(1)
    return int(click_number)


def gzcc_content_info(content_url):
    """Fetch one GZCC news article page and extract its metadata.

    Parameters:
        content_url: URL of the article detail page.

    Returns:
        dict with keys:
            'author', 'examine', 'source', 'photography' (str; a single
            space " " when the field is absent from the info line),
            'time' (datetime parsed from 'YYYY-MM-DD HH:MM:SS'),
            'title' (str), 'url' (str), 'content' (str).

    Raises:
        AttributeError / IndexError: if the page lacks the expected
            '.show-info', '.show-title' or '#content' elements or the
            timestamp pattern.
    """
    content_info = {}
    resp = requests.get(content_url)
    resp.encoding = 'utf-8'
    soup = BeautifulSoup(resp.text, 'html.parser')

    # The '.show-info' line packs all metadata into one string, with fields
    # separated by non-breaking-space (\xa0) padding.
    # NOTE(review): the trailing '[审核]?' etc. are character classes, not
    # literal lookaheads — patterns kept verbatim to preserve behavior.
    match_str = {'author': r'作者:(.*)\s+[审核]?',
                 'examine': r'审核:(.*)\s+[来源]?',
                 'source': r'来源:(.*)\s+[摄影]?',
                 'photography': r'摄影:(.*)\s+[点击]'}
    remarks = soup.select('.show-info')[0].text
    for key, pattern in match_str.items():
        # Single re.search replaces the original re.match('.*'+pattern) guard
        # followed by re.search: the guard's '.*' could not cross newlines and
        # wrongly blanked fields appearing after a line break.
        found = re.search(pattern, remarks)
        if found:
            content_info[key] = found.group(1).split("\xa0")[0]
        else:
            content_info[key] = " "

    time = re.search(r'\d{4}-\d{2}-\d{2}\s\d{2}:\d{2}:\d{2}', remarks).group()
    content_info['time'] = datetime.strptime(time, '%Y-%m-%d %H:%M:%S')
    content_info['title'] = soup.select('.show-title')[0].text
    content_info['url'] = content_url
    content_info['content'] = soup.select('#content')[0].text

    return content_info


def main():
    """Crawl the GZCC campus-news listing page and print each article's
    metadata dict and click count."""
    url = 'http://news.gzcc.cn/html/xiaoyuanxinwen/'
    res = requests.get(url)
    res.encoding = 'utf-8'
    soup = BeautifulSoup(res.text, 'html.parser')

    # First '.news-list' block holds the article <li> entries for this page;
    # each entry's first <a> links to the article detail page.
    news_list = soup.select('.news-list')[0]
    for item in news_list.select('li'):
        link = item.select('a')[0]['href']
        print(gzcc_content_info(link))
        print(gzcc_content_clicks(link))


# Guard so importing this module no longer triggers the crawl as a side effect.
if __name__ == '__main__':
    main()

  

# Reposted from: https://www.cnblogs.com/www924121851/p/8707436.html

评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值