Assignment source: https://edu.cnblogs.com/campus/gzcc/GZCC-16SE1/homework/3002
0. Get the click count from a news URL, and wrap it up as a function (see the sketch after this list)
- newsUrl
- newsId (re.search())
- clickUrl (str.format())
- requests.get(clickUrl)
- re.search()/.split()
- str.lstrip(), str.rstrip()
- int
- wrap the above into a function
- also wrap getting the news publication time, with its type conversion, into a function
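Taken together, these sub-steps form a single parsing chain. A minimal sketch of that chain, where the sample response string is hypothetical, modeled on the jsonp-style text the click-count API sends back:

import re

newsUrl = 'http://news.gzcc.cn/html/2019/xiaoyuanxinwen_0404/11155.html'
newsId = re.search(r'(\d+)\.html', newsUrl).group(1)                             # newsId via re.search()
clickUrl = 'http://oa.gzcc.cn/api.php?op=count&id={}&modelid=80'.format(newsId)  # str.format()

sample = ".html('1024');"                                                        # hypothetical API response tail
clicks = int(sample.split('.html')[-1].lstrip("('").rstrip("');"))               # .split(), lstrip/rstrip, int
print(newsId, clickUrl, clicks)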
1. Get the news details from a news URL: a dict, anews
2. Get the news URLs from a list-page URL: list.append(dict), alist
3. Generate the URLs of all the list pages and fetch all the news: list.extend(list), allnews
* Each student crawls the 10 list pages starting from the tail digit of their student ID (a sketch follows below)
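One way to satisfy the per-student rule is to derive the starting page from the student ID. The ID below is made up, and note that the site's front page may use a different URL than the numbered pages:

studentId = '201806120005'            # hypothetical student ID, tail digit 5
start = int(studentId[-1]) or 1       # numbered list pages start at 1, so map a 0 tail to 1
listUrls = ['http://news.gzcc.cn/html/xiaoyuanxinwen/{}.html'.format(i)
            for i in range(start, start + 10)]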
4. Set a reasonable crawl interval:
import time
import random
time.sleep(random.random()*3)
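Placed inside the crawl loop, this call separates every two page requests by a random pause of at most three seconds. A sketch reusing listUrls and alist() from above:

allnews = []
for listUrl in listUrls:
    allnews.extend(alist(listUrl))    # step 3: list.extend(list)
    time.sleep(random.random() * 3)   # sleep 0-3 seconds between page requests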
5. Do simple data processing with pandas and save the result
Save to a csv or excel file, e.g.:
newsdf.to_csv(r'F:\duym\爬虫\gzccnews.csv')
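To save as Excel instead, pandas' to_excel works the same way (writing .xlsx requires the openpyxl package; the path is just an example):

newsdf.to_excel(r'F:\duym\爬虫\gzccnews.xlsx')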
Code:
import requests
from bs4 import BeautifulSoup
from datetime import datetime
import re
import sqlite3
import pandas as pd
import time
import random

# Get the click count of a news article
def click(url):
    newsId = re.findall(r'\d{1,5}', url)[-1]   # fixed: the original pattern '\d[1,5]' was a typo for '\d{1,5}'
    clickUrl = 'http://oa.gzcc.cn/api.php?op=count&id={}&modelid=80'.format(newsId)
    resClick = requests.get(clickUrl)
    newsClick = int(resClick.text.split('.html')[-1].lstrip("('").rstrip("');"))
    return newsClick

# Get the publication time of a news article and convert it to datetime
def newsdt(showinfo):
    newsDate = showinfo.split()[0].split(':')[1]
    newsTime = showinfo.split()[1]
    newsDT = newsDate + ' ' + newsTime
    dt = datetime.strptime(newsDT, '%Y-%m-%d %H:%M:%S')
    return dt

# Get the details of one news article as a dict
def anews(url):
    newsDetail = {}
    res = requests.get(url)
    res.encoding = 'utf-8'
    soup = BeautifulSoup(res.text, 'html.parser')
    newsDetail['newsTitle'] = soup.select('.show-title')[0].text
    showinfo = soup.select('.show-info')[0].text
    newsDetail['newsDT'] = newsdt(showinfo)
    newsDetail['newsClick'] = click(url)
    return newsDetail

# Get all the news dicts on one list page
def alist(url):
    res = requests.get(url)   # fixed: the original read the global listUrl instead of the parameter
    res.encoding = 'utf-8'
    soup = BeautifulSoup(res.text, 'html.parser')
    newsList = []
    for news in soup.select('li'):
        if len(news.select('.news-list-title')) > 0:
            newsUrl = news.select('a')[0]['href']
            newsDesc = news.select('.news-list-description')[0].text
            newsDict = anews(newsUrl)
            newsDict['description'] = newsDesc
            newsList.append(newsDict)
    return newsList

# Quick single-page test:
#url = 'http://news.gzcc.cn/html/2019/xiaoyuanxinwen_0404/11155.html'
#anews(url)

# Quick check of the list-page structure (front page):
#res = requests.get('http://news.gzcc.cn/html/xiaoyuanxinwen')
#res.encoding = 'utf-8'
#soup = BeautifulSoup(res.text, 'html.parser')
#for news in soup.select('li'):
#    if len(news.select('.news-list-title')) > 0:
#        newsUrl = news.select('a')[0]['href']

# Get all the news from list pages 2 to 11
allnews = []
for i in range(2, 12):
    listUrl = 'http://news.gzcc.cn/html/xiaoyuanxinwen/{}.html'.format(i)
    allnews.extend(alist(listUrl))
    time.sleep(random.random() * 3)   # a random 0-3 s pause between requests (step 4)

# Save to csv, sorted by click count in descending order
newsdf = pd.DataFrame(allnews)
newsdf = newsdf.sort_values(by='newsClick', ascending=False)   # fixed: sort_index(by=...) is no longer valid pandas
newsdf.to_csv(r'F:\hh\bb.csv')

# Write to sqlite, then query the news with more than 385 clicks
with sqlite3.connect('gzccnewsdb.sqlite') as db:
    newsdf.to_sql('gzccnewsdb', con=db, if_exists='replace')   # added: the original queried before any table was written
    df2 = pd.read_sql_query('SELECT * FROM gzccnewsdb', con=db)
print(df2[df2['newsClick'] > 385])
Running results: