Assignment requirements: https://edu.cnblogs.com/campus/gzcc/GZCC-16SE1/homework/3002
0. Get the click count from a news url, and wrap it up as a function
- newsUrl
- newsId (re.search())
- clickUrl (str.format())
- requests.get(clickUrl)
- re.search() / .split()
- str.lstrip(), str.rstrip() (an alternative sketch using these follows the code below)
- int
- wrap it up as a function
- also wrap the retrieval of the news publication time, and its type conversion, as a function
# Get the click count of a news article
def clickCount(url):
    newsId = re.search(r'/(\d+)\.html', url).group(1)
    timeUrl = 'http://oa.gzcc.cn/api.php?op=count&id={}&modelid=80'.format(newsId)
    clickTime = re.findall(r'\d+', requests.get(timeUrl).text.split(';')[3])[0]
    return int(clickTime)  # convert to int, as the assignment requires

# Get the news publication time and convert its type
def newsDateTime(head):
    date = head[0][5:]  # drop the "发布时间:" prefix
    time = head[1]
    fmt = '%Y-%m-%d %H:%M:%S'  # renamed from `format` to avoid shadowing the builtin
    return datetime.strptime(date + ' ' + time, fmt)
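The requirement list above also mentions str.lstrip()/str.rstrip() as a way to trim the count out of the API response. A minimal sketch of that alternative, assuming the relevant fragment of the response looks like $('#hits').html('1234') (the exact response text is an assumption here, so treat this as illustrative only):

import re
import requests

# Alternative to re.findall: strip the JS wrapper characters off the fragment.
# lstrip/rstrip remove *characters from a set*, not a prefix/suffix string,
# so this only works because no digit appears in the stripped sets.
def clickCountStrip(url):
    newsId = re.search(r'/(\d+)\.html', url).group(1)
    clickUrl = 'http://oa.gzcc.cn/api.php?op=count&id={}&modelid=80'.format(newsId)
    fragment = requests.get(clickUrl).text.split(';')[3]  # assumed fragment shape
    return int(fragment.lstrip("$('#hits).html(").rstrip("');"))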
1. Get the news details from a news url: dictionary, anews
# Get the news details
def anews(url):
    newsDetail = {}
    get = requests.get(url)
    get.encoding = 'utf-8'
    soup = BeautifulSoup(get.text, 'html.parser')
    newsDetail['title'] = soup.select('.show-title')[0].text  # news title
    head = soup.select('.show-info')[0].text.split()
    newsDetail['datetime'] = newsDateTime(head)  # publication time
    newsDetail['clickTime'] = clickCount(url)  # click count
    newsDetail['content'] = soup.select('.show-content')[0].text  # news content
    newsDetail['url'] = url
    return newsDetail
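A quick usage check, using a hypothetical article url in the /<id>.html pattern the site uses (the path and id below are made up; substitute any real article page):

# Hypothetical article url, for illustration only
demo = anews('http://news.gzcc.cn/html/2019/xiaoyuanxinwen_0404/11155.html')
print(demo['title'], demo['clickTime'])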
2. Get the news urls from a list-page url: list append(dict), alist
# Get the news urls from a news list page
def alist(listUrl):
    get = requests.get(listUrl)
    get.encoding = 'utf-8'
    soup = BeautifulSoup(get.text, 'html.parser')
    newsList = []
    for news in soup.select('li'):
        if len(news.select('.news-list-title')) > 0:  # skip <li> items that are not news entries
            newsUrl = news.select('a')[0]['href']
            newsList.append(newsUrl)
    return newsList
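For example, fetching list page 64 (the same url pattern the next step loops over) should return the article urls on that page:

pageUrls = alist('http://news.gzcc.cn/html/xiaoyuanxinwen/64.html')
print(len(pageUrls), pageUrls[:2])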
3. Generate the urls of all the list pages and fetch all the news: list extend(list), allnews
*Each student crawls the 10 list pages starting from the tail digits of their student ID
# Crawl list pages 64 to 73 (range(64, 74) stops before 74, giving ten pages)
url = []
for i in range(64, 74):
    url.extend(alist('http://news.gzcc.cn/html/xiaoyuanxinwen/{}.html'.format(i)))
allnews = []
for i in url:
    allnews.append(anews(i))
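To start from the tail digits of a student ID instead of hard-coding 64, a small sketch (the ID value is a made-up example):

studentId = '201806120064'  # made-up example ID
start = int(studentId[-2:])  # last two digits, e.g. 64
url = []
for i in range(start, start + 10):
    url.extend(alist('http://news.gzcc.cn/html/xiaoyuanxinwen/{}.html'.format(i)))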
4. Set a reasonable crawl interval
import time
import random
time.sleep(random.random()*3)
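Applied to the crawl loop from step 3, the sleep goes inside the loop so each article request is spaced out, which is how the complete code below does it:

for i in url:
    allnews.append(anews(i))
    time.sleep(random.random()*3)  # pause 0-3 seconds between requests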
5. Do simple data processing with pandas and save the result
Save to a csv or excel file
newsdf.to_csv(r'F:\duym\爬虫\gzccnews.csv')
# Save to file
pd.Series(allnews)  # note: this Series is created but never used
newsdf = pd.DataFrame(allnews)
newsdf.to_csv('news.csv', encoding='utf-8')
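The excel variant mentioned above is a one-liner, assuming an excel writer engine such as openpyxl is installed:

# Requires an excel engine, e.g. `pip install openpyxl`
newsdf.to_excel('news.xlsx', index=False)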
Screenshot of the run:
6. Complete code
import requests
from bs4 import BeautifulSoup
from datetime import datetime
import re
import pandas as pd
import time
import random
# Get the click count of a news article
def clickCount(url):
    newsId = re.search(r'/(\d+)\.html', url).group(1)
    timeUrl = 'http://oa.gzcc.cn/api.php?op=count&id={}&modelid=80'.format(newsId)
    clickTime = re.findall(r'\d+', requests.get(timeUrl).text.split(';')[3])[0]
    return int(clickTime)
# Get the news publication time and convert its type
def newsDateTime(head):
    date = head[0][5:]  # drop the "发布时间:" prefix
    time = head[1]
    fmt = '%Y-%m-%d %H:%M:%S'
    return datetime.strptime(date + ' ' + time, fmt)
# Get the news details
def anews(url):
    newsDetail = {}
    get = requests.get(url)
    get.encoding = 'utf-8'
    soup = BeautifulSoup(get.text, 'html.parser')
    newsDetail['title'] = soup.select('.show-title')[0].text  # news title
    head = soup.select('.show-info')[0].text.split()
    newsDetail['datetime'] = newsDateTime(head)  # publication time
    newsDetail['clickTime'] = clickCount(url)  # click count
    newsDetail['content'] = soup.select('.show-content')[0].text  # news content
    newsDetail['url'] = url
    return newsDetail
# Get the news urls from a news list page
def alist(listUrl):
    get = requests.get(listUrl)
    get.encoding = 'utf-8'
    soup = BeautifulSoup(get.text, 'html.parser')
    newsList = []
    for news in soup.select('li'):
        if len(news.select('.news-list-title')) > 0:  # skip <li> items that are not news entries
            newsUrl = news.select('a')[0]['href']
            newsList.append(newsUrl)
    return newsList
# Crawl list pages 64 to 73 (range(64, 74) stops before 74, giving ten pages)
url = []
for i in range(64, 74):
    url.extend(alist('http://news.gzcc.cn/html/xiaoyuanxinwen/{}.html'.format(i)))
allnews = []
for i in url:
    allnews.append(anews(i))
    time.sleep(random.random()*3)  # pause 0-3 seconds between requests
# Save to file
pd.Series(allnews)  # note: this Series is created but never used
newsdf = pd.DataFrame(allnews)
newsdf.to_csv('news.csv', encoding='utf-8')
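As a quick sanity check (a suggestion, not part of the original assignment), the csv can be read back and inspected:

check = pd.read_csv('news.csv')
print(check.shape)  # rows x columns
print(check.head())  # first five records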