# News crawler
# Goal: download every news article from dahe.cn (Dahe Net) to local disk.
# Approach:
#   1. Fetch the front page and extract all news links with a regex.
#   2. Fetch each article in turn and save it locally.
import re
import os
import urllib.request
import urllib.error
import random
# Pool of browser User-Agent strings; one is picked at random per run so the
# requests look like ordinary browser traffic.
USER_AGENTS = [
    "Mozilla/5.0 (Windows NT 6.3; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.95 Safari/537.36",
    "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/35.0.1916.153 Safari/537.36",
    "Mozilla/5.0 (Windows NT 6.1; WOW64; rv:30.0) Gecko/20100101 Firefox/30.0",
    "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_2) AppleWebKit/537.75.14 (KHTML, like Gecko) Version/7.0.3 Safari/537.75.14",
    "Mozilla/5.0 (compatible; MSIE 10.0; Windows NT 6.2; Win64; x64; Trident/6.0)",
]

MAIN_URL = "https://www.dahe.cn/"
LINKS_FILE = "C:\\Users\\gaoxingyuan\\Desktop\\大河网新闻链接.txt"
NEWS_DIR = "C:\\Users\\gaoxingyuan\\Desktop\\大河网新闻\\"
# Anchored to the front page's news-list markup; captures (href, title) pairs.
NEWS_LINK_PAT = re.compile(
    r'<li class="dh_modular_news"><a href="(.*?)" target="_blank">(.*?)</a>'
)
# Seconds to wait per request (the original 1 s timed out too easily).
TIMEOUT = 10


def _build_opener():
    """Return a urllib opener that sends a randomly chosen User-Agent."""
    opener = urllib.request.build_opener()
    opener.addheaders = [("User-Agent", random.choice(USER_AGENTS))]
    return opener


def _safe_filename(title):
    """Replace characters Windows forbids in filenames; never return empty."""
    return re.sub(r'[\\/:*?"<>|]', "_", title).strip() or "untitled"


def main():
    """Scrape the dahe.cn front page for article links, record them in
    LINKS_FILE, then download each article into NEWS_DIR as <title>.html.

    Per-article failures are printed and skipped so one bad link does not
    abort the whole run.
    """
    opener = _build_opener()
    # Use the header-carrying opener for the front page too; the original
    # fetched it with a plain urlopen and never sent the User-Agent.
    html = opener.open(MAIN_URL, timeout=TIMEOUT).read().decode("utf-8")
    articles = NEWS_LINK_PAT.findall(html)

    os.makedirs(NEWS_DIR, exist_ok=True)
    with open(LINKS_FILE, "w", encoding="utf-8") as links:
        for url, title in articles:
            links.write(title + ":" + url + "\n")
            try:
                body = opener.open(url, timeout=TIMEOUT).read()
                # Write straight to the final name. The original saved to a
                # fixed temp file and os.rename()d it, which raises on Windows
                # when the target already exists and breaks on titles that
                # contain characters illegal in filenames.
                path = os.path.join(NEWS_DIR, _safe_filename(title) + ".html")
                with open(path, "wb") as out:
                    out.write(body)
            except Exception as e:
                # Best-effort per article: log and continue with the rest.
                print(e)


if __name__ == "__main__":
    main()
# NOTE(review): the two lines below are stray footer text from the web page
# this script was copied from; commented out so the file parses.
# 爬取大河新闻网新闻
# 最新推荐文章于 2024-09-15 22:31:42 发布