# Regex-scrape the announcement titles and links from http://data.eastmoney.com/notice/ and append them to a local text file
# -*- coding: UTF-8 -*-
__author__ = 'Intgrp'
import urllib # 网页操作模块,获取网页数据
import re # 正则表达式
def get_content_link_title(url):
    """Fetch *url* and return a list of (link, title) tuples for each announcement.

    Links look like ``/notice/YYYYMMDD/...``; titles are the anchor text.
    """
    page_source = urllib.urlopen(url).read()
    # Anchor tags of the form <a href="/notice/YYYYMMDD/...">title</a>
    anchor_pattern = re.compile(r'<a\shref=\"(/notice/[0-9]{8}/[^"]+)\"[^>]*>([^<]+)<\/a>')
    return anchor_pattern.findall(page_source)
def get_content_num(url):
    """Fetch *url* and return a list whose first element is the total page count (as a string).

    The count is scraped from the pager's "jump to last page" anchor, e.g.
    <a href="..." target="_self" hidefocus="true" title="...">30010</a>,
    where the title bytes are the GBK encoding of the Chinese phrase
    "go to the last page".
    """
    page_source = urllib.urlopen(url).read()
    # NOTE: the \xd7\xaa... bytes are the GBK-encoded title attribute text.
    pager_pattern = re.compile(r'<a.*? title="\xd7\xaa\xb5\xbd\xd7\xee\xba\xf3\xd2\xbb\xd2\xb3">(\d+)</a>')
    return pager_pattern.findall(page_source)
def get_next_page_link(i):
    """Build the listing URL for page number *i*.

    Observed URL scheme (from the site's page source), e.g. page 5:
    http://data.eastmoney.com/Notice/Noticelist.aspx?type=0&market=all&date=&page=5
    """
    base = "http://data.eastmoney.com/Notice/Noticelist.aspx?type=0&market=all&date=&page="
    return base + str(i)
# NOTE(review): dead code — an earlier version of content_Write2Txt that opened
# and closed the file itself on every page. Kept inside a string literal for
# reference; superseded by the version below that reuses an already-open handle.
'''
def content_Write2Txt(filename,content,i):
f = open(filename,'a')
f.writelines("----------page_num=%s----------------\n"%i)
for i in content:
f.writelines(i[0])
f.writelines("\t")
f.writelines(i[1])
f.write("\n")
f.close()
'''
def content_Write2Txt(content, i, out=None):
    """Append one page of scraped results to the output file.

    content: list of (link, title) pairs as returned by get_content_link_title.
    i:       page number, written as a "----------page_num=N----" header line.
    out:     optional writable file object. Defaults to the module-global
             handle ``f`` opened in the __main__ block (kept for backward
             compatibility with the existing call site).

    Fixes the original's loop variable shadowing the page-number parameter
    ``i``, and batches the per-item writes into a single writelines call.
    """
    target = out if out is not None else f  # fall back to the global handle
    lines = ["----------page_num=%s----------------\n" % i]
    # One tab-separated "link<TAB>title" line per announcement.
    for link, title in content:
        lines.append("%s\t%s\n" % (link, title))
    target.writelines(lines)
if __name__=="__main__":
page_num = get_content_num("http://data.eastmoney.com/notice/")#获得总共几页
print "总共%s页"%page_num[0]
f = open("test.txt",'a')
for i in range(1,int(page_num[0])):
url = get_next_page_link(i)#获取第i页url
content=get_content_link_title(url)#得出每页的内容
content_Write2Txt(content,i)#附加形式写入txt文件
print "第%s页已经完成"%i
f.close()
print "write success!"