# --- 0_originJson_writeURL_inTxt.py ---
# Fetch the enterprise "example" JSON feed and append every entry's URL,
# normalised to carry an http:// scheme, to a plain-text output file.
import urllib.request
import json

origin_json_data = urllib.request.urlopen(
    r"http://www.txwz.qq.com/lib/index.php?m=enterprise&a=get_exsample"
).read()
ndata = json.loads(origin_json_data)  # decoded JSON payload

file_name = "f:/2017-05-16.txt"  # output file collecting the extracted URLs
# ("/", "\" or no separator all work on Windows)

# Context manager guarantees the file is flushed/closed even if a write fails.
with open(file_name, "a", encoding="utf-8") as file_open:
    # Each entry carries the URL under "n" ("bn" holds a type label we don't need).
    for entry in ndata["data"]:
        url_data = entry["n"]
        # BUG FIX: the original tested `"http" not in url_data`, which wrongly
        # skips the prefix for scheme-less URLs whose path merely contains
        # "http". Only a leading scheme counts.
        if not url_data.startswith("http"):
            url_data = "http://" + url_data
        file_open.write(url_data + "\n")  # one URL per line
print("txt_write finish")
# --- 1_txt_openableUrl_saveInTxt.py ---
# Read candidate URLs from a text file, check each one is reachable, append
# the reachable ones to f:/URL_open.txt, and save each page's HTML source to
# f:/<count>_<url>.txt.
import urllib.request
import urllib.error  # explicit import; the original relied on urllib.request pulling it in
import time
import requests  # third-party: downloads the page body for saving

# Opener with a browser-like User-Agent so naive bot filters let us through.
opener = urllib.request.build_opener()
opener.addheaders = [('User-agent', 'Mozilla/49.0.2')]

# Input file holding the URLs to probe, one per line.
with open('f:/1.txt') as url_file:
    aa = [line.replace('\n', '') for line in url_file]
print(aa)
print('开始检查:')

count = 0  # number of URLs that opened successfully

for tempUrl in aa:
    try:
        opener.open(tempUrl)  # reachability probe; raises on HTTP/URL errors
        print(tempUrl + '没问题')
        # Open/append/close per URL so a later crash cannot lose earlier results.
        with open("f:/URL_open.txt", "a") as newfile:
            newfile.write(tempUrl + "\n")
        count = count + 1
        # Download and save the page source.
        html = requests.get(tempUrl)
        # BUG FIX: the original used tempUrl[7:], which only strips "http://"
        # (https URLs kept "s://...") and left "/" in the name, producing an
        # invalid Windows filename and an OSError on open(). Strip any scheme
        # and replace path separators instead.
        safe_name = tempUrl.split("://", 1)[-1].replace("/", "_")
        tempfileName = "f:/" + str(count) + "_" + safe_name + ".txt"
        with open(tempfileName, 'a', encoding='utf-8') as f:
            f.write(html.text)
    except urllib.error.HTTPError:
        print(tempUrl + '=访问页面出错')
        time.sleep(2)
    except urllib.error.URLError:
        print(tempUrl + '=访问页面出错')
        time.sleep(2)
    except requests.RequestException:
        # BUG FIX: requests.get can still fail (timeout, reset, redirect to a
        # dead host) after the probe succeeded; the original crashed here.
        print(tempUrl + '=访问页面出错')
        time.sleep(2)
    time.sleep(0.1)  # brief pause between probes to be polite to servers
print("txt_openableUrl_saveInTxt-------->finish")