# Generate the target site list
# Expand 192-168-1-{1..254}.awd.bugku.cn into a text file, one address per line
# ('w' instead of the original 'a+' so rerunning does not duplicate entries)
with open('H:/Python3.0Work/AWD/bugkuwz.txt', mode='w', encoding='utf-8') as new:
    for n in range(1, 255):
        target = '192-168-1-' + str(n) + '.awd.bugku.cn/'
        new.write(target + '\n')
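
# Optional sanity check (a quick sketch; assumes the file above was just
# written): print the first few generated lines, which should look like
# "192-168-1-1.awd.bugku.cn/"
with open('H:/Python3.0Work/AWD/bugkuwz.txt', encoding='utf-8') as check:
    for _ in range(3):
        print(check.readline().strip())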
# Quick liveness check for the generated site list
import socket
import time
import urllib.error
import urllib.request

# Send a browser-like User-Agent with every request (the opener built in the
# generator script was never installed, so it had no effect there)
opener = urllib.request.build_opener()
opener.addheaders = [('User-agent', 'Mozilla/49.0.2')]
urllib.request.install_opener(opener)

def get_error_domain(domain_list):
    error_list = set()
    right_list = set()
    url_list = open(domain_list, 'r')
    for line in url_list:
        if len(line.strip()):
            line_no_blank = line.strip()
            url = "http://" + line_no_blank
            error_code = ''
            error_reason = ''
            try:
                # time.clock() was removed in Python 3.8; perf_counter() replaces it
                start = time.perf_counter()
                file = urllib.request.urlopen(url, timeout=2)
                elapsed = time.perf_counter() - start  # request duration
                print("%s---->%s, took %ss" % (line_no_blank, file.getcode(), elapsed))
                right_list.add(line_no_blank)
            # Unreachable domains land here; the exception carries the failure
            # reason and, for HTTP errors, the status code. socket.timeout is
            # caught as well because urlopen does not always wrap it in URLError.
            except (urllib.error.URLError, socket.timeout) as e:
                print("%s failed" % line_no_blank)
                if hasattr(e, "code"):
                    print("error status code: %s" % e.code)
                    error_code = str(e.code)
                if hasattr(e, "reason"):
                    print("failure reason: %s" % e.reason)
                    error_reason = str(e.reason)
                error_status = error_code + '\t' + error_reason
                # A set deduplicates the failed domains automatically
                error_list.add(line_no_blank + '\t' + error_status + "\n")
    url_list.close()
    print("All live domains:")
    for lineone in right_list:
        print(lineone)
    # print("All failed domains:")
    # for line in error_list:
    #     print(line)
    # abnormal_list = open('H:/Python3.0Work/AWD/异常域名.txt', 'w')
    # abnormal_list.writelines(error_list)
    # abnormal_list.close()

get_error_domain('H:/Python3.0Work/AWD/bugkuwz.txt')
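
# The sequential loop above waits up to 2 s per dead host, so a full /24 can
# take several minutes. A minimal sketch of a threaded variant using the
# standard-library concurrent.futures (the worker name check_one and the
# pool size of 20 are my own illustrative choices, not from the original script):
from concurrent.futures import ThreadPoolExecutor

def check_one(domain):
    """Return (domain, status): an HTTP status code or a short error string."""
    try:
        resp = urllib.request.urlopen("http://" + domain, timeout=2)
        return domain, resp.getcode()
    except (urllib.error.URLError, socket.timeout) as e:
        return domain, str(getattr(e, "reason", e))

with open('H:/Python3.0Work/AWD/bugkuwz.txt', encoding='utf-8') as f:
    domains = [line.strip() for line in f if line.strip()]

# 20 workers check hosts in parallel; results come back in input order
with ThreadPoolExecutor(max_workers=20) as pool:
    for domain, status in pool.map(check_one, domains):
        print("%s---->%s" % (domain, status))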