How to write a Python script that fuzzes URL paths

This is actually more than just a backup-file scanning script.

1. A brief explanation
Split the URL. For example, given the URL http://testphp.vulnweb.com/AJAX/233/3322/categories.php,
it is split into [testphp]/[vulnweb]/[vulnweb.com]/[testphp.vulnweb.com]/[AJAX]/[233]/[3322]/[categories].
That is, we match on the domain prefix [testphp], the registered domain [vulnweb.com], the domain name [vulnweb], the directories [AJAX] and the file name [categories]. There can be multiple directory levels, but avoid duplicates, and avoid producing candidates like
http://testphp.vulnweb.com/fuzz.zip/233/3322/categorie

Append each of the archive suffixes to each of these tokens, for example:
    http://testphp.vulnweb.com/AJAX.zip
    http://testphp.vulnweb.com/testphp.zip
    http://testphp.vulnweb.com/vulnweb.zip
    http://testphp.vulnweb.com/vulnweb.com.zip
    http://testphp.vulnweb.com/testphp.vulnweb.com.zip
You can also add some common names such as www, web, www1, www2 and similar combinations.
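
As a rough sketch of the splitting and suffixing steps (this is not the full script below; it assumes Python 2 with the same urlparse and tld modules, and an older tld API where get_tld() returns the registered domain such as vulnweb.com; the token and variable names are just for illustration):

# Minimal sketch: split the example URL into tokens and append one archive
# suffix to each. Assumes an older `tld` package where get_tld() returns the
# registered domain (vulnweb.com).
import urlparse
from tld import get_tld

url = 'http://testphp.vulnweb.com/AJAX/233/3322/categories.php'
parts = urlparse.urlsplit(url)
domain = get_tld(url)                            # vulnweb.com
prefix = parts.netloc.replace('.' + domain, '')  # testphp
name = domain.split('.')[0]                      # vulnweb
tokens = [prefix, name, domain, parts.netloc]
for seg in parts.path.strip('/').split('/'):
    tokens.append(seg.split('.')[0])             # AJAX, 233, 3322, categories
candidates = [parts.scheme + '://' + parts.netloc + '/' + t + '.zip' for t in tokens]
for c in candidates:
    print c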

Some common Content-Type values for archives:
rar      Content-Type: application/x-rar-compressed
tar.gz   Content-Type: application/x-gzip
zip      Content-Type: application/zip
jar      Content-Type: application/java-archive
tar      Content-Type: application/x-tar
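
A rough sketch of how that header check feeds into the verification step (the candidate URL is a hypothetical example; the full script below additionally compares the response body against an error-page baseline):

# Minimal sketch of the verification: a candidate counts as a hit when the
# status code is not 404 and the Content-Type header looks like an archive.
# The candidate URL is a hypothetical example.
import requests

archive_types = ['application/zip', 'application/x-rar-compressed',
                 'application/x-gzip', 'application/x-tar',
                 'application/java-archive', 'application/octet-stream']

candidate = 'http://testphp.vulnweb.com/testphp.zip'
resp = requests.get(candidate, timeout=15, verify=False)
if resp.status_code != 404 and resp.headers.get('Content-Type') in archive_types:
    print 'possible backup: ' + candidate

In practice a HEAD request or requests.get(..., stream=True) would avoid downloading the whole archive just to read the headers.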

Feel free to try this out and discuss better approaches. It can of course be extended, for example by adding strategies for scanning source-code backups. Use your imagination.


OK, time to deliver on the promise. Here is the script.
#! /usr/bin/env python
# -*- coding: utf-8 -*-


import re
import sys
import json
import string
import urlparse
import requests
from tld import get_tld

headers = {
    "User-Agent": "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/37.0.2062.124 Safari/537.36 0day5scan"}

# Error-page baseline: fetch two paths that should not exist and keep their bodies
def errorpage(url):
    hosts = urlparse.urlsplit(url)
    url = hosts.scheme + "://" + hosts.netloc
    errorpages = []
    try:
        res0 = requests.get(url + '/0day5hiMamXHPWjZQlh2c.php', timeout=15, headers=headers, verify=False)
        res1 = requests.get(url + '/0day5hiMamXHPWjZQlh2c/', timeout=15, headers=headers, verify=False)
        errorpages.append(res0.content.replace('0day5hiMamXHPWjZQlh2c', ''))
        errorpages.append(res1.content.replace('0day5hiMamXHPWjZQlh2c', ''))
    except Exception as e:
        print str(e)
    errorpages = list(set(errorpages))
    return errorpages

def webbackup(url):
    """
    1. Brief explanation: split the URL. http://testphp.vulnweb.com/AJAX/categories.php becomes
       [testphp]/[vulnweb]/[vulnweb.com]/[testphp.vulnweb.com]/[AJAX]/[categories],
       i.e. match on the domain prefix [testphp], the registered domain [vulnweb.com], the domain
       name [vulnweb], the directory [AJAX] (multiple levels allowed) and the file name [categories].

    2. Append each archive suffix to each of the tokens, e.g.
       http://testphp.vulnweb.com/AJAX.zip
       http://testphp.vulnweb.com/testphp.zip
       http://testphp.vulnweb.com/vulnweb.zip
       http://testphp.vulnweb.com/vulnweb.com.zip
       http://testphp.vulnweb.com/testphp.vulnweb.com.zip
       plus some common names such as www, web, www1, www2.

    3. A hit is a page that is not in the error-page baseline, whose status code is not 404 and,
       most importantly, whose Content-Type header matches an archive type.
    """
    re_results = []
    results = errorpage(url)
    hosts = urlparse.urlsplit(url)
    domain = get_tld(url)
    Contents = ["application/octet-stream", "application/x-compressed", "application/x-gzip",
                "application/x-gtar", "application/gnutar", "application/zip", "application/x-zip",
                "application/x-zip-compressed", "application/x-tar", "application/rar",
                "application/x-bzip2", "multipart/x-gzip"]
    filters = [".gz", ".tar.gz", ".tgz", ".bz2", ".sql.gz", ".sql.tar.gz", ".sql.tgz", ".sql.bz2",
               ".sql.tar", ".zip", ".sql.zip", ".rar", ".sql.rar"]
    genpaths = ["wwwroot", "htdocs", "backup", "data", "web", "w", "ww", "www", "website", "back",
                "site", "http", "admin", "web1", "t", "r", "www1", "webroot", "0", "1", "10", "15",
                "123", "2014", "2015", "2016", "2017", "bak", "databackup", "database", "org",
                "tools", "tool", "flashfxp", "leapftp", "ftp", "acl", "sql",
                "${DOMAIN}", "${TLD}", "${TLD_}", "${HOSTNAME}"]
    url = hosts.scheme + "://" + hosts.netloc

    path = '/'
    newpaths = []
    newspaths = []
    paths = hosts.path.strip('/').split('/')  # strip '/' from the ends of the path, then split on '/'
    for i in range(0, len(paths)):
        path = path + paths[i] + '/'
        # no '.' in the accumulated path: treat it as a directory
        if '.' not in path:
            expurl = path.rstrip('/')
            newspaths.append(expurl)
        # '.' in the accumulated path: treat it as a file, keep the part before the first '.'
        elif '.' in path:
            newpath = path.split('.')[0]
            expurl = newpath.rstrip('/')
            newspaths.append(expurl)

    newspaths = list(set(newspaths))

    # substitute the generic names and placeholders into each path (a bit clumsy, admittedly ^_^)
    for new in newspaths:
        newpaths.append(new)
        for x in range(0, len(new.split('/'))):
            old = new.split('/')[len(new.split('/')) - 1]  # last path segment, e.g. 222 from /233/222, 233 from /233
            for genpath in genpaths:
                genpath = genpath.replace('${DOMAIN}', hosts.netloc).replace('${TLD}', domain).replace('${TLD_}', hosts.netloc.replace('.' + domain, '')).replace('${HOSTNAME}', domain.replace('.com', '').replace('.cn', ''))
                # print genpath
                getnew = new.replace(old, genpath)  # replace the last segment of the original path with genpath
                newpaths.append(getnew)
    newpaths = list(set(newpaths))

    # append the archive suffixes and verify whether the file really exists
    for newpath in newpaths:
        for filter in filters:
            newurl = url + newpath + filter
            # print newurl
            try:
                response = requests.get(newurl, timeout=45, headers=headers, verify=False)
                if response.status_code != 404 and response.content not in results:
                    if 'Content-Type' in response.headers:
                        if response.headers['Content-Type'] in Contents:
                            re_results.append(newurl)
            except Exception as e:
                print str(e)
                pass

    re_results = list(set(re_results))
    if len(re_results) > 0:
        return re_results

if __name__ == "__main__":
    if len(sys.argv) == 2:
        url = sys.argv[1]
        print webbackup(url)
        sys.exit(0)
    else:
        print "usage: python %s http://testphp.vulnweb.com/index.php" % sys.argv[0]
        sys.exit(0)

 