I figured that rolling my own fingerprinting would probably fit my needs best, but the problem is that it would take too much time, and the coverage probably would not be very broad.
Then I came across http://whatweb.bugscaner.com/ and found its coverage is actually quite good: the common CMSes have all been integrated. I tested a few targets and the false positive rate was within an acceptable range, so I automated it. The drawback is that it only handles http targets; https is not supported, because the site strips the http|https:// prefix from whatever you submit.
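Since the service drops the scheme on its own anyway, it can help to normalize the URL before submitting it; a minimal sketch (normalize_target is just a name I made up, not something the site provides):
def normalize_target(url):
    # whatweb.bugscaner.com strips the scheme itself, so submit a bare
    # host/path and avoid surprises with https:// targets.
    for prefix in ("http://", "https://"):
        if url.startswith(prefix):
            return url[len(prefix):]
    return url
The simple demo below skips that step and just posts the URL as given.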
#!/usr/bin/python
import re
import json
import requests

def whatcms(url):
    headers = {"Content-Type": "application/x-www-form-urlencoded; charset=UTF-8",
               "Referer": "http://whatweb.bugscaner.com/look/",
               }
    """
    # The hash field could also be scraped from the form page instead of hard-coding it:
    try:
        res = requests.get('http://whatweb.bugscaner.com/look/', timeout=60, verify=False)
        if res.status_code == 200:
            hashes = re.findall(r'value="(.*?)" name="hash" id="hash"', res.content)[0]
    except Exception as e:
        print str(e)
        return False
    """
    data = "url=%s&hash=0eca8914342fc63f5a2ef5246b7a3b14_7289fd8cf7f420f594ac165e475f1479" % (url)
    try:
        response = requests.post("http://whatweb.bugscaner.com/what/", data=data, headers=headers, timeout=60, verify=False)
        if int(response.status_code) == 200:
            result = json.loads(response.content)
            if len(result["cms"]) > 0:
                return result["cms"]
            else:
                return "www"
    except Exception as e:
        print str(e)
        return "www"

if __name__ == '__main__':
    import sys
    url = sys.argv[1]
    print whatcms(url)

Anything that cannot be identified is treated as www. With identification basically solved, the next step is integrating the plugins. My plan is to classify them first: read the service value out of each plugin file, then store the filename and service in a database so they can be looked up later. Here is a small script for that.
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import re, os, glob, time
from mysql_class import MySQL
"""
logging.basicConfig(
    level=logging.DEBUG,
    format="[%(asctime)s] %(levelname)s: %(message)s")
"""
"""
1. Identify the specific CMS
2. Fetch the matching plugins from the database -- if nothing matches, consider iterating over everything
3. Output the result
"""
def timestamp():
    return str(time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()))
"""
DROP TABLE IF EXISTS `bugscan`;
CREATE TABLE `bugscan` (
  `id` int(11) NOT NULL AUTO_INCREMENT,
  `service` varchar(256) COLLATE utf8_bin DEFAULT NULL,
  `filename` varchar(256) COLLATE utf8_bin DEFAULT NULL,
  `time` varchar(256) COLLATE utf8_bin DEFAULT NULL,
  PRIMARY KEY (`id`)
) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8 COLLATE=utf8_bin;
SET FOREIGN_KEY_CHECKS = 1;
"""
dbconfig = {'host': '127.0.0.1', 'port': 3306, 'user': 'root', 'passwd': 'root123', 'db': 'proscan', 'charset': 'utf8'}
db = MySQL(dbconfig)

def insert(filename):
    # Pull the service name out of the plugin's "if service == '...':" check
    file_data = open(filename, 'rb').read()
    service = re.findall(r"if service.*==(.*?):", file_data)
    if len(service) > 0:
        servi = service[0].replace("'", "").replace("\"", "").replace(" ", "")
        sqlInsert = "insert into `bugscan`(id,service,filename,time) values ('','%s','%s','%s');" % (str(servi), str(filename.replace('./bugscannew/', '')), str(timestamp()))
        print sqlInsert
        #db.query(sql=sqlInsert)

for filename in glob.glob(r'./bugscannew/*.py'):
    insert(filename)
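For context, the regex above keys on the service check that bugscan-style plugins carry near the top; a made-up sketch of such a file (the plugin name and body are hypothetical, only the shape of the service line matters):
# wordpress_demo.py -- hypothetical plugin, just to show what insert() captures
def assign(service, arg):
    if service == 'wordpress':   # insert() would store "wordpress" for this file
        return True, arg

def audit(arg):
    # real checks would go here
    pass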

Then I thought about how to invoke a specific plugin to run the check. I mulled it over for a long time, until recently, when I had some free time and read through pocscan, and found its approach works well: add the plugin directory to the Python path, then from xxx import audit, and the problem is solved.
def import_poc(pyfile, url):
    poc_path = os.getcwd() + "/bugscannew/"
    path = poc_path + pyfile + ".py"
    filename = path.split("/")[-1].split(".py")[0]
    sys.path.append(poc_path)
    poc0 = imp.load_source('audit', path)
    audit_function = poc0.audit
    from dummy import *
    audit_function.func_globals.update(locals())
    ret = audit_function(url)
    if ret is not None and 'None' not in ret:
        #print ret
        return ret
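This only works because the free names the plugins reference (security_hole, task_push and friends) get injected into the audit function's globals via from dummy import * plus func_globals.update(locals()). If you do not have the original dummy package, a minimal stand-in might look like this (stub implementations of my own, just enough to let audit() run):
# dummy/__init__.py -- stub versions of the helpers bugscan plugins expect.
# These are placeholders; the real dummy package does considerably more.
def security_hole(msg):
    print '[hole] %s' % msg

def security_warning(msg):
    print '[warning] %s' % msg

def security_note(msg):
    print '[note] %s' % msg

def security_info(msg):
    print '[info] %s' % msg

def task_push(*args):
    # the real framework queues follow-up scan tasks; here we just log them
    print '[task_push] %s' % (args,)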

I do not have a perfect way to wire the whole thing together yet, so here is a simple demo.
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import re, os
import imp, sys
import time, json
import logging, glob
import requests
from mysql_class import MySQL
"""
logging.basicConfig(
    level=logging.DEBUG,
    format="[%(asctime)s] %(levelname)s: %(message)s")
"""
"""
1. Identify the specific CMS
2. Fetch the matching plugins from the database -- if nothing matches, consider iterating over everything
3. Output the result
"""
def timestamp():
    return str(time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()))
"""
DROP TABLE IF EXISTS `bugscan`;
CREATE TABLE `bugscan` (
  `id` int(11) NOT NULL AUTO_INCREMENT,
  `service` varchar(256) COLLATE utf8_bin DEFAULT NULL,
  `filename` varchar(256) COLLATE utf8_bin DEFAULT NULL,
  `time` varchar(256) COLLATE utf8_bin DEFAULT NULL,
  PRIMARY KEY (`id`)
) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8 COLLATE=utf8_bin;
SET FOREIGN_KEY_CHECKS = 1;
"""
dbconfig = {'host': '127.0.0.1', 'port': 3306, 'user': 'root', 'passwd': 'root123', 'db': 'proscan', 'charset': 'utf8'}
db = MySQL(dbconfig)

def insert(filename):
    # Record which plugin file handles which service
    file_data = open(filename, 'rb').read()
    service = re.findall(r"if service.*==(.*?):", file_data)
    if len(service) > 0:
        servi = service[0].replace("'", "").replace("\"", "").replace(" ", "")
        sqlInsert = "insert into `bugscan`(id,service,filename,time) values ('','%s','%s','%s');" % (str(servi), str(filename.replace('./bugscannew/', '')), str(timestamp()))
        print sqlInsert
        #db.query(sql=sqlInsert)
        #print servi, filename

def check(service, url):
    # Unidentified sites only get the generic 'www' plugins; identified ones get both
    if service == 'www':
        sqlsearch = "select filename from `bugscan` where service = '%s'" % (service)
    else:
        sqlsearch = "select filename from `bugscan` where service = 'www' or service = '%s'" % (service)
    print sqlsearch
    if int(db.query(sql=sqlsearch)) > 0:
        result = db.fetchAllRows()
        for row in result:
            #return result
            for colum in row:
                colum = colum.replace(".py", "")
                import_poc(colum, url)

def import_poc(pyfile, url):
    poc_path = os.getcwd() + "/bugscannew/"
    path = poc_path + pyfile + ".py"
    filename = path.split("/")[-1].split(".py")[0]
    sys.path.append(poc_path)
    poc0 = imp.load_source('audit', path)
    audit_function = poc0.audit
    from dummy import *
    audit_function.func_globals.update(locals())
    ret = audit_function(url)
    if ret is not None and 'None' not in ret:
        #print ret
        return ret

def whatcms(url):
    headers = {"Content-Type": "application/x-www-form-urlencoded; charset=UTF-8",
               "Referer": "http://whatweb.bugscaner.com/look/",
               }
    """
    try:
        res = requests.get('http://whatweb.bugscaner.com/look/', timeout=60, verify=False)
        if res.status_code == 200:
            hashes = re.findall(r'value="(.*?)" name="hash" id="hash"', res.content)[0]
    except Exception as e:
        print str(e)
        return False
    """
    data = "url=%s&hash=0eca8914342fc63f5a2ef5246b7a3b14_7289fd8cf7f420f594ac165e475f1479" % (url)
    try:
        response = requests.post("http://whatweb.bugscaner.com/what/", data=data, headers=headers, timeout=60, verify=False)
        if int(response.status_code) == 200:
            result = json.loads(response.content)
            if len(result["cms"]) > 0:
                return result["cms"]
            else:
                return "www"
    except Exception as e:
        print str(e)
        return "www"

if __name__ == '__main__':
    #for filename in glob.glob(r'./bugscannew/*.py'):
    #    insert(filename)
    url = "http://0day5.com/"
    print check(whatcms(url), url)
There are still gaps. For one, the plugin-calling part could use multithreading to speed things up. Also, if the CMS cannot be identified, the result is bound to be inaccurate, yet loading every plugin and fuzzing the target once through takes far too long.
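As a rough idea of the threading part, check() could hand each plugin file to a small worker pool instead of calling import_poc() serially; a minimal sketch, assuming import_poc() and the loaded plugins are safe to run concurrently (lookup_filenames is a hypothetical helper wrapping the SQL query from check()):
import threading
import Queue

def check_threaded(service, url, workers=10):
    # same DB lookup as check(), but the import_poc() calls run concurrently
    q = Queue.Queue()
    for filename in lookup_filenames(service):  # hypothetical helper around the SQL in check()
        q.put(filename.replace(".py", ""))

    def worker():
        while True:
            try:
                name = q.get_nowait()
            except Queue.Empty:
                return
            import_poc(name, url)

    threads = [threading.Thread(target=worker) for _ in range(workers)]
    for t in threads:
        t.start()
    for t in threads:
        t.join()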
Then I got Qi-shen's demo, which is more brute-force: load everything and fuzz the target once through.
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# papapa.py
import re
import socket
import sys
import os
import urlparse
import time
from dummy.common import *
import util
from dummy import *
import importlib
import threading
import Queue as que

class Worker(threading.Thread):  # worker thread that processes requests from the queue
    def __init__(self, workQueue, resultQueue, **kwds):
        threading.Thread.__init__(self, **kwds)
        self.setDaemon(True)
        self.workQueue = workQueue
        self.resultQueue = resultQueue

    def run(self):
        while 1:
            try:
                callable, args, kwds = self.workQueue.get(False)  # get task
                res = callable(*args, **kwds)
                self.resultQueue.put(res)  # put result
            except que.Empty:
                break

class WorkManager:  # creates and manages the thread pool
    def __init__(self, num_of_workers=10):
        self.workQueue = que.Queue()  # request queue
        self.resultQueue = que.Queue()  # result queue
        self.workers = []
        self._recruitThreads(num_of_workers)

    def _recruitThreads(self, num_of_workers):
        for i in range(num_of_workers):
            worker = Worker(self.workQueue, self.resultQueue)  # create a worker thread
            self.workers.append(worker)  # add it to the pool

    def start(self):
        for w in self.workers:
            w.start()

    def wait_for_complete(self):
        while len(self.workers):
            worker = self.workers.pop()  # take one thread out of the pool
            worker.join()
            if worker.isAlive() and not self.workQueue.empty():
                self.workers.append(worker)  # put it back into the pool
        #logging.info('All jobs were complete.')

    def add_job(self, callable, *args, **kwds):
        self.workQueue.put((callable, args, kwds))  # push a request onto the work queue

    def get_result(self, *args, **kwds):
        return self.resultQueue.get(*args, **kwds)

"""
lst = os.listdir(os.getcwd())
pocList = (','.join(c.strip('.py') for c in lst if os.path.isfile(c) and c.endswith('.py'))).split(',')
for line in pocList:
    try:
        #print line
        xxoo = importlib.import_module(line)
        xxoo.curl = miniCurl.Curl()
        xxoo.security_hole = security_hole
        xxoo.task_push = task_push
        xxoo.util = util
        xxoo.security_warning = security_warning
        xxoo.security_note = security_note
        xxoo.security_info = security_info
        xxoo.time = time
        xxoo.audit('http://0day5.com')
    except Exception as e:
        print line, e
"""
def bugscan(line, url):
    #print line, url
    try:
        xxoo = importlib.import_module(line)
        xxoo.curl = miniCurl.Curl()
        xxoo.security_hole = security_hole
        xxoo.task_push = task_push
        xxoo.util = util
        xxoo.security_warning = security_warning
        xxoo.security_note = security_note
        xxoo.security_info = security_info
        xxoo.time = time
        xxoo.audit(url)
    except Exception as e:
        #print line, e
        pass

def main(url):
    wm = WorkManager(20)
    lst = os.listdir(os.getcwd())
    pocList = (','.join(c.strip('.py') for c in lst if os.path.isfile(c) and c.endswith('.py'))).split(',')
    for line in pocList:
        if 'apa' not in line:  # skip this script (papapa.py) itself
            wm.add_job(bugscan, line, url)
    wm.start()
    wm.wait_for_complete()

start = time.time()
main('http://0day5.com/')
print time.time() - start

The accuracy is still worrying, so treat this as a reference only.