Arachni Notes

Published: June 21, 2016 // Categories: ops, dev notes, work log, code study, linux, python // 2 Comments

#! /usr/bin/env python
# -*- coding: utf-8 -*-
import os
import re
import sys
import time
import json
import random
import base64
import hashlib
import threading
import subprocess
from gevent.pool import Pool
from urlparse import urlparse
from get_form import auto_fill_form,getform
"""
这里的作用就是把爬虫结果转化为json
检测思路
1.执行命令
arachni --audit-links --audit-forms --http-user-agent="Mozilla/5.0 (X11; Linux i686; U;) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/49.0.2623.87 Safari/537.36" \
    http://testphp.vulnweb.com/ --checks=sql_injection,xss,directory_listing,csrf,xpath_injection --report-save-path=/Users/saline/tools/mycode/spider/report/last0.afr

ruby /Users/saline/tool/tools/arachni/bin/../system/arachni-ui-web/bin/arachni --audit-links --audit-forms --audit-jsons --audit-xmls --audit-ui-inputs --scope-exclude-file-extensions=html --scope-exclude-file-extensions=shtml --http-user-agent="iaskspider/2.0(+http://iask.com/help/help_index.html)" --checks=sql_injection,rfi,directory_listing --report-save-path=/Users/saline/tools/mycode/spider/report/demo.aisec.cn_104446e2321d31be6031ec6daad80c47.afr --timeout=8:00:00 http://demo.aisec.cn/demo/

2.把afr利用arachni_reporter转化为json
#arachni_reporter --reporters-list
arachni_reporter /Users/saline/tools/mycode/spider/report/last0.afr --reporter=json:outfile=/Users/saline/tools/mycode/spider/report/last0_result.json

3.从json里面读取相关的结果,再进行二次分析利用
dist = open('/Users/saline/tools/mycode/spider/report/baimao.json').read()
result = json.loads(dist)
for url in result["sitemap"]:
    if int(result["sitemap"][url]) != 404:
        #输出非404的结果,其实还应该执行对比
        #print url
for urls in result["issues"]:
    print urls["vector"]["action"]+"\t"+urls["vector"]["method"]
    print urls["vector"]["inputs"]

参见帮助文档
http://doc.0xa.cc/r/FIdMhkWFYUvhdKOQQFWtBOltIGxlgsqByLSSPqzkXYRULiYZgm:mobile
http://www.cnblogs.com/vamei/archive/2012/09/23/2698014.html
"""
# Requires a separate Arachni install
# Arachni scan wrapper class (drives the CLI)
class Arachni_Console(object):

    def random_useragent(self):
        USER_AGENTS = [
            "Baiduspider+(+http://www.baidu.com/search/spider.htm)",
            "Mozilla/5.0 (compatible; Googlebot/2.1; +http://www.google.com/bot.html)",
            "Googlebot/2.1 (+http://www.googlebot.com/bot.html)",
            "Googlebot/2.1 (+http://www.google.com/bot.html)",
            "Mozilla/5.0 (compatible; Yahoo! Slurp China; http://misc.yahoo.com.cn/help.html)",
            "Mozilla/5.0 (compatible; Yahoo! Slurp; http://help.yahoo.com/help/us/ysearch/slurp)",
            "iaskspider/2.0(+http://iask.com/help/help_index.html)",
            "Sogou web spider/3.0(+http://www.sogou.com/docs/help/webmasters.htm#07)",
            "Sogou Push Spider/3.0(+http://www.sogou.com/docs/help/webmasters.htm#07)",
            "Mozilla/5.0 (compatible; YodaoBot/1.0;http://www.yodao.com/help/webmaster/spider/;)",
            "msnbot/1.0 (+http://search.msn.com/msnbot.htm)",
            "Sosospider+(+http://help.soso.com/webspider.htm)",
            "Mozilla/5.0 (Windows; U; Windows NT 5.1; zh-CN; rv:1.8.0.11)  Firefox/1.5.0.11; 360Spider",
            "Mozilla/5.0 (compatible; YodaoBot/1.0; http://www.yodao.com/help/webmaster/spider/”; )",
            "Mozilla/5.0 (Macintosh; Intel Mac OS X 10.11; rv:46.0) Gecko/20100101 Firefox/46.0",
            "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/45.0.2454.101 Safari/537.36",
            "Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.0; Trident/5.0;  Trident/5.0)",
            "Opera/9.80 (Macintosh; Intel Mac OS X 10.6.8; U; fr) Presto/2.9.168 Version/11.52",
            "Mozilla/5.0 (X11; Linux x86_64; rv:47.0) Gecko/20100101 Firefox/47.0",
        ]
        return random.choice(USER_AGENTS)
    # Several scan options worth noting; all customizable, which makes the tool pleasant to drive:
    # 1. custom cookies: --http-cookie-string
    # 2. HTTP 401 auth: --http-authentication-username=username --http-authentication-password=password
    # 3. extra scan paths: --scope-extend-paths
    # 4. custom User-Agent: --http-user-agent
    # 5. request concurrency: --http-request-concurrency (default 20)
    # html/shtml static files are excluded by default, which may affect some jsp pages
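    # Illustrative invocation combining the flags above (values hypothetical):
    #   arachni http://target.example/ --http-cookie-string="PHPSESSID=..." \
    #       --http-user-agent="Mozilla/5.0" --http-request-concurrency=20 \
    #       --http-authentication-username=username --http-authentication-password=password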
    def __init__(self, url, http_agent="", cookies=""):
        # fall back to a random User-Agent when none is supplied
        self.http_agent = http_agent or self.random_useragent()
        self.start_time = str(time.time())
        self.url = url
        self.report = "%s_%s" % (urlparse(url).netloc, hashlib.md5(self.start_time).hexdigest())
        self.arachni_client = '/Users/saline/tool/tools/arachni/bin/arachni'
        self.arachni_reporter = '/Users/saline/tool/tools/arachni/bin/arachni_reporter'
        self.report_file = " --report-save-path=/Users/saline/tools/mycode/spider/report/%s.afr" % self.report
        self.cookies = cookies
        #self.audit = "--audit-links --audit-forms --audit-cookies"
        self.audit = "--audit-links --audit-forms --audit-jsons --audit-xmls --audit-ui-inputs --scope-exclude-file-extensions=html --scope-exclude-file-extensions=shtml"
        self.h_agent = " --http-user-agent=\"%s\"" % (self.http_agent)
        self.h_cookies = " --http-cookie-string=\"%s\"" % (self.cookies)
        self.checks = " --checks=sql_injection,rfi,directory_listing"
        # self.checks = "--checks=rfi,directory_listing,sql_injection,sql_injection_timing,sql_injection_differential,source_code_disclosure,file_inclusion"
        self.timeout = " --timeout=%s" % "8:00:00"
        self.option = self.audit + self.h_agent + self.checks + self.report_file + self.timeout
        # append the cookie flag only when cookies are actually provided
        if self.cookies:
            self.option += self.h_cookies
        self.is_timeout = False
        self.proc = None
        self.report_jsfile = '/Users/saline/tools/mycode/spider/report/%s.json' % self.report
        self.result = None

    # Start the scan
    def _Scan(self):
        # build the command line and shell out; only the report file on
        # disk is needed, so there is no need to capture the output
        arachni_cmd = "%s %s %s" % (self.arachni_client, self.option, self.url)
        #self.timer = threading.Timer(6000 * 10 * 10, self.set_time_out)
        #self.timer.start()
        os.system(arachni_cmd)
        # running this via subprocess was problematic, so it was dropped:
        #self.proc = subprocess.Popen(arachni_cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
        #self.proc.wait()
        #for lines in self.proc.stdout.readlines():
        #    print(lines)
        #self.timer.cancel()

    # timeout function
    def set_time_out(self):
        if self.proc is not None:
            self.is_timeout = True
            self.timer.cancel()
            self.proc.kill()

    def get_report(self):
        # arachni_reporter /tmp/test.afr --reporter=json:outfile=/tmp/test.json
        try:
            self._Scan()
            self._report()
        except Exception:
            pass

        return self.result

    # convert the .afr report to JSON
    def _report(self):
        self.cmd = [
            self.arachni_reporter,
            "/Users/saline/tools/mycode/spider/report/%s.afr" % self.report,
            '--reporter=json:outfile=%s' % self.report_jsfile
        ]
        self.proc = subprocess.Popen(self.cmd)
        self.proc.wait()
        #self.result = open(self.report_jsfile).read()
        # del report files
        delcmd = "rm -rf /Users/saline/tools/mycode/spider/report/%s.afr" % self.report
        os.system(delcmd)
        self.result = self.report_jsfile
        #self.result = self.get_json(self.report_jsfile)
        #if len(self.result)>0:
        #    return self.result

        #os.remove(self.report_file)
        #os.remove(self.report_jsfile)
# parse the report JSON
def get_json(jsonfile):
    #print int(time.time())
    vul_results = []
    jsonresult = []
    dist = open(jsonfile).read()
    result = json.loads(dist)
    #for url in result["sitemap"]:
    #    if int(result["sitemap"][url]) != 404:
    #        pass
            #print url
    if len(result["issues"])>0:
        for urls in result["issues"]:
            data = ''
            acturl = urls["vector"]["action"]
            #urls.append(str(urls["vector"]["action"]))
            #获取input信息
            for vuln in urls["vector"]["inputs"]:
                if len(auto_fill_form(str(vuln)))>0:
                    value = auto_fill_form(str(vuln))
                    data = data + vuln+'='+value+'&'
                else:
                    value = 'casterjs'
                    data = data + vuln +'='+value+'&'
            #获取到actmethod
            if str(urls["vector"]["method"]).find('get')!=-1:
                actmethod = 'GET'
            elif str(urls["vector"]["method"]).find('post')!=-1:
                actmethod = 'POST'

            if str(actmethod).find('get')!=-1 or str(actmethod).find('GET')!=-1:
                if acturl.find('?') ==-1:
                    acturl = acturl +'?'+data.rstrip('&')
                else:
                    acturl = acturl +'&'+data.rstrip('&')
            if len(data.rstrip('&')) == 0:
                actmethod = 'GET'
            vul_results.append(({"url": acturl,
                "probe": {
                    "payload": data.rstrip('&'),
                    "method": actmethod,
                    "url": acturl,
                    "headers": urls["request"]["headers"],}}))
    if len(result["sitemap"])>0:
        for url in result["sitemap"]:
            if result["sitemap"][url] != 404:
                results = getform(url)
                if result is not None:
                    for lists in results:
                        if lists["probe"]['url'] not in jsonresult:
                            data = base64.b64encode(json.dumps(lists["probe"]))
                            newurl = lists["probe"]['url']
                            jsonresult.append(newurl + ' '+ data)
                            #urls.append(newurl + ' '+ data)

    if vul_results:
        for lists in vul_results:
            if lists["probe"]['url'] not in jsonresult:
                data = base64.b64encode(json.dumps(lists["probe"]))
                newurl = lists["probe"]['url']
                jsonresult.append(newurl + ' '+ data)

    if len(jsonresult)>0:
        return jsonresult
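# Each emitted line has the form "<url> <base64(json probe)>"; a downstream
# consumer can recover the probe dict like this (illustrative sketch, not
# part of this script):
#   rawurl, encoded = line.split(' ', 1)
#   probe = json.loads(base64.b64decode(encoded))
#   print probe["method"], probe["url"], probe["payload"]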

if __name__ == '__main__':
    #domain ="http://0cx.cc/"
    domains = ['http://demo.aisec.cn/demo','http://testphp.vulnweb.com/']
    for domain in domains:
        arachni_console = Arachni_Console(domain, http_agent='')
        try:
            results = get_json(arachni_console.get_report())
            for resu in results:
                print resu
        except Exception as e:
            print(str(e))


For the form-harvesting script (get_form), see http://0cx.cc/get_form_name.jspx

#!/usr/bin/env python
# -*- encoding: utf-8 -*-

#https://github.com/Arachni/arachni/wiki/REST-server
#https://github.com/Arachni/arachni/wiki/REST-API
'''
Start the API server:
arachni_rest_server
[with authentication]
(./bin/arachni_rest_server --address=192.168.87.134 --port=7331 --authentication-username=admin --authentication-password=adminpassword)


1. List scans
GET /scans

2. Submit a scan
POST /scans
json.dumps(xxx.json)
Essentially you supply the url plus profile options.

3. Get the status of a scan by id
GET /scans/:id

Roughly these states exist: [
   a. ready     initialised, but the scan has not started
   b. preparing getting ready to start the scan (i.e. initialising plugins)
   c. scanning  scan in progress
   d. pausing   the scan is being paused
   e. paused    the scan has been paused
   f. cleanup   the scan is winding down (i.e. waiting for plugins to finish, etc.)
   g. aborted   the scan ended abnormally
   h. done      the scan finished
]

4. Pause a scan
PUT /scans/:id/pause

5. Resume a [paused] scan
PUT /scans/:id/resume

6. Fetch the scan report
GET /scans/:id/report
GET /scans/:id/report.json
GET /scans/:id/report.xml
GET /scans/:id/report.yaml
GET /scans/:id/report.html.zip

7. Delete a scan
DELETE /scans/:id

'''
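# Raw-HTTP sketch of submitting a scan (assumes the server address above;
# the JSON body mirrors what post_api() sends from the loaded profile):
#   curl -X POST -H 'Content-Type: application/json' \
#        -d '{"url": "http://testphp.vulnweb.com/"}' \
#        http://192.168.87.134:7331/scans
# The response is a JSON object carrying the new scan's id.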

import urllib2
import json

class ArachniClient(object):

   # loaded once at class-definition time and shared as the default profile
   with open('./profiles/default.json') as f:
      default_profile = json.load(f)

   def __init__(self, arachni_url = 'http://192.168.87.134:7331'):
      self.arachni_url = arachni_url
      self.options = ArachniClient.default_profile

   def get_http_request(self, api_path):
      return urllib2.urlopen(self.arachni_url + api_path).read()

   def post_api(self, api_path):
      options = json.dumps(self.options)
      request = urllib2.Request(self.arachni_url + api_path, options)
      request.add_header('Content-Type', 'application/json')
      return urllib2.urlopen(request).read()

   def put_request(self, api_path):
      request = urllib2.Request(self.arachni_url + api_path)
      request.get_method = lambda: 'PUT'
      return urllib2.urlopen(request).read()

   def delete_request(self, api_path):
      request = urllib2.Request(self.arachni_url + api_path)
      request.get_method = lambda: 'DELETE'
      return urllib2.urlopen(request).read()
   # list the scans
   def get_scans(self):
      return json.loads(self.get_http_request('/scans'))
   # get the status of a scan
   def get_status(self, scan_id):
      return json.loads(self.get_http_request('/scans/' + scan_id))
   # pause a scan
   def pause_scan(self, scan_id):
      return self.put_request('/scans/' + scan_id + '/pause')
   # resume a scan
   def resume_scan(self, scan_id):
      return self.put_request('/scans/' + scan_id + '/resume')
   # fetch the scan report
   def get_report(self, scan_id, report_format = None):
      if self.get_status(scan_id)['status'] == 'done':

         if report_format == 'html':
            report_format = 'html.zip'

         if report_format in ['json', 'xml', 'yaml', 'html.zip']:
            return self.get_http_request('/scans/' + scan_id + '/report.' + report_format)
         elif report_format is None:
            return self.get_http_request('/scans/' + scan_id + '/report')
         else:
            print 'your requested format is not available.'

      else:
         print 'your requested scan is in progress.'
   # delete a scan
   def delete_scan(self, scan_id):
      return self.delete_request('/scans/' + scan_id)
   # start a scan
   def start_scan(self):
      # use .get() so a profile without a url set doesn't raise KeyError
      if self.options.get('url'):
         return json.loads(self.post_api('/scans'))
      else:
         print 'Target is not set!'

   def target(self, target_url):
      try:
         urllib2.urlopen(target_url)
         self.options['url'] = target_url
      except urllib2.HTTPError as e:
         print e.code

   def profile(self, profile_path):
      with open(profile_path) as f:
         self.options = json.load(f)

if __name__ == '__main__':
   a = ArachniClient()
   a.profile('./profiles/default.json')
   #'http://testphp.vulnweb.com/','http://23.88.112.156/xvwa/'
   a.target('http://23.88.112.156/xvwa/')
   print a.start_scan()
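   # Hypothetical follow-up sketch (commented out), assuming the POST /scans
   # response carries an 'id' field: poll the state machine described above
   # until 'done', then persist the JSON report.
   #scan_id = a.start_scan()['id']
   #import time
   #while a.get_status(scan_id)['status'] != 'done':
   #   time.sleep(10)
   #open('report.json', 'w').write(a.get_report(scan_id, 'json'))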


A new angle on finding leaks via GitHub search

Published: June 14, 2016 // Categories: work log, ops, code study, windows // 2 Comments

The starting point was http://www.wooyun.org/bugs/wooyun-2016-0218766. After seeing the leaked ALIYUN_ACCESS_ID and ALIYUN_ACCESS_KEY in that report, I connected straight to OSS with the client tool. Curiosity got the better of me, so I went looking for more OSS information.

Searching GitHub directly for ALIYUN_ACCESS_ID or ALIYUN_ACCESS_KEY turns up plenty of hits:

https://github.com/search?q=ALIYUN_ACCESS_ID&ref=searchresults&type=Code&utf8=%E2%9C%93

https://github.com/search?q=ALIYUN_ACCESS_KEY&ref=searchresults&type=Code&utf8=%E2%9C%93

Tried one of them:

https://github.com/xukg/GeiliXinli/blob/6c7762b71514fd1a0ac52f7763f3ab18e0f778d5/app/src/main/java/com/geilizhuanjia/android/framework/utils/ConstantUtil.java

and connected successfully.

There is plenty more waiting to be dug up, e.g. the code-hosting services run by Taobao and oschina.

As for client tools, I found builds for both platforms [win+mac]:

ossclient_mac.zip  ossclient_win.zip
