"
+ for key in out[ip][port]["headers"].keys():
+ logcnt += key + ":" + out[ip][port]["headers"][key] + "\n"
+ logcnt += "\n"
+ for title, url, status_code in out[ip][port]["available"]:
+ logcnt += titlehtml(title) + \
+ url + " " + \
+ "Status Code:" + str(status_code) + " "
+ logcnt += " "
+ center = centerhtml(ips)
+ logcnt = HTML_LOG_TEMPLATE % ( css, center, logcnt)
+ outfile = open(path, "a")
+ outfile.write(logcnt)
+ outfile.close()
+
+def scan(iplst, timeout, headers, savepath):
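+ # start one bannerscan thread per ip, wait for them all, then write the collected results to the html log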
+ global result
+ start = time.time()
+ threads = []
+
+ for ip in iplst:
+ t = bannerscan(ip,timeout,headers)
+ threads.append(t)
+
+ for t in threads:
+ t.start()
+
+ for t in threads:
+ t.join()
+
+ log(result, savepath)
+ result = dict()
+ print()
+
+def main():
+ parser = argparse.ArgumentParser(description='banner scanner. by DM_ http://x0day.me')
+ group = parser.add_mutually_exclusive_group()
+
+ group.add_argument('-i',
+ action="store",
+ dest="ip",
+ )
+ group.add_argument('-r',
+ action="store",
+ dest="iprange",
+ type=str,
+ )
+ group.add_argument('-f',
+ action="store",
+ dest="ipfile",
+ type=argparse.FileType('r')
+ )
+ parser.add_argument('-s',
+ action="store",
+ required=True,
+ dest="savepath",
+ type=str,
+ )
+ parser.add_argument('-t',
+ action="store",
+ required=False,
+ type = int,
+ dest="timeout",
+ default=5
+ )
+
+ args = parser.parse_args()
+ savepath = args.savepath
+ timeout = args.timeout
+ iprange = args.iprange
+ ipfile = args.ipfile
+ ip = args.ip
+
+ headers['user-agent'] = ua
+
+ print("[*] starting at %s" % time.ctime())
+
+ if ip:
+ iplst = retiplst(ip)
+ scan(iplst, timeout, headers, savepath)
+
+ elif iprange:
+ iplst = retiprangelst(iprange)
+ scan(iplst, timeout, headers, savepath)
+
+ elif ipfile:
+ lines = ipfile.readlines()
+ for line in lines:
+ if re.match(ipPattern, line):
+ iplst = retiplst(line)
+ scan(iplst, timeout, headers, savepath)
+ elif re.match(iprangePattern, line):
+ iplst = retiprangelst(line)
+ scan(iplst, timeout, headers, savepath)
+
+ else:
+ parser.print_help()
+ exit()
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/controller.py b/lib/controller.py
old mode 100755
new mode 100644
index ab9cd0b..107d5cf
--- a/lib/controller.py
+++ b/lib/controller.py
@@ -1,147 +1,147 @@
import os
-import time
-import re
-from lib.general import url_parse,get_ip_from_url
-from lib.scanner.oneforall import OneForAll
-from lib.scanner import xray,crawlergo,nmap,masscan,dirsearch,awvs,request_engine,whatweb
+import tempfile
+import sqlite3
+from lib.Tools import *
+from lib.urlParser import Parse
+from lib.report import Report
+from lib.db import db_insert
+
+class Controller:
+ def __init__(self, arguments):
+ self.urls_target = arguments.urlList
+ self.domains_target = arguments.domainList
+ self.logfile = tempfile.NamedTemporaryFile(delete=False).name
+ self.log = {}
+ self.xray = Xray()
+
+ if arguments.tools:
+ self.toolsList = arguments.tools.split(',')
+
+ def assign_task(self):
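+ # url targets are scanned directly; domain targets are expanded through subdomain
+ # enumeration and a port scan, and every resulting http url is then scanned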
+ def url_scan(urls_target):
+ if urls_target:
+ urls_target = sorted(set(urls_target), key=urls_target.index)
+ for _target in urls_target:
+ db_insert('insert into target_info (target, batch_num) values (?,?);', _target, now_time)
+ _target = Parse(_target) # return dict{ip,domain,http_url}
+ if _target.data:
+ db_insert('insert into scanned_info (domain, batch_num) values (?,?)', _target.data['http_url'], now_time)
+ self.urls_scan(_target)
+ Report().html_report_single()
+
+ self.xray.passive_scan()
+
+ if self.urls_target:
+ url_scan(self.urls_target)
+
+ if self.domains_target:
+ self.domains_target = sorted(set(self.domains_target), key=self.domains_target.index)
+ for domain in self.domains_target:
+ if not Parse.isIP(domain):
+ if Parse(domain).data: # some domains fail to resolve
+ subdomains = self.subdomains_scan(Parse(domain).data['domain'])
+ for subdomain in subdomains:
+ target = Parse(subdomain)
+ print(target)
+ if target.data:
+ db_insert('insert into host_info (domain, batch_num) values (?,?)', target.data['domain'], now_time)
+ http_urls = self.ports_scan(target)
+ url_scan(http_urls)
+
+ else:
+ target = Parse(domain)
+ db_insert('insert into host_info (domain, batch_num) values (?,?)', target.data['domain'], now_time)
+ http_urls = self.ports_scan(target)
+ url_scan(http_urls)
+
+ def subdomains_scan(self, target):
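+ # run OneForAll against the domain and parse its csv results; fall back to the bare domain if nothing is found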
+ _ = "python3 {path}/oneforall/oneforall.py --target {target} run".format(path=tool_path, target=target)
+ logfile = '{path}/oneforall/results/{target}.csv'.format(path=tool_path, target=target)
+ oneforall = Oneforall(_, logfile)
+ return oneforall.data if oneforall.data else [target]
+
+ def ports_scan(self, target):
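+ # resolve the domain first: if the answer looks like a CDN, skip the port scan and just return the http url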
+ nslookup = Nslookup(target.data['domain'])
+
+ cdns = ['cdn', 'kunlun', 'bsclink.cn', 'ccgslb.com.cn', 'dwion.com', 'dnsv1.com', 'wsdvs.com', 'wsglb0.com', 'lxdns.com', 'chinacache.net.', 'ccgslb.com.cn', 'aliyun']
+ for cdn in cdns:
+ if cdn in nslookup.log:
+ print('{} may be behind a CDN'.format(target.data['domain']))
+ print(nslookup.log)
+ return [target.data['http_url']]
+
+ _ = "masscan --open -sS -Pn -p 1-20000 {target} --rate 2000".format(target=target.data['ip'])
+ masscan = Masscan(_, None)
+
+ '''
+ a firewall or similar device may cause masscan to report an unreasonably large number of open ports
+ '''
+ if not masscan.data or len(masscan.data) > 20:
+ masscan.data = ['21', '22', '445', '80', '1433', '3306', '3389', '6379', '7001', '8080']
+
+ '''
+ nmap: if both 80 and 443 are open, drop 443
+ '''
+ _ = "nmap -sS -Pn -A -p {ports} {target_ip} -oN {logfile}".format(ports=','.join(masscan.data), target_ip=target.data['ip'], logfile=self.logfile)
+ nmap = Nmap(_, self.logfile)
+ if nmap.data:
+ if "80" in nmap.data and "443" in nmap.data:
+ nmap.data.remove("443")
+ return ['{}:{}'.format(target.data['http_url'], port) for port in nmap.data]
+
+ return [target.data['http_url']]
+
+ def urls_scan(self, target):
+ # mainly to check the owning organisation and whether the IP belongs to a cloud provider
+ iplocation = IpLocation(target.data['ip'])
+
+ _ = "whatweb --color never {}".format(target.data['http_url'])
+ whatweb = Whatweb(_)
+
+ # take a screenshot of the target page
+ snapshot = Snapshot(target.data['http_url'])
+
+ '''
+ crawlergo
+ subdomains it discovers are added to the domain target list on the fly
+ note: --push-to-proxy must be an http:// address, and a statically packaged chrome will not run here
+ '''
+ _ = './crawlergo -c /usr/bin/google-chrome-stable -t 10 --push-to-proxy http://127.0.0.1:7777 -o json {}'.format(target.data['http_url'])
+ crawlergo = Crawlergo(_)
+
+ if crawlergo.data:
+ if self.domains_target:
+ self.domains_target += [domain for domain in crawlergo.data if domain not in self.domains_target]
+
+ '''
+ wait for xray to finish: the other tools are multithreaded and highly concurrent, and without waiting xray ends up with a batch of red timeouts
+ '''
+ if crawlergo.log:
+ print('xray start scanning')
+ while True:
+ if self.xray.wait_xray_ok():
+ break
+
+ '''
+ feed the urls found by dirsearch to xray
+ '''
+ _ = 'python3 dirsearch/dirsearch.py -x 301,302,403,404,405,500,501,502,503 --full-url -u {target} --csv-report {logfile}'.format(
+ target=target.data['http_url'], logfile=self.logfile)
+ dirsearch = Dirsearch(_, self.logfile)
+
+ if dirsearch.data:
+ for url in dirsearch.data:
+ response = Request().repeat(url)
+
-class Controller():
- def __init__(self,arguments):
- self.arguments = arguments
- self.scanned_domains = []
- if self.arguments.args.restore:
- exit("restore exit ")
- def assign_task(self):
- self.init_report()
- self.xray = xray.Xray()
- self.xray.scan()
- for http_url in self.format_url():
- print("scanning : ",http_url)
- if self.arguments.args.fastscan: # fastscan mode only runs the web scan and does not add newly found subdomains back into the queue
- self.url_scan(http_url)
- continue
- if http_url.count(":") < 2 and http_url.count("/") < 3 : # if like http://a.com:8080 or http://xx.com/1.php ,do self.url_scan()
- ip = get_ip_from_url(http_url)
- if not ip :
- continue
- open_ports = masscan.Masscan(ip).open_ports
- if not open_ports or len(open_ports) > 20:
- self.url_scan(http_url)
- continue
- http_open_ports = nmap.Nmap(url_parse(http_url).get_netloc(),open_ports).http_open_ports #use domain not ip in order to report
- if http_open_ports:
- for port in http_open_ports:
- http_url_with_port = http_url + ":" + port
- self.url_scan(http_url_with_port)
- else:
- print("nmap not found http server port at : ",http_url)
- else:
- self.url_scan(http_url)
-
-
- def url_scan(self,target):
- whatweb.Whatweb(target)
-
- c = crawlergo.Crawlergo(target)
- if c.sub_domains and not self.arguments.args.fastscan: # add the subdomains found by crawlergo to the task list
- print("crawlergo found domains : ",c.sub_domains)
- for domains in c.sub_domains:
- if domains not in self.arguments.urlList:
- self.arguments.urlList.append(domains)
-
- time.sleep(5)
- print("waiting xray scan to end")
- while (True): #wait for xray end scan
- if self.xray.check_xray_status():
- break
-
- urls = dirsearch.Dirsearch(target).urls
- if urls:
- request = request_engine.Request() #repeat urls found by dirsearch to xray
- for url in urls:
- request.repeat(url)
- time.sleep(5)
-
- if "awvs" in self.arguments.toolList:
- awvs.Awvs(target)
-
- self.scanned_domains.append(target)
-
- # enumerate subdomains with oneforall
- def format_url(self):
- for url in self.arguments.urlList:
- result = re.search(r'(([01]{0,1}\d{0,1}\d|2[0-4]\d|25[0-5])\.){3}([01]{0,1}\d{0,1}\d|2[0-4]\d|25[0-5])', url)
- if result:
- url = result.group()
- http_url = url_parse(url).get_http_url() #
- yield http_url
-
- elif url.startswith('http'):
- yield url
-
- else:
- # check whether the domain has already been scanned, including links that carry an http prefix
- scanned_status = False
- compile = '^[http://|https://]*' + url
- for u in self.scanned_domains:
- if re.findall(compile,u):
- print("{} had in scanned_domains list .".format(url))
- scanned_status = True
- break
-
- if scanned_status :
- continue
-
- # check whether this is already a subdomain
- if url.count('.') >= 2:
- is_subdomain = True
- for suffix in [".com.cn", ".edu.cn", ".net.cn", ".org.cn", ".co.jp",".gov.cn", ".co.uk", "ac.cn",]:
- if suffix in url :
- is_subdomain = False
- break
-
- # if it is already a subdomain, skip it instead of brute-forcing third-level domains
- if is_subdomain :
- yield url_parse(url).get_http_url()
- continue
-
- # scan the domain itself as a URL first
- yield url_parse(url).get_http_url()
-
- # then enumerate its subdomains and scan them
- domains_list = OneForAll(url).scan()
- domains_list = sorted(set(domains_list), key=domains_list.index) # deduplicate while preserving order
- for domains in domains_list:
- http_url = url_parse(domains).get_http_url() #
- yield http_url
- continue
-
- def init_report(self):
- from .setting import TEMPLATE_FILE
- from .setting import TOOLS_REPORT_FILE
-
- if not os.path.exists(TOOLS_REPORT_FILE):
- with open(TOOLS_REPORT_FILE, 'w+') as w:
- with open(TEMPLATE_FILE, 'r') as r:
- w.write(r.read())
-
- def restore(self):
- main_path = os.path.split(os.path.dirname(os.path.realpath(__file__)))[0]
- restore_path = os.path.join(main_path,'.restore')
- if not os.path.exists(restore_path):
- exit('not found .restore file')
-
- with open(restore_path,'r') as f: # check the saved domain
- url = f.read()
- return url
diff --git a/lib/db.py b/lib/db.py
index 23f1c93..f6d714e 100644
--- a/lib/db.py
+++ b/lib/db.py
@@ -1,50 +1,54 @@
-from lib.setting import now_time
-from lib.general import path_build
import sqlite3
+import os
-class db():
- def __init__(self,sql, value=None, dbfile='scanned_info.db'):
- self.db_path = path_build(dbfile)
- self.sql = sql
- self.value = self.replace_date(value)
- self.date = now_time.replace('-','')
- self.db = sqlite3.connect(self.db_path)
- self.c = self.db.cursor()
-
-
- def __enter__(self):
- def run():
- if self.value:
- return self.c.execute(self.sql,self.value)
- else:
- return self.c.execute(self.sql)
-
- try:
- run()
- except sqlite3.OperationalError as e:
- try :
- if 'no such table' in str(e):
- # target table
- self.c.execute('''
- create table if not exists target_info (
- id INTEGER PRIMARY KEY,input_target text, found_domains text, date integer)''')
- # scanned info
- self.c.execute('''
- create table if not exists scanned_info (
- id INTEGER PRIMARY KEY,domain text, date integer, crawlergo text, dirsearch text
- )''')
- run()
- except Exception as e:
- print(e)
-
- def __exit__(self, exc_type, exc_val, exc_tb):
- self.db.commit()
- self.db.close()
-
-
- def replace_date(self,value):
- if 'date' in value:
- v = list(value)
- index = v.index('date')
- v[index] = now_time
- return tuple(v)
\ No newline at end of file
+main_path = os.path.split(os.path.dirname(os.path.realpath(__file__)))[0]
+
+
+'''
+Initialize the table schema
+'''
+def db_init():
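+ # three tables: target_info (raw targets), host_info (per-host recon: nslookup/iplocation/masscan/nmap), scanned_info (per-URL web scans: whatweb/crawlergo/dirsearch)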
+ with sqlite3.connect('scanned_info.db') as conn:
+ conn.execute('''
+ create table if not exists target_info (
+ id INTEGER PRIMARY KEY,
+ target text,
+ oneforall text,
+ crawlergo text,
+ batch_num integer,
+ date timestamp not null default (datetime('now','localtime')))
+ ''')
+
+ conn.execute('''
+ create table if not exists host_info (
+ id INTEGER PRIMARY KEY,
+ domain text,
+ nslookup text,
+ iplocation text,
+ masscan text,
+ nmap text,
+ batch_num integer,
+ date timestamp not null default (datetime('now','localtime')))
+ ''')
+
+ conn.execute('''
+ create table if not exists scanned_info (
+ id INTEGER PRIMARY KEY,
+ domain text,
+ whatweb text,
+ crawlergo text,
+ dirsearch text,
+ batch_num integer,
+ date timestamp not null default (datetime('now','localtime'))
+ )''')
+
+
+def db_insert(sql, *value):
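+ # usage (as in controller.py): db_insert('insert into host_info (domain, batch_num) values (?,?)', domain, now_time)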
+ with sqlite3.connect(os.path.join(main_path, 'scanned_info.db')) as conn:
+ conn.execute(sql, value) # *value arrives as a tuple like (1,) or (1, 2)
+
+
+def db_update(table, name, text):
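+ # write one tool's output into its column on the most recently inserted row; table and column names are interpolated into the SQL, so only pass trusted values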
+ with sqlite3.connect(os.path.join(main_path, 'scanned_info.db')) as conn:
+ sql = 'update {table} set {column}=? where id = (select max(id) from {table});'.format(table=table, column=name)
+ conn.execute(sql, (text,))
\ No newline at end of file
diff --git a/lib/download_tools.py b/lib/download_tools.py
index 55341bf..e7a3c0a 100644
--- a/lib/download_tools.py
+++ b/lib/download_tools.py
@@ -1,39 +1,31 @@
import os
-from lib.setting import TOOLS_DIR
-def download():
- tools = {
- 'xray' : "https://download.xray.cool/xray/1.7.0/xray_linux_amd64.zip?download=true",
- 'crawlergo' : 'https://github.com/0Kee-Team/crawlergo/releases/download/v0.4.0/crawlergo_linux_amd64.zip',
- 'dirsearch' : 'https://github.com/maurosoria/dirsearch/archive/v0.4.1.zip',
- 'oneforall' : 'https://github.com/shmilylty/OneForAll/archive/v0.4.3.zip',
+TOOLS_DIR = os.path.join(os.path.split(os.path.dirname(os.path.realpath(__file__)))[0], 'tools')
+TOOLS = {
+ 'xray': "https://download.xray.cool/xray/1.7.0/xray_linux_amd64.zip",
+ 'crawlergo': 'https://github.com/0Kee-Team/crawlergo/releases/download/v0.4.0/crawlergo_linux_amd64.zip',
+ 'dirsearch': 'https://github.com/maurosoria/dirsearch/archive/v0.4.1.zip',
+ 'oneforall': 'https://github.com/shmilylty/OneForAll/archive/v0.4.3.zip',
}
- os.chdir(TOOLS_DIR)
+# tools downloaded from GitHub unzip into directories like xxx-master and need to be renamed to xxx
+RENAME_DIRS = ['dirsearch', 'oneforall']
- #download
- for key,value in tools.items():
- if key == 'xray' and not value.endswith('.zip'):
- name = 'xray_linux_amd64.zip'
- os.system('wget --no-check-certificate {url} -O {name}'.format(url=value,name=name))
- else:
- #os.system('wget --no-check-certificate {url}'.format(url=value))
- os.system('wget --no-check-certificate {url}'.format(url=value))
+def download():
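+ # fetch every tool archive into TOOLS_DIR, unzip them, and normalise the extracted directory names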
+ os.chdir(TOOLS_DIR)
- #extract
+ for key, value in TOOLS.items():
+ os.system('wget --no-check-certificate {url}'.format(url=value))
os.system('unzip "*.zip"')
- #rename dirsearch and oneforall dir
- dirs = os.listdir(TOOLS_DIR)
- for dir in dirs:
- if dir.lower().startswith('dirsearch'):
- os.system('mv {} dirsearch'.format(dir))
-
- if dir.lower().startswith('oneforall'):
- os.system('mv {} oneforall'.format(dir))
-
+ for name_src in os.listdir(TOOLS_DIR):
+ for name_dest in RENAME_DIRS:
+ if name_src.lower().startswith(name_dest):
+ os.system('mv {} {}'.format(name_src, name_dest))
- tool_name = ['crawlergo','dirsearch','oneforall','xray_linux_amd64']
- if set(tool_name) < set(os.listdir()):
- os.system('touch install.lock')
\ No newline at end of file
+ # check that every tool from the list is now present in the directory
+ if set(TOOLS).issubset(os.listdir()):
+ os.system('touch install.lock')
+ else:
+ exit('Some tools were not downloaded completely, check lib/download_tools.py for details')
diff --git a/lib/general.py b/lib/general.py
index 60cd26e..c41f63f 100755
--- a/lib/general.py
+++ b/lib/general.py
@@ -1,41 +1,38 @@
-import os
from urllib.parse import urlparse
+import os
import socket
import json
import xlrd
-class read_xls():
- def __init__(self,file):
- self.base_str = list('abcdefghijklmnopqrstuvwxyz.-_')
+class read_xls:
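+ # reads target domains from the fourth column (index 3) of the first sheet of an .xls file and filters out malformed entries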
+ def __init__(self, file):
+ self.base_str = list('0123456789abcdefghijklmnopqrstuvwxyz.-_')
self.domains = self.read_xls(file)
- def read_xls(self,file):
+ def read_xls(self, file):
try:
workbook = xlrd.open_workbook(file)
sheet1 = workbook.sheet_by_index(0)
column = sheet1.col_values(3)
+ return self.filter(column)
except Exception as e:
exit(e)
- return self.filter(column)
- def filter(self,domains):
+ def filter(self, domains):
domains_filterd = []
for domain in domains:
if domain is None:
break
-
if ';' in domain:
domain = domain.split(';')[0]
-
            # check that the domain contains only legal characters (e.g. no Chinese characters)
if not set(list(domain)) < set(self.base_str):
                print('domain {} is malformed, ignoring it'.format(domain))
continue
-
if not len(domain) < 3:
domains_filterd.append(domain)
- return domains_filterd
+ return sorted(set(domains_filterd), key=domains_filterd.index)
class Run():
def __init__(self,command,logfile='',delete_file=True):
@@ -109,13 +106,10 @@ def get_ip_from_url(http_url):
def get_file_content(file_path):
if not os.path.exists(file_path):
- print("the file path is not correct")
+ exit("file not found: {}".format(file_path))
- urlList = []
- with open(file_path,'r') as f:
- for line in f.readlines():
- urlList.append(line.strip())
- return urlList
+ with open(file_path, 'r') as f:
+ return [line.strip() for line in f.readlines()]
def dir_is_exists_or_create(*dir_path):
@@ -137,7 +131,7 @@ def check_dict_key_vaild(dict,*keys):
def path_build(*path):
main_path = os.path.split(os.path.dirname(os.path.realpath(__file__)))[0]
- path = os.path.join(main_path,*path)
+ path = os.path.join(main_path, *path)
return path
diff --git a/lib/img/1.png b/lib/img/1.png
new file mode 100644
index 0000000..2220091
Binary files /dev/null and b/lib/img/1.png differ
diff --git a/lib/img/2.png b/lib/img/2.png
new file mode 100644
index 0000000..fd77708
Binary files /dev/null and b/lib/img/2.png differ
diff --git a/lib/img/3.png b/lib/img/3.png
new file mode 100644
index 0000000..79578fc
Binary files /dev/null and b/lib/img/3.png differ
diff --git a/lib/logger.py b/lib/logger.py
deleted file mode 100755
index 1e3bf83..0000000
--- a/lib/logger.py
+++ /dev/null
@@ -1,30 +0,0 @@
-import logging
-import colorlog
-log_colors_config = {
- 'DEBUG': 'cyan',
- 'INFO': 'green',
- 'WARNING': 'yellow',
- 'ERROR': 'red',
- 'CRITICAL': 'red',
-}
-
-logger = logging.getLogger('mylogger')
-logger.setLevel(logging.DEBUG)
-
-fh = logging.FileHandler('test.log')
-fh.setLevel(logging.DEBUG)
-
-ch = logging.StreamHandler()
-ch.setLevel(logging.DEBUG)
-
-file_formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
-color_formatter = colorlog.ColoredFormatter('%(log_color)s[%(levelname)s] - %(asctime)s - %(name)s - %(message)s',log_colors=log_colors_config)
-
-fh.setFormatter(file_formatter)
-ch.setFormatter(color_formatter)
-
-logger.addHandler(fh)
-logger.addHandler(ch)
-
-# logger.info("test info")
-# logger.error("error")
\ No newline at end of file
diff --git a/lib/report.py b/lib/report.py
index d60e717..0a388aa 100755
--- a/lib/report.py
+++ b/lib/report.py
@@ -1,38 +1,152 @@
-from .general import url_parse
-from .setting import TOOLS_REPORT_FILE
-from bs4 import BeautifulSoup
import html
+import os
+import sqlite3
+from bs4 import BeautifulSoup
+from lib.Tools import Snapshot
+
+
+main_path = os.path.split(os.path.dirname(os.path.realpath(__file__)))[0]
+REPORT_PATH = os.path.join(main_path, 'report')
+REPORT_TEMPLATE = os.path.join(main_path, "lib/template.html")
+
+REPORT_TAB = '''
+<div>
+    <h3>{DOMAIN}</h3>
+    {IMG_TAB}
+    <ul>
+        {LI}
+    </ul>
+</div>
+'''
+
+REPORT_LI = '''
+<li>
+    <span>{TIME1} {TIME2}</span>
+    <h4>{NAME}</h4>
+    <pre>{REPORT}</pre>
+</li>
+'''
+
+
+class Report:
+ def __init__(self):
+ self.body = ""
+ self.report = None
+ self.batch_num = None
+ self.IMG_TAB = r'<img src="{}">'
+
+ def html_report_single(self):
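+ # build a report tab from the newest host_info / scanned_info rows of the current batch and insert it into the per-batch html report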
+ with sqlite3.connect(os.path.join(main_path, 'scanned_info.db')) as conn:
+ def parse(fetch):
+ li = ''
+ key = [i[0] for i in fetch.description]
+ for row in fetch.fetchall():
+ value = [str(row[_]) for _ in range(len(row))]
+ domain = value[1]
+ self.batch_num = value[-2]
+ time = value[-1].split()
+ time1, time2 = time[0], time[1]
+ # build the li blocks
+ for name, report in zip(key[2:-2], value[2:-2]):
+ li += REPORT_LI.format(TIME1=time1, TIME2=time2, NAME=name, REPORT=html.escape(report))
+
+ # yield the whole tab block
+ if domain.startswith('http'):
+ yield REPORT_TAB.format(DOMAIN=domain, IMG_TAB=self.IMG_TAB.format(Snapshot.format_img_name(domain)), LI=li)
+ else:
+ yield REPORT_TAB.format(DOMAIN=domain, IMG_TAB='', LI=li)
+
+ tag = ''
+
+ # when inserting, use scanned_info to check whether a port scan ran; if so, the port info has to be inserted first
+ sql = '''
+ select * from host_info
+ where batch_num = (select batch_num from scanned_info order by id desc limit 1)
+ order by id desc limit 1 ;
+ '''
+ fetch = conn.execute(sql)
+ for _ in parse(fetch):
+ tag += _
-class Report():
- def __init__(self,target): # all tools use http_url already
- self.target = url_parse(target).get_netloc()
- self.separate = "\n"*6
-
- def report_insert(self,tool_log_file,title="",is_file=True):
- soup = BeautifulSoup(self.read(TOOLS_REPORT_FILE),'html.parser')
- if is_file == True:
- report = self.read(tool_log_file)
- else :
- report = tool_log_file
- report = title + '\n' + report
-
- if not soup.h3 or self.target not in soup.h3.string:
- text = '<h3>{}</h3> <div><pre>{}</pre></div>\n'.format(self.target,html.escape(report))
- t = BeautifulSoup(text,'html.parser')
+ # the 'limit 1,1' below should have the extra 1 removed
+ # if host_info has data, insert the matching scanned_info domain entry below the port info
+ if tag:
+ sql = '''
+ SELECT * from scanned_info
+ where batch_num = (select batch_num from host_info order by id desc limit 1)
+ and domain LIKE '%'||(SELECT domain from host_info order by id desc limit 1)||'%'
+ order by id desc limit 1;
+ '''
+ for _ in parse(conn.execute(sql)):
+ tag += _
+ else:
+ sql = '''
+ SELECT * from scanned_info order by id desc limit 1 ;
+ '''
+ for _ in parse(conn.execute(sql)):
+ tag += _
+
+ if os.path.exists(os.path.join(REPORT_PATH, '{}-tools.html'.format(self.batch_num))):
+ soup = BeautifulSoup(self.read(os.path.join(REPORT_PATH, '{}-tools.html'.format(self.batch_num))), 'html.parser')
+ else:
+ soup = BeautifulSoup(self.read(REPORT_TEMPLATE), 'html.parser')
+
+ if soup.h3:
+ t = BeautifulSoup(tag, 'html.parser')
soup.h3.insert_before(t)
+ self.write(os.path.join(REPORT_PATH, '{}-tools.html'.format(self.batch_num)), str(soup))
else:
- soup.div.pre.string += self.separate + report
+ print('Failed to write to the report file!')
- self.rewrite_template(str(soup))
+ '''
+ fetch a single batch_num and dump the whole batch
+ unfinished, rough implementation
+ '''
+ def html_report_entire(self):
+ with sqlite3.connect(os.path.join(main_path, 'scanned_info.db')) as conn:
+ self.batch_num = conn.execute('select batch_num from scanned_info order by id desc limit 1;').fetchone()[0]
+ sql = 'select * from scanned_info where batch_num = {};'.format(self.batch_num)
+ fetch = conn.execute(sql).fetchall()
+ for row in fetch:
+ print(row)
+ title = row[1]
+ value = [str(row[_]) for _ in range(len(row)) if row[_] is not None]
+ value = '\n'.join(value[2:])
+ self.body += '<h3>{}</h3> <div><pre>{}</pre></div>\n'.format(title, html.escape(value))
- def rewrite_template(self,text):
- with open(TOOLS_REPORT_FILE,'w') as f:
- f.write(text)
+ soup = BeautifulSoup(self.read(REPORT_TEMPLATE), 'html.parser')
+ if soup.h3:
+ t = BeautifulSoup(self.body, 'html.parser')
+ soup.h3.insert_before(t)
+
+ self.write(os.path.join(REPORT_PATH, '{}-tools.html'.format(self.batch_num)), str(soup))
- def read(self,file):
- with open(file,'r') as f:
+ @staticmethod
+ def read(file):
+ with open(file, 'r') as f:
return f.read()
-if __name__ == "__main__":
- X = Report("http://a.testphp.vulnweb.com:9000/index.php")
- X.insert("setting.py")
+ @staticmethod
+ def write(file, text):
+ with open(file, 'w+') as f:
+ f.write(text)
+
+ def test(self):
+ with sqlite3.connect(os.path.join(main_path, 'scanned_info.db')) as conn:
+ sql = '''select * from scanned_info where batch_num = (
+ select batch_num from scanned_info order by id desc limit 1
+ );'''
+
+ fetch = conn.execute(sql).fetchall()
+ for row in fetch:
+ v = [str(row[c]) for c in range(len(row)) if row[c] is not None]
+ print('\n'.join(v[2:]))
+
+
diff --git a/lib/scanner/__init__.py b/lib/scanner/__init__.py
deleted file mode 100755
index e69de29..0000000
diff --git a/lib/scanner/__pycache__/__init__.cpython-38.pyc b/lib/scanner/__pycache__/__init__.cpython-38.pyc
deleted file mode 100644
index 0be7a35..0000000
Binary files a/lib/scanner/__pycache__/__init__.cpython-38.pyc and /dev/null differ
diff --git a/lib/scanner/__pycache__/awvs.cpython-38.pyc b/lib/scanner/__pycache__/awvs.cpython-38.pyc
deleted file mode 100644
index eba33c8..0000000
Binary files a/lib/scanner/__pycache__/awvs.cpython-38.pyc and /dev/null differ
diff --git a/lib/scanner/__pycache__/crawlergo.cpython-38.pyc b/lib/scanner/__pycache__/crawlergo.cpython-38.pyc
deleted file mode 100644
index c123ba9..0000000
Binary files a/lib/scanner/__pycache__/crawlergo.cpython-38.pyc and /dev/null differ
diff --git a/lib/scanner/__pycache__/dirsearch.cpython-38.pyc b/lib/scanner/__pycache__/dirsearch.cpython-38.pyc
deleted file mode 100644
index c6de570..0000000
Binary files a/lib/scanner/__pycache__/dirsearch.cpython-38.pyc and /dev/null differ
diff --git a/lib/scanner/__pycache__/masscan.cpython-38.pyc b/lib/scanner/__pycache__/masscan.cpython-38.pyc
deleted file mode 100644
index bfd9195..0000000
Binary files a/lib/scanner/__pycache__/masscan.cpython-38.pyc and /dev/null differ
diff --git a/lib/scanner/__pycache__/nmap.cpython-38.pyc b/lib/scanner/__pycache__/nmap.cpython-38.pyc
deleted file mode 100644
index 9010bbb..0000000
Binary files a/lib/scanner/__pycache__/nmap.cpython-38.pyc and /dev/null differ
diff --git a/lib/scanner/__pycache__/oneforall.cpython-38.pyc b/lib/scanner/__pycache__/oneforall.cpython-38.pyc
deleted file mode 100644
index d1b7310..0000000
Binary files a/lib/scanner/__pycache__/oneforall.cpython-38.pyc and /dev/null differ
diff --git a/lib/scanner/__pycache__/request_engine.cpython-38.pyc b/lib/scanner/__pycache__/request_engine.cpython-38.pyc
deleted file mode 100644
index c3c1e40..0000000
Binary files a/lib/scanner/__pycache__/request_engine.cpython-38.pyc and /dev/null differ
diff --git a/lib/scanner/__pycache__/whatweb.cpython-38.pyc b/lib/scanner/__pycache__/whatweb.cpython-38.pyc
deleted file mode 100644
index 0674bf4..0000000
Binary files a/lib/scanner/__pycache__/whatweb.cpython-38.pyc and /dev/null differ
diff --git a/lib/scanner/__pycache__/xray.cpython-38.pyc b/lib/scanner/__pycache__/xray.cpython-38.pyc
deleted file mode 100644
index 0335b54..0000000
Binary files a/lib/scanner/__pycache__/xray.cpython-38.pyc and /dev/null differ
diff --git a/lib/scanner/crawlergo.py b/lib/scanner/crawlergo.py
deleted file mode 100755
index fcc1952..0000000
--- a/lib/scanner/crawlergo.py
+++ /dev/null
@@ -1,69 +0,0 @@
-#!/usr/bin/python3
-# coding: utf-8
-
-
-import simplejson
-import subprocess
-import os
-from lib.report import Report
-from lib.setting import TOOLS_DIR
-from func_timeout import func_set_timeout
-import func_timeout
-
-class Crawlergo(Report):
- def __init__(self,target):
- super().__init__(target)
- self.BROWERS = '/usr/bin/google-chrome'
- self.XRAY_PROXY = 'http://127.0.0.1:7777'
- self.sub_domains = []
- self.target = target
-
- try:
- self.scan()
- except func_timeout.exceptions.FunctionTimedOut:
- print('crawlergo timeout')
-
-
- @func_set_timeout(300)
- def scan(self):
- print("crawlergo scanning : ",self.target)
-
- try:
- crawlergo = os.path.join(TOOLS_DIR,"crawlergo")
- print(1)
- #cmd = [crawlergo, "-c", self.BROWERS, "-t", "5", "-f", "smart", "--push-to-proxy", self.XRAY_PROXY, "--push-pool-max", "10", "--fuzz-path", "-o", "json", self.target]
- cmd = [crawlergo, "-c", self.BROWERS, "--push-to-proxy", self.XRAY_PROXY, "-o", "json", self.target]
- rsp = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
- print(2)
- output, error = rsp.communicate()
- # "--[Mission Complete]--" is the delimiter string that marks the end of the crawl
- result = simplejson.loads(output.decode().split("--[Mission Complete]--")[1])
- req_list = result["req_list"]
- urls = []
- for req in req_list:
- print("crawlergo found :" , req['url'])
- urls.append(req['url'])
- #self.put_to_file(req['url'],os.path.join(self.report_dir,url_parse(self.target).get_netloc()+"-crawlergo.url"))
- #print(req_list[0])
-
- # subdomains are added to the scan queue in the controller module
- print(type(result["sub_domain_list"]))
-
- self.sub_domains = result["sub_domain_list"]
- # for domain in sub_domains:
- # print("sub found :", domain)
- #self.put_to_file(domain, os.path.join(self.report_dir, url_parse(self.target).get_netloc() + "-crawlergo.sub"))
-
-
- rows = ["crawlergo urls found:"] + urls + ['\n'*1,"crawlergo subdomains found:"] + self.sub_domains
- self.report_insert('\n'.join(rows),"crawlergo report:",is_file=False)
- except Exception as f:
- print(f)
- pass
-
- # def put_to_file(self,row,filename):
- # with open(filename,'a+') as f:
- # f.write(row)
-
-if __name__ == '__main__':
- A = Crawlergo("http://testphp.vulnweb.com")
\ No newline at end of file
diff --git a/lib/scanner/dirsearch.py b/lib/scanner/dirsearch.py
deleted file mode 100755
index 0326d68..0000000
--- a/lib/scanner/dirsearch.py
+++ /dev/null
@@ -1,56 +0,0 @@
-from lib.setting import TOOLS_DIR
-from lib.report import Report
-from lib.general import Run
-import os
-from func_timeout import func_set_timeout
-import func_timeout
-
-class Dirsearch(Report):
- def __init__(self,target):
- super().__init__(target)
- self.LOG = '/tmp/dirsearch.csv'
- self.target = target
- self.urls = []
-
- try:
- self.scan()
- except func_timeout.exceptions.FunctionTimedOut:
- print('dirsearch timeout')
-
- @func_set_timeout(300)
- def scan(self):
- command = 'python3 {}/dirsearch/dirsearch.py -e * -x 301,403,404,405,500,501,502,503 -u {} --csv-report {}'.format(TOOLS_DIR,self.target,self.LOG)
- print('dirsearch is running')
- with Run(command,self.LOG,delete_file=False) as f:
- if os.path.exists(self.LOG):
- #print("dirsearch log file exists , and run report_insert")
- print('\n'.join(self.report_parse(self.LOG)))
- self.report_insert('\n'.join(self.report_parse(self.LOG)),"DIRSEARCH SCAN:",is_file=False)
- os.system('rm -f ' + self.LOG)
- else:
- print("dirsearch log file is not exists , may be because of dirsearch cannot connect the {}".format(self.target))
-
- # tidy up dirsearch's csv report
- def report_parse(self,log_file):
- rows = []
- with open(log_file,'r') as f:
- try:
- next(f) #always raise a StopIteration error before
- except StopIteration as s:
- print(s)
- finally:
- for line in f.readlines():
- line = line.strip().split(',')
- if not line[1]:
- continue
-
- self.urls.append(line[1])
- try:
- s = "{:<} - {:>5}B - {:<5}".format(line[2], line[3], line[1])
- except:
- continue
- rows.append(s)
- return rows
-
-if __name__ == "__main__":
- X = Dirsearch("http://testphp.vulnweb.com")
diff --git a/lib/scanner/dirsearch1.py b/lib/scanner/dirsearch1.py
deleted file mode 100755
index c351bc1..0000000
--- a/lib/scanner/dirsearch1.py
+++ /dev/null
@@ -1,38 +0,0 @@
-from lib.setting import TOOLS_DIR
-from lib.report import Report
-from lib.general import Run
-import os
-from func_timeout import func_set_timeout
-import func_timeout
-
-class Dirsearch(Report):
- def __init__(self,target):
- super().__init__(target)
- self.LOG = '/tmp/dirsearch.txt'
- self.target = target
- self.urls = []
-
- try:
- self.scan()
- except func_timeout.exceptions.FunctionTimedOut:
- print('dirsearch timeout')
-
-
- def scan(self):
- command = 'python3 {}/dirsearch/dirsearch.py -e * -u {} --plain-text-report {}'.format(TOOLS_DIR,self.target,self.LOG)
- with Run(command,self.LOG,delete_file=False) as f:
- if os.path.exists(self.LOG):
- self.report_insert('\n'.join(self.report_parse(self.LOG)),"DIRSEARCH SCAN:",is_file=False)
- self.parse()
- os.system('rm -f ' + self.LOG)
- else:
- print("dirsearch log file is not exists")
-
- def parse(self):
- with open(self.LOG,'r') as f:
- for line in f.readlines():
- url = line.split(' ')[-1]
- self.urls.append(url.strip())
-
-if __name__ == "__main__":
- X = Dirsearch("http://testphp.vulnweb.com")
\ No newline at end of file
diff --git a/lib/scanner/masscan.py b/lib/scanner/masscan.py
deleted file mode 100755
index e74ae09..0000000
--- a/lib/scanner/masscan.py
+++ /dev/null
@@ -1,39 +0,0 @@
-import os
-import re
-from ..setting import TOOLS_DIR
-from func_timeout import func_set_timeout
-import func_timeout
-
-
-class Masscan():
- def __init__(self,target):
- self.target = target
-
- try:
- self.open_ports = self.scan()
- except func_timeout.exceptions.FunctionTimedOut:
- self.open_ports = []
-
- @func_set_timeout(300)
- def scan(self):
- try:
- print("masscan scanning :", self.target)
- #os.system("masscan --ports 0-65535 {0} --rate=10000 -oX {}/masscan.log".format(self.target,self.report_dir))
- result = os.popen("masscan --ports 0-65535 {0} -sS -Pn --rate=1000".format(self.target)).read()
- open_ports = self.reg_port(result)
- print("opening ports:",open_ports)
- except Exception as e:
- print(e)
-
- return open_ports
-
- def reg_port(self,text):
- #masscan output : Discovered open port 8814/tcp on 192.168.1.225
- pattern = '\d{1,5}/tcp'
- result = re.findall(pattern, text)
- result = [x[:-4] for x in result]
- return result
-
-
-if __name__ == "__main__":
- X = Masscan("192.168.1.225")
\ No newline at end of file
diff --git a/lib/scanner/nmap.py b/lib/scanner/nmap.py
deleted file mode 100755
index 498f8a6..0000000
--- a/lib/scanner/nmap.py
+++ /dev/null
@@ -1,44 +0,0 @@
-import re
-import os
-from lib.report import Report
-from lib.general import Run
-from func_timeout import func_set_timeout
-import func_timeout
-
-
-class Nmap(Report):
- def __init__(self,target,ports_scan=[]):
- super().__init__(target)
- self.LOG_FILE = "/tmp/nmap.log"
- self.target = target
- self.ports_scan = ports_scan
-
- try:
- self.http_open_ports = self.scan()
- except func_timeout.exceptions.FunctionTimedOut:
- print('nmap timeout')
- self.http_open_ports = []
-
-
- @func_set_timeout(500)
- def scan(self):
- if not self.ports_scan:
- command = "nmap -sS -Pn -A -v {0} -oN {1}".format(self.target,self.LOG_FILE)
- else:
- command = "nmap -sS -Pn -A -p {0} {1} -v -oN {2}".format(",".join(self.ports_scan),self.target,self.LOG_FILE)
-
- with Run(command,self.LOG_FILE,) as f:
- self.report_insert(self.LOG_FILE,"NMAP report:")
- return self.get_http_ports(f)
-
- def get_http_ports(self,text):
- http_ports = re.findall('\d{1,5}/tcp\s{1,}open\s{1,}[ssl/]*http', text)
- http_ports = [x.split("/")[0] for x in http_ports]
-
- if '80' in http_ports and '443' in http_ports:
- http_ports.remove("443")
-
- return http_ports
-
-if __name__ == "__main__":
- X = Nmap("47.98.126.199",["80"])
diff --git a/lib/scanner/oneforall.py b/lib/scanner/oneforall.py
deleted file mode 100755
index 7e48e83..0000000
--- a/lib/scanner/oneforall.py
+++ /dev/null
@@ -1,35 +0,0 @@
-from ..setting import TOOLS_REPORT_FILE
-from ..setting import TOOLS_DIR
-import os
-import csv
-
-class OneForAll():
- def __init__(self,target):
- self.target = target
-
- if os.path.exists(os.path.join(TOOLS_DIR,'OneForAll')):
- os.system('cp -r {0}/OneForAll {0}/oneforall'.format(TOOLS_DIR))
-
- def scan(self):
- print("Brute domain: " + self.target)
-
- os.system('python3 {}/oneforall/oneforall.py --target {} run'.format(TOOLS_DIR, self.target))
- report_file = "{}/oneforall/results/{}.csv".format(TOOLS_DIR, self.target)
- if not os.path.exists(report_file):
- exit("Not found the oneforall's output file ")
-
- return self.get_subdomains(report_file)
-
-
- def get_subdomains(self,report_file):
- try:
- with open(report_file, 'r') as csvfile:
- csvfile.__next__()
- reader = csv.reader(csvfile)
- column = [row[5] for row in reader]
- urlList = list(set(column))
- except Exception as e:
- print(e)
- urlList = []
-
- return urlList
diff --git a/lib/scanner/request_engine.py b/lib/scanner/request_engine.py
deleted file mode 100644
index 43d57d7..0000000
--- a/lib/scanner/request_engine.py
+++ /dev/null
@@ -1,23 +0,0 @@
-import requests
-
-class Request():
- def __init__(self):
- self.proxy = {'http': 'http://127.0.0.1:7777',
- 'https': 'http://127.0.0.1:7777',}
- self.headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:27.0) Gecko/20100101 Firefox/27.0)',
- }
-
- def repeat(self,url):
- try:
- response = requests.get(url=url,headers=self.headers,proxies=self.proxy,verify=False,timeout=20)
- #print(response)
- return response
- except Exception as e:
- print(e)
-
-
-if __name__ =="__main__":
- X = Request()
- X.repeat("http://testphp.vulnweb.com:80/.idea/")
-
-
diff --git a/lib/scanner/web_path/dirb.py b/lib/scanner/web_path/dirb.py
deleted file mode 100755
index 65d7eb2..0000000
--- a/lib/scanner/web_path/dirb.py
+++ /dev/null
@@ -1,3 +0,0 @@
-
-class dirb():
- def __init__(self):
diff --git a/lib/scanner/whatweb.py b/lib/scanner/whatweb.py
deleted file mode 100644
index 2ba35f3..0000000
--- a/lib/scanner/whatweb.py
+++ /dev/null
@@ -1,18 +0,0 @@
-from lib.general import Run
-from lib.report import Report
-
-class Whatweb(Report):
- def __init__(self,target):
- super().__init__(target)
- self.target = target
- self.scan()
-
- def scan(self):
- command = "whatweb --color never {}".format(self.target)
- with Run(command) as f:
- for i in f.split('\n'):
- if i.startswith('http'):
- #print(i.replace(',','\n'))
- self.report_insert(i.replace(',','\n'),'WHATWEB SCAN:',is_file=False)
-
-
diff --git a/lib/scanner/xray.py b/lib/scanner/xray.py
deleted file mode 100755
index 6f34bde..0000000
--- a/lib/scanner/xray.py
+++ /dev/null
@@ -1,62 +0,0 @@
-import os
-import time
-import threading
-import re
-from ..setting import XRAY_REPORT_FILE,TOOLS_DIR
-
-class Xray():
- def __init__(self):
- self.LOG = "/tmp/xray.log"
- self.PROXY = '127.0.0.1:7777'
- self.vuln_scaned = 0
-
- self.kill_previous_xray_process()
- self.clear_previous_xray_log()
-
- def scan(self):
- t = threading.Thread(target=self.xray_run, daemon=True)
- t.start()
-
- ## blocking mode: earlier tests showed the later tools' scans turning xray's requests into a wall of red timeouts
- # while(True):
- # if self.check_xray_status():
- # os.system("rm /tmp/xray.log")
- # self.xray_scan_over = 1
- # break
-
- def xray_run(self):
- run_command = "{0}/xray_linux_amd64 webscan --listen {1} --html-output {2} | tee -a {3}".format(TOOLS_DIR,self.PROXY,XRAY_REPORT_FILE,self.LOG)
- print("xray command : ",run_command)
- os.system(run_command)
-
-
- def check_xray_status(self):
- cmd = "wc " + self.LOG +"| awk '{print $1}'"
- rows0 = os.popen(cmd).read()
- time.sleep(5)
- rows1 = os.popen(cmd).read()
- cmd = "tail -n 10 {}".format(self.LOG)
- s = os.popen(cmd).read()
-
- if rows0 == rows1 and "All pending requests have been scanned" in s:
- os.system('echo "" > {}'.format(self.LOG))
- return True
- else:
- return False
-
- def clear_previous_xray_log(self):
- if os.path.exists(self.LOG):
- os.system("rm -f {}".format(self.LOG))
-
- def kill_previous_xray_process(self):
- port = self.PROXY.rsplit(':')[-1]
- process_status = os.popen("netstat -pantu | grep " + port).read()
- if process_status:
- process_num = re.findall("\d{1,}/xray", process_status)
- if process_num:
- process_num = ''.join(process_num)[:-5]
- print(port," port exist previous xray process , killing")
- os.system("kill " + str(process_num))
-
-if __name__ == "__main__":
- A = Xray()
diff --git a/lib/setting.py b/lib/setting.py
deleted file mode 100755
index 90c924f..0000000
--- a/lib/setting.py
+++ /dev/null
@@ -1,26 +0,0 @@
-from lib.general import path_build
-from lib.general import dir_is_exists_or_create
-import time
-
-
-# TIME
-now_time = time.strftime("%Y-%m-%d-%H-%M-%S-", time.localtime(time.time()))
-
-
-# DIR PATH
-REPORT_DIR = path_build("report")
-TOOLS_DIR = path_build("tools")
-dir_is_exists_or_create(REPORT_DIR,TOOLS_DIR)
-
-TOOLS_REPORT_NAME = now_time + "tools-scan.html"
-TOOLS_REPORT_FILE = path_build('report',TOOLS_REPORT_NAME)
-
-XRAY_REPORT_NAME = now_time + "xray.html"
-XRAY_REPORT_FILE = path_build('report',XRAY_REPORT_NAME)
-
-AWVS_REPORT_FILE = now_time + "{}.awvs.html"
-
-TEMPLATE_FILE = path_build('lib','template.html')
-
-
-
diff --git a/lib/template.html b/lib/template.html
index 4927968..79ad1f0 100755
--- a/lib/template.html
+++ b/lib/template.html
@@ -1,104 +1,434 @@