Added whitespaces after commas & operators

This commit is contained in:
Ahmed Aboul-Ela 2016-11-19 05:47:52 +04:00
parent 7a84f06e93
commit 201d797b87
1 changed files with 182 additions and 157 deletions

View File

@ -22,7 +22,7 @@ from subbrute import subbrute
import dns.resolver import dns.resolver
import requests import requests
#Python 2.x and 3.x compatiablity # Python 2.x and 3.x compatiablity
if sys.version > '3': if sys.version > '3':
import urllib.parse as urlparse import urllib.parse as urlparse
import urllib.parse as urllib import urllib.parse as urllib
@ -30,63 +30,70 @@ else:
import urlparse import urlparse
import urllib import urllib
#In case you cannot install some of the required development packages, there's also an option to disable the SSL warning: # In case you cannot install some of the required development packages
# there's also an option to disable the SSL warning:
try: try:
import requests.packages.urllib3 import requests.packages.urllib3
requests.packages.urllib3.disable_warnings() requests.packages.urllib3.disable_warnings()
except: except:
pass pass
#Check if we are running this on windows platform # Check if we are running this on windows platform
is_windows = sys.platform.startswith('win') is_windows = sys.platform.startswith('win')
#Console Colors # Console Colors
if is_windows: if is_windows:
G = Y = B = R = W = G = Y = B = R = W = '' #use no terminal colors on windows # use no terminal colors on windows
G = Y = B = R = W = G = Y = B = R = W = ''
else: else:
G = '\033[92m' #green G = '\033[92m' # green
Y = '\033[93m' #yellow Y = '\033[93m' # yellow
B = '\033[94m' #blue B = '\033[94m' # blue
R = '\033[91m' #red R = '\033[91m' # red
W = '\033[0m' #white W = '\033[0m' # white
def banner(): def banner():
print("""%s print("""%s
____ _ _ _ _ _____ ____ _ _ _ _ _____
/ ___| _ _| |__ | (_)___| |_|___ / _ __ / ___| _ _| |__ | (_)___| |_|___ / _ __
\___ \| | | | '_ \| | / __| __| |_ \| '__| \___ \| | | | '_ \| | / __| __| |_ \| '__|
___) | |_| | |_) | | \__ \ |_ ___) | | ___) | |_| | |_) | | \__ \ |_ ___) | |
|____/ \__,_|_.__/|_|_|___/\__|____/|_|%s%s |____/ \__,_|_.__/|_|_|___/\__|____/|_|%s%s
# Coded By Ahmed Aboul-Ela - @aboul3la # Coded By Ahmed Aboul-Ela - @aboul3la
"""%(R,W,Y)) """ % (R, W, Y))
def parser_error(errmsg): def parser_error(errmsg):
banner() banner()
print("Usage: python "+sys.argv[0]+" [Options] use -h for help") print("Usage: python " + sys.argv[0] + " [Options] use -h for help")
print(R+"Error: "+errmsg+W) print(R + "Error: " + errmsg + W)
sys.exit() sys.exit()
def parse_args(): def parse_args():
#parse the arguments # parse the arguments
parser = argparse.ArgumentParser(epilog = '\tExample: \r\npython '+sys.argv[0]+" -d google.com") parser = argparse.ArgumentParser(epilog='\tExample: \r\npython ' + sys.argv[0] + " -d google.com")
parser.error = parser_error parser.error = parser_error
parser._optionals.title = "OPTIONS" parser._optionals.title = "OPTIONS"
parser.add_argument('-d', '--domain', help="Domain name to enumerate it's subdomains", required=True) parser.add_argument('-d', '--domain', help="Domain name to enumerate it's subdomains", required=True)
parser.add_argument('-b', '--bruteforce', help='Enable the subbrute bruteforce module',nargs='?', default=False) parser.add_argument('-b', '--bruteforce', help='Enable the subbrute bruteforce module', nargs='?', default=False)
parser.add_argument('-p', '--ports', help='Scan the found subdomains against specified tcp ports') parser.add_argument('-p', '--ports', help='Scan the found subdomains against specified tcp ports')
parser.add_argument('-v', '--verbose', help='Enable Verbosity and display results in realtime',nargs='?', default=False) parser.add_argument('-v', '--verbose', help='Enable Verbosity and display results in realtime', nargs='?', default=False)
parser.add_argument('-t', '--threads', help='Number of threads to use for subbrute bruteforce', type=int, default=30) parser.add_argument('-t', '--threads', help='Number of threads to use for subbrute bruteforce', type=int, default=30)
parser.add_argument('-e', '--engines', help='Specify a comma-separated list of search engines') parser.add_argument('-e', '--engines', help='Specify a comma-separated list of search engines')
parser.add_argument('-o', '--output', help='Save the results to text file') parser.add_argument('-o', '--output', help='Save the results to text file')
return parser.parse_args() return parser.parse_args()
def write_file(filename, subdomains): def write_file(filename, subdomains):
#saving subdomains results to output file # saving subdomains results to output file
print("%s[-] Saving results to file: %s%s%s%s"%(Y,W,R,filename,W)) print("%s[-] Saving results to file: %s%s%s%s" % (Y, W, R, filename, W))
with open(str(filename), 'wt') as f: with open(str(filename), 'wt') as f:
for subdomain in subdomains: for subdomain in subdomains:
f.write(subdomain+"\r\n") f.write(subdomain + "\r\n")
def subdomain_cmp(d1, d2): def subdomain_cmp(d1, d2):
"""cmp function for subdomains d1 and d2. """cmp function for subdomains d1 and d2.
@ -111,14 +118,14 @@ def subdomain_cmp(d1, d2):
d1 = d1.split('.')[::-1] d1 = d1.split('.')[::-1]
d2 = d2.split('.')[::-1] d2 = d2.split('.')[::-1]
val = 1 if d1>d2 else (-1 if d1<d2 else 0) val = 1 if d1 > d2 else (-1 if d1 < d2 else 0)
if ((len(d1) < len(d2)) and if ((len(d1) < len(d2)) and
(d1[-1] == 'www') and (d1[-1] == 'www') and
(d1[:-1] == d2[:len(d1)-1])): (d1[: - 1] == d2[:len(d1) - 1])):
val = -1 val = -1
elif ((len(d1) > len(d2)) and elif ((len(d1) > len(d2)) and
(d2[-1] == 'www') and (d2[-1] == 'www') and
(d1[:len(d2)-1] == d2[:-1])): (d1[:len(d2) - 1] == d2[: - 1])):
val = 1 val = 1
elif d1[:-1] == d2[:-1]: elif d1[:-1] == d2[:-1]:
if d1[-1] == 'www': if d1[-1] == 'www':
@ -127,6 +134,7 @@ def subdomain_cmp(d1, d2):
val = 1 val = 1
return val return val
class enumratorBase(object): class enumratorBase(object):
def __init__(self, base_url, engine_name, domain, subdomains=None, silent=False, verbose=True): def __init__(self, base_url, engine_name, domain, subdomains=None, silent=False, verbose=True):
subdomains = subdomains or [] subdomains = subdomains or []
@ -147,33 +155,31 @@ class enumratorBase(object):
def print_banner(self): def print_banner(self):
""" subclass can override this if they want a fancy banner :)""" """ subclass can override this if they want a fancy banner :)"""
self.print_(G+"[-] Searching now in %s.." %(self.engine_name)+W) self.print_(G + "[-] Searching now in %s.." % (self.engine_name) + W)
return return
def send_req(self, query, page_no=1): def send_req(self, query, page_no=1):
headers = {'User-Agent': 'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:38.0) Gecko/20100101 Firefox/38.0', headers = {
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8', 'User-Agent': 'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:38.0) Gecko/20100101 Firefox/38.0',
'Accept-Language': 'en-GB,en;q=0.5', 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
'Accept-Encoding': 'gzip, deflate', 'Accept-Language': 'en-GB,en;q=0.5',
'Connection': 'keep-alive' 'Accept-Encoding': 'gzip, deflate',
'Connection': 'keep-alive',
} }
url = self.base_url.format(query=query, page_no=page_no) url = self.base_url.format(query=query, page_no=page_no)
try: try:
resp = self.session.get(url, headers=headers, timeout=self.timeout) resp = self.session.get(url, headers=headers, timeout=self.timeout)
except Exception as e: except Exception:
resp = None resp = None
return self.get_response(resp) return self.get_response(resp)
def get_response(self,response): def get_response(self, response):
if response is None: if response is None:
return 0 return 0
if hasattr(response, "text"): return response.text if hasattr(response, "text") else response.content
return response.text
else:
return response.content
def check_max_subdomains(self,count): def check_max_subdomains(self, count):
if self.MAX_DOMAINS == 0: if self.MAX_DOMAINS == 0:
return False return False
return count >= self.MAX_DOMAINS return count >= self.MAX_DOMAINS
@ -183,12 +189,12 @@ class enumratorBase(object):
return False return False
return num >= self.MAX_PAGES return num >= self.MAX_PAGES
#Override # override
def extract_domains(self, resp): def extract_domains(self, resp):
""" chlid class should override this function """ """ chlid class should override this function """
return return
#override # override
def check_response_errors(self, resp): def check_response_errors(self, resp):
""" chlid class should override this function """ chlid class should override this function
The function should return True if there are no errors and False otherwise The function should return True if there are no errors and False otherwise
@ -215,28 +221,28 @@ class enumratorBase(object):
while flag: while flag:
query = self.generate_query() query = self.generate_query()
count = query.count(self.domain) #finding the number of subdomains found so far count = query.count(self.domain) # finding the number of subdomains found so far
#if they we reached the maximum number of subdomains in search query # if they we reached the maximum number of subdomains in search query
#then we should go over the pages # then we should go over the pages
if self.check_max_subdomains(count): if self.check_max_subdomains(count):
page_no = self.get_page(page_no) page_no = self.get_page(page_no)
if self.check_max_pages(page_no): #maximum pages for Google to avoid getting blocked if self.check_max_pages(page_no): # maximum pages for Google to avoid getting blocked
return self.subdomains return self.subdomains
resp = self.send_req(query, page_no) resp = self.send_req(query, page_no)
#check if there is any error occured # check if there is any error occured
if not self.check_response_errors(resp): if not self.check_response_errors(resp):
return self.subdomains return self.subdomains
links = self.extract_domains(resp) links = self.extract_domains(resp)
#if the previous page hyperlinks was the similar to the current one, then maybe we have reached the last page # if the previous page hyperlinks was the similar to the current one, then maybe we have reached the last page
if links == prev_links: if links == prev_links:
retries += 1 retries += 1
page_no = self.get_page(page_no) page_no = self.get_page(page_no)
#make another retry maybe it isn't the last page # make another retry maybe it isn't the last page
if retries >= 3: if retries >= 3:
return self.subdomains return self.subdomains
@ -260,6 +266,7 @@ class enumratorBaseThreaded(multiprocessing.Process, enumratorBase):
for domain in domain_list: for domain in domain_list:
self.q.append(domain) self.q.append(domain)
class GoogleEnum(enumratorBaseThreaded): class GoogleEnum(enumratorBaseThreaded):
def __init__(self, domain, subdomains=None, q=None, silent=False, verbose=True): def __init__(self, domain, subdomains=None, q=None, silent=False, verbose=True):
subdomains = subdomains or [] subdomains = subdomains or []
@ -278,20 +285,20 @@ class GoogleEnum(enumratorBaseThreaded):
for link in links_list: for link in links_list:
link = re.sub('<span.*>', '', link) link = re.sub('<span.*>', '', link)
if not link.startswith('http'): if not link.startswith('http'):
link="http://"+link link = "http://" + link
subdomain = urlparse.urlparse(link).netloc subdomain = urlparse.urlparse(link).netloc
if subdomain and subdomain not in self.subdomains and subdomain != self.domain: if subdomain and subdomain not in self.subdomains and subdomain != self.domain:
if self.verbose: if self.verbose:
self.print_("%s%s: %s%s"%(R, self.engine_name, W, subdomain)) self.print_("%s%s: %s%s"%(R, self.engine_name, W, subdomain))
self.subdomains.append(subdomain.strip()) self.subdomains.append(subdomain.strip())
except Exception as e: except Exception:
pass pass
return links_list return links_list
def check_response_errors(self, resp): def check_response_errors(self, resp):
if 'Our systems have detected unusual traffic' in resp: if 'Our systems have detected unusual traffic' in resp:
self.print_(R+"[!] Error: Google probably now is blocking our requests"+W) self.print_(R + "[!] Error: Google probably now is blocking our requests" + W)
self.print_(R+"[~] Finished now the Google Enumeration ..."+W) self.print_(R + "[~] Finished now the Google Enumeration ..." + W)
return False return False
return True return True
@ -302,12 +309,13 @@ class GoogleEnum(enumratorBaseThreaded):
def generate_query(self): def generate_query(self):
if self.subdomains: if self.subdomains:
fmt = 'site:{domain} -www.{domain} -{found}' fmt = 'site:{domain} -www.{domain} -{found}'
found = ' -'.join(self.subdomains[:self.MAX_DOMAINS-2]) found = ' -'.join(self.subdomains[:self.MAX_DOMAINS - 2])
query = fmt.format(domain=self.domain, found=found) query = fmt.format(domain=self.domain, found=found)
else: else:
query = "site:{domain} -www.{domain}".format(domain=self.domain) query = "site:{domain} -www.{domain}".format(domain=self.domain)
return query return query
class YahooEnum(enumratorBaseThreaded): class YahooEnum(enumratorBaseThreaded):
def __init__(self, domain, subdomains=None, q=None, silent=False, verbose=True): def __init__(self, domain, subdomains=None, q=None, silent=False, verbose=True):
subdomains = subdomains or [] subdomains = subdomains or []
@ -326,19 +334,19 @@ class YahooEnum(enumratorBaseThreaded):
try: try:
links = link_regx.findall(resp) links = link_regx.findall(resp)
links2 = link_regx2.findall(resp) links2 = link_regx2.findall(resp)
links_list = links+links2 links_list = links + links2
for link in links_list: for link in links_list:
link = re.sub("<(\/)?b>","", link) link = re.sub("<(\/)?b>", "", link)
if not link.startswith('http'): if not link.startswith('http'):
link="http://"+link link = "http://" + link
subdomain = urlparse.urlparse(link).netloc subdomain = urlparse.urlparse(link).netloc
if not subdomain.endswith(self.domain): if not subdomain.endswith(self.domain):
continue continue
if subdomain and subdomain not in self.subdomains and subdomain != self.domain: if subdomain and subdomain not in self.subdomains and subdomain != self.domain:
if self.verbose: if self.verbose:
self.print_("%s%s: %s%s"%(R, self.engine_name, W, subdomain)) self.print_("%s%s: %s%s" % (R, self.engine_name, W, subdomain))
self.subdomains.append(subdomain.strip()) self.subdomains.append(subdomain.strip())
except Exception as e: except Exception:
pass pass
return links_list return links_list
@ -346,7 +354,7 @@ class YahooEnum(enumratorBaseThreaded):
def should_sleep(self): def should_sleep(self):
return return
def get_page(self,num): def get_page(self, num):
return num + 10 return num + 10
def generate_query(self): def generate_query(self):
@ -358,6 +366,7 @@ class YahooEnum(enumratorBaseThreaded):
query = "site:{domain}".format(domain=self.domain) query = "site:{domain}".format(domain=self.domain)
return query return query
class AskEnum(enumratorBaseThreaded): class AskEnum(enumratorBaseThreaded):
def __init__(self, domain, subdomains=None, q=None, silent=False, verbose=True): def __init__(self, domain, subdomains=None, q=None, silent=False, verbose=True):
subdomains = subdomains or [] subdomains = subdomains or []
@ -375,18 +384,18 @@ class AskEnum(enumratorBaseThreaded):
links_list = link_regx.findall(resp) links_list = link_regx.findall(resp)
for link in links_list: for link in links_list:
if not link.startswith('http'): if not link.startswith('http'):
link="http://"+link link = "http://" + link
subdomain = urlparse.urlparse(link).netloc subdomain = urlparse.urlparse(link).netloc
if subdomain not in self.subdomains and subdomain != self.domain: if subdomain not in self.subdomains and subdomain != self.domain:
if self.verbose: if self.verbose:
self.print_("%s%s: %s%s"%(R, self.engine_name, W, subdomain)) self.print_("%s%s: %s%s"%(R, self.engine_name, W, subdomain))
self.subdomains.append(subdomain.strip()) self.subdomains.append(subdomain.strip())
except Exception as e: except Exception:
pass pass
return links_list return links_list
def get_page(self,num): def get_page(self, num):
return num + 1 return num + 1
def generate_query(self): def generate_query(self):
@ -399,6 +408,7 @@ class AskEnum(enumratorBaseThreaded):
return query return query
class BingEnum(enumratorBaseThreaded): class BingEnum(enumratorBaseThreaded):
def __init__(self, domain, subdomains=None, q=None, silent=False, verbose=True): def __init__(self, domain, subdomains=None, q=None, silent=False, verbose=True):
subdomains = subdomains or [] subdomains = subdomains or []
@ -406,9 +416,9 @@ class BingEnum(enumratorBaseThreaded):
self.engine_name = "Bing" self.engine_name = "Bing"
self.MAX_DOMAINS = 30 self.MAX_DOMAINS = 30
self.MAX_PAGES = 0 self.MAX_PAGES = 0
enumratorBaseThreaded.__init__(self, base_url, self.engine_name,domain, subdomains,q=q, silent=silent) enumratorBaseThreaded.__init__(self, base_url, self.engine_name, domain, subdomains, q=q, silent=silent)
self.q = q self.q = q
self.verbose=verbose self.verbose = verbose
return return
def extract_domains(self, resp): def extract_domains(self, resp):
@ -417,18 +427,18 @@ class BingEnum(enumratorBaseThreaded):
try: try:
links = link_regx.findall(resp) links = link_regx.findall(resp)
links2 = link_regx2.findall(resp) links2 = link_regx2.findall(resp)
links_list = links+links2 links_list = links + links2
for link in links_list: for link in links_list:
link = re.sub('<(\/)?strong>|<span.*?>|<|>', '', link) link = re.sub('<(\/)?strong>|<span.*?>|<|>', '', link)
if not link.startswith('http'): if not link.startswith('http'):
link="http://"+link link = "http://" + link
subdomain = urlparse.urlparse(link).netloc subdomain = urlparse.urlparse(link).netloc
if subdomain not in self.subdomains and subdomain != self.domain: if subdomain not in self.subdomains and subdomain != self.domain:
if self.verbose: if self.verbose:
self.print_("%s%s: %s%s"%(R, self.engine_name, W, subdomain)) self.print_("%s%s: %s%s" % (R, self.engine_name, W, subdomain))
self.subdomains.append(subdomain.strip()) self.subdomains.append(subdomain.strip())
except Exception as e: except Exception:
pass pass
return links_list return links_list
@ -442,6 +452,7 @@ class BingEnum(enumratorBaseThreaded):
query = "domain:{domain} -www.{domain}".format(domain=self.domain) query = "domain:{domain} -www.{domain}".format(domain=self.domain)
return query return query
class BaiduEnum(enumratorBaseThreaded): class BaiduEnum(enumratorBaseThreaded):
def __init__(self, domain, subdomains=None, q=None, silent=False, verbose=True): def __init__(self, domain, subdomains=None, q=None, silent=False, verbose=True):
subdomains = subdomains or [] subdomains = subdomains or []
@ -449,7 +460,7 @@ class BaiduEnum(enumratorBaseThreaded):
self.engine_name = "Baidu" self.engine_name = "Baidu"
self.MAX_DOMAINS = 2 self.MAX_DOMAINS = 2
self.MAX_PAGES = 760 self.MAX_PAGES = 760
enumratorBaseThreaded.__init__(self, base_url, self.engine_name,domain, subdomains, q=q, silent=silent, verbose=verbose) enumratorBaseThreaded.__init__(self, base_url, self.engine_name, domain, subdomains, q=q, silent=silent, verbose=verbose)
self.querydomain = self.domain self.querydomain = self.domain
self.q = q self.q = q
return return
@ -463,16 +474,16 @@ class BaiduEnum(enumratorBaseThreaded):
for link in links: for link in links:
link = re.sub('<.*?>|>|<|&nbsp;', '', link) link = re.sub('<.*?>|>|<|&nbsp;', '', link)
if not link.startswith('http'): if not link.startswith('http'):
link="http://"+link link = "http://" + link
subdomain = urlparse.urlparse(link).netloc subdomain = urlparse.urlparse(link).netloc
if subdomain.endswith(self.domain): if subdomain.endswith(self.domain):
subdomain_list.append(subdomain) subdomain_list.append(subdomain)
if subdomain not in self.subdomains and subdomain != self.domain: if subdomain not in self.subdomains and subdomain != self.domain:
found_newdomain = True found_newdomain = True
if self.verbose: if self.verbose:
self.print_("%s%s: %s%s"%(R, self.engine_name, W, subdomain)) self.print_("%s%s: %s%s" % (R, self.engine_name, W, subdomain))
self.subdomains.append(subdomain.strip()) self.subdomains.append(subdomain.strip())
except Exception as e: except Exception:
pass pass
if not found_newdomain and subdomain_list: if not found_newdomain and subdomain_list:
self.querydomain = self.findsubs(subdomain_list) self.querydomain = self.findsubs(subdomain_list)
@ -500,6 +511,7 @@ class BaiduEnum(enumratorBaseThreaded):
query = "site:{domain} -site:www.{domain}".format(domain=self.domain) query = "site:{domain} -site:www.{domain}".format(domain=self.domain)
return query return query
class NetcraftEnum(enumratorBaseThreaded): class NetcraftEnum(enumratorBaseThreaded):
def __init__(self, domain, subdomains=None, q=None, silent=False, verbose=True): def __init__(self, domain, subdomains=None, q=None, silent=False, verbose=True):
subdomains = subdomains or [] subdomains = subdomains or []
@ -512,11 +524,13 @@ class NetcraftEnum(enumratorBaseThreaded):
def req(self, url, cookies=None): def req(self, url, cookies=None):
cookies = cookies or {} cookies = cookies or {}
headers = {'User-Agent': 'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:38.0) Gecko/20100101 Firefox/40.0', headers = {
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8', 'User-Agent': 'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:38.0) Gecko/20100101 Firefox/40.0',
'Accept-Language': 'en-GB,en;q=0.5', 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
'Accept-Encoding': 'gzip, deflate', 'Accept-Language': 'en-GB,en;q=0.5',
'Accept-Encoding': 'gzip, deflate',
} }
try: try:
resp = self.session.get(url, headers=headers, timeout=self.timeout,cookies=cookies) resp = self.session.get(url, headers=headers, timeout=self.timeout,cookies=cookies)
except Exception as e: except Exception as e:
@ -527,8 +541,8 @@ class NetcraftEnum(enumratorBaseThreaded):
def get_next(self, resp): def get_next(self, resp):
link_regx = re.compile('<A href="(.*?)"><b>Next page</b></a>') link_regx = re.compile('<A href="(.*?)"><b>Next page</b></a>')
link = link_regx.findall(resp) link = link_regx.findall(resp)
link = re.sub('host=.*?%s'%self.domain, 'host=%s'%self.domain, link[0]) link = re.sub('host=.*?%s' % self.domain, 'host=%s' % self.domain, link[0])
url = 'http://searchdns.netcraft.com'+link url = 'http://searchdns.netcraft.com' + link
return url return url
def create_cookies(self, cookie): def create_cookies(self, cookie):
@ -538,7 +552,7 @@ class NetcraftEnum(enumratorBaseThreaded):
cookies['netcraft_js_verification_response'] = hashlib.sha1(urllib.unquote(cookies_list[1])).hexdigest() cookies['netcraft_js_verification_response'] = hashlib.sha1(urllib.unquote(cookies_list[1])).hexdigest()
return cookies return cookies
def get_cookies(self,headers): def get_cookies(self, headers):
if 'set-cookie' in headers: if 'set-cookie' in headers:
cookies = self.create_cookies(headers['set-cookie']) cookies = self.create_cookies(headers['set-cookie'])
else: else:
@ -551,7 +565,7 @@ class NetcraftEnum(enumratorBaseThreaded):
cookies = self.get_cookies(resp.headers) cookies = self.get_cookies(resp.headers)
url = self.base_url.format(domain=self.domain) url = self.base_url.format(domain=self.domain)
while True: while True:
resp = self.get_response(self.req(url,cookies)) resp = self.get_response(self.req(url, cookies))
self.extract_domains(resp) self.extract_domains(resp)
if 'Next page' not in resp: if 'Next page' not in resp:
return self.subdomains return self.subdomains
@ -568,9 +582,9 @@ class NetcraftEnum(enumratorBaseThreaded):
continue continue
if subdomain and subdomain not in self.subdomains and subdomain != self.domain: if subdomain and subdomain not in self.subdomains and subdomain != self.domain:
if self.verbose: if self.verbose:
self.print_("%s%s: %s%s"%(R, self.engine_name, W, subdomain)) self.print_("%s%s: %s%s" % (R, self.engine_name, W, subdomain))
self.subdomains.append(subdomain.strip()) self.subdomains.append(subdomain.strip())
except Exception as e: except Exception:
pass pass
return links_list return links_list
@ -587,7 +601,7 @@ class DNSdumpster(enumratorBaseThreaded):
super(DNSdumpster, self).__init__(base_url, self.engine_name, domain, subdomains, q=q, silent=silent, verbose=verbose) super(DNSdumpster, self).__init__(base_url, self.engine_name, domain, subdomains, q=q, silent=silent, verbose=verbose)
return return
def check_host(self,host): def check_host(self, host):
is_valid = False is_valid = False
Resolver = dns.resolver.Resolver() Resolver = dns.resolver.Resolver()
Resolver.nameservers = ['8.8.8.8', '8.8.4.4'] Resolver.nameservers = ['8.8.8.8', '8.8.4.4']
@ -596,7 +610,7 @@ class DNSdumpster(enumratorBaseThreaded):
ip = Resolver.query(host, 'A')[0].to_text() ip = Resolver.query(host, 'A')[0].to_text()
if ip: if ip:
if self.verbose: if self.verbose:
self.print_("%s%s: %s%s"%(R, self.engine_name, W, host)) self.print_("%s%s: %s%s" % (R, self.engine_name, W, host))
is_valid = True is_valid = True
self.live_subdomains.append(host) self.live_subdomains.append(host)
except: except:
@ -606,11 +620,12 @@ class DNSdumpster(enumratorBaseThreaded):
def req(self, req_method, url, params=None): def req(self, req_method, url, params=None):
params = params or {} params = params or {}
headers = {'User-Agent': 'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:38.0) Gecko/20100101 Firefox/40.0', headers = {
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8', 'User-Agent': 'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:38.0) Gecko/20100101 Firefox/40.0',
'Accept-Language': 'en-GB,en;q=0.5', 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
'Accept-Encoding': 'gzip, deflate', 'Accept-Language': 'en-GB,en;q=0.5',
'Referer': 'https://dnsdumpster.com' 'Accept-Encoding': 'gzip, deflate',
'Referer': 'https://dnsdumpster.com'
} }
try: try:
@ -624,25 +639,25 @@ class DNSdumpster(enumratorBaseThreaded):
return self.get_response(resp) return self.get_response(resp)
def get_csrftoken(self, resp): def get_csrftoken(self, resp):
csrf_regex = re.compile("<input type='hidden' name='csrfmiddlewaretoken' value='(.*?)' />",re.S) csrf_regex = re.compile("<input type='hidden' name='csrfmiddlewaretoken' value='(.*?)' />", re.S)
token = csrf_regex.findall(resp)[0] token = csrf_regex.findall(resp)[0]
return token.strip() return token.strip()
def enumerate(self): def enumerate(self):
resp = self.req('GET', self.base_url) resp = self.req('GET', self.base_url)
token = self.get_csrftoken(resp) token = self.get_csrftoken(resp)
params = {'csrfmiddlewaretoken':token, 'targetip':self.domain} params = {'csrfmiddlewaretoken': token, 'targetip': self.domain}
post_resp = self.req('POST', self.base_url, params) post_resp = self.req('POST', self.base_url, params)
self.extract_domains(post_resp) self.extract_domains(post_resp)
for subdomain in self.subdomains: for subdomain in self.subdomains:
t = threading.Thread(target=self.check_host,args=(subdomain,)) t = threading.Thread(target=self.check_host, args=(subdomain,))
t.start() t.start()
t.join() t.join()
return self.live_subdomains return self.live_subdomains
def extract_domains(self, resp): def extract_domains(self, resp):
tbl_regex = re.compile('<a name="hostanchor"><\/a>Host Records.*?<table.*?>(.*?)</table>',re.S) tbl_regex = re.compile('<a name="hostanchor"><\/a>Host Records.*?<table.*?>(.*?)</table>', re.S)
link_regex = re.compile('<td class="col-md-4">(.*?)<br>',re.S) link_regex = re.compile('<td class="col-md-4">(.*?)<br>', re.S)
links = [] links = []
try: try:
results_tbl = tbl_regex.findall(resp)[0] results_tbl = tbl_regex.findall(resp)[0]
@ -658,6 +673,7 @@ class DNSdumpster(enumratorBaseThreaded):
self.subdomains.append(subdomain.strip()) self.subdomains.append(subdomain.strip())
return links return links
class Virustotal(enumratorBaseThreaded): class Virustotal(enumratorBaseThreaded):
def __init__(self, domain, subdomains=None, q=None, silent=False, verbose=True): def __init__(self, domain, subdomains=None, q=None, silent=False, verbose=True):
subdomains = subdomains or [] subdomains = subdomains or []
@ -669,12 +685,13 @@ class Virustotal(enumratorBaseThreaded):
super(Virustotal, self).__init__(base_url, self.engine_name, domain, subdomains, q=q, silent=silent, verbose=verbose) super(Virustotal, self).__init__(base_url, self.engine_name, domain, subdomains, q=q, silent=silent, verbose=verbose)
return return
#the main send_req need to be rewritten # the main send_req need to be rewritten
def send_req(self, url): def send_req(self, url):
headers = {'User-Agent': 'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:38.0) Gecko/20100101 Firefox/40.0', headers = {
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8', 'User-Agent': 'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:38.0) Gecko/20100101 Firefox/40.0',
'Accept-Language': 'en-GB,en;q=0.5', 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
'Accept-Encoding': 'gzip, deflate', 'Accept-Language': 'en-GB,en;q=0.5',
'Accept-Encoding': 'gzip, deflate',
} }
try: try:
@ -685,7 +702,7 @@ class Virustotal(enumratorBaseThreaded):
return self.get_response(resp) return self.get_response(resp)
#once the send_req is rewritten we don't need to call this function, the stock one should be ok # once the send_req is rewritten we don't need to call this function, the stock one should be ok
def enumerate(self): def enumerate(self):
url = self.base_url.format(domain=self.domain) url = self.base_url.format(domain=self.domain)
resp = self.send_req(url) resp = self.send_req(url)
@ -702,9 +719,9 @@ class Virustotal(enumratorBaseThreaded):
continue continue
if subdomain not in self.subdomains and subdomain != self.domain: if subdomain not in self.subdomains and subdomain != self.domain:
if self.verbose: if self.verbose:
self.print_("%s%s: %s%s"%(R, self.engine_name, W, subdomain)) self.print_("%s%s: %s%s" % (R, self.engine_name, W, subdomain))
self.subdomains.append(subdomain.strip()) self.subdomains.append(subdomain.strip())
except Exception as e: except Exception:
pass pass
@ -720,15 +737,16 @@ class ThreatCrowd(enumratorBaseThreaded):
return return
def req(self, url): def req(self, url):
headers = {'User-Agent': 'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:38.0) Gecko/20100101 Firefox/40.0', headers = {
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8', 'User-Agent': 'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:38.0) Gecko/20100101 Firefox/40.0',
'Accept-Language': 'en-GB,en;q=0.5', 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
'Accept-Encoding': 'gzip, deflate', 'Accept-Language': 'en-GB,en;q=0.5',
'Accept-Encoding': 'gzip, deflate',
} }
try: try:
resp = self.session.get(url, headers=headers, timeout=self.timeout) resp = self.session.get(url, headers=headers, timeout=self.timeout)
except Exception as e: except Exception:
resp = None resp = None
return self.get_response(resp) return self.get_response(resp)
@ -746,7 +764,6 @@ class ThreatCrowd(enumratorBaseThreaded):
self.print_(e) self.print_(e)
return return
try: try:
links = json.loads(resp)['subdomains'] links = json.loads(resp)['subdomains']
for link in links: for link in links:
@ -760,6 +777,7 @@ class ThreatCrowd(enumratorBaseThreaded):
except Exception as e: except Exception as e:
pass pass
class CrtSearch(enumratorBaseThreaded): class CrtSearch(enumratorBaseThreaded):
def __init__(self, domain, subdomains=None, q=None, silent=False, verbose=True): def __init__(self, domain, subdomains=None, q=None, silent=False, verbose=True):
subdomains = subdomains or [] subdomains = subdomains or []
@ -772,15 +790,16 @@ class CrtSearch(enumratorBaseThreaded):
return return
def req(self, url): def req(self, url):
headers = {'User-Agent': 'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:38.0) Gecko/20100101 Firefox/40.0', headers = {
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8', 'User-Agent': 'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:38.0) Gecko/20100101 Firefox/40.0',
'Accept-Language': 'en-GB,en;q=0.5', 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
'Accept-Encoding': 'gzip, deflate', 'Accept-Language': 'en-GB,en;q=0.5',
'Accept-Encoding': 'gzip, deflate',
} }
try: try:
resp = self.session.get(url, headers=headers, timeout=self.timeout) resp = self.session.get(url, headers=headers, timeout=self.timeout)
except Exception as e: except Exception:
resp = None resp = None
return self.get_response(resp) return self.get_response(resp)
@ -807,6 +826,7 @@ class CrtSearch(enumratorBaseThreaded):
except Exception as e: except Exception as e:
pass pass
class PassiveDNS(enumratorBaseThreaded): class PassiveDNS(enumratorBaseThreaded):
def __init__(self, domain, subdomains=None, q=None, silent=False, verbose=True): def __init__(self, domain, subdomains=None, q=None, silent=False, verbose=True):
subdomains = subdomains or [] subdomains = subdomains or []
@ -819,10 +839,11 @@ class PassiveDNS(enumratorBaseThreaded):
return return
def req(self, url): def req(self, url):
headers = {'User-Agent': 'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:38.0) Gecko/20100101 Firefox/40.0', headers = {
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8', 'User-Agent': 'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:38.0) Gecko/20100101 Firefox/40.0',
'Accept-Language': 'en-GB,en;q=0.5', 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
'Accept-Encoding': 'gzip, deflate', 'Accept-Language': 'en-GB,en;q=0.5',
'Accept-Encoding': 'gzip, deflate',
} }
try: try:
@ -833,7 +854,6 @@ class PassiveDNS(enumratorBaseThreaded):
return self.get_response(resp) return self.get_response(resp)
def enumerate(self): def enumerate(self):
url = self.base_url.format(domain=self.domain) url = self.base_url.format(domain=self.domain)
resp = self.req(url) resp = self.req(url)
@ -850,20 +870,20 @@ class PassiveDNS(enumratorBaseThreaded):
subdomain = link[:link.find('[')].strip() subdomain = link[:link.find('[')].strip()
if subdomain not in self.subdomains and subdomain != self.domain and subdomain.endswith(self.domain): if subdomain not in self.subdomains and subdomain != self.domain and subdomain.endswith(self.domain):
if self.verbose: if self.verbose:
self.print_("%s%s: %s%s"%(R, self.engine_name, W, subdomain)) self.print_("%s%s: %s%s" %( R, self.engine_name, W, subdomain))
self.subdomains.append(subdomain.strip()) self.subdomains.append(subdomain.strip())
except Exception as e: except Exception:
pass pass
class portscan():
    """Threaded TCP connect scanner over a list of discovered subdomains."""

    def __init__(self, subdomains, ports):
        """Remember the hosts/ports to probe and build the concurrency guard."""
        # Bounded semaphore caps how many hosts are scanned at once
        # (acquired/released by port_scan).
        self.threads = 20
        self.lock = threading.BoundedSemaphore(value=self.threads)
        self.subdomains = subdomains
        self.ports = ports
def port_scan(self,host,ports): def port_scan(self, host, ports):
openports = [] openports = []
self.lock.acquire() self.lock.acquire()
for port in ports: for port in ports:
@ -874,15 +894,15 @@ class portscan():
if result == 0: if result == 0:
openports.append(port) openports.append(port)
s.close() s.close()
except Exception as e: except Exception:
pass pass
self.lock.release() self.lock.release()
if len(openports) > 0: if len(openports) > 0:
print("%s%s%s - %sFound open ports:%s %s%s%s"%(G,host,W,R,W,Y,', '.join(openports),W)) print("%s%s%s - %sFound open ports:%s %s%s%s" % (G, host, W, R, W, Y, ', '.join(openports), W))
def run(self): def run(self):
for subdomain in self.subdomains: for subdomain in self.subdomains:
t = threading.Thread(target=self.port_scan,args=(subdomain,self.ports)) t = threading.Thread(target=self.port_scan, args=(subdomain, self.ports))
t.start() t.start()
def main(domain, threads, savefile, ports, silent, verbose, enable_bruteforce, engines): def main(domain, threads, savefile, ports, silent, verbose, enable_bruteforce, engines):
@ -894,65 +914,69 @@ def main(domain, threads, savefile, ports, silent, verbose, enable_bruteforce, e
else: else:
subdomains_queue = multiprocessing.Manager().list() subdomains_queue = multiprocessing.Manager().list()
# Check Bruteforce Status
#Check Bruteforce Status
if enable_bruteforce or enable_bruteforce is None: if enable_bruteforce or enable_bruteforce is None:
enable_bruteforce = True enable_bruteforce = True
#Validate domain # Validate domain
domain_check = re.compile("^(http|https)?[a-zA-Z0-9]+([\-\.]{1}[a-zA-Z0-9]+)*\.[a-zA-Z]{2,}$") domain_check = re.compile("^(http|https)?[a-zA-Z0-9]+([\-\.]{1}[a-zA-Z0-9]+)*\.[a-zA-Z]{2,}$")
if not domain_check.match(domain): if not domain_check.match(domain):
if not silent: print(R+"Error: Please enter a valid domain"+W) if not silent:
print(R + "Error: Please enter a valid domain" + W)
return [] return []
if not domain.startswith('http://') or not domain.startswith('https://'): if not domain.startswith('http://') or not domain.startswith('https://'):
domain = 'http://'+domain domain = 'http://' + domain
parsed_domain = urlparse.urlparse(domain) parsed_domain = urlparse.urlparse(domain)
if not silent: print(B+"[-] Enumerating subdomains now for %s"%parsed_domain.netloc+W) if not silent:
print(B + "[-] Enumerating subdomains now for %s"%parsed_domain.netloc + W)
if verbose and not silent: if verbose and not silent:
print(Y+"[-] verbosity is enabled, will show the subdomains results in realtime"+W) print(Y + "[-] verbosity is enabled, will show the subdomains results in realtime" + W)
supported_engines = {'baidu':BaiduEnum, supported_engines = {'baidu': BaiduEnum,
'yahoo':YahooEnum, 'yahoo': YahooEnum,
'google':GoogleEnum, 'google': GoogleEnum,
'bing':BingEnum, 'bing': BingEnum,
'ask':AskEnum, 'ask': AskEnum,
'netcraft':NetcraftEnum, 'netcraft': NetcraftEnum,
'dnsdumpster':DNSdumpster, 'dnsdumpster': DNSdumpster,
'virustotal':Virustotal, 'virustotal': Virustotal,
'threatcrowd':ThreatCrowd, 'threatcrowd': ThreatCrowd,
'ssl':CrtSearch, 'ssl': CrtSearch,
'passivedns':PassiveDNS 'passivedns': PassiveDNS
} }
chosenEnums = [] chosenEnums = []
if engines == None: if engines is None:
chosenEnums = [BaiduEnum, YahooEnum, GoogleEnum, BingEnum, AskEnum, chosenEnums = [
NetcraftEnum, DNSdumpster, Virustotal, ThreatCrowd, CrtSearch, PassiveDNS] BaiduEnum, YahooEnum, GoogleEnum, BingEnum, AskEnum,
NetcraftEnum, DNSdumpster, Virustotal, ThreatCrowd,
CrtSearch, PassiveDNS
]
else: else:
engines = engines.split(',') engines = engines.split(',')
for engine in engines: for engine in engines:
if engine.lower() in supported_engines: if engine.lower() in supported_engines:
chosenEnums.append(supported_engines[engine.lower()]) chosenEnums.append(supported_engines[engine.lower()])
#Start the engines enumeration # Start the engines enumeration
enums = [enum(domain, [], q=subdomains_queue, silent=silent, verbose=verbose) for enum in chosenEnums] enums = [enum(domain, [], q=subdomains_queue, silent=silent, verbose=verbose) for enum in chosenEnums]
for enum in enums: for enum in enums:
enum.start() enum.start()
for enum in enums: for enum in enums:
enum.join() enum.join()
subdomains = set(subdomains_queue) subdomains = set(subdomains_queue)
for subdomain in subdomains: for subdomain in subdomains:
search_list.add(subdomain) search_list.add(subdomain)
if enable_bruteforce: if enable_bruteforce:
if not silent: print(G+"[-] Starting bruteforce module now using subbrute.."+W) if not silent:
print(G + "[-] Starting bruteforce module now using subbrute.." + W)
record_type = False record_type = False
path_to_file = os.path.dirname(os.path.realpath(__file__)) path_to_file = os.path.dirname(os.path.realpath(__file__))
subs = os.path.join(path_to_file, 'subbrute', 'names.txt') subs = os.path.join(path_to_file, 'subbrute', 'names.txt')
@ -972,19 +996,22 @@ def main(domain, threads, savefile, ports, silent, verbose, enable_bruteforce, e
if savefile: if savefile:
write_file(savefile, subdomains) write_file(savefile, subdomains)
if not silent: print(Y+"[-] Total Unique Subdomains Found: %s"%len(subdomains)+W) if not silent:
print(Y + "[-] Total Unique Subdomains Found: %s" % len(subdomains) + W)
if ports: if ports:
if not silent: print(G+"[-] Start port scan now for the following ports: %s%s"%(Y,ports)+W) if not silent:
print(G + "[-] Start port scan now for the following ports: %s%s" % (Y,ports) + W)
ports = ports.split(',') ports = ports.split(',')
pscan = portscan(subdomains,ports) pscan = portscan(subdomains, ports)
pscan.run() pscan.run()
elif not silent: elif not silent:
for subdomain in subdomains: for subdomain in subdomains:
print(G+subdomain+W) print(G + subdomain + W)
return subdomains return subdomains
if __name__=="__main__": if __name__=="__main__":
args = parse_args() args = parse_args()
domain = args.domain domain = args.domain
@ -996,7 +1023,5 @@ if __name__=="__main__":
engines = args.engines engines = args.engines
if verbose or verbose is None: if verbose or verbose is None:
verbose = True verbose = True
banner() banner()
res = main(domain, threads, savefile, ports, silent=False, verbose=verbose, enable_bruteforce=enable_bruteforce, engines=engines) res = main(domain, threads, savefile, ports, silent=False, verbose=verbose, enable_bruteforce=enable_bruteforce, engines=engines)