Added whitespace after commas & operators

Ahmed Aboul-Ela 2016-11-19 05:47:52 +04:00
parent 7a84f06e93
commit 201d797b87
1 changed file with 182 additions and 157 deletions


@@ -22,7 +22,7 @@ from subbrute import subbrute
import dns.resolver
import requests
#Python 2.x and 3.x compatibility
# Python 2.x and 3.x compatibility
if sys.version > '3':
import urllib.parse as urlparse
import urllib.parse as urllib
@@ -30,63 +30,70 @@ else:
import urlparse
import urllib
#In case you cannot install some of the required development packages, there's also an option to disable the SSL warning:
# In case you cannot install some of the required development packages
# there's also an option to disable the SSL warning:
try:
import requests.packages.urllib3
requests.packages.urllib3.disable_warnings()
except:
pass
#Check if we are running on a Windows platform
# Check if we are running on a Windows platform
is_windows = sys.platform.startswith('win')
#Console Colors
# Console Colors
if is_windows:
G = Y = B = R = W = G = Y = B = R = W = '' #use no terminal colors on windows
# use no terminal colors on windows
G = Y = B = R = W = G = Y = B = R = W = ''
else:
G = '\033[92m' #green
Y = '\033[93m' #yellow
B = '\033[94m' #blue
R = '\033[91m' #red
W = '\033[0m' #white
G = '\033[92m' # green
Y = '\033[93m' # yellow
B = '\033[94m' # blue
R = '\033[91m' # red
W = '\033[0m' # white
def banner():
print("""%s
print("""%s
____ _ _ _ _ _____
/ ___| _ _| |__ | (_)___| |_|___ / _ __
\___ \| | | | '_ \| | / __| __| |_ \| '__|
___) | |_| | |_) | | \__ \ |_ ___) | |
|____/ \__,_|_.__/|_|_|___/\__|____/|_|%s%s
# Coded By Ahmed Aboul-Ela - @aboul3la
"""%(R,W,Y))
# Coded By Ahmed Aboul-Ela - @aboul3la
""" % (R, W, Y))
def parser_error(errmsg):
banner()
print("Usage: python "+sys.argv[0]+" [Options] use -h for help")
print(R+"Error: "+errmsg+W)
print("Usage: python " + sys.argv[0] + " [Options] use -h for help")
print(R + "Error: " + errmsg + W)
sys.exit()
def parse_args():
#parse the arguments
parser = argparse.ArgumentParser(epilog = '\tExample: \r\npython '+sys.argv[0]+" -d google.com")
# parse the arguments
parser = argparse.ArgumentParser(epilog='\tExample: \r\npython ' + sys.argv[0] + " -d google.com")
parser.error = parser_error
parser._optionals.title = "OPTIONS"
parser.add_argument('-d', '--domain', help="Domain name to enumerate its subdomains", required=True)
parser.add_argument('-b', '--bruteforce', help='Enable the subbrute bruteforce module',nargs='?', default=False)
parser.add_argument('-b', '--bruteforce', help='Enable the subbrute bruteforce module', nargs='?', default=False)
parser.add_argument('-p', '--ports', help='Scan the found subdomains against specified tcp ports')
parser.add_argument('-v', '--verbose', help='Enable Verbosity and display results in realtime',nargs='?', default=False)
parser.add_argument('-v', '--verbose', help='Enable Verbosity and display results in realtime', nargs='?', default=False)
parser.add_argument('-t', '--threads', help='Number of threads to use for subbrute bruteforce', type=int, default=30)
parser.add_argument('-e', '--engines', help='Specify a comma-separated list of search engines')
parser.add_argument('-o', '--output', help='Save the results to text file')
return parser.parse_args()
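
For a quick sanity check of these options, one can stuff sys.argv and call parse_args() directly; the flag values below are illustrative, not from the original:

import sys
sys.argv = ['sublist3r.py', '-d', 'example.com', '-t', '50', '-p', '80,443']
args = parse_args()
print(args.domain, args.threads, args.ports)  # example.com 50 80,443
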
def write_file(filename, subdomains):
#save subdomain results to the output file
print("%s[-] Saving results to file: %s%s%s%s"%(Y,W,R,filename,W))
# save subdomain results to the output file
print("%s[-] Saving results to file: %s%s%s%s" % (Y, W, R, filename, W))
with open(str(filename), 'wt') as f:
for subdomain in subdomains:
f.write(subdomain+"\r\n")
f.write(subdomain + "\r\n")
def subdomain_cmp(d1, d2):
"""cmp function for subdomains d1 and d2.
@@ -111,14 +118,14 @@ def subdomain_cmp(d1, d2):
d1 = d1.split('.')[::-1]
d2 = d2.split('.')[::-1]
val = 1 if d1>d2 else (-1 if d1<d2 else 0)
val = 1 if d1 > d2 else (-1 if d1 < d2 else 0)
if ((len(d1) < len(d2)) and
(d1[-1] == 'www') and
(d1[:-1] == d2[:len(d1)-1])):
(d1[:-1] == d2[:len(d1) - 1])):
val = -1
elif ((len(d1) > len(d2)) and
(d2[-1] == 'www') and
(d1[:len(d2)-1] == d2[:-1])):
(d1[:len(d2) - 1] == d2[:-1])):
val = 1
elif d1[:-1] == d2[:-1]:
if d1[-1] == 'www':
@@ -127,6 +134,7 @@ def subdomain_cmp(d1, d2):
val = 1
return val
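
A worked example of the ordering this comparator aims for (functools.cmp_to_key makes it usable on Python 3 as well; the hostnames are made up):

from functools import cmp_to_key
hosts = ['mail.example.com', 'www.example.com', 'api.example.com', 'example.com']
print(sorted(hosts, key=cmp_to_key(subdomain_cmp)))
# labels are compared right-to-left and 'www' is nudged ahead of its
# siblings, so www.example.com should land before api. and mail.
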
class enumratorBase(object):
def __init__(self, base_url, engine_name, domain, subdomains=None, silent=False, verbose=True):
subdomains = subdomains or []
@@ -147,33 +155,31 @@ class enumratorBase(object):
def print_banner(self):
""" subclass can override this if they want a fancy banner :)"""
self.print_(G+"[-] Searching now in %s.." %(self.engine_name)+W)
self.print_(G + "[-] Searching now in %s.." % (self.engine_name) + W)
return
def send_req(self, query, page_no=1):
headers = {'User-Agent': 'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:38.0) Gecko/20100101 Firefox/38.0',
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
'Accept-Language': 'en-GB,en;q=0.5',
'Accept-Encoding': 'gzip, deflate',
'Connection': 'keep-alive'
headers = {
'User-Agent': 'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:38.0) Gecko/20100101 Firefox/38.0',
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
'Accept-Language': 'en-GB,en;q=0.5',
'Accept-Encoding': 'gzip, deflate',
'Connection': 'keep-alive',
}
url = self.base_url.format(query=query, page_no=page_no)
url = self.base_url.format(query=query, page_no=page_no)
try:
resp = self.session.get(url, headers=headers, timeout=self.timeout)
except Exception as e:
except Exception:
resp = None
return self.get_response(resp)
def get_response(self,response):
def get_response(self, response):
if response is None:
return 0
if hasattr(response, "text"):
return response.text
else:
return response.content
return response.text if hasattr(response, "text") else response.content
def check_max_subdomains(self,count):
def check_max_subdomains(self, count):
if self.MAX_DOMAINS == 0:
return False
return count >= self.MAX_DOMAINS
@@ -183,12 +189,12 @@ class enumratorBase(object):
return False
return num >= self.MAX_PAGES
#Override
# override
def extract_domains(self, resp):
""" chlid class should override this function """
return
#override
# override
def check_response_errors(self, resp):
""" chlid class should override this function
The function should return True if there are no errors and False otherwise
@@ -215,28 +221,28 @@ class enumratorBase(object):
while flag:
query = self.generate_query()
count = query.count(self.domain) #number of subdomains found so far
count = query.count(self.domain) # number of subdomains found so far
#if we have reached the maximum number of subdomains in the search query
#then we should go over the pages
# if we have reached the maximum number of subdomains in the search query
# then we should go over the pages
if self.check_max_subdomains(count):
page_no = self.get_page(page_no)
if self.check_max_pages(page_no): #maximum pages for Google to avoid getting blocked
if self.check_max_pages(page_no): # maximum pages for Google to avoid getting blocked
return self.subdomains
resp = self.send_req(query, page_no)
#check if any error occurred
# check if any error occurred
if not self.check_response_errors(resp):
return self.subdomains
links = self.extract_domains(resp)
#if the previous page's hyperlinks are the same as the current one's, then maybe we have reached the last page
# if the previous page's hyperlinks are the same as the current one's, then maybe we have reached the last page
if links == prev_links:
retries += 1
page_no = self.get_page(page_no)
#make another retry; maybe it isn't the last page
# make another retry; maybe it isn't the last page
if retries >= 3:
return self.subdomains
@@ -260,6 +266,7 @@ class enumratorBaseThreaded(multiprocessing.Process, enumratorBase):
for domain in domain_list:
self.q.append(domain)
class GoogleEnum(enumratorBaseThreaded):
def __init__(self, domain, subdomains=None, q=None, silent=False, verbose=True):
subdomains = subdomains or []
@@ -278,20 +285,20 @@ class GoogleEnum(enumratorBaseThreaded):
for link in links_list:
link = re.sub('<span.*>', '', link)
if not link.startswith('http'):
link="http://"+link
link = "http://" + link
subdomain = urlparse.urlparse(link).netloc
if subdomain and subdomain not in self.subdomains and subdomain != self.domain:
if self.verbose:
self.print_("%s%s: %s%s"%(R, self.engine_name, W, subdomain))
self.subdomains.append(subdomain.strip())
except Exception as e:
except Exception:
pass
return links_list
def check_response_errors(self, resp):
if 'Our systems have detected unusual traffic' in resp:
self.print_(R+"[!] Error: Google probably now is blocking our requests"+W)
self.print_(R+"[~] Finished now the Google Enumeration ..."+W)
self.print_(R + "[!] Error: Google probably now is blocking our requests" + W)
self.print_(R + "[~] Finished now the Google Enumeration ..." + W)
return False
return True
@@ -302,12 +309,13 @@ class GoogleEnum(enumratorBaseThreaded):
def generate_query(self):
if self.subdomains:
fmt = 'site:{domain} -www.{domain} -{found}'
found = ' -'.join(self.subdomains[:self.MAX_DOMAINS-2])
found = ' -'.join(self.subdomains[:self.MAX_DOMAINS - 2])
query = fmt.format(domain=self.domain, found=found)
else:
query = "site:{domain} -www.{domain}".format(domain=self.domain)
return query
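
A worked example of the dork generate_query builds once some results exist (values are illustrative); the [:self.MAX_DOMAINS - 2] slice caps the exclusions so the query stays within Google's search-term limit:

fmt = 'site:{domain} -www.{domain} -{found}'
found = ' -'.join(['mail.example.com', 'dev.example.com'])
print(fmt.format(domain='example.com', found=found))
# site:example.com -www.example.com -mail.example.com -dev.example.com
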
class YahooEnum(enumratorBaseThreaded):
def __init__(self, domain, subdomains=None, q=None, silent=False, verbose=True):
subdomains = subdomains or []
@@ -326,19 +334,19 @@ class YahooEnum(enumratorBaseThreaded):
try:
links = link_regx.findall(resp)
links2 = link_regx2.findall(resp)
links_list = links+links2
links_list = links + links2
for link in links_list:
link = re.sub("<(\/)?b>","", link)
link = re.sub("<(\/)?b>", "", link)
if not link.startswith('http'):
link="http://"+link
link = "http://" + link
subdomain = urlparse.urlparse(link).netloc
if not subdomain.endswith(self.domain):
continue
if subdomain and subdomain not in self.subdomains and subdomain != self.domain:
if self.verbose:
self.print_("%s%s: %s%s"%(R, self.engine_name, W, subdomain))
self.print_("%s%s: %s%s" % (R, self.engine_name, W, subdomain))
self.subdomains.append(subdomain.strip())
except Exception as e:
except Exception:
pass
return links_list
@@ -346,7 +354,7 @@ class YahooEnum(enumratorBaseThreaded):
def should_sleep(self):
return
def get_page(self,num):
def get_page(self, num):
return num + 10
def generate_query(self):
@@ -358,6 +366,7 @@ class YahooEnum(enumratorBaseThreaded):
query = "site:{domain}".format(domain=self.domain)
return query
class AskEnum(enumratorBaseThreaded):
def __init__(self, domain, subdomains=None, q=None, silent=False, verbose=True):
subdomains = subdomains or []
@@ -375,18 +384,18 @@ class AskEnum(enumratorBaseThreaded):
links_list = link_regx.findall(resp)
for link in links_list:
if not link.startswith('http'):
link="http://"+link
link = "http://" + link
subdomain = urlparse.urlparse(link).netloc
if subdomain not in self.subdomains and subdomain != self.domain:
if self.verbose:
self.print_("%s%s: %s%s"%(R, self.engine_name, W, subdomain))
self.subdomains.append(subdomain.strip())
except Exception as e:
except Exception:
pass
return links_list
def get_page(self,num):
def get_page(self, num):
return num + 1
def generate_query(self):
@@ -399,6 +408,7 @@ class AskEnum(enumratorBaseThreaded):
return query
class BingEnum(enumratorBaseThreaded):
def __init__(self, domain, subdomains=None, q=None, silent=False, verbose=True):
subdomains = subdomains or []
@@ -406,9 +416,9 @@ class BingEnum(enumratorBaseThreaded):
self.engine_name = "Bing"
self.MAX_DOMAINS = 30
self.MAX_PAGES = 0
enumratorBaseThreaded.__init__(self, base_url, self.engine_name,domain, subdomains,q=q, silent=silent)
enumratorBaseThreaded.__init__(self, base_url, self.engine_name, domain, subdomains, q=q, silent=silent)
self.q = q
self.verbose=verbose
self.verbose = verbose
return
def extract_domains(self, resp):
@@ -417,18 +427,18 @@ class BingEnum(enumratorBaseThreaded):
try:
links = link_regx.findall(resp)
links2 = link_regx2.findall(resp)
links_list = links+links2
links_list = links + links2
for link in links_list:
link = re.sub('<(\/)?strong>|<span.*?>|<|>', '', link)
if not link.startswith('http'):
link="http://"+link
link = "http://" + link
subdomain = urlparse.urlparse(link).netloc
if subdomain not in self.subdomains and subdomain != self.domain:
if self.verbose:
self.print_("%s%s: %s%s"%(R, self.engine_name, W, subdomain))
self.print_("%s%s: %s%s" % (R, self.engine_name, W, subdomain))
self.subdomains.append(subdomain.strip())
except Exception as e:
except Exception:
pass
return links_list
@@ -442,6 +452,7 @@ class BingEnum(enumratorBaseThreaded):
query = "domain:{domain} -www.{domain}".format(domain=self.domain)
return query
class BaiduEnum(enumratorBaseThreaded):
def __init__(self, domain, subdomains=None, q=None, silent=False, verbose=True):
subdomains = subdomains or []
@@ -449,7 +460,7 @@ class BaiduEnum(enumratorBaseThreaded):
self.engine_name = "Baidu"
self.MAX_DOMAINS = 2
self.MAX_PAGES = 760
enumratorBaseThreaded.__init__(self, base_url, self.engine_name,domain, subdomains, q=q, silent=silent, verbose=verbose)
enumratorBaseThreaded.__init__(self, base_url, self.engine_name, domain, subdomains, q=q, silent=silent, verbose=verbose)
self.querydomain = self.domain
self.q = q
return
@@ -463,16 +474,16 @@ class BaiduEnum(enumratorBaseThreaded):
for link in links:
link = re.sub('<.*?>|>|<|&nbsp;', '', link)
if not link.startswith('http'):
link="http://"+link
link = "http://" + link
subdomain = urlparse.urlparse(link).netloc
if subdomain.endswith(self.domain):
subdomain_list.append(subdomain)
if subdomain not in self.subdomains and subdomain != self.domain:
found_newdomain = True
if self.verbose:
self.print_("%s%s: %s%s"%(R, self.engine_name, W, subdomain))
self.print_("%s%s: %s%s" % (R, self.engine_name, W, subdomain))
self.subdomains.append(subdomain.strip())
except Exception as e:
except Exception:
pass
if not found_newdomain and subdomain_list:
self.querydomain = self.findsubs(subdomain_list)
@@ -500,6 +511,7 @@ class BaiduEnum(enumratorBaseThreaded):
query = "site:{domain} -site:www.{domain}".format(domain=self.domain)
return query
class NetcraftEnum(enumratorBaseThreaded):
def __init__(self, domain, subdomains=None, q=None, silent=False, verbose=True):
subdomains = subdomains or []
@@ -512,11 +524,13 @@ class NetcraftEnum(enumratorBaseThreaded):
def req(self, url, cookies=None):
cookies = cookies or {}
headers = {'User-Agent': 'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:38.0) Gecko/20100101 Firefox/40.0',
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
'Accept-Language': 'en-GB,en;q=0.5',
'Accept-Encoding': 'gzip, deflate',
headers = {
'User-Agent': 'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:38.0) Gecko/20100101 Firefox/40.0',
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
'Accept-Language': 'en-GB,en;q=0.5',
'Accept-Encoding': 'gzip, deflate',
}
try:
resp = self.session.get(url, headers=headers, timeout=self.timeout,cookies=cookies)
except Exception as e:
@@ -527,8 +541,8 @@ class NetcraftEnum(enumratorBaseThreaded):
def get_next(self, resp):
link_regx = re.compile('<A href="(.*?)"><b>Next page</b></a>')
link = link_regx.findall(resp)
link = re.sub('host=.*?%s'%self.domain, 'host=%s'%self.domain, link[0])
url = 'http://searchdns.netcraft.com'+link
link = re.sub('host=.*?%s' % self.domain, 'host=%s' % self.domain, link[0])
url = 'http://searchdns.netcraft.com' + link
return url
def create_cookies(self, cookie):
@@ -538,7 +552,7 @@ class NetcraftEnum(enumratorBaseThreaded):
cookies['netcraft_js_verification_response'] = hashlib.sha1(urllib.unquote(cookies_list[1])).hexdigest()
return cookies
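
create_cookies answers Netcraft's JavaScript check by hashing the unquoted challenge cookie; a self-contained sketch with a made-up challenge value:

import hashlib
try:
    from urllib import unquote        # Python 2
except ImportError:
    from urllib.parse import unquote  # Python 3
challenge = '1a2b%203c4d'             # made-up value for illustration
cookies = {
    'netcraft_js_verification_challenge': challenge,
    'netcraft_js_verification_response':
        hashlib.sha1(unquote(challenge).encode('utf-8')).hexdigest(),
}
# hashlib.sha1() wants bytes on Python 3, hence the .encode() here;
# the method above passes a str, which only works on Python 2
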
def get_cookies(self,headers):
def get_cookies(self, headers):
if 'set-cookie' in headers:
cookies = self.create_cookies(headers['set-cookie'])
else:
@@ -551,7 +565,7 @@ class NetcraftEnum(enumratorBaseThreaded):
cookies = self.get_cookies(resp.headers)
url = self.base_url.format(domain=self.domain)
while True:
resp = self.get_response(self.req(url,cookies))
resp = self.get_response(self.req(url, cookies))
self.extract_domains(resp)
if 'Next page' not in resp:
return self.subdomains
@@ -568,9 +582,9 @@ class NetcraftEnum(enumratorBaseThreaded):
continue
if subdomain and subdomain not in self.subdomains and subdomain != self.domain:
if self.verbose:
self.print_("%s%s: %s%s"%(R, self.engine_name, W, subdomain))
self.print_("%s%s: %s%s" % (R, self.engine_name, W, subdomain))
self.subdomains.append(subdomain.strip())
except Exception as e:
except Exception:
pass
return links_list
@@ -587,7 +601,7 @@ class DNSdumpster(enumratorBaseThreaded):
super(DNSdumpster, self).__init__(base_url, self.engine_name, domain, subdomains, q=q, silent=silent, verbose=verbose)
return
def check_host(self,host):
def check_host(self, host):
is_valid = False
Resolver = dns.resolver.Resolver()
Resolver.nameservers = ['8.8.8.8', '8.8.4.4']
@@ -596,7 +610,7 @@ class DNSdumpster(enumratorBaseThreaded):
ip = Resolver.query(host, 'A')[0].to_text()
if ip:
if self.verbose:
self.print_("%s%s: %s%s"%(R, self.engine_name, W, host))
self.print_("%s%s: %s%s" % (R, self.engine_name, W, host))
is_valid = True
self.live_subdomains.append(host)
except:
@@ -606,11 +620,12 @@ class DNSdumpster(enumratorBaseThreaded):
def req(self, req_method, url, params=None):
params = params or {}
headers = {'User-Agent': 'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:38.0) Gecko/20100101 Firefox/40.0',
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
'Accept-Language': 'en-GB,en;q=0.5',
'Accept-Encoding': 'gzip, deflate',
'Referer': 'https://dnsdumpster.com'
headers = {
'User-Agent': 'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:38.0) Gecko/20100101 Firefox/40.0',
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
'Accept-Language': 'en-GB,en;q=0.5',
'Accept-Encoding': 'gzip, deflate',
'Referer': 'https://dnsdumpster.com'
}
try:
@@ -624,25 +639,25 @@ class DNSdumpster(enumratorBaseThreaded):
return self.get_response(resp)
def get_csrftoken(self, resp):
csrf_regex = re.compile("<input type='hidden' name='csrfmiddlewaretoken' value='(.*?)' />",re.S)
csrf_regex = re.compile("<input type='hidden' name='csrfmiddlewaretoken' value='(.*?)' />", re.S)
token = csrf_regex.findall(resp)[0]
return token.strip()
def enumerate(self):
resp = self.req('GET', self.base_url)
token = self.get_csrftoken(resp)
params = {'csrfmiddlewaretoken':token, 'targetip':self.domain}
params = {'csrfmiddlewaretoken': token, 'targetip': self.domain}
post_resp = self.req('POST', self.base_url, params)
self.extract_domains(post_resp)
for subdomain in self.subdomains:
t = threading.Thread(target=self.check_host,args=(subdomain,))
t = threading.Thread(target=self.check_host, args=(subdomain,))
t.start()
t.join()
return self.live_subdomains
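
The enumerate flow above is a plain CSRF round-trip: GET the page, scrape csrfmiddlewaretoken, then POST it back with the target. A condensed sketch with requests (field names as scraped here in 2016; the site may have changed since). Note also that calling start() and then join() inside the loop runs the check_host threads one at a time; starting them all first and joining afterwards would actually parallelize the checks.

import re
import requests

session = requests.Session()
base = 'https://dnsdumpster.com'
html = session.get(base).text
token = re.findall("name='csrfmiddlewaretoken' value='(.*?)'", html)[0]
resp = session.post(base, headers={'Referer': base},
                    data={'csrfmiddlewaretoken': token, 'targetip': 'example.com'})
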
def extract_domains(self, resp):
tbl_regex = re.compile('<a name="hostanchor"><\/a>Host Records.*?<table.*?>(.*?)</table>',re.S)
link_regex = re.compile('<td class="col-md-4">(.*?)<br>',re.S)
tbl_regex = re.compile('<a name="hostanchor"><\/a>Host Records.*?<table.*?>(.*?)</table>', re.S)
link_regex = re.compile('<td class="col-md-4">(.*?)<br>', re.S)
links = []
try:
results_tbl = tbl_regex.findall(resp)[0]
@@ -658,6 +673,7 @@ class DNSdumpster(enumratorBaseThreaded):
self.subdomains.append(subdomain.strip())
return links
class Virustotal(enumratorBaseThreaded):
def __init__(self, domain, subdomains=None, q=None, silent=False, verbose=True):
subdomains = subdomains or []
@@ -669,12 +685,13 @@ class Virustotal(enumratorBaseThreaded):
super(Virustotal, self).__init__(base_url, self.engine_name, domain, subdomains, q=q, silent=silent, verbose=verbose)
return
#the main send_req needs to be rewritten
# the main send_req needs to be rewritten
def send_req(self, url):
headers = {'User-Agent': 'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:38.0) Gecko/20100101 Firefox/40.0',
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
'Accept-Language': 'en-GB,en;q=0.5',
'Accept-Encoding': 'gzip, deflate',
headers = {
'User-Agent': 'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:38.0) Gecko/20100101 Firefox/40.0',
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
'Accept-Language': 'en-GB,en;q=0.5',
'Accept-Encoding': 'gzip, deflate',
}
try:
@@ -685,7 +702,7 @@ class Virustotal(enumratorBaseThreaded):
return self.get_response(resp)
#once send_req is rewritten we won't need to call this function; the stock one should be OK
# once send_req is rewritten we won't need to call this function; the stock one should be OK
def enumerate(self):
url = self.base_url.format(domain=self.domain)
resp = self.send_req(url)
@@ -702,9 +719,9 @@ class Virustotal(enumratorBaseThreaded):
continue
if subdomain not in self.subdomains and subdomain != self.domain:
if self.verbose:
self.print_("%s%s: %s%s"%(R, self.engine_name, W, subdomain))
self.print_("%s%s: %s%s" % (R, self.engine_name, W, subdomain))
self.subdomains.append(subdomain.strip())
except Exception as e:
except Exception:
pass
@@ -720,15 +737,16 @@ class ThreatCrowd(enumratorBaseThreaded):
return
def req(self, url):
headers = {'User-Agent': 'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:38.0) Gecko/20100101 Firefox/40.0',
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
'Accept-Language': 'en-GB,en;q=0.5',
'Accept-Encoding': 'gzip, deflate',
headers = {
'User-Agent': 'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:38.0) Gecko/20100101 Firefox/40.0',
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
'Accept-Language': 'en-GB,en;q=0.5',
'Accept-Encoding': 'gzip, deflate',
}
try:
resp = self.session.get(url, headers=headers, timeout=self.timeout)
except Exception as e:
except Exception:
resp = None
return self.get_response(resp)
@@ -746,7 +764,6 @@ class ThreatCrowd(enumratorBaseThreaded):
self.print_(e)
return
try:
links = json.loads(resp)['subdomains']
for link in links:
@@ -760,6 +777,7 @@ class ThreatCrowd(enumratorBaseThreaded):
except Exception as e:
pass
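
Unlike the scraped engines, ThreatCrowd exposes a JSON API, so extract_domains can simply json.loads the body. Roughly (the endpoint URL is an assumption here; the class's base_url is elided by the diff context):

import json
import requests

url = 'https://www.threatcrowd.org/searchApi/v2/domain/report/?domain=example.com'
subs = json.loads(requests.get(url, timeout=25).text).get('subdomains', [])
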
class CrtSearch(enumratorBaseThreaded):
def __init__(self, domain, subdomains=None, q=None, silent=False, verbose=True):
subdomains = subdomains or []
@@ -772,15 +790,16 @@ class CrtSearch(enumratorBaseThreaded):
return
def req(self, url):
headers = {'User-Agent': 'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:38.0) Gecko/20100101 Firefox/40.0',
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
'Accept-Language': 'en-GB,en;q=0.5',
'Accept-Encoding': 'gzip, deflate',
headers = {
'User-Agent': 'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:38.0) Gecko/20100101 Firefox/40.0',
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
'Accept-Language': 'en-GB,en;q=0.5',
'Accept-Encoding': 'gzip, deflate',
}
try:
resp = self.session.get(url, headers=headers, timeout=self.timeout)
except Exception as e:
except Exception:
resp = None
return self.get_response(resp)
@@ -807,6 +826,7 @@ class CrtSearch(enumratorBaseThreaded):
except Exception as e:
pass
class PassiveDNS(enumratorBaseThreaded):
def __init__(self, domain, subdomains=None, q=None, silent=False, verbose=True):
subdomains = subdomains or []
@@ -819,10 +839,11 @@ class PassiveDNS(enumratorBaseThreaded):
return
def req(self, url):
headers = {'User-Agent': 'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:38.0) Gecko/20100101 Firefox/40.0',
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
'Accept-Language': 'en-GB,en;q=0.5',
'Accept-Encoding': 'gzip, deflate',
headers = {
'User-Agent': 'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:38.0) Gecko/20100101 Firefox/40.0',
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
'Accept-Language': 'en-GB,en;q=0.5',
'Accept-Encoding': 'gzip, deflate',
}
try:
@@ -833,7 +854,6 @@ class PassiveDNS(enumratorBaseThreaded):
return self.get_response(resp)
def enumerate(self):
url = self.base_url.format(domain=self.domain)
resp = self.req(url)
@@ -850,20 +870,20 @@ class PassiveDNS(enumratorBaseThreaded):
subdomain = link[:link.find('[')].strip()
if subdomain not in self.subdomains and subdomain != self.domain and subdomain.endswith(self.domain):
if self.verbose:
self.print_("%s%s: %s%s"%(R, self.engine_name, W, subdomain))
self.print_("%s%s: %s%s" %( R, self.engine_name, W, subdomain))
self.subdomains.append(subdomain.strip())
except Exception as e:
except Exception:
pass
class portscan():
def __init__(self,subdomains,ports):
class portscan():
def __init__(self, subdomains, ports):
self.subdomains = subdomains
self.ports = ports
self.threads = 20
self.lock = threading.BoundedSemaphore(value=self.threads)
def port_scan(self,host,ports):
def port_scan(self, host, ports):
openports = []
self.lock.acquire()
for port in ports:
@@ -874,15 +894,15 @@ class portscan():
if result == 0:
openports.append(port)
s.close()
except Exception as e:
except Exception:
pass
self.lock.release()
if len(openports) > 0:
print("%s%s%s - %sFound open ports:%s %s%s%s"%(G,host,W,R,W,Y,', '.join(openports),W))
print("%s%s%s - %sFound open ports:%s %s%s%s" % (G, host, W, R, W, Y, ', '.join(openports), W))
def run(self):
for subdomain in self.subdomains:
t = threading.Thread(target=self.port_scan,args=(subdomain,self.ports))
t = threading.Thread(target=self.port_scan, args=(subdomain, self.ports))
t.start()
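
The socket work inside port_scan is largely elided by the diff context; the probe it wraps presumably resembles this minimal connect_ex sketch:

import socket

def tcp_open(host, port, timeout=2):
    # connect_ex returns 0 when the TCP handshake succeeds
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    s.settimeout(timeout)
    try:
        return s.connect_ex((host, int(port))) == 0
    finally:
        s.close()
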
def main(domain, threads, savefile, ports, silent, verbose, enable_bruteforce, engines):
@@ -893,66 +913,70 @@ def main(domain, threads, savefile, ports, silent, verbose, enable_bruteforce, e
subdomains_queue = list()
else:
subdomains_queue = multiprocessing.Manager().list()
#Check Bruteforce Status
# Check Bruteforce Status
if enable_bruteforce or enable_bruteforce is None:
enable_bruteforce = True
#Validate domain
# Validate domain
domain_check = re.compile("^(http|https)?[a-zA-Z0-9]+([\-\.]{1}[a-zA-Z0-9]+)*\.[a-zA-Z]{2,}$")
if not domain_check.match(domain):
if not silent: print(R+"Error: Please enter a valid domain"+W)
if not silent:
print(R + "Error: Please enter a valid domain" + W)
return []
if not domain.startswith('http://') or not domain.startswith('https://'):
domain = 'http://'+domain
domain = 'http://' + domain
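
A pre-existing bug worth flagging in passing: the 'or' makes this condition always true (no URL starts with both schemes), so https:// inputs get an extra http:// prefix. The intended check is presumably:

if not domain.startswith(('http://', 'https://')):
    domain = 'http://' + domain
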
parsed_domain = urlparse.urlparse(domain)
if not silent: print(B+"[-] Enumerating subdomains now for %s"%parsed_domain.netloc+W)
if not silent:
print(B + "[-] Enumerating subdomains now for %s"%parsed_domain.netloc + W)
if verbose and not silent:
print(Y+"[-] verbosity is enabled, will show the subdomains results in realtime"+W)
print(Y + "[-] verbosity is enabled, will show the subdomains results in realtime" + W)
supported_engines = {'baidu':BaiduEnum,
'yahoo':YahooEnum,
'google':GoogleEnum,
'bing':BingEnum,
'ask':AskEnum,
'netcraft':NetcraftEnum,
'dnsdumpster':DNSdumpster,
'virustotal':Virustotal,
'threatcrowd':ThreatCrowd,
'ssl':CrtSearch,
'passivedns':PassiveDNS
supported_engines = {'baidu': BaiduEnum,
'yahoo': YahooEnum,
'google': GoogleEnum,
'bing': BingEnum,
'ask': AskEnum,
'netcraft': NetcraftEnum,
'dnsdumpster': DNSdumpster,
'virustotal': Virustotal,
'threatcrowd': ThreatCrowd,
'ssl': CrtSearch,
'passivedns': PassiveDNS
}
chosenEnums = []
if engines == None:
chosenEnums = [BaiduEnum, YahooEnum, GoogleEnum, BingEnum, AskEnum,
NetcraftEnum, DNSdumpster, Virustotal, ThreatCrowd, CrtSearch, PassiveDNS]
if engines is None:
chosenEnums = [
BaiduEnum, YahooEnum, GoogleEnum, BingEnum, AskEnum,
NetcraftEnum, DNSdumpster, Virustotal, ThreatCrowd,
CrtSearch, PassiveDNS
]
else:
engines = engines.split(',')
for engine in engines:
if engine.lower() in supported_engines:
chosenEnums.append(supported_engines[engine.lower()])
#Start the engines enumeration
# Start the engines enumeration
enums = [enum(domain, [], q=subdomains_queue, silent=silent, verbose=verbose) for enum in chosenEnums]
for enum in enums:
enum.start()
for enum in enums:
enum.join()
subdomains = set(subdomains_queue)
subdomains = set(subdomains_queue)
for subdomain in subdomains:
search_list.add(subdomain)
if enable_bruteforce:
if not silent: print(G+"[-] Starting bruteforce module now using subbrute.."+W)
if not silent:
print(G + "[-] Starting bruteforce module now using subbrute.." + W)
record_type = False
path_to_file = os.path.dirname(os.path.realpath(__file__))
subs = os.path.join(path_to_file, 'subbrute', 'names.txt')
@@ -972,19 +996,22 @@ def main(domain, threads, savefile, ports, silent, verbose, enable_bruteforce, e
if savefile:
write_file(savefile, subdomains)
if not silent: print(Y+"[-] Total Unique Subdomains Found: %s"%len(subdomains)+W)
if not silent:
print(Y + "[-] Total Unique Subdomains Found: %s" % len(subdomains) + W)
if ports:
if not silent: print(G+"[-] Start port scan now for the following ports: %s%s"%(Y,ports)+W)
if not silent:
print(G + "[-] Start port scan now for the following ports: %s%s" % (Y,ports) + W)
ports = ports.split(',')
pscan = portscan(subdomains,ports)
pscan = portscan(subdomains, ports)
pscan.run()
elif not silent:
for subdomain in subdomains:
print(G+subdomain+W)
print(G + subdomain + W)
return subdomains
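
Since everything funnels through main(), the module can also be driven programmatically; a minimal sketch using the signature above:

subdomains = main('example.com', 30, savefile=None, ports=None, silent=True,
                  verbose=False, enable_bruteforce=False, engines=None)
for s in subdomains:
    print(s)
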
if __name__=="__main__":
args = parse_args()
domain = args.domain
@@ -996,7 +1023,5 @@ if __name__=="__main__":
engines = args.engines
if verbose or verbose is None:
verbose = True
banner()
res = main(domain, threads, savefile, ports, silent=False, verbose=verbose, enable_bruteforce=enable_bruteforce, engines=engines)