Merge pull request #2 from cclauss/patch-1

Avoid [] or {} as default value for function params

commit 13feabda83
sublist3r.py · 408
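The pitfall this PR fixes: a default like `subdomains=[]` or `cookies=dict()` is evaluated once, when the `def` statement runs, so every call that falls back on the default shares one mutable object. A minimal sketch of the trap and of the `None`-sentinel pattern the diff applies throughout (hypothetical names, not from this codebase):

```python
def collect(item, bucket=[]):        # [] is created once, at definition time
    bucket.append(item)
    return bucket

print(collect('a'))                  # ['a']
print(collect('b'))                  # ['a', 'b']  <- state leaked between calls

def collect_fixed(item, bucket=None):
    bucket = bucket or []            # a fresh list on every call
    bucket.append(item)
    return bucket

print(collect_fixed('a'))            # ['a']
print(collect_fixed('b'))            # ['b']
```

One caveat of `x = x or []`: a caller who deliberately passes an empty container also gets a fresh one, since empty containers are falsy; `if x is None: x = []` is the stricter spelling. For the constructors in this diff the looser idiom appears harmless.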
@@ -1,6 +1,8 @@
 #!/usr/bin/env python
+# coding: utf-8
 # SubList3r v0.1
 # By Ahmed Aboul-Ela - twitter.com/aboul3la
+
 import re
 import sys
 import os
@@ -28,10 +30,10 @@ W = '\033[0m' #white
 
 def banner():
     print """%s
     ____ _ _ _ _ _____
     / ___| _ _| |__ | (_)___| |_|___ / _ __
     \___ \| | | | '_ \| | / __| __| |_ \| '__|
     ___) | |_| | |_) | | \__ \ |_ ___) | |
     |____/ \__,_|_.__/|_|_|___/\__|____/|_|%s%s
 
     # Fast Subomains Enumeration tool using Search Engines and BruteForce
@@ -40,35 +42,35 @@ def banner():
     """%(R,W,Y,W)
 
 def parser_error(errmsg):
     banner()
     print "Usage: python "+sys.argv[0]+" [Options] use -h for help"
     print R+"Error: "+errmsg+W
     sys.exit()
 
-def parse_args():
-    #parse the arguments
-    parser = argparse.ArgumentParser(epilog='\tExample: \r\npython '+sys.argv[0]+" -d google.com")
-    parser.error = parser_error
-    parser._optionals.title = "OPTIONS"
-    parser.add_argument('-d','--domain', help='Domain name to enumrate it\'s subdomains', required=True)
-    parser.add_argument('-b','--bruteforce', help='Enable the subbrute bruteforce module', nargs='?', default=False)
-    parser.add_argument('-v','--verbose', help='Enable Verbosity and display results in realtime', nargs='?', default=False)
-    parser.add_argument('-t','--threads', help='Number of threads to use for subbrute bruteforce', type=int, default=10)
-    parser.add_argument('-o','--output', help='Save the results to text file')
-    return parser.parse_args()
+def parse_args():
+    #parse the arguments
+    parser = argparse.ArgumentParser(epilog = '\tExample: \r\npython '+sys.argv[0]+" -d google.com")
+    parser.error = parser_error
+    parser._optionals.title = "OPTIONS"
+    parser.add_argument('-d', '--domain', help="Domain name to enumrate it's subdomains", required=True)
+    parser.add_argument('-b', '--bruteforce', help='Enable the subbrute bruteforce module', nargs='?', default=False)
+    parser.add_argument('-v', '--verbose', help='Enable Verbosity and display results in realtime', nargs='?', default=False)
+    parser.add_argument('-t', '--threads', help='Number of threads to use for subbrute bruteforce', type=int, default=10)
+    parser.add_argument('-o', '--output', help='Save the results to text file')
+    return parser.parse_args()
 
-def write_file(filename,subdomains):
-    #saving subdomains results to output file
+def write_file(filename, subdomains):
+    #saving subdomains results to output file
     print "%s[-] Saving results to file: %s%s%s%s"%(Y,W,R,filename,W)
-    f = open(str(filename),'wb')
-    for subdomain in subdomains:
-        f.write(subdomain+"\r\n")
-    f.close()
+    with open(str(filename), 'wb') as f:
+        for subdomain in subdomains:
+            f.write(subdomain+"\r\n")
 
 class enumratorBase(object):
-    def __init__(self, base_url, engine_name, domain , subdomains=[]):
+    def __init__(self, base_url, engine_name, domain, subdomains=None):
+        subdomains = subdomains or []
         self.domain = urlparse.urlparse(domain).netloc
-        self.session=requests.Session()
+        self.session = requests.Session()
         self.subdomains = []
         self.timeout = 10
         self.base_url = base_url
@@ -78,17 +80,16 @@ class enumratorBase(object):
     def print_banner(self):
         """ subclass can override this if they want a fancy banner :)"""
         print G+"[-] Searching now in %s.." %(self.engine_name)+W
         return
 
     def send_req(self, query, page_no=1):
         headers = {'User-Agent': 'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:38.0) Gecko/20100101 Firefox/38.0',
             'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
             'Accept-Language': 'en-GB,en;q=0.5',
             'Accept-Encoding': 'gzip, deflate',
             'Connection': 'keep-alive'
         }
 
         url = self.base_url.format(query=query, page_no=page_no)
         try:
             resp = self.session.get(url, headers=headers, timeout=self.timeout)
@@ -96,31 +97,25 @@ class enumratorBase(object):
             print e
             raise
         return resp.text
 
     def check_max_subdomains(self,count):
         if self.MAX_DOMAINS == 0:
             return False
-        if count >= self.MAX_DOMAINS:
-            return True
-        else:
-            return False
+        return count >= self.MAX_DOMAINS
 
-    def check_max_pages(self,num):
+    def check_max_pages(self, num):
         if self.MAX_PAGES == 0:
             return False
-        if num >= self.MAX_PAGES:
-            return True
-        else:
-            return False
+        return num >= self.MAX_PAGES
 
     #Override
     def extract_domains(self, resp):
         """ chlid class should override this function """
         return
 
     #override
-    def check_response_errors(self,resp):
+    def check_response_errors(self, resp):
         """ chlid class should override this function
         The function should return True if there are no errors and False otherwise
         """
         return True
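`check_max_subdomains` and `check_max_pages` get the same simplification: an `if`/`else` whose two branches return `True` and `False` can return the comparison directly, because the comparison already evaluates to a bool. As a standalone sketch (hypothetical function):

```python
def reached_limit(count, limit):
    # before:
    #     if count >= limit:
    #         return True
    #     else:
    #         return False
    return count >= limit        # identical truth table, one line

assert reached_limit(11, 10) is True
assert reached_limit(3, 10) is False
```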
@@ -133,44 +128,41 @@ class enumratorBase(object):
         """ chlid class should override this function """
         return
 
-    def get_page(self,num):
+    def get_page(self, num):
         """ chlid class that user different pagnation counter should override this function """
-        return num+10
+        return num + 10
 
-    def enumerate(self,altquery=False):
+    def enumerate(self, altquery=False):
         flag = True
         page_no = 0
-        prev_links =[]
+        prev_links = []
         prev_subdomains = []
         retries = 0
 
         while flag:
             query = self.generate_query()
             count = query.count(self.domain) #finding the number of subdomains found so far
 
-            #if they we reached the maximum number of subdomains in search query then we should go over the pages
+            #if they we reached the maximum number of subdomains in search query
+            #then we should go over the pages
             if self.check_max_subdomains(count):
-                page_no= self.get_page(page_no)
+                page_no = self.get_page(page_no)
 
             if self.check_max_pages(page_no): #maximum pages for Google to avoid getting blocked
                 return self.subdomains
 
             resp = self.send_req(query, page_no)
 
             #check if there is any error occured
             if not self.check_response_errors(resp):
                 return self.subdomains
 
             links = self.extract_domains(resp)
 
             #if the previous page hyperlinks was the similar to the current one, then maybe we have reached the last page
             if links == prev_links:
-                retries+=1
-                page_no= self.get_page(page_no)
+                retries += 1
+                page_no = self.get_page(page_no)
 
                 #make another retry maybe it isn't the last page
                 if retries >= 3:
                     return self.subdomains
@@ -181,11 +173,12 @@ class enumratorBase(object):
 
 class enumratorBaseThreaded(multiprocessing.Process, enumratorBase):
-    def __init__(self, base_url, engine_name, domain , subdomains=[], q=None,lock=threading.Lock()):
+    def __init__(self, base_url, engine_name, domain, subdomains=None, q=None, lock=threading.Lock()):
+        subdomains = subdomains or []
         enumratorBase.__init__(self, base_url, engine_name, domain, subdomains)
         multiprocessing.Process.__init__(self)
         self.lock = lock
-        self.q=q
+        self.q = q
         return
 
     def run(self):
@@ -194,13 +187,14 @@ class enumratorBaseThreaded(multiprocessing.Process, enumratorBase):
 
 class GoogleEnum(enumratorBaseThreaded):
-    def __init__(self, domain , subdomains=[], q=None):
+    def __init__(self, domain, subdomains=None, q=None):
+        subdomains = subdomains or []
         base_url = "https://google.com/search?q={query}&btnG=Search&hl=en-US&biw=&bih=&gbv=1&start={page_no}&filter=0"
-        self.engine_name="Google"
+        self.engine_name = "Google"
         self.MAX_DOMAINS = 11
         self.MAX_PAGES = 200
-        super(GoogleEnum, self).__init__(base_url, self.engine_name,domain, subdomains,q=q)
-        self.q=q
+        super(GoogleEnum, self).__init__(base_url, self.engine_name, domain, subdomains, q=q)
+        self.q = q
         return
 
     def extract_domains(self, resp):
@@ -208,18 +202,17 @@ class GoogleEnum(enumratorBaseThreaded):
         try:
             links_list = link_regx.findall(resp)
             for link in links_list:
-                link = re.sub('<span.*>','',link)
+                link = re.sub('<span.*>', '', link)
                 if not link.startswith('http'):
                     link="http://"+link
                 subdomain = urlparse.urlparse(link).netloc
-                if subdomain not in self.subdomains and subdomain != self.domain and subdomain != '':
+                if subdomain and subdomain not in self.subdomains and subdomain != self.domain:
                     if verbose:
-                        print "%s%s: %s%s"%(R,self.engine_name,W,subdomain)
+                        print "%s%s: %s%s"%(R, self.engine_name, W, subdomain)
                     self.subdomains.append(subdomain)
         except Exception as e:
             pass
         return links_list
 
     def check_response_errors(self, resp):
         if 'Our systems have detected unusual traffic' in resp:
@@ -228,26 +221,28 @@ class GoogleEnum(enumratorBaseThreaded):
             return False
         return True
 
     def should_sleep(self):
         time.sleep(5)
         return
 
     def generate_query(self):
-        if len(self.subdomains) > 0:
-            query = "site:{domain} -www.{domain} -{found}".format(domain=self.domain, found=' -'.join(self.subdomains[:self.MAX_DOMAINS-2]))
+        if self.subdomains:
+            fmt = 'site:{domain} -www.{domain} -{found}'
+            found = ' -'.join(self.subdomains[:self.MAX_DOMAINS-2])
+            query = fmt.format(domain=self.domain, found=found)
         else:
             query = "site:{domain} -www.{domain}".format(domain=self.domain)
         return query
 
 class YahooEnum(enumratorBaseThreaded):
-    def __init__(self, domain , subdomains=[], q=None):
+    def __init__(self, domain, subdomains=None, q=None):
+        subdomains = subdomains or []
         base_url = "https://search.yahoo.com/search?p={query}&b={page_no}"
-        self.engine_name="Yahoo"
+        self.engine_name = "Yahoo"
         self.MAX_DOMAINS = 10
         self.MAX_PAGES = 0
         super(YahooEnum, self).__init__(base_url, self.engine_name,domain, subdomains, q=q)
-        self.q=q
+        self.q = q
         return
 
     def extract_domains(self, resp):
@@ -258,46 +253,45 @@ class YahooEnum(enumratorBaseThreaded):
             links2 = link_regx2.findall(resp)
             links_list = links+links2
             for link in links_list:
-                link = re.sub("<(\/)?b>","",link)
+                link = re.sub("<(\/)?b>","", link)
                 if not link.startswith('http'):
                     link="http://"+link
                 subdomain = urlparse.urlparse(link).netloc
                 if not subdomain.endswith(self.domain):
                     continue
-                if subdomain not in self.subdomains and subdomain != self.domain and subdomain != self.domain and subdomain != '':
+                if subdomain and subdomain not in self.subdomains and subdomain != self.domain:
                     if verbose:
-                        print "%s%s: %s%s"%(R,self.engine_name,W,subdomain)
+                        print "%s%s: %s%s"%(R, self.engine_name, W, subdomain)
                     self.subdomains.append(subdomain)
         except Exception as e:
             pass
 
         return links_list
 
     def should_sleep(self):
         return
 
     def get_page(self,num):
-        return num+10
+        return num + 10
 
     def generate_query(self):
-        if len(self.subdomains) > 0:
-            query = "site:{domain} -domain:www.{domain} -domain:{found}".format(domain=self.domain, found=' -domain:'.join(self.subdomains[:77]))
+        if self.subdomains:
+            fmt = 'site:{domain} -domain:www.{domain} -domain:{found}'
+            found = ' -domain:'.join(self.subdomains[:77])
+            query = fmt.format(domain=self.domain, found=found)
         else:
             query = "site:{domain}".format(domain=self.domain)
         return query
 
 class AskEnum(enumratorBaseThreaded):
-    def __init__(self, domain , subdomains=[], q=None):
+    def __init__(self, domain, subdomains=None, q=None):
+        subdomains = subdomains or []
         base_url = 'http://www.ask.com/web?q={query}&page={page_no}&qid=8D6EE6BF52E0C04527E51F64F22C4534&o=0&l=dir&qsrc=998&qo=pagination'
-        self.engine_name="Ask"
+        self.engine_name = "Ask"
         self.MAX_DOMAINS = 11
         self.MAX_PAGES = 0
-        enumratorBaseThreaded.__init__(self, base_url, self.engine_name,domain, subdomains, q=q)
-        self.q=q
+        enumratorBaseThreaded.__init__(self, base_url, self.engine_name, domain, subdomains, q=q)
+        self.q = q
         return
 
     def extract_domains(self, resp):
@@ -310,32 +304,35 @@ class AskEnum(enumratorBaseThreaded):
                 subdomain = urlparse.urlparse(link).netloc
                 if subdomain not in self.subdomains and subdomain != self.domain:
                     if verbose:
-                        print "%s%s: %s%s"%(R,self.engine_name,W,subdomain)
+                        print "%s%s: %s%s"%(R, self.engine_name, W, subdomain)
                     self.subdomains.append(subdomain)
         except Exception as e:
             pass
 
         return links_list
 
     def get_page(self,num):
-        return num+1
+        return num + 1
 
     def generate_query(self):
-        if len(self.subdomains) > 0:
-            query = "site:{domain} -www.{domain} -{found}".format(domain=self.domain, found=' -'.join(self.subdomains[:self.MAX_DOMAINS]))
+        if self.subdomains:
+            fmt = 'site:{domain} -www.{domain} -{found}'
+            found = ' -'.join(self.subdomains[:self.MAX_DOMAINS])
+            query = fmt.format(domain=self.domain, found=found)
         else:
             query = "site:{domain} -www.{domain}".format(domain=self.domain)
 
         return query
 
 class BingEnum(enumratorBaseThreaded):
-    def __init__(self, domain , subdomains=[], q=None):
+    def __init__(self, domain, subdomains=None, q=None):
+        subdomains = subdomains or []
         base_url = 'https://www.bing.com/search?q={query}&go=Submit&first={page_no}'
-        self.engine_name="Bing"
+        self.engine_name = "Bing"
         self.MAX_DOMAINS = 30
         self.MAX_PAGES = 0
         enumratorBaseThreaded.__init__(self, base_url, self.engine_name,domain, subdomains,q=q)
-        self.q=q
+        self.q = q
         return
 
     def extract_domains(self, resp):
@@ -347,37 +344,39 @@ class BingEnum(enumratorBaseThreaded):
             links_list = links+links2
 
             for link in links_list:
-                link = re.sub('<(\/)?strong>|<span.*?>|<|>','',link)
+                link = re.sub('<(\/)?strong>|<span.*?>|<|>', '', link)
                 if not link.startswith('http'):
                     link="http://"+link
                 subdomain = urlparse.urlparse(link).netloc
                 if subdomain not in self.subdomains and subdomain != self.domain:
                     if verbose:
-                        print "%s%s: %s%s"%(R,self.engine_name,W,subdomain)
+                        print "%s%s: %s%s"%(R, self.engine_name, W, subdomain)
                     self.subdomains.append(subdomain)
 
         except Exception as e:
             pass
 
         return links_list
 
     def generate_query(self):
-        if len(self.subdomains) > 0:
-            query = "domain:{domain} -www.{domain} -{found}".format(domain=self.domain, found=' -'.join(self.subdomains[:self.MAX_DOMAINS]))
+        if self.subdomains:
+            fmt = 'domain:{domain} -www.{domain} -{found}'
+            found = ' -'.join(self.subdomains[:self.MAX_DOMAINS])
+            query = fmt.format(domain=self.domain, found=found)
         else:
             query = "domain:{domain} -www.{domain}".format(domain=self.domain)
         return query
 
 class BaiduEnum(enumratorBaseThreaded):
-    def __init__(self, domain , subdomains=[], q=None):
+    def __init__(self, domain, subdomains=None, q=None):
+        subdomains = subdomains or []
         base_url = 'http://www.baidu.com/s?pn={page_no}&wd={query}'
-        self.engine_name="Baidu"
+        self.engine_name = "Baidu"
         self.MAX_DOMAINS = 2
         self.MAX_PAGES = 760
         enumratorBaseThreaded.__init__(self, base_url, self.engine_name,domain, subdomains, q=q)
         self.querydomain = self.domain
-        self.q=q
+        self.q = q
         return
 
     def extract_domains(self, resp):
@@ -387,7 +386,7 @@ class BaiduEnum(enumratorBaseThreaded):
         try:
             links = link_regx.findall(resp)
             for link in links:
-                link = re.sub('<.*?>|>|<| ','',link)
+                link = re.sub('<.*?>|>|<| ', '', link)
                 if not link.startswith('http'):
                     link="http://"+link
                 subdomain = urlparse.urlparse(link).netloc
@@ -396,27 +395,20 @@ class BaiduEnum(enumratorBaseThreaded):
                 if subdomain not in self.subdomains and subdomain != self.domain:
                     found_newdomain = True
                     if verbose:
-                        print "%s%s: %s%s"%(R,self.engine_name,W,subdomain)
+                        print "%s%s: %s%s"%(R, self.engine_name, W, subdomain)
                     self.subdomains.append(subdomain)
         except Exception as e:
             pass
-        if not found_newdomain and len(subdomain_list) != 0:
+        if not found_newdomain and subdomain_list:
            self.querydomain = self.findsubs(subdomain_list)
         return links
 
-    def findsubs(self,subdomains):
+    def findsubs(self, subdomains):
         count = Counter(subdomains)
         subdomain1 = max(count, key=count.get)
-        count.pop(subdomain1,"None")
-        if len(count) > 0:
-            subdomain2 = max(count, key=count.get)
-        else:
-            subdomain2 = ''
-        return (subdomain1,subdomain2)
+        count.pop(subdomain1, "None")
+        subdomain2 = max(count, key=count.get) if count else ''
+        return (subdomain1, subdomain2)
 
     def check_response_errors(self, resp):
         return True
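The `findsubs` rewrite folds the "second most common element, or empty string" branch into one conditional expression. How the pieces fit together, as a standalone sketch with toy data (not from the PR):

```python
from collections import Counter

subs = ['a.example.com', 'b.example.com', 'a.example.com']
count = Counter(subs)
first = max(count, key=count.get)                    # most frequent key
count.pop(first, "None")                             # default avoids a KeyError
second = max(count, key=count.get) if count else ''  # '' once the Counter is empty
print(first, second)                                 # a.example.com b.example.com
```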
@@ -425,23 +417,24 @@ class BaiduEnum(enumratorBaseThreaded):
         return
 
     def generate_query(self):
-        if len(self.subdomains) > 0 and self.querydomain != self.domain:
-            query = "site:{domain} -site:{found} ".format(domain=self.domain, found=' -site:'.join(self.querydomain))
+        if self.subdomains and self.querydomain != self.domain:
+            found = ' -site:'.join(self.querydomain)
+            query = "site:{domain} -site:{found} ".format(domain=self.domain, found=found)
         else:
             query = "site:{domain}".format(domain=self.domain)
 
         return query
 
 class NetcraftEnum(multiprocessing.Process):
-    def __init__(self, domain , subdomains=[], q=None,lock=threading.Lock()):
+    def __init__(self, domain, subdomains=None, q=None, lock=threading.Lock()):
+        subdomains = subdomains or []
         self.base_url = 'http://searchdns.netcraft.com/?restriction=site+ends+with&host={domain}'
         self.domain = urlparse.urlparse(domain).netloc
         self.subdomains = []
-        self.session=requests.Session()
-        self.engine_name="Netcraft"
+        self.session = requests.Session()
+        self.engine_name = "Netcraft"
         multiprocessing.Process.__init__(self)
         self.lock = lock
-        self.q=q
+        self.q = q
         self.timeout = 10
         self.print_banner()
         return
@@ -453,14 +446,15 @@ class NetcraftEnum(multiprocessing.Process):
 
     def print_banner(self):
         print G+"[-] Searching now in %s.." %(self.engine_name)+W
         return
 
-    def req(self,url,cookies=dict()):
+    def req(self, url, cookies=None):
+        cookies = cookies or {}
         headers = {'User-Agent': 'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:38.0) Gecko/20100101 Firefox/40.0',
             'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
             'Accept-Language': 'en-GB,en;q=0.5',
             'Accept-Encoding': 'gzip, deflate',
         }
         try:
             resp = self.session.get(url, headers=headers, timeout=self.timeout,cookies=cookies)
         except Exception as e:
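`cookies=dict()` had the same once-evaluated problem as the list defaults: a mutation made during one call would still be there on the next call that used the default. A short sketch of the dict variant of the trap (hypothetical function, not Netcraft's `req`):

```python
def remember(url, seen={}):       # one shared dict for every defaulted call
    seen[url] = True
    return seen

print(remember('a.example.com'))  # {'a.example.com': True}
print(remember('b.example.com'))  # {'a.example.com': True, 'b.example.com': True}
```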
@@ -468,23 +462,20 @@ class NetcraftEnum(multiprocessing.Process):
             raise
         return resp
 
-    def get_next(self,resp):
+    def get_next(self, resp):
         link_regx = re.compile('<A href="(.*?)"><b>Next page</b></a>')
         link = link_regx.findall(resp)
-        link = re.sub('host=.*?%s'%self.domain,'host=%s'%self.domain,link[0])
+        link = re.sub('host=.*?%s'%self.domain, 'host=%s'%self.domain, link[0])
         url = 'http://searchdns.netcraft.com'+link
         return url
 
-    def create_cookies(self,cookie):
+    def create_cookies(self, cookie):
         cookies = dict()
         cookies_list = cookie[0:cookie.find(';')].split("=")
         cookies[cookies_list[0]] = cookies_list[1]
         cookies['netcraft_js_verification_response'] = hashlib.sha1(urllib.unquote(cookies_list[1])).hexdigest()
         return cookies
 
     def enumerate(self):
         start_url = self.base_url.format(domain='example.com')
         resp = self.req(start_url)
@@ -497,7 +488,7 @@ class NetcraftEnum(multiprocessing.Process):
                 return self.subdomains
                 break
             url = self.get_next(resp)
 
     def extract_domains(self, resp):
         link_regx = re.compile('<a href="http://toolbar.netcraft.com/site_report\?url=(.*)">')
         try:
@@ -506,27 +497,26 @@ class NetcraftEnum(multiprocessing.Process):
                 subdomain = urlparse.urlparse(link).netloc
                 if not subdomain.endswith(self.domain):
                     continue
-                if subdomain not in self.subdomains and subdomain != self.domain and subdomain != '':
+                if subdomain and subdomain not in self.subdomains and subdomain != self.domain:
                     if verbose:
-                        print "%s%s: %s%s"%(R,self.engine_name,W,subdomain)
+                        print "%s%s: %s%s"%(R, self.engine_name, W, subdomain)
                     self.subdomains.append(subdomain)
         except Exception as e:
             pass
         return links_list
 
 class DNSdumpster(multiprocessing.Process):
-    def __init__(self, domain , subdomains=[], q=None,lock=threading.Lock()):
+    def __init__(self, domain, subdomains=None, q=None, lock=threading.Lock()):
+        subdomains = subdomains or []
         self.base_url = 'https://dnsdumpster.com/'
         self.domain = urlparse.urlparse(domain).netloc
         self.subdomains = []
-        self.session=requests.Session()
-        self.engine_name="DNSdumpster"
+        self.session = requests.Session()
+        self.engine_name = "DNSdumpster"
         multiprocessing.Process.__init__(self)
         self.lock = lock
-        self.q=q
+        self.q = q
         self.timeout = 10
         self.print_banner()
         return
@@ -538,7 +528,7 @@ class DNSdumpster(multiprocessing.Process):
 
     def print_banner(self):
         print G+"[-] Searching now in %s.." %(self.engine_name)+W
         return
 
     def check_host(self,host):
         is_valid = False
@@ -546,46 +536,44 @@ class DNSdumpster(multiprocessing.Process):
         Resolver.nameservers = ['8.8.8.8', '8.8.4.4']
         try:
             ip = Resolver.query(host, 'A')[0].to_text()
-            if ip != '' and ip is not None:
+            if ip:
                 is_valid = True
         except:
             pass
         return is_valid
 
-    def req(self,req_method,url,params=dict()):
+    def req(self, req_method, url, params=None):
+        params = params or {}
         headers = {'User-Agent': 'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:38.0) Gecko/20100101 Firefox/40.0',
             'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
             'Accept-Language': 'en-GB,en;q=0.5',
             'Accept-Encoding': 'gzip, deflate',
-            'Referer':'https://dnsdumpster.com'
+            'Referer': 'https://dnsdumpster.com'
         }
 
         try:
             if req_method == 'GET':
                 resp = self.session.get(url, headers=headers, timeout=self.timeout)
             else:
-                resp = self.session.post(url,data=params,headers=headers,timeout=self.timeout)
+                resp = self.session.post(url, data=params, headers=headers, timeout=self.timeout)
         except Exception as e:
             print e
             raise
         return resp.text
 
-    def get_csrftoken(self,resp):
+    def get_csrftoken(self, resp):
         csrf_regex = re.compile("<input type='hidden' name='csrfmiddlewaretoken' value='(.*?)' />",re.S)
         token = csrf_regex.findall(resp)[0]
         return token.strip()
 
     def enumerate(self):
-        resp = self.req('GET',self.base_url)
+        resp = self.req('GET', self.base_url)
         token = self.get_csrftoken(resp)
-        params = {'csrfmiddlewaretoken':token,'targetip':self.domain}
-        post_resp = self.req('POST',self.base_url,params)
+        params = {'csrfmiddlewaretoken':token, 'targetip':self.domain}
+        post_resp = self.req('POST', self.base_url, params)
         self.extract_domains(post_resp)
         return self.subdomains
 
     def extract_domains(self, resp):
         tbl_regex = re.compile('<a name="hostanchor"><\/a>Host Records.*?<table.*?>(.*?)</table>',re.S)
         link_regex = re.compile('<td class="col-md-4">(.*?)<br>',re.S)
@@ -597,12 +585,11 @@ class DNSdumpster(multiprocessing.Process):
             subdomain = link.strip()
             if not subdomain.endswith(self.domain):
                 continue
-            if self.check_host(subdomain) and subdomain not in self.subdomains and subdomain != self.domain and subdomain != '':
+            if self.check_host(subdomain) and subdomain and subdomain not in self.subdomains and subdomain != self.domain:
                 if verbose:
-                    print "%s%s: %s%s"%(R,self.engine_name,W,subdomain)
+                    print "%s%s: %s%s"%(R, self.engine_name, W, subdomain)
                 self.subdomains.append(subdomain)
         return links
 
 def main():
@@ -615,7 +602,7 @@ def main():
     baidu_list = []
     bruteforce_list = set()
     subdomains_queue = multiprocessing.Queue()
 
     #Check Verbosity
     global verbose
     verbose = args.verbose
@@ -634,63 +621,46 @@ def main():
         sys.exit()
 
     if not domain.startswith('http://') or not domain.startswith('https://'):
         domain = 'http://'+domain
 
     #Print the Banner
     banner()
     parsed_domain = urlparse.urlparse(domain)
 
     print B+"[-] Enumerating subdomains now for %s"%parsed_domain.netloc+W
 
     if verbose:
         print Y+"[-] verbosity is enabled, will show the subdomains results in realtime"+W
 
     #Start the engines enumeration
-    enum_baidu = BaiduEnum(domain, verbose,q=subdomains_queue)
-    enum_yahoo = YahooEnum(domain, verbose,q=subdomains_queue)
-    enum_google = GoogleEnum(domain, verbose, q=subdomains_queue)
-    enum_bing = BingEnum(domain, verbose, q=subdomains_queue)
-    enum_ask = AskEnum(domain, verbose, q=subdomains_queue)
-    enum_netcraft = NetcraftEnum(domain, verbose, q=subdomains_queue)
-    enum_dnsdumpester = DNSdumpster(domain, verbose, q=subdomains_queue)
-
-    enum_baidu.start()
-    enum_yahoo.start()
-    enum_google.start()
-    enum_bing.start()
-    enum_ask.start()
-    enum_netcraft.start()
-    enum_dnsdumpester.start()
-
-    enum_baidu.join()
-    enum_yahoo.join()
-    enum_google.join()
-    enum_bing.join()
-    enum_ask.join()
-    enum_netcraft.join()
-    enum_dnsdumpester.join()
+    enums = [enum(domain, verbose, q=subdomains_queue) for enum in AskEnum,
+             BaiduEnum, BingEnum, DNSdumpster, GoogleEnum, NetcraftEnum, YahooEnum]
+    for enum in enums:
+        enum.start()
+    for enum in enums:
+        enum.join()
 
     search_list = set()
 
     while not subdomains_queue.empty():
         search_list= search_list.union(subdomains_queue.get())
 
     if enable_bruteforce:
         print G+"[-] Starting bruteforce module now using subbrute.."+W
         record_type = False
-        subs = os.path.join(os.path.dirname(os.path.realpath(__file__)), "subbrute/names.txt")
-        resolvers=os.path.join(os.path.dirname(os.path.realpath(__file__)) ,"subbrute/resolvers.txt")
-        process_count=threads
-        output = False
-        json_output=False
-        bruteforce_list = subbrute.print_target(parsed_domain.netloc, record_type, subs, resolvers, process_count, output, json_output, search_list,verbose)
+        path_to_file = os.path.dirname(os.path.realpath(__file__))
+        subs = os.path.join(path_to_file, 'subbrute', 'names.txt')
+        resolvers = os.path.join(path_to_file, 'subbrute', 'resolvers.txt')
+        process_count = threads
+        output = False
+        json_output = False
+        bruteforce_list = subbrute.print_target(parsed_domain.netloc, record_type, subs, resolvers, process_count, output, json_output, search_list, verbose)
 
     subdomains = search_list.union(bruteforce_list)
 
-    if len(subdomains) > 0:
-        if savefile is not None:
-            write_file(savefile,subdomains)
+    if subdomains:
+        if savefile:
+            write_file(savefile, subdomains)
         print Y+"[-] Total Unique Subdomains Found: %s"%len(subdomains)+W
         for subdomain in subdomains:
             print G+subdomain+W
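The `main()` refactor replaces twenty-one near-identical lines with a list of engine classes and two loops. The shape of that pattern in a self-contained sketch, with a toy worker standing in for the real enumerator classes (hypothetical names):

```python
import multiprocessing

class Worker(multiprocessing.Process):      # stand-in for GoogleEnum, BingEnum, ...
    def __init__(self, label, q):
        multiprocessing.Process.__init__(self)
        self.label = label
        self.q = q

    def run(self):
        self.q.put('%s done' % self.label)  # report back through the shared queue

if __name__ == '__main__':
    q = multiprocessing.Queue()
    workers = [Worker(label, q) for label in ('ask', 'bing', 'google')]
    for w in workers:                       # start them all first...
        w.start()
    for w in workers:                       # ...then wait for them all
        w.join()
    while not q.empty():
        print(q.get())
```

Keeping `start()` and `join()` in two separate loops is the point: a single loop that joined each worker right after starting it would run the engines one at a time instead of in parallel.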