Merge pull request #185 (Fixed Virustotal Search)
Fix Virustotal search
commit e756cc3a91

sublist3r.py | 37 changed lines (+23, -14)
diff --git a/sublist3r.py b/sublist3r.py
--- a/sublist3r.py
+++ b/sublist3r.py
@@ -669,11 +669,12 @@ class DNSdumpster(enumratorBaseThreaded):
 class Virustotal(enumratorBaseThreaded):
     def __init__(self, domain, subdomains=None, q=None, silent=False, verbose=True):
         subdomains = subdomains or []
-        base_url = 'https://www.virustotal.com/en/domain/{domain}/information/'
+        base_url = 'https://www.virustotal.com/ui/domains/{domain}/subdomains'
         self.engine_name = "Virustotal"
         self.lock = threading.Lock()
         self.q = q
         super(Virustotal, self).__init__(base_url, self.engine_name, domain, subdomains, q=q, silent=silent, verbose=verbose)
+        self.url = self.base_url.format(domain=self.domain)
         return
 
     # the main send_req need to be rewritten
@@ -688,23 +689,31 @@ class Virustotal(enumratorBaseThreaded):
 
     # once the send_req is rewritten we don't need to call this function, the stock one should be ok
     def enumerate(self):
-        url = self.base_url.format(domain=self.domain)
-        resp = self.send_req(url)
-        self.extract_domains(resp)
+        while self.url != '':
+            resp = self.send_req(self.url)
+            resp = json.loads(resp)
+            if 'error' in resp:
+                self.print_(R + "[!] Error: Virustotal probably now is blocking our requests" + W)
+                break
+            if 'links' in resp and 'next' in resp['links']:
+                self.url = resp['links']['next']
+            else:
+                self.url = ''
+            self.extract_domains(resp)
         return self.subdomains
 
     def extract_domains(self, resp):
-        link_regx = re.compile('<div class="enum.*?">.*?<a target="_blank" href=".*?">(.*?)</a>', re.S)
+        #resp is already parsed as json
         try:
-            links = link_regx.findall(resp)
-            for link in links:
-                subdomain = link.strip()
-                if not subdomain.endswith(self.domain):
-                    continue
-                if subdomain not in self.subdomains and subdomain != self.domain:
-                    if self.verbose:
-                        self.print_("%s%s: %s%s" % (R, self.engine_name, W, subdomain))
-                    self.subdomains.append(subdomain.strip())
+            for i in resp['data']:
+                if i['type'] == 'domain':
+                    subdomain = i['id']
+                    if not subdomain.endswith(self.domain):
+                        continue
+                    if subdomain not in self.subdomains and subdomain != self.domain:
+                        if self.verbose:
+                            self.print_("%s%s: %s%s" % (R, self.engine_name, W, subdomain))
+                        self.subdomains.append(subdomain.strip())
         except Exception:
             pass
 
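For context, the new enumerate() loop follows cursor-style pagination: each JSON response lists subdomain objects under 'data' and, when more pages exist, a 'links.next' URL to fetch. The same flow can be sketched as a standalone script. This is only an illustration, not part of the commit: the endpoint and the 'data', 'links', and 'error' fields are taken from the diff above, while the requests library and the generic User-Agent header are assumptions standing in for Sublist3r's own send_req() wrapper.

import json

import requests  # assumption: stand-in for Sublist3r's send_req() wrapper


def fetch_virustotal_subdomains(domain):
    # endpoint and response fields ('data', 'links.next', 'error') come from the diff above
    url = 'https://www.virustotal.com/ui/domains/{domain}/subdomains'.format(domain=domain)
    headers = {'User-Agent': 'Mozilla/5.0'}  # hypothetical header; Sublist3r sends its own
    subdomains = []
    while url:
        resp = json.loads(requests.get(url, headers=headers).text)
        if 'error' in resp:
            break  # VirusTotal is likely rate-limiting or blocking the client
        for item in resp.get('data', []):
            if item['type'] == 'domain':
                subdomains.append(item['id'])  # the subdomain name lives in the 'id' field
        url = resp.get('links', {}).get('next', '')  # follow the cursor; empty string ends the loop
    return subdomains


if __name__ == '__main__':
    print('\n'.join(fetch_virustotal_subdomains('example.com')))

The sketch keeps the same guard as the committed code: an 'error' key in the response ends enumeration early instead of looping against a blocked endpoint.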