Commit

added () to all the prints and solved google import problems
Grilinux committed Mar 12, 2024
1 parent e1f4937 commit a59fcb2
Showing 8 changed files with 35 additions and 35 deletions.
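
Every file below applies the same two fixes: Python 2 print statements become Python 3 print() calls, and the stale Google search import moves to its current module name. A minimal sketch of both patterns (the message string is illustrative, not from the repo):

    #!/usr/bin/env python3
    # The legacy `google` pip package now exposes its module as `googlesearch`,
    # hence the import rename in googledork.py and pastebinscrape.py below.
    from googlesearch import search

    # Python 2: print 'text' is a statement and fails to parse under Python 3;
    # print('text') works in both.
    print('[+] example status message')
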
16 changes: 8 additions & 8 deletions modules/credleaks.py
@@ -17,12 +17,12 @@ def run(self, args, lookup, startTime, reportDir):
         credLeakDir = './credleaks'
 
         if not os.path.exists(potfileDir):
-            print '[-] The potfile directory is missing. Symlink your location to ./potfile and see if that works'
+            print('[-] The potfile directory is missing. Symlink your location to ./potfile and see if that works')
             return
 
 
         if not os.path.exists(credLeakDir):
-            print '[-] The credleaks directory is missing. Symlink your location to ./credleaks and see if that works'
+            print('[-] The credleaks directory is missing. Symlink your location to ./credleaks and see if that works')
             return
 
         #for each domain/ip provided
@@ -34,7 +34,7 @@ def run(self, args, lookup, startTime, reportDir):
             credResult=[]
 
 
-            print '[+] Searching credential dumps for entries that contain '+l
+            print('[+] Searching credential dumps for entries that contain '+l)
             #overall, take the lookup value (preferably a domain) and search the dumps for it
             #for each file in ./credleaks directory
             #really need to get this data out of text files and into an indexed form. it's slow af
@@ -65,7 +65,7 @@ def run(self, args, lookup, startTime, reportDir):
                     dumpDict['xxx']=str(line.rstrip("\r\n"))
             #print each file searched and how many matches if verbose
             if args.verbose is True:
-                print '[i] Searched ' + str(credFileName)+' and found '+ str(j)
+                print('[i] Searched ' + str(credFileName)+' and found '+ str(j))
 
 
             #print hash and user of files if verbose
@@ -74,7 +74,7 @@ def run(self, args, lookup, startTime, reportDir):
                     print(str(u))
 
             #start printing stuff and appending to credResult
-            print '[+] Searching Local Credential Dumps in ./credleaks against potfile in ./potfile '
+            print('[+] Searching Local Credential Dumps in ./credleaks against potfile in ./potfile ')
             credFile.writelines('********EMAILS FOUND BELOW********\n\n\n\n')
             credResult.append('********EMAILS FOUND BELOW********\n\n\n\n')
 
@@ -94,7 +94,7 @@ def run(self, args, lookup, startTime, reportDir):
             #open a pot file
             with open('./potfile/'+potFileName, 'r') as potFile:
                 #tell user you are looking
-                print '[i] Any creds you have in your potfile will appear below as user:hash:plain : '
+                print('[i] Any creds you have in your potfile will appear below as user:hash:plain : ')
                 #then look at every line
                 for potLine in potFile:
                     #then for every line look at every hash and user in the dict
@@ -103,7 +103,7 @@ def run(self, args, lookup, startTime, reportDir):
                         #that is also the same length as the original hash (this is probably a crappy check tho...)
                         if str(h) == str(potLine[0:len(h)]):
                             #print the user: and the line from the potfile (hash:plain) to the user
-                            print str(u)+':'+str(potLine.rstrip("\r\n"))
+                            print(str(u)+':'+str(potLine.rstrip("\r\n")))
                             #need to append the output to a variable to return or write to the file
                             #this is separate because not all found usernames/emails have hashes and not all hashes are cracked
                             #write to text file
@@ -113,4 +113,4 @@ def run(self, args, lookup, startTime, reportDir):
 
 
         return credResult
-        print credResult
+        print(credResult)
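
The potfile check above compares each stored hash against the leading len(hash) characters of every potfile line (hash:plain), printing user:hash:plain on a hit. A minimal sketch of that idea with hypothetical data (the module reads ./credleaks and ./potfile instead):

    # user -> hash, as the module collects from the dump files
    dump = {'alice@example.com': '5f4dcc3b5aa765d61d8327deb882cf99'}
    potfile_lines = ['5f4dcc3b5aa765d61d8327deb882cf99:password']  # hash:plain

    for user, h in dump.items():
        for line in potfile_lines:
            if line[:len(h)] == h:                      # same prefix check as the module
                print(user + ':' + line.rstrip('\r\n')) # user:hash:plain
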
6 changes: 3 additions & 3 deletions modules/dnsquery.py
@@ -13,13 +13,13 @@ def run(self, args, lookup, reportDir):
 
         #iterate the index and values of the lookup list
         for i, l in enumerate(lookup):
-            print '[+] Performing DNS query '+ str(i + 1) + ' using "host -a ' + l+'"'
+            print('[+] Performing DNS query '+ str(i + 1) + ' using "host -a ' + l+'"')
             dnsFile=open(reportDir+l+'/'+l+'_dns.txt','w')
             #subprocess to run host -a on the current value of l in the loop, split into newlines
             try:
                 dnsCmd = subprocess.Popen(['host', '-a', str(l)], stdout = subprocess.PIPE).communicate()[0].split('\n')
             except:
-                print '[-] Error running dns query'
+                print('[-] Error running dns query')
                 dnsResult.append('Error running DNS query')
                 continue
             #append lists together
@@ -30,7 +30,7 @@ def run(self, args, lookup, reportDir):
 
         #print dnsResult if -v
         if args.verbose is True:
-            for d in dnsResult: print '\n'.join(d)
+            for d in dnsResult: print('\n'.join(d))
 
         #return list object
        return dnsResult
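
One Python 3 detail this commit leaves untouched: without text mode, communicate()[0] returns bytes, so the .split('\n') above raises a TypeError under Python 3. A minimal sketch of the same query with text output (function name and structure are illustrative):

    import subprocess

    def dns_query(target):
        # universal_newlines=True (text=True on 3.7+) makes communicate()
        # return str, so splitting on '\n' works as the module expects.
        out = subprocess.Popen(['host', '-a', target],
                               stdout=subprocess.PIPE,
                               universal_newlines=True).communicate()[0]
        return out.split('\n')
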
8 changes: 4 additions & 4 deletions modules/googledork.py
@@ -1,7 +1,7 @@
 #!/usr/bin/env python
 
 import requests, time
-from google import search
+from googlesearch import search
 from lxml import html
 
 
@@ -35,7 +35,7 @@ def run(self, args, lookup, reportDir):
             googleFile=open(reportDir+l+'/'+l+'_google_dork.txt','w')
 
             #show user what is being searched
-            print ('[+] Google query %s for %s site:%s' % (str(i + 1),str(d),str(l)))
+            print('[+] Google query %s for %s site:%s' % (str(i + 1),str(d),str(l)))
             print('[+] Results:')
 
             try:
@@ -48,7 +48,7 @@ def run(self, args, lookup, reportDir):
                 time.sleep(2)
             #catch exceptions
             except Exception as e:
-                print ('[!] Error encountered: %s' % e)
+                print('[!] Error encountered: %s' % e)
                 pass
             #iterate results
             for r in self.google_result:
@@ -57,7 +57,7 @@ def run(self, args, lookup, reportDir):
 
         #verbosity flag
         if self.args.verbose is True:
-            for r in self.google_result: print (''.join(r))
+            for r in self.google_result: print(''.join(r))
 
         #return results list
         return self.google_result
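
The renamed import in use: a hedged sketch of the dorking loop, assuming the legacy googlesearch interface (the stop and pause keywords come from that package, not from this diff):

    import time
    from googlesearch import search

    def dork(domain, dorks, verbose=False):
        results = []
        for d in dorks:
            query = '%s site:%s' % (d, domain)
            try:
                # stop caps total results; pause throttles requests so Google
                # is less likely to block the client.
                for url in search(query, stop=10, pause=2.0):
                    results.append(url)
            except Exception as e:
                print('[!] Error encountered: %s' % e)
            time.sleep(2)  # same cool-down idea as the module
        if verbose:
            for r in results:
                print(r)
        return results
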
6 changes: 3 additions & 3 deletions modules/hibp.py
@@ -42,9 +42,9 @@ def run(self, args, lookup, reportDir):
             page.raise_for_status()
         except requests.exceptions.HTTPError as e:
             if e.response.status_code == 503:
-                print ('Service unavailable')
+                print('Service unavailable')
                 continue
         except:
-            print ('[-] Connection error or no result on {} :'.format(url))
-            print ('[-] Status code {}'.format(page.status_code))
+            print('[-] Connection error or no result on {} :'.format(url))
+            print('[-] Status code {}'.format(page.status_code))
             continue
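
For context around this hunk, a hedged sketch of the whole request with the exception classes spelled out; the v3 endpoint and hibp-api-key header are assumptions about the current HIBP API, not taken from this commit:

    import requests

    def check_breaches(account, api_key):
        url = 'https://haveibeenpwned.com/api/v3/breachedaccount/%s' % account
        try:
            page = requests.get(url, headers={'hibp-api-key': api_key,
                                              'User-Agent': 'osint-script'})
            page.raise_for_status()
        except requests.exceptions.HTTPError as e:
            if e.response.status_code == 503:
                print('Service unavailable')
            return None
        except requests.exceptions.RequestException:
            # covers connection errors, where no response object exists
            print('[-] Connection error or no result on {} :'.format(url))
            return None
        return page.json()
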
6 changes: 3 additions & 3 deletions modules/pastebinscrape.py
@@ -1,7 +1,7 @@
 #!/usr/bin/env python
 
 import requests, time
-from google import search
+from googlesearch import search
 from lxml import html
 
 class Pastebinscrape():
@@ -59,7 +59,7 @@ def run(self, args, lookup, reportDir, apiKeyDir):
                 pasteUrlFile.writelines(u+'\n')
                 paste_scrape_results.append(u+'\n')
             except:
-                print ('[-] Error opening ' + u +':')
+                print('[-] Error opening ' + u +':')
                 paste_scrape_results.append('Error opening {}'.format(u))
                 continue
 
@@ -68,7 +68,7 @@ def run(self, args, lookup, reportDir, apiKeyDir):
 
             #if verbose spit out url, search term and domain searched
             if args.verbose is True:
-                print ('[+] Looking for instances of {} and {} in {}'.format(d,l,u))
+                print('[+] Looking for instances of {} and {} in {}'.format(d,l,u))
             #grab raw paste data from the textarea
             rawPasteData = tree.xpath('//textarea[@class="paste_code"]/text()')
 
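
A minimal sketch of the raw-paste extraction this module performs; the textarea XPath is the one shown above, everything else is illustrative:

    import requests
    from lxml import html

    def scrape_paste(url):
        try:
            page = requests.get(url, timeout=10)
        except requests.exceptions.RequestException:
            print('[-] Error opening ' + url + ':')
            return None
        tree = html.fromstring(page.content)
        # pastebin renders the raw paste inside <textarea class="paste_code">
        raw = tree.xpath('//textarea[@class="paste_code"]/text()')
        return ''.join(raw)
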
4 changes: 2 additions & 2 deletions modules/reportgen.py
@@ -108,7 +108,7 @@ def run(self, \
 
         instrText = OxmlElement('w:instrText')
         instrText.set(qn('xml:space'), 'preserve') # sets attribute on element
-        instrText.text = 'TOC \o "1-3" \h \z \u' # change 1-3 depending on heading levels you need
+        instrText.text = 'TOC \\o "1-3" \\h \\z \\u' # change 1-3 depending on heading levels you need
 
         fldChar2 = OxmlElement('w:fldChar')
         fldChar2.set(qn('w:fldCharType'), 'separate')
@@ -290,7 +290,7 @@ def run(self, \
             font.name = 'Arial'
             font.size = Pt(10)
         except:
-            print ('probably an encoding error...')
+            print('probably an encoding error...')
             continue
 
         print('[+] Writing file: ./reports/{}/OSINT_{}_.docx'.format(l, l))
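
The first hunk above is the usual python-docx workaround for a table of contents: the library exposes no TOC API, so the raw Word field (begin / instrText / separate / end) is spliced into a run. A sketch of the complete pattern, using a raw string to sidestep the backslash escapes the commit fixes:

    from docx import Document
    from docx.oxml import OxmlElement
    from docx.oxml.ns import qn

    def add_toc(doc):
        run = doc.add_paragraph().add_run()

        fldChar1 = OxmlElement('w:fldChar')            # field start
        fldChar1.set(qn('w:fldCharType'), 'begin')

        instrText = OxmlElement('w:instrText')
        instrText.set(qn('xml:space'), 'preserve')
        instrText.text = r'TOC \o "1-3" \h \z \u'      # raw string: no escaping needed

        fldChar2 = OxmlElement('w:fldChar')            # separator before field result
        fldChar2.set(qn('w:fldCharType'), 'separate')

        fldChar3 = OxmlElement('w:fldChar')            # field end
        fldChar3.set(qn('w:fldCharType'), 'end')

        for el in (fldChar1, instrText, fldChar2, fldChar3):
            run._r.append(el)

    doc = Document()
    add_toc(doc)
    doc.save('toc_demo.docx')

python-docx cannot render field results, so Word shows the TOC only after the field is updated (Ctrl+A, then F9).
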
20 changes: 10 additions & 10 deletions modules/webscrape.py
@@ -33,7 +33,7 @@ def run(self, args, lookup, reportDir, apiKeyDir):
         for i,l in enumerate(lookup):
             scrapeFile=open(reportDir+l+'/'+l+'_scrape.txt','w')
 
-            print ('[+] Scraping sites using %s' % l)
+            print('[+] Scraping sites using %s' % l)
             #http://www.indeed.com/jobs?as_and=ibm.com&as_phr=&as_any=&as_not=&as_ttl=&as_cmp=&jt=all&st=&salary=&radius=25&fromage=any&limit=500&sort=date&psf=advsrch
             #init list and insert domain with tld stripped
             #insert lookup value into static urls
@@ -50,21 +50,21 @@ def run(self, args, lookup, reportDir, apiKeyDir):
             for name,url in scrapeUrls.items():
                 #indeed matches jobs. yeah yeah it doesn't use their api yet
                 if name == 'indeed':
-                    if args.verbose is True:print '[+] Searching job postings on indeed.com for %s:' % l.split('.')[0]
+                    if args.verbose is True:print('[+] Searching job postings on indeed.com for %s:' % l.split('.')[0])
 
                     #http://docs.python-guide.org/en/latest/scenarios/scrape/
                     try:
                         ipage = requests.get(url, headers = userAgent)
                     except Exception as e:
-                        print ('[-] Scraping error on %s: %s' %(url, e))
+                        print('[-] Scraping error on %s: %s' %(url, e))
                         continue
 
                     #build html tree
                     itree = html.fromstring(ipage.content)
 
                     #count jobs
                     jobCount = itree.xpath('//div[@id="searchCount"]/text()')
-                    print '[+] '+str(''.join(jobCount)) + ' Jobs posted on indeed.com that match %s:' % (l.split('.')[0])
+                    print('[+] '+str(''.join(jobCount)) + ' Jobs posted on indeed.com that match %s:' % (l.split('.')[0]))
                     jobTitle = itree.xpath('//a[@data-tn-element="jobTitle"]/text()')
                     self.indeedResult.append('\n[+] Job postings on indeed.com that match %s \n\n' % l.split('.')[0])
                     for t in jobTitle:
@@ -74,13 +74,13 @@ def run(self, args, lookup, reportDir, apiKeyDir):
                 #https://developer.github.com/v3/search/
                 #http://docs.python-guide.org/en/latest/scenarios/json/
                 if name == 'github':
-                    if args.verbose is True:print '[+] Searching repository names on Github for %s' % (l.split('.')[0])
+                    if args.verbose is True:print('[+] Searching repository names on Github for %s' % (l.split('.')[0]))
 
                     #http://docs.python-guide.org/en/latest/scenarios/scrape/
                     try:
                         gpage = requests.get(url, headers = userAgent)
                     except Exception as e:
-                        print ('[-] Scraping error on %s: %s' %(url, e))
+                        print('[-] Scraping error on %s: %s' %(url, e))
                         continue
 
                     #read json response
@@ -93,7 +93,7 @@ def run(self, args, lookup, reportDir, apiKeyDir):
 
                 if name == 'virustotal':
                     if not os.path.exists(apiKeyDir + 'virus_total.key'):
-                        print '[-] Missing %svirus_total.key' % apiKeyDir
+                        print('[-] Missing %svirus_total.key' % apiKeyDir)
 
                         vtApiKey=input("Please provide an API Key: ")
 
@@ -109,7 +109,7 @@ def run(self, args, lookup, reportDir, apiKeyDir):
                             for k in apiKeyFile:
                                 vtApiKey = k
                     except:
-                        print ('[-] Error opening %svirus_total.key key file, skipping. ' % apiKeyDir)
+                        print('[-] Error opening %svirus_total.key key file, skipping. ' % apiKeyDir)
                         continue
 
 
@@ -152,7 +152,7 @@ def run(self, args, lookup, reportDir, apiKeyDir):
 
         #verbosity logic
         if args.verbose is True:
-            for gr in self.githubResult: print (''.join(gr.strip('\n')))
-            for ir in self.indeedResult: print (''.join(ir.strip('\n')))
+            for gr in self.githubResult: print(''.join(gr.strip('\n')))
+            for ir in self.indeedResult: print(''.join(ir.strip('\n')))
 
         return self.scrapeResult
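
The github branch reads a JSON search response rather than scraping HTML; a hedged sketch of that call (the /search/repositories endpoint and items[].full_name fields are from the GitHub v3 API linked in the module's comments; the rest is illustrative):

    import requests

    def github_repos(term):
        # https://developer.github.com/v3/search/
        url = 'https://api.github.com/search/repositories?q=%s' % term
        try:
            gpage = requests.get(url, headers={'User-Agent': 'osint-script'})
            gpage.raise_for_status()
        except requests.exceptions.RequestException as e:
            print('[-] Scraping error on %s: %s' % (url, e))
            return []
        # read the JSON response and keep just the repository names
        items = gpage.json().get('items', [])
        return [repo['full_name'] for repo in items]
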
4 changes: 2 additions & 2 deletions modules/whois.py
@@ -10,7 +10,7 @@ def run(self, args, lookup, reportDir):
 
         #iterate the index and values of the lookup list
         for i, l in enumerate(lookup):
-            print ('[+] Performing whois query %s for %s' % (str(i + 1), l))
+            print('[+] Performing whois query %s for %s' % (str(i + 1), l))
 
             whoisFile=open(reportDir+l+'/'+l+'_whois.txt','w')
 
@@ -33,7 +33,7 @@ def run(self, args, lookup, reportDir):
 
         #verbosity logic
         if args.verbose is True:
-            for w in whoisResult: print (''.join(w))
+            for w in whoisResult: print(''.join(w))
 
         return whoisResult

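The whois module follows the same subprocess shape as dnsquery.py; a minimal sketch with text-mode output so the result splits cleanly under Python 3 (paths mirror the diff, structure is illustrative):

    import subprocess

    def whois_query(target, report_dir):
        out = subprocess.Popen(['whois', target],
                               stdout=subprocess.PIPE,
                               universal_newlines=True).communicate()[0]
        # one report file per target, matching the module's naming scheme
        with open(report_dir + target + '/' + target + '_whois.txt', 'w') as f:
            f.write(out)
        return out.split('\n')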