Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Update of InSpy to python3 #39

Open
wants to merge 20 commits into
base: master
Choose a base branch
from
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
39 changes: 22 additions & 17 deletions InSpy.py
Original file line number Diff line number Diff line change
@@ -1,15 +1,22 @@
#!/usr/bin/env python2
#!/usr/bin/env python3
# Copyright (c) 2018 Jonathan Broche (@LeapSecurity)

import argparse, sys, os
from lib.http import *
from lib.workbench import *
from lib.soup import *
from lib.export import *
from lib.logger import *

hunterapi = "" #insert hunterio api key here

parser = argparse.ArgumentParser(description='InSpy - A LinkedIn enumeration tool by Jonathan Broche (@LeapSecurity)', version="3.0.1")
if not hunterapi:
print("[+] Your hunter api key is Empty")
print("[+] Hunter Api Key is required please fill the hunter api key opening the InSpy.py File")
sys.exit(404)

Version ="4.0"

parser = argparse.ArgumentParser(description='InSpy - A LinkedIn enumeration tool by Hari Kiran(TheCyberMonster)\n A forked project of InSpy 3.0')
parser.add_argument('company', help="Company name to use for tasks.")
parser.add_argument('--domain', help="Company domain to use for searching.")
parser.add_argument('--email', help="Email format to create email addresses with. [Accepted Formats: [email protected], [email protected], [email protected], [email protected], [email protected], [email protected], [email protected], [email protected]]")
Expand All @@ -26,16 +33,14 @@

args = parser.parse_args()
start_logger(args.company)
hunterapi = "" #insert hunterio api key here

email = args.email
domain = args.domain


print "\nInSpy {}".format(parser.version)
print("\nInSpy {}".format(Version))

try:
if domain and not email: #search hunterio for email format
if domain and not email: #search hunter.io for email format
email = get_email_format(args.domain, hunterapi)
if email and not domain: #search clearbit for domain
domain = get_domain(args.company)
Expand All @@ -48,7 +53,7 @@

email = email.replace("{", "").replace("}","")

print "\nDomain: {}, Email Format: {}\n".format(domain, email)
print("Domain: {}, Email Format: {}".format(domain, email))

employees = {}

Expand All @@ -58,25 +63,25 @@
if args.company.lower() in title.lower():
if not name in employees:
employees[name] = title
print "\n{} Employees identified".format(len(employees.keys()))
print("{} Employees identified".format(len(employees.keys())))
else:
print os.path.abspath(args.titles)
print "No such file or directory: '{}'".format(args.titles)

print(os.path.abspath(args.titles))
print("No such file or directory: '{}'".format(args.titles))
emails=[]
if employees:
#output employees
for name, title in employees.iteritems():
print "{} {}".format(name, title[:50].replace('&', '&'))
for name, title in employees.items():
print("{} {}".format(name, title[:50].replace('&', '&')))

#craft emails
emails = create_emails(employees, domain, email)


if emails:
#output emails
print "\nEmails crafted\n".format(len(emails.keys()))
print("Emails crafted".format(len(emails.keys())))
for name, email in emails.items():
print email
print(email)

#export results
if args.html:
Expand All @@ -88,4 +93,4 @@
if args.csv:
output("csv", args.csv, args.company, domain, employees, emails)
except (KeyboardInterrupt, SystemExit):
print "\nTerminated script.\n"
print("Terminated script.")
2 changes: 1 addition & 1 deletion README.md
Original file line number Diff line number Diff line change
Expand Up @@ -10,7 +10,7 @@ Version 3.0 introduces the automation of domain and email retrieval in addition
## Installation
-----

Run `pip install -r requirements.txt` within the cloned InSpy directory.
Run `pip3 install -r requirements.txt` within the cloned InSpy directory.

Obtain an API key from [HunterIO](https://hunter.io/) and insert it into the `hunterapi` variable within InSpy.py (line 29).

Expand Down
Binary file removed lib/__init__.pyc
Binary file not shown.
13 changes: 7 additions & 6 deletions lib/export.py
Original file line number Diff line number Diff line change
@@ -1,4 +1,5 @@
import json, os, xml.dom.minidom, time, csv
import json, os, time, csv
import xml.dom.minidom
from xml.etree.ElementTree import Element, SubElement, tostring

def output(format, file, company, domain, employees, emails):
Expand All @@ -17,14 +18,14 @@ def ocsv(filename, company, domain, employees, emails):
fieldnames = ["Employee Name", "Title", "Email"]
writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
writer.writeheader()
for name, title in employees.iteritems():
for name, title in employees.items():
writer.writerow({"Employee Name": name, "Title": title.replace('&', '&'), "Email": emails[name]})

#JSON
def ojson(file, company, domain, employees, emails):
employee_json = []

for name, title in employees.iteritems():
for name, title in employees.items():
employee_json.append({"name": name, "title": title.replace('&', '&'), "email": emails[name]})

full_json = {
Expand All @@ -49,7 +50,7 @@ def oxml(file, company, domain, employees, emails):

echild = SubElement(top, 'Employees')

for name, title in employees.iteritems():
for name, title in employees.items():

employee = SubElement(echild, "Employee")
#name
Expand All @@ -71,7 +72,7 @@ def oxml(file, company, domain, employees, emails):
def ohtml(file, company, domain, employees, emails):
employee_html = []

for name, title in employees.iteritems():
for name, title in employees.items():
employee_html.append("<tr><td>{name}</td><td>{title}</td><td>{email}</td></tr>".format(name=name, title=title, email=emails[name]))

page = """
Expand All @@ -96,4 +97,4 @@ def ohtml(file, company, domain, employees, emails):
""".format(company=company, time=time.strftime("%Y/%m/%d %H:%M:%S"), html=employee_html)

with open(os.path.abspath(file), 'w') as f:
f.write(page)
f.write(page)
Binary file removed lib/export.pyc
Binary file not shown.
16 changes: 7 additions & 9 deletions lib/http.py
Original file line number Diff line number Diff line change
@@ -1,15 +1,13 @@

import requests, random
from logger import *
#requests.packages.urllib3.disable_warnings()
import logging

def random_header():
    """Build HTTP request headers with a randomly chosen User-Agent.

    Returns:
        dict: headers containing a 'User-Agent' picked at random from a
        fixed pool of common browser agent strings (to reduce the chance
        of request fingerprinting/blocking) and a standard 'Accept' value.
    """
    # Pool of real-world desktop browser User-Agent strings.
    agents = [
        'Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:25.0) Gecko/20100101 Firefox/25.0',
        'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/54.0.2840.98 Safari/537.36',
        'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_6_8) AppleWebKit/537.13+ (KHTML, like Gecko) Version/5.1.7 Safari/534.57.2',
        'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.106 Safari/537.36 OPR/38.0.2220.41',
        'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/54.0.2840.99 Safari/537.36']

    return {'User-Agent': random.choice(agents),'Accept':'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8'}

Expand All @@ -27,7 +25,7 @@ def http_request(url):
return {"status": r.status_code, "response": ""}

except requests.exceptions.Timeout as e:
print "Error: Timed out."
print("Error: Timed out.")
logging.error(e)
except Exception as e:
logging.error(e)
logging.error(e)
Binary file removed lib/http.pyc
Binary file not shown.
3 changes: 2 additions & 1 deletion lib/logger.py
Original file line number Diff line number Diff line change
@@ -1,4 +1,5 @@
import logging, sys, time

import logging,time

time_format = time.strftime("%Y-%m-%d %H:%M:%S")

Expand Down
Binary file removed lib/logger.pyc
Binary file not shown.
7 changes: 4 additions & 3 deletions lib/soup.py
Original file line number Diff line number Diff line change
@@ -1,4 +1,5 @@
import BeautifulSoup, json

from bs4 import BeautifulSoup

def soupify(response):
try:
Expand All @@ -7,7 +8,7 @@ def soupify(response):
except (AttributeError, TypeError) as e:
pass
except Exception as e:
print "Error: {}".format(e)
print("Error: {}".format(e))

def get_employees(soup):
try:
Expand All @@ -21,5 +22,5 @@ def get_employees(soup):
except (AttributeError, TypeError) as e:
pass
except Exception as e:
print "Error: {}".format(e)
print("Error: {}".format(e))

Binary file removed lib/soup.pyc
Binary file not shown.
34 changes: 17 additions & 17 deletions lib/workbench.py
Original file line number Diff line number Diff line change
@@ -1,35 +1,34 @@
import re, json, HTMLParser, unicodedata
from http import *
from logger import *

from html.parser import HTMLParser
import logging
from lib.http import http_request

def get_domain(company):  #Clearbit API - clearbit.com
    """Resolve a company name to its web domain via the Clearbit
    autocomplete API.

    Args:
        company (str): company name to look up.

    Returns:
        str or None: the chosen domain, or None when no domain could be
        identified (an error is logged and the user is told to supply
        --domain manually).
    """
    clearbit_request = "https://autocomplete.clearbit.com/v1/companies/suggest?query={}".format(company)
    clearbit_results = []
    domain = ""

    r = http_request(clearbit_request)

    # NOTE(review): assumes http_request returns {"status": int, "response": list} — confirm against lib/http.py
    if len(r["response"]) >= 1:
        for element in r["response"]:
            clearbit_results.append({"name": element['name'], "domain": element['domain']})

    if len(clearbit_results) == 1:  #return domain if one result
        domain = clearbit_results[0]["domain"]
    elif len(clearbit_results) > 1:  #prompt user if multiple domains identified
        print("Multiple domains identified for company. Which one is the target?")
        for index, result in enumerate(clearbit_results):
            print("{}) Name: {}, Domain: {}".format(index, result["name"], result["domain"]))
        # int() is required: input() returns str in Python 3, and the
        # value is used as a list index below.
        choice = int(input("Select using S.No \n (Ex: select-> 1 )\n select-> "))
        domain = clearbit_results[choice]["domain"]

    if domain:
        return domain
    else:
        logging.error("Clearbit API - HTTP {} Error".format(r["status"]))
        print("InSpy could not identify the domain name. Use --domain.")


def get_email_format(domain, apikey): #HunterIO API - hunter.io
Expand All @@ -39,7 +38,7 @@ def get_email_format(domain, apikey): #HunterIO API - hunter.io
r = http_request(hunter_request)

if r["status"] == 200:
for k,v in r["response"].iteritems():
for k,v in r["response"].items():
if k == 'data':
if v['pattern']:
emailformat = v['pattern']
Expand All @@ -50,7 +49,7 @@ def get_email_format(domain, apikey): #HunterIO API - hunter.io
if emailformat:
return emailformat
else:
print "InSpy could not identify the email format. Use --email."
print("InSpy could not identify the email format. Use --email.")

def search_linkedin(company, file):
titles = []
Expand All @@ -74,6 +73,7 @@ def search_linkedin(company, file):


#craft emails

def create_emails(employees, domain, eformat):
hparser=HTMLParser.HTMLParser()
emails = {}
Expand Down Expand Up @@ -113,4 +113,4 @@ def format_email(eformat, first, last):
}
return formats[eformat]
except Exception as e:
print e
print(e)
Binary file removed lib/workbench.pyc
Binary file not shown.
4 changes: 2 additions & 2 deletions requirements.txt
Original file line number Diff line number Diff line change
@@ -1,2 +1,2 @@
requests==2.20.1
BeautifulSoup==3.2.1
requests
beautifulsoup4