Commit: Adding the complete toolkit
javiln8 committed Jun 19, 2020
1 parent 985c79d commit c741f33
Showing 27 changed files with 1,017 additions and 0 deletions.
24 changes: 24 additions & 0 deletions README.md
@@ -0,0 +1,24 @@
# Wi-Fi Spartan ⚔️
Smart pentesting toolkit for modern WPA/WPA2 networks ⚔️📡

![alt text](https://github.com/javiln8/wifi_spartan/blob/master/images/logo.png?raw=true)

### Requirements
The toolkit uses `bettercap` as its backend framework for attacking networks. It can be installed with any package manager.
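
For example, assuming Homebrew on macOS or a Kali/Debian-based distribution (package availability may vary by distribution):

```sh
# macOS (Homebrew)
brew install bettercap

# Kali / Debian-based systems
sudo apt install bettercap
```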

### Usage
Run `python3 wifi_spartan.py --help` to see all available commands and options. To see all available options of a specific module, run `python3 wifi_spartan.py <module> --help`. A short example session is shown after the module list below.

Wi-Fi Spartan modules:
- [x] `scan`: wireless spectrum scanner
- [x] `deauth`: deauthentication attack to attempt to capture the 4-way handshake
- [x] `pmkid`: PMKID client-less attack
- [x] `spoof scan`: local network host scanner
- [x] `spoof spy`: MitM attack with ARP spoofing
- [x] `automata`: wardriving automation with deep reinforcement learning techniques
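
For example, a typical session might look like this (module-specific options vary; check each module's `--help`):

```sh
# List all modules and global options
python3 wifi_spartan.py --help

# Scan the wireless spectrum
python3 wifi_spartan.py scan

# Show the options of the deauthentication module
python3 wifi_spartan.py deauth --help
```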


### Future implementations
- [ ] `jam`: Wi-Fi jamming with packet flooding
- [ ] `rogue`: Evil Twin attack
- [ ] `crack`: dictionary attack to attempt to crack the PSK
5 changes: 5 additions & 0 deletions dns.spoof.hosts
@@ -0,0 +1,5 @@
1.2.3.4 facebook.com
1.2.3.5 linkedin.com
1.2.3.6 netflix.com
1.2.4.6 www.google.com
1.2.5.7 www.reddit.com
Binary file added images/logo.png
1 change: 1 addition & 0 deletions key_material/note.txt
@@ -0,0 +1 @@
All key material captured with WiFi Spartan will be stored here, in pcap format.
9 changes: 9 additions & 0 deletions parameters.yaml
@@ -0,0 +1,9 @@
# Wireless spectrum parameters
min_rssi: -200
ap_ttl: 120
station_ttl: 300

# Recon and channel hopping parameters
recon_time: 30
hop_recon_time: 10
min_recon_time: 5
61 changes: 61 additions & 0 deletions smart/__init__.py
@@ -0,0 +1,61 @@
from stable_baselines import A2C
from stable_baselines.common.policies import MlpLstmPolicy
from stable_baselines.common.vec_env import DummyVecEnv
from tensorflow.python.util import deprecation
import logging

from spartan.smart.learn import Environment
import spartan.smart.state

import os
import numpy as np

# Configure AI logging and disable noisy third-party logs
logging.basicConfig(filename='spartan/smart/ai.log', level=logging.DEBUG)
logging.getLogger("requests").setLevel(logging.CRITICAL)
logging.getLogger("urllib3").setLevel(logging.CRITICAL)
logging.getLogger("tensorflow").setLevel(logging.CRITICAL)
logging.getLogger("gym").setLevel(logging.CRITICAL)

# A2C parameters
hyperparameters = {
    'gamma': 0.99,
    'n_steps': 1,
    'vf_coef': 0.25,
    'ent_coef': 0.01,
    'max_grad_norm': 0.5,
    'learning_rate': 0.001,
    'alpha': 0.99,
    'epsilon': 0.00001,
    'verbose': 1,
    'lr_schedule': "constant",
}

MODEL_PATH = 'spartan/smart/brain.nn'
TENSORBOARD_PATH = './spartan/smart/tensorboard'

# Load the A2C model
def load_model(parameters, agent, state):
    env = Environment(agent, state)
    env = DummyVecEnv([lambda: env])
    logging.info("[smart] Gym environment generated...")

    a2c = A2C(MlpLstmPolicy, env, **hyperparameters, tensorboard_log=TENSORBOARD_PATH)
    logging.info("[smart] A2C created...")

    if os.path.exists(MODEL_PATH):
        # A2C.load is a classmethod that returns the restored model, so keep its return value
        a2c = A2C.load(MODEL_PATH, env)
        logging.info("[smart] A2C model loaded...")

    return a2c

# Vectorize the state statistics into a normalized feature vector
def featurize(state):
    total_interactions = state['deauths'] + 1e-20

    return np.concatenate((
        [state['misses'] / total_interactions],
        #[state['new_aps'] / total_interactions],
        [state['hops'] / 140],
        [state['deauths'] / total_interactions],
        [state['handshakes'] / total_interactions],
    ))
Binary file added smart/__pycache__/__init__.cpython-37.pyc
Binary file added smart/__pycache__/learn.cpython-37.pyc
Binary file added smart/__pycache__/state.cpython-37.pyc
183 changes: 183 additions & 0 deletions smart/learn.py
@@ -0,0 +1,183 @@
from spartan.smart import state
from spartan.utils import post
import spartan.smart

import gym
from gym import spaces
import numpy as np
import logging

# Parameters to optimize while learning
class Parameter(object):
    def __init__(self, name, value=0.0, min_value=0, max_value=2, channel=None, trainable=True):
        self.name = name
        self.channel = channel
        self.value = value
        self.min_value = min_value
        self.max_value = max_value + 1

        if self.min_value < 0:
            self.scale_factor = abs(self.min_value)
        elif self.min_value > 0:
            self.scale_factor = -self.min_value
        else:
            self.scale_factor = 0

    # Size of the parameter space
    def space_size(self):
        return self.max_value + self.scale_factor

    # Convert a policy component back into the parameter value
    def parameter_to_value(self, policy):
        self.value = policy - self.scale_factor
        return int(self.value)

# OpenAI custom Gym Environment
class Environment(gym.Env):
    metadata = {'render.modes': ['human']}
    parameters = [
        Parameter('min_rssi', min_value=-200, max_value=-50),
        Parameter('ap_ttl', min_value=30, max_value=600),
        Parameter('station_ttl', min_value=60, max_value=300),
        Parameter('recon_time', min_value=10, max_value=60),
        Parameter('hop_recon_time', min_value=10, max_value=60),
        Parameter('min_recon_time', min_value=5, max_value=30),
    ]

    def __init__(self, agent, state):
        super(Environment, self).__init__()
        self.agent = agent
        self.state = state
        self.epoch_number = 0
        self.wifi_channels = 140
        self.observation_shape = (1, 4)  # 4 features: misses, hops, deauths, handshakes ('new_aps' is currently disabled)
        self.reward_range = (-.7, 1.02)
        self.cache_state = None
        self.cache_render = None

        for channel in range(self.wifi_channels):
            Environment.parameters += [Parameter('channel_' + str(channel), min_value=0, max_value=1, channel=channel + 1)]

        # OpenAI Gym spaces
        self.action_space = spaces.MultiDiscrete([p.space_size() for p in Environment.parameters])
        self.observation_space = spaces.Box(low=0, high=1, shape=self.observation_shape, dtype=np.float32)

        self.last = {
            'reward': 0.0,
            'policy': None,
            'parameters': {},
            'state': None,
            'vectorized_state': None
        }

    # Update the model parameters given an optimization policy
    @staticmethod
    def update_parameters(policy):
        parameters = {}
        channels = []

        assert len(Environment.parameters) == len(policy)

        for i in range(len(policy)):
            parameter = Environment.parameters[i]
            if 'channel' not in parameter.name:
                parameters[parameter.name] = parameter.parameter_to_value(policy[i])
            else:
                has_channel = parameter.parameter_to_value(policy[i])
                channel = parameter.channel
                if has_channel:
                    channels.append(channel)

        parameters['channels'] = channels

        return parameters

    # Perform an iteration of the agent-environment loop
    def step(self, policy):
        new_parameters = Environment.update_parameters(policy)
        self.last['policy'] = policy
        self.last['parameters'] = new_parameters

        # Agent performs the action
        self.agent.apply_policy(new_parameters)
        self.epoch_number += 1

        while True:
            # Wait until new state data is available
            if self.state.state_data and self.cache_state != self.state.state:
                logging.info('[smart] State data: ' + str(self.state.state_data))

                self.last['reward'] = self.state.state_data['reward']
                self.last['state'] = self.state.state_data
                self.last['vectorized_state'] = spartan.smart.featurize(self.last['state'])

                self.agent.model.env.render()
                self.agent.save_model()

                self.cache_state = self.state.state

                return self.last['vectorized_state'], self.last['reward'], False, {}

    # Reset the environment
    def reset(self):
        logging.info("[smart] Resetting the environment...")
        self.epoch_number = 0
        if self.state.state_data:
            self.last['state'] = self.state.state_data
            self.last['vectorized_state'] = spartan.smart.featurize(self.state.state_data)

        return self.last['vectorized_state']

    # Output environment data
    def render(self, mode='human', close=False, force=False):
        if self.cache_render == self.epoch_number:
            return

        self.cache_render = self.epoch_number

        logging.info('[smart] Training epoch: ' + str(self.epoch_number))
        logging.info('[smart] Reward: ' + str(self.last['reward']))

# Train the AI using A2C policy optimization
class Trainer(object):
    def __init__(self, parameters):
        self.parameters = parameters
        self.model = None

    def train(self):
        epochs_per_state = 50

        # NOTE: self.state (a spartan.smart.state.State instance) is assumed to be attached
        # to the trainer before train() is called; it is not set in __init__.
        self.model = spartan.smart.load_model(self.parameters, self, self.state)

        observations = None
        while True:
            self.model.env.render()
            logging.info('[smart] Learning for ' + str(epochs_per_state) + ' epochs.')
            self.model.learn(total_timesteps=epochs_per_state, callback=self.model.env.render())

            if observations is None:
                observations = self.model.env.reset()

            action, _ = self.model.predict(observations)
            observations, _, _, _ = self.model.env.step(action)

    # Save the A2C model
    def save_model(self):
        logging.info('[smart] Saving model')
        self.model.save(spartan.smart.MODEL_PATH)

    # Apply new parameters
    def apply_policy(self, new_parameters):
        logging.info('[smart] Updating parameters with the new policy.')
        for name, new_value in new_parameters.items():
            if name in self.parameters:
                current_value = self.parameters[name]

                # Update the parameter value
                if current_value != new_value:
                    self.parameters[name] = new_value
                    logging.info('[smart] Updating ' + str(name) + ': ' + str(new_value))

        post('set wifi.ap.ttl ' + str(self.parameters['ap_ttl']))
        post('set wifi.sta.ttl ' + str(self.parameters['station_ttl']))
        post('set wifi.rssi.min ' + str(self.parameters['min_rssi']))
70 changes: 70 additions & 0 deletions smart/state.py
@@ -0,0 +1,70 @@
# Reward function of the reinforcement learning process
class RewardFunction(object):
    def __call__(self, total_states, state_data):
        total_interactions = max(state_data['deauths'], state_data['handshakes']) + 1e-20
        total_channels = 140

        shakes = state_data['handshakes'] / total_interactions
        hops = 0.1 * (state_data['hops'] / total_channels)
        misses = -0.3 * (state_data['misses'] / total_interactions)
        #new_aps = +0.3 * (state_data['new_aps'] / total_interactions)

        return shakes + hops + misses  #+ new_aps

# Information about each wardrive state (one state = one loop of the wardriving session)
class State(object):
    def __init__(self, parameters):
        self.state = 0
        self.parameters = parameters

        self.did_deauth = False
        self.deauths = 0
        self.misses = 0
        self.new_aps = 0
        self.handshakes = 0
        self.hops = 0

        self.reward = RewardFunction()
        self.state_data = {}

    # Track useful state statistics
    def track(self, deauth=False, handshake=False, hop=False, miss=False, new=False, increment=1):
        if deauth:
            self.deauths += increment
            self.did_deauth = True
        if miss:
            self.misses += increment
        if hop:
            self.hops += increment
        if handshake:
            self.handshakes += increment
        if new:
            self.new_aps += increment

    # Rotate the state
    def next_state(self):
        self.state_data = {
            'hops': self.hops,
            'deauths': self.deauths,
            'handshakes': self.handshakes,
            'misses': self.misses,
            #'new_aps': self.new_aps,
        }

        self.state_data['reward'] = self.reward(self.state + 1, self.state_data)

        print('\nSTATE: ' + str(self.state))
        print('Number of channel hops: ' + str(self.hops))
        print('Number of deauths: ' + str(self.deauths))
        print('Number of captured handshakes: ' + str(self.handshakes))
        print('Number of missed APs: ' + str(self.misses))
        print('Number of discovered new APs: ' + str(self.new_aps))
        print('Reward: ' + str(self.state_data['reward']) + '\n')

        self.state += 1
        self.did_deauth = False
        self.deauths = 0
        self.misses = 0
        self.new_aps = 0
        self.handshakes = 0
        self.hops = 0
Empty file added spartan/__init__.py
Binary file added spartan/__pycache__/__init__.cpython-37.pyc
Binary file added spartan/__pycache__/automata.cpython-37.pyc
Binary file added spartan/__pycache__/capture.cpython-37.pyc
Binary file added spartan/__pycache__/crack.cpython-37.pyc
Binary file added spartan/__pycache__/scan.cpython-37.pyc
Binary file added spartan/__pycache__/spoof.cpython-37.pyc
Binary file added spartan/__pycache__/utils.cpython-37.pyc