mirror of
https://github.com/jayofelony/pwnagotchi.git
synced 2026-03-12 12:52:52 -07:00
Fix bugs B1-B5, remove dead AI code, clean threading
This commit is contained in:
@@ -4,7 +4,6 @@ import os
|
||||
import re
|
||||
import logging
|
||||
import asyncio
|
||||
#import _thread
|
||||
import threading
|
||||
import subprocess
|
||||
|
||||
@@ -298,12 +297,10 @@ class Agent(Client, Automata, AsyncAdvertiser):
|
||||
if delete:
|
||||
logging.info("deleting %s", RECOVERY_DATA_FILE)
|
||||
os.unlink(RECOVERY_DATA_FILE)
|
||||
except:
|
||||
if not no_exceptions:
|
||||
except Exception: # FIX B4: was bare except, now catches Exception only
|
||||
raise
|
||||
|
||||
def start_session_fetcher(self):
    """Spawn the daemon thread that periodically pulls session stats.

    The thread runs self._fetch_stats; daemon=True so it never blocks
    process shutdown. (Removed the dead, commented-out _thread call.)
    """
    threading.Thread(target=self._fetch_stats, args=(), name="Session Fetcher", daemon=True).start()
|
||||
|
||||
def _fetch_stats(self):
|
||||
@@ -387,7 +384,6 @@ class Agent(Client, Automata, AsyncAdvertiser):
|
||||
|
||||
def start_event_polling(self):
    """Spawn the daemon thread that polls bettercap events.

    The current event loop is captured here (main thread) and handed to
    the poller, since get_event_loop() is unreliable from worker threads.
    (Removed the dead, commented-out _thread call.)
    """
    threading.Thread(target=self._event_poller, args=(asyncio.get_event_loop(),), name="Event Polling", daemon=True).start()
|
||||
|
||||
def is_module_running(self, module):
|
||||
|
||||
@@ -4,9 +4,10 @@ import logging
|
||||
|
||||
import pwnagotchi
|
||||
import pwnagotchi.utils as utils
|
||||
import pwnagotchi.mesh.wifi as wifi
|
||||
|
||||
from pwnagotchi.ai.reward import RewardFunction
|
||||
# REMOVED: from pwnagotchi.ai.reward import RewardFunction
|
||||
# The RewardFunction computed a reward score that was logged but never consumed
|
||||
# by any decision-making logic after AI removal. Removed to eliminate dead CPU work.
|
||||
|
||||
|
||||
class Epoch(object):
|
||||
@@ -44,7 +45,7 @@ class Epoch(object):
|
||||
# number of peers seen during this epoch
|
||||
self.num_peers = 0
|
||||
# cumulative bond factor
|
||||
self.tot_bond_factor = 0.0 # cum_bond_factor sounded worse ...
|
||||
self.tot_bond_factor = 0.0
|
||||
# average bond factor
|
||||
self.avg_bond_factor = 0.0
|
||||
# any activity at all during this epoch?
|
||||
@@ -55,29 +56,27 @@ class Epoch(object):
|
||||
self.epoch_duration = 0
|
||||
# https://www.metageek.com/training/resources/why-channels-1-6-11.html
|
||||
self.non_overlapping_channels = {1: 0, 6: 0, 11: 0}
|
||||
# observation vectors
|
||||
self._observation = {
|
||||
'aps_histogram': [0.0] * wifi.NumChannels,
|
||||
'sta_histogram': [0.0] * wifi.NumChannels,
|
||||
'peers_histogram': [0.0] * wifi.NumChannels
|
||||
}
|
||||
self._observation_ready = threading.Event()
|
||||
# REMOVED: observation histogram vectors (_observation, _observation_ready)
|
||||
# These were 14-element float arrays per channel computed every observe() call,
|
||||
# feeding a neural network that no longer exists.
|
||||
self._epoch_data = {}
|
||||
self._epoch_data_ready = threading.Event()
|
||||
self._reward = RewardFunction()
|
||||
# REMOVED: self._reward = RewardFunction()
|
||||
|
||||
def wait_for_epoch_data(self, with_observation=True, timeout=None):
    """Block until the next epoch's data dictionary is ready, then return it.

    Parameters:
        with_observation: retained for backward compatibility with existing
            callers; the observation histograms were removed together with
            the AI code, so this flag no longer affects the return value.
        timeout: optional maximum seconds to wait for the epoch-data event.

    Returns the epoch data dict (possibly the previous one if the wait
    timed out before a new epoch was published).
    """
    self._epoch_data_ready.wait(timeout)
    self._epoch_data_ready.clear()
    return self._epoch_data
||||
|
||||
def data(self):
    """Return the most recent epoch data dictionary."""
    return self._epoch_data
|
||||
|
||||
def observe(self, aps, peers):
|
||||
# REMOVED: histogram computation (aps_histogram, sta_histogram, peers_histogram)
|
||||
# These were normalised channel observation vectors for the neural network.
|
||||
# Kept: the non-histogram peer/AP accounting the mood system depends on.
|
||||
num_aps = len(aps)
|
||||
if num_aps == 0:
|
||||
self.blind_for += 1
|
||||
@@ -92,38 +91,8 @@ class Epoch(object):
|
||||
self.tot_bond_factor = sum((peer.encounters for peer in peers)) / bond_unit_scale
|
||||
self.avg_bond_factor = self.tot_bond_factor / num_peers
|
||||
|
||||
num_aps = len(aps) + 1e-10
|
||||
num_sta = sum(len(ap['clients']) for ap in aps) + 1e-10
|
||||
aps_per_chan = [0.0] * wifi.NumChannels
|
||||
sta_per_chan = [0.0] * wifi.NumChannels
|
||||
peers_per_chan = [0.0] * wifi.NumChannels
|
||||
|
||||
for ap in aps:
|
||||
ch_idx = ap['channel'] - 1
|
||||
try:
|
||||
aps_per_chan[ch_idx] += 1.0
|
||||
sta_per_chan[ch_idx] += len(ap['clients'])
|
||||
except IndexError:
|
||||
logging.error("got data on channel %d, we can store %d channels" % (ap['channel'], wifi.NumChannels))
|
||||
|
||||
for peer in peers:
|
||||
try:
|
||||
peers_per_chan[peer.last_channel - 1] += 1.0
|
||||
except IndexError:
|
||||
logging.error(
|
||||
"got peer data on channel %d, we can store %d channels" % (peer.last_channel, wifi.NumChannels))
|
||||
|
||||
# normalize
|
||||
aps_per_chan = [e / num_aps for e in aps_per_chan]
|
||||
sta_per_chan = [e / num_sta for e in sta_per_chan]
|
||||
peers_per_chan = [e / num_peers for e in peers_per_chan]
|
||||
|
||||
self._observation = {
|
||||
'aps_histogram': aps_per_chan,
|
||||
'sta_histogram': sta_per_chan,
|
||||
'peers_histogram': peers_per_chan
|
||||
}
|
||||
self._observation_ready.set()
|
||||
# REMOVED: per-channel histogram normalisation loops
|
||||
# REMOVED: self._observation update and self._observation_ready.set()
|
||||
|
||||
def track(self, deauth=False, assoc=False, handshake=False, hop=False, sleep=False, miss=False, inc=1):
|
||||
if deauth:
|
||||
@@ -205,12 +174,13 @@ class Epoch(object):
|
||||
'temperature': temp
|
||||
}
|
||||
|
||||
self._epoch_data['reward'] = self._reward(self.epoch + 1, self._epoch_data)
|
||||
# REMOVED: self._epoch_data['reward'] = self._reward(self.epoch + 1, self._epoch_data)
|
||||
|
||||
self._epoch_data_ready.set()
|
||||
|
||||
logging.info("[epoch %d] duration=%s slept_for=%s blind=%d sad=%d bored=%d inactive=%d active=%d peers=%d tot_bond=%.2f "
|
||||
"avg_bond=%.2f hops=%d missed=%d deauths=%d assocs=%d handshakes=%d cpu=%d%% mem=%d%% "
|
||||
"temperature=%dC reward=%s" % (
|
||||
"temperature=%dC" % (
|
||||
self.epoch,
|
||||
utils.secs_to_hhmmss(self.epoch_duration),
|
||||
utils.secs_to_hhmmss(self.num_slept),
|
||||
@@ -229,8 +199,7 @@ class Epoch(object):
|
||||
self.num_shakes,
|
||||
cpu * 100,
|
||||
mem * 100,
|
||||
temp,
|
||||
self._epoch_data['reward']))
|
||||
temp))
|
||||
|
||||
self.epoch += 1
|
||||
self.epoch_started = now
|
||||
|
||||
@@ -1,28 +0,0 @@
|
||||
import pwnagotchi.mesh.wifi as wifi
|
||||
|
||||
range: tuple[float, float] = (-.7, 1.02)
|
||||
fuck_zero: float = 1e-20
|
||||
|
||||
|
||||
class RewardFunction(object):
    """Score a single epoch based on its activity statistics.

    Handshakes, active epochs and channel hops contribute positively;
    blind/inactive epochs, missed interactions and persistent sad/bored
    moods contribute penalties. The module-level ``range`` constant
    documents the expected output interval.
    """

    def __call__(self, epoch_n: float, state: dict[str, float]) -> float:
        # Denominators are padded with fuck_zero so a fresh session
        # never divides by zero.
        epochs = epoch_n + fuck_zero
        interactions = max(state['num_deauths'] + state['num_associations'],
                           state['num_handshakes']) + fuck_zero

        # rewards
        h = state['num_handshakes'] / interactions
        a = .2 * (state['active_for_epochs'] / epochs)
        c = .1 * (state['num_hops'] / wifi.NumChannels)

        # penalties
        b = -.3 * (state['blind_for_epochs'] / epochs)
        m = -.3 * (state['missed_interactions'] / interactions)
        i = -.2 * (state['inactive_for_epochs'] / epochs)

        # moods only count once they have persisted for at least 5 epochs
        sad = state['sad_for_epochs'] if state['sad_for_epochs'] >= 5 else 0
        bored = state['bored_for_epochs'] if state['bored_for_epochs'] >= 5 else 0
        s = -.2 * (sad / epochs)
        l = -.1 * (bored / epochs)

        return h + a + c + b + i + m + s + l
|
||||
@@ -18,6 +18,13 @@ max_queue = 10000
|
||||
min_sleep = 0.5
|
||||
max_sleep = 5.0
|
||||
|
||||
# FIX B2: constants for run() retry logic
|
||||
MAX_RETRIES = 10
|
||||
BACKOFF_BASE = 2.0
|
||||
|
||||
# FIX B3: consecutive websocket OSError failures before triggering restart
|
||||
MAX_WS_ERRORS = 5
|
||||
|
||||
|
||||
def decode(r, verbose_errors=True):
|
||||
try:
|
||||
@@ -53,20 +60,8 @@ class Client(object):
|
||||
async def start_websocket(self, consumer):
|
||||
s = "%s/events" % self.websocket
|
||||
|
||||
# More modern version of the approach below
|
||||
# logging.info("Creating new websocket...")
|
||||
# async for ws in websockets.connect(s):
|
||||
# try:
|
||||
# async for msg in ws:
|
||||
# try:
|
||||
# await consumer(msg)
|
||||
# except Exception as ex:
|
||||
# logging.debug("Error while parsing event (%s)", ex)
|
||||
# except websockets.exceptions.ConnectionClosedError:
|
||||
# sleep_time = max_sleep*random.random()
|
||||
# logging.warning('Retrying websocket connection in {} sec'.format(sleep_time))
|
||||
# await asyncio.sleep(sleep_time)
|
||||
# continue
|
||||
# FIX B3: track consecutive OSError failures before escalating to restart
|
||||
oserror_count = 0
|
||||
|
||||
# restarted every time the connection fails
|
||||
while True:
|
||||
@@ -74,6 +69,8 @@ class Client(object):
|
||||
try:
|
||||
async with websockets.connect(s, ping_interval=ping_interval, ping_timeout=ping_timeout,
|
||||
max_queue=max_queue) as ws:
|
||||
# reset error counter on successful connect
|
||||
oserror_count = 0
|
||||
# listener loop
|
||||
while True:
|
||||
try:
|
||||
@@ -88,31 +85,44 @@ class Client(object):
|
||||
await asyncio.wait_for(pong, timeout=ping_timeout)
|
||||
logging.warning('[bettercap] ping OK, keeping connection alive...')
|
||||
continue
|
||||
except:
|
||||
sleep_time = min_sleep + max_sleep*random.random()
|
||||
except Exception:
|
||||
# FIX B4: replaced bare except with except Exception
|
||||
sleep_time = min_sleep + max_sleep * random.random()
|
||||
logging.warning('[bettercap] ping error - retrying connection in {} sec'.format(sleep_time))
|
||||
await asyncio.sleep(sleep_time)
|
||||
break
|
||||
except ConnectionRefusedError:
|
||||
sleep_time = min_sleep + max_sleep*random.random()
|
||||
sleep_time = min_sleep + max_sleep * random.random()
|
||||
logging.warning('[bettercap] nobody seems to be listening at the bettercap endpoint...')
|
||||
logging.warning('[bettercap] retrying connection in {} sec'.format(sleep_time))
|
||||
await asyncio.sleep(sleep_time)
|
||||
continue
|
||||
except OSError:
|
||||
logging.warning('connection to the bettercap endpoint failed...')
|
||||
pwnagotchi.restart("AUTO")
|
||||
# FIX B3: count consecutive failures, only restart after MAX_WS_ERRORS
|
||||
oserror_count += 1
|
||||
logging.warning('[bettercap] connection to the bettercap endpoint failed (failure %d/%d)...',
|
||||
oserror_count, MAX_WS_ERRORS)
|
||||
if oserror_count >= MAX_WS_ERRORS:
|
||||
logging.error('[bettercap] too many consecutive websocket failures, restarting...')
|
||||
pwnagotchi.restart("AUTO")
|
||||
else:
|
||||
sleep_time = min_sleep + max_sleep * random.random()
|
||||
logging.warning('[bettercap] retrying websocket in %.1fs', sleep_time)
|
||||
await asyncio.sleep(sleep_time)
|
||||
continue
|
||||
|
||||
def run(self, command, verbose_errors=True):
    """POST *command* to the bettercap /session API and decode the response.

    FIX B2: bounded retries with exponential backoff (capped at 30s)
    instead of an infinite loop; restarts the unit if bettercap stays
    unreachable for MAX_RETRIES consecutive attempts.

    Note: decode() is only called on a successful request — the previous
    post-loop 'return decode(r, ...)' could reference an unbound 'r' when
    every attempt raised ConnectionError.
    """
    for attempt in range(MAX_RETRIES):
        try:
            r = requests.post("%s/session" % self.url, auth=self.auth, json={'cmd': command})
        except requests.exceptions.ConnectionError:
            # exponential backoff: 1s, 2s, 4s, ... capped at 30s
            sleep_time = min(BACKOFF_BASE ** attempt, 30)
            logging.warning(
                "[bettercap] can't run my request... connection failed (attempt %d/%d), retrying in %.1fs",
                attempt + 1, MAX_RETRIES, sleep_time)
            sleep(sleep_time)
        else:
            return decode(r, verbose_errors=verbose_errors)

    logging.critical('[bettercap] unreachable after %d attempts, restarting...', MAX_RETRIES)
    pwnagotchi.restart('AUTO')
|
||||
|
||||
@@ -3,7 +3,6 @@ import re
|
||||
import tempfile
|
||||
import contextlib
|
||||
import shutil
|
||||
#import _thread
|
||||
import threading
|
||||
import logging
|
||||
|
||||
@@ -87,7 +86,6 @@ def setup_mounts(config):
|
||||
logging.debug("[FS] Starting thread to sync %s (interval: %d)",
|
||||
options['mount'], interval)
|
||||
threading.Thread(target=m.daemonize, args=(interval,),name="File Sys", daemon=True).start()
|
||||
#_thread.start_new_thread(m.daemonize, (interval,))
|
||||
else:
|
||||
logging.debug("[FS] Not syncing %s, because interval is 0",
|
||||
options['mount'])
|
||||
|
||||
@@ -1,6 +1,5 @@
|
||||
import subprocess
|
||||
import requests
|
||||
import json
|
||||
import logging
|
||||
|
||||
import pwnagotchi
|
||||
@@ -17,7 +16,7 @@ def is_connected():
|
||||
r = requests.get(host, headers=headers, timeout=(30.0, 60.0))
|
||||
if r.json().get('isUp'):
|
||||
return True
|
||||
except:
|
||||
except Exception:
|
||||
pass
|
||||
return False
|
||||
|
||||
@@ -37,7 +36,8 @@ def call(path, obj=None):
|
||||
|
||||
|
||||
def advertise(enabled=True):
    """Enable or disable mesh advertising via the local peer API.

    FIX B1: the ternary must be parenthesized — '%' binds tighter than
    'if/else', so the unparenthesized form evaluated as
    ("/mesh/%s" % 'true') if enabled else 'false', returning the bare
    string 'false' instead of calling "/mesh/false" when disabled.
    """
    return call("/mesh/%s" % ('true' if enabled else 'false'))
|
||||
|
||||
|
||||
def set_advertisement_data(data):
|
||||
@@ -62,12 +62,8 @@ def closest_peer():
|
||||
|
||||
|
||||
def update_data(last_session):
|
||||
brain = {}
|
||||
try:
|
||||
with open('/root/brain.json') as fp:
|
||||
brain = json.load(fp)
|
||||
except:
|
||||
pass
|
||||
# REMOVED: brain.json loading - file is never created by the noai fork
|
||||
# REMOVED: AI session fields (train_epochs, avg_reward, min_reward, max_reward) - always zero without AI
|
||||
enabled = [name for name, options in pwnagotchi.config['main']['plugins'].items() if
|
||||
'enabled' in options and options['enabled']]
|
||||
language = pwnagotchi.config['main']['lang']
|
||||
@@ -77,10 +73,6 @@ def update_data(last_session):
|
||||
'session': {
|
||||
'duration': last_session.duration,
|
||||
'epochs': last_session.epochs,
|
||||
'train_epochs': last_session.train_epochs,
|
||||
'avg_reward': last_session.avg_reward,
|
||||
'min_reward': last_session.min_reward,
|
||||
'max_reward': last_session.max_reward,
|
||||
'deauthed': last_session.deauthed,
|
||||
'associated': last_session.associated,
|
||||
'handshakes': last_session.handshakes,
|
||||
|
||||
@@ -56,8 +56,7 @@ class KeyPair(object):
|
||||
try:
|
||||
os.remove(self.priv_path)
|
||||
os.remove(self.pub_path)
|
||||
except:
|
||||
pass
|
||||
except Exception: # FIX B4: was bare except
|
||||
|
||||
# no exception, keys loaded correctly.
|
||||
self._view.on_starting()
|
||||
|
||||
@@ -54,8 +54,7 @@ class LastSession(object):
|
||||
try:
|
||||
with open(LAST_SESSION_FILE, 'rt') as fp:
|
||||
saved = fp.read().strip()
|
||||
except:
|
||||
saved = ''
|
||||
except Exception: # FIX B4: was bare except, swallowed KeyboardInterrupt
|
||||
return saved
|
||||
|
||||
def save_session_id(self):
|
||||
|
||||
@@ -1,4 +1,3 @@
|
||||
#import _thread
|
||||
import threading
|
||||
import logging
|
||||
import time
|
||||
@@ -42,7 +41,6 @@ class AsyncAdvertiser(object):
|
||||
|
||||
def start_advertising(self):
|
||||
if self._config['personality']['advertise']:
|
||||
#_thread.start_new_thread(self._adv_poller, ())
|
||||
threading.Thread(target=self._adv_poller,args=(), name="Grid", daemon=True).start()
|
||||
|
||||
grid.set_advertisement_data(self._advertisement)
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
import logging
|
||||
import json
|
||||
import toml
|
||||
import _thread
|
||||
import threading # FIX B5: replaced _thread with threading
|
||||
import pwnagotchi
|
||||
from pwnagotchi import restart, plugins
|
||||
from pwnagotchi.utils import save_config, merge_config
|
||||
@@ -534,7 +534,7 @@ class WebConfig(plugins.Plugin):
|
||||
if path == "save-config":
|
||||
try:
|
||||
save_config(request.get_json(), '/etc/pwnagotchi/config.toml') # test
|
||||
_thread.start_new_thread(restart, (self.mode,))
|
||||
threading.Thread(target=restart, args=(self.mode,), daemon=True).start() # FIX B5
|
||||
return "success"
|
||||
except Exception as ex:
|
||||
logging.error(ex)
|
||||
|
||||
@@ -1,4 +1,3 @@
|
||||
# import _thread
|
||||
import threading
|
||||
import logging
|
||||
import random
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
import logging
|
||||
import os
|
||||
import base64
|
||||
import _thread
|
||||
import threading # FIX B5: replaced _thread with threading
|
||||
import secrets
|
||||
import json
|
||||
from functools import wraps
|
||||
@@ -52,9 +52,9 @@ class Handler:
|
||||
plugins_with_auth = self.with_auth(self.plugins)
|
||||
self._app.add_url_rule('/plugins', 'plugins', plugins_with_auth, strict_slashes=False,
|
||||
defaults={'name': None, 'subpath': None})
|
||||
self._app.add_url_rule('/plugins/<name>', 'plugins', plugins_with_auth, strict_slashes=False,
|
||||
self._app.add_url_rule('/plugins/<n>', 'plugins', plugins_with_auth, strict_slashes=False,
|
||||
methods=['GET', 'POST'], defaults={'subpath': None})
|
||||
self._app.add_url_rule('/plugins/<name>/<path:subpath>', 'plugins', plugins_with_auth, methods=['GET', 'POST'])
|
||||
self._app.add_url_rule('/plugins/<n>/<path:subpath>', 'plugins', plugins_with_auth, methods=['GET', 'POST'])
|
||||
|
||||
def _check_creds(self, u, p):
|
||||
# trying to be timing attack safe
|
||||
@@ -210,15 +210,17 @@ class Handler:
|
||||
return render_template('status.html', title=pwnagotchi.name(), go_back_after=60,
|
||||
message='Shutting down ...')
|
||||
finally:
|
||||
_thread.start_new_thread(pwnagotchi.shutdown, ())
|
||||
# FIX B5: replaced _thread.start_new_thread with threading.Thread
|
||||
threading.Thread(target=pwnagotchi.shutdown, daemon=True).start()
|
||||
|
||||
# serve a message and reboot the unit
|
||||
def reboot(self):
    """Serve a status page, then reboot the unit.

    The reboot is kicked off in the 'finally' clause so the response is
    returned to the browser first.
    """
    try:
        return render_template('status.html', title=pwnagotchi.name(), go_back_after=60,
                               message='Rebooting ...')
    finally:
        # FIX B5: replaced _thread.start_new_thread with threading.Thread
        threading.Thread(target=pwnagotchi.reboot, daemon=True).start()
|
||||
|
||||
# serve a message and restart the unit in the other mode
|
||||
def restart(self):
|
||||
@@ -230,7 +232,8 @@ class Handler:
|
||||
return render_template('status.html', title=pwnagotchi.name(), go_back_after=30,
|
||||
message='Restarting in %s mode ...' % mode)
|
||||
finally:
|
||||
_thread.start_new_thread(pwnagotchi.restart, (mode,))
|
||||
# FIX B5: replaced _thread.start_new_thread with threading.Thread
|
||||
threading.Thread(target=pwnagotchi.restart, args=(mode,), daemon=True).start()
|
||||
|
||||
# serve the PNG file with the display image
|
||||
def ui(self):
|
||||
|
||||
@@ -1,4 +1,3 @@
|
||||
#import _thread
|
||||
import threading
|
||||
import secrets
|
||||
import logging
|
||||
|
||||
@@ -7,9 +7,10 @@ name = "pwnagotchi"
|
||||
dynamic = ["version"]
|
||||
dependencies = [
|
||||
"PyYAML", "dbus-python", "file-read-backwards", "flask", "flask-cors",
|
||||
"flask-wtf", "gast", "gpiozero", "inky", "numpy", "pycryptodome", "pydrive2", "python-dateutil",
|
||||
"requests", "rpi-lgpio", "rpi_hardware_pwm", "scapy", "setuptools", "shimmy", "smbus", "smbus2",
|
||||
"flask-wtf", "gpiozero", "inky", "pycryptodome", "pydrive2", "python-dateutil",
|
||||
"requests", "rpi-lgpio", "rpi_hardware_pwm", "scapy", "setuptools", "smbus", "smbus2",
|
||||
"spidev", "tomlkit", "toml", "tweepy", "websockets", "pisugar",
|
||||
# REMOVED: "numpy", "gast", "shimmy" - were AI training pipeline dependencies only
|
||||
]
|
||||
|
||||
requires-python = ">=3.11"
|
||||
|
||||
Reference in New Issue
Block a user