From 86d4028dc465cbfd7c9e67d531dcdb3a352742bd Mon Sep 17 00:00:00 2001
From: Pavol Rusnak
Date: Mon, 7 Oct 2024 10:52:43 +0200
Subject: [PATCH] remove dangling spaces

---
 Changelog.md | 2 +-
 Examples/Announce.py | 6 +-
 Examples/Broadcast.py | 4 +-
 Examples/Buffer.py | 6 +-
 Examples/Channel.py | 6 +-
 Examples/Echo.py | 20 +-
 Examples/Filetransfer.py | 20 +-
 Examples/Identify.py | 6 +-
 Examples/Link.py | 8 +-
 Examples/Minimal.py | 4 +-
 Examples/Ratchets.py | 20 +-
 Examples/Request.py | 6 +-
 Examples/Speedtest.py | 12 +-
 README.md | 18 +-
 RNS/Buffer.py | 2 +-
 RNS/Channel.py | 24 +-
 RNS/Cryptography/AES.py | 4 +-
 RNS/Cryptography/Fernet.py | 2 +-
 RNS/Cryptography/SHA256.py | 38 +--
 RNS/Cryptography/X25519.py | 6 +-
 RNS/Cryptography/aes/aes.py | 10 +-
 RNS/Destination.py | 8 +-
 RNS/Identity.py | 24 +-
 RNS/Interfaces/AX25KISSInterface.py | 4 +-
 RNS/Interfaces/Android/KISSInterface.py | 12 +-
 RNS/Interfaces/Android/RNodeInterface.py | 54 +--
 RNS/Interfaces/Android/SerialInterface.py | 14 +-
 RNS/Interfaces/AutoInterface.py | 16 +-
 RNS/Interfaces/I2PInterface.py | 50 +--
 RNS/Interfaces/Interface.py | 6 +-
 RNS/Interfaces/KISSInterface.py | 8 +-
 RNS/Interfaces/LocalInterface.py | 18 +-
 RNS/Interfaces/PipeInterface.py | 14 +-
 RNS/Interfaces/RNodeInterface.py | 36 +-
 RNS/Interfaces/RNodeMultiInterface.py | 30 +-
 RNS/Interfaces/SerialInterface.py | 12 +-
 RNS/Interfaces/TCPInterface.py | 30 +-
 RNS/Interfaces/UDPInterface.py | 2 +-
 RNS/Link.py | 32 +-
 RNS/Packet.py | 18 +-
 RNS/Resolver.py | 2 +-
 RNS/Resource.py | 42 +--
 RNS/Reticulum.py | 50 +--
 RNS/Transport.py | 82 ++---
 RNS/Utilities/rncp.py | 18 +-
 RNS/Utilities/rnid.py | 22 +-
 RNS/Utilities/rnir.py | 2 +-
 RNS/Utilities/rnodeconf.py | 56 ++--
 RNS/Utilities/rnpath.py | 16 +-
 RNS/Utilities/rnprobe.py | 12 +-
 RNS/Utilities/rnsd.py | 26 +-
 RNS/Utilities/rnstatus.py | 28 +-
 RNS/Utilities/rnx.py | 18 +-
 RNS/__init__.py | 20 +-
 RNS/vendor/configobj.py | 382 +++++++++++-----------
 RNS/vendor/i2plib/__init__.py | 4 +-
 RNS/vendor/i2plib/aiosam.py | 42 +--
 RNS/vendor/i2plib/sam.py | 20 +-
 RNS/vendor/i2plib/tunnel.py | 40 +--
 tests/channel.py | 2 +-
 tests/identity.py | 2 +-
 tests/link.py | 28 +-
 62 files changed, 763 insertions(+), 763 deletions(-)

diff --git a/Changelog.md b/Changelog.md
index d36671f..5d3bc78 100644
--- a/Changelog.md
+++ b/Changelog.md
@@ -1214,7 +1214,7 @@ This beta release brings a range of improvements and bugfixes.
  - Improved documentation.
  - Improved request timeouts and handling.
  - Improved link establishment.
- - Improved resource transfer timing. 
+ - Improved resource transfer timing.
 
 **Fixed bugs**
  - Fixed a race condition in inbound proof handling.
diff --git a/Examples/Announce.py b/Examples/Announce.py
index 5b0a3af..c5a2e3c 100644
--- a/Examples/Announce.py
+++ b/Examples/Announce.py
@@ -22,7 +22,7 @@ noble_gases = ["Helium", "Neon", "Argon", "Krypton", "Xenon", "Radon", "Oganesso
 def program_setup(configpath):
     # We must first initialise Reticulum
     reticulum = RNS.Reticulum(configpath)
-    
+
     # Randomly create a new identity for our example
     identity = RNS.Identity()
 
@@ -70,7 +70,7 @@ def program_setup(configpath):
 
     # We register the announce handler with Reticulum
     RNS.Transport.register_announce_handler(announce_handler)
-    
+
     # Everything's ready!
     # Let's hand over control to the announce loop
     announceLoop(destination_1, destination_2)
@@ -86,7 +86,7 @@ def announceLoop(destination_1, destination_2):
     # know how to create messages directed towards it.
while True: entered = input() - + # Randomly select a fruit fruit = fruits[random.randint(0,len(fruits)-1)] diff --git a/Examples/Broadcast.py b/Examples/Broadcast.py index 423d759..1d752d3 100644 --- a/Examples/Broadcast.py +++ b/Examples/Broadcast.py @@ -17,7 +17,7 @@ APP_NAME = "example_utilities" def program_setup(configpath, channel=None): # We must first initialise Reticulum reticulum = RNS.Reticulum(configpath) - + # If the user did not select a "channel" we use # a default one called "public_information". # This "channel" is added to the destination name- @@ -40,7 +40,7 @@ def program_setup(configpath, channel=None): # We specify a callback that will get called every time # the destination receives data. broadcast_destination.set_packet_callback(packet_callback) - + # Everything's ready! # Let's hand over control to the main loop broadcastLoop(broadcast_destination) diff --git a/Examples/Buffer.py b/Examples/Buffer.py index 7d26ce1..edf0615 100644 --- a/Examples/Buffer.py +++ b/Examples/Buffer.py @@ -35,7 +35,7 @@ latest_buffer = None def server(configpath): # We must first initialise Reticulum reticulum = RNS.Reticulum(configpath) - + # Randomly create a new identity for our example server_identity = RNS.Identity() @@ -151,7 +151,7 @@ def client(destination_hexhash, configpath): raise ValueError( f"Destination length is invalid, must be {dest_len} hexadecimal characters ({dest_len // 2} bytes)." ) - + destination_hash = bytes.fromhex(destination_hexhash) except: RNS.log("Invalid destination entered. Check your input!\n") @@ -251,7 +251,7 @@ def link_closed(link): RNS.log("The link was closed by the server, exiting now") else: RNS.log("Link closed, exiting now") - + RNS.Reticulum.exit_handler() time.sleep(1.5) os._exit(0) diff --git a/Examples/Channel.py b/Examples/Channel.py index b1c6b39..6f475d2 100644 --- a/Examples/Channel.py +++ b/Examples/Channel.py @@ -98,7 +98,7 @@ latest_client_link = None def server(configpath): # We must first initialise Reticulum reticulum = RNS.Reticulum(configpath) - + # Randomly create a new identity for our link example server_identity = RNS.Identity() @@ -206,7 +206,7 @@ def client(destination_hexhash, configpath): raise ValueError( f"Destination length is invalid, must be {dest_len} hexadecimal characters ({dest_len // 2} bytes)." ) - + destination_hash = bytes.fromhex(destination_hexhash) except: RNS.log("Invalid destination entered. Check your input!\n") @@ -315,7 +315,7 @@ def link_closed(link): RNS.log("The link was closed by the server, exiting now") else: RNS.log("Link closed, exiting now") - + RNS.Reticulum.exit_handler() time.sleep(1.5) os._exit(0) diff --git a/Examples/Echo.py b/Examples/Echo.py index ed377b2..0d020dc 100644 --- a/Examples/Echo.py +++ b/Examples/Echo.py @@ -26,7 +26,7 @@ def server(configpath): # We must first initialise Reticulum reticulum = RNS.Reticulum(configpath) - + # Randomly create a new identity for our echo server server_identity = RNS.Identity() @@ -35,7 +35,7 @@ def server(configpath): # create a "single" destination that can receive encrypted # messages. This way the client can send a request and be # certain that no-one else than this destination was able - # to read it. + # to read it. echo_destination = RNS.Destination( server_identity, RNS.Destination.IN, @@ -50,7 +50,7 @@ def server(configpath): # generate a proof for each incoming packet and transmit it # back to the sender of that packet. 
echo_destination.set_proof_strategy(RNS.Destination.PROVE_ALL) - + # Tell the destination which function in our program to # run when a packet is received. We do this so we can # print a log message when the server receives a request @@ -79,7 +79,7 @@ def announceLoop(destination): def server_callback(message, packet): global reticulum - + # Tell the user that we received an echo request, and # that we are going to send a reply to the requester. # Sending the proof is handled automatically, since we @@ -92,14 +92,14 @@ def server_callback(message, packet): if reception_rssi != None: reception_stats += f" [RSSI {reception_rssi} dBm]" - + if reception_snr != None: reception_stats += f" [SNR {reception_snr} dBm]" else: if packet.rssi != None: reception_stats += f" [RSSI {packet.rssi} dBm]" - + if packet.snr != None: reception_stats += f" [SNR {packet.snr} dB]" @@ -114,7 +114,7 @@ def server_callback(message, packet): # to run as a client def client(destination_hexhash, configpath, timeout=None): global reticulum - + # We need a binary representation of the destination # hash that was entered on the command line try: @@ -149,7 +149,7 @@ def client(destination_hexhash, configpath, timeout=None): # command line. while True: input() - + # Let's first check if RNS knows a path to the destination. # If it does, we'll load the server identity and create a packet if RNS.Transport.has_path(destination_hash): @@ -230,7 +230,7 @@ def packet_delivered(receipt): if reception_rssi != None: reception_stats += f" [RSSI {reception_rssi} dBm]" - + if reception_snr != None: reception_stats += f" [SNR {reception_snr} dB]" @@ -238,7 +238,7 @@ def packet_delivered(receipt): if receipt.proof_packet != None: if receipt.proof_packet.rssi != None: reception_stats += f" [RSSI {receipt.proof_packet.rssi} dBm]" - + if receipt.proof_packet.snr != None: reception_stats += f" [SNR {receipt.proof_packet.snr} dB]" diff --git a/Examples/Filetransfer.py b/Examples/Filetransfer.py index 79081d1..f58eac1 100644 --- a/Examples/Filetransfer.py +++ b/Examples/Filetransfer.py @@ -44,7 +44,7 @@ serve_path = None def server(configpath, path): # We must first initialise Reticulum reticulum = RNS.Reticulum(configpath) - + # Randomly create a new identity for our file server server_identity = RNS.Identity() @@ -120,7 +120,7 @@ def client_connected(link): RNS.log("Too many files in served directory!", RNS.LOG_ERROR) RNS.log("You should implement a function to split the filelist over multiple packets.", RNS.LOG_ERROR) RNS.log("Hint: The client already supports it :)", RNS.LOG_ERROR) - + # After this, we're just going to keep the link # open until the client requests a file. We'll # configure a function that get's called when @@ -147,7 +147,7 @@ def client_request(message, packet): # read it and pack it as a resource RNS.log(f"Client requested \"{filename}\"") file = open(os.path.join(serve_path, filename), "rb") - + file_resource = RNS.Resource( file, packet.link, @@ -220,7 +220,7 @@ def client(destination_hexhash, configpath): raise ValueError( f"Destination length is invalid, must be {dest_len} hexadecimal characters ({dest_len // 2} bytes)." ) - + destination_hash = bytes.fromhex(destination_hexhash) except: RNS.log("Invalid destination entered. Check your input!\n") @@ -291,7 +291,7 @@ def download(filename): # packet receipt. 
request_packet = RNS.Packet(server_link, filename.encode("utf-8"), create_receipt=False) request_packet.send() - + print("") print(f"Requested \"{filename}\" from server, waiting for download to begin...") menu_mode = "download_started" @@ -474,7 +474,7 @@ def link_closed(link): RNS.log("The link was closed by the server, exiting now") else: RNS.log("Link closed, exiting now") - + RNS.Reticulum.exit_handler() time.sleep(1.5) os._exit(0) @@ -486,17 +486,17 @@ def link_closed(link): def download_began(resource): global menu_mode, current_download, download_started, transfer_size, file_size current_download = resource - + if download_started == 0: download_started = time.time() - + transfer_size += resource.size file_size = resource.total_size - + menu_mode = "downloading" # When the download concludes, successfully -# or not, we'll update our menu state and +# or not, we'll update our menu state and # inform the user about how it all went. def download_concluded(resource): global menu_mode, current_filename, download_started, download_finished, download_time diff --git a/Examples/Identify.py b/Examples/Identify.py index 6bfa49d..deddc0a 100644 --- a/Examples/Identify.py +++ b/Examples/Identify.py @@ -27,7 +27,7 @@ latest_client_link = None def server(configpath): # We must first initialise Reticulum reticulum = RNS.Reticulum(configpath) - + # Randomly create a new identity for our link example server_identity = RNS.Identity() @@ -99,7 +99,7 @@ def server_packet_received(message, packet): text = message.decode("utf-8") RNS.log(f"Received data from {remote_peer}: {text}") - + reply_text = f"I received \"{text}\" over the link from {remote_peer}" reply_data = reply_text.encode("utf-8") RNS.Packet(latest_client_link, reply_data).send() @@ -239,7 +239,7 @@ def link_closed(link): RNS.log("The link was closed by the server, exiting now") else: RNS.log("Link closed, exiting now") - + RNS.Reticulum.exit_handler() time.sleep(1.5) os._exit(0) diff --git a/Examples/Link.py b/Examples/Link.py index 6df8f3e..b3e5966 100644 --- a/Examples/Link.py +++ b/Examples/Link.py @@ -27,7 +27,7 @@ latest_client_link = None def server(configpath): # We must first initialise Reticulum reticulum = RNS.Reticulum(configpath) - + # Randomly create a new identity for our link example server_identity = RNS.Identity() @@ -89,7 +89,7 @@ def server_packet_received(message, packet): # that connected. text = message.decode("utf-8") RNS.log(f"Received data on the link: {text}") - + reply_text = f"I received \"{text}\" over the link" reply_data = reply_text.encode("utf-8") RNS.Packet(latest_client_link, reply_data).send() @@ -113,7 +113,7 @@ def client(destination_hexhash, configpath): raise ValueError( f"Destination length is invalid, must be {dest_len} hexadecimal characters ({dest_len // 2} bytes)." ) - + destination_hash = bytes.fromhex(destination_hexhash) except: RNS.log("Invalid destination entered. 
Check your input!\n") @@ -217,7 +217,7 @@ def link_closed(link): RNS.log("The link was closed by the server, exiting now") else: RNS.log("Link closed, exiting now") - + RNS.Reticulum.exit_handler() time.sleep(1.5) os._exit(0) diff --git a/Examples/Minimal.py b/Examples/Minimal.py index 6f33893..3f7558f 100644 --- a/Examples/Minimal.py +++ b/Examples/Minimal.py @@ -17,7 +17,7 @@ APP_NAME = "example_utilities" def program_setup(configpath): # We must first initialise Reticulum reticulum = RNS.Reticulum(configpath) - + # Randomly create a new identity for our example identity = RNS.Identity() @@ -42,7 +42,7 @@ def program_setup(configpath): # tries to communicate with the destination know whether their # communication was received correctly. destination.set_proof_strategy(RNS.Destination.PROVE_ALL) - + # Everything's ready! # Let's hand over control to the announce loop announceLoop(destination) diff --git a/Examples/Ratchets.py b/Examples/Ratchets.py index b690de4..1623990 100644 --- a/Examples/Ratchets.py +++ b/Examples/Ratchets.py @@ -28,7 +28,7 @@ def server(configpath): # TODO: Remove RNS.loglevel = RNS.LOG_DEBUG - + # Randomly create a new identity for our echo server server_identity = RNS.Identity() @@ -37,7 +37,7 @@ def server(configpath): # create a "single" destination that can receive encrypted # messages. This way the client can send a request and be # certain that no-one else than this destination was able - # to read it. + # to read it. echo_destination = RNS.Destination( server_identity, RNS.Destination.IN, @@ -61,7 +61,7 @@ def server(configpath): # generate a proof for each incoming packet and transmit it # back to the sender of that packet. echo_destination.set_proof_strategy(RNS.Destination.PROVE_ALL) - + # Tell the destination which function in our program to # run when a packet is received. We do this so we can # print a log message when the server receives a request @@ -90,7 +90,7 @@ def announceLoop(destination): def server_callback(message, packet): global reticulum - + # Tell the user that we received an echo request, and # that we are going to send a reply to the requester. # Sending the proof is handled automatically, since we @@ -103,14 +103,14 @@ def server_callback(message, packet): if reception_rssi != None: reception_stats += f" [RSSI {reception_rssi} dBm]" - + if reception_snr != None: reception_stats += f" [SNR {reception_snr} dBm]" else: if packet.rssi != None: reception_stats += f" [RSSI {packet.rssi} dBm]" - + if packet.snr != None: reception_stats += f" [SNR {packet.snr} dB]" @@ -125,7 +125,7 @@ def server_callback(message, packet): # to run as a client def client(destination_hexhash, configpath, timeout=None): global reticulum - + # We need a binary representation of the destination # hash that was entered on the command line try: @@ -160,7 +160,7 @@ def client(destination_hexhash, configpath, timeout=None): # command line. while True: input() - + # Let's first check if RNS knows a path to the destination. 
# If it does, we'll load the server identity and create a packet if RNS.Transport.has_path(destination_hash): @@ -242,7 +242,7 @@ def packet_delivered(receipt): if reception_rssi != None: reception_stats += f" [RSSI {reception_rssi} dBm]" - + if reception_snr != None: reception_stats += f" [SNR {reception_snr} dB]" @@ -250,7 +250,7 @@ def packet_delivered(receipt): if receipt.proof_packet != None: if receipt.proof_packet.rssi != None: reception_stats += f" [RSSI {receipt.proof_packet.rssi} dBm]" - + if receipt.proof_packet.snr != None: reception_stats += f" [SNR {receipt.proof_packet.snr} dB]" diff --git a/Examples/Request.py b/Examples/Request.py index 774fe2e..acd016a 100644 --- a/Examples/Request.py +++ b/Examples/Request.py @@ -33,7 +33,7 @@ def random_text_generator(path, data, request_id, link_id, remote_identity, requ def server(configpath): # We must first initialise Reticulum reticulum = RNS.Reticulum(configpath) - + # Randomly create a new identity for our link example server_identity = RNS.Identity() @@ -113,7 +113,7 @@ def client(destination_hexhash, configpath): raise ValueError( f"Destination length is invalid, must be {dest_len} hexadecimal characters ({dest_len // 2} bytes)." ) - + destination_hash = bytes.fromhex(destination_hexhash) except: RNS.log("Invalid destination entered. Check your input!\n") @@ -223,7 +223,7 @@ def link_closed(link): RNS.log("The link was closed by the server, exiting now") else: RNS.log("Link closed, exiting now") - + RNS.Reticulum.exit_handler() time.sleep(1.5) os._exit(0) diff --git a/Examples/Speedtest.py b/Examples/Speedtest.py index 4e564f0..be7650c 100644 --- a/Examples/Speedtest.py +++ b/Examples/Speedtest.py @@ -36,7 +36,7 @@ printed = False def server(configpath): # We must first initialise Reticulum reticulum = RNS.Reticulum(configpath) - + # Randomly create a new identity for our link example server_identity = RNS.Identity() @@ -113,9 +113,9 @@ def size_str(num, suffix='B'): def server_packet_received(message, packet): global latest_client_link, first_packet_at, last_packet_at, received_data, rc, data_cap - + received_data += len(packet.data) - + rc += 1 if rc >= 50: RNS.log(size_str(received_data)) @@ -127,7 +127,7 @@ def server_packet_received(message, packet): rc = 0 last_packet_at = time.time() - + # Print statistics download_time = last_packet_at-first_packet_at hours, rem = divmod(download_time, 3600) @@ -169,7 +169,7 @@ def client(destination_hexhash, configpath): raise ValueError( f"Destination length is invalid, must be {dest_len} hexadecimal characters ({dest_len // 2} bytes)." ) - + destination_hash = bytes.fromhex(destination_hexhash) except: RNS.log("Invalid destination entered. Check your input!\n") @@ -280,7 +280,7 @@ def link_closed(link): RNS.log("The link was closed by the server, exiting now") else: RNS.log("Link closed, exiting now") - + RNS.Reticulum.exit_handler() time.sleep(1.5) diff --git a/README.md b/README.md index 0c55bcc..6866f3e 100755 --- a/README.md +++ b/README.md @@ -109,8 +109,8 @@ network, and vice versa. ## How do I get started? The best way to get started with the Reticulum Network Stack depends on what -you want to do. For full details and examples, have a look at the -[Getting Started Fast](https://markqvist.github.io/Reticulum/manual/gettingstartedfast.html) +you want to do. 
For full details and examples, have a look at the +[Getting Started Fast](https://markqvist.github.io/Reticulum/manual/gettingstartedfast.html) section of the [Reticulum Manual](https://markqvist.github.io/Reticulum/manual/). To simply install Reticulum and related utilities on your system, the easiest way is via `pip`. @@ -143,15 +143,15 @@ creating a more complex configuration. If you have an old version of `pip` on your system, you may need to upgrade it first with `pip install pip --upgrade`. If you no not already have `pip` installed, you can install it using the package manager of your system with `sudo apt install python3-pip` or similar. -For more detailed examples on how to expand communication over many mediums such -as packet radio or LoRa, serial ports, or over fast IP links and the Internet using -the UDP and TCP interfaces, take a look at the [Supported Interfaces](https://markqvist.github.io/Reticulum/manual/interfaces.html) +For more detailed examples on how to expand communication over many mediums such +as packet radio or LoRa, serial ports, or over fast IP links and the Internet using +the UDP and TCP interfaces, take a look at the [Supported Interfaces](https://markqvist.github.io/Reticulum/manual/interfaces.html) section of the [Reticulum Manual](https://markqvist.github.io/Reticulum/manual/). ## Included Utilities -Reticulum includes a range of useful utilities for managing your networks, -viewing status and information, and other tasks. You can read more about these -programs in the [Included Utility Programs](https://markqvist.github.io/Reticulum/manual/using.html#included-utility-programs) +Reticulum includes a range of useful utilities for managing your networks, +viewing status and information, and other tasks. You can read more about these +programs in the [Included Utility Programs](https://markqvist.github.io/Reticulum/manual/using.html#included-utility-programs) section of the [Reticulum Manual](https://markqvist.github.io/Reticulum/manual/). - The system daemon `rnsd` for running Reticulum as an always-available service @@ -242,7 +242,7 @@ The testnet is just that, an informal network for testing and experimenting. It will be up most of the time, and anyone can join, but it also means that there's no guarantees for service availability. -It probably goes without saying, but *don't use the testnet entry-points as +It probably goes without saying, but *don't use the testnet entry-points as hardcoded or default interfaces in any applications you ship to users*. When shipping applications, the best practice is to provide your own default connectivity solutions, if needed and applicable, or in most cases, simply diff --git a/RNS/Buffer.py b/RNS/Buffer.py index f260c16..eb0b0e9 100644 --- a/RNS/Buffer.py +++ b/RNS/Buffer.py @@ -244,7 +244,7 @@ class RawChannelWriter(RawIOBase, AbstractContextManager): processed_length = len(chunk) message = StreamDataMessage(self._stream_id, chunk, self._eof, comp_success) - + self._channel.send(message) return processed_length diff --git a/RNS/Channel.py b/RNS/Channel.py index 5634471..d2a81fe 100644 --- a/RNS/Channel.py +++ b/RNS/Channel.py @@ -136,7 +136,7 @@ class MessageBase(abc.ABC): MSGTYPE = None """ Defines a unique identifier for a message class. - + * Must be unique within all classes registered with a ``Channel`` * Must be less than ``0xf000``. Values greater than or equal to ``0xf000`` are reserved. 
""" @@ -247,11 +247,11 @@ class Channel(contextlib.AbstractContextManager): # The maximum window size for transfers on fast links WINDOW_MAX_FAST = 48 - + # For calculating maps and guard segments, this # must be set to the global maximum window. WINDOW_MAX = WINDOW_MAX_FAST - + # If the fast rate is sustained for this many request # rounds, the fast link window size will be allowed. FAST_RATE_THRESHOLD = 10 @@ -380,21 +380,21 @@ class Channel(contextlib.AbstractContextManager): def _emplace_envelope(self, envelope: Envelope, ring: collections.deque[Envelope]) -> bool: with self._lock: i = 0 - + for existing in ring: if envelope.sequence == existing.sequence: RNS.log(f"Envelope: Emplacement of duplicate envelope with sequence {envelope.sequence}", RNS.LOG_EXTREME) return False - + if envelope.sequence < existing.sequence and not (self._next_rx_sequence - envelope.sequence) > (Channel.SEQ_MAX//2): ring.insert(i, envelope) envelope.tracked = True return True - + i += 1 - + envelope.tracked = True ring.append(envelope) @@ -449,7 +449,7 @@ class Channel(contextlib.AbstractContextManager): m = e.unpack(self._message_factories) else: m = e.message - + self._rx_ring.remove(e) self._run_callbacks(m) @@ -468,7 +468,7 @@ class Channel(contextlib.AbstractContextManager): with self._lock: outstanding = 0 for envelope in self._tx_ring: - if envelope.outlet == self._outlet: + if envelope.outlet == self._outlet: if not envelope.packet or not self._outlet.get_packet_state(envelope.packet) == MessageState.MSGSTATE_DELIVERED: outstanding += 1 @@ -508,7 +508,7 @@ class Channel(contextlib.AbstractContextManager): # TODO: Remove at some point # RNS.log("Increased "+str(self)+" max window to "+str(self.window_max), RNS.LOG_DEBUG) # RNS.log("Increased "+str(self)+" min window to "+str(self.window_min), RNS.LOG_DEBUG) - + else: self.fast_rate_rounds += 1 if self.window_max < Channel.WINDOW_MAX_FAST and self.fast_rate_rounds == Channel.FAST_RATE_THRESHOLD: @@ -581,7 +581,7 @@ class Channel(contextlib.AbstractContextManager): with self._lock: if not self.is_ready_to_send(): raise ChannelException(CEType.ME_LINK_NOT_READY, f"Link is not ready") - + envelope = Envelope(self._outlet, message=message, sequence=self._next_sequence) self._next_sequence = (self._next_sequence + 1) % Channel.SEQ_MODULUS self._emplace_envelope(envelope, self._tx_ring) @@ -592,7 +592,7 @@ class Channel(contextlib.AbstractContextManager): envelope.pack() if len(envelope.raw) > self._outlet.mdu: raise ChannelException(CEType.ME_TOO_BIG, f"Packed message too big for packet: {len(envelope.raw)} > {self._outlet.mdu}") - + envelope.packet = self._outlet.send(envelope.raw) envelope.tries += 1 self._outlet.set_packet_delivered_callback(envelope.packet, self._packet_delivered) diff --git a/RNS/Cryptography/AES.py b/RNS/Cryptography/AES.py index caca7d6..9e934ad 100644 --- a/RNS/Cryptography/AES.py +++ b/RNS/Cryptography/AES.py @@ -25,7 +25,7 @@ import RNS.vendor.platformutils as pu if cp.PROVIDER == cp.PROVIDER_INTERNAL: from .aes import AES - + elif cp.PROVIDER == cp.PROVIDER_PYCA: from cryptography.hazmat.primitives.ciphers import Cipher, algorithms, modes @@ -46,7 +46,7 @@ class AES_128_CBC: cipher = Cipher(algorithms.AES(key), modes.CBC(iv)) else: cipher = Cipher(algorithms.AES(key), modes.CBC(iv), backend=default_backend()) - + encryptor = cipher.encryptor() ciphertext = encryptor.update(plaintext) + encryptor.finalize() return ciphertext diff --git a/RNS/Cryptography/Fernet.py b/RNS/Cryptography/Fernet.py index 0ce843c..818a406 100644 --- 
a/RNS/Cryptography/Fernet.py +++ b/RNS/Cryptography/Fernet.py @@ -49,7 +49,7 @@ class Fernet(): if len(key) != 32: raise ValueError(f"Token key must be 32 bytes, not {len(key)}") - + self._signing_key = key[:16] self._encryption_key = key[16:] diff --git a/RNS/Cryptography/SHA256.py b/RNS/Cryptography/SHA256.py index 03f22a4..304d67d 100644 --- a/RNS/Cryptography/SHA256.py +++ b/RNS/Cryptography/SHA256.py @@ -48,34 +48,34 @@ class sha256: _h = (0x6a09e667, 0xbb67ae85, 0x3c6ef372, 0xa54ff53a, 0x510e527f, 0x9b05688c, 0x1f83d9ab, 0x5be0cd19) _output_size = 8 - + blocksize = 1 block_size = 64 digest_size = 32 - - def __init__(self, m=None): + + def __init__(self, m=None): self._buffer = b"" self._counter = 0 - + if m is not None: if type(m) is not bytes: raise TypeError(f'{self.__class__.__name__}() argument 1 must be bytes, not {type(m).__name__}') self.update(m) - + def _rotr(self, x, y): return ((x >> y) | (x << (32-y))) & 0xFFFFFFFF - + def _sha256_process(self, c): w = [0]*64 w[0:16] = struct.unpack('!16L', c) - + for i in range(16, 64): s0 = self._rotr(w[i-15], 7) ^ self._rotr(w[i-15], 18) ^ (w[i-15] >> 3) s1 = self._rotr(w[i-2], 17) ^ self._rotr(w[i-2], 19) ^ (w[i-2] >> 10) w[i] = (w[i-16] + s0 + w[i-7] + s1) & 0xFFFFFFFF - + a,b,c,d,e,f,g,h = self._h - + for i in range(64): s0 = self._rotr(a, 2) ^ self._rotr(a, 13) ^ self._rotr(a, 22) maj = (a & b) ^ (a & c) ^ (b & c) @@ -83,7 +83,7 @@ class sha256: s1 = self._rotr(e, 6) ^ self._rotr(e, 11) ^ self._rotr(e, 25) ch = (e & f) ^ ((~e) & g) t1 = h + s1 + ch + self._k[i] + w[i] - + h = g g = f f = e @@ -92,38 +92,38 @@ class sha256: c = b b = a a = (t1 + t2) & 0xFFFFFFFF - + self._h = [(x+y) & 0xFFFFFFFF for x,y in zip(self._h, [a,b,c,d,e,f,g,h])] - + def update(self, m): if not m: return if type(m) is not bytes: raise TypeError(f'{sys._getframe().f_code.co_name}() argument 1 must be bytes, not {type(m).__name__}') - + self._buffer += m self._counter += len(m) - + while len(self._buffer) >= 64: self._sha256_process(self._buffer[:64]) self._buffer = self._buffer[64:] - + def digest(self): mdi = self._counter & 0x3F length = struct.pack('!Q', self._counter<<3) - + if mdi < 56: padlen = 55-mdi else: padlen = 119-mdi - + r = self.copy() r.update(b'\x80'+(b'\x00'*padlen)+length) return b''.join([struct.pack('!L', i) for i in r._h[:self._output_size]]) - + def hexdigest(self): return self.digest().encode('hex') - + def copy(self): return copy.deepcopy(self) diff --git a/RNS/Cryptography/X25519.py b/RNS/Cryptography/X25519.py index 813f258..58022a7 100644 --- a/RNS/Cryptography/X25519.py +++ b/RNS/Cryptography/X25519.py @@ -138,9 +138,9 @@ class X25519PrivateKey: peer_public_key = X25519PublicKey.from_public_bytes(peer_public_key) start = time.time() - + shared = _pack_number(_raw_curve25519(peer_public_key.x, self.a)) - + end = time.time() duration = end-start @@ -150,7 +150,7 @@ class X25519PrivateKey: if end > X25519PrivateKey.T_CLEAR: X25519PrivateKey.T_CLEAR = end + X25519PrivateKey.DELAY_WINDOW X25519PrivateKey.T_MAX = 0 - + if duration < X25519PrivateKey.T_MAX or duration < X25519PrivateKey.MIN_EXEC_TIME: target = start+X25519PrivateKey.T_MAX diff --git a/RNS/Cryptography/aes/aes.py b/RNS/Cryptography/aes/aes.py index 4b8e45e..bc15959 100644 --- a/RNS/Cryptography/aes/aes.py +++ b/RNS/Cryptography/aes/aes.py @@ -144,7 +144,7 @@ class AES: return matrix2bytes(state) - # will encrypt the entire data + # will encrypt the entire data def encrypt(self, plaintext, iv): """ Encrypts `plaintext` using CBC mode and PKCS#7 padding, with the given @@ 
-173,7 +173,7 @@ class AES: return b''.join(ciphertext_blocks) - # will decrypt the entire data + # will decrypt the entire data def decrypt(self, ciphertext, iv): """ Decrypts `ciphertext` using CBC mode and PKCS#7 padding, with the given @@ -188,7 +188,7 @@ class AES: for ciphertext_block in split_blocks(ciphertext): # in CBC mode every block is XOR'd with the previous block xorred = xor_bytes(previous, self._decrypt_block(ciphertext_block)) - + # append plaintext plaintext_blocks.append(xorred) previous = ciphertext_block @@ -223,7 +223,7 @@ def test(): print("Single Block Tests") print("------------------") print(f"iv: {iv.hex()}") - + print(f"plain text: '{single_block_text.decode()}'") ciphertext_block = _aes._encrypt_block(single_block_text) plaintext_block = _aes._decrypt_block(ciphertext_block) @@ -268,4 +268,4 @@ def test(): if __name__ == "__main__": # test AES class - test() + test() diff --git a/RNS/Destination.py b/RNS/Destination.py index 68d854c..3904039 100755 --- a/RNS/Destination.py +++ b/RNS/Destination.py @@ -140,7 +140,7 @@ class Destination: def __init__(self, identity, direction, type, app_name, *aspects): # Check input values and build name string - if "." in app_name: raise ValueError("Dots can't be used in app names") + if "." in app_name: raise ValueError("Dots can't be used in app names") if not type in Destination.types: raise ValueError("Unknown destination type") if not direction in Destination.directions: raise ValueError("Unknown destination direction") @@ -241,7 +241,7 @@ class Destination: if self.direction != Destination.IN: raise TypeError("Only IN destination types can be announced") - + ratchet = b"" now = time.time() stale_responses = [] @@ -264,7 +264,7 @@ class Destination: # multiple available paths, and to choose the best one. RNS.log(f"Using cached announce data for answering path request with tag {RNS.prettyhexrep(tag)}", RNS.LOG_EXTREME) announce_data = self.path_responses[tag][1] - + else: destination_hash = self.hash random_hash = RNS.Identity.get_random_hash()[0:5]+int(time.time()).to_bytes(5, "big") @@ -281,7 +281,7 @@ class Destination: returned_app_data = self.default_app_data() if isinstance(returned_app_data, bytes): app_data = returned_app_data - + signed_data = self.hash+self.identity.get_public_key()+self.name_hash+random_hash+ratchet if app_data != None: signed_data += app_data diff --git a/RNS/Identity.py b/RNS/Identity.py index a1cc638..3cc520b 100644 --- a/RNS/Identity.py +++ b/RNS/Identity.py @@ -137,7 +137,7 @@ class Identity: # save, but the only changes. It might be possible to # simply overwrite on exit now that every local client # disconnect triggers a data persist. 
- + try: if hasattr(Identity, "saving_known_destinations"): wait_interval = 0.2 @@ -279,7 +279,7 @@ class Identity: ratchet_data = {"ratchet": ratchet, "received": time.time()} ratchetdir = f"{RNS.Reticulum.storagepath}/ratchets" - + if not os.path.isdir(ratchetdir): os.makedirs(ratchetdir) @@ -290,7 +290,7 @@ class Identity: ratchet_file.close() os.replace(outpath, finalpath) - + threading.Thread(target=persist_job, daemon=True).start() except Exception as e: @@ -337,7 +337,7 @@ class Identity: Identity.known_ratchets[destination_hash] = ratchet_data["ratchet"] else: return None - + except Exception as e: RNS.log(f"An error occurred while loading ratchet data for {RNS.prettyhexrep(destination_hash)} from storage.", RNS.LOG_ERROR) RNS.log(f"The contained exception was: {e}", RNS.LOG_ERROR) @@ -444,7 +444,7 @@ class Identity: RNS.log(f"Received invalid announce for {RNS.prettyhexrep(destination_hash)}: Invalid signature.", RNS.LOG_DEBUG) del announced_identity return False - + except Exception as e: RNS.log(f"Error occurred while validating announce. The contained exception was: {e}", RNS.LOG_ERROR) return False @@ -567,7 +567,7 @@ class Identity: self.prv = X25519PrivateKey.from_private_bytes(self.prv_bytes) self.sig_prv_bytes = prv_bytes[Identity.KEYSIZE//8//2:] self.sig_prv = Ed25519PrivateKey.from_private_bytes(self.sig_prv_bytes) - + self.pub = self.prv.public_key() self.pub_bytes = self.pub.public_bytes() @@ -640,7 +640,7 @@ class Identity: target_public_key = self.pub shared_key = ephemeral_key.exchange(target_public_key) - + derived_key = RNS.Cryptography.hkdf( length=32, derive_from=shared_key, @@ -690,9 +690,9 @@ class Identity: plaintext = fernet.decrypt(ciphertext) if ratchet_id_receiver: ratchet_id_receiver.latest_ratchet_id = ratchet_id - + break - + except Exception as e: pass @@ -720,7 +720,7 @@ class Identity: RNS.log(f"Decryption by {RNS.prettyhexrep(self.hash)} failed: {e}", RNS.LOG_DEBUG) if ratchet_id_receiver: ratchet_id_receiver.latest_ratchet_id = None - + return plaintext; else: RNS.log("Decryption failed because the token size was invalid.", RNS.LOG_DEBUG) @@ -739,7 +739,7 @@ class Identity: """ if self.sig_prv != None: try: - return self.sig_prv.sign(message) + return self.sig_prv.sign(message) except Exception as e: RNS.log(f"The identity {self} could not sign the requested message. 
The contained exception was: {e}", RNS.LOG_ERROR) raise e @@ -770,7 +770,7 @@ class Identity: proof_data = signature else: proof_data = packet.packet_hash + signature - + if destination == None: destination = packet.generate_proof_destination() diff --git a/RNS/Interfaces/AX25KISSInterface.py b/RNS/Interfaces/AX25KISSInterface.py index cf99a0b..3f4bba1 100644 --- a/RNS/Interfaces/AX25KISSInterface.py +++ b/RNS/Interfaces/AX25KISSInterface.py @@ -80,7 +80,7 @@ class AX25KISSInterface(Interface): super().__init__() self.HW_MTU = 564 - + self.pyserial = serial self.serial = None self.owner = owner @@ -343,7 +343,7 @@ class AX25KISSInterface(Interface): self.online = False RNS.log(f"A serial port error occurred, the contained exception was: {e}", RNS.LOG_ERROR) RNS.log(f"The interface {self} experienced an unrecoverable error and is now offline.", RNS.LOG_ERROR) - + if RNS.Reticulum.panic_on_interface_error: RNS.panic() diff --git a/RNS/Interfaces/Android/KISSInterface.py b/RNS/Interfaces/Android/KISSInterface.py index 4c3c8cb..c732dc9 100644 --- a/RNS/Interfaces/Android/KISSInterface.py +++ b/RNS/Interfaces/Android/KISSInterface.py @@ -74,7 +74,7 @@ class KISSInterface(Interface): from usbserial4a import serial4a as serial self.parity = "N" - + else: RNS.log("Could not load USB serial module for Android, KISS interface cannot be created.", RNS.LOG_CRITICAL) RNS.log("You can install this module by issuing: pip install usbserial4a", RNS.LOG_CRITICAL) @@ -83,9 +83,9 @@ class KISSInterface(Interface): raise SystemError("Android-specific interface was used on non-Android OS") super().__init__() - + self.HW_MTU = 564 - + if beacon_data == None: beacon_data = "" @@ -172,7 +172,7 @@ class KISSInterface(Interface): self.serial.timeout = 0.1 elif vid == 0x10C4: # Hardware parameters for SiLabs CP210x @ 115200 baud - self.serial.DEFAULT_READ_BUFFER_SIZE = 64 + self.serial.DEFAULT_READ_BUFFER_SIZE = 64 self.serial.USB_READ_TIMEOUT_MILLIS = 12 self.serial.timeout = 0.012 elif vid == 0x1A86 and pid == 0x55D4: @@ -352,7 +352,7 @@ class KISSInterface(Interface): data_buffer = data_buffer+bytes([byte]) elif (command == KISS.CMD_READY): self.process_queue() - + if got == 0: time_since_last = int(time.time()*1000) - last_read_ms if len(data_buffer) > 0 and time_since_last > self.timeout: @@ -379,7 +379,7 @@ class KISSInterface(Interface): self.online = False RNS.log(f"A serial port error occurred, the contained exception was: {e}", RNS.LOG_ERROR) RNS.log(f"The interface {self} experienced an unrecoverable error and is now offline.", RNS.LOG_ERROR) - + if RNS.Reticulum.panic_on_interface_error: RNS.panic() diff --git a/RNS/Interfaces/Android/RNodeInterface.py b/RNS/Interfaces/Android/RNodeInterface.py index d600bd5..4a1aa35 100644 --- a/RNS/Interfaces/Android/RNodeInterface.py +++ b/RNS/Interfaces/Android/RNodeInterface.py @@ -42,7 +42,7 @@ class KISS(): FESC = 0xDB TFEND = 0xDC TFESC = 0xDD - + CMD_UNKNOWN = 0xFE CMD_DATA = 0x00 CMD_FREQUENCY = 0x01 @@ -78,11 +78,11 @@ class KISS(): DETECT_REQ = 0x73 DETECT_RESP = 0x46 - + RADIO_STATE_OFF = 0x00 RADIO_STATE_ON = 0x01 RADIO_STATE_ASK = 0xFF - + CMD_ERROR = 0x90 ERROR_INITRADIO = 0x01 ERROR_TXFAILED = 0x02 @@ -194,7 +194,7 @@ class AndroidBluetoothManager(): if self.rfcomm_reader != None: self.rfcomm_reader.close() self.rfcomm_reader = None - + if self.rfcomm_writer != None: self.rfcomm_writer.close() self.rfcomm_writer = None @@ -371,7 +371,7 @@ class RNodeInterface(Interface): else: self.bt_manager = None - + else: RNS.log("Could not load USB serial module for 
Android, RNode interface cannot be created.", RNS.LOG_CRITICAL) RNS.log("You can install this module by issuing: pip install usbserial4a", RNS.LOG_CRITICAL) @@ -382,7 +382,7 @@ class RNodeInterface(Interface): super().__init__() self.HW_MTU = 508 - + self.pyserial = serial self.serial = None self.owner = owner @@ -561,7 +561,7 @@ class RNodeInterface(Interface): # self.ble = BLEConnection(owner=self, target_name=self.ble_name, target_bt_addr=self.ble_addr) # self.serial = self.ble # RNS.log(f"New connection instance: "+str(self.ble)) - + def open_port(self): if not self.use_ble: if self.port != None: @@ -602,7 +602,7 @@ class RNodeInterface(Interface): self.serial.timeout = 0.1 elif vid == 0x10C4: # Hardware parameters for SiLabs CP210x @ 115200 baud - self.serial.DEFAULT_READ_BUFFER_SIZE = 64 + self.serial.DEFAULT_READ_BUFFER_SIZE = 64 self.serial.USB_READ_TIMEOUT_MILLIS = 12 self.serial.timeout = 0.012 elif vid == 0x1A86 and pid == 0x55D4: @@ -687,14 +687,14 @@ class RNodeInterface(Interface): RNS.log(f"After configuring {self}, the reported radio parameters did not match your configuration.", RNS.LOG_ERROR) RNS.log("Make sure that your hardware actually supports the parameters specified in the configuration", RNS.LOG_ERROR) RNS.log("Aborting RNode startup", RNS.LOG_ERROR) - + if self.serial != None: self.serial.close() if self.bt_manager != None: self.bt_manager.close() raise OSError("RNode interface did not pass configuration validation") - + def initRadio(self): self.setFrequency() @@ -702,22 +702,22 @@ class RNodeInterface(Interface): self.setBandwidth() time.sleep(0.15) - + self.setTXPower() time.sleep(0.15) - + self.setSpreadingFactor() time.sleep(0.15) - + self.setCodingRate() time.sleep(0.15) self.setSTALock() time.sleep(0.15) - + self.setLTALock() time.sleep(0.15) - + self.setRadioState(KISS.RADIO_STATE_ON) time.sleep(0.15) @@ -735,7 +735,7 @@ class RNodeInterface(Interface): written = self.write_mux(kiss_command) if written != len(kiss_command): raise OSError("An IO error occurred while sending host left command to device") - + def enable_bluetooth(self): kiss_command = bytes([KISS.FEND, KISS.CMD_BT_CTRL, 0x01, KISS.FEND]) written = self.write_mux(kiss_command) @@ -788,7 +788,7 @@ class RNodeInterface(Interface): data = line_byte+line_data escaped_data = KISS.escape(data) kiss_command = bytes([KISS.FEND])+bytes([KISS.CMD_FB_WRITE])+escaped_data+bytes([KISS.FEND]) - + written = self.write_mux(kiss_command) if written != len(kiss_command): raise OSError("An IO error occurred while writing framebuffer data device") @@ -883,7 +883,7 @@ class RNodeInterface(Interface): if (self.maj_version >= RNodeInterface.REQUIRED_FW_VER_MAJ): if (self.min_version >= RNodeInterface.REQUIRED_FW_VER_MIN): self.firmware_ok = True - + if self.firmware_ok: return @@ -1188,7 +1188,7 @@ class RNodeInterface(Interface): atl = command_buffer[2] << 8 | command_buffer[3] cus = command_buffer[4] << 8 | command_buffer[5] cul = command_buffer[6] << 8 | command_buffer[7] - + self.r_airtime_short = ats/100.0 self.r_airtime_long = atl/100.0 self.r_channel_load_short = cus/100.0 @@ -1289,10 +1289,10 @@ class RNodeInterface(Interface): if time.time() > self.first_tx + self.id_interval: RNS.log(f"Interface {self} is transmitting beacon data: {self.id_callsign.decode('utf-8')}", RNS.LOG_DEBUG) self.processOutgoing(self.id_callsign) - + if (time.time() - self.last_port_io > self.port_io_timeout): self.detect() - + if (time.time() - self.last_port_io > self.port_io_timeout*3): raise OSError(f"Connected port for {self} 
became unresponsive") @@ -1343,7 +1343,7 @@ class RNodeInterface(Interface): if self.last_imagedata != None: self.display_image(self.last_imagedata) self.enable_external_framebuffer() - + elif hasattr(self, "bt_manager") and self.bt_manager != None and self.bt_manager.connected: self.configure_device() if self.online: @@ -1504,7 +1504,7 @@ class BLEConnection(BluetoothDispatcher): self.write_characteristic(self.rx_char, data) else: time.sleep(0.1) - + except Exception as e: RNS.log("An error occurred in {self} write loop: {e}", RNS.LOG_ERROR) RNS.trace_exception(e) @@ -1552,7 +1552,7 @@ class BLEConnection(BluetoothDispatcher): self.owner.hw_errors.append({"error": KISS.ERROR_INVALID_BLE_MTU, "description": "The Bluetooth Low Energy transfer MTU could not be configured for the connected device, and communication has failed. Restart Reticulum and any connected applications to retry connecting."}) self.close() self.should_run = False - + self.close_gatt() self.connect_job_running = False @@ -1599,14 +1599,14 @@ class BLEConnection(BluetoothDispatcher): def on_services(self, status, services): if status == GATT_SUCCESS: self.rx_char = services.search(BLEConnection.UART_RX_CHAR_UUID) - + if self.rx_char is not None: self.tx_char = services.search(BLEConnection.UART_TX_CHAR_UUID) - if self.tx_char is not None: + if self.tx_char is not None: if self.enable_notifications(self.tx_char): RNS.log("Enabled notifications for BLE TX characteristic", RNS.LOG_DEBUG) - + RNS.log(f"Requesting BLE connection MTU update to {self.target_mtu}", RNS.LOG_DEBUG) self.mtu_requested_time = time.time() self.request_mtu(self.target_mtu) diff --git a/RNS/Interfaces/Android/SerialInterface.py b/RNS/Interfaces/Android/SerialInterface.py index db7d47a..4b62b28 100644 --- a/RNS/Interfaces/Android/SerialInterface.py +++ b/RNS/Interfaces/Android/SerialInterface.py @@ -64,7 +64,7 @@ class SerialInterface(Interface): from usbserial4a import serial4a as serial self.parity = "N" - + else: RNS.log("Could not load USB serial module for Android, Serial interface cannot be created.", RNS.LOG_CRITICAL) RNS.log("You can install this module by issuing: pip install usbserial4a", RNS.LOG_CRITICAL) @@ -75,7 +75,7 @@ class SerialInterface(Interface): super().__init__() self.HW_MTU = 564 - + self.pyserial = serial self.serial = None self.owner = owner @@ -145,7 +145,7 @@ class SerialInterface(Interface): self.serial.timeout = 0.1 elif vid == 0x10C4: # Hardware parameters for SiLabs CP210x @ 115200 baud - self.serial.DEFAULT_READ_BUFFER_SIZE = 64 + self.serial.DEFAULT_READ_BUFFER_SIZE = 64 self.serial.USB_READ_TIMEOUT_MILLIS = 12 self.serial.timeout = 0.012 elif vid == 0x1A86 and pid == 0x55D4: @@ -182,7 +182,7 @@ class SerialInterface(Interface): if self.online: data = bytes([HDLC.FLAG])+HDLC.escape(data)+bytes([HDLC.FLAG]) written = self.serial.write(data) - self.txb += len(data) + self.txb += len(data) if written != len(data): raise OSError(f"Serial interface only wrote {written} bytes of {len(data)}") @@ -217,7 +217,7 @@ class SerialInterface(Interface): byte = HDLC.ESC escape = False data_buffer = data_buffer+bytes([byte]) - + if got == 0: time_since_last = int(time.time()*1000) - last_read_ms if len(data_buffer) > 0 and time_since_last > self.timeout: @@ -225,12 +225,12 @@ class SerialInterface(Interface): in_frame = False escape = False # sleep(0.08) - + except Exception as e: self.online = False RNS.log(f"A serial port error occurred, the contained exception was: {e}", RNS.LOG_ERROR) RNS.log(f"The interface {self} experienced an 
unrecoverable error and is now offline.", RNS.LOG_ERROR) - + if RNS.Reticulum.panic_on_interface_error: RNS.panic() diff --git a/RNS/Interfaces/AutoInterface.py b/RNS/Interfaces/AutoInterface.py index 06ed77c..7737415 100644 --- a/RNS/Interfaces/AutoInterface.py +++ b/RNS/Interfaces/AutoInterface.py @@ -289,7 +289,7 @@ class AutoInterface(Interface): udp_server = socketserver.UDPServer(address, self.handler_factory(self.processIncoming)) self.interface_servers[ifname] = udp_server - + thread = threading.Thread(target=udp_server.serve_forever) thread.daemon = True thread.start() @@ -306,11 +306,11 @@ class AutoInterface(Interface): def discovery_handler(self, socket, ifname): def announce_loop(): self.announce_handler(ifname) - + thread = threading.Thread(target=announce_loop) thread.daemon = True thread.start() - + while True: data, ipv6_src = socket.recvfrom(1024) expected_hash = RNS.Identity.full_hash(self.group_id+ipv6_src[0].encode("utf-8")) @@ -396,13 +396,13 @@ class AutoInterface(Interface): self.carrier_changed = True RNS.log(f"{self} Carrier recovered on {ifname}", RNS.LOG_WARNING) self.timed_out_interfaces[ifname] = False - + def announce_handler(self, ifname): while True: self.peer_announce(ifname) time.sleep(self.announce_interval) - + def peer_announce(self, ifname): try: link_local_address = self.adopted_interfaces[ifname] @@ -414,7 +414,7 @@ class AutoInterface(Interface): announce_socket.setsockopt(socket.IPPROTO_IPV6, socket.IPV6_MULTICAST_IF, ifis) announce_socket.sendto(discovery_token, addr_info[0][4]) announce_socket.close() - + except Exception as e: if (ifname in self.timed_out_interfaces and self.timed_out_interfaces[ifname] == False) or not ifname in self.timed_out_interfaces: RNS.log(f"{self} Detected possible carrier loss on {ifname}: {e}", RNS.LOG_WARNING) @@ -471,9 +471,9 @@ class AutoInterface(Interface): except Exception as e: RNS.log(f"Could not transmit on {self}. 
The contained exception was: {e}", RNS.LOG_ERROR) - + self.txb += len(data) - + # Until per-device sub-interfacing is implemented, # ingress limiting should be disabled on AutoInterface diff --git a/RNS/Interfaces/I2PInterface.py b/RNS/Interfaces/I2PInterface.py index 7448bfa..626ba11 100644 --- a/RNS/Interfaces/I2PInterface.py +++ b/RNS/Interfaces/I2PInterface.py @@ -143,11 +143,11 @@ class I2PController: self.loop.ext_owner = self result = asyncio.run_coroutine_threadsafe(tunnel_up(), self.loop).result() - + if not i2p_destination in self.i2plib_tunnels: raise OSError("No tunnel control instance was created") - else: + else: tn = self.i2plib_tunnels[i2p_destination] if tn != None and hasattr(tn, "status"): @@ -183,7 +183,7 @@ class I2PController: except ConnectionRefusedError as e: raise e - + except ConnectionAbortedError as e: raise e @@ -222,13 +222,13 @@ class I2PController: elif isinstance(i2p_exception, RNS.vendor.i2plib.exceptions.KeyNotFound): RNS.log(f"The I2P daemon could not find the key for {i2p_destination}", RNS.LOG_ERROR) - + elif isinstance(i2p_exception, RNS.vendor.i2plib.exceptions.PeerNotFound): RNS.log(f"The I2P daemon mould not find the peer {i2p_destination}", RNS.LOG_ERROR) - + elif isinstance(i2p_exception, RNS.vendor.i2plib.exceptions.I2PError): RNS.log("The I2P daemon experienced an unspecified error", RNS.LOG_ERROR) - + elif isinstance(i2p_exception, RNS.vendor.i2plib.exceptions.Timeout): RNS.log(f"I2P daemon timed out while setting up client tunnel to {i2p_destination}", RNS.LOG_ERROR) @@ -320,7 +320,7 @@ class I2PController: elif i2p_exception != None: RNS.log("An error ocurred while setting up I2P tunnel", RNS.LOG_ERROR) - + if isinstance(i2p_exception, RNS.vendor.i2plib.exceptions.CantReachPeer): RNS.log(f"The I2P daemon can't reach peer {i2p_destination}", RNS.LOG_ERROR) @@ -338,13 +338,13 @@ class I2PController: elif isinstance(i2p_exception, RNS.vendor.i2plib.exceptions.KeyNotFound): RNS.log(f"The I2P daemon could not find the key for {i2p_destination}", RNS.LOG_ERROR) - + elif isinstance(i2p_exception, RNS.vendor.i2plib.exceptions.PeerNotFound): RNS.log(f"The I2P daemon mould not find the peer {i2p_destination}", RNS.LOG_ERROR) - + elif isinstance(i2p_exception, RNS.vendor.i2plib.exceptions.I2PError): RNS.log("The I2P daemon experienced an unspecified error", RNS.LOG_ERROR) - + elif isinstance(i2p_exception, RNS.vendor.i2plib.exceptions.Timeout): RNS.log(f"I2P daemon timed out while setting up client tunnel to {i2p_destination}", RNS.LOG_ERROR) @@ -393,7 +393,7 @@ class I2PInterfacePeer(Interface): super().__init__() self.HW_MTU = 1064 - + self.IN = True self.OUT = False self.socket = None @@ -492,7 +492,7 @@ class I2PInterfacePeer(Interface): while self.awaiting_i2p_tunnel: time.sleep(0.25) time.sleep(2) - + if not self.kiss_framing: self.wants_tunnel = True @@ -525,7 +525,7 @@ class I2PInterfacePeer(Interface): self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1) self.socket.setsockopt(socket.IPPROTO_TCP, TCP_KEEPIDLE, int(I2PInterfacePeer.I2P_PROBE_AFTER)) - + def shutdown_socket(self, target_socket): if callable(target_socket.close): try: @@ -538,15 +538,15 @@ class I2PInterfacePeer(Interface): if socket != None: target_socket.close() except Exception as e: - RNS.log(f"Error while closing socket for {self}: {e}") - + RNS.log(f"Error while closing socket for {self}: {e}") + def detach(self): RNS.log(f"Detaching {self}", RNS.LOG_DEBUG) if self.socket != None: if hasattr(self.socket, "close"): if callable(self.socket.close): self.detached 
= True - + try: self.socket.shutdown(socket.SHUT_RDWR) except Exception as e: @@ -564,7 +564,7 @@ class I2PInterfacePeer(Interface): self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM) self.socket.connect((self.target_ip, self.target_port)) self.online = True - + except Exception as e: if initial: if not self.awaiting_i2p_tunnel: @@ -572,7 +572,7 @@ class I2PInterfacePeer(Interface): RNS.log(f"Leaving unconnected and retrying connection in {I2PInterfacePeer.RECONNECT_WAIT} seconds.", RNS.LOG_ERROR) return False - + else: raise e @@ -580,7 +580,7 @@ class I2PInterfacePeer(Interface): self.set_timeouts_linux() elif platform.system() == "Darwin": self.set_timeouts_osx() - + self.online = True self.writing = False self.never_connected = False @@ -631,7 +631,7 @@ class I2PInterfacePeer(Interface): self.rxb += len(data) if hasattr(self, "parent_interface") and self.parent_interface != None and self.parent_count: self.parent_interface.rxb += len(data) - + self.owner.inbound(data, self) def processOutgoing(self, data): @@ -651,7 +651,7 @@ class I2PInterfacePeer(Interface): self.writing = False self.txb += len(data) self.last_write = time.time() - + if hasattr(self, "parent_interface") and self.parent_interface != None and self.parent_count: self.parent_interface.txb += len(data) @@ -686,7 +686,7 @@ class I2PInterfacePeer(Interface): RNS.log(f"An error ocurred while sending I2P keepalive. The contained exception was: {e}", RNS.LOG_ERROR) self.shutdown_socket(self.socket) should_run = False - + if (time.time()-self.last_read > I2PInterfacePeer.I2P_READ_TIMEOUT): RNS.log("I2P socket is unresponsive, restarting...", RNS.LOG_WARNING) if self.socket != None: @@ -790,7 +790,7 @@ class I2PInterfacePeer(Interface): break - + except Exception as e: self.online = False RNS.log(f"An interface error occurred for {self}, the contained exception was: {e}", RNS.LOG_WARNING) @@ -832,7 +832,7 @@ class I2PInterface(Interface): def __init__(self, owner, name, rns_storagepath, peers, connectable = False, ifac_size = 16, ifac_netname = None, ifac_netkey = None): super().__init__() - + self.HW_MTU = 1064 self.online = False @@ -882,7 +882,7 @@ class I2PInterface(Interface): def createHandler(*args, **keys): return I2PInterfaceHandler(callback, *args, **keys) return createHandler - + ThreadingI2PServer.allow_reuse_address = True self.server = ThreadingI2PServer(self.address, handlerFactory(self.incoming_connection)) diff --git a/RNS/Interfaces/Interface.py b/RNS/Interfaces/Interface.py index e72e9c8..01d5c5f 100755 --- a/RNS/Interfaces/Interface.py +++ b/RNS/Interfaces/Interface.py @@ -146,7 +146,7 @@ class Interface: def release(): RNS.Transport.inbound(selected_announce_packet.raw, selected_announce_packet.receiving_interface) threading.Thread(target=release, daemon=True).start() - + except Exception as e: RNS.log(f"An error occurred while processing held announces for {self}", RNS.LOG_ERROR) RNS.log(f"The contained exception was: {e}", RNS.LOG_ERROR) @@ -170,7 +170,7 @@ class Interface: for i in range(1,dq_len): delta_sum += self.ia_freq_deque[i]-self.ia_freq_deque[i-1] delta_sum += time.time() - self.ia_freq_deque[dq_len-1] - + if delta_sum == 0: avg = 0 else: @@ -187,7 +187,7 @@ class Interface: for i in range(1,dq_len): delta_sum += self.oa_freq_deque[i]-self.oa_freq_deque[i-1] delta_sum += time.time() - self.oa_freq_deque[dq_len-1] - + if delta_sum == 0: avg = 0 else: diff --git a/RNS/Interfaces/KISSInterface.py b/RNS/Interfaces/KISSInterface.py index 13c9197..a37e0bb 100644 --- 
a/RNS/Interfaces/KISSInterface.py +++ b/RNS/Interfaces/KISSInterface.py @@ -71,9 +71,9 @@ class KISSInterface(Interface): RNS.panic() super().__init__() - + self.HW_MTU = 564 - + if beacon_data == None: beacon_data = "" @@ -218,7 +218,7 @@ class KISSInterface(Interface): def processIncoming(self, data): - self.rxb += len(data) + self.rxb += len(data) self.owner.inbound(data, self) @@ -325,7 +325,7 @@ class KISSInterface(Interface): self.online = False RNS.log(f"A serial port error occurred, the contained exception was: {e}", RNS.LOG_ERROR) RNS.log(f"The interface {self} experienced an unrecoverable error and is now offline.", RNS.LOG_ERROR) - + if RNS.Reticulum.panic_on_interface_error: RNS.panic() diff --git a/RNS/Interfaces/LocalInterface.py b/RNS/Interfaces/LocalInterface.py index 1fedf00..3c197be 100644 --- a/RNS/Interfaces/LocalInterface.py +++ b/RNS/Interfaces/LocalInterface.py @@ -58,11 +58,11 @@ class LocalClientInterface(Interface): # TODO: Remove at some point # self.rxptime = 0 - + self.HW_MTU = 1064 self.online = False - + self.IN = True self.OUT = False self.socket = None @@ -146,7 +146,7 @@ class LocalClientInterface(Interface): time.sleep(LocalClientInterface.RECONNECT_WAIT+2) RNS.Transport.shared_connection_reappeared() threading.Thread(target=job, daemon=True).start() - + else: RNS.log("Attempt to reconnect on a non-initiator shared local interface. This should not happen.", RNS.LOG_ERROR) raise OSError("Attempt to reconnect on a non-initiator local interface") @@ -156,10 +156,10 @@ class LocalClientInterface(Interface): self.rxb += len(data) if hasattr(self, "parent_interface") and self.parent_interface != None: self.parent_interface.rxb += len(data) - + # TODO: Remove at some point # processing_start = time.time() - + self.owner.inbound(data, self) # TODO: Remove at some point @@ -234,7 +234,7 @@ class LocalClientInterface(Interface): break - + except Exception as e: self.online = False RNS.log(f"An interface error occurred, the contained exception was: {e}", RNS.LOG_ERROR) @@ -247,7 +247,7 @@ class LocalClientInterface(Interface): if callable(self.socket.close): RNS.log(f"Detaching {self}", RNS.LOG_DEBUG) self.detached = True - + try: self.socket.shutdown(socket.SHUT_RDWR) except Exception as e: @@ -283,7 +283,7 @@ class LocalClientInterface(Interface): if self.is_connected_to_shared_instance: if nowarning == False: RNS.log("Permanently lost connection to local shared RNS instance. 
Exiting now.", RNS.LOG_CRITICAL) - + RNS.exit() @@ -297,7 +297,7 @@ class LocalServerInterface(Interface): super().__init__() self.online = False self.clients = 0 - + self.IN = True self.OUT = False self.name = "Reticulum" diff --git a/RNS/Interfaces/PipeInterface.py b/RNS/Interfaces/PipeInterface.py index 87c67f2..5004a74 100644 --- a/RNS/Interfaces/PipeInterface.py +++ b/RNS/Interfaces/PipeInterface.py @@ -49,7 +49,7 @@ class PipeInterface(Interface): owner = None command = None - + def __init__(self, owner, name, command, respawn_delay): if respawn_delay == None: respawn_delay = 5 @@ -57,7 +57,7 @@ class PipeInterface(Interface): super().__init__() self.HW_MTU = 1064 - + self.owner = owner self.name = name self.command = command @@ -83,7 +83,7 @@ class PipeInterface(Interface): def open_pipe(self): RNS.log(f"Connecting subprocess pipe for {self}...", RNS.LOG_VERBOSE) - + try: self.process = subprocess.Popen(shlex.split(self.command), stdin=subprocess.PIPE, stdout=subprocess.PIPE) self.pipe_is_open = True @@ -102,7 +102,7 @@ class PipeInterface(Interface): def processIncoming(self, data): - self.rxb += len(data) + self.rxb += len(data) self.owner.inbound(data, self) @@ -111,7 +111,7 @@ class PipeInterface(Interface): data = bytes([HDLC.FLAG])+HDLC.escape(data)+bytes([HDLC.FLAG]) written = self.process.stdin.write(data) self.process.stdin.flush() - self.txb += len(data) + self.txb += len(data) if written != len(data): raise OSError(f"Pipe interface only wrote {written} bytes of {len(data)}") @@ -152,7 +152,7 @@ class PipeInterface(Interface): RNS.log(f"Subprocess terminated on {self}") self.process.kill() - + except Exception as e: self.online = False try: @@ -162,7 +162,7 @@ class PipeInterface(Interface): RNS.log(f"A pipe error occurred, the contained exception was: {e}", RNS.LOG_ERROR) RNS.log(f"The interface {self} experienced an unrecoverable error and is now offline.", RNS.LOG_ERROR) - + if RNS.Reticulum.panic_on_interface_error: RNS.panic() diff --git a/RNS/Interfaces/RNodeInterface.py b/RNS/Interfaces/RNodeInterface.py index 9d955d5..83b241c 100644 --- a/RNS/Interfaces/RNodeInterface.py +++ b/RNS/Interfaces/RNodeInterface.py @@ -33,7 +33,7 @@ class KISS(): FESC = 0xDB TFEND = 0xDC TFESC = 0xDD - + CMD_UNKNOWN = 0xFE CMD_DATA = 0x00 CMD_FREQUENCY = 0x01 @@ -69,11 +69,11 @@ class KISS(): DETECT_REQ = 0x73 DETECT_RESP = 0x46 - + RADIO_STATE_OFF = 0x00 RADIO_STATE_ON = 0x01 RADIO_STATE_ASK = 0xFF - + CMD_ERROR = 0x90 ERROR_INITRADIO = 0x01 ERROR_TXFAILED = 0x02 @@ -91,7 +91,7 @@ class KISS(): data = data.replace(bytes([0xdb]), bytes([0xdb, 0xdd])) data = data.replace(bytes([0xc0]), bytes([0xdb, 0xdc])) return data - + class RNodeInterface(Interface): MAX_CHUNK = 32768 @@ -132,7 +132,7 @@ class RNodeInterface(Interface): super().__init__() self.HW_MTU = 508 - + self.pyserial = serial self.serial = None self.owner = owner @@ -287,7 +287,7 @@ class RNodeInterface(Interface): write_timeout = None, dsrdtr = False, ) - + else: RNS.log(f"Opening BLE connection for {self}...") if self.ble == None: @@ -325,7 +325,7 @@ class RNodeInterface(Interface): detect_time = RNS.prettytime(time.time()-detect_time) else: RNS.log(f"RNode detect timed out over {self.port}", RNS.LOG_ERROR) - + if not self.detected: RNS.log(f"Could not detect device for {self}", RNS.LOG_ERROR) self.serial.close() @@ -346,7 +346,7 @@ class RNodeInterface(Interface): RNS.log("Make sure that your hardware actually supports the parameters specified in the configuration", RNS.LOG_ERROR) RNS.log("Aborting RNode startup", 
RNS.LOG_ERROR) self.serial.close() - + def initRadio(self): self.setFrequency() @@ -366,13 +366,13 @@ class RNodeInterface(Interface): written = self.serial.write(kiss_command) if written != len(kiss_command): raise OSError(f"An IO error occurred while detecting hardware for {self}") - + def leave(self): kiss_command = bytes([KISS.FEND, KISS.CMD_LEAVE, 0xFF, KISS.FEND]) written = self.serial.write(kiss_command) if written != len(kiss_command): raise OSError("An IO error occurred while sending host left command to device") - + def enable_external_framebuffer(self): if self.display != None: kiss_command = bytes([KISS.FEND, KISS.CMD_FB_EXT, 0x01, KISS.FEND]) @@ -406,7 +406,7 @@ class RNodeInterface(Interface): data = line_byte+line_data escaped_data = KISS.escape(data) kiss_command = bytes([KISS.FEND])+bytes([KISS.CMD_FB_WRITE])+escaped_data+bytes([KISS.FEND]) - + written = self.serial.write(kiss_command) if written != len(kiss_command): raise OSError("An IO error occurred while writing framebuffer data device") @@ -501,7 +501,7 @@ class RNodeInterface(Interface): if (self.maj_version >= RNodeInterface.REQUIRED_FW_VER_MAJ): if (self.min_version >= RNodeInterface.REQUIRED_FW_VER_MIN): self.firmware_ok = True - + if self.firmware_ok: return @@ -784,7 +784,7 @@ class RNodeInterface(Interface): atl = command_buffer[2] << 8 | command_buffer[3] cus = command_buffer[4] << 8 | command_buffer[5] cul = command_buffer[6] << 8 | command_buffer[7] - + self.r_airtime_short = ats/100.0 self.r_airtime_long = atl/100.0 self.r_channel_load_short = cus/100.0 @@ -870,7 +870,7 @@ class RNodeInterface(Interface): self.detected = True else: self.detected = False - + else: time_since_last = int(time.time()*1000) - last_read_ms if len(data_buffer) > 0 and time_since_last > self.timeout: @@ -928,7 +928,7 @@ class RNodeInterface(Interface): self.disable_external_framebuffer() self.setRadioState(KISS.RADIO_STATE_OFF) self.leave() - + if self.use_ble: self.ble.close() @@ -1021,7 +1021,7 @@ class BLEConnection(): if importlib.util.find_spec("bleak") != None: import bleak BLEConnection.bleak = bleak - + import asyncio BLEConnection.asyncio = asyncio else: @@ -1100,7 +1100,7 @@ class BLEConnection(): else: if self.target_bt_addr != None and device.address == self.target_bt_addr: RNS.log(f"Can't connect to target device {self.target_bt_addr} over BLE, device is not bonded", RNS.LOG_ERROR) - + elif self.target_name != None and device.name == self.target_name: RNS.log(f"Can't connect to target device {self.target_name} over BLE, device is not bonded", RNS.LOG_ERROR) @@ -1115,7 +1115,7 @@ class BLEConnection(): if "props" in device.details and "Bonded" in device.details["props"]: if device.details["props"]["Bonded"] == True: return True - + except Exception as e: RNS.log(f"Error while determining device bond status for {device}, the contained exception was: {e}", RNS.LOG_ERROR) diff --git a/RNS/Interfaces/RNodeMultiInterface.py b/RNS/Interfaces/RNodeMultiInterface.py index 021bbb1..96f4084 100644 --- a/RNS/Interfaces/RNodeMultiInterface.py +++ b/RNS/Interfaces/RNodeMultiInterface.py @@ -33,7 +33,7 @@ class KISS(): FESC = 0xDB TFEND = 0xDC TFESC = 0xDD - + CMD_UNKNOWN = 0xFE CMD_FREQUENCY = 0x01 CMD_BANDWIDTH = 0x02 @@ -94,11 +94,11 @@ class KISS(): DETECT_REQ = 0x73 DETECT_RESP = 0x46 - + RADIO_STATE_OFF = 0x00 RADIO_STATE_ON = 0x01 RADIO_STATE_ASK = 0xFF - + CMD_ERROR = 0x90 ERROR_INITRADIO = 0x01 ERROR_TXFAILED = 0x02 @@ -159,7 +159,7 @@ class KISS(): data = data.replace(bytes([0xdb]), bytes([0xdb, 0xdd])) data = 
data.replace(bytes([0xc0]), bytes([0xdb, 0xdc])) return data - + class RNodeMultiInterface(Interface): MAX_CHUNK = 32768 @@ -188,7 +188,7 @@ class RNodeMultiInterface(Interface): super().__init__() self.HW_MTU = 508 - + self.clients = 0 self.pyserial = serial self.serial = None @@ -294,7 +294,7 @@ class RNodeMultiInterface(Interface): self.detect() sleep(0.2) - + if not self.detected: RNS.log(f"Could not detect device for {self}", RNS.LOG_ERROR) self.serial.close() @@ -327,7 +327,7 @@ class RNodeMultiInterface(Interface): interface.OUT = subint[10] interface.IN = True - + interface.announce_rate_target = self.announce_rate_target interface.mode = self.mode interface.HW_MTU = self.HW_MTU @@ -345,13 +345,13 @@ class RNodeMultiInterface(Interface): written = self.serial.write(kiss_command) if written != len(kiss_command): raise OSError(f"An IO error occurred while detecting hardware for {self}") - + def leave(self): kiss_command = bytes([KISS.FEND, KISS.CMD_LEAVE, 0xFF, KISS.FEND]) written = self.serial.write(kiss_command) if written != len(kiss_command): raise OSError("An IO error occurred while sending host left command to device") - + def enable_external_framebuffer(self): if self.display != None: kiss_command = bytes([KISS.FEND, KISS.CMD_FB_EXT, 0x01, KISS.FEND]) @@ -385,7 +385,7 @@ class RNodeMultiInterface(Interface): data = line_byte+line_data escaped_data = KISS.escape(data) kiss_command = bytes([KISS.FEND])+bytes([KISS.CMD_FB_WRITE])+escaped_data+bytes([KISS.FEND]) - + written = self.serial.write(kiss_command) if written != len(kiss_command): raise OSError("An IO error occurred while writing framebuffer data device") @@ -485,7 +485,7 @@ class RNodeMultiInterface(Interface): if (self.maj_version >= RNodeMultiInterface.REQUIRED_FW_VER_MAJ): if (self.min_version >= RNodeMultiInterface.REQUIRED_FW_VER_MIN): self.firmware_ok = True - + if self.firmware_ok: return @@ -737,7 +737,7 @@ class RNodeMultiInterface(Interface): atl = command_buffer[2] << 8 | command_buffer[3] cus = command_buffer[4] << 8 | command_buffer[5] cul = command_buffer[6] << 8 | command_buffer[7] - + self.r_airtime_short = ats/100.0 self.r_airtime_long = atl/100.0 self.r_channel_load_short = cus/100.0 @@ -804,7 +804,7 @@ class RNodeMultiInterface(Interface): # add the interface to the back of the list, they're all given from vport 0 and up in order self.subinterface_types.append(KISS.interface_type_to_str(command_buffer[1])) command_buffer = b"" - + else: time_since_last = int(time.time()*1000) - last_read_ms if len(data_buffer) > 0 and time_since_last > self.timeout: @@ -918,7 +918,7 @@ class RNodeSubInterface(Interface): RNS.panic() super().__init__() - + if index == 0: sel_cmd = KISS.CMD_SEL_INT0 data_cmd= KISS.CMD_INT0_DATA @@ -1079,7 +1079,7 @@ class RNodeSubInterface(Interface): RNS.log(f"After configuring {self}, the reported radio parameters did not match your configuration.", RNS.LOG_ERROR) RNS.log("Make sure that your hardware actually supports the parameters specified in the configuration", RNS.LOG_ERROR) RNS.log("Aborting RNode startup", RNS.LOG_ERROR) - + def initRadio(self): self.parent_interface.setFrequency(self.frequency, self) diff --git a/RNS/Interfaces/SerialInterface.py b/RNS/Interfaces/SerialInterface.py index e69b347..63ee725 100755 --- a/RNS/Interfaces/SerialInterface.py +++ b/RNS/Interfaces/SerialInterface.py @@ -63,7 +63,7 @@ class SerialInterface(Interface): super().__init__() self.HW_MTU = 564 - + self.pyserial = serial self.serial = None self.owner = owner @@ -122,7 +122,7 @@ class 
SerialInterface(Interface): def processIncoming(self, data): - self.rxb += len(data) + self.rxb += len(data) self.owner.inbound(data, self) @@ -130,7 +130,7 @@ class SerialInterface(Interface): if self.online: data = bytes([HDLC.FLAG])+HDLC.escape(data)+bytes([HDLC.FLAG]) written = self.serial.write(data) - self.txb += len(data) + self.txb += len(data) if written != len(data): raise OSError(f"Serial interface only wrote {written} bytes of {len(data)}") @@ -164,7 +164,7 @@ class SerialInterface(Interface): byte = HDLC.ESC escape = False data_buffer = data_buffer+bytes([byte]) - + else: time_since_last = int(time.time()*1000) - last_read_ms if len(data_buffer) > 0 and time_since_last > self.timeout: @@ -172,12 +172,12 @@ class SerialInterface(Interface): in_frame = False escape = False sleep(0.08) - + except Exception as e: self.online = False RNS.log(f"A serial port error occurred, the contained exception was: {e}", RNS.LOG_ERROR) RNS.log(f"The interface {self} experienced an unrecoverable error and is now offline.", RNS.LOG_ERROR) - + if RNS.Reticulum.panic_on_interface_error: RNS.panic() diff --git a/RNS/Interfaces/TCPInterface.py b/RNS/Interfaces/TCPInterface.py index fbdb3f3..306f35d 100644 --- a/RNS/Interfaces/TCPInterface.py +++ b/RNS/Interfaces/TCPInterface.py @@ -80,9 +80,9 @@ class TCPClientInterface(Interface): def __init__(self, owner, name, target_ip=None, target_port=None, connected_socket=None, max_reconnect_tries=None, kiss_framing=False, i2p_tunneled = False, connect_timeout = None): super().__init__() - + self.HW_MTU = 1064 - + self.IN = True self.OUT = False self.socket = None @@ -99,7 +99,7 @@ class TCPClientInterface(Interface): self.i2p_tunneled = i2p_tunneled self.mode = RNS.Interfaces.Interface.Interface.MODE_FULL self.bitrate = TCPClientInterface.BITRATE_GUESS - + if max_reconnect_tries == None: self.max_reconnect_tries = TCPClientInterface.RECONNECT_MAX_TRIES else: @@ -128,14 +128,14 @@ class TCPClientInterface(Interface): self.connect_timeout = connect_timeout else: self.connect_timeout = TCPClientInterface.INITIAL_CONNECT_TIMEOUT - + if TCPClientInterface.SYNCHRONOUS_START: self.initial_connect() else: thread = threading.Thread(target=self.initial_connect) thread.daemon = True thread.start() - + def initial_connect(self): if not self.connect(initial=True): thread = threading.Thread(target=self.reconnect) @@ -170,19 +170,19 @@ class TCPClientInterface(Interface): TCP_KEEPIDLE = 0x10 self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1) - + if not self.i2p_tunneled: self.socket.setsockopt(socket.IPPROTO_TCP, TCP_KEEPIDLE, int(TCPClientInterface.TCP_PROBE_AFTER)) else: self.socket.setsockopt(socket.IPPROTO_TCP, TCP_KEEPIDLE, int(TCPClientInterface.I2P_PROBE_AFTER)) - + def detach(self): if self.socket != None: if hasattr(self.socket, "close"): if callable(self.socket.close): RNS.log(f"Detaching {self}", RNS.LOG_DEBUG) self.detached = True - + try: self.socket.shutdown(socket.SHUT_RDWR) except Exception as e: @@ -209,13 +209,13 @@ class TCPClientInterface(Interface): if initial: RNS.log(f"TCP connection for {self} established", RNS.LOG_DEBUG) - + except Exception as e: if initial: RNS.log(f"Initial connection for {self} could not be established: {e}", RNS.LOG_ERROR) RNS.log(f"Leaving unconnected and retrying connection in {TCPClientInterface.RECONNECT_WAIT} seconds.", RNS.LOG_ERROR) return False - + else: raise e @@ -223,7 +223,7 @@ class TCPClientInterface(Interface): self.set_timeouts_linux() elif platform.system() == "Darwin": self.set_timeouts_osx() - 
+ self.online = True self.writing = False self.never_connected = False @@ -269,7 +269,7 @@ class TCPClientInterface(Interface): self.rxb += len(data) if hasattr(self, "parent_interface") and self.parent_interface != None: self.parent_interface.rxb += len(data) - + self.owner.inbound(data, self) def processOutgoing(self, data): @@ -369,7 +369,7 @@ class TCPClientInterface(Interface): break - + except Exception as e: self.online = False RNS.log(f"An interface error occurred for {self}, the contained exception was: {e}", RNS.LOG_WARNING) @@ -427,7 +427,7 @@ class TCPServerInterface(Interface): self.online = False self.clients = 0 - + self.IN = True self.OUT = False self.name = name @@ -474,7 +474,7 @@ class TCPServerInterface(Interface): spawned_interface.target_port = str(handler.client_address[1]) spawned_interface.parent_interface = self spawned_interface.bitrate = self.bitrate - + spawned_interface.ifac_size = self.ifac_size spawned_interface.ifac_netname = self.ifac_netname spawned_interface.ifac_netkey = self.ifac_netkey diff --git a/RNS/Interfaces/UDPInterface.py b/RNS/Interfaces/UDPInterface.py index d407a78..391a4ec 100644 --- a/RNS/Interfaces/UDPInterface.py +++ b/RNS/Interfaces/UDPInterface.py @@ -99,7 +99,7 @@ class UDPInterface(Interface): udp_socket.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1) udp_socket.sendto(data, (self.forward_ip, self.forward_port)) self.txb += len(data) - + except Exception as e: RNS.log(f"Could not transmit on {self}. The contained exception was: {e}", RNS.LOG_ERROR) diff --git a/RNS/Link.py b/RNS/Link.py index a74a444..8d9fbc0 100644 --- a/RNS/Link.py +++ b/RNS/Link.py @@ -124,7 +124,7 @@ class Link: RNS.Transport.register_link(link) link.last_inbound = time.time() link.start_watchdog() - + RNS.log(f"Incoming link request {link} accepted on {link.attached_interface}", RNS.LOG_DEBUG) return link @@ -189,7 +189,7 @@ class Link: self.sig_prv = Ed25519PrivateKey.generate() self.fernet = None - + self.pub = self.prv.public_key() self.pub_bytes = self.pub.public_bytes() @@ -288,7 +288,7 @@ class Link: self.establishment_cost += len(packet.raw) signed_data = self.link_id+self.peer_pub_bytes+self.peer_sig_pub_bytes signature = packet.data[:RNS.Identity.SIGLENGTH//8] - + if self.destination.identity.validate(signature, signed_data): if self.status != Link.HANDSHAKE: raise OSError(f"Invalid link state for proof validation: {self.status}") @@ -301,7 +301,7 @@ class Link: self.last_proof = self.activated_at RNS.Transport.activate_link(self) RNS.log(f"Link {self} established with {self.destination}, RTT is {round(self.rtt, 3)}s", RNS.LOG_VERBOSE) - + if self.rtt != None and self.establishment_cost != None and self.rtt > 0 and self.establishment_cost > 0: self.establishment_rate = self.establishment_cost/self.rtt @@ -316,7 +316,7 @@ class Link: thread.start() else: RNS.log(f"Invalid link proof signature received by {self}. 
Ignoring.", RNS.LOG_DEBUG) - + except Exception as e: self.status = Link.CLOSED RNS.log(f"An error ocurred while validating link request proof on {self}.", RNS.LOG_ERROR) @@ -377,7 +377,7 @@ class Link: timeout = timeout, request_size = len(packed_request), ) - + else: request_id = RNS.Identity.truncated_hash(packed_request) RNS.log(f"Sending request {RNS.prettyhexrep(request_id)} as resource.", RNS.LOG_DEBUG) @@ -547,7 +547,7 @@ class Link: resource.cancel() if self._channel: self._channel._shutdown() - + self.prv = None self.pub = None self.pub_bytes = None @@ -620,7 +620,7 @@ class Link: self.status = Link.STALE else: sleep_time = self.keepalive - + else: sleep_time = (last_inbound + self.keepalive) - time.time() @@ -655,7 +655,7 @@ class Link: self.snr = packet.snr if packet.q != None: self.q = packet.q - + def send_keepalive(self): keepalive_packet = RNS.Packet(self, bytes([0xFF]), context=RNS.Packet.KEEPALIVE) keepalive_packet.send() @@ -782,7 +782,7 @@ class Link: thread = threading.Thread(target=self.callbacks.packet, args=(plaintext, packet)) thread.daemon = True thread.start() - + if self.destination.proof_strategy == RNS.Destination.PROVE_ALL: packet.prove() should_query = True @@ -815,7 +815,7 @@ class Link: self.callbacks.remote_identified(self, self.__remote_identity) except Exception as e: RNS.log(f"Error while executing remote identified callback from {self}. The contained exception was: {e}", RNS.LOG_ERROR) - + self.__update_phy_stats(packet, query_shared=True) elif packet.context == RNS.Packet.REQUEST: @@ -903,7 +903,7 @@ class Link: if not packet.packet_hash in resource.req_hashlist: resource.req_hashlist.append(packet.packet_hash) resource.request(plaintext) - + # TODO: Test and possibly enable this at some point # def request_job(): # resource.request(plaintext) @@ -984,7 +984,7 @@ class Link: try: if not self.fernet: self.fernet = Fernet(self.derived_key) - + return self.fernet.decrypt(ciphertext) except Exception as e: @@ -1140,7 +1140,7 @@ class RequestReceipt(): elif self.resource != None: self.hash = resource.request_id resource.set_callback(self.request_resource_concluded) - + self.link = link self.request_id = self.hash self.request_size = request_size @@ -1224,7 +1224,7 @@ class RequestReceipt(): self.packet_receipt.callbacks.delivery(self.packet_receipt) self.progress = resource.get_progress() - + if self.callbacks.progress != None: try: self.callbacks.progress(self) @@ -1233,7 +1233,7 @@ class RequestReceipt(): else: resource.cancel() - + def response_received(self, response): if not self.status == RequestReceipt.FAILED: self.progress = 1.0 diff --git a/RNS/Packet.py b/RNS/Packet.py index 03eccf0..8c909ad 100755 --- a/RNS/Packet.py +++ b/RNS/Packet.py @@ -97,7 +97,7 @@ class Packet: # the below calculation; 383 bytes. ENCRYPTED_MDU = math.floor((RNS.Reticulum.MDU-RNS.Identity.FERNET_OVERHEAD-RNS.Identity.KEYSIZE//16)/RNS.Identity.AES128_BLOCKSIZE)*RNS.Identity.AES128_BLOCKSIZE - 1 """ - The maximum size of the payload data in a single encrypted packet + The maximum size of the payload data in a single encrypted packet """ PLAIN_MDU = MDU """ @@ -256,7 +256,7 @@ class Packet: def send(self): """ Sends the packet. - + :returns: A :ref:`RNS.PacketReceipt` instance if *create_receipt* was set to *True* when the packet was instantiated, if not returns *None*. If the packet could not be sent *False* is returned. 
""" if not self.sent: @@ -278,21 +278,21 @@ class Packet: self.sent = False self.receipt = None return False - + else: raise OSError("Packet was already sent") def resend(self): """ Re-sends the packet. - + :returns: A :ref:`RNS.PacketReceipt` instance if *create_receipt* was set to *True* when the packet was instantiated, if not returns *None*. If the packet could not be sent *False* is returned. """ if self.sent: # Re-pack the packet to obtain new ciphertext for # encrypted destinations self.pack() - + if RNS.Transport.outbound(self): return self.receipt else: @@ -388,7 +388,7 @@ class PacketReceipt: def get_status(self): """ - :returns: The status of the associated :ref:`RNS.Packet` instance. Can be one of ``RNS.PacketReceipt.SENT``, ``RNS.PacketReceipt.DELIVERED``, ``RNS.PacketReceipt.FAILED`` or ``RNS.PacketReceipt.CULLED``. + :returns: The status of the associated :ref:`RNS.Packet` instance. Can be one of ``RNS.PacketReceipt.SENT``, ``RNS.PacketReceipt.DELIVERED``, ``RNS.PacketReceipt.FAILED`` or ``RNS.PacketReceipt.CULLED``. """ return self.status @@ -422,7 +422,7 @@ class PacketReceipt: RNS.log(f"An error occurred while evaluating external delivery callback for {link}", RNS.LOG_ERROR) RNS.log(f"The contained exception was: {e}", RNS.LOG_ERROR) RNS.trace_exception(e) - + return True else: return False @@ -490,7 +490,7 @@ class PacketReceipt: self.callbacks.delivery(self) except Exception as e: RNS.log(f"Error while executing proof validated callback. The contained exception was: {e}", RNS.LOG_ERROR) - + return True else: return False @@ -524,7 +524,7 @@ class PacketReceipt: def set_timeout(self, timeout): """ Sets a timeout in seconds - + :param timeout: The timeout in seconds. """ self.timeout = float(timeout) diff --git a/RNS/Resolver.py b/RNS/Resolver.py index cd8cb36..6a36079 100644 --- a/RNS/Resolver.py +++ b/RNS/Resolver.py @@ -21,7 +21,7 @@ # SOFTWARE. class Resolver: - + @staticmethod def resolve_identity(full_name): pass \ No newline at end of file diff --git a/RNS/Resource.py b/RNS/Resource.py index 7632ca2..ce4da7a 100644 --- a/RNS/Resource.py +++ b/RNS/Resource.py @@ -59,11 +59,11 @@ class Resource: # The maximum window size for transfers on fast links WINDOW_MAX_FAST = 75 - + # For calculating maps and guard segments, this # must be set to the global maximum window. WINDOW_MAX = WINDOW_MAX_FAST - + # If the fast rate is sustained for this many request # rounds, the fast link window size will be allowed. FAST_RATE_THRESHOLD = WINDOW_MAX_SLOW - WINDOW - 2 @@ -111,7 +111,7 @@ class Resource: # fit in 3 bytes in resource advertisements. MAX_EFFICIENT_SIZE = 16 * 1024 * 1024 - 1 RESPONSE_MAX_GRACE_TIME = 10 - + # The maximum size to auto-compress with # bz2 before sending. 
AUTO_COMPRESS_MAX_SIZE = MAX_EFFICIENT_SIZE @@ -185,7 +185,7 @@ class Resource: resource.waiting_for_hmu = False resource.receiving_part = False resource.consecutive_completed_height = -1 - + if not resource.link.has_incoming_resource(resource): resource.link.register_incoming_resource(resource) @@ -256,7 +256,7 @@ class Resource: data_size = len(data) self.grand_total_parts = math.ceil(data_size/Resource.SDU) self.total_size = data_size - + resource_data = data self.total_segments = 1 self.segment_index = 1 @@ -323,7 +323,7 @@ class Resource: self.data = b"" self.data += RNS.Identity.get_random_hash()[:Resource.RANDOM_HASH_SIZE] self.data += self.compressed_data - + self.compressed = True self.uncompressed_data = None @@ -348,7 +348,7 @@ class Resource: self.size = len(self.data) self.sent_parts = 0 hashmap_entries = int(math.ceil(self.size/float(Resource.SDU))) - + hashmap_ok = False while not hashmap_ok: hashmap_computation_began = time.time() @@ -389,12 +389,12 @@ class Resource: self.parts.append(part) RNS.log(f"Hashmap computation concluded in {round(time.time() - hashmap_computation_began, 3)} seconds", RNS.LOG_DEBUG) - + if advertise: self.advertise() else: self.receive_lock = Lock() - + def hashmap_update_packet(self, plaintext): if not self.status == Resource.FAILED: @@ -489,7 +489,7 @@ class Resource: except Exception as e: RNS.log(f"Could not resend advertisement packet, cancelling resource. The contained exception was: {e}", RNS.LOG_VERBOSE) self.cancel() - + elif self.status == Resource.TRANSFERRING: if not self.initiator: @@ -504,7 +504,7 @@ class Resource: retries_used = self.max_retries - self.retries_left extra_wait = retries_used * Resource.PER_RETRY_DELAY sleep_time = self.last_activity + (rtt*(self.part_timeout_factor+window_remaining)) + Resource.RETRY_GRACE_TIME + extra_wait - time.time() - + if sleep_time < 0: if self.retries_left > 0: ms = "" if self.outstanding_parts == 1 else "s" @@ -554,7 +554,7 @@ class Resource: if sleep_time == None or sleep_time < 0: RNS.log("Timing error, cancelling resource transfer.", RNS.LOG_ERROR) self.cancel() - + if sleep_time != None: sleep(min(sleep_time, Resource.WATCHDOG_MAX_SLEEP)) @@ -692,7 +692,7 @@ class Resource: if self.req_resp == None: self.req_resp = self.last_activity rtt = self.req_resp-self.req_sent - + self.part_timeout_factor = Resource.PART_TIMEOUT_FACTOR_AFTER_RTT if self.rtt == None: self.rtt = self.link.rtt @@ -732,7 +732,7 @@ class Resource: # Update consecutive completed pointer if i == self.consecutive_completed_height + 1: self.consecutive_completed_height = i - + cp = self.consecutive_completed_height + 1 while cp < len(self.parts) and self.parts[cp] != None: self.consecutive_completed_height = cp @@ -797,7 +797,7 @@ class Resource: i = 0; pn = self.consecutive_completed_height+1 search_start = pn search_size = self.window - + for part in self.parts[search_start:search_start+search_size]: if part == None: part_hash = self.hashmap[pn] @@ -878,10 +878,10 @@ class Resource: RNS.log("Resource could not send parts, cancelling transfer!", RNS.LOG_DEBUG) RNS.log(f"The contained exception was: {e}", RNS.LOG_DEBUG) self.cancel() - + if wants_more_hashmap: last_map_hash = request_data[1:Resource.MAPHASH_LEN+1] - + part_index = self.receiver_min_consecutive_height search_start = part_index search_end = self.receiver_min_consecutive_height+ResourceAdvertisement.COLLISION_GUARD_SIZE @@ -899,7 +899,7 @@ class Resource: else: segment = part_index // ResourceAdvertisement.HASHMAP_MAX_LEN - + hashmap_start = 
segment*ResourceAdvertisement.HASHMAP_MAX_LEN hashmap_end = min((segment+1)*ResourceAdvertisement.HASHMAP_MAX_LEN, len(self.parts)) @@ -943,7 +943,7 @@ class Resource: self.link.cancel_outgoing_resource(self) else: self.link.cancel_incoming_resource(self) - + if self.callback != None: try: self.link.resource_concluded(self) @@ -968,7 +968,7 @@ class Resource: self.processed_parts += self.sent_parts self.progress_total_parts = float(self.grand_total_parts) else: - self.processed_parts = (self.segment_index-1)*math.ceil(Resource.MAX_EFFICIENT_SIZE/Resource.SDU) + self.processed_parts = (self.segment_index-1)*math.ceil(Resource.MAX_EFFICIENT_SIZE/Resource.SDU) self.processed_parts += self.received_count if self.split: self.progress_total_parts = float(math.ceil(self.total_size/Resource.SDU)) @@ -1141,7 +1141,7 @@ class ResourceAdvertisement: @staticmethod def unpack(data): dictionary = umsgpack.unpackb(data) - + adv = ResourceAdvertisement() adv.t = dictionary["t"] adv.d = dictionary["d"] diff --git a/RNS/Reticulum.py b/RNS/Reticulum.py index b9b9a1a..e58ab9e 100755 --- a/RNS/Reticulum.py +++ b/RNS/Reticulum.py @@ -128,7 +128,7 @@ class Reticulum: HEADER_MAXSIZE = 2+1+(TRUNCATED_HASHLENGTH//8)*2 IFAC_MIN_SIZE = 1 IFAC_SALT = bytes.fromhex("adf54d882c9a9b80771eb4995d702d4a3e733391b2a0f53f416d9f907e55cff8") - + MDU = MTU - HEADER_MAXSIZE - IFAC_MIN_SIZE RESOURCE_CACHE = 24*60*60 @@ -139,7 +139,7 @@ class Reticulum: router = None config = None - + # The default configuration path will be expanded to a directory # named ".reticulum" inside the current users home directory userdir = os.path.expanduser("~") @@ -149,7 +149,7 @@ class Reticulum: cachepath = "" __instance = None - + @staticmethod def exit_handler(): # This exit handler is called whenever Reticulum is asked to @@ -211,7 +211,7 @@ class Reticulum: if logdest == RNS.LOG_FILE: RNS.logdest = RNS.LOG_FILE RNS.logfile = f"{Reticulum.configdir}/logfile" - + Reticulum.configpath = f"{Reticulum.configdir}/config" Reticulum.storagepath = f"{Reticulum.configdir}/storage" Reticulum.cachepath = f"{Reticulum.configdir}/storage/cache" @@ -277,7 +277,7 @@ class Reticulum: self.__apply_config() RNS.log(f"Configuration loaded from {self.configpath}", RNS.LOG_VERBOSE) - + RNS.Identity.load_known_destinations() RNS.Transport.start(self) @@ -285,7 +285,7 @@ class Reticulum: self.rpc_addr = ("127.0.0.1", self.local_control_port) if self.rpc_key == None: self.rpc_key = RNS.Identity.full_hash(RNS.Transport.identity.get_private_key()) - + if self.is_shared_instance: self.rpc_listener = multiprocessing.connection.Listener(self.rpc_addr, authkey=self.rpc_key) thread = threading.Thread(target=self.rpc_loop) @@ -313,7 +313,7 @@ class Reticulum: if now > self.last_data_persist+Reticulum.PERSIST_INTERVAL: self.__persist_data() - + time.sleep(Reticulum.JOB_INTERVAL) def __start_local_interface(self): @@ -329,7 +329,7 @@ class Reticulum: interface._force_bitrate = Reticulum._force_shared_instance_bitrate RNS.log(f"Forcing shared instance bitrate of {RNS.prettyspeed(interface.bitrate)}", RNS.LOG_WARNING) RNS.Transport.interfaces.append(interface) - + self.is_shared_instance = True RNS.log(f"Started shared instance interface: {interface}", RNS.LOG_DEBUG) self.__start_jobs() @@ -454,7 +454,7 @@ class Reticulum: c = self.config["interfaces"][name] interface_mode = Interface.Interface.MODE_FULL - + if "interface_mode" in c: c["interface_mode"] = str(c["interface_mode"]).lower() if c["interface_mode"] == "full": @@ -489,7 +489,7 @@ class Reticulum: if "ifac_size" in c: if 
c.as_int("ifac_size") >= Reticulum.IFAC_MIN_SIZE*8: ifac_size = c.as_int("ifac_size")//8 - + ifac_netname = None if "networkname" in c: if c["networkname"] != "": @@ -505,7 +505,7 @@ class Reticulum: if "pass_phrase" in c: if c["pass_phrase"] != "": ifac_netkey = c["pass_phrase"] - + ingress_control = True if "ingress_control" in c: ingress_control = c.as_bool("ingress_control") ic_max_held_announces = None @@ -532,12 +532,12 @@ class Reticulum: if "announce_rate_target" in c: if c.as_int("announce_rate_target") > 0: announce_rate_target = c.as_int("announce_rate_target") - + announce_rate_grace = None if "announce_rate_grace" in c: if c.as_int("announce_rate_grace") >= 0: announce_rate_grace = c.as_int("announce_rate_grace") - + announce_rate_penalty = None if "announce_rate_penalty" in c: if c.as_int("announce_rate_penalty") >= 0: @@ -553,7 +553,7 @@ class Reticulum: if "announce_cap" in c: if c.as_float("announce_cap") > 0 and c.as_float("announce_cap") <= 100: announce_cap = c.as_float("announce_cap")/100.0 - + try: interface = None @@ -660,7 +660,7 @@ class Reticulum: if interface_mode == Interface.Interface.MODE_ACCESS_POINT: RNS.log(f"{interface} does not support Access Point mode, reverting to default mode: Full", RNS.LOG_WARNING) interface_mode = Interface.Interface.MODE_FULL - + interface.mode = interface_mode interface.announce_cap = announce_cap @@ -697,7 +697,7 @@ class Reticulum: if interface_mode == Interface.Interface.MODE_ACCESS_POINT: RNS.log(f"{interface} does not support Access Point mode, reverting to default mode: Full", RNS.LOG_WARNING) interface_mode = Interface.Interface.MODE_FULL - + interface.mode = interface_mode interface.announce_cap = announce_cap @@ -734,7 +734,7 @@ class Reticulum: if interface_mode == Interface.Interface.MODE_ACCESS_POINT: RNS.log(f"{interface} does not support Access Point mode, reverting to default mode: Full", RNS.LOG_WARNING) interface_mode = Interface.Interface.MODE_FULL - + interface.mode = interface_mode interface.announce_cap = announce_cap @@ -937,7 +937,7 @@ class Reticulum: ble_addr = ble_string else: ble_name = ble_string - + interface = RNodeInterface.RNodeInterface( RNS.Transport, name, @@ -1011,11 +1011,11 @@ class Reticulum: txpower = int(subinterface_config["txpower"]) if "txpower" in subinterface_config else None subint_config[subint_index][4] = txpower spreadingfactor = int(subinterface_config["spreadingfactor"]) if "spreadingfactor" in subinterface_config else None - subint_config[subint_index][5] = spreadingfactor + subint_config[subint_index][5] = spreadingfactor codingrate = int(subinterface_config["codingrate"]) if "codingrate" in subinterface_config else None subint_config[subint_index][6] = codingrate flow_control = subinterface_config.as_bool("flow_control") if "flow_control" in subinterface_config else False - subint_config[subint_index][7] = flow_control + subint_config[subint_index][7] = flow_control st_alock = float(subinterface_config["airtime_limit_short"]) if "airtime_limit_short" in subinterface_config else None subint_config[subint_index][8] = st_alock lt_alock = float(subinterface_config["airtime_limit_long"]) if "airtime_limit_long" in subinterface_config else None @@ -1037,7 +1037,7 @@ class Reticulum: id_interval = int(c["id_interval"]) if "id_interval" in c else None id_callsign = c["id_callsign"] if "id_callsign" in c else None port = c["port"] if "port" in c else None - + if port == None: raise ValueError(f"No port specified for {name}") @@ -1121,7 +1121,7 @@ class Reticulum: def 
_add_interface(self,interface, mode = None, configured_bitrate=None, ifac_size=None, ifac_netname=None, ifac_netkey=None, announce_cap=None, announce_rate_target=None, announce_rate_grace=None, announce_rate_penalty=None): if not self.is_connected_to_shared_instance: if interface != None and issubclass(type(interface), RNS.Interfaces.Interface.Interface): - + if mode == None: mode = Interface.Interface.MODE_FULL interface.mode = mode @@ -1199,14 +1199,14 @@ class Reticulum: age = now - mtime if age > RNS.Transport.DESTINATION_TIMEOUT: os.unlink(filepath) - + except Exception as e: RNS.log(f"Error while cleaning resources cache, the contained exception was: {e}", RNS.LOG_ERROR) def __create_default_config(self): self.config = ConfigObj(__default_rns_config__) self.config.filename = Reticulum.configpath - + if not os.path.isdir(Reticulum.configdir): os.makedirs(Reticulum.configdir) self.config.write() @@ -1278,7 +1278,7 @@ class Reticulum: interfaces = [] for interface in RNS.Transport.interfaces: ifstats = {} - + if hasattr(interface, "clients"): ifstats["clients"] = interface.clients else: diff --git a/RNS/Transport.py b/RNS/Transport.py index d1bd685..a93070a 100755 --- a/RNS/Transport.py +++ b/RNS/Transport.py @@ -51,7 +51,7 @@ class Transport: """ Maximum amount of hops that Reticulum will transport a packet. """ - + PATHFINDER_R = 1 # Retransmit retries PATHFINDER_G = 5 # Retry grace period PATHFINDER_RW = 0.5 # Random window for announce rebroadcast @@ -101,7 +101,7 @@ class Transport: announce_rate_table = {} # A table for keeping track of announce rates path_requests = {} # A table for storing path request timestamps path_states = {} # A table for keeping track of path states - + discovery_path_requests = {} # A table for keeping track of path requests on behalf of other nodes discovery_pr_tags = [] # A table for keeping track of tagged path requests max_pr_tags = 32000 # Maximum amount of unique path request tags to remember @@ -150,7 +150,7 @@ class Transport: if Transport.identity == None: transport_identity_path = f"{RNS.Reticulum.storagepath}/transport_identity" if os.path.isfile(transport_identity_path): - Transport.identity = RNS.Identity.from_file(transport_identity_path) + Transport.identity = RNS.Identity.from_file(transport_identity_path) if Transport.identity == None: RNS.log("No valid Transport Identity in storage, creating...", RNS.LOG_VERBOSE) @@ -187,7 +187,7 @@ class Transport: Transport.control_destinations.append(Transport.remote_management_destination) Transport.control_hashes.append(Transport.remote_management_destination.hash) RNS.log(f"Enabled remote management on {Transport.remote_management_destination}", RNS.LOG_NOTICE) - + Transport.jobs_running = False thread = threading.Thread(target=Transport.jobloop, daemon=True) thread.start() @@ -405,7 +405,7 @@ class Transport: announce_destination = RNS.Destination(announce_identity, RNS.Destination.OUT, RNS.Destination.SINGLE, "unknown", "unknown"); announce_destination.hash = packet.destination_hash announce_destination.hexhash = announce_destination.hash.hex() - + new_packet = RNS.Packet( announce_destination, announce_data, @@ -423,7 +423,7 @@ class Transport: RNS.log(f"Rebroadcasting announce as path response for {RNS.prettyhexrep(announce_destination.hash)} with hop count {new_packet.hops}", RNS.LOG_DEBUG) else: RNS.log(f"Rebroadcasting announce for {RNS.prettyhexrep(announce_destination.hash)} with hop count {new_packet.hops}", RNS.LOG_DEBUG) - + outgoing.append(new_packet) # This handles an edge case where 
a peer sends a past @@ -486,7 +486,7 @@ class Transport: path_request_throttle = time.time() - last_path_request < Transport.PATH_REQUEST_MI path_request_conditions = False - + # If the path has been invalidated between the time of # making the link request and now, try to rediscover it if not Transport.has_path(link_entry[6]): @@ -726,7 +726,7 @@ class Transport: # Assemble new payload with IFAC new_raw = new_header+ifac+raw[2:] - + # Mask payload i = 0; masked_raw = b"" for byte in new_raw: @@ -781,7 +781,7 @@ class Transport: if generate_receipt: packet.receipt = RNS.PacketReceipt(packet) Transport.receipts.append(packet.receipt) - + # TODO: Enable when caching has been redesigned # Transport.cache(packet) @@ -850,7 +850,7 @@ class Transport: should_transmit = False if interface != packet.destination.attached_interface: should_transmit = False - + if packet.attached_interface != None and interface != packet.attached_interface: should_transmit = False @@ -919,7 +919,7 @@ class Transport: tx_time = (len(packet.raw)*8) / interface.bitrate wait_time = (tx_time / interface.announce_cap) interface.announce_allowed_at = outbound_time + wait_time - + else: should_transmit = False if not len(interface.announce_queue) >= RNS.Reticulum.MAX_QUEUED_ANNOUNCES: @@ -979,10 +979,10 @@ class Transport: else: pass - + else: pass - + if should_transmit: if not stored_hash: Transport.packet_hashlist.append(packet.packet_hash) @@ -1134,14 +1134,14 @@ class Transport: if Transport.identity == None: return - + Transport.jobs_locked = True - + packet = RNS.Packet(None, raw) if not packet.unpack(): Transport.jobs_locked = False return - + packet.receiving_interface = interface packet.hops += 1 @@ -1205,7 +1205,7 @@ class Transport: Transport.packet_hashlist.append(packet.packet_hash) # TODO: Enable when caching has been redesigned # Transport.cache(packet) - + # Check special conditions for local clients connected # through a shared Reticulum instance from_local_client = (packet.receiving_interface in Transport.local_client_interfaces) @@ -1262,7 +1262,7 @@ class Transport: if packet.destination_hash in Transport.destination_table: next_hop = Transport.destination_table[packet.destination_hash][1] remaining_hops = Transport.destination_table[packet.destination_hash][2] - + if remaining_hops > 1: # Just increase hop count and transmit new_raw = packet.raw[0:1] @@ -1356,7 +1356,7 @@ class Transport: new_raw += packet.raw[2:] Transport.transmit(outbound_interface, new_raw) Transport.link_table[packet.destination_hash][0] = time.time() - + # TODO: Test and possibly enable this at some point # Transport.jobs_locked = False # return @@ -1383,13 +1383,13 @@ class Transport: if local_destination == None and RNS.Identity.validate_announce(packet): if packet.transport_id != None: received_from = packet.transport_id - + # Check if this is a next retransmission from # another node. 
If it is, we're removing the # announce in question from our pending table if RNS.Reticulum.transport_enabled() and packet.destination_hash in Transport.announce_table: announce_entry = Transport.announce_table[packet.destination_hash] - + if packet.hops-1 == announce_entry[4]: RNS.log(f"Heard a local rebroadcast of announce for {RNS.prettyhexrep(packet.destination_hash)}", RNS.LOG_DEBUG) announce_entry[6] += 1 @@ -1415,7 +1415,7 @@ class Transport: # local to this system, and that hops are less than the max if (not any(packet.destination_hash == d.hash for d in Transport.destinations) and packet.hops < Transport.PATHFINDER_M+1): announce_emitted = Transport.announce_emitted(packet) - + random_blob = packet.data[RNS.Identity.KEYSIZE//8+RNS.Identity.NAME_HASH_LENGTH//8:RNS.Identity.KEYSIZE//8+RNS.Identity.NAME_HASH_LENGTH//8+10] random_blobs = [] if packet.destination_hash in Transport.destination_table: @@ -1442,7 +1442,7 @@ class Transport: # the emission timestamp is more recent. now = time.time() path_expires = Transport.destination_table[packet.destination_hash][3] - + path_announce_emitted = 0 for path_random_blob in random_blobs: path_announce_emitted = max(path_announce_emitted, int.from_bytes(path_random_blob[5:10], "big")) @@ -1474,7 +1474,7 @@ class Transport: should_add = True else: should_add = False - + # If we have already heard this announce before, # but the path has been marked as unresponsive # by a failed communications attempt or similar, @@ -1527,14 +1527,14 @@ class Transport: else: rate_blocked = True - + retries = 0 announce_hops = packet.hops local_rebroadcasts = 0 block_rebroadcasts = False attached_interface = None - + retransmit_timeout = now + (RNS.rand() * Transport.PATHFINDER_RW) if hasattr(packet.receiving_interface, "mode") and packet.receiving_interface.mode == RNS.Interfaces.Interface.Interface.MODE_ACCESS_POINT: @@ -1543,7 +1543,7 @@ class Transport: expires = now + Transport.ROAMING_PATH_TIME else: expires = now + Transport.PATHFINDER_E - + random_blobs.append(random_blob) random_blobs = random_blobs[-Transport.MAX_RANDOM_BLOBS:] @@ -1552,7 +1552,7 @@ class Transport: if rate_blocked: RNS.log(f"Blocking rebroadcast of announce from {RNS.prettyhexrep(packet.destination_hash)} due to excessive announce rate", RNS.LOG_DEBUG) - + else: if Transport.from_local_client(packet): # If the announce is from a local client, @@ -1619,7 +1619,7 @@ class Transport: attached_interface = local_interface, context_flag = packet.context_flag, ) - + new_announce.hops = packet.hops new_announce.send() @@ -1730,7 +1730,7 @@ class Transport: if destination.hash == packet.destination_hash and destination.type == packet.destination_type: packet.destination = destination destination.receive(packet) - + # Handling for local data packets elif packet.packet_type == RNS.Packet.DATA: if packet.destination_type == RNS.Destination.LINK: @@ -1835,7 +1835,7 @@ class Transport: for link in Transport.active_links: if link.link_id == packet.destination_hash: packet.link = link - + if len(packet.data) == RNS.PacketReceipt.EXPL_LENGTH: proof_hash = packet.data[:RNS.Identity.HASHLENGTH//8] else: @@ -1875,13 +1875,13 @@ class Transport: interface_hash = interface.get_hash() public_key = RNS.Transport.identity.get_public_key() random_hash = RNS.Identity.get_random_hash() - + tunnel_id_data = public_key+interface_hash tunnel_id = RNS.Identity.full_hash(tunnel_id_data) signed_data = tunnel_id_data+random_hash signature = Transport.identity.sign(signed_data) - + data = signed_data+signature 
tnl_snth_dst = RNS.Destination(None, RNS.Destination.OUT, RNS.Destination.PLAIN, Transport.APP_NAME, "tunnel", "synthesize") @@ -1901,7 +1901,7 @@ class Transport: tunnel_id_data = public_key+interface_hash tunnel_id = RNS.Identity.full_hash(tunnel_id_data) random_hash = data[RNS.Identity.KEYSIZE//8+RNS.Identity.HASHLENGTH//8:RNS.Identity.KEYSIZE//8+RNS.Identity.HASHLENGTH//8+RNS.Reticulum.TRUNCATED_HASHLENGTH//8] - + signature = data[RNS.Identity.KEYSIZE//8+RNS.Identity.HASHLENGTH//8+RNS.Reticulum.TRUNCATED_HASHLENGTH//8:expected_length] signed_data = tunnel_id_data+random_hash @@ -1974,7 +1974,7 @@ class Transport: for registered_destination in Transport.destinations: if destination.hash == registered_destination.hash: raise KeyError("Attempt to register an already registered destination.") - + Transport.destinations.append(destination) if Transport.owner.is_connected_to_shared_instance: @@ -2061,7 +2061,7 @@ class Transport: if packet.receiving_interface != None: interface_reference = str(packet.receiving_interface) - file = open(f"{RNS.Reticulum.cachepath}/{packet_hash}", "wb") + file = open(f"{RNS.Reticulum.cachepath}/{packet_hash}", "wb") file.write(umsgpack.packb([packet.raw, interface_reference])) file.close() @@ -2419,11 +2419,11 @@ class Transport: if len(Transport.local_client_interfaces) > 0: if destination_hash in Transport.destination_table: destination_interface = Transport.destination_table[destination_hash][5] - + if Transport.is_local_client_interface(destination_interface): destination_exists_on_local_client = True Transport.pending_local_path_requests[destination_hash] = attached_interface - + local_destination = next((d for d in Transport.destinations if d.hash == destination_hash), None) if local_destination != None: local_destination.announce(path_response=True, tag=tag, attached_interface=attached_interface) @@ -2436,7 +2436,7 @@ class Transport: if attached_interface.mode == RNS.Interfaces.Interface.Interface.MODE_ROAMING and attached_interface == received_from: RNS.log("Not answering path request on roaming-mode interface, since next hop is on same roaming-mode interface", RNS.LOG_DEBUG) - + else: if requestor_transport_id != None and next_hop == requestor_transport_id: # TODO: Find a bandwidth efficient way to invalidate our @@ -2480,7 +2480,7 @@ class Transport: if packet.destination_hash in Transport.announce_table: held_entry = Transport.announce_table[packet.destination_hash] Transport.held_announces[packet.destination_hash] = held_entry - + Transport.announce_table[packet.destination_hash] = [now, retransmit_timeout, retries, received_from, announce_hops, packet, local_rebroadcasts, block_rebroadcasts, attached_interface] elif is_from_local_client: @@ -2554,7 +2554,7 @@ class Transport: detachable_interfaces.append(interface) else: pass - + for interface in Transport.local_client_interfaces: # Currently no rules are being applied # here, and all interfaces will be sent diff --git a/RNS/Utilities/rncp.py b/RNS/Utilities/rncp.py index 6ae0045..979af5f 100644 --- a/RNS/Utilities/rncp.py +++ b/RNS/Utilities/rncp.py @@ -58,7 +58,7 @@ def listen(configdir, verbosity = 0, quietness = 0, allowed = [], display_identi identity_path = f"{RNS.Reticulum.identitypath}/{APP_NAME}" if os.path.isfile(identity_path): - identity = RNS.Identity.from_file(identity_path) + identity = RNS.Identity.from_file(identity_path) if identity == None: RNS.log("No valid saved identity found, creating new...", RNS.LOG_INFO) @@ -102,7 +102,7 @@ def listen(configdir, verbosity = 0, quietness = 
0, allowed = [], display_identi ms = "y" else: ms = "ies" - + RNS.log(f"Loaded {len(ali)} allowed identit{ms} from {allowed_file}", RNS.LOG_VERBOSE) except Exception as e: @@ -180,7 +180,7 @@ def listen(configdir, verbosity = 0, quietness = 0, allowed = [], display_identi destination.announce() threading.Thread(target=job, daemon=True).start() - + while True: time.sleep(1) @@ -206,7 +206,7 @@ def receive_sender_identified(link, identity): def receive_resource_callback(resource): global allow_all - + sender_identity = resource.link.get_remote_identity() if sender_identity != None: @@ -239,7 +239,7 @@ def receive_resource_concluded(resource): while os.path.isfile(saved_filename): counter += 1 saved_filename = f"{filename}.{counter}" - + file = open(saved_filename, "wb") file.write(resource.data.read()) file.close() @@ -412,7 +412,7 @@ def fetch(configdir, verbosity = 0, quietness = 0, destination = None, file = No while os.path.isfile(saved_filename): counter += 1 saved_filename = f"{filename}.{counter}" - + file = open(saved_filename, "wb") file.write(resource.data.read()) file.close() @@ -521,7 +521,7 @@ def send(configdir, verbosity = 0, quietness = 0, destination = None, file = Non print(str(e)) exit(1) - + file_path = os.path.expanduser(file) if not os.path.isfile(file_path): print("File not found") @@ -637,7 +637,7 @@ def send(configdir, verbosity = 0, quietness = 0, destination = None, file = Non sys.stdout.flush() i = (i+1)%len(syms) - + if resource.status > RNS.Resource.COMPLETE: if silent: print(f"File was not accepted by {RNS.prettyhexrep(destination_hash)}") @@ -708,7 +708,7 @@ def main(): parser.add_argument("-w", action="store", metavar="seconds", type=float, help="sender timeout before giving up", default=RNS.Transport.PATH_REQUEST_TIMEOUT) # parser.add_argument("--limit", action="store", metavar="files", type=float, help="maximum number of files to accept", default=None) parser.add_argument("--version", action="version", version=f"rncp {__version__}") - + args = parser.parse_args() if args.listen or args.print_identity: diff --git a/RNS/Utilities/rnid.py b/RNS/Utilities/rnid.py index 211f81b..2395d61 100644 --- a/RNS/Utilities/rnid.py +++ b/RNS/Utilities/rnid.py @@ -82,7 +82,7 @@ def main(): parser.add_argument("-w", "--write", metavar="file", action="store", default=None, help="output file path", type=str) parser.add_argument("-f", "--force", action="store_true", default=None, help="write output even if it overwrites existing files") parser.add_argument("-I", "--stdin", action="store_true", default=False, help=argparse.SUPPRESS) # "read input from STDIN instead of file" - parser.add_argument("-O", "--stdout", action="store_true", default=False, help=argparse.SUPPRESS) # help="write output to STDOUT instead of file", + parser.add_argument("-O", "--stdout", action="store_true", default=False, help=argparse.SUPPRESS) # help="write output to STDOUT instead of file", parser.add_argument("-R", "--request", action="store_true", default=False, help="request unknown Identities from the network") parser.add_argument("-t", action="store", metavar="seconds", type=float, help="identity request timeout before giving up", default=RNS.Transport.PATH_REQUEST_TIMEOUT) @@ -93,14 +93,14 @@ def main(): parser.add_argument("-B", "--base32", action="store_true", default=False, help="Use base32-encoded input and output") parser.add_argument("--version", action="version", version=f"rnid {__version__}") - + args = parser.parse_args() ops = 0; for t in [args.encrypt, args.decrypt, args.validate, 
args.sign]: if t: ops += 1 - + if ops > 1: RNS.log("This utility currently only supports one of the encrypt, decrypt, sign or verify operations per invocation", RNS.LOG_ERROR) exit(1) @@ -179,7 +179,7 @@ def main(): quietness = args.quiet if verbosity != 0 or quietness != 0: targetloglevel = targetloglevel+verbosity-quietness - + # Start Reticulum reticulum = RNS.Reticulum(configdir=args.config, loglevel=targetloglevel) RNS.compact_log_fmt = True @@ -234,7 +234,7 @@ def main(): RNS.log("Invalid hexadecimal hash provided", RNS.LOG_ERROR) exit(7) - + else: # Try loading Identity from file if not os.path.isfile(identity_str): @@ -391,7 +391,7 @@ def main(): RNS.log("Could not open output file for writing", RNS.LOG_ERROR) RNS.log(f"The contained exception was: {e}", RNS.LOG_ERROR) exit(15) - + # TODO: Actually expand this to a good solution # probably need to create a wrapper that takes # into account not closing stdout when done @@ -415,12 +415,12 @@ def main(): if not args.stdout: RNS.log(f"Signing {args.read}") - + try: data_output.write(identity.sign(data_input.read())) data_output.close() data_input.close() - + if not args.stdout: if args.read: RNS.log(f"File {args.read} signed with {identity} to {args.write}") @@ -448,7 +448,7 @@ def main(): else: # if not args.stdout: # RNS.log("Verifying "+str(args.validate)+" for "+str(args.read)) - + try: try: sig_input = open(args.validate, "rb") @@ -498,7 +498,7 @@ def main(): if not args.stdout: RNS.log(f"Encrypting {args.read}") - + try: more_data = True while more_data: @@ -545,7 +545,7 @@ def main(): if not args.stdout: RNS.log(f"Decrypting {args.read}...") - + try: more_data = True while more_data: diff --git a/RNS/Utilities/rnir.py b/RNS/Utilities/rnir.py index 01c74da..403ed98 100644 --- a/RNS/Utilities/rnir.py +++ b/RNS/Utilities/rnir.py @@ -49,7 +49,7 @@ def main(): parser.add_argument('-q', '--quiet', action='count', default=0) parser.add_argument("--exampleconfig", action='store_true', default=False, help="print verbose configuration example to stdout and exit") parser.add_argument("--version", action="version", version=f"ir {__version__}") - + args = parser.parse_args() if args.exampleconfig: diff --git a/RNS/Utilities/rnodeconf.py b/RNS/Utilities/rnodeconf.py index fc4601d..df19660 100755 --- a/RNS/Utilities/rnodeconf.py +++ b/RNS/Utilities/rnodeconf.py @@ -61,7 +61,7 @@ class KISS(): FESC = 0xDB TFEND = 0xDC TFESC = 0xDD - + CMD_UNKNOWN = 0xFE CMD_DATA = 0x00 CMD_FREQUENCY = 0x01 @@ -104,11 +104,11 @@ class KISS(): DETECT_REQ = 0x73 DETECT_RESP = 0x46 - + RADIO_STATE_OFF = 0x00 RADIO_STATE_ON = 0x01 RADIO_STATE_ASK = 0xFF - + CMD_ERROR = 0x90 ERROR_INITRADIO = 0x01 ERROR_TXFAILED = 0x02 @@ -191,7 +191,7 @@ class ROM(): PRODUCT_TECHO = 0x15 MODEL_T4 = 0x16 MODEL_T9 = 0x17 - + PRODUCT_HMBRW = 0xF0 MODEL_FF = 0xFF MODEL_FE = 0xFE @@ -610,7 +610,7 @@ class RNode(): self.detected = True else: self.detected = False - + else: time_since_last = int(time.time()*1000) - last_read_ms if len(data_buffer) > 0 and time_since_last > self.timeout: @@ -886,7 +886,7 @@ class RNode(): from cryptography.hazmat.primitives.serialization import load_der_private_key from cryptography.hazmat.primitives.asymmetric import padding - # Try loading local signing key for + # Try loading local signing key for # validation of self-signed devices if os.path.isdir(FWD_DIR) and os.path.isfile(FWD_DIR+"/signing.key"): private_bytes = None @@ -922,7 +922,7 @@ class RNode(): RNS.log("Could not deserialize local signing key") RNS.log(str(e)) - # Try loading trusted signing 
key for + # Try loading trusted signing key for # validation of devices if os.path.isdir(TK_DIR): for f in os.listdir(TK_DIR): @@ -1230,11 +1230,11 @@ def rnode_open_serial(port): write_timeout = None, dsrdtr = False ) - - + + def graceful_exit(C=0): if RNS.vendor.platformutils.is_windows(): - RNS.log("Windows detected; delaying DTR",RNS.LOG_VERBOSE) + RNS.log("Windows detected; delaying DTR",RNS.LOG_VERBOSE) if rnode: RNS.log("Sending \"Leave\" to Rnode",RNS.LOG_VERBOSE) rnode.leave() # Leave has wait built in @@ -1319,13 +1319,13 @@ def main(): parser.add_argument("-f", "--flash", action="store_true", help="Flash firmware and bootstrap EEPROM") parser.add_argument("-r", "--rom", action="store_true", help="Bootstrap EEPROM without flashing firmware") - parser.add_argument("-k", "--key", action="store_true", help="Generate a new signing key and exit") # + parser.add_argument("-k", "--key", action="store_true", help="Generate a new signing key and exit") # parser.add_argument("-S", "--sign", action="store_true", help="Display public part of signing key") parser.add_argument("-H", "--firmware-hash", action="store", help="Display installed firmware hash") parser.add_argument("-K", "--get-target-firmware-hash", action="store_true", help=argparse.SUPPRESS) # Get target firmware hash from device parser.add_argument("-L", "--get-firmware-hash", action="store_true", help=argparse.SUPPRESS) # Get calculated firmware hash from device parser.add_argument("--platform", action="store", metavar="platform", type=str, default=None, help="Platform specification for device bootstrap") - parser.add_argument("--product", action="store", metavar="product", type=str, default=None, help="Product specification for device bootstrap") # + parser.add_argument("--product", action="store", metavar="product", type=str, default=None, help="Product specification for device bootstrap") # parser.add_argument("--model", action="store", metavar="model", type=str, default=None, help="Model code for device bootstrap") parser.add_argument("--hwrev", action="store", metavar="revision", type=int, default=None, help="Hardware revision for device bootstrap") @@ -1354,7 +1354,7 @@ def main(): if args.fw_version != None: selected_version = args.fw_version - try: + try: check_float = float(selected_version) except ValueError: RNS.log("Selected version \""+selected_version+"\" does not appear to be a number.") @@ -1368,7 +1368,7 @@ def main(): if args.nocheck: upd_nocheck = True - + if args.public or args.key or args.flash or args.rom or args.autoinstall or args.trust_key: from cryptography.hazmat.primitives import hashes from cryptography.hazmat.backends import default_backend @@ -1419,8 +1419,8 @@ def main(): ports = list_ports.comports() portlist = [] for port in ports: - portlist.insert(0, port) - + portlist.insert(0, port) + pi = 1 print("Detected serial ports:") for port in portlist: @@ -1556,8 +1556,8 @@ def main(): ports = list_ports.comports() portlist = [] for port in ports: - portlist.insert(0, port) - + portlist.insert(0, port) + pi = 1 print("Detected serial ports:") for port in portlist: @@ -1638,7 +1638,7 @@ def main(): print("correct firmware and provision it.") else: print("\nIt looks like this is a fresh device with no RNode firmware.") - + print("") print("What kind of device is this?\n") print("[1] A specific kind of RNode") @@ -2224,7 +2224,7 @@ def main(): fw_filename = "rnode_firmware.hex" elif selected_mcu == ROM.MCU_2560: fw_filename = "rnode_firmware_m2560.hex" - + elif selected_platform == ROM.PLATFORM_ESP32: 
fw_filename = None print("\nWhat kind of ESP32 board is this?\n") @@ -2337,7 +2337,7 @@ def main(): except Exception as e: RNS.log("Could not load device signing key") - + graceful_exit() @@ -2413,7 +2413,7 @@ def main(): return part_hash else: return None - + elif platform == ROM.PLATFORM_NRF52: # Calculate digest manually, as it is not included in the image. firmware_data = open(partition_file, "rb") @@ -2994,7 +2994,7 @@ def main(): wants_fw_provision = False if args.flash: from subprocess import call - + if fw_filename == None: fw_filename = "rnode_firmware.hex" @@ -3032,7 +3032,7 @@ def main(): RNS.log("Error while flashing") RNS.log(str(e)) graceful_exit(1) - + else: fw_src = UPD_DIR+"/"+selected_version+"/" if os.path.isfile(fw_src+fw_filename): @@ -3215,7 +3215,7 @@ def main(): update_full_path = EXT_DIR+"/extracted_rnode_firmware.version" else: update_full_path = UPD_DIR+"/"+selected_version+"/"+fw_filename - if os.path.isfile(update_full_path): + if os.path.isfile(update_full_path): try: args.info = False RNS.log("Updating RNode firmware for device on "+args.port) @@ -3468,7 +3468,7 @@ def main(): if args.autoinstall: RNS.log("Clearing old EEPROM, this will take about 15 seconds...") rnode.wipe_eeprom() - + if rnode.platform == ROM.PLATFORM_ESP32: RNS.log("Waiting for ESP32 reset...") time.sleep(6) @@ -3849,7 +3849,7 @@ def main(): else: RNS.log("This device has not been provisioned yet, cannot get firmware hash") exit(77) - + if args.get_firmware_hash: if rnode.provisioned: RNS.log(f"The actual firmware hash is: {rnode.firmware_hash.hex()}") @@ -3923,7 +3923,7 @@ def main(): except KeyboardInterrupt: print("") graceful_exit() - + graceful_exit() def extract_recovery_esptool(): diff --git a/RNS/Utilities/rnpath.py b/RNS/Utilities/rnpath.py index 353d020..21a5b34 100644 --- a/RNS/Utilities/rnpath.py +++ b/RNS/Utilities/rnpath.py @@ -235,7 +235,7 @@ def program_setup(configdir, table, rates, drop, destination_hexhash, verbosity, hour_rate = round(len(entry["timestamps"])/span_hours, 3) if hour_rate-int(hour_rate) == 0: hour_rate = int(hour_rate) - + if entry["rate_violations"] > 0: if entry["rate_violations"] == 1: s_str = "" @@ -245,14 +245,14 @@ def program_setup(configdir, table, rates, drop, destination_hexhash, verbosity, rv_str = f", {entry['rate_violations']} active rate violation{s_str}" else: rv_str = "" - + if entry["blocked_until"] > time.time(): bli = time.time()-(int(entry["blocked_until"])-time.time()) bl_str = f", new announces allowed in {pretty_date(int(bli))}" else: bl_str = "" - + print(f"{RNS.prettyhexrep(entry['hash'])} last heard {last_str} ago, {hour_rate} announces/hour in the last {span_str}{rv_str}{bl_str}") except Exception as e: @@ -272,7 +272,7 @@ def program_setup(configdir, table, rates, drop, destination_hexhash, verbosity, print("Dropping announce queues on all interfaces...") reticulum.drop_announce_queues() - + elif drop: if remote_link: if not no_output: @@ -341,7 +341,7 @@ def program_setup(configdir, table, rates, drop, destination_hexhash, verbosity, except Exception as e: print(str(e)) sys.exit(1) - + if not RNS.Transport.has_path(destination_hash): RNS.Transport.request_path(destination_hash) print(f"Path to {RNS.prettyhexrep(destination_hash)} requested ", end=" ") @@ -376,7 +376,7 @@ def program_setup(configdir, table, rates, drop, destination_hexhash, verbosity, print("\r \rPath not found") sys.exit(1) - + def main(): try: @@ -479,7 +479,7 @@ def main(): help="timeout before giving up on remote queries", 
default=RNS.Transport.PATH_REQUEST_TIMEOUT ) - + parser.add_argument( "-j", "--json", @@ -497,7 +497,7 @@ def main(): ) parser.add_argument('-v', '--verbose', action='count', default=0) - + args = parser.parse_args() if args.config: diff --git a/RNS/Utilities/rnprobe.py b/RNS/Utilities/rnprobe.py index fcf01c4..ffa2b51 100644 --- a/RNS/Utilities/rnprobe.py +++ b/RNS/Utilities/rnprobe.py @@ -38,7 +38,7 @@ def program_setup(configdir, destination_hexhash, size=None, full_name = None, v if full_name == None: print("The full destination name including application name aspects must be specified for the destination") exit() - + try: app_name, aspects = RNS.Destination.app_and_aspects_from_name(full_name) @@ -133,7 +133,7 @@ def program_setup(configdir, destination_hexhash, size=None, full_name = None, v if time.time() > _timeout: print("\r \rProbe timed out") - + else: print("\b\b ") sys.stdout.flush() @@ -162,10 +162,10 @@ def program_setup(configdir, destination_hexhash, size=None, full_name = None, v if reception_rssi != None: reception_stats += f" [RSSI {reception_rssi} dBm]" - + if reception_snr != None: reception_stats += f" [SNR {reception_snr} dB]" - + if reception_q != None: reception_stats += f" [Link Quality {reception_q}%]" @@ -173,7 +173,7 @@ def program_setup(configdir, destination_hexhash, size=None, full_name = None, v if receipt.proof_packet != None: if receipt.proof_packet.rssi != None: reception_stats += f" [RSSI {receipt.proof_packet.rssi} dBm]" - + if receipt.proof_packet.snr != None: reception_stats += f" [SNR {receipt.proof_packet.snr} dB]" @@ -192,7 +192,7 @@ def program_setup(configdir, destination_hexhash, size=None, full_name = None, v exit(2) else: exit(0) - + def main(): try: diff --git a/RNS/Utilities/rnsd.py b/RNS/Utilities/rnsd.py index 6c75e41..d9d5d77 100755 --- a/RNS/Utilities/rnsd.py +++ b/RNS/Utilities/rnsd.py @@ -56,7 +56,7 @@ def main(): parser.add_argument('-s', '--service', action='store_true', default=False, help="rnsd is running as a service and should log to file") parser.add_argument("--exampleconfig", action='store_true', default=False, help="print verbose configuration example to stdout and exit") parser.add_argument("--version", action="version", version=f"rnsd {__version__}") - + args = parser.parse_args() if args.exampleconfig: @@ -192,7 +192,7 @@ loglevel = 4 # The following example enables communication with other # local Reticulum peers using UDP broadcasts. - + [[UDP Interface]] type = UDPInterface enabled = no @@ -235,24 +235,24 @@ loglevel = 4 # This example demonstrates a TCP server interface. # It will listen for incoming connections on the # specified IP address and port number. - + [[TCP Server Interface]] type = TCPServerInterface enabled = no # This configuration will listen on all IP # interfaces on port 4242 - + listen_ip = 0.0.0.0 listen_port = 4242 # Alternatively you can bind to a specific IP - + # listen_ip = 10.0.0.88 # listen_port = 4242 # Or a specific network device - + # device = eth0 # port = 4242 @@ -300,7 +300,7 @@ loglevel = 4 # host device before connecting. BLE # devices can be connected by name, # BLE MAC address or by any available. - + # Connect to specific device by name # port = ble://RNode 3B87 @@ -320,7 +320,7 @@ loglevel = 4 # Set TX power to 7 dBm (5 mW) txpower = 7 - # Select spreading factor 8. Valid + # Select spreading factor 8. Valid # range is 7 through 12, with 7 # being the fastest and 12 having # the longest range. @@ -349,8 +349,8 @@ loglevel = 4 # flow control can be useful. 
By default # it is disabled. flow_control = False - - + + # An example KISS modem interface. Useful for running # Reticulum over packet radio hardware. @@ -365,7 +365,7 @@ loglevel = 4 # Set the serial baud-rate and other # configuration parameters. - speed = 115200 + speed = 115200 databits = 8 parity = none stopbits = 1 @@ -413,7 +413,7 @@ loglevel = 4 # way, Reticulum will automatically encapsulate it's # traffic in AX.25 and also identify your stations # transmissions with your callsign and SSID. - # + # # Only do this if you really need to! Reticulum doesn't # need the AX.25 layer for anything, and it incurs extra # overhead on every packet to encapsulate in AX.25. @@ -436,7 +436,7 @@ loglevel = 4 # Set the serial baud-rate and other # configuration parameters. - speed = 115200 + speed = 115200 databits = 8 parity = none stopbits = 1 diff --git a/RNS/Utilities/rnstatus.py b/RNS/Utilities/rnstatus.py index a65fba8..9334600 100644 --- a/RNS/Utilities/rnstatus.py +++ b/RNS/Utilities/rnstatus.py @@ -161,7 +161,7 @@ def program_setup(configdir, dispall=False, verbosity=0, name_filter=None, json= stats, link_count = remote_status except Exception as e: raise e - + except Exception as e: print(str(e)) exit(20) @@ -215,7 +215,7 @@ def program_setup(configdir, dispall=False, verbosity=0, name_filter=None, json= if sorting == "held": interfaces.sort(key=lambda i: i["held_announces"], reverse=not sort_reverse) - + for ifstat in interfaces: name = ifstat["name"] @@ -301,10 +301,10 @@ def program_setup(configdir, dispall=False, verbosity=0, name_filter=None, json= if "airtime_short" in ifstat and "airtime_long" in ifstat: print(" Airtime : {ats}% (15s), {atl}% (1h)".format(ats=str(ifstat["airtime_short"]),atl=str(ifstat["airtime_long"]))) - + if "channel_load_short" in ifstat and "channel_load_long" in ifstat: print(" Ch.Load : {ats}% (15s), {atl}% (1h)".format(ats=str(ifstat["channel_load_short"]),atl=str(ifstat["channel_load_long"]))) - + if "peers" in ifstat and ifstat["peers"] != None: print(" Peers : {np} reachable".format(np=ifstat["peers"])) @@ -314,7 +314,7 @@ def program_setup(configdir, dispall=False, verbosity=0, name_filter=None, json= if "ifac_signature" in ifstat and ifstat["ifac_signature"] != None: sigstr = "<…"+RNS.hexrep(ifstat["ifac_signature"][-5:], delimit=False)+">" print(" Access : {nb}-bit IFAC by {sig}".format(nb=ifstat["ifac_size"]*8, sig=sigstr)) - + if "i2p_b32" in ifstat and ifstat["i2p_b32"] != None: print(" I2P B32 : {ep}".format(ep=str(ifstat["i2p_b32"]))) @@ -324,14 +324,14 @@ def program_setup(configdir, dispall=False, verbosity=0, name_filter=None, json= print(" Queued : {np} announce".format(np=aqn)) else: print(" Queued : {np} announces".format(np=aqn)) - + if astats and "held_announces" in ifstat and ifstat["held_announces"] != None and ifstat["held_announces"] > 0: aqn = ifstat["held_announces"] if aqn == 1: print(" Held : {np} announce".format(np=aqn)) else: print(" Held : {np} announces".format(np=aqn)) - + if astats and "incoming_announce_frequency" in ifstat and ifstat["incoming_announce_frequency"] != None: print(" Announces : {iaf}↑".format(iaf=RNS.prettyfrequency(ifstat["outgoing_announce_frequency"]))) print(" {iaf}↓".format(iaf=RNS.prettyfrequency(ifstat["incoming_announce_frequency"]))) @@ -357,7 +357,7 @@ def program_setup(configdir, dispall=False, verbosity=0, name_filter=None, json= print(f"\n{lstr}") print("") - + else: if not remote: print("Could not get RNS status") @@ -378,7 +378,7 @@ def main(): help="show all interfaces", default=False ) - 
+ parser.add_argument( "-A", "--announce-stats", @@ -386,7 +386,7 @@ def main(): help="show announce stats", default=False ) - + parser.add_argument( "-l", "--link-stats", @@ -394,7 +394,7 @@ def main(): help="show link stats", default=False, ) - + parser.add_argument( "-s", "--sort", @@ -403,7 +403,7 @@ def main(): default=None, type=str ) - + parser.add_argument( "-r", "--reverse", @@ -411,7 +411,7 @@ def main(): help="reverse sorting", default=False, ) - + parser.add_argument( "-j", "--json", @@ -450,7 +450,7 @@ def main(): parser.add_argument('-v', '--verbose', action='count', default=0) parser.add_argument("filter", nargs="?", default=None, help="only display interfaces with names including filter", type=str) - + args = parser.parse_args() if args.config: diff --git a/RNS/Utilities/rnx.py b/RNS/Utilities/rnx.py index 5a78d2c..861b2b7 100644 --- a/RNS/Utilities/rnx.py +++ b/RNS/Utilities/rnx.py @@ -45,7 +45,7 @@ def prepare_identity(identity_path): identity_path = f"{RNS.Reticulum.identitypath}/{APP_NAME}" if os.path.isfile(identity_path): - identity = RNS.Identity.from_file(identity_path) + identity = RNS.Identity.from_file(identity_path) if identity == None: RNS.log("No valid saved identity found, creating new...", RNS.LOG_INFO) @@ -57,7 +57,7 @@ def listen(configdir, identitypath = None, verbosity = 0, quietness = 0, allowed targetloglevel = 3+verbosity-quietness reticulum = RNS.Reticulum(configdir=configdir, loglevel=targetloglevel) - + prepare_identity(identitypath) destination = RNS.Destination(identity, RNS.Destination.IN, RNS.Destination.SINGLE, APP_NAME, "execute") @@ -107,7 +107,7 @@ def listen(configdir, identitypath = None, verbosity = 0, quietness = 0, allowed if not disable_announce: destination.announce() - + while True: time.sleep(1) @@ -338,7 +338,7 @@ def execute(configdir, identitypath = None, verbosity = 0, quietness = 0, detail if link == None or link.status == RNS.Link.CLOSED or link.status == RNS.Link.PENDING: link = RNS.Link(listener_destination) link.did_identify = False - + if not spin(until=lambda: link.status == RNS.Link.ACTIVE, msg=f"Establishing link with {RNS.prettyhexrep(destination_hash)}", timeout=timeout): print(f"Could not establish link with {RNS.prettyhexrep(destination_hash)}") exit(243) @@ -467,7 +467,7 @@ def execute(configdir, identitypath = None, verbosity = 0, quietness = 0, detail else: tstr = "" print(f"Remote wrote {outlen} bytes to stdout{tstr}") - + if errlen != None and stderr != None: if len(stderr) < errlen: tstr = f", {len(stderr)} bytes displayed" @@ -548,7 +548,7 @@ def main(): parser.add_argument("--stdout", action='store', default=None, help="max size in bytes of returned stdout", type=int) parser.add_argument("--stderr", action='store', default=None, help="max size in bytes of returned stderr", type=int) parser.add_argument("--version", action="version", version=f"rnx {__version__}") - + args = parser.parse_args() if args.listen or args.print_identity: @@ -600,8 +600,8 @@ def main(): # while True: # ch = sys.stdin.read(1) # cmdbuf += ch.encode("utf-8") - # print("\r"+prompt+cmdbuf.decode("utf-8"), end="") - + # print("\r"+prompt+cmdbuf.decode("utf-8"), end="") + command = input() if command.lower() == "exit" or command.lower() == "quit": exit(0) @@ -676,7 +676,7 @@ def pretty_time(time, verbose=False): minutes = int(time // 60) time %= 60 seconds = round(time, 2) - + ss = "" if seconds == 1 else "s" sm = "" if minutes == 1 else "s" sh = "" if hours == 1 else "s" diff --git a/RNS/__init__.py b/RNS/__init__.py index 
04ee64e..f0e99a3 100755 --- a/RNS/__init__.py +++ b/RNS/__init__.py @@ -90,7 +90,7 @@ def loglevelname(level): return "Debug" if (level == LOG_EXTREME): return "Extra" - + return "Unknown" def version(): @@ -124,7 +124,7 @@ def log(msg, level=3, _override_destination = False): file = open(logfile, "a") file.write(logstring+"\n") file.close() - + if os.path.getsize(logfile) > LOG_MAXSIZE: prevfile = logfile+".1" if os.path.isfile(prevfile): @@ -138,7 +138,7 @@ def log(msg, level=3, _override_destination = False): log("Exception occurred while writing log message to log file: "+str(e), LOG_CRITICAL) log("Dumping future log events to console!", LOG_CRITICAL) log(msg, level) - + def rand(): result = instance_random.random() @@ -155,7 +155,7 @@ def hexrep(data, delimit=True): iter(data) except TypeError: data = [data] - + delimiter = ":" if not delimit: delimiter = "" @@ -228,7 +228,7 @@ def prettytime(time, verbose=False, compact=False): seconds = int(time) else: seconds = round(time, 2) - + ss = "" if seconds == 1 else "s" sm = "" if minutes == 1 else "s" sh = "" if hours == 1 else "s" @@ -272,7 +272,7 @@ def prettytime(time, verbose=False, compact=False): def prettyshorttime(time, verbose=False, compact=False): time = time*1e6 - + seconds = int(time // 1e6); time %= 1e6 milliseconds = int(time // 1e3); time %= 1e3 @@ -280,7 +280,7 @@ def prettyshorttime(time, verbose=False, compact=False): microseconds = int(time) else: microseconds = round(time, 2) - + ss = "" if seconds == 1 else "s" sms = "" if milliseconds == 1 else "s" sus = "" if microseconds == 1 else "s" @@ -365,16 +365,16 @@ def profiler(tag=None, capture=False, super_tag=None): def profiler_results(): from statistics import mean, median, stdev results = {} - + for tag in profiler_tags: tag_captures = [] tag_entry = profiler_tags[tag] - + for thread_ident in tag_entry["threads"]: thread_entry = tag_entry["threads"][thread_ident] thread_captures = thread_entry["captures"] sample_count = len(thread_captures) - + if sample_count > 2: thread_results = { "count": sample_count, diff --git a/RNS/vendor/configobj.py b/RNS/vendor/configobj.py index e84fde5..338168a 100755 --- a/RNS/vendor/configobj.py +++ b/RNS/vendor/configobj.py @@ -139,28 +139,28 @@ class UnknownType(Exception): class Builder: - + def build(self, o): if m is None: raise UnknownType(o.__class__.__name__) return m(o) - + def build_List(self, o): return list(map(self.build, o.getChildren())) - + def build_Const(self, o): return o.value - + def build_Dict(self, o): d = {} i = iter(map(self.build, o.getChildren())) for el in i: d[el] = next(i) return d - + def build_Tuple(self, o): return tuple(self.build_List(o)) - + def build_Name(self, o): if o.name == 'None': return None @@ -168,10 +168,10 @@ class Builder: return True if o.name == 'False': return False - + # An undefined Name raise UnknownType('Undefined Name') - + def build_Add(self, o): real, imag = list(map(self.build_Const, o.getChildren())) try: @@ -181,14 +181,14 @@ class Builder: if not isinstance(imag, complex) or imag.real != 0.0: raise UnknownType('Add') return real+imag - + def build_Getattr(self, o): parent = self.build(o.expr) return getattr(parent, o.attrname) - + def build_UnarySub(self, o): return -self.build_Const(o.getChildren()[0]) - + def build_UnaryAdd(self, o): return self.build_Const(o.getChildren()[0]) @@ -199,7 +199,7 @@ _builder = Builder() def unrepr(s): if not s: return s - + # this is supposed to be safe import ast return ast.literal_eval(s) @@ -304,7 +304,7 @@ class InterpolationEngine: # 
short-cut if not self._cookie in value: return value - + def recursive_interpolate(key, value, section, backtrail): """The function that does the actual work. @@ -404,7 +404,7 @@ class InterpolationEngine: (e.g., if we interpolated "$$" and returned "$"). """ raise NotImplementedError() - + class ConfigParserInterpolation(InterpolationEngine): @@ -453,27 +453,27 @@ interpolation_engines = { def __newobj__(cls, *args): # Hack for pickle - return cls.__new__(cls, *args) + return cls.__new__(cls, *args) class Section(dict): """ A dictionary-like object that represents a section in a config file. - + It does string interpolation if the 'interpolation' attribute of the 'main' object is set to True. - + Interpolation is tried first from this object, then from the 'DEFAULT' section of this object, next from the parent and its 'DEFAULT' section, and so on until the main object is reached. - + A Section will behave like an ordered dictionary - following the order of the ``scalars`` and ``sections`` attributes. You can use this to change the order of members. - + Iteration follows the order: scalars, then sections. """ - + def __setstate__(self, state): dict.update(self, state[0]) self.__dict__.update(state[1]) @@ -481,8 +481,8 @@ class Section(dict): def __reduce__(self): state = (dict(self), self.__dict__) return (__newobj__, (self.__class__,), state) - - + + def __init__(self, parent, depth, main, indict=None, name=None): """ * parent is the section above @@ -507,8 +507,8 @@ class Section(dict): # (rather than just passing to ``dict.__init__``) for entry, value in indict.items(): self[entry] = value - - + + def _initialise(self): # the sequence of scalar values in this Section self.scalars = [] @@ -552,7 +552,7 @@ class Section(dict): def __getitem__(self, key): """Fetch the item and do string interpolation.""" val = dict.__getitem__(self, key) - if self.main.interpolation: + if self.main.interpolation: if isinstance(val, str): return self._interpolate(key, val) if isinstance(val, list): @@ -569,20 +569,20 @@ class Section(dict): def __setitem__(self, key, value, unrepr=False): """ Correctly set a value. - + Making dictionary values Section instances. (We have to special case 'Section' instances - which are also dicts) - + Keys must be strings. Values need only be strings (or lists of strings) if ``main.stringify`` is set. - + ``unrepr`` must be set when setting a value to a dictionary, without creating a new sub-section. """ if not isinstance(key, str): raise ValueError(f'The key "{key}" is not a string.') - + # add the comment if key not in self.comments: self.comments[key] = [] @@ -683,7 +683,7 @@ class Section(dict): """ A version of clear that also affects scalars/sections Also clears comments and configspec. - + Leaves other attributes alone : depth/main/parent are not affected """ @@ -757,10 +757,10 @@ class Section(dict): def dict(self): """ Return a deepcopy of self as a dictionary. - + All members that are ``Section`` instances are recursively turned to ordinary dictionaries - by calling their ``dict`` method. - + >>> n = a.dict() >>> n == a 1 @@ -785,7 +785,7 @@ class Section(dict): def merge(self, indict): """ A recursive update - useful for merging config files. - + >>> a = '''[section1] ... option1 = True ... 
[[subsection]] @@ -805,17 +805,17 @@ class Section(dict): if (key in self and isinstance(self[key], dict) and isinstance(val, dict)): self[key].merge(val) - else: + else: self[key] = val def rename(self, oldkey, newkey): """ Change a keyname to another, without changing position in sequence. - + Implemented so that transformations can be made on keys, as well as on values. (used by encode and decode) - + Also renames comments. """ if oldkey in self.scalars: @@ -843,30 +843,30 @@ class Section(dict): call_on_sections=False, **keywargs): """ Walk every member and call a function on the keyword and value. - + Return a dictionary of the return values - + If the function raises an exception, raise the errror unless ``raise_errors=False``, in which case set the return value to ``False``. - + Any unrecognised keyword arguments you pass to walk, will be pased on to the function you pass in. - + Note: if ``call_on_sections`` is ``True`` then - on encountering a subsection, *first* the function is called for the *whole* subsection, and then recurses into it's members. This means your function must be able to handle strings, dictionaries and lists. This allows you to change the key of subsections as well as for ordinary members. The return value when called on the whole subsection has to be discarded. - + See the encode and decode methods for examples, including functions. - + .. admonition:: caution - + You can use ``walk`` to transform the names of members of a section but you mustn't add or delete members. - + >>> config = '''[XXXXsection] ... XXXXkey = XXXXvalue'''.splitlines() >>> cfg = ConfigObj(config) @@ -929,17 +929,17 @@ class Section(dict): Accepts a key as input. The corresponding value must be a string or the objects (``True`` or 1) or (``False`` or 0). We allow 0 and 1 to retain compatibility with Python 2.2. - - If the string is one of ``True``, ``On``, ``Yes``, or ``1`` it returns + + If the string is one of ``True``, ``On``, ``Yes``, or ``1`` it returns ``True``. - - If the string is one of ``False``, ``Off``, ``No``, or ``0`` it returns + + If the string is one of ``False``, ``Off``, ``No``, or ``0`` it returns ``False``. - + ``as_bool`` is not case sensitive. - + Any other input will raise a ``ValueError``. - + >>> a = ConfigObj() >>> a['a'] = 'fish' >>> a.as_bool('a') @@ -971,10 +971,10 @@ class Section(dict): def as_int(self, key): """ A convenience method which coerces the specified value to an integer. - + If the value is an invalid literal for ``int``, a ``ValueError`` will be raised. - + >>> a = ConfigObj() >>> a['a'] = 'fish' >>> a.as_int('a') @@ -994,10 +994,10 @@ class Section(dict): def as_float(self, key): """ A convenience method which coerces the specified value to a float. - + If the value is an invalid literal for ``float``, a ``ValueError`` will be raised. - + >>> a = ConfigObj() >>> a['a'] = 'fish' >>> a.as_float('a') #doctest: +IGNORE_EXCEPTION_DETAIL @@ -1011,13 +1011,13 @@ class Section(dict): 3.2... """ return float(self[key]) - - + + def as_list(self, key): """ A convenience method which fetches the specified value, guaranteeing that it is a list. - + >>> a = ConfigObj() >>> a['a'] = 1 >>> a.as_list('a') @@ -1033,15 +1033,15 @@ class Section(dict): if isinstance(result, (tuple, list)): return list(result) return [result] - + def restore_default(self, key): """ Restore (and return) default value for the specified key. - + This method will only work for a ConfigObj that was created with a configspec and has been validated. 
- + If there is no default value for this key, ``KeyError`` is raised. """ default = self.default_values[key] @@ -1050,20 +1050,20 @@ class Section(dict): self.defaults.append(key) return default - + def restore_defaults(self): """ Recursively restore default values to all members that have them. - + This method will only work for a ConfigObj that was created with a configspec and has been validated. - + It doesn't delete or modify entries without default values. """ for key in self.default_values: self.restore_default(key) - + for section in self.sections: self[section].restore_defaults() @@ -1178,7 +1178,7 @@ class ConfigObj(Section): write_empty_values=False, _inspec=False): """ Parse a config file or create a config file object. - + ``ConfigObj(infile=None, configspec=None, encoding=None, interpolation=True, raise_errors=False, list_values=True, create_empty=False, file_error=False, stringify=True, @@ -1188,9 +1188,9 @@ class ConfigObj(Section): self._inspec = _inspec # init the superclass Section.__init__(self, self, 0, self) - + infile = infile or [] - + _options = {'configspec': configspec, 'encoding': encoding, 'interpolation': interpolation, 'raise_errors': raise_errors, 'list_values': list_values, @@ -1206,7 +1206,7 @@ class ConfigObj(Section): warnings.warn('Passing in an options dictionary to ConfigObj() is ' 'deprecated. Use **options instead.', DeprecationWarning, stacklevel=2) - + # TODO: check the values too. for entry in options: if entry not in OPTION_DEFAULTS: @@ -1217,18 +1217,18 @@ class ConfigObj(Section): keyword_value = _options[entry] if value != keyword_value: options[entry] = keyword_value - + # XXXX this ignores an explicit list_values = True in combination # with _inspec. The user should *never* do that anyway, but still... 
if _inspec: options['list_values'] = False - + self._initialise(options) configspec = options['configspec'] self._original_configspec = configspec self._load(infile, configspec) - - + + def _load(self, infile, configspec): if isinstance(infile, str): self.filename = infile @@ -1246,10 +1246,10 @@ class ConfigObj(Section): with open(infile, 'w') as h: h.write('') content = [] - + elif isinstance(infile, (list, tuple)): content = list(infile) - + elif isinstance(infile, dict): # initialise self # the Section class handles creating subsections @@ -1262,18 +1262,18 @@ class ConfigObj(Section): this_section[section] = {} set_section(in_section[section], this_section[section]) set_section(infile, self) - + else: for entry in infile: self[entry] = infile[entry] del self._errors - + if configspec is not None: self._handle_configspec(configspec) else: self.configspec = None return - + elif getattr(infile, 'read', MISSING) is not MISSING: # This supports file like objects content = infile.read() or [] @@ -1300,7 +1300,7 @@ class ConfigObj(Section): assert all(isinstance(line, str) for line in content), repr(content) content = [line.rstrip('\r\n') for line in content] - + self._parse(content) # if we had any errors, now is the time to raise them if self._errors: @@ -1318,17 +1318,17 @@ class ConfigObj(Section): raise error # delete private attributes del self._errors - + if configspec is None: self.configspec = None else: self._handle_configspec(configspec) - - + + def _initialise(self, options=None): if options is None: options = OPTION_DEFAULTS - + # initialise a few variables self.filename = None self._errors = [] @@ -1345,18 +1345,18 @@ class ConfigObj(Section): self.newlines = None self.write_empty_values = options['write_empty_values'] self.unrepr = options['unrepr'] - + self.initial_comment = [] self.final_comment = [] self.configspec = None - + if self._inspec: self.list_values = False - + # Clear section attributes as well Section._initialise(self) - - + + def __repr__(self): def _getval(key): try: @@ -1364,27 +1364,27 @@ class ConfigObj(Section): except MissingInterpolationOption: return dict.__getitem__(self, key) return ('ConfigObj({%s})' % ', '.join([f'{key!r}: {_getval(key)!r}' for key in (self.scalars + self.sections)])) - - + + def _handle_bom(self, infile): """ Handle any BOM, and decode if necessary. - + If an encoding is specified, that *must* be used - but the BOM should still be removed (and the BOM attribute set). - + (If the encoding is wrongly specified, then a BOM for an alternative encoding won't be discovered or removed.) - + If an encoding is not specified, UTF8 or UTF16 BOM will be detected and removed. The BOM attribute will be set. UTF16 will be decoded to unicode. - + NOTE: This method must not be called with an empty ``infile``. - + Specifying the *wrong* encoding is likely to cause a ``UnicodeDecodeError``. - + ``infile`` must always be returned as a list of lines, but may be passed in as a single string. 
""" @@ -1395,7 +1395,7 @@ class ConfigObj(Section): # the encoding specified doesn't have one # just decode return self._decode(infile, self.encoding) - + if isinstance(infile, (list, tuple)): line = infile[0] else: @@ -1424,18 +1424,18 @@ class ConfigObj(Section): ##self.BOM = True # Don't need to remove BOM return self._decode(infile, encoding) - + # If we get this far, will *probably* raise a DecodeError # As it doesn't appear to start with a BOM return self._decode(infile, self.encoding) - + # Must be UTF8 BOM = BOM_SET[enc] if not line.startswith(BOM): return self._decode(infile, self.encoding) - + newline = line[len(BOM):] - + # BOM removed if isinstance(infile, (list, tuple)): infile[0] = newline @@ -1443,7 +1443,7 @@ class ConfigObj(Section): infile = newline self.BOM = True return self._decode(infile, self.encoding) - + # No encoding specified - so we need to check for UTF8/UTF16 for BOM, (encoding, final_encoding) in list(BOMS.items()): if not isinstance(line, bytes) or not line.startswith(BOM): @@ -1470,7 +1470,7 @@ class ConfigObj(Section): return self._decode(infile, 'utf-8') # UTF16 - have to decode return self._decode(infile, encoding) - + if six.PY2 and isinstance(line, str): # don't actually do any decoding, since we're on python 2 and @@ -1494,7 +1494,7 @@ class ConfigObj(Section): def _decode(self, infile, encoding): """ Decode infile to unicode. Using the specified encoding. - + if is a string, it also needs converting to a list. """ if isinstance(infile, str): @@ -1543,14 +1543,14 @@ class ConfigObj(Section): temp_list_values = self.list_values if self.unrepr: self.list_values = False - + comment_list = [] done_start = False this_section = self maxline = len(infile) - 1 cur_index = -1 reset_comment = False - + while cur_index < maxline: if reset_comment: comment_list = [] @@ -1562,13 +1562,13 @@ class ConfigObj(Section): reset_comment = False comment_list.append(line) continue - + if not done_start: # preserve initial comment self.initial_comment = comment_list comment_list = [] done_start = True - + reset_comment = True # first we check if it's a section marker mat = self._sectionmarker.match(line) @@ -1582,7 +1582,7 @@ class ConfigObj(Section): self._handle_error("Cannot compute the section depth", NestingError, infile, cur_index) continue - + if cur_depth < this_section.depth: # the new section is dropping back to a previous level try: @@ -1602,13 +1602,13 @@ class ConfigObj(Section): self._handle_error("Section too nested", NestingError, infile, cur_index) continue - + sect_name = self._unquote(sect_name) if sect_name in parent: self._handle_error('Duplicate section name', DuplicateError, infile, cur_index) continue - + # create the new section this_section = Section( parent, @@ -1709,7 +1709,7 @@ class ConfigObj(Section): """ Given a section and a depth level, walk back through the sections parents to see if the depth level matches a previous section. - + Return a reference to the right section, or raise a SyntaxError. """ @@ -1727,7 +1727,7 @@ class ConfigObj(Section): def _handle_error(self, text, ErrorClass, infile, cur_index): """ Handle an error according to the error settings. - + Either raise the error or store it. The error will have occured at ``cur_index`` """ @@ -1756,19 +1756,19 @@ class ConfigObj(Section): def _quote(self, value, multiline=True): """ Return a safely quoted version of a value. - + Raise a ConfigObjError if the value cannot be safely quoted. If multiline is ``True`` (default) then use triple quotes if necessary. 
- + * Don't quote values that don't need it. * Recursively quote members of a list and return a comma joined list. * Multiline is ``False`` for lists. * Obey list syntax for empty and single member lists. - + If ``list_values=False`` then the value is only quoted if it contains a ``\\n`` (is multiline) or '#'. - + If ``write_empty_values`` is set, and the value is an empty string, it won't be quoted. """ @@ -1776,7 +1776,7 @@ class ConfigObj(Section): # Only if multiline is set, so that it is used for values not # keys, and not values that are part of a list return '' - + if multiline and isinstance(value, (list, tuple)): if not value: return ',' @@ -1794,12 +1794,12 @@ class ConfigObj(Section): if not value: return '""' - + no_lists_no_quotes = not self.list_values and '\n' not in value and '#' not in value need_triple = multiline and ((("'" in value) and ('"' in value)) or ('\n' in value )) hash_triple_quote = multiline and not need_triple and ("'" in value) and ('"' in value) and ('#' in value) check_for_single = (no_lists_no_quotes or not need_triple) and not hash_triple_quote - + if check_for_single: if not self.list_values: # we don't quote if ``list_values=False`` @@ -1817,13 +1817,13 @@ class ConfigObj(Section): else: # if value has '\n' or "'" *and* '"', it will need triple quotes quot = self._get_triple_quote(value) - + if quot == noquot and '#' in value and self.list_values: quot = self._get_single_quote(value) - + return quot % value - - + + def _get_single_quote(self, value): if ("'" in value) and ('"' in value): raise ConfigObjError(f'Value "{value}" cannot be safely quoted.') @@ -1832,15 +1832,15 @@ class ConfigObj(Section): else: quot = dquot return quot - - + + def _get_triple_quote(self, value): if (value.find('"""') != -1) and (value.find("'''") != -1): raise ConfigObjError(f'Value "{value}" cannot be safely quoted.') if value.find('"""') == -1: quot = tdquot else: - quot = tsquot + quot = tsquot return quot @@ -1930,7 +1930,7 @@ class ConfigObj(Section): def _handle_configspec(self, configspec): """Parse the configspec.""" - # FIXME: Should we check that the configspec was created with the + # FIXME: Should we check that the configspec was created with the # correct settings ? (i.e. ``list_values=False``) if not isinstance(configspec, ConfigObj): try: @@ -1944,11 +1944,11 @@ class ConfigObj(Section): raise ConfigspecError(f'Parsing configspec failed: {e}') except OSError as e: raise OSError(f'Reading configspec failed: {e}') - - self.configspec = configspec - - + self.configspec = configspec + + + def _set_configspec(self, section, copy): """ Called by validate. 
Handles setting the configspec on subsections @@ -1960,7 +1960,7 @@ class ConfigObj(Section): for entry in section.sections: if entry not in configspec: section[entry].configspec = many - + for entry in configspec.sections: if entry == '__many__': continue @@ -1971,11 +1971,11 @@ class ConfigObj(Section): # copy comments section.comments[entry] = configspec.comments.get(entry, []) section.inline_comments[entry] = configspec.inline_comments.get(entry, '') - + # Could be a scalar when we expect a section if isinstance(section[entry], Section): section[entry].configspec = configspec[entry] - + def _write_line(self, indent_string, entry, this_entry, comment): """Write an individual line, for the write method""" @@ -2007,9 +2007,9 @@ class ConfigObj(Section): def write(self, outfile=None, section=None): """ Write the current ConfigObj as a file - + tekNico: FIXME: use StringIO instead of real files - + >>> filename = a.filename >>> a.filename = 'test.ini' >>> a.write() @@ -2022,7 +2022,7 @@ class ConfigObj(Section): if self.indent_type is None: # this can be true if initialised from a dictionary self.indent_type = DEFAULT_INDENT_TYPE - + out = [] cs = self._a_to_u('#') csp = self._a_to_u('# ') @@ -2036,7 +2036,7 @@ class ConfigObj(Section): if stripped_line and not stripped_line.startswith(cs): line = csp + line out.append(line) - + indent_string = self.indent_type * section.depth for entry in (section.scalars + section.sections): if entry in section.defaults: @@ -2049,7 +2049,7 @@ class ConfigObj(Section): out.append(indent_string + comment_line) this_entry = section[entry] comment = self._handle_comment(section.inline_comments[entry]) - + if isinstance(this_entry, Section): # a section out.append(self._write_marker( @@ -2064,7 +2064,7 @@ class ConfigObj(Section): entry, this_entry, comment)) - + if section is self: for line in self.final_comment: line = self._decode_element(line) @@ -2073,10 +2073,10 @@ class ConfigObj(Section): line = csp + line out.append(line) self.interpolation = int_val - + if section is not self: return out - + if (self.filename is None) and (outfile is None): # output a list of lines # might need to encode @@ -2090,7 +2090,7 @@ class ConfigObj(Section): out.append('') out[0] = BOM_UTF8 + out[0] return out - + # Turn the list to a string, joined with correct newlines newline = self.newlines or os.linesep if (getattr(outfile, 'mode', None) is not None and outfile.mode == 'w' @@ -2122,34 +2122,34 @@ class ConfigObj(Section): section=None): """ Test the ConfigObj against a configspec. - + It uses the ``validator`` object from *validate.py*. - + To run ``validate`` on the current ConfigObj, call: :: - + test = config.validate(validator) - + (Normally having previously passed in the configspec when the ConfigObj was created - you can dynamically assign a dictionary of checks to the ``configspec`` attribute of a section though). - + It returns ``True`` if everything passes, or a dictionary of pass/fails (True/False). If every member of a subsection passes, it will just have the value ``True``. (It also returns ``False`` if all members fail). - + In addition, it converts the values from strings to their native types if their checks pass (and ``stringify`` is set). - + If ``preserve_errors`` is ``True`` (``False`` is default) then instead of a marking a fail with a ``False``, it will preserve the actual exception object. This can contain info about the reason for failure. For example the ``VdtValueTooSmallError`` indicates that the value supplied was too small. 
If a value (or section) is missing it will still be marked as ``False``. - + You must have the validate module to use ``preserve_errors=True``. - + You can then use the ``flatten_errors`` function to turn your nested results dictionary into a flattened list of failures - useful for displaying meaningful error messages. @@ -2162,7 +2162,7 @@ class ConfigObj(Section): # Which makes importing configobj faster from validate import VdtMissingValue self._vdtMissingValue = VdtMissingValue - + section = self if copy: @@ -2172,23 +2172,23 @@ class ConfigObj(Section): section.BOM = section.configspec.BOM section.newlines = section.configspec.newlines section.indent_type = section.configspec.indent_type - + # # section.default_values.clear() #?? configspec = section.configspec self._set_configspec(section, copy) - + def validate_entry(entry, spec, val, missing, ret_true, ret_false): section.default_values.pop(entry, None) - + try: section.default_values[entry] = validator.get_default_value(configspec[entry]) except (KeyError, AttributeError, validator.baseErrorClass): # No default, bad default or validator has no 'get_default_value' # (e.g. SimpleVal) pass - + try: check = validator.check(spec, val, @@ -2222,16 +2222,16 @@ class ConfigObj(Section): if not copy and missing and entry not in section.defaults: section.defaults.append(entry) return ret_true, ret_false - + # out = {} ret_true = True ret_false = True - + unvalidated = [k for k in section.scalars if k not in configspec] - incorrect_sections = [k for k in configspec.sections if k in section.scalars] + incorrect_sections = [k for k in configspec.sections if k in section.scalars] incorrect_scalars = [k for k in configspec.scalars if k in section.sections] - + for entry in configspec.scalars: if entry in ('__many__', '___many___'): # reserved names @@ -2251,16 +2251,16 @@ class ConfigObj(Section): else: missing = False val = section[entry] - - ret_true, ret_false = validate_entry(entry, configspec[entry], val, + + ret_true, ret_false = validate_entry(entry, configspec[entry], val, missing, ret_true, ret_false) - + many = None if '__many__' in configspec.scalars: many = configspec['__many__'] elif '___many___' in configspec.scalars: many = configspec['___many___'] - + if many is not None: for entry in unvalidated: val = section[entry] @@ -2284,7 +2284,7 @@ class ConfigObj(Section): ret_false = False msg = f'Section {entry!r} was provided as a single value' out[entry] = validator.baseErrorClass(msg) - + # Missing sections will have been created as empty ones when the # configspec was read. for entry in section.sections: @@ -2305,7 +2305,7 @@ class ConfigObj(Section): ret_false = False else: ret_true = False - + section.extra_values = unvalidated if preserve_errors and not section._created: # If the section wasn't created (i.e. it wasn't missing) @@ -2334,12 +2334,12 @@ class ConfigObj(Section): self.configspec = None # Just to be sure ;-) self._original_configspec = None - - + + def reload(self): """ Reload a ConfigObj from file. - + This method raises a ``ReloadError`` if the ConfigObj doesn't have a filename attribute pointing to a file. """ @@ -2352,31 +2352,31 @@ class ConfigObj(Section): if entry == 'configspec': continue current_options[entry] = getattr(self, entry) - + configspec = self._original_configspec current_options['configspec'] = configspec - + self.clear() self._initialise(current_options) self._load(filename, configspec) - + class SimpleVal: """ A simple validator. Can be used to check that all members expected are present. 
- + To use it, provide a configspec with all your members in (the value given will be ignored). Pass an instance of ``SimpleVal`` to the ``validate`` method of your ``ConfigObj``. ``validate`` will return ``True`` if all members are present, or a dictionary with True/False meaning present/missing. (Whole missing sections will be replaced with ``False``) """ - + def __init__(self): self.baseErrorClass = ConfigObjError - + def check(self, check, member, missing=False): """A dummy check method, always returns the value unchanged.""" if missing: @@ -2388,32 +2388,32 @@ def flatten_errors(cfg, res, levels=None, results=None): """ An example function that will turn a nested dictionary of results (as returned by ``ConfigObj.validate``) into a flat list. - + ``cfg`` is the ConfigObj instance being checked, ``res`` is the results dictionary returned by ``validate``. - + (This is a recursive function, so you shouldn't use the ``levels`` or ``results`` arguments - they are used by the function.) - + Returns a list of keys that failed. Each member of the list is a tuple:: - + ([list of sections...], key, result) - + If ``validate`` was called with ``preserve_errors=False`` (the default) then ``result`` will always be ``False``. *list of sections* is a flattened list of sections that the key was found in. - + If the section was missing (or a section was expected and a scalar provided - or vice-versa) then key will be ``None``. - + If the value (or section) was missing then ``result`` will be ``False``. - + If ``validate`` was called with ``preserve_errors=True`` and a value was present, but failed the check, then ``result`` will be the exception object returned. You can use this as a string that describes the failure. - + For example *The value "3" is of the wrong type*. """ if levels is None: @@ -2448,21 +2448,21 @@ def get_extra_values(conf, _prepend=()): """ Find all the values and sections not in the configspec from a validated ConfigObj. - + ``get_extra_values`` returns a list of tuples where each tuple represents either an extra section, or an extra value. - - The tuples contain two values, a tuple representing the section the value + + The tuples contain two values, a tuple representing the section the value is in and the name of the extra values. For extra values in the top level section the first member will be an empty tuple. For values in the 'foo' section the first member will be ``('foo',)``. For members in the 'bar' subsection of the 'foo' section the first member will be ``('foo', 'bar')``. - + NOTE: If you call ``get_extra_values`` on a ConfigObj instance that hasn't been validated it will return an empty list. """ out = [] - + out.extend([(_prepend, name) for name in conf.extra_values]) for name in conf.sections: if name not in conf.extra_values: diff --git a/RNS/vendor/i2plib/__init__.py b/RNS/vendor/i2plib/__init__.py index ee6de01..1de6e30 100644 --- a/RNS/vendor/i2plib/__init__.py +++ b/RNS/vendor/i2plib/__init__.py @@ -1,5 +1,5 @@ """ -A modern asynchronous library for building I2P applications. +A modern asynchronous library for building I2P applications. 
""" from .__version__ import ( @@ -10,7 +10,7 @@ from .__version__ import ( from .sam import Destination, PrivateKey from .aiosam import ( - get_sam_socket, dest_lookup, new_destination, + get_sam_socket, dest_lookup, new_destination, create_session, stream_connect, stream_accept, Session, StreamConnection, StreamAcceptor ) diff --git a/RNS/vendor/i2plib/aiosam.py b/RNS/vendor/i2plib/aiosam.py index eccff10..25807e2 100644 --- a/RNS/vendor/i2plib/aiosam.py +++ b/RNS/vendor/i2plib/aiosam.py @@ -34,12 +34,12 @@ async def get_sam_socket(sam_address=sam.DEFAULT_ADDRESS, loop=None): writer.close() raise exceptions.SAM_EXCEPTIONS[reply["RESULT"]]() -async def dest_lookup(domain, sam_address=sam.DEFAULT_ADDRESS, +async def dest_lookup(domain, sam_address=sam.DEFAULT_ADDRESS, loop=None): - """A coroutine used to lookup a full I2P destination by .i2p domain or + """A coroutine used to lookup a full I2P destination by .i2p domain or .b32.i2p address. - :param domain: Address to be resolved, can be a .i2p domain or a .b32.i2p + :param domain: Address to be resolved, can be a .i2p domain or a .b32.i2p address. :param sam_address: (optional) SAM API address :param loop: (optional) Event loop instance @@ -56,7 +56,7 @@ async def dest_lookup(domain, sam_address=sam.DEFAULT_ADDRESS, async def new_destination(sam_address=sam.DEFAULT_ADDRESS, loop=None, sig_type=sam.Destination.default_sig_type): - """A coroutine used to generate a new destination with a private key of a + """A coroutine used to generate a new destination with a private key of a chosen signature type. :param sam_address: (optional) SAM API address @@ -70,7 +70,7 @@ async def new_destination(sam_address=sam.DEFAULT_ADDRESS, loop=None, writer.close() return sam.Destination(reply["PRIV"], has_private_key=True) -async def create_session(session_name, sam_address=sam.DEFAULT_ADDRESS, +async def create_session(session_name, sam_address=sam.DEFAULT_ADDRESS, loop=None, style="STREAM", signature_type=sam.Destination.default_sig_type, destination=None, options={}): @@ -80,10 +80,10 @@ async def create_session(session_name, sam_address=sam.DEFAULT_ADDRESS, :param sam_address: (optional) SAM API address :param loop: (optional) Event loop instance :param style: (optional) Session style, can be STREAM, DATAGRAM, RAW - :param signature_type: (optional) If the destination is TRANSIENT, this + :param signature_type: (optional) If the destination is TRANSIENT, this signature type is used - :param destination: (optional) Destination to use in this session. Can be - a base64 encoded string, :class:`Destination` + :param destination: (optional) Destination to use in this session. Can be + a base64 encoded string, :class:`Destination` instance or None. TRANSIENT destination is used when it is None. 
:param options: (optional) A dict object with i2cp options @@ -111,7 +111,7 @@ async def create_session(session_name, sam_address=sam.DEFAULT_ADDRESS, if reply.ok: if not destination: destination = sam.Destination( - reply["DESTINATION"], has_private_key=True) + reply["DESTINATION"], has_private_key=True) logger.debug(destination.base32) logger.debug(f"Session created {session_name}") return (reader, writer) @@ -119,7 +119,7 @@ async def create_session(session_name, sam_address=sam.DEFAULT_ADDRESS, writer.close() raise exceptions.SAM_EXCEPTIONS[reply["RESULT"]]() -async def stream_connect(session_name, destination, +async def stream_connect(session_name, destination, sam_address=sam.DEFAULT_ADDRESS, loop=None): """A coroutine used to connect to a remote I2P destination. @@ -173,16 +173,16 @@ class Session: :param sam_address: (optional) SAM API address :param loop: (optional) Event loop instance :param style: (optional) Session style, can be STREAM, DATAGRAM, RAW - :param signature_type: (optional) If the destination is TRANSIENT, this + :param signature_type: (optional) If the destination is TRANSIENT, this signature type is used - :param destination: (optional) Destination to use in this session. Can be - a base64 encoded string, :class:`Destination` + :param destination: (optional) Destination to use in this session. Can be + a base64 encoded string, :class:`Destination` instance or None. TRANSIENT destination is used when it is None. :param options: (optional) A dict object with i2cp options :return: :class:`Session` object """ - def __init__(self, session_name, sam_address=sam.DEFAULT_ADDRESS, + def __init__(self, session_name, sam_address=sam.DEFAULT_ADDRESS, loop=None, style="STREAM", signature_type=sam.Destination.default_sig_type, destination=None, options={}): @@ -195,9 +195,9 @@ class Session: self.options = options async def __aenter__(self): - self.reader, self.writer = await create_session(self.session_name, - sam_address=self.sam_address, loop=self.loop, style=self.style, - signature_type=self.signature_type, + self.reader, self.writer = await create_session(self.session_name, + sam_address=self.sam_address, loop=self.loop, style=self.style, + signature_type=self.signature_type, destination=self.destination, options=self.options) return self @@ -214,7 +214,7 @@ class StreamConnection: :param loop: (optional) Event loop instance :return: :class:`StreamConnection` object """ - def __init__(self, session_name, destination, + def __init__(self, session_name, destination, sam_address=sam.DEFAULT_ADDRESS, loop=None): self.session_name = session_name self.sam_address = sam_address @@ -222,7 +222,7 @@ class StreamConnection: self.destination = destination async def __aenter__(self): - self.reader, self.writer = await stream_connect(self.session_name, + self.reader, self.writer = await stream_connect(self.session_name, self.destination, sam_address=self.sam_address, loop=self.loop) self.read = self.reader.read self.write = self.writer.write @@ -240,14 +240,14 @@ class StreamAcceptor: :param loop: (optional) Event loop instance :return: :class:`StreamAcceptor` object """ - def __init__(self, session_name, sam_address=sam.DEFAULT_ADDRESS, + def __init__(self, session_name, sam_address=sam.DEFAULT_ADDRESS, loop=None): self.session_name = session_name self.sam_address = sam_address self.loop = loop async def __aenter__(self): - self.reader, self.writer = await stream_accept(self.session_name, + self.reader, self.writer = await stream_accept(self.session_name, 
sam_address=self.sam_address, loop=self.loop) self.read = self.reader.read self.write = self.writer.write diff --git a/RNS/vendor/i2plib/sam.py b/RNS/vendor/i2plib/sam.py index dad8b76..2220ae8 100644 --- a/RNS/vendor/i2plib/sam.py +++ b/RNS/vendor/i2plib/sam.py @@ -8,7 +8,7 @@ I2P_B64_CHARS = "-~" def i2p_b64encode(x): """Encode I2P destination""" - return b64encode(x, altchars=I2P_B64_CHARS.encode()).decode() + return b64encode(x, altchars=I2P_B64_CHARS.encode()).decode() def i2p_b64decode(x): """Decode I2P destination""" @@ -79,9 +79,9 @@ class Destination: https://geti2p.net/spec/common-structures#destination - :param data: (optional) Base64 encoded data or binary data - :param path: (optional) A path to a file with binary data - :param has_private_key: (optional) Does data have a private key? + :param data: (optional) Base64 encoded data or binary data + :param path: (optional) A path to a file with binary data + :param has_private_key: (optional) Does data have a private key? """ ECDSA_SHA256_P256 = 1 @@ -97,12 +97,12 @@ class Destination: def __init__(self, data=None, path=None, has_private_key=False): #: Binary destination - self.data = b'' + self.data = b'' #: Base64 encoded destination - self.base64 = "" + self.base64 = "" #: :class:`RNS.vendor.i2plib.PrivateKey` instance or None - self.private_key = None - + self.private_key = None + if path: with open(path, "rb") as f: data = f.read() @@ -126,13 +126,13 @@ class Destination: """Base32 destination hash of this destination""" desthash = sha256(self.data).digest() return b32encode(desthash).decode()[:52].lower() - + class PrivateKey: """I2P private key https://geti2p.net/spec/common-structures#keysandcert - :param data: Base64 encoded data or binary data + :param data: Base64 encoded data or binary data """ def __init__(self, data): diff --git a/RNS/vendor/i2plib/tunnel.py b/RNS/vendor/i2plib/tunnel.py index aeb0f63..51fb045 100644 --- a/RNS/vendor/i2plib/tunnel.py +++ b/RNS/vendor/i2plib/tunnel.py @@ -2,7 +2,7 @@ import logging import asyncio import argparse -from . import sam +from . import sam from . import aiosam from . import utils from .log import logger @@ -29,9 +29,9 @@ async def proxy_data(reader, writer): class I2PTunnel: """Base I2P Tunnel object, not to be used directly - :param local_address: A local address to use for a tunnel. + :param local_address: A local address to use for a tunnel. E.g. ("127.0.0.1", 6668) - :param destination: (optional) Destination to use for this tunnel. Can be + :param destination: (optional) Destination to use for this tunnel. Can be a base64 encoded string, :class:`Destination` instance or None. A new destination is created when it is None. 
@@ -42,7 +42,7 @@ class I2PTunnel: :param sam_address: (optional) SAM API address """ - def __init__(self, local_address, destination=None, session_name=None, + def __init__(self, local_address, destination=None, session_name=None, options={}, loop=None, sam_address=sam.DEFAULT_ADDRESS): self.local_address = local_address self.destination = destination @@ -57,7 +57,7 @@ class I2PTunnel: sam_address=self.sam_address, loop=self.loop) _, self.session_writer = await aiosam.create_session( self.session_name, style=self.style, options=self.options, - sam_address=self.sam_address, + sam_address=self.sam_address, loop=self.loop, destination=self.destination) def stop(self): @@ -68,11 +68,11 @@ class ClientTunnel(I2PTunnel): """Client tunnel, a subclass of tunnel.I2PTunnel If you run a client tunnel with a local address ("127.0.0.1", 6668) and - a remote destination "irc.echelon.i2p", all connections to 127.0.0.1:6668 + a remote destination "irc.echelon.i2p", all connections to 127.0.0.1:6668 will be proxied to irc.echelon.i2p. - :param remote_destination: Remote I2P destination, can be either .i2p - domain, .b32.i2p address, base64 destination or + :param remote_destination: Remote I2P destination, can be either .i2p + domain, .b32.i2p address, base64 destination or :class:`Destination` instance """ @@ -90,12 +90,12 @@ class ClientTunnel(I2PTunnel): """Handle local client connection""" try: sc_task = aiosam.stream_connect( - self.session_name, self.remote_destination, + self.session_name, self.remote_destination, sam_address=self.sam_address, loop=self.loop) self.status["connect_tasks"].append(sc_task) - + remote_reader, remote_writer = await sc_task - asyncio.ensure_future(proxy_data(remote_reader, client_writer), + asyncio.ensure_future(proxy_data(remote_reader, client_writer), loop=self.loop) asyncio.ensure_future(proxy_data(client_reader, remote_writer), loop=self.loop) @@ -123,8 +123,8 @@ class ServerTunnel(I2PTunnel): """Server tunnel, a subclass of tunnel.I2PTunnel If you want to expose a local service 127.0.0.1:80 to the I2P network, run - a server tunnel with a local address ("127.0.0.1", 80). If you don't - provide a private key or a session name, it will use a TRANSIENT + a server tunnel with a local address ("127.0.0.1", 80). If you don't + provide a private key or a session name, it will use a TRANSIENT destination. 
""" def __init__(self, *args, **kwargs): @@ -139,7 +139,7 @@ class ServerTunnel(I2PTunnel): async def handle_client(incoming, client_reader, client_writer): try: # data and dest may come in one chunk - dest, data = incoming.split(b"\n", 1) + dest, data = incoming.split(b"\n", 1) remote_destination = sam.Destination(dest.decode()) logger.debug(f"{self.session_name} client connected: {remote_destination.base32}.b32.i2p") @@ -151,7 +151,7 @@ class ServerTunnel(I2PTunnel): try: sc_task = asyncio.wait_for( asyncio.open_connection( - host=self.local_address[0], + host=self.local_address[0], port=self.local_address[1]), timeout=5) self.status["connect_tasks"].append(sc_task) @@ -172,7 +172,7 @@ class ServerTunnel(I2PTunnel): try: while True: client_reader, client_writer = await aiosam.stream_accept( - self.session_name, sam_address=self.sam_address, + self.session_name, sam_address=self.sam_address, loop=self.loop) incoming = await client_reader.read(BUFFER_SIZE) asyncio.ensure_future(handle_client( @@ -192,13 +192,13 @@ if __name__ == '__main__': parser = argparse.ArgumentParser() parser.add_argument('type', metavar="TYPE", choices=('server', 'client'), help="Tunnel type (server or client)") - parser.add_argument('address', metavar="ADDRESS", + parser.add_argument('address', metavar="ADDRESS", help="Local address (e.g. 127.0.0.1:8000)") parser.add_argument('--debug', '-d', action='store_true', help='Debugging') parser.add_argument('--key', '-k', default='', metavar='PRIVATE_KEY', help='Path to private key file') - parser.add_argument('--destination', '-D', default='', + parser.add_argument('--destination', '-D', default='', metavar='DESTINATION', help='Remote destination') args = parser.parse_args() @@ -216,10 +216,10 @@ if __name__ == '__main__': local_address = utils.address_from_string(args.address) if args.type == "client": - tunnel = ClientTunnel(args.destination, local_address, loop=loop, + tunnel = ClientTunnel(args.destination, local_address, loop=loop, destination=destination, sam_address=SAM_ADDRESS) elif args.type == "server": - tunnel = ServerTunnel(local_address, loop=loop, destination=destination, + tunnel = ServerTunnel(local_address, loop=loop, destination=destination, sam_address=SAM_ADDRESS) asyncio.ensure_future(tunnel.run(), loop=loop) diff --git a/tests/channel.py b/tests/channel.py index 85e7217..793ad8c 100644 --- a/tests/channel.py +++ b/tests/channel.py @@ -62,7 +62,7 @@ class Packet: def set_delivered_callback(self, callback: Callable[[Packet], None]): self.delivered_callback = callback - + def delivered(self): with self.lock: self.state = MessageState.MSGSTATE_DELIVERED diff --git a/tests/identity.py b/tests/identity.py index 9f66a10..990197a 100644 --- a/tests/identity.py +++ b/tests/identity.py @@ -187,7 +187,7 @@ class TestIdentity(unittest.TestCase): lb = 1 else: lb = 8 - + for i in range(1, lb): msg = os.urandom(mlen) b += mlen diff --git a/tests/link.py b/tests/link.py index 1713df7..5b61921 100644 --- a/tests/link.py +++ b/tests/link.py @@ -105,7 +105,7 @@ class TestLink(unittest.TestCase): dest = RNS.Destination(id1, RNS.Destination.OUT, RNS.Destination.SINGLE, APP_NAME, "link", "establish") self.assertEqual(dest.hash, bytes.fromhex("fb48da0e82e6e01ba0c014513f74540d")) - + l1 = RNS.Link(dest) time.sleep(0.5) self.assertEqual(l1.status, RNS.Link.ACTIVE) @@ -129,7 +129,7 @@ class TestLink(unittest.TestCase): dest = RNS.Destination(id1, RNS.Destination.OUT, RNS.Destination.SINGLE, APP_NAME, "link", "establish") self.assertEqual(dest.hash, 
bytes.fromhex("fb48da0e82e6e01ba0c014513f74540d")) - + l1 = RNS.Link(dest) time.sleep(0.5) self.assertEqual(l1.status, RNS.Link.ACTIVE) @@ -176,7 +176,7 @@ class TestLink(unittest.TestCase): if n_failed > 0: ns = "s" if n_failed != 1 else "" print(f"Failed to receive proof for {n_failed} packet{ns}") - + self.assertEqual(all_ok, True) print("OK!") print(f"Single packet and proof round-trip throughput is {self.size_str(b / pduration, 'b')}ps") @@ -201,7 +201,7 @@ class TestLink(unittest.TestCase): dest = RNS.Destination(id1, RNS.Destination.OUT, RNS.Destination.SINGLE, APP_NAME, "link", "establish") self.assertEqual(dest.hash, bytes.fromhex("fb48da0e82e6e01ba0c014513f74540d")) - + l1 = RNS.Link(dest) time.sleep(0.5) self.assertEqual(l1.status, RNS.Link.ACTIVE) @@ -241,7 +241,7 @@ class TestLink(unittest.TestCase): dest = RNS.Destination(id1, RNS.Destination.OUT, RNS.Destination.SINGLE, APP_NAME, "link", "establish") self.assertEqual(dest.hash, bytes.fromhex("fb48da0e82e6e01ba0c014513f74540d")) - + l1 = RNS.Link(dest) time.sleep(0.5) self.assertEqual(l1.status, RNS.Link.ACTIVE) @@ -280,7 +280,7 @@ class TestLink(unittest.TestCase): dest = RNS.Destination(id1, RNS.Destination.OUT, RNS.Destination.SINGLE, APP_NAME, "link", "establish") self.assertEqual(dest.hash, bytes.fromhex("fb48da0e82e6e01ba0c014513f74540d")) - + l1 = RNS.Link(dest) time.sleep(0.5) self.assertEqual(l1.status, RNS.Link.ACTIVE) @@ -310,7 +310,7 @@ class TestLink(unittest.TestCase): if RNS.Cryptography.backend() == "internal": print("Skipping medium resource test...") return - + init_rns(self) print("") print("Medium resource test") @@ -324,7 +324,7 @@ class TestLink(unittest.TestCase): dest = RNS.Destination(id1, RNS.Destination.OUT, RNS.Destination.SINGLE, APP_NAME, "link", "establish") self.assertEqual(dest.hash, bytes.fromhex("fb48da0e82e6e01ba0c014513f74540d")) - + l1 = RNS.Link(dest) time.sleep(0.5) self.assertEqual(l1.status, RNS.Link.ACTIVE) @@ -371,7 +371,7 @@ class TestLink(unittest.TestCase): dest = RNS.Destination(id1, RNS.Destination.OUT, RNS.Destination.SINGLE, APP_NAME, "link", "establish") self.assertEqual(dest.hash, bytes.fromhex("fb48da0e82e6e01ba0c014513f74540d")) - + l1 = RNS.Link(dest) time.sleep(0.5) self.assertEqual(l1.status, RNS.Link.ACTIVE) @@ -553,11 +553,11 @@ class TestLink(unittest.TestCase): target_bytes = 3000 else: target_bytes = BUFFER_TEST_TARGET - + random.seed(154889) message = random.randbytes(target_bytes) buffer_read_target = len(message) - + # the return message will have an appendage string " back at you" # for every StreamDataMessage that arrives. To verify, we need # to insert that string every MAX_DATA_LEN and also at the end. @@ -572,7 +572,7 @@ class TestLink(unittest.TestCase): # StreamDataMessage, the appended text will end up in a # separate packet. print(f"Sending {len(message)} bytes, receiving {len(expected_rx_message)} bytes, ") - + buffer.write(message) buffer.flush() @@ -723,7 +723,7 @@ def profile_resource(): resource_profiling() def profile_targets(): - + targets_profiling(yp=True) # cProfile.runctx("entry()", {"entry": targets_profiling, "size_str": size_str}, {}, "profile-targets.data") # p = pstats.Stats("profile-targets.data") @@ -749,7 +749,7 @@ def resource_profiling(): import yappi yappi.start() - + resource = RNS.Resource(data, l1, timeout=resource_timeout) start = time.time()
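
For context, the ClientTunnel and ServerTunnel docstrings touched above describe the intended use of the i2plib tunnel API. A minimal usage sketch, mirroring the module's own __main__ block as it appears in this diff, might look as follows; the import path and the exact behaviour of run() are assumptions inferred from the code shown here, not verified against the library.

    import asyncio
    from RNS.vendor.i2plib.tunnel import ClientTunnel  # assumed import path

    # Proxy local connections on 127.0.0.1:6668 to the irc.echelon.i2p
    # destination, as described in the ClientTunnel docstring above.
    # Assumes a reachable I2P SAM bridge; sam.DEFAULT_ADDRESS is used
    # when no sam_address is passed. A ServerTunnel would instead expose
    # a local service (e.g. ("127.0.0.1", 80)) to the I2P network.
    loop = asyncio.new_event_loop()
    tunnel = ClientTunnel("irc.echelon.i2p", ("127.0.0.1", 6668), loop=loop)
    loop.run_until_complete(tunnel.run())  # assumed: sets up the SAM session and local listener
    loop.run_forever()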