Mirror of https://github.com/markqvist/Reticulum.git, synced 2024-11-22 21:50:18 +00:00

Commit de8d9cf722: Implemented resources
Parent: 19d9b1a4a5
@@ -2,7 +2,7 @@ header types
 -----------------
 type 1  00  Two byte header, one 10 byte address field
 type 2  01  Two byte header, two 10 byte address fields
-type 3  10  Two byte header, one 10 byte address field, used for link request proofs
+type 3  10  Reserved
 type 4  11  Reserved for extended header format


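The two header-type bits listed above occupy the top of the packet's first byte; the remaining bits carry the transport type, destination type and packet type. A minimal sketch of that packing, mirroring Packet.getPackedFlags() and Packet.unpack() as they appear later in this commit (the helper functions themselves are illustrative, not part of the code base):

    # Illustrative only: how the first header byte is packed and unpacked.
    def pack_flags(header_type, transport_type, destination_type, packet_type):
        # 2 bits header type | 2 bits transport type | 2 bits destination type | 2 bits packet type
        return (header_type << 6) | (transport_type << 4) | (destination_type << 2) | packet_type

    def unpack_flags(flags):
        return ((flags & 0b11000000) >> 6,   # header type
                (flags & 0b00110000) >> 4,   # transport type
                (flags & 0b00001100) >> 2,   # destination type
                (flags & 0b00000011))        # packet type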
@@ -201,7 +201,7 @@ class Destination:
     # Application specific data can be added to the announce.
     def announce(self,app_data=None):
         destination_hash = self.hash
-        random_hash = self.identity.getRandomHash()
+        random_hash = RNS.Identity.getRandomHash()

         signed_data = self.hash+self.identity.getPublicKey()+random_hash
         if app_data != None:
@@ -4,7 +4,7 @@ import os
 import RNS
 import time
 import atexit
-import cPickle
+import vendor.umsgpack as umsgpack
 from cryptography.hazmat.primitives import hashes
 from cryptography.hazmat.backends import default_backend
 from cryptography.hazmat.primitives import serialization
@@ -61,7 +61,7 @@ class Identity:
     def saveKnownDestinations():
         RNS.log("Saving known destinations to storage...", RNS.LOG_VERBOSE)
         file = open(RNS.Reticulum.storagepath+"/known_destinations","w")
-        cPickle.dump(Identity.known_destinations, file)
+        umsgpack.dump(Identity.known_destinations, file)
         file.close()
         RNS.log("Done saving known destinations to storage", RNS.LOG_VERBOSE)

@@ -69,7 +69,7 @@ class Identity:
     def loadKnownDestinations():
         if os.path.isfile(RNS.Reticulum.storagepath+"/known_destinations"):
             file = open(RNS.Reticulum.storagepath+"/known_destinations","r")
-            Identity.known_destinations = cPickle.load(file)
+            Identity.known_destinations = umsgpack.load(file)
             file.close()
             RNS.log("Loaded "+str(len(Identity.known_destinations))+" known destinations from storage", RNS.LOG_VERBOSE)
         else:
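The persistence format for known destinations changes here from cPickle to the vendored umsgpack module, whose dump() and load() operate on file objects much like the pickle calls they replace. A small round-trip sketch under assumed, illustrative data (the file name and contents are placeholders, and binary mode is used here because msgpack is a binary encoding):

    import vendor.umsgpack as umsgpack

    # Placeholder structure, not the actual known_destinations layout
    known = {"destination_hash": ["timestamp", "packet_hash", "public_key", "app_data"]}

    with open("known_destinations", "wb") as f:
        umsgpack.dump(known, f)

    with open("known_destinations", "rb") as f:
        restored = umsgpack.load(f)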
@@ -89,6 +89,10 @@ class Identity:

         return digest.finalize()[:10]

+    @staticmethod
+    def getRandomHash():
+        return Identity.truncatedHash(os.urandom(10))
+
     @staticmethod
     def validateAnnounce(packet):
         if packet.packet_type == RNS.Packet.ANNOUNCE:
@@ -276,8 +280,3 @@ class Identity:

             proof = RNS.Packet(destination, proof_data, RNS.Packet.PROOF)
             proof.send()
-
-
-    def getRandomHash(self):
-        return self.truncatedHash(os.urandom(10))
-
RNS/Link.py (113 lines changed)
@@ -14,15 +14,21 @@ class LinkCallbacks:
         self.link_established = None
         self.packet = None
         self.resource_started = None
-        self.resource_completed = None
+        self.resource_concluded = None

 class Link:
     CURVE = ec.SECP256R1()
     ECPUBSIZE = 91
+    BLOCKSIZE = 16

     PENDING = 0x00
     ACTIVE = 0x01

+    ACCEPT_NONE = 0x00
+    ACCEPT_APP = 0x01
+    ACCEPT_ALL = 0x02
+    resource_strategies = [ACCEPT_NONE, ACCEPT_APP, ACCEPT_ALL]
+
     @staticmethod
     def validateRequest(owner, data, packet):
         if len(data) == (Link.ECPUBSIZE):
@@ -40,6 +46,7 @@ class Link:

         except Exception as e:
             RNS.log("Validating link request failed", RNS.LOG_VERBOSE)
+            traceback.print_exc()
             return None

         else:
@@ -50,7 +57,11 @@ class Link:
     def __init__(self, destination=None, owner=None, peer_pub_bytes = None):
         if destination != None and destination.type != RNS.Destination.SINGLE:
             raise TypeError("Links can only be established to the \"single\" destination type")
+        self.rtt = None
         self.callbacks = LinkCallbacks()
+        self.resource_strategy = Link.ACCEPT_NONE
+        self.outgoing_resources = []
+        self.incoming_resources = []
         self.status = Link.PENDING
         self.type = RNS.Destination.LINK
         self.owner = owner
@@ -109,7 +120,7 @@ class Link:
         signature = self.owner.identity.sign(signed_data)

         proof_data = self.pub_bytes+signature
-        proof = RNS.Packet(self, proof_data, packet_type=RNS.Packet.PROOF, header_type=RNS.Packet.HEADER_3)
+        proof = RNS.Packet(self, proof_data, packet_type=RNS.Packet.PROOF, context=RNS.Packet.LRPROOF)
         proof.send()

     def validateProof(self, packet):
@@ -122,9 +133,9 @@ class Link:
             self.handshake()
             self.attached_interface = packet.receiving_interface
             RNS.Transport.activateLink(self)
+            RNS.log("Link "+str(self)+" established with "+str(self.destination), RNS.LOG_VERBOSE)
             if self.callbacks.link_established != None:
                 self.callbacks.link_established(self)
-            RNS.log("Link "+str(self)+" established with "+str(self.destination), RNS.LOG_VERBOSE)
         else:
             RNS.log("Invalid link proof signature received by "+str(self), RNS.LOG_VERBOSE)

@@ -139,9 +150,60 @@ class Link:
         if packet.receiving_interface != self.attached_interface:
             RNS.log("Link-associated packet received on unexpected interface! Someone might be trying to manipulate your communication!", RNS.LOG_ERROR)
         else:
-            plaintext = self.decrypt(packet.data)
-            if (self.callbacks.packet != None):
-                self.callbacks.packet(plaintext, packet)
+            if packet.packet_type == RNS.Packet.DATA:
+                if packet.context == RNS.Packet.NONE:
+                    plaintext = self.decrypt(packet.data)
+                    if (self.callbacks.packet != None):
+                        self.callbacks.packet(plaintext, packet)
+
+                elif packet.context == RNS.Packet.RESOURCE_ADV:
+                    packet.plaintext = self.decrypt(packet.data)
+                    if self.resource_strategy == Link.ACCEPT_NONE:
+                        pass
+                    elif self.resource_strategy == Link.ACCEPT_APP:
+                        if self.callbacks.resource != None:
+                            self.callbacks.resource(packet)
+                    elif self.resource_strategy == Link.ACCEPT_ALL:
+                        RNS.Resource.accept(packet, self.callbacks.resource_concluded)
+
+                elif packet.context == RNS.Packet.RESOURCE_REQ:
+                    plaintext = self.decrypt(packet.data)
+                    if ord(plaintext[:1]) == RNS.Resource.HASHMAP_IS_EXHAUSTED:
+                        resource_hash = plaintext[1+RNS.Resource.MAPHASH_LEN:RNS.Identity.HASHLENGTH/8+1+RNS.Resource.MAPHASH_LEN]
+                    else:
+                        resource_hash = plaintext[1:RNS.Identity.HASHLENGTH/8+1]
+                    for resource in self.outgoing_resources:
+                        if resource.hash == resource_hash:
+                            resource.request(plaintext)
+
+                elif packet.context == RNS.Packet.RESOURCE_HMU:
+                    plaintext = self.decrypt(packet.data)
+                    resource_hash = plaintext[:RNS.Identity.HASHLENGTH/8]
+                    for resource in self.incoming_resources:
+                        if resource_hash == resource.hash:
+                            resource.hashmap_update_packet(plaintext)
+
+                elif packet.context == RNS.Packet.RESOURCE_ICL:
+                    plaintext = self.decrypt(packet.data)
+                    resource_hash = plaintext[:RNS.Identity.HASHLENGTH/8]
+                    for resource in self.incoming_resources:
+                        if resource_hash == resource.hash:
+                            resource.cancel()
+
+                # TODO: find the most efficient way to allow multiple
+                # transfers at the same time, sending resource hash on
+                # each packet is a huge overhead
+                elif packet.context == RNS.Packet.RESOURCE:
+                    for resource in self.incoming_resources:
+                        resource.receive_part(packet)
+
+            elif packet.packet_type == RNS.Packet.PROOF:
+                if packet.context == RNS.Packet.RESOURCE_PRF:
+                    resource_hash = packet.data[0:RNS.Identity.HASHLENGTH/8]
+                    for resource in self.outgoing_resources:
+                        if resource_hash == resource.hash:
+                            resource.validateProof(packet.data)
+

     def encrypt(self, plaintext):
         if self.__encryption_disabled:
@@ -170,11 +232,43 @@ class Link:
     def packet_callback(self, callback):
         self.callbacks.packet = callback

+    # Called when an incoming resource transfer is started
     def resource_started_callback(self, callback):
         self.callbacks.resource_started = callback

-    def resource_completed_callback(self, callback):
-        self.callbacks.resource_completed = callback
+    # Called when a resource transfer is concluded
+    def resource_concluded_callback(self, callback):
+        self.callbacks.resource_concluded = callback
+
+    def setResourceStrategy(self, resource_strategy):
+        if not resource_strategy in Link.resource_strategies:
+            raise TypeError("Unsupported resource strategy")
+        else:
+            self.resource_strategy = resource_strategy
+
+    def register_outgoing_resource(self, resource):
+        self.outgoing_resources.append(resource)
+
+    def register_incoming_resource(self, resource):
+        self.incoming_resources.append(resource)
+
+    def cancel_outgoing_resource(self, resource):
+        if resource in self.outgoing_resources:
+            self.outgoing_resources.remove(resource)
+        else:
+            RNS.log("Attempt to cancel a non-existing incoming resource", RNS.LOG_ERROR)
+
+    def cancel_incoming_resource(self, resource):
+        if resource in self.incoming_resources:
+            self.incoming_resources.remove(resource)
+        else:
+            RNS.log("Attempt to cancel a non-existing incoming resource", RNS.LOG_ERROR)
+
+    def ready_for_new_resource(self):
+        if len(self.outgoing_resources) > 0:
+            return False
+        else:
+            return True

     def disableEncryption(self):
         if (RNS.Reticulum.should_allow_unencrypted()):
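Taken together, the new strategy and callback setters give an application control over incoming resources on a link. A hedged usage sketch, using only names introduced in this commit; the existence of an already-established link object and the RNS.Link re-export are assumed here and are not part of this diff:

    # Sketch: configure a link to auto-accept resources and report when they conclude.
    def resource_started(resource):
        RNS.log("Incoming resource transfer started")

    def resource_concluded(resource):
        if resource.status == RNS.Resource.COMPLETE:
            RNS.log("Received "+str(len(resource.data))+" bytes")
        else:
            RNS.log("Resource transfer failed or was cancelled")

    link.setResourceStrategy(RNS.Link.ACCEPT_ALL)
    link.resource_started_callback(resource_started)
    link.resource_concluded_callback(resource_concluded)

With ACCEPT_APP instead of ACCEPT_ALL, the advertisement is handed to the application's resource callback, which can then call RNS.Resource.accept() itself.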
@@ -185,5 +279,8 @@ class Link:
             RNS.log("Shutting down Reticulum now!", RNS.LOG_CRITICAL)
             RNS.panic()

+    def encryption_disabled(self):
+        return self.__encryption_disabled
+
     def __str__(self):
         return RNS.prettyhexrep(self.link_id)
RNS/Packet.py (346 lines changed)
@@ -2,6 +2,198 @@ import struct
 import time
 import RNS

+class Packet:
+    # Constants
+    DATA         = 0x00
+    ANNOUNCE     = 0x01
+    LINKREQUEST  = 0x02
+    PROOF        = 0x03
+    types = [DATA, ANNOUNCE, LINKREQUEST, PROOF]
+
+    HEADER_1 = 0x00    # Normal header format
+    HEADER_2 = 0x01    # Header format used for link packets in transport
+    HEADER_3 = 0x02    # Reserved
+    HEADER_4 = 0x03    # Reserved
+    header_types = [HEADER_1, HEADER_2, HEADER_3, HEADER_4]
+
+    # Context types
+    NONE          = 0x00
+    RESOURCE      = 0x01
+    RESOURCE_ADV  = 0x02
+    RESOURCE_REQ  = 0x03
+    RESOURCE_HMU  = 0x04
+    RESOURCE_PRF  = 0x05
+    RESOURCE_ICL  = 0x06
+    RESOURCE_RCL  = 0x07
+    REQUEST       = 0x08
+    RESPONSE      = 0x09
+    COMMAND       = 0x0A
+    COMMAND_STAT  = 0x0B
+    LRPROOF       = 0xFF
+
+    HEADER_MAXSIZE = 23
+
+    # Defaults
+    TIMEOUT = 60
+
+    def __init__(self, destination, data, packet_type = DATA, context = NONE, transport_type = RNS.Transport.BROADCAST, header_type = HEADER_1, transport_id = None):
+        if destination != None:
+            if transport_type == None:
+                transport_type = RNS.Transport.BROADCAST
+
+            self.header_type = header_type
+            self.packet_type = packet_type
+            self.transport_type = transport_type
+            self.context = context
+
+            self.hops = 0;
+            self.destination = destination
+            self.transport_id = transport_id
+            self.data = data
+            self.flags = self.getPackedFlags()
+            self.MTU = RNS.Reticulum.MTU
+
+            self.raw = None
+            self.packed = False
+            self.sent = False
+            self.receipt = None
+            self.fromPacked = False
+        else:
+            self.raw = data
+            self.packed = True
+            self.fromPacked = True
+
+        self.sent_at = None
+        self.packet_hash = None
+
+    def getPackedFlags(self):
+        if self.context == Packet.LRPROOF:
+            packed_flags = (self.header_type << 6) | (self.transport_type << 4) | RNS.Destination.LINK | self.packet_type
+        else:
+            packed_flags = (self.header_type << 6) | (self.transport_type << 4) | (self.destination.type << 2) | self.packet_type
+        return packed_flags
+
+    def pack(self):
+        self.header = ""
+        self.header += struct.pack("!B", self.flags)
+        self.header += struct.pack("!B", self.hops)
+
+
+        if self.context == Packet.LRPROOF:
+            self.header += self.destination.link_id
+            self.ciphertext = self.data
+        else:
+            if self.header_type == Packet.HEADER_1:
+                self.header += self.destination.hash
+
+                if self.packet_type == Packet.ANNOUNCE:
+                    # Announce packets are not encrypted
+                    self.ciphertext = self.data
+                elif self.packet_type == Packet.PROOF and self.context == Packet.RESOURCE_PRF:
+                    # Resource proofs are not encrypted
+                    self.ciphertext = self.data
+                elif self.context == Packet.RESOURCE:
+                    # A resource takes care of symmetric
+                    # encryption by itself
+                    self.ciphertext = self.data
+                else:
+                    # In all other cases, we encrypt the packet
+                    # with the destination's public key
+                    self.ciphertext = self.destination.encrypt(self.data)
+
+            if self.header_type == Packet.HEADER_2:
+                if t_destination != None:
+                    self.header += self.t_destination
+                else:
+                    raise IOError("Packet with header type 2 must have a transport ID")
+
+        self.header += chr(self.context)
+
+        self.raw = self.header + self.ciphertext
+
+        if len(self.raw) > self.MTU:
+            raise IOError("Packet size of "+str(len(self.raw))+" exceeds MTU of "+str(self.MTU)+" bytes")
+
+        self.packed = True
+
+    def unpack(self):
+        self.flags = ord(self.raw[0])
+        self.hops  = ord(self.raw[1])
+
+        self.header_type      = (self.flags & 0b11000000) >> 6
+        self.transport_type   = (self.flags & 0b00110000) >> 4
+        self.destination_type = (self.flags & 0b00001100) >> 2
+        self.packet_type      = (self.flags & 0b00000011)
+
+        if self.header_type == Packet.HEADER_2:
+            self.transport_id = self.raw[2:12]
+            self.destination_hash = self.raw[12:22]
+            self.context = ord(self.raw[22:23])
+            self.data = self.raw[23:]
+        else:
+            self.transport_id = None
+            self.destination_hash = self.raw[2:12]
+            self.context = ord(self.raw[12:13])
+            self.data = self.raw[13:]
+
+        self.packed = False
+
+    def send(self):
+        if not self.sent:
+            if not self.packed:
+                self.pack()
+
+            if RNS.Transport.outbound(self):
+                return self.receipt
+            else:
+                # TODO: Don't raise error here, handle gracefully
+                raise IOError("Packet could not be sent! Do you have any outbound interfaces configured?")
+        else:
+            raise IOError("Packet was already sent")
+
+    def resend(self):
+        if self.sent:
+            Transport.outbound(self.raw)
+        else:
+            raise IOError("Packet was not sent yet")
+
+    def prove(self, destination=None):
+        if self.fromPacked and self.destination:
+            if self.destination.identity and self.destination.identity.prv:
+                self.destination.identity.prove(self, destination)
+
+    # Generates a special destination that allows Reticulum
+    # to direct the proof back to the proved packet's sender
+    def generateProofDestination(self):
+        return ProofDestination(self)
+
+    def validateProofPacket(self, proof_packet):
+        return self.receipt.validateProofPacket(proof_packet)
+
+    def validateProof(self, proof):
+        return self.receipt.validateProof(proof)
+
+    def updateHash(self):
+        self.packet_hash = self.getHash()
+
+    def getHash(self):
+        return RNS.Identity.fullHash(self.getHashablePart())
+
+    def getHashablePart(self):
+        return self.raw[0:1]+self.raw[2:]
+
+
+class ProofDestination:
+    def __init__(self, packet):
+        self.hash = packet.getHash()[:10];
+        self.type = RNS.Destination.SINGLE
+
+    def encrypt(self, plaintext):
+        return plaintext
+
+
 class PacketReceipt:
     # Receipt status constants
     FAILED = 0x00
@@ -91,157 +283,3 @@ class PacketReceiptCallbacks:
     def __init__(self):
         self.delivery = None
         self.timeout = None
-
-class Packet:
-    # Constants
-    DATA         = 0x00;
-    ANNOUNCE     = 0x01;
-    LINKREQUEST  = 0x02;
-    PROOF        = 0x03;
-    types = [DATA, ANNOUNCE, LINKREQUEST, PROOF]
-
-    HEADER_1 = 0x00;    # Normal header format
-    HEADER_2 = 0x01;    # Header format used for link packets in transport
-    HEADER_3 = 0x02;    # Normal header format, but used to indicate a link request proof
-    HEADER_4 = 0x03;    # Reserved
-    header_types = [HEADER_1, HEADER_2, HEADER_3, HEADER_4]
-
-    # Defaults
-    TIMEOUT = 3600.0
-
-    def __init__(self, destination, data, packet_type = DATA, transport_type = RNS.Transport.BROADCAST, header_type = HEADER_1, transport_id = None):
-        if destination != None:
-            if transport_type == None:
-                transport_type = RNS.Transport.BROADCAST
-
-            self.header_type = header_type
-            self.packet_type = packet_type
-            self.transport_type = transport_type
-
-            self.hops = 0;
-            self.destination = destination
-            self.transport_id = transport_id
-            self.data = data
-            self.flags = self.getPackedFlags()
-            self.MTU = RNS.Reticulum.MTU
-
-            self.raw = None
-            self.packed = False
-            self.sent = False
-            self.receipt = None
-            self.fromPacked = False
-        else:
-            self.raw = data
-            self.packed = True
-            self.fromPacked = True
-
-        self.sent_at = None
-        self.packet_hash = None
-
-    def getPackedFlags(self):
-        if self.header_type == Packet.HEADER_3:
-            packed_flags = (self.header_type << 6) | (self.transport_type << 4) | RNS.Destination.LINK | self.packet_type
-        else:
-            packed_flags = (self.header_type << 6) | (self.transport_type << 4) | (self.destination.type << 2) | self.packet_type
-        return packed_flags
-
-    def pack(self):
-        self.header = ""
-        self.header += struct.pack("!B", self.flags)
-        self.header += struct.pack("!B", self.hops)
-        if self.header_type == Packet.HEADER_2:
-            if t_destination != None:
-                self.header += self.t_destination
-            else:
-                raise IOError("Packet with header type 2 must have a transport ID")
-
-        if self.header_type == Packet.HEADER_1:
-            self.header += self.destination.hash
-            if self.packet_type != Packet.ANNOUNCE:
-                self.ciphertext = self.destination.encrypt(self.data)
-            else:
-                self.ciphertext = self.data
-
-        if self.header_type == Packet.HEADER_3:
-            self.header += self.destination.link_id
-            self.ciphertext = self.data
-
-        self.raw = self.header + self.ciphertext
-
-        if len(self.raw) > self.MTU:
-            raise IOError("Packet size of "+str(len(self.raw))+" exceeds MTU of "+str(self.MTU)+" bytes")
-
-        self.packed = True
-
-    def unpack(self):
-        self.flags = ord(self.raw[0])
-        self.hops  = ord(self.raw[1])
-
-        self.header_type      = (self.flags & 0b11000000) >> 6
-        self.transport_type   = (self.flags & 0b00110000) >> 4
-        self.destination_type = (self.flags & 0b00001100) >> 2
-        self.packet_type      = (self.flags & 0b00000011)
-
-        if self.header_type == Packet.HEADER_2:
-            self.transport_id = self.raw[2:12]
-            self.destination_hash = self.raw[12:22]
-            self.data = self.raw[22:]
-        else:
-            self.transport_id = None
-            self.destination_hash = self.raw[2:12]
-            self.data = self.raw[12:]
-
-        self.packed = False
-
-    def send(self):
-        if not self.sent:
-            if not self.packed:
-                self.pack()
-
-            if RNS.Transport.outbound(self):
-                return self.receipt
-            else:
-                # TODO: Don't raise error here, handle gracefully
-                raise IOError("Packet could not be sent! Do you have any outbound interfaces configured?")
-        else:
-            raise IOError("Packet was already sent")
-
-    def resend(self):
-        if self.sent:
-            Transport.outbound(self.raw)
-        else:
-            raise IOError("Packet was not sent yet")
-
-    def prove(self, destination=None):
-        if self.fromPacked and self.destination:
-            if self.destination.identity and self.destination.identity.prv:
-                self.destination.identity.prove(self, destination)
-
-    # Generates a special destination that allows Reticulum
-    # to direct the proof back to the proved packet's sender
-    def generateProofDestination(self):
-        return ProofDestination(self)
-
-    def validateProofPacket(self, proof_packet):
-        return self.receipt.validateProofPacket(proof_packet)
-
-    def validateProof(self, proof):
-        return self.receipt.validateProof(proof)
-
-    def updateHash(self):
-        self.packet_hash = self.getHash()
-
-    def getHash(self):
-        return RNS.Identity.fullHash(self.getHashablePart())
-
-    def getHashablePart(self):
-        return self.raw[0:1]+self.raw[2:]
-
-class ProofDestination:
-    def __init__(self, packet):
-        self.hash = packet.getHash()[:10];
-        self.type = RNS.Destination.SINGLE
-
-    def encrypt(self, plaintext):
-        return plaintext
-
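The rewritten Packet class adds a one-byte context field between the addressing fields and the payload, and that context is what Link.receive() and Transport dispatch on. A hedged sketch of how the two sides relate, using only calls present in this diff (the link object and the raw_bytes buffer are assumed to exist and are not defined here):

    # Sending side (sketch): context defaults to Packet.NONE for ordinary data.
    packet = RNS.Packet(link, "hello over the link")
    packet.send()

    # Receiving side (sketch): a Packet built with no destination wraps raw
    # bytes, and unpack() recovers flags, hops, addressing, the context byte
    # and the remaining data.
    incoming = RNS.Packet(None, raw_bytes)
    incoming.unpack()
    if incoming.context == RNS.Packet.RESOURCE_ADV:
        pass  # handled according to the receiving link's resource strategy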
RNS/Resource.py (437 lines, new file)
@@ -0,0 +1,437 @@
+import RNS
+import bz2
+import math
+import time
+import threading
+import vendor.umsgpack as umsgpack
+
+
+class Resource:
+    WINDOW_MIN = 1
+    WINDOW_MAX = 10
+    WINDOW = 5
+    MAPHASH_LEN = 4
+    SDU = RNS.Reticulum.MTU - RNS.Packet.HEADER_MAXSIZE
+    RANDOM_HASH_SIZE = 4
+
+    DEFAULT_TIMEOUT = RNS.Packet.TIMEOUT
+    MAX_RETRIES = 3
+    ROUNDTRIP_FACTOR = 1.5
+
+    HASHMAP_IS_NOT_EXHAUSTED = 0x00
+    HASHMAP_IS_EXHAUSTED = 0xFF
+
+    # Status constants
+    NONE = 0x00
+    QUEUED = 0x01
+    ADVERTISED = 0x02
+    TRANSFERRING = 0x03
+    COMPLETE = 0x04
+    FAILED = 0x05
+    CORRUPT = 0x06
+
+    @staticmethod
+    def accept(advertisement_packet, callback=None, progress_callback = None):
+        try:
+            adv = ResourceAdvertisement.unpack(advertisement_packet.plaintext)
+
+            resource = Resource(None, advertisement_packet.link)
+            resource.status = Resource.TRANSFERRING
+
+            resource.flags = adv.f
+            resource.size = adv.t
+            resource.uncompressed_size = adv.d
+            resource.hash = adv.h
+            resource.random_hash = adv.r
+            resource.hashmap_raw = adv.m
+            resource.encrypted = True if resource.flags & 0x01 else False
+            resource.compressed = True if resource.flags >> 1 & 0x01 else False
+            resource.initiator = False
+            resource.callback = callback
+            resource.__progress_callback = progress_callback
+            resource.total_parts = int(math.ceil(resource.size/float(Resource.SDU)))
+            resource.received_count = 0
+            resource.outstanding_parts = 0
+            resource.parts = [None] * resource.total_parts
+            resource.window = Resource.WINDOW
+            resource.last_activity = time.time()
+
+            resource.hashmap = [None] * resource.total_parts
+            resource.hashmap_height = 0
+            resource.waiting_for_hmu = False
+
+            resource.link.register_incoming_resource(resource)
+
+            RNS.log("Accepting resource advertisement for "+RNS.prettyhexrep(resource.hash), RNS.LOG_DEBUG)
+            resource.link.callbacks.resource_started(resource)
+
+            resource.hashmap_update(0, resource.hashmap_raw)
+
+            return resource
+        except Exception as e:
+            RNS.log("Could not decode resource advertisement, dropping resource", RNS.LOG_VERBOSE)
+            traceback.print_exc()
+            return None
+
+    def __init__(self, data, link, advertise=True, auto_compress=True, callback=None, progress_callback=None):
+        self.status = Resource.NONE
+        self.link = link
+        self.rtt = None
+
+        if data != None:
+            hashmap_ok = False
+            while not hashmap_ok:
+                self.initiator = True
+                self.callback = callback
+                self.progress_callback = progress_callback
+                self.random_hash = RNS.Identity.getRandomHash()[:Resource.RANDOM_HASH_SIZE]
+                self.uncompressed_data = data
+                self.compressed_data = bz2.compress(self.uncompressed_data)
+                self.uncompressed_size = len(self.uncompressed_data)
+                self.compressed_size = len(self.compressed_data)
+
+                self.hash = RNS.Identity.fullHash(data+self.random_hash)
+                self.expected_proof = RNS.Identity.fullHash(data+self.hash)
+
+                if (self.compressed_size < self.uncompressed_size and auto_compress):
+                    self.data = self.compressed_data
+                    self.compressed = True
+                    self.uncompressed_data = None
+                else:
+                    self.data = self.uncompressed_data
+                    self.compressed = False
+                    self.compressed_data = None
+
+                if not self.link.encryption_disabled():
+                    self.data = self.link.encrypt(self.data)
+                    self.encrypted = True
+                else:
+                    self.encrypted = False
+
+                self.size = len(self.data)
+
+                self.hashmap = ""
+                self.parts = []
+                for i in range(0,int(math.ceil(self.size/float(Resource.SDU)))):
+                    data = self.data[i*Resource.SDU:(i+1)*Resource.SDU]
+                    part = RNS.Packet(link, data, context=RNS.Packet.RESOURCE)
+                    part.pack()
+                    part.map_hash = self.getMapHash(data)
+                    self.hashmap += part.map_hash
+                    self.parts.append(part)
+
+                hashmap_ok = self.checkHashMap()
+                if not hashmap_ok:
+                    RNS.log("Found hash collision in resource map, remapping...", RNS.LOG_VERBOSE)
+
+            if advertise:
+                self.advertise()
+        else:
+            pass
+
+
+    def checkHashMap(self):
+        checked_hashes = []
+        for part in self.parts:
+            if part.map_hash in checked_hashes:
+                return False
+            checked_hashes.append(part.map_hash)
+
+        return True
+
+    def hashmap_update_packet(self, plaintext):
+        if not self.status == Resource.FAILED:
+            update = umsgpack.unpackb(plaintext[RNS.Identity.HASHLENGTH/8:])
+            self.hashmap_update(update[0], update[1])
+
+
+    def hashmap_update(self, segment, hashmap):
+        if not self.status == Resource.FAILED:
+            seg_len = ResourceAdvertisement.HASHMAP_MAX_LEN
+            hashes = len(hashmap)/Resource.MAPHASH_LEN
+            for i in range(0,hashes):
+                self.hashmap[i+segment*seg_len] = hashmap[i*Resource.MAPHASH_LEN:(i+1)*Resource.MAPHASH_LEN]
+                self.hashmap_height += 1
+
+            self.waiting_for_hmu = False
+            self.request_next()
+
+    def getMapHash(self, data):
+        return RNS.Identity.fullHash(data+self.random_hash)[:Resource.MAPHASH_LEN]
+
+    def advertise(self):
+        thread = threading.Thread(target=self.__advertise_job)
+        thread.setDaemon(True)
+        thread.start()
+
+    def __advertise_job(self):
+        data = ResourceAdvertisement(self).pack()
+        packet = RNS.Packet(self.link, data, context=RNS.Packet.RESOURCE_ADV)
+        while not self.link.ready_for_new_resource():
+            self.status = Resource.QUEUED
+            sleep(0.25)
+
+        packet.send()
+        self.last_activity = time.time()
+        self.adv_sent = self.last_activity
+        self.rtt = None
+        self.status = Resource.ADVERTISED
+        self.link.register_outgoing_resource(self)
+
+    def assemble(self):
+        if not self.status == Resource.FAILED:
+            try:
+                RNS.log("Assembling parts...")
+                stream = ""
+                for part in self.parts:
+                    stream += part
+
+                if self.encrypted:
+                    data = self.link.decrypt(stream)
+                else:
+                    data = stream
+
+                if self.compressed:
+                    self.data = bz2.decompress(data)
+                else:
+                    self.data = data
+
+                calculated_hash = RNS.Identity.fullHash(self.data+self.random_hash)
+
+                if calculated_hash == self.hash:
+                    self.status = Resource.COMPLETE
+                    self.prove()
+                else:
+                    self.status = Resource.CORRUPT
+
+            except Exception as e:
+                RNS.log("Error while assembling received resource.", RNS.LOG_ERROR)
+                RNS.log("The contained exception was: "+str(e), RNS.LOG_ERROR)
+                self.status = Resource.CORRUPT
+
+            if self.callback != None:
+                self.callback(self)
+
+
+    def prove(self):
+        if not self.status == Resource.FAILED:
+            proof = RNS.Identity.fullHash(self.data+self.hash)
+            proof_data = self.hash+proof
+            proof_packet = RNS.Packet(self.link, proof_data, packet_type=RNS.Packet.PROOF, context=RNS.Packet.RESOURCE_PRF)
+            proof_packet.send()
+
+    def validateProof(self, proof_data):
+        if not self.status == Resource.FAILED:
+            if len(proof_data) == RNS.Identity.HASHLENGTH/8*2:
+                if proof_data[RNS.Identity.HASHLENGTH/8:] == self.expected_proof:
+                    self.status = Resource.COMPLETE
+                    if self.callback != None:
+                        self.callback(self)
+                else:
+                    pass
+            else:
+                pass
+
+
+    def receive_part(self, packet):
+        self.last_activity = time.time()
+        if self.req_resp == None:
+            self.req_resp = self.last_activity
+            rtt = self.req_resp-self.req_sent
+            if self.rtt == None:
+                self.rtt = rtt
+            elif self.rtt < rtt:
+                self.rtt = rtt
+
+        if not self.status == Resource.FAILED:
+            self.status = Resource.TRANSFERRING
+            part_data = packet.data
+            part_hash = self.getMapHash(part_data)
+
+            i = 0
+            for map_hash in self.hashmap:
+                if map_hash == part_hash:
+                    if self.parts[i] == None:
+                        self.parts[i] = part_data
+                        self.received_count += 1
+                        self.outstanding_parts -= 1
+                i += 1
+
+            if self.__progress_callback != None:
+                self.__progress_callback(self)
+
+            if self.outstanding_parts == 0 and self.received_count == self.total_parts:
+                self.assemble()
+            elif self.outstanding_parts == 0:
+                if self.window < Resource.WINDOW_MAX:
+                    self.window += 1
+                self.request_next()
+
+    # Called on incoming resource to send a request for more data
+    def request_next(self):
+        if not self.status == Resource.FAILED:
+            if not self.waiting_for_hmu:
+                self.outstanding_parts = 0
+                hashmap_exhausted = Resource.HASHMAP_IS_NOT_EXHAUSTED
+                requested_hashes = ""
+
+                i = 0; pn = 0
+                for part in self.parts:
+
+                    if part == None:
+                        part_hash = self.hashmap[pn]
+                        if part_hash != None:
+                            requested_hashes += part_hash
+                            self.outstanding_parts += 1
+                            i += 1
+                        else:
+                            hashmap_exhausted = Resource.HASHMAP_IS_EXHAUSTED
+
+                    pn += 1
+                    if i >= self.window or hashmap_exhausted == Resource.HASHMAP_IS_EXHAUSTED:
+                        break
+
+                hmu_part = chr(hashmap_exhausted)
+                if hashmap_exhausted == Resource.HASHMAP_IS_EXHAUSTED:
+                    last_map_hash = self.hashmap[self.hashmap_height-1]
+                    hmu_part += last_map_hash
+                    self.waiting_for_hmu = True
+
+                request_data = hmu_part + self.hash + requested_hashes
+                request_packet = RNS.Packet(self.link, request_data, context = RNS.Packet.RESOURCE_REQ)
+
+                request_packet.send()
+                self.last_activity = time.time()
+                self.req_sent = self.last_activity
+                self.req_resp = None
+
+    # Called on outgoing resource to make it send more data
+    def request(self, request_data):
+        if not self.status == Resource.FAILED:
+            rtt = time.time() - self.adv_sent
+            if self.rtt == None:
+                self.rtt = rtt
+
+            self.status == Resource.TRANSFERRING
+            wants_more_hashmap = True if ord(request_data[0]) == Resource.HASHMAP_IS_EXHAUSTED else False
+            pad = 1+Resource.MAPHASH_LEN if wants_more_hashmap else 1
+
+            requested_hashes = request_data[pad+RNS.Identity.HASHLENGTH/8:]
+
+            for i in range(0,len(requested_hashes)/Resource.MAPHASH_LEN):
+                requested_hash = requested_hashes[i*Resource.MAPHASH_LEN:(i+1)*Resource.MAPHASH_LEN]
+
+                i = 0
+                for part in self.parts:
+                    if part.map_hash == requested_hash:
+                        if not part.sent:
+                            part.send()
+                        else:
+                            part.resend()
+                        self.last_activity = time.time()
+                        break
+                    i += 1
+
+            if wants_more_hashmap:
+                last_map_hash = request_data[1:Resource.MAPHASH_LEN+1]
+
+                part_index = 0
+                for part in self.parts:
+                    part_index += 1
+                    if part.map_hash == last_map_hash:
+                        break
+
+                if part_index % ResourceAdvertisement.HASHMAP_MAX_LEN != 0:
+                    RNS.log("Resource sequencing error, cancelling transfer!", RNS.LOG_ERROR)
+                    self.cancel()
+                else:
+                    segment = part_index / ResourceAdvertisement.HASHMAP_MAX_LEN
+
+
+                hashmap_start = segment*ResourceAdvertisement.HASHMAP_MAX_LEN
+                hashmap_end = min((segment+1)*ResourceAdvertisement.HASHMAP_MAX_LEN, len(self.parts))
+
+                hashmap = ""
+                for i in range(hashmap_start,hashmap_end):
+                    hashmap += self.hashmap[i*Resource.MAPHASH_LEN:(i+1)*Resource.MAPHASH_LEN]
+
+                hmu = self.hash+umsgpack.packb([segment, hashmap])
+                hmu_packet = RNS.Packet(self.link, hmu, context = RNS.Packet.RESOURCE_HMU)
+                hmu_packet.send()
+                self.last_activity = time.time()
+
+    def cancel(self):
+        self.status = Resource.FAILED
+        if self.initiator:
+            cancel_packet = RNS.Packet(self.link, self.hash, context=RNS.Packet.RESOURCE_ICL)
+            cancel_packet.send()
+            self.link.cancel_outgoing_resource(self)
+        else:
+            self.link.cancel_incoming_resource(self)
+
+        if self.callback != None:
+            self.callback(self)
+
+    def progress_callback(self, callback):
+        self.__progress_callback = callback
+
+    def progress(self):
+        progress = self.received_count / float(self.total_parts)
+        return progress
+
+    def __str__(self):
+        return RNS.prettyHexRep(self.hash)
+
+
+class ResourceAdvertisement:
+    # TODO: Can this be allocated dynamically? Keep in mind hashmap_update inference
+    HASHMAP_MAX_LEN = 84
+
+    def __init__(self, resource=None):
+        if resource != None:
+            self.t = resource.size              # Transfer size
+            self.d = resource.uncompressed_size # Data size
+            self.n = len(resource.parts)        # Number of parts
+            self.h = resource.hash              # Resource hash
+            self.r = resource.random_hash       # Resource random hash
+            self.m = resource.hashmap           # Resource hashmap
+            self.c = resource.compressed        # Compression flag
+            self.e = resource.encrypted         # Encryption flag
+            self.f = 0x00 | self.c << 1 | self.e # Flags
+
+    def pack(self, segment=0):
+        hashmap_start = segment*ResourceAdvertisement.HASHMAP_MAX_LEN
+        hashmap_end = min((segment+1)*ResourceAdvertisement.HASHMAP_MAX_LEN, self.n)
+
+        hashmap = ""
+        for i in range(hashmap_start,hashmap_end):
+            hashmap += self.m[i*Resource.MAPHASH_LEN:(i+1)*Resource.MAPHASH_LEN]
+
+        dictionary = {
+            u"t": self.t,
+            u"d": self.d,
+            u"n": self.n,
+            u"h": self.h,
+            u"r": self.r,
+            u"f": self.f,
+            u"m": hashmap
+        }
+
+        return umsgpack.packb(dictionary)
+
+    @staticmethod
+    def unpack(data):
+        dictionary = umsgpack.unpackb(data)
+
+        adv = ResourceAdvertisement()
+        adv.t = dictionary["t"]
+        adv.d = dictionary["d"]
+        adv.n = dictionary["n"]
+        adv.h = dictionary["h"]
+        adv.r = dictionary["r"]
+        adv.m = dictionary["m"]
+        adv.f = dictionary["f"]
+        adv.e = True if (adv.f & 0x01) == 0x01 else False
+        adv.c = True if ((adv.f >> 1) & 0x01) == 0x01 else False
+
+        return adv
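With the new Resource class, transferring data larger than a single packet over a link reduces to a couple of calls on the sending side; the receiver is configured through the link callbacks shown earlier. A hedged sketch of the sending side, using only the constructor and status values introduced in this file (the established link object and the file name are assumed for illustration):

    # Sketch: send an arbitrary-size payload over an established link.
    # Resource handles bz2 compression, link encryption, segmentation into
    # MTU-sized parts, hashmap-driven part requests and the final proof.
    def transfer_concluded(resource):
        if resource.status == RNS.Resource.COMPLETE:
            RNS.log("Transfer proven complete by the receiver")
        else:
            RNS.log("Transfer failed or was cancelled")

    data = open("some_file", "rb").read()   # illustrative payload
    resource = RNS.Resource(data, link, callback=transfer_concluded)
    # advertise=True by default: the advertisement is sent as soon as the link
    # is ready, and parts go out in response to RESOURCE_REQ packets.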
@@ -29,7 +29,7 @@ class Reticulum:
         Reticulum.cachepath = Reticulum.configdir+"/storage/cache"

         Reticulum.__allow_unencrypted = False
-        Reticulum.__use_implicit_proof = False
+        Reticulum.__use_implicit_proof = True

         if not os.path.isdir(Reticulum.storagepath):
             os.makedirs(Reticulum.storagepath)
@@ -41,7 +41,7 @@ class Reticulum:
             self.config = ConfigObj(self.configpath)
             RNS.log("Configuration loaded from "+self.configpath)
         else:
-            RNS.log("Could not load config file, creating default configuration...")
+            RNS.log("Could not load config file, creating default configuration file...")
             self.createDefaultConfig()
             RNS.log("Default config file created. Make any necessary changes in "+Reticulum.configdir+"/config and start Reticulum again.")
             RNS.log("Exiting now!")
@@ -244,9 +244,6 @@ class Reticulum:
             except Exception as e:
                 RNS.log("The interface \""+name+"\" could not be created. Check your configuration file for errors!", RNS.LOG_ERROR)
                 RNS.log("The contained exception was: "+str(e), RNS.LOG_ERROR)
-                #traceback.print_exc()
-
-


     def createDefaultConfig(self):
@@ -116,17 +116,18 @@ class Transport:
             if RNS.Identity.validateAnnounce(packet):
                 Transport.cache(packet)

-        if packet.packet_type == RNS.Packet.LINKREQUEST:
+        elif packet.packet_type == RNS.Packet.LINKREQUEST:
             for destination in Transport.destinations:
                 if destination.hash == packet.destination_hash and destination.type == packet.destination_type:
                     packet.destination = destination
                     destination.receive(packet)
                     Transport.cache(packet)

-        if packet.packet_type == RNS.Packet.DATA:
+        elif packet.packet_type == RNS.Packet.DATA:
             if packet.destination_type == RNS.Destination.LINK:
                 for link in Transport.active_links:
                     if link.link_id == packet.destination_hash:
+                        packet.link = link
                         link.receive(packet)
                         Transport.cache(packet)
             else:
@@ -143,13 +144,17 @@ class Transport:
                     if destination.callbacks.proof_requested:
                         destination.callbacks.proof_requested(packet)

-        if packet.packet_type == RNS.Packet.PROOF:
-            if packet.header_type == RNS.Packet.HEADER_3:
+        elif packet.packet_type == RNS.Packet.PROOF:
+            if packet.context == RNS.Packet.LRPROOF:
                 # This is a link request proof, forward
                 # to a waiting link request
                 for link in Transport.pending_links:
                     if link.link_id == packet.destination_hash:
                         link.validateProof(packet)
+            elif packet.context == RNS.Packet.RESOURCE_PRF:
+                for link in Transport.active_links:
+                    if link.link_id == packet.destination_hash:
+                        link.receive(packet)
             else:
                 # TODO: Make sure everything uses new proof handling
                 if len(packet.data) == RNS.PacketReceipt.EXPL_LENGTH:
@@ -10,6 +10,7 @@ from .Transport import Transport
 from .Destination import Destination
 from .Packet import Packet
 from .Packet import PacketReceipt
+from .Resource import Resource

 modules = glob.glob(os.path.dirname(__file__)+"/*.py")
 __all__ = [ os.path.basename(f)[:-3] for f in modules if not f.endswith('__init__.py')]
RNS/vendor/umsgpack.py (1134 lines, vendored, new file)
File diff suppressed because it is too large.