Mirror of https://github.com/markqvist/Reticulum.git, synced 2024-11-26 15:30:18 +00:00
Resource timing, retries

commit 5c94324230
parent 23ff873c63
RNS/Link.py

@@ -29,8 +29,8 @@ class Link:
     # TODO: This should not be hardcoded,
     # but calculated from something like
     # first-hop RTT latency and distance
-    PROOF_TIMEOUT = 10
-    TIMEOUT_FACTOR = 4
+    DEFAULT_TIMEOUT = 5
+    TIMEOUT_FACTOR = 3
     KEEPALIVE = 120

     PENDING = 0x00

@@ -93,7 +93,8 @@ class Link:
         self.rx = 0
         self.txbytes = 0
         self.rxbytes = 0
-        self.proof_timeout = Link.PROOF_TIMEOUT
+        self.default_timeout = Link.DEFAULT_TIMEOUT
+        self.proof_timeout = self.default_timeout
         self.timeout_factor = Link.TIMEOUT_FACTOR
         self.keepalive = Link.KEEPALIVE
         self.watchdog_lock = False

@@ -275,7 +276,7 @@ class Link:
             next_check = self.request_time + self.proof_timeout
             sleep_time = next_check - time.time()
             if time.time() >= self.request_time + self.proof_timeout:
-                RNS.log("Timeout waiting for RTT packet from link initiator", RNS.LOG_VERBOSE)
+                #RNS.log("Timeout waiting for RTT packet from link initiator", RNS.LOG_DEBUG)
                 self.status = Link.CLOSED
                 self.teardown_reason = Link.TIMEOUT
                 self.link_closed()
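The Link constants above replace the fixed 10-second PROOF_TIMEOUT with a smaller DEFAULT_TIMEOUT and a lower TIMEOUT_FACTOR; elsewhere in this commit the factor is multiplied with a measured RTT to set retry deadlines. A minimal, standalone sketch of that pattern (the function name and fallback behaviour are illustrative, not part of the API):

def effective_timeout(rtt, default_timeout=5, timeout_factor=3):
    # Illustrative only: fall back to the default before any RTT has been
    # measured, otherwise scale the measured round-trip time.
    if rtt is None:
        return default_timeout
    return rtt * timeout_factor

print(effective_timeout(None))   # 5   (no RTT measured yet)
print(effective_timeout(0.5))    # 1.5 (0.5 s RTT * factor 3)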
RNS/Packet.py

@@ -25,10 +25,11 @@ class Packet:
     RESOURCE_PRF = 0x05
     RESOURCE_ICL = 0x06
     RESOURCE_RCL = 0x07
-    REQUEST = 0x08
-    RESPONSE = 0x09
-    COMMAND = 0x0A
-    COMMAND_STAT = 0x0B
+    CACHE_REQUEST = 0x08
+    REQUEST = 0x09
+    RESPONSE = 0x0A
+    COMMAND = 0x0B
+    COMMAND_STAT = 0x0C
     KEEPALIVE = 0xFC
     LINKCLOSE = 0xFD
     LRRTT = 0xFE

@@ -171,7 +172,11 @@ class Packet:

     def resend(self):
         if self.sent:
-            Transport.outbound(self.raw)
+            if RNS.Transport.outbound(self):
+                return self.receipt
+            else:
+                # TODO: Don't raise error here, handle gracefully
+                raise IOError("Packet could not be sent! Do you have any outbound interfaces configured?")
         else:
             raise IOError("Packet was not sent yet")

@@ -198,6 +203,8 @@ class Packet:
         return RNS.Identity.fullHash(self.getHashablePart())

     def getHashablePart(self):
+        # TODO: This assumes transport headers are stripped
+        # by Transport before going anywhere else
         return self.raw[0:1]+self.raw[2:]


 class ProofDestination:
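With the change above, resend() now routes through RNS.Transport.outbound() and hands back the packet's receipt when an interface accepts it, raising IOError otherwise. A hedged sketch of how a caller might lean on that contract (the helper below is hypothetical, not Reticulum API):

import time

def resend_with_backoff(packet, attempts=3, delay=1.0):
    # Hypothetical helper: retry a previously sent packet a few times,
    # treating IOError from resend() as "no outbound interface accepted it".
    for attempt in range(attempts):
        try:
            return packet.resend()   # returns packet.receipt on success
        except IOError:
            time.sleep(delay * (attempt + 1))
    return None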
RNS/Resource.py
@@ -6,7 +6,6 @@ import threading
 import vendor.umsgpack as umsgpack
 from time import sleep
-

 class Resource:
     WINDOW_MIN = 1
     WINDOW_MAX = 10

@@ -15,9 +14,10 @@ class Resource:
     SDU = RNS.Reticulum.MTU - RNS.Packet.HEADER_MAXSIZE
     RANDOM_HASH_SIZE = 4

-    DEFAULT_TIMEOUT = RNS.Packet.TIMEOUT
-    MAX_RETRIES = 3
-    ROUNDTRIP_FACTOR = 1.5
+    # TODO: Should be allocated more
+    # intelligently
+    MAX_RETRIES = 5
+    SENDER_GRACE_TIME = 10

     HASHMAP_IS_NOT_EXHAUSTED = 0x00
     HASHMAP_IS_EXHAUSTED = 0xFF
@@ -27,9 +27,11 @@ class Resource:
     QUEUED = 0x01
     ADVERTISED = 0x02
     TRANSFERRING = 0x03
-    COMPLETE = 0x04
-    FAILED = 0x05
-    CORRUPT = 0x06
+    AWAITING_PROOF = 0x04
+    ASSEMBLING = 0x05
+    COMPLETE = 0x06
+    FAILED = 0x07
+    CORRUPT = 0x08

     @staticmethod
     def accept(advertisement_packet, callback=None, progress_callback = None):
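Two states, AWAITING_PROOF and ASSEMBLING, are inserted here and the later codes shift up, keeping the whole set strictly ordered. That ordering is what lets the watchdog introduced further down use simple range checks such as self.status < Resource.ASSEMBLING and self.status < Resource.COMPLETE. A small standalone illustration (NONE = 0x00 is assumed from the surrounding class, not shown in this hunk):

# Values from the hunk above; NONE = 0x00 is an assumption.
NONE, QUEUED, ADVERTISED, TRANSFERRING = 0x00, 0x01, 0x02, 0x03
AWAITING_PROOF, ASSEMBLING, COMPLETE, FAILED, CORRUPT = 0x04, 0x05, 0x06, 0x07, 0x08

def needs_watchdog(status):
    # Everything before ASSEMBLING is still in flight and supervised.
    return status < ASSEMBLING

def cancellable(status):
    # cancel() only acts on transfers that have not yet completed or failed.
    return status < COMPLETE

assert needs_watchdog(ADVERTISED) and not needs_watchdog(COMPLETE)
assert cancellable(AWAITING_PROOF) and not cancellable(FAILED)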
@@ -68,15 +70,24 @@ class Resource:

             resource.hashmap_update(0, resource.hashmap_raw)

+            resource.watchdog_job()
+
             return resource
         except Exception as e:
-            RNS.log("Could not decode resource advertisement, dropping resource", RNS.LOG_VERBOSE)
-            traceback.print_exc()
+            RNS.log("Could not decode resource advertisement, dropping resource", RNS.LOG_DEBUG)
             return None

     def __init__(self, data, link, advertise=True, auto_compress=True, callback=None, progress_callback=None):
         self.status = Resource.NONE
         self.link = link
+        self.max_retries = Resource.MAX_RETRIES
+        self.retries_left = self.max_retries
+        self.default_timeout = self.link.default_timeout
+        self.timeout_factor = self.link.timeout_factor
+        self.sender_grace_time = Resource.SENDER_GRACE_TIME
+        self.hmu_retry_ok = False
+        self.watchdog_lock = False
+        self.__watchdog_job_id = 0
         self.rtt = None

         if data != None:
@@ -112,6 +123,7 @@ class Resource:
             self.size = len(self.data)

             self.hashmap = ""
+            self.sent_parts = 0
             self.parts = []
             for i in range(0,int(math.ceil(self.size/float(Resource.SDU)))):
                 data = self.data[i*Resource.SDU:(i+1)*Resource.SDU]
@@ -142,17 +154,22 @@ class Resource:

     def hashmap_update_packet(self, plaintext):
         if not self.status == Resource.FAILED:
+            self.last_activity = time.time()
+            self.retries_left = self.max_retries
+
             update = umsgpack.unpackb(plaintext[RNS.Identity.HASHLENGTH/8:])
             self.hashmap_update(update[0], update[1])


     def hashmap_update(self, segment, hashmap):
         if not self.status == Resource.FAILED:
+            self.status = Resource.TRANSFERRING
             seg_len = ResourceAdvertisement.HASHMAP_MAX_LEN
             hashes = len(hashmap)/Resource.MAPHASH_LEN
             for i in range(0,hashes):
-                self.hashmap[i+segment*seg_len] = hashmap[i*Resource.MAPHASH_LEN:(i+1)*Resource.MAPHASH_LEN]
-                self.hashmap_height += 1
+                if self.hashmap[i+segment*seg_len] == None:
+                    self.hashmap_height += 1
+                self.hashmap[i+segment*seg_len] = hashmap[i*Resource.MAPHASH_LEN:(i+1)*Resource.MAPHASH_LEN]

             self.waiting_for_hmu = False
             self.request_next()
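hashmap_update() now marks the resource as TRANSFERRING and only increments hashmap_height for slots that were still empty, so a retransmitted hashmap segment does not inflate the height. An illustrative standalone version of that bookkeeping (names are hypothetical):

def apply_hashmap_segment(hashmap, height, segment, seg_len, entries):
    # Write entries into their absolute slots and count only slots that were
    # previously empty, so re-applying the same segment after a retry leaves
    # the height unchanged.
    for i, entry in enumerate(entries):
        slot = i + segment * seg_len
        if hashmap[slot] is None:
            height += 1
        hashmap[slot] = entry
    return height

hm = [None] * 8
h = apply_hashmap_segment(hm, 0, segment=0, seg_len=4, entries=[b"a", b"b"])
assert h == 2
h = apply_hashmap_segment(hm, h, segment=0, seg_len=4, entries=[b"a", b"b"])
assert h == 2   # retransmission does not double-count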
@@ -167,22 +184,105 @@ class Resource:

     def __advertise_job(self):
         data = ResourceAdvertisement(self).pack()
-        packet = RNS.Packet(self.link, data, context=RNS.Packet.RESOURCE_ADV)
+        self.advertisement_packet = RNS.Packet(self.link, data, context=RNS.Packet.RESOURCE_ADV)
         while not self.link.ready_for_new_resource():
             self.status = Resource.QUEUED
             sleep(0.25)

-        packet.send()
+        self.advertisement_packet.send()
         self.last_activity = time.time()
         self.adv_sent = self.last_activity
         self.rtt = None
         self.status = Resource.ADVERTISED
         self.link.register_outgoing_resource(self)

+        self.watchdog_job()
+
+    def watchdog_job(self):
+        thread = threading.Thread(target=self.__watchdog_job)
+        thread.setDaemon(True)
+        thread.start()
+
+    def __watchdog_job(self):
+        self.__watchdog_job_id += 1
+        this_job_id = self.__watchdog_job_id
+
+        while self.status < Resource.ASSEMBLING and this_job_id == self.__watchdog_job_id:
+            while self.watchdog_lock:
+                sleep(0.025)
+
+            sleep_time = None
+
+            if self.status == Resource.ADVERTISED:
+                sleep_time = (self.adv_sent+self.default_timeout)-time.time()
+                if sleep_time < 0:
+                    if self.retries_left <= 0:
+                        RNS.log("Resource transfer timeout after sending advertisement", RNS.LOG_DEBUG)
+                        self.cancel()
+                        sleep_time = 0.001
+                    else:
+                        RNS.log("No part requests received, retrying resource advertisement...", RNS.LOG_DEBUG)
+                        self.retries_left -= 1
+                        self.advertisement_packet.resend()
+                        self.last_activity = time.time()
+                        self.adv_sent = self.last_activity
+                        sleep_time = 0.001
+
+            elif self.status == Resource.TRANSFERRING:
+                if not self.initiator:
+                    rtt = self.link.rtt if self.rtt == None else self.rtt
+                    sleep_time = self.last_activity + (rtt*self.timeout_factor) - time.time()
+
+                    if sleep_time < 0:
+                        if self.retries_left > 0:
+                            RNS.log("Timeout waiting for parts, requesting retry", RNS.LOG_DEBUG)
+                            sleep_time = 0.001
+                            self.retries_left -= 1
+                            self.waiting_for_hmu = False
+                            self.request_next()
+                        else:
+                            self.cancel()
+                            sleep_time = 0.001
+                else:
+                    max_wait = self.rtt * self.timeout_factor * self.max_retries + self.sender_grace_time
+                    sleep_time = self.last_activity + max_wait - time.time()
+                    if sleep_time < 0:
+                        RNS.log("Resource timed out waiting for part requests", RNS.LOG_DEBUG)
+                        self.cancel()
+                        sleep_time = 0.001

+            elif self.status == Resource.AWAITING_PROOF:
+                sleep_time = self.last_part_sent + (self.rtt*self.timeout_factor+self.sender_grace_time) - time.time()
+                if sleep_time < 0:
+                    if self.retries_left <= 0:
+                        RNS.log("Resource timed out waiting for proof", RNS.LOG_DEBUG)
+                        self.cancel()
+                        sleep_time = 0.001
+                    else:
+                        RNS.log("All parts sent, but no resource proof received, querying network cache...", RNS.LOG_DEBUG)
+                        self.retries_left -= 1
+                        expected_data = self.hash + self.expected_proof
+                        expected_proof_packet = RNS.Packet(self.link, expected_data, packet_type=RNS.Packet.PROOF, context=RNS.Packet.RESOURCE_PRF)
+                        expected_proof_packet.pack()
+                        expected_proof_packet.updateHash()
+                        RNS.Transport.cache_request(expected_proof_packet.packet_hash)
+                        self.last_part_sent = time.time()
+                        sleep_time = 0.001
+
+            if sleep_time == 0:
+                RNS.log("Warning! Link watchdog sleep time of 0!", RNS.LOG_WARNING)
+            if sleep_time == None or sleep_time < 0:
+                # TODO: This should probably not be here forever
+                RNS.log("Timing error! Closing Reticulum now.", RNS.LOG_CRITICAL)
+                RNS.panic()
+
+            sleep(sleep_time)

     def assemble(self):
         if not self.status == Resource.FAILED:
             try:
-                RNS.log("Assembling parts...")
+                self.status = Resource.ASSEMBLING
                 stream = ""
                 for part in self.parts:
                     stream += part
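The new watchdog thread drives the retry behaviour this commit adds: for each state it computes a sleep_time deadline, and a negative result means the deadline has passed, at which point it either spends one of retries_left (re-sending the advertisement, re-requesting parts, or querying the network cache for the proof) or cancels the resource. A condensed, illustrative sketch of the deadline arithmetic only, with plain values standing in for the Resource and Link attributes (adv_sent, last_activity, last_part_sent, default_timeout, timeout_factor, SENDER_GRACE_TIME):

import time

def next_wakeup(status, last_event, rtt, default_timeout=5, timeout_factor=3, grace=10):
    # Condensed, illustrative version of the per-state sleep_time logic above.
    now = time.time()
    if status == "ADVERTISED":        # waiting for the first part request
        return (last_event + default_timeout) - now
    if status == "TRANSFERRING":      # waiting for parts or part requests
        return last_event + (rtt * timeout_factor) - now
    if status == "AWAITING_PROOF":    # all parts sent, proof still pending
        return last_event + (rtt * timeout_factor + grace) - now
    return None

# A negative value means the deadline passed: the watchdog then either
# consumes a retry or cancels the transfer.
print(next_wakeup("TRANSFERRING", last_event=time.time(), rtt=0.5))  # ~1.5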
@@ -236,11 +336,14 @@ class Resource:

     def receive_part(self, packet):
         self.last_activity = time.time()
+        self.retries_left = self.max_retries
+
         if self.req_resp == None:
             self.req_resp = self.last_activity
             rtt = self.req_resp-self.req_sent
             if self.rtt == None:
                 self.rtt = rtt
+                self.watchdog_job()
             elif self.rtt < rtt:
                 self.rtt = rtt

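receive_part() now refills retries_left on every arriving part and starts the watchdog once the first RTT sample exists. The RTT itself is taken from the request/response interval and only ever adjusted upward, which keeps the rtt * timeout_factor deadline conservative. A standalone illustration of that update rule:

def update_rtt(current_rtt, req_sent, req_resp):
    # Illustrative: keep the largest observed request-to-response time,
    # mirroring the "elif self.rtt < rtt" branch above.
    sample = req_resp - req_sent
    if current_rtt is None or current_rtt < sample:
        return sample
    return current_rtt

assert update_rtt(None, 10.0, 10.5) == 0.5
assert update_rtt(0.5, 20.0, 20.25) == 0.5   # smaller samples are ignored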
@@ -313,7 +416,12 @@ class Resource:
             if self.rtt == None:
                 self.rtt = rtt

-            self.status == Resource.TRANSFERRING
+            if self.status != Resource.TRANSFERRING:
+                self.status = Resource.TRANSFERRING
+                self.watchdog_job()
+
+            self.retries_left = self.max_retries
+
             wants_more_hashmap = True if ord(request_data[0]) == Resource.HASHMAP_IS_EXHAUSTED else False
             pad = 1+Resource.MAPHASH_LEN if wants_more_hashmap else 1

@@ -322,16 +430,18 @@ class Resource:
             for i in range(0,len(requested_hashes)/Resource.MAPHASH_LEN):
                 requested_hash = requested_hashes[i*Resource.MAPHASH_LEN:(i+1)*Resource.MAPHASH_LEN]

-                i = 0
+                pi = 0
                 for part in self.parts:
                     if part.map_hash == requested_hash:
                         if not part.sent:
                             part.send()
+                            self.sent_parts += 1
                         else:
                             part.resend()
                         self.last_activity = time.time()
+                        self.last_part_sent = self.last_activity
                         break
-                    i += 1
+                    pi += 1

             if wants_more_hashmap:
                 last_map_hash = request_data[1:Resource.MAPHASH_LEN+1]
@@ -358,10 +468,15 @@ class Resource:

             hmu = self.hash+umsgpack.packb([segment, hashmap])
             hmu_packet = RNS.Packet(self.link, hmu, context = RNS.Packet.RESOURCE_HMU)

             hmu_packet.send()
             self.last_activity = time.time()

+        if self.sent_parts == len(self.parts):
+            self.status = Resource.AWAITING_PROOF
+
     def cancel(self):
-        self.status = Resource.FAILED
-        if self.initiator:
-            if self.link.status == RNS.Link.ACTIVE:
+        if self.status < Resource.COMPLETE:
+            self.status = Resource.FAILED
+            if self.initiator:
+                if self.link.status == RNS.Link.ACTIVE:
@@ -382,7 +497,7 @@ class Resource:
         return progress

     def __str__(self):
-        return RNS.prettyHexRep(self.hash)
+        return RNS.prettyhexrep(self.hash)+str(self.link)


 class ResourceAdvertisement:
RNS/Reticulum.py

@@ -63,8 +63,8 @@ class Reticulum:
                     RNS.loglevel = int(value)
                     if RNS.loglevel < 0:
                         RNS.loglevel = 0
-                    if RNS.loglevel > 6:
-                        RNS.loglevel = 6
+                    if RNS.loglevel > 7:
+                        RNS.loglevel = 7

             if "reticulum" in self.config:
                 for option in self.config["reticulum"]:
RNS/Transport.py

@@ -1,3 +1,4 @@
+import os
 import RNS
 import time
 import threading
@@ -64,7 +65,7 @@ class Transport:
     @staticmethod
     def outbound(packet):
         while (Transport.jobs_running):
-            sleep(0.1)
+            sleep(0.01)

         Transport.jobs_locked = True
         packet.updateHash()
@@ -101,9 +102,15 @@ class Transport:
     def packet_filter(packet):
         if packet.context == RNS.Packet.KEEPALIVE:
             return True
+        if packet.context == RNS.Packet.RESOURCE_REQ:
+            return True
+        if packet.context == RNS.Packet.RESOURCE_PRF:
+            return True
+
         if not packet.packet_hash in Transport.packet_hashlist:
             return True

         return False

     @staticmethod
     def inbound(raw, interface=None):
         while (Transport.jobs_running):
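packet_filter() gains exceptions for the RESOURCE_REQ and RESOURCE_PRF contexts, so retransmitted part requests and proofs are not discarded as duplicates by the packet-hash check. An illustrative standalone filter with the same shape (contexts shown as strings for brevity):

def packet_filter(context, packet_hash, seen_hashes,
                  always_allow=("KEEPALIVE", "RESOURCE_REQ", "RESOURCE_PRF")):
    # Illustrative duplicate filter: certain contexts bypass the seen-hash
    # check so identical retransmissions still get delivered.
    if context in always_allow:
        return True
    return packet_hash not in seen_hashes

seen = {b"abc"}
assert packet_filter("RESOURCE_PRF", b"abc", seen)   # retried proof passes
assert not packet_filter("DATA", b"abc", seen)       # ordinary duplicate dropped
assert packet_filter("DATA", b"new", seen)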
@@ -116,8 +123,9 @@ class Transport:
         packet.updateHash()
         packet.receiving_interface = interface

-        RNS.log(str(interface)+" received packet with hash "+RNS.prettyhexrep(packet.packet_hash), RNS.LOG_DEBUG)
+        RNS.log(str(interface)+" received packet with hash "+RNS.prettyhexrep(packet.packet_hash), RNS.LOG_EXTREME)

+        # TODO: Rewrite these redundant cache calls
         if Transport.packet_filter(packet):
             Transport.packet_hashlist.append(packet.packet_hash)

@@ -216,6 +224,9 @@ class Transport:
     def shouldCache(packet):
         # TODO: Implement sensible rules for which
         # packets to cache
+        if packet.context == RNS.Packet.RESOURCE_PRF:
+            return True
+
         return False

     @staticmethod
@@ -226,8 +237,37 @@ class Transport:
             file = open(RNS.Reticulum.cachepath+"/"+packet_hash, "w")
             file.write(packet.raw)
             file.close()
-            RNS.log("Wrote packet "+packet_hash+" to cache", RNS.LOG_DEBUG)
+            RNS.log("Wrote packet "+packet_hash+" to cache", RNS.LOG_EXTREME)
         except Exception as e:
             RNS.log("Error writing packet to cache", RNS.LOG_ERROR)
             RNS.log("The contained exception was: "+str(e))

+    @staticmethod
+    def cache_request_packet(packet):
+        if len(packet.data) == RNS.Identity.HASHLENGTH/8:
+            packet_hash = RNS.hexrep(packet.data, delimit=False)
+            path = RNS.Reticulum.cachepath+"/"+packet_hash
+            if os.path.isfile(path):
+                file = open(path, "r")
+                raw = file.read()
+                file.close()
+                packet = RNS.Packet(None, raw)
+                # TODO: Implement outbound for this
+
+    @staticmethod
+    def cache_request(packet_hash):
+        RNS.log("Cache request for "+RNS.prettyhexrep(packet_hash), RNS.LOG_EXTREME)
+        path = RNS.Reticulum.cachepath+"/"+RNS.hexrep(packet_hash, delimit=False)
+        if os.path.isfile(path):
+            file = open(path, "r")
+            raw = file.read()
+            Transport.inbound(raw)
+            file.close()
+        else:
+            cache_request_packet = RNS.Packet(Transport.transport_destination(), packet_hash, context = RNS.Packet.CACHE_REQUEST)
+
+    @staticmethod
+    def transport_destination():
+        # TODO: implement this
+        pass
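The new cache_request() path first looks for the packet in the local cache directory and replays it through Transport.inbound(); only if it is missing does it build a CACHE_REQUEST packet toward the still unimplemented transport destination. A minimal sketch of that local-lookup-or-ask-the-network split (the directory and hash below are hypothetical):

import os

def cache_lookup(cache_dir, packet_hash_hex):
    # Illustrative version of the local half of cache_request(): return the
    # cached raw bytes if present, otherwise None so the caller can fall
    # back to asking the network.
    path = os.path.join(cache_dir, packet_hash_hex)
    if os.path.isfile(path):
        with open(path, "rb") as f:
            return f.read()
    return None

raw = cache_lookup("/tmp/rns-cache", "00ff")   # hypothetical path and hash
if raw is not None:
    pass  # replay locally, e.g. hand raw back to Transport.inbound()
else:
    pass  # send a CACHE_REQUEST packet to the network instead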
RNS/__init__.py

@@ -22,6 +22,7 @@ LOG_NOTICE = 3
 LOG_INFO = 4
 LOG_VERBOSE = 5
 LOG_DEBUG = 6
+LOG_EXTREME = 7

 LOG_STDOUT = 0x91
 LOG_FILE = 0x92