Skip to content

Commit

Permalink
decreased rollback limit to 30-40
Browse files Browse the repository at this point in the history
removed old peer format support
fixed hyperblock recompression
  • Loading branch information
hclivess committed Jul 30, 2019
1 parent 0c57b5b commit b07d3e8
Show file tree
Hide file tree
Showing 4 changed files with 45 additions and 73 deletions.
5 changes: 2 additions & 3 deletions essentials.py
Original file line number Diff line number Diff line change
Expand Up @@ -117,9 +117,8 @@ def round_down(number, order):


def checkpoint_set(node):
    """Set the node's rollback checkpoint a short distance behind the chain tip.

    The checkpoint is the height below which blocks are considered final and
    will not be rolled back.  It is computed as the last block height rounded
    down to the nearest 10, minus 30 — i.e. 30-40 blocks behind the tip,
    depending on where the tip falls within its group of ten.

    :param node: node object; reads ``node.last_block`` (int, current chain
        height) and writes ``node.checkpoint``.  Also uses
        ``node.logger.app_log`` to report the new checkpoint.
    :return: None (mutates ``node`` in place).
    """
    # round_down(x, 10) snaps the height to a multiple of 10, so the
    # checkpoint only moves in steps of 10 rather than on every block.
    node.checkpoint = round_down(node.last_block, 10) - 30
    node.logger.app_log.warning(f"Checkpoint set to {node.checkpoint}")


def ledger_balance3(address, cache, db_handler):
Expand Down
2 changes: 2 additions & 0 deletions libs/node.py
Original file line number Diff line number Diff line change
Expand Up @@ -61,6 +61,8 @@ def __init__(self):
self.last_block_timestamp = None
self.last_block_ago = None

self.recompress = None

self.accept_peers = True
self.difficulty = None
self.ledger_temp = None
Expand Down
21 changes: 14 additions & 7 deletions node.py
Original file line number Diff line number Diff line change
Expand Up @@ -203,7 +203,7 @@ def recompress_ledger(node, rebuild=False, depth=15000):
hyp.execute("VACUUM")
hyper.close()

if os.path.exists(node.hyper_path) and rebuild:
if os.path.exists(node.hyper_path):
os.remove(node.hyper_path) # remove the old hyperblocks to rebuild
os.rename(node.ledger_path + '.temp', node.hyper_path)

Expand All @@ -223,12 +223,12 @@ def ledger_check_heights(node, db_handler):

if hdd_block_max == hdd2_block_last == hdd2_block_last_misc == hdd_block_max_diff and node.hyper_recompress: # cross-integrity check
node.logger.app_log.warning("Status: Recompressing hyperblocks (keeping full ledger)")
recompress = True
node.recompress = True

#print (hdd_block_max,hdd2_block_last,node.hyper_recompress)
elif hdd_block_max == hdd2_block_last and not node.hyper_recompress:
node.logger.app_log.warning("Status: Hyperblock recompression skipped")
recompress = False
node.recompress = False
else:
lowest_block = min(hdd_block_max, hdd2_block_last, hdd_block_max_diff, hdd2_block_last_misc)
highest_block = max(hdd_block_max, hdd2_block_last, hdd_block_max_diff, hdd2_block_last_misc)
Expand All @@ -237,14 +237,13 @@ def ledger_check_heights(node, db_handler):
f"Status: Cross-integrity check failed, {highest_block} will be rolled back below {lowest_block}")

rollback(node,db_handler_initial,lowest_block) #rollback to the lowest value
recompress = False
node.recompress = False

else:
node.logger.app_log.warning("Status: Compressing ledger to Hyperblocks")
recompress = True
node.recompress = True


if recompress:
recompress_ledger(node)


def bin_convert(string):
Expand Down Expand Up @@ -2084,6 +2083,14 @@ def add_indices(db_handler: dbhandler.DbHandler):
db_handler_initial = dbhandler.DbHandler(node.index_db, node.ledger_path, node.hyper_path, node.ram, node.ledger_ram_file, node.logger, trace_db_calls=node.trace_db_calls)

ledger_check_heights(node, db_handler_initial)


if node.recompress:
#todo: do not close database and move files, swap tables instead
db_handler_initial.close()
recompress_ledger(node)
db_handler_initial = dbhandler.DbHandler(node.index_db, node.ledger_path, node.hyper_path, node.ram, node.ledger_ram_file, node.logger, trace_db_calls=node.trace_db_calls)

ram_init(db_handler_initial)
node_block_init(db_handler_initial)
initial_db_check()
Expand Down
90 changes: 27 additions & 63 deletions peershandler.py
Original file line number Diff line number Diff line change
Expand Up @@ -292,69 +292,33 @@ def peersync(self, subdata: str) -> int:
with self.peersync_lock:
try:
total_added = 0
if "(" in str(subdata): # OLD WAY
# TODO: next fork, no such peers should be left out. Simplify this code.
server_peer_tuples = re.findall("'([\d.]+)', '([\d]+)'", subdata)
self.app_log.info(f"Received following {len(server_peer_tuples)} peers: {server_peer_tuples}")
with open(self.peerfile, "r") as peer_file:
peers = json.load(peer_file)
for pair in set(server_peer_tuples): # set removes duplicates
if pair not in peers and self.accept_peers:
self.app_log.info(f"Outbound: {pair} is a new peer, saving if connectible")
try:
# check if node is active
s_purge = socks.socksocket()
s_purge.settimeout(5)
if self.config.tor:
s_purge.setproxy(socks.PROXY_TYPE_SOCKS5, "127.0.0.1", 9050)
s_purge.connect((pair[0], int(pair[1])))
s_purge.close()
# Save to suggested if not already
with open(self.suggested_peerfile) as peers_existing:
peers_suggested = json.load(peers_existing)
if pair not in peers_suggested:
peers_suggested[pair[0]] = pair[1]

with open(self.suggested_peerfile, "w") as peer_list_file:
json.dump(peers_suggested, peer_list_file)
# Also add to our local peers dict and dump the json
if pair[0] not in peers:
total_added += 1
peers[pair[0]] = pair[1]
with open(self.peerfile, "w") as peer_file:
json.dump(peers, peer_file)
except:
pass
self.app_log.info("Not connectible")
else:
self.app_log.info(f"Outbound: {pair} is not a new peer")
else:
# json format

subdata = self.dict_validate(subdata)
data_dict = json.loads(subdata)

self.app_log.info(f"Received {len(data_dict)} peers.")
# Simplified the log, every peers then has a ok or ko status anyway.
for ip, port in data_dict.items():
if ip not in self.peer_dict:
self.app_log.info(f"Outbound: {ip}:{port} is a new peer, saving if connectible")
try:
s_purge = socks.socksocket()
s_purge.settimeout(5)
if self.config.tor:
s_purge.setproxy(socks.PROXY_TYPE_SOCKS5, "127.0.0.1", 9050)
s_purge.connect((ip, int(port))) # save a new peer file with only active nodes
s_purge.close()
# This only adds to our local dict, does not force save.
if ip not in self.peer_dict:
total_added += 1
self.peer_dict[ip] = port
self.app_log.info(f"Inbound: Peer {ip}:{port} saved to local peers")
except:
self.app_log.info("Not connectible")
else:
self.app_log.info(f"Outbound: {ip}:{port} is not a new peer")

# json format

subdata = self.dict_validate(subdata)
data_dict = json.loads(subdata)

self.app_log.info(f"Received {len(data_dict)} peers.")
# Simplified the log, every peers then has a ok or ko status anyway.
for ip, port in data_dict.items():
if ip not in self.peer_dict:
self.app_log.info(f"Outbound: {ip}:{port} is a new peer, saving if connectible")
try:
s_purge = socks.socksocket()
s_purge.settimeout(5)
if self.config.tor:
s_purge.setproxy(socks.PROXY_TYPE_SOCKS5, "127.0.0.1", 9050)
s_purge.connect((ip, int(port))) # save a new peer file with only active nodes
s_purge.close()
# This only adds to our local dict, does not force save.
if ip not in self.peer_dict:
total_added += 1
self.peer_dict[ip] = port
self.app_log.info(f"Inbound: Peer {ip}:{port} saved to local peers")
except:
self.app_log.info("Not connectible")
else:
self.app_log.info(f"Outbound: {ip}:{port} is not a new peer")
except Exception as e:
self.app_log.warning(e)
exc_type, exc_obj, exc_tb = sys.exc_info()
Expand Down

0 comments on commit b07d3e8

Please sign in to comment.