diff --git a/tests/api_tests/jsonsocket.py b/tests/api_tests/jsonsocket.py
new file mode 100755
index 0000000000..3abb9f1f00
--- /dev/null
+++ b/tests/api_tests/jsonsocket.py
@@ -0,0 +1,110 @@
+#!/usr/bin/env python3
+import sys
+import json
+import socket
+
+class JSONSocket(object):
+    """
+    Encapsulates a socket object with special handling of JSON-RPC.
+    timeout is ignored for now - non-blocking sockets are not supported.
+    """
+    def __init__(self, host, port, path, timeout=None):
+        """
+        host in form [http[s]://]<host>[:<port>]
+        if the port is not part of host, it must be specified as an argument
+        """
+        self.__sock = None
+        if host.startswith("http://") or host.startswith("https://"):
+            host = host[host.find("//")+2:]
+        _host = host
+        if port is not None:
+            host = host + ":" + str(port)
+        else:
+            colon_pos = host.rfind(":")
+            _host = host[0:colon_pos]
+            port = int(host[colon_pos+1:])
+        self.__fullpath = host + path
+        self.__host = bytes(host, "utf-8")
+        self.__path = bytes(path, "utf-8")
+        self.__sock = socket.create_connection((_host, port))
+        self.__sock.setblocking(True)
+        #self.__sock.settimeout(timeout)
+        self.__head = b"POST " + self.__path + b" HTTP/1.0\r\n" + \
+                      b"HOST: " + self.__host + b"\r\n" + \
+                      b"Content-type: application/json\r\n"
+
+    def get_fullpath(self):
+        return self.__fullpath
+
+    def request(self, data=None, json_obj=None):
+        """
+        data - complete binary form of the json request (ends with '\r\n')
+        json_obj - json request as a python dict
+
+        returns the json response as a python dict
+        """
+        # json_obj is named so that it does not shadow the json module
+        if data is None:
+            data = bytes(json.dumps(json_obj), "utf-8") + b"\r\n"
+        length = bytes(str(len(data)), "utf-8")
+        request = self.__head + \
+                  b"Content-length: " + length + b"\r\n\r\n" + \
+                  data
+        #print("request:", request.decode("utf-8"))
+        self.__sock.sendall(request)
+        status, response = self.__read()
+        return status, response
+
+    def __call__(self, data=None, json_obj=None):
+        return self.request(data, json_obj)
+
+    def __read(self):
+        binary = b''
+        while True:
+            temp = self.__sock.recv(4096)
+            if not temp: break
+            binary += temp
+
+        response = binary.decode("utf-8")
+
+        # strip the HTTP headers, leaving only the response body
+        if response.startswith("HTTP"):
+            response = response[response.find("\r\n\r\n")+4:]
+        if response:
+            r = json.loads(response)
+            if 'result' in r:
+                return True, r
+            else:
+                return False, r
+
+        return False, {}
+
+    def __del__(self):
+        if self.__sock:
+            self.__sock.close()
+
+
+def steemd_call(host, data=None, json_obj=None, max_tries=10, timeout=0.1):
+    """
+    host - [http[s]://]<host>[:<port>]
+    data - binary form of the request body; if missing, a json object should be provided (as a python dict/array)
+    """
+    for i in range(max_tries):
+        try:
+            jsocket = JSONSocket(host, None, "/rpc", timeout)
+        except:
+            exc_type, exc_value, exc_traceback = sys.exc_info()
+            print("Error: {} {} {}".format(exc_type, exc_value, exc_traceback))
+            print("Error: Cannot open JSONSocket for:", host)
+            continue
+        try:
+            status, response = jsocket(data, json_obj)
+            if status:
+                return status, response
+        except:
+            exc_type, exc_value, exc_traceback = sys.exc_info()
+            print("Error: {} {} {}".format(exc_type, exc_value, exc_traceback))
+            print("Error: JSONSocket request failed for: {} ({})".format(host, data.decode("utf-8")))
+            continue
+
+    return False, {}
\ No newline at end of file
diff --git a/tests/api_tests/list_account.py b/tests/api_tests/list_account.py
new file mode 100755
index 0000000000..36ca7da690
--- /dev/null
+++ b/tests/api_tests/list_account.py
@@ -0,0 +1,78 @@
+#!/usr/bin/env python3
+"""
+Create a list of all steem accounts in a file.
+Usage: list_account.py <url> [<filename>]
+"""
+import sys
+import json
+from jsonsocket import JSONSocket
+from jsonsocket import steemd_call
+
+def list_accounts(url):
+    """
+    url in form <ip>:<port>
+    """
+    last_account = ""
+    end = False
+    accounts_count = 0
+    accounts = []
+
+    while end == False:
+        request = bytes( json.dumps( {
+            "jsonrpc": "2.0",
+            "id": 0,
+            "method": "database_api.list_accounts",
+            "params": { "start": last_account, "limit": 1000, "order": "by_name" }
+            } ), "utf-8" ) + b"\r\n"
+
+        status, response = steemd_call(url, data=request)
+
+        if status == False:
+            print( "rpc failed for last_account: " + last_account )
+            return []
+
+        account_list = response["result"]["accounts"]
+
+        if last_account != "":
+            assert account_list[0]["name"] == last_account
+            del account_list[0]
+
+        if len( account_list ) == 0:
+            end = True
+            continue
+
+        last_account = account_list[-1]["name"]
+        accounts_count += len( account_list )
+        for account in account_list:
+            accounts.append( account["name"] )
+
+    # while end == False
+    return accounts
+
+
+def main():
+    if len( sys.argv ) < 2 or len( sys.argv ) > 3:
+        exit( "Usage: list_account.py <url> [<filename>]" )
+
+    url = sys.argv[1]
+    print( url )
+
+    accounts = list_accounts( url )
+
+    if len(accounts) == 0:
+        exit(-1)
+
+    if len( sys.argv ) == 3:
+        filename = sys.argv[2]
+
+        try: file = open( filename, "w" )
+        except: exit( "Cannot open file " + filename )
+
+        for account in accounts:
+            file.write(account + "\n")
+
+        file.close()
+
+
+if __name__ == "__main__":
+    main()
\ No newline at end of file
diff --git a/tests/api_tests/list_comment.py b/tests/api_tests/list_comment.py
new file mode 100755
index 0000000000..c0857a77c5
--- /dev/null
+++ b/tests/api_tests/list_comment.py
@@ -0,0 +1,75 @@
+#!/usr/bin/env python3
+"""
+Create a list of all steem comments in a file.
+Usage: list_comment.py [] +""" +import sys +import json +from jsonsocket import JSONSocket +from jsonsocket import steemd_call + +def list_comments(url): + """ + url in form : + """ + last_cashout_time = "2016-01-01T00-00-00" + end = False + comments = [] + + while end == False: + request = bytes( json.dumps( { + "jsonrpc": "2.0", + "id": 0, + "method": "database_api.list_comments", + "params": { "start":[ last_cashout_time, "", "" ], "limit": 5, "order": "by_cashout_time" } + } ), "utf-8" ) + b"\r\n" + + status, response = steemd_call(url, data=request) + + if status == False: + print( "rpc failed for last_cashout_time: " + last_cashout_time ) + return [] + + comment_list = response["result"]["comments"] + + actual_cashout_time = comment_list[-1]["cashout_time"] + + if actual_cashout_time == last_cashout_time: + end = True + continue + + last_cashout_time = actual_cashout_time + + for comment in comment_list: + comments.append( comment["permlink"]+";"+comment["author"] +";"+comment["last_update"] ) + + # while end == False + return comments + + +def main(): + if len( sys.argv ) < 2 or len( sys.argv ) > 3: + exit( "Usage: list_comment.py []" ) + + url = sys.argv[1] + print( url ) + + comments = list_comments( url ) + + if len(comments) == 0: + exit(-1) + + if len( sys.argv ) == 3: + filename = sys.argv[2] + + try: file = open( filename, "w" ) + except: exit( "Cannot open file " + filename ) + + for comment in comments: + file.write(comment + "\n") + + file.close() + + +if __name__ == "__main__": + main() \ No newline at end of file diff --git a/tests/api_tests/test_ah_get_account_history.py b/tests/api_tests/test_ah_get_account_history.py new file mode 100755 index 0000000000..91fc55f039 --- /dev/null +++ b/tests/api_tests/test_ah_get_account_history.py @@ -0,0 +1,173 @@ +#!/usr/bin/env python3 +""" + Usage: __name__ jobs url1 url2 [working_dir [accounts_file]] + Example: script_name 4 http://127.0.0.1:8090 http://127.0.0.1:8091 [get_account_history [accounts]] + set jobs to 0 if you want use all processors + url1 is reference url for list_accounts +""" +import sys +import json +import os +import shutil +import locale +from concurrent.futures import ThreadPoolExecutor +from concurrent.futures import ProcessPoolExecutor +from concurrent.futures import Future +from concurrent.futures import wait +from jsonsocket import JSONSocket +from jsonsocket import steemd_call +from list_account import list_accounts +from pathlib import Path + + +wdir = Path() +errors = 0 + + +def main(): + if len( sys.argv ) < 4 or len( sys.argv ) > 6: + print( "Usage: __name__ jobs url1 url2 [working_dir [accounts_file]]" ) + print( " Example: __name__ 4 http://127.0.0.1:8090 http://127.0.0.1:8091 [get_account_history [accounts]]" ) + print( " set jobs to 0 if you want use all processors" ) + print( " url1 is reference url for list_accounts" ) + exit () + + global wdir + global errors + + jobs = int(sys.argv[1]) + if jobs <= 0: + import multiprocessing + jobs = multiprocessing.cpu_count() + + url1 = sys.argv[2] + url2 = sys.argv[3] + + if len( sys.argv ) > 4: + wdir = Path(sys.argv[4]) + + accounts_file = sys.argv[5] if len( sys.argv ) > 5 else "" + + if accounts_file != "": + try: + with open(accounts_file, "rt") as file: + accounts = file.readlines() + except: + exit("Cannot open file: " + accounts_file) + else: + accounts = list_accounts(url1) + + length = len(accounts) + + if length == 0: + exit("There are no any account!") + + create_wdir() + + print( str(length) + " accounts" ) + + if jobs > length: + jobs = 
length + + print( "setup:" ) + print( " jobs: {}".format(jobs) ) + print( " url1: {}".format(url1) ) + print( " url2: {}".format(url2) ) + print( " wdir: {}".format(wdir) ) + print( " accounts_file: {}".format(accounts_file) ) + + if jobs > 1: + first = 0 + last = length - 1 + accounts_per_job = length // jobs + + with ProcessPoolExecutor(max_workers=jobs) as executor: + for i in range(jobs-1): + executor.submit(compare_results, url1, url2, accounts[first : first+accounts_per_job-1]) + first = first + accounts_per_job + executor.submit(compare_results, url1, url2, accounts[first : last]) + else: + compare_results(url1, url2, accounts) + + exit( errors ) + + +def create_wdir(): + global wdir + + if wdir.exists(): + if wdir.is_file(): + os.remove(wdir) + + if wdir.exists() == False: + wdir.mkdir(parents=True) + + +def compare_results(url1, url2, accounts, max_tries=10, timeout=0.1): + success = True + print("Compare accounts: [{}..{}]".format(accounts[0], accounts[-1])) + + for account in accounts: + if get_account_history(url1, url2, account, max_tries, timeout) == False: + success = False; break + + print("Compare accounts: [{}..{}] {}".format(accounts[0], accounts[-1], "finished" if success else "break with error" )) + + +def get_account_history(url1, url2, account, max_tries=10, timeout=0.1): + global wdir + global errors + START = -1 + HARD_LIMIT = 10000 + LIMIT = HARD_LIMIT + + while True: + request = bytes( json.dumps( { + "jsonrpc": "2.0", + "id": 0, + "method": "account_history_api.get_account_history", + "params": { "account": account, "start": START, "limit": LIMIT } + } ), "utf-8" ) + b"\r\n" + + with ThreadPoolExecutor(max_workers=2) as executor: + future1 = executor.submit(steemd_call, url1, data=request, max_tries=max_tries, timeout=timeout) + future2 = executor.submit(steemd_call, url2, data=request, max_tries=max_tries, timeout=timeout) + + status1, json1 = future1.result() + status2, json2 = future2.result() + #status1, json1 = steemd_call(url1, data=request, max_tries=max_tries, timeout=timeout) + #status2, json2 = steemd_call(url2, data=request, max_tries=max_tries, timeout=timeout) + + if status1 == False or status2 == False or json1 != json2: + print("Comparison failed for account: {}; start: {}; limit: {}".format(account, START, LIMIT)) + errors += 1 + + filename = wdir / account + try: file = filename.open("w") + except: print("Cannot open file:", filename); return False + + file.write("Comparison failed:\n") + file.write("{} response:\n".format(url1)) + json.dump(json1, file, indent=2, sort_keys=True) + file.write("\n") + file.write("{} response:\n".format(url2)) + json.dump(json2, file, indent=2, sort_keys=True) + file.write("\n") + file.close() + return False + + history = json1["result"]["history"] + last = history[0][0] if len(history) else 0 + + if last == 0: break + + last -= 1 + START = last + LIMIT = last if last < HARD_LIMIT else HARD_LIMIT + # while True + + return True + + +if __name__ == "__main__": + main() \ No newline at end of file diff --git a/tests/api_tests/test_ah_get_ops_in_block.py b/tests/api_tests/test_ah_get_ops_in_block.py new file mode 100755 index 0000000000..4e23e71fe5 --- /dev/null +++ b/tests/api_tests/test_ah_get_ops_in_block.py @@ -0,0 +1,178 @@ +#!/usr/bin/env python3 +""" + Usage: script_name jobs url1 url2 [wdir [last_block [first_block]]] + Example: script_name 4 http://127.0.0.1:8090 http://127.0.0.1:8091 ./ 5000000 0 + set jobs to 0 if you want use all processors + if last_block == 0, it is read from url1 (as reference) +""" + 
+import sys +import json +import os +import shutil +from jsonsocket import JSONSocket +from jsonsocket import steemd_call +from concurrent.futures import ThreadPoolExecutor +from concurrent.futures import ProcessPoolExecutor +from concurrent.futures import Future +from concurrent.futures import wait +from pathlib import Path + + +wdir = Path() +errors = 0 + + +def main(): + if len(sys.argv) < 4 or len(sys.argv) > 7: + print("Usage: script_name jobs url1 url2 [wdir [last_block [first_block]]]") + print(" Example: script_name 4 http://127.0.0.1:8090 http://127.0.0.1:8091 ./ 5000000 0") + print( " set jobs to 0 if you want use all processors" ) + print(" if last_block == 0, it is read from url1 (as reference)") + exit() + + global wdir + global errors + first_block = 0 + last_block = 0 + + jobs = int(sys.argv[1]) + if jobs <= 0: + import multiprocessing + jobs = multiprocessing.cpu_count() + + url1 = sys.argv[2] + url2 = sys.argv[3] + + if len(sys.argv) > 4: + wdir = Path(sys.argv[4]) + + if len(sys.argv) > 5: + last_block = int(sys.argv[5]) + else: + last_block = 0 + if len(sys.argv) == 7: + first_block = int(sys.argv[6]) + else: + first_block = 0 + + last_block1 = get_last_block(url1) + last_block2 = get_last_block(url2) + + if last_block1 != last_block2: + exit("last block of {} ({}) is different then last block of {} ({})".format(url1, last_block1, url2, last_block2)) + + if last_block == 0: + last_block = last_block1 + elif last_block != last_block1: + print("WARNING: last block from cmdline {} is different then from {} ({})".format(last_block, url1, last_block1)) + + if last_block == 0: + exit("last block cannot be 0!") + + create_wdir() + + blocks = last_block - first_block + 1 + + if jobs > blocks: + jobs = blocks + + print("setup:") + print(" jobs: {}".format(jobs)) + print(" url1: {}".format(url1)) + print(" url2: {}".format(url2)) + print(" wdir: {}".format(wdir)) + print(" block range: {}:{}".format(first_block, last_block)) + + if jobs > 1: + blocks_per_job = blocks // jobs + + with ProcessPoolExecutor(max_workers=jobs) as executor: + for i in range(jobs-1): + executor.submit(compare_results, first_block, (first_block + blocks_per_job - 1), url1, url2) + first_block = first_block + blocks_per_job + executor.submit(compare_results, first_block, last_block, url1, url2) + else: + compare_results(first_block, last_block, url1, url2) + + exit( errors ) + + +def create_wdir(): + global wdir + + if wdir.exists(): + if wdir.is_file(): + os.remove(wdir) + + if wdir.exists() == False: + wdir.mkdir(parents=True) + + +def get_last_block(url, max_tries=10, timeout=0.1): + request = bytes( json.dumps( { + "jsonrpc": "2.0", + "id": 0, + "method": "database_api.get_dynamic_global_properties", + "params": {} + } ), "utf-8" ) + b"\r\n" + + status, response = steemd_call(url, data=request, max_tries=max_tries, timeout=timeout) + + if status == False: + return 0 + try: + return response["result"]["head_block_number"] + except: + return 0 + + +def compare_results(f_block, l_block, url1, url2, max_tries=10, timeout=0.1): + global wdir + global errors + + print( "Compare blocks [{} : {}]".format(f_block, l_block) ) + + for i in range(f_block, l_block+1): + request = bytes( json.dumps( { + "jsonrpc": "2.0", + "id": i, + "method": "account_history_api.get_ops_in_block", + "params": { "block_num": i, "only_virtual": False } + } ), "utf-8" ) + b"\r\n" + + with ThreadPoolExecutor(max_workers=2) as executor: + #with ProcessPoolExecutor(max_workers=2) as executor: + future1 = executor.submit(steemd_call, 
url1, data=request, max_tries=max_tries, timeout=timeout) + future2 = executor.submit(steemd_call, url2, data=request, max_tries=max_tries, timeout=timeout) + + status1, json1 = future1.result() + status2, json2 = future2.result() + + #status1, json1 = steemd_call(url1, data=request, max_tries=max_tries, timeout=timeout) + #status2, json2 = steemd_call(url2, data=request, max_tries=max_tries, timeout=timeout) + + if status1 == False or status2 == False or json1 != json2: + print("Difference @block: {}\n".format(i)) + errors += 1 + + filename = wdir / Path(str(f_block) + "_" + str(l_block) + ".log") + try: file = filename.open( "w" ) + except: print( "Cannot open file:", filename ); return + + file.write("Difference @block: {}\n".format(i)) + file.write("{} response:\n".format(url1)) + json.dump(json1, file, indent=2, sort_keys=True) + file.write("\n") + file.write("{} response:\n".format(url2)) + json.dump(json2, file, indent=2, sort_keys=True) + file.write("\n") + file.close() + print( "Compare blocks [{} : {}] break with error".format(f_block, l_block) ) + return + + print( "Compare blocks [{} : {}] finished".format(f_block, l_block) ) + + +if __name__ == "__main__": + main() \ No newline at end of file diff --git a/tests/api_tests/test_list_votes.py b/tests/api_tests/test_list_votes.py new file mode 100755 index 0000000000..0cd6c594bd --- /dev/null +++ b/tests/api_tests/test_list_votes.py @@ -0,0 +1,197 @@ +#!/usr/bin/env python3 +""" + Usage: __name__ jobs url1 url2 [nr_cycles [working_dir [comments_file]]] + Example: script_name 4 http://127.0.0.1:8090 http://127.0.0.1:8091 [20 my_comments_data_dir [comments]] + by default: nr_cycles = 3; set nr_cycles to 0 if you want to use all comments + set jobs to 0 if you want use all processors + url1 is reference url for list_comments +""" +import sys +import json +import os +import shutil +import locale +from concurrent.futures import ThreadPoolExecutor +from concurrent.futures import ProcessPoolExecutor +from concurrent.futures import Future +from concurrent.futures import wait +from jsonsocket import JSONSocket +from jsonsocket import steemd_call +from list_comment import list_comments +from pathlib import Path + + +wdir = Path() +errors = 0 +nr_cycles = 3 + +def main(): + if len( sys.argv ) < 4 or len( sys.argv ) > 7: + print( "Usage: __name__ jobs url1 url2 [nr_cycles [working_dir [comments_file]]]" ) + print( " Example: __name__ 4 http://127.0.0.1:8090 http://127.0.0.1:8091 [ 20 my_comments_data_dir [comments]]" ) + print( " by default: nr_cycles = 3; set nr_cycles to 0 if you want to use all comments )" ) + print( " set jobs to 0 if you want use all processors" ) + print( " url1 is reference url for list_comments" ) + exit () + + global wdir + global errors + global nr_cycles + + jobs = int(sys.argv[1]) + if jobs <= 0: + import multiprocessing + jobs = multiprocessing.cpu_count() + + url1 = sys.argv[2] + url2 = sys.argv[3] + + if len( sys.argv ) > 4: + nr_cycles = int( sys.argv[4] ) + + if len( sys.argv ) > 5: + wdir = Path(sys.argv[5]) + + comments_file = sys.argv[6] if len( sys.argv ) > 6 else "" + + if comments_file != "": + try: + with open(comments_file, "rt") as file: + comments = file.readlines() + except: + exit("Cannot open file: " + comments_file) + else: + comments = list_comments(url1) + + length = len(comments) + + if length == 0: + exit("There are no any comment!") + + create_wdir() + + print( str(length) + " comments" ) + + if jobs > length: + jobs = length + + print( "setup:" ) + print( " jobs: {}".format(jobs) ) + 
print( " url1: {}".format(url1) ) + print( " url2: {}".format(url2) ) + print( " wdir: {}".format(wdir) ) + print( " comments_file: {}".format(comments_file) ) + + if jobs > 1: + first = 0 + last = length - 1 + comments_per_job = length // jobs + + with ProcessPoolExecutor(max_workers=jobs) as executor: + for i in range(jobs-1): + executor.submit(compare_results, url1, url2, comments[first : first+comments_per_job-1]) + first = first + comments_per_job + executor.submit(compare_results, url1, url2, comments[first : last]) + else: + compare_results(url1, url2, comments) + + exit( errors ) + + +def create_wdir(): + global wdir + + if wdir.exists(): + if wdir.is_file(): + os.remove(wdir) + + if wdir.exists() == False: + wdir.mkdir(parents=True) + + +def compare_results(url1, url2, comments, max_tries=10, timeout=0.1): + success = True + print("Compare comments: [{}..{}]".format(comments[0], comments[ nr_cycles - 1 if nr_cycles > 0 else -1 ])) + + if nr_cycles > 0 and nr_cycles < len( comments ): + chosen_comments = comments[0:nr_cycles] + else: + chosen_comments = comments + + for comment_line in chosen_comments: + if list_votes(url1, url2, comment_line, max_tries, timeout) == False: + success = False; break + + print("Compare comments: [{}..{}] {}".format(comments[0], comments[ nr_cycles - 1 if nr_cycles > 0 else -1 ], "finished" if success else "break with error" )) + + +def list_votes(url1, url2, comment_line, max_tries=10, timeout=0.1): + global wdir + global errors + LIMIT = 1000 + + comment_array = comment_line.split(';') + permlink = comment_array[0] + author = comment_array[1] + voter = "" + + print( "PERMLINK: {}, AUTHOR: {}".format( permlink, author )) + + while True: + request = bytes( json.dumps( { + "jsonrpc": "2.0", + "id": 0, + "method": "database_api.list_votes", + "params": { "start": [ author, permlink, voter ], "limit": LIMIT, "order":"by_comment_voter" } + } ), "utf-8" ) + b"\r\n" + + with ThreadPoolExecutor(max_workers=2) as executor: + future1 = executor.submit(steemd_call, url1, data=request, max_tries=max_tries, timeout=timeout) + future2 = executor.submit(steemd_call, url2, data=request, max_tries=max_tries, timeout=timeout) + + status1, json1 = future1.result() + status2, json2 = future2.result() + #status1, json1 = steemd_call(url1, data=request, max_tries=max_tries, timeout=timeout) + #status2, json2 = steemd_call(url2, data=request, max_tries=max_tries, timeout=timeout) + + if status1 == False or status2 == False or json1 != json2: + print("Comparison failed for permlink: {}; author: {}; limit: {}".format(permlink, author, LIMIT)) + errors += 1 + + filename = wdir / permlink + try: file = filename.open("w") + except: print("Cannot open file:", filename); return False + + file.write("Comparison failed:\n") + file.write("{} response:\n".format(url1)) + json.dump(json1, file, indent=2, sort_keys=True) + file.write("\n") + file.write("{} response:\n".format(url2)) + json.dump(json2, file, indent=2, sort_keys=True) + file.write("\n") + file.close() + return False + + votes = json1["result"]["votes"] + votes_length = len( votes ) + if votes_length > 0: + actual_permlink = votes[-1]["permlink"] + actual_author = votes[-1]["author"] + actual_voter = votes[-1]["voter"] + + if( actual_permlink == permlink and actual_author == author and actual_voter == voter ): + break + else: + permlink = actual_permlink + author = actual_author + voter = actual_voter + else: + break; + + # while True + + return True + + +if __name__ == "__main__": + main() \ No newline at end of file diff 
--git a/tests/api_tests/test_list_votes2.py b/tests/api_tests/test_list_votes2.py new file mode 100755 index 0000000000..7dfb5f2153 --- /dev/null +++ b/tests/api_tests/test_list_votes2.py @@ -0,0 +1,200 @@ +#!/usr/bin/env python3 +""" + Usage: __name__ jobs url1 url2 [nr_cycles [working_dir [comments_file]]] + Example: script_name 4 http://127.0.0.1:8090 http://127.0.0.1:8091 [20 my_comments_data_dir [comments]] + by default: nr_cycles = 3; set nr_cycles to 0 if you want to use all comments + set jobs to 0 if you want use all processors + url1 is reference url for list_comments +""" +import sys +import json +import os +import shutil +import locale +from concurrent.futures import ThreadPoolExecutor +from concurrent.futures import ProcessPoolExecutor +from concurrent.futures import Future +from concurrent.futures import wait +from jsonsocket import JSONSocket +from jsonsocket import steemd_call +from list_comment import list_comments +from pathlib import Path + + +wdir = Path() +errors = 0 +nr_cycles = 3 + +def main(): + if len( sys.argv ) < 4 or len( sys.argv ) > 7: + print( "Usage: __name__ jobs url1 url2 [nr_cycles [working_dir [comments_file]]]" ) + print( " Example: __name__ 4 http://127.0.0.1:8090 http://127.0.0.1:8091 [ 20 my_comments_data_dir [comments]]" ) + print( " by default: nr_cycles = 3; set nr_cycles to 0 if you want to use all comments )" ) + print( " set jobs to 0 if you want use all processors" ) + print( " url1 is reference url for list_comments" ) + exit () + + global wdir + global errors + global nr_cycles + + jobs = int(sys.argv[1]) + if jobs <= 0: + import multiprocessing + jobs = multiprocessing.cpu_count() + + url1 = sys.argv[2] + url2 = sys.argv[3] + + if len( sys.argv ) > 4: + nr_cycles = int( sys.argv[4] ) + + if len( sys.argv ) > 5: + wdir = Path(sys.argv[5]) + + comments_file = sys.argv[6] if len( sys.argv ) > 6 else "" + + if comments_file != "": + try: + with open(comments_file, "rt") as file: + comments = file.readlines() + except: + exit("Cannot open file: " + comments_file) + else: + comments = list_comments(url1) + + length = len(comments) + + if length == 0: + exit("There are no any comment!") + + create_wdir() + + print( str(length) + " comments" ) + + if jobs > length: + jobs = length + + print( "setup:" ) + print( " jobs: {}".format(jobs) ) + print( " url1: {}".format(url1) ) + print( " url2: {}".format(url2) ) + print( " wdir: {}".format(wdir) ) + print( " comments_file: {}".format(comments_file) ) + + if jobs > 1: + first = 0 + last = length - 1 + comments_per_job = length // jobs + + with ProcessPoolExecutor(max_workers=jobs) as executor: + for i in range(jobs-1): + executor.submit(compare_results, url1, url2, comments[first : first+comments_per_job-1]) + first = first + comments_per_job + executor.submit(compare_results, url1, url2, comments[first : last]) + else: + compare_results(url1, url2, comments) + + exit( errors ) + + +def create_wdir(): + global wdir + + if wdir.exists(): + if wdir.is_file(): + os.remove(wdir) + + if wdir.exists() == False: + wdir.mkdir(parents=True) + + +def compare_results(url1, url2, comments, max_tries=10, timeout=0.1): + success = True + print("Compare comments: [{}..{}]".format(comments[0], comments[ nr_cycles - 1 if nr_cycles > 0 else -1 ])) + + if nr_cycles > 0 and nr_cycles < len( comments ): + chosen_comments = comments[0:nr_cycles] + else: + chosen_comments = comments + + for comment_line in chosen_comments: + if list_votes(url1, url2, comment_line, max_tries, timeout) == False: + success = False; break + + 
print("Compare comments: [{}..{}] {}".format(comments[0], comments[ nr_cycles - 1 if nr_cycles > 0 else -1 ], "finished" if success else "break with error" )) + + +def list_votes(url1, url2, comment_line, max_tries=10, timeout=0.1): + global wdir + global errors + LIMIT = 1000 + + comment_array = comment_line.split(';') + permlink = comment_array[0] + author = comment_array[1] + last_update = comment_array[2] + voter = "" + + print( "PERMLINK: {}, AUTHOR: {} LAST_UPDATE: {}".format( permlink, author, last_update )) + + while True: + request = bytes( json.dumps( { + "jsonrpc": "2.0", + "id": 0, + "method": "database_api.list_votes", + "params": { "start": [ voter, last_update, author, permlink ], "limit": LIMIT, "order":"by_voter_last_update" } + } ), "utf-8" ) + b"\r\n" + + with ThreadPoolExecutor(max_workers=2) as executor: + future1 = executor.submit(steemd_call, url1, data=request, max_tries=max_tries, timeout=timeout) + future2 = executor.submit(steemd_call, url2, data=request, max_tries=max_tries, timeout=timeout) + + status1, json1 = future1.result() + status2, json2 = future2.result() + #status1, json1 = steemd_call(url1, data=request, max_tries=max_tries, timeout=timeout) + #status2, json2 = steemd_call(url2, data=request, max_tries=max_tries, timeout=timeout) + + if status1 == False or status2 == False or json1 != json2: + print("Comparison failed for permlink: {}; author: {}; limit: {}".format(permlink, author, LIMIT)) + errors += 1 + + filename = wdir / permlink + try: file = filename.open("w") + except: print("Cannot open file:", filename); return False + + file.write("Comparison failed:\n") + file.write("{} response:\n".format(url1)) + json.dump(json1, file, indent=2, sort_keys=True) + file.write("\n") + file.write("{} response:\n".format(url2)) + json.dump(json2, file, indent=2, sort_keys=True) + file.write("\n") + file.close() + return False + + votes = json1["result"]["votes"] + votes_length = len( votes ) + if votes_length > 0: + actual_permlink = votes[-1]["permlink"] + actual_author = votes[-1]["author"] + actual_voter = votes[-1]["voter"] + actual_last_update = votes[-1]["last_update"] + + if( actual_permlink == permlink and actual_author == author and actual_voter == voter ): + break + else: + permlink = actual_permlink + author = actual_author + voter = actual_voter + last_update = actual_last_update + else: + break; + + # while True + + return True + + +if __name__ == "__main__": + main() \ No newline at end of file diff --git a/tests/smoketest/.dockerignore b/tests/smoketest/.dockerignore new file mode 100644 index 0000000000..d818a87076 --- /dev/null +++ b/tests/smoketest/.dockerignore @@ -0,0 +1,3 @@ +Dockerfile +README.md +docker_build_and_run.sh diff --git a/tests/smoketest/Dockerfile b/tests/smoketest/Dockerfile new file mode 100644 index 0000000000..eaaa8e3f2d --- /dev/null +++ b/tests/smoketest/Dockerfile @@ -0,0 +1,28 @@ +FROM phusion/baseimage:0.9.19 + +ENV LANG=en_US.UTF-8 +ENV WDIR=/usr/local/steem +ENV SMOKETEST=$WDIR/smoketest + +RUN apt-get update +RUN apt-get install -y apt-utils +RUN apt-get install -y libreadline-dev +RUN apt-get install -y python3 +RUN apt-get install -y python3-pip +RUN pip3 install --upgrade pip +RUN pip3 install pyresttest + +COPY . 
$SMOKETEST + +RUN apt-get clean +RUN rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/* + +#reference: volume points to reference steemd +#tested: volume points to tested steemd +#ref_blockchain: volume points to reference folder, where blockchain folder exists +#tested_blockchain: volume points to tested folder, where blockchain folder exists +VOLUME ["reference", "tested", "ref_blockchain", "tested_blockchain"] + +#CMD pyresttest +CMD cd $SMOKETEST && \ + ./smoketest.sh /reference/steemd /tested/steemd /ref_blockchain /tested_blockchain $STOP_REPLAY_AT_BLOCK diff --git a/tests/smoketest/account_history/config.ini b/tests/smoketest/account_history/config.ini index 335cfc9e2d..3e539df34d 100644 --- a/tests/smoketest/account_history/config.ini +++ b/tests/smoketest/account_history/config.ini @@ -23,7 +23,7 @@ plugin = account_history account_history_api database_api shared-file-dir = "blockchain" # Size of the shared memory file. Default: 54G -shared-file-size = 54G +shared-file-size = 8G # Pairs of [BLOCK_NUM,BLOCK_ID] that should be enforced as checkpoints. # checkpoint = diff --git a/tests/smoketest/account_history/get_ops_in_block/test.sh b/tests/smoketest/account_history/get_ops_in_block/test.sh deleted file mode 100755 index 8b84a591a6..0000000000 --- a/tests/smoketest/account_history/get_ops_in_block/test.sh +++ /dev/null @@ -1,157 +0,0 @@ -#!/bin/bash - -EXIT_CODE=0 - -if [[ $# -lt 5 || $# -gt 7 ]] -then - echo Usage: jobs 1st_address 1st_port 2nd_address 2nd_port [last_block [first_block]] - echo if jobs == 0 script detect processor count and use it - echo if last_block not passed or 0 will be read from steemd - echo if first_block not passed will be 0 - echo Example: 127.0.0.1 8090 ec2-34-235-166-184.compute-1.amazonaws.com 8090 - exit -1 -fi - -JOBS=$1 -NODE1=http://$2:$3 -NODE2=http://$4:$5 -FIRST_BLOCK=0 -LAST_BLOCK=0 - -if [ $# -eq 6 ] -then - LAST_BLOCK=$6 -fi - -if [ $# -eq 7 ] -then - FIRST_BLOCK=$7 -fi - -if [ $JOBS -eq 0 ] -then - $JOBS=$(nproc -all) -fi - -if [ $LAST_BLOCK -eq 0 ] -then - for tries in {1..10} - do - LAST_BLOCK=$(curl -s --data '{ "jsonrpc": "2.0", "id": 0, "method": "database_api.get_dynamic_global_properties", "params": {} }' $NODE1\ - | python -c \ - 'import sys, json;\ - print(json.load(sys.stdin)["result"]["head_block_number"])') - if [[ $? -eq 0 && $LAST_BLOCK != "" ]]; then - break - fi - done - [[ $? -ne 0 || $LAST_BLOCK == "" ]] && echo FATAL: database_api.get_dynamic_global_properties on $NODE1 failed && exit -1 - - for tries in {1..10} - do - LAST_BLOCK2=$(curl -s --data '{ "jsonrpc": "2.0", "id": 0, "method": "database_api.get_dynamic_global_properties", "params": {} }' $NODE2\ - | python -c \ - 'import sys, json;\ - print(json.load(sys.stdin)["result"]["head_block_number"])') - if [[ $? -eq 0 && $LAST_BLOCK != "" ]]; then - break - fi - done - [[ $? 
-ne 0 || $LAST_BLOCK2 == "" ]] && echo FATAL: database_api.get_dynamic_global_properties on $NODE2 failed && exit -1 - - if [ $LAST_BLOCK -ne $LAST_BLOCK2 ] - then - echo FATAL: $NODE1 head_block_number $LAST_BLOCK is different than $NODE2 head_block_number $LAST_BLOCK2 - exit -1 - fi -fi - -echo $0 parameters JOBS=$JOBS NODE1=$NODE1 NODE2=$NODE2 FIRST_BLOCK=$FIRST_BLOCK LAST_BLOCK=$LAST_BLOCK - -# node block -get_ops_in_block () -{ -local NODE=$1 -local BLOCK=$2 -local JSON="" - -for tries in {1..10}; do - JSON=$(curl -s --data "{ \"jsonrpc\": \"2.0\", \"id\": \"$BLOCK\", \"method\": \"account_history_api.get_ops_in_block\", \"params\": { \"block_num\": \"$BLOCK\", \"only_virtual\": false } }" $NODE) - - if [[ $? -eq 0 && $JSON != "" ]]; then - JSON=$(echo $JSON \ - | python -c \ - 'import sys, json; \ - response=json.load(sys.stdin); \ - result=response["result"]; \ - print(json.dumps(result, sort_keys=True, indent=2))') - if [[ $? -eq 0 && $JSON != "" ]]; then - break - fi - fi -done - -echo $JSON -} # get_ops_in_block () - -# args: first_block last_block output -test_blocks () -{ -local BLOCK=$1 -local OUTPUT=$3 -local JSON1="" -local JSON2="" - -echo Blocks range: [ $1 : $2 ] >$OUTPUT -echo >>$OUTPUT - -while [ $BLOCK -le $2 ] -do - echo Comparing block number $BLOCK - - JSON1=$(get_ops_in_block $NODE1 $BLOCK &) - - JSON2=$(get_ops_in_block $NODE2 $BLOCK &) - - wait - - [[ $JSON1 == "" ]] && echo ERROR: Failed to get block $BLOCK from node $NODE1 >>$OUTPUT && exit -1 - [[ $JSON2 == "" ]] && echo ERROR: Failed to get block $BLOCK from node $NODE2 >>$OUTPUT && exit -1 - - if [[ "$JSON1" != "$JSON2" ]] - then - echo ERROR: Comparison failed at block $BLOCK >>$OUTPUT - echo $NODE1 response: >>$OUTPUT - echo "$JSON1" >>$OUTPUT - echo $NODE2 response: >>$OUTPUT - echo "$JSON2" >>$OUTPUT - EXIT_CODE=-1 - return - fi - - ((BLOCK++)) -done - -echo SUCCESS! >>$OUTPUT -} # test_blocks () - -if [ $JOBS -gt 1 ] -then - BLOCKS=$(($LAST_BLOCK-$FIRST_BLOCK+1)) - BLOCKS_PER_JOB=$(($BLOCKS/$JOBS)) - - for ((JOB=1;JOB$WDIR/$account && return - - for tries in {1..10} - do - local JSON2=$(eval $GET_AH $NODE2) - if [[ $? -eq 0 && $JSON2 != "" ]]; then - JSON2=$(echo $JSON2 | python -c \ - 'import sys, json; \ - response=json.load(sys.stdin); \ - result=response["result"]; \ - print(json.dumps(result, sort_keys=True, indent=2))') - if [[ $? -eq 0 && $JSON2 != "" ]]; then - break - fi - fi - done - [[ $? 
-ne 0 || $JSON2 == "" ]] && ((ERRORS++)) && echo ERROR: Failed to get history account for $account from node $NODE2 >$WDIR/$account && return - - if [[ "$JSON1" != "$JSON2" ]] - then - echo ERROR: Comparison failed >$WDIR/$account - echo $NODE1 response: >>$WDIR/$account - echo "$JSON1" >>$WDIR/$account - echo $NODE2 response: >>$WDIR/$account - echo "$JSON2" >>$WDIR/$account - ((ERRORS++)) - return - fi - - LAST=$(echo $JSON1 \ - | python -c \ - 'import sys, json; \ - result=json.load(sys.stdin); \ - history=result["history"]; \ - print(history[0][0] if len(history) else 0)') - - if [ $LAST -eq 0 ]; then - break - fi - - ((LAST--)) - START=$LAST - if [ $LAST -gt $HARD_LIMIT ]; then - LIMIT=$HARD_LIMIT - else - LIMIT=$LAST - fi - done # while true; do -} # get_account_history () - -CURRENT_JOBS=0 - -for account in $( <$WDIR/accounts ); do - -if [ $ERRORS -ne 0 ]; then - wait - exit -1 -fi - -if [ $CURRENT_JOBS -eq $JOBS ]; then - wait - CURRENT_JOBS=0 -fi - -((CURRENT_JOBS++)) -get_account_history $account & - -done # for account in $( -""" -import sys -import json -import requests - -def main(): - if len( sys.argv ) != 3: - exit( "Usage: create_account_list.py " ) - - url = sys.argv[1] + "/rpc" - print( url ) - filename = sys.argv[2] - - try: file = open( filename, "w" ) - except: exit( "Cannot open file " + filename ) - - headers = { 'content-type': 'application/json' } - last_account = "" - end = False - accounts_count = 0 - - while end == False: - request = { - "jsonrpc": "2.0", - "id": 0, - "method": "database_api.list_accounts", - "params": { "start": last_account, "limit": 1000, "order": "by_name" } - } - - try: - response = requests.post( url, data=json.dumps(request), headers=headers).json() - - accounts = response["result"]["accounts"] - except: - print( "rpc failed for last_account: " + last_account ) - print( response ) - end = True - continue - - if last_account != "": - assert accounts[0]["name"] == last_account - del accounts[0] - - if len( accounts ) == 0: - end = True - continue - - last_account = accounts[-1]["name"] - accounts_count += len( accounts ) - for account in accounts: - file.write( account["name"] + "\n" ) - - # while end == False - file.close() - print( str(accounts_count) + " accounts") - - -if __name__ == "__main__": - main() \ No newline at end of file diff --git a/tests/smoketest/scripts/open_node.sh b/tests/smoketest/scripts/open_node.sh new file mode 100755 index 0000000000..01ef95cee9 --- /dev/null +++ b/tests/smoketest/scripts/open_node.sh @@ -0,0 +1,51 @@ +#!/bin/bash + +function echo(){ builtin echo $(basename $0 .sh): "$@"; } +pushd () { command pushd "$@" > /dev/null; } +popd () { command popd "$@" > /dev/null; } + +if [ $# -ne 5 ] +then + echo Usage: node_kind steemd_path node_options work_path port + echo Example: reference ~/steemit/steem/build/programs/steemd/steemd --webserver-http-endpoint=127.0.0.1:8090 ~/working 8090 + exit -1 +fi + +function check_pid_port { + echo Checking that steemd with pid $1 listens at $2 port. + + NETSTAT_CMD="netstat -tlpn 2> /dev/null" + STAGE1=$(eval $NETSTAT_CMD) + STAGE2=$(echo $STAGE1 | grep -o ":$2 [^ ]* LISTEN $1/steemd") + ATTEMPT=0 + + while [[ -z $STAGE2 ]] && [ $ATTEMPT -lt 3 ]; do + sleep 1 + STAGE1=$(eval $NETSTAT_CMD) + STAGE2=$(echo $STAGE1 | grep -o ":$2 [^ ]* LISTEN $1/steemd") + ((ATTEMPT++)) + done + + if [[ -z $STAGE2 ]]; then + echo FATAL: Could not find steemd with pid $1 listening at port $2 using $NETSTAT_CMD command. 
+ echo FATAL: Most probably another steemd instance is running and listens at the port. + return 1 + else + return 0 + fi +} + +PID=0 +NAME=$1 +STEEMD_PATH=$2 +NODE_OPTIONS=$3 +WORK_PATH=$4 +TEST_PORT=$5 + +echo Running $NAME steemd to listen +$STEEMD_PATH $NODE_OPTIONS -d $WORK_PATH & PID=$! + +if check_pid_port $PID $TEST_PORT; then + STEEMD_NODE_PID=$PID + #echo $STEEMD_NODE_PID +fi diff --git a/tests/smoketest/scripts/run_replay.sh b/tests/smoketest/scripts/run_replay.sh new file mode 100755 index 0000000000..c3a3a5d07c --- /dev/null +++ b/tests/smoketest/scripts/run_replay.sh @@ -0,0 +1,47 @@ +#!/bin/bash + +function echo(){ builtin echo $(basename $0 .sh): "$@"; } +pushd () { command pushd "$@" > /dev/null; } +popd () { command popd "$@" > /dev/null; } + +if [ $# -ne 5 ] +then + echo "Usage: path_to_tested_steemd path_to_reference_steemd path_to_test_blockchain_directory path_to_reference_blockchain_directory number_of_blocks_to_replay" + echo "Example: ~/work/steemit/steem/build/programs/steemd/steemd ~/master/steemit/steem/build/programs/steemd/steemd ~/steemit/steem/work1 ~/steemit/steem/work2 2000000" + echo "Note: Run this script from test group directory." + exit -1 +fi + +STEEMD_CONFIG=config.ini +TEST_STEEMD_PATH=$1 +REF_STEEMD_PATH=$2 +TEST_WORK_PATH=$3 +REF_WORK_PATH=$4 +BLOCK_LIMIT=$5 +RET_VAL1=-1 +RET_VAL2=-1 + +function copy_config { + echo Copying ./$STEEMD_CONFIG over $1/$STEEMD_CONFIG + cp ./$STEEMD_CONFIG $1/$STEEMD_CONFIG + [ $? -ne 0 ] && echo FATAL: Failed to copy ./$STEEMD_CONFIG over $1/$STEEMD_CONFIG file. && exit -1 +} + +copy_config $TEST_WORK_PATH +copy_config $REF_WORK_PATH + +echo Running "test instance" replay of $BLOCK_LIMIT blocks +( $TEST_STEEMD_PATH --replay --stop-replay-at-block $BLOCK_LIMIT -d $TEST_WORK_PATH ) & REPLAY_PID1=$! + +echo Running "reference instance" replay of $BLOCK_LIMIT blocks +( $REF_STEEMD_PATH --replay --stop-replay-at-block $BLOCK_LIMIT -d $REF_WORK_PATH ) & REPLAY_PID2=$! + +wait $REPLAY_PID1 +RET_VAL1=$? +wait $REPLAY_PID2 +RET_VAL2=$? + +[ $RET_VAL1 -ne 0 ] && echo "FATAL: tested steemd failed to replay $BLOCK_LIMIT blocks." && exit -1 +[ $RET_VAL2 -ne 0 ] && echo "FATAL: reference steemd failed to replay $BLOCK_LIMIT blocks." && exit -1 + +exit 0 \ No newline at end of file diff --git a/tests/smoketest/smoketest.sh b/tests/smoketest/smoketest.sh index d75b248de0..30511341d9 100755 --- a/tests/smoketest/smoketest.sh +++ b/tests/smoketest/smoketest.sh @@ -8,14 +8,6 @@ JOBS=1 API_TEST_PATH=../../python_scripts/tests/api_tests BLOCK_SUBPATH=blockchain/block_log GROUP_TEST_SCRIPT=test_group.sh -STEEMD_CONFIG=config.ini -NODE_ADDRESS=127.0.0.1 -TEST_PORT=8090 -REF_PORT=8091 -TEST_NODE=$NODE_ADDRESS:$TEST_PORT -REF_NODE=$NODE_ADDRESS:$REF_PORT -TEST_NODE_OPT=--webserver-http-endpoint=$TEST_NODE -REF_NODE_OPT=--webserver-http-endpoint=$REF_NODE function echo(){ builtin echo $(basename $0 .sh): "$@"; } pushd () { command pushd "$@" > /dev/null; } @@ -27,6 +19,17 @@ function print_help_and_quit { exit $EXIT_CODE } +if [ $# -ne 5 ] +then + print_help_and_quit +fi + +TEST_STEEMD_PATH=$1 +REF_STEEMD_PATH=$2 +TEST_WORK_PATH=$3 +REF_WORK_PATH=$4 +BLOCK_LIMIT=$5 + function check_steemd_path { echo Checking $1... if [ -x "$1" ] && file "$1" | grep -q "executable" @@ -47,42 +50,6 @@ function check_work_path { fi } -function run_replay { - echo Running $1 replay of $BLOCK_LIMIT blocks - $2 --replay --stop-replay-at-block $BLOCK_LIMIT -d $3 - [ $? -ne 0 ] && echo FATAL: steemd failed to replay $BLOCK_LIMIT blocks. 
&& exit -1 -} - -function copy_config { - echo Copying ./$STEEMD_CONFIG over $1/$STEEMD_CONFIG - cp ./$STEEMD_CONFIG $1/$STEEMD_CONFIG - [ $? -ne 0 ] && echo FATAL: Failed to copy ./$STEEMD_CONFIG over $1/$STEEMD_CONFIG file. && exit -1 -} - -function check_pid_port { - echo Checking that steemd with pid $1 listens at $2 port. - - NETSTAT_CMD="netstat -tlpn 2> /dev/null" - STAGE1=$(eval $NETSTAT_CMD) - STAGE2=$(echo $STAGE1 | grep -o ":$2 [^ ]* LISTEN $1/steemd") - ATTEMPT=0 - - while [[ -z $STAGE2 ]] && [ $ATTEMPT -lt 3 ]; do - sleep 1 - STAGE1=$(eval $NETSTAT_CMD) - STAGE2=$(echo $STAGE1 | grep -o ":$2 [^ ]* LISTEN $1/steemd") - ((ATTEMPT++)) - done - - if [[ -z $STAGE2 ]]; then - echo FATAL: Could not find steemd with pid $1 listening at port $2 using $NETSTAT_CMD command. - echo FATAL: Most probably another steemd instance is running and listens at the port. - return 1 - else - return 0 - fi -} - function run_test_group { echo Running test group $1 pushd $1 @@ -94,43 +61,14 @@ function run_test_group { return fi - copy_config $TEST_WORK_PATH - copy_config $REF_WORK_PATH - - run_replay "test instance" $STEEMD_PATH $TEST_WORK_PATH - run_replay "reference instance" $REF_STEEMD_PATH $REF_WORK_PATH - - echo Running tested steemd to listen - $STEEMD_PATH $TEST_NODE_OPT -d $TEST_WORK_PATH & TEST_STEEMD_PID=$! - echo Running reference steemd to listen - $REF_STEEMD_PATH $REF_NODE_OPT -d $REF_WORK_PATH & REF_STEEMD_PID=$! - - if check_pid_port $TEST_STEEMD_PID $TEST_PORT && check_pid_port $REF_STEEMD_PID $REF_PORT; then - echo Running ./$GROUP_TEST_SCRIPT $JOBS $NODE_ADDRESS $TEST_PORT $NODE_ADDRESS $REF_PORT $BLOCK_LIMIT - ./$GROUP_TEST_SCRIPT $JOBS $NODE_ADDRESS $TEST_PORT $NODE_ADDRESS $REF_PORT $BLOCK_LIMIT - [ $? -ne 0 ] && echo test group $1 FAILED && ((GROUP_FAILURE++)) && EXIT_CODE=-1 - else - ((GROUP_FAILURE++)) - fi + echo Running ./$GROUP_TEST_SCRIPT $JOBS $TEST_STEEMD_PATH $REF_STEEMD_PATH $TEST_WORK_PATH $REF_WORK_PATH $BLOCK_LIMIT + ./$GROUP_TEST_SCRIPT $JOBS $TEST_STEEMD_PATH $REF_STEEMD_PATH $TEST_WORK_PATH $REF_WORK_PATH $BLOCK_LIMIT + [ $? -ne 0 ] && echo test group $1 FAILED && ((GROUP_FAILURE++)) && EXIT_CODE=-1 - kill -s SIGINT $TEST_STEEMD_PID - kill -s SIGINT $REF_STEEMD_PID - wait popd } -if [ $# -ne 5 ] -then - print_help_and_quit -fi - -STEEMD_PATH=$1 -REF_STEEMD_PATH=$2 -TEST_WORK_PATH=$3 -REF_WORK_PATH=$4 -BLOCK_LIMIT=$5 - -check_steemd_path $STEEMD_PATH +check_steemd_path $TEST_STEEMD_PATH check_steemd_path $REF_STEEMD_PATH check_work_path $TEST_WORK_PATH diff --git a/tests/smoketest/votes/config.ini b/tests/smoketest/votes/config.ini new file mode 100644 index 0000000000..6741ac50c5 --- /dev/null +++ b/tests/smoketest/votes/config.ini @@ -0,0 +1,60 @@ +# Plugin(s) to enable, may be specified multiple times +plugin = database_api + +# Defines a range of accounts to track as a json pair ["from","to"] [from,to] Can be specified multiple times. +# account-history-track-account-range = + +# Defines a range of accounts to track as a json pair ["from","to"] [from,to] Can be specified multiple times. Deprecated in favor of account-history-track-account-range. +# track-account-range = + +# Defines a list of operations which will be explicitly logged. +# account-history-whitelist-ops = + +# Defines a list of operations which will be explicitly logged. Deprecated in favor of history-whitelist-ops. +# whitelist-ops = + +# Defines a list of operations which will be explicitly ignored. 
+# account-history-blacklist-ops = + +# Defines a list of operations which will be explicitly ignored. Deprecated in favor of history-blacklist-ops. +# blacklist-ops = + +# the location of the chain shared memory files (absolute path or relative to application data dir) +shared-file-dir = "blockchain" + +# Size of the shared memory file. Default: 54G +shared-file-size = 8G + +# Pairs of [BLOCK_NUM,BLOCK_ID] that should be enforced as checkpoints. +# checkpoint = + +# flush shared memory changes to disk every N blocks +# flush-state-interval = + +# Database edits to apply on startup (may specify multiple times) +# debug-node-edit-script = + +# The local IP address and port to listen for incoming connections. +# p2p-endpoint = + +# Maxmimum number of incoming connections on P2P endpoint. +# p2p-max-connections = + +# The IP address and port of a remote peer to sync with. Deprecated in favor of p2p-seed-node. +# seed-node = + +# The IP address and port of a remote peer to sync with. +# p2p-seed-node = + +# Enable block production, even if the chain is stale. +enable-stale-production = false + +# Percent of witnesses (0-99) that must be participating in order to produce blocks +required-participation = 0 + +# name of witness controlled by this node (e.g. initwitness ) +# witness = + +# WIF PRIVATE KEY to be used by one or more witnesses or miners +# private-key = + diff --git a/tests/smoketest/votes/test_group.sh b/tests/smoketest/votes/test_group.sh new file mode 100755 index 0000000000..faff3a5c27 --- /dev/null +++ b/tests/smoketest/votes/test_group.sh @@ -0,0 +1,76 @@ +#!/bin/bash + +function echo(){ builtin echo $(basename $0 .sh): "$@"; } +pushd () { command pushd "$@" > /dev/null; } +popd () { command popd "$@" > /dev/null; } + +if [ $# -ne 6 ] +then + echo Usage: jobs 1st_address 1st_port 2nd_address 2nd_port working_dir + echo Example: 100 127.0.0.1 8090 ec2-34-235-166-184.compute-1.amazonaws.com 8090 logs + exit -1 +fi + +SCRIPT_DIR=../scripts +PY_SCRIPT_DIR=../../api_tests +REPLAY_SCRIPT=run_replay.sh +NODE_SCRIPT=open_node.sh +NODE_ADDRESS=127.0.0.1 +TEST_PORT=8090 +REF_PORT=8091 +TEST_NODE=$NODE_ADDRESS:$TEST_PORT +REF_NODE=$NODE_ADDRESS:$REF_PORT +TEST_NODE_OPT=--webserver-http-endpoint=$TEST_NODE +REF_NODE_OPT=--webserver-http-endpoint=$REF_NODE +EXIT_CODE=0 +JOBS=$1 +TEST_STEEMD_PATH=$2 +REF_STEEMD_PATH=$3 +TEST_WORK_PATH=$4 +REF_WORK_PATH=$5 +BLOCK_LIMIT=$6 +WDIR=$PWD/logs +TEST_STEEMD_PID=-1 +REF_STEEMD_PID=-1 +export STEEMD_NODE_PID="default" + +function run_replay { + echo Running $SCRIPT_DIR/$REPLAY_SCRIPT $TEST_STEEMD_PATH $REF_STEEMD_PATH $TEST_WORK_PATH $REF_WORK_PATH $BLOCK_LIMIT + $SCRIPT_DIR/$REPLAY_SCRIPT $TEST_STEEMD_PATH $REF_STEEMD_PATH $TEST_WORK_PATH $REF_WORK_PATH $BLOCK_LIMIT + [ $? -ne 0 ] && echo test group FAILED && exit -1 +} + +function open_node { + echo Running $SCRIPT_DIR/$NODE_SCRIPT $1 $2 $3 $4 $5 + . $SCRIPT_DIR/$NODE_SCRIPT $1 $2 $3 $4 $5 +} + +function run_test { + pushd $PY_SCRIPT_DIR + echo Running python3 $1 $JOBS http://$TEST_NODE http://$REF_NODE $WDIR + python3 $1 $JOBS http://$TEST_NODE http://$REF_NODE 50 $WDIR + [ $? 
-ne 0 ] && echo test FAILED && EXIT_CODE=-1 + popd +} + +run_replay + +open_node "tested" $TEST_STEEMD_PATH $TEST_NODE_OPT $TEST_WORK_PATH $TEST_PORT +TEST_STEEMD_PID=$STEEMD_NODE_PID + +open_node "reference" $REF_STEEMD_PATH $REF_NODE_OPT $REF_WORK_PATH $REF_PORT +REF_STEEMD_PID=$STEEMD_NODE_PID + +#echo TEST_STEEMD_PID: $TEST_STEEMD_PID REF_STEEMD_PID: $REF_STEEMD_PID +if [ $TEST_STEEMD_PID -ne -1 ] && [ $REF_STEEMD_PID -ne -1 ]; then + run_test "test_list_votes.py" + run_test "test_list_votes2.py" +else + EXIT_CODE=-1 +fi + +kill -s SIGINT $TEST_STEEMD_PID +kill -s SIGINT $REF_STEEMD_PID +wait + +exit $EXIT_CODE
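
For reference, a minimal usage sketch of the jsonsocket.steemd_call helper added above. The node URLs are placeholders (the same 8090/8091 defaults used by the smoketest scripts), and both nodes are assumed to expose account_history_api; the real test scripts loop over blocks/accounts and dump any mismatch to a log file.

    #!/usr/bin/env python3
    # compare a single account_history_api.get_ops_in_block response between two nodes
    import json
    from jsonsocket import steemd_call

    request = bytes(json.dumps({
        "jsonrpc": "2.0",
        "id": 0,
        "method": "account_history_api.get_ops_in_block",
        "params": {"block_num": 1, "only_virtual": False}
    }), "utf-8") + b"\r\n"

    status1, json1 = steemd_call("http://127.0.0.1:8090", data=request)
    status2, json2 = steemd_call("http://127.0.0.1:8091", data=request)

    # both calls must succeed and return identical payloads
    assert status1 and status2 and json1 == json2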