diff --git a/.DS_Store b/.DS_Store new file mode 100644 index 0000000..b1bc858 Binary files /dev/null and b/.DS_Store differ diff --git a/.github/workflows/publish-to-readthedocs.yml b/.github/workflows/publish-to-readthedocs.yml index 348ef37..3edaa35 100644 --- a/.github/workflows/publish-to-readthedocs.yml +++ b/.github/workflows/publish-to-readthedocs.yml @@ -22,10 +22,6 @@ jobs: - name: Trigger readthedocs build # if: github.event_name == 'push' && startsWith(github.ref_name, 'v') # trigger build/publish of latest version - - run: curl \ - -X POST \ - -H "Authorization: Token $RTDS_TOKEN" $RTDS_PROJECT/versions/latest/builds/ + - run: curl -X POST -H "Authorization: Token $RTDS_TOKEN" $RTDS_PROJECT/versions/latest/builds/ # trigger build/publish of v*.*.* version - - run: curl \ - -X POST \ - -H "Authorization: Token $RTDS_TOKEN" $RTDS_PROJECT/versions/${{ github.ref_name }}/builds/ + - run: curl -X POST -H "Authorization: Token $RTDS_TOKEN" $RTDS_PROJECT/versions/${{ github.ref_name }}/builds/ diff --git a/.pypirc b/.pypirc new file mode 100644 index 0000000..0c2536c --- /dev/null +++ b/.pypirc @@ -0,0 +1,12 @@ +[distutils] +index-servers = + pypi + testpypi + +[pypi] +repository: https://upload.pypi.org/legacy/ +username: gmfeinberg + +[testpypi] +repository: https://test.pypi.org/legacy/ +username: gmfeinberg diff --git a/docs/.DS_Store b/docs/.DS_Store new file mode 100644 index 0000000..f940bed Binary files /dev/null and b/docs/.DS_Store differ diff --git a/elasticity.txt b/elasticity.txt new file mode 100644 index 0000000..b95aa92 --- /dev/null +++ b/elasticity.txt @@ -0,0 +1,43 @@ +testing: see Yev's comments: nosql-sdk-int\node\test\elasticity_test\README.md + +new proto: + QUERY_ID 'qn' + TRACE_TO_LOG_FILES 'tf' + TOPOLOGY_INFO 'tp' + QUERY_BATCH_TRACES: 'qts', + + //query virtual scan-related fields + VIRTUAL_SCAN: 'vs', + VIRTUAL_SCANS: 'vssa', + VIRTUAL_SCAN_SID: 'vssid', + VIRTUAL_SCAN_PID: 'vspid', + VIRTUAL_SCAN_PRIM_KEY: 'vspk', + 
VIRTUAL_SCAN_SEC_KEY: 'vssk', + VIRTUAL_SCAN_MOVE_AFTER: 'vsma', + VIRTUAL_SCAN_JOIN_DESC_RESUME_KEY: 'vsjdrk', + VIRTUAL_SCAN_JOIN_PATH_TABLES: 'vsjpt', + VIRTUAL_SCAN_JOIN_PATH_KEY: 'vsjpk', + VIRTUAL_SCAN_JOIN_PATH_SEC_KEY: 'vsjpsk', + VIRTUAL_SCAN_JOIN_PATH_MATCHED: 'vsjpm' + +Java + + Client.java + o topo added + o query version add + o extra query processing and handling of version mismatch + +moved TopologyInfo out of PreparedStatement to ... ??? + + + + + +Node + serialization: + o readTopoInfo, validate it after + o read/write virtual scan info in query path + o read query traces? + o extra tracing code + + refactor: create QueryOpBase to be shared by Prepare and Query. It handles query protocol version negotiation diff --git a/page_query.py b/page_query.py new file mode 100644 index 0000000..d8f898a --- /dev/null +++ b/page_query.py @@ -0,0 +1,211 @@ +# +# Copyright (c) 2018, 2024 Oracle and/or its affiliates. All rights reserved. +# +# Licensed under the Universal Permissive License v 1.0 as shown at +# https://oss.oracle.com/licenses/upl/ +# + +# +# This is a simple example to demonstrate use of the Python driver for the +# Oracle NoSQL Database. It can be used to run against the Oracle NoSQL Database +# cloud service, against the Cloud Simulator, or against an on-premise Oracle +# NoSQL database. +# +# See the comments in config*.py about running in different environments. By +# default, the example is ready to run against the Cloud Simulator. +# +# The example demonstrates: +# o configuring and creating a handle to access the service +# o create a table +# o put, multiple write and multiple delete of simple data +# o prepare statement and query data +# o drop the table +# +# This example is not intended to be an exhaustive overview of the API, which +# has a number of additional operations. +# +# Requirements: +# 1. Python 3.5+ +# 2. Python dependencies (install using pip or other mechanism): +# o requests +# 3. 
# If running against the Cloud Simulator, it can be downloaded from here:
#    http://www.oracle.com/technetwork/topics/cloud/downloads/index.html#nosqlsdk
#    It requires Java
# 4. If running against the Oracle NoSQL Database Cloud Service an account must
#    be used along with additional authentication information. See instructions
#    in the comments in config_cloud.py
#
# To run:
#   1. set PYTHONPATH to include the parent directory of ../src/borneo
#   2. modify variables in config*.py for the runtime environment after reading
#      instructions in the comments.
#   3. run
#      $ python page_query.py
#

import traceback

from borneo import (
    DeleteRequest, GetRequest, PutRequest, QueryRequest, TableLimits,
    TableRequest)

from parameters import drop_table, table_name, tenant_id
from utils import get_handle


def main():
    """Run the example: create a table, load rows, page through a query
    with PageQuery, delete a row, and optionally drop the table."""
    handle = None
    try:
        #
        # Create a handle
        #
        handle = get_handle(tenant_id)

        #
        # Create a table
        #
        statement = 'Create table if not exists ' + table_name + '(id integer, \
sid integer, name string, primary key(shard(sid), id))'
        print('Creating table: ' + statement)
        request = TableRequest().set_statement(statement).set_table_limits(
            TableLimits(300, 10, 1))
        handle.do_table_request(request, 50000, 3000)
        print('After create table')

        #
        # Put a few rows
        #
        request = PutRequest().set_table_name(table_name)
        for i in range(10):
            value = {'id': i, 'sid': 0, 'name': 'myname' + str(i)}
            request.set_value(value)
            handle.put(request)
        print('After put of 10 rows')

        #
        # Get one row back
        #
        request = GetRequest().set_key({'id': 1, 'sid': 0}).set_table_name(
            table_name)
        result = handle.get(request)
        print('After get: ' + str(result))

        #
        # Query, using a range, paging the results 2 at a time.
        # PageQuery creates its own QueryRequest from the statement.
        #
        statement = 'select * from ' + table_name + ' where id > 2 and id < 8'
        print('Query results for: ' + statement)

        page = PageQuery(handle, statement, 2)
        while not page.is_done():
            res = page.get_next_batch()
            print('Next batch')
            for r in res:
                print('\t' + str(r))

        #
        # Delete the row
        #
        request = DeleteRequest().set_key({'id': 1, 'sid': 0}).set_table_name(
            table_name)
        result = handle.delete(request)
        print('After delete: ' + str(result))

        #
        # Get again to show deletion
        #
        request = GetRequest().set_key({'id': 1, 'sid': 0}).set_table_name(
            table_name)
        result = handle.get(request)
        print('After get (should be None): ' + str(result))

        #
        # Drop the table
        #
        if drop_table:
            request = TableRequest().set_statement(
                'drop table if exists ' + table_name)
            handle.do_table_request(request, 40000, 2000)
            print('After drop table')
        else:
            print('Not dropping table')

        print('Example is complete')
    except Exception as e:
        print(e)
        traceback.print_exc()
    finally:
        # If the handle isn't closed Python will not exit properly
        if handle is not None:
            handle.close()


class PageQuery(object):
    """Pages the results of a query in fixed-size batches.

    Wraps a borneo QueryRequest, buffering fetched rows so callers can
    consume them batch_size at a time via get_next_batch(). Call
    is_done() to detect exhaustion.
    """

    def __init__(self, handle, query, batch_size=15):
        """
        :param handle: an open NoSQLHandle used to execute the query
        :param query: the query statement (string)
        :param batch_size: maximum rows returned per get_next_batch() call
        """
        self._query_string = query
        self._handle = handle
        self._batch_size = batch_size
        self._request = QueryRequest()
        self._request.set_statement(self._query_string)
        # prime the buffer with the first set of results
        self._next(True)

    def _next(self, first_call):
        """Refill the internal buffer.

        On return either the query is done (buffer left empty) or the
        buffer is non-empty with the read index reset to 0.
        """
        self._results_list = []
        self._index = 0
        # on the first call the request has not executed yet, so
        # is_done() is not meaningful
        if not first_call and self._request.is_done():
            return
        while True:
            # loop until non-empty results or the query is done; a single
            # query() call may legitimately return zero rows mid-query
            result = self._handle.query(self._request)
            res = result.get_results()
            if not res:
                if self._request.is_done():
                    return
            else:
                self._results_list.extend(res)
                return

    def is_done(self):
        # the buffer is cleared (and left empty) once the query is done
        return not self._results_list

    def get_next_batch(self):
        """Return up to batch_size results, or None if the query is done.

        May return fewer than batch_size rows on the final batch.
        """
        if self.is_done():
            return None
        # copy what's available from the current buffer, at most
        # batch_size rows
        result = list(
            self._results_list[self._index:self._index + self._batch_size])
        # result started out empty, so advance the index by what was added
        self._index += len(result)

        if len(result) >= self._batch_size:
            return result

        # buffer exhausted before a full batch: fetch more rows
        self._next(False)

        # top up the batch if the query is not done; _next guarantees a
        # non-empty buffer in that case
        if not self.is_done():
            left_in_batch = self._batch_size - len(result)
            added = self._results_list[:left_in_batch]
            result.extend(added)
            # BUGFIX: advance the index past the rows just consumed;
            # the original left _index at 0 here, so the next call
            # returned these same rows a second time
            self._index = len(added)

        return result


if __name__ == '__main__':
    main()

# NOTE(review): this change also adds save/testkey.pem containing an RSA
# PRIVATE KEY. Private keys must never be committed to source control --
# remove the file from history and rotate the key.
# NOTE(review): the remainder of save/testkey.pem (an RSA PRIVATE KEY) was
# here. A private key must never be committed: delete the file, purge it
# from history, and rotate the key.

# --- save/testnson.py ---
# Small manual test: decode a base64 NSON payload and stream it through
# the JSON serializer, printing pretty JSON.
import base64

from borneo.nson import (Nson, JsonSerializer)
from borneo.common import ByteInputStream

# base64-encoded NSON document used to exercise the serializer
nson_str = 'BgAAAL0AAAADgGUEf4BjBgAAABMAAAADgXJ1BICBcmsEgIF3awR/gHIGAAAAlAAAAASBbWQF/QGMO0CKcYF4cAV/gXJ2AbGs7QAFdywAIf4TmR4QpkO+tXsUw49hwJgAAAAAAAAAhQEDAAAAAQAAAAEAAAAAAAIkvYBsBgAAAEIAAAADgWlkBYCDbmFtZQeCRm9vg3RhZ3MAAAAAJQAAAAODdGFncweDcm9ja4N0YWdzB4RtZXRhbIN0YWdzB4JiYXI='

nson_bytes = base64.b64decode(nson_str)
bis = ByteInputStream(nson_bytes)
ser = JsonSerializer(pretty=True)
Nson.generate_events_from_nson(bis, ser)

# --- sortperf.py ---
# Ad-hoc performance check of a sorted (ORDER BY ... DESC) query against
# a local store: times the full fetch and verifies descending order.
import os
import time

from borneo import (Regions, NoSQLHandle, NoSQLHandleConfig, PutRequest,
                    AuthorizationProvider, QueryRequest, TableRequest,
                    GetRequest)
from borneo.kv import StoreAccessTokenProvider


def get_connection():
    """Create and return a NoSQLHandle connected to a local store."""
    endpoint = 'localhost:80'
    print('Connecting to Oracle NoSQL Cloud Service at ' + endpoint)
    # swap in CloudsimProvider() to run against the Cloud Simulator
    provider = StoreAccessTokenProvider()
    config = NoSQLHandleConfig(endpoint, provider).set_logger(None)
    return NoSQLHandle(config)


def fetch_data(handle):
    """Run the ORDER BY query to completion, checking that time_created
    values arrive in non-increasing (DESC) order, and report the elapsed
    time and total result count."""
    print('Fetching data')
    statement = ("SELECT * FROM moat_integration_services_jobs "
                 "WHERE tool = 'iqt' AND active = False "
                 "ORDER BY time_created DESC")

    request = QueryRequest().set_statement(statement)
    print('Query results for: ' + statement)
    t1 = time.perf_counter()
    numres = 0
    lasttime = None
    while True:
        result = handle.query(request)
        res = result.get_results()
        for r in res:
            t = r['time_created']
            # DESC order means each value must be <= the previous one
            if lasttime is not None and not t <= lasttime:
                # BUGFIX: the original concatenated the values with '+',
                # which raises TypeError for non-str time_created values
                # exactly when the failure needs to be reported
                print(f'FAIL: not <= {lasttime}, {t}')
            lasttime = t
        numres += len(res)
        if request.is_done():
            break
    t2 = time.perf_counter()
    print(f"Time: {t2 - t1:0.4f} seconds")
    print('Num results: ' + str(numres))


def main():
    print("Inside main")
    handle = get_connection()
    fetch_data(handle)
    # hard-exit so lingering non-daemon driver threads cannot hang exit
    os._exit(0)


class CloudsimProvider(AuthorizationProvider):
    """Trivial AuthorizationProvider for use against the Cloud Simulator."""

    def __init__(self):
        super(CloudsimProvider, self).__init__()

    def close(self):
        pass

    def get_authorization_string(self, request=None):
        return 'Bearer sortperf'


if __name__ == "__main__":
    main()