diff --git a/hydrosdk/contract.py b/hydrosdk/contract.py index 92a8d8e..d15055e 100644 --- a/hydrosdk/contract.py +++ b/hydrosdk/contract.py @@ -8,7 +8,7 @@ from hydro_serving_grpc.contract import ModelContract, ModelSignature, ModelField, DataProfileType from hydro_serving_grpc.tf.types_pb2 import * -from hydrosdk.data.types import name2dtype, shape_to_proto, PY_TO_DTYPE, np2proto_dtype, proto2np_dtype +from hydrosdk.data.types import alias_to_proto_dtype, shape_to_proto, PY_TO_DTYPE, np_to_proto_dtype, proto_to_np_dtype class ContractViolationException(Exception): @@ -74,7 +74,7 @@ def field_from_dict(field_name: str, field_dict: dict) -> ModelField: subfields_buffer.append(subfield) result_subfields = subfields_buffer else: - result_dtype = name2dtype(dtype) + result_dtype = alias_to_proto_dtype(dtype) if result_dtype is not None: result_field = ModelField( @@ -374,14 +374,14 @@ def parse_field(name, dtype, shape, profile=ProfilingType.NONE): else: if dtype in DataType.keys(): # exact name e.g. DT_STRING result_dtype = dtype - elif dtype in DataType.values(): + elif dtype in DataType.values(): # int value of DataType result_dtype = dtype - elif isinstance(dtype, str): # string alias - result_dtype = name2dtype(dtype) + elif isinstance(dtype, str): # string alias e.g. 'double' + result_dtype = alias_to_proto_dtype(dtype) elif isinstance(dtype, type): # type. could be python or numpy type result_dtype = PY_TO_DTYPE.get(dtype) if not result_dtype: - result_dtype = np2proto_dtype(dtype) + result_dtype = np_to_proto_dtype(dtype) else: result_dtype = DT_INVALID @@ -590,7 +590,7 @@ def mock_input_data(signature: ModelSignature): simple_shape = [1] field_shape = tuple(np.abs(simple_shape)) size = reduce(operator.mul, field_shape) - npdtype = proto2np_dtype(field.dtype) + npdtype = proto_to_np_dtype(field.dtype) if field.dtype == DT_BOOL: x = (np.random.randn(*field_shape) >= 0).astype(np.bool) elif field.dtype in [DT_FLOAT, DT_HALF, DT_DOUBLE, DT_COMPLEX128, DT_COMPLEX64]: diff --git a/hydrosdk/data/conversions.py b/hydrosdk/data/conversions.py index 45cd74a..38552c2 100644 --- a/hydrosdk/data/conversions.py +++ b/hydrosdk/data/conversions.py @@ -1,16 +1,17 @@ -from typing import Union, Dict, List +from typing import Dict, List, Iterable import numpy as np import pandas as pd -from hydro_serving_grpc import TensorProto, DataType, TensorShapeProto +from hydro_serving_grpc import TensorProto, DataType, TensorShapeProto, DT_STRING, DT_HALF, DT_COMPLEX64, DT_COMPLEX128 from hydro_serving_grpc.contract import ModelSignature +from pandas.core.common import flatten -from hydrosdk.data.types import NP_TO_HS_DTYPE, DTYPE_TO_FIELDNAME, np2proto_shape, PY_TO_DTYPE, find_in_list_by_name, proto2np_dtype +from hydrosdk.data.types import np_to_proto_dtype, DTYPE_TO_FIELDNAME, find_in_list_by_name, proto_to_np_dtype def tensor_proto_to_py(t: TensorProto): """ - Converts tensor proto into corresponding python object + Converts tensor proto into a corresponding python object - list or scalar :param t: :return: """ @@ -25,74 +26,157 @@ def tensor_proto_to_py(t: TensorProto): return value[0] -def tensor_proto_to_nparray(t: TensorProto): +def list_to_tensor_proto(data: List, proto_dtype: DataType, proto_shape: TensorShapeProto) -> TensorProto: """ - Creates Numpy array given dtype, shape and values from TensorProto object + Converts data in a form of a Python List into a TensorProto object + :param data: List with data + :param proto_dtype: DataType of a future TensorProto + :param proto_shape: TensorShapeProto of 
a future TensorProto
+    :return: Same data but in a TensorProto object
+    """
+    # We can pack only flattened lists into TensorProto, so we need to flatten the list
+    flattened_list = flatten(data)
+    tensor_proto_parameters = {
+        DTYPE_TO_FIELDNAME[proto_dtype]: flattened_list,
+        "dtype": proto_dtype,
+        "tensor_shape": proto_shape
+    }
+    return TensorProto(**tensor_proto_parameters)
+
+
+def tensor_proto_to_np(t: TensorProto):
+    """
+    Creates either an np.ndarray or a scalar with a Numpy dtype, based on
+    the dtype, shape and values of a TensorProto object
     :param t:
     :return:
     """
     array_shape = [dim.size for dim in t.tensor_shape.dim]
-    np_dtype = proto2np_dtype(t.dtype)
-    value = getattr(t, DTYPE_TO_FIELDNAME[t.dtype])
-
-    nparray = np.array(value, dtype=np_dtype)
+    np_dtype = proto_to_np_dtype(t.dtype)
+    proto_values = getattr(t, DTYPE_TO_FIELDNAME[t.dtype])
+
+    if t.dtype == DT_HALF:
+        x = np.fromiter(proto_values, dtype=np.uint16).view(np.float16)
+    elif t.dtype == DT_STRING:
+        x = np.array([s.decode("utf-8") for s in proto_values])
+    elif t.dtype == DT_COMPLEX64 or t.dtype == DT_COMPLEX128:
+        it = iter(proto_values)
+        x = np.array([complex(x[0], x[1]) for x in zip(it, it)], dtype=np_dtype)
+    else:
+        x = np.array(proto_values, dtype=np_dtype)
 
     # If no dims specified in TensorShapeProto, then it is scalar
     if array_shape:
-        return nparray.reshape(*array_shape)
+        return x.reshape(*array_shape)
     else:
-        return np.asscalar(nparray)
+        return x.flatten()[0]
 
 
-def nparray_to_tensor_proto(x: np.array):
+def np_to_tensor_proto(x) -> TensorProto:
     """
-    Creates TensorProto object with specified dtype, shape and values under respective fieldname from np.array
-    :param x:
+    Creates TensorProto object from a Numpy ndarray or scalar with an inferred TensorShapeProto and DataType
+    :param x: Union[np.array, np.ScalarType]
     :return:
     """
-    proto_dtype = NP_TO_HS_DTYPE.get(x.dtype.type)
-    if proto_dtype is None:
-        raise ValueError(f"Couldn't convert numpy dtype {x.dtype.type} to one of available TensorProto dtypes")
+    if isinstance(x, np.ScalarType):
+        return scalar_to_tensor_proto(x)
+    elif isinstance(x, np.ndarray):
+        return nparray_to_tensor_proto(x)
+    else:
+        raise TypeError(f"Unsupported object {x}")
+
+
+def nparray_to_tensor_proto(x: np.array) -> TensorProto:
+    """
+    Creates TensorProto object from a Numpy ndarray,
+    with the TensorShapeProto and DataType inferred from it
+    :param x: Data in the form of a numpy ndarray
+    :return: Same data packed into a TensorProto object
+    """
+
+    if x.dtype.isbuiltin != 1 and x.dtype.type != np.str_:
+        raise ValueError(f"{x.dtype} is not supported."
+                         f" Dtypes not compiled into numpy are not supported, except for np.str.")
+
+    proto_dtype = np_to_proto_dtype(x.dtype.type)
+    proto_shape = tensor_shape_proto_from_tuple(x.shape)
+
+    if proto_dtype == DT_HALF:
+        proto_values = x.view(np.uint16).flatten()
+    elif proto_dtype == DT_STRING:
+        proto_values = [s.encode("utf-8") for s in x.flatten()]
+    elif proto_dtype == DT_COMPLEX64 or proto_dtype == DT_COMPLEX128:
+        proto_values = [v.item() for c_number in x.flatten() for v in [c_number.real, c_number.imag]]
+    else:
+        proto_values = x.flatten()
 
     kwargs = {
-        DTYPE_TO_FIELDNAME[proto_dtype]: x.flatten(),
+        DTYPE_TO_FIELDNAME[proto_dtype]: proto_values,
         "dtype": proto_dtype,
-        "tensor_shape": np2proto_shape(x.shape)
+        "tensor_shape": proto_shape
     }
+
     return TensorProto(**kwargs)
 
 
-def list_to_tensor_proto(data: List, dtype: str, shape: TensorShapeProto):
-    proto_dtype = DataType.Value(DataType.Name(dtype))
-    tensor_proto_parameters = {
-        DTYPE_TO_FIELDNAME[proto_dtype]: data,
+def scalar_to_tensor_proto(x: np.ScalarType) -> TensorProto:
+    """
+    Creates TensorProto object from a scalar with a Numpy dtype,
+    with the TensorShapeProto and DataType inferred from it
+    :param x: Scalar value with a Numpy dtype
+    :return: Same value but packed into a TensorProto object
+    """
+    proto_dtype = np_to_proto_dtype(type(x))
+
+    if proto_dtype == DT_HALF:
+        proto_values = [np.array(x, dtype=np.float16).view(np.uint16)]
+    elif proto_dtype == DT_STRING:
+        proto_values = [x.encode("utf-8")]
+    elif proto_dtype == DT_COMPLEX64 or proto_dtype == DT_COMPLEX128:
+        proto_values = [x.real, x.imag]
+    else:
+        proto_values = [x]
+
+    kwargs = {
+        DTYPE_TO_FIELDNAME[proto_dtype]: proto_values,
         "dtype": proto_dtype,
-        "tensor_shape": shape
+        "tensor_shape": TensorShapeProto()
     }
-    return TensorProto(**tensor_proto_parameters)
+    return TensorProto(**kwargs)
 
 
-def convert_inputs_to_tensor_proto(inputs: Union[Dict, pd.DataFrame], signature: ModelSignature) -> dict:
+def tensor_shape_proto_from_tuple(shape: Iterable[int]) -> TensorShapeProto:
+    """
+    Helper function to transform a shape in the form of a tuple (Numpy shape representation) into a TensorShapeProto
+    :param shape: Shape in a tuple form
+    :return: Same shape but in a TensorShapeProto object
     """
+    return TensorShapeProto(dim=[TensorShapeProto.Dim(size=s) for s in shape])
 
-    :param inputs:
-    :param signature:
-    :return:
+
+def convert_inputs_to_tensor_proto(inputs: Dict, signature: ModelSignature) -> Dict[str, TensorProto]:
+    """
+    Generates Dict[str, TensorProto] from a pd.DataFrame or a Dict[str, Union[np.array, np.ScalarType]]
+
+    Converts inputs into a representation of data where each field
+    of a signature is represented by a valid TensorProto object.
+    :param inputs: Dict, where keys are names of signature fields and
+    values are data in either Numpy or Python form, or alternatively,
+    pd.DataFrame, where columns are names of fields and column values are data.
+    :param signature: ModelVersion signature with the names, shapes and dtypes
+    of the fields into which `inputs` are converted
+    :return: Dictionary with TensorProtos to be used in forming a PredictRequest
     """
     tensors = {}
     if isinstance(inputs, dict):
         for key, value in inputs.items():
-
-            if type(value) in PY_TO_DTYPE:
-                # If we got a single val, we can perform the same logic in the next steps if we create List[value] from it
-                value = [value]
-
             if isinstance(value, list):  # x: [1,2,3,4]
                 signature_field = find_in_list_by_name(some_list=signature.inputs, name=key)
                 tensors[key] = list_to_tensor_proto(value, signature_field.dtype, signature_field.shape)
-
-            elif isinstance(value, np.ndarray) or isinstance(value, np.ScalarType):
-                # Support both np.ndarray and np.scalar since they support same operations on them
+            elif isinstance(value, np.ScalarType):
+                # This works for all scalars, including python int, float, etc.
+                tensors[key] = scalar_to_tensor_proto(value)
+            elif isinstance(value, np.ndarray):
                 tensors[key] = nparray_to_tensor_proto(value)
             else:
                 raise TypeError("Unsupported objects in dict values {}".format(type(value)))
@@ -101,8 +185,7 @@ def convert_inputs_to_tensor_proto(inputs: Union[Dict, pd.DataFrame], signature:
         for key, value in dict(inputs).items():
             tensors[key] = nparray_to_tensor_proto(value.ravel())
     else:
-        raise ValueError(
-            "Conversion failed. Expected [pandas.DataFrame, dict[str, numpy.ndarray], dict[str, list], dict[str, python_primitive]], got {}".format(
-                type(inputs)))
+        raise ValueError(f"Conversion failed. Expected [pandas.DataFrame, dict[str, numpy.ndarray], "
+                         f"dict[str, list], dict[str, np.ScalarType]], got {type(inputs)}")
 
     return tensors
diff --git a/hydrosdk/data/types.py b/hydrosdk/data/types.py
index 88e72af..cb3e443 100644
--- a/hydrosdk/data/types.py
+++ b/hydrosdk/data/types.py
@@ -1,12 +1,8 @@
 import numbers
 from enum import Enum
 
-from hydro_serving_grpc import DT_STRING, DT_BOOL, \
-    DT_HALF, DT_FLOAT, DT_DOUBLE, DT_INT8, DT_INT16, \
-    DT_INT32, DT_INT64, DT_UINT8, DT_UINT16, DT_UINT32, \
-    DT_UINT64, DT_QINT8, DT_QINT16, DT_QINT32, DT_QUINT8, \
-    DT_QUINT16, DT_VARIANT, DT_COMPLEX64, DT_COMPLEX128, DataType
-from hydro_serving_grpc.contract import ModelSignature
+import numpy as np
+from hydro_serving_grpc.tf import *
 
 DTYPE_TO_FIELDNAME = {
     DT_HALF: "half_val",
@@ -37,54 +33,32 @@
 
 DTYPE_TO_PY = {v: k for k, v in PY_TO_DTYPE.items()}
 
-DTYPE_ALIASES = {
-    DT_STRING: "string",
-    DT_BOOL: "bool",
-    DT_VARIANT: "variant",
-
-    DT_HALF: "float16",
-    DT_FLOAT: "float32",
-    DT_DOUBLE: "float64",
-
-    DT_INT8: "int8",
-    DT_INT16: "int16",
-    DT_INT32: "int32",
-    DT_INT64: "int64",
-
-    DT_UINT8: "uint8",
-    DT_UINT16: "uint16",
-    DT_UINT32: "uint32",
-    DT_UINT64: "uint64",
-
-    DT_QINT8: "qint8",
-    DT_QINT16: "qint16",
-    DT_QINT32: "qint32",
-
-    DT_QUINT8: "quint8",
-    DT_QUINT16: "quint16",
-
-    DT_COMPLEX64: "complex64",
-    DT_COMPLEX128: "complex128"
-}
-
-DTYPE_ALIASES_REVERSE = {
+ALIAS_TO_DTYPE = {
     "string": DT_STRING,
+    "str": DT_STRING,
     "bool": DT_BOOL,
     "variant": DT_VARIANT,
 
     "float16": DT_HALF,
     "half": DT_HALF,
     "float32": DT_FLOAT,
+    "single": DT_FLOAT,
+    "float": DT_DOUBLE,  # We treat float the same way Numpy does, see np.float_
     "float64": DT_DOUBLE,
     "double": DT_DOUBLE,
 
     "int8": DT_INT8,
+    "byte": DT_INT8,
     "int16": DT_INT16,
+    "short": DT_INT16,
     "int32": DT_INT32,
     "int64": DT_INT64,
+    "int": DT_INT64,
 
     "uint8": DT_UINT8,
+    "ubyte": DT_UINT8,
     "uint16": DT_UINT16,
+    "ushort": DT_UINT16,
     "uint32": DT_UINT32,
     "uint64": DT_UINT64,
 
@@ -97,13 +71,14 @@
     "complex64": 
DT_COMPLEX64, "complex128": DT_COMPLEX128, + "complex": DT_COMPLEX128 } scalar = "scalar" -def name2dtype(name): - type_ = DTYPE_ALIASES_REVERSE.get(name, DT_INVALID) +def alias_to_proto_dtype(name): + type_ = ALIAS_TO_DTYPE.get(name, DT_INVALID) if not type_: try: type_ = DataType.Value(name) @@ -113,14 +88,6 @@ def name2dtype(name): return type_ -def dtype2name(dtype): - return DTYPE_ALIASES.get(dtype) - - -def dtype_field(dtype): - return DTYPE_TO_FIELDNAME.get(dtype) - - def shape_to_proto(user_shape): if isinstance(user_shape, dict): user_shape = user_shape.get("dim") @@ -145,12 +112,7 @@ def shape_to_proto(user_shape): return shape -import numpy as np -from hydro_serving_grpc import DT_STRING, DT_BOOL, \ - DT_HALF, DT_FLOAT, DT_DOUBLE, DT_INT8, DT_INT16, \ - DT_INT32, DT_INT64, DT_UINT8, DT_UINT16, DT_UINT32, \ - DT_UINT64, DT_COMPLEX64, DT_COMPLEX128, TensorShapeProto, DT_INVALID - +# This dict also allows getting proper proto Dtypes for int, str and other builtin python types NP_TO_HS_DTYPE = { np.int8: DT_INT8, np.int16: DT_INT16, @@ -165,46 +127,33 @@ def shape_to_proto(user_shape): np.float32: DT_FLOAT, np.float64: DT_DOUBLE, np.float: DT_DOUBLE, - # np.float128: None, + np.float128: None, np.complex64: DT_COMPLEX64, np.complex128: DT_COMPLEX128, - # np.complex256: None, + np.complex256: None, + np.bool_: DT_BOOL, np.bool: DT_BOOL, - # np.object: None, - np.str: DT_STRING, - # np.void: None + np.str_: DT_STRING, + np.unicode_: DT_STRING, + str: DT_STRING, } HS_TO_NP_DTYPE = dict([(v, k) for k, v in NP_TO_HS_DTYPE.items()]) +HS_TO_NP_DTYPE[DT_BFLOAT16] = None -def proto2np_dtype(dt): - if dt in HS_TO_NP_DTYPE: +def proto_to_np_dtype(dt): + if HS_TO_NP_DTYPE.get(dt) is not None: return HS_TO_NP_DTYPE[dt] else: - raise KeyError("Datatype {}({}) is not supported in HydroSDK".format(DataType.Name(dt), dt)) + raise TypeError("Datatype {}({}) is not supported in HydroSDK".format(DataType.Name(dt), dt)) -def np2proto_dtype(dt): - if dt in NP_TO_HS_DTYPE: +def np_to_proto_dtype(dt): + if NP_TO_HS_DTYPE.get(dt) is not None: return NP_TO_HS_DTYPE[dt] else: - raise KeyError("Datatype {} is not supported in HydroSDK".format(dt)) - - -# TODO: method not used -def proto2np_shape(tsp): - if tsp is None or len(tsp.dim) == 0: - return tuple() - else: - shape = tuple([int(s.size) for s in tsp.dim]) - return shape - - -# TODO: method not used -def np2proto_shape(np_shape): - shape = TensorShapeProto(dim=[TensorShapeProto.Dim(size=x) for x in np_shape]) - return shape + raise TypeError("Datatype {} is not supported in HydroSDK".format(dt)) def find_in_list_by_name(some_list: list, name: str): diff --git a/hydrosdk/predictor.py b/hydrosdk/predictor.py index 9cc9871..3ab8cf7 100644 --- a/hydrosdk/predictor.py +++ b/hydrosdk/predictor.py @@ -7,7 +7,7 @@ from hydro_serving_grpc.contract import ModelSignature from hydro_serving_grpc.gateway import GatewayServiceStub, api_pb2 -from hydrosdk.data.conversions import convert_inputs_to_tensor_proto, tensor_proto_to_nparray, tensor_proto_to_py +from hydrosdk.data.conversions import convert_inputs_to_tensor_proto, tensor_proto_to_np, tensor_proto_to_py from hydrosdk.data.types import PredictorDT @@ -97,7 +97,7 @@ def predict(self, inputs: Union[pd.DataFrame, dict, pd.Series]) -> Union[pd.Data return self.predict_resp_to_df(response=response) elif self.return_type == PredictorDT.DICT_NP_ARRAY: - return self.predict_resp_to_dict_nparray(response=response) + return self.predict_resp_to_dict_np(response=response) elif self.return_type == PredictorDT.DICT_PYTHON: 
return self.predict_resp_to_dict_pydtype(response=response)
@@ -112,7 +112,7 @@ def predict_resp_to_dict_pydtype(response: PredictResponse) -> Dict:
         return output_tensors_dict
 
     @staticmethod
-    def predict_resp_to_dict_nparray(response: PredictResponse) -> Dict[str, np.array]:
+    def predict_resp_to_dict_np(response: PredictResponse) -> Dict[str, np.array]:
         """
         Transform tensors inside PredictResponse into np.arrays to create Dict[str, np.array]
         :param response:
@@ -120,7 +120,7 @@ def predict_resp_to_dict_nparray(response: PredictResponse) -> Dict[str, np.arra
         """
         output_tensors_dict = dict()
         for tensor_name, tensor_proto in response.outputs.items():
-            output_tensors_dict[tensor_name] = tensor_proto_to_nparray(tensor_proto)
+            output_tensors_dict[tensor_name] = tensor_proto_to_np(tensor_proto)
         return output_tensors_dict
 
     @staticmethod
@@ -130,5 +130,5 @@ def predict_resp_to_df(response: PredictResponse) -> pd.DataFrame:
         :param response:
         :return:
         """
-        response_dict: Dict[str, np.array] = PredictServiceClient.predict_resp_to_dict_nparray(response)
+        response_dict: Dict[str, np.array] = PredictServiceClient.predict_resp_to_dict_np(response)
         return pd.DataFrame(response_dict)
diff --git a/tests/test_conversions.py b/tests/test_conversions.py
new file mode 100644
index 0000000..2e9cd7c
--- /dev/null
+++ b/tests/test_conversions.py
@@ -0,0 +1,154 @@
+import numpy as np
+import pytest
+from hydro_serving_grpc.tf import *
+from hydro_serving_grpc.tf import TensorProto, TensorShapeProto
+
+from hydrosdk.data.conversions import np_to_tensor_proto, tensor_proto_to_np, proto_to_np_dtype, tensor_shape_proto_from_tuple
+from hydrosdk.data.types import DTYPE_TO_FIELDNAME, np_to_proto_dtype
+
+int_dtypes = [DT_INT64, DT_UINT16, DT_UINT8, DT_INT8, DT_INT16, DT_INT32, DT_UINT32, DT_UINT64]
+float_types = [DT_DOUBLE, DT_FLOAT, ]
+quantized_int_types = [DT_QINT8, DT_QINT16, DT_QINT32, DT_QUINT8, DT_QUINT16]
+unsupported_dtypes = [DT_BFLOAT16, DT_INVALID, DT_MAP, DT_RESOURCE, DT_VARIANT]
+complex_dtypes = [DT_COMPLEX64, DT_COMPLEX128, ]
+
+supported_float_np_types = [np.single, np.double, np.float, np.float32, np.float64, np.float_]
+supported_int_np_types = [np.int, np.int64, np.int8, np.uint8, np.int16, np.uint16, np.int32, np.uint32, np.uint64]
+supported_complex_np_types = [np.complex, np.complex128, np.complex64, np.csingle, np.cdouble, np.complex_]
+unsupported_np_types = [np.float128, np.complex256, np.object, np.void,
+                        np.longlong, np.ulonglong, np.clongdouble]
+
+
+class TestConversion:
+
+    @pytest.mark.parametrize("dtype", int_dtypes + float_types + [DT_STRING, DT_BOOL, DT_HALF])
+    def test_proto_dtype_to_np_to_proto(self, dtype):
+        np_type = proto_to_np_dtype(dtype)
+        restored_dtype = np_to_proto_dtype(np_type)
+        assert dtype == restored_dtype
+
+    @pytest.mark.parametrize("np_shape", [(100, 1), (-1, 100), (-1, 1), (1,), (10, 10, 10, 10,)])
+    def test_np_shape_to_proto_and_back(self, np_shape):
+        proto_shape = tensor_shape_proto_from_tuple(np_shape)
+        restored_shape = tuple([dim.size for dim in proto_shape.dim])
+        assert np_shape == restored_shape
+
+    @pytest.mark.parametrize("dtype", int_dtypes + float_types)
+    def test_tensor_to_np_array_to_tensor(self, dtype):
+        tensor_shape = TensorShapeProto(dim=[TensorShapeProto.Dim(size=3), TensorShapeProto.Dim(size=1)])
+
+        tp_kwargs = {DTYPE_TO_FIELDNAME[dtype]: [1, 2, 3],
+                     "dtype": dtype,
+                     "tensor_shape": tensor_shape}
+
+        original_tensor_proto = TensorProto(**tp_kwargs)
+        np_representation = tensor_proto_to_np(original_tensor_proto)
+        restored_tensor_proto = np_to_tensor_proto(np_representation)
+        assert restored_tensor_proto == original_tensor_proto
+
+    @pytest.mark.parametrize("dtype", float_types)
+    def test_float_tensor_to_np_array_to_tensor(self, dtype):
+        tensor_shape = TensorShapeProto(dim=[TensorShapeProto.Dim(size=3), TensorShapeProto.Dim(size=1)])
+
+        tp_kwargs = {DTYPE_TO_FIELDNAME[dtype]: [1.10, 2.20, 3.30],
+                     "dtype": dtype,
+                     "tensor_shape": tensor_shape}
+
+        original_tensor_proto = TensorProto(**tp_kwargs)
+        np_representation = tensor_proto_to_np(original_tensor_proto)
+        restored_tensor_proto = np_to_tensor_proto(np_representation)
+        assert restored_tensor_proto == original_tensor_proto
+
+    @pytest.mark.parametrize("dtype", [DT_HALF])
+    def test_half_dtype_conversion(self, dtype):
+        tensor_shape = TensorShapeProto(dim=[TensorShapeProto.Dim(size=3), TensorShapeProto.Dim(size=1)])
+        tp_kwargs = {DTYPE_TO_FIELDNAME[dtype]: np.array([1.10, 2.20, 3.30], dtype=np.float16).view(np.uint16),
+                     "dtype": dtype,
+                     "tensor_shape": tensor_shape}
+
+        original_tensor_proto = TensorProto(**tp_kwargs)
+        np_representation = tensor_proto_to_np(original_tensor_proto)
+        restored_tensor_proto = np_to_tensor_proto(np_representation)
+        assert restored_tensor_proto == original_tensor_proto
+
+    @pytest.mark.parametrize("dtype", [DT_HALF])
+    def test_half_dtype_scalar_conversion(self, dtype):
+        tensor_shape = TensorShapeProto()
+        tp_kwargs = {DTYPE_TO_FIELDNAME[dtype]: np.array([1.1], dtype=np.float16).view(np.uint16),
+                     "dtype": dtype,
+                     "tensor_shape": tensor_shape}
+
+        original_tensor_proto = TensorProto(**tp_kwargs)
+        np_representation = tensor_proto_to_np(original_tensor_proto)
+        print(type(np_representation))
+        restored_tensor_proto = np_to_tensor_proto(np_representation)
+        assert restored_tensor_proto == original_tensor_proto
+
+    @pytest.mark.parametrize("shape", [TensorShapeProto(), ])
+    def test_int_scalar_tensor_to_np_scalar_back_to_tensor(self, shape):
+        tp_kwargs = {"int64_val": [1], "dtype": DT_INT64, "tensor_shape": shape}
+        original_tensor_proto = TensorProto(**tp_kwargs)
+        np_representation = tensor_proto_to_np(original_tensor_proto)
+        restored_tensor_proto = np_to_tensor_proto(np_representation)
+
+        assert restored_tensor_proto == original_tensor_proto
+
+    @pytest.mark.parametrize("np_dtype", supported_float_np_types + supported_int_np_types)
+    def test_np_to_tensor_to_np(self, np_dtype):
+        x = np.array([1.0, 2.0, 3.0], dtype=np_dtype)
+        tensor_proto = np_to_tensor_proto(x)
+        x_restored = tensor_proto_to_np(tensor_proto)
+        assert np.all(x == x_restored)
+
+    @pytest.mark.xfail(strict=True, raises=TypeError)
+    @pytest.mark.parametrize("np_dtype", unsupported_np_types)
+    def test_unsupported_np_to_tensor_to_np(self, np_dtype):
+        x = np.array([1.0, 2.0, 3.0], dtype=np_dtype)
+        tensor_proto = np_to_tensor_proto(x)
+        x_restored = tensor_proto_to_np(tensor_proto)
+        assert np.all(x == x_restored)
+
+    def test_bool_np_to_tensor_to_np(self):
+        x = np.array([True, False, True], dtype=np.bool)
+        tensor_proto = np_to_tensor_proto(x)
+        x_restored = tensor_proto_to_np(tensor_proto)
+        assert np.all(x == x_restored)
+
+    def test_bool_scalar_to_tensor_and_back(self):
+        x = np.bool()
+        tensor_proto = np_to_tensor_proto(x)
+        x_restored = tensor_proto_to_np(tensor_proto)
+        assert x == x_restored
+
+    def test_str_np_to_tensor_to_np(self):
+        x = np.array(["a", "b", "c"], dtype=np.str)
+        tensor_proto = np_to_tensor_proto(x)
+        x_restored = tensor_proto_to_np(tensor_proto)
+        assert np.all(x == x_restored)
+
+    def 
test_str_scalar_to_tensor_and_back(self): + x = np.str("a") + tensor_proto = np_to_tensor_proto(x) + x_restored = tensor_proto_to_np(tensor_proto) + assert x == x_restored + + @pytest.mark.parametrize("dt", supported_complex_np_types) + def test_complex_np_to_tensor_to_np(self, dt): + x = np.array([-1 - 1j, -1 + 1j, +1 - 1j, +1 + 1j], dtype=dt) + tensor_proto = np_to_tensor_proto(x) + x_restored = tensor_proto_to_np(tensor_proto) + assert np.all(x == x_restored) + + @pytest.mark.parametrize("dt", supported_complex_np_types) + def test_complex_scalar_to_tensor_to_np(self, dt): + x = np.array([-1 - 1j], dtype=dt)[0] + tensor_proto = np_to_tensor_proto(x) + x_restored = tensor_proto_to_np(tensor_proto) + assert x == x_restored + + @pytest.mark.parametrize("np_dtype", supported_float_np_types + supported_int_np_types) + def test_np_scalar_to_tensor_to_np(self, np_dtype): + x = np.array([1.0], dtype=np_dtype)[0] + tensor_proto = np_to_tensor_proto(x) + x_restored = tensor_proto_to_np(tensor_proto) + assert x == x_restored diff --git a/tests/test_predictor.py b/tests/test_predictor.py index 125f33f..1a38bec 100644 --- a/tests/test_predictor.py +++ b/tests/test_predictor.py @@ -145,7 +145,7 @@ def test_predict_python_scalar_type(scalar_servable): predictions = predictor_client.predict(inputs=inputs) assert isinstance(predictions, dict) - assert isinstance(predictions['output'], int) + assert isinstance(predictions['output'], np.int64) assert predictions['output'] == value diff --git a/version b/version index a625450..e703481 100644 --- a/version +++ b/version @@ -1 +1 @@ -2.3.1 \ No newline at end of file +2.3.2 \ No newline at end of file
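Usage sketch (not part of the patch): a minimal round trip through the conversion helpers added above in hydrosdk/data/conversions.py. The array values are illustrative only.

import numpy as np

from hydrosdk.data.conversions import np_to_tensor_proto, tensor_proto_to_np

# A float64 ndarray is packed into a TensorProto with dtype DT_DOUBLE and a 2x2 TensorShapeProto.
x = np.array([[1.0, 2.0], [3.0, 4.0]], dtype=np.float64)
proto = np_to_tensor_proto(x)

# Converting back restores an ndarray with the same dtype, shape and values.
x_restored = tensor_proto_to_np(proto)
assert np.array_equal(x, x_restored)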