Skip to content

Commit

Permalink
fix comment
Browse files Browse the repository at this point in the history
  • Loading branch information
wyz5864 committed Nov 30, 2023
1 parent 0f3d2f0 commit fa2784d
Show file tree
Hide file tree
Showing 4 changed files with 30 additions and 51 deletions.
6 changes: 0 additions & 6 deletions dipu/torch_dipu/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -106,13 +106,7 @@ def script_wrapper(*args, **kwargs):
torch.jit.script = script_wrapper


def apply_cuda_tensor_patch():
if mockcuda:
_C._mockCudaTensor()


def apply_patches():
apply_cuda_tensor_patch()
apply_tensor_method_patch()
apply_torch_function_patch()
apply_dist_patch()
Expand Down
27 changes: 20 additions & 7 deletions dipu/torch_dipu/csrc_dipu/binding/ExportTensor.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -35,9 +35,11 @@ static at::Tensor dispatch_to(
non_blocking, copy);
}

// NOLINTNEXTLINE(cppcoreguidelines-avoid-c-arrays,modernize-avoid-c-arrays)
static std::shared_ptr<PyObject* [2]> splitArgs(PyObject* args) {
ssize_t rawSize = PyTuple_Size(args);
PyObject* newArgs = PyTuple_New(rawSize - 1);
// NOLINTNEXTLINE(cppcoreguidelines-avoid-c-arrays,modernize-avoid-c-arrays)
std::shared_ptr<PyObject* [2]> result(new PyObject*[2], [](PyObject** p) {
// if (p[1]) { // causes a segfault — why?
// Py_DECREF(p[1]);
Expand Down Expand Up @@ -95,6 +97,7 @@ struct PyTensorType {
THPDtype* dtype;
THPLayout* layout;
bool is_cuda;
// NOLINTNEXTLINE(cppcoreguidelines-avoid-c-arrays,modernize-avoid-c-arrays,cppcoreguidelines-avoid-magic-numbers,readability-magic-numbers)
char name[64];
int backend;
int scalar_type;
Expand All @@ -118,7 +121,7 @@ static_assert(std::is_standard_layout<PyTensorType>::value,
static PyObject* mock_Tensor_new(PyTypeObject* type, PyObject* args,
PyObject* kwargs) {
HANDLE_TH_ERRORS
auto& tensor_type = *((PyTensorType*)type);
auto& tensor_type = *(reinterpret_cast<PyTensorType*>(type));
return THPVariable_Wrap(torch::utils::legacy_tensor_ctor(
tensor_type.get_dispatch_key(), tensor_type.get_scalar_type(), args,
kwargs));
Expand All @@ -139,23 +142,31 @@ static inline at::Backend dipu_mock_backend(at::Backend backend) {
static PyObject* dipuMockCudaTensors(PyObject* _unused, PyObject* noargs) {
HANDLE_TH_ERRORS
auto torch_module = THPObjectPtr(PyImport_ImportModule("torch"));
if (!torch_module) throw python_error();
if (!torch_module) {
throw python_error();
}

auto tensor_classes = THPObjectPtr(
PyObject_GetAttrString(torch_module.get(), "_tensor_classes"));
if (!tensor_classes) throw python_error();
if (!tensor_classes) {
throw python_error();
}

auto seq = THPObjectPtr(PySequence_Fast(
tensor_classes, "torch._tensor_classes has been modified\n"));
if (!seq) throw python_error();
if (!seq) {
throw python_error();
}

Py_ssize_t len = PySequence_Fast_GET_SIZE(seq.get());
PyObject** tensor_type_array = PySequence_Fast_ITEMS(seq.get());

for (Py_ssize_t i = 0; i < len; ++i) {
// Assume no one changes the items in torch._tensor_classes, i.e. assume
// they can safely be reinterpreted as PyTensorType.
PyTensorType* tensor_type = (PyTensorType*)tensor_type_array[i];
// NOLINTNEXTLINE(modernize-use-auto)
PyTensorType* tensor_type =
reinterpret_cast<PyTensorType*>(tensor_type_array[i]);
tensor_type->py_type.tp_new = mock_Tensor_new;
tensor_type->backend =
static_cast<int>(dipu_mock_backend(tensor_type->get_backend()));
Expand All @@ -168,10 +179,12 @@ static PyObject* dipuMockCudaTensors(PyObject* _unused, PyObject* noargs) {
// We prefer to use pybind11 to export patch functions; the raw CPython API is
// used only for patching tensor functions whose complex dynamic parameters are
// not easy to parse with pybind.
// NOLINTNEXTLINE(cppcoreguidelines-avoid-c-arrays,modernize-avoid-c-arrays,cppcoreguidelines-avoid-non-const-global-variables)
static PyMethodDef TorchTensorMethods[] = {
{"dipu", castPyCFunctionWithKeywords(THPVariable_dipu),
METH_VARARGS | METH_KEYWORDS, NULL},
{"_mockCudaTensor", (PyCFunction)dipuMockCudaTensors, METH_NOARGS, nullptr},
METH_VARARGS | METH_KEYWORDS, nullptr},
{"_mockCudaTensor", reinterpret_cast<PyCFunction>(dipuMockCudaTensors),
METH_NOARGS, nullptr},
{nullptr, nullptr, 0, nullptr}};

DIPU_API PyMethodDef* exportTensorFunctions() { return TorchTensorMethods; }
Expand Down
4 changes: 0 additions & 4 deletions dipu/torch_dipu/dipu/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -21,10 +21,6 @@
# resume initialize flag after random generator ready
# "is_initialized",

# tensor
# 'FloatTensor', 'DoubleTensor', 'HalfTensor',
# 'LongTensor', 'IntTensor', 'ShortTensor', 'ByteTensor', 'CharTensor', 'BoolTensor',

# device
"can_device_access_peer", "current_device", "devicectx", "device_count", "device_of", "synchronize",
"get_device_name", "get_device_properties", "get_device_capability", "is_available", "set_device",
Expand Down
44 changes: 10 additions & 34 deletions dipu/torch_dipu/dipu/tensor.py
Original file line number Diff line number Diff line change
@@ -1,45 +1,21 @@
# Copyright (c) 2023, DeepLink.
import torch
from .device import __diputype__


# Replace the types exported from csrc/tensor/python_tensor.cpp.
# Do we also need to replace torch._tensor_classes? (It seems unnecessary for now.)
class _MetaTensorType(type):
def __instancecheck__(cls, inst):
if isinstance(inst, torch.Tensor):
if inst.device.type == __diputype__ and inst.dtype == cls.dtype:
return True
return False
from .device import __diputype__
from torch_dipu import _C, mockcuda

class FloatTensor(metaclass=_MetaTensorType):
dtype = torch.float
class DoubleTensor(metaclass=_MetaTensorType):
dtype= torch.float64
class HalfTensor(metaclass=_MetaTensorType):
dtype= torch.float16

class LongTensor(metaclass=_MetaTensorType):
dtype = torch.int64
class IntTensor(metaclass=_MetaTensorType):
dtype = torch.int32
class ShortTensor(metaclass=_MetaTensorType):
dtype = torch.int16
class ByteTensor(metaclass=_MetaTensorType):
dtype = torch.uint8
class CharTensor(metaclass=_MetaTensorType):
dtype = torch.int8
class BoolTensor(metaclass=_MetaTensorType):
dtype = torch.bool
_default_tensor_type = torch.FloatTensor


_default_tensor_type = FloatTensor
def __set_default_tensor_type(type = torch.FloatTensor):
print(" warnning!! dipu not support default tensor setting now!, this func is empty")
global _default_tensor_type
_default_tensor_type = type

def __set_default_tensor_type(type = FloatTensor):
print(" warnning!! dipu not support default tensor setting now!, this func is empty")
global _default_tensor_type
_default_tensor_type = type

# Needs enhancement: it seems the tensor definition itself needs to be changed.
def apply_tensor_type_patch():
torch.set_default_tensor_type = __set_default_tensor_type
torch.set_default_tensor_type = __set_default_tensor_type
if mockcuda:
_C._mockCudaTensor()

0 comments on commit fa2784d

Please sign in to comment.