Run clang-format command from Makefile #377

Merged · 2 commits · Jul 22, 2024
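The Makefile change that gives this PR its title is not among the hunks shown below; the visible diff is the docs update plus the clang-format fallout across sources, examples, and tests. A rough sketch of what such a target usually looks like (the target names, directory list, and flags here are assumptions, not taken from this PR; recipe lines need hard tabs):

```make
# Sketch only: reformat C++ sources in place with clang-format.
# Directory list and style options are illustrative, not from this PR.
CLANG_FORMAT ?= clang-format

format:
	find src test examples \( -iname '*.cpp' -o -iname '*.hpp' \) \
		| xargs $(CLANG_FORMAT) -i --style=file

# CI-friendly variant: exits non-zero when any file is misformatted.
format-check:
	find src test examples \( -iname '*.cpp' -o -iname '*.hpp' \) \
		| xargs $(CLANG_FORMAT) --dry-run --Werror --style=file
```

With a target like this, `make format` produces exactly the kind of whitespace-only churn visible in the hunks below, while `make format-check` can gate CI without touching files.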
1 change: 1 addition & 0 deletions docs/overview/shaders-to-headers.rst
@@ -87,6 +87,7 @@ If you are using CMake as build system you can use CMake to directly convert you
GIT_TAG 1344ece4ac278f9b3be3b4555ffaace7a032b91f) # The commit hash for a dev version before v0.9.0. Replace with the latest from: https://github.com/KomputeProject/kompute/releases
FetchContent_MakeAvailable(kompute)
include_directories(${kompute_SOURCE_DIR}/src/include)
include_directories(${kompute_BINARY_DIR}/src/shaders/glsl)

# Add to the list, so CMake can later find the code to compile shaders to header files
list(APPEND CMAKE_PREFIX_PATH "${kompute_SOURCE_DIR}/cmake")
@@ -23,8 +23,8 @@

// Allows us to use the C++ sleep function to wait when loading the
// Vulkan library in android
#include <unistd.h>
#include <kompute/logger/Logger.hpp>
#include <unistd.h>

static std::vector<float>
jfloatArrayToVector(JNIEnv* env, const jfloatArray& fromArray)
@@ -3,7 +3,6 @@
#include "my_shader.hpp"
#include <kompute/Kompute.hpp>


KomputeModelML::KomputeModelML() {}

KomputeModelML::~KomputeModelML() {}
2 changes: 1 addition & 1 deletion examples/array_multiplication/src/main.cpp
@@ -3,8 +3,8 @@
#include <memory>
#include <vector>

#include <shader/my_shader.hpp>
#include <kompute/Kompute.hpp>
#include <shader/my_shader.hpp>

int
main()
@@ -43,7 +43,8 @@ KomputeSummatorNode::add(float value)

void
KomputeSummatorNode::reset()
{}
{
}

float
KomputeSummatorNode::get_total() const
@@ -97,7 +98,8 @@ KomputeSummatorNode::_init()

void
KomputeSummatorNode::_process(float delta)
{}
{
}

void
KomputeSummatorNode::_bind_methods()
@@ -43,7 +43,8 @@ KomputeSummator::add(float value)

void
KomputeSummator::reset()
{}
{
}

float
KomputeSummator::get_total() const
@@ -97,7 +98,8 @@ KomputeSummator::_init()

void
KomputeSummator::_process(float delta)
{}
{
}

void
KomputeSummator::_register_methods()
@@ -147,7 +147,8 @@ KomputeModelMLNode::_init()

void
KomputeModelMLNode::_process(float delta)
{}
{
}

void
KomputeModelMLNode::_bind_methods()
@@ -151,7 +151,8 @@ KomputeModelML::_init()

void
KomputeModelML::_process(float delta)
{}
{
}

void
KomputeModelML::_register_methods()
21 changes: 7 additions & 14 deletions examples/logistic_regression/src/main.cpp
@@ -20,17 +20,13 @@ main()
std::shared_ptr<kp::TensorT<float>> y = mgr.tensor({ 0, 0, 0, 1, 1 });

std::shared_ptr<kp::TensorT<float>> wIn = mgr.tensor({ 0.001, 0.001 });
std::shared_ptr<kp::TensorT<float>> wOutI =
mgr.tensor({ 0, 0, 0, 0, 0 });
std::shared_ptr<kp::TensorT<float>> wOutJ =
mgr.tensor({ 0, 0, 0, 0, 0 });
std::shared_ptr<kp::TensorT<float>> wOutI = mgr.tensor({ 0, 0, 0, 0, 0 });
std::shared_ptr<kp::TensorT<float>> wOutJ = mgr.tensor({ 0, 0, 0, 0, 0 });

std::shared_ptr<kp::TensorT<float>> bIn = mgr.tensor({ 0 });
std::shared_ptr<kp::TensorT<float>> bOut =
mgr.tensor({ 0, 0, 0, 0, 0 });
std::shared_ptr<kp::TensorT<float>> bOut = mgr.tensor({ 0, 0, 0, 0, 0 });

std::shared_ptr<kp::TensorT<float>> lOut =
mgr.tensor({ 0, 0, 0, 0, 0 });
std::shared_ptr<kp::TensorT<float>> lOut = mgr.tensor({ 0, 0, 0, 0, 0 });

std::vector<std::shared_ptr<kp::Tensor>> params = { xI, xJ, y,
wIn, wOutI, wOutJ,
@@ -40,9 +36,8 @@

std::vector<uint32_t> spirv2{ 0x1, 0x2 };

std::vector<uint32_t> spirv(
shader::MY_SHADER_COMP_SPV.begin(),
shader::MY_SHADER_COMP_SPV.end());
std::vector<uint32_t> spirv(shader::MY_SHADER_COMP_SPV.begin(),
shader::MY_SHADER_COMP_SPV.end());

std::shared_ptr<kp::Algorithm> algorithm = mgr.algorithm(
params, spirv, kp::Workgroup({ 5 }), std::vector<float>({ 5.0 }));
@@ -69,9 +64,7 @@
wIn->data()[1],
bIn->data()[0]);

if (wIn->data()[0] > 0.01 ||
wIn->data()[1] < 1.0 ||
bIn->data()[0] > 0.0) {
if (wIn->data()[0] > 0.01 || wIn->data()[1] < 1.0 || bIn->data()[0] > 0.0) {
throw std::runtime_error("Result does not match");
}
}
6 changes: 4 additions & 2 deletions src/Manager.cpp
@@ -176,8 +176,10 @@ Manager::createInstance()
#ifdef __APPLE__
// Required for backwards compatibility for MacOS M1 devices
// https://stackoverflow.com/questions/72374316/validation-error-on-device-extension-on-m1-mac
applicationExtensions.push_back(VK_KHR_PORTABILITY_ENUMERATION_EXTENSION_NAME);
computeInstanceCreateInfo.flags |= vk::InstanceCreateFlagBits::eEnumeratePortabilityKHR;
applicationExtensions.push_back(
VK_KHR_PORTABILITY_ENUMERATION_EXTENSION_NAME);
computeInstanceCreateInfo.flags |=
vk::InstanceCreateFlagBits::eEnumeratePortabilityKHR;
#endif

if (!applicationExtensions.empty()) {
12 changes: 7 additions & 5 deletions src/OpTensorCopy.cpp
@@ -62,17 +62,19 @@ OpTensorCopy::postEval(const vk::CommandBuffer& /*commandBuffer*/)
KP_LOG_DEBUG("Kompute OpTensorCopy postEval called");

// Do not copy on CPU side if source is storage tensor
if (this->mTensors[0]->tensorType() == kp::Tensor::TensorTypes::eStorage)
{
KP_LOG_DEBUG("Kompute OpTensorCopy not copying tensor source given it's of eStorage type");
if (this->mTensors[0]->tensorType() == kp::Tensor::TensorTypes::eStorage) {
KP_LOG_DEBUG("Kompute OpTensorCopy not copying tensor source given "
"it's of eStorage type");
return;
}
void* data = this->mTensors[0]->rawData();

// Copy the data from the first tensor into all the tensors
for (size_t i = 1; i < this->mTensors.size(); i++) {
if (this->mTensors[i]->tensorType() == kp::Tensor::TensorTypes::eStorage) {
KP_LOG_DEBUG("Kompute OpTensorCopy not copying to tensor dest given it's of eStorage type");
if (this->mTensors[i]->tensorType() ==
kp::Tensor::TensorTypes::eStorage) {
KP_LOG_DEBUG("Kompute OpTensorCopy not copying to tensor dest "
"given it's of eStorage type");
continue;
}
this->mTensors[i]->setRawData(data);
5 changes: 3 additions & 2 deletions src/Sequence.cpp
@@ -214,8 +214,9 @@ Sequence::destroy()
return;
}

if(this->mFence) {
this->mDevice->destroy(this->mFence, (vk::Optional<const vk::AllocationCallbacks>)nullptr);
if (this->mFence) {
this->mDevice->destroy(
this->mFence, (vk::Optional<const vk::AllocationCallbacks>)nullptr);
}

if (this->mFreeCommandBuffer) {
9 changes: 4 additions & 5 deletions src/Tensor.cpp
@@ -156,8 +156,8 @@ Tensor::mapRawData()
} else if (this->mTensorType == TensorTypes::eDevice) {
hostVisibleMemory = this->mStagingMemory;
} else {
KP_LOG_WARN(
"Kompute Tensor mapping data not supported on {} tensor", toString(this->tensorType()));
KP_LOG_WARN("Kompute Tensor mapping data not supported on {} tensor",
toString(this->tensorType()));
return;
}

@@ -167,7 +167,6 @@
// flush
this->mRawData = this->mDevice->mapMemory(
*hostVisibleMemory, 0, bufferSize, vk::MemoryMapFlags());

}

void
@@ -183,8 +182,8 @@ Tensor::unmapRawData()
} else if (this->mTensorType == TensorTypes::eDevice) {
hostVisibleMemory = this->mStagingMemory;
} else {
KP_LOG_WARN(
"Kompute Tensor mapping data not supported on {} tensor", toString(this->tensorType()));
KP_LOG_WARN("Kompute Tensor mapping data not supported on {} tensor",
toString(this->tensorType()));
return;
}

4 changes: 2 additions & 2 deletions src/include/kompute/logger/Logger.hpp
@@ -24,8 +24,8 @@
static const char* KOMPUTE_LOG_TAG = "KomputeLog";
#else
#if KOMPUTE_BUILD_PYTHON
#include <pybind11/pybind11.h>
#include <fmt/core.h>
#include <pybind11/pybind11.h>
namespace py = pybind11;
// from python/src/main.cpp
extern py::object kp_trace, kp_debug, kp_info, kp_warning, kp_error;
@@ -88,7 +88,7 @@ setupLogger();
fmt::print("[{} {}] [debug] [{}:{}] {}\n", \
__DATE__, \
__TIME__, \
__FILE_NAME__, \
__FILE_NAME__, \
__LINE__, \
fmt::format(__VA_ARGS__))
#else
17 changes: 7 additions & 10 deletions src/logger/Logger.cpp
@@ -52,17 +52,14 @@ setupLogger()
// TODO: Add flag in compile flags
std::shared_ptr<spdlog::logger> logger =
#if KOMPUTE_SPDLOG_ASYNC_LOGGING
std::make_shared<spdlog::async_logger>(
"",
sinks.begin(),
sinks.end(),
spdlog::thread_pool(),
spdlog::async_overflow_policy::block);
std::make_shared<spdlog::async_logger>(
"",
sinks.begin(),
sinks.end(),
spdlog::thread_pool(),
spdlog::async_overflow_policy::block);
#else
std::make_shared<spdlog::logger>(
"",
sinks.begin(),
sinks.end());
std::make_shared<spdlog::logger>("", sinks.begin(), sinks.end());
#endif

logger->set_level(getLogLevel());
19 changes: 12 additions & 7 deletions test/TestOpShadersFromStringAndFile.cpp
@@ -10,13 +10,15 @@
#include "test_shader.hpp"

// Introducing custom struct that can be used for tensors
struct TestStruct {
struct TestStruct
{
float x;
uint32_t y;
int32_t z;

// Creating an == operator overload for the comparison below
bool operator==(const TestStruct rhs) const {
bool operator==(const TestStruct rhs) const
{
return this->x == rhs.x && this->y == rhs.y && this->z == rhs.z;
}
};
@@ -55,8 +57,10 @@ TEST(TestShader, ShaderRawDataFromConstructorCustomDataType)

kp::Manager mgr;

std::shared_ptr<kp::TensorT<TestStruct>> tensorA = mgr.tensorT<TestStruct>({ { 0.1, 2, 3} });
std::shared_ptr<kp::TensorT<TestStruct>> tensorB = mgr.tensorT<TestStruct>({ { 0.0, 0, 0} });
std::shared_ptr<kp::TensorT<TestStruct>> tensorA =
mgr.tensorT<TestStruct>({ { 0.1, 2, 3 } });
std::shared_ptr<kp::TensorT<TestStruct>> tensorB =
mgr.tensorT<TestStruct>({ { 0.0, 0, 0 } });

std::vector<uint32_t> spirv = compileSource(shader);

@@ -67,8 +71,10 @@
->eval<kp::OpAlgoDispatch>(mgr.algorithm(params, spirv))
->eval<kp::OpTensorSyncLocal>(params);

EXPECT_EQ(tensorA->vector(), std::vector<TestStruct>({ TestStruct{0.1, 2, 3} }));
EXPECT_EQ(tensorB->vector(), std::vector<TestStruct>({ TestStruct{0.1, 2, 3} }));
EXPECT_EQ(tensorA->vector(),
std::vector<TestStruct>({ TestStruct{ 0.1, 2, 3 } }));
EXPECT_EQ(tensorB->vector(),
std::vector<TestStruct>({ TestStruct{ 0.1, 2, 3 } }));
}

TEST(TestShaderEndianness, ShaderRawDataFromConstructor)
@@ -151,4 +157,3 @@ TEST(TestOpAlgoCreate, ShaderCompiledDataFromConstructor)
EXPECT_EQ(tensorA->vector(), std::vector<float>({ 0, 1, 2 }));
EXPECT_EQ(tensorB->vector(), std::vector<float>({ 3, 4, 5 }));
}

30 changes: 14 additions & 16 deletions test/TestOpTensorCopy.cpp
@@ -169,13 +169,13 @@ TEST(TestOpTensorCopy, CopyThroughStorageTensor)
std::shared_ptr<kp::TensorT<float>> tensorOut = mgr.tensor(testVecOut);
// Tensor storage requires a vector to be passed only to reflect size
std::shared_ptr<kp::TensorT<float>> tensorStorage =
mgr.tensor({ 0, 0, 0 }, kp::Tensor::TensorTypes::eStorage);
mgr.tensor({ 0, 0, 0 }, kp::Tensor::TensorTypes::eStorage);

mgr.sequence()
->eval<kp::OpTensorSyncDevice>({ tensorIn, tensorOut })
->eval<kp::OpTensorCopy>({ tensorIn, tensorStorage })
->eval<kp::OpTensorCopy>({ tensorStorage, tensorOut })
->eval<kp::OpTensorSyncLocal>({ tensorIn, tensorOut });
->eval<kp::OpTensorSyncDevice>({ tensorIn, tensorOut })
->eval<kp::OpTensorCopy>({ tensorIn, tensorStorage })
->eval<kp::OpTensorCopy>({ tensorStorage, tensorOut })
->eval<kp::OpTensorSyncLocal>({ tensorIn, tensorOut });

// Making sure the GPU holds the same vector
EXPECT_EQ(tensorIn->vector(), tensorOut->vector());
@@ -192,7 +192,7 @@ TEST(TestOpTensorCopy, CopyTensorThroughStorageViaAlgorithms)
std::shared_ptr<kp::TensorT<float>> tensorOut = mgr.tensor(testVecOut);
// Tensor storage requires a vector to be passed only to reflect size
std::shared_ptr<kp::TensorT<float>> tensorStorage =
mgr.tensor({ 0, 0, 0 }, kp::Tensor::TensorTypes::eStorage);
mgr.tensor({ 0, 0, 0 }, kp::Tensor::TensorTypes::eStorage);

EXPECT_TRUE(tensorIn->isInit());
EXPECT_TRUE(tensorOut->isInit());
@@ -213,9 +213,8 @@
}
)");

auto algoA = mgr.algorithm(
{ tensorIn, tensorStorage },
compileSource(shaderA));
auto algoA =
mgr.algorithm({ tensorIn, tensorStorage }, compileSource(shaderA));

// Copy from storage tensor to output tensor
std::string shaderB = (R"(
Expand All @@ -233,15 +232,14 @@ TEST(TestOpTensorCopy, CopyTensorThroughStorageViaAlgorithms)
}
)");

auto algoB = mgr.algorithm(
{ tensorStorage, tensorOut },
compileSource(shaderB));
auto algoB =
mgr.algorithm({ tensorStorage, tensorOut }, compileSource(shaderB));

mgr.sequence()
->eval<kp::OpTensorSyncDevice>({ tensorIn })
->eval<kp::OpAlgoDispatch>(algoA)
->eval<kp::OpAlgoDispatch>(algoB)
->eval<kp::OpTensorSyncLocal>({ tensorOut });
->eval<kp::OpTensorSyncDevice>({ tensorIn })
->eval<kp::OpAlgoDispatch>(algoA)
->eval<kp::OpAlgoDispatch>(algoB)
->eval<kp::OpTensorSyncLocal>({ tensorOut });

// Making sure the GPU holds the same vector
EXPECT_EQ(tensorIn->vector(), tensorOut->vector());