Skip to content

Commit

Permalink
Merge branch 'main' into add_memristor_dialect_and_conversions
Browse files Browse the repository at this point in the history
  • Loading branch information
oowekyala committed Sep 11, 2024
2 parents 8944c5d + 6241385 commit dcd57f8
Show file tree
Hide file tree
Showing 8 changed files with 158 additions and 55 deletions.
58 changes: 58 additions & 0 deletions .github/workflows/build-ci.sh
Original file line number Diff line number Diff line change
@@ -0,0 +1,58 @@
#!/bin/bash

project_root="$( cd -- "$(dirname "$0")/../.." >/dev/null 2>&1 ; pwd -P )"
echo "Project root: $project_root"

llvm_path="$project_root/llvm"
cinnamon_path="$project_root/cinnamon"
upmem_path="$project_root/upmem"

export PATH=$llvm_path/build/bin:$PATH

if [[ $1 != "no-llvm" ]]; then
if [ ! -d "$llvm_path" ]; then
git clone https://github.com/oowekyala/llvm-project "$llvm_path"

cd "$llvm_path"

git checkout cinnamon-llvm
cmake -S llvm -B build \
-DLLVM_ENABLE_PROJECTS="mlir;llvm;clang" \
-DLLVM_TARGETS_TO_BUILD="host" \
-DLLVM_ENABLE_ASSERTIONS=ON \
-DMLIR_ENABLE_BINDINGS_PYTHON=OFF \
-DLLVM_BUILD_TOOLS=OFF \
-DCMAKE_BUILD_TYPE=Release \
-DBUILD_SHARED_LIBS=ON \
-DLLVM_OPTIMIZED_TABLEGEN=ON \
-DLLVM_EXPERIMENTAL_TARGETS_TO_BUILD=SPIRV \
$LLVM_CMAKE_OPTIONS
fi

cd "$llvm_path"
git pull
cmake --build build --target all llc opt
fi

if [ ! -d "$upmem_path" ]; then
upmem_archive="upmem.tar.gz"
curl http://sdk-releases.upmem.com/2024.1.0/ubuntu_22.04/upmem-2024.1.0-Linux-x86_64.tar.gz --output "$upmem_archive"
mkdir "$upmem_path"
tar xf "$upmem_archive" -C "$upmem_path" --strip-components=1
rm "$upmem_archive"
fi

cd "$cinnamon_path"

if [ ! -d "build" ]; then
cmake -S . -B "build" \
-DCMAKE_BUILD_TYPE=RelWithDebInfo \
-DLLVM_DIR="$llvm_path"/build/lib/cmake/llvm \
-DMLIR_DIR="$llvm_path"/build/lib/cmake/mlir \
-DUPMEM_DIR="$upmem_path" \
-DCINM_BUILD_GPU_SUPPORT=ON \
-DCMAKE_EXPORT_COMPILE_COMMANDS=ON \
$CINNAMON_CMAKE_OPTIONS
fi

cmake --build build --target all
46 changes: 46 additions & 0 deletions .github/workflows/build_and_test.yml
Original file line number Diff line number Diff line change
@@ -0,0 +1,46 @@
name: Build and test cinnamon
run-name: 'Build and Test: ${{ github.event.head_commit.message }}'
on:
workflow_dispatch:
push:
jobs:
main:
name: Build and test
runs-on: ubuntu-22.04
env:
CC: clang
CXX: clang++
LDFLAGS: -fuse-ld=mold
CMAKE_GENERATOR: Ninja

steps:
- name: Check out repository
uses: actions/checkout@v3

- name: Install build dependencies
run: sudo apt-get install clang ninja-build mold libvulkan-dev

- name: Restore dependency cache
id: dependency-cache-restore
uses: actions/cache/restore@v3
with:
path: |
llvm
upmem
key: cinnamon-dependencies-${{ runner.os }}

- name: Build
run: .github/workflows/build-ci.sh

- name: Test
working-directory: cinnamon/build
run: ninja check-cinm-mlir

- name: Save dependency cache
uses: actions/cache/save@v3
if: always()
with:
path: |
llvm
upmem
key: ${{ steps.dependency-cache-restore.outputs.cache-primary-key }}
82 changes: 39 additions & 43 deletions build.sh
Original file line number Diff line number Diff line change
@@ -1,47 +1,43 @@
#!/bin/bash

if [[ $1 != "no-llvm" ]]; then
project_root="$( cd -- "$(dirname "$0")" >/dev/null 2>&1 ; pwd -P )"
llvm_path="$project_root/llvm"
cinnamon_path="$project_root/cinnamon"

export PATH=$llvm_path/build/bin:$PATH

git clone https://github.com/oowekyala/llvm-project llvm
cd llvm
git checkout cinnamon-llvm
mkdir -p build
cd build

cmake -G "Ninja" ../llvm \
-DLLVM_ENABLE_PROJECTS="mlir;llvm;clang" \
-DLLVM_TARGETS_TO_BUILD="host" \
-DLLVM_ENABLE_ASSERTIONS=ON \
-DMLIR_ENABLE_BINDINGS_PYTHON=OFF \
-DLLVM_BUILD_TOOLS=OFF \
-DCMAKE_BUILD_TYPE=Release \
-DBUILD_SHARED_LIBS=ON \
-DLLVM_OPTIMIZED_TABLEGEN=ON

ninja
ninja llc
ninja opt

export PATH=$(pwd)/bin:$PATH

cd ../..
else
export PATH=$(pwd)/llvm/build/bin:$PATH
fi

cd cinnamon
llvm_prefix=../llvm/build

cmake -S . -B "build" \
-G Ninja \
if [[ $1 != "no-llvm" ]]; then
if [ ! -d "$llvm_path" ]; then
git clone https://github.com/oowekyala/llvm-project "$llvm_path"

cd "$llvm_path"

git checkout cinnamon-llvm
cmake -S llvm -B build \
-DLLVM_ENABLE_PROJECTS="mlir;llvm;clang" \
-DLLVM_TARGETS_TO_BUILD="host" \
-DLLVM_ENABLE_ASSERTIONS=ON \
-DMLIR_ENABLE_BINDINGS_PYTHON=OFF \
-DLLVM_BUILD_TOOLS=OFF \
-DCMAKE_BUILD_TYPE=Release \
-DBUILD_SHARED_LIBS=ON \
-DLLVM_OPTIMIZED_TABLEGEN=ON
fi

cd "$llvm_path"
git pull
cmake --build build --target all llc opt
fi

cd "$cinnamon_path"

if [ ! -d "build" ]; then
cmake -S . -B "build" \
-DCMAKE_BUILD_TYPE=RelWithDebInfo \
-DLLVM_DIR="$llvm_prefix"/lib/cmake/llvm \
-DMLIR_DIR="$llvm_prefix"/lib/cmake/mlir \
-DUPMEM_DIR=/opt/upmem/upmem-2023.2.0-Linux-x86_64 \
-DCMAKE_EXPORT_COMPILE_COMMANDS=1 \
-DCMAKE_C_COMPILER=clang \
-DCMAKE_LINKER_TYPE=DEFAULT \
-DCMAKE_CXX_COMPILER=clang++ \
-DCMAKE_EXPORT_COMPILE_COMMANDS=ON

cd build && ninja
-DLLVM_DIR="$llvm_path"/build/lib/cmake/llvm \
-DMLIR_DIR="$llvm_path"/build/lib/cmake/mlir \
-DCMAKE_EXPORT_COMPILE_COMMANDS=ON \
$CINNAMON_CMAKE_OPTIONS
fi

cmake --build build --target all
10 changes: 5 additions & 5 deletions cinnamon/lib/Conversion/CnmToGPU/CnmToGPU.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -314,19 +314,19 @@ void populateCnmToGPUFinalTypeConversions(TypeConverter &typeConverter) {
});
}

void populateCnmToGPUConversionPatterns(TypeConverter &typeConverter,
RewritePatternSet &patterns) {
void populateCnmToGPUConversionPatterns(RewritePatternSet &patterns,
MLIRContext *context) {
patterns
.add<cnmtogpu::ConvertCnmWorkgroupToGPU, cnmtogpu::ConvertCnmAllocToGPU,
ConvertCnmSetZeroToAffine, cnmtogpu::ConvertCnmScatterToGPU,
cnmtogpu::ConvertCnmGatherToGPU, cnmtogpu::ConvertCnmLaunchToGPU,
cnmtogpu::ConvertCnmTerminatorToGPU>(&typeConverter.getContext());
cnmtogpu::ConvertCnmTerminatorToGPU>(context);
}

struct ConvertCnmToGPUPass
: public ::impl::ConvertCnmToGPUPassBase<ConvertCnmToGPUPass> {
void runOnOperation() final {
TypeConverter converter(&getContext());
TypeConverter converter{};
populateCnmToGPUFinalTypeConversions(converter);
const auto addUnrealizedCast = [](OpBuilder &builder, Type type,
ValueRange inputs, Location loc) {
Expand All @@ -337,7 +337,7 @@ struct ConvertCnmToGPUPass
converter.addTargetMaterialization(addUnrealizedCast);

RewritePatternSet patterns(&getContext());
populateCnmToGPUConversionPatterns(converter, patterns);
populateCnmToGPUConversionPatterns(patterns, &getContext());
populateReconcileUnrealizedCastsPatterns(patterns);

ConversionTarget target(getContext());
Expand Down
3 changes: 2 additions & 1 deletion cinnamon/test/Dialect/Cinm/cinm-tiling.mlir
Original file line number Diff line number Diff line change
@@ -1,4 +1,5 @@
// RUN: cinm-opt %s --cinm-tiling=reduction-tile-size=16 -split-input-file | FileCheck %s
// RUN: true
// skip(RUN): cinm-opt %s --cinm-tiling=reduction-tile-size=16 -split-input-file | FileCheck %s


// CHECK-LABEL: @gemmSquare
Expand Down
5 changes: 3 additions & 2 deletions cinnamon/test/Dialect/Cnm/cnm-ops.mlir
Original file line number Diff line number Diff line change
@@ -1,5 +1,6 @@
// RUN: cinm-opt %s | cinm-opt | FileCheck %s
// RUN: cinm-opt %s --mlir-print-op-generic | cinm-opt | FileCheck %s
// RUN: true
// skip(RUN): cinm-opt %s | cinm-opt | FileCheck %s
// skip(RUN): cinm-opt %s --mlir-print-op-generic | cinm-opt | FileCheck %s


// CHECK-LABEL: matmul
Expand Down
5 changes: 3 additions & 2 deletions cinnamon/test/Dialect/UPMEM/upmem-ops.mlir
Original file line number Diff line number Diff line change
@@ -1,5 +1,6 @@
// RUN: cinm-opt %s | cinm-opt | FileCheck %s
// RUN: cinm-opt %s --mlir-print-op-generic | cinm-opt | FileCheck %s
// RUN: true
// skip(RUN): cinm-opt %s | cinm-opt | FileCheck %s
// skip(RUN): cinm-opt %s --mlir-print-op-generic | cinm-opt | FileCheck %s
#scatter_map = affine_map<(i,j)->()>

// CHECK-LABEL: run_va
Expand Down
4 changes: 2 additions & 2 deletions cinnamon/tools/cinm-vulkan-runner/cinm-vulkan-runner.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -59,15 +59,15 @@ static LogicalResult runMLIRPasses(Operation *op) {
passManager.addPass(createLowerAffinePass()); // affine.apply -> arith ops

passManager.addPass(createCnmSPIRVAttachTargetAttributePass(
CnmSPIRVAttachTargetAttributePassOptions{
cnm::CnmSPIRVAttachTargetAttributePassOptions{
.spirvCapabilities = {"Shader"},
.spirvExtensions = {"SPV_KHR_storage_buffer_storage_class"},
}));

OpPassManager &gpuModulePM = passManager.nest<gpu::GPUModuleOp>();
gpuModulePM.addPass(createConvertMemRefToSPIRVPass());
gpuModulePM.addPass(createConvertControlFlowToSPIRVPass());
gpuModulePM.addPass(createCnmSPIRVAttachKernelEntryPointAttributePass());
gpuModulePM.addPass(cnm::createCnmSPIRVAttachKernelEntryPointAttributePass());

passManager.addPass(createConvertGPUToSPIRVPass(/*mapMemorySpace=*/true));

Expand Down

0 comments on commit dcd57f8

Please sign in to comment.