diff --git a/bin/instrument.py b/bin/instrument.py
index 882a94f8..fe835efa 100755
--- a/bin/instrument.py
+++ b/bin/instrument.py
@@ -333,8 +333,8 @@ def readCompileOption():
     assert 0 < bitWidth, "BitWidth must be a integer greater than 0"
     assert math.log2(bitWidth).is_integer(), "BitWidth must be a exponent of power 2"
     assert 0 <= minPercentileOutlierThreshold and minPercentileOutlierThreshold <= 100, "Minimum Percentile Value for Percentile should be greater than or equal to 0"
-    assert 0 <= maxPercentileOutlierThreshold and maxPercentileOutlierThreshold <= 100, "Maximum Percentile Value for Percentile should be greater than or equal to 0 and lesser than or equal to 100"
-    assert minPercentileOutlierThreshold <= maxPercentileOutlierThreshold, "Minimum Percentile Value should be lesser than Maximum Percentile Value"
+    assert 0 <= maxPercentileOutlierThreshold and maxPercentileOutlierThreshold <= 100, "Maximum Percentile Value for Percentile should be greater than or equal to 0 and less than or equal to 100"
+    assert minPercentileOutlierThreshold <= maxPercentileOutlierThreshold, "Minimum Percentile Value should be less than or equal to Maximum Percentile Value"
     global fakeQuant
     fakeQuant = [targetLayer, minPercentileOutlierThreshold, maxPercentileOutlierThreshold, bitWidth]

diff --git a/docs/input_masterlist_ml.yaml b/docs/input_masterlist_ml.yaml
index ecee7b29..4880d908 100644
--- a/docs/input_masterlist_ml.yaml
+++ b/docs/input_masterlist_ml.yaml
@@ -157,7 +157,7 @@ compileOption:
     bitWidth: 8


-# Fake Qunatization parameters comes within compileOption. In here, you need to mention the targer layer - conv or matmul (currently only supporting these)
+# Fake Quantization parameters come within compileOption. Here, you need to mention the target layer - conv or matmul (currently only supporting these)
 # minPercentileOutlierThreshold and maxPercentileOutlierThreshold are optional parameters. On not mentioning these parameters, they are autoamtically set to
 # minPercentileOutlierThreshold = 0 and maxPercentileOutlierThreshold = 100
-# bitWidth needs to be mentioned and is the bit width of the Quantized Int numbers which needs to be a exponent of power 2
\ No newline at end of file
+# bitWidth needs to be mentioned and is the bit width of the Quantized Int numbers, which needs to be a power of 2
diff --git a/llvm_passes/core/FakeQuantizationPass.cpp b/llvm_passes/core/FakeQuantizationPass.cpp
index ecd76e79..e2041d76 100644
--- a/llvm_passes/core/FakeQuantizationPass.cpp
+++ b/llvm_passes/core/FakeQuantizationPass.cpp
@@ -17,7 +17,6 @@
 #include
 #include "llvm/Support/CommandLine.h"
-// #include "Controller.h"

 using namespace llvm;
 using namespace std;

@@ -262,8 +261,8 @@
         "FakeQuantIntegerBasedAddition", Type::getFloatTy(Context), Type::getFloatTy(Context),
         Type::getFloatTy(Context));

-    FunctionCallee FakeQunatDequnatizeAndBiasAdditionFunction = M->getOrInsertFunction(
-        "FakeQunatDequnatizeAndBiasAddition", Type::getFloatTy(Context), Type::getFloatTy(Context),
+    FunctionCallee FakeQuantDequantizeAndBiasAdditionFunction = M->getOrInsertFunction(
+        "FakeQuantDequantizeAndBiasAddition", Type::getFloatTy(Context), Type::getFloatTy(Context),
         Type::getFloatTy(Context));

     bool foundLayerDef = false;
@@ -329,7 +328,7 @@

           // Replacing the old float to new float obtained from Fake
-          // Qunatization
+          // Quantization
           op->replaceAllUsesWith(newInst);
           deleteInst.insert(op);
         }

@@ -343,7 +342,7 @@
           Value *op1 = op->getOperand(0);
           Value *op2 = op->getOperand(1);

-          Value *newInst = Builder.CreateCall(FakeQunatDequnatizeAndBiasAdditionFunction, {op1, op2});
+          Value *newInst = Builder.CreateCall(FakeQuantDequantizeAndBiasAdditionFunction, {op1, op2});
           op->replaceAllUsesWith(newInst);
           deleteInst.insert(op);
         }
diff --git a/llvm_passes/core/FakeQuantizationPass.h b/llvm_passes/core/FakeQuantizationPass.h
index a344f4fa..1c9a57b9 100644
--- a/llvm_passes/core/FakeQuantizationPass.h
+++ b/llvm_passes/core/FakeQuantizationPass.h
@@ -40,4 +40,4 @@
 std::string getOperandName(llvm::Instruction *I);

 } // namespace llfi
-#endif // LLFI_H
\ No newline at end of file
+#endif // LLFI_H
diff --git a/runtime_lib/FakeQuantizationLib.cpp b/runtime_lib/FakeQuantizationLib.cpp
index 9a2a2e47..e0ebd6d7 100644
--- a/runtime_lib/FakeQuantizationLib.cpp
+++ b/runtime_lib/FakeQuantizationLib.cpp
@@ -325,12 +325,12 @@ float Quantize(float w1, float x1, int currentLayerIndex, int totalNumberOfLayer
     }
 }

-void finished(int currentLayerIndex, int totalNumberOfLayers, int minPercentileThreshold, int maxPercetileThreshold, int bitWidth)
+void finished(int currentLayerIndex, int totalNumberOfLayers, int minPercentileThreshold, int maxPercentileThreshold, int bitWidth)
 {
     bit_width = bitWidth;
     max_number = 2 ^ bit_width;
     printf("In this Layer - %i\n", currentLayerIndex);
-    FindPercentile(minPercentileThreshold, maxPercetileThreshold);
+    FindPercentile(minPercentileThreshold, maxPercentileThreshold);
     printf("Got called in finished!\n");
     printf("Index for w %i and Index for x %i\n", w_index, x_index);
     printf("Actual Min for x %f and Actual Max %f\n", x_values[0], x_values[x_index - 1]);
@@ -402,7 +402,7 @@ float FakeQuantIntegerBasedAddition(float num1, float num2)
     return (float)(intNum1 + intNum2);
 }

-float FakeQunatDequnatizeAndBiasAddition(float QunatizeNum, float Basis)
+float FakeQuantDequantizeAndBiasAddition(float QunatizeNum, float Basis)
 {
     int QunatizeIntNum = (int)QunatizeNum;
     float dequnatizedFloat = dequantize(QunatizeIntNum);
diff --git a/runtime_lib/FakeQuantizationLib.h b/runtime_lib/FakeQuantizationLib.h
index 9d62eb76..18045572 100644
--- a/runtime_lib/FakeQuantizationLib.h
+++ b/runtime_lib/FakeQuantizationLib.h
@@ -10,15 +10,15 @@ extern "C" {
 #endif

 float getWAndX(float w1, float x1, int currentLayerIndex, int totalNumberOfLayers);
-void finished(int currentLayerIndex, int totalNumberOfLayers, int minPercentileThreshold, int maxPercetileThreshold, int bitWidth);
+void finished(int currentLayerIndex, int totalNumberOfLayers, int minPercentileThreshold, int maxPercentileThreshold, int bitWidth);
 float dequantize(int q);
 float Quantize(float w1, float x1, int currentLayerIndex, int totalNumberOfLayers);
 float QuantizeMatMul(float w1, float x1, int currentLayerIndex, int totalNumberOfLayers);
 float FakeQuantIntegerBasedAddition(float num1, float num2);
-float FakeQunatDequnatizeAndBiasAddition(float num1, float num2);
+float FakeQuantDequantizeAndBiasAddition(float num1, float num2);
 void getBias(float a, float b);

 #ifdef __cplusplus
 }
 #endif
-#endif // FAKE_QUANTIZATION_H
\ No newline at end of file
+#endif // FAKE_QUANTIZATION_H
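Reviewer note: the sketch below is a quick reference for the fakeQuant compile option this patch touches. It is a hypothetical, simplified illustration rather than actual repository code; the helper name validate_fake_quant is made up here, while the assertions mirror the first hunk in bin/instrument.py and the default thresholds of 0 and 100 follow the comments in docs/input_masterlist_ml.yaml.

# Illustrative sketch only (hypothetical helper, not part of instrument.py).
# Mirrors the asserts shown in the instrument.py hunk above; the 0/100
# defaults follow the masterlist comments for omitted thresholds.
import math

def validate_fake_quant(targetLayer, bitWidth,
                        minPercentileOutlierThreshold=0,
                        maxPercentileOutlierThreshold=100):
    assert 0 < bitWidth, "BitWidth must be an integer greater than 0"
    assert math.log2(bitWidth).is_integer(), "BitWidth must be a power of 2"
    assert 0 <= minPercentileOutlierThreshold <= 100, \
        "Minimum Percentile Value should be between 0 and 100"
    assert 0 <= maxPercentileOutlierThreshold <= 100, \
        "Maximum Percentile Value should be between 0 and 100"
    assert minPercentileOutlierThreshold <= maxPercentileOutlierThreshold, \
        "Minimum Percentile Value should be less than or equal to Maximum Percentile Value"
    # Same ordering as the global fakeQuant list set in readCompileOption().
    return [targetLayer, minPercentileOutlierThreshold,
            maxPercentileOutlierThreshold, bitWidth]

# Example: an 8-bit conv-layer configuration, matching bitWidth: 8 in the masterlist.
print(validate_fake_quant("conv", 8))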