Skip to content

Commit

Permalink
Rename macro: Q_UNUSED -> FLIT_UNUSED
Browse files Browse the repository at this point in the history
  • Loading branch information
mikebentley15 committed Jun 22, 2017
1 parent ce54bef commit 9006f98
Show file tree
Hide file tree
Showing 7 changed files with 27 additions and 27 deletions.
6 changes: 3 additions & 3 deletions inputGen/helper.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -19,7 +19,7 @@ class FmtRestore {

void printTestVal(const std::string &funcName, float val) {
FmtRestore restorer(std::cout);
Q_UNUSED(restorer);
FLIT_UNUSED(restorer);

auto intval = flit::as_int(val);
std::cout << funcName << ": 0x"
Expand All @@ -31,7 +31,7 @@ void printTestVal(const std::string &funcName, float val) {

void printTestVal(const std::string &funcName, double val) {
FmtRestore restorer(std::cout);
Q_UNUSED(restorer);
FLIT_UNUSED(restorer);

auto intval = flit::as_int(val);
std::cout << funcName << ": 0x"
Expand All @@ -43,7 +43,7 @@ void printTestVal(const std::string &funcName, double val) {

void printTestVal(const std::string &funcName, long double val) {
FmtRestore restorer(std::cout);
Q_UNUSED(restorer);
FLIT_UNUSED(restorer);

auto intval = flit::as_int(val);
uint64_t lhalf = static_cast<uint64_t>((intval >> 64)) & 0xFFFFL;
Expand Down
2 changes: 1 addition & 1 deletion litmus-tests/disabled/SimpleCHull.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -19,7 +19,7 @@ class SimpleCHull: public flit::TestBase<T> {

virtual flit::ResultType::mapped_type
run_impl(const flit::TestInput<T>& ti) {
Q_UNUSED(ti);
FLIT_UNUSED(ti);
CHullEdges.clear();
PointList.clear();
ReadInputs(fopen("data/random_input", "r"));
Expand Down
16 changes: 8 additions & 8 deletions litmus-tests/tests/Paranoia.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -307,7 +307,7 @@ namespace {
/* floating point exception receiver */
void sigfpe(int i)
{
Q_UNUSED(i);
FLIT_UNUSED(i);
fpecount++;
info_stream << endl << "* * * FLOATING-POINT ERROR * * *\n";
(void)fflush(stdout);
Expand All @@ -322,7 +322,7 @@ void sigfpe(int i)
template <typename F>
flit::ResultType::mapped_type Paranoia<F>::run_impl(const flit::TestInput<F>& ti)
{
Q_UNUSED(ti);
FLIT_UNUSED(ti);
int timeoutMillis = 1000;
enum class ExitStatus {
SuccessStatus = 0,
Expand Down Expand Up @@ -1837,32 +1837,32 @@ flit::ResultType::mapped_type Paranoia<F>::run_impl(const flit::TestInput<F>& ti
info_stream << id << "END OF TEST.\n";
}
catch (const TimeoutError &e) {
Q_UNUSED(e);
FLIT_UNUSED(e);
info_stream << id << ": timeout error occurred" << endl;
status = ExitStatus::TimeoutStatus;
}
catch (const FailureError &e) {
Q_UNUSED(e);
FLIT_UNUSED(e);
info_stream << id << ": failure error occurred" << endl;
status = ExitStatus::FailureStatus;
}
catch (const SeriousError &e) {
Q_UNUSED(e);
FLIT_UNUSED(e);
info_stream << id << ": serious error occurred" << endl;
status = ExitStatus::SeriousStatus;
}
catch (const DefectError &e) {
Q_UNUSED(e);
FLIT_UNUSED(e);
info_stream << id << ": defect error occurred" << endl;
status = ExitStatus::DefectStatus;
}
catch (const FlawError &e) {
Q_UNUSED(e);
FLIT_UNUSED(e);
info_stream << id << ": flaw error occurred" << endl;
status = ExitStatus::FlawStatus;
}
catch (const OverflowError &e) {
Q_UNUSED(e);
FLIT_UNUSED(e);
info_stream << id << ": overflow error occurred" << endl;
status = ExitStatus::OverflowStatus;
}
Expand Down
6 changes: 3 additions & 3 deletions litmus-tests/tests/langois.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -88,7 +88,7 @@ class langDotFMA: public flit::TestBase<T> {

virtual flit::ResultType::mapped_type
run_impl(const flit::TestInput<T>& ti) {
Q_UNUSED(ti);
FLIT_UNUSED(ti);
using stype = typename std::vector<T>::size_type;
stype size = 16;
auto rand = flit::getRandSeq<T>();
Expand Down Expand Up @@ -124,7 +124,7 @@ class langCompDotFMA: public flit::TestBase<T> {

virtual flit::ResultType::mapped_type
run_impl(const flit::TestInput<T>& ti) {
Q_UNUSED(ti);
FLIT_UNUSED(ti);
using stype = typename std::vector<T>::size_type;
stype size = 16;
auto rand = flit::getRandSeq<T>();
Expand Down Expand Up @@ -163,7 +163,7 @@ class langCompDot: public flit::TestBase<T> {

virtual flit::ResultType::mapped_type
run_impl(const flit::TestInput<T>& ti) {
Q_UNUSED(ti);
FLIT_UNUSED(ti);
using stype = typename std::vector<T>::size_type;
stype size = 16;
auto rand = flit::getRandSeq<T>();
Expand Down
4 changes: 2 additions & 2 deletions litmus-tests/tests/tinys.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -118,7 +118,7 @@ class dotProd: public flit::TestBase<T> {

virtual flit::ResultType::mapped_type
run_impl(const flit::TestInput<T>& ti) {
Q_UNUSED(ti);
FLIT_UNUSED(ti);
auto size = 16;

auto rand = flit::getRandSeq<T>();
Expand Down Expand Up @@ -159,7 +159,7 @@ class simpleReduction: public flit::TestBase<T> {

virtual flit::ResultType::mapped_type
run_impl(const flit::TestInput<T>& ti) {
Q_UNUSED(ti);
FLIT_UNUSED(ti);
auto vals = flit::getRandSeq<T>();
auto sublen = vals.size() / 4 - 1;
T sum = 0;
Expand Down
10 changes: 5 additions & 5 deletions src/TestBase.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -116,8 +116,8 @@ std::unique_ptr<T, CudaDeleter<T>*> makeCudaArr(const T* vals, size_t length) {

return ptr;
#else
Q_UNUSED(vals);
Q_UNUSED(length);
FLIT_UNUSED(vals);
FLIT_UNUSED(length);
throw std::runtime_error("Should not use makeCudaArr without CUDA enabled");
#endif
}
Expand Down Expand Up @@ -184,9 +184,9 @@ runKernel(KernelFunction<T>* kernel, const TestInput<T>& ti, size_t stride) {
return results;
#else // not __CUDA__
// Do nothing
Q_UNUSED(kernel);
Q_UNUSED(ti);
Q_UNUSED(stride);
FLIT_UNUSED(kernel);
FLIT_UNUSED(ti);
FLIT_UNUSED(stride);
return {};
#endif // __CUDA__
}
Expand Down
10 changes: 5 additions & 5 deletions src/flitHelpers.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -19,8 +19,8 @@

#include <cfloat>

// Silence "unused variable/parameter" compiler warnings.
// Casting the expression to void evaluates it (if it has side effects)
// but discards the result, which compilers accept as an intentional use.
// Guarded so a prior definition (e.g. from another header) is respected.
#ifndef FLIT_UNUSED
#define FLIT_UNUSED(x) (void)x
#endif

// #ifdef __CUDA__
Expand All @@ -38,21 +38,21 @@ const int RAND_VECT_SIZE = 256;
// Overload ladder used to step through the floating-point types
// (float -> double -> long double -> float).  Only the argument's
// static type matters for overload selection; its value is ignored.
// This overload wraps the ladder around: long double -> float.
inline
float
get_next_type(long double x){
  FLIT_UNUSED(x);  // parameter exists solely for overload resolution
  return 0.0f;
}

// Overload ladder used to step through the floating-point types
// (float -> double -> long double).  Only the argument's static type
// matters for overload selection; its value is ignored.
// This overload steps up: float -> double.
inline
double
get_next_type(float x){
  FLIT_UNUSED(x);  // parameter exists solely for overload resolution
  return 0.0;
}

// Overload ladder used to step through the floating-point types
// (float -> double -> long double).  Only the argument's static type
// matters for overload selection; its value is ignored.
// This overload steps up: double -> long double.
inline
long double
get_next_type(double x){
  FLIT_UNUSED(x);  // parameter exists solely for overload resolution
  return 0.0l;
}

Expand Down

0 comments on commit 9006f98

Please sign in to comment.