Merge branch 'mpi' of gitlab.com:aduprop/aduprop into mpi
michel2323 committed Mar 19, 2018
2 parents 4d22319 + 8de004a commit 8b588c0
Showing 3 changed files with 154 additions and 54 deletions.
48 changes: 30 additions & 18 deletions include/ad.hpp
@@ -309,22 +309,24 @@ void t3s_t2s_t1s_driver(const pVector<double> &xic,
}
for (size_t j = 0; j < dim; ++j) {
for (size_t k = 0; k < dim; ++k) {
-for (size_t l = 0; l < dim; ++l) {
-axic[l].value().value().value() = xic[l];
-axic[l].gradient().value().value() = 0.0;
-axic[l].value().gradient().gradient() = 0.0;
-axic[l].gradient().gradient().gradient() = 0.0;
-axic[l].value().value().gradient() = 0.0;
-axic[l].gradient().value().gradient() = 0.0;
-axic[l].value().gradient().value() = 0.0;
-axic[l].gradient().gradient().value() = 0.0;
-}
-axic[i].value().value().gradient() = 1.0;
-axic[j].value().gradient().value() = 1.0;
-axic[k].gradient().value().value() = 1.0;
-integrate<t3s>(axic);
-for (size_t l = 0; l < dim; ++l) {
-T[l][k][j][i-start] = axic[l].gradient().gradient().gradient();
+if(H[k][j][i] != 0.0) {
+for (size_t l = 0; l < dim; ++l) {
+axic[l].value().value().value() = xic[l];
+axic[l].gradient().value().value() = 0.0;
+axic[l].value().gradient().gradient() = 0.0;
+axic[l].gradient().gradient().gradient() = 0.0;
+axic[l].value().value().gradient() = 0.0;
+axic[l].gradient().value().gradient() = 0.0;
+axic[l].value().gradient().value() = 0.0;
+axic[l].gradient().gradient().value() = 0.0;
+}
+axic[i].value().value().gradient() = 1.0;
+axic[j].value().gradient().value() = 1.0;
+axic[k].gradient().value().value() = 1.0;
+integrate<t3s>(axic);
+for (size_t l = 0; l < dim; ++l) {
+T[l][k][j][i-start] = axic[l].gradient().gradient().gradient();
+}
}
}
for (size_t k = 0; k < dim; ++k) {
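
Note on this hunk: the third-order seeding and integration for a direction triple (i, j, k) is now guarded by the second-order entry H[k][j][i], so integrate<t3s> runs only where the Hessian projection is nonzero — nnz(H) passes instead of dim^3. A minimal standalone sketch of the same skipping pattern (the flat H layout, the toy values, and expensive_pass are illustrative assumptions, not aduprop API):

    #include <cstddef>
    #include <iostream>
    #include <vector>

    // Stand-in for one t3s seed/integrate/harvest pass.
    static void expensive_pass(size_t, size_t, size_t) {}

    int main() {
      const size_t dim = 4;
      // Toy second-order tensor H[k][j][i], stored flat; mostly zero.
      std::vector<double> H(dim * dim * dim, 0.0);
      H[0] = 1.5;
      H[dim + 2] = -0.3;

      size_t done = 0;
      for (size_t i = 0; i < dim; ++i)
        for (size_t j = 0; j < dim; ++j)
          for (size_t k = 0; k < dim; ++k)
            if (H[(k * dim + j) * dim + i] != 0.0) {  // analogous guard
              expensive_pass(i, j, k);
              ++done;
            }
      std::cout << done << " of " << dim * dim * dim << " passes\n";  // 2 of 64
    }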
@@ -767,6 +769,7 @@ void propagateAD(pVector<double>& m0, pMatrix<double>& cv0, System& sys,
cv_temp.zeros();

pMatrix<double> cv_temp2(dim, dim);
+pTensor3<double> H_tmp(H.get_d1(), H.get_d1(), H.get_d3());
cv_temp2.zeros();

switch(degree) {
@@ -782,15 +785,24 @@
exit(-1);
}

+double cutrate = 1.0 - 1.0/(double) dim;
+// double cutrate = 0.4;
// Obtain tensors
if(paduprop_getrank() == 0) {
std::cout << "Obtaining tensors" << std::endl;
}
switch(degree) {
case 3:
// drivers.t3s_t2s_t1s_driver(m0, J, H, T);
-drivers.t3s_t2s_t1s_driver(m0, J, H, T, start, end);
drivers.t2s_t1s_driver(m0, J, H);
+// H.cutoff(0.7);
+std::cout << "H nz: " << H.nz() << std::endl;
+H_tmp=H;
+std::cout << "Cutrate: " << cutrate << std::endl;
+H_tmp.cutoff(cutrate);
+std::cout << "H_tmp nz: " << H_tmp.nz() << std::endl;
+drivers.t3s_t2s_t1s_driver(m0, J, H_tmp, T, start, end);
+// T.zeros();
break;
case 2:
drivers.t2s_t1s_driver(m0, J, H);
@@ -929,7 +941,7 @@ void propagateAD(pVector<double>& m0, pMatrix<double>& cv0, System& sys,
global_prof.end("reduction");

cv0 = cv_temp + cv_temp2;
-//cv0.cutoff(0.90);
+cv0.cutoff(cutrate);
global_prof.end("propagateAD");
}
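
Summary of the new flow in propagateAD: the second-order driver fills H first; H is copied to H_tmp, which cutoff(cutrate) sparsifies by zeroing all but the largest-magnitude entries; the expensive third-order driver then runs against the sparsified H_tmp; and cv0 is thinned with the same rate at the end of the step. (The H_tmp allocation passes H.get_d1() twice; since H is square in its leading dimensions here, that still matches the shape of H.) With cutrate = 1.0 - 1.0/dim, roughly a 1/dim fraction of entries survives each cutoff: for dim = 10 and a 10x10 matrix, n = 100, cutrate = 0.9, del = 0.9 * 99 = 89.1, el = 89, and the threshold is the 90th-smallest magnitude, so about 89 entries are zeroed and about 11 are kept.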

39 changes: 17 additions & 22 deletions include/alg.hpp
@@ -206,45 +206,44 @@ template <typename T> class pMatrix {
}

T norm() {
+size_t n = rows*cols;
T res = 0;
-for (size_t i = 0 ; i < rows; ++i) {
-for (size_t j = 0 ; j < cols; ++j) {
-res+=(data[i*cols + j])*(data[i*cols + j]);
-}
+for (size_t i = 0 ; i < n; ++i) {
+res+=data[i]*data[i];
}
return sqrt(res);
}
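
The rewritten loops here and in the three functions below assume the matrix is stored contiguously in row-major order, so a single pass over data[0 .. rows*cols) visits exactly the elements the old data[i*cols + j] double loop did. A quick standalone check of that equivalence (assumed 2x3 data, plain arrays):

    #include <cassert>
    #include <cstddef>

    int main() {
      const size_t rows = 2, cols = 3;
      double data[rows * cols] = {1, 2, 3, 4, 5, 6};

      double nested = 0, flat = 0;
      for (size_t i = 0; i < rows; ++i)
        for (size_t j = 0; j < cols; ++j)
          nested += data[i * cols + j] * data[i * cols + j];  // old indexing
      for (size_t i = 0; i < rows * cols; ++i)
        flat += data[i] * data[i];                            // new indexing

      assert(nested == flat);  // same elements, same sum of squares
      return 0;
    }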

T maxnorm() {
+size_t n = rows*cols;
T res = 0;
-for (size_t i = 0 ; i < rows; ++i) {
-for (size_t j = 0 ; j < cols; ++j) {
-if(fabs(data[i*cols + j]) > res) {
-res = fabs(data[i*cols + j]);
-}
+for (size_t i = 0 ; i < n; ++i) {
+if(fabs(data[i]) > res) {
+res = fabs(data[i]);
}
}
return res;
}

size_t nz() {
+size_t n = rows*cols;
size_t res = 0;
-for (size_t i = 0 ; i < rows; ++i) {
-for (size_t j = 0 ; j < cols; ++j) {
-if(data[i*cols + j] != 0.0) res++;
-}
+for (size_t i = 0 ; i < n; ++i) {
+if(data[i] != 0.0) res++;
}
return res;
}

size_t thres(T in) {
+size_t n = rows*cols;
size_t res = 0;
-for (size_t i = 0 ; i < rows; ++i) {
-for (size_t j = 0 ; j < cols; ++j) {
-if(fabs(data[i*cols + j]) > in) res++;
-}
+for (size_t i = 0 ; i < n; ++i) {
+if(fabs(data[i]) > in) res++;
}
return res;
}

size_t cutoff(double rate) {
-pMatrix<T> tmp(rows, cols);
size_t n = rows*cols;

std::vector<T> vect(n);
@@ -254,11 +253,8 @@

double del = rate * (double) (n-1);
size_t el = (size_t) del;
std::cout << "del: " << del << std::endl;
std::cout << "el: " << el << std::endl;

T thres = vect[el];
std::cout << "vect[el]: " << vect[el] << std::endl;
size_t count = 0;

for (size_t i = 0 ; i < n; ++i) {
@@ -267,7 +263,6 @@
count++;
}
}
std::cout << "count: " << count << std::endl;
return count;
}
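
cutoff() implements a quantile-style truncation: sort the magnitudes, take the element at rank rate*(n-1) as the threshold, zero everything strictly below it (so ties at the threshold survive), and report how many entries were zeroed. A self-contained sketch of the same algorithm on a plain std::vector (sample data assumed, not aduprop code):

    #include <algorithm>
    #include <cmath>
    #include <cstddef>
    #include <iostream>
    #include <vector>

    // Zero all entries whose magnitude is strictly below the rate-quantile;
    // returns the number of zeroed entries (mirrors pMatrix<T>::cutoff).
    size_t cutoff(std::vector<double>& data, double rate) {
      const size_t n = data.size();
      std::vector<double> mags(n);
      for (size_t i = 0; i < n; ++i) mags[i] = std::fabs(data[i]);
      std::sort(mags.begin(), mags.end());
      const double thres = mags[(size_t)(rate * (double)(n - 1))];
      size_t count = 0;
      for (size_t i = 0; i < n; ++i) {
        if (std::fabs(data[i]) < thres) {
          data[i] = 0.0;
          count++;
        }
      }
      return count;
    }

    int main() {
      std::vector<double> v = {0.1, -2.0, 0.03, 5.0, -0.4, 1.0};
      // Threshold = sorted magnitude at index (size_t)(0.5 * 5) = 2, i.e. 0.4;
      // 0.1 and 0.03 are zeroed, -0.4 ties the threshold and survives.
      std::cout << cutoff(v, 0.5) << " entries zeroed\n";  // prints 2
    }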

121 changes: 107 additions & 14 deletions include/tensor.hpp
@@ -97,17 +97,65 @@ template <typename T> class pTensor3 {
}

T norm() {
+size_t n = d1*d2*d3;
T res = 0;
-for(size_t i = 0 ; i < d1 ; ++i) {
-for(size_t j = 0 ; j < d2 ; ++j) {
-for(size_t k = 0 ; k < d3 ; ++k) {
-res+=(data[i*d2*d3 + j*d3 + k] * data[i*d2*d3 + j*d3 + k]);
-}
-}
+for (size_t i = 0 ; i < n; ++i) {
+res+=data[i]*data[i];
}
return sqrt(res);
}

+T maxnorm() {
+size_t n = d1*d2*d3;
+T res = 0;
+for (size_t i = 0 ; i < n; ++i) {
+if(fabs(data[i]) > res) {
+res = fabs(data[i]);
+}
+}
+return res;
+}

+size_t nz() {
+size_t n = d1*d2*d3;
+size_t res = 0;
+for (size_t i = 0 ; i < n; ++i) {
+if(data[i] != 0.0) res++;
+}
+return res;
+}

+size_t thres(T in) {
+size_t n = d1*d2*d3;
+size_t res = 0;
+for (size_t i = 0 ; i < n; ++i) {
+if(fabs(data[i]) > in) res++;
+}
+return res;
+}

+size_t cutoff(double rate) {
+size_t n = d1*d2*d3;
+
+std::vector<T> vect(n);
+for(size_t i = 0; i < n; ++i) vect[i] = fabs(data[i]);
+
+std::sort(vect.begin(), vect.end());
+
+double del = rate * (double) (n-1);
+size_t el = (size_t) del;
+
+T thres = vect[el];
+size_t count = 0;
+
+for (size_t i = 0 ; i < n; ++i) {
+if(fabs(data[i]) < thres) {
+data[i] = 0;
+count++;
+}
+}
+return count;
+}

void zeros() {
for (size_t i = 0; i < d3*d2*d1; ++i) {
@@ -265,20 +313,65 @@ template <typename T> class pTensor4 {
}

T norm() {
+size_t n = d1*d2*d3*d4;
T res = 0;
-for(size_t i = 0 ; i < d1 ; ++i) {
-for(size_t j = 0 ; j < d2 ; ++j) {
-for(size_t k = 0 ; k < d3 ; ++k) {
-for(size_t l = 0 ; l < d4 ; ++l) {
-res+=(data[i*d2*d3*d4 + j*d3*d4 + k*d4 + l] * data[i*d2*d3*d4 + j*d3*d4 + k*d4 + l]);
-}
-}
-}
+for (size_t i = 0 ; i < n; ++i) {
+res+=data[i]*data[i];
}
return sqrt(res);
}

+T maxnorm() {
+size_t n = d1*d2*d3*d4;
+T res = 0;
+for (size_t i = 0 ; i < n; ++i) {
+if(fabs(data[i]) > res) {
+res = fabs(data[i]);
+}
+}
+return res;
+}

+size_t nz() {
+size_t n = d1*d2*d3*d4;
+size_t res = 0;
+for (size_t i = 0 ; i < n; ++i) {
+if(data[i] != 0.0) res++;
+}
+return res;
+}

+size_t thres(T in) {
+size_t n = d1*d2*d3*d4;
+size_t res = 0;
+for (size_t i = 0 ; i < n; ++i) {
+if(fabs(data[i]) > in) res++;
+}
+return res;
+}

+size_t cutoff(double rate) {
+size_t n = d1*d2*d3*d4;
+
+std::vector<T> vect(n);
+for(size_t i = 0; i < n; ++i) vect[i] = fabs(data[i]);
+
+std::sort(vect.begin(), vect.end());
+
+double del = rate * (double) (n-1);
+size_t el = (size_t) del;
+
+T thres = vect[el];
+size_t count = 0;
+
+for (size_t i = 0 ; i < n; ++i) {
+if(fabs(data[i]) < thres) {
+data[i] = 0;
+count++;
+}
+}
+return count;
+}
void zeros() {
for (size_t i = 0; i < d4*d3*d2*d1; ++i) {
data[i] = 0.0;
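Design note: maxnorm(), nz(), thres(), and cutoff() are now duplicated almost verbatim across pMatrix, pTensor3, and pTensor4, differing only in how the element count n is computed. Since all three classes iterate a flat buffer, one possible follow-up (a sketch only, not part of this commit, and assuming the classes expose their buffer through some accessor) would hoist the shared loops into free function templates:

    #include <cmath>
    #include <cstddef>

    // Count nonzeros in any flat buffer; pMatrix, pTensor3, and pTensor4
    // would forward their data pointer and total element count.
    template <typename T>
    size_t flat_nz(const T* data, size_t n) {
      size_t res = 0;
      for (size_t i = 0; i < n; ++i)
        if (data[i] != 0.0) res++;
      return res;
    }

    // Largest magnitude in a flat buffer (shared maxnorm).
    template <typename T>
    T flat_maxnorm(const T* data, size_t n) {
      T res = 0;
      for (size_t i = 0; i < n; ++i)
        if (std::fabs(data[i]) > res) res = std::fabs(data[i]);
      return res;
    }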

0 comments on commit 8b588c0

Please sign in to comment.