
Asynchronous combi #101


Open
wants to merge 22 commits into base: main

Commits (22)
fd56602
Added Combine Async
May 2, 2018
addbdda
Merge remote-tracking branch 'origin/combi_gene_faults' into asynchro…
May 2, 2018
d414343
These are the async changes that were made.
Jun 11, 2018
37395c3
added a new example file
Jun 11, 2018
6f16d65
Merge remote-tracking branch 'origin/combi_gene_faults' into asynchro…
Jun 11, 2018
7b9f4b7
Added test all instead of explicitly changing the code
Jun 21, 2018
b9888a8
Added CombiAsyncOddEven scheme with only one dehierarchization.
Aug 6, 2018
b0cce06
Merge remote-tracking branch 'origin/master' into asynchronousCombi
obersteiner Oct 17, 2019
a88638d
fixed compiler issues + tests are now running
obersteiner Oct 18, 2019
60e4baa
Merge remote-tracking branch 'origin/master' into asynchronousCombi
obersteiner Apr 3, 2020
a0bcf8f
fixed issues with compilation; still need to check why TIMING is set
obersteiner Apr 3, 2020
181b3a9
Merge remote-tracking branch 'origin/master' into asynchronousCombi
obersteiner Apr 6, 2020
d9f49d1
added Makefile in Async example to adjust_examples + added template
obersteiner Apr 7, 2020
c06914a
fixed example: made destructor public + removed warnings + added asse…
obersteiner Apr 7, 2020
e1c0d35
generalized run.sh
obersteiner Apr 7, 2020
6675303
added valid configuration for ctparam
obersteiner Apr 7, 2020
33c91f7
added comments to ctparam
obersteiner Apr 7, 2020
56df7f8
fixed example for new combi framework version
obersteiner Apr 7, 2020
01ae059
removed unnecessary dehierarchization by saving the intermediate hier…
obersteiner Nov 30, 2020
1bbee65
simplified code
obersteiner Dec 22, 2020
3c6a569
fix if subspaces change + final combi + fix for oddeven
obersteiner Dec 22, 2020
f31416c
changed to nodal save of old values + simplified code -> works now wi…
obersteiner Dec 22, 2020
2 changes: 1 addition & 1 deletion adjust_examples.py
@@ -9,7 +9,7 @@

dir_path = os.path.dirname(os.path.realpath(__file__))
GLPK_DIR= str(dir_path) + "/glpk"
-examples = ["combi_example", "combi_example_faults", "gene_distributed", "gene_distributed_linear"]
+examples = ["combi_example", "combi_example_faults", "combiAsync_example", "gene_distributed", "gene_distributed_linear"]
for example in examples:
pfilein = open(str(dir_path)+ "/distributedcombigrid/examples/" + example + "/Makefile.template" ,'r')
temp = pfilein.read()
23 changes: 23 additions & 0 deletions distributedcombigrid/examples/combiAsync_example/Makefile.template
@@ -0,0 +1,23 @@
CC=$(CC)
CFLAGS=-std=c++11 -g -fopenmp -Wno-deprecated-declarations -Wno-unused-local-typedefs -Wno-deprecated -Wno-uninitialized -Wall -DUNIFORMDECOMPOSITION

SGPP_DIR=$(SGPP)
GLPK_DIR=$(GLPK)

LD_SGPP=-L$(SGPP_DIR)/lib/sgpp
INC_GLPK=-I$(GLPK_DIR)/include
LD_GLPK=-L$(GLPK_DIR)/lib
INC_SGPP=-I$(SGPP_DIR)/distributedcombigrid/src/

LDIR=$(LD_SGPP) $(LD_GLPK)
INC=$(INC_SGPP) $(INC_GLPK)

LIBS=-lsgppdistributedcombigrid -lboost_serialization

all: combi_example

combi_example: combi_example.cpp TaskExample.hpp
$(CC) $(CFLAGS) $(LDIR) $(INC) -o combi_example combi_example.cpp $(LIBS)

clean:
rm -f *.o out/* combi_example
244 changes: 244 additions & 0 deletions distributedcombigrid/examples/combiAsync_example/TaskExample.hpp
@@ -0,0 +1,244 @@
/*
* TaskExample.hpp
*
* Created on: Sep 25, 2015
* Author: heenemo
*/

#ifndef TASKEXAMPLE_HPP_
#define TASKEXAMPLE_HPP_

#include "sgpp/distributedcombigrid/fullgrid/DistributedFullGrid.hpp"
#include "sgpp/distributedcombigrid/task/Task.hpp"

namespace combigrid {

class TaskExample: public Task {

public:
/* if the constructor of the base task class is not sufficient we can provide our
* own implementation. here, we add dt, nsteps, and p as new parameters.
*/
TaskExample(DimType dim, LevelVector& l, std::vector<bool>& boundary,
real coeff, LoadModel* loadModel, real dt,
size_t nsteps, IndexVector p = IndexVector(0),FaultCriterion *faultCrit = (new StaticFaults({0,IndexVector(0),IndexVector(0)})) ) :
Task(dim, l, boundary, coeff, loadModel, faultCrit), dt_(dt), nsteps_(
nsteps), p_(p), initialized_(false), stepsTotal_(0), dfg_(NULL) {
}

void init(CommunicatorType lcomm, std::vector<IndexVector> decomposition = std::vector<IndexVector>()){
assert(!initialized_);
assert(dfg_ == NULL);

int lrank;
MPI_Comm_rank(lcomm, &lrank);

/* create distributed full grid. we try to find a balanced ratio between
* the number of grid points and the number of processes per dimension
* by this very simple algorithm. to keep things simple we require powers
* of two for the number of processes here. */
int np;
MPI_Comm_size(lcomm, &np);

// check if power of two
if (!((np > 0) && ((np & (~np + 1)) == np)))
assert(false && "number of processes not power of two");

DimType dim = this->getDim();
IndexVector p(dim, 1);
const LevelVector& l = this->getLevelVector();

if (p_.size() == 0) {
// compute domain decomposition
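// (the loop below starts from p = (1,...,1) and repeatedly doubles the
// dimension with the largest ratio of grid points (2^l[k]) to processes
// (p[k]) until the product of all p[k] equals the communicator size np)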
IndexType prod_p(1);

while (prod_p != static_cast<IndexType>(np)) {
DimType dimMaxRatio = 0;
real maxRatio = 0.0;

for (DimType k = 0; k < dim; ++k) {
real ratio = std::pow(2.0, l[k]) / p[k];

if (ratio > maxRatio) {
maxRatio = ratio;
dimMaxRatio = k;
}
}

p[dimMaxRatio] *= 2;
prod_p = 1;

for (DimType k = 0; k < dim; ++k)
prod_p *= p[k];
}
} else {
p = p_;
}

if (lrank == 0) {
std::cout << "init task " << this->getID() << " with l = "
<< this->getLevelVector() << " and p = " << p << std::endl;
}

// create local subgrid on each process
dfg_ = new DistributedFullGrid<CombiDataType>(dim, l, lcomm,
this->getBoundary(), p);

/* loop over local subgrid and set initial values */
std::vector<CombiDataType>& elements = dfg_->getElementVector();

phi_.resize(dfg_->getNrElements());
// we are only allowed to have 1 process per group in this example!
assert(elements.size() == dfg_->getNrElements());

for (IndexType li = 0; li < dfg_->getNrElements(); ++li) {
std::vector<double> coords(this->getDim());
dfg_->getCoordsGlobal(li, coords);

double exponent = 0;
for (DimType d = 0; d < this->getDim(); ++d) {
exponent -= std::pow(coords[d] - 0.5, 2);
}
dfg_->getData()[li] = std::exp(exponent*100.0) * 2;
}

initialized_ = true;
}


/* this is where the application code kicks in and all the magic happens.
* do whatever you have to do, but make sure that your application uses
* only lcomm or a subset of it as communicator.
* important: don't forget to set the isFinished flag at the end of the computation.
*/
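/* note on the scheme implemented below: each pseudo time step advances a
* constant-velocity advection problem (u_j = 1) with a first-order upwind
* (backward difference) discretization and periodic wrap-around of the
* backward neighbour in every dimension:
*   phi_new[i] = phi_old[i] - dt * sum_j u_j * (phi_old[i] - phi_old[i - e_j]) / h_j
* where h_j is the local mesh width in dimension j. */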
void run(CommunicatorType lcomm) {
assert(initialized_);

int lrank;
MPI_Comm_rank(lcomm, &lrank);

/* pseudo timestepping to demonstrate the behaviour of your typical
* time-dependent simulation problem. */
std::vector<CombiDataType> u(this->getDim(), 1);

// gradient of phi
std::vector<CombiDataType> dphi(this->getDim());

std::vector<IndexType> l(this->getDim());
std::vector<double> h(this->getDim());

for (unsigned int i = 0; i < this->getDim(); i++){
l[i] = dfg_->length(i);
h[i] = 1.0 / (double)l[i];
}

for (size_t i = 0; i < nsteps_; ++i) {
phi_.swap(dfg_->getElementVector());

for (IndexType li = 0; li < dfg_->getNrElements(); ++li) {
IndexVector ai(this->getDim());
dfg_->getGlobalVectorIndex(li, ai);

//neighbour
std::vector<IndexVector> ni(this->getDim(), ai);
std::vector<IndexType> lni(this->getDim());

CombiDataType u_dot_dphi = 0;

for(unsigned int j = 0; j < this->getDim(); j++){
ni[j][j] = (l[j] + ni[j][j] - 1) % l[j];
lni[j] = dfg_->getGlobalLinearIndex(ni[j]);
}

for(unsigned int j = 0; j < this->getDim(); j++){
// calculate gradient of phi with a backward difference quotient
dphi[j] = (phi_[li] - phi_[lni[j]]) / h[j];

u_dot_dphi += u[j] * dphi[j];
}

dfg_->getData()[li] = phi_[li] - u_dot_dphi * dt_;
}

MPI_Barrier(lcomm);
}

stepsTotal_ += nsteps_;

this->setFinished(true);
}

/* this function evaluates the combination solution on a given full grid.
* here, a full grid representation of your task's solution has to be created
* on the process of lcomm with the rank r.
* typically this would require gathering your (in whatever way) distributed
* solution on one process and then converting it to the full grid representation.
* the DistributedFullGrid class offers a convenient function to do this.
*/
void getFullGrid(FullGrid<CombiDataType>& fg, RankType r,
CommunicatorType lcomm, int n = 0) {
assert(fg.getLevels() == dfg_->getLevels());

dfg_->gatherFullGrid(fg, r);
}

DistributedFullGrid<CombiDataType>& getDistributedFullGrid(int n = 0) {
return *dfg_;
}


void setZero(){

}
~TaskExample() {
if (dfg_ != NULL)
delete dfg_;
}

protected:
/* if there are local variables that have to be initialized at construction
* you have to do it here. the worker processes will create the task using
* this constructor before overwriting the variables that are set by the
* manager. here we need to set the initialized variable to make sure it is
* set to false. */
TaskExample() :
initialized_(false), stepsTotal_(1), dfg_(NULL) {
}

private:
friend class boost::serialization::access;

// new variables that are set by the manager; they need to be added to serialize()
real dt_;
size_t nsteps_;
IndexVector p_;

// pure local variables that exist only on the worker processes
bool initialized_;
size_t stepsTotal_;
DistributedFullGrid<CombiDataType>* dfg_;
std::vector<CombiDataType> phi_;

/**
* The serialize function has to be extended by the new member variables.
* However this concerns only member variables that need to be exchanged
* between manager and workers. We do not need to add "local" member variables
* that are only needed on either manager or worker processes.
* For serialization of the parent class members, the class must be
* registered with the BOOST_CLASS_EXPORT macro.
*/
template<class Archive>
void serialize(Archive& ar, const unsigned int version) {
// handles serialization of base class
ar& boost::serialization::base_object<Task>(*this);

// add our new variables
ar& dt_;
ar& nsteps_;
ar& p_;
}
};

} // namespace combigrid

#endif /* TASKEXAMPLE_HPP_ */
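
Note on the serialize() comment above: the BOOST_CLASS_EXPORT registration it refers to is not part of this header. Below is a minimal sketch of how the registration could look in the example driver; the file name combi_example.cpp and the exact set of archive headers are assumptions, only the macro itself comes from Boost.Serialization.

// hypothetical registration in combi_example.cpp (not part of this pull request):
// the macro must appear in a translation unit after the archive headers have been
// included, so that Task pointers serialized polymorphically between manager and
// workers can be restored as TaskExample objects on the receiving side.
#include <boost/archive/binary_iarchive.hpp>
#include <boost/archive/binary_oarchive.hpp>
#include <boost/serialization/export.hpp>

#include "TaskExample.hpp"

BOOST_CLASS_EXPORT(combigrid::TaskExample)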