distribute.h
67 lines (56 loc) · 1.68 KB
/*
Developed by Sandeep Sharma and Garnet K.-L. Chan, 2012
Copyright (c) 2012, Garnet K.-L. Chan
This program is integrated in Molpro with the permission of
Sandeep Sharma and Garnet K.-L. Chan
*/
#ifndef SPIN_DISTRIBUTE_HEADER_H
#define SPIN_DISTRIBUTE_HEADER_H
#include <iostream>
#include <communicate.h>
#include "timer.h"
#include "pario.h"
#ifndef SERIAL
#include <boost/mpi/communicator.hpp>
#include <boost/mpi.hpp>
#endif
#include <vector>
class DiagonalMatrix;
namespace SpinAdapted
{
class StackSparseMatrix;
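// Stack-memory helpers (defined outside this header) intended to bracket
// threaded regions: split the global stack workspace per thread, then merge
// it back; see the usage sketch after accumulateMultiThread below.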
void SplitStackmem();
void MergeStackmem();
// Give each of MAX_THRD threads its own working copy of *op.  With a single
// thread, op_array simply aliases op; otherwise a fresh array is allocated,
// slot 0 is assigned from *op and the remaining slots are initialised from
// *op via deepClearCopy.
template<class T> void initiateMultiThread(T* op, T* &op_array, int MAX_THRD)
{
  if (MAX_THRD == 1) {
    op_array = op;
  }
  else if (MAX_THRD > 1) {
    op_array = new T[MAX_THRD];
    op_array[0] = *op;
    for (int i = 1; i < MAX_THRD; i++)
      op_array[i].deepClearCopy(*op);
  }
}
// Fold the per-thread copies back into slot 0 and release them.  Nothing
// needs to be done for a single thread, since op_array aliases op.
template <class T> void accumulateMultiThread(T* op, T* &op_array, int MAX_THRD)
{
  if (MAX_THRD == 1)
    return;
  else { // only multithreaded
    for (int i = MAX_THRD-1; i > 0; i--) {
      ScaleAdd(1.0, op_array[i], op_array[0]);  // op_array[0] += op_array[i]
      op_array[i].deallocate();
    }
    delete [] op_array;
  }
}
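/*
  Minimal usage sketch of the helpers above (illustrative only: the operator
  type, the omp_get_* calls and doLocalWork are assumptions, not part of this
  header):

    StackSparseMatrix op;                          // shared result
    StackSparseMatrix* op_array = 0;
    int nthrd = omp_get_max_threads();

    SplitStackmem();                               // per-thread stack regions
    initiateMultiThread(&op, op_array, nthrd);     // per-thread copies
    #pragma omp parallel
    doLocalWork(op_array[omp_get_thread_num()]);   // hypothetical worker
    accumulateMultiThread(&op, op_array, nthrd);   // sum copies into slot 0
    MergeStackmem();                               // restore the single stack
*/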
// Accumulate `component` across MPI processes; the SERIAL build keeps the
// same interface.  Definitions live in the corresponding .C file.
#ifndef SERIAL
void distributedaccumulate(DiagonalMatrix& component);
void distributedaccumulate(SpinAdapted::StackSparseMatrix& component);
#else
void distributedaccumulate(DiagonalMatrix& component);
void distributedaccumulate(SpinAdapted::StackSparseMatrix& component);
#endif
} // namespace SpinAdapted
#endif // SPIN_DISTRIBUTE_HEADER_H