SMG2S
Sparse Matrix Generator with Given Spectrum
parVector.hpp
1 /*
2 MIT License
3 Copyright (c) 2019 Xinzhe WU @ Maison de la Simulation, France
4 Copyright (c) 2019-2022, Xinzhe Wu @ Simulation and Data Laboratory Quantum
5  Materials, Forschungszentrum Juelich GmbH.
6 
7 Permission is hereby granted, free of charge, to any person obtaining a copy
8 of this software and associated documentation files (the "Software"), to deal
9 in the Software without restriction, including without limitation the rights
10 to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
11 copies of the Software, and to permit persons to whom the Software is
12 furnished to do so, subject to the following conditions:
13 The above copyright notice and this permission notice shall be included in all
14 copies or substantial portions of the Software.
15 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 SOFTWARE.
22 */
23 
24 #ifndef __PAR_VECTOR_H__
25 #define __PAR_VECTOR_H__
26 
27 #include <parVector/parVectorMap.hpp>
28 #include <utils/utils.hpp>
29 #include <utils/MPI_DataType.hpp>
30 
32 
40 template<typename T, typename S>
41 class parVector{
42  private:
44  T *array;
46  S local_size;
48  S global_size;
50  MPI_Comm comm;
52  parVectorMap<S> index_map;
54  int MyPID;
56  int nProcs;
57 
58  public:
59  parVector();
61 
68  parVector(MPI_Comm ncomm, S lbound, S ubound);
70 
77  ~parVector();
78 
80  parVectorMap<S> GetVecMap(){return index_map;};
81 
83  S GetLowerBound(){return index_map.GetLowerBound();};
85  S GetUpperBound(){return index_map.GetUpperBound();};
87  S GetGlobalSize(){return global_size;};
89  S GetLocalSize(){return local_size;};
91  S GetRank(){return index_map.GetRank();};
92 
94 
99  T GetUpperNeighbor(S offset);
101 
106  T GetLowerNeighbor(S offset);
108 
113  T GetValue(S index)
114  {
115  auto lindex = index_map.Glob2Loc(index);
116  return array[lindex];
117  };
119 
122  T GetValueLocal(S lindex)
123  {
124  return array[lindex];
125  };
126 
128  T* GetArray(){return array;};
130  MPI_Comm GetComm(){return comm;};
132 
137  S Loc2Glob(S local_index){return index_map.Loc2Glob(local_index);};
139 
144  S Glob2Loc(S global_index){return index_map.Glob2Loc(global_index);};
145 
147 
150  void SetToValue(T value);
152  void SetToZero();
154 
158  void SetValueLocal(S row, T value);
160 
165  void SetValuesLocal(S nindex, S *rows, T *values);
167 
171  void SetValueGlobal(S index, T value);
173 
178  void SetValuesGlobal(S nindex, S *rows, T *values);
180 
184  void AddValueLocal(S row, T value);
186 
191  void AddValuesLocal(S nindex, S *rows, T *values);
193 
196  void VecAdd(parVector v);
198 
201  void VecScale(T scale);
203 
208  T VecDot(parVector v);
210 
213  void ReadExtVec(std::string spectrum);
215 
218  void writeToTxt(std::string file_name);
220 
223  void writeToTxtCmplx(std::string file_name);
224 
226  void VecView();
227 };
228 
229 template<typename T,typename S>
230 parVector<T,S>::parVector(){};
231 
232 
233 template<typename T,typename S>
234 parVector<T,S>::parVector(MPI_Comm ncomm, S lbound, S ubound)
235 {
236  MPI_Comm_dup(ncomm, &comm);
237  index_map = parVectorMap<S>(ncomm, lbound, ubound);
238  local_size = index_map.GetLocalSize();
239  global_size = index_map.GetGlobalSize();
240  array = new T[local_size];
241  MPI_Comm_rank(ncomm, &MyPID);
242  MPI_Comm_size(ncomm, &nProcs);
243 }
244 
245 template<typename T,typename S>
246 parVector<T,S>::parVector(parVectorMap<S> map)
247 {
248  index_map = map;
249  comm = index_map.GetCurrentComm();
250  local_size = index_map.GetLocalSize();
251  global_size = index_map.GetGlobalSize();
252  array = new T[local_size];
253  MPI_Comm_rank(comm, &MyPID);
254  MPI_Comm_size(comm, &nProcs);
255 }
256 
257 template<typename T,typename S>
258 parVector<T,S>::~parVector(){};
259 
260 
261 template<typename T, typename S>
262 void parVector<T,S>::SetToValue(T value)
263 {
264  for(S i= 0; i < local_size; i++) {
265  array[i] = value;
266  }
267 }
268 
269 template<typename T, typename S>
270 void parVector<T,S>::SetToZero()
271 {
272  T val = 0;
273  SetToValue(val);
274 }
275 
276 template<typename T, typename S>
277 void parVector<T,S>::VecView()
278 {
279  int r;
280  r = index_map.GetRank();
281  if (r == 0){
282  std::cout << "Parallel Vector View: " << std::endl << std::endl;
283  }
284  T *array = GetArray();
285  S global;
286  for(S i = 0; i < local_size; i++){
287  global = Loc2Glob(i);
288  std::cout << "[" << global << "]: " << array[i] << std::endl;
289  }
290 }
291 
292 template<typename T, typename S>
293 void parVector<T,S>::SetValueLocal(S row, T value)
294 {
295  if (row < local_size){
296  array[row] = value;
297  }
298 }
299 
300 template<typename T, typename S>
301 void parVector<T,S>::SetValuesLocal(S nindex, S *rows, T *values)
302 {
303  for(S i = 0; i < nindex; i++){
304  SetValueLocal(rows[i],values[i]);
305  }
306 }
307 
308 template<typename T, typename S>
309 void parVector<T,S>::SetValueGlobal(S index, T value)
310 {
311  S lower_bound = GetLowerBound();
312  S upper_bound = GetUpperBound();
313  if((index >= lower_bound) && (index < upper_bound)){
314  SetValueLocal(index_map.Glob2Loc(index), value);
315  }
316 }
317 
318 template<typename T, typename S>
319 void parVector<T,S>::SetValuesGlobal(S nindex, S *rows, T *values)
320 {
321  for(S i = 0; i < nindex; i++){
322  SetValueLocal(Glob2Loc(rows[i]),values[i]);
323  }
324 }
325 
326 template<typename T, typename S>
327 void parVector<T,S>::AddValueLocal(S row, T value)
328 {
329  if (row < local_size){
330  array[row] = array[row] + value;
331  }
332 }
333 
334 template<typename T, typename S>
335 void parVector<T,S>::AddValuesLocal(S nindex, S *rows, T *values)
336 {
337  for(S i = 0; i < nindex; i++){
338  AddValueLocal(rows[i],values[i]);
339  }
340 }
341 
342 template<typename T, typename S>
343 void parVector<T,S>::VecAdd(parVector v)
344 {
345  if(local_size != v.GetLocalSize()){
346  std::cout << "vector size not coherant" << std::endl;
347  }
348  else{
349  for(S i = 0; i < local_size; i++){
350  array[i] = array[i] + v.array[i];
351  }
352  }
353 }
354 
355 template<typename T, typename S>
356 void parVector<T,S>::VecScale(T scale)
357 {
358  for(S i = 0; i < local_size; i++){
359  array[i] = scale*array[i];
360  }
361 }
362 
363 template<typename T, typename S>
364 T parVector<T,S>::VecDot(parVector v)
365 {
366  T sum = 0;
367  for(S i = 0; i < local_size; i++){
368  sum += array[i]*v.array[i];
369  }
370  MPI_Allreduce(MPI_IN_PLACE, &sum, 1, getMPI_Type<T>(), MPI_SUM, comm);
371  return sum;
372 }
373 
374 
375 template<typename T, typename S>
376 void parVector<T, S>::ReadExtVec(std::string spectrum)
377 {
378  std::ifstream file(spectrum);
379  std::string line;
380 
381  S lower_bound = GetLowerBound();
382  S upper_bound = GetUpperBound();
383 
384  int size1;
385  size1 = sizeof(T) / sizeof(Base<T>);
386  S idx;
387  Base<T> in_vals[size1];
388  T val;
389 
390  while (std::getline(file,line)) {
391  //std::cout << line << std::endl;
392  idx = 0;
393  for(int i = 0; i < size1; i++){
394  in_vals[i] = 0.0;
395  }
396 
397  std::stringstream linestream ( line ) ;
398  linestream >> idx;
399 
400  for(int i = 0; i < size1; i++){
401  linestream >> in_vals[i];
402  }
403  idx = idx - 1;
404 
405  for(int i = 0; i < size1; i++){
406  reinterpret_cast<Base<T>(&)[size1]>(val)[i] = in_vals[i];
407  }
408 
409  if((idx >= lower_bound) && (idx < upper_bound)){
410  SetValueLocal(index_map.Glob2Loc(idx),val);
411  }
412  }
413 }
414 
415 template<typename T, typename S>
416 void parVector<T, S>::writeToTxt(std::string file_name)
417 {
418  std::string header;
419  std::string data;
420 
421  if( (sizeof(T)/sizeof(Base<T>) != 1) ){
422  try{
423  throw 505;
424  }catch(...){
425  std::cout << "SMG2S]> Caught Exception: for complex vector, please use writeToTxtCmplx" << std::endl;
426  }
427  }
428 
429  //generate header
430  header.append("%%SMG2S vector in real scalar\n");
431  std::string dim_info = std::to_string(global_size) + " " + std::to_string(global_size) + "\n";
432  header.append(dim_info);
433 
434  for(auto i = 0; i < local_size; i++){
435  data += std::to_string(index_map.Loc2Glob(i)+1) + " " + std::to_string(array[i]) + "\n";
436  }
437 
438  MPI_File fh;
439  MPI_Offset write_offset;
440  MPI_Offset text_size;
441  MPI_Offset *write_size_per_proc;
442  MPI_Status sts;
443 
444  write_size_per_proc = (MPI_Offset *)malloc(sizeof(MPI_Offset) * nProcs);
445 
446  text_size = data.size();
447 
448  MPI_Allgather(&text_size, 1, MPI_OFFSET, write_size_per_proc, 1, MPI_OFFSET, comm);
449 
450  write_offset = header.size();
451 
452  for (auto i = 0; i < MyPID; ++i) {
453  write_offset += write_size_per_proc[i];
454  }
455 
456 
457  MPI_File_delete (file_name.c_str(), MPI_INFO_NULL);
458  MPI_File_open(comm, file_name.c_str(), MPI_MODE_WRONLY | MPI_MODE_CREATE, MPI_INFO_NULL, &fh);
459 
460 
461  if (MyPID == 0) {
462  MPI_File_write_at(fh, 0, header.c_str(), header.size(), MPI_CHAR, &sts);
463  }
464 
465  MPI_File_write_at(fh, write_offset, data.c_str(), data.size(), MPI_CHAR, &sts);
466 
467  MPI_File_close(&fh);
468  free(write_size_per_proc);
469 }
470 
471 template<typename T, typename S>
472 void parVector<T, S>::writeToTxtCmplx(std::string file_name){
473  std::string header;
474  std::string data;
475 
476  if( (sizeof(T)/sizeof(Base<T>) != 2) ){
477  try{
478  throw 505;
479  }catch(...){
480  std::cout << "SMG2S]> Caught Exception: for real vector, please use writeToTxt" << std::endl;
481  }
482  }
483 
484  //generate header
485  header.append("%%SMG2S vector in complex scalar\n");
486  std::string dim_info = std::to_string(global_size) + " " + std::to_string(global_size) + " " + std::to_string(global_size) + "\n";
487  header.append(dim_info);
488 
489  for(auto i = 0; i < local_size; i++){
490  data += std::to_string(index_map.Loc2Glob(i)+1) + " " + std::to_string(array[i].real()) + " " + std::to_string(array[i].imag()) + "\n";
491  }
492 
493  MPI_File fh;
494  MPI_Offset write_offset;
495  MPI_Offset text_size;
496  MPI_Offset *write_size_per_proc;
497  MPI_Status sts;
498 
499  write_size_per_proc = (MPI_Offset *)malloc(sizeof(MPI_Offset) * nProcs);
500 
501  text_size = data.size();
502 
503  MPI_Allgather(&text_size, 1, MPI_OFFSET, write_size_per_proc, 1, MPI_OFFSET, comm);
504 
505  write_offset = header.size();
506 
507  for (auto i = 0; i < MyPID; ++i) {
508  write_offset += write_size_per_proc[i];
509  }
510 
511 
512  MPI_File_delete (file_name.c_str(), MPI_INFO_NULL);
513  MPI_File_open(comm, file_name.c_str(), MPI_MODE_WRONLY | MPI_MODE_CREATE, MPI_INFO_NULL, &fh);
514 
515 
516  if (MyPID == 0) {
517  MPI_File_write_at(fh, 0, header.c_str(), header.size(), MPI_CHAR, &sts);
518  }
519 
520  MPI_File_write_at(fh, write_offset, data.c_str(), data.size(), MPI_CHAR, &sts);
521 
522  MPI_File_close(&fh);
523  free(write_size_per_proc);
524 }
525 
526 
527 template<typename T, typename S>
528 T parVector<T,S>::GetUpperNeighbor(S offset){
529  MPI_Request rtypereq, stypereq;
530  MPI_Status typestat;
531  int up, down;
532 
533  if(MyPID != 0){
534  up = MyPID - 1;
535  }else{
536  up = nProcs - 1;
537  }
538 
539  if(MyPID != nProcs - 1){
540  down = MyPID + 1;
541  }else{
542  down = 0;
543  }
544 
545  T neighbor;
546 
547  try{
548  if(offset < 1 || offset > local_size){
549  throw 505;
550  }
551  }catch(...){
552  std::cout << "SMG2S]> Caught Exception: input parameter is invalid for GetUpperNeighbor" << std::endl;
553  }
554 
555  MPI_Isend(&array[local_size-offset], 1, getMPI_Type<T>(), down, 1, comm, &stypereq);
556  MPI_Irecv(&neighbor, 1, getMPI_Type<T>(), up, 1, comm, &rtypereq);
557 
558  MPI_Wait(&rtypereq,&typestat);
559 
560  return neighbor;
561 
562 }
563 
564 template<typename T, typename S>
565 T parVector<T,S>::GetLowerNeighbor(S offset){
566  MPI_Request rtypereq, stypereq;
567  MPI_Status typestat;
568  int up, down;
569 
570  if(MyPID != 0){
571  up = MyPID - 1;
572  }else{
573  up = nProcs - 1;
574  }
575 
576  if(MyPID != nProcs - 1){
577  down = MyPID + 1;
578  }else{
579  down = 0;
580  }
581 
582  T neighbor;
583 
584  try{
585  if(offset < 1 || offset > local_size){
586  throw 505;
587  }
588  }catch(...){
589  std::cout << "SMG2S]> Caught Exception: input parameter is invalid for GetLowerNeighbor" << std::endl;
590  }
591 
592  MPI_Isend(&array[offset-1], 1, getMPI_Type<T>(), up, 1, comm, &stypereq);
593  MPI_Irecv(&neighbor, 1, getMPI_Type<T>(), down, 1, comm, &rtypereq);
594 
595  MPI_Wait(&rtypereq,&typestat);
596 
597  return neighbor;
598 
599 }
600 #endif
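
A minimal usage sketch of the class defined above. It is not part of parVector.hpp; it assumes an MPI build of SMG2S with the same include layout as the #include directives at the top of this file, and the chosen sizes, bounds, and values are illustrative only.

#include <iostream>
#include <mpi.h>
#include <parVector/parVector.hpp>

int main(int argc, char **argv)
{
    MPI_Init(&argc, &argv);
    int rank, nprocs;
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &nprocs);

    // each proc owns the half-open global index range [lbound, ubound)
    int lbound = 4 * rank;
    int ubound = 4 * (rank + 1);

    parVector<double, int> v(MPI_COMM_WORLD, lbound, ubound);
    v.SetToValue(1.0);           // fill the local piece with ones
    v.SetValueGlobal(0, 5.0);    // stored only by the proc owning global index 0

    parVector<double, int> w(MPI_COMM_WORLD, lbound, ubound);
    w.SetToValue(2.0);

    double d = v.VecDot(w);      // local partial sums reduced with MPI_Allreduce
    if (rank == 0) std::cout << "v . w = " << d << std::endl;

    v.VecView();                 // each proc prints its entries with global indices
    MPI_Finalize();
    return 0;
}

Because SetValueGlobal silently ignores indices outside the local range [lbound, ubound), every proc can call it with the same arguments and only the owner stores the value.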
parVector::SetValueLocal
void SetValueLocal(S row, T value)
Set a value at a local index of the piece of the distributed vector stored on each MPI proc.
Definition: parVector.hpp:293
parVectorMap
A class which determines the way to distribute a vector across MPI procs.
Definition: parVectorMap.hpp:40
parVector::writeToTxtCmplx
void writeToTxtCmplx(std::string file_name)
Write a parVector with complex scalar entries to a local text file.
Definition: parVector.hpp:472
parVector::nProcs
int nProcs
Number of MPI procs within the working MPI communicator parVectorMap::comm.
Definition: parVector.hpp:56
parVector::GetRank
S GetRank()
Return parVector<S>::MyPID.
Definition: parVector.hpp:91
parVector::array
T * array
An array storing the local piece of the global vector on each MPI proc.
Definition: parVector.hpp:44
parVectorMap::GetCurrentComm
MPI_Comm GetCurrentComm()
Return parVectorMap<S>::comm.
Definition: parVectorMap.hpp:122
parVector::GetValue
T GetValue(S index)
Get a value of the vector at a given global index.
Definition: parVector.hpp:113
parVector::GetUpperBound
S GetUpperBound()
Return the upper_bound on each MPI proc.
Definition: parVector.hpp:85
parVector::MyPID
int MyPID
Rank of each MPI proc within the working MPI communicator parVectorMap::comm.
Definition: parVector.hpp:54
parVector::~parVector
~parVector()
A destructor.
Definition: parVector.hpp:258
parVector::SetValuesGlobal
void SetValuesGlobal(S nindex, S *rows, T *values)
Set multiple values at multiple global indices of the distributed vector.
Definition: parVector.hpp:319
parVector::ReadExtVec
void ReadExtVec(std::string spectrum)
Read a vector from a local text file.
Definition: parVector.hpp:376
parVector::GetUpperNeighbor
T GetUpperNeighbor(S offset)
For each MPI proc of rank `i`, get a value from the local vector stored on rank `i-1` (see the sketch at the end of this page).
Definition: parVector.hpp:528
parVector::comm
MPI_Comm comm
The working MPI Communicator.
Definition: parVector.hpp:50
parVector::SetToValue
void SetToValue(T value)
Set all the entries of a vector to the same value.
Definition: parVector.hpp:262
parVector::AddValueLocal
void AddValueLocal(S row, T value)
Add a value onto the entry at a given local index of the distributed vector.
Definition: parVector.hpp:327
parVector::global_size
S global_size
Global size of this distributed vector.
Definition: parVector.hpp:48
parVector::Loc2Glob
S Loc2Glob(S local_index)
Convert an index of the local vector on each MPI proc into its index in the global distributed vector.
Definition: parVector.hpp:137
parVector::writeToTxt
void writeToTxt(std::string file_name)
Write a parVector with real scalar entries to a local text file.
Definition: parVector.hpp:416
parVector::VecView
void VecView()
Display the vector in a distributed manner.
Definition: parVector.hpp:277
parVector::GetVecMap
parVectorMap< S > GetVecMap()
Return parVector::index_map.
Definition: parVector.hpp:80
parVector::local_size
S local_size
The number of elements of vector stored on each MPI proc.
Definition: parVector.hpp:46
parVector::GetLowerBound
S GetLowerBound()
Return the lower_bound on each MPI proc.
Definition: parVector.hpp:83
parVector::SetToZero
void SetToZero()
Set all the entries of a vector to zero.
Definition: parVector.hpp:270
parVector::SetValueGlobal
void SetValueGlobal(S index, T value)
Set a value at a given global index of the distributed vector.
Definition: parVector.hpp:309
parVector::GetArray
T * GetArray()
Get the pointer `*array` which stores the local piece of the vector on each MPI proc.
Definition: parVector.hpp:128
parVector::GetValueLocal
T GetValueLocal(S lindex)
Get a value of the vector at a given local index on each MPI proc.
Definition: parVector.hpp:122
parVector::GetGlobalSize
S GetGlobalSize()
Return parVector<S>::global_size.
Definition: parVector.hpp:87
parVector::Glob2Loc
S Glob2Loc(S global_index)
Convert an index of the global vector into its index in the local vector on each MPI proc.
Definition: parVector.hpp:144
parVector::GetLocalSize
S GetLocalSize()
Return parVector<S>::local_size.
Definition: parVector.hpp:89
parVector::SetValuesLocal
void SetValuesLocal(S nindex, S *rows, T *values)
Set multiple values at multiple local indices of the piece of the distributed vector on each MPI proc.
Definition: parVector.hpp:301
parVector
A class which defines a vector distributed across a 1D MPI grid.
Definition: parVector.hpp:41
parVector::VecAdd
void VecAdd(parVector v)
Add another vector to this vector.
Definition: parVector.hpp:343
parVector::GetLowerNeighbor
T GetLowerNeighbor(S offset)
For each MPI proc of rank `i`, get a value from the local vector stored on rank `i+1`.
Definition: parVector.hpp:565
parVector::VecScale
void VecScale(T scale)
Scale all the elements of a vector by `scale`.
Definition: parVector.hpp:356
parVector::index_map
parVectorMap< S > index_map
A parVectorMap object which describes the distribution scheme of a vector.
Definition: parVector.hpp:52
parVector::GetComm
MPI_Comm GetComm()
Get working MPI communicator.
Definition: parVector.hpp:130
parVector::AddValuesLocal
void AddValuesLocal(S nindex, S *rows, T *values)
Add multiple values at multiple local indices onto the corresponding entries of the distributed vector.
Definition: parVector.hpp:335
parVector::VecDot
T VecDot(parVector v)
Compute the dot product with another vector `v`.
Definition: parVector.hpp:364