/**
 * @file
 * @brief Contains the implementation of the TPZParFrontStructMatrix methods.
 */
#include "TPZFrontMatrix.h"
#include "TPZParFrontStructMatrix.h"
#include "TPZFrontStructMatrix.h"

#include "TPZFrontSym.h"
#include "TPZFrontNonSym.h"

#include "pzstrmatrix.h"
#include "pzfstrmatrix.h"

#include "pzgmesh.h"
#include "pzcmesh.h"
#include "pzsubcmesh.h"
#include "pzbndcond.h"

#include "pzanalysis.h"
#include "pzsolve.h"
#include "pzstepsolver.h"

#include "TPZParFrontMatrix.h"

#include "pzelementgroup.h"
#include "pzcondensedcompel.h"

#include "pzdxmesh.h"
#include <fstream>
using namespace std;

#include "pzelmat.h"

#include "TPZFileEqnStorage.h"
#include "pzlog.h"

#include "pz_pthread.h"

#ifdef LOG4CXX

static LoggerPtr logger(Logger::getLogger("pz.strmatrix.frontstructmatrix"));

#endif

/** @brief Semaphore which controls multiple threads */
pthread_mutex_t mutex = PTHREAD_MUTEX_INITIALIZER;
/** @brief Semaphore which controls threads assembling elements */
pthread_mutex_t mutex_element_assemble = PTHREAD_MUTEX_INITIALIZER;
/** @brief Semaphore which controls thread assembling global matrices */
pthread_mutex_t mutex_global_assemble = PTHREAD_MUTEX_INITIALIZER;

/** @brief Semaphore which controls condensed assembling */
pthread_cond_t condassemble = PTHREAD_COND_INITIALIZER;
/** @brief Semaphore which controls the stack-full condition */
pthread_cond_t stackfull = PTHREAD_COND_INITIALIZER;
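
// Together these primitives form a bounded producer-consumer scheme:
// ElementAssemble threads compute element matrices and push them on a shared
// stack, while the single GlobalAssemble thread consumes them in element order.
// condassemble signals "new matrices available"; stackfull signals "the stack
// dropped below fMaxStackSize, producers may continue".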

template<class front>
TPZParFrontStructMatrix<front>::TPZParFrontStructMatrix(TPZCompMesh *mesh) : TPZFrontStructMatrix<front>(mesh)
{
    fMaxStackSize = 500;
}

template<class front>
TPZParFrontStructMatrix<front>::TPZParFrontStructMatrix(const TPZParFrontStructMatrix &copy) : TPZFrontStructMatrix<front>(copy), fMaxStackSize(copy.fMaxStackSize)
{
}

template<class front>
TPZParFrontStructMatrix<front>::~TPZParFrontStructMatrix()
{
}

template<class front>
TPZStructMatrix * TPZParFrontStructMatrix<front>::Clone()
{
    TPZParFrontStructMatrix<front> *mat = new TPZParFrontStructMatrix<front>(*this);
    return mat;
}
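
// ElementAssemble is the producer loop run by each worker thread. Under
// mutex_element_assemble it claims the next element index (waiting on stackfull
// while the shared stack holds fMaxStackSize entries), computes the element
// stiffness outside any lock, and under mutex_global_assemble pushes the result
// and broadcasts condassemble to wake the global assembly thread.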

template<class front>
void *TPZParFrontStructMatrix<front>::ElementAssemble(void *t)
{
    TPZParFrontStructMatrix<front> *parfront = (TPZParFrontStructMatrix<front> *) t;

    TPZAdmChunkVector<TPZCompEl *> &elementvec = parfront->fMesh->ElementVec();

    while(parfront->fCurrentElement < parfront->fNElements) {

        //Lock a mutex and get an element number

        PZ_PTHREAD_MUTEX_LOCK(&mutex_element_assemble, "TPZParFrontStructMatrix<front>::ElementAssemble()");

        //Stack is full and process must wait here!
        // while, not if: pthread_cond_wait may wake spuriously, so re-check the predicate
        while(parfront->felnum.NElements()==parfront->fMaxStackSize)
        {
            /* cout << "Stack full" << endl;
             cout << "Waiting" << endl;
             cout.flush();*/
            //cout << "Mutex unlocked on Condwait" << endl;
#ifdef LOG4CXX
            if (logger->isDebugEnabled())
            {
                std::stringstream sout;
                sout << "Entering cond_wait because of stack overflow ";
                LOGPZ_DEBUG(logger,sout.str())
            }
#endif
            PZ_PTHREAD_COND_WAIT(&stackfull,&mutex_element_assemble,"TPZParFrontStructMatrix<front>::ElementAssemble()");
            //cout << "Mutex LOCKED leaving Condwait" << endl;

        }

        //cout << "Locking mutex_element_assemble" << endl;
        //cout.flush();
        int64_t local_element = parfront->fCurrentElement;
        if(local_element==parfront->fNElements)
        {
            PZ_PTHREAD_MUTEX_UNLOCK(&mutex_element_assemble, "TPZParFrontStructMatrix<front>::ElementAssemble()");
            return 0;
        }
        /* cout << "All element matrices assembled" << endl;
         return 0;
         }
         */
#ifdef LOG4CXX
        if (logger->isDebugEnabled())
        {
            std::stringstream sout;
            sout << "Computing element " << local_element;
            LOGPZ_DEBUG(logger,sout.str())
        }
#endif
        parfront->fCurrentElement++;
        //Unlock mutex and / or send a broadcast
        //cout << "Computing Element " << parfront->fCurrentElement << endl;
        //cout << "Unlocking mutex_element_assemble" << endl;
        //cout.flush();
        PZ_PTHREAD_MUTEX_UNLOCK(&mutex_element_assemble, "TPZParFrontStructMatrix<front>::ElementAssemble()");


        if(parfront->fElementOrder[local_element] < 0) continue;
        TPZCompEl *el = elementvec[parfront->fElementOrder[local_element]];
        if(!el) continue;
        // int dim = el->NumNodes();

        //Builds elements stiffness matrix
        TPZElementMatrix *ek = new TPZElementMatrix(parfront->fMesh,TPZElementMatrix::EK);
        TPZElementMatrix *ef = new TPZElementMatrix(parfront->fMesh,TPZElementMatrix::EF);

        el->CalcStiff(*ek, *ef);
        //Locks a mutex and adds element contribution to frontmatrix
        //if mutex is locked go to condwait waiting for a specific condition variable
        // this should be a different mutex -> mutexassemble

        PZ_PTHREAD_MUTEX_LOCK(&mutex_global_assemble, "TPZParFrontStructMatrix<front>::ElementGlobalAssemble()");
        //cout << "Locking mutex_global_assemble" << endl;
        //cout << "Pushing variables to the stack" << endl;
        //cout.flush();

        // put ek, ef and local_element on the stack
        parfront->felnum.Push(local_element);
        parfront->fekstack.Push(ek);
        parfront->fefstack.Push(ef);

#ifdef LOG4CXX
        if (logger->isDebugEnabled())
        {
            std::stringstream sout;
            sout << "Pushing element " << local_element << " on the stack, stack sizes " << parfront->felnum.NElements() << " " << parfront->fekstack.NElements() << " " << parfront->fefstack.NElements();
            LOGPZ_DEBUG(logger,sout.str())
        }
#endif
        // Another thread checks whether the stack contains the next element to assemble;
        // when it does not find it, it waits on condassemble.
        // That happens in the global assembly thread.
        // Once a new ek has been added to the stack,
        // broadcast to wake up the thread which performs the global assembly.

        //cout << "Unlocking mutex_global_assemble" << endl;
        //cout << "Broadcasting condassemble" << endl;
        //cout.flush();

        /* if(!(parfront->fCurrentElement%20)){
         cout << endl << "Computing " << parfront->fCurrentElement << " on thread " << pthread_self() << endl;
         cout << " " << (100*parfront->fCurrentElement/parfront->fNElements) << "% Elements computed" << " " << (100*parfront->fCurrentAssembled/parfront->fNElements) << "% Elements assembled" << endl;
         }
         cout << '*';
         cout.flush();
         */
        // Changed cond_broadcast to cond_signal
        // and inverted the order of the calls
        PZ_PTHREAD_COND_BROADCAST(&condassemble,"TPZParFrontStructMatrix<front>::CondAssemble()");
        PZ_PTHREAD_MUTEX_UNLOCK(&mutex_global_assemble,"TPZParFrontStructMatrix<front>::ElementGlobalAssemble()");

        // the assembly thread uses mutex_global_assemble;
        // AssembleElement(el, ek, ef, stiffness, rhs) is executed in that other thread

        if(parfront->fGuiInterface) if(parfront->fGuiInterface->AmIKilled()){
            break;
        }
    }// end of the element loop

#ifdef LOG4CXX
    if (logger->isDebugEnabled())
    {
        std::stringstream sout;
        sout << __PRETTY_FUNCTION__ << " Falling through";
        LOGPZ_DEBUG(logger,sout.str())
    }
#endif
    std::cout << __PRETTY_FUNCTION__ << " Falling through \n";

    return NULL;

}

#include "arglib.h"

clarg::argInt num_threads("-ntdec", "Number of threads to decompose in TPZParFrontStructMatrix.", 6);

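// GlobalAssemble is the single consumer thread. It walks the elements in
// assembly order, pops the matching element matrices from the shared stack
// (sleeping on condassemble until a producer delivers them), and feeds them to
// the frontal matrix via AssembleElement.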
template<class front>
void *TPZParFrontStructMatrix<front>::GlobalAssemble(void *t)
{
    TPZParFrontStructMatrix<front> *parfront = (TPZParFrontStructMatrix<front> *) t;

    TPZAdmChunkVector<TPZCompEl *> &elementvec = parfront->fMesh->ElementVec();
    while(parfront->fCurrentAssembled < parfront->fNElements) {

#ifndef USING_ATLAS
        const int unit = (parfront->fNElements/200 == 0 ? 1 : parfront->fNElements/200);
        if(!(parfront->fCurrentAssembled%unit)) cout << "*";
        cout.flush();
        if(!(parfront->fCurrentAssembled%(20*unit)) && parfront->fCurrentAssembled)
        {
            if(parfront->fCurrentElement!=parfront->fNElements){
                cout << " Element " << parfront->fCurrentElement << " " << (100*parfront->fCurrentElement/parfront->fNElements) << "% Elements computed " << (100*parfront->fCurrentAssembled/parfront->fNElements) << "% Elements assembled " << endl;
                cout.flush();
            }else{
                cout << " " << (100*parfront->fCurrentAssembled/parfront->fNElements) << "% Elements assembled " << endl;
                cout.flush();
            }
        }
#endif

        //Lock a mutex and get an element number
        int64_t local_element = parfront->fCurrentAssembled;
        parfront->fCurrentAssembled++;

        if(parfront->fElementOrder[local_element] < 0) continue;
        TPZCompEl *el = elementvec[parfront->fElementOrder[local_element]];
        if(!el) continue;
        TPZMaterial * mat = el->Material();
        if(mat)
        {
            int matid = mat->Id();
            if(parfront->ShouldCompute(matid) == false)
            {
                continue;
            }
        }
        else
        {
            TPZSubCompMesh *submesh = dynamic_cast<TPZSubCompMesh *> (el);
            TPZElementGroup *elgrp = dynamic_cast<TPZElementGroup *>(el);
            TPZCondensedCompEl *condel = dynamic_cast<TPZCondensedCompEl *>(el);
            if(!submesh && !elgrp && !condel)
            {
                continue;
            }
        }
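
        // The search below removes entry i from the unordered stacks by popping
        // the top entry and writing it back into slot i, so no shifting of the
        // remaining entries is needed; whenever the stack shrinks below
        // fMaxStackSize the waiting producers are woken through stackfull.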

        //Searches for next element
        int64_t i=0;
        int64_t aux = -1;
        TPZElementMatrix *ekaux = 0, *efaux = 0;
        PZ_PTHREAD_MUTEX_LOCK(&mutex_global_assemble,"TPZParFrontStructMatrix<front>::GlobalAssemble()");
#ifdef LOG4CXX
        if (logger->isDebugEnabled())
        {
            std::stringstream sout;
            sout << "Acquired mutex_global_assemble";
            LOGPZ_DEBUG(logger,sout.str())
        }
#endif
        while(aux != local_element){
            while(i < parfront->felnum.NElements()) {
                if(parfront->felnum[i] == local_element){
                    TPZElementMatrix *ektemp, *eftemp;
                    aux = parfront->felnum[i];
                    ekaux = parfront->fekstack[i];
                    efaux = parfront->fefstack[i];
                    int64_t itemp = parfront->felnum.Pop();
                    ektemp = parfront->fekstack.Pop();
                    eftemp = parfront->fefstack.Pop();
                    if(parfront->felnum.NElements()<parfront->fMaxStackSize){
                        PZ_PTHREAD_COND_BROADCAST(&stackfull,"TPZParFrontStructMatrix<front>::GlobalAssemble()");
                    }
                    if(i < parfront->felnum.NElements()) {

                        parfront->felnum[i] = itemp;
                        parfront->fekstack[i]=ektemp;
                        parfront->fefstack[i]=eftemp;
                    }
                    break;
                }
                i++;
            }
            if(aux!=local_element){
                i=0;
#ifdef LOG4CXX
                if (logger->isDebugEnabled())
                {
                    std::stringstream sout;
                    sout << "Waiting on condassemble";
                    LOGPZ_DEBUG(logger,sout.str())
                }
#endif
                PZ_PTHREAD_COND_WAIT(&condassemble, &mutex_global_assemble, "TPZParFrontStructMatrix<front>::GlobalAssemble()");
            }
        }
#ifdef LOG4CXX
        if (logger->isDebugEnabled())
        {
            std::stringstream sout;
            sout << "Unlocking mutex_global_assemble";
            LOGPZ_DEBUG(logger,sout.str())
        }
#endif
        PZ_PTHREAD_MUTEX_UNLOCK(&mutex_global_assemble,"TPZParFrontStructMatrix<front>::GlobalAssemble()");
        parfront->AssembleElement(el, *ekaux, *efaux, *parfront->fStiffness, *parfront->fRhs);
        if(parfront->fCurrentAssembled == parfront->fNElements)
        {
#ifdef STACKSTORAGE
            TPZParFrontMatrix<STATE, TPZStackEqnStorage<STATE>, front> *mat = dynamic_cast<TPZParFrontMatrix<STATE, TPZStackEqnStorage<STATE>, front>* > (parfront->fStiffness);
#else
            TPZParFrontMatrix<STATE, TPZFileEqnStorage<STATE>, front> *mat = dynamic_cast<TPZParFrontMatrix<STATE, TPZFileEqnStorage<STATE>, front>* > (parfront->fStiffness);
#endif
            // all elements assembled: signal the WriteFile thread that no more equations are coming
            mat->FinishWriting();
#ifdef LOG4CXX
            if (logger->isDebugEnabled())
            {
                std::stringstream sout;
                sout << "fFinishedComputing set to 1";
                LOGPZ_DEBUG(logger,sout.str())
            }
#endif
        }

        delete ekaux;
        delete efaux;

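        // If the GUI requested an abort, FinishWriting is still called so the
        // WriteFile thread can terminate before the consumer breaks out of the loop.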
        if(parfront->fGuiInterface) if(parfront->fGuiInterface->AmIKilled()){
#ifdef STACKSTORAGE
            TPZParFrontMatrix<STATE, TPZStackEqnStorage<STATE>, front> *mat = dynamic_cast<TPZParFrontMatrix<STATE, TPZStackEqnStorage<STATE>, front>* > (parfront->fStiffness);
#else
            TPZParFrontMatrix<STATE, TPZFileEqnStorage<STATE>, front> *mat = dynamic_cast<TPZParFrontMatrix<STATE, TPZFileEqnStorage<STATE>, front>* > (parfront->fStiffness);
#endif
            mat->FinishWriting();
            break;
        }


    }// end of the element loop
#ifdef LOG4CXX
    if (logger->isDebugEnabled())
    {
        std::stringstream sout;
        sout << "Terminating assemble thread";
        LOGPZ_DEBUG(logger,sout.str())
    }
#endif
    cout << "Matrix assembled\n";
    cout.flush();

    return 0;
}

template<class front>
void TPZParFrontStructMatrix<front>::Assemble(TPZMatrix<STATE> &matref, TPZFMatrix<STATE> &rhs, TPZAutoPointer<TPZGuiInterface> guiInterface)
{
    this->fGuiInterface = guiInterface;

#ifdef STACKSTORAGE
    TPZParFrontMatrix<STATE, TPZStackEqnStorage<STATE>, front> *mat = dynamic_cast<TPZParFrontMatrix<STATE, TPZStackEqnStorage<STATE>, front>* > (&matref);
#else
    TPZParFrontMatrix<STATE, TPZFileEqnStorage<STATE>, front> *mat = dynamic_cast<TPZParFrontMatrix<STATE, TPZFileEqnStorage<STATE>, front>* > (&matref);
#endif
    if(!mat)
    {
        cout << __PRETTY_FUNCTION__ << " we are in serious trouble : wrong type of matrix" << endl;
        DebugStop();
    }

    int nthreads;
    //cout << "Number of Threads " << endl;
    //cin >> nthreads;
    //fNThreads = nthreads;
    if (this->fNumThreads < 3) {
        this->fNumThreads = 3;
    }
    cout << "Number of Threads " << this->fNumThreads << endl;
    nthreads = this->fNumThreads;
    cout.flush();
    //int nthreads = fNThreads+1;

    pthread_t *allthreads = new pthread_t[nthreads];
    int *res = new int[nthreads];
    int i;

    TPZVec<int> numelconnected(this->fMesh->NEquations(),0);
    //TPZFrontMatrix<TPZStackEqnStorage, front> *mat = new TPZFrontMatrix<TPZStackEqnStorage, front>(fMesh->NEquations());

    //TPZFrontMatrix<TPZFileEqnStorage, front> *mat = new TPZFrontMatrix<TPZFileEqnStorage, front>(fMesh->NEquations());




    //TPZParFrontMatrix<TPZStackEqnStorage, front> *mat = new TPZParFrontMatrix<TPZStackEqnStorage, front>(this->fMesh->NEquations());
    fNElements = this->fMesh->NElements();

    this->OrderElement();

    // this->AdjustSequenceNumbering();

#ifdef LOG4CXX
    if (logger->isDebugEnabled()) {
        std::stringstream sout;
        this->fMesh->Print(sout);
        LOGPZ_DEBUG(logger, sout.str())
    }
#endif

    this->GetNumElConnected(numelconnected);

    mat->SetNumElConnected(numelconnected);

    if (num_threads.was_set())
        mat->GetFront().ProductTensorMTInitData(num_threads.get_value());
    else
        mat->GetFront().ProductTensorMTInitData(nthreads - 2); // initializes the multithreaded decomposition (comment out to deactivate; remember to comment out the Finish call as well)

    fStiffness = mat;
    fRhs = &rhs;
    fCurrentElement = 0;
    fCurrentAssembled = 0;

    /*
     * Trigger 'n' threads passing Assemble as argument
     */
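    // Thread layout used below:
    //   allthreads[0 .. nthreads-3] -> ElementAssemble (producers)
    //   allthreads[nthreads-2]      -> mat->WriteFile (writes decomposed equations to disk)
    //   allthreads[nthreads-1]      -> GlobalAssemble (single consumer)
    // This is why fNumThreads is forced to at least 3 above, and why the front's
    // tensor-product helper is given nthreads - 2 threads.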
    //pthread_create(&allthreads[fNThreads-1],NULL,this->GlobalAssemble, this);
    // try{
    res[nthreads-1] = PZ_PTHREAD_CREATE(&allthreads[nthreads-1], NULL,
                                        this->GlobalAssemble, this, __FUNCTION__);
    if(!res[nthreads-1]){
#ifdef VC
        cout << "GlobalAssemble Thread created Successfully "<< allthreads[nthreads-1].x << endl;
#else
        cout << "GlobalAssemble Thread created Successfully "<< allthreads[nthreads-1] << endl;
#endif
        cout.flush();
    }else{
#ifdef VC
        cout << "GlobalAssemble Thread Fail "<< allthreads[nthreads-1].x << endl;
#else
        cout << "GlobalAssemble Thread Fail "<< allthreads[nthreads-1] << endl;
#endif
        cout.flush();
        // DebugStop();
    }
    res[nthreads-2] = PZ_PTHREAD_CREATE(&allthreads[nthreads-2], NULL,
                                        mat->WriteFile, mat, __FUNCTION__);
    if(!res[nthreads-2]){
#ifdef VC
        cout << "WriteFile Thread created Successfully "<< allthreads[nthreads-2].x << endl;
#else
        cout << "WriteFile Thread created Successfully "<< allthreads[nthreads-2] << endl;
#endif
        cout.flush();
    }else{
#ifdef VC
        cout << "WriteFile Thread Fail "<< allthreads[nthreads-2].x << endl;
#else
        cout << "WriteFile Thread Fail "<< allthreads[nthreads-2] << endl;
#endif
        cout.flush();
        // DebugStop();
    }

    for(i=0;i<nthreads-2;i++){
        res[i] = PZ_PTHREAD_CREATE(&allthreads[i], NULL,
                                   this->ElementAssemble, this, __FUNCTION__);
        if(!res[i]){
#ifdef VC
            cout << "ElementAssemble Thread "<< i+1 << " created Successfully "<< allthreads[i].x << endl;
#else
            cout << "ElementAssemble Thread "<< i+1 << " created Successfully "<< allthreads[i] << endl;
#endif
            cout.flush();
        }else{
            cout << "Error " << res[i] << "\t";
#ifdef VC
            cout << "ElementAssemble Thread "<< i+1 << " Fail " << allthreads[i].x << endl;
#else
            cout << "ElementAssemble Thread "<< i+1 << " Fail " << allthreads[i] << endl;
#endif
            cout.flush();
        }
    }
    for(i=0;i<nthreads;i++) {
        PZ_PTHREAD_JOIN(allthreads[i], NULL, __FUNCTION__);
    }

    delete[] allthreads;// fThreadUsed, fDec;
    delete[] res;
    mat->GetFront().ProductTensorMTFinish(); // ends the multithreaded decomposition (comment out to deactivate; remember to comment out the initialization as well)
    fStiffness = 0;
    fRhs = 0;

}
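
// The routine below is a self-contained test driver: it builds a
// single-quadrilateral mesh, reads refinement and interpolation order from
// stdin, and runs the parallel frontal solver through TPZAnalysis. It is
// compiled only when STATE_COMPLEX is not defined.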


#ifndef STATE_COMPLEX
#include "pzmat2dlin.h"

template<class front>
int TPZParFrontStructMatrix<front>::main()
{
    int refine=1;
    int order=1;

    TPZGeoMesh gmesh;
    TPZCompMesh cmesh(&gmesh);
    double coordstore[4][3] = {{0.,0.,0.},{1.,0.,0.},{1.,1.,0.},
        {0.,1.,0.}};

    int i,j;
    TPZVec<REAL> coord(3,0.);
    for(i=0; i<4; i++) {
        // initialize the node coordinates in a vector
        for (j=0; j<3; j++) coord[j] = coordstore[i][j];

        // identify a position in the vector where we can store
        // this vector

        // initialize the node data: gmesh.NodeVec()[i].Initialize(i,coord,gmesh);
    }
    int el;
    //TPZGeoEl *gel;
    for(el=0; el<1; el++) {

        // initialize the node indices
        TPZVec<int64_t> indices(4);
        for(i=0; i<4; i++) indices[i] = i;
        // the constructor itself will insert the element into the mesh
        int64_t index;
        /*gel = */gmesh.CreateGeoElement(EQuadrilateral,indices,1,index);
    }
    gmesh.BuildConnectivity();

    TPZVec<TPZGeoEl *> subel;
    //gel->Divide(subel);



    cout << "Refinement ";
    cin >> refine;
    cout << endl;
    DebugStop();
    //UniformRefine(refine,gmesh);


    TPZMat2dLin *mat2d = new TPZMat2dLin(1);
    TPZFMatrix<STATE> xk(1,1,1.),xc(1,2,0.),xf(1,1,1.);
    mat2d->SetMaterial(xk,xc,xf);
    TPZMaterial * meumat = mat2d;
    cmesh.InsertMaterialObject(meumat);

    TPZFMatrix<STATE> val1(1,1,0.),val2(1,1,0.);
    TPZMaterial * bnd = meumat->CreateBC(meumat,-4,0,val1,val2);
    cmesh.InsertMaterialObject(bnd);



    cout << "Interpolation order ";
    cin >> order;
    cout << endl;

    // TPZCompEl::gOrder = order;
    cmesh.SetDefaultOrder(order);

    cmesh.AutoBuild();
    // cmesh.AdjustBoundaryElements();
    cmesh.InitializeBlock();

    ofstream output("outputPar.dat");
    // ofstream output2("outputNon.dat");
    cmesh.Print(output);
    TPZAnalysis an(&cmesh,true,output);
    // TPZAnalysis an2(&cmesh,output);

    TPZVec<int> numelconnected(cmesh.NEquations(),0);
    int64_t ic;
    //cout << "Number of equations -> " << cmesh.NEquations() << endl;
    //cout.flush();

    ofstream out("cmeshBlock_out.txt");
    // cmesh.Print(out);
    // cmesh.Block().Print("Block",out);
    for(ic=0; ic<cmesh.ConnectVec().NElements(); ic++) {
        TPZConnect &cn = cmesh.ConnectVec()[ic];
        if(cn.HasDependency() || cn.IsCondensed()) continue;
        int64_t seqn = cn.SequenceNumber();
        if(seqn < 0) continue;
        int64_t firsteq = cmesh.Block().Position(seqn);
        int64_t lasteq = firsteq+cmesh.Block().Size(seqn);
        int64_t ind;
        int temp = cmesh.ConnectVec()[ic].NElConnected();
        for(ind=firsteq;ind<lasteq;ind++) {
            numelconnected[ind] = temp; //cmesh.ConnectVec()[ic].NElConnected();
        }
    }
    // //cout << "nequations " << numelconnected.NElements();
    // for(ic=0;ic<numelconnected.NElements(); ic++) //cout << numelconnected[ic] <<' ';
    // //cout << endl;
    // //cout.flush();

    // TPZFrontMatrix<TPZFileEqnStorage, TPZFrontNonSym> *mat = new TPZFrontMatrix<TPZFileEqnStorage, TPZFrontNonSym>(cmesh.NEquations());
    //TPZFrontMatrix<TPZStackEqnStorage, TPZFrontNonSym> *mat = new TPZFrontMatrix<TPZStackEqnStorage, TPZFrontNonSym>(cmesh.NEquations());
    //TPZFrontMatrix<TPZStackEqnStorage> *mat = new TPZFrontMatrix<TPZStackEqnStorage>(cmesh.NEquations());

    TPZParFrontStructMatrix mat(&cmesh);

    // TPZFStructMatrix mat2(&cmesh);
    // mat->SetNumElConnected(numelconnected);
    //mat = CreateAssemble();
    int threads;
    cout << "Number of Threads ";
    cin >> threads;
    cout << endl;

    mat.SetNumThreads(threads);
    //mat.SetNumberOfThreads(1);

    an.SetStructuralMatrix(mat);
    // an2.SetStructuralMatrix(mat2);

    TPZStepSolver<STATE> sol;
    // sol.SetDirect(ELU);
    sol.SetDirect(ECholesky);
    // TPZStepSolver sol2;
    // sol2.SetDirect(ECholesky);
    // sol.SetDirect(ELU);


    an.SetSolver(sol);
    // an2.SetSolver(sol2);
    // mat->SetNumElConnected(numelconnected);
    // mat->SetFileName("longhin.bin");
    // an.Solver().SetDirect(ELU);
    // mat->FinishWriting();
    // mat->SetFileName('r',"longhin.bin");
    // //cout << "AQUI 1" << endl;
    an.Run(output);
    an.Print("solution of frontal solver", output);
    // //cout << "AQUI 2" << endl;
    // an2.Run(output2);
    // an2.Print("solution of frontal solver", output2);
    /*
     TPZVec<char *> scalnames(1);
     scalnames[0] = "state";

     TPZVec<char *> vecnames(0);

     TPZDXGraphMesh graph(&cmesh,2,meumat,vecnames,scalnames);
     ofstream *dxout = new ofstream("poisson.dx");
     graph.SetOutFile(*dxout);
     graph.SetResolution(0);

     //an.DefineGraphMesh(2, scalnames, vecnames, plotfile);
     //an.Print("FEM SOLUTION ",output);
     //an.PostProcess(1);
     int istep = 0,numstep=1;

     graph.DrawMesh(numstep+1);
     graph.DrawSolution(0,0);

     TPZAnalysis an2(&cmesh,output);
     TPZFMatrix<STATE> *full = new TPZFMatrix(cmesh.NEquations(),cmesh.NEquations(),0.);
     an2.SetMatrix(full);
     an2.Solver().SetDirect(ELU);
     an2.Run(output);
     an2.Print("solution of full matrix", output);

     // full->Print("full decomposed matrix");
     */
    output.flush();
    cout.flush();
    return 0;

}
#endif
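
// CreateAssemble sizes the matrix by the active equations of the equation
// filter, chooses stack- or file-based equation storage depending on
// STACKSTORAGE, propagates the requested decomposition type to the front,
// and then runs the threaded Assemble defined above.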

template<class front>
TPZMatrix<STATE> * TPZParFrontStructMatrix<front>::CreateAssemble(TPZFMatrix<STATE> &rhs, TPZAutoPointer<TPZGuiInterface> guiInterface)
{

    //TPZFrontMatrix<TPZStackEqnStorage, front> *mat = new TPZFrontMatrix<TPZStackEqnStorage, front>(fMesh->NEquations());

    //TPZFrontMatrix<TPZFileEqnStorage, front> *mat = new TPZFrontMatrix<TPZFileEqnStorage, front>(fMesh->NEquations());
    int64_t neq = this->fEquationFilter.NActiveEquations();

    //
#ifdef STACKSTORAGE
    TPZParFrontMatrix<STATE, TPZStackEqnStorage<STATE>, front> *mat = new TPZParFrontMatrix<STATE, TPZStackEqnStorage<STATE>, front>(neq);
#else
    TPZParFrontMatrix<STATE, TPZFileEqnStorage<STATE>, front> *mat = new TPZParFrontMatrix<STATE, TPZFileEqnStorage<STATE>, front>(neq);
#endif
    if (this->fDecomposeType != ENoDecompose)
    {
        mat->GetFront().SetDecomposeType(this->fDecomposeType);
    }
    rhs.Redim(neq,1);

    Assemble(*mat,rhs,guiInterface);
    return mat;

}


template<class TVar>
class TPZFrontSym;
template<class TVar>
class TPZFrontNonSym;

template class TPZParFrontStructMatrix<TPZFrontSym<STATE> >;
template class TPZParFrontStructMatrix<TPZFrontNonSym<STATE> >;