// pp_array.hpp (ESyS-Particle 4.0.1)

/////////////////////////////////////////////////////////////
//                                                         //
// Copyright (c) 2003-2011 by The University of Queensland //
// Earth Systems Science Computational Centre (ESSCC)      //
// http://www.uq.edu.au/esscc                              //
//                                                         //
// Primary Business: Brisbane, Queensland, Australia       //
// Licensed under the Open Software License version 3.0    //
// http://www.opensource.org/licenses/osl-3.0.php          //
//                                                         //
/////////////////////////////////////////////////////////////

#include "Foundation/console.h"
#include <iostream>
#include <sstream>
#include <stdexcept>

#ifdef __INTEL_COMPILER
#include <mathimf.h>
#else
#include <math.h>
#endif

using std::cout;
using std::cerr;
using std::endl;
using std::flush;

//--- TML includes ---
#include "ntable/src/nt_slab.h"

//--- STL includes ---
#include <utility>

using std::pair;
using std::make_pair;

// message tag used for all boundary-exchange communication
template<typename T>
const int ParallelParticleArray<T>::m_exchg_tag=42;

template<typename T>
ParallelParticleArray<T>::ParallelParticleArray(TML_Comm *comm,
                                                const std::vector<unsigned int> &dims,
                                                const Vec3 &min, const Vec3 &max,
                                                double range, double alpha)
  : AParallelParticleArray(comm, dims), m_nt(NULL)
{
  //- get own process coords
  vector<int> pcoords=m_comm.get_coords();
  //- initialize ntable
  // get global number of cells
  int nx_global,ny_global,nz_global;
  nx_global=lrint((max[0]-min[0])/range);
  ny_global=lrint((max[1]-min[1])/range);
  nz_global=lrint((max[2]-min[2])/range);
  // the domain size must be an integer multiple of the search range
  if ((fabs(double(nx_global)-(max[0]-min[0])/range) > 1e-6) ||
      (fabs(double(ny_global)-(max[1]-min[1])/range) > 1e-6) ||
      (fabs(double(nz_global)-(max[2]-min[2])/range) > 1e-6)){
    m_nt=NULL;
    std::stringstream msg;
    msg << "size doesn't fit range" << endl;
    msg << "diff x : " << double(nx_global)-(max[0]-min[0])/range << endl;
    msg << "diff y : " << double(ny_global)-(max[1]-min[1])/range << endl;
    msg << "diff z : " << double(nz_global)-(max[2]-min[2])/range << endl;
    console.Error() << msg.str() << "\n";
    throw std::runtime_error(msg.str());
  } else {
    // calc local min. cell, considering overlap
    int nx_min=((nx_global*pcoords[0])/m_comm.get_dim(0))-1;
    int ny_min=((ny_global*pcoords[1])/m_comm.get_dim(1))-1;
    int nz_min=((nz_global*pcoords[2])/m_comm.get_dim(2))-1;
    // calc local number of cells, considering overlap
    int nx=(((nx_global*(pcoords[0]+1))/m_comm.get_dim(0))-nx_min)+1;
    int ny=(((ny_global*(pcoords[1]+1))/m_comm.get_dim(1))-ny_min)+1;
    int nz=(((nz_global*(pcoords[2]+1))/m_comm.get_dim(2))-nz_min)+1;
    // init local neighbor table
    m_nt=new NeighborTable<T>(nx,ny,nz,range,alpha,min,nx_min,ny_min,nz_min);
  }
  // init time stamp
  m_timestamp=0;
}
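
// Construction sketch (illustrative only; the particle type "CParticle"
// and the communicator setup are hypothetical stand-ins, not defined in
// this file). Note that (max-min)/range must be integral in each
// dimension to within 1e-6, or the constructor throws:
//
//   std::vector<unsigned int> dims(3);
//   dims[0]=2; dims[1]=1; dims[2]=1;            // 2x1x1 process grid
//   Vec3 min(0.0,0.0,0.0), max(10.0,10.0,10.0);
//   ParallelParticleArray<CParticle> ppa(&comm, dims, min, max,
//                                        1.0,    // range -> 10 cells/dim
//                                        0.25);  // alpha, forwarded to NeighborTable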

template<typename T>
ParallelParticleArray<T>::ParallelParticleArray(TML_Comm *comm,
                                                const vector<unsigned int> &dims,
                                                const vector<bool> &circ,
                                                const Vec3 &min, const Vec3 &max,
                                                double range, double alpha)
  : AParallelParticleArray(comm,dims,circ), m_nt(NULL)
{
  //- get own process coords
  vector<int> pcoords=m_comm.get_coords();
  //- initialize ntable
  // get global number of cells
  int nx_global,ny_global,nz_global;
  nx_global=lrint((max[0]-min[0])/range);
  ny_global=lrint((max[1]-min[1])/range);
  nz_global=lrint((max[2]-min[2])/range);
  // the domain size must be an integer multiple of the search range
  if ((fabs(double(nx_global)-(max[0]-min[0])/range) > 1e-6) ||
      (fabs(double(ny_global)-(max[1]-min[1])/range) > 1e-6) ||
      (fabs(double(nz_global)-(max[2]-min[2])/range) > 1e-6)){
    m_nt=NULL;
    std::stringstream msg;
    msg << "size doesn't fit range" << endl;
    msg << "diff x : " << double(nx_global)-(max[0]-min[0])/range << endl;
    msg << "diff y : " << double(ny_global)-(max[1]-min[1])/range << endl;
    msg << "diff z : " << double(nz_global)-(max[2]-min[2])/range << endl;
    throw std::runtime_error(msg.str());
  } else {
    // calc local min. cell, considering overlap
    int nx_min=((nx_global*pcoords[0])/m_comm.get_dim(0))-1;
    int ny_min=((ny_global*pcoords[1])/m_comm.get_dim(1))-1;
    int nz_min=((nz_global*pcoords[2])/m_comm.get_dim(2))-1;
    // calc local number of cells, considering overlap
    int nx=(((nx_global*(pcoords[0]+1))/m_comm.get_dim(0))-nx_min)+1;
    int ny=(((ny_global*(pcoords[1]+1))/m_comm.get_dim(1))-ny_min)+1;
    int nz=(((nz_global*(pcoords[2]+1))/m_comm.get_dim(2))-nz_min)+1;
    // init local neighbortable
    m_nt=new NeighborTable<T>(nx,ny,nz,range,alpha,min,nx_min,ny_min,nz_min);
    // setup circular shift values
    m_xshift=(max-min).X();
    m_yshift=(max-min).Y();
    m_zshift=(max-min).Z();
    // setup circular edges
    m_circ_edge_x_down=circ[0] && (pcoords[0]==0);
    m_circ_edge_x_up=circ[0] && (pcoords[0]==m_comm.get_dim(0)-1);
  }
  // init time stamp
  m_timestamp=0;
}
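
// As above, but with periodic (circular) boundaries. A minimal sketch
// under the same hypothetical setup; note that rebuild() below only
// implements the circular exchange in the x direction:
//
//   std::vector<bool> circ(3, false);
//   circ[0] = true;                             // periodic in x
//   ParallelParticleArray<CParticle> ppa(&comm, dims, circ,
//                                        min, max, 1.0, 0.25);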

template<typename T>
ParallelParticleArray<T>::~ParallelParticleArray()
{
  if(m_nt!=NULL) delete m_nt;
}

// insert a single particle into the local neighbor table
template<typename T>
void ParallelParticleArray<T>::insert(const T& p)
{
  m_nt->insert(p);
}

// insert a vector of particles into the local neighbor table
template<typename T>
void ParallelParticleArray<T>::insert(const vector<T>& vp)
{
  for(typename vector<T>::const_iterator iter=vp.begin();
      iter!=vp.end();
      iter++){
    m_nt->insert(*iter);
  }
}

// check if a position is within the inner (non-overlap) part of the
// local area
template<typename T>
bool ParallelParticleArray<T>::isInInner(const Vec3& pos)
{
  return m_nt->isInInner(pos);
}

// get a pointer to the particle with the given ID, or NULL if it is
// not stored locally
template<typename T>
T* ParallelParticleArray<T>::getParticlePtrByIndex(int id)
{
  return m_nt->ptr_by_id(id);
}

// get a pointer to the particle nearest to the given position
template<typename T>
T* ParallelParticleArray<T>::getParticlePtrByPosition(const Vec3& pos)
{
  return m_nt->getNearestPtr(pos);
}

// rebuild the local neighbor table and re-exchange the boundary
// (overlap) particles with the neighboring workers
template<typename T>
void ParallelParticleArray<T>::rebuild()
{
  // rebuild locally
  m_nt->build();

  //- exchange boundary particles
  NTSlab<T> up_slab;
  NTSlab<T> down_slab;
  bool circ_edge=false;

  // x-dir (there is at least one dimension)
  if(m_comm.get_dim(0)>1){
    // clean out boundary slabs
    NTSlab<T> up_boundary_slab=m_nt->yz_slab(m_nt->xsize()-1);
    up_boundary_slab.erase(up_boundary_slab.begin(),up_boundary_slab.end());
    NTSlab<T> down_boundary_slab=m_nt->yz_slab(0);
    down_boundary_slab.erase(down_boundary_slab.begin(),down_boundary_slab.end());

    // synchronize
    m_comm.barrier();

    // define transfer slabs
    up_slab=m_nt->yz_slab(m_nt->xsize()-2);
    down_slab=m_nt->yz_slab(1);

    // upstream
    if(m_circ_edge_x_up){ // circ. bdry
      // copy particles from transfer slab into buffer
      vector<T> buffer;
      for(typename NTSlab<T>::iterator iter=up_slab.begin();
          iter!=up_slab.end();
          iter++){
        buffer.push_back(*iter);
      }
      // shift particles in buffer by circular shift value
      for(typename vector<T>::iterator iter=buffer.begin();
          iter!=buffer.end();
          iter++){
        iter->setCircular(Vec3(-1.0*m_xshift,0.0,0.0));
      }
      m_comm.shift_cont_packed(buffer,*m_nt,0,1,m_exchg_tag);
    } else {
      m_comm.shift_cont_packed(up_slab,*m_nt,0,1,m_exchg_tag);
    }
    // downstream
    if(m_circ_edge_x_down){ // circ. bdry
      // copy particles from transfer slab into buffer
      vector<T> buffer;
      for(typename NTSlab<T>::iterator iter=down_slab.begin();
          iter!=down_slab.end();
          iter++){
        buffer.push_back(*iter);
      }
      // shift particles in buffer by circular shift value
      for(typename vector<T>::iterator iter=buffer.begin();
          iter!=buffer.end();
          iter++){
        iter->setCircular(Vec3(m_xshift,0.0,0.0));
      }
      m_comm.shift_cont_packed(buffer,*m_nt,0,-1,m_exchg_tag);
    } else {
      m_comm.shift_cont_packed(down_slab,*m_nt,0,-1,m_exchg_tag);
    }
  }
  // y-dir
  if(m_comm.get_dim(1)>1){
    // clean out boundary slabs
    NTSlab<T> up_boundary_slab=m_nt->xz_slab(m_nt->ysize()-1);
    up_boundary_slab.erase(up_boundary_slab.begin(),up_boundary_slab.end());
    NTSlab<T> down_boundary_slab=m_nt->xz_slab(0);
    down_boundary_slab.erase(down_boundary_slab.begin(),down_boundary_slab.end());

    // synchronize
    m_comm.barrier();

    // define transfer slabs
    up_slab=m_nt->xz_slab(m_nt->ysize()-2);
    down_slab=m_nt->xz_slab(1);
    if(!circ_edge){ // normal boundary
      // upstream
      m_comm.shift_cont_packed(up_slab,*m_nt,1,1,m_exchg_tag);
      // downstream
      m_comm.shift_cont_packed(down_slab,*m_nt,1,-1,m_exchg_tag);
    } else { // circular edge -> buffer & shift received particles
      cout << "circular y shift not implemented" << endl;
    }
  }
  // z-dir
  if(m_comm.get_dim(2)>1){
    // clean out boundary slabs
    NTSlab<T> up_boundary_slab=m_nt->xy_slab(m_nt->zsize()-1);
    up_boundary_slab.erase(up_boundary_slab.begin(),up_boundary_slab.end());
    NTSlab<T> down_boundary_slab=m_nt->xy_slab(0);
    down_boundary_slab.erase(down_boundary_slab.begin(),down_boundary_slab.end());

    // define transfer slabs
    up_slab=m_nt->xy_slab(m_nt->zsize()-2);
    down_slab=m_nt->xy_slab(1);
    if(!circ_edge){ // normal boundary
      // upstream
      m_comm.shift_cont_packed(up_slab,*m_nt,2,1,m_exchg_tag);
      // downstream
      m_comm.shift_cont_packed(down_slab,*m_nt,2,-1,m_exchg_tag);
    } else { // circular edge -> buffer & shift received particles
      cout << "circular z shift not implemented" << endl;
    }
  }
  // update timestamp
  m_timestamp++;
}
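
// Typical driver pattern (sketch): insert particles locally, then call
// rebuild() to re-sort them into cells and refresh the boundary slabs.
// Since rebuild() uses barriers and shifts, all workers must call it:
//
//   ppa.insert(local_particles);   // std::vector<CParticle>, hypothetical
//   ppa.rebuild();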

// exchange a per-particle field between the boundary slabs of
// neighboring workers: rdf reads the value on the sending side,
// wrtf writes it on the receiving side
template<typename T>
template<typename P>
void ParallelParticleArray<T>::exchange(P (T::*rdf)(),void (T::*wrtf)(const P&))
{
  // x-dir (there is at least one dimension)
  if(m_comm.get_dim(0)>1){
    // upstream
    exchange_single(rdf,wrtf,m_nt->yz_slab(m_nt->xsize()-2),m_nt->yz_slab(0),0,1);
    // downstream
    exchange_single(rdf,wrtf,m_nt->yz_slab(1),m_nt->yz_slab(m_nt->xsize()-1),0,-1);
  }
  // y-dir
  if(m_comm.get_dim(1)>1){
    // upstream
    exchange_single(rdf,wrtf,m_nt->xz_slab(m_nt->ysize()-2),m_nt->xz_slab(0),1,1);
    // downstream
    exchange_single(rdf,wrtf,m_nt->xz_slab(1),m_nt->xz_slab(m_nt->ysize()-1),1,-1);
  }
  // z-dir
  if(m_comm.get_dim(2)>1){
    // upstream
    exchange_single(rdf,wrtf,m_nt->xy_slab(m_nt->zsize()-2),m_nt->xy_slab(0),2,1);
    // downstream
    exchange_single(rdf,wrtf,m_nt->xy_slab(1),m_nt->xy_slab(m_nt->zsize()-1),2,-1);
  }
}
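
// Exchange sketch: the member-function pointers select which field is
// read on the sending side and written on the receiving side. The
// accessor names are hypothetical; rdf must be non-const and take no
// arguments, wrtf must take a const reference of the same type:
//
//   ppa.exchange(&CParticle::getDisplacement,   // P (T::*rdf)()
//                &CParticle::setDisplacement);  // void (T::*wrtf)(const P&)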

// exchange a single slab of per-particle values with the neighbor in
// the given direction: read values via rdf, shift them, write them
// into the receiving slab via wrtf
template<typename T>
template<typename P>
void ParallelParticleArray<T>::exchange_single(P (T::*rdf)(),void (T::*wrtf)(const P&),
                                               NTSlab<T> send_slab,NTSlab<T> recv_slab,
                                               int dir,int dist)
{
  vector<P> send_data;
  vector<P> recv_data;

  // get data
  for(typename NTSlab<T>::iterator iter=send_slab.begin();
      iter!=send_slab.end();
      iter++){
    send_data.push_back(((*iter).*rdf)());
  }
  // exchange
  m_comm.shift_cont_packed(send_data,recv_data,dir,dist,m_exchg_tag);

  // apply received data, checking that the sizes match
  if(recv_data.size()==recv_slab.size()){
    int count=0;
    for(typename NTSlab<T>::iterator iter=recv_slab.begin();
        iter!=recv_slab.end();
        iter++){
      ((*iter).*wrtf)(recv_data[count]);
      count++;
    }
  } else {
    cerr << "rank = " << m_comm.rank()
         << ": ParallelParticleArray<T>::exchange_single ERROR: size mismatch in received data, recv_data.size()!=recv_slab.size() - ("
         << recv_data.size() << "!=" << recv_slab.size()
         << "). dir = " << dir << ", dist = " << dist << endl;
  }
}

// call a member function for the particle with the given ID, if it is
// stored on this worker; do nothing otherwise
template<typename T>
void ParallelParticleArray<T>::forParticle(int id,void (T::*mf)())
{
  T* pp=m_nt->ptr_by_id(id);
  if(pp!=NULL){
    (pp->*mf)();
  }
}

// as above, for a member function taking one argument
template<typename T>
template<typename P>
void ParallelParticleArray<T>::forParticle(int id,void (T::*mf)(P),const P& arg)
{
  T* pp=m_nt->ptr_by_id(id);
  if(pp!=NULL){
    (pp->*mf)(arg);
  }
}
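
// Sketch: apply a member function to the particle with a given ID, a
// no-op on workers that don't hold it (member names hypothetical):
//
//   ppa.forParticle(42, &CParticle::zeroForce);
//   ppa.forParticle(42, &CParticle::setTag, 7);  // one-argument overload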

// call a member function for all particles with the given tag
template<typename T>
void ParallelParticleArray<T>::forParticleTag(int tag,void (T::*mf)())
{
  for(typename NeighborTable<T>::iterator iter=m_nt->begin();
      iter!=m_nt->end();
      iter++){
    if(iter->getTag()==tag){
      ((*iter).*mf)();
    }
  }
}

// as above, for a member function taking one argument
template<typename T>
template<typename P>
void ParallelParticleArray<T>::forParticleTag(int tag,void (T::*mf)(P),const P& arg)
{
  for(typename NeighborTable<T>::iterator iter=m_nt->begin();
      iter!=m_nt->end();
      iter++){
    if(iter->getTag()==tag){
      ((*iter).*mf)(arg);
    }
  }
}

// call a member function for all particles whose tag matches the given
// tag on the bits set in the mask
template<typename T>
void ParallelParticleArray<T>::forParticleTagMask(int tag,int mask,void (T::*mf)())
{
  for(typename NeighborTable<T>::iterator iter=m_nt->begin();
      iter!=m_nt->end();
      iter++){
    if((iter->getTag() & mask) == (tag & mask)){
      ((*iter).*mf)();
    }
  }
}

// as above, for a member function taking one argument
template<typename T>
template<typename P>
void ParallelParticleArray<T>::forParticleTagMask(int tag,int mask,void (T::*mf)(P),const P& arg)
{
  for(typename NeighborTable<T>::iterator iter=m_nt->begin();
      iter!=m_nt->end();
      iter++){
    if((iter->getTag() & mask) == (tag & mask)){
      ((*iter).*mf)(arg);
    }
  }
}
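
// Worked example of the tag/mask test: a particle matches when its tag
// agrees with `tag` on every bit set in `mask`. With tag=5 (binary 101)
// and mask=3 (binary 011) the test keeps particles whose two lowest tag
// bits are 01, e.g. tags 1, 5 and 13 match while 3, 4 and 7 do not:
//
//   ppa.forParticleTagMask(5, 3, &CParticle::zeroForce);  // hypothetical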

// call a member function for all particles
template<typename T>
void ParallelParticleArray<T>::forAllParticles(void (T::*fnc)())
{
  for(typename NeighborTable<T>::iterator iter=m_nt->begin();
      iter!=m_nt->end();
      iter++){
    ((*iter).*fnc)();
  }
}

// as above, for a const member function
template<typename T>
void ParallelParticleArray<T>::forAllParticles(void (T::*fnc)()const)
{
  for(typename NeighborTable<T>::iterator iter=m_nt->begin();
      iter!=m_nt->end();
      iter++){
    ((*iter).*fnc)();
  }
}

// as above, for a member function taking one argument
template<typename T>
template<typename P>
void ParallelParticleArray<T>::forAllParticles(void (T::*fnc)(P),const P& arg)
{
  for(typename NeighborTable<T>::iterator iter=m_nt->begin();
      iter!=m_nt->end();
      iter++){
    ((*iter).*fnc)(arg);
  }
}

// call a member function for all particles in the inner block
template<typename T>
template<typename P>
void ParallelParticleArray<T>::forAllInnerParticles(void (T::*fnc)(P&),P& arg)
{
  NTBlock<T> InnerBlock=m_nt->inner();
  for(typename NTBlock<T>::iterator iter=InnerBlock.begin();
      iter!=InnerBlock.end();
      iter++){
    ((*iter).*fnc)(arg);
  }
}

// append the values read via rdf from all particles to the given
// container
template<typename T>
template<typename P>
void ParallelParticleArray<T>::forAllParticlesGet(P& cont,typename P::value_type (T::*rdf)()const)
{
  for(typename NeighborTable<T>::iterator iter=m_nt->begin();
      iter!=m_nt->end();
      iter++){
    cont.push_back(((*iter).*rdf)());
  }
}
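
// Gather sketch: collect one field from every local particle into a
// container whose value_type matches the accessor's return type. The
// particle type is assumed to provide a const accessor such as getPos():
//
//   std::vector<Vec3> positions;
//   ppa.forAllParticlesGet(positions, &CParticle::getPos);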

template<typename T>
ParallelParticleArray<T>::ParticleIterator::ParticleIterator(
  const NtBlock &ntBlock
)
  : m_ntBlock(ntBlock),
    m_it(m_ntBlock.begin())
{
  m_numRemaining = m_ntBlock.size();
}

// true while there are particles left in the iteration
template<typename T>
bool ParallelParticleArray<T>::ParticleIterator::hasNext() const
{
  return (m_numRemaining > 0);
}

// return the current particle and advance the iterator
template<typename T>
typename ParallelParticleArray<T>::ParticleIterator::Particle &ParallelParticleArray<T>::ParticleIterator::next()
{
  Particle &p = (*m_it);
  m_it++;
  m_numRemaining--;
  return p;
}

template<typename T>
int ParallelParticleArray<T>::ParticleIterator::getNumRemaining() const
{
  return m_numRemaining;
}

// get an iterator over the particles in the inner (non-overlap) block
template<typename T>
typename ParallelParticleArray<T>::ParticleIterator ParallelParticleArray<T>::getInnerParticleIterator()
{
  return ParticleIterator(m_nt->inner());
}
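
// Iteration sketch over the inner particles, assuming the hypothetical
// particle type from the earlier examples:
//
//   ParallelParticleArray<CParticle>::ParticleIterator it =
//     ppa.getInnerParticleIterator();
//   while(it.hasNext()){
//     CParticle &p = it.next();
//     // ... use p ...
//   }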

// append the values read via rdf from all inner particles to the given
// container
template<typename T>
template<typename P>
void ParallelParticleArray<T>::forAllInnerParticlesGet(P& cont,typename P::value_type (T::*rdf)()const)
{
  NTBlock<T> InnerBlock=m_nt->inner();
  for(typename NTBlock<T>::iterator iter=InnerBlock.begin();
      iter!=InnerBlock.end();
      iter++){
    cont.push_back(((*iter).*rdf)());
  }
}

// return (id,value) pairs read via rdf from all particles
template<typename T>
template <typename P>
vector<pair<int,P> > ParallelParticleArray<T>::forAllParticlesGetIndexed(P (T::*rdf)() const)
{
  vector<pair<int,P> > res;

  for(typename NeighborTable<T>::iterator iter=m_nt->begin();
      iter!=m_nt->end();
      iter++){
    res.push_back(make_pair(iter->getID(),((*iter).*rdf)()));
  }

  return res;
}

// return (id,value) pairs read via rdf from all inner particles
template<typename T>
template <typename P>
vector<pair<int,P> > ParallelParticleArray<T>::forAllInnerParticlesGetIndexed(P (T::*rdf)() const)
{
  vector<pair<int,P> > res;

  NTBlock<T> InnerBlock=m_nt->inner();
  for(typename NTBlock<T>::iterator iter=InnerBlock.begin();
      iter!=InnerBlock.end();
      iter++){
    res.push_back(make_pair(iter->getID(),((*iter).*rdf)()));
  }

  return res;
}

// append the values read via rdf from all particles whose tag matches
// the given tag on the bits set in the mask
template<typename T>
template<typename P>
void ParallelParticleArray<T>::forAllTaggedParticlesGet(P& cont,typename P::value_type (T::*rdf)()const,int tag,int mask)
{
  for(typename NeighborTable<T>::iterator iter=m_nt->begin();
      iter!=m_nt->end();
      iter++){
    if((iter->getTag() & mask)==(tag & mask)){
      cont.push_back(((*iter).*rdf)());
    }
  }
}

// as above, restricted to the inner block
template<typename T>
template<typename P>
void ParallelParticleArray<T>::forAllTaggedInnerParticlesGet(P& cont,typename P::value_type (T::*rdf)()const,int tag,int mask)
{
  NTBlock<T> InnerBlock=m_nt->inner();
  for(typename NTBlock<T>::iterator iter=InnerBlock.begin();
      iter!=InnerBlock.end();
      iter++){
    int ptag=iter->getTag();
    if((ptag & mask)==(tag & mask)){
      cont.push_back(((*iter).*rdf)());
    }
  }
}

// return (id,value) pairs read via rdf from all particles whose tag
// matches the given tag on the bits set in the mask
template<typename T>
template <typename P>
vector<pair<int,P> > ParallelParticleArray<T>::forAllTaggedParticlesGetIndexed(P (T::*rdf)() const,int tag,int mask)
{
  vector<pair<int,P> > res;

  for(typename NeighborTable<T>::iterator iter=m_nt->begin();
      iter!=m_nt->end();
      iter++){
    int id=iter->getID();
    int ptag=iter->getTag();
    if((ptag & mask)==(tag & mask)){
      res.push_back(make_pair(id,((*iter).*rdf)()));
    }
  }

  return res;
}

// as above, restricted to the inner block
template<typename T>
template <typename P>
vector<pair<int,P> > ParallelParticleArray<T>::forAllInnerTaggedParticlesGetIndexed(P (T::*rdf)() const,int tag,int mask)
{
  vector<pair<int,P> > res;

  NTBlock<T> InnerBlock=m_nt->inner();
  for(typename NTBlock<T>::iterator iter=InnerBlock.begin();
      iter!=InnerBlock.end();
      iter++){
    int id=iter->getID();
    int ptag=iter->getTag();
    if((ptag & mask)==(tag & mask)){
      res.push_back(make_pair(id,((*iter).*rdf)()));
    }
  }

  return res;
}

// read the value returned by rdf from the particle nearest to each node
// of a regular grid with origin orig, spacings dx,dy,dz and nx*ny*nz
// nodes, appending the values to the given container
template<typename T>
template <typename P>
void ParallelParticleArray<T>::forPointsGetNearest(P& cont,typename P::value_type (T::*rdf)() const,const Vec3& orig,
                                                   double dx,double dy,double dz,int nx,int ny,int nz)
{
  console.Debug() << "forPointsGetNearest" << "\n";

  Vec3 u_x=Vec3(1.0,0.0,0.0);
  Vec3 u_y=Vec3(0.0,1.0,0.0);
  Vec3 u_z=Vec3(0.0,0.0,1.0);
  for(int ix=0;ix<nx;ix++){
    for(int iy=0;iy<ny;iy++){
      for(int iz=0;iz<nz;iz++){
        Vec3 p=orig+double(ix)*dx*u_x+double(iy)*dy*u_y+double(iz)*dz*u_z;
        cont.push_back(((*(m_nt->getNearestPtr(p))).*rdf)());
      }
    }
  }

  console.Debug() << "end forPointsGetNearest" << "\n";
}
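
// Sampling sketch: read the field returned by rdf from the particle
// nearest to each node of a 20x20x20 grid with spacing 0.5 starting at
// the origin (accessor name hypothetical):
//
//   std::vector<Vec3> field;
//   ppa.forPointsGetNearest(field, &CParticle::getVel,
//                           Vec3(0,0,0), 0.5,0.5,0.5, 20,20,20);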

// get the IDs of the particles in the two outermost slabs at the given
// boundary (dir: 0=x, 1=y, 2=z; up: 1 for the upper boundary, else lower)
template<typename T>
set<int> ParallelParticleArray<T>::getBoundarySlabIds(int dir,int up) const
{
  set<int> res;
  NTSlab<T> slab,slab2;

  switch(dir){
  case 0 : // x-dir
    if(up==1){
      slab=m_nt->yz_slab(m_nt->xsize()-1);
      slab2=m_nt->yz_slab(m_nt->xsize()-2);
    } else {
      slab=m_nt->yz_slab(0);
      slab2=m_nt->yz_slab(1);
    }
    break;
  case 1 : // y-dir
    if(up==1){
      slab=m_nt->xz_slab(m_nt->ysize()-1);
      slab2=m_nt->xz_slab(m_nt->ysize()-2);
    } else {
      slab=m_nt->xz_slab(0);
      slab2=m_nt->xz_slab(1);
    }
    break;
  case 2 : // z-dir
    if(up==1){
      slab=m_nt->xy_slab(m_nt->zsize()-1);
      slab2=m_nt->xy_slab(m_nt->zsize()-2);
    } else {
      slab=m_nt->xy_slab(0);
      slab2=m_nt->xy_slab(1);
    }
    break;
  default:
    cerr << "Error: wrong direction " << dir << " in getBoundarySlabIds" << endl;
  }

  for(typename NTSlab<T>::iterator iter=slab.begin();
      iter!=slab.end();
      iter++){
    res.insert(iter->getID());
  }
  for(typename NTSlab<T>::iterator iter=slab2.begin();
      iter!=slab2.end();
      iter++){
    res.insert(iter->getID());
  }

  return res;
}

// get the IDs of the particles in the second slab from the given
// boundary (dir: 0=x, 1=y, 2=z; up: 1 for the upper boundary, else lower)
template<typename T>
set<int> ParallelParticleArray<T>::get2ndSlabIds(int dir,int up) const
{
  set<int> res;
  NTSlab<T> slab;

  switch(dir){
  case 0 : // x-dir
    if(up==1){
      slab=m_nt->yz_slab(m_nt->xsize()-2);
    } else {
      slab=m_nt->yz_slab(1);
    }
    break;
  case 1 : // y-dir
    if(up==1){
      slab=m_nt->xz_slab(m_nt->ysize()-2);
    } else {
      slab=m_nt->xz_slab(1);
    }
    break;
  case 2 : // z-dir
    if(up==1){
      slab=m_nt->xy_slab(m_nt->zsize()-2);
    } else {
      slab=m_nt->xy_slab(1);
    }
    break;
  default:
    cerr << "Error: wrong direction " << dir << " in get2ndSlabIds" << endl;
  }

  for(typename NTSlab<T>::iterator iter=slab.begin();
      iter!=slab.end();
      iter++){
    res.insert(iter->getID());
  }

  return res;
}

// append all particles in the inner block to the given vector
template<typename T>
void ParallelParticleArray<T>::getAllInnerParticles(vector<T>& pv)
{
  NTBlock<T> InnerBlock=m_nt->inner();
  for(typename NTBlock<T>::iterator iter=InnerBlock.begin();
      iter!=InnerBlock.end();
      iter++){
    pv.push_back(*iter);
  }
}

// write the checkpoint data for this worker to the given stream: the
// local area (base point and base indices), the number of inner
// particles, then the particles themselves
template<typename T>
void ParallelParticleArray<T>::saveCheckPointData(std::ostream& ost)
{
  console.Debug() << "ParallelParticleArray<T>::saveCheckPointData\n";

  // output dimensions
  ost << m_nt->base_point() << "\n";
  ost << m_nt->base_idx_x() << " " << m_nt->base_idx_y() << " " << m_nt->base_idx_z() << "\n";

  // output nr. of particles in inner block
  ost << getInnerSize() << "\n";

  // save particles to stream
  NTBlock<T> InnerBlock=m_nt->inner();
  for(typename NTBlock<T>::iterator iter=InnerBlock.begin();
      iter!=InnerBlock.end();
      iter++){
    iter->saveCheckPointData(ost);
    ost << "\n";
  }
}

// read checkpoint data back from the given stream, assuming the same
// domain decomposition as when it was written
template<typename T>
void ParallelParticleArray<T>::loadCheckPointData(std::istream& ist)
{
  console.Debug() << "ParallelParticleArray<T>::loadCheckPointData\n";
  int nparts;

  // get dimensions
  Vec3 bp;
  int bix,biy,biz;
  ist >> bp;
  ist >> bix;
  ist >> biy;
  ist >> biz;
  // complain if different from current values
  if((bp!=m_nt->base_point()) ||
     (bix!=m_nt->base_idx_x()) ||
     (biy!=m_nt->base_idx_y()) ||
     (biz!=m_nt->base_idx_z())){
    std::cerr << "local area data inconsistent: bp " << bp << " / " << m_nt->base_point()
              << " bix: " << bix << " / " << m_nt->base_idx_x()
              << " biy: " << biy << " / " << m_nt->base_idx_y()
              << " biz: " << biz << " / " << m_nt->base_idx_z() << std::endl;
  }

  // get nr. of particles
  ist >> nparts;

  // load particles from stream and try to insert them
  for(int i=0;i!=nparts;i++){
    T p;
    p.loadCheckPointData(ist);
    if(!isInInner(p.getPos())) std::cerr << "not in inner: " << p.getPos() << std::endl;
    m_nt->insert(p);
  }
}
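
// Checkpoint round-trip sketch: each worker writes its inner particles
// and a later run with the identical domain decomposition reads them
// back. Stream handling is illustrative only (requires <fstream>):
//
//   std::ofstream out("cp_rank0.txt");
//   ppa.saveCheckPointData(out);   // base point, base indices,
//   out.close();                   // particle count, particles
//
//   std::ifstream in("cp_rank0.txt");
//   ppa.loadCheckPointData(in);    // warns if the local area differs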

// output a ParallelParticleArray (including its neighbor table) to a stream
template<typename T>
ostream& operator<<(ostream& ost, const ParallelParticleArray<T> &ppa)
{
  ost << "--ParallelParticleArray--" << endl;
  ost << *(ppa.m_nt) << endl;
  return ost;
}