CLHEP VERSION Reference Documentation
CLHEP Home Page CLHEP Documentation CLHEP Bug Reports |
00001 // -*- C++ -*- 00002 // --------------------------------------------------------------------------- 00003 // 00004 // This file is a part of the CLHEP - a Class Library for High Energy Physics. 00005 // 00006 00007 #ifdef GNUPRAGMA 00008 #pragma implementation 00009 #endif 00010 00011 #include <string.h> 00012 #include <float.h> // for DBL_EPSILON 00013 00014 #include "CLHEP/Matrix/defs.h" 00015 #include "CLHEP/Random/Random.h" 00016 #include "CLHEP/Matrix/SymMatrix.h" 00017 #include "CLHEP/Matrix/Matrix.h" 00018 #include "CLHEP/Matrix/DiagMatrix.h" 00019 #include "CLHEP/Matrix/Vector.h" 00020 00021 #ifdef HEP_DEBUG_INLINE 00022 #include "CLHEP/Matrix/SymMatrix.icc" 00023 #endif 00024 00025 namespace CLHEP { 00026 00027 // Simple operation for all elements 00028 00029 #define SIMPLE_UOP(OPER) \ 00030 HepMatrix::mIter a=m.begin(); \ 00031 HepMatrix::mIter e=m.begin()+num_size(); \ 00032 for(;a<e; a++) (*a) OPER t; 00033 00034 #define SIMPLE_BOP(OPER) \ 00035 HepMatrix::mIter a=m.begin(); \ 00036 HepMatrix::mcIter b=m2.m.begin(); \ 00037 HepMatrix::mcIter e=m.begin()+num_size(); \ 00038 for(;a<e; a++, b++) (*a) OPER (*b); 00039 00040 #define SIMPLE_TOP(OPER) \ 00041 HepMatrix::mcIter a=m1.m.begin(); \ 00042 HepMatrix::mcIter b=m2.m.begin(); \ 00043 HepMatrix::mIter t=mret.m.begin(); \ 00044 HepMatrix::mcIter e=m1.m.begin()+m1.num_size(); \ 00045 for( ;a<e; a++, b++, t++) (*t) = (*a) OPER (*b); 00046 00047 #define CHK_DIM_2(r1,r2,c1,c2,fun) \ 00048 if (r1!=r2 || c1!=c2) { \ 00049 HepGenMatrix::error("Range error in SymMatrix function " #fun "(1)."); \ 00050 } 00051 00052 #define CHK_DIM_1(c1,r2,fun) \ 00053 if (c1!=r2) { \ 00054 HepGenMatrix::error("Range error in SymMatrix function " #fun "(2)."); \ 00055 } 00056 00057 // Constructors. 
(Default constructors are inlined and in .icc file) 00058 00059 HepSymMatrix::HepSymMatrix(int p) 00060 : m(p*(p+1)/2), nrow(p) 00061 { 00062 size_ = nrow * (nrow+1) / 2; 00063 m.assign(size_,0); 00064 } 00065 00066 HepSymMatrix::HepSymMatrix(int p, int init) 00067 : m(p*(p+1)/2), nrow(p) 00068 { 00069 size_ = nrow * (nrow+1) / 2; 00070 00071 m.assign(size_,0); 00072 switch(init) 00073 { 00074 case 0: 00075 break; 00076 00077 case 1: 00078 { 00079 HepMatrix::mIter a; 00080 for(int i=0;i<nrow;++i) { 00081 a = m.begin() + (i+1)*i/2 + i; 00082 *a = 1.0; 00083 } 00084 break; 00085 } 00086 default: 00087 error("SymMatrix: initialization must be either 0 or 1."); 00088 } 00089 } 00090 00091 HepSymMatrix::HepSymMatrix(int p, HepRandom &r) 00092 : m(p*(p+1)/2), nrow(p) 00093 { 00094 size_ = nrow * (nrow+1) / 2; 00095 HepMatrix::mIter a = m.begin(); 00096 HepMatrix::mIter b = m.begin() + size_; 00097 for(;a<b;a++) *a = r(); 00098 } 00099 00100 // 00101 // Destructor 00102 // 00103 HepSymMatrix::~HepSymMatrix() { 00104 } 00105 00106 HepSymMatrix::HepSymMatrix(const HepSymMatrix &m1) 00107 : HepGenMatrix(m1), m(m1.size_), nrow(m1.nrow), size_(m1.size_) 00108 { 00109 m = m1.m; 00110 } 00111 00112 HepSymMatrix::HepSymMatrix(const HepDiagMatrix &m1) 00113 : m(m1.nrow*(m1.nrow+1)/2), nrow(m1.nrow) 00114 { 00115 size_ = nrow * (nrow+1) / 2; 00116 00117 int n = num_row(); 00118 m.assign(size_,0); 00119 00120 HepMatrix::mIter mrr = m.begin(); 00121 HepMatrix::mcIter mr = m1.m.begin(); 00122 for(int r=1;r<=n;r++) { 00123 *mrr = *(mr++); 00124 if(r<n) mrr += (r+1); 00125 } 00126 } 00127 00128 // 00129 // 00130 // Sub matrix 00131 // 00132 // 00133 00134 HepSymMatrix HepSymMatrix::sub(int min_row, int max_row) const 00135 #ifdef HEP_GNU_OPTIMIZED_RETURN 00136 return mret(max_row-min_row+1); 00137 { 00138 #else 00139 { 00140 HepSymMatrix mret(max_row-min_row+1); 00141 #endif 00142 if(max_row > num_row()) 00143 error("HepSymMatrix::sub: Index out of range"); 00144 HepMatrix::mIter a = 
mret.m.begin(); 00145 HepMatrix::mcIter b1 = m.begin() + (min_row+2)*(min_row-1)/2; 00146 int rowsize=mret.num_row(); 00147 for(int irow=1; irow<=rowsize; irow++) { 00148 HepMatrix::mcIter b = b1; 00149 for(int icol=0; icol<irow; ++icol) { 00150 *(a++) = *(b++); 00151 } 00152 if(irow<rowsize) b1 += irow+min_row-1; 00153 } 00154 return mret; 00155 } 00156 00157 HepSymMatrix HepSymMatrix::sub(int min_row, int max_row) 00158 { 00159 HepSymMatrix mret(max_row-min_row+1); 00160 if(max_row > num_row()) 00161 error("HepSymMatrix::sub: Index out of range"); 00162 HepMatrix::mIter a = mret.m.begin(); 00163 HepMatrix::mIter b1 = m.begin() + (min_row+2)*(min_row-1)/2; 00164 int rowsize=mret.num_row(); 00165 for(int irow=1; irow<=rowsize; irow++) { 00166 HepMatrix::mIter b = b1; 00167 for(int icol=0; icol<irow; ++icol) { 00168 *(a++) = *(b++); 00169 } 00170 if(irow<rowsize) b1 += irow+min_row-1; 00171 } 00172 return mret; 00173 } 00174 00175 void HepSymMatrix::sub(int row,const HepSymMatrix &m1) 00176 { 00177 if(row <1 || row+m1.num_row()-1 > num_row() ) 00178 error("HepSymMatrix::sub: Index out of range"); 00179 HepMatrix::mcIter a = m1.m.begin(); 00180 HepMatrix::mIter b1 = m.begin() + (row+2)*(row-1)/2; 00181 int rowsize=m1.num_row(); 00182 for(int irow=1; irow<=rowsize; ++irow) { 00183 HepMatrix::mIter b = b1; 00184 for(int icol=0; icol<irow; ++icol) { 00185 *(b++) = *(a++); 00186 } 00187 if(irow<rowsize) b1 += irow+row-1; 00188 } 00189 } 00190 00191 // 00192 // Direct sum of two matricies 00193 // 00194 00195 HepSymMatrix dsum(const HepSymMatrix &m1, 00196 const HepSymMatrix &m2) 00197 #ifdef HEP_GNU_OPTIMIZED_RETURN 00198 return mret(m1.num_row() + m2.num_row(), 0); 00199 { 00200 #else 00201 { 00202 HepSymMatrix mret(m1.num_row() + m2.num_row(), 00203 0); 00204 #endif 00205 mret.sub(1,m1); 00206 mret.sub(m1.num_row()+1,m2); 00207 return mret; 00208 } 00209 00210 /* ----------------------------------------------------------------------- 00211 This section contains support 
   routines for matrix.h. This section contains
   the two argument functions +,-.  They call the copy constructor and +=,-=.
   ----------------------------------------------------------------------- */

// Unary minus: element-wise negation over the packed storage.
HepSymMatrix HepSymMatrix::operator- () const
#ifdef HEP_GNU_OPTIMIZED_RETURN
return m2(nrow);
{
#else
{
   HepSymMatrix m2(nrow);
#endif
   HepMatrix::mcIter a=m.begin();
   HepMatrix::mIter b=m2.m.begin();
   HepMatrix::mcIter e=m.begin()+num_size();
   for(;a<e; a++, b++) (*b) = -(*a);
   return m2;
}

// Full matrix + symmetric matrix: copy m1, then delegate to operator+=.
HepMatrix operator+(const HepMatrix &m1,const HepSymMatrix &m2)
#ifdef HEP_GNU_OPTIMIZED_RETURN
return mret(m1);
{
#else
{
  HepMatrix mret(m1);
#endif
  CHK_DIM_2(m1.num_row(),m2.num_row(), m1.num_col(),m2.num_col(),+);
  mret += m2;
  return mret;
}

// Symmetric + full: copy the full operand, then add the symmetric one.
HepMatrix operator+(const HepSymMatrix &m1,const HepMatrix &m2)
#ifdef HEP_GNU_OPTIMIZED_RETURN
return mret(m2);
{
#else
{
  HepMatrix mret(m2);
#endif
  CHK_DIM_2(m1.num_row(),m2.num_row(),m1.num_col(),m2.num_col(),+);
  mret += m1;
  return mret;
}

// Symmetric + symmetric: element-wise over packed storage (stays symmetric).
HepSymMatrix operator+(const HepSymMatrix &m1,const HepSymMatrix &m2)
#ifdef HEP_GNU_OPTIMIZED_RETURN
return mret(m1.nrow);
{
#else
{
  HepSymMatrix mret(m1.nrow);
#endif
  CHK_DIM_1(m1.nrow, m2.nrow,+);
  SIMPLE_TOP(+)
  return mret;
}

//
// operator -
//

// Full - symmetric: copy m1, then delegate to operator-=.
HepMatrix operator-(const HepMatrix &m1,const HepSymMatrix &m2)
#ifdef HEP_GNU_OPTIMIZED_RETURN
return mret(m1);
{
#else
{
  HepMatrix mret(m1);
#endif
  CHK_DIM_2(m1.num_row(),m2.num_row(),
	    m1.num_col(),m2.num_col(),-);
  mret -= m2;
  return mret;
}

// Symmetric - full: expand m1 into a full matrix, then subtract m2.
HepMatrix operator-(const HepSymMatrix &m1,const HepMatrix &m2)
#ifdef HEP_GNU_OPTIMIZED_RETURN
return mret(m1);
{
#else
{
  HepMatrix mret(m1);
#endif
  CHK_DIM_2(m1.num_row(),m2.num_row(),
	    m1.num_col(),m2.num_col(),-);
  mret -= m2;
  return mret;
}

// Symmetric - symmetric: element-wise over packed storage.
HepSymMatrix operator-(const HepSymMatrix &m1,const HepSymMatrix &m2)
#ifdef HEP_GNU_OPTIMIZED_RETURN
return mret(m1.num_row());
{
#else
{
  HepSymMatrix mret(m1.num_row());
#endif
  CHK_DIM_1(m1.num_row(),m2.num_row(),-);
  SIMPLE_TOP(-)
  return mret;
}

/* -----------------------------------------------------------------------
   This section contains support routines for matrix.h. This file contains
   The two argument functions *,/. They call copy constructor and then /=,*=.
   ----------------------------------------------------------------------- */

// Scalar division: copy, then delegate to operator/=.
HepSymMatrix operator/(
const HepSymMatrix &m1,double t)
#ifdef HEP_GNU_OPTIMIZED_RETURN
return mret(m1);
{
#else
{
  HepSymMatrix mret(m1);
#endif
  mret /= t;
  return mret;
}

// Scalar multiplication, matrix on the left.
HepSymMatrix operator*(const HepSymMatrix &m1,double t)
#ifdef HEP_GNU_OPTIMIZED_RETURN
return mret(m1);
{
#else
{
  HepSymMatrix mret(m1);
#endif
  mret *= t;
  return mret;
}

// Scalar multiplication, scalar on the left.
HepSymMatrix operator*(double t,const HepSymMatrix &m1)
#ifdef HEP_GNU_OPTIMIZED_RETURN
return mret(m1);
{
#else
{
  HepSymMatrix mret(m1);
#endif
  mret *= t;
  return mret;
}

// Full matrix times symmetric matrix.  The symmetric operand is walked in
// its packed-triangle layout: for column entries above the diagonal the
// mirrored element A(j,i)=A(i,j) is read by stepping through row starts.
HepMatrix operator*(const HepMatrix &m1,const HepSymMatrix &m2)
#ifdef HEP_GNU_OPTIMIZED_RETURN
return mret(m1.num_row(),m2.num_col());
{
#else
{
  HepMatrix mret(m1.num_row(),m2.num_col());
#endif
  CHK_DIM_1(m1.num_col(),m2.num_row(),*);
  HepMatrix::mcIter mit1, mit2, sp,snp; //mit2=0
  double
temp;
  HepMatrix::mIter mir=mret.m.begin();
  // Outer loop: one row of m1 at a time (mit2 ends up at the next row).
  for(mit1=m1.m.begin();
      mit1<m1.m.begin()+m1.num_row()*m1.num_col();
      mit1 = mit2)
    {
      snp=m2.m.begin();
      // step = current column of m2 (1-based); snp tracks packed row starts.
      for(int step=1;step<=m2.num_row();++step)
	{
	  mit2=mit1;
	  sp=snp;
	  snp+=step;
	  temp=0;
	  // Elements of m2's column that live in its packed row 'step'.
	  while(sp<snp)
	    temp+=*(sp++)*(*(mit2++));
	  if( step<m2.num_row() ) { // only if we aren't on the last row
	    sp+=step-1;
	    // Remaining column entries come from the mirrored elements
	    // stored in later packed rows; hop from row start to row start.
	    for(int stept=step+1;stept<=m2.num_row();stept++)
	      {
		temp+=*sp*(*(mit2++));
		if(stept<m2.num_row()) sp+=stept;
	      }
	  }	// if(step
	  *(mir++)=temp;
	}	// for(step
    }	// for(mit1
  return mret;
}

// Symmetric matrix times full matrix.  Same packed-triangle walk as above,
// but now the symmetric operand supplies the rows.
HepMatrix operator*(const HepSymMatrix &m1,const HepMatrix &m2)
#ifdef HEP_GNU_OPTIMIZED_RETURN
return mret(m1.num_row(),m2.num_col());
{
#else
{
  HepMatrix mret(m1.num_row(),m2.num_col());
#endif
  CHK_DIM_1(m1.num_col(),m2.num_row(),*);
  int step,stept;
  HepMatrix::mcIter mit1,mit2,sp,snp;
  double temp;
  HepMatrix::mIter mir=mret.m.begin();
  // step = current row of m1; snp points at the start of that packed row.
  for(step=1,snp=m1.m.begin();step<=m1.num_row();snp+=step++)
    for(mit1=m2.m.begin();mit1<m2.m.begin()+m2.num_col();mit1++)
      {
	mit2=mit1;
	sp=snp;
	temp=0;
	// In-row part of m1's row 'step'; mit2 walks down m2's column.
	while(sp<snp+step)
	  {
	    temp+=*mit2*(*(sp++));
	    // Guard: do not advance mit2 past the end of m2's storage.
	    if( m2.num_size()-(mit2-m2.m.begin())>m2.num_col() ){
	      mit2+=m2.num_col();
	    }
	  }
	if(step<m1.num_row()) {	// only if we aren't on the last row
	  sp+=step-1;
	  // Mirrored elements of m1's row from later packed rows.
	  for(stept=step+1;stept<=m1.num_row();stept++)
	    {
	      temp+=*mit2*(*sp);
	      if(stept<m1.num_row()) {
		mit2+=m2.num_col();
		sp+=stept;
	      }
	    }
	}	// if(step
	*(mir++)=temp;
      }	// for(mit1
  return mret;
}

// Symmetric times symmetric.  Note: the product of two symmetric matrices
// is NOT symmetric in general, hence the full HepMatrix result.
HepMatrix operator*(const HepSymMatrix &m1,const HepSymMatrix &m2)
#ifdef HEP_GNU_OPTIMIZED_RETURN
return mret(m1.num_row(),m1.num_row());
{
#else
{
  HepMatrix mret(m1.num_row(),m1.num_row());
#endif
  CHK_DIM_1(m1.num_col(),m2.num_row(),*);
  int step1,stept1,step2,stept2;
  HepMatrix::mcIter snp1,sp1,snp2,sp2;
  double temp;
  HepMatrix::mIter mr = mret.m.begin();
  // snp1/snp2 track the packed row starts of m1 and m2; the two branches
  // below (step1<step2 vs not) differ only in which operand runs out of
  // in-row elements first and must switch to mirrored (row-start-hopping)
  // access.
  snp1=m1.m.begin();
  for(step1=1;step1<=m1.num_row();++step1) {
    snp2=m2.m.begin();
    for(step2=1;step2<=m2.num_row();++step2)
      {
	sp1=snp1;
	sp2=snp2;
	snp2+=step2;
	temp=0;
	if(step1<step2)
	  {
	    // m1's row is the shorter packed run: consume it first.
	    while(sp1<snp1+step1) {
	      temp+=(*(sp1++))*(*(sp2++));
	    }
	    sp1+=step1-1;
	    // m1 mirrored, m2 still in-row.
	    for(stept1=step1+1;stept1!=step2+1;++stept1) {
	      temp+=(*sp1)*(*(sp2++));
	      if(stept1<m2.num_row()) sp1+=stept1;
	    }
	    if(step2<m2.num_row()) { // only if we aren't on the last row
	      sp2+=step2-1;
	      // Both operands mirrored from here on.
	      for(stept2=step2+1;stept2<=m2.num_row();stept1++,stept2++) {
		temp+=(*sp1)*(*sp2);
		if(stept2<m2.num_row()) {
		  sp1+=stept1;
		  sp2+=stept2;
		}
	      }	// for(stept2
	    }	// if(step2
	  }	// step1<step2
	else
	  {
	    // m2's row is the shorter (or equal) packed run.
	    while(sp2<snp2) {
	      temp+=(*(sp1++))*(*(sp2++));
	    }
	    if(step2<m2.num_row()) { // only if we aren't on the last row
	      sp2+=step2-1;
	      // m2 mirrored, m1 still in-row.
	      for(stept2=step2+1;stept2!=step1+1;stept2++) {
		temp+=(*(sp1++))*(*sp2);
		if(stept2<m1.num_row()) sp2+=stept2;
	      }
	      if(step1<m1.num_row()) { // only if we aren't on the last row
		sp1+=step1-1;
		// Both operands mirrored.
		for(stept1=step1+1;stept1<=m1.num_row();stept1++,stept2++) {
		  temp+=(*sp1)*(*sp2);
		  if(stept1<m1.num_row()) {
		    sp1+=stept1;
		    sp2+=stept2;
		  }
		}	// for(stept1
	      }	// if(step1
	    }	// if(step2
	  }	// else
	*(mr++)=temp;
      }	// for(step2
    if(step1<m1.num_row()) snp1+=step1;
  }	// for(step1
  return mret;
}

// Symmetric matrix times vector.
HepVector operator*(const HepSymMatrix &m1,const HepVector &m2)
#ifdef HEP_GNU_OPTIMIZED_RETURN
return mret(m1.num_row());
{
#else
{
  HepVector mret(m1.num_row());
#endif
  CHK_DIM_1(m1.num_col(),m2.num_row(),*);
  HepMatrix::mcIter sp,snp,vpt;
  double temp;
  int step,stept;
  HepMatrix::mIter vrp=mret.m.begin();
  // step = current row of m1; sp walks the packed row, then hops across
  // later row starts for the mirrored elements above the diagonal.
  for(step=1,snp=m1.m.begin();step<=m1.num_row();++step)
    {
      sp=snp;
      vpt=m2.m.begin();
      snp+=step;
      temp=0;
      // In-row (lower-triangle) part of row 'step'.
      while(sp<snp)
	temp+=*(sp++)*(*(vpt++));
      if(step<m1.num_row()) sp+=step-1;
      // Mirrored part: element (step,stept) stored in packed row 'stept'.
      for(stept=step+1;stept<=m1.num_row();stept++)
	{
	  temp+=*sp*(*(vpt++));
	  if(stept<m1.num_row()) sp+=stept;
	}
      *(vrp++)=temp;
    }	// for(step
  return mret;
}

// Outer product v * v^T; the result is symmetric by construction, so only
// the lower triangle is computed and stored.
HepSymMatrix vT_times_v(const HepVector &v)
#ifdef HEP_GNU_OPTIMIZED_RETURN
return mret(v.num_row());
{
#else
{
  HepSymMatrix mret(v.num_row());
#endif
  HepMatrix::mIter mr=mret.m.begin();
  HepMatrix::mcIter vt1,vt2;
  for(vt1=v.m.begin();vt1<v.m.begin()+v.num_row();vt1++)
    for(vt2=v.m.begin();vt2<=vt1;vt2++)
      *(mr++)=(*vt1)*(*vt2);
  return mret;
}

/* -----------------------------------------------------------------------
   This section contains the assignment and inplace operators =,+=,-=,*=,/=.
   ----------------------------------------------------------------------- */

// Add a symmetric matrix into a full matrix.  Each packed element of m2
// is added at (j,k) and, off the diagonal, mirrored into (k,j).
// Note: ncol and nrow are interchangeable here because the dimension
// check guarantees *this is square.
HepMatrix & HepMatrix::operator+=(const HepSymMatrix &m2)
{
  CHK_DIM_2(num_row(),m2.num_row(),num_col(),m2.num_col(),+=);
  HepMatrix::mcIter sjk = m2.m.begin();
  // j >= k
  for(int j=0; j!=nrow; ++j) {
    for(int k=0; k<=j; ++k) {
      m[j*ncol+k] += *sjk;
      // make sure this is not a diagonal element
      if(k!=j) m[k*nrow+j] += *sjk;
      ++sjk;
    }
  }
  return (*this);
}

// Symmetric += symmetric: element-wise over the packed storage.
HepSymMatrix & HepSymMatrix::operator+=(const HepSymMatrix &m2)
{
  CHK_DIM_2(num_row(),m2.num_row(),num_col(),m2.num_col(),+=);
  SIMPLE_BOP(+=)
  return (*this);
}

// Subtract a symmetric matrix from a full matrix (mirrored like += above).
HepMatrix & HepMatrix::operator-=(const HepSymMatrix &m2)
{
  CHK_DIM_2(num_row(),m2.num_row(),num_col(),m2.num_col(),-=);
  HepMatrix::mcIter sjk = m2.m.begin();
  // j >= k
  for(int j=0; j!=nrow; ++j) {
    for(int k=0; k<=j; ++k) {
      m[j*ncol+k] -= *sjk;
      // make sure this is not a diagonal element
      if(k!=j) m[k*nrow+j] -= *sjk;
      ++sjk;
    }
  }
  return (*this);
}

// Symmetric -= symmetric: element-wise over the packed storage.
HepSymMatrix & HepSymMatrix::operator-=(const HepSymMatrix &m2)
{
  CHK_DIM_2(num_row(),m2.num_row(),num_col(),m2.num_col(),-=);
  SIMPLE_BOP(-=)
  return (*this);
}

// Divide every element by the scalar t.
HepSymMatrix & HepSymMatrix::operator/=(double t)
{
  SIMPLE_UOP(/=)
  return (*this);
}

// Multiply every element by the scalar t.
HepSymMatrix & HepSymMatrix::operator*=(double t)
{
  SIMPLE_UOP(*=)
  return (*this);
}

// Expand a symmetric matrix into a full (square) matrix.
HepMatrix & HepMatrix::operator=(const HepSymMatrix &m1)
{
  // define size, rows, and columns of *this
  nrow = ncol = m1.nrow;
  if(nrow*ncol != size_)
    {
      size_ = nrow*ncol;
      m.resize(size_);
    }
  // begin copy
  mcIter sjk = m1.m.begin();
  // j >= k
  for(int j=0; j!=nrow; ++j) {
    for(int k=0; k<=j; ++k) {
      m[j*ncol+k] = *sjk;
      // we could copy the diagonal element twice or check
      // doing the check may be a tiny bit faster,
      // so we choose that option for now
      if(k!=j) m[k*nrow+j] = *sjk;
      ++sjk;
    }
  }
  return (*this);
}

// Symmetric = symmetric: resize if needed, then copy the packed storage.
HepSymMatrix & HepSymMatrix::operator=(const HepSymMatrix &m1)
{
  if(m1.nrow != nrow)
    {
      nrow = m1.nrow;
      size_ = m1.size_;
      m.resize(size_);
    }
  m = m1.m;
  return (*this);
}

// Symmetric = diagonal: copy the diagonal, zero the rest.
HepSymMatrix & HepSymMatrix::operator=(const HepDiagMatrix &m1)
{
  if(m1.nrow != nrow)
    {
      nrow = m1.nrow;
      size_ = nrow * (nrow+1) / 2;
      m.resize(size_);
    }

  m.assign(size_,0);
  HepMatrix::mIter mrr = m.begin();
  HepMatrix::mcIter mr = m1.m.begin();
  for(int r=1; r<=nrow; r++) {
    *mrr = *(mr++);
    // Jump to the next diagonal slot of the packed storage.
    if(r<nrow) mrr += (r+1);
  }
  return (*this);
}

// Print the Matrix.

std::ostream& operator<<(std::ostream &s, const HepSymMatrix &q)
{
  s << std::endl;
  /* Fixed format needs 3 extra characters for field, while scientific needs 7 */
  int width;
  if(s.flags() & std::ios::fixed)
    width = s.precision()+3;
  else
    width = s.precision()+7;
  for(int irow = 1; irow<= q.num_row(); irow++)
    {
      for(int icol = 1; icol <= q.num_col(); icol++)
	{
	  s.width(width);
	  s << q(irow,icol) << " ";
	}
      s << std::endl;
    }
  return s;
}

// Apply f(element, row, col) to every lower-triangle element and return the
// resulting symmetric matrix.
HepSymMatrix HepSymMatrix::
apply(double (*f)(double, int, int)) const
#ifdef HEP_GNU_OPTIMIZED_RETURN
return mret(num_row());
{
#else
{
  HepSymMatrix mret(num_row());
#endif
  HepMatrix::mcIter a = m.begin();
  HepMatrix::mIter b = mret.m.begin();
  for(int ir=1;ir<=num_row();ir++) {
    for(int ic=1;ic<=ir;ic++) {
      *(b++) = (*f)(*(a++), ir, ic);
    }
  }
return mret; 00716 } 00717 00718 void HepSymMatrix::assign (const HepMatrix &m1) 00719 { 00720 if(m1.nrow != nrow) 00721 { 00722 nrow = m1.nrow; 00723 size_ = nrow * (nrow+1) / 2; 00724 m.resize(size_); 00725 } 00726 HepMatrix::mcIter a = m1.m.begin(); 00727 HepMatrix::mIter b = m.begin(); 00728 for(int r=1;r<=nrow;r++) { 00729 HepMatrix::mcIter d = a; 00730 for(int c=1;c<=r;c++) { 00731 *(b++) = *(d++); 00732 } 00733 if(r<nrow) a += nrow; 00734 } 00735 } 00736 00737 HepSymMatrix HepSymMatrix::similarity(const HepMatrix &m1) const 00738 #ifdef HEP_GNU_OPTIMIZED_RETURN 00739 return mret(m1.num_row()); 00740 { 00741 #else 00742 { 00743 HepSymMatrix mret(m1.num_row()); 00744 #endif 00745 HepMatrix temp = m1*(*this); 00746 // If m1*(*this) has correct dimensions, then so will the m1.T multiplication. 00747 // So there is no need to check dimensions again. 00748 int n = m1.num_col(); 00749 HepMatrix::mIter mr = mret.m.begin(); 00750 HepMatrix::mIter tempr1 = temp.m.begin(); 00751 for(int r=1;r<=mret.num_row();r++) { 00752 HepMatrix::mcIter m1c1 = m1.m.begin(); 00753 for(int c=1;c<=r;c++) { 00754 register double tmp = 0.0; 00755 HepMatrix::mIter tempri = tempr1; 00756 HepMatrix::mcIter m1ci = m1c1; 00757 for(int i=1;i<=m1.num_col();i++) { 00758 tmp+=(*(tempri++))*(*(m1ci++)); 00759 } 00760 *(mr++) = tmp; 00761 m1c1 += n; 00762 } 00763 tempr1 += n; 00764 } 00765 return mret; 00766 } 00767 00768 HepSymMatrix HepSymMatrix::similarity(const HepSymMatrix &m1) const 00769 #ifdef HEP_GNU_OPTIMIZED_RETURN 00770 return mret(m1.num_row()); 00771 { 00772 #else 00773 { 00774 HepSymMatrix mret(m1.num_row()); 00775 #endif 00776 HepMatrix temp = m1*(*this); 00777 int n = m1.num_col(); 00778 HepMatrix::mIter mr = mret.m.begin(); 00779 HepMatrix::mIter tempr1 = temp.m.begin(); 00780 for(int r=1;r<=mret.num_row();r++) { 00781 HepMatrix::mcIter m1c1 = m1.m.begin(); 00782 int c; 00783 for(c=1;c<=r;c++) { 00784 register double tmp = 0.0; 00785 HepMatrix::mIter tempri = tempr1; 00786 
HepMatrix::mcIter m1ci = m1c1; 00787 int i; 00788 for(i=1;i<c;i++) { 00789 tmp+=(*(tempri++))*(*(m1ci++)); 00790 } 00791 for(i=c;i<=m1.num_col();i++) { 00792 tmp+=(*(tempri++))*(*(m1ci)); 00793 if(i<m1.num_col()) m1ci += i; 00794 } 00795 *(mr++) = tmp; 00796 m1c1 += c; 00797 } 00798 tempr1 += n; 00799 } 00800 return mret; 00801 } 00802 00803 double HepSymMatrix::similarity(const HepVector &m1) 00804 const { 00805 register double mret = 0.0; 00806 HepVector temp = (*this) *m1; 00807 // If m1*(*this) has correct dimensions, then so will the m1.T multiplication. 00808 // So there is no need to check dimensions again. 00809 HepMatrix::mIter a=temp.m.begin(); 00810 HepMatrix::mcIter b=m1.m.begin(); 00811 HepMatrix::mIter e=a+m1.num_row(); 00812 for(;a<e;) mret += (*(a++)) * (*(b++)); 00813 return mret; 00814 } 00815 00816 HepSymMatrix HepSymMatrix::similarityT(const HepMatrix &m1) const 00817 #ifdef HEP_GNU_OPTIMIZED_RETURN 00818 return mret(m1.num_col()); 00819 { 00820 #else 00821 { 00822 HepSymMatrix mret(m1.num_col()); 00823 #endif 00824 HepMatrix temp = (*this)*m1; 00825 int n = m1.num_col(); 00826 HepMatrix::mIter mrc = mret.m.begin(); 00827 HepMatrix::mIter temp1r = temp.m.begin(); 00828 for(int r=1;r<=mret.num_row();r++) { 00829 HepMatrix::mcIter m11c = m1.m.begin(); 00830 for(int c=1;c<=r;c++) { 00831 register double tmp = 0.0; 00832 for(int i=1;i<=m1.num_row();i++) { 00833 HepMatrix::mIter tempir = temp1r + n*(i-1); 00834 HepMatrix::mcIter m1ic = m11c + n*(i-1); 00835 tmp+=(*(tempir))*(*(m1ic)); 00836 } 00837 *(mrc++) = tmp; 00838 m11c++; 00839 } 00840 temp1r++; 00841 } 00842 return mret; 00843 } 00844 00845 void HepSymMatrix::invert(int &ifail) { 00846 00847 ifail = 0; 00848 00849 switch(nrow) { 00850 case 3: 00851 { 00852 double det, temp; 00853 double t1, t2, t3; 00854 double c11,c12,c13,c22,c23,c33; 00855 c11 = (*(m.begin()+2)) * (*(m.begin()+5)) - (*(m.begin()+4)) * (*(m.begin()+4)); 00856 c12 = (*(m.begin()+4)) * (*(m.begin()+3)) - (*(m.begin()+1)) * 
	(*(m.begin()+5));
      c13 = (*(m.begin()+1)) * (*(m.begin()+4)) - (*(m.begin()+2)) * (*(m.begin()+3));
      c22 = (*(m.begin()+5)) * (*m.begin()) - (*(m.begin()+3)) * (*(m.begin()+3));
      c23 = (*(m.begin()+3)) * (*(m.begin()+1)) - (*(m.begin()+4)) * (*m.begin());
      c33 = (*m.begin()) * (*(m.begin()+2)) - (*(m.begin()+1)) * (*(m.begin()+1));
      // Pick the largest of the three first-column magnitudes as the pivot
      // element and expand the determinant along the matching cofactors,
      // for numerical stability.
      t1 = fabs(*m.begin());
      t2 = fabs(*(m.begin()+1));
      t3 = fabs(*(m.begin()+3));
      if (t1 >= t2) {
	if (t3 >= t1) {
	  temp = *(m.begin()+3);
	  det = c23*c12-c22*c13;
	} else {
	  temp = *m.begin();
	  det = c22*c33-c23*c23;
	}
      } else if (t3 >= t2) {
	temp = *(m.begin()+3);
	det = c23*c12-c22*c13;
      } else {
	temp = *(m.begin()+1);
	det = c13*c23-c12*c33;
      }
      if (det==0) {
	ifail = 1;
	return;
      }
      {
	// Scale the cofactors by pivot/det and write the inverse in place.
	double s = temp/det;
	HepMatrix::mIter mm = m.begin();
	*(mm++) = s*c11;
	*(mm++) = s*c12;
	*(mm++) = s*c22;
	*(mm++) = s*c13;
	*(mm++) = s*c23;
	*(mm) = s*c33;
      }
    }
    break;
  case 2:
    {
      // Closed-form 2x2 inverse on the packed storage.
      double det, temp, s;
      det = (*m.begin())*(*(m.begin()+2)) - (*(m.begin()+1))*(*(m.begin()+1));
      if (det==0) {
	ifail = 1;
	return;
      }
      s = 1.0/det;
      *(m.begin()+1) *= -s;
      temp = s*(*(m.begin()+2));
      *(m.begin()+2) = s*(*m.begin());
      *m.begin() = temp;
      break;
    }
  case 1:
    {
      // 1x1: plain reciprocal.
      if ((*m.begin())==0) {
	ifail = 1;
	return;
      }
      *m.begin() = 1.0/(*m.begin());
      break;
    }
  case 5:
    {
      invert5(ifail);
      return;
    }
  case 6:
    {
      invert6(ifail);
      return;
    }
  case 4:
    {
      invert4(ifail);
      return;
    }
  default:
    {
      invertBunchKaufman(ifail);
      return;
    }
  }
  return; // inversion successful
}

// Determinant via LU factorization of a full-matrix copy.
double HepSymMatrix::determinant() const {
  static const
  int max_array = 20;
  // ir must point to an array which is ***1 longer than*** nrow
  // NOTE(review): this function-local static buffer is shared across all
  // calls, so determinant() is not safe to call concurrently from
  // multiple threads.
  static std::vector<int> ir_vec (max_array+1);
  if (ir_vec.size() <= static_cast<unsigned int>(nrow)) ir_vec.resize(nrow+1);
  int * ir = &ir_vec[0];

  double det;
  // Work on a full-matrix copy; dfact_matrix returns non-zero on failure
  // (singular matrix), in which case 0 is returned as the determinant.
  HepMatrix mt(*this);
  int i = mt.dfact_matrix(det, ir);
  if(i==0) return det;
  return 0.0;
}

// Sum of the diagonal elements.  Diagonal (i,i) (0-based i) lives at
// packed index i*(i+1)/2 + i == (i+3)*i/2.
double HepSymMatrix::trace() const {
  double t = 0.0;
  for (int i=0; i<nrow; i++)
    t += *(m.begin() + (i+3)*i/2);
  return t;
}

void HepSymMatrix::invertBunchKaufman(int &ifail) {
  // Bunch-Kaufman diagonal pivoting method
  // It is described in J.R. Bunch, L. Kaufman (1977).
  // "Some Stable Methods for Calculating Inertia and Solving Symmetric
  // Linear Systems", Math. Comp. 31, p. 162-179. or in Gene H. Golub,
  // Charles F. van Loan, "Matrix Computations" (the second edition
  // has a bug.) and implemented in "lapack"
  // Mario Stanke, 09/97

  int i, j, k, s;
  int pivrow;

  // Establish the two working-space arrays needed:  x and piv are
  // used as pointers to arrays of doubles and ints respectively, each
  // of length nrow.  We do not want to reallocate each time through
  // unless the size needs to grow.  We do not want to leak memory, even
  // by having a new without a delete that is only done once.
00981 00982 static const int max_array = 25; 00983 #ifdef DISABLE_ALLOC 00984 static std::vector<double> xvec (max_array); 00985 static std::vector<int> pivv (max_array); 00986 typedef std::vector<int>::iterator pivIter; 00987 #else 00988 static std::vector<double,Alloc<double,25> > xvec (max_array); 00989 static std::vector<int, Alloc<int, 25> > pivv (max_array); 00990 typedef std::vector<int,Alloc<int,25> >::iterator pivIter; 00991 #endif 00992 if (xvec.size() < static_cast<unsigned int>(nrow)) xvec.resize(nrow); 00993 if (pivv.size() < static_cast<unsigned int>(nrow)) pivv.resize(nrow); 00994 // Note - resize shuld do nothing if the size is already larger than nrow, 00995 // but on VC++ there are indications that it does so we check. 00996 // Note - the data elements in a vector are guaranteed to be contiguous, 00997 // so x[i] and piv[i] are optimally fast. 00998 mIter x = xvec.begin(); 00999 // x[i] is used as helper storage, needs to have at least size nrow. 01000 pivIter piv = pivv.begin(); 01001 // piv[i] is used to store details of exchanges 01002 01003 double temp1, temp2; 01004 HepMatrix::mIter ip, mjj, iq; 01005 double lambda, sigma; 01006 const double alpha = .6404; // = (1+sqrt(17))/8 01007 const double epsilon = 32*DBL_EPSILON; 01008 // whenever a sum of two doubles is below or equal to epsilon 01009 // it is set to zero. 
01010 // this constant could be set to zero but then the algorithm 01011 // doesn't neccessarily detect that a matrix is singular 01012 01013 for (i = 0; i < nrow; ++i) piv[i] = i+1; 01014 01015 ifail = 0; 01016 01017 // compute the factorization P*A*P^T = L * D * L^T 01018 // L is unit lower triangular, D is direct sum of 1x1 and 2x2 matrices 01019 // L and D^-1 are stored in A = *this, P is stored in piv[] 01020 01021 for (j=1; j < nrow; j+=s) // main loop over columns 01022 { 01023 mjj = m.begin() + j*(j-1)/2 + j-1; 01024 lambda = 0; // compute lambda = max of A(j+1:n,j) 01025 pivrow = j+1; 01026 //ip = m.begin() + (j+1)*j/2 + j-1; 01027 for (i=j+1; i <= nrow ; ++i) { 01028 // calculate ip to avoid going off end of storage array 01029 ip = m.begin() + (i-1)*i/2 + j-1; 01030 if (fabs(*ip) > lambda) { 01031 lambda = fabs(*ip); 01032 pivrow = i; 01033 } 01034 } // for i 01035 if (lambda == 0 ) { 01036 if (*mjj == 0) { 01037 ifail = 1; 01038 return; 01039 } 01040 s=1; 01041 *mjj = 1./ *mjj; 01042 } else { // lambda == 0 01043 if (fabs(*mjj) >= lambda*alpha) { 01044 s=1; 01045 pivrow=j; 01046 } else { // fabs(*mjj) >= lambda*alpha 01047 sigma = 0; // compute sigma = max A(pivrow, j:pivrow-1) 01048 ip = m.begin() + pivrow*(pivrow-1)/2+j-1; 01049 for (k=j; k < pivrow; k++) { 01050 if (fabs(*ip) > sigma) sigma = fabs(*ip); 01051 ip++; 01052 } // for k 01053 if (sigma * fabs(*mjj) >= alpha * lambda * lambda) { 01054 s=1; 01055 pivrow = j; 01056 } else if (fabs(*(m.begin()+pivrow*(pivrow-1)/2+pivrow-1)) 01057 >= alpha * sigma) { 01058 s=1; 01059 } else { 01060 s=2; 01061 } // if sigma... 
01062 } // fabs(*mjj) >= lambda*alpha 01063 if (pivrow == j) { // no permutation neccessary 01064 piv[j-1] = pivrow; 01065 if (*mjj == 0) { 01066 ifail=1; 01067 return; 01068 } 01069 temp2 = *mjj = 1./ *mjj; // invert D(j,j) 01070 01071 // update A(j+1:n, j+1,n) 01072 for (i=j+1; i <= nrow; i++) { 01073 temp1 = *(m.begin() + i*(i-1)/2 + j-1) * temp2; 01074 ip = m.begin()+i*(i-1)/2+j; 01075 for (k=j+1; k<=i; k++) { 01076 *ip -= temp1 * *(m.begin() + k*(k-1)/2 + j-1); 01077 if (fabs(*ip) <= epsilon) 01078 *ip=0; 01079 ip++; 01080 } 01081 } // for i 01082 // update L 01083 //ip = m.begin() + (j+1)*j/2 + j-1; 01084 for (i=j+1; i <= nrow; ++i) { 01085 // calculate ip to avoid going off end of storage array 01086 ip = m.begin() + (i-1)*i/2 + j-1; 01087 *ip *= temp2; 01088 } 01089 } else if (s==1) { // 1x1 pivot 01090 piv[j-1] = pivrow; 01091 01092 // interchange rows and columns j and pivrow in 01093 // submatrix (j:n,j:n) 01094 ip = m.begin() + pivrow*(pivrow-1)/2 + j; 01095 for (i=j+1; i < pivrow; i++, ip++) { 01096 temp1 = *(m.begin() + i*(i-1)/2 + j-1); 01097 *(m.begin() + i*(i-1)/2 + j-1)= *ip; 01098 *ip = temp1; 01099 } // for i 01100 temp1 = *mjj; 01101 *mjj = *(m.begin()+pivrow*(pivrow-1)/2+pivrow-1); 01102 *(m.begin()+pivrow*(pivrow-1)/2+pivrow-1) = temp1; 01103 ip = m.begin() + (pivrow+1)*pivrow/2 + j-1; 01104 iq = ip + pivrow-j; 01105 for (i = pivrow+1; i <= nrow; ip += i, iq += i++) { 01106 temp1 = *iq; 01107 *iq = *ip; 01108 *ip = temp1; 01109 } // for i 01110 01111 if (*mjj == 0) { 01112 ifail = 1; 01113 return; 01114 } // *mjj == 0 01115 temp2 = *mjj = 1./ *mjj; // invert D(j,j) 01116 01117 // update A(j+1:n, j+1:n) 01118 for (i = j+1; i <= nrow; i++) { 01119 temp1 = *(m.begin() + i*(i-1)/2 + j-1) * temp2; 01120 ip = m.begin()+i*(i-1)/2+j; 01121 for (k=j+1; k<=i; k++) { 01122 *ip -= temp1 * *(m.begin() + k*(k-1)/2 + j-1); 01123 if (fabs(*ip) <= epsilon) 01124 *ip=0; 01125 ip++; 01126 } // for k 01127 } // for i 01128 // update L 01129 //ip = m.begin() + 
(j+1)*j/2 + j-1; 01130 for (i=j+1; i <= nrow; ++i) { 01131 // calculate ip to avoid going off end of storage array 01132 ip = m.begin() + (i-1)*i/2 + j-1; 01133 *ip *= temp2; 01134 } 01135 } else { // s=2, ie use a 2x2 pivot 01136 piv[j-1] = -pivrow; 01137 piv[j] = 0; // that means this is the second row of a 2x2 pivot 01138 01139 if (j+1 != pivrow) { 01140 // interchange rows and columns j+1 and pivrow in 01141 // submatrix (j:n,j:n) 01142 ip = m.begin() + pivrow*(pivrow-1)/2 + j+1; 01143 for (i=j+2; i < pivrow; i++, ip++) { 01144 temp1 = *(m.begin() + i*(i-1)/2 + j); 01145 *(m.begin() + i*(i-1)/2 + j) = *ip; 01146 *ip = temp1; 01147 } // for i 01148 temp1 = *(mjj + j + 1); 01149 *(mjj + j + 1) = 01150 *(m.begin() + pivrow*(pivrow-1)/2 + pivrow-1); 01151 *(m.begin() + pivrow*(pivrow-1)/2 + pivrow-1) = temp1; 01152 temp1 = *(mjj + j); 01153 *(mjj + j) = *(m.begin() + pivrow*(pivrow-1)/2 + j-1); 01154 *(m.begin() + pivrow*(pivrow-1)/2 + j-1) = temp1; 01155 ip = m.begin() + (pivrow+1)*pivrow/2 + j; 01156 iq = ip + pivrow-(j+1); 01157 for (i = pivrow+1; i <= nrow; ip += i, iq += i++) { 01158 temp1 = *iq; 01159 *iq = *ip; 01160 *ip = temp1; 01161 } // for i 01162 } // j+1 != pivrow 01163 // invert D(j:j+1,j:j+1) 01164 temp2 = *mjj * *(mjj + j + 1) - *(mjj + j) * *(mjj + j); 01165 if (temp2 == 0) { 01166 std::cerr 01167 << "SymMatrix::bunch_invert: error in pivot choice" 01168 << std::endl; 01169 } 01170 temp2 = 1. 
/ temp2; 01171 // this quotient is guaranteed to exist by the choice 01172 // of the pivot 01173 temp1 = *mjj; 01174 *mjj = *(mjj + j + 1) * temp2; 01175 *(mjj + j + 1) = temp1 * temp2; 01176 *(mjj + j) = - *(mjj + j) * temp2; 01177 01178 if (j < nrow-1) { // otherwise do nothing 01179 // update A(j+2:n, j+2:n) 01180 for (i=j+2; i <= nrow ; i++) { 01181 ip = m.begin() + i*(i-1)/2 + j-1; 01182 temp1 = *ip * *mjj + *(ip + 1) * *(mjj + j); 01183 if (fabs(temp1 ) <= epsilon) temp1 = 0; 01184 temp2 = *ip * *(mjj + j) + *(ip + 1) * *(mjj + j + 1); 01185 if (fabs(temp2 ) <= epsilon) temp2 = 0; 01186 for (k = j+2; k <= i ; k++) { 01187 ip = m.begin() + i*(i-1)/2 + k-1; 01188 iq = m.begin() + k*(k-1)/2 + j-1; 01189 *ip -= temp1 * *iq + temp2 * *(iq+1); 01190 if (fabs(*ip) <= epsilon) 01191 *ip = 0; 01192 } // for k 01193 } // for i 01194 // update L 01195 for (i=j+2; i <= nrow ; i++) { 01196 ip = m.begin() + i*(i-1)/2 + j-1; 01197 temp1 = *ip * *mjj + *(ip+1) * *(mjj + j); 01198 if (fabs(temp1) <= epsilon) temp1 = 0; 01199 *(ip+1) = *ip * *(mjj + j) 01200 + *(ip+1) * *(mjj + j + 1); 01201 if (fabs(*(ip+1)) <= epsilon) *(ip+1) = 0; 01202 *ip = temp1; 01203 } // for k 01204 } // j < nrow-1 01205 } 01206 } 01207 } // end of main loop over columns 01208 01209 if (j == nrow) { // the the last pivot is 1x1 01210 mjj = m.begin() + j*(j-1)/2 + j-1; 01211 if (*mjj == 0) { 01212 ifail = 1; 01213 return; 01214 } else { *mjj = 1. 
/ *mjj; } 01215 } // end of last pivot code 01216 01217 // computing the inverse from the factorization 01218 01219 for (j = nrow ; j >= 1 ; j -= s) // loop over columns 01220 { 01221 mjj = m.begin() + j*(j-1)/2 + j-1; 01222 if (piv[j-1] > 0) { // 1x1 pivot, compute column j of inverse 01223 s = 1; 01224 if (j < nrow) { 01225 //ip = m.begin() + (j+1)*j/2 + j-1; 01226 //for (i=0; i < nrow-j; ip += 1+j+i++) x[i] = *ip; 01227 ip = m.begin() + (j+1)*j/2 - 1; 01228 for (i=0; i < nrow-j; ++i) { 01229 ip += i + j; 01230 x[i] = *ip; 01231 } 01232 for (i=j+1; i<=nrow ; i++) { 01233 temp2=0; 01234 ip = m.begin() + i*(i-1)/2 + j; 01235 for (k=0; k <= i-j-1; k++) temp2 += *ip++ * x[k]; 01236 // avoid setting ip outside the bounds of the storage array 01237 ip -= 1; 01238 // using the value of k from the previous loop 01239 for ( ; k < nrow-j; ++k) { 01240 ip += j+k; 01241 temp2 += *ip * x[k]; 01242 } 01243 *(m.begin()+ i*(i-1)/2 + j-1) = -temp2; 01244 } // for i 01245 temp2 = 0; 01246 //ip = m.begin() + (j+1)*j/2 + j-1; 01247 //for (k=0; k < nrow-j; ip += 1+j+k++) 01248 //temp2 += x[k] * *ip; 01249 ip = m.begin() + (j+1)*j/2 - 1; 01250 for (k=0; k < nrow-j; ++k) { 01251 ip += j+k; 01252 temp2 += x[k] * *ip; 01253 } 01254 *mjj -= temp2; 01255 } // j < nrow 01256 } else { //2x2 pivot, compute columns j and j-1 of the inverse 01257 if (piv[j-1] != 0) 01258 std::cerr << "error in piv" << piv[j-1] << std::endl; 01259 s=2; 01260 if (j < nrow) { 01261 //ip = m.begin() + (j+1)*j/2 + j-1; 01262 //for (i=0; i < nrow-j; ip += 1+j+i++) x[i] = *ip; 01263 ip = m.begin() + (j+1)*j/2 - 1; 01264 for (i=0; i < nrow-j; ++i) { 01265 ip += i + j; 01266 x[i] = *ip; 01267 } 01268 for (i=j+1; i<=nrow ; i++) { 01269 temp2 = 0; 01270 ip = m.begin() + i*(i-1)/2 + j; 01271 for (k=0; k <= i-j-1; k++) 01272 temp2 += *ip++ * x[k]; 01273 for (ip += i-1; k < nrow-j; ip += 1+j+k++) 01274 temp2 += *ip * x[k]; 01275 *(m.begin()+ i*(i-1)/2 + j-1) = -temp2; 01276 } // for i 01277 temp2 = 0; 01278 //ip = m.begin() 
+ (j+1)*j/2 + j-1; 01279 //for (k=0; k < nrow-j; ip += 1+j+k++) temp2 += x[k] * *ip; 01280 ip = m.begin() + (j+1)*j/2 - 1; 01281 for (k=0; k < nrow-j; ++k) { 01282 ip += k + j; 01283 temp2 += x[k] * *ip; 01284 } 01285 *mjj -= temp2; 01286 temp2 = 0; 01287 //ip = m.begin() + (j+1)*j/2 + j-2; 01288 //for (i=j+1; i <= nrow; ip += i++) temp2 += *ip * *(ip+1); 01289 ip = m.begin() + (j+1)*j/2 - 2; 01290 for (i=j+1; i <= nrow; ++i) { 01291 ip += i - 1; 01292 temp2 += *ip * *(ip+1); 01293 } 01294 *(mjj-1) -= temp2; 01295 //ip = m.begin() + (j+1)*j/2 + j-2; 01296 //for (i=0; i < nrow-j; ip += 1+j+i++) x[i] = *ip; 01297 ip = m.begin() + (j+1)*j/2 - 2; 01298 for (i=0; i < nrow-j; ++i) { 01299 ip += i + j; 01300 x[i] = *ip; 01301 } 01302 for (i=j+1; i <= nrow ; i++) { 01303 temp2 = 0; 01304 ip = m.begin() + i*(i-1)/2 + j; 01305 for (k=0; k <= i-j-1; k++) 01306 temp2 += *ip++ * x[k]; 01307 for (ip += i-1; k < nrow-j; ip += 1+j+k++) 01308 temp2 += *ip * x[k]; 01309 *(m.begin()+ i*(i-1)/2 + j-2)= -temp2; 01310 } // for i 01311 temp2 = 0; 01312 //ip = m.begin() + (j+1)*j/2 + j-2; 01313 //for (k=0; k < nrow-j; ip += 1+j+k++) 01314 // temp2 += x[k] * *ip; 01315 ip = m.begin() + (j+1)*j/2 - 2; 01316 for (k=0; k < nrow-j; ++k) { 01317 ip += k + j; 01318 temp2 += x[k] * *ip; 01319 } 01320 *(mjj-j) -= temp2; 01321 } // j < nrow 01322 } // else piv[j-1] > 0 01323 01324 // interchange rows and columns j and piv[j-1] 01325 // or rows and columns j and -piv[j-2] 01326 01327 pivrow = (piv[j-1]==0)? 
-piv[j-2] : piv[j-1]; 01328 ip = m.begin() + pivrow*(pivrow-1)/2 + j; 01329 for (i=j+1;i < pivrow; i++, ip++) { 01330 temp1 = *(m.begin() + i*(i-1)/2 + j-1); 01331 *(m.begin() + i*(i-1)/2 + j-1) = *ip; 01332 *ip = temp1; 01333 } // for i 01334 temp1 = *mjj; 01335 *mjj = *(m.begin() + pivrow*(pivrow-1)/2 + pivrow-1); 01336 *(m.begin() + pivrow*(pivrow-1)/2 + pivrow-1) = temp1; 01337 if (s==2) { 01338 temp1 = *(mjj-1); 01339 *(mjj-1) = *( m.begin() + pivrow*(pivrow-1)/2 + j-2); 01340 *( m.begin() + pivrow*(pivrow-1)/2 + j-2) = temp1; 01341 } // s==2 01342 01343 // problem right here 01344 if( pivrow < nrow ) { 01345 ip = m.begin() + (pivrow+1)*pivrow/2 + j-1; // &A(i,j) 01346 // adding parenthesis for VC++ 01347 iq = ip + (pivrow-j); 01348 for (i = pivrow+1; i <= nrow; i++) { 01349 temp1 = *iq; 01350 *iq = *ip; 01351 *ip = temp1; 01352 if( i < nrow ) { 01353 ip += i; 01354 iq += i; 01355 } 01356 } // for i 01357 } // pivrow < nrow 01358 } // end of loop over columns (in computing inverse from factorization) 01359 01360 return; // inversion successful 01361 01362 } 01363 01364 } // namespace CLHEP