CLHEP 2.0.4.7 Reference Documentation
CLHEP Home Page CLHEP Documentation CLHEP Bug Reports |
00001 // -*- C++ -*- 00002 // --------------------------------------------------------------------------- 00003 // 00004 // This file is a part of the CLHEP - a Class Library for High Energy Physics. 00005 // 00006 // 00007 // Copyright Cornell University 1993, 1996, All Rights Reserved. 00008 // 00009 // This software written by Nobu Katayama and Mike Smyth, Cornell University. 00010 // 00011 // Redistribution and use in source and binary forms, with or without 00012 // modification, are permitted provided that the following conditions 00013 // are met: 00014 // 1. Redistributions of source code must retain the above copyright 00015 // notice and author attribution, this list of conditions and the 00016 // following disclaimer. 00017 // 2. Redistributions in binary form must reproduce the above copyright 00018 // notice and author attribution, this list of conditions and the 00019 // following disclaimer in the documentation and/or other materials 00020 // provided with the distribution. 00021 // 3. Neither the name of the University nor the names of its contributors 00022 // may be used to endorse or promote products derived from this software 00023 // without specific prior written permission. 00024 // 00025 // Creation of derivative forms of this software for commercial 00026 // utilization may be subject to restriction; written permission may be 00027 // obtained from Cornell University. 00028 // 00029 // CORNELL MAKES NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR IMPLIED. By way 00030 // of example, but not limitation, CORNELL MAKES NO REPRESENTATIONS OR 00031 // WARRANTIES OF MERCANTABILITY OR FITNESS FOR ANY PARTICULAR PURPOSE OR THAT 00032 // THE USE OF THIS SOFTWARE OR DOCUMENTATION WILL NOT INFRINGE ANY PATENTS, 00033 // COPYRIGHTS, TRADEMARKS, OR OTHER RIGHTS. Cornell University shall not be 00034 // held liable for any liability with respect to any claim by the user or any 00035 // other party arising from use of the program. 
00036 // 00037 00038 #ifdef GNUPRAGMA 00039 #pragma implementation 00040 #endif 00041 00042 #include <string.h> 00043 #include <float.h> // for DBL_EPSILON 00044 00045 #include "CLHEP/Matrix/defs.h" 00046 #include "CLHEP/Random/Random.h" 00047 #include "CLHEP/Matrix/SymMatrix.h" 00048 #include "CLHEP/Matrix/Matrix.h" 00049 #include "CLHEP/Matrix/DiagMatrix.h" 00050 #include "CLHEP/Matrix/Vector.h" 00051 00052 #ifdef HEP_DEBUG_INLINE 00053 #include "CLHEP/Matrix/SymMatrix.icc" 00054 #endif 00055 00056 namespace CLHEP { 00057 00058 // Simple operation for all elements 00059 00060 #define SIMPLE_UOP(OPER) \ 00061 register HepMatrix::mIter a=m.begin(); \ 00062 register HepMatrix::mIter e=m.begin()+num_size(); \ 00063 for(;a<e; a++) (*a) OPER t; 00064 00065 #define SIMPLE_BOP(OPER) \ 00066 register HepMatrix::mIter a=m.begin(); \ 00067 register HepMatrix::mcIter b=m2.m.begin(); \ 00068 register HepMatrix::mcIter e=m.begin()+num_size(); \ 00069 for(;a<e; a++, b++) (*a) OPER (*b); 00070 00071 #define SIMPLE_TOP(OPER) \ 00072 register HepMatrix::mcIter a=m1.m.begin(); \ 00073 register HepMatrix::mcIter b=m2.m.begin(); \ 00074 register HepMatrix::mIter t=mret.m.begin(); \ 00075 register HepMatrix::mcIter e=m1.m.begin()+m1.num_size(); \ 00076 for( ;a<e; a++, b++, t++) (*t) = (*a) OPER (*b); 00077 00078 #define CHK_DIM_2(r1,r2,c1,c2,fun) \ 00079 if (r1!=r2 || c1!=c2) { \ 00080 HepGenMatrix::error("Range error in SymMatrix function " #fun "(1)."); \ 00081 } 00082 00083 #define CHK_DIM_1(c1,r2,fun) \ 00084 if (c1!=r2) { \ 00085 HepGenMatrix::error("Range error in SymMatrix function " #fun "(2)."); \ 00086 } 00087 00088 // Constructors. 
// (Default constructors are inlined and in .icc file.)
//
// Storage layout: a HepSymMatrix keeps only the lower triangle of an
// nrow x nrow symmetric matrix, packed row by row into the vector m.
// Element (r,c) with r >= c (1-based) lives at index r*(r-1)/2 + c-1,
// so the total packed size is nrow*(nrow+1)/2.

// Construct a p x p symmetric matrix with every element zero.
HepSymMatrix::HepSymMatrix(int p)
   : m(p*(p+1)/2), nrow(p)
{
   size = nrow * (nrow+1) / 2;
   m.assign(size,0);
}

// Construct a p x p symmetric matrix; init==0 gives the zero matrix,
// init==1 the identity.  Any other value raises an error.
HepSymMatrix::HepSymMatrix(int p, int init)
   : m(p*(p+1)/2), nrow(p)
{
   size = nrow * (nrow+1) / 2;

   m.assign(size,0);
   switch(init)
   {
   case 0:
      break;

   case 1:
      {
         // Touch only the diagonal slots of the packed storage:
         // the diagonal of row i (0-based) sits at index i*(i+1)/2 + i.
         HepMatrix::mIter a;
         for(int i=0;i<nrow;++i) {
            a = m.begin() + (i+1)*i/2 + i;
            *a = 1.0;
         }
         break;
      }
   default:
      error("SymMatrix: initialization must be either 0 or 1.");
   }
}

// Construct a p x p symmetric matrix whose packed elements are all
// drawn from the supplied random-number generator.
HepSymMatrix::HepSymMatrix(int p, HepRandom &r)
   : m(p*(p+1)/2), nrow(p)
{
   size = nrow * (nrow+1) / 2;
   HepMatrix::mIter a = m.begin();
   HepMatrix::mIter b = m.begin() + size;
   for(;a<b;a++) *a = r();
}

//
// Destructor
//
HepSymMatrix::~HepSymMatrix() {
}

// Copy constructor: duplicate the packed storage of m1.
HepSymMatrix::HepSymMatrix(const HepSymMatrix &m1)
   : m(m1.size), nrow(m1.nrow), size(m1.size)
{
   m = m1.m;
}

// Construct from a diagonal matrix: zero everything, then copy the
// diagonal of m1 into the packed diagonal slots.
HepSymMatrix::HepSymMatrix(const HepDiagMatrix &m1)
   : m(m1.nrow*(m1.nrow+1)/2), nrow(m1.nrow)
{
   size = nrow * (nrow+1) / 2;

   int n = num_row();
   m.assign(size,0);

   HepMatrix::mIter mrr = m.begin();
   HepMatrix::mcIter mr = m1.m.begin();
   for(int r=1;r<=n;r++) {
      *mrr = *(mr++);
      // step to the next diagonal slot (row r+1 is one element longer)
      if(r<n) mrr += (r+1);
   }
}

//
//
// Sub matrix
//
//

// Return the symmetric sub-matrix spanning rows (and columns)
// min_row..max_row of *this.
HepSymMatrix HepSymMatrix::sub(int min_row, int max_row) const
#ifdef HEP_GNU_OPTIMIZED_RETURN
return mret(max_row-min_row+1);
{
#else
{
  HepSymMatrix mret(max_row-min_row+1);
#endif
  if(max_row > num_row())
    error("HepSymMatrix::sub: Index out of range");
  HepMatrix::mIter a = mret.m.begin();
  // b1 starts at the diagonal element (min_row,min_row) of *this:
  // (min_row+2)*(min_row-1)/2 == min_row*(min_row-1)/2 + min_row - 1
  HepMatrix::mcIter b1 = m.begin() + (min_row+2)*(min_row-1)/2;
  int rowsize=mret.num_row();
  for(int irow=1; irow<=rowsize; irow++) {
     HepMatrix::mcIter b = b1;
     for(int icol=0; icol<irow; ++icol) {
        *(a++) = *(b++);
     }
     // advance b1 a full source row (length irow+min_row-1) downwards
     if(irow<rowsize) b1 += irow+min_row-1;
  }
  return mret;
}

// Non-const overload of the above; identical except for iterator types.
HepSymMatrix HepSymMatrix::sub(int min_row, int max_row)
{
  HepSymMatrix mret(max_row-min_row+1);
  if(max_row > num_row())
    error("HepSymMatrix::sub: Index out of range");
  HepMatrix::mIter a = mret.m.begin();
  HepMatrix::mIter b1 = m.begin() + (min_row+2)*(min_row-1)/2;
  int rowsize=mret.num_row();
  for(int irow=1; irow<=rowsize; irow++) {
     HepMatrix::mIter b = b1;
     for(int icol=0; icol<irow; ++icol) {
        *(a++) = *(b++);
     }
     if(irow<rowsize) b1 += irow+min_row-1;
  }
  return mret;
}

// Paste m1 into *this so that m1's (1,1) lands on (row,row).
void HepSymMatrix::sub(int row,const HepSymMatrix &m1)
{
  if(row <1 || row+m1.num_row()-1 > num_row() )
    error("HepSymMatrix::sub: Index out of range");
  HepMatrix::mcIter a = m1.m.begin();
  HepMatrix::mIter b1 = m.begin() + (row+2)*(row-1)/2;
  int rowsize=m1.num_row();
  for(int irow=1; irow<=rowsize; ++irow) {
     HepMatrix::mIter b = b1;
     for(int icol=0; icol<irow; ++icol) {
        *(b++) = *(a++);
     }
     if(irow<rowsize) b1 += irow+row-1;
  }
}

//
// Direct sum of two matricies
//

// Block-diagonal composition: m1 occupies the upper-left block and m2
// the lower-right block; all cross terms are zero.
HepSymMatrix dsum(const HepSymMatrix &m1,
		  const HepSymMatrix &m2)
#ifdef HEP_GNU_OPTIMIZED_RETURN
return mret(m1.num_row() + m2.num_row(), 0);
{
#else
{
  HepSymMatrix mret(m1.num_row() + m2.num_row(),
		    0);
#endif
  mret.sub(1,m1);
  mret.sub(m1.num_row()+1,m2);
  return mret;
}
/* -----------------------------------------------------------------------
   This section contains support routines for matrix.h. This section contains
   The two argument functions +,-. They call the copy constructor and +=,-=.
   ----------------------------------------------------------------------- */

// Unary minus: a copy of *this with every packed element negated.
HepSymMatrix HepSymMatrix::operator- () const
#ifdef HEP_GNU_OPTIMIZED_RETURN
return m2(nrow);
{
#else
{
   HepSymMatrix m2(nrow);
#endif
   register HepMatrix::mcIter a=m.begin();
   register HepMatrix::mIter b=m2.m.begin();
   register HepMatrix::mcIter e=m.begin()+num_size();
   for(;a<e; a++, b++) (*b) = -(*a);
   return m2;
}

// General + symmetric: copy m1, then delegate to HepMatrix::operator+=.
HepMatrix operator+(const HepMatrix &m1,const HepSymMatrix &m2)
#ifdef HEP_GNU_OPTIMIZED_RETURN
return mret(m1);
{
#else
{
  HepMatrix mret(m1);
#endif
  CHK_DIM_2(m1.num_row(),m2.num_row(), m1.num_col(),m2.num_col(),+);
  mret += m2;
  return mret;
}

// Symmetric + general: addition commutes, so copy m2 and add m1.
HepMatrix operator+(const HepSymMatrix &m1,const HepMatrix &m2)
#ifdef HEP_GNU_OPTIMIZED_RETURN
return mret(m2);
{
#else
{
  HepMatrix mret(m2);
#endif
  CHK_DIM_2(m1.num_row(),m2.num_row(),m1.num_col(),m2.num_col(),+);
  mret += m1;
  return mret;
}

// Symmetric + symmetric: element-wise over the packed storage.
HepSymMatrix operator+(const HepSymMatrix &m1,const HepSymMatrix &m2)
#ifdef HEP_GNU_OPTIMIZED_RETURN
return mret(m1.nrow);
{
#else
{
  HepSymMatrix mret(m1.nrow);
#endif
  CHK_DIM_1(m1.nrow, m2.nrow,+);
  SIMPLE_TOP(+)
  return mret;
}

//
// operator -
//

HepMatrix operator-(const HepMatrix &m1,const HepSymMatrix &m2)
#ifdef HEP_GNU_OPTIMIZED_RETURN
return mret(m1);
{
#else
{
  HepMatrix mret(m1);
#endif
  CHK_DIM_2(m1.num_row(),m2.num_row(),
	    m1.num_col(),m2.num_col(),-);
  mret -= m2;
  return mret;
}

// Symmetric - general: subtraction does not commute, so copy the
// symmetric operand into a general matrix first, then subtract m2.
HepMatrix operator-(const HepSymMatrix &m1,const HepMatrix &m2)
#ifdef HEP_GNU_OPTIMIZED_RETURN
return mret(m1);
{
#else
{
  HepMatrix mret(m1);
#endif
  CHK_DIM_2(m1.num_row(),m2.num_row(),
	    m1.num_col(),m2.num_col(),-);
  mret -= m2;
  return mret;
}

HepSymMatrix operator-(const HepSymMatrix &m1,const HepSymMatrix &m2)
#ifdef HEP_GNU_OPTIMIZED_RETURN
return mret(m1.num_row());
{
#else
{
  HepSymMatrix mret(m1.num_row());
#endif
  CHK_DIM_1(m1.num_row(),m2.num_row(),-);
  SIMPLE_TOP(-)
  return mret;
}

/* -----------------------------------------------------------------------
   This section contains support routines for matrix.h. This file contains
   The two argument functions *,/. They call copy constructor and then /=,*=.
   ----------------------------------------------------------------------- */

// Scalar division: copy then scale in place.
HepSymMatrix operator/(
const HepSymMatrix &m1,double t)
#ifdef HEP_GNU_OPTIMIZED_RETURN
return mret(m1);
{
#else
{
  HepSymMatrix mret(m1);
#endif
  mret /= t;
  return mret;
}

// Scalar multiplication (matrix on the left).
HepSymMatrix operator*(const HepSymMatrix &m1,double t)
#ifdef HEP_GNU_OPTIMIZED_RETURN
return mret(m1);
{
#else
{
  HepSymMatrix mret(m1);
#endif
  mret *= t;
  return mret;
}

// Scalar multiplication (scalar on the left).
HepSymMatrix operator*(double t,const HepSymMatrix &m1)
#ifdef HEP_GNU_OPTIMIZED_RETURN
return mret(m1);
{
#else
{
  HepSymMatrix mret(m1);
#endif
  mret *= t;
  return mret;
}

// General * symmetric product.  The symmetric factor is stored packed,
// so each dot product walks the packed row until the diagonal and then
// steps down the column (strides grow by one per row).
HepMatrix operator*(const HepMatrix &m1,const HepSymMatrix &m2)
#ifdef HEP_GNU_OPTIMIZED_RETURN
return mret(m1.num_row(),m2.num_col());
{
#else
{
  HepMatrix mret(m1.num_row(),m2.num_col());
#endif
  CHK_DIM_1(m1.num_col(),m2.num_row(),*);
  HepMatrix::mcIter mit1, mit2, sp,snp; //mit2=0
  double temp;
  HepMatrix::mIter mir=mret.m.begin();
  for(mit1=m1.m.begin();
      mit1<m1.m.begin()+m1.num_row()*m1.num_col();
      mit1 = mit2)
    {
      snp=m2.m.begin();
      for(int step=1;step<=m2.num_row();++step)
	{
	  mit2=mit1;
	  sp=snp;
	  snp+=step;
	  temp=0;
	  // elements of m2 up to and including the diagonal of this row
	  while(sp<snp)
	    temp+=*(sp++)*(*(mit2++));
	  if( step<m2.num_row() ) { // only if we aren't on the last row
	     sp+=step-1;
	     // continue down the column: the packed stride grows by one
	     // for every further row of m2
	     for(int stept=step+1;stept<=m2.num_row();stept++)
	       {
		 temp+=*sp*(*(mit2++));
		 if(stept<m2.num_row()) sp+=stept;
	       }
	  }	// if(step
	  *(mir++)=temp;
	}	// for(step
    }	// for(mit1
  return mret;
}

// Symmetric * general product; mirror image of the routine above.
HepMatrix operator*(const HepSymMatrix &m1,const HepMatrix &m2)
#ifdef HEP_GNU_OPTIMIZED_RETURN
return mret(m1.num_row(),m2.num_col());
{
#else
{
  HepMatrix mret(m1.num_row(),m2.num_col());
#endif
  CHK_DIM_1(m1.num_col(),m2.num_row(),*);
  int step,stept;
  HepMatrix::mcIter mit1,mit2,sp,snp;
  double temp;
  HepMatrix::mIter mir=mret.m.begin();
  for(step=1,snp=m1.m.begin();step<=m1.num_row();snp+=step++)
    for(mit1=m2.m.begin();mit1<m2.m.begin()+m2.num_col();mit1++)
      {
	mit2=mit1;
	sp=snp;
	temp=0;
	while(sp<snp+step)
	  {
	    temp+=*mit2*(*(sp++));
	    // advance down the column of m2 unless already on its last row
	    if( m2.num_size()-(mit2-m2.m.begin())>m2.num_col() ){
	      mit2+=m2.num_col();
	    }
	  }
	if(step<m1.num_row()) {	// only if we aren't on the last row
	  sp+=step-1;
	  for(stept=step+1;stept<=m1.num_row();stept++)
	    {
	      temp+=*mit2*(*sp);
	      if(stept<m1.num_row()) {
		mit2+=m2.num_col();
		sp+=stept;
	      }
	    }
	}	// if(step
	*(mir++)=temp;
      }	// for(mit1
  return mret;
}

// Symmetric * symmetric product (result is general, not symmetric).
// Both operands are packed, so the walk splits on whether the current
// output row index is above or below the current column index.
HepMatrix operator*(const HepSymMatrix &m1,const HepSymMatrix &m2)
#ifdef HEP_GNU_OPTIMIZED_RETURN
return mret(m1.num_row(),m1.num_row());
{
#else
{
  HepMatrix mret(m1.num_row(),m1.num_row());
#endif
  CHK_DIM_1(m1.num_col(),m2.num_row(),*);
  int step1,stept1,step2,stept2;
  HepMatrix::mcIter snp1,sp1,snp2,sp2;
  double temp;
  HepMatrix::mIter mr = mret.m.begin();
  snp1=m1.m.begin();
  for(step1=1;step1<=m1.num_row();++step1) {
    snp2=m2.m.begin();
    for(step2=1;step2<=m2.num_row();++step2)
      {
	sp1=snp1;
	sp2=snp2;
	snp2+=step2;
	temp=0;
	if(step1<step2)
	  {
	    while(sp1<snp1+step1) {
	      temp+=(*(sp1++))*(*(sp2++));
	    }
	    sp1+=step1-1;
	    for(stept1=step1+1;stept1!=step2+1;++stept1) {
	      temp+=(*sp1)*(*(sp2++));
	      if(stept1<m2.num_row()) sp1+=stept1;
	    }
	    if(step2<m2.num_row()) { // only if we aren't on the last row
	      sp2+=step2-1;
	      for(stept2=step2+1;stept2<=m2.num_row();stept1++,stept2++) {
		temp+=(*sp1)*(*sp2);
		if(stept2<m2.num_row()) {
		  sp1+=stept1;
		  sp2+=stept2;
		}
	      }	// for(stept2
	    }	// if(step2
	  }	// step1<step2
	else
	  {
	    while(sp2<snp2) {
	      temp+=(*(sp1++))*(*(sp2++));
	    }
	    if(step2<m2.num_row()) { // only if we aren't on the last row
	      sp2+=step2-1;
	      for(stept2=step2+1;stept2!=step1+1;stept2++) {
		temp+=(*(sp1++))*(*sp2);
		if(stept2<m1.num_row()) sp2+=stept2;
	      }
	      if(step1<m1.num_row()) { // only if we aren't on the last row
		sp1+=step1-1;
		for(stept1=step1+1;stept1<=m1.num_row();stept1++,stept2++) {
		  temp+=(*sp1)*(*sp2);
		  if(stept1<m1.num_row()) {
		    sp1+=stept1;
		    sp2+=stept2;
		  }
		}	// for(stept1
	      }	// if(step1
	    }	// if(step2
	  }	// else
	*(mr++)=temp;
      }	// for(step2
    if(step1<m1.num_row()) snp1+=step1;
  }	// for(step1
  return mret;
}

// Symmetric matrix times column vector.
HepVector operator*(const HepSymMatrix &m1,const HepVector &m2)
#ifdef HEP_GNU_OPTIMIZED_RETURN
return mret(m1.num_row());
{
#else
{
  HepVector mret(m1.num_row());
#endif
  CHK_DIM_1(m1.num_col(),m2.num_row(),*);
  HepMatrix::mcIter sp,snp,vpt;
  double temp;
  int step,stept;
  HepMatrix::mIter vrp=mret.m.begin();
  for(step=1,snp=m1.m.begin();step<=m1.num_row();++step)
    {
      sp=snp;
      vpt=m2.m.begin();
      snp+=step;
      temp=0;
      // packed row up to the diagonal ...
      while(sp<snp)
	temp+=*(sp++)*(*(vpt++));
      if(step<m1.num_row()) sp+=step-1;
      // ... then down the column with growing stride
      for(stept=step+1;stept<=m1.num_row();stept++)
	{
	  temp+=*sp*(*(vpt++));
	  if(stept<m1.num_row()) sp+=stept;
	}
      *(vrp++)=temp;
    }	// for(step
  return mret;
}

// Outer product v * v-transpose, which is symmetric by construction,
// so only the lower triangle is computed and stored.
HepSymMatrix vT_times_v(const HepVector &v)
#ifdef HEP_GNU_OPTIMIZED_RETURN
return mret(v.num_row());
{
#else
{
  HepSymMatrix mret(v.num_row());
#endif
  HepMatrix::mIter mr=mret.m.begin();
  HepMatrix::mcIter vt1,vt2;
  for(vt1=v.m.begin();vt1<v.m.begin()+v.num_row();vt1++)
    for(vt2=v.m.begin();vt2<=vt1;vt2++)
      *(mr++)=(*vt1)*(*vt2);
  return mret;
}
/* -----------------------------------------------------------------------
   This section contains the assignment and inplace operators =,+=,-=,*=,/=.
   ----------------------------------------------------------------------- */

// Add a packed symmetric matrix into a general matrix: each packed
// element (j,k), j>=k, updates both the (j,k) and (k,j) slots of the
// full row-major storage.
HepMatrix & HepMatrix::operator+=(const HepSymMatrix &m2)
{
  CHK_DIM_2(num_row(),m2.num_row(),num_col(),m2.num_col(),+=);
  HepMatrix::mcIter sjk = m2.m.begin();
  // j >= k
  for(int j=0; j!=nrow; ++j) {
    for(int k=0; k<=j; ++k) {
      m[j*ncol+k] += *sjk;
      // make sure this is not a diagonal element
      if(k!=j) m[k*nrow+j] += *sjk;
      ++sjk;
    }
  }
  return (*this);
}

// Symmetric += symmetric: element-wise over the packed storage.
HepSymMatrix & HepSymMatrix::operator+=(const HepSymMatrix &m2)
{
  CHK_DIM_2(num_row(),m2.num_row(),num_col(),m2.num_col(),+=);
  SIMPLE_BOP(+=)
  return (*this);
}

// Subtract a packed symmetric matrix from a general matrix; mirror of
// operator+= above.
HepMatrix & HepMatrix::operator-=(const HepSymMatrix &m2)
{
  CHK_DIM_2(num_row(),m2.num_row(),num_col(),m2.num_col(),-=);
  HepMatrix::mcIter sjk = m2.m.begin();
  // j >= k
  for(int j=0; j!=nrow; ++j) {
    for(int k=0; k<=j; ++k) {
      m[j*ncol+k] -= *sjk;
      // make sure this is not a diagonal element
      if(k!=j) m[k*nrow+j] -= *sjk;
      ++sjk;
    }
  }
  return (*this);
}

HepSymMatrix & HepSymMatrix::operator-=(const HepSymMatrix &m2)
{
  CHK_DIM_2(num_row(),m2.num_row(),num_col(),m2.num_col(),-=);
  SIMPLE_BOP(-=)
  return (*this);
}

// Divide every element by the scalar t (no check for t == 0 here).
HepSymMatrix & HepSymMatrix::operator/=(double t)
{
  SIMPLE_UOP(/=)
  return (*this);
}

// Multiply every element by the scalar t.
HepSymMatrix & HepSymMatrix::operator*=(double t)
{
  SIMPLE_UOP(*=)
  return (*this);
}

// Assign a packed symmetric matrix to a general matrix, expanding the
// lower triangle into full row-major storage.
HepMatrix & HepMatrix::operator=(const HepSymMatrix &m1)
{
   // define size, rows, and columns of *this
   nrow = ncol = m1.nrow;
   if(nrow*ncol != size)
   {
      size = nrow*ncol;
      m.resize(size);
   }
   // begin copy
   mcIter sjk = m1.m.begin();
   // j >= k
   for(int j=0; j!=nrow; ++j) {
      for(int k=0; k<=j; ++k) {
	 m[j*ncol+k] = *sjk;
	 // we could copy the diagonal element twice or check
	 // doing the check may be a tiny bit faster,
	 // so we choose that option for now
	 if(k!=j) m[k*nrow+j] = *sjk;
	 ++sjk;
      }
   }
   return (*this);
}

// Symmetric = symmetric: resize if needed, then copy packed storage.
HepSymMatrix & HepSymMatrix::operator=(const HepSymMatrix &m1)
{
   if(m1.nrow != nrow)
   {
      nrow = m1.nrow;
      size = m1.size;
      m.resize(size);
   }
   m = m1.m;
   return (*this);
}

// Symmetric = diagonal: zero the packed storage, then drop the source
// diagonal into the packed diagonal slots.
HepSymMatrix & HepSymMatrix::operator=(const HepDiagMatrix &m1)
{
   if(m1.nrow != nrow)
   {
      nrow = m1.nrow;
      size = nrow * (nrow+1) / 2;
      m.resize(size);
   }

   m.assign(size,0);
   HepMatrix::mIter mrr = m.begin();
   HepMatrix::mcIter mr = m1.m.begin();
   for(int r=1; r<=nrow; r++) {
      *mrr = *(mr++);
      if(r<nrow) mrr += (r+1);
   }
   return (*this);
}

// Print the Matrix.

// Stream the full (logically square) matrix, one row per line, using
// the stream's current precision to choose a field width.
std::ostream& operator<<(std::ostream &s, const HepSymMatrix &q)
{
  s << std::endl;
  /* Fixed format needs 3 extra characters for field, while scientific needs 7 */
  int width;
  if(s.flags() & std::ios::fixed)
    width = s.precision()+3;
  else
    width = s.precision()+7;
  for(int irow = 1; irow<= q.num_row(); irow++)
    {
      for(int icol = 1; icol <= q.num_col(); icol++)
	{
	  s.width(width);
	  s << q(irow,icol) << " ";
	}
      s << std::endl;
    }
  return s;
}

// Apply f(element, row, col) to every packed (lower-triangle) element
// and return the resulting symmetric matrix.
HepSymMatrix HepSymMatrix::
apply(double (*f)(double, int, int)) const
#ifdef HEP_GNU_OPTIMIZED_RETURN
return mret(num_row());
{
#else
{
  HepSymMatrix mret(num_row());
#endif
  HepMatrix::mcIter a = m.begin();
  HepMatrix::mIter b = mret.m.begin();
  for(int ir=1;ir<=num_row();ir++) {
    for(int ic=1;ic<=ir;ic++) {
      *(b++) = (*f)(*(a++), ir, ic);
    }
  }
  return mret;
}

// Overwrite *this with the lower triangle of a general matrix m1.
// NOTE(review): only m1's lower triangle is read; the upper triangle is
// ignored, so for non-symmetric m1 the result reflects the lower half.
void HepSymMatrix::assign (const HepMatrix &m1)
{
   if(m1.nrow != nrow)
   {
      nrow = m1.nrow;
      size = nrow * (nrow+1) / 2;
      m.resize(size);
   }
   HepMatrix::mcIter a = m1.m.begin();
   HepMatrix::mIter b = m.begin();
   for(int r=1;r<=nrow;r++) {
      HepMatrix::mcIter d = a;
      for(int c=1;c<=r;c++) {
	 *(b++) = *(d++);
      }
      // jump to the start of the next row of m1's row-major storage
      if(r<nrow) a += nrow;
   }
}

// Similarity transform with a general matrix: mret = m1*(*this)*m1.T,
// computed as (m1*A) times rows of m1.
HepSymMatrix HepSymMatrix::similarity(const HepMatrix &m1) const
#ifdef HEP_GNU_OPTIMIZED_RETURN
return mret(m1.num_row());
{
#else
{
  HepSymMatrix mret(m1.num_row());
#endif
  HepMatrix temp = m1*(*this);
// If m1*(*this) has correct dimensions, then so will the m1.T multiplication.
// So there is no need to check dimensions again.
  int n = m1.num_col();
  HepMatrix::mIter mr = mret.m.begin();
  HepMatrix::mIter tempr1 = temp.m.begin();
  for(int r=1;r<=mret.num_row();r++) {
    HepMatrix::mcIter m1c1 = m1.m.begin();
    for(int c=1;c<=r;c++) {
      register double tmp = 0.0;
      HepMatrix::mIter tempri = tempr1;
      HepMatrix::mcIter m1ci = m1c1;
      for(int i=1;i<=m1.num_col();i++) {
	tmp+=(*(tempri++))*(*(m1ci++));
      }
      *(mr++) = tmp;
      m1c1 += n;
    }
    tempr1 += n;
  }
  return mret;
}

// Similarity transform with a symmetric matrix: mret = m1*(*this)*m1.T.
// The inner loop is split because m1 is packed: up to the diagonal it
// advances element by element, past it with a growing stride.
HepSymMatrix HepSymMatrix::similarity(const HepSymMatrix &m1) const
#ifdef HEP_GNU_OPTIMIZED_RETURN
return mret(m1.num_row());
{
#else
{
  HepSymMatrix mret(m1.num_row());
#endif
  HepMatrix temp = m1*(*this);
  int n = m1.num_col();
  HepMatrix::mIter mr = mret.m.begin();
  HepMatrix::mIter tempr1 = temp.m.begin();
  for(int r=1;r<=mret.num_row();r++) {
    HepMatrix::mcIter m1c1 = m1.m.begin();
    int c;
    for(c=1;c<=r;c++) {
      register double tmp = 0.0;
      HepMatrix::mIter tempri = tempr1;
      HepMatrix::mcIter m1ci = m1c1;
      int i;
      for(i=1;i<c;i++) {
	tmp+=(*(tempri++))*(*(m1ci++));
      }
      for(i=c;i<=m1.num_col();i++) {
	tmp+=(*(tempri++))*(*(m1ci));
	if(i<m1.num_col()) m1ci += i;
      }
      *(mr++) = tmp;
      m1c1 += c;
    }
    tempr1 += n;
  }
  return mret;
}

// Scalar similarity with a vector: returns v.T * (*this) * v.
double HepSymMatrix::similarity(const HepVector &m1)
const {
  register double mret = 0.0;
  HepVector temp = (*this) *m1;
// If m1*(*this) has correct dimensions, then so will the m1.T multiplication.
// So there is no need to check dimensions again.
  HepMatrix::mIter a=temp.m.begin();
  HepMatrix::mcIter b=m1.m.begin();
  HepMatrix::mIter e=a+m1.num_row();
  for(;a<e;) mret += (*(a++)) * (*(b++));
  return mret;
}

// Transposed similarity transform: mret = m1.T*(*this)*m1.
HepSymMatrix HepSymMatrix::similarityT(const HepMatrix &m1) const
#ifdef HEP_GNU_OPTIMIZED_RETURN
return mret(m1.num_col());
{
#else
{
  HepSymMatrix mret(m1.num_col());
#endif
  HepMatrix temp = (*this)*m1;
  int n = m1.num_col();
  HepMatrix::mIter mrc = mret.m.begin();
  HepMatrix::mIter temp1r = temp.m.begin();
  for(int r=1;r<=mret.num_row();r++) {
    HepMatrix::mcIter m11c = m1.m.begin();
    for(int c=1;c<=r;c++) {
      register double tmp = 0.0;
      for(int i=1;i<=m1.num_row();i++) {
	HepMatrix::mIter tempir = temp1r + n*(i-1);
	HepMatrix::mcIter m1ic = m11c + n*(i-1);
	tmp+=(*(tempir))*(*(m1ic));
      }
      *(mrc++) = tmp;
      m11c++;
    }
    temp1r++;
  }
  return mret;
}

// In-place inversion.  ifail is set to 0 on success and 1 when the
// matrix is singular (in which case *this is left unchanged for the
// hand-coded small sizes).  Sizes 1-3 use closed-form cofactors, sizes
// 4-6 dedicated routines, everything else Bunch-Kaufman pivoting.
void HepSymMatrix::invert(int &ifail) {

  ifail = 0;

  switch(nrow) {
  case 3:
    {
      double det, temp;
      double t1, t2, t3;
      double c11,c12,c13,c22,c23,c33;
      // cofactors of the packed 3x3 lower triangle
      // (packed order: m[0]=(1,1) m[1]=(2,1) m[2]=(2,2)
      //                m[3]=(3,1) m[4]=(3,2) m[5]=(3,3))
      c11 = (*(m.begin()+2)) * (*(m.begin()+5)) - (*(m.begin()+4)) * (*(m.begin()+4));
      c12 = (*(m.begin()+4)) * (*(m.begin()+3)) - (*(m.begin()+1)) * (*(m.begin()+5));
      c13 = (*(m.begin()+1)) * (*(m.begin()+4)) - (*(m.begin()+2)) * (*(m.begin()+3));
      c22 = (*(m.begin()+5)) * (*m.begin()) - (*(m.begin()+3)) * (*(m.begin()+3));
      c23 = (*(m.begin()+3)) * (*(m.begin()+1)) - (*(m.begin()+4)) * (*m.begin());
      c33 = (*m.begin()) * (*(m.begin()+2)) - (*(m.begin()+1)) * (*(m.begin()+1));
      // expand the determinant along the row with the largest leading
      // element, for numerical stability
      t1 = fabs(*m.begin());
      t2 = fabs(*(m.begin()+1));
      t3 = fabs(*(m.begin()+3));
      if (t1 >= t2) {
	if (t3 >= t1) {
	  temp = *(m.begin()+3);
	  det = c23*c12-c22*c13;
	} else {
	  temp = *m.begin();
	  det = c22*c33-c23*c23;
	}
      } else if (t3 >= t2) {
	temp = *(m.begin()+3);
	det = c23*c12-c22*c13;
      } else {
	temp = *(m.begin()+1);
	det = c13*c23-c12*c33;
      }
      if (det==0) {
	ifail = 1;
	return;
      }
      {
	double s = temp/det;
	HepMatrix::mIter mm = m.begin();
	*(mm++) = s*c11;
	*(mm++) = s*c12;
	*(mm++) = s*c22;
	*(mm++) = s*c13;
	*(mm++) = s*c23;
	*(mm) = s*c33;
      }
    }
    break;
 case 2:
    {
      double det, temp, s;
      det = (*m.begin())*(*(m.begin()+2)) - (*(m.begin()+1))*(*(m.begin()+1));
      if (det==0) {
	ifail = 1;
	return;
      }
      s = 1.0/det;
      *(m.begin()+1) *= -s;
      temp = s*(*(m.begin()+2));
      *(m.begin()+2) = s*(*m.begin());
      *m.begin() = temp;
      break;
    }
 case 1:
    {
      if ((*m.begin())==0) {
	ifail = 1;
	return;
      }
      *m.begin() = 1.0/(*m.begin());
      break;
    }
 case 5:
    {
      invert5(ifail);
      return;
    }
 case 6:
    {
      invert6(ifail);
      return;
    }
 case 4:
    {
      invert4(ifail);
      return;
    }
 default:
    {
      invertBunchKaufman(ifail);
      return;
    }
  }
  return; // inversion successful
}

// Determinant via LU factorization of a general-matrix copy; returns
// 0.0 when the factorization reports failure (singular matrix).
double HepSymMatrix::determinant() const {
  static const int max_array = 20;
  // ir must point to an array which is ***1 longer than*** nrow
  static std::vector<int> ir_vec (max_array+1);
  if (ir_vec.size() <= static_cast<unsigned int>(nrow)) ir_vec.resize(nrow+1);
  int * ir = &ir_vec[0];

  double det;
  HepMatrix mt(*this);
  int i = mt.dfact_matrix(det, ir);
  if(i==0) return det;
  return 0.0;
}

// Sum of the diagonal: the diagonal of row i (0-based) is at packed
// index i*(i+3)/2.
double HepSymMatrix::trace() const {
   double t = 0.0;
   for (int i=0; i<nrow; i++)
     t += *(m.begin() + (i+3)*i/2);
   return t;
}

void HepSymMatrix::invertBunchKaufman(int &ifail) {
  // Bunch-Kaufman diagonal pivoting method
  // It is described in J.R. Bunch, L. Kaufman (1977).
  // "Some Stable Methods for Calculating Inertia and Solving Symmetric
  // Linear Systems", Math. Comp. 31, p. 162-179. or in Gene H. Golub,
  // Charles F. van Loan, "Matrix Computations" (the second edition
  // has a bug.) and implemented in "lapack"
  // Mario Stanke, 09/97

  int i, j, k, s;
  int pivrow;

  // Establish the two working-space arrays needed:  x and piv are
  // used as pointers to arrays of doubles and ints respectively, each
  // of length nrow.  We do not want to reallocate each time through
  // unless the size needs to grow.  We do not want to leak memory, even
  // by having a new without a delete that is only done once.
01012 01013 static const int max_array = 25; 01014 #ifdef DISABLE_ALLOC 01015 static std::vector<double> xvec (max_array); 01016 static std::vector<int> pivv (max_array); 01017 typedef std::vector<int>::iterator pivIter; 01018 #else 01019 static std::vector<double,Alloc<double,25> > xvec (max_array); 01020 static std::vector<int, Alloc<int, 25> > pivv (max_array); 01021 typedef std::vector<int,Alloc<int,25> >::iterator pivIter; 01022 #endif 01023 if (xvec.size() < static_cast<unsigned int>(nrow)) xvec.resize(nrow); 01024 if (pivv.size() < static_cast<unsigned int>(nrow)) pivv.resize(nrow); 01025 // Note - resize shuld do nothing if the size is already larger than nrow, 01026 // but on VC++ there are indications that it does so we check. 01027 // Note - the data elements in a vector are guaranteed to be contiguous, 01028 // so x[i] and piv[i] are optimally fast. 01029 mIter x = xvec.begin(); 01030 // x[i] is used as helper storage, needs to have at least size nrow. 01031 pivIter piv = pivv.begin(); 01032 // piv[i] is used to store details of exchanges 01033 01034 double temp1, temp2; 01035 HepMatrix::mIter ip, mjj, iq; 01036 double lambda, sigma; 01037 const double alpha = .6404; // = (1+sqrt(17))/8 01038 const double epsilon = 32*DBL_EPSILON; 01039 // whenever a sum of two doubles is below or equal to epsilon 01040 // it is set to zero. 
// ---------------------------------------------------------------------------
// NOTE(review): this excerpt begins mid-function.  It is the tail of the
// CLHEP SymMatrix Bunch-Kaufman inversion routine (the std::cerr message
// below refers to it as "SymMatrix::bunch_invert").  Every local used here
// (m, piv, x, mjj, ip, iq, i, j, k, s, nrow, lambda, sigma, alpha, epsilon,
// temp1, temp2, pivrow, ifail) is declared before this excerpt; m appears to
// be the packed lower-triangular storage of the symmetric matrix, so the
// element A(i,j), i >= j (1-based), lives at m.begin() + i*(i-1)/2 + j-1.
// TODO confirm these declarations against the part of the file above.
// ---------------------------------------------------------------------------
// this constant could be set to zero but then the algorithm
// doesn't necessarily detect that a matrix is singular

  // Start with the identity permutation (piv holds 1-based row indices).
  for (i = 0; i < nrow; ++i) piv[i] = i+1;

  ifail = 0;

  // compute the factorization P*A*P^T = L * D * L^T
  // L is unit lower triangular, D is direct sum of 1x1 and 2x2 matrices
  // L and D^-1 are stored in A = *this, P is stored in piv[]

  for (j=1; j < nrow; j+=s)  // main loop over columns; s is the pivot size (1 or 2)
  {
    mjj = m.begin() + j*(j-1)/2 + j-1;  // &A(j,j) in packed storage
    lambda = 0;           // compute lambda = max of A(j+1:n,j)
    pivrow = j+1;
    //ip = m.begin() + (j+1)*j/2 + j-1;
    for (i=j+1; i <= nrow ; ++i) {
      // calculate ip to avoid going off end of storage array
      ip = m.begin() + (i-1)*i/2 + j-1;
      if (fabs(*ip) > lambda) {
        lambda = fabs(*ip);
        pivrow = i;
      }
    } // for i
    if (lambda == 0 ) {
      // whole subdiagonal of column j vanishes: a zero diagonal element
      // here means the matrix is singular
      if (*mjj == 0) {
        ifail = 1;
        return;
      }
      s=1;
      *mjj = 1./ *mjj;
    } else { // else of (lambda == 0)
      if (fabs(*mjj) >= lambda*alpha) {
        // diagonal element is large enough: use a 1x1 pivot, no swap
        s=1;
        pivrow=j;
      } else { // else of fabs(*mjj) >= lambda*alpha
        sigma = 0;  // compute sigma = max A(pivrow, j:pivrow-1)
        ip = m.begin() + pivrow*(pivrow-1)/2+j-1;
        for (k=j; k < pivrow; k++) {
          if (fabs(*ip) > sigma) sigma = fabs(*ip);
          ip++;
        } // for k
        // standard Bunch-Kaufman tests selecting between an unswapped 1x1
        // pivot, a swapped 1x1 pivot, and a 2x2 pivot
        if (sigma * fabs(*mjj) >= alpha * lambda * lambda) {
          s=1;
          pivrow = j;
        } else if (fabs(*(m.begin()+pivrow*(pivrow-1)/2+pivrow-1))
                   >= alpha * sigma) {
          s=1;
        } else {
          s=2;
        } // if sigma...
      } // fabs(*mjj) >= lambda*alpha
      if (pivrow == j) { // no permutation necessary
        piv[j-1] = pivrow;
        if (*mjj == 0) {
          ifail=1;
          return;
        }
        temp2 = *mjj = 1./ *mjj;  // invert D(j,j)

        // update A(j+1:n, j+1:n)
        for (i=j+1; i <= nrow; i++) {
          temp1 = *(m.begin() + i*(i-1)/2 + j-1) * temp2;
          ip = m.begin()+i*(i-1)/2+j;
          for (k=j+1; k<=i; k++) {
            *ip -= temp1 * *(m.begin() + k*(k-1)/2 + j-1);
            // flush tiny results to exact zero so singularity is detectable
            if (fabs(*ip) <= epsilon)
              *ip=0;
            ip++;
          }
        } // for i
        // update L
        //ip = m.begin() + (j+1)*j/2 + j-1;
        for (i=j+1; i <= nrow; ++i) {
          // calculate ip to avoid going off end of storage array
          ip = m.begin() + (i-1)*i/2 + j-1;
          *ip *= temp2;
        }
      } else if (s==1) { // 1x1 pivot
        piv[j-1] = pivrow;

        // interchange rows and columns j and pivrow in
        // submatrix (j:n,j:n)
        ip = m.begin() + pivrow*(pivrow-1)/2 + j;
        for (i=j+1; i < pivrow; i++, ip++) {
          temp1 = *(m.begin() + i*(i-1)/2 + j-1);
          *(m.begin() + i*(i-1)/2 + j-1)= *ip;
          *ip = temp1;
        } // for i
        temp1 = *mjj;
        *mjj = *(m.begin()+pivrow*(pivrow-1)/2+pivrow-1);
        *(m.begin()+pivrow*(pivrow-1)/2+pivrow-1) = temp1;
        // swap the column segments below row pivrow
        ip = m.begin() + (pivrow+1)*pivrow/2 + j-1;
        iq = ip + pivrow-j;
        for (i = pivrow+1; i <= nrow; ip += i, iq += i++) {
          temp1 = *iq;
          *iq = *ip;
          *ip = temp1;
        } // for i

        if (*mjj == 0) {
          ifail = 1;
          return;
        } // *mjj == 0
        temp2 = *mjj = 1./ *mjj; // invert D(j,j)

        // update A(j+1:n, j+1:n)
        for (i = j+1; i <= nrow; i++) {
          temp1 = *(m.begin() + i*(i-1)/2 + j-1) * temp2;
          ip = m.begin()+i*(i-1)/2+j;
          for (k=j+1; k<=i; k++) {
            *ip -= temp1 * *(m.begin() + k*(k-1)/2 + j-1);
            if (fabs(*ip) <= epsilon)
              *ip=0;
            ip++;
          } // for k
        } // for i
        // update L
        //ip = m.begin() + (j+1)*j/2 + j-1;
        for (i=j+1; i <= nrow; ++i) {
          // calculate ip to avoid going off end of storage array
          ip = m.begin() + (i-1)*i/2 + j-1;
          *ip *= temp2;
        }
      } else { // s=2, ie use a 2x2 pivot
        piv[j-1] = -pivrow;
        piv[j] = 0; // that means this is the second row of a 2x2 pivot

        if (j+1 != pivrow) {
          // interchange rows and columns j+1 and pivrow in
          // submatrix (j:n,j:n)
          ip = m.begin() + pivrow*(pivrow-1)/2 + j+1;
          for (i=j+2; i < pivrow; i++, ip++) {
            temp1 = *(m.begin() + i*(i-1)/2 + j);
            *(m.begin() + i*(i-1)/2 + j) = *ip;
            *ip = temp1;
          } // for i
          temp1 = *(mjj + j + 1);
          *(mjj + j + 1) =
            *(m.begin() + pivrow*(pivrow-1)/2 + pivrow-1);
          *(m.begin() + pivrow*(pivrow-1)/2 + pivrow-1) = temp1;
          temp1 = *(mjj + j);
          *(mjj + j) = *(m.begin() + pivrow*(pivrow-1)/2 + j-1);
          *(m.begin() + pivrow*(pivrow-1)/2 + j-1) = temp1;
          ip = m.begin() + (pivrow+1)*pivrow/2 + j;
          iq = ip + pivrow-(j+1);
          for (i = pivrow+1; i <= nrow; ip += i, iq += i++) {
            temp1 = *iq;
            *iq = *ip;
            *ip = temp1;
          } // for i
        } // j+1 != pivrow
        // invert D(j:j+1,j:j+1): temp2 is its determinant
        temp2 = *mjj * *(mjj + j + 1) - *(mjj + j) * *(mjj + j);
        if (temp2 == 0) {
          // NOTE(review): this branch only reports the error -- execution
          // falls through and divides by zero on the next statement.
          // Setting ifail and returning (as the other singularity checks
          // do) looks safer; confirm against CLHEP upstream before changing.
          std::cerr
            << "SymMatrix::bunch_invert: error in pivot choice"
            << std::endl;
        }
        temp2 = 1. / temp2;
        // this quotient is guaranteed to exist by the choice
        // of the pivot
        temp1 = *mjj;
        *mjj = *(mjj + j + 1) * temp2;
        *(mjj + j + 1) = temp1 * temp2;
        *(mjj + j) = - *(mjj + j) * temp2;

        if (j < nrow-1) { // otherwise do nothing
          // update A(j+2:n, j+2:n)
          for (i=j+2; i <= nrow ; i++) {
            ip = m.begin() + i*(i-1)/2 + j-1;
            temp1 = *ip * *mjj + *(ip + 1) * *(mjj + j);
            if (fabs(temp1 ) <= epsilon) temp1 = 0;
            temp2 = *ip * *(mjj + j) + *(ip + 1) * *(mjj + j + 1);
            if (fabs(temp2 ) <= epsilon) temp2 = 0;
            for (k = j+2; k <= i ; k++) {
              ip = m.begin() + i*(i-1)/2 + k-1;
              iq = m.begin() + k*(k-1)/2 + j-1;
              *ip -= temp1 * *iq + temp2 * *(iq+1);
              if (fabs(*ip) <= epsilon)
                *ip = 0;
            } // for k
          } // for i
          // update L: multiply the two stored columns by inv(D(j:j+1,j:j+1))
          for (i=j+2; i <= nrow ; i++) {
            ip = m.begin() + i*(i-1)/2 + j-1;
            temp1 = *ip * *mjj + *(ip+1) * *(mjj + j);
            if (fabs(temp1) <= epsilon) temp1 = 0;
            *(ip+1) = *ip * *(mjj + j)
              + *(ip+1) * *(mjj + j + 1);
            if (fabs(*(ip+1)) <= epsilon) *(ip+1) = 0;
            *ip = temp1;
          } // for i
        } // j < nrow-1
      }
    }
  } // end of main loop over columns

  if (j == nrow) { // then the last pivot is 1x1
    mjj = m.begin() + j*(j-1)/2 + j-1;
    if (*mjj == 0) {
      ifail = 1;
      return;
    } else { *mjj = 1. / *mjj; }
  } // end of last pivot code

  // computing the inverse from the factorization

  for (j = nrow ; j >= 1 ; j -= s) // loop over columns
  {
    mjj = m.begin() + j*(j-1)/2 + j-1;
    if (piv[j-1] > 0) { // 1x1 pivot, compute column j of inverse
      s = 1;
      if (j < nrow) {
        // gather column j of L below the diagonal into x[]
        //ip = m.begin() + (j+1)*j/2 + j-1;
        //for (i=0; i < nrow-j; ip += 1+j+i++) x[i] = *ip;
        ip = m.begin() + (j+1)*j/2 - 1;
        for (i=0; i < nrow-j; ++i) {
          ip += i + j;
          x[i] = *ip;
        }
        for (i=j+1; i<=nrow ; i++) {
          temp2=0;
          ip = m.begin() + i*(i-1)/2 + j;
          for (k=0; k <= i-j-1; k++) temp2 += *ip++ * x[k];
          // avoid setting ip outside the bounds of the storage array
          ip -= 1;
          // using the value of k from the previous loop
          for ( ; k < nrow-j; ++k) {
            ip += j+k;
            temp2 += *ip * x[k];
          }
          *(m.begin()+ i*(i-1)/2 + j-1) = -temp2;
        } // for i
        temp2 = 0;
        //ip = m.begin() + (j+1)*j/2 + j-1;
        //for (k=0; k < nrow-j; ip += 1+j+k++)
        //temp2 += x[k] * *ip;
        ip = m.begin() + (j+1)*j/2 - 1;
        for (k=0; k < nrow-j; ++k) {
          ip += j+k;
          temp2 += x[k] * *ip;
        }
        *mjj -= temp2;
      } // j < nrow
    } else { //2x2 pivot, compute columns j and j-1 of the inverse
      // consistency check: the second row of a 2x2 pivot must be marked 0
      if (piv[j-1] != 0)
        std::cerr << "error in piv" << piv[j-1] << std::endl;
      s=2;
      if (j < nrow) {
        // gather column j below the diagonal into x[]
        //ip = m.begin() + (j+1)*j/2 + j-1;
        //for (i=0; i < nrow-j; ip += 1+j+i++) x[i] = *ip;
        ip = m.begin() + (j+1)*j/2 - 1;
        for (i=0; i < nrow-j; ++i) {
          ip += i + j;
          x[i] = *ip;
        }
        for (i=j+1; i<=nrow ; i++) {
          temp2 = 0;
          ip = m.begin() + i*(i-1)/2 + j;
          for (k=0; k <= i-j-1; k++)
            temp2 += *ip++ * x[k];
          for (ip += i-1; k < nrow-j; ip += 1+j+k++)
            temp2 += *ip * x[k];
          *(m.begin()+ i*(i-1)/2 + j-1) = -temp2;
        } // for i
        temp2 = 0;
        //ip = m.begin() + (j+1)*j/2 + j-1;
        //for (k=0; k < nrow-j; ip += 1+j+k++) temp2 += x[k] * *ip;
        ip = m.begin() + (j+1)*j/2 - 1;
        for (k=0; k < nrow-j; ++k) {
          ip += k + j;
          temp2 += x[k] * *ip;
        }
        *mjj -= temp2;
        temp2 = 0;
        //ip = m.begin() + (j+1)*j/2 + j-2;
        //for (i=j+1; i <= nrow; ip += i++) temp2 += *ip * *(ip+1);
        ip = m.begin() + (j+1)*j/2 - 2;
        for (i=j+1; i <= nrow; ++i) {
          ip += i - 1;
          temp2 += *ip * *(ip+1);
        }
        *(mjj-1) -= temp2;
        // gather column j-1 below the diagonal into x[]
        //ip = m.begin() + (j+1)*j/2 + j-2;
        //for (i=0; i < nrow-j; ip += 1+j+i++) x[i] = *ip;
        ip = m.begin() + (j+1)*j/2 - 2;
        for (i=0; i < nrow-j; ++i) {
          ip += i + j;
          x[i] = *ip;
        }
        for (i=j+1; i <= nrow ; i++) {
          temp2 = 0;
          ip = m.begin() + i*(i-1)/2 + j;
          for (k=0; k <= i-j-1; k++)
            temp2 += *ip++ * x[k];
          for (ip += i-1; k < nrow-j; ip += 1+j+k++)
            temp2 += *ip * x[k];
          *(m.begin()+ i*(i-1)/2 + j-2)= -temp2;
        } // for i
        temp2 = 0;
        //ip = m.begin() + (j+1)*j/2 + j-2;
        //for (k=0; k < nrow-j; ip += 1+j+k++)
        // temp2 += x[k] * *ip;
        ip = m.begin() + (j+1)*j/2 - 2;
        for (k=0; k < nrow-j; ++k) {
          ip += k + j;
          temp2 += x[k] * *ip;
        }
        *(mjj-j) -= temp2;
      } // j < nrow
    } // else piv[j-1] > 0

    // interchange rows and columns j and piv[j-1]
    // or rows and columns j and -piv[j-2]

    pivrow = (piv[j-1]==0)? -piv[j-2] : piv[j-1];
    ip = m.begin() + pivrow*(pivrow-1)/2 + j;
    for (i=j+1;i < pivrow; i++, ip++) {
      temp1 = *(m.begin() + i*(i-1)/2 + j-1);
      *(m.begin() + i*(i-1)/2 + j-1) = *ip;
      *ip = temp1;
    } // for i
    temp1 = *mjj;
    *mjj = *(m.begin() + pivrow*(pivrow-1)/2 + pivrow-1);
    *(m.begin() + pivrow*(pivrow-1)/2 + pivrow-1) = temp1;
    if (s==2) {
      temp1 = *(mjj-1);
      *(mjj-1) = *( m.begin() + pivrow*(pivrow-1)/2 + j-2);
      *( m.begin() + pivrow*(pivrow-1)/2 + j-2) = temp1;
    } // s==2

    // NOTE(review): the original carried the bare marker "problem right
    // here".  The guarded stepping below (ip/iq advance only while
    // i < nrow) keeps the pointers inside the packed storage array,
    // matching the "avoid going off end of storage array" fixes above.
    if( pivrow < nrow ) {
      ip = m.begin() + (pivrow+1)*pivrow/2 + j-1;  // &A(i,j)
      // adding parenthesis for VC++
      iq = ip + (pivrow-j);
      for (i = pivrow+1; i <= nrow; i++) {
        temp1 = *iq;
        *iq = *ip;
        *ip = temp1;
        if( i < nrow ) {
          ip += i;
          iq += i;
        }
      } // for i
    } // pivrow < nrow
  } // end of loop over columns (in computing inverse from factorization)

  return; // inversion successful

}

} // namespace CLHEP