aimsalgo  5.0.5
Neuroimaging image processing
mixtureOfPpca_d.h
1 /* This software and supporting documentation are distributed by
2  * Institut Federatif de Recherche 49
3  * CEA/NeuroSpin, Batiment 145,
4  * 91191 Gif-sur-Yvette cedex
5  * France
6  *
7  * This software is governed by the CeCILL-B license under
8  * French law and abiding by the rules of distribution of free software.
9  * You can use, modify and/or redistribute the software under the
10  * terms of the CeCILL-B license as circulated by CEA, CNRS
11  * and INRIA at the following URL "http://www.cecill.info".
12  *
13  * As a counterpart to the access to the source code and rights to copy,
14  * modify and redistribute granted by the license, users are provided only
15  * with a limited warranty and the software's author, the holder of the
16  * economic rights, and the successive licensors have only limited
17  * liability.
18  *
19  * In this respect, the user's attention is drawn to the risks associated
20  * with loading, using, modifying and/or developing or reproducing the
21  * software by the user in light of its specific status of free software,
22  * that may mean that it is complicated to manipulate, and that also
23  * therefore means that it is reserved for developers and experienced
24  * professionals having in-depth computer knowledge. Users are therefore
25  * encouraged to load and test the software's suitability as regards their
26  * requirements in conditions enabling the security of their systems and/or
27  * data to be ensured and, more generally, to use and operate it in the
28  * same conditions as regards security.
29  *
30  * The fact that you are presently reading this means that you have had
31  * knowledge of the CeCILL-B license and that you accept its terms.
32  */
33 
34 
35 #ifndef MIXTURE_OF_PPCA_D_H
36 #define MIXTURE_OF_PPCA_D_H
37 
38 #include <cstdlib>
40 #include <aims/io/finder.h>
41 #include <aims/io/writer.h>
42 #include <aims/vector/vector.h>
43 #include <aims/math/gausslu.h>
44 #include <aims/bucket/bucketMap.h>
46 #include <aims/math/svd.h>
47 #include <aims/data/data.h>
48 #include <vector>
49 #include <iostream>
50 #include <iomanip>
51 #include <math.h>
52 #include <aims/math/random.h>
53 
54 /* This code does not seem to be used anywhere.
55  In any case it does not currently build: namespace problems prevent it
56  from compiling.
57 */
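/* Minimal usage sketch (hypothetical, since this header is not built at the
   moment -- see the note above). Variable names are illustrative only:

     AimsData<float> individuals( nbVoxels, nbFrames );  // one row per voxel, one column per frame
     std::vector<Point3d> positions( nbVoxels );         // voxel coordinates, in row order
     std::vector< std::list<int> > initialClasses;       // left empty: random initialization
     aims::MixtureOfPPCA<float> mixture( 5, 3, 100, individuals, positions,
                                         initialClasses, "result", 0 );

   The EM iterations are then driven by the main loop of the class (the
   do/while further down), which alternates newStep1() and newStep2() on each
   class and pTnComputation() on the mixture, until the log-likelihood
   stabilizes. */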
58 
59 namespace aims
60 {
61 
62 double * PpcaAnalyserElement::_exp = 0 ;
63 
64 PpcaAnalyserElement::PpcaAnalyserElement( int significantNumberOfVp, bool useOnlyCorrIndiv ) :
65  _useOnlyCorrIndiv( useOnlyCorrIndiv ), _significantNumberOfVp( significantNumberOfVp ), _computed( false ), _energy(0.), _valid(true)
66 {
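 // Shared lookup table of exp( -x ) values sampled every 0.01: the class
 // evaluates the Gaussian density through _exp[ int( 0.5 * dist * 100. ) ]
 // instead of calling exp() for every individual; distances above 1500 are
 // mapped to a zero probability (see newStep1 below).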
67  if( !_exp ){
68  _exp = new double[75010] ;
69  for( int i = 0 ; i < 75010 ; ++i )
70  _exp[i] = exp( -i/100. ) ;
71  }
72 }
73 
74 
75 template <class T>
76 void
77 PpcaAnalyserElement::init( const std::list<int>& selectedIndividuals, double initialPi,
78  const AimsData<T>& individuals, double noiseRef )
79 {
80  AimsData<T> indivMatrix( max(int(selectedIndividuals.size()), 1), individuals.dimY() ) ;
81  list<int>::const_iterator iter( selectedIndividuals.begin() ),
82  last( selectedIndividuals.end() ) ;
83  int i = 0 ;
84 
85  while( iter != last ){
86  for( int t = 0 ; t < individuals.dimY() ; ++t )
87  indivMatrix( i, t ) = individuals( *iter, t ) ;
88  ++i ;
89  ++iter ;
90  }
91 
92  _Pi = initialPi ;
93  doIt( indivMatrix, individuals.dimX(), noiseRef ) ;
94 }
95 
96 
97 template <class T>
98 void
99 PpcaAnalyserElement::doIt( const AimsData<T>& indivMatrix, int totalNbOfIndividuals, double noiseRef )
100 {
101  _computed = true ;
102  int nbInd = indivMatrix.dimX() ;
103  int nbFrame = indivMatrix.dimY() ;
104 
105  AimsData<double> centeredIndivMatrix( nbInd, nbFrame ) ;
106  _mean = AimsData<double>( nbFrame ) ;
107 
108  for( int ind = 0 ; ind < nbInd ; ++ind )
109  for( int t = 0 ; t < nbFrame ; ++t )
110  _mean[t] += indivMatrix( ind, t ) ;
111  for( int t = 0 ; t < nbFrame ; ++t ){
112  _mean[t] /= centeredIndivMatrix.dimX() ;
113  for( int ind = 0 ; ind < centeredIndivMatrix.dimX() ; ++ind )
114  centeredIndivMatrix( ind, t ) = indivMatrix( ind, t ) - _mean[t] ;
115  }
116 // cout << "mean = " << _mean << endl ;
117 
118  // Variance-covariance matrix of the centered individuals
119  AimsData<double> matVarCov( nbFrame, nbFrame ) ;
120  int x, y ;
121  ForEach2d( matVarCov, x, y ){
122  for( int k = 0 ; k < centeredIndivMatrix.dimX() ; ++k )
123  matVarCov(x, y) += centeredIndivMatrix(k, x) * centeredIndivMatrix(k, y) ;
124  matVarCov(x, y) /= centeredIndivMatrix.dimX() - 1 ;
125  }
126 
127  // SVD decomposition
128  AimsSVD<double> svd;
130  AimsData<double> eigenValues = svd.doit( matVarCov ) ;
131  svd.sort( matVarCov, eigenValues ) ;
132 // cout << "valeurs propres = " << eigenValues << endl ;
133 
134  // Compute the "noise variance" (sum of the non-significant eigenvalues divided by their number) = sigma^2
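 // In PPCA (Tipping & Bishop) the ML estimate of the isotropic noise is
 // sigma^2 = 1/(d - q) * sum_{j=q+1..d} lambda_j, with d = nbFrame and
 // q = _significantNumberOfVp.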
135  _sigma2 = 0. ;
136  for( int i = _significantNumberOfVp ; i < nbFrame ; ++i )
137  _sigma2 += eigenValues[i] ;
138  _sigma2 /= (double)( nbFrame - _significantNumberOfVp ) ;
139  cout << endl << "sigma2 = " << setprecision(3) << _sigma2 << endl ;
140 
141  AimsData<double> selectedEigenVectors( nbFrame, _significantNumberOfVp ) ; //dim dxq
142  for( int i = 0 ; i < _significantNumberOfVp ; ++i ){
143  for( int t = 0 ; t < nbFrame ; ++t ){
144  selectedEigenVectors(t, i) = matVarCov( t, i ) ;
145  }
146  }
147 // cout << "selected EV = " << endl << setprecision(3) << selectedEigenVectors << endl ;
148 
149  // Compute the "weight" matrix Wi (dim. d x q)
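 // ML estimate: Wi = Uq * (Lambda_q - sigma^2 I)^(1/2), where Uq holds the q
 // leading eigenvectors and Lambda_q the corresponding eigenvalues of the
 // covariance matrix.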
150  AimsData<double> tempWi( _significantNumberOfVp, _significantNumberOfVp ) ;
151 
152  for( int i = 0 ; i < _significantNumberOfVp ; ++i )
153  tempWi(i, i) = sqrt( eigenValues[i] - _sigma2 ) ;
154  _Wi = selectedEigenVectors.cross( tempWi ) ;
155 
156  // Compute the model covariance matrix Ci = sigma^2 I + W W^T
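 // Ci is the d x d covariance of the Gaussian density p(tn|i); its inverse and
 // determinant are needed below to evaluate that density for every individual.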
157  AimsData<double> Ci = _Wi.cross( _Wi.clone().transpose() ) ;
158  for( int i = 0 ; i < nbFrame ; ++i )
159  Ci(i, i) += _sigma2 ;
160 
161  // Compute detCi and invCi
162  double s = 0.000001 ;
163  AimsData< double > u = Ci.clone(); // because it is replaced on SVD output
164  AimsData< double > v( Ci.dimY(), Ci.dimY() );
165  AimsSVD< double > svd2;
166  svd2.setReturnType( AimsSVD<double>::MatrixOfSingularValues ) ;
167  AimsData< double > w = svd2.doit( u, &v );
168 
169  // Set the singular values lower than threshold to zero
170  double ts = s * w.maximum();
171  int i, n = w.dimX();
172  double detCi = 1. ;
173  for ( i = 0 ; i < n; i++ ){
174  if ( w( i, i ) < 0. ){
175  cout << endl << "pour i = " << i << ", valeur w = " << w( i, i ) << " ! VALEUR NEGATIVE CHANGEE !" << endl ;
176  w( i, i ) = - w( i, i ) ;
177  }
178  detCi *= w( i, i ) / _sigma2 ;
179  if ( w( i, i ) > ts ) w( i, i ) = 1.0f / w( i, i );
180  else w( i, i ) = 0.0f;
181  }
182  // Get the SVD inverse of h ( h-1 = V.W-2.V'.H' = V.W-1.U' )
183  // for solution of H'.H.x = H'.b <=> min(||H.x-b||^2)
184  _invCi = v.cross( w.cross( u.transpose() ) ) ;
185 // cout << "invCi = " << endl << _invCi << endl ;
186 
187 // cout << "det Ci = " << detCi << endl ;
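 // Note: detCi accumulated above is |Ci| / sigma2^d, so _normFactor equals
 // noiseRef^(d/2) / sqrt(|Ci|); the noiseRef rescaling only keeps the
 // normalisation constant from underflowing, it cancels out in the Rni ratios.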
188  _normFactor = 1 / ( pow( detCi, 0.5 ) * pow( _sigma2 / noiseRef, nbFrame / 2. ) ) ;
189 // cout << "normFactor avant = " << 1 / pow( detCi, 0.5 ) << endl ;
190  cout << "norm factor avec noiseRef = " << _normFactor << endl ;
191 
192  // Compute the posterior covariance matrix Mi = sigma^2 I + W^T W and invMi
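 // Mi is only q x q, so it is much cheaper to invert than Ci; invMi is what the
 // iterative (non-explicit) update in newStep2 relies on.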
193  AimsData<double> Mi = ( _Wi.clone().transpose() ).cross( _Wi ) ;
194  for( int i = 0 ; i < _significantNumberOfVp ; ++i )
195  Mi(i, i) += _sigma2 ;
196 
197  u = Mi.clone(); // because it is replaced on SVD output
198  v = AimsData<double>( Mi.dimY(), Mi.dimY() );
199  AimsSVD< double > svd3;
200  w = svd3.doit( u, &v );
201 
202  ts = s * w.maximum();
203  n = w.dimX();
204  for ( i = 0 ; i < n; i++ ){
205  if ( w( i, i ) > ts ) w( i, i ) = 1.0f / w( i, i );
206  else w( i, i ) = 0.0f;
207  }
208  _invMi = v.cross( w.cross( u.transpose() ) ) ;
209 // cout << "invMi = " << _invMi << endl ;
210 
211  _Rn = AimsData<double>( totalNbOfIndividuals ) ;
212 }
213 
214 
215 template <class T>
216 bool
217 PpcaAnalyserElement::newStep1( const AimsData<T>& indivMatrix, bool useOnlyCorrIndiv )
218 {
219  int nbInd = indivMatrix.dimX() ;
220  int nbFrame = indivMatrix.dimY() ;
221 
223 // compute p(tn|i) for every individual
224 // + COMPUTE THE CORRELATION BETWEEN EACH INDIVIDUAL AND THE MEAN VECTOR
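// Up to the dropped (2*M_PI)^(-d/2) factor, p(tn|i) = _normFactor *
// exp( -0.5 * (tn - mean_i)^T * invCi * (tn - mean_i) ); the exponential is
// read from the precomputed _exp table.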
225 
226  AimsData<double> indivn( nbFrame ) ;
227  AimsData<double> centeredIndivn( nbFrame ) ;
228  _dist = AimsData<double>( nbInd ) ;
229  _pTni = AimsData<double>( nbInd ) ;
230 
231  double sumInd, sumMean, indMean, meanMean, corr = 0. ;
232  int count = 0 ;
233  AimsData<double> antiCorr( nbInd, nbFrame ) ;
234  AimsData<double> Corr( nbInd, nbFrame ) ;
235  int nbIndTaken = 0 ;
236 
237  //float invDimNormFactor = 1. / ( nbFrame - _significantNumberOfVp ) ;
238 
239  for( int n = 0 ; n < nbInd ; ++n ){
240  sumInd = 0. ;
241  sumMean = 0. ;
242  indMean = 0. ;
243  meanMean = 0. ;
244  corr = 0. ;
245 
246  for( int t = 0 ; t < nbFrame ; ++t ){
247  indivn[t] = indivMatrix(n, t) ;
248  centeredIndivn[t] = indivMatrix(n, t) - _mean[t] ;
249  sumInd += indivn[t] ;
250  sumMean += _mean[t] ;
251  }
252  // ADDED 04/11: compute the correlation between each individual and the mean vector
253  if( useOnlyCorrIndiv == 1 ){
254  indMean = sumInd / (double)nbFrame ;
255  meanMean = sumMean / (double)nbFrame ;
256  for( int t = 0 ; t < nbFrame ; ++t )
257  corr += ( indivn[t] - indMean ) * ( _mean[t] - meanMean ) ;
258 
259  if( corr < 0. ){
260 // zero probability if anti-correlated
261 /* for( int t = 0 ; t < nbFrame ; ++t ) */
262 /* antiCorr( nbAntiCorr, t ) = indivn[t] ; */
263 /* ++nbAntiCorr ; */
264  _pTni[n] = 0. ;
265  ++count ;
266  } else {
267  // compute p(tn|i)
268 /* for( int t = 0 ; t < nbFrame ; ++t ) */
269 /* Corr( nbCorr, t ) = indivn[t] ; */
270 /* ++nbCorr ; */
271  _dist[n] = ( centeredIndivn.clone().transpose() ).cross( _invCi.cross( centeredIndivn ) )(0, 0) ;
272  if( _dist[n] > 1500 )
273  _pTni[n] = 0 ;
274  else
275  _pTni[n] = /*pow( 2*M_PI, - nbFrame/2. ) * */ _normFactor * _exp[ int(0.5 * _dist[n] * 100. + 0.000001 ) ] ;
276  }
277 
278  } else {
279  _dist[n] = ( centeredIndivn.clone().transpose() ).cross( _invCi.cross( centeredIndivn ) )(0, 0) ;
280  if( _dist[n] > 1500 )
281  _pTni[n] = 0 ;
282  else
283  _pTni[n] = /*pow( 2*M_PI, - nbFrame/2. ) * */ _normFactor * _exp[ int(0.5 * _dist[n] * 100. + 0.000001) ] ;
284  }
285  if( _pTni[n] > 0. )
286  ++nbIndTaken ;
287  }
288 
289 /* Writer < AimsData<double> > wr( "correlated.ima" ) ; */
290 /* wr.write( Corr ) ; */
291 /* Writer < AimsData<double> > wr2( "anti-correlated.ima" ) ; */
292 /* wr2.write( antiCorr ) ; */
293 /* Writer < AimsData<double> > wr3( "mean.ima" ) ; */
294 /* wr3.write( _mean ) ; */
295 
296 /* double sumPtni = 0., prodPtni = 0., meanPtni = 0. , varPtni = 0. ; */
297 /* // moyenne et variance des pTni */
298 /* for( int n = 0 ; n < nbInd ; ++n ){ */
299 /* sumPtni += _pTni[n] ; */
300 /* prodPtni += _pTni[n] * _pTni[n] ; */
301 /* } */
302 /* meanPtni = sumPtni / nbInd ; */
303 /* varPtni = prodPtni / nbInd ; */
304 /* cout << ( _useOnlyCorrIndiv ? " anticorreles = " : "" ) << ( _useOnlyCorrIndiv ? toString(count) : "" ) */
305 /* << " - moyenne et variance des p(tn|i) = " */
306 /* << meanPtni << " - " << varPtni << endl ; */
307  if( nbIndTaken < indivMatrix.dimY() * 2 )
308  _valid = false ;
309  else
310  _valid = true ;
311  return _valid ;
312 }
313 
314 
315 template <class T>
316 double
317 PpcaAnalyserElement::newStep2( AimsData<double> pTn, const AimsData<T>& indivMatrix,
318  double noiseRef )
319 {
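 // M-step of the EM algorithm for the mixture: from the responsibilities
 // Rni = p(tn|i) * Pi / p(tn), the class weight Pi, the mean and the local
 // covariance Si are re-estimated, then the principal subspace (Wi, sigma2,
 // Ci, Mi) is refitted from Si, either explicitly through an SVD of Si or
 // through the iterative Tipping & Bishop update.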
320  bool explicitComputing = true ;
321 
322  int nbInd = indivMatrix.dimX() ;
323  int nbFrame = indivMatrix.dimY() ;
324  _energy = 0. ;
325 
327  // _Rn[i] = probabilityOfTnKnownI * Pi[i] / pTn ;
328 
329 
330 // _Rn = AimsData<double>( nbInd ) ;
331  double previousRn ;
332  int nbIndTemp = 0 ; // number of individuals with p(tn) != 0
333 // double previousRn = 0. ;
334  _sumDiff2Rni = 0. ;
335 
336  //cout << "Rn computing begin ------ " ;
337  for( int n = 0 ; n < nbInd ; ++n ){
338  previousRn = _Rn[n] ;
339  if( pTn[n] != 0. ){ // ignore problematic points
340  _Rn[n] = _pTni[n] * _Pi / pTn[n] ;
341  //_energy += 1 - 4. * (_Rn[n] - 0.5) * (_Rn[n] - 0.5) ;
342  ++nbIndTemp ;
343  }
344  else
345  _Rn[n] = 0. ;
346 
347  // if( n < 10 && _Rn[n] != 0.) cout << "n = " << n << " - nouveau/ancien Rn = " << _Rn[n] << " " << previousRn << endl ;
348  //_sumDiff2Rni += pow( _Rn[n] - previousRn, 2. ) ;
349  }
350  //_energy /= nbInd ;
351  //_sumDiff2Rni /= nbInd ;
352 
353  // cout << "Rn computing end" << endl ;
354 
355  cout << /*endl <<*/ "critere pour la classe = " << setprecision(9) << _sumDiff2Rni << " - " ;
356 
357 
359  // _Pi = 1/nbInd * sum(Rni)
360 
361  _Pi = 0. ;
362  for( int n = 0 ; n < nbInd ; ++n )
363  _Pi += _Rn[n] ;
364  _Pi /= nbIndTemp ;
365  cout << "Pi = " << setprecision(3) << _Pi << endl ;
366 
367  double nbIndClass = _Pi * nbIndTemp ;
368  cout << "'nb d'individus appartenant' a la classe = " << nbIndClass << endl ;
369 
370 
372  // _mean = sum(Rni*tn)/sum(Rni)
373 
374  double sumRn = 0. ;
375  AimsData<double> sumRnTn( nbFrame ) ;
376  int nullRniNb = 0, nearNullRni = 0 ;
377  for( int n = 0 ; n < nbInd ; ++n ){
378  sumRn += _Rn[n] ;
379  if( _Rn[n] == 0. )
380  ++nullRniNb ;
381  if( _Rn[n] < 10e-30 )
382  ++nearNullRni ;
383  for( int t = 0 ; t < nbFrame ; ++t )
384  sumRnTn[t] += _Rn[n] * indivMatrix(n, t) ;
385  }
386 
387  cout << "Nombre de RNi nuls : " << nullRniNb << " et de Rni presque nuls : " << nearNullRni << endl ;
388  for( int t = 0 ; t < nbFrame ; ++t )
389  _mean[t] = sumRnTn[t] / sumRn ;
390 // cout << "mean = " << _mean << endl ;
391 
392 
394  // Si = 1/Pi*nbInd sum( Rni (tn-mui) (tn-mui)T )
395 
396  AimsData<double> centeredIndiv( nbFrame ) ;
397  AimsData<double> Si( nbFrame, nbFrame ) ;
398 
399 /* for( int n = 0 ; n < nbInd ; ++n ) */
400 /* for( int t = 0 ; t < nbFrame ; ++t ) */
401 /* centeredIndivMatrix(n, t) = indivMatrix(n, t) - _mean[t] ; */
402 
403  int x, y ;
404  for( int n = 0 ; n < nbInd ; ++n )
405  if( _Rn[n] != 0. ){
406  for( int t = 0 ; t < nbFrame ; ++t )
407  centeredIndiv(t) = indivMatrix(n, t) - _mean[t] ;
408  ForEach2d( Si, x, y )
409  Si(x, y) += _Rn[n] * centeredIndiv(x) * centeredIndiv(y) ;
410  }
411  ForEach2d( Si, x, y )
412  Si(x, y) /= _Pi * nbIndTemp ;
413 
414 
415  //------------------- EXPLICIT COMPUTATION ---------------------------------
416 
417  double detCi = 1. ;
418 
419  if( explicitComputing ){
420  // SVD decomposition
421 
422  //cout << "Compute Si svd begin" << endl ;
423  AimsSVD<double> svd;
425  AimsData<double> eigenValues = svd.doit( Si ) ;
426  svd.sort( Si, eigenValues ) ;
427 
428  //cout << "Compute Si svd end" << endl ;
429 
430  // Compute the "noise variance" (sum of the non-significant eigenvalues divided by their number) = sigma^2
431  _sigma2 = 0. ;
432  for( int i = _significantNumberOfVp ; i < nbFrame ; ++i )
433  _sigma2 += eigenValues[i] ;
434  _sigma2 /= (double)( nbFrame - _significantNumberOfVp ) ;
435  cout << "sigma2 = " << _sigma2 << " (explicite)" << " - " ;
436 
437  AimsData<double> selectedEigenVectors( nbFrame, _significantNumberOfVp ) ; //dim dxq
438  for( int i = 0 ; i < _significantNumberOfVp ; ++i )
439  for( int t = 0 ; t < nbFrame ; ++t )
440  selectedEigenVectors(t, i) = Si( t, i ) ;
441 
442  // Compute the "weight" matrix Wi (dim. d x q)
443  AimsData<double> tempWi( _significantNumberOfVp, _significantNumberOfVp ) ;
444 
445  for( int i = 0 ; i < _significantNumberOfVp ; ++i )
446  tempWi(i, i) = sqrt( eigenValues[i] - _sigma2 ) ;
447  _Wi = selectedEigenVectors.cross( tempWi ) ;
448 
449  // Compute the model covariance matrix Ci = sigma^2 I + W W^T
450  AimsData<double> Ci = _Wi.cross( _Wi.clone().transpose() ) ;
451  for( int i = 0 ; i < nbFrame ; ++i )
452  Ci(i, i) += _sigma2 ;
453 
454  // Compute detCi and invCi
455  double s = 0.000001 ;
456  AimsData< double > u = Ci.clone(); // because it is replaced on SVD output
457  AimsData< double > v( Ci.dimY(), Ci.dimY() );
458  AimsSVD< double > svd2;
459  svd2.setReturnType( AimsSVD<double>::MatrixOfSingularValues ) ;
460  AimsData< double > w = svd2.doit( u, &v );
461 
462  // Set the singular values lower than threshold to zero
463  double ts = s * w.maximum();
464  int i, n = w.dimX();
465  for ( i = 0 ; i < n; i++ ){
466  if ( w( i, i ) < 0. ){
467  cout << endl << "pour i = " << i << ",valeur w = " << w( i, i ) << "! VALEUR NEGATIVE CHANGEE!" << endl ;
468  w( i, i ) = - w( i, i ) ;
469  }
470  detCi *= w( i, i ) / _sigma2 ;
471  if ( w( i, i ) > ts ) w( i, i ) = 1.0f / w( i, i );
472  else w( i, i ) = 0.0f;
473  }
474  // Get the SVD inverse of h ( h-1 = V.W-2.V'.H' = V.W-1.U' )
475  // for solution of H'.H.x = H'.b <=> min(||H.x-b||^2)
476  _invCi = v.cross( w.cross( u.transpose() ) ) ;
477 
478  } else {
479 
480  //---------------------Iterative computing----------------------------------
481 
483  // _Wi = Si Wi (sigma2 I + Mi^-1 WiT Si Wi)^-1 with Mi = sigma2 I + WiT Wi
484 
485  AimsData<double> newWi = _invMi.cross( ( _Wi.clone().transpose() ).cross( Si.cross( _Wi ) ) ) ;
486  for( int i = 0 ; i < newWi.dimX() ; ++i )
487  newWi(i, i) += _sigma2 ;
488 
489  double s = 0.000001 ;
490  AimsData<double> u = newWi.clone(); // because it is replaced on SVD output
491  AimsData<double> v( newWi.dimY(), newWi.dimY() );
492  AimsSVD<double> svd1;
493  svd1.setReturnType( AimsSVD<double>::MatrixOfSingularValues ) ;
494  AimsData<double> w = svd1.doit( u, &v );
495  double ts = s * w.maximum();
496  int i, n = w.dimX();
497 
498  for ( i = 0 ; i < n ; i++ ){
499  if ( w( i, i ) < 0. ){
500  cout << endl << "pour i = " << i << ", w = " << w( i, i ) << " ! VALEUR NEGATIVE CHANGEE !" << endl ;
501  w( i, i ) = - w( i, i ) ;
502  }
503  if ( w( i, i ) > ts ) w( i, i ) = 1.0f / w( i, i );
504  else w( i, i ) = 0.0f;
505  }
506  newWi = v.cross( w.cross( u.transpose() ) ) ;
507 
508  newWi = Si.cross( _Wi.cross( newWi ) ) ;
509 // _Wi = newWi ;
510 
511 
513  // Mi = _sigmai2I + WiT Wi & invMi
514 
515 /* AimsData<double> Mi = ( newWi.clone().transpose() ).cross( newWi ) ; */
516 /* for( int i = 0 ; i < Mi.dimX() ; ++i ) */
517 /* Mi(i, i) += _sigma2 ; */
518 
519 /* AimsData<double> u2 = Mi.clone(); */
520 /* AimsData<double> v2( Mi.dimY(), Mi.dimY() ) ; */
521 /* AimsSVD< double > svd2 ; */
522 /* svd2.setReturnType( AimsSVD<double>::MatrixOfSingularValues ) ; */
523 /* AimsData<double> w2 = svd2.doit( u2, &v2 ) ; */
524 /* ts = s * w2.maximum(); */
525 /* n = w2.dimX(); */
526 
527 /* for( i = 0 ; i < n ; i++ ){ */
528 /* if ( w2( i, i ) < 0. ){ */
529 /* cout << endl << "pour i = " << i << ", w2 = " << w2( i, i ) << " ! VALEUR NEGATIVE CHANGEE !" << endl ; */
530 /* w2( i, i ) = - w2( i, i ) ; */
531 /* } */
532 /* if ( w2( i, i ) > ts ) w2( i, i ) = 1.0f / w2( i, i ) ; */
533 /* else w2( i, i ) = 0.0f; */
534 /* } */
535 /* _invMi = v2.cross( w2.cross( u2.transpose() ) ) ; */
536 
537 
539  // _sigmai2 = 1/d tr(Si - Si Wi Mi-1 WiT)
540 
541  _sigma2 = 0. ;
542  AimsData<double> temp = Si.cross( _Wi.cross( _invMi.cross( newWi.clone().transpose() ) ) ) ;
543 
544  for( int i = 0 ; i < temp.dimX() ; ++i )
545  _sigma2 += ( Si(i, i) - temp(i, i) ) ;
546  _sigma2 /= nbFrame ;
547  cout << "sigma2 = " << _sigma2 << " - " ;
548  if( _sigma2 < 0. ){
549  cout << "VALEUR NEGATIVE CHANGEE ! " << endl ;
550  _sigma2 = - _sigma2 ;
551  }
552 
553  // trial
554  _Wi = newWi ;
555 
556 
558  // Mi = _sigmai2I + WiT Wi & invMi
559 
560  AimsData<double> Mi = ( _Wi.clone().transpose() ).cross( _Wi ) ;
561  for( int i = 0 ; i < Mi.dimX() ; ++i )
562  Mi(i, i) += _sigma2 ;
563 
564  AimsData<double> u2 = Mi.clone();
565  AimsData<double> v2( Mi.dimY(), Mi.dimY() ) ;
566  AimsSVD< double > svd2 ;
567  svd2.setReturnType( AimsSVD<double>::MatrixOfSingularValues ) ;
568  AimsData<double> w2 = svd2.doit( u2, &v2 ) ;
569  ts = s * w2.maximum();
570  n = w2.dimX();
571  for( i = 0 ; i < n ; i++ ){
572  if ( w2( i, i ) < 0. ){
573  cout << endl << "pour i = " << i << ", w2 = " << w2( i, i ) << " ! VALEUR NEGATIVE CHANGEE !" << endl ;
574  w2( i, i ) = - w2( i, i ) ;
575  }
576  if ( w2( i, i ) > ts ) w2( i, i ) = 1.0f / w2( i, i ) ;
577  else w2( i, i ) = 0.0f;
578  }
579  _invMi = v2.cross( w2.cross( u2.transpose() ) ) ;
580 
581 
583  // Ci = sigmai2 + Wi WiT --> Ci-1 & det(Ci)
584 
585  AimsData<double> Ci = _Wi.cross( _Wi.clone().transpose() ) ;
586  for( int i = 0 ; i < nbFrame ; ++i )
587  Ci(i, i) += _sigma2 ;
588 
589  // check that Ci has no negative diagonal values and is symmetric
590  bool sym = true ;
591  for( i = 0 ; i < Ci.dimX() ; i++ ){
592  if( Ci(i, i) < 0 ){
593  cout << "pour i = " << i << ", Ci = " << Ci( i, i ) << " ! VALEUR NEGATIVE CHANGEE !" << endl ;
594  Ci( i, i ) = - Ci( i, i ) ;
595  }
596  for( int j = 0 ; j < Ci.dimY() ; j++ ){
597  if( Ci(i,j) != Ci(j,i) ){
598  sym = false ;
599  cout << "pour i,j = " << i << "," << j << " Ci(i,j) = " << Ci(i,j) << ",Ci(j,i) = " << Ci(j,i) << endl ;
600  }
601  }
602  }
603  if( sym == false ) cout << "--> Ci pas symetrique !" << endl ;
604 
605 
606  AimsData<double> u3 = Ci.clone();
607  AimsData<double> v3( Ci.dimY(), Ci.dimY() );
608  AimsSVD< double > svd3 ;
609  svd3.setReturnType( AimsSVD<double>::MatrixOfSingularValues ) ;
610  AimsData<double> w3 = svd3.doit( u3, &v3 );
611  ts = s * w3.maximum();
612  n = w3.dimX();
613  bool saveCi = false ;
614 
615 // cout << endl << "valeurs diagonale w3 = " ;
616  for( i = 0 ; i < n ; i++ ){
617 // cout << w3( i, i ) << " " ;
618  if ( w3( i, i ) < 0. ){
619  saveCi = true ;
620  cout << endl << "pour i = " << i << ", valeur w3 = " << w3( i, i ) << " ! VALEUR NEGATIVE CHANGEE !" << endl ;
621  w3( i, i ) = - w3( i, i ) ;
622  }
623  if( _sigma2 == 0. )
624  detCi *= w3( i, i ) ;
625  else
626  detCi *= w3( i, i ) / _sigma2 ;
627  if ( w3( i, i ) > ts ) w3( i, i ) = 1.0f / w3( i, i );
628  else w3( i, i ) = 0.0f;
629  }
630  _invCi = v3.cross( w3.cross( u3.transpose() ) ) ;
631 // cout << "_invCi = " << endl << _invCi << endl ;
632 
633  // save Ci to a file
634 /* if( saveCi == true ){ */
635 /* ofstream ci_file, diff_file ; */
636 /* ci_file.open( "Ci_file.txt" ) ; */
637 /* w3_file.open( "w3_file.txt" ) ; */
638 /* diff_file.open( "diff_file.txt" ) ; */
639 
640 /* bool newLine ; */
641 /* cout << "dimensions de Ci = " << Ci.dimX() << " " << Ci.dimY() << endl ; */
642 /* for( int j = 0 ; j < Ci.dimY() ; j++ ){ */
643 /* newLine = true ; */
644 /* for( int i = 0 ; i < Ci.dimX() ; i++ ){ */
645 /* if( newLine == true && j != 0 ) ci_file << endl ; */
646 /* ci_file << Ci( i, j ) << " " ; */
647 /* newLine = false ; */
648 /* } */
649 /* } */
650 /* ci_file << endl ; */
651 /* ci_file.close() ; */
652 
653 /* // verification */
654 /* AimsData<double> prod = u3.cross( w3.cross( v3.clone().transpose() ) ) ; */
655 /* for( int j = 0 ; j < Ci.dimY() ; j++ ){ */
656 /* newLine = true ; */
657 /* for( int i = 0 ; i < Ci.dimX() ; i++ ){ */
658 /* if( newLine == true && j != 0 ) diff_file << endl ; */
659 /* diff_file << Ci(i,j) - prod(i,j) << " " ; */
660 /* newLine = false ; */
661 /* } */
662 /* } */
663 /* diff_file << endl ; */
664 /* diff_file.close() ; */
665 /* } */
666 
667  }
668 
669  // ADDED 12/11: compute the energy of mixture i: Ei = nbOfInd * sigma2
670  //_energy = nbIndTemp * _sigma2 ;
671  cout << "energy = " << _energy ;
672 
673  if( _sigma2 == 0. )
674  _normFactor = 1 / pow( detCi, 0.5 ) ;
675  else
676  _normFactor = 1 / ( pow( detCi, 0.5 ) * pow( _sigma2 / noiseRef, nbFrame / 2. ) ) ;
677  cout << " - detCi = " << detCi /*<< " - normFactor sans noiseRef = " << 1 / pow( detCi, 0.5 )*/
678  << " - normFactor = " << _normFactor << endl ;
679 
680  return _sigma2 ;
681 }
682 
683 template <class T>
684 double
685 MixtureOfPPCA<T>::pTnComputation()
686 {
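 // Mixture density of each individual: p(tn) = sum_i Pi * p(tn|i). Individuals
 // with p(tn) == 0 are counted as rejected, and the log-likelihood
 // sum_n log p(tn) serves as the convergence criterion of the EM loop.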
687  int nbInd = _individuals.dimX() ;
688  int nbIndTemp = 0 ;
689  double sumPtn = 0., prodPtn = 0., meanPtn = 0. , varPtn = 0. ;
690  AimsData<double> pTni( _individuals.dimX() ) ;
691  double Pi = 0. ;
692 
693  _nullPtnIndiv.clear() ;
694 
695  _pTn = 0 ;
696  cout << endl ;
697  for( int n = 0 ; n < nbInd ; ++n ){
698  for( int c = 0 ; c < _nbOfClasses ; ++c ){
699  Pi = _elements[c].getPi() ;
700  pTni = _elements[c].getPtni() ;
701  _pTn[n] += pTni[n] * Pi ;
702  }
703  if( _pTn[n] != 0 )
704  ++nbIndTemp ;
705  else
706  _nullPtnIndiv.push_back( n ) ;
707  }
708 
709  _nbOfRejected = nbInd - nbIndTemp ;
710 
711  cout << "nb d'individus pris en compte = " << nbIndTemp << " - nb d'individus rejetes = "
712  << nbInd - nbIndTemp << endl ;
713 
714  // mean and variance of the p(tn)
715  double oldSumLnPTn = _logLikelihood ;
716  _logLikelihood = 0. ;
717  for( int n = 0 ; n < nbInd ; ++n ){
718  if( _pTn[n] != 0. )
719  _logLikelihood += log( _pTn[n] ) ;
720  else
721  _logLikelihood += -100000000. ;
722  sumPtn += _pTn[n] ;
723  prodPtn += _pTn[n] * _pTn[n] ;
724  }
725  meanPtn = sumPtn / nbIndTemp ;
726  varPtn = prodPtn / nbIndTemp ;
727  cout << endl << "CRITERIUM TO MAXIMIZE : LOG LIKELIHOOD = " << _logLikelihood << " and difference = "
728  << (_logLikelihood - oldSumLnPTn)/oldSumLnPTn << " for "
729  << nbInd - nbIndTemp << " rejections" << endl ;
730  cout << "moyenne et variance des p(tn) = " << meanPtn << " - " << varPtn << endl ;
731 
732  return (_logLikelihood - oldSumLnPTn)/oldSumLnPTn ;
733 }
734 
735 template <class T>
736 double
738 {
739  AimsData<double> mean( _individuals.dimY() ) ;
740  AimsData<double> invCi( _individuals.dimY(), _individuals.dimY() ) ;
741  AimsData<double> centeredIndivn( _individuals.dimY() ) ;
742  AimsData<double> dist(1, 1) ;
743  AimsData<double> Rn( _individuals.dimX() ) ;
744  AimsData<double> Rni( _individuals.dimX(), _nbOfClasses ) ;
745  AimsData<int> results( _individuals.dimX() ) ;
746  double max = 0., global_dist = 0. ;
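 // Each individual is assigned to its highest-responsibility class, and its
 // squared Mahalanobis distance (tn - mean)^T invCi (tn - mean) to that class
 // is accumulated into a global goodness-of-fit value.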
747 
748  for( int c = 0 ; c < _nbOfClasses ; ++c ){
749  Rn = _elements[c].getRn() ;
750  for( int ind = 0 ; ind < _individuals.dimX() ; ++ind )
751  Rni(ind, c) = Rn[ind] ;
752  }
753 
754  for( int ind = 0 ; ind < _individuals.dimX() ; ++ind ){
755  max = Rni( ind, 0 ) ;
756  results[ind] = 1 ;
757  for( int c = 1 ; c < _nbOfClasses ; ++c ){
758  if( Rni(ind, c) > max ){
759  max = Rni( ind, c ) ;
760  results[ind] = c + 1 ;
761  }
762  }
763  mean = _elements[results[ind]-1].getMean() ;
764  invCi = _elements[results[ind]-1].getInvCi() ;
765  for( int t = 0 ; t < _individuals.dimY() ; ++t )
766  centeredIndivn[t] = _individuals(ind, t) - mean[t] ;
767  dist = ( centeredIndivn.clone().transpose() ).cross( invCi.cross( centeredIndivn ) ) ;
768  global_dist += dist(0,0) ;
769  }
770 
771  return global_dist ;
772 }
773 
774 
775 template <class T>
776 bool
777 MixtureOfPPCA<T>::classesVisualisation( int nbOfIterations, const string & fileOut, bool theEnd )
778 {
779  bool res = true ;
780 
781 // FOR ALL INDIVIDUALS
782  double max = 0. ;
783  AimsData<double> score( _individuals.dimX(), _nbOfClasses ) ;
784  AimsData<int> results( _individuals.dimX() ) ;
785  vector< list< int > > tempList( _nbOfClasses ) ;
786 
787  AimsData<double> Rni = getRni() ;
788 
789 /* for( int ind = 0 ; ind < _individuals.dimX() ; ++ind ){ */
790 /* sumRn = 0. ; */
791 /* for( int c = 0 ; c < _nbOfClasses ; ++c ) */
792 /* sumRn += Rni( ind, c ) ; */
793 /* for( int c = 0 ; c < _nbOfClasses ; ++c ) */
794 /* score( ind, c) = Rni(ind, c) / sumRn ; */
795 /* } */
796 
797 // COMPUTE THE FINAL CLASSES
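// MAP assignment: each individual goes to the class with the largest
// responsibility Rni; individuals with all Rni == 0 keep results[ind] == 0 and
// belong to no class.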
798  for( int ind = 0 ; ind < _individuals.dimX() ; ++ind ){
799  max = 0. ;
800  results[ind] = 0 ;
801  for( int c = 0 ; c < _nbOfClasses ; ++c ){
802  if( Rni(ind, c) > max ){
803  max = Rni( ind, c ) ;
804  results[ind] = c + 1 ;
805  }
806  }
807  if( results[ind] > 0 )
808  _finalClasses[results[ind] - 1].push_back( ind ) ;
809  }
810 
811 
812 // SAVE THE CLASSES every 30 iterations (and at the last one)
813 // AimsData< short > resultImage( data->dimX(), data->dimY(), data->dimZ() ) ; // data inconnu ici
814 // resultImage.setSizeXYZT( data->sizeX(), data->sizeY(), data->sizeZ() ) ;
815  if( nbOfIterations% 30 == 0 || theEnd ){
816  AimsData< short > resultImage( 128, 128, 111 ) ;
817  AimsData< short > rejected( 128, 128, 111 ) ;
818  AimsData< float > rniImage( 128, 128, 111, _nbOfClasses ) ;
819  rejected.setSizeXYZT( 0.858086, 0.858086, 2.425 ) ;
820  rejected = 0 ;
821  rniImage.setSizeXYZT( 0.858086, 0.858086, 2.425 ) ;
822  rniImage = 0. ;
823  resultImage.setSizeXYZT( 0.858086, 0.858086, 2.425 ) ;
824  resultImage = 0 ;
825  Point3d theVoxel ;
826  // retrieve the x,y,z of the voxel whose value is *iter
827  for( short c = 0 ; c < _nbOfClasses ; ++c ){
828  list<int>::const_iterator iter( _finalClasses[c].begin() ),
829  last( _finalClasses[c].end() ) ;
830  while( iter != last ){
831  theVoxel = _indPosVector[*iter] ;
832  resultImage( theVoxel ) = c + 1 ;
833  rniImage(theVoxel[0], theVoxel[1], theVoxel[2], c) = Rni( *iter, c ) ;
834  ++iter ;
835  }
836  }
837 
838  list<int>::const_iterator it( _nullPtnIndiv.begin() ),
839  lt( _nullPtnIndiv.end() ) ;
840  while( it != lt ){
841  theVoxel = _indPosVector[*it] ;
842  rejected( theVoxel ) = 1 ;
843  ++it ;
844  }
845 
846  string iter_nb = carto::toString( nbOfIterations ) ;
847  Writer< AimsData< short > > writer( "iter" + iter_nb + "_classes_" + fileOut ) ;
848  writer.write( resultImage ) ;
849 
850  Writer< AimsData< short > > writer3( "iter" + iter_nb + "_rejected__" + fileOut ) ;
851  writer3.write( rejected ) ;
852 
853  Writer< AimsData< float > > writer2( "iter" + iter_nb + "_rni.ima" ) ;
854  writer2.write( rniImage ) ;
855  }
856 
857  cout << endl << "Nb d'individus a l'iteration " << nbOfIterations << " ds chaque classe = " ;
858  for( int i = 0 ; i < _nbOfClasses ; ++i )
859  cout << _finalClasses[i].size() << " " ;
860  cout << endl ;
861 
862  // AT THE LAST ITERATION
863  if( theEnd ){
864 
865  // save the individuals matrix _individuals (AimsData) and the final classes (1 bucketMap)
866  Writer< AimsData<T> > writerMatrix( "indivMatrix" ) ;
867  Finder f ;
868  string fformat = f.format() ;
869  if( !writerMatrix.write( _individuals, false, &fformat ) )
870  return( false ) ;
871 
872 // AimsBucket< int > bClasses ;
873  AimsBucket< short > bClasses ;
874  int count ;
875 
876  for( int c = 0 ; c < _nbOfClasses ; ++c ){
877  count = 0 ;
878  list<int>::const_iterator iter( _finalClasses[c].begin() ),
879  last( _finalClasses[c].end() ) ;
880  while( iter != last ){
881 // AimsBucketItem< int > item ;
882  AimsBucketItem< short > item ;
883  item.location() = Point3d( count, count, count ) ;
884  item.value() = *iter ;
885  bClasses[c].push_back( item ) ;
886  ++iter ;
887  ++count ;
888  }
889  }
890 // Writer< AimsBucket< int > > writerClasses( fileOut + "_finalClasses" ) ;
891  Writer< AimsBucket< short > > writerClasses( fileOut + "_finalClasses" ) ;
892  writerClasses.write( bClasses, true, &fformat ) ;
893  bClasses.clear() ;
894 
895  AimsData<double> classCorrelationMatrix( _nbOfClasses, _nbOfClasses ) ;
896  vector<double> classesNorm( _nbOfClasses, 0. ) ;
897  multimap<double, Point2d> fusion ;
898  multimap<double, int> split ;
899 
900  for( int i = 0 ; i < _nbOfClasses ; ++i )
901  for( int n = 0 ; n < _individuals.dimX() ; ++n ) {
902  for( int j = i ; j < _nbOfClasses ; ++j )
903  classCorrelationMatrix(i, j) += Rni( n, i ) * Rni(n, j ) ;
904 
905  classesNorm[i] += Rni( n, i ) * Rni( n, i ) ;
906  }
907 
908  for( int i = 0 ; i < _nbOfClasses ; ++i ){
909  classesNorm[i] = sqrt( classesNorm[i] ) ;
910  double meanNorm = 0. ;
911  for( int t = 0 ; t < _elements[i].getMean().dimX() ; ++t )
912  meanNorm += _elements[i].getMean()[t] ;
913 
914  split.insert( pair<double, int>( _elements[i].getSigma2() / meanNorm, i) ) ;
915  }
916  for( int i = 0 ; i < _nbOfClasses ; ++i )
917  for( int j = i ; j < _nbOfClasses ; ++j ){
918  if ( classesNorm[i] * classesNorm[j] > 0 )
919  classCorrelationMatrix(i, j) /= classesNorm[i] * classesNorm[j] ;
920  else
921  classCorrelationMatrix(i, j) = 0. ;
922 
923  // To get a cost, we take the opposite of correlation matrix
924  classCorrelationMatrix(j, i) = classCorrelationMatrix(i, j) ;
925  if( i != j )
926  fusion.insert( pair<double, Point2d> ( classCorrelationMatrix(i, j), Point2d(i, j) ) ) ;
927  }
928 
929  cout << "Classes à fusionner : " << endl ;
930 
931  count = 0 ;
932  for( multimap<double, Point2d>::reverse_iterator rit = fusion.rbegin() ; rit != fusion.rend() && count < 5 ; ++rit, ++count )
933  cout << (rit->second)[0] << " avec " << (rit->second)[1] << " : " << rit->first << endl ;
934 
935  cout << "Classe à séparer : " << endl ;
936  count = 0 ;
937  for( multimap<double, int>::reverse_iterator rit = split.rbegin() ; rit != split.rend() && count < 5 ; ++rit, ++count )
938  cout << rit->second << " : " << rit->first << endl ;
939  }
940  return res ;
941 }
942 
943 
944 template <class T>
945 bool
946 MixtureOfPPCA<T>::distMatrixComputation()
947 {
948  bool res = true ;
949  int count ;
950  double sumDist ;
951  AimsData<double> meanDistMatrix( _nbOfClasses, _nbOfClasses ) ;
952  multimap< float, Point2d > fScore ;
953  multimap< float, Point2d > meanFScore ;
954 
955  for( int fromC = 0 ; fromC < _nbOfClasses ; ++fromC ){
956  for( int toC = 0 ; toC < _nbOfClasses ; ++toC ){
957  count = 0 ;
958  sumDist = 0. ;
959  list<int>::const_iterator iter( _finalClasses[fromC].begin() ),
960  last( _finalClasses[fromC].end() ) ;
961  while( iter != last ){
962  sumDist += _distToClasses( *iter, toC ) ;
963  ++count ;
964  ++iter ;
965  }
966  meanDistMatrix( fromC, toC ) = sumDist / count ;
967  }
968  }
969 /* cout << endl << "matrice des distances moyennes inter classes = " */
970 /* << endl << meanDistMatrix << endl ; */
971 
972  // fill the fusion map fScore
973  for( int i = 0 ; i < _nbOfClasses ; ++i )
974  for( int j = 0 ; j < _nbOfClasses ; ++j )
975  fScore.insert( pair< float, Point2d >( meanDistMatrix(i, j), Point2d(i, j) ) ) ;
976 
977  // fill the meanFScore map
978  for( int i = 0 ; i < _nbOfClasses ; ++i )
979  for( int j = i+1 ; j < _nbOfClasses ; ++j )
980  meanFScore.insert( pair< float, Point2d >( ( meanDistMatrix(i, j) + meanDistMatrix(j, i) )/2.,
981  Point2d(i, j) ) ) ;
982 
983  cout << endl << "elements de la map des scores = " << endl;
984  multimap< float, Point2d >::iterator iter( fScore.begin() ), last( fScore.end() ) ;
985  while( iter != last ){
986  if( (*iter).first > 1.5 && (*iter).first < 100. ){
987  cout << " [" << (*iter).first << ", " << (*iter).second << "]" ;
988  cout << " sigmas = " << _sigma2Matrix[ (*iter).second[0] ] << " et "
989  << _sigma2Matrix[ (*iter).second[1] ] << endl;
990  }
991  ++iter ;
992  }
993 
994  cout << endl << "elements de la map des scores moyens 2 a 2 = " << endl;
995  multimap< float, Point2d >::iterator iterM( meanFScore.begin() ), lastM( meanFScore.end() ) ;
996  while( iterM != lastM ){
997  if( exp( -0.5 * (*iterM).first ) > 0.01 ){
998  cout << " [" << (*iterM).first << ", " << (*iterM).second << "]" ;
999  cout << " exp(-score/2) = " << exp( -0.5 * (*iterM).first ) << endl ;
1000  }
1001  ++iterM ;
1002  }
1003 
1004 
1005  return res;
1006 }
1007 
1008 
1009 template <class T>
1010 MixtureOfPPCA<T>::MixtureOfPPCA( int nbOfClasses, int significantNumberOfVp,
1011  int maxNbOfIterations,
1012  const AimsData<T>& individuals, const std::vector< Point3d > indPosVector,
1013  const std::vector< std::list <int> >& initialClasses,
1014  const std::string & fileOut, int runNb, int iterationToUseOnlyCorrelatedIndiv ) :
1015  _nbOfClasses( nbOfClasses ), _valid( true ), _significantNumberOfEigenValues( significantNumberOfVp ), _maxNbOfIterations( maxNbOfIterations ), _individuals( individuals ), _indPosVector( indPosVector ), _fileOut( fileOut ), _runNb( runNb ), _itToUseOnlyCorrelatedIndiv(iterationToUseOnlyCorrelatedIndiv)
1016 {
1017  cout << "Entering MixtureOfPPCA Constructor" << endl ;
1018 
1019  double Pi_init = 1. / (double)_nbOfClasses ;
1020  _elements.reserve( _nbOfClasses ) ;
1021  _pTn = AimsData<double>( _individuals.dimX() ) ;
1022  _pTn = 1 ;
1023  _noiseRef = 1. ;
1024  _sigma2init = vector<double>( _nbOfClasses ) ;
1025 
1026  cout << endl ;
1027  std::vector< std::list<int> > initClasses( initialClasses ) ;
1028 
1029  if( initClasses.size() == 0 ){
1030  initClasses = std::vector< std::list<int> >( _nbOfClasses ) ;
1031  for( int i = 0 ; i < _individuals.dimX() ; ++i ){
1032  double aux = UniformRandom( 0., 0.99999 ) ;
1033  int c = int( aux * _nbOfClasses ) ;
1034  initClasses[c].push_back( i ) ;
1035  }
1036  std::cout << std::endl ;
1037  }
1038 
1039  for( int c = 0 ; c < _nbOfClasses ; ++c ){
1040  std::cout << "Class " << c << " : ";
1041  for( std::list<int>::iterator iter = initClasses[c].begin() ; iter != initClasses[c].end() ; ++iter )
1042  std::cout << "\t" << (*iter) ;
1043  std::cout << std::endl ;
1044  }
1045 
1046  for( int c = 0 ; c < _nbOfClasses ; ++c ){
1047  if( int( initClasses[c].size() ) > _individuals.dimY() ) {
1048  PpcaAnalyserElement el( _significantNumberOfEigenValues ) ;
1049  cout << endl << "Initialization class " << c << " ..." ;
1050 
1051 /* cout << endl << "numeros des indivs de la classe " << c << " : " ; */
1052 /* count = 0 ; */
1053 /* list<int>::const_iterator iter( initClasses[c].begin() ), */
1054 /* last( initClasses[c].end() ) ; */
1055 /* while( iter != last && count < 10 ){ */
1056 /* cout << *iter << " " ; */
1057 /* ++count ; */
1058 /* ++iter ; */
1059 /* } */
1060 
1061  // ADDED: compute a noiseRef factor = noise of class 0, used to normalize normFactor
1062  if( c == 0 ){
1063  el.init( initClasses[c], Pi_init, individuals, _noiseRef ) ;
1064  _noiseRef = el.getSigma2() ;
1065  cout << "Noise Ref = " << setprecision(3) << _noiseRef << endl ;
1066  }
1067  el.init( initClasses[c], Pi_init, individuals, _noiseRef ) ;
1068  cout << "Class " << c << " initialized !" << endl ;
1069  _elements.push_back( el ) ;
1070  }
1071  else
1072  cerr << "Not enough data for element " << c << endl ;
1073  }
1074 
1075 }
1076 
1077 
1078 template <class T>
1079 bool
1080 //const vector< vector< list< int > > >&
1082 {
1083  int nbOfIterations = 0 ;
1084  bool res ;
1085 // AimsData<double> distToClass( _individuals.dimX() ) ;
1086 // _distToClasses = AimsData<double>( _individuals.dimX(), _nbOfClasses ) ;
1087  _finalClasses = vector< list< int > >( _nbOfClasses ) ;
1088  _sigma2Matrix = AimsData<double>( _nbOfClasses ) ;
1089  double sumOfEnergies = 0., criteria ;
1090 
1091  AimsData<double> imageOfMean( _nbOfClasses, _maxNbOfIterations, _individuals.dimY() ) ;
1092  AimsData<double> imageOfSigma2( _nbOfClasses, _maxNbOfIterations ) ;
1093  AimsData<double> imageOfPi( _nbOfClasses, _maxNbOfIterations ) ;
1094  AimsData<double> imageOfRniDiff( _nbOfClasses, _maxNbOfIterations ) ;
1095  AimsData<double> imageOfEnergy( _maxNbOfIterations ) ;
1096  AimsData<double> imageOfLogLikelihood( _maxNbOfIterations ) ;
1097  AimsData<double> imageOfLogLikelihoodProgression( _maxNbOfIterations ) ;
1098 
1099  _nbOfRejected = 0 ;
1100  int oldNbOfRejected = 0 ;
1101  bool notYetTheEnd = true ;
1102 
1103  do{
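 // EM loop: (1) E-step per class, newStep1() evaluates p(tn|i); (2)
 // pTnComputation() combines them into p(tn) and the log-likelihood; (3)
 // M-step per class, newStep2() updates Pi, mean, Wi and sigma2. The loop
 // stops after _maxNbOfIterations, or (after at least 10 iterations) once the
 // number of rejected individuals and the log-likelihood both stop changing.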
1104  for( int i = 0 ; i < _nbOfClasses ; ++i )
1105  _finalClasses[i].clear() ;
1106  cout << endl << "Iteration n°: " << nbOfIterations /*<< endl*/ ;
1107 
1108  // PART 1
1109  cout << endl << "Calcul des p(tn|i) pour chaque classe... " << endl ;
1110  for( int i = 0 ; i < _nbOfClasses ; ++i ){
1111  cerr << "Class " << i << ": " ;
1112  if( _elements[i].isValid() )
1113  _elements[i].newStep1( _individuals, 0 /*(_itToUseOnlyCorrelatedIndiv < 0 ? -1 : nbOfIterations > _itToUseOnlyCorrelatedIndiv )*/ ) ;
1114  }
1115 
1116  oldNbOfRejected = _nbOfRejected ;
1117 
1118  // INTERMEDIATE - compute p(tn) = sum( p(tn|i)*p(i) )
1119  cout << endl << "Calcul des p(tn)..." ;
1120  imageOfLogLikelihoodProgression(nbOfIterations) = pTnComputation() ;
1121  imageOfLogLikelihood(nbOfIterations) = _logLikelihood ;
1122 
1123  // PART 2
1124  cout << endl << "Calcul des autres parametres pour chaque classe..." ;
1125 
1126  //_noiseRef = _elements[0].newStep2( _pTn, _individuals, 1. ) ;
1127  criteria = 0. ;
1128  double oldSumOfEnergies = sumOfEnergies ;
1129  sumOfEnergies = 0. ;
1130  for( int i = 0 ; i < _nbOfClasses ; ++i ){
1131  cout << "Class " << i << ": " ;
1132  if( _elements[i].isValid() ){
1133  _elements[i].newStep2( _pTn, _individuals, /*_noiseRef*/1000000. ) ;
1134  //_sigma2Matrix[i] = _elements[i].getSigma2() ;
1135  //cout << "ok !" << endl ;
1136  sumOfEnergies += _elements[i].getEnergy() ;
1137  criteria += _elements[i].getSumDiff2Rni() ;
1138 
1139  //------ ADDED 12/11: to visualize the parameters as AimsData -----
1140  for( int t = 0 ; t < _individuals.dimY() ; ++t )
1141  imageOfMean( i, nbOfIterations, t ) = ( _elements[i].getMean() )[t] ;
1142  imageOfSigma2( i, nbOfIterations ) = _elements[i].getSigma2() ;
1143  imageOfPi( i, nbOfIterations ) = _elements[i].getPi() ;
1144  imageOfRniDiff( i, nbOfIterations ) = _elements[i].getSumDiff2Rni() ;
1145  }
1146  }
1147 
1148  cout << endl << "Critere d'arret = " << criteria << " or " << (oldSumOfEnergies - sumOfEnergies)/_elements.size() << endl ;
1149  imageOfEnergy( nbOfIterations ) = sumOfEnergies ;
1150 
1151 /* Writer < AimsData<double> > wr( "Mean3D.ima" ) ; */
1152 /* wr.write( imageOfMean ) ; */
1153 /* Writer < AimsData<double> > wr2( "Sigma2D.ima" ) ; */
1154 /* wr2.write( imageOfSigma2 ) ; */
1155 /* Writer < AimsData<double> > wr3( "Pi2D.ima" ) ; */
1156 /* wr3.write( imageOfPi ) ; */
1157 /* Writer < AimsData<double> > wr4( "RniDiff2D.ima" ) ; */
1158 /* wr4.write( imageOfRniDiff ) ; */
1159 /* Writer < AimsData<double> > wr5( "Energy1D.ima" ) ; */
1160 /* wr5.write( imageOfEnergy ) ; */
1161 /* Writer < AimsData<double> > wr6( "logLikelihood.ima" ) ; */
1162 /* wr6.write( imageOfLogLikelihood ) ; */
1163 /* Writer < AimsData<double> > wr7( "logLikelihoodProgression.ima" ) ; */
1164 /* wr7.write( imageOfLogLikelihoodProgression ) ; */
1165 
1166  notYetTheEnd = (nbOfIterations < _maxNbOfIterations) &&
1167  ( (nbOfIterations < 10 ) ||
1168  !( (oldNbOfRejected == _nbOfRejected) && (fabs( imageOfLogLikelihoodProgression(nbOfIterations) ) < 0.0000001) ) ) ;
1169 
1170  // function to compute and visualize the classes
1171  res = classesVisualisation( nbOfIterations, _fileOut, !notYetTheEnd ) ;
1172 
1173  ++nbOfIterations ;
1174  } while( notYetTheEnd ) ;
1175 
1176  // matrix of distances from each individual to each class
1177  /* for( int i = 0 ; i < _nbOfClasses ; ++i ){ */
1178  /* distToClass = _elements[i].getDist() ; */
1179  /* for( int n = 0 ; n < distToClass.dimX() ; ++n ){ */
1180  /* // distToClass already squared - divided by the dimension */
1181  /* _distToClasses(n, i) = distToClass[n] / _individuals.dimY() ; */
1182  /* } */
1183  /* } */
1184 
1185  // _valid = distMatrixComputation() ;
1186 
1187  // return _finalClasses ;
1188  // return _nullPtnIndiv ;
1189  return _valid ;
1190 }
1191 
1192 
1193 
1194 template <class T>
1195 AimsData<double>
1196 MixtureOfPPCA<T>::getRni()
1197 {
1198  AimsData<double> Rn( _individuals.dimX() ) ;
1199  AimsData<double> RnAll( _individuals.dimX(), _nbOfClasses ) ;
1200  for( int c = 0 ; c < _nbOfClasses ; ++c ){
1201  Rn = _elements[c].getRn() ;
1202  for( int ind = 0 ; ind < _individuals.dimX() ; ++ind )
1203  RnAll(ind, c) = Rn[ind] ;
1204  }
1205 
1206  return RnAll ;
1207 }
1208 
1209 }
1210 
1211 #endif