// // Example of a program to fit non-equidistant data points // ======================================================= // // The fitting function fcn is a simple chisquare function // The data consists of 5 data points (arrays x,y,z) + the errors in errorsz // More details on the various functions or parameters for these functions // can be obtained in an interactive ROOT session with: // Root > TMinuit *minuit = new TMinuit(10); // Root > minuit->mnhelp("*") to see the list of possible keywords // Root > minuit->mnhelp("SET") explains most parameters // const Int_t NYEARS=57; // numero di anni const Int_t NSTEPS=100; // numero di passi teorici da calcolare per ogni anno Int_t year[NYEARS]; Double_t z1[NYEARS],z2[NYEARS],errorz1[NYEARS],errorz2[NYEARS]; Double_t teo1[NYEARS*NSTEPS],teo2[NYEARS*NSTEPS]; Double_t teo1_y[NYEARS],teo2_y[NYEARS]; //______________________________________________________________________________ void fcn(Int_t &npar, Double_t *gin, Double_t &f, Double_t *par, Int_t iflag) { Int_t i,j,k; /////////////////////////////////////////////////////////////// // abbiamo il vettore par[]; usiamolo per eseguire il calcolo teorico con questi parametri; in uscita avremo il vettore su cui confrontare i dati teo1[0]=z1[0]; teo2[0]=z2[0]; Double_t alfa=0.7; Double_t gamma=0.5; for (j=1;jtemp) { temp=v[i]; } } return temp; } Double_t minim(Double_t v[], Int_t nlines) { Double_t temp=0; for (Int_t i=0; i30) { return 200000; } else { return TMath::Exp(x); } } Double_t func(Double_t a, Double_t b, Double_t c, Int_t j, Double_t v[]) { /* Questa e' la funzione da usare per l'equazione integrale. */ /* Qui "a" e' il logaritmo del valore INIZIALE della funzione. */ Double_t temp = a+b*(c*j-integ(v,j)); return exp_safe(temp); //quando l'argomento e' troppo negativo al posto dell'esponenziale impone zero } Double_t func2(Double_t a, Double_t b, Double_t c, Double_t d) { /* Questa e' la funzione da usare per l'equazione differenziale. */ /* Qui "a" e' il logaritmo del valore PRECEDENTE della funzione stessa, "d" il valore precedente dell'altra funzione. */ Double_t temp=a+b*(c-d); return exp_safe(temp); //quando l'argomento e' troppo negativo al posto dell'esponenziale impone zero } //______________________________________________________________________________ void fitnew() { ////////////////////////////////////////////////////////// // Leggiamo i valori sperimentali ////////////////////////////////////////////////////////// ifstream in; in.open("canada.txt"); //canada.txt e' il file di dati Int_t nlines = 0; while (1) { in >> year[nlines] >> z1[nlines] >> z2[nlines]; if (!in.good()) break; nlines++; } // The errors on z values for (Int_t i=0;iSetFCN(fcn); Double_t arglist[10]; Int_t ierflg = 0; arglist[0] = 1; gMinuit->mnexcm("SET ERR", arglist ,1,ierflg); //serve a dire come calcolare l'errore sui parametri arglist[0] = 2; gMinuit->mnexcm("SET STR", arglist ,1,ierflg); //serve a dire di usare il maggior numero di chiamate possibile // Set starting values and step sizes for parameters static Double_t vstart[2] = {0.000017 , 0.72 }; static Double_t step[2]; for (Int_t i=0; i<2; i++) { step[i]=0.01*vstart[i]; } gMinuit->mnparm(0, "beta", vstart[0], step[0], 0,0,ierflg); gMinuit->mnparm(1, "epsilon", vstart[1], step[1], 0,0,ierflg); /* arglist[0]=1; // par. n. arglist[1]=0.; // limite inf. arglist[2]=0.5; //limite sup. gMinuit->mnexcm("SET LIM", arglist ,3,ierflg); arglist[0]=2; // par. n. arglist[1]=0.; // limite inf. arglist[2]=1.; //limite sup. gMinuit->mnexcm("SET LIM", arglist ,3,ierflg); arglist[0]=3; // par. n. arglist[1]=0.; // limite inf. arglist[2]=1.; //limite sup. gMinuit->mnexcm("SET LIM", arglist ,3,ierflg); arglist[0]=4; // par. n. arglist[1]=0.; // limite inf. arglist[2]=1.; //limite sup. gMinuit->mnexcm("SET LIM", arglist ,3,ierflg); */ // Now ready for minimization step arglist[0] = 1000; // numero max di tentativi arglist[1] = 10.; // tolleranza // gMinuit->mnexcm("MIGRAD", arglist ,2,ierflg); gMinuit->mnexcm("MINIMIZE", arglist ,2,ierflg); // gMinuit->mnexcm("SEEK", arglist ,2,ierflg); //gMinuit->mnexcm("SCAN", arglist ,2,ierflg); /* Da http://wwwasdoc.web.cern.ch/wwwasdoc/minuit/node20.html : MIGRAD This is the best minimizer for nearly all functions. It is a variable-metric method with inexact line search, a stable metric updating scheme, and checks for positive-definiteness. It will run faster if you SET STRATEGY 0 and will be more reliable if you SET STRATEGY 2 (although the latter option may not help much). Its main weakness is that it depends heavily on knowledge of the first derivatives, and fails miserably if they are very inaccurate. If first derivatives are a problem, they can be calculated analytically inside FCN (see elsewhere in this writeup) or if this is not feasible, the user can try to improve the accuracy of Minuit's numerical approximation by adjusting values using the SET EPS and/or SET STRATEGY commands (see Floating Point Precision and SET STRATEGY). MINIMIZE This is equivalent to MIGRAD, except that if MIGRAD fails, it reverts to SIMPLEX and then calls MIGRAD again. This is what the old MIGRAD command used to do, but it was removed from the MIGRAD command so that users would have a choice, and because it is seldom of any use to call SIMPLEX when MIGRAD has failed (there are of course exceptions). SCAN This is not intended to minimize, and just scans the function, one parameter at a time. It does however retain the best value after each scan, so it does some sort of highly primitive minimization. SEEK We have retained this Monte Carlo search mainly for sentimental reasons, even though the limited experience with it is less than spectacular. The method now incorporates a Metropolis algorithm which always moves the search region to be centred at a new minimum, and has probability e(- F/Fmin) of moving the search region to a higher point with function value F. This gives it the theoretical ability to jump through function barriers like a multidimensional quantum mechanical tunneler in search of isolated minima, but it is widely believed by at least half of the authors of Minuit that this is unlikely to work in practice (counterexamples are welcome) since it seems to depend critically on choosing the right average step size for the random jumps, and if you knew that, you wouldn't need Minuit. SIMPLEX This genuine multidimensional minimization routine is usually much slower than MIGRAD, but it does not use first derivatives, so it should not be so sensitive to the precision of the FCN calculations, and is even rather robust with respect to gross fluctuations in the function value. However, it gives no reliable information about parameter errors, no information whatsoever about parameter correlations, and worst of all cannot be expected to converge accurately to the minimum in a finite time. Its estimate of EDM is largely fantasy, so it would not even know if it did converge. */ // Print results Double_t amin,edm,errdef; Int_t nvpar,nparx,icstat; gMinuit->mnstat(amin,edm,errdef,nvpar,nparx,icstat); gMinuit->mnprin(3,amin); cout << "Chi quadro del minimo: " << amin << endl; // cout << "edm: " << edm << endl; }