1 |
diff -Naur orig.root/hist/hist/inc/TEfficiency.h root/hist/hist/inc/TEfficiency.h
|
2 |
--- orig.root/hist/hist/inc/TEfficiency.h 2010-11-05 15:46:35.000000000 +0100
|
3 |
+++ root/hist/hist/inc/TEfficiency.h 2011-02-09 13:32:06.000000000 +0100
|
4 |
@@ -3,6 +3,7 @@
|
5 |
|
6 |
//standard header
|
7 |
#include <vector>
|
8 |
+#include <utility>
|
9 |
|
10 |
//ROOT header
|
11 |
#ifndef ROOT_TNamed
|
12 |
@@ -25,6 +26,7 @@
|
13 |
class TF1;
|
14 |
class TGraphAsymmErrors;
|
15 |
class TH1;
|
16 |
+class TH2;
|
17 |
class TList;
|
18 |
|
19 |
//|TEfficiency
|
20 |
@@ -35,11 +37,12 @@
|
21 |
public:
|
22 |
//enumaration type for different statistic options for calculating confidence intervals
|
23 |
//kF* ... frequentist methods; kB* ... bayesian methods
|
24 |
- enum {
|
25 |
- kFCP, //Clopper-Pearson interval (recommended by PDG)
|
26 |
- kFNormal, //normal approximation
|
27 |
- kFWilson, //Wilson interval
|
28 |
+ enum EStatOption {
|
29 |
+ kFCP = 0, //Clopper-Pearson interval (recommended by PDG)
|
30 |
+ kFNormal, //normal approximation
|
31 |
+ kFWilson, //Wilson interval
|
32 |
kFAC, //Agresti-Coull interval
|
33 |
+ kFFC, //Feldman-Cousins interval
|
34 |
kBJeffrey, //Jeffrey interval (Prior ~ Beta(0.5,0.5)
|
35 |
kBUniform, //Prior ~ Uniform = Beta(1,1)
|
36 |
kBBayesian //user specified Prior ~ Beta(fBeta_alpha,fBeta_beta)
|
37 |
@@ -47,21 +50,26 @@
|
38 |
|
39 |
protected:
|
40 |
|
41 |
- Double_t fBeta_alpha; //parameter for prior beta distribution (default = 1)
|
42 |
- Double_t fBeta_beta; //parameter for prior beta distribution (default = 1)
|
43 |
+ Double_t fBeta_alpha; //global parameter for prior beta distribution (default = 1)
|
44 |
+ Double_t fBeta_beta; //global parameter for prior beta distribution (default = 1)
|
45 |
+ std::vector<std::pair<Double_t, Double_t> > fBeta_bin_params; // parameter for prior beta distribution different bin by bin
|
46 |
+ // (default vector is empty)
|
47 |
Double_t (*fBoundary)(Int_t,Int_t,Double_t,Bool_t); //!pointer to a method calculating the boundaries of confidence intervals
|
48 |
Double_t fConfLevel; //confidence level (default = 0.95)
|
49 |
TDirectory* fDirectory; //!pointer to directory holding this TEfficiency object
|
50 |
TList* fFunctions; //->pointer to list of functions
|
51 |
TGraphAsymmErrors* fPaintGraph; //!temporary graph for painting
|
52 |
- TH1* fPaintHisto; //!temporary histogram for painting
|
53 |
+ TH2* fPaintHisto; //!temporary histogram for painting
|
54 |
TH1* fPassedHistogram; //histogram for events which passed certain criteria
|
55 |
- Int_t fStatisticOption; //defines how the confidence intervals are determined
|
56 |
+ EStatOption fStatisticOption; //defines how the confidence intervals are determined
|
57 |
TH1* fTotalHistogram; //histogram for total number of events
|
58 |
Double_t fWeight; //weight for all events (default = 1)
|
59 |
|
60 |
enum{
|
61 |
- kIsBayesian = BIT(14) //bayesian statistics are used
|
62 |
+ kIsBayesian = BIT(14), //bayesian statistics are used
|
63 |
+ kPosteriorMode = BIT(15), //use posterior mean for best estimate (Bayesian statistics)
|
64 |
+ kShortestInterval = BIT(16), // use shortest interval
|
65 |
+ kUseBinPrior = BIT(17) // use a different prior for each bin
|
66 |
};
|
67 |
|
68 |
void Build(const char* name,const char* title);
|
69 |
@@ -88,12 +96,15 @@
|
70 |
~TEfficiency();
|
71 |
|
72 |
void Add(const TEfficiency& rEff) {*this += rEff;}
|
73 |
- void Draw(const Option_t* opt="");
|
74 |
+ virtual Int_t DistancetoPrimitive(Int_t px, Int_t py);
|
75 |
+ void Draw(Option_t* opt = "");
|
76 |
+ virtual void ExecuteEvent(Int_t event, Int_t px, Int_t py);
|
77 |
void Fill(Bool_t bPassed,Double_t x,Double_t y=0,Double_t z=0);
|
78 |
Int_t FindFixBin(Double_t x,Double_t y=0,Double_t z=0) const;
|
79 |
Int_t Fit(TF1* f1,Option_t* opt="");
|
80 |
- Double_t GetBetaAlpha() const {return fBeta_alpha;}
|
81 |
- Double_t GetBetaBeta() const {return fBeta_beta;}
|
82 |
+ // use trick of -1 to return global parameters
|
83 |
+ Double_t GetBetaAlpha(Int_t bin = -1) const {return (fBeta_bin_params.size() > (UInt_t)bin) ? fBeta_bin_params[bin].first : fBeta_alpha;}
|
84 |
+ Double_t GetBetaBeta(Int_t bin = -1) const {return (fBeta_bin_params.size() > (UInt_t)bin) ? fBeta_bin_params[bin].second : fBeta_beta;}
|
85 |
Double_t GetConfidenceLevel() const {return fConfLevel;}
|
86 |
TH1* GetCopyPassedHisto() const;
|
87 |
TH1* GetCopyTotalHisto() const;
|
88 |
@@ -103,44 +114,63 @@
|
89 |
Double_t GetEfficiencyErrorLow(Int_t bin) const;
|
90 |
Double_t GetEfficiencyErrorUp(Int_t bin) const;
|
91 |
Int_t GetGlobalBin(Int_t binx,Int_t biny=0,Int_t binz=0) const;
|
92 |
+ TGraphAsymmErrors* GetPaintedGraph() const { return fPaintGraph; }
|
93 |
+ TH2* GetPaintedHistogram() const { return fPaintHisto; }
|
94 |
TList* GetListOfFunctions() const {return fFunctions;}
|
95 |
const TH1* GetPassedHistogram() const {return fPassedHistogram;}
|
96 |
- Int_t GetStatisticOption() const {return fStatisticOption;}
|
97 |
+ EStatOption GetStatisticOption() const {return fStatisticOption;}
|
98 |
const TH1* GetTotalHistogram() const {return fTotalHistogram;}
|
99 |
Double_t GetWeight() const {return fWeight;}
|
100 |
void Merge(TCollection* list);
|
101 |
TEfficiency& operator+=(const TEfficiency& rhs);
|
102 |
TEfficiency& operator=(const TEfficiency& rhs);
|
103 |
- void Paint(const Option_t* opt);
|
104 |
+ void Paint(Option_t* opt);
|
105 |
void SavePrimitive(ostream& out,Option_t* opt="");
|
106 |
void SetBetaAlpha(Double_t alpha);
|
107 |
- void SetBetaBeta(Double_t beta);
|
108 |
+ void SetBetaBeta(Double_t beta);
|
109 |
+ void SetBetaBinParameters(Int_t bin, Double_t alpha, Double_t beta);
|
110 |
void SetConfidenceLevel(Double_t level);
|
111 |
void SetDirectory(TDirectory* dir);
|
112 |
void SetName(const char* name);
|
113 |
Bool_t SetPassedEvents(Int_t bin,Int_t events);
|
114 |
Bool_t SetPassedHistogram(const TH1& rPassed,Option_t* opt);
|
115 |
- void SetStatisticOption(Int_t option);
|
116 |
+ void SetPosteriorMode(Bool_t on = true) { SetBit(kPosteriorMode,on); if(on) SetShortestInterval(); }
|
117 |
+ void SetPosteriorAverage(Bool_t on = true) { SetBit(kPosteriorMode,!on); }
|
118 |
+ void SetShortestInterval(Bool_t on = true) { SetBit(kShortestInterval,on); }
|
119 |
+ void SetCentralInterval(Bool_t on = true) { SetBit(kShortestInterval,!on); }
|
120 |
+ void SetStatisticOption(EStatOption option);
|
121 |
void SetTitle(const char* title);
|
122 |
Bool_t SetTotalEvents(Int_t bin,Int_t events);
|
123 |
Bool_t SetTotalHistogram(const TH1& rTotal,Option_t* opt);
|
124 |
void SetWeight(Double_t weight);
|
125 |
Bool_t UsesBayesianStat() const {return TestBit(kIsBayesian);}
|
126 |
+ Bool_t UsesPosteriorMode() const {return TestBit(kPosteriorMode) && TestBit(kIsBayesian);}
|
127 |
+ Bool_t UsesShortestInterval() const {return TestBit(kShortestInterval) && TestBit(kIsBayesian);}
|
128 |
+ Bool_t UsesPosteriorAverage() const {return !UsesPosteriorMode();}
|
129 |
+ Bool_t UsesCentralInterval() const {return !UsesShortestInterval();}
|
130 |
|
131 |
static Bool_t CheckBinning(const TH1& pass,const TH1& total);
|
132 |
static Bool_t CheckConsistency(const TH1& pass,const TH1& total,Option_t* opt="");
|
133 |
static Bool_t CheckEntries(const TH1& pass,const TH1& total,Option_t* opt="");
|
134 |
static Double_t Combine(Double_t& up,Double_t& low,Int_t n,const Int_t* pass,const Int_t* total,
|
135 |
- const Double_t* alpha,const Double_t* beta,Double_t level=0.683,
|
136 |
+ Double_t alpha,Double_t beta,Double_t level=0.683,
|
137 |
const Double_t* w=0,Option_t* opt="");
|
138 |
- static TGraphAsymmErrors* Combine(TCollection* pList,Option_t* opt="N",Int_t n=0,const Double_t* w=0);
|
139 |
+ static TGraphAsymmErrors* Combine(TCollection* pList,Option_t* opt="",Int_t n=0,const Double_t* w=0);
|
140 |
|
141 |
//calculating boundaries of confidence intervals
|
142 |
static Double_t AgrestiCoull(Int_t total,Int_t passed,Double_t level,Bool_t bUpper);
|
143 |
- static Double_t Bayesian(Int_t total,Int_t passed,Double_t level,Double_t alpha,Double_t beta,Bool_t bUpper);
|
144 |
static Double_t ClopperPearson(Int_t total,Int_t passed,Double_t level,Bool_t bUpper);
|
145 |
static Double_t Normal(Int_t total,Int_t passed,Double_t level,Bool_t bUpper);
|
146 |
static Double_t Wilson(Int_t total,Int_t passed,Double_t level,Bool_t bUpper);
|
147 |
+ static Double_t FeldmanCousins(Int_t total,Int_t passed,Double_t level,Bool_t bUpper);
|
148 |
+ static Bool_t FeldmanCousinsInterval(Int_t total,Int_t passed,Double_t level,Double_t & lower, Double_t & upper);
|
149 |
+ // Bayesian functions
|
150 |
+ static Double_t Bayesian(Int_t total,Int_t passed,Double_t level,Double_t alpha,Double_t beta,Bool_t bUpper, Bool_t bShortest = false);
|
151 |
+ // helper functions for Bayesian statistics
|
152 |
+ static Double_t BetaCentralInterval(Double_t level,Double_t alpha,Double_t beta,Bool_t bUpper);
|
153 |
+ static Bool_t BetaShortestInterval(Double_t level,Double_t alpha,Double_t beta,Double_t & lower, Double_t & upper);
|
154 |
+ static Double_t BetaMean(Double_t alpha,Double_t beta);
|
155 |
+ static Double_t BetaMode(Double_t alpha,Double_t beta);
|
156 |
|
157 |
ClassDef(TEfficiency,1) //calculating efficiencies
|
158 |
};
|
159 |
diff -Naur orig.root/hist/hist/src/TEfficiency.cxx root/hist/hist/src/TEfficiency.cxx
|
160 |
--- orig.root/hist/hist/src/TEfficiency.cxx 2010-11-05 15:46:35.000000000 +0100
|
161 |
+++ root/hist/hist/src/TEfficiency.cxx 2011-02-09 13:32:06.000000000 +0100
|
162 |
@@ -5,9 +5,11 @@
|
163 |
#include <vector>
|
164 |
#include <string>
|
165 |
#include <cmath>
|
166 |
+#include <stdlib.h>
|
167 |
|
168 |
//ROOT headers
|
169 |
-#include "Math/QuantFuncMathCore.h"
|
170 |
+#include "Math/ProbFunc.h"
|
171 |
+#include "Math/QuantFunc.h"
|
172 |
#include "TBinomialEfficiencyFitter.h"
|
173 |
#include "TDirectory.h"
|
174 |
#include "TF1.h"
|
175 |
@@ -20,15 +22,21 @@
|
176 |
#include "TROOT.h"
|
177 |
#include "TStyle.h"
|
178 |
#include "TVirtualPad.h"
|
179 |
+#include "TError.h"
|
180 |
+#include "Math/BrentMinimizer1D.h"
|
181 |
+#include "Math/WrappedFunction.h"
|
182 |
|
183 |
//custom headers
|
184 |
#include "TEfficiency.h"
|
185 |
|
186 |
+// file with extra class for FC method
|
187 |
+#include "TEfficiencyHelper.h"
|
188 |
+
|
189 |
//default values
|
190 |
const Double_t kDefBetaAlpha = 1;
|
191 |
const Double_t kDefBetaBeta = 1;
|
192 |
const Double_t kDefConfLevel = 0.682689492137; // 1 sigma
|
193 |
-const Int_t kDefStatOpt = TEfficiency::kFCP;
|
194 |
+const TEfficiency::EStatOption kDefStatOpt = TEfficiency::kFCP;
|
195 |
const Double_t kDefWeight = 1;
|
196 |
|
197 |
ClassImp(TEfficiency)
|
198 |
@@ -205,12 +213,21 @@
|
199 |
// #Rightarrow P(#epsilon | k ; N) = #frac{1}{norm'} #times #epsilon^{k + #alpha - 1} #times (1 - #epsilon)^{N - k + #beta - 1} #equiv Beta(#epsilon; k + #alpha, N - k + #beta)
|
200 |
// End_Latex
|
201 |
// Begin_Html
|
202 |
-// The expectation value of this posterior distribution is used as estimator for the efficiency:
|
203 |
+// By default the expectation value of this posterior distribution is used as estimator for the efficiency:
|
204 |
// End_Html
|
205 |
// Begin_Latex
|
206 |
// #hat{#varepsilon} = #frac{k + #alpha}{N + #alpha + #beta}
|
207 |
// End_Latex
|
208 |
-// Begin_Html
|
209 |
+// Begin_Html
|
210 |
+// Optionally the mode can also be used as value for the estimated efficiency. This can be done by calling SetBit(kPosteriorMode) or
|
211 |
+// <a href="http://root.cern.ch/root/html/TEfficiency.html#TEfficiency:SetPosteriorMode">SetPosteriorMode</a>. In this case the estimated efficiency is:
|
212 |
+// End_Html
|
213 |
+// Begin_Latex
|
214 |
+// #hat{#varepsilon} = #frac{k + #alpha -1}{N + #alpha + #beta - 2}
|
215 |
+// End_Latex
|
216 |
+// Begin_Html
|
217 |
+// In the case of a uniform prior distribution, B(x,1,1), the posterior mode is k/n, equivalent to the frequentist estimate (the maximum likelihood value).
|
218 |
+//
|
219 |
// The statistic options also specifiy which confidence interval is used for calculating
|
220 |
// the uncertainties of the efficiency. The following properties define the error
|
221 |
// calculation:
|
222 |
@@ -219,6 +236,7 @@
|
223 |
// <li><b>fStatisticOption:</b> defines which method is used to calculate the boundaries of the confidence interval (<a href="http://root.cern.ch/root/html/TEfficiency.html#TEfficiency:SetStatisticOption">SetStatisticOption</a>)</li>
|
224 |
// <li><b>fBeta_alpha, fBeta_beta:</b> parameters for the prior distribution which is only used in the bayesian case (<a href="http://root.cern.ch/root/html/TEfficiency.html#TEfficiency:GetBetaAlpha">GetBetaAlpha</a> / <a href="http://root.cern.ch/root/html/TEfficiency.html#TEfficiency:GetBetaBeta">GetBetaBeta</a> / <a href="http://root.cern.ch/root/html/TEfficiency.html#TEfficiency:SetBetaAlpha">SetBetaAlpha</a> / <a href="http://root.cern.ch/root/html/TEfficiency.html#TEfficiency:SetBetaBeta">SetBetaBeta</a>)</li>
|
225 |
// <li><b>kIsBayesian:</b> flag whether bayesian statistics are used or not (<a href="http://root.cern.ch/root/html/TEfficiency.html#TEfficiency:UsesBayesianStat">UsesBayesianStat</a>)</li>
|
226 |
+// <li><b>kShortestInterval:</b> flag whether shortest interval (instead of central one) are used in case of Bayesian statistics (<a href="http://root.cern.ch/root/html/TEfficiency.html#TEfficiency:UsesShortestInterval">UsesShortestInterval</a>). Normally shortest interval should be used in combination with the mode (see <a href="http://root.cern.ch/root/html/TEfficiency.html#TEfficiency:UsesPosteriorMode">UsesPosteriorMode</a>)</li>
|
227 |
// <li><b>fWeight:</b> global weight for this TEfficiency object which is used during combining or merging with other TEfficiency objects(<a href="http://root.cern.ch/root/html/TEfficiency.html#TEfficiency:GetWeight">GetWeight</a> / <a href="http://root.cern.ch/root/html/TEfficiency.html#TEfficiency:SetWeight">SetWeight</a>)</li>
|
228 |
// </ul>
|
229 |
// In the following table the implemented confidence intervals are listed
|
230 |
@@ -270,6 +288,16 @@
|
231 |
// </td>
|
232 |
// </tr>
|
233 |
// <tr>
|
234 |
+// <td>Feldman-Cousins</td><td>kFFC</td>
|
235 |
+// <td>
|
236 |
+// <a href="http://root.cern.ch/root/html/TEfficiency.html#TEfficiency:FeldmanCousins">FeldmanCousins</a>
|
237 |
+// </td>
|
238 |
+// <td>false</td>
|
239 |
+// <td>
|
240 |
+// <ul><li>total events</li><li>passed events</li><li>confidence level</li></ul>
|
241 |
+// </td>
|
242 |
+// </tr>
|
243 |
+// <tr>
|
244 |
// <td>Jeffrey</td><td>kBJeffrey</td>
|
245 |
// <td>
|
246 |
// <a href="http://root.cern.ch/root/html/TEfficiency.html#TEfficiency:Bayesian">Bayesian</a>
|
247 |
@@ -509,12 +537,17 @@
|
248 |
// histograms, the result is not stored in a TEfficiency object but a TGraphAsymmErrors
|
249 |
// is returned which shows the estimated combined efficiency and its uncertainty
|
250 |
// for each bin. At the moment the combination method <a href="http://root.cern.ch/root/html/TEfficiency.html#TEfficiency:Combine">Combine </a>only supports combination of 1-dimensional efficiencies in a bayesian approach.<br />
|
251 |
-// For calculating the combined efficiency and its uncertainty for each bin, a weighted average posterior distribution is constructed. An equal-tailed confidence interval is constructed which might be not the shortest one.
|
252 |
+// For calculating the combined efficiency and its uncertainty for each bin only Bayesian statistics is used. No frequentists methods are presently
|
253 |
+// supported for computing the combined efficiency and its confidence interval.
|
254 |
+// In the case of the Bayesian statistics a combined posterior is constructed taking into account the weight of each TEfficiency object. The same prior is used
|
255 |
+// for all the TEfficiency objects.
|
256 |
// End_Html
|
257 |
-// Begin_Latex(separator='=',align='rl')
|
258 |
-// P_{comb}(#epsilon | {k_{i}} , {N_{i}}) = norm #times #sum_{j} p_{j} #times P_{j}(#epsilon | k_{j} , N_{j})
|
259 |
-// p_{j} = probability of an event coming from sub sample j
|
260 |
-// p_{j} = w_{j} #times N_{j} is used if no probabilites are given
|
261 |
+// Begin_Latex
|
262 |
+// P_{comb}(#epsilon | {w_{i}}, {k_{i}} , {N_{i}}) = #frac{1}{norm} #prod_{i}{L(k_{i} | N_{i}, #epsilon)}^{w_{i}} #Pi( #epsilon )
|
263 |
+// L(k_{i} | N_{i}, #epsilon) is the likelihood function for the sample i ( a Binomial distribution)
|
264 |
+// #Pi( #epsilon) is the prior, a beta distribution B(#epsilon, #alpha, #beta).
|
265 |
+// The resulting combined posterior is
|
266 |
+// P_{comb}(#epsilon |{w_{i}}; {k_{i}}; {N_{i}}) = B(#epsilon, #sum_{i}{ w_{i} k_{i}} + #alpha, #sum_{i}{ w_{i}(n_{i}-k_{i})}+#beta)
|
267 |
// #hat{#varepsilon} = #int_{0}^{1} #epsilon #times P_{comb}(#epsilon | {k_{i}} , {N_{i}}) d#epsilon
|
268 |
// confidence level = 1 - #alpha
|
269 |
// #frac{#alpha}{2} = #int_{0}^{#epsilon_{low}} P_{comb}(#epsilon | {k_{i}} , {N_{i}}) d#epsilon ... defines lower boundary
|
270 |
@@ -524,14 +557,13 @@
|
271 |
// <b>Example:</b><br />
|
272 |
// If you use cuts to select electrons which can originate from two different
|
273 |
// processes, you can determine the selection efficiency for each process. The
|
274 |
-// overall selection efficiency is then the combined efficiency. The weights for
|
275 |
-// the individual posterior distributions should be the probability that an
|
276 |
+// overall selection efficiency is then the combined efficiency. The weights to be used in the
|
277 |
+// combination should be the probability that an
|
278 |
// electron comes from the corresponding process.
|
279 |
// End_Html
|
280 |
// Begin_Latex
|
281 |
// p_{1} = #frac{#sigma_{1}}{#sigma_{1} + #sigma_{2}} = #frac{N_{1}w_{1}}{N_{1}w_{1} + N_{2}w_{2}}
|
282 |
-// p_{2} = #frac{#sigma_{2}}{#sigma_{1} + #sigma{2}} = #frac{N_{2}w_{2}}{N_{1}w_{1} + N_{2}w_{2}}
|
283 |
-// P_{ges}(#epsilon | k_{1}, k_{2}; N_{1}, N_{2}) = p_{1} #times P_{1}(#epsilon | k_{1}; N_{1}) + p_{2} #times P_{2}(#epsilon | k_{2}; N_{2})
|
284 |
+// p_{2} = #frac{#sigma_{2}}{#sigma_{1} + #sigma_{2}} = #frac{N_{2}w_{2}}{N_{1}w_{1} + N_{2}w_{2}}
|
285 |
// End_Latex
|
286 |
// Begin_Html
|
287 |
// <h3><a name="other">VI. Further operations</a></h3>
|
288 |
@@ -1077,9 +1109,62 @@
|
289 |
}
|
290 |
|
291 |
//______________________________________________________________________________
|
292 |
-Double_t TEfficiency::Bayesian(Int_t total,Int_t passed,Double_t level,Double_t alpha,Double_t beta,Bool_t bUpper)
|
293 |
+Double_t TEfficiency::FeldmanCousins(Int_t total,Int_t passed,Double_t level,Bool_t bUpper)
|
294 |
+{
|
295 |
+ //calculates the boundaries for the frequentist Feldman-Cousins interval
|
296 |
+ //
|
297 |
+ //Input: - total : number of total events
|
298 |
+ // - passed: 0 <= number of passed events <= total
|
299 |
+ // - level : confidence level
|
300 |
+ // - bUpper: true - upper boundary is returned
|
301 |
+ // false - lower boundary is returned
|
302 |
+ //
|
303 |
+ //
|
304 |
+ Double_t lower = 0;
|
305 |
+ Double_t upper = 1;
|
306 |
+ if (!FeldmanCousinsInterval(total,passed,level, lower, upper)) {
|
307 |
+ ::Error("FeldmanCousins","Error running FC method - return 0 or 1");
|
308 |
+ }
|
309 |
+ return (bUpper) ? upper : lower;
|
310 |
+}
|
311 |
+Bool_t TEfficiency::FeldmanCousinsInterval(Int_t total,Int_t passed,Double_t level,Double_t & lower, Double_t & upper)
|
312 |
+{
|
313 |
+ //calculates the interval boundaries using the frequentist methods of Feldman-Cousins
|
314 |
+ //
|
315 |
+ //Input: - total : number of total events
|
316 |
+ // - passed: 0 <= number of passed events <= total
|
317 |
+ // - level : confidence level
|
318 |
+ //Output:
|
319 |
+ // - lower : lower boundary returned on exit
|
320 |
+   //  - upper : upper boundary returned on exit
|
321 |
+ //
|
322 |
+ //Return a flag with the status of the calculation
|
323 |
+ //
|
324 |
+ // Calculation:
|
325 |
+ // The Feldman-Cousins is a frequentist method where the interval is estimated using a Neyman construction where the ordering
|
326 |
+ // is based on the likelihood ratio:
|
327 |
+ //Begin_Latex(separator='=',align='rl')
|
328 |
+ // LR = #frac{Binomial(k | N, #epsilon)}{Binomial(k | N, #hat{#epsilon} ) }
|
329 |
+ //End_Latex
|
330 |
+ //See G. J. Feldman and R. D. Cousins, Phys. Rev. D57 (1998) 3873
|
331 |
+ // and R. D. Cousins, K. E. Hymes, J. Tucker, Nuclear Instruments and Methods in Physics Research A 612 (2010) 388
|
332 |
+ //
|
333 |
+ // Implemented using classes developed by Jordan Tucker and Luca Lista
|
334 |
+ // See File hist/hist/src/TEfficiencyHelper.h
|
335 |
+ //
|
336 |
+ FeldmanCousinsBinomialInterval fc;
|
337 |
+ double alpha = 1.-level;
|
338 |
+ fc.Init(alpha);
|
339 |
+ fc.Calculate(passed, total);
|
340 |
+ lower = fc.Lower();
|
341 |
+ upper = fc.Upper();
|
342 |
+ return true;
|
343 |
+}
|
344 |
+
|
345 |
+//______________________________________________________________________________
|
346 |
+Double_t TEfficiency::Bayesian(Int_t total,Int_t passed,Double_t level,Double_t alpha,Double_t beta,Bool_t bUpper, Bool_t bShortest)
|
347 |
{
|
348 |
- //calculates the boundaries for a baysian confidence interval
|
349 |
+ //calculates the boundaries for a Bayesian confidence interval (shortest or central interval depending on the option)
|
350 |
//
|
351 |
//Input: - total : number of total events
|
352 |
// - passed: 0 <= number of passed events <= total
|
353 |
@@ -1089,8 +1174,9 @@
|
354 |
// - bUpper: true - upper boundary is returned
|
355 |
// false - lower boundary is returned
|
356 |
//
|
357 |
- //Note: The equal-tailed confidence interval is calculated which might be not
|
358 |
- // the shortest interval containing the desired coverage probability.
|
359 |
+   //Note: By default the central confidence interval is calculated.
|
360 |
+ // when passed = 0 (or passed = total) the lower (or upper)
|
361 |
+ // interval values will be larger than 0 (or smaller than 1).
|
362 |
//
|
363 |
//Calculation:
|
364 |
//
|
365 |
@@ -1105,27 +1191,188 @@
|
366 |
//Begin_Latex Prior(#varepsilon) = #frac{1}{B(#alpha,#beta)} #varepsilon ^{#alpha - 1} (1 - #varepsilon)^{#beta - 1}End_Latex
|
367 |
//The posterior probability is therefore again given by a beta distribution:
|
368 |
//Begin_Latex P(#varepsilon |k,N) #propto #varepsilon ^{k + #alpha - 1} (1 - #varepsilon)^{N - k + #beta - 1} End_Latex
|
369 |
- //The lower boundary for the equal-tailed confidence interval is given by the
|
370 |
+ //In case of central intervals
|
371 |
+ //the lower boundary for the equal-tailed confidence interval is given by the
|
372 |
//inverse cumulative (= quantile) function for the quantile Begin_Latex #frac{1 - level}{2} End_Latex.
|
373 |
//The upper boundary for the equal-tailed confidence interval is given by the
|
374 |
//inverse cumulative (= quantile) function for the quantile Begin_Latex #frac{1 + level}{2} End_Latex.
|
375 |
//Hence it is the solution Begin_Latex #varepsilon End_Latex of the following equation:
|
376 |
//Begin_Latex I_{#varepsilon}(k + #alpha,N - k + #beta) = #frac{1}{norm} #int_{0}^{#varepsilon} dt t^{k + #alpha - 1} (1 - t)^{N - k + #beta - 1} = #frac{1 #pm level}{2} End_Latex
|
377 |
+   // In the case of the shortest interval the minimum interval around the mode is found by minimizing the length of all intervals with the
|
378 |
+ // given probability content. See TEfficiency::BetaShortestInterval
|
379 |
+
|
380 |
+ Double_t a = double(passed)+alpha;
|
381 |
+ Double_t b = double(total-passed)+beta;
|
382 |
+
|
383 |
+ if (bShortest) {
|
384 |
+ double lower = 0;
|
385 |
+ double upper = 1;
|
386 |
+ BetaShortestInterval(level,a,b,lower,upper);
|
387 |
+ return (bUpper) ? upper : lower;
|
388 |
+ }
|
389 |
+ else
|
390 |
+ return BetaCentralInterval(level, a, b, bUpper);
|
391 |
+}
|
392 |
+//______________________________________________________________________________
|
393 |
+Double_t TEfficiency::BetaCentralInterval(Double_t level,Double_t a,Double_t b,Bool_t bUpper)
|
394 |
+{
|
395 |
+ //calculates the boundaries for a central confidence interval for a Beta distribution
|
396 |
+ //
|
397 |
+ //Input: - level : confidence level
|
398 |
+ // - a : parameter > 0 for the beta distribution (for a posterior is passed + prior_alpha
|
399 |
+ // - b : parameter > 0 for the beta distribution (for a posterior is (total-passed) + prior_beta
|
400 |
+ // - bUpper: true - upper boundary is returned
|
401 |
+ // false - lower boundary is returned
|
402 |
+ //
|
403 |
|
404 |
if(bUpper) {
|
405 |
- if((alpha > 0) && (beta > 0))
|
406 |
- return (passed == total)? 1.0 : ROOT::Math::beta_quantile((1+level)/2,passed+alpha,total-passed+beta);
|
407 |
- else
|
408 |
+ if((a > 0) && (b > 0))
|
409 |
+ return ROOT::Math::beta_quantile((1+level)/2,a,b);
|
410 |
+ else {
|
411 |
+ gROOT->Error("TEfficiency::BayesianCentral","Invalid input parameters - return 1");
|
412 |
return 1;
|
413 |
+ }
|
414 |
}
|
415 |
else {
|
416 |
- if((alpha > 0) && (beta > 0))
|
417 |
- return (passed == 0)? 0.0 : ROOT::Math::beta_quantile((1-level)/2,passed+alpha,total-passed+beta);
|
418 |
- else
|
419 |
+ if((a > 0) && (b > 0))
|
420 |
+ return ROOT::Math::beta_quantile((1-level)/2,a,b);
|
421 |
+ else {
|
422 |
+ gROOT->Error("TEfficiency::BayesianCentral","Invalid input parameters - return 0");
|
423 |
return 0;
|
424 |
+ }
|
425 |
}
|
426 |
}
|
427 |
|
428 |
+struct Beta_interval_length {
|
429 |
+ Beta_interval_length(Double_t level,Double_t alpha,Double_t beta ) :
|
430 |
+ fCL(level), fAlpha(alpha), fBeta(beta)
|
431 |
+ {}
|
432 |
+
|
433 |
+ Double_t LowerMax() {
|
434 |
+ // max allowed value of lower given the interval size
|
435 |
+ return ROOT::Math::beta_quantile_c(fCL, fAlpha,fBeta);
|
436 |
+ }
|
437 |
+
|
438 |
+ Double_t operator() (double lower) const {
|
439 |
+ // return length of interval
|
440 |
+ Double_t plow = ROOT::Math::beta_cdf(lower, fAlpha, fBeta);
|
441 |
+ Double_t pup = plow + fCL;
|
442 |
+ double upper = ROOT::Math::beta_quantile(pup, fAlpha,fBeta);
|
443 |
+ return upper-lower;
|
444 |
+ }
|
445 |
+ Double_t fCL; // interval size (confidence level)
|
446 |
+ Double_t fAlpha; // beta distribution alpha parameter
|
447 |
+ Double_t fBeta; // beta distribution beta parameter
|
448 |
+
|
449 |
+};
|
450 |
+
|
451 |
+//______________________________________________________________________________
|
452 |
+Bool_t TEfficiency::BetaShortestInterval(Double_t level,Double_t a,Double_t b, Double_t & lower, Double_t & upper)
|
453 |
+{
|
454 |
+ //calculates the boundaries for a shortest confidence interval for a Beta distribution
|
455 |
+ //
|
456 |
+ //Input: - level : confidence level
|
457 |
+ // - a : parameter > 0 for the beta distribution (for a posterior is passed + prior_alpha
|
458 |
+ // - b : parameter > 0 for the beta distribution (for a posterior is (total-passed) + prior_beta
|
459 |
+   //        - lower : lower boundary returned on exit
|
460 |
+   //        - upper : upper boundary returned on exit
|
461 |
+ //
|
462 |
+ //
|
463 |
+   //The lower/upper boundaries are then obtained by finding the shortest interval of the beta distribution
|
464 |
+   // containing the desired probability level.
|
465 |
+ // The length of all possible intervals is minimized in order to find the shortest one
|
466 |
+
|
467 |
+ if (a <= 0 || b <= 0) {
|
468 |
+ lower = 0; upper = 1;
|
469 |
+ gROOT->Error("TEfficiency::BayesianShortest","Invalid input parameters - return [0,1]");
|
470 |
+ return kFALSE;
|
471 |
+ }
|
472 |
+
|
473 |
+ // treat here special cases when mode == 0 or 1
|
474 |
+ double mode = BetaMode(a,b);
|
475 |
+ if (mode == 0.0) {
|
476 |
+ lower = 0;
|
477 |
+ upper = ROOT::Math::beta_quantile(level, a, b);
|
478 |
+ return kTRUE;
|
479 |
+ }
|
480 |
+ if (mode == 1.0) {
|
481 |
+ lower = ROOT::Math::beta_quantile_c(level, a, b);
|
482 |
+ upper = 1.0;
|
483 |
+ return kTRUE;
|
484 |
+ }
|
485 |
+ // special case when the shortest interval is undefined return the central interval
|
486 |
+ // can happen for a posterior when passed=total=0
|
487 |
+ //
|
488 |
+ if ( a==b && a<1.0) {
|
489 |
+ lower = BetaCentralInterval(level,a,b,kFALSE);
|
490 |
+ upper = BetaCentralInterval(level,a,b,kTRUE);
|
491 |
+ return kTRUE;
|
492 |
+ }
|
493 |
+
|
494 |
+ // for the other case perform a minimization
|
495 |
+ // make a function of the length of the posterior interval as a function of lower bound
|
496 |
+ Beta_interval_length intervalLength(level,a,b);
|
497 |
+ // minimize the interval length
|
498 |
+ ROOT::Math::WrappedFunction<const Beta_interval_length &> func(intervalLength);
|
499 |
+ ROOT::Math::BrentMinimizer1D minim;
|
500 |
+ minim.SetFunction(func, 0, intervalLength.LowerMax() );
|
501 |
+ minim.SetNpx(2); // no need to bracket with many iterations. Just do few times to estimate some better points
|
502 |
+ bool ret = minim.Minimize(100, 1.E-10,1.E-10);
|
503 |
+ if (!ret) {
|
504 |
+ gROOT->Error("TEfficiency::BayesianShortes","Error finding the shortest interval");
|
505 |
+ return kFALSE;
|
506 |
+ }
|
507 |
+ lower = minim.XMinimum();
|
508 |
+ upper = lower + minim.FValMinimum();
|
509 |
+ return kTRUE;
|
510 |
+}
|
511 |
+
|
512 |
+//______________________________________________________________________________
|
513 |
+Double_t TEfficiency::BetaMean(Double_t a,Double_t b)
|
514 |
+{
|
515 |
+ // compute the mean (average) of the beta distribution
|
516 |
+ //
|
517 |
+ //Input: a : parameter > 0 for the beta distribution (for a posterior is passed + prior_alpha
|
518 |
+ // b : parameter > 0 for the beta distribution (for a posterior is (total-passed) + prior_beta
|
519 |
+ //
|
520 |
+
|
521 |
+ if (a <= 0 || b <= 0 ) {
|
522 |
+ gROOT->Error("TEfficiency::BayesianMean","Invalid input parameters - return 0");
|
523 |
+ return 0;
|
524 |
+ }
|
525 |
+
|
526 |
+ Double_t mean = a / (a + b);
|
527 |
+ return mean;
|
528 |
+}
|
529 |
+
|
530 |
+//______________________________________________________________________________
|
531 |
+Double_t TEfficiency::BetaMode(Double_t a,Double_t b)
|
532 |
+{
|
533 |
+ // compute the mode of the beta distribution
|
534 |
+ //
|
535 |
+ //Input: a : parameter > 0 for the beta distribution (for a posterior is passed + prior_alpha
|
536 |
+ // b : parameter > 0 for the beta distribution (for a posterior is (total-passed) + prior_beta
|
537 |
+ //
|
538 |
+ // note the mode is defined for a Beta(a,b) only if (a,b)>1 (a = passed+alpha; b = total-passed+beta)
|
539 |
+ // return then the following in case (a,b) < 1:
|
540 |
+ // if (a==b) return 0.5 (it is really undefined)
|
541 |
+ // if (a < b) return 0;
|
542 |
+ // if (a > b) return 1;
|
543 |
+
|
544 |
+ if (a <= 0 || b <= 0 ) {
|
545 |
+ gROOT->Error("TEfficiency::BayesianMode","Invalid input parameters - return 0");
|
546 |
+ return 0;
|
547 |
+ }
|
548 |
+ if ( a <= 1 || b <= 1) {
|
549 |
+ if ( a < b) return 0;
|
550 |
+ if ( a > b) return 1;
|
551 |
+ if (a == b) return 0.5; // cannot do otherwise
|
552 |
+ }
|
553 |
+
|
554 |
+ // since a and b are > 1 here denominator cannot be 0 or < 0
|
555 |
+ Double_t mode = (a - 1.0) / (a + b -2.0);
|
556 |
+ return mode;
|
557 |
+}
|
558 |
//______________________________________________________________________________
|
559 |
void TEfficiency::Build(const char* name,const char* title)
|
560 |
{
|
561 |
@@ -1328,11 +1575,10 @@
|
562 |
else
|
563 |
return ((passed == 0) ? 0.0 : ROOT::Math::beta_quantile(alpha,passed,total-passed+1.0));
|
564 |
}
|
565 |
-
|
566 |
//______________________________________________________________________________
|
567 |
Double_t TEfficiency::Combine(Double_t& up,Double_t& low,Int_t n,
|
568 |
const Int_t* pass,const Int_t* total,
|
569 |
- const Double_t* alpha,const Double_t* beta,
|
570 |
+ Double_t alpha, Double_t beta,
|
571 |
Double_t level,const Double_t* w,Option_t* opt)
|
572 |
{
|
573 |
//calculates the combined efficiency and its uncertainties
|
574 |
@@ -1350,36 +1596,28 @@
|
575 |
//- beta : shape parameters for the beta distribution as prior
|
576 |
//- level : desired confidence level
|
577 |
//- w : weights for each sample; if not given, all samples get the weight 1
|
578 |
+ // The weights do not need to be normalized, since they are internally renormalized
|
579 |
+ // to the number of effective entries.
|
580 |
//- options:
|
581 |
- // + N : The weight for each sample is multiplied by the number of total events.
|
582 |
- // -> weight = w[i] * N[i]
|
583 |
- // This can be usefull when the weights and probability for each sample are given by
|
584 |
- //Begin_Latex(separator='=',align='rl')
|
585 |
- // w_{i} = #frac{#sigma_{i} #times L}{N_{i}}
|
586 |
- // p_{i} = #frac{#sigma_{i}}{sum_{j} #sigma_{j}} #equiv #frac{N_{i} #times w_{i}}{sum_{j} N_{j} #times w_{j}}
|
587 |
- //End_Latex
|
588 |
+ //
|
589 |
+ // + mode : The mode is returned instead of the mean of the posterior as best value
|
590 |
+ // When using the mode the shortest interval is also computed instead of the central one
|
591 |
+ // + shortest: compute shortest interval (done by default if mode option is set)
|
592 |
+ // + central: compute central interval (done by default if mode option is NOT set)
|
593 |
+ //
|
594 |
//Begin_Html
|
595 |
- //Notation:
|
596 |
- //<ul>
|
597 |
- //<li>k = passed events</li>
|
598 |
- //<li>N = total evens</li>
|
599 |
- //<li>n = number of combined samples</li>
|
600 |
- //<li>i = index for numbering samples</li>
|
601 |
- //<li>p = probability of sample i (either w[i] or w[i] * N[i], see options)</li>
|
602 |
- //</ul>
|
603 |
- //calculation:
|
604 |
+ //Calculation:
|
605 |
//<ol>
|
606 |
- //<li>The combined posterior distributions is calculated</li>
|
607 |
- //End_Html
|
608 |
+ //<li>The combined posterior distributions is calculated from the Bayes theorem assuming a common prior Beta distribution.
|
609 |
+ // It is easy to prove that the combined posterior is then:</li>
|
610 |
//Begin_Latex(separator='=',align='rl')
|
611 |
- //P_{comb}(#epsilon |{k_{i}}; {N_{i}}) = #frac{1}{sum p_{i}} #times #sum_{i} p_{i} #times P_{i}(#epsilon | k_{i}; N_{i})
|
612 |
- //p_{i} = w[i] or w_{i} #times N_{i} if option "N" is specified
|
613 |
+ //P_{comb}(#epsilon |{w_{i}}; {k_{i}}; {N_{i}}) = B(#epsilon, #sum_{i}{ w_{i} k_{i}} + #alpha, #sum_{i}{ w_{i}(n_{i}-k_{i})}+#beta)
|
614 |
+ //w_{i} = weight for each sample renormalized to the effective entries
|
615 |
+ //w^{'}_{i} = w_{i} #frac{ #sum_{i} {w_{i} } } { #sum_{i} {w_{i}^{2} } }
|
616 |
//End_Latex
|
617 |
//Begin_Html
|
618 |
- //<li>The estimated efficiency is the weighted average of the individual efficiencies.</li>
|
619 |
+ //<li>The estimated efficiency is the mode (or the mean) of the obtained posterior distribution </li>
|
620 |
//End_Html
|
621 |
- //Begin_Latex #hat{#varepsilon}_{comb} = #frac{1}{sum p_{i}} #times #sum_{i} p_{i} #times #frac{pass_{i} + #alpha_{i}}{total_{i} + #alpha_{i} + #beta_{i}}
|
622 |
- //End_Latex
|
623 |
//Begin_Html
|
624 |
//<li>The boundaries of the confidence interval for a confidence level (1 - a)
|
625 |
//are given by the a/2 and 1-a/2 quantiles of the resulting cumulative
|
626 |
@@ -1387,16 +1625,20 @@
|
627 |
//</ol>
|
628 |
//End_Html
|
629 |
//Example (uniform prior distribution):
|
630 |
- //Begin_Macro
|
631 |
+ //Begin_Macro(source)
|
632 |
//{
|
633 |
// TCanvas* c1 = new TCanvas("c1","",600,800);
|
634 |
// c1->Divide(1,2);
|
635 |
// c1->SetFillStyle(1001);
|
636 |
// c1->SetFillColor(kWhite);
|
637 |
//
|
638 |
- // TF1* p1 = new TF1("p1","TMath::BetaDist(x,18,8)",0,1);
|
639 |
- // TF1* p2 = new TF1("p2","TMath::BetaDist(x,3,7)",0,1);
|
640 |
- // TF1* comb = new TF1("comb","0.6*p1 + 0.4*p2",0,1);
|
641 |
+ // TF1* p1 = new TF1("p1","TMath::BetaDist(x,19,9)",0,1);
|
642 |
+ // TF1* p2 = new TF1("p2","TMath::BetaDist(x,4,8)",0,1);
|
643 |
+ // TF1* comb = new TF1("comb2","TMath::BetaDist(x,[0],[1])",0,1);
|
644 |
+ // double norm = 1./(0.6*0.6+0.4*0.4); // weight normalization
|
645 |
+ // double a = 0.6*18.0 + 0.4*3.0 + 1.0; // new alpha parameter of combined beta dist.
|
646 |
+ // double b = 0.6*10+0.4*7+1.0; // new beta parameter of combined beta dist.
|
647 |
+ // comb->SetParameters(norm*a ,norm *b );
|
648 |
// TF1* const1 = new TF1("const1","0.05",0,1);
|
649 |
// TF1* const2 = new TF1("const2","0.95",0,1);
|
650 |
//
|
651 |
@@ -1405,15 +1647,15 @@
|
652 |
// p2->SetLineColor(kBlue);
|
653 |
// comb->SetLineColor(kGreen+2);
|
654 |
//
|
655 |
- // TLegend* leg1 = new TLegend(0.2,0.65,0.6,0.85);
|
656 |
+ // TLegend* leg1 = new TLegend(0.12,0.65,0.5,0.85);
|
657 |
// leg1->AddEntry(p1,"k1 = 18, N1 = 26","l");
|
658 |
// leg1->AddEntry(p2,"k2 = 3, N2 = 10","l");
|
659 |
// leg1->AddEntry(comb,"combined: p1 = 0.6, p2=0.4","l");
|
660 |
//
|
661 |
// c1->cd(1);
|
662 |
- // p1->Draw();
|
663 |
+ // comb->Draw();
|
664 |
+ // p1->Draw("same");
|
665 |
// p2->Draw("same");
|
666 |
- // comb->Draw("same");
|
667 |
// leg1->Draw("same");
|
668 |
// c1->cd(2);
|
669 |
// const1->SetLineWidth(1);
|
670 |
@@ -1428,71 +1670,60 @@
|
671 |
//}
|
672 |
//End_Macro
|
673 |
|
674 |
- Double_t sumweights = 0;
|
675 |
- Double_t weight;
|
676 |
- Double_t mean = 0;
|
677 |
- TString formula = "( 0 ";
|
678 |
-
|
679 |
- Bool_t bModWeights = false;
|
680 |
-
|
681 |
- TString option = opt;
|
682 |
+ TString option(opt);
|
683 |
option.ToLower();
|
684 |
- if(option.Contains("n"))
|
685 |
- bModWeights = true;
|
686 |
-
|
687 |
- //create formula for cumulative of total posterior
|
688 |
- // cdf = 1/sum of weights * sum_i (weight_i * cdf_i(pass_i,total_i,alpha_i,beta_i))
|
689 |
- // and cdf_i(pass_i,total_i,alpha_i,beta_i) = beta_incomplete(pass_i + alpha_i,total_i - pass_i + beta_i)
|
690 |
- //combined efficiency is weighted average of individual efficiencies
|
691 |
- for(Int_t i = 0; i < n; ++i) {
|
692 |
- //get weight
|
693 |
- if(w) {
|
694 |
- //check weights > 0
|
695 |
- if(w[i] > 0)
|
696 |
- weight = w[i];
|
697 |
- else {
|
698 |
- gROOT->Error("TEfficiency::Combine","invalid custom weight found w = %.2lf",w[i]);
|
699 |
- gROOT->Info("TEfficiency::Combine","stop combining");
|
700 |
- return -1;
|
701 |
- }
|
702 |
- }
|
703 |
- //no weights given -> all objects get the same weight
|
704 |
- else
|
705 |
- weight = 1;
|
706 |
|
707 |
- if(bModWeights)
|
708 |
- weight *= total[i];
|
709 |
-
|
710 |
- sumweights += weight;
|
711 |
- //check: total >= pass
|
712 |
+ //LM: new formula for combination
|
713 |
+ // works only if alpha beta are the same always
|
714 |
+ // the weights are normalized to w(i) -> N_eff w(i)/ Sum w(i)
|
715 |
+ // i.e. w(i) -> Sum (w(i) / Sum (w(i)^2) * w(i)
|
716 |
+ // norm = Sum (w(i) / Sum (w(i)^2)
|
717 |
+ double ntot = 0;
|
718 |
+ double ktot = 0;
|
719 |
+ double sumw = 0;
|
720 |
+ double sumw2 = 0;
|
721 |
+ for (int i = 0; i < n ; ++i) {
|
722 |
if(pass[i] > total[i]) {
|
723 |
- gROOT->Error("TEfficiency::Combine","total events = %i < passed events %i",total[i],pass[i]);
|
724 |
- gROOT->Info("TEfficiency::Combine","stop combining");
|
725 |
+ ::Error("TEfficiency::Combine","total events = %i < passed events %i",total[i],pass[i]);
|
726 |
+ ::Info("TEfficiency::Combine","stop combining");
|
727 |
return -1;
|
728 |
}
|
729 |
- formula += TString::Format("+ %lf * TMath::BetaIncomplete(x,%lf,%lf) ",weight,
|
730 |
- pass[i]+alpha[i],total[i]-pass[i]+beta[i]);
|
731 |
|
732 |
- //add combined efficiency
|
733 |
- if(total[i] + alpha[i] + beta[i])
|
734 |
- mean += weight * (pass[i] + alpha[i])/(total[i] + alpha[i] + beta[i]);
|
735 |
- }
|
736 |
- formula += TString::Format(") / %lf",sumweights);
|
737 |
+ ntot += w[i] * total[i];
|
738 |
+ ktot += w[i] * pass[i];
|
739 |
+ sumw += w[i];
|
740 |
+ sumw2 += w[i]*w[i];
|
741 |
+ //mean += w[i] * (pass[i] + alpha[i])/(total[i] + alpha[i] + beta[i]);
|
742 |
+ }
|
743 |
+ double norm = sumw/sumw2;
|
744 |
+ ntot *= norm;
|
745 |
+ ktot *= norm;
|
746 |
+ if(ktot > ntot) {
|
747 |
+ ::Error("TEfficiency::Combine","total = %f < passed %f",ntot,ktot);
|
748 |
+ ::Info("TEfficiency::Combine","stop combining");
|
749 |
+ return -1;
|
750 |
+ }
|
751 |
+
|
752 |
+ double a = ktot + alpha;
|
753 |
+ double b = ntot - ktot + beta;
|
754 |
|
755 |
- //create pdf function
|
756 |
- TF1* pdf = new TF1("pdf",formula.Data(),0,1);
|
757 |
-
|
758 |
- //get quantiles for (1-level)/2 and (1+level)/2
|
759 |
- low = pdf->GetX((1-level)/2,0,1);
|
760 |
- up = pdf->GetX((1+level)/2,0,1);
|
761 |
+ double mean = a/(a+b);
|
762 |
+ double mode = BetaMode(a,b);
|
763 |
|
764 |
- delete pdf;
|
765 |
|
766 |
- mean = mean/sumweights;
|
767 |
+ Bool_t shortestInterval = option.Contains("sh") || ( option.Contains("mode") && !option.Contains("cent") );
|
768 |
|
769 |
- return mean;
|
770 |
-}
|
771 |
+ if (shortestInterval)
|
772 |
+ BetaShortestInterval(level, a, b, low, up);
|
773 |
+ else {
|
774 |
+ low = BetaCentralInterval(level, a, b, false);
|
775 |
+ up = BetaCentralInterval(level, a, b, true);
|
776 |
+ }
|
777 |
+
|
778 |
+ if (option.Contains("mode")) return mode;
|
779 |
+ return mean;
|
780 |
|
781 |
+}
|
782 |
//______________________________________________________________________________
|
783 |
TGraphAsymmErrors* TEfficiency::Combine(TCollection* pList,Option_t* option,
|
784 |
Int_t n,const Double_t* w)
|
785 |
@@ -1511,21 +1742,14 @@
|
786 |
//- options
|
787 |
// + s : strict combining; only TEfficiency objects with the same beta
|
788 |
// prior and the flag kIsBayesian == true are combined
|
789 |
+ // If not specified the prior parameter of the first TEfficiency object is used
|
790 |
// + v : verbose mode; print information about combining
|
791 |
// + cl=x : set confidence level (0 < cl < 1). If not specified, the
|
792 |
// confidence level of the first TEfficiency object is used.
|
793 |
- // + N : for each bin i the weight of each TEfficiency object j in pList
|
794 |
- // is multiplied by the number of total events as
|
795 |
- // Begin_Latex w{i,j} = weight{j} #times total{j}->GetBinContent(i) End_Latex
|
796 |
- // Begin_Html
|
797 |
- // <ul>
|
798 |
- // <li>w{i,j}: weight of bin i and TEfficiency object j</li>
|
799 |
- // <li>weight{j}: global weight of TEfficiency object j (either GetWeight() or w[j])</li>
|
800 |
- // <li>total{j}: histogram containing the total events of TEfficiency object j</li>
|
801 |
- // </ul>
|
802 |
- // End_Html
|
803 |
- // Otherwise the weights for the TEfficiency objects are global and
|
804 |
- // the same for each bin.
|
805 |
+ // + mode : Use mode of combined posterior as estimated value for the efficiency
|
806 |
+ // + shortest: compute shortest interval (done by default if mode option is set)
|
807 |
+ // + central: compute central interval (done by default if mode option is NOT set)
|
808 |
+ //
|
809 |
//- n : number of weights (has to be the number of one-dimensional
|
810 |
// TEfficiency objects in pList)
|
811 |
// If no weights are passed, the internal weights GetWeight() of
|
812 |
@@ -1540,8 +1764,8 @@
|
813 |
opt.ToLower();
|
814 |
|
815 |
//parameter of prior distribution, confidence level and normalisation factor
|
816 |
- Double_t alpha = 0;
|
817 |
- Double_t beta = 0;
|
818 |
+ Double_t alpha = -1;
|
819 |
+ Double_t beta = -1;
|
820 |
Double_t level = 0;
|
821 |
|
822 |
//flags for combining
|
823 |
@@ -1549,11 +1773,11 @@
|
824 |
Bool_t bOutput = false;
|
825 |
Bool_t bWeights = false;
|
826 |
//list of all information needed to weight and combine efficiencies
|
827 |
- std::vector<TH1*> vTotal;
|
828 |
- std::vector<TH1*> vPassed;
|
829 |
- std::vector<Double_t> vWeights;
|
830 |
- std::vector<Double_t> vAlpha;
|
831 |
- std::vector<Double_t> vBeta;
|
832 |
+ std::vector<TH1*> vTotal; vTotal.reserve(n);
|
833 |
+ std::vector<TH1*> vPassed; vPassed.reserve(n);
|
834 |
+ std::vector<Double_t> vWeights; vWeights.reserve(n);
|
835 |
+// std::vector<Double_t> vAlpha;
|
836 |
+// std::vector<Double_t> vBeta;
|
837 |
|
838 |
if(opt.Contains("s")) {
|
839 |
opt.ReplaceAll("s","");
|
840 |
@@ -1566,7 +1790,8 @@
|
841 |
}
|
842 |
|
843 |
if(opt.Contains("cl=")) {
|
844 |
- sscanf(strstr(opt.Data(),"cl="),"cl=%lf",&level);
|
845 |
+ Ssiz_t pos = opt.Index("cl=") + 3;
|
846 |
+ level = atof( opt(pos,opt.Length() ).Data() );
|
847 |
if((level <= 0) || (level >= 1))
|
848 |
level = 0;
|
849 |
opt.ReplaceAll("cl=","");
|
850 |
@@ -1596,13 +1821,13 @@
|
851 |
if(pEff->GetDimension() > 1)
|
852 |
continue;
|
853 |
if(!level) level = pEff->GetConfidenceLevel();
|
854 |
+
|
855 |
+ if(alpha<1) alpha = pEff->GetBetaAlpha();
|
856 |
+ if(beta<1) beta = pEff->GetBetaBeta();
|
857 |
|
858 |
//if strict combining, check priors, confidence level and statistic
|
859 |
if(bStrict) {
|
860 |
- if(!alpha) alpha = pEff->GetBetaAlpha();
|
861 |
- if(!beta) beta = pEff->GetBetaBeta();
|
862 |
-
|
863 |
- if(alpha != pEff->GetBetaAlpha())
|
864 |
+ if(alpha != pEff->GetBetaAlpha())
|
865 |
continue;
|
866 |
if(beta != pEff->GetBetaBeta())
|
867 |
continue;
|
868 |
@@ -1618,14 +1843,14 @@
|
869 |
vWeights.push_back(pEff->fWeight);
|
870 |
|
871 |
//strict combining -> using global prior
|
872 |
- if(bStrict) {
|
873 |
- vAlpha.push_back(alpha);
|
874 |
- vBeta.push_back(beta);
|
875 |
- }
|
876 |
- else {
|
877 |
- vAlpha.push_back(pEff->GetBetaAlpha());
|
878 |
- vBeta.push_back(pEff->GetBetaBeta());
|
879 |
- }
|
880 |
+// if(bStrict) {
|
881 |
+// vAlpha.push_back(alpha);
|
882 |
+// vBeta.push_back(beta);
|
883 |
+// }
|
884 |
+// else {
|
885 |
+// vAlpha.push_back(pEff->GetBetaAlpha());
|
886 |
+// vBeta.push_back(pEff->GetBetaBeta());
|
887 |
+// }
|
888 |
}
|
889 |
}
|
890 |
|
891 |
@@ -1669,22 +1894,18 @@
|
892 |
}
|
893 |
|
894 |
//create TGraphAsymmErrors with efficiency
|
895 |
- Double_t* x = new Double_t[nbins_max];
|
896 |
- Double_t* xlow = new Double_t[nbins_max];
|
897 |
- Double_t* xhigh = new Double_t[nbins_max];
|
898 |
- Double_t* eff = new Double_t[nbins_max];
|
899 |
- Double_t* efflow = new Double_t[nbins_max];
|
900 |
- Double_t* effhigh = new Double_t[nbins_max];
|
901 |
+ std::vector<Double_t> x(nbins_max);
|
902 |
+ std::vector<Double_t> xlow(nbins_max);
|
903 |
+ std::vector<Double_t> xhigh(nbins_max);
|
904 |
+ std::vector<Double_t> eff(nbins_max);
|
905 |
+ std::vector<Double_t> efflow(nbins_max);
|
906 |
+ std::vector<Double_t> effhigh(nbins_max);
|
907 |
|
908 |
//parameters for combining:
|
909 |
//number of objects
|
910 |
Int_t num = vTotal.size();
|
911 |
- //shape parameters
|
912 |
- Double_t* a = new Double_t[n];
|
913 |
- Double_t* b = new Double_t[n];
|
914 |
- Int_t* pass = new Int_t[n];
|
915 |
- Int_t* total = new Int_t[n];
|
916 |
- Double_t* weights = new Double_t[n];
|
917 |
+ std::vector<Int_t> pass(num);
|
918 |
+ std::vector<Int_t> total(num);
|
919 |
|
920 |
//loop over all bins
|
921 |
Double_t low, up;
|
922 |
@@ -1695,67 +1916,63 @@
|
923 |
xhigh[i-1] = vTotal.at(0)->GetBinWidth(i) - xlow[i-1];
|
924 |
|
925 |
for(Int_t j = 0; j < num; ++j) {
|
926 |
- a[j] = vAlpha.at(j);
|
927 |
- b[j] = vBeta.at(j);
|
928 |
pass[j] = (Int_t)(vPassed.at(j)->GetBinContent(i) + 0.5);
|
929 |
total[j] = (Int_t)(vTotal.at(j)->GetBinContent(i) + 0.5);
|
930 |
- weights[j] = vWeights.at(j);
|
931 |
}
|
932 |
|
933 |
//fill efficiency and errors
|
934 |
- eff[i-1] = Combine(up,low,num,pass,total,a,b,level,weights,opt.Data());
|
935 |
+ eff[i-1] = Combine(up,low,num,&pass[0],&total[0],alpha,beta,level,&vWeights[0],opt.Data());
|
936 |
//did an error occured ?
|
937 |
if(eff[i-1] == -1) {
|
938 |
gROOT->Error("TEfficiency::Combine","error occured during combining");
|
939 |
gROOT->Info("TEfficiency::Combine","stop combining");
|
940 |
- //free memory
|
941 |
- delete [] x;
|
942 |
- delete [] xlow;
|
943 |
- delete [] xhigh;
|
944 |
- delete [] eff;
|
945 |
- delete [] efflow;
|
946 |
- delete [] effhigh;
|
947 |
- delete [] pass;
|
948 |
- delete [] total;
|
949 |
- delete [] weights;
|
950 |
- delete [] a;
|
951 |
- delete [] b;
|
952 |
return 0;
|
953 |
}
|
954 |
efflow[i-1]= eff[i-1] - low;
|
955 |
effhigh[i-1]= up - eff[i-1];
|
956 |
}//loop over all bins
|
957 |
|
958 |
- TGraphAsymmErrors* gr = new TGraphAsymmErrors(nbins_max,x,eff,xlow,xhigh,efflow,effhigh);
|
959 |
-
|
960 |
- delete [] x;
|
961 |
- delete [] xlow;
|
962 |
- delete [] xhigh;
|
963 |
- delete [] eff;
|
964 |
- delete [] efflow;
|
965 |
- delete [] effhigh;
|
966 |
- delete [] pass;
|
967 |
- delete [] total;
|
968 |
- delete [] weights;
|
969 |
- delete [] a;
|
970 |
- delete [] b;
|
971 |
+ TGraphAsymmErrors* gr = new TGraphAsymmErrors(nbins_max,&x[0],&eff[0],&xlow[0],&xhigh[0],&efflow[0],&effhigh[0]);
|
972 |
|
973 |
return gr;
|
974 |
}
|
975 |
|
976 |
//______________________________________________________________________________
|
977 |
-void TEfficiency::Draw(const Option_t* opt)
|
978 |
+Int_t TEfficiency::DistancetoPrimitive(Int_t px, Int_t py)
|
979 |
+{
|
980 |
+ // Compute distance from point px,py to a graph.
|
981 |
+ //
|
982 |
+ // Compute the closest distance of approach from point px,py to this line.
|
983 |
+ // The distance is computed in pixels units.
|
984 |
+ //
|
985 |
+ // Forward the call to the painted graph
|
986 |
+
|
987 |
+ if (fPaintGraph) return fPaintGraph->DistancetoPrimitive(px,py);
|
988 |
+ if (fPaintHisto) return fPaintHisto->DistancetoPrimitive(px,py);
|
989 |
+ return 0;
|
990 |
+}
|
991 |
+
|
992 |
+
|
993 |
+//______________________________________________________________________________
|
994 |
+void TEfficiency::Draw(Option_t* opt)
|
995 |
{
|
996 |
//draws the current TEfficiency object
|
997 |
//
|
998 |
//options:
|
999 |
- //- 1-dimensional case: same options as TGraphAsymmErrors::Draw()
|
1000 |
+ //- 1-dimensional case: same options as TGraphAsymmErrors::Draw()
|
1001 |
+ // but as default "AP" is used
|
1002 |
//- 2-dimensional case: same options as TH2::Draw()
|
1003 |
//- 3-dimensional case: not yet supported
|
1004 |
+ //
|
1005 |
+ // specific TEfficiency drawing options:
|
1006 |
+ // - E0 - plot bins where the total number of passed events is zero
|
1007 |
+ // (the error interval will be [0,1] )
|
1008 |
|
1009 |
//check options
|
1010 |
TString option = opt;
|
1011 |
option.ToLower();
|
1012 |
+ // use by default "AP"
|
1013 |
+ if (option.IsNull() ) option = "ap";
|
1014 |
|
1015 |
if(gPad && !option.Contains("same"))
|
1016 |
gPad->Clear();
|
1017 |
@@ -1764,6 +1981,22 @@
|
1018 |
}
|
1019 |
|
1020 |
//______________________________________________________________________________
|
1021 |
+void TEfficiency::ExecuteEvent(Int_t event, Int_t px, Int_t py)
|
1022 |
+{
|
1023 |
+ // Execute action corresponding to one event.
|
1024 |
+ //
|
1025 |
+ // This member function is called when the drawn class is clicked with the locator
|
1026 |
+ // If Left button clicked on one of the line end points, this point
|
1027 |
+ // follows the cursor until button is released.
|
1028 |
+ //
|
1029 |
+ // if Middle button clicked, the line is moved parallel to itself
|
1030 |
+ // until the button is released.
|
1031 |
+ // Forward the call to the underlying graph
|
1032 |
+ if (fPaintGraph) fPaintGraph->ExecuteEvent(event,px,py);
|
1033 |
+ else if (fPaintHisto) fPaintHisto->ExecuteEvent(event,px,py);
|
1034 |
+}
|
1035 |
+
|
1036 |
+//______________________________________________________________________________
|
1037 |
void TEfficiency::Fill(Bool_t bPassed,Double_t x,Double_t y,Double_t z)
|
1038 |
{
|
1039 |
//This function is used for filling the two histograms.
|
1040 |
@@ -1942,14 +2175,29 @@
|
1041 |
// for bayesian ones the expectation value of the resulting posterior
|
1042 |
// distribution is returned:
|
1043 |
// Begin_Latex #hat{#varepsilon} = #frac{passed + #alpha}{total + #alpha + #beta} End_Latex
|
1044 |
+ // If the bit kPosteriorMode is set (or the method TEfficiency::UsePosteriorMode() has been called ) the
|
1045 |
+ // mode (most probable value) of the posterior is returned:
|
1046 |
+ // Begin_Latex #hat{#varepsilon} = #frac{passed + #alpha -1}{total + #alpha + #beta -2} End_Latex
|
1047 |
+ //
|
1048 |
// - If the denominator is equal to 0, an efficiency of 0 is returned.
|
1049 |
-
|
1050 |
+ // - When Begin_Latex passed + #alpha < 1 End_Latex or Begin_Latex total - passed + #beta < 1 End_Latex the above
|
1051 |
+ // formula for the mode is not valid. In these cases the estimated efficiency is 0 or 1.
|
1052 |
+
|
1053 |
Int_t total = (Int_t)fTotalHistogram->GetBinContent(bin);
|
1054 |
Int_t passed = (Int_t)fPassedHistogram->GetBinContent(bin);
|
1055 |
|
1056 |
- if(TestBit(kIsBayesian))
|
1057 |
- return (total + fBeta_alpha + fBeta_beta)?
|
1058 |
- (passed + fBeta_alpha)/(total + fBeta_alpha + fBeta_beta): 0;
|
1059 |
+ if(TestBit(kIsBayesian)) {
|
1060 |
+
|
1061 |
+ // parameters for the beta prior distribution
|
1062 |
+ Double_t alpha = TestBit(kUseBinPrior) ? GetBetaAlpha(bin) : GetBetaAlpha();
|
1063 |
+ Double_t beta = TestBit(kUseBinPrior) ? GetBetaBeta(bin) : GetBetaBeta();
|
1064 |
+
|
1065 |
+ if (!TestBit(kPosteriorMode) )
|
1066 |
+ return BetaMean(double(passed) + alpha , double(total-passed) + beta);
|
1067 |
+ else
|
1068 |
+ return BetaMode(double(passed) + alpha , double(total-passed) + beta);
|
1069 |
+
|
1070 |
+ }
|
1071 |
else
|
1072 |
return (total)? ((Double_t)passed)/total : 0;
|
1073 |
}
|
1074 |
@@ -1967,9 +2215,12 @@
|
1075 |
Int_t passed = (Int_t)fPassedHistogram->GetBinContent(bin);
|
1076 |
|
1077 |
Double_t eff = GetEfficiency(bin);
|
1078 |
+ // parameters for the beta prior distribution
|
1079 |
+ Double_t alpha = TestBit(kUseBinPrior) ? GetBetaAlpha(bin) : GetBetaAlpha();
|
1080 |
+ Double_t beta = TestBit(kUseBinPrior) ? GetBetaBeta(bin) : GetBetaBeta();
|
1081 |
|
1082 |
if(TestBit(kIsBayesian))
|
1083 |
- return (eff - Bayesian(total,passed,fConfLevel,fBeta_alpha,fBeta_beta,false));
|
1084 |
+ return (eff - Bayesian(total,passed,fConfLevel,alpha,beta,false,TestBit(kShortestInterval)));
|
1085 |
else
|
1086 |
return (eff - fBoundary(total,passed,fConfLevel,false));
|
1087 |
}
|
1088 |
@@ -1987,9 +2238,12 @@
|
1089 |
Int_t passed = (Int_t)fPassedHistogram->GetBinContent(bin);
|
1090 |
|
1091 |
Double_t eff = GetEfficiency(bin);
|
1092 |
+ // parameters for the beta prior distribution
|
1093 |
+ Double_t alpha = TestBit(kUseBinPrior) ? GetBetaAlpha(bin) : GetBetaAlpha();
|
1094 |
+ Double_t beta = TestBit(kUseBinPrior) ? GetBetaBeta(bin) : GetBetaBeta();
|
1095 |
|
1096 |
if(TestBit(kIsBayesian))
|
1097 |
- return (Bayesian(total,passed,fConfLevel,fBeta_alpha,fBeta_beta,true) - eff);
|
1098 |
+ return (Bayesian(total,passed,fConfLevel,alpha,beta,true,TestBit(kShortestInterval)) - eff);
|
1099 |
else
|
1100 |
return fBoundary(total,passed,fConfLevel,true) - eff;
|
1101 |
}
|
1102 |
@@ -2057,6 +2311,7 @@
|
1103 |
//End_Latex
|
1104 |
|
1105 |
Double_t alpha = (1.0 - level)/2;
|
1106 |
+ if (total == 0) return (bUpper) ? 1 : 0;
|
1107 |
Double_t average = ((Double_t)passed) / total;
|
1108 |
Double_t sigma = std::sqrt(average * (1 - average) / total);
|
1109 |
Double_t delta = ROOT::Math::normal_quantile(1 - alpha,sigma);
|
1110 |
@@ -2082,6 +2337,26 @@
|
1111 |
//weight of rhs = Begin_Latex w_{2} End_Latex
|
1112 |
//Begin_Latex w_{new} = \frac{w_{1} \times w_{2}}{w_{1} + w_{2}}End_Latex
|
1113 |
|
1114 |
+
|
1115 |
+ if (fTotalHistogram == 0 && fPassedHistogram == 0) {
|
1116 |
+ // efficiency is empty just copy it over
|
1117 |
+ *this = rhs;
|
1118 |
+ return *this;
|
1119 |
+ }
|
1120 |
+ else if (fTotalHistogram == 0 || fPassedHistogram == 0) {
|
1121 |
+ Fatal("operator+=","Adding to a non consistent TEfficiency object which has not a total or a passed histogram ");
|
1122 |
+ return *this;
|
1123 |
+ }
|
1124 |
+
|
1125 |
+ if (rhs.fTotalHistogram == 0 && rhs.fTotalHistogram == 0 ) {
|
1126 |
+ Warning("operator+=","no operation: adding an empty object");
|
1127 |
+ return *this;
|
1128 |
+ }
|
1129 |
+ else if (rhs.fTotalHistogram == 0 || rhs.fTotalHistogram == 0 ) {
|
1130 |
+ Fatal("operator+=","Adding a non consistent TEfficiency object which has not a total or a passed histogram ");
|
1131 |
+ return *this;
|
1132 |
+ }
|
1133 |
+
|
1134 |
fTotalHistogram->ResetBit(TH1::kIsAverage);
|
1135 |
fPassedHistogram->ResetBit(TH1::kIsAverage);
|
1136 |
|
1137 |
@@ -2148,6 +2423,18 @@
|
1138 |
//paints this TEfficiency object
|
1139 |
//
|
1140 |
//For details on the possible option see Draw(Option_t*)
|
1141 |
+ //
|
1142 |
+ // Note for 1D classes
|
1143 |
+ // In 1D the TEfficiency uses a TGraphAsymmErrors for drawing
|
1144 |
+ // The TGraph is created only the first time Paint is used. The user can manipulate the
|
1145 |
+ // TGraph via the method TEfficiency::GetPaintedGraph()
|
1146 |
+ // The TGraph creates behind the scenes a histogram for the axis. The histogram is also created only the first time.
|
1147 |
+ // If the axis needs to be updated because in the meantime the class changed use this trick
|
1148 |
+ // which will trigger a re-calculation of the axis of the graph
|
1149 |
+ // TEfficiency::GetPaintedGraph()->Set(0)
|
1150 |
+ //
|
1151 |
+
|
1152 |
+
|
1153 |
|
1154 |
if(!gPad)
|
1155 |
return;
|
1156 |
@@ -2155,6 +2442,9 @@
|
1157 |
TString option = opt;
|
1158 |
option.ToLower();
|
1159 |
|
1160 |
+ Bool_t plot0Bins = false;
|
1161 |
+ if (option.Contains("e0") ) plot0Bins = true;
|
1162 |
+
|
1163 |
//use TGraphAsymmErrors for painting
|
1164 |
if(GetDimension() == 1) {
|
1165 |
Int_t npoints = fTotalHistogram->GetNbinsX();
|
1166 |
@@ -2162,25 +2452,61 @@
|
1167 |
fPaintGraph = new TGraphAsymmErrors(npoints);
|
1168 |
fPaintGraph->SetName("eff_graph");
|
1169 |
}
|
1170 |
- //refresh title before painting
|
1171 |
- fPaintGraph->SetTitle(GetTitle());
|
1172 |
|
1173 |
//errors for points
|
1174 |
- Double_t xlow,xup,ylow,yup;
|
1175 |
- //point i corresponds to bin i+1 in histogram
|
1176 |
- for(Int_t i = 0; i < npoints; ++i) {
|
1177 |
- fPaintGraph->SetPoint(i,fTotalHistogram->GetBinCenter(i+1),GetEfficiency(i+1));
|
1178 |
+ Double_t x,y,xlow,xup,ylow,yup;
|
1179 |
+ //point i corresponds to bin i+1 in histogram
|
1180 |
+ // point j is point graph index
|
1181 |
+ // LM: cannot use TGraph::SetPoint because it deletes the underlying
|
1182 |
+ // histogram each time (see TGraph::SetPoint)
|
1183 |
+ // so use it only when extra points are added to the graph
|
1184 |
+ Int_t j = 0;
|
1185 |
+ double * px = fPaintGraph->GetX();
|
1186 |
+ double * py = fPaintGraph->GetY();
|
1187 |
+ double * exl = fPaintGraph->GetEXlow();
|
1188 |
+ double * exh = fPaintGraph->GetEXhigh();
|
1189 |
+ double * eyl = fPaintGraph->GetEYlow();
|
1190 |
+ double * eyh = fPaintGraph->GetEYhigh();
|
1191 |
+ for (Int_t i = 0; i < npoints; ++i) {
|
1192 |
+ if (!plot0Bins && fTotalHistogram->GetBinContent(i+1) == 0 ) continue;
|
1193 |
+ x = fTotalHistogram->GetBinCenter(i+1);
|
1194 |
+ y = GetEfficiency(i+1);
|
1195 |
xlow = fTotalHistogram->GetBinCenter(i+1) - fTotalHistogram->GetBinLowEdge(i+1);
|
1196 |
xup = fTotalHistogram->GetBinWidth(i+1) - xlow;
|
1197 |
ylow = GetEfficiencyErrorLow(i+1);
|
1198 |
yup = GetEfficiencyErrorUp(i+1);
|
1199 |
- fPaintGraph->SetPointError(i,xlow,xup,ylow,yup);
|
1200 |
- }
|
1201 |
+ // in the case the graph already existed and extra points have been added
|
1202 |
+ if (j >= fPaintGraph->GetN() ) {
|
1203 |
+ fPaintGraph->SetPoint(j,x,y);
|
1204 |
+ fPaintGraph->SetPointError(j,xlow,xup,ylow,yup);
|
1205 |
+ }
|
1206 |
+ else {
|
1207 |
+ px[j] = x;
|
1208 |
+ py[j] = y;
|
1209 |
+ exl[j] = xlow;
|
1210 |
+ exh[j] = xup;
|
1211 |
+ eyl[j] = ylow;
|
1212 |
+ eyh[j] = yup;
|
1213 |
+ }
|
1214 |
+ j++;
|
1215 |
+ }
|
1216 |
+
|
1217 |
+ // tell the graph the effective number of points
|
1218 |
+ fPaintGraph->Set(j);
|
1219 |
+ //refresh title before painting if changed
|
1220 |
+ TString oldTitle = fPaintGraph->GetTitle();
|
1221 |
+ TString newTitle = GetTitle();
|
1222 |
+ if (oldTitle != newTitle )
|
1223 |
+ fPaintGraph->SetTitle(newTitle);
|
1224 |
|
1225 |
//copying style information
|
1226 |
TAttLine::Copy(*fPaintGraph);
|
1227 |
TAttFill::Copy(*fPaintGraph);
|
1228 |
TAttMarker::Copy(*fPaintGraph);
|
1229 |
+
|
1230 |
+ // this method forces the graph to compute correctly the axis
|
1231 |
+ // according to the given points
|
1232 |
+ fPaintGraph->GetHistogram();
|
1233 |
|
1234 |
//paint graph
|
1235 |
fPaintGraph->Paint(option.Data());
|
1236 |
@@ -2206,9 +2532,24 @@
|
1237 |
if(GetDimension() == 2) {
|
1238 |
Int_t nbinsx = fTotalHistogram->GetNbinsX();
|
1239 |
Int_t nbinsy = fTotalHistogram->GetNbinsY();
|
1240 |
+ TAxis * xaxis = fTotalHistogram->GetXaxis();
|
1241 |
+ TAxis * yaxis = fTotalHistogram->GetYaxis();
|
1242 |
if(!fPaintHisto) {
|
1243 |
- fPaintHisto = new TH2F("eff_histo",GetTitle(),nbinsx,fTotalHistogram->GetXaxis()->GetXbins()->GetArray(),
|
1244 |
- nbinsy,fTotalHistogram->GetYaxis()->GetXbins()->GetArray());
|
1245 |
+ if (xaxis->IsVariableBinSize() && yaxis->IsVariableBinSize() )
|
1246 |
+ fPaintHisto = new TH2F("eff_histo",GetTitle(),nbinsx,xaxis->GetXbins()->GetArray(),
|
1247 |
+ nbinsy,yaxis->GetXbins()->GetArray());
|
1248 |
+ else if (xaxis->IsVariableBinSize() && ! yaxis->IsVariableBinSize() )
|
1249 |
+ fPaintHisto = new TH2F("eff_histo",GetTitle(),nbinsx,xaxis->GetXbins()->GetArray(),
|
1250 |
+ nbinsy,yaxis->GetXmin(), yaxis->GetXmax());
|
1251 |
+ else if (!xaxis->IsVariableBinSize() && yaxis->IsVariableBinSize() )
|
1252 |
+ fPaintHisto = new TH2F("eff_histo",GetTitle(),nbinsx,xaxis->GetXmin(), xaxis->GetXmax(),
|
1253 |
+ nbinsy,yaxis->GetXbins()->GetArray());
|
1254 |
+ else
|
1255 |
+ fPaintHisto = new TH2F("eff_histo",GetTitle(),nbinsx,xaxis->GetXmin(), xaxis->GetXmax(),
|
1256 |
+ nbinsy,yaxis->GetXmin(), yaxis->GetXmax());
|
1257 |
+
|
1258 |
+
|
1259 |
+
|
1260 |
fPaintHisto->SetDirectory(0);
|
1261 |
}
|
1262 |
//refresh title before each painting
|
1263 |
@@ -2426,6 +2767,34 @@
|
1264 |
}
|
1265 |
|
1266 |
//______________________________________________________________________________
|
1267 |
+void TEfficiency::SetBetaBinParameters(Int_t bin, Double_t alpha, Double_t beta)
|
1268 |
+{
|
1269 |
+ //sets different shape parameter Begin_Latex \alpha and \beta End_Latex
|
1270 |
+ // for the prior distribution for each bin. By default the global parameter are used if they are not set
|
1271 |
+ // for the specific bin
|
1272 |
+ //The prior probability of the efficiency is given by the beta distribution:
|
1273 |
+ //Begin_Latex
|
1274 |
+ // f(\varepsilon;\alpha;\beta) = \frac{1}{B(\alpha,\beta)} \varepsilon^{\alpha-1} (1 - \varepsilon)^{\beta-1}
|
1275 |
+ //End_Latex
|
1276 |
+ //
|
1277 |
+ //Note: - both shape parameters have to be positive (i.e. > 0)
|
1278 |
+
|
1279 |
+ if (!fPassedHistogram || !fTotalHistogram) return;
|
1280 |
+ TH1 * h1 = fTotalHistogram;
|
1281 |
+ // doing this I get h1->fN which is available only for a TH1D
|
1282 |
+ UInt_t n = h1->GetBin(h1->GetNbinsX()+1, h1->GetNbinsY()+1, h1->GetNbinsZ()+1 ) + 1;
|
1283 |
+
|
1284 |
+ // in case the vector is not created, use the default alpha, beta params
|
1285 |
+ if (fBeta_bin_params.size() != n )
|
1286 |
+ fBeta_bin_params = std::vector<std::pair<Double_t, Double_t> >(n, std::make_pair(fBeta_alpha, fBeta_beta) );
|
1287 |
+
|
1288 |
+ // vector contains also values for under/overflows
|
1289 |
+ fBeta_bin_params[bin] = std::make_pair(alpha,beta);
|
1290 |
+ SetBit(kUseBinPrior,true);
|
1291 |
+
|
1292 |
+}
|
1293 |
+
|
1294 |
+//______________________________________________________________________________
|
1295 |
void TEfficiency::SetConfidenceLevel(Double_t level)
|
1296 |
{
|
1297 |
//sets the confidence level (0 < level < 1)
|
1298 |
@@ -2542,7 +2911,7 @@
|
1299 |
}
|
1300 |
|
1301 |
//______________________________________________________________________________
|
1302 |
-void TEfficiency::SetStatisticOption(Int_t option)
|
1303 |
+void TEfficiency::SetStatisticOption(EStatOption option)
|
1304 |
{
|
1305 |
//sets the statistic option which affects the calculation of the confidence interval
|
1306 |
//
|
1307 |
@@ -2559,13 +2928,16 @@
|
1308 |
//- kFAC (=3) : using the Agresti-Coull interval
|
1309 |
// sets kIsBayesian = false
|
1310 |
// see also AgrestiCoull
|
1311 |
- //- kBJeffrey (=4) : using the Jeffrey interval
|
1312 |
+ //- kFFC (=4) : using the Feldman-Cousins frequentist method
|
1313 |
+ // sets kIsBayesian = false
|
1314 |
+ // see also FeldmanCousins
|
1315 |
+ //- kBJeffrey (=5) : using the Jeffrey interval
|
1316 |
// sets kIsBayesian = true, fBeta_alpha = 0.5 and fBeta_beta = 0.5
|
1317 |
// see also Bayesian
|
1318 |
- //- kBUniform (=5) : using a uniform prior
|
1319 |
+ //- kBUniform (=6) : using a uniform prior
|
1320 |
// sets kIsBayesian = true, fBeta_alpha = 1 and fBeta_beta = 1
|
1321 |
// see also Bayesian
|
1322 |
- //- kBBayesian (=6) : using a custom prior defined by fBeta_alpha and fBeta_beta
|
1323 |
+ //- kBBayesian (=7) : using a custom prior defined by fBeta_alpha and fBeta_beta
|
1324 |
// sets kIsBayesian = true
|
1325 |
// see also Bayesian
|
1326 |
|
1327 |
@@ -2589,15 +2961,21 @@
|
1328 |
fBoundary = &AgrestiCoull;
|
1329 |
SetBit(kIsBayesian,false);
|
1330 |
break;
|
1331 |
+ case kFFC:
|
1332 |
+ fBoundary = &FeldmanCousins;
|
1333 |
+ SetBit(kIsBayesian,false);
|
1334 |
+ break;
|
1335 |
case kBJeffrey:
|
1336 |
fBeta_alpha = 0.5;
|
1337 |
fBeta_beta = 0.5;
|
1338 |
SetBit(kIsBayesian,true);
|
1339 |
+ SetBit(kUseBinPrior,false);
|
1340 |
break;
|
1341 |
case kBUniform:
|
1342 |
fBeta_alpha = 1;
|
1343 |
fBeta_beta = 1;
|
1344 |
SetBit(kIsBayesian,true);
|
1345 |
+ SetBit(kUseBinPrior,false);
|
1346 |
break;
|
1347 |
case kBBayesian:
|
1348 |
SetBit(kIsBayesian,true);
|
1349 |
@@ -2744,6 +3122,7 @@
|
1350 |
//End_Latex
|
1351 |
|
1352 |
Double_t alpha = (1.0 - level)/2;
|
1353 |
+ if (total == 0) return (bUpper) ? 1 : 0;
|
1354 |
Double_t average = ((Double_t)passed) / total;
|
1355 |
Double_t kappa = ROOT::Math::normal_quantile(1 - alpha,1);
|
1356 |
|
1357 |
diff -Naur orig.root/hist/hist/src/TEfficiencyHelper.h root/hist/hist/src/TEfficiencyHelper.h
|
1358 |
--- orig.root/hist/hist/src/TEfficiencyHelper.h 1970-01-01 01:00:00.000000000 +0100
|
1359 |
+++ root/hist/hist/src/TEfficiencyHelper.h 2011-02-09 13:32:42.000000000 +0100
|
1360 |
@@ -0,0 +1,191 @@
|
1361 |
+// @(#)root/mathcore:$Id: TEfficiencyHelper.h 36767 2010-11-19 10:22:55Z moneta $
|
1362 |
+// Author: L. Moneta Nov 2010
|
1363 |
+
|
1364 |
+/**********************************************************************
|
1365 |
+ * *
|
1366 |
+ * Copyright (c) 2010 LCG ROOT Math Team, CERN/PH-SFT *
|
1367 |
+ * *
|
1368 |
+ * *
|
1369 |
+ **********************************************************************/
|
1370 |
+// helper class for binomial Neyman intervals
|
1371 |
+// author Jordan Tucker
|
1372 |
+// integration in CMSSW: Luca Lista
|
1373 |
+// modified and integrated in ROOT: Lorenzo Moneta
|
1374 |
+
|
1375 |
+
|
1376 |
+#ifndef TEFFiciencyHelper_h
|
1377 |
+#define TEFFiciencyHelper_h
|
1378 |
+
|
1379 |
+#include <algorithm>
|
1380 |
+#include <cmath>
|
1381 |
+#include <vector>
|
1382 |
+
|
1383 |
+#include "Math/PdfFuncMathCore.h"
|
1384 |
+
|
1385 |
+
|
1386 |
+// Helper class implementing the
|
1387 |
+// binomial probability and the likelihood ratio
|
1388 |
+// used for ordering the interval in the FeldmanCousins interval class
|
1389 |
+class BinomialProbHelper {
|
1390 |
+public:
|
1391 |
+ BinomialProbHelper(double rho, int x, int n)
|
1392 |
+ : fRho(rho), fX(x), fN(n),
|
1393 |
+ fRho_hat(double(x)/n),
|
1394 |
+ fProb(ROOT::Math::binomial_pdf(x, rho, n)) {
|
1395 |
+ // Cache the likelihood ratio L(\rho)/L(\hat{\rho}), too.
|
1396 |
+ if (x == 0)
|
1397 |
+ fLRatio = pow(1 - rho, n);
|
1398 |
+ else if (x == n)
|
1399 |
+ fLRatio = pow(rho, n);
|
1400 |
+ else
|
1401 |
+ fLRatio = pow(rho/fRho_hat, x) * pow((1 - rho)/(1 - fRho_hat), n - x);
|
1402 |
+ }
|
1403 |
+
|
1404 |
+ double Rho () const { return fRho; };
|
1405 |
+ int X () const { return fX; };
|
1406 |
+ int N () const { return fN; };
|
1407 |
+ double Prob () const { return fProb; };
|
1408 |
+ double LRatio() const { return fLRatio; };
|
1409 |
+
|
1410 |
+private:
|
1411 |
+ double fRho;
|
1412 |
+ int fX;
|
1413 |
+ int fN;
|
1414 |
+ double fRho_hat;
|
1415 |
+ double fProb;
|
1416 |
+ double fLRatio;
|
1417 |
+};
|
1418 |
+
|
1419 |
+
|
1420 |
+
|
1421 |
+// Implement noncentral binomial confidence intervals using the Neyman
|
1422 |
+// construction. The Sorter class gives the ordering of points,
|
1423 |
+// i.e. it must be a functor implementing a greater-than relationship
|
1424 |
+// between two prob_helper instances. See feldman_cousins for an
|
1425 |
+// example.
|
1426 |
+template <typename Sorter>
|
1427 |
+class BinomialNeymanInterval {
|
1428 |
+public:
|
1429 |
+
|
1430 |
+ BinomialNeymanInterval() :
|
1431 |
+ fLower(0),
|
1432 |
+ fUpper(1),
|
1433 |
+ fAlpha(0)
|
1434 |
+ {}
|
1435 |
+
|
1436 |
+ void Init(double alpha) {
|
1437 |
+ fAlpha = alpha;
|
1438 |
+ }
|
1439 |
+
|
1440 |
+ // Given a true value of rho and ntot trials, calculate the
|
1441 |
+ // acceptance set [x_l, x_r] for use in a Neyman construction.
|
1442 |
+ bool Find_rho_set(const double rho, const int ntot, int& x_l, int& x_r) const {
|
1443 |
+ // Get the binomial probabilities for every x = 0..n, and sort them
|
1444 |
+ // in decreasing order, determined by the Sorter class.
|
1445 |
+ std::vector<BinomialProbHelper> probs;
|
1446 |
+ for (int i = 0; i <= ntot; ++i)
|
1447 |
+ probs.push_back(BinomialProbHelper(rho, i, ntot));
|
1448 |
+ std::sort(probs.begin(), probs.end(), fSorter);
|
1449 |
+
|
1450 |
+ // Add up the probabilities until the total is 1 - alpha or
|
1451 |
+ // bigger, adding the biggest point first, then the next biggest,
|
1452 |
+ // etc. "Biggest" is given by the Sorter class and is taken care
|
1453 |
+ // of by the sort above. JMTBAD need to find equal probs and use
|
1454 |
+ // the sorter to differentiate between them.
|
1455 |
+ const double target = 1 - fAlpha;
|
1456 |
+ // An invalid interval.
|
1457 |
+ x_l = ntot;
|
1458 |
+ x_r = 0;
|
1459 |
+ double sum = 0;
|
1460 |
+ for (int i = 0; i <= ntot && sum < target; ++i) {
|
1461 |
+ sum += probs[i].Prob();
|
1462 |
+ const int& x = probs[i].X();
|
1463 |
+ if (x < x_l) x_l = x;
|
1464 |
+ if (x > x_r) x_r = x;
|
1465 |
+ }
|
1466 |
+
|
1467 |
+ return x_l <= x_r;
|
1468 |
+ }
|
1469 |
+
|
1470 |
+ // Construct nrho acceptance sets in rho = [0,1] given ntot trials
|
1471 |
+ // and put the results in already-allocated x_l and x_r.
|
1472 |
+ bool Neyman(const int ntot, const int nrho, double* rho, double* x_l, double* x_r) {
|
1473 |
+ int xL, xR;
|
1474 |
+ for (int i = 0; i < nrho; ++i) {
|
1475 |
+ rho[i] = double(i)/nrho;
|
1476 |
+ Find_rho_set(rho[i], ntot, xL, xR);
|
1477 |
+ x_l[i] = xL;
|
1478 |
+ x_r[i] = xR;
|
1479 |
+ }
|
1480 |
+ return true;
|
1481 |
+ }
|
1482 |
+
|
1483 |
+ // Given X successes and n trials, calculate the interval using the
|
1484 |
+ // rho acceptance sets implemented above.
|
1485 |
+ void Calculate(const double X, const double n) {
|
1486 |
+ Set(0, 1);
|
1487 |
+
|
1488 |
+ const double tol = 1e-9;
|
1489 |
+ double rho_min, rho_max, rho;
|
1490 |
+ int x_l, x_r;
|
1491 |
+
|
1492 |
+ // Binary search for the smallest rho whose acceptance set has right
|
1493 |
+ // endpoint X; this is the lower endpoint of the rho interval.
|
1494 |
+ rho_min = 0; rho_max = 1;
|
1495 |
+ while (std::abs(rho_max - rho_min) > tol) {
|
1496 |
+ rho = (rho_min + rho_max)/2;
|
1497 |
+ Find_rho_set(rho, int(n), x_l, x_r);
|
1498 |
+ if (x_r < X)
|
1499 |
+ rho_min = rho;
|
1500 |
+ else
|
1501 |
+ rho_max = rho;
|
1502 |
+ }
|
1503 |
+ fLower = rho;
|
1504 |
+
|
1505 |
+ // Binary search for the largest rho whose acceptance set has left
|
1506 |
+ // endpoint X; this is the upper endpoint of the rho interval.
|
1507 |
+ rho_min = 0; rho_max = 1;
|
1508 |
+ while (std::abs(rho_max - rho_min) > tol) {
|
1509 |
+ rho = (rho_min + rho_max)/2;
|
1510 |
+ Find_rho_set(rho, int(n), x_l, x_r);
|
1511 |
+ if (x_l > X)
|
1512 |
+ rho_max = rho;
|
1513 |
+ else
|
1514 |
+ rho_min = rho;
|
1515 |
+ }
|
1516 |
+ fUpper = rho;
|
1517 |
+ }
|
1518 |
+
|
1519 |
+ double Lower() const { return fLower; }
|
1520 |
+ double Upper() const { return fUpper; }
|
1521 |
+
|
1522 |
+private:
|
1523 |
+ Sorter fSorter;
|
1524 |
+
|
1525 |
+ double fLower;
|
1526 |
+ double fUpper;
|
1527 |
+
|
1528 |
+ double fAlpha;
|
1529 |
+
|
1530 |
+ void Set(double l, double u) { fLower = l; fUpper = u; }
|
1531 |
+
|
1532 |
+};
|
1533 |
+
|
1534 |
+
|
1535 |
+
|
1536 |
+
|
1537 |
+struct FeldmanCousinsSorter {
|
1538 |
+ bool operator()(const BinomialProbHelper& l, const BinomialProbHelper& r) const {
|
1539 |
+ return l.LRatio() > r.LRatio();
|
1540 |
+ }
|
1541 |
+};
|
1542 |
+
|
1543 |
+class FeldmanCousinsBinomialInterval : public BinomialNeymanInterval<FeldmanCousinsSorter> {
|
1544 |
+ //const char* name() const { return "Feldman-Cousins"; }
|
1545 |
+
|
1546 |
+};
|
1547 |
+
|
1548 |
+
|
1549 |
+
|
1550 |
+
|
1551 |
+#endif
|
1552 |
diff -Naur orig.root/hist/hist/src/TGraphAsymmErrors.cxx root/hist/hist/src/TGraphAsymmErrors.cxx
|
1553 |
--- orig.root/hist/hist/src/TGraphAsymmErrors.cxx 2010-11-05 15:46:35.000000000 +0100
|
1554 |
+++ root/hist/hist/src/TGraphAsymmErrors.cxx 2011-02-09 13:32:06.000000000 +0100
|
1555 |
@@ -330,10 +330,10 @@
|
1556 |
{
|
1557 |
//This function is only kept for backward compatibility.
|
1558 |
//You should rather use the Divide method.
|
1559 |
- //It calls Divide(pass,total,"cl=0.683 b(1,1)") which is equivalent to the
|
1560 |
+ //It calls Divide(pass,total,"cl=0.683 b(1,1) mode") which is equivalent to the
|
1561 |
//former BayesDivide method.
|
1562 |
|
1563 |
- Divide(pass,total,"cl=0.683 b(1,1)");
|
1564 |
+ Divide(pass,total,"cl=0.683 b(1,1) mode");
|
1565 |
}
|
1566 |
|
1567 |
//______________________________________________________________________________
|
1568 |
@@ -370,8 +370,14 @@
|
1569 |
// - w : Wilson interval (see TEfficiency::Wilson)
|
1570 |
// - n : normal approximation propagation (see TEfficiency::Normal)
|
1571 |
// - ac : Agresti-Coull interval (see TEfficiency::AgrestiCoull)
|
1572 |
+ // - fc : Feldman-Cousins interval (see TEfficiency::FeldmanCousinsInterval)
|
1573 |
// - b(a,b): bayesian interval using a prior probability ~Beta(a,b); a,b > 0
|
1574 |
// (see TEfficiency::Bayesian)
|
1575 |
+ // - mode : use mode of posterior for Bayesian interval (default is mean)
|
1576 |
+ // - shortest: use shortest interval (done by default if mode is set)
|
1577 |
+ // - central: use central interval (done by default if mode is NOT set)
|
1578 |
+ // - e0 : plot (in Bayesian case) efficiency and interval for bins where total=0
|
1579 |
+ // (default is to skip them)
|
1580 |
//
|
1581 |
// Note:
|
1582 |
// Unfortunately there is no straightforward approach for determining a confidence
|
1583 |
@@ -438,6 +444,7 @@
|
1584 |
//confidence level
|
1585 |
if(option.Contains("cl=")) {
|
1586 |
Double_t level = -1;
|
1587 |
+ // coverity [secure_coding : FALSE]
|
1588 |
sscanf(strstr(option.Data(),"cl="),"cl=%lf",&level);
|
1589 |
if((level > 0) && (level < 1))
|
1590 |
conf = level;
|
1591 |
@@ -469,6 +476,11 @@
|
1592 |
option.ReplaceAll("ac","");
|
1593 |
pBound = &TEfficiency::AgrestiCoull;
|
1594 |
}
|
1595 |
+ // Feldman-Cousins interval
|
1596 |
+ if(option.Contains("fc")) {
|
1597 |
+ option.ReplaceAll("fc","");
|
1598 |
+ pBound = &TEfficiency::FeldmanCousins;
|
1599 |
+ }
|
1600 |
|
1601 |
//bayesian with prior
|
1602 |
if(option.Contains("b(")) {
|
1603 |
@@ -486,6 +498,25 @@
|
1604 |
option.ReplaceAll("b(","");
|
1605 |
bIsBayesian = true;
|
1606 |
}
|
1607 |
+
|
1608 |
+ // use posterior mode
|
1609 |
+ Bool_t usePosteriorMode = false;
|
1610 |
+ if(bIsBayesian && option.Contains("mode") ) {
|
1611 |
+ usePosteriorMode = true;
|
1612 |
+ option.ReplaceAll("mode","");
|
1613 |
+ }
|
1614 |
+
|
1615 |
+ Bool_t plot0Bins = false;
|
1616 |
+ if(option.Contains("e0") ) {
|
1617 |
+ plot0Bins = true;
|
1618 |
+ option.ReplaceAll("e0","");
|
1619 |
+ }
|
1620 |
+
|
1621 |
+ Bool_t useShortestInterval = false;
|
1622 |
+ if (bIsBayesian && ( option.Contains("sh") || (usePosteriorMode && !option.Contains("cen") ) ) ) {
|
1623 |
+ useShortestInterval = true;
|
1624 |
+ }
|
1625 |
+
|
1626 |
|
1627 |
//Set the graph to have a number of points equal to the number of histogram
|
1628 |
//bins
|
1629 |
@@ -521,21 +552,30 @@
|
1630 |
p = int(pass->GetBinContent(b) + 0.5);
|
1631 |
}
|
1632 |
|
1633 |
+ if (!t && !plot0Bins) continue; // skip bins with total = 0
|
1634 |
+ eff = 0; // default value when total =0;
|
1635 |
+
|
1636 |
//using bayesian statistics
|
1637 |
if(bIsBayesian) {
|
1638 |
- if(t + alpha + beta)
|
1639 |
- eff = (p + alpha)/(t + alpha + beta);
|
1640 |
- else
|
1641 |
- continue;
|
1642 |
-
|
1643 |
- low = TEfficiency::Bayesian(t,p,conf,alpha,beta,false);
|
1644 |
- upper = TEfficiency::Bayesian(t,p,conf,alpha,beta,true);
|
1645 |
+ double aa = double(p) + alpha;
|
1646 |
+ double bb = double(t-p) + beta;
|
1647 |
+ if (usePosteriorMode)
|
1648 |
+ eff = TEfficiency::BetaMode(aa,bb);
|
1649 |
+ else
|
1650 |
+ eff = TEfficiency::BetaMean(aa,bb);
|
1651 |
+
|
1652 |
+ if (useShortestInterval) {
|
1653 |
+ TEfficiency::BetaShortestInterval(conf,aa,bb,low,upper);
|
1654 |
+ }
|
1655 |
+ else {
|
1656 |
+ low = TEfficiency::BetaCentralInterval(conf,aa,bb,false);
|
1657 |
+ upper = TEfficiency::BetaCentralInterval(conf,aa,bb,true);
|
1658 |
+ }
|
1659 |
}
|
1660 |
+ // case of non-bayesian statistics
|
1661 |
else {
|
1662 |
if(t)
|
1663 |
eff = ((Double_t)p)/t;
|
1664 |
- else
|
1665 |
- continue;
|
1666 |
|
1667 |
low = pBound(t,p,conf,false);
|
1668 |
upper = pBound(t,p,conf,true);
|