HighLAND
PileUpSystematics.cxx
#include "PileUpSystematics.hxx"
#include "ND280AnalysisUtils.hxx"

/*
\\! [PileUpSystematics_info]
\htmlonly
There are a number of categories of possible pile-up, but only the
effect of sand muons is significant for the \(\nu_\mu\) analyses. For vertex
selection in FGD1, events with TPC1 activity are rejected by the
external veto cut because, in most cases, TPC1 activity is due to tracks
from interactions upstream of the detector (sand muons) or outside the
tracker fiducial volume. A quick study of MC events rejected due to the
TPC1 veto indicates that the majority are not true CC interactions.
Since sand muons are not included in the standard NEUT simulation, the
Monte Carlo does not include the effect of events that are rejected due
to coincidence with a sand muon, and a correction must be made.

<br/>
<br/>

For vertex selection in FGD2, an analogous cut is made to veto events
with TPC2 activity. Therefore, the pile-up correction and systematic are
evaluated in an identical manner for TPC1 and TPC2, with the TPC1 pile-up
applicable to selections with FGD1 vertices and the TPC2 pile-up applicable
to selections with FGD2 vertices.

<br/>
<br/>

Evaluation of this correction follows the same procedure as the
2013 analysis (as described in T2K-TN-152 \cite{TN-152}). The correction
is evaluated for each data set (Run 1, 2, 3b, 3c, 4, 5ab, 5c) separately
for P0D water-in and water-out, using the relevant MC samples for each
period. (Note that sand muon simulations are only available for the
water-out configuration.) The procedure is to count the number of TPC1
(TPC2) events, \(NTPC_s\), in a separate sand muon Monte Carlo sample
corresponding to a fixed PoT, \(POT_s\). The data intensity,
\(I_{d} = POT/nSpills\), is then derived from the data sample and used to
calculate the effective number of spills and hence the number of sand
TPC1 (TPC2) events per bunch for that data set, where \(N_b\) is the
number of bunches per spill (6 for Run 1, 8 otherwise). Flux tuning is
applied using the 13a tuning from the nd5_tuned13av1.0_13anom_runX files.

Since this pile-up is not taken into account in the Monte Carlo, the
number of selected events in the Monte Carlo should be reduced. To do
so, we re-weight the Monte Carlo with the following reduction factor,

\[
w_{c} = (1-C_{s})
\]

where \(C_{s}\) is the correction to be applied and defined as

\[
C_{s} = \frac{NTPC_s\times I_{d}}{POT_s\times N_b}
\]

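As an illustration only (none of the names below exist in HighLAND; this is a
minimal sketch under the definitions above), the correction for a single run
period and TPC could be computed from these inputs as follows:

<pre>
// Illustrative sketch: compute the sand pile-up correction C_s for one
// run period from the per-sample quantities defined above.
double SandPileUpCorrection(double nTPC_sand,    // NTPC_s: TPC1 (TPC2) events in the sand MC
                            double pot_sand,     // POT_s : PoT of the sand MC sample
                            double pot_data,     // PoT of the data sample
                            double nSpills_data, // number of spills in the data sample
                            double nBunches){    // N_b   : bunches per spill (6 for Run 1, 8 otherwise)
  const double intensity = pot_data / nSpills_data;      // I_d = POT/nSpills
  return nTPC_sand * intensity / (pot_sand * nBunches);  // C_s
}
// The Monte Carlo is then re-weighted by w_c = 1 - C_s.
</pre>
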
Since there is a 10\% uncertainty in the sand muon simulation for
neutrinos (30\% is taken for antineutrinos), and since there are possible
differences between data and Monte Carlo arising from differences in the
actual and simulated beam intensity and in the material descriptions for
side-band materials, there is a systematic uncertainty on this pile-up
contribution. The uncertainty is evaluated through a data-MC comparison
of the number of TPC1 (TPC2) events per bunch, with the sand muon
contribution added to the MC. Explicitly, the data-MC difference is

\[
\Delta_{\rm data:MC} = C_d - (C_{MC}+C_s)
\]

where

\[
C_{d}=\frac{NTPC_d}{nSpills\times N_b}
\]
\[
C_{MC}=\frac{NTPC_{MC}\times I_{d}}{POT_{MC}\times N_b} .
\]

The same procedure is applied to evaluate the number of TPC1 (TPC2)
events per bunch for data and MC (the MC is also weighted by the data
intensity), and the resulting \(\Delta_{\rm data:MC}\) value can then be
taken as the uncertainty. However, the sand muon uncertainty also implies
a 10\% uncertainty (30\% for antineutrinos) on the correction factor, and
combining the two would be double counting. The procedure is therefore to
take either \(\Delta_{\rm data:MC}\) or \(0.1\times C_{s}\)
(\(0.3\times C_{s}\) for antineutrinos), whichever is larger, as
\(\sigma_{\rm pileup}\). This systematic error is then propagated as a
normalization error using Eq. \endhtmlonly \ref eq_normsyst \htmlonly

\[
w_{pileup} = 1+\alpha \cdot \sigma_{\rm pileup}
\]

where \(\alpha\) is the random variation.

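As a purely illustrative sketch (hypothetical names, not HighLAND code),
\(\sigma_{\rm pileup}\) and the toy weight could be assembled as:

<pre>
// Take the larger of the data-MC difference and the sand-flux fraction of
// the correction (0.1 for neutrinos, 0.3 for antineutrinos) as sigma_pileup.
double PileUpSigma(double C_d, double C_MC, double C_s, double sandFrac){
  const double delta   = C_d - (C_MC + C_s);      // Delta_{data:MC}
  const double fluxUnc = sandFrac * C_s;          // 0.1 (0.3) x C_s
  return (delta > fluxUnc) ? delta : fluxUnc;     // whichever is larger
}

// Weight applied for one toy throw, where alpha is the random variation.
double PileUpToyWeight(double sigma_pileup, double alpha){
  return 1. + alpha * sigma_pileup;               // w_pileup
}
</pre>
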
\endhtmlonly
\\! [PileUpSystematics_info]
*/


//********************************************************************
PileUpSystematics::PileUpSystematics():EventWeightBase(1){
//********************************************************************

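  // Pile-up correction and error tables for FGD1 and FGD2, binned by run period.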
  _fgd1 = new BinnedParams("PileUpFGD1",BinnedParams::k1D_SYMMETRIC, versionUtils::Extension());
  _fgd2 = new BinnedParams("PileUpFGD2",BinnedParams::k1D_SYMMETRIC, versionUtils::Extension());
  SetNParameters(_fgd1->GetNBins()+_fgd2->GetNBins());
}

//********************************************************************
Weight_h PileUpSystematics::ComputeWeight(const ToyExperiment& toy, const AnaEventC& eventBB, const ToyBoxB& box, const SelectionBase& sel){
//********************************************************************

  Weight_h eventWeight;

  const AnaEventB& event = *static_cast<const AnaEventB*>(&eventBB);

  // Get FV corresponding to the current branch
  SubDetId_h det = sel.GetDetectorFV(box.SuccessfulBranch);

  if (det != SubDetId::kFGD1 && det != SubDetId::kFGD2) return eventWeight;

  // Get the run period (from 0 to 8)
  Int_t runPeriod = anaUtils::GetRunPeriod(event.EventInfo.Run);

  // MainTrack should be in FV. This condition is already in box.DetectorFV
  // if (!anaUtils::InFiducialVolume(box.DetectorFV, box.MainTrack->PositionStart)) return eventWeight;

  // Get the pileup values for this run period
  Float_t pileup;       // a correction
  Float_t pileup_error; // a systematic error
  Int_t index;
  if (det == SubDetId::kFGD1){
    if(!_fgd1->GetBinValues(runPeriod, pileup, pileup_error, index)) return eventWeight;
  }
  else if(det == SubDetId::kFGD2){
    if(!_fgd2->GetBinValues(runPeriod, pileup, pileup_error, index)) return eventWeight;
  }
  else return eventWeight;

  // compute the weight (corr + syst)
  // eventWeight= 1 - pileup - pileup_error * toy.GetToyVariations(_index)->Variations[index]; // TO BE UNCOMMENTED
  // eventWeight= 1 - (pileup_error * toy.GetToyVariations(_index)->Variations[index])/(1 - pileup); // TO BE REMOVED

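  // Weight_h carries both pieces: Correction holds the nominal reduction factor (1 - C_s),
  // while Systematic also includes the pile-up error varied for this toy.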
  eventWeight.Systematic = 1 - pileup - pileup_error * toy.GetToyVariations(_index)->Variations[index];
  eventWeight.Correction = 1 - pileup;

  return eventWeight;
}