aGrUM  0.20.3
a C++ library for (probabilistic) graphical models
CNMonteCarloSampling_tpl.h
/**
 *
 * Copyright (c) 2005-2021 by Pierre-Henri WUILLEMIN(@LIP6) & Christophe GONZALES(@AMU)
 * info_at_agrum_dot_org
 *
 * This library is free software: you can redistribute it and/or modify
 * it under the terms of the GNU Lesser General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public License
 * along with this library. If not, see <http://www.gnu.org/licenses/>.
 *
 */


#include <agrum/CN/CNMonteCarloSampling.h>
#include <agrum/tools/core/exceptions.h>

namespace gum {
  namespace credal {

    template < typename GUM_SCALAR, class BNInferenceEngine >
    CNMonteCarloSampling< GUM_SCALAR, BNInferenceEngine >::CNMonteCarloSampling(
       const CredalNet< GUM_SCALAR >& credalNet) :
        MultipleInferenceEngine< GUM_SCALAR, BNInferenceEngine >::MultipleInferenceEngine(
           credalNet) {
      _infEs_::repetitiveInd_ = false;
      // __infEs::iterStop_ = 1000;
      _infEs_::storeVertices_ = false;
      _infEs_::storeBNOpt_ = false;

      this->setMaxTime(60);
      this->enableMaxTime();

      /// this->setBurnIn ( 1000 );
      this->setPeriodSize(1000);

      GUM_CONSTRUCTOR(CNMonteCarloSampling);
    }

    template < typename GUM_SCALAR, class BNInferenceEngine >
    CNMonteCarloSampling< GUM_SCALAR, BNInferenceEngine >::~CNMonteCarloSampling() {
      GUM_DESTRUCTOR(CNMonteCarloSampling);
    }

    template < typename GUM_SCALAR, class BNInferenceEngine >
    void CNMonteCarloSampling< GUM_SCALAR, BNInferenceEngine >::makeInference() {
      if (_infEs_::repetitiveInd_) {
        try {
          this->repetitiveInit_();
        } catch (InvalidArgument& err) {
          GUM_SHOWERROR(err);
          _infEs_::repetitiveInd_ = false;
        }
      }

      // debug
      /// notOptDelete = 0;

      _mcInitApproximationScheme_();

      _mcThreadDataCopy_();

      // don't put it after burnIn, it could stop with a timeout: we want at
      // least one burnIn and one periodSize
      GUM_SCALAR eps = 1.;   // to validate testSuite ?

      /// auto bsize = this->burnIn();
      auto psize = this->periodSize();
      /*
      auto remaining = this->remainingBurnIn();

      /// this->burnIn() should be 0, therefore the first do ... while should be
      /// skipped
      if ( remaining != 0 ) {
        /// instead of doing the whole burnIn in one pass, we do it period by
        /// period so we can test the timer ( done by continueApproximationScheme )
        /// and exit during burnIn
        /// no error is thrown if we exit during burnIn ( although one should )
        do {
          eps = 0;

          int iters = int( ( remaining < psize ) ? remaining : psize );

          #pragma omp parallel for
          for ( int iter = 0; iter < iters; iter++ ) {
            _threadInference_();
            _threadUpdate_();
          }   // end of : parallel periodSize

          this->updateApproximationScheme( iters );

          /// this->updateMarginals_();   // fuse thread data + update marginals

          /// eps = this->computeEpsilon_();   // also updates the old marginals

          remaining = this->remainingBurnIn();

        } while ( ( remaining > 0 ) && this->continueApproximationScheme( eps ) );
      }
      */

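      // one period = psize samples drawn in parallel; after each period the
      // scheme merges the thread marginals, recomputes eps and checks the
      // stopping criteria ( epsilon and max time )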
      if (this->continueApproximationScheme(eps)) {
        do {
          eps = 0;

// less overhead with a high periodSize
#pragma omp parallel for
          for (int iter = 0; iter < int(psize); iter++) {
            _threadInference_();
            _threadUpdate_();
          }   // end of : parallel periodSize

          this->updateApproximationScheme(int(psize));

          this->updateMarginals_();   // fuse thread data + update marginals

          eps = this->computeEpsilon_();   // also updates the old marginals

        } while (this->continueApproximationScheme(eps));
      }

      if (!this->modal_.empty()) { this->expFusion_(); }

      if (_infEs_::storeBNOpt_) { this->optFusion_(); }

      if (_infEs_::storeVertices_) { this->verticesFusion_(); }

      if (!this->modal_.empty()) {
        this->dynamicExpectations_();   // works with any network
      }
    }

    template < typename GUM_SCALAR, class BNInferenceEngine >
    inline void CNMonteCarloSampling< GUM_SCALAR, BNInferenceEngine >::_threadUpdate_() {
      int tId = getThreadNumber();
      // bool keepSample = false;

      if (this->l_inferenceEngine_[tId]->evidenceProbability() > 0) {
        const DAG& tDag = this->workingSet_[tId]->dag();

        for (auto node: tDag.nodes()) {
          const Potential< GUM_SCALAR >& potential(this->l_inferenceEngine_[tId]->posterior(node));
          Instantiation ins(potential);
          std::vector< GUM_SCALAR > vertex;

          for (ins.setFirst(); !ins.end(); ++ins) {
            vertex.push_back(potential[ins]);
          }

          // true would enable redundancy elimination of the node's credal set,
          // but since global marginals are only updated at the end of each
          // period of the approximationScheme, it is "useless" ( and expensive )
          // to check now
          this->updateThread_(node, vertex, false);

        }   // end of : for all nodes
      }   // end of : if ( p(e) > 0 )
    }

    template < typename GUM_SCALAR, class BNInferenceEngine >
    inline void CNMonteCarloSampling< GUM_SCALAR, BNInferenceEngine >::_threadInference_() {
      int tId = getThreadNumber();
      _verticesSampling_();

      this->l_inferenceEngine_[tId]->eraseAllEvidence();
      _insertEvidence_();
      this->l_inferenceEngine_[tId]->makeInference();
    }

    template < typename GUM_SCALAR, class BNInferenceEngine >
    void CNMonteCarloSampling< GUM_SCALAR, BNInferenceEngine >::_mcInitApproximationScheme_() {
      // this->setEpsilon ( std::numeric_limits< GUM_SCALAR >::min() );
      /**
       * CHECK d/dt(e(t+1)-e(t))
       */
      this->setEpsilon(0.);
      this->enableEpsilon();   // to be sure

      this->disableMinEpsilonRate();
      this->disableMaxIter();

      this->initApproximationScheme();
    }

    template < typename GUM_SCALAR, class BNInferenceEngine >
    void CNMonteCarloSampling< GUM_SCALAR, BNInferenceEngine >::_mcThreadDataCopy_() {
      int num_threads;
#pragma omp parallel
      {
        int this_thread = getThreadNumber();

// implicit wait clause (don't put nowait)
#pragma omp single
        {
          // should we ask for max threads instead? ( no difference here in
          // practice )
          num_threads = getNumberOfRunningThreads();

          this->initThreadsData_(num_threads, _infEs_::storeVertices_, _infEs_::storeBNOpt_);
          this->l_inferenceEngine_.resize(num_threads, nullptr);

          // if ( _infEs_::storeBNOpt_ )
          //   this->l_sampledNet_.resize ( num_threads );
        }   // end of : single region

        // we could put those below in a function in InferenceEngine, but let's
        // keep this parallel region instead of breaking it and making another
        // one to do the same stuff in 2 places, since:
        // !!! BNInferenceEngine still needs to be initialized here anyway !!!

        BayesNet< GUM_SCALAR >* thread_bn = new BayesNet< GUM_SCALAR >();
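// each thread copies the shared current BN; the critical section serializes
// these copies so that only one thread reads credalNet_ at a time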
#pragma omp critical(Init)
        {
          // IBayesNet< GUM_SCALAR >* thread_bn
          //    = new IBayesNet< GUM_SCALAR >();   // (this->credalNet_->current_bn());
          *thread_bn = this->credalNet_->current_bn();
        }
        this->workingSet_[this_thread] = thread_bn;

        this->l_marginalMin_[this_thread] = this->marginalMin_;
        this->l_marginalMax_[this_thread] = this->marginalMax_;
        this->l_expectationMin_[this_thread] = this->expectationMin_;
        this->l_expectationMax_[this_thread] = this->expectationMax_;
        this->l_modal_[this_thread] = this->modal_;

        _infEs_::l_clusters_[this_thread].resize(2);
        _infEs_::l_clusters_[this_thread][0] = _infEs_::t0_;
        _infEs_::l_clusters_[this_thread][1] = _infEs_::t1_;

        if (_infEs_::storeVertices_) { this->l_marginalSets_[this_thread] = this->marginalSets_; }

        List< const Potential< GUM_SCALAR >* >* evi_list
           = new List< const Potential< GUM_SCALAR >* >();
        this->workingSetE_[this_thread] = evi_list;

        // #TODO: the next instruction works only for lazy propagation.
        //        => find a way to remove the second argument
        BNInferenceEngine* inference_engine
           = new BNInferenceEngine((this->workingSet_[this_thread]),
                                   RelevantPotentialsFinderType::FIND_ALL);

        this->l_inferenceEngine_[this_thread] = inference_engine;

        if (_infEs_::storeBNOpt_) {
          VarMod2BNsMap< GUM_SCALAR >* threadOpt
             = new VarMod2BNsMap< GUM_SCALAR >(*this->credalNet_);
          this->l_optimalNet_[this_thread] = threadOpt;
        }
      }
    }

    template < typename GUM_SCALAR, class BNInferenceEngine >
    inline void CNMonteCarloSampling< GUM_SCALAR, BNInferenceEngine >::_binaryRep_(
       std::vector< bool >& toFill,
       const Idx value) const {
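      // e.g. value = 5 with toFill.size() = 4 yields toFill = { 1, 0, 1, 0 }
      // ( bits are stored least significant first )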
      Idx n = value;
      auto tfsize = toFill.size();

      // get the bits of chosen_vertex
      for (decltype(tfsize) i = 0; i < tfsize; i++) {
        toFill[i] = n & 1;
        n /= 2;
      }
    }

    template < typename GUM_SCALAR, class BNInferenceEngine >
    inline void CNMonteCarloSampling< GUM_SCALAR, BNInferenceEngine >::_verticesSampling_() {
      int this_thread = getThreadNumber();
      IBayesNet< GUM_SCALAR >* working_bn = this->workingSet_[this_thread];

      const auto cpt = &this->credalNet_->credalNet_currentCpt();

      using dBN = std::vector< std::vector< std::vector< bool > > >;

      dBN sample;

      if (_infEs_::storeBNOpt_) { sample = dBN(this->l_optimalNet_[this_thread]->getSampleDef()); }

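      // for each node ( or cluster representative in repetitive mode ), pick a
      // random vertex of the credal set of every parent configuration and fill
      // the working BN's CPT with it: one pass yields one sampled IBayesNet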
      if (_infEs_::repetitiveInd_) {
        const auto& t0 = _infEs_::l_clusters_[this_thread][0];
        const auto& t1 = _infEs_::l_clusters_[this_thread][1];

        for (const auto& elt: t0) {
          auto dSize = working_bn->variable(elt.first).domainSize();
          Potential< GUM_SCALAR >* potential(
             const_cast< Potential< GUM_SCALAR >* >(&working_bn->cpt(elt.first)));
          std::vector< GUM_SCALAR > var_cpt(potential->domainSize());

          Size pconfs = Size((*cpt)[elt.first].size());

          for (Size pconf = 0; pconf < pconfs; pconf++) {
            Size chosen_vertex = rand() % (*cpt)[elt.first][pconf].size();

            if (_infEs_::storeBNOpt_) { _binaryRep_(sample[elt.first][pconf], chosen_vertex); }

            for (Size mod = 0; mod < dSize; mod++) {
              var_cpt[pconf * dSize + mod] = (*cpt)[elt.first][pconf][chosen_vertex][mod];
            }
          }   // end of : pconf

          potential->fillWith(var_cpt);

          Size t0esize = Size(elt.second.size());

          for (Size pos = 0; pos < t0esize; pos++) {
            if (_infEs_::storeBNOpt_) { sample[elt.second[pos]] = sample[elt.first]; }

            Potential< GUM_SCALAR >* potential2(
               const_cast< Potential< GUM_SCALAR >* >(&working_bn->cpt(elt.second[pos])));
            potential2->fillWith(var_cpt);
          }
        }

        for (const auto& elt: t1) {
          auto dSize = working_bn->variable(elt.first).domainSize();
          Potential< GUM_SCALAR >* potential(
             const_cast< Potential< GUM_SCALAR >* >(&working_bn->cpt(elt.first)));
          std::vector< GUM_SCALAR > var_cpt(potential->domainSize());

          for (Size pconf = 0; pconf < (*cpt)[elt.first].size(); pconf++) {
            Idx chosen_vertex = Idx(rand() % (*cpt)[elt.first][pconf].size());

            if (_infEs_::storeBNOpt_) { _binaryRep_(sample[elt.first][pconf], chosen_vertex); }

            for (decltype(dSize) mod = 0; mod < dSize; mod++) {
              var_cpt[pconf * dSize + mod] = (*cpt)[elt.first][pconf][chosen_vertex][mod];
            }
          }   // end of : pconf

          potential->fillWith(var_cpt);

          auto t1esize = elt.second.size();

          for (decltype(t1esize) pos = 0; pos < t1esize; pos++) {
            if (_infEs_::storeBNOpt_) { sample[elt.second[pos]] = sample[elt.first]; }

            Potential< GUM_SCALAR >* potential2(
               const_cast< Potential< GUM_SCALAR >* >(&working_bn->cpt(elt.second[pos])));
            potential2->fillWith(var_cpt);
          }
        }

        if (_infEs_::storeBNOpt_) { this->l_optimalNet_[this_thread]->setCurrentSample(sample); }
      } else {
        for (auto node: working_bn->nodes()) {
          auto dSize = working_bn->variable(node).domainSize();
          Potential< GUM_SCALAR >* potential(
             const_cast< Potential< GUM_SCALAR >* >(&working_bn->cpt(node)));
          std::vector< GUM_SCALAR > var_cpt(potential->domainSize());

          auto pConfs = (*cpt)[node].size();

          for (decltype(pConfs) pconf = 0; pconf < pConfs; pconf++) {
            Size nVertices = Size((*cpt)[node][pconf].size());
            Idx chosen_vertex = Idx(rand() % nVertices);

            if (_infEs_::storeBNOpt_) { _binaryRep_(sample[node][pconf], chosen_vertex); }

            for (decltype(dSize) mod = 0; mod < dSize; mod++) {
              var_cpt[pconf * dSize + mod] = (*cpt)[node][pconf][chosen_vertex][mod];
            }
          }   // end of : pconf

          potential->fillWith(var_cpt);
        }

        if (_infEs_::storeBNOpt_) { this->l_optimalNet_[this_thread]->setCurrentSample(sample); }
      }
    }

    template < typename GUM_SCALAR, class BNInferenceEngine >
    inline void CNMonteCarloSampling< GUM_SCALAR, BNInferenceEngine >::_insertEvidence_() {
      if (this->evidence_.size() == 0) { return; }

      int this_thread = getThreadNumber();

      BNInferenceEngine* inference_engine = this->l_inferenceEngine_[this_thread];

      IBayesNet< GUM_SCALAR >* working_bn = this->workingSet_[this_thread];

      List< const Potential< GUM_SCALAR >* >* evi_list = this->workingSetE_[this_thread];

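      // the evidence potentials are built once per thread and cached in
      // evi_list: later calls only re-register the cached potentials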
      if (evi_list->size() > 0) {
        for (const auto pot: *evi_list)
          inference_engine->addEvidence(*pot);
        return;
      }

      for (const auto& elt: this->evidence_) {
        Potential< GUM_SCALAR >* p = new Potential< GUM_SCALAR >;
        (*p) << working_bn->variable(elt.first);

        try {
          p->fillWith(elt.second);
        } catch (Exception& err) {
          GUM_SHOWERROR(err);
          throw(err);
        }

        evi_list->insert(p);
      }

      if (evi_list->size() > 0) {
        for (const auto pot: *evi_list)
          inference_engine->addEvidence(*pot);
      }
    }

  }   // namespace credal
}   // namespace gum
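
Usage note: the sketch below shows one way to drive this engine, assuming a
CredalNet built from two interval-bound BN files ("cn_min.bif" / "cn_max.bif"
are illustrative names) and gum::LazyPropagation as the exact
BNInferenceEngine. It is a minimal sketch under those assumptions, not part of
the file above; see the CNMonteCarloSampling class documentation for the
authoritative API.

#include <agrum/BN/inference/lazyPropagation.h>
#include <agrum/CN/CNMonteCarloSampling.h>
#include <agrum/CN/credalNet.h>

int main() {
  // interval credal net: one BN holds the lower bounds, the other the upper bounds
  gum::credal::CredalNet< double > cn("cn_min.bif", "cn_max.bif");
  cn.intervalToCredal();   // compute the vertices of each local credal set

  gum::credal::CNMonteCarloSampling< double, gum::LazyPropagation< double > > mc(cn);
  mc.setMaxTime(10);       // override the constructor's 60s default
  mc.makeInference();

  // pmin[i] / pmax[i]: lower / upper posterior probability of modality i of node 0
  const auto& pmin = mc.marginalMin(0);
  const auto& pmax = mc.marginalMax(0);

  return 0;
}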