aGrUM  0.20.2
a C++ library for (probabilistic) graphical models
CNMonteCarloSampling_tpl.h
Go to the documentation of this file.
1 /**
2  *
3  * Copyright 2005-2020 Pierre-Henri WUILLEMIN(@LIP6) & Christophe GONZALES(@AMU)
4  * info_at_agrum_dot_org
5  *
6  * This library is free software: you can redistribute it and/or modify
7  * it under the terms of the GNU Lesser General Public License as published by
8  * the Free Software Foundation, either version 3 of the License, or
9  * (at your option) any later version.
10  *
11  * This library is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14  * GNU Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public License
17  * along with this library. If not, see <http://www.gnu.org/licenses/>.
18  *
19  */
20 
21 
23 #include <agrum/tools/core/exceptions.h>
24 
25 namespace gum {
26  namespace credal {
27 
28  template < typename GUM_SCALAR, class BNInferenceEngine >
29  CNMonteCarloSampling< GUM_SCALAR, BNInferenceEngine >::CNMonteCarloSampling(
30  const CredalNet< GUM_SCALAR >& credalNet) :
33  infEs__::repetitiveInd_ = false;
34  //__infEs::iterStop_ = 1000;
35  infEs__::storeVertices_ = false;
36  infEs__::storeBNOpt_ = false;
37 
38  this->setMaxTime(60);
39  this->enableMaxTime();
40 
41  /// this->setBurnIn ( 1000 );
42  this->setPeriodSize(1000);
43 
44  GUM_CONSTRUCTOR(CNMonteCarloSampling);
45  }
46 
47  template < typename GUM_SCALAR, class BNInferenceEngine >
48  CNMonteCarloSampling< GUM_SCALAR,
49  BNInferenceEngine >::~CNMonteCarloSampling() {
50  GUM_DESTRUCTOR(CNMonteCarloSampling);
51  }
52 
53  template < typename GUM_SCALAR, class BNInferenceEngine >
54  void CNMonteCarloSampling< GUM_SCALAR, BNInferenceEngine >::makeInference() {
55  if (infEs__::repetitiveInd_) {
56  try {
57  this->repetitiveInit_();
58  } catch (InvalidArgument& err) {
59  GUM_SHOWERROR(err);
60  infEs__::repetitiveInd_ = false;
61  }
62  }
63 
64  // debug
65  /// notOptDelete = 0;
66 
68 
70 
71  // don't put it after burnIn, it could stop with timeout : we want at
72  // least one
73  // burnIn and one periodSize
74  GUM_SCALAR eps = 1.; // to validate testSuite ?
75 
76  /// auto bsize = this->burnIn();
77  auto psize = this->periodSize();
78  /*
79 
80  auto remaining = this->remainingBurnIn();
81 
82  /// this->burnIn() should be 0 therefor the first do ... while should
83  be
84  /// skipped
85  if ( remaining != 0 ) {
86  /// instead of doing the whole burnIn in one pass, we do it period by
87  /// period
88  /// so we can test the timer ( done by continueApproximationScheme )
89  and
90  /// exit
91  /// during burnIn
92  /// no error is thrown if we exit during burnIn ( although one should
93  )
94  do {
95  eps = 0;
96 
97  int iters = int( ( remaining < psize ) ? remaining : psize );
98 
99  #pragma omp parallel for
100 
101  for ( int iter = 0; iter < iters; iter++ ) {
102  threadInference__();
103  threadUpdate__();
104  } // end of : parallel periodSize
105 
106  this->updateApproximationScheme( iters );
107 
108  /// this->updateMarginals_(); // fusion threads + update margi
109 
110  /// eps = this->computeEpsilon_(); // also updates oldMargi
111 
112  remaining = this->remainingBurnIn();
113 
114  } while ( ( remaining > 0 ) && this->continueApproximationScheme( eps
115  ) );
116  }
117  */
118 
119  if (this->continueApproximationScheme(eps)) {
120  do {
121  eps = 0;
122 
123 // less overheads with high periodSize
124 #pragma omp parallel for
125 
126  for (int iter = 0; iter < int(psize); iter++) {
129  } // end of : parallel periodSize
130 
131  this->updateApproximationScheme(int(psize));
132 
133  this->updateMarginals_(); // fusion threads + update margi
134 
135  eps = this->computeEpsilon_(); // also updates oldMargi
136 
137  } while (this->continueApproximationScheme(eps));
138  }
139 
140  if (!this->modal_.empty()) { this->expFusion_(); }
141 
142  if (infEs__::storeBNOpt_) { this->optFusion_(); }
143 
144  if (infEs__::storeVertices_) { this->verticesFusion_(); }
145 
146  if (!this->modal_.empty()) {
147  this->dynamicExpectations_(); // work with any network
148  }
149 
150  /// GUM_TRACE ( this->messageApproximationScheme() );
151  }
152 
153  template < typename GUM_SCALAR, class BNInferenceEngine >
154  inline void
155  CNMonteCarloSampling< GUM_SCALAR, BNInferenceEngine >::threadUpdate__() {
156  int tId = getThreadNumber();
157  // bool keepSample = false;
158 
159  if (this->l_inferenceEngine_[tId]->evidenceProbability() > 0) {
160  const DAG& tDag = this->workingSet_[tId]->dag();
161 
162  for (auto node: tDag.nodes()) {
163  const Potential< GUM_SCALAR >& potential(
164  this->l_inferenceEngine_[tId]->posterior(node));
165  Instantiation ins(potential);
166  std::vector< GUM_SCALAR > vertex;
167 
168  for (ins.setFirst(); !ins.end(); ++ins) {
169  vertex.push_back(potential[ins]);
170  }
171 
172  // true for redundancy elimination of node it credal set
173  // but since global marginals are only updated at the end of each
174  // period of
175  // approximationScheme, it is "useless" ( and expensive ) to check now
176  this->updateThread_(node, vertex, false);
177 
178  } // end of : for all nodes
179  } // end of : if ( p(e) > 0 )
180  }
181 
182  template < typename GUM_SCALAR, class BNInferenceEngine >
183  inline void
184  CNMonteCarloSampling< GUM_SCALAR, BNInferenceEngine >::threadInference__() {
185  int tId = getThreadNumber();
187 
188  this->l_inferenceEngine_[tId]->eraseAllEvidence();
190  this->l_inferenceEngine_[tId]->makeInference();
191  }
192 
193  template < typename GUM_SCALAR, class BNInferenceEngine >
194  void CNMonteCarloSampling< GUM_SCALAR,
195  BNInferenceEngine >::mcInitApproximationScheme__() {
196  // this->setEpsilon ( std::numeric_limits< GUM_SCALAR >::min() );
197  /**
198  * VERIFIER d/dt(e(t+1)-e(t))
199  */
200  this->setEpsilon(0.);
201  this->enableEpsilon(); // to be sure
202 
203  this->disableMinEpsilonRate();
204  this->disableMaxIter();
205 
206  this->initApproximationScheme();
207  }
208 
209  template < typename GUM_SCALAR, class BNInferenceEngine >
210  void CNMonteCarloSampling< GUM_SCALAR,
211  BNInferenceEngine >::mcThreadDataCopy__() {
212  int num_threads;
213 #pragma omp parallel
214  {
215  int this_thread = getThreadNumber();
216 
217 // implicit wait clause (don't put nowait)
218 #pragma omp single
219  {
220  // should we ask for max threads instead ( no differences here in
221  // practice
222  // )
223  num_threads = getNumberOfRunningThreads();
224 
225  this->initThreadsData_(num_threads,
226  infEs__::storeVertices_,
227  infEs__::storeBNOpt_);
228  this->l_inferenceEngine_.resize(num_threads, nullptr);
229 
230  // if ( infEs__::storeBNOpt_ )
231  // this->l_sampledNet_.resize ( num_threads );
232  } // end of : single region
233 
234  // we could put those below in a function in InferenceEngine, but let's
235  // keep
236  // this parallel region instead of breaking it and making another one to
237  // do
238  // the same stuff in 2 places since :
239  // !!! BNInferenceEngine still needs to be initialized here anyway !!!
240 
241  BayesNet< GUM_SCALAR >* thread_bn = new BayesNet< GUM_SCALAR >();
242 #pragma omp critical(Init)
243  {
244  // IBayesNet< GUM_SCALAR > * thread_bn = new IBayesNet< GUM_SCALAR
245  // >();//(this->credalNet_->current_bn());
246  *thread_bn = this->credalNet_->current_bn();
247  }
248  this->workingSet_[this_thread] = thread_bn;
249 
250  this->l_marginalMin_[this_thread] = this->marginalMin_;
251  this->l_marginalMax_[this_thread] = this->marginalMax_;
252  this->l_expectationMin_[this_thread] = this->expectationMin_;
253  this->l_expectationMax_[this_thread] = this->expectationMax_;
254  this->l_modal_[this_thread] = this->modal_;
255 
256  infEs__::l_clusters_[this_thread].resize(2);
257  infEs__::l_clusters_[this_thread][0] = infEs__::t0_;
258  infEs__::l_clusters_[this_thread][1] = infEs__::t1_;
259 
260  if (infEs__::storeVertices_) {
261  this->l_marginalSets_[this_thread] = this->marginalSets_;
262  }
263 
264  List< const Potential< GUM_SCALAR >* >* evi_list
265  = new List< const Potential< GUM_SCALAR >* >();
266  this->workingSetE_[this_thread] = evi_list;
267 
268  // #TODO: the next instruction works only for lazy propagation.
269  // => find a way to remove the second argument
270  BNInferenceEngine* inference_engine
271  = new BNInferenceEngine((this->workingSet_[this_thread]),
272  RelevantPotentialsFinderType::FIND_ALL);
273 
274  this->l_inferenceEngine_[this_thread] = inference_engine;
275 
276  if (infEs__::storeBNOpt_) {
277  VarMod2BNsMap< GUM_SCALAR >* threadOpt
278  = new VarMod2BNsMap< GUM_SCALAR >(*this->credalNet_);
279  this->l_optimalNet_[this_thread] = threadOpt;
280  }
281  }
282  }
283 
284  template < typename GUM_SCALAR, class BNInferenceEngine >
285  inline void CNMonteCarloSampling< GUM_SCALAR, BNInferenceEngine >::binaryRep__(
286  std::vector< bool >& toFill,
287  const Idx value) const {
288  Idx n = value;
289  auto tfsize = toFill.size();
290 
291  // get bits of choosen_vertex
292  for (decltype(tfsize) i = 0; i < tfsize; i++) {
293  toFill[i] = n & 1;
294  n /= 2;
295  }
296  }
297 
298  template < typename GUM_SCALAR, class BNInferenceEngine >
299  inline void CNMonteCarloSampling< GUM_SCALAR,
300  BNInferenceEngine >::verticesSampling__() {
301  int this_thread = getThreadNumber();
302  IBayesNet< GUM_SCALAR >* working_bn = this->workingSet_[this_thread];
303 
304  const auto cpt = &this->credalNet_->credalNet_currentCpt();
305 
306  using dBN = std::vector< std::vector< std::vector< bool > > >;
307 
308  dBN sample;
309 
310  if (infEs__::storeBNOpt_) {
311  sample = dBN(this->l_optimalNet_[this_thread]->getSampleDef());
312  }
313 
314  if (infEs__::repetitiveInd_) {
315  const auto& t0 = infEs__::l_clusters_[this_thread][0];
316  const auto& t1 = infEs__::l_clusters_[this_thread][1];
317 
318  for (const auto& elt: t0) {
319  auto dSize = working_bn->variable(elt.first).domainSize();
320  Potential< GUM_SCALAR >* potential(
321  const_cast< Potential< GUM_SCALAR >* >(&working_bn->cpt(elt.first)));
322  std::vector< GUM_SCALAR > var_cpt(potential->domainSize());
323 
324  Size pconfs = Size((*cpt)[elt.first].size());
325 
326  for (Size pconf = 0; pconf < pconfs; pconf++) {
327  Size choosen_vertex = rand() % (*cpt)[elt.first][pconf].size();
328 
329  if (infEs__::storeBNOpt_) {
330  binaryRep__(sample[elt.first][pconf], choosen_vertex);
331  }
332 
333  for (Size mod = 0; mod < dSize; mod++) {
334  var_cpt[pconf * dSize + mod]
335  = (*cpt)[elt.first][pconf][choosen_vertex][mod];
336  }
337  } // end of : pconf
338 
339  potential->fillWith(var_cpt);
340 
341  Size t0esize = Size(elt.second.size());
342 
343  for (Size pos = 0; pos < t0esize; pos++) {
344  if (infEs__::storeBNOpt_) {
345  sample[elt.second[pos]] = sample[elt.first];
346  }
347 
348  Potential< GUM_SCALAR >* potential2(
349  const_cast< Potential< GUM_SCALAR >* >(
350  &working_bn->cpt(elt.second[pos])));
351  potential2->fillWith(var_cpt);
352  }
353  }
354 
355  for (const auto& elt: t1) {
356  auto dSize = working_bn->variable(elt.first).domainSize();
357  Potential< GUM_SCALAR >* potential(
358  const_cast< Potential< GUM_SCALAR >* >(&working_bn->cpt(elt.first)));
359  std::vector< GUM_SCALAR > var_cpt(potential->domainSize());
360 
361  for (Size pconf = 0; pconf < (*cpt)[elt.first].size(); pconf++) {
362  Idx choosen_vertex = Idx(rand() % (*cpt)[elt.first][pconf].size());
363 
364  if (infEs__::storeBNOpt_) {
365  binaryRep__(sample[elt.first][pconf], choosen_vertex);
366  }
367 
368  for (decltype(dSize) mod = 0; mod < dSize; mod++) {
369  var_cpt[pconf * dSize + mod]
370  = (*cpt)[elt.first][pconf][choosen_vertex][mod];
371  }
372  } // end of : pconf
373 
374  potential->fillWith(var_cpt);
375 
376  auto t1esize = elt.second.size();
377 
378  for (decltype(t1esize) pos = 0; pos < t1esize; pos++) {
379  if (infEs__::storeBNOpt_) {
380  sample[elt.second[pos]] = sample[elt.first];
381  }
382 
383  Potential< GUM_SCALAR >* potential2(
384  const_cast< Potential< GUM_SCALAR >* >(
385  &working_bn->cpt(elt.second[pos])));
386  potential2->fillWith(var_cpt);
387  }
388  }
389 
390  if (infEs__::storeBNOpt_) {
391  this->l_optimalNet_[this_thread]->setCurrentSample(sample);
392  }
393  } else {
394  for (auto node: working_bn->nodes()) {
395  auto dSize = working_bn->variable(node).domainSize();
396  Potential< GUM_SCALAR >* potential(
397  const_cast< Potential< GUM_SCALAR >* >(&working_bn->cpt(node)));
398  std::vector< GUM_SCALAR > var_cpt(potential->domainSize());
399 
400  auto pConfs = (*cpt)[node].size();
401 
402  for (decltype(pConfs) pconf = 0; pconf < pConfs; pconf++) {
403  Size nVertices = Size((*cpt)[node][pconf].size());
404  Idx choosen_vertex = Idx(rand() % nVertices);
405 
406  if (infEs__::storeBNOpt_) {
407  binaryRep__(sample[node][pconf], choosen_vertex);
408  }
409 
410  for (decltype(dSize) mod = 0; mod < dSize; mod++) {
411  var_cpt[pconf * dSize + mod]
412  = (*cpt)[node][pconf][choosen_vertex][mod];
413  }
414  } // end of : pconf
415 
416  potential->fillWith(var_cpt);
417  }
418 
419  if (infEs__::storeBNOpt_) {
420  this->l_optimalNet_[this_thread]->setCurrentSample(sample);
421  }
422  }
423  }
424 
425  template < typename GUM_SCALAR, class BNInferenceEngine >
426  inline void
427  CNMonteCarloSampling< GUM_SCALAR, BNInferenceEngine >::insertEvidence__() {
428  if (this->evidence_.size() == 0) { return; }
429 
430  int this_thread = getThreadNumber();
431 
432  BNInferenceEngine* inference_engine = this->l_inferenceEngine_[this_thread];
433 
434  IBayesNet< GUM_SCALAR >* working_bn = this->workingSet_[this_thread];
435 
436  List< const Potential< GUM_SCALAR >* >* evi_list
437  = this->workingSetE_[this_thread];
438 
439  if (evi_list->size() > 0) {
440  for (const auto pot: *evi_list)
441  inference_engine->addEvidence(*pot);
442  return;
443  }
444 
445  for (const auto& elt: this->evidence_) {
446  Potential< GUM_SCALAR >* p = new Potential< GUM_SCALAR >;
447  (*p) << working_bn->variable(elt.first);
448 
449  try {
450  p->fillWith(elt.second);
451  } catch (Exception& err) {
452  GUM_SHOWERROR(err);
453  throw(err);
454  }
455 
456  evi_list->insert(p);
457  }
458 
459  if (evi_list->size() > 0) {
460  for (const auto pot: *evi_list)
461  inference_engine->addEvidence(*pot);
462  }
463  }
464 
465  } // namespace credal
466 } // namespace gum
void threadUpdate__()
Update thread data after an IBayesNet inference.
void verticesSampling__()
Thread samples an IBayesNet from the CredalNet.
INLINE void emplace(Args &&... args)
Definition: set_tpl.h:669
void mcThreadDataCopy__()
Initialize threads data.
void threadInference__()
Thread performs an inference using BNInferenceEngine.
void mcInitApproximationScheme__()
Initialize approximation Scheme.
CNMonteCarloSampling(const CredalNet< GUM_SCALAR > &credalNet)
Constructor.
void makeInference()
Starts the inference.
<agrum/CN/CNMonteCarloSampling.h>
void insertEvidence__()
Insert CredalNet evidence into a thread BNInferenceEngine.
namespace for all credal networks entities
Definition: LpInterface.cpp:37