aGrUM  0.16.0
CNMonteCarloSampling_tpl.h
Go to the documentation of this file.
1 
23 #include "CNMonteCarloSampling.h"
24 #include <agrum/core/exceptions.h>
25 
26 namespace gum {
27  namespace credal {
28 
29  template < typename GUM_SCALAR, class BNInferenceEngine >
31  const CredalNet< GUM_SCALAR >& credalNet) :
32  MultipleInferenceEngine< GUM_SCALAR, BNInferenceEngine >::
33  MultipleInferenceEngine(credalNet) {
35  //__infEs::_iterStop = 1000;
37  __infEs::_storeBNOpt = false;
38 
39  this->setMaxTime(60);
40  this->enableMaxTime();
41 
43  this->setPeriodSize(1000);
44 
45  GUM_CONSTRUCTOR(CNMonteCarloSampling);
46  }
47 
48  template < typename GUM_SCALAR, class BNInferenceEngine >
49  CNMonteCarloSampling< GUM_SCALAR,
50  BNInferenceEngine >::~CNMonteCarloSampling() {
51  GUM_DESTRUCTOR(CNMonteCarloSampling);
52  }
53 
54  template < typename GUM_SCALAR, class BNInferenceEngine >
57  try {
58  this->_repetitiveInit();
59  } catch (InvalidArgument& err) {
60  GUM_SHOWERROR(err);
62  }
63  }
64 
65  // debug
67 
69 
71 
72  // don't put it after burnIn, it could stop with timeout : we want at
73  // least one
74  // burnIn and one periodSize
75  GUM_SCALAR eps = 1.; // to validate testSuite ?
76 
78  auto psize = this->periodSize();
79  /*
80 
81  auto remaining = this->remainingBurnIn();
82 
84  be
86  if ( remaining != 0 ) {
90  and
94  )
95  do {
96  eps = 0;
97 
98  int iters = int( ( remaining < psize ) ? remaining : psize );
99 
100  #pragma omp parallel for
101 
102  for ( int iter = 0; iter < iters; iter++ ) {
103  __threadInference();
104  __threadUpdate();
105  } // end of : parallel periodSize
106 
107  this->updateApproximationScheme( iters );
108 
110 
112 
113  remaining = this->remainingBurnIn();
114 
115  } while ( ( remaining > 0 ) && this->continueApproximationScheme( eps
116  ) );
117  }
118  */
119 
120  if (this->continueApproximationScheme(eps)) {
121  do {
122  eps = 0;
123 
124 // less overheads with high periodSize
125 #pragma omp parallel for
126 
127  for (int iter = 0; iter < int(psize); iter++) {
129  __threadUpdate();
130  } // end of : parallel periodSize
131 
132  this->updateApproximationScheme(int(psize));
133 
134  this->_updateMarginals(); // fusion threads + update margi
135 
136  eps = this->_computeEpsilon(); // also updates oldMargi
137 
138  } while (this->continueApproximationScheme(eps));
139  }
140 
141  if (!this->_modal.empty()) { this->_expFusion(); }
142 
143  if (__infEs::_storeBNOpt) { this->_optFusion(); }
144 
145  if (__infEs::_storeVertices) { this->_verticesFusion(); }
146 
147  if (!this->_modal.empty()) {
148  this->_dynamicExpectations(); // work with any network
149  }
150 
152  }
153 
154  template < typename GUM_SCALAR, class BNInferenceEngine >
155  inline void
157  int tId = getThreadNumber();
158  // bool keepSample = false;
159 
160  if (this->_l_inferenceEngine[tId]->evidenceProbability() > 0) {
161  const DAG& tDag = this->_workingSet[tId]->dag();
162 
163  for (auto node : tDag.nodes()) {
164  const Potential< GUM_SCALAR >& potential(
165  this->_l_inferenceEngine[tId]->posterior(node));
166  Instantiation ins(potential);
167  std::vector< GUM_SCALAR > vertex;
168 
169  for (ins.setFirst(); !ins.end(); ++ins) {
170  vertex.push_back(potential[ins]);
171  }
172 
173  // true for redundancy elimination of node it credal set
174  // but since global marginals are only updated at the end of each
175  // period of
176  // approximationScheme, it is "useless" ( and expensive ) to check now
177  this->_updateThread(node, vertex, false);
178 
179  } // end of : for all nodes
180  } // end of : if ( p(e) > 0 )
181  }
182 
183  template < typename GUM_SCALAR, class BNInferenceEngine >
184  inline void
186  int tId = getThreadNumber();
188 
189  this->_l_inferenceEngine[tId]->eraseAllEvidence();
191  this->_l_inferenceEngine[tId]->makeInference();
192  }
193 
194  template < typename GUM_SCALAR, class BNInferenceEngine >
195  void CNMonteCarloSampling< GUM_SCALAR,
196  BNInferenceEngine >::__mcInitApproximationScheme() {
197  // this->setEpsilon ( std::numeric_limits< GUM_SCALAR >::min() );
201  this->setEpsilon(0.);
202  this->enableEpsilon(); // to be sure
203 
204  this->disableMinEpsilonRate();
205  this->disableMaxIter();
206 
207  this->initApproximationScheme();
208  }
209 
210  template < typename GUM_SCALAR, class BNInferenceEngine >
211  void CNMonteCarloSampling< GUM_SCALAR,
212  BNInferenceEngine >::__mcThreadDataCopy() {
213  int num_threads;
214 #pragma omp parallel
215  {
216  int this_thread = getThreadNumber();
217 
218 // implicit wait clause (don't put nowait)
219 #pragma omp single
220  {
221  // should we ask for max threads instead ( no differences here in
222  // practice
223  // )
224  num_threads = getNumberOfRunningThreads();
225 
226  this->_initThreadsData(
228  this->_l_inferenceEngine.resize(num_threads, nullptr);
229 
230  // if ( __infEs::_storeBNOpt )
231  // this->_l_sampledNet.resize ( num_threads );
232  } // end of : single region
233 
234  // we could put those below in a function in InferenceEngine, but let's
235  // keep
236  // this parallel region instead of breaking it and making another one to
237  // do
238  // the same stuff in 2 places since :
239  // !!! BNInferenceEngine still needs to be initialized here anyway !!!
240 
242 #pragma omp critical(Init)
243  {
244  // IBayesNet< GUM_SCALAR > * thread_bn = new IBayesNet< GUM_SCALAR
245  // >();//(this->_credalNet->current_bn());
246  *thread_bn = this->_credalNet->current_bn();
247  }
248  this->_workingSet[this_thread] = thread_bn;
249 
250  this->_l_marginalMin[this_thread] = this->_marginalMin;
251  this->_l_marginalMax[this_thread] = this->_marginalMax;
252  this->_l_expectationMin[this_thread] = this->_expectationMin;
253  this->_l_expectationMax[this_thread] = this->_expectationMax;
254  this->_l_modal[this_thread] = this->_modal;
255 
256  __infEs::_l_clusters[this_thread].resize(2);
257  __infEs::_l_clusters[this_thread][0] = __infEs::_t0;
258  __infEs::_l_clusters[this_thread][1] = __infEs::_t1;
259 
261  this->_l_marginalSets[this_thread] = this->_marginalSets;
262  }
263 
266  this->_workingSetE[this_thread] = evi_list;
267 
268  // #TODO: the next instruction works only for lazy propagation.
269  // => find a way to remove the second argument
270  BNInferenceEngine* inference_engine =
271  new BNInferenceEngine((this->_workingSet[this_thread]),
273 
274  this->_l_inferenceEngine[this_thread] = inference_engine;
275 
276  if (__infEs::_storeBNOpt) {
277  VarMod2BNsMap< GUM_SCALAR >* threadOpt =
279  this->_l_optimalNet[this_thread] = threadOpt;
280  }
281  }
282  }
283 
284  template < typename GUM_SCALAR, class BNInferenceEngine >
286  std::vector< bool >& toFill, const Idx value) const {
287  Idx n = value;
288  auto tfsize = toFill.size();
289 
290  // get bits of choosen_vertex
291  for (decltype(tfsize) i = 0; i < tfsize; i++) {
292  toFill[i] = n & 1;
293  n /= 2;
294  }
295  }
296 
297  template < typename GUM_SCALAR, class BNInferenceEngine >
298  inline void CNMonteCarloSampling< GUM_SCALAR,
299  BNInferenceEngine >::__verticesSampling() {
300  int this_thread = getThreadNumber();
301  IBayesNet< GUM_SCALAR >* working_bn = this->_workingSet[this_thread];
302 
303  const auto cpt = &this->_credalNet->credalNet_currentCpt();
304 
305  using dBN = std::vector< std::vector< std::vector< bool > > >;
306 
307  dBN sample;
308 
309  if (__infEs::_storeBNOpt) {
310  sample = dBN(this->_l_optimalNet[this_thread]->getSampleDef());
311  }
312 
314  const auto& t0 = __infEs::_l_clusters[this_thread][0];
315  const auto& t1 = __infEs::_l_clusters[this_thread][1];
316 
317  for (const auto& elt : t0) {
318  auto dSize = working_bn->variable(elt.first).domainSize();
319  Potential< GUM_SCALAR >* potential(
320  const_cast< Potential< GUM_SCALAR >* >(&working_bn->cpt(elt.first)));
321  std::vector< GUM_SCALAR > var_cpt(potential->domainSize());
322 
323  Size pconfs = Size((*cpt)[elt.first].size());
324 
325  for (Size pconf = 0; pconf < pconfs; pconf++) {
326  Size choosen_vertex = rand() % (*cpt)[elt.first][pconf].size();
327 
328  if (__infEs::_storeBNOpt) {
329  __binaryRep(sample[elt.first][pconf], choosen_vertex);
330  }
331 
332  for (Size mod = 0; mod < dSize; mod++) {
333  var_cpt[pconf * dSize + mod] =
334  (*cpt)[elt.first][pconf][choosen_vertex][mod];
335  }
336  } // end of : pconf
337 
338  potential->fillWith(var_cpt);
339 
340  Size t0esize = Size(elt.second.size());
341 
342  for (Size pos = 0; pos < t0esize; pos++) {
343  if (__infEs::_storeBNOpt) {
344  sample[elt.second[pos]] = sample[elt.first];
345  }
346 
347  Potential< GUM_SCALAR >* potential2(
348  const_cast< Potential< GUM_SCALAR >* >(
349  &working_bn->cpt(elt.second[pos])));
350  potential2->fillWith(var_cpt);
351  }
352  }
353 
354  for (const auto& elt : t1) {
355  auto dSize = working_bn->variable(elt.first).domainSize();
356  Potential< GUM_SCALAR >* potential(
357  const_cast< Potential< GUM_SCALAR >* >(&working_bn->cpt(elt.first)));
358  std::vector< GUM_SCALAR > var_cpt(potential->domainSize());
359 
360  for (Size pconf = 0; pconf < (*cpt)[elt.first].size(); pconf++) {
361  Idx choosen_vertex = Idx(rand() % (*cpt)[elt.first][pconf].size());
362 
363  if (__infEs::_storeBNOpt) {
364  __binaryRep(sample[elt.first][pconf], choosen_vertex);
365  }
366 
367  for (decltype(dSize) mod = 0; mod < dSize; mod++) {
368  var_cpt[pconf * dSize + mod] =
369  (*cpt)[elt.first][pconf][choosen_vertex][mod];
370  }
371  } // end of : pconf
372 
373  potential->fillWith(var_cpt);
374 
375  auto t1esize = elt.second.size();
376 
377  for (decltype(t1esize) pos = 0; pos < t1esize; pos++) {
378  if (__infEs::_storeBNOpt) {
379  sample[elt.second[pos]] = sample[elt.first];
380  }
381 
382  Potential< GUM_SCALAR >* potential2(
383  const_cast< Potential< GUM_SCALAR >* >(
384  &working_bn->cpt(elt.second[pos])));
385  potential2->fillWith(var_cpt);
386  }
387  }
388 
389  if (__infEs::_storeBNOpt) {
390  this->_l_optimalNet[this_thread]->setCurrentSample(sample);
391  }
392  } else {
393  for (auto node : working_bn->nodes()) {
394  auto dSize = working_bn->variable(node).domainSize();
395  Potential< GUM_SCALAR >* potential(
396  const_cast< Potential< GUM_SCALAR >* >(&working_bn->cpt(node)));
397  std::vector< GUM_SCALAR > var_cpt(potential->domainSize());
398 
399  auto pConfs = (*cpt)[node].size();
400 
401  for (decltype(pConfs) pconf = 0; pconf < pConfs; pconf++) {
402  Size nVertices = Size((*cpt)[node][pconf].size());
403  Idx choosen_vertex = Idx(rand() % nVertices);
404 
405  if (__infEs::_storeBNOpt) {
406  __binaryRep(sample[node][pconf], choosen_vertex);
407  }
408 
409  for (decltype(dSize) mod = 0; mod < dSize; mod++) {
410  var_cpt[pconf * dSize + mod] =
411  (*cpt)[node][pconf][choosen_vertex][mod];
412  }
413  } // end of : pconf
414 
415  potential->fillWith(var_cpt);
416  }
417 
418  if (__infEs::_storeBNOpt) {
419  this->_l_optimalNet[this_thread]->setCurrentSample(sample);
420  }
421  }
422  }
423 
424  template < typename GUM_SCALAR, class BNInferenceEngine >
425  inline void
427  if (this->_evidence.size() == 0) { return; }
428 
429  int this_thread = getThreadNumber();
430 
431  BNInferenceEngine* inference_engine = this->_l_inferenceEngine[this_thread];
432 
433  IBayesNet< GUM_SCALAR >* working_bn = this->_workingSet[this_thread];
434 
436  this->_workingSetE[this_thread];
437 
438  if (evi_list->size() > 0) {
439  for (const auto pot : *evi_list)
440  inference_engine->addEvidence(*pot);
441  return;
442  }
443 
444  for (const auto& elt : this->_evidence) {
446  (*p) << working_bn->variable(elt.first);
447 
448  try {
449  p->fillWith(elt.second);
450  } catch (Exception& err) {
451  GUM_SHOWERROR(err);
452  throw(err);
453  }
454 
455  evi_list->insert(p);
456  }
457 
458  if (evi_list->size() > 0) {
459  for (const auto pot : *evi_list)
460  inference_engine->addEvidence(*pot);
461  }
462  }
463 
464  } // namespace credal
465 } // namespace gum
aGrUM's Potential is a multi-dimensional array with tensor operators.
Definition: potential.h:60
void __mcThreadDataCopy()
Initialize threads data.
Class representing a Bayesian Network.
Definition: BayesNet.h:78
std::vector< BNInferenceEngine *> _l_inferenceEngine
Threads BNInferenceEngine.
void _initThreadsData(const Size &num_threads, const bool __storeVertices, const bool __storeBNOpt)
Initialize threads data.
virtual Size domainSize() const final
Returns the product of the variables domain size.
void disableMinEpsilonRate()
Disable stopping criterion on epsilon rate.
__expes _l_expectationMin
Threads lower expectations, one per thread.
unsigned int getNumberOfRunningThreads()
Get the current number of running threads.
Size size() const noexcept
Returns the number of elements stored into the hashtable.
unsigned int getThreadNumber()
Get the calling thread id.
bool _storeBNOpt
Iterations limit stopping rule used by some algorithms such as CNMonteCarloSampling.
void __insertEvidence()
Insert CredalNet evidence into a thread BNInferenceEngine.
#define GUM_SHOWERROR(e)
Definition: exceptions.h:61
void _dynamicExpectations()
Rearrange lower and upper expectations to suit dynamic networks.
credalSet _marginalSets
Credal sets vertices, if enabled.
margi _marginalMin
Lower marginals.
virtual const Potential< GUM_SCALAR > & cpt(NodeId varId) const =0
Returns the CPT of a variable.
void _optFusion()
Fusion of threads optimal IBayesNet.
__margis _l_marginalMin
Threads lower marginals, one per thread.
void setPeriodSize(Size p)
Sets the number of samples drawn between two tests of the stopping criteria.
void _expFusion()
Fusion of threads expectations.
void initApproximationScheme()
Initialise the scheme.
Generic doubly linked lists.
Definition: list.h:372
Class representing the minimal interface for Bayesian Network.
Definition: IBayesNet.h:62
Copyright 2005-2019 Pierre-Henri WUILLEMIN et Christophe GONZALES (LIP6) {prenom.nom}_at_lip6.fr.
Definition: agrum.h:25
std::vector< List< const Potential< GUM_SCALAR > *> *> _workingSetE
Threads evidence.
void _repetitiveInit()
Initialize _t0 and _t1 clusters.
expe _expectationMax
Upper expectations, if some variables modalities were inserted.
Size periodSize() const
Returns the period size.
void __threadInference()
Thread performs an inference using BNInferenceEngine.
cluster _t0
Clusters of nodes used with dynamic networks.
void setMaxTime(double timeout)
Stopping criterion on timeout.
CNMonteCarloSampling(const CredalNet< GUM_SCALAR > &credalNet)
Constructor.
std::vector< VarMod2BNsMap< GUM_SCALAR > *> _l_optimalNet
Threads optimal IBayesNet.
std::vector< __bnet *> _workingSet
Threads IBayesNet.
Class template representing a Credal Network.
Definition: credalNet.h:89
void __threadUpdate()
Update thread data after a IBayesNet inference.
virtual const DiscreteVariable & variable(NodeId id) const =0
Returns a constant reference over a variable given its node id.
bool continueApproximationScheme(double error)
Update the scheme w.r.t the new error.
void _updateMarginals()
Fusion of threads marginals.
void makeInference()
Starts the inference.
void __mcInitApproximationScheme()
Initialize approximation Scheme.
Copyright 2005-2019 Pierre-Henri WUILLEMIN et Christophe GONZALES (LIP6) {prenom.nom}_at_lip6.fr.
const CredalNet< GUM_SCALAR > * _credalNet
A pointer to the Credal Net used.
const NodeGraphPart & nodes() const
Returns a constant reference to the dag of this Bayes Net.
Definition: DAGmodel_inl.h:115
const Potential< GUM_SCALAR > & fillWith(const Potential< GUM_SCALAR > &src) const
copy a Potential data using name of variables and labels (not necessarily the same variables in the s...
Size size() const noexcept
Returns the number of elements in the list.
Definition: list_tpl.h:1851
const NodeGraphPart & nodes() const
return *this as a NodeGraphPart
void __verticesSampling()
Thread samples a IBayesNet from the CredalNet.
Base class for all aGrUM&#39;s exceptions.
Definition: exceptions.h:106
Copyright 2005-2019 Pierre-Henri WUILLEMIN et Christophe GONZALES (LIP6) {prenom.nom}_at_lip6.fr.
<agrum/CN/CNMonteCarloSampling.h>
__expes _l_expectationMax
Threads upper expectations, one per thread.
bool _repetitiveInd
True if using repetitive independence ( dynamic network only ), False otherwise.
dynExpe _modal
Variables modalities used to compute expectations.
Class for assigning/browsing values to tuples of discrete variables.
Definition: instantiation.h:83
Val & insert(const Val &val)
Inserts a new element at the end of the chained list (alias of pushBack).
Definition: list_tpl.h:1619
__clusters _l_clusters
Threads clusters.
margi _evidence
Holds observed variables states.
cluster _t1
Clusters of nodes used with dynamic networks.
expe _expectationMin
Lower expectations, if some variables modalities were inserted.
bool _updateThread(const NodeId &id, const std::vector< GUM_SCALAR > &vertex, const bool &elimRedund=false)
Update thread information (marginals, expectations, IBayesNet, vertices) for a given node id...
void setFirst()
Assign the first values to the tuple of the Instantiation.
void setEpsilon(double eps)
Given that we approximate f(t), stopping criterion on |f(t+1)-f(t)|.
void disableMaxIter()
Disable stopping criterion on max iterations.
Size Idx
Type for indexes.
Definition: types.h:53
Class template representing a CredalNet inference engine using one or more IBayesNet inference engine...
__margis _l_marginalMax
Threads upper marginals, one per thread.
bool _storeVertices
True if credal sets vertices are stored, False otherwise.
std::size_t Size
In aGrUM, hashed values are unsigned long int.
Definition: types.h:48
Class used to store optimum IBayesNet during some inference algorithms.
Definition: varMod2BNsMap.h:56
__credalSets _l_marginalSets
Threads vertices.
Base class for dag.
Definition: DAG.h:102
void __binaryRep(std::vector< bool > &toFill, const Idx value) const
Get the binary representation of a given value.
void enableMaxTime()
Enable stopping criterion on timeout.
margi _marginalMax
Upper marginals.
bool end() const
Returns true if the Instantiation reached the end.
const GUM_SCALAR _computeEpsilon()
Compute epsilon and update old marginals.
void updateApproximationScheme(unsigned int incr=1)
Update the scheme w.r.t the new error and increment steps.
void enableEpsilon()
Enable stopping criterion on epsilon.