aGrUM 0.16.0
multipleInferenceEngine_tpl.h
/**
 * Copyright 2005-2019 Pierre-Henri WUILLEMIN et Christophe GONZALES (LIP6)
 * {prenom.nom}_at_lip6.fr
 */

namespace gum {
  namespace credal {

    template < typename GUM_SCALAR, class BNInferenceEngine >
    MultipleInferenceEngine< GUM_SCALAR, BNInferenceEngine >::
       MultipleInferenceEngine(const CredalNet< GUM_SCALAR >& credalNet) :
        InferenceEngine< GUM_SCALAR >::InferenceEngine(credalNet) {
      GUM_CONSTRUCTOR(MultipleInferenceEngine);
    }

    template < typename GUM_SCALAR, class BNInferenceEngine >
    MultipleInferenceEngine< GUM_SCALAR,
                             BNInferenceEngine >::~MultipleInferenceEngine() {
      GUM_DESTRUCTOR(MultipleInferenceEngine);
    }

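    // Initialize threads data: every buffer below gets one slot per thread,
    // so each worker thread can update its own slot without synchronization.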
    template < typename GUM_SCALAR, class BNInferenceEngine >
    inline void
       MultipleInferenceEngine< GUM_SCALAR, BNInferenceEngine >::
          _initThreadsData(const Size& num_threads,
                           const bool  __storeVertices,
                           const bool  __storeBNOpt) {
      _workingSet.clear();
      _workingSet.resize(num_threads, nullptr);
      _workingSetE.clear();
      _workingSetE.resize(num_threads, nullptr);

      _l_marginalMin.clear();
      _l_marginalMin.resize(num_threads);
      _l_marginalMax.clear();
      _l_marginalMax.resize(num_threads);
      _l_expectationMin.clear();
      _l_expectationMin.resize(num_threads);
      _l_expectationMax.clear();
      _l_expectationMax.resize(num_threads);

      _l_clusters.clear();
      _l_clusters.resize(num_threads);

      if (__storeVertices) {
        _l_marginalSets.clear();
        _l_marginalSets.resize(num_threads);
      }

      if (__storeBNOpt) {
        for (Size ptr = 0; ptr < this->_l_optimalNet.size(); ptr++)
          if (this->_l_optimalNet[ptr] != nullptr) delete _l_optimalNet[ptr];

        _l_optimalNet.clear();
        _l_optimalNet.resize(num_threads);
      }

      _l_modal.clear();
      _l_modal.resize(num_threads);
      this->_oldMarginalMin.clear();
      this->_oldMarginalMin = this->_marginalMin;
      this->_oldMarginalMax.clear();
      this->_oldMarginalMax = this->_marginalMax;
    }

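    // Update the calling thread's local marginals, expectations, credal-set
    // vertices and optimal nets for node id with a new vertex; returns true
    // if at least one local marginal bound improved.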
    template < typename GUM_SCALAR, class BNInferenceEngine >
    inline bool
       MultipleInferenceEngine< GUM_SCALAR, BNInferenceEngine >::_updateThread(
          const NodeId&                    id,
          const std::vector< GUM_SCALAR >& vertex,
          const bool&                      elimRedund) {
      int tId = getThreadNumber();

      // save E(X) now if we don't save vertices (otherwise expectations are
      // computed at fusion time, from the final credal set)
      if (!__infE::_storeVertices && !_l_modal[tId].empty()) {
        std::string var_name = _workingSet[tId]->variable(id).name();
        auto        delim = var_name.find_first_of("_");
        var_name = var_name.substr(0, delim);

        if (_l_modal[tId].exists(var_name)) {
          GUM_SCALAR exp = 0;
          Size       vsize = Size(vertex.size());

          for (Size mod = 0; mod < vsize; mod++)
            exp += vertex[mod] * _l_modal[tId][var_name][mod];

          if (exp > _l_expectationMax[tId][id]) _l_expectationMax[tId][id] = exp;

          if (exp < _l_expectationMin[tId][id]) _l_expectationMin[tId][id] = exp;
        }
      }   // end of : if modal (map) not empty

      bool newOne = false;
      bool added = false;
      bool result = false;
      // during burn-in we keep checking local marginals, not global ones
      // (faster inference); we also don't store a dBN for observed variables,
      // since there would be a huge number of them (probably all of them)
      Size vsize = Size(vertex.size());

      for (Size mod = 0; mod < vsize; mod++) {
        if (vertex[mod] < _l_marginalMin[tId][id][mod]) {
          _l_marginalMin[tId][id][mod] = vertex[mod];
          newOne = true;

          if (__infE::_storeBNOpt && !__infE::_evidence.exists(id)) {
            std::vector< Size > key(3);
            key[0] = id;
            key[1] = mod;
            key[2] = 0;

            if (_l_optimalNet[tId]->insert(key, true)) result = true;
          }
        }

        if (vertex[mod] > _l_marginalMax[tId][id][mod]) {
          _l_marginalMax[tId][id][mod] = vertex[mod];
          newOne = true;

          if (__infE::_storeBNOpt && !__infE::_evidence.exists(id)) {
            std::vector< Size > key(3);
            key[0] = id;
            key[1] = mod;
            key[2] = 1;

            if (_l_optimalNet[tId]->insert(key, true)) result = true;
          }
        } else if (vertex[mod] == _l_marginalMin[tId][id][mod]
                   || vertex[mod] == _l_marginalMax[tId][id][mod]) {
          newOne = true;

          if (__infE::_storeBNOpt && vertex[mod] == _l_marginalMin[tId][id][mod]
              && !__infE::_evidence.exists(id)) {
            std::vector< Size > key(3);
            key[0] = id;
            key[1] = mod;
            key[2] = 0;

            if (_l_optimalNet[tId]->insert(key, false)) result = true;
          }

          if (__infE::_storeBNOpt && vertex[mod] == _l_marginalMax[tId][id][mod]
              && !__infE::_evidence.exists(id)) {
            std::vector< Size > key(3);
            key[0] = id;
            key[1] = mod;
            key[2] = 1;

            if (_l_optimalNet[tId]->insert(key, false)) result = true;
          }
        }

        // store the point to compute credal set vertices;
        // redundancy can be checked at each step or at the end
        if (__infE::_storeVertices && !added && newOne) {
          __updateThreadCredalSets(id, vertex, elimRedund);
          added = true;
        }
      }

      // return true only if at least one marginal bound improved and we store
      // optimal nets; otherwise the caller may discard this network
      if (__infE::_storeBNOpt && result) return true;

      return false;
    }

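    // Add a vertex to the calling thread's credal set of node id, skipping
    // duplicates; with elimRedund, redundant vertices are eliminated via lrs.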
    template < typename GUM_SCALAR, class BNInferenceEngine >
    inline void MultipleInferenceEngine< GUM_SCALAR, BNInferenceEngine >::
       __updateThreadCredalSets(const NodeId&                    id,
                                const std::vector< GUM_SCALAR >& vertex,
                                const bool&                      elimRedund) {
      int   tId = getThreadNumber();
      auto& nodeCredalSet = _l_marginalSets[tId][id];
      Size  dsize = Size(vertex.size());

      bool eq = true;

      for (auto it = nodeCredalSet.cbegin(), itEnd = nodeCredalSet.cend();
           it != itEnd;
           ++it) {
        eq = true;

        for (Size i = 0; i < dsize; i++) {
          if (std::fabs(vertex[i] - (*it)[i]) > 1e-6) {
            eq = false;
            break;
          }
        }

        if (eq) break;
      }

      if (!eq || nodeCredalSet.size() == 0) {
        nodeCredalSet.push_back(vertex);
        return;
      } else
        return;

      // this check is needed because the next lambda's keep condition,
      // fabs(*minIt - *maxIt) > 1e-6, is false when there is only one vertex
      if (nodeCredalSet.size() == 1) return;

      // check that the point and all previously added ones are not inside the
      // actual polytope
      auto itEnd = std::remove_if(
         nodeCredalSet.begin(),
         nodeCredalSet.end(),
         [&](const std::vector< GUM_SCALAR >& v) -> bool {
           for (auto jt = v.cbegin(),
                     jtEnd = v.cend(),
                     minIt = _l_marginalMin[tId][id].cbegin(),
                     minItEnd = _l_marginalMin[tId][id].cend(),
                     maxIt = _l_marginalMax[tId][id].cbegin(),
                     maxItEnd = _l_marginalMax[tId][id].cend();
                jt != jtEnd && minIt != minItEnd && maxIt != maxItEnd;
                ++jt, ++minIt, ++maxIt) {
             if ((std::fabs(*jt - *minIt) < 1e-6 || std::fabs(*jt - *maxIt) < 1e-6)
                 && std::fabs(*minIt - *maxIt) > 1e-6)
               return false;
           }
           return true;
         });

      nodeCredalSet.erase(itEnd, nodeCredalSet.end());

      // we need at least 2 points to make a convex combination
      if (!elimRedund || nodeCredalSet.size() <= 2) return;

      // there may be points not inside the polytope but on one of its facets,
      // meaning each is still a convex combination of that facet's vertices;
      // here we need lrs
      Size setSize = Size(nodeCredalSet.size());

      LRSWrapper< GUM_SCALAR > lrsWrapper;
      lrsWrapper.setUpV(dsize, setSize);

      for (const auto& vtx : nodeCredalSet)
        lrsWrapper.fillV(vtx);

      lrsWrapper.elimRedundVrep();

      _l_marginalSets[tId][id] = lrsWrapper.getOutput();
    }

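    // Fusion of threads marginals: reduce every thread-local bound into the
    // global _marginalMin / _marginalMax.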
    template < typename GUM_SCALAR, class BNInferenceEngine >
    inline void MultipleInferenceEngine< GUM_SCALAR,
                                         BNInferenceEngine >::_updateMarginals() {
#pragma omp parallel
      {
        int  threadId = getThreadNumber();
        long nsize = long(_workingSet[threadId]->size());

#pragma omp for
        for (long i = 0; i < nsize; i++) {
          Size dSize = Size(_l_marginalMin[threadId][i].size());

          for (Size j = 0; j < dSize; j++) {
            Size tsize = Size(_l_marginalMin.size());

            // go through all threads
            for (Size tId = 0; tId < tsize; tId++) {
              if (_l_marginalMin[tId][i][j] < this->_marginalMin[i][j])
                this->_marginalMin[i][j] = _l_marginalMin[tId][i][j];

              if (_l_marginalMax[tId][i][j] > this->_marginalMax[i][j])
                this->_marginalMax[i][j] = _l_marginalMax[tId][i][j];
            }   // end of : all threads
          }     // end of : all modalities
        }       // end of : all variables
      }         // end of : parallel region
    }

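    // Compute epsilon, the largest absolute change of any marginal bound
    // since the previous iteration, and refresh the old marginals.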
    template < typename GUM_SCALAR, class BNInferenceEngine >
    inline const GUM_SCALAR
       MultipleInferenceEngine< GUM_SCALAR,
                                BNInferenceEngine >::_computeEpsilon() {
      GUM_SCALAR eps = 0;
#pragma omp parallel
      {
        GUM_SCALAR tEps = 0;
        GUM_SCALAR delta;

        int  tId = getThreadNumber();
        long nsize = long(_workingSet[tId]->size());

#pragma omp for
        for (long i = 0; i < nsize; i++) {
          Size dSize = Size(_l_marginalMin[tId][i].size());

          for (Size j = 0; j < dSize; j++) {
            // on min
            delta = this->_marginalMin[i][j] - this->_oldMarginalMin[i][j];
            delta = (delta < 0) ? (-delta) : delta;
            tEps = (tEps < delta) ? delta : tEps;

            // on max
            delta = this->_marginalMax[i][j] - this->_oldMarginalMax[i][j];
            delta = (delta < 0) ? (-delta) : delta;
            tEps = (tEps < delta) ? delta : tEps;

            this->_oldMarginalMin[i][j] = this->_marginalMin[i][j];
            this->_oldMarginalMax[i][j] = this->_marginalMax[i][j];
          }
        }   // end of : all variables

#pragma omp critical(epsilon_max)
        {
#pragma omp flush(eps)
          eps = (eps < tEps) ? tEps : eps;
        }

      }   // end of : parallel region
      return eps;
    }

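    // Update old marginals from the thread-local ones; they serve as the
    // reference point for the next epsilon computation.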
    template < typename GUM_SCALAR, class BNInferenceEngine >
    void MultipleInferenceEngine< GUM_SCALAR,
                                  BNInferenceEngine >::_updateOldMarginals() {
#pragma omp parallel
      {
        int  threadId = getThreadNumber();
        long nsize = long(_workingSet[threadId]->size());

#pragma omp for
        for (long i = 0; i < nsize; i++) {
          Size dSize = Size(_l_marginalMin[threadId][i].size());

          for (Size j = 0; j < dSize; j++) {
            Size tsize = Size(_l_marginalMin.size());

            // go through all threads
            for (Size tId = 0; tId < tsize; tId++) {
              if (_l_marginalMin[tId][i][j] < this->_oldMarginalMin[i][j])
                this->_oldMarginalMin[i][j] = _l_marginalMin[tId][i][j];

              if (_l_marginalMax[tId][i][j] > this->_oldMarginalMax[i][j])
                this->_oldMarginalMax[i][j] = _l_marginalMax[tId][i][j];
            }   // end of : all threads
          }     // end of : all modalities
        }       // end of : all variables
      }         // end of : parallel region
    }

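    // Fusion of threads credal-set vertices into the global sets, with
    // redundancy elimination when vertices have dimension greater than 2.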
    template < typename GUM_SCALAR, class BNInferenceEngine >
    void MultipleInferenceEngine< GUM_SCALAR,
                                  BNInferenceEngine >::_verticesFusion() {
      // don't create threads if there are no vertices saved
      if (!__infE::_storeVertices) return;

#pragma omp parallel
      {
        int  threadId = getThreadNumber();
        Size nsize = Size(_workingSet[threadId]->size());

#pragma omp for
        for (long i = 0; i < long(nsize); i++) {
          Size tsize = Size(_l_marginalMin.size());

          // go through all threads
          for (long tId = 0; tId < long(tsize); tId++) {
            auto& nodeThreadCredalSet = _l_marginalSets[tId][i];

            // for each vertex, if we are at any opt marginal, add it to the set
            for (const auto& vtx : nodeThreadCredalSet) {
              // we run redundancy elimination at each step because with many
              // threads the set would otherwise get huge; BUT not if vertices
              // are of dimension 2: the opt check and equality are enough
              __infE::_updateCredalSets(i, vtx, vtx.size() > 2);
            }   // end of : nodeThreadCredalSet
          }     // end of : all threads
        }       // end of : all variables
      }         // end of : parallel region
    }

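    // Fusion of threads expectations: recompute them from the final
    // credal-set vertices when those are stored, otherwise reduce the
    // thread-local bounds.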
    template < typename GUM_SCALAR, class BNInferenceEngine >
    void MultipleInferenceEngine< GUM_SCALAR, BNInferenceEngine >::_expFusion() {
      // don't create threads if there are no modalities to compute expectations
      if (this->_modal.empty()) return;

      // we can compute expectations from vertices of the final credal set
      if (__infE::_storeVertices) {
#pragma omp parallel
        {
          int threadId = getThreadNumber();

          if (!this->_l_modal[threadId].empty()) {
            Size nsize = Size(_workingSet[threadId]->size());

#pragma omp for
            for (long i = 0; i < long(nsize); i++) {   // i needs to be signed
                                                       // (omp with visual c++ 15)
              std::string var_name = _workingSet[threadId]->variable(i).name();
              auto        delim = var_name.find_first_of("_");
              var_name = var_name.substr(0, delim);

              if (!_l_modal[threadId].exists(var_name)) continue;

              for (const auto& vertex : __infE::_marginalSets[i]) {
                GUM_SCALAR exp = 0;
                Size       vsize = Size(vertex.size());

                for (Size mod = 0; mod < vsize; mod++)
                  exp += vertex[mod] * _l_modal[threadId][var_name][mod];

                if (exp > __infE::_expectationMax[i])
                  __infE::_expectationMax[i] = exp;

                if (exp < __infE::_expectationMin[i])
                  __infE::_expectationMin[i] = exp;
              }
            }   // end of : each variable parallel for
          }     // end of : if this thread has modals
        }       // end of : parallel region
        return;
      }

#pragma omp parallel
      {
        int threadId = getThreadNumber();

        if (!this->_l_modal[threadId].empty()) {
          Size nsize = Size(_workingSet[threadId]->size());
#pragma omp for
          for (long i = 0; i < long(nsize); i++) {   // long instead of Idx due
                                                     // to omp with visual c++ 15
            std::string var_name = _workingSet[threadId]->variable(i).name();
            auto        delim = var_name.find_first_of("_");
            var_name = var_name.substr(0, delim);

            if (!_l_modal[threadId].exists(var_name)) continue;

            Size tsize = Size(_l_expectationMax.size());

            for (Idx tId = 0; tId < tsize; tId++) {
              if (_l_expectationMax[tId][i] > this->_expectationMax[i])
                this->_expectationMax[i] = _l_expectationMax[tId][i];

              if (_l_expectationMin[tId][i] < this->_expectationMin[i])
                this->_expectationMin[i] = _l_expectationMin[tId][i];
            }   // end of : each thread
          }     // end of : each variable
        }       // end of : if modals not empty
      }         // end of : parallel region
    }

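    // Fusion of threads optimal IBayesNet: collect, for every unobserved node
    // and modality, the nets that attained the global marginal bounds.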
    template < typename GUM_SCALAR, class BNInferenceEngine >
    void MultipleInferenceEngine< GUM_SCALAR, BNInferenceEngine >::_optFusion() {
      typedef std::vector< bool > dBN;

      Size nsize = Size(_workingSet[0]->size());

      // no parallel insert in hash-tables (OptBN)
      for (Idx i = 0; i < nsize; i++) {
        // we don't store anything for observed variables
        if (__infE::_evidence.exists(i)) continue;

        Size dSize = Size(_l_marginalMin[0][i].size());

        for (Size j = 0; j < dSize; j++) {
          // go through all threads
          std::vector< Size > keymin(3);
          keymin[0] = i;
          keymin[1] = j;
          keymin[2] = 0;
          std::vector< Size > keymax(keymin);
          keymax[2] = 1;

          Size tsize = Size(_l_marginalMin.size());

          for (Size tId = 0; tId < tsize; tId++) {
            if (_l_marginalMin[tId][i][j] == this->_marginalMin[i][j]) {
              const std::vector< dBN* >& tOpts =
                 _l_optimalNet[tId]->getBNOptsFromKey(keymin);
              Size osize = Size(tOpts.size());

              for (Size bn = 0; bn < osize; bn++) {
                __infE::_dbnOpt.insert(*tOpts[bn], keymin);
              }
            }

            if (_l_marginalMax[tId][i][j] == this->_marginalMax[i][j]) {
              const std::vector< dBN* >& tOpts =
                 _l_optimalNet[tId]->getBNOptsFromKey(keymax);
              Size osize = Size(tOpts.size());

              for (Size bn = 0; bn < osize; bn++) {
                __infE::_dbnOpt.insert(*tOpts[bn], keymax);
              }
            }
          }   // end of : all threads
        }     // end of : all modalities
      }       // end of : all variables
    }

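    // Erase all inference-related data and free the thread-local structures
    // so that another inference can be performed.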
    template < typename GUM_SCALAR, class BNInferenceEngine >
    void MultipleInferenceEngine< GUM_SCALAR,
                                  BNInferenceEngine >::eraseAllEvidence() {
      __infE::eraseAllEvidence();
      Size tsize = Size(_workingSet.size());

      // delete pointers
      for (Size bn = 0; bn < tsize; bn++) {
        if (__infE::_storeVertices) _l_marginalSets[bn].clear();

        if (_workingSet[bn] != nullptr) delete _workingSet[bn];

        if (__infE::_storeBNOpt)
          if (_l_inferenceEngine[bn] != nullptr) delete _l_optimalNet[bn];

        if (this->_workingSetE[bn] != nullptr) {
          for (const auto ev : *_workingSetE[bn])
            delete ev;

          delete _workingSetE[bn];
        }

        if (_l_inferenceEngine[bn] != nullptr) delete _l_inferenceEngine[bn];
      }

      // this is important: these will be resized with the correct number of
      // threads by _initThreadsData
      _workingSet.clear();
      _workingSetE.clear();
      _l_inferenceEngine.clear();
      _l_optimalNet.clear();

      _l_marginalMin.clear();
      _l_marginalMax.clear();
      _l_expectationMin.clear();
      _l_expectationMax.clear();
      _l_modal.clear();
      _l_marginalSets.clear();
      _l_evidence.clear();
      _l_clusters.clear();
    }
  }   // namespace credal
}   // namespace gum
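
For context, a minimal usage sketch (not part of this file): CNMonteCarloSampling derives from MultipleInferenceEngine, so the thread initialization and fusion routines above run behind a single makeInference() call. Header paths and class names are assumed from aGrUM 0.16; the BIF file names and the node id are placeholders.

#include <agrum/CN/CNMonteCarloSampling.h>
#include <agrum/CN/credalNet.h>

int main() {
  // build a credal network from interval BIF files (placeholder paths)
  gum::credal::CredalNet< double > cn("cn_min.bif", "cn_max.bif");
  cn.intervalToCredal();

  // a concrete MultipleInferenceEngine: one BN inference engine per thread
  gum::credal::CNMonteCarloSampling< double > mcs(cn);
  mcs.makeInference();

  // global bounds produced by the _updateMarginals() fusion above
  const auto& pmin = mcs.marginalMin(0);   // lower marginal of node 0
  const auto& pmax = mcs.marginalMax(0);   // upper marginal of node 0
  return 0;
}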