aGrUM  0.13.2
multipleInferenceEngine_tpl.h
Go to the documentation of this file.
1 
2 /***************************************************************************
3  * Copyright (C) 2017 by Pierre-Henri WUILLEMIN and Christophe GONZALES *
4  * {prenom.nom}_at_lip6.fr *
5  * *
6  * This program is free software; you can redistribute it and/or modify *
7  * it under the terms of the GNU General Public License as published by *
8  * the Free Software Foundation; either version 2 of the License, or *
9  * (at your option) any later version. *
10  * *
11  * This program is distributed in the hope that it will be useful, *
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of *
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
14  * GNU General Public License for more details. *
15  * *
16  * You should have received a copy of the GNU General Public License *
17  * along with this program; if not, write to the *
18  * Free Software Foundation, Inc., *
19  * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. *
20  ***************************************************************************/
21 
22 
25 
26 namespace gum {
27  namespace credal {
28 
29  template < typename GUM_SCALAR, class BNInferenceEngine >
32  InferenceEngine< GUM_SCALAR >::InferenceEngine(credalNet) {
33  GUM_CONSTRUCTOR(MultipleInferenceEngine);
34  }
35 
36  template < typename GUM_SCALAR, class BNInferenceEngine >
37  MultipleInferenceEngine< GUM_SCALAR,
38  BNInferenceEngine >::~MultipleInferenceEngine() {
39  GUM_DESTRUCTOR(MultipleInferenceEngine);
40  }
41 
42  template < typename GUM_SCALAR, class BNInferenceEngine >
43  inline void
45  const Size& num_threads,
46  const bool __storeVertices,
47  const bool __storeBNOpt) {
48  _workingSet.clear();
49  _workingSet.resize(num_threads, nullptr);
50  _workingSetE.clear();
51  _workingSetE.resize(num_threads, nullptr);
52 
53  _l_marginalMin.clear();
54  _l_marginalMin.resize(num_threads);
55  _l_marginalMax.clear();
56  _l_marginalMax.resize(num_threads);
57  _l_expectationMin.clear();
58  _l_expectationMin.resize(num_threads);
59  _l_expectationMax.clear();
60  _l_expectationMax.resize(num_threads);
61 
62  _l_clusters.clear();
63  _l_clusters.resize(num_threads);
64 
65  if (__storeVertices) {
66  _l_marginalSets.clear();
67  _l_marginalSets.resize(num_threads);
68  }
69 
70  if (__storeBNOpt) {
71  for (Size ptr = 0; ptr < this->_l_optimalNet.size(); ptr++)
72  if (this->_l_optimalNet[ptr] != nullptr) delete _l_optimalNet[ptr];
73 
74  _l_optimalNet.clear();
75  _l_optimalNet.resize(num_threads);
76  }
77 
78  _l_modal.clear();
79  _l_modal.resize(num_threads);
80 
82  this->_oldMarginalMin = this->_marginalMin;
83  this->_oldMarginalMax.clear();
84  this->_oldMarginalMax = this->_marginalMax;
85  }
86 
87  template < typename GUM_SCALAR, class BNInferenceEngine >
88  inline bool
90  const NodeId& id,
91  const std::vector< GUM_SCALAR >& vertex,
92  const bool& elimRedund) {
93  int tId = getThreadNumber();
94 
95  // save E(X) if we don't save vertices
96  if (!__infE::_storeVertices && !_l_modal[tId].empty()) {
97  std::string var_name = _workingSet[tId]->variable(id).name();
98  auto delim = var_name.find_first_of("_");
99  var_name = var_name.substr(0, delim);
100 
101  if (_l_modal[tId].exists(var_name)) {
102  GUM_SCALAR exp = 0;
103  Size vsize = Size(vertex.size());
104 
105  for (Size mod = 0; mod < vsize; mod++)
106  exp += vertex[mod] * _l_modal[tId][var_name][mod];
107 
108  if (exp > _l_expectationMax[tId][id]) _l_expectationMax[tId][id] = exp;
109 
110  if (exp < _l_expectationMin[tId][id]) _l_expectationMin[tId][id] = exp;
111  }
112  } // end of : if modal (map) not empty
113 
114  bool newOne = false;
115  bool added = false;
116  bool result = false;
117  // for burn in, we need to keep checking on local marginals and not global
118  // ones
119  // (faster inference)
120  // we also don't want to store dbn for observed variables since there will
121  // be a
122  // huge number of them (probably all of them).
123  Size vsize = Size(vertex.size());
124 
125  for (Size mod = 0; mod < vsize; mod++) {
126  if (vertex[mod] < _l_marginalMin[tId][id][mod]) {
127  _l_marginalMin[tId][id][mod] = vertex[mod];
128  newOne = true;
129 
130  if (__infE::_storeBNOpt && !__infE::_evidence.exists(id)) {
131  std::vector< Size > key(3);
132  key[0] = id;
133  key[1] = mod;
134  key[2] = 0;
135 
136  if (_l_optimalNet[tId]->insert(key, true)) result = true;
137  }
138  }
139 
140  if (vertex[mod] > _l_marginalMax[tId][id][mod]) {
141  _l_marginalMax[tId][id][mod] = vertex[mod];
142  newOne = true;
143 
144  if (__infE::_storeBNOpt && !__infE::_evidence.exists(id)) {
145  std::vector< Size > key(3);
146  key[0] = id;
147  key[1] = mod;
148  key[2] = 1;
149 
150  if (_l_optimalNet[tId]->insert(key, true)) result = true;
151  }
152  } else if (vertex[mod] == _l_marginalMin[tId][id][mod]
153  || vertex[mod] == _l_marginalMax[tId][id][mod]) {
154  newOne = true;
155 
156  if (__infE::_storeBNOpt && vertex[mod] == _l_marginalMin[tId][id][mod]
157  && !__infE::_evidence.exists(id)) {
158  std::vector< Size > key(3);
159  key[0] = id;
160  key[1] = mod;
161  key[2] = 0;
162 
163  if (_l_optimalNet[tId]->insert(key, false)) result = true;
164  }
165 
166  if (__infE::_storeBNOpt && vertex[mod] == _l_marginalMax[tId][id][mod]
167  && !__infE::_evidence.exists(id)) {
168  std::vector< Size > key(3);
169  key[0] = id;
170  key[1] = mod;
171  key[2] = 1;
172 
173  if (_l_optimalNet[tId]->insert(key, false)) result = true;
174  }
175  }
176 
177  // store point to compute credal set vertices.
178  // check for redundancy at each step or at the end ?
179  if (__infE::_storeVertices && !added && newOne) {
180  __updateThreadCredalSets(id, vertex, elimRedund);
181  added = true;
182  }
183  }
184 
185  // if all variables didn't get better marginals, we will delete
186  if (__infE::_storeBNOpt && result) return true;
187 
188  return false;
189  }
190 
191  template < typename GUM_SCALAR, class BNInferenceEngine >
194  const std::vector< GUM_SCALAR >& vertex,
195  const bool& elimRedund) {
196  int tId = getThreadNumber();
197  auto& nodeCredalSet = _l_marginalSets[tId][id];
198  Size dsize = Size(vertex.size());
199 
200  bool eq = true;
201 
202  for (auto it = nodeCredalSet.cbegin(), itEnd = nodeCredalSet.cend();
203  it != itEnd;
204  ++it) {
205  eq = true;
206 
207  for (Size i = 0; i < dsize; i++) {
208  if (std::fabs(vertex[i] - (*it)[i]) > 1e-6) {
209  eq = false;
210  break;
211  }
212  }
213 
214  if (eq) break;
215  }
216 
217  if (!eq || nodeCredalSet.size() == 0) {
218  nodeCredalSet.push_back(vertex);
219  return;
220  } else
221  return;
222 
226  if (nodeCredalSet.size() == 1) return;
227 
228  // check that the point and all previously added ones are not inside the
229  // actual
230  // polytope
231  auto itEnd = std::remove_if(
232  nodeCredalSet.begin(),
233  nodeCredalSet.end(),
234  [&](const std::vector< GUM_SCALAR >& v) -> bool {
235  for (auto jt = v.cbegin(),
236  jtEnd = v.cend(),
237  minIt = _l_marginalMin[tId][id].cbegin(),
238  minItEnd = _l_marginalMin[tId][id].cend(),
239  maxIt = _l_marginalMax[tId][id].cbegin(),
240  maxItEnd = _l_marginalMax[tId][id].cend();
241  jt != jtEnd && minIt != minItEnd && maxIt != maxItEnd;
242  ++jt, ++minIt, ++maxIt) {
243  if ((std::fabs(*jt - *minIt) < 1e-6 || std::fabs(*jt - *maxIt) < 1e-6)
244  && std::fabs(*minIt - *maxIt) > 1e-6)
245  return false;
246  }
247  return true;
248  });
249 
250  nodeCredalSet.erase(itEnd, nodeCredalSet.end());
251 
252  // we need at least 2 points to make a convex combination
253  if (!elimRedund || nodeCredalSet.size() <= 2) return;
254 
255  // there may be points not inside the polytope but on one of it's facet,
256  // meaning it's still a convex combination of vertices of this facet. Here
257  // we
258  // need lrs.
259  Size setSize = Size(nodeCredalSet.size());
260 
261  LRSWrapper< GUM_SCALAR > lrsWrapper;
262  lrsWrapper.setUpV(dsize, setSize);
263 
264  for (const auto& vtx : nodeCredalSet)
265  lrsWrapper.fillV(vtx);
266 
267  lrsWrapper.elimRedundVrep();
268 
269  _l_marginalSets[tId][id] = lrsWrapper.getOutput();
270  }
271 
272  template < typename GUM_SCALAR, class BNInferenceEngine >
273  inline void MultipleInferenceEngine< GUM_SCALAR,
274  BNInferenceEngine >::_updateMarginals() {
275 #pragma omp parallel
276  {
277  int threadId = getThreadNumber();
278  long nsize = long(_workingSet[threadId]->size());
279 
280 #pragma omp for
281 
282  for (long i = 0; i < nsize; i++) {
283  Size dSize = Size(_l_marginalMin[threadId][i].size());
284 
285  for (Size j = 0; j < dSize; j++) {
286  Size tsize = Size(_l_marginalMin.size());
287 
288  // go through all threads
289  for (Size tId = 0; tId < tsize; tId++) {
290  if (_l_marginalMin[tId][i][j] < this->_marginalMin[i][j])
291  this->_marginalMin[i][j] = _l_marginalMin[tId][i][j];
292 
293  if (_l_marginalMax[tId][i][j] > this->_marginalMax[i][j])
294  this->_marginalMax[i][j] = _l_marginalMax[tId][i][j];
295  } // end of : all threads
296  } // end of : all modalities
297  } // end of : all variables
298  } // end of : parallel region
299  }
300 
301  template < typename GUM_SCALAR, class BNInferenceEngine >
302  inline const GUM_SCALAR
304  GUM_SCALAR eps = 0;
305 #pragma omp parallel
306  {
307  GUM_SCALAR tEps = 0;
308  GUM_SCALAR delta;
309 
310  int tId = getThreadNumber();
311  long nsize = long(_workingSet[tId]->size());
312 
313 #pragma omp for
314 
315  for (long i = 0; i < nsize; i++) {
316  Size dSize = Size(_l_marginalMin[tId][i].size());
317 
318  for (Size j = 0; j < dSize; j++) {
319  // on min
320  delta = this->_marginalMin[i][j] - this->_oldMarginalMin[i][j];
321  delta = (delta < 0) ? (-delta) : delta;
322  tEps = (tEps < delta) ? delta : tEps;
323 
324  // on max
325  delta = this->_marginalMax[i][j] - this->_oldMarginalMax[i][j];
326  delta = (delta < 0) ? (-delta) : delta;
327  tEps = (tEps < delta) ? delta : tEps;
328 
329  this->_oldMarginalMin[i][j] = this->_marginalMin[i][j];
330  this->_oldMarginalMax[i][j] = this->_marginalMax[i][j];
331  }
332  } // end of : all variables
333 
334 #pragma omp critical(epsilon_max)
335  {
336 #pragma omp flush(eps)
337  eps = (eps < tEps) ? tEps : eps;
338  }
339 
340  } // end of : parallel region
341  return eps;
342  }
343 
344  template < typename GUM_SCALAR, class BNInferenceEngine >
345  void MultipleInferenceEngine< GUM_SCALAR,
346  BNInferenceEngine >::_updateOldMarginals() {
347 #pragma omp parallel
348  {
349  int threadId = getThreadNumber();
350  long nsize = long(_workingSet[threadId]->size());
351 
352 #pragma omp for
353 
354  for (long i = 0; i < nsize; i++) {
355  Size dSize = Size(_l_marginalMin[threadId][i].size());
356 
357  for (Size j = 0; j < dSize; j++) {
358  Size tsize = Size(_l_marginalMin.size());
359 
360  // go through all threads
361  for (Size tId = 0; tId < tsize; tId++) {
362  if (_l_marginalMin[tId][i][j] < this->_oldMarginalMin[i][j])
363  this->_oldMarginalMin[i][j] = _l_marginalMin[tId][i][j];
364 
365  if (_l_marginalMax[tId][i][j] > this->_oldMarginalMax[i][j])
366  this->_oldMarginalMax[i][j] = _l_marginalMax[tId][i][j];
367  } // end of : all threads
368  } // end of : all modalities
369  } // end of : all variables
370  } // end of : parallel region
371  }
372 
373  template < typename GUM_SCALAR, class BNInferenceEngine >
374  void
376  // don't create threads if there are no vertices saved
377  if (!__infE::_storeVertices) return;
378 
379 #pragma omp parallel
380  {
381  int threadId = getThreadNumber();
382  Size nsize = Size(_workingSet[threadId]->size());
383 
384 #pragma omp for
385 
386  for (long i = 0; i < long(nsize); i++) {
387  Size tsize = Size(_l_marginalMin.size());
388  Size dSize = Size(_l_marginalMin[threadId][i].size());
389 
390  // go through all threads
391  for (long tId = 0; tId < long(tsize); tId++) {
392  auto& nodeThreadCredalSet = _l_marginalSets[tId][i];
393 
394  // for each vertex, if we are at any opt marginal, add it to the set
395  for (const auto& vtx : nodeThreadCredalSet) {
396  // we run redundancy elimination at each step
397  // because there could be 100000 threads and the set will be so
398  // huge
399  // ...
400  // BUT not if vertices are of dimension 2 ! opt check and equality
401  // should be enough
402  __infE::_updateCredalSets(i, vtx, (vtx.size() > 2) ? true : false);
403  } // end of : nodeThreadCredalSet
404  } // end of : all threads
405  } // end of : all variables
406  } // end of : parallel region
407  }
408 
409  template < typename GUM_SCALAR, class BNInferenceEngine >
411  // don't create threads if there are no modalities to compute expectations
412  if (this->_modal.empty()) return;
413 
414  // we can compute expectations from vertices of the final credal set
416 #pragma omp parallel
417  {
418  int threadId = getThreadNumber();
419 
420  if (!this->_l_modal[threadId].empty()) {
421  Size nsize = Size(_workingSet[threadId]->size());
422 
423 #pragma omp for
424 
425  for (long i = 0; i < long(nsize);
426  i++) { // i needs to be signed (due to omp with visual c++
427  // 15)
428  std::string var_name = _workingSet[threadId]->variable(i).name();
429  auto delim = var_name.find_first_of("_");
430  var_name = var_name.substr(0, delim);
431 
432  if (!_l_modal[threadId].exists(var_name)) continue;
433 
434  Size setsize = Size(__infE::_marginalSets[i].size());
435 
436  for (const auto& vertex : __infE::_marginalSets[i]) {
437  GUM_SCALAR exp = 0;
438  Size vsize = Size(vertex.size());
439 
440  for (Size mod = 0; mod < vsize; mod++)
441  exp += vertex[mod] * _l_modal[threadId][var_name][mod];
442 
443  if (exp > __infE::_expectationMax[i])
444  __infE::_expectationMax[i] = exp;
445 
446  if (exp < __infE::_expectationMin[i])
447  __infE::_expectationMin[i] = exp;
448  }
449  } // end of : each variable parallel for
450  } // end of : if this thread has modals
451  } // end of parallel region
452  return;
453  }
454 
455 #pragma omp parallel
456  {
457  int threadId = getThreadNumber();
458 
459  if (!this->_l_modal[threadId].empty()) {
460  Size nsize = Size(_workingSet[threadId]->size());
461 #pragma omp for
462  for (long i = 0; i < long(nsize);
463  i++) { // long instead of Idx due to omp for visual C++15
464  std::string var_name = _workingSet[threadId]->variable(i).name();
465  auto delim = var_name.find_first_of("_");
466  var_name = var_name.substr(0, delim);
467 
468  if (!_l_modal[threadId].exists(var_name)) continue;
469 
470  Size tsize = Size(_l_expectationMax.size());
471 
472  for (Idx tId = 0; tId < tsize; tId++) {
473  if (_l_expectationMax[tId][i] > this->_expectationMax[i])
474  this->_expectationMax[i] = _l_expectationMax[tId][i];
475 
476  if (_l_expectationMin[tId][i] < this->_expectationMin[i])
477  this->_expectationMin[i] = _l_expectationMin[tId][i];
478  } // end of : each thread
479  } // end of : each variable
480  } // end of : if modals not empty
481  } // end of : parallel region
482  }
483 
484  template < typename GUM_SCALAR, class BNInferenceEngine >
486  typedef std::vector< bool > dBN;
487 
488  Size nsize = Size(_workingSet[0]->size());
489 
490  // no parallel insert in hash-tables (OptBN)
491  for (Idx i = 0; i < nsize; i++) {
492  // we don't store anything for observed variables
493  if (__infE::_evidence.exists(i)) continue;
494 
495  Size dSize = Size(_l_marginalMin[0][i].size());
496 
497  for (Size j = 0; j < dSize; j++) {
498  // go through all threads
499  std::vector< Size > keymin(3);
500  keymin[0] = i;
501  keymin[1] = j;
502  keymin[2] = 0;
503  std::vector< Size > keymax(keymin);
504  keymax[2] = 1;
505 
506  Size tsize = Size(_l_marginalMin.size());
507 
508  for (Size tId = 0; tId < tsize; tId++) {
509  if (_l_marginalMin[tId][i][j] == this->_marginalMin[i][j]) {
510  const std::vector< dBN* >& tOpts =
511  _l_optimalNet[tId]->getBNOptsFromKey(keymin);
512  Size osize = Size(tOpts.size());
513 
514  for (Size bn = 0; bn < osize; bn++) {
515  __infE::_dbnOpt.insert(*tOpts[bn], keymin);
516  }
517  }
518 
519  if (_l_marginalMax[tId][i][j] == this->_marginalMax[i][j]) {
520  const std::vector< dBN* >& tOpts =
521  _l_optimalNet[tId]->getBNOptsFromKey(keymax);
522  Size osize = Size(tOpts.size());
523 
524  for (Size bn = 0; bn < osize; bn++) {
525  __infE::_dbnOpt.insert(*tOpts[bn], keymax);
526  }
527  }
528  } // end of : all threads
529  } // end of : all modalities
530  } // end of : all variables
531  }
532 
533  template < typename GUM_SCALAR, class BNInferenceEngine >
534  void MultipleInferenceEngine< GUM_SCALAR,
535  BNInferenceEngine >::eraseAllEvidence() {
537  Size tsize = Size(_workingSet.size());
538 
539  // delete pointers
540  for (Size bn = 0; bn < tsize; bn++) {
541  if (__infE::_storeVertices) _l_marginalSets[bn].clear();
542 
543  if (_workingSet[bn] != nullptr) delete _workingSet[bn];
544 
546  if (_l_inferenceEngine[bn] != nullptr) delete _l_optimalNet[bn];
547 
548  if (this->_workingSetE[bn] != nullptr) {
549  for (const auto ev : *_workingSetE[bn])
550  delete ev;
551 
552  delete _workingSetE[bn];
553  }
554 
555  if (_l_inferenceEngine[bn] != nullptr) delete _l_inferenceEngine[bn];
556  }
557 
558  // this is important, those will be resized with the correct number of
559  // threads.
560 
561  _workingSet.clear();
562  _workingSetE.clear();
563  _l_inferenceEngine.clear();
564  _l_optimalNet.clear();
565 
566  _l_marginalMin.clear();
567  _l_marginalMax.clear();
568  _l_expectationMin.clear();
569  _l_expectationMax.clear();
570  _l_modal.clear();
571  _l_marginalSets.clear();
572  _l_evidence.clear();
573  _l_clusters.clear();
574  }
575  } // namespace credal
576 } // namespace gum
std::vector< BNInferenceEngine * > _l_inferenceEngine
Threads BNInferenceEngine.
Abstract class representing CredalNet inference engines.
unsigned long Size
In aGrUM, hashed values are unsigned long int.
Definition: types.h:50
void _initThreadsData(const Size &num_threads, const bool __storeVertices, const bool __storeBNOpt)
Initialize threads data.
margi _oldMarginalMin
Old lower marginals used to compute epsilon.
unsigned int NodeId
Type for node ids.
Definition: graphElements.h:97
__expes _l_expectationMin
Threads lower expectations, one per thread.
void setUpV(const unsigned int &card, const unsigned int &vertices)
Sets up a V-representation.
unsigned int getThreadNumber()
Get the calling thread id.
bool _storeBNOpt
True if the optimal IBayesNets are stored during inference, False otherwise.
credalSet _marginalSets
Credal sets vertices, if enabled.
margi _marginalMin
Lower marginals.
void _optFusion()
Fusion of threads optimal IBayesNet.
__margis _l_marginalMin
Threads lower marginals, one per thread.
void _expFusion()
Fusion of threads expectations.
margi _oldMarginalMax
Old upper marginals used to compute epsilon.
gum is the global namespace for all aGrUM entities
Definition: agrum.h:25
std::vector< List< const Potential< GUM_SCALAR > * > * > _workingSetE
Threads evidence.
Abstract class representing CredalNet inference engines.
MultipleInferenceEngine(const CredalNet< GUM_SCALAR > &credalNet)
Constructor.
expe _expectationMax
Upper expectations, if some variables modalities were inserted.
virtual void eraseAllEvidence()
Erase all inference related data to perform another one.
std::vector< VarMod2BNsMap< GUM_SCALAR > * > _l_optimalNet
Threads optimal IBayesNet.
std::vector< __bnet * > _workingSet
Threads IBayesNet.
Class template representing a Credal Network.
Definition: credalNet.h:87
Class template acting as a wrapper for Lexicographic Reverse Search by David Avis.
Definition: LrsWrapper.h:104
void _updateMarginals()
Fusion of threads marginals.
VarMod2BNsMap< GUM_SCALAR > _dbnOpt
Object used to efficiently store optimal bayes net during inference, for some algorithms.
void _updateOldMarginals()
Update old marginals (from current marginals).
__expes _l_expectationMax
Threads upper expectations, one per thread.
dynExpe _modal
Variables modalities used to compute expectations.
void _updateCredalSets(const NodeId &id, const std::vector< GUM_SCALAR > &vertex, const bool &elimRedund=false)
Given a node id and one of its possible vertices, update its credal set.
__clusters _l_clusters
Threads clusters.
Abstract class template representing a CredalNet inference engine.
margi _evidence
Holds observed variables states.
void clear()
Removes all the elements in the hash table.
expe _expectationMin
Lower expectations, if some variables modalities were inserted.
bool _updateThread(const NodeId &id, const std::vector< GUM_SCALAR > &vertex, const bool &elimRedund=false)
Update thread information (marginals, expectations, IBayesNet, vertices) for a given node id...
Class template representing a CredalNet inference engine using one or more IBayesNet inference engine...
__margis _l_marginalMax
Threads upper marginals, one per thread.
bool _storeVertices
True if credal sets vertices are stored, False otherwise.
unsigned long Idx
Type for indexes.
Definition: types.h:43
__credalSets _l_marginalSets
Threads vertices.
void __updateThreadCredalSets(const NodeId &id, const std::vector< GUM_SCALAR > &vertex, const bool &elimRedund)
Ask for redundancy elimination of a node credal set of a calling thread.
margi _marginalMax
Upper marginals.
virtual void eraseAllEvidence()
Erase all inference related data to perform another one.
const GUM_SCALAR _computeEpsilon()
Compute epsilon and update old marginals.