multipleInferenceEngine_tpl.h
/**
 *
 * Copyright (c) 2005-2021 by Pierre-Henri WUILLEMIN(@LIP6) & Christophe GONZALES(@AMU)
 * info_at_agrum_dot_org
 *
 * This library is free software: you can redistribute it and/or modify
 * it under the terms of the GNU Lesser General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public License
 * along with this library. If not, see <http://www.gnu.org/licenses/>.
 *
 */

#include <agrum/CN/inference/inferenceEngine.h>
#include <agrum/CN/inference/multipleInferenceEngine.h>

namespace gum {
  namespace credal {

    template < typename GUM_SCALAR, class BNInferenceEngine >
    MultipleInferenceEngine< GUM_SCALAR, BNInferenceEngine >::MultipleInferenceEngine(
       const CredalNet< GUM_SCALAR >& credalNet) :
        InferenceEngine< GUM_SCALAR >::InferenceEngine(credalNet) {
      GUM_CONSTRUCTOR(MultipleInferenceEngine);
    }
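
    // Illustrative usage sketch (not part of this file): MultipleInferenceEngine is
    // the multi-threaded base of credal inference engines such as
    // CNMonteCarloSampling. The exact call sequence below is an assumption about
    // the public API, not a normative example.
    // @code
    // gum::credal::CredalNet< double > cn("min.bif", "max.bif");
    // cn.intervalToCredal();
    // gum::credal::CNMonteCarloSampling< double > engine(cn);
    // engine.makeInference();
    // const auto& pmin = engine.marginalMin(0);   // fused lower probabilities
    // const auto& pmax = engine.marginalMax(0);   // fused upper probabilities
    // @endcode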

    template < typename GUM_SCALAR, class BNInferenceEngine >
    MultipleInferenceEngine< GUM_SCALAR, BNInferenceEngine >::~MultipleInferenceEngine() {
      GUM_DESTRUCTOR(MultipleInferenceEngine);
    }

    template < typename GUM_SCALAR, class BNInferenceEngine >
    inline void MultipleInferenceEngine< GUM_SCALAR, BNInferenceEngine >::initThreadsData_(
       const Size& num_threads,
       const bool  _storeVertices_,
       const bool  _storeBNOpt_) {
      workingSet_.clear();
      workingSet_.resize(num_threads, nullptr);
      workingSetE_.clear();
      workingSetE_.resize(num_threads, nullptr);

      l_marginalMin_.clear();
      l_marginalMin_.resize(num_threads);
      l_marginalMax_.clear();
      l_marginalMax_.resize(num_threads);
      l_expectationMin_.clear();
      l_expectationMin_.resize(num_threads);
      l_expectationMax_.clear();
      l_expectationMax_.resize(num_threads);

      l_clusters_.clear();
      l_clusters_.resize(num_threads);

      if (_storeVertices_) {
        l_marginalSets_.clear();
        l_marginalSets_.resize(num_threads);
      }

      if (_storeBNOpt_) {
        for (Size ptr = 0; ptr < this->l_optimalNet_.size(); ptr++)
          if (this->l_optimalNet_[ptr] != nullptr) delete l_optimalNet_[ptr];

        l_optimalNet_.clear();
        l_optimalNet_.resize(num_threads);
      }

      l_modal_.clear();
      l_modal_.resize(num_threads);

      this->oldMarginalMin_.clear();
      this->oldMarginalMin_ = this->marginalMin_;
      this->oldMarginalMax_.clear();
      this->oldMarginalMax_ = this->marginalMax_;
    }

    template < typename GUM_SCALAR, class BNInferenceEngine >
    inline bool MultipleInferenceEngine< GUM_SCALAR, BNInferenceEngine >::updateThread_(
       const NodeId&                    id,
       const std::vector< GUM_SCALAR >& vertex,
       const bool&                      elimRedund) {
      int tId = getThreadNumber();

      // save E(X) if we don't save vertices
      if (!_infE_::storeVertices_ && !l_modal_[tId].empty()) {
        std::string var_name = workingSet_[tId]->variable(id).name();
        auto        delim    = var_name.find_first_of("_");
        var_name             = var_name.substr(0, delim);

        if (l_modal_[tId].exists(var_name)) {
          GUM_SCALAR exp   = 0;
          Size       vsize = Size(vertex.size());

          for (Size mod = 0; mod < vsize; mod++)
            exp += vertex[mod] * l_modal_[tId][var_name][mod];

          if (exp > l_expectationMax_[tId][id]) l_expectationMax_[tId][id] = exp;

          if (exp < l_expectationMin_[tId][id]) l_expectationMin_[tId][id] = exp;
        }
      }   // end of : if modal (map) not empty

      bool newOne = false;
      bool added  = false;
      bool result = false;
      // during burn in, we keep checking on local marginals and not on global ones
      // (faster inference); we also don't store a dbn for observed variables, since
      // there would be a huge number of them (probably all of them)
      Size vsize = Size(vertex.size());

      for (Size mod = 0; mod < vsize; mod++) {
        if (vertex[mod] < l_marginalMin_[tId][id][mod]) {
          l_marginalMin_[tId][id][mod] = vertex[mod];
          newOne                       = true;

          if (_infE_::storeBNOpt_ && !_infE_::evidence_.exists(id)) {
            std::vector< Size > key(3);
            key[0] = id;
            key[1] = mod;
            key[2] = 0;

            if (l_optimalNet_[tId]->insert(key, true)) result = true;
          }
        }

        if (vertex[mod] > l_marginalMax_[tId][id][mod]) {
          l_marginalMax_[tId][id][mod] = vertex[mod];
          newOne                       = true;

          if (_infE_::storeBNOpt_ && !_infE_::evidence_.exists(id)) {
            std::vector< Size > key(3);
            key[0] = id;
            key[1] = mod;
            key[2] = 1;

            if (l_optimalNet_[tId]->insert(key, true)) result = true;
          }
        } else if (vertex[mod] == l_marginalMin_[tId][id][mod]
                   || vertex[mod] == l_marginalMax_[tId][id][mod]) {
          newOne = true;

          if (_infE_::storeBNOpt_ && vertex[mod] == l_marginalMin_[tId][id][mod]
              && !_infE_::evidence_.exists(id)) {
            std::vector< Size > key(3);
            key[0] = id;
            key[1] = mod;
            key[2] = 0;

            if (l_optimalNet_[tId]->insert(key, false)) result = true;
          }

          if (_infE_::storeBNOpt_ && vertex[mod] == l_marginalMax_[tId][id][mod]
              && !_infE_::evidence_.exists(id)) {
            std::vector< Size > key(3);
            key[0] = id;
            key[1] = mod;
            key[2] = 1;

            if (l_optimalNet_[tId]->insert(key, false)) result = true;
          }
        }

        // store the point to compute credal set vertices;
        // check for redundancy at each step or at the end ?
        if (_infE_::storeVertices_ && !added && newOne) {
          updateThreadCredalSets_(id, vertex, elimRedund);
          added = true;
        }
      }

      // if no variable received better marginals, the sampled network will be deleted
      if (_infE_::storeBNOpt_ && result) return true;

      return false;
    }
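
    // The expectation bookkeeping above is a plain dot product: with a sampled
    // vertex p = (0.2, 0.8) and declared modalities m = (0, 1), E(X) = 0.2 * 0
    // + 0.8 * 1 = 0.8, and the thread-local bounds are widened so that
    // l_expectationMin_ <= 0.8 <= l_expectationMax_. A self-contained sketch:
    // @code
    // #include <numeric>
    // #include <vector>
    // std::vector< double > p{0.2, 0.8}, m{0., 1.};
    // double e = std::inner_product(p.cbegin(), p.cend(), m.cbegin(), 0.);   // 0.8
    // @endcode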

    template < typename GUM_SCALAR, class BNInferenceEngine >
    inline void
       MultipleInferenceEngine< GUM_SCALAR, BNInferenceEngine >::updateThreadCredalSets_(
          const NodeId&                    id,
          const std::vector< GUM_SCALAR >& vertex,
          const bool&                      elimRedund) {
      int   tId           = getThreadNumber();
      auto& nodeCredalSet = l_marginalSets_[tId][id];
      Size  dsize         = Size(vertex.size());

      bool eq = true;

      for (auto it = nodeCredalSet.cbegin(), itEnd = nodeCredalSet.cend(); it != itEnd; ++it) {
        eq = true;

        for (Size i = 0; i < dsize; i++) {
          if (std::fabs(vertex[i] - (*it)[i]) > 1e-6) {
            eq = false;
            break;
          }
        }

        if (eq) break;
      }

      // add the vertex to the thread's credal set only if it is new; the early
      // returns below leave redundancy elimination to the fusion step
      if (!eq || nodeCredalSet.size() == 0) {
        nodeCredalSet.push_back(vertex);
        return;
      } else
        return;

      /// we need this because of the next lambda's return condition
      /// fabs ( *minIt - *maxIt ) > 1e-6, which never holds if there is only
      /// one vertex
      if (nodeCredalSet.size() == 1) return;

      // check that the point and all previously added ones are not inside the
      // actual polytope
      auto itEnd = std::remove_if(
         nodeCredalSet.begin(),
         nodeCredalSet.end(),
         [&](const std::vector< GUM_SCALAR >& v) -> bool {
           for (auto jt       = v.cbegin(),
                     jtEnd    = v.cend(),
                     minIt    = l_marginalMin_[tId][id].cbegin(),
                     minItEnd = l_marginalMin_[tId][id].cend(),
                     maxIt    = l_marginalMax_[tId][id].cbegin(),
                     maxItEnd = l_marginalMax_[tId][id].cend();
                jt != jtEnd && minIt != minItEnd && maxIt != maxItEnd;
                ++jt, ++minIt, ++maxIt) {
             if ((std::fabs(*jt - *minIt) < 1e-6 || std::fabs(*jt - *maxIt) < 1e-6)
                 && std::fabs(*minIt - *maxIt) > 1e-6)
               return false;
           }
           return true;
         });

      nodeCredalSet.erase(itEnd, nodeCredalSet.end());

      // we need at least 2 points to make a convex combination
      if (!elimRedund || nodeCredalSet.size() <= 2) return;

      // there may be points not inside the polytope but on one of its facets,
      // meaning each is still a convex combination of that facet's vertices;
      // here we need lrs
      LRSWrapper< GUM_SCALAR > lrsWrapper;
      lrsWrapper.setUpV(dsize, Size(nodeCredalSet.size()));

      for (const auto& vtx: nodeCredalSet)
        lrsWrapper.fillV(vtx);

      lrsWrapper.elimRedundVrep();

      l_marginalSets_[tId][id] = lrsWrapper.getOutput();
    }
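
    // Note on the per-step elimination code above (currently bypassed by the early
    // returns): the remove_if predicate keeps a point only if, on some dimension
    // where the bounds differ (|min - max| > 1e-6), it attains the current lower
    // or upper bound; any other point lies strictly inside the bounding box and
    // cannot be a vertex of the credal set. A standalone sketch of that test,
    // with made-up data:
    // @code
    // #include <cmath>
    // #include <vector>
    // std::vector< double > v{0.3, 0.7}, lo{0.2, 0.6}, hi{0.4, 0.8};
    // bool inside = true;   // candidate for removal ?
    // for (std::size_t k = 0; k < v.size(); ++k)
    //   if ((std::fabs(v[k] - lo[k]) < 1e-6 || std::fabs(v[k] - hi[k]) < 1e-6)
    //       && std::fabs(lo[k] - hi[k]) > 1e-6)
    //     inside = false;   // v touches a live bound : keep it
    // // here inside == true : v would be removed from the credal set
    // @endcode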

    template < typename GUM_SCALAR, class BNInferenceEngine >
    void MultipleInferenceEngine< GUM_SCALAR, BNInferenceEngine >::updateMarginals_() {
#pragma omp parallel
      {
        int  threadId = getThreadNumber();
        long nsize    = long(workingSet_[threadId]->size());

#pragma omp for

        for (long i = 0; i < nsize; i++) {
          Size dSize = Size(l_marginalMin_[threadId][i].size());

          for (Size j = 0; j < dSize; j++) {
            Size tsize = Size(l_marginalMin_.size());

            // go through all threads
            for (Size tId = 0; tId < tsize; tId++) {
              if (l_marginalMin_[tId][i][j] < this->marginalMin_[i][j])
                this->marginalMin_[i][j] = l_marginalMin_[tId][i][j];

              if (l_marginalMax_[tId][i][j] > this->marginalMax_[i][j])
                this->marginalMax_[i][j] = l_marginalMax_[tId][i][j];
            }   // end of : all threads
          }     // end of : all modalities
        }       // end of : all variables
      }         // end of : parallel region
    }
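
    // The fusion above is an elementwise min / max across the thread-local bounds:
    // e.g. two threads reporting P(X = 0) in [0.2, 0.5] and [0.3, 0.6] fuse into
    // the global interval [0.2, 0.6].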

    template < typename GUM_SCALAR, class BNInferenceEngine >
    inline const GUM_SCALAR
       MultipleInferenceEngine< GUM_SCALAR, BNInferenceEngine >::computeEpsilon_() {
      GUM_SCALAR eps = 0;
#pragma omp parallel
      {
        GUM_SCALAR tEps = 0;
        GUM_SCALAR delta;

        int  tId   = getThreadNumber();
        long nsize = long(workingSet_[tId]->size());

#pragma omp for

        for (long i = 0; i < nsize; i++) {
          Size dSize = Size(l_marginalMin_[tId][i].size());

          for (Size j = 0; j < dSize; j++) {
            // on min
            delta = this->marginalMin_[i][j] - this->oldMarginalMin_[i][j];
            delta = (delta < 0) ? (-delta) : delta;
            tEps  = (tEps < delta) ? delta : tEps;

            // on max
            delta = this->marginalMax_[i][j] - this->oldMarginalMax_[i][j];
            delta = (delta < 0) ? (-delta) : delta;
            tEps  = (tEps < delta) ? delta : tEps;

            this->oldMarginalMin_[i][j] = this->marginalMin_[i][j];
            this->oldMarginalMax_[i][j] = this->marginalMax_[i][j];
          }
        }   // end of : all variables

#pragma omp critical(epsilon_max)
        {
#pragma omp flush(eps)
          eps = (eps < tEps) ? tEps : eps;
        }

      }   // end of : parallel region
      return eps;
    }
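
    // computeEpsilon_ hence returns
    //   eps = max_{i,j} max( |marginalMin_[i][j] - oldMarginalMin_[i][j]|,
    //                        |marginalMax_[i][j] - oldMarginalMax_[i][j]| ),
    // i.e. the largest change of any bound since the previous check; inference is
    // considered to have converged once eps falls below the engine's threshold.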

    template < typename GUM_SCALAR, class BNInferenceEngine >
    void MultipleInferenceEngine< GUM_SCALAR, BNInferenceEngine >::updateOldMarginals_() {
#pragma omp parallel
      {
        int  threadId = getThreadNumber();
        long nsize    = long(workingSet_[threadId]->size());

#pragma omp for

        for (long i = 0; i < nsize; i++) {
          Size dSize = Size(l_marginalMin_[threadId][i].size());

          for (Size j = 0; j < dSize; j++) {
            Size tsize = Size(l_marginalMin_.size());

            // go through all threads
            for (Size tId = 0; tId < tsize; tId++) {
              if (l_marginalMin_[tId][i][j] < this->oldMarginalMin_[i][j])
                this->oldMarginalMin_[i][j] = l_marginalMin_[tId][i][j];

              if (l_marginalMax_[tId][i][j] > this->oldMarginalMax_[i][j])
                this->oldMarginalMax_[i][j] = l_marginalMax_[tId][i][j];
            }   // end of : all threads
          }     // end of : all modalities
        }       // end of : all variables
      }         // end of : parallel region
    }

    template < typename GUM_SCALAR, class BNInferenceEngine >
    void MultipleInferenceEngine< GUM_SCALAR, BNInferenceEngine >::verticesFusion_() {
      // don't create threads if there are no vertices saved
      if (!_infE_::storeVertices_) return;

#pragma omp parallel
      {
        int  threadId = getThreadNumber();
        Size nsize    = Size(workingSet_[threadId]->size());

#pragma omp for

        for (long i = 0; i < long(nsize); i++) {
          Size tsize = Size(l_marginalSets_.size());

          // go through all threads
          for (long tId = 0; tId < long(tsize); tId++) {
            auto& nodeThreadCredalSet = l_marginalSets_[tId][i];

            // for each vertex, if we are at any opt marginal, add it to the set
            for (const auto& vtx: nodeThreadCredalSet) {
              // we run redundancy elimination at each step because there could
              // be 100000 threads and the set would be huge otherwise...
              // BUT not if vertices are of dimension 2 ! the opt check and the
              // equality test are enough in that case
              _infE_::updateCredalSets_(i, vtx, (vtx.size() > 2) ? true : false);
            }   // end of : nodeThreadCredalSet
          }     // end of : all threads
        }       // end of : all variables
      }         // end of : parallel region
    }
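
    // Design note : lrs-based redundancy elimination is requested only for vertices
    // of dimension > 2; for a binary variable the credal set lives on a segment, so
    // the equality and optimum checks alone already identify its two possible
    // extreme points.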

    template < typename GUM_SCALAR, class BNInferenceEngine >
    void MultipleInferenceEngine< GUM_SCALAR, BNInferenceEngine >::expFusion_() {
      // don't create threads if there are no modalities to compute expectations
      if (this->modal_.empty()) return;

      // we can compute expectations from vertices of the final credal set
      if (_infE_::storeVertices_) {
#pragma omp parallel
        {
          int threadId = getThreadNumber();

          if (!this->l_modal_[threadId].empty()) {
            Size nsize = Size(workingSet_[threadId]->size());

#pragma omp for

            for (long i = 0; i < long(nsize); i++) {   // i needs to be signed (due
                                                       // to omp with visual c++ 15)
              std::string var_name = workingSet_[threadId]->variable(i).name();
              auto        delim    = var_name.find_first_of("_");
              var_name             = var_name.substr(0, delim);

              if (!l_modal_[threadId].exists(var_name)) continue;

              for (const auto& vertex: _infE_::marginalSets_[i]) {
                GUM_SCALAR exp   = 0;
                Size       vsize = Size(vertex.size());

                for (Size mod = 0; mod < vsize; mod++)
                  exp += vertex[mod] * l_modal_[threadId][var_name][mod];

                if (exp > this->expectationMax_[i]) this->expectationMax_[i] = exp;

                if (exp < this->expectationMin_[i]) this->expectationMin_[i] = exp;
              }
            }   // end of : each variable parallel for
          }     // end of : if this thread has modals
        }       // end of parallel region
        return;
      }

#pragma omp parallel
      {
        int threadId = getThreadNumber();

        if (!this->l_modal_[threadId].empty()) {
          Size nsize = Size(workingSet_[threadId]->size());

#pragma omp for
          for (long i = 0; i < long(nsize);
               i++) {   // long instead of Idx due to omp for visual C++ 15
            std::string var_name = workingSet_[threadId]->variable(i).name();
            auto        delim    = var_name.find_first_of("_");
            var_name             = var_name.substr(0, delim);

            if (!l_modal_[threadId].exists(var_name)) continue;

            Size tsize = Size(l_expectationMax_.size());

            for (Idx tId = 0; tId < tsize; tId++) {
              if (l_expectationMax_[tId][i] > this->expectationMax_[i])
                this->expectationMax_[i] = l_expectationMax_[tId][i];

              if (l_expectationMin_[tId][i] < this->expectationMin_[i])
                this->expectationMin_[i] = l_expectationMin_[tId][i];
            }   // end of : each thread
          }     // end of : each variable
        }       // end of : if modals not empty
      }         // end of : parallel region
    }

    template < typename GUM_SCALAR, class BNInferenceEngine >
    void MultipleInferenceEngine< GUM_SCALAR, BNInferenceEngine >::optFusion_() {
      typedef std::vector< bool > dBN;

      Size nsize = Size(workingSet_[0]->size());

      // no parallel insert in hash-tables (OptBN)
      for (Idx i = 0; i < nsize; i++) {
        // we don't store anything for observed variables
        if (_infE_::evidence_.exists(i)) continue;

        Size dSize = Size(l_marginalMin_[0][i].size());

        for (Size j = 0; j < dSize; j++) {
          // go through all threads
          std::vector< Size > keymin(3);
          keymin[0] = i;
          keymin[1] = j;
          keymin[2] = 0;
          std::vector< Size > keymax(keymin);
          keymax[2] = 1;

          Size tsize = Size(l_marginalMin_.size());

          for (Size tId = 0; tId < tsize; tId++) {
            if (l_marginalMin_[tId][i][j] == this->marginalMin_[i][j]) {
              const std::vector< dBN* >& tOpts = l_optimalNet_[tId]->getBNOptsFromKey(keymin);
              Size                       osize = Size(tOpts.size());

              for (Size bn = 0; bn < osize; bn++) {
                _infE_::dbnOpt_.insert(*tOpts[bn], keymin);
              }
            }

            if (l_marginalMax_[tId][i][j] == this->marginalMax_[i][j]) {
              const std::vector< dBN* >& tOpts = l_optimalNet_[tId]->getBNOptsFromKey(keymax);
              Size                       osize = Size(tOpts.size());

              for (Size bn = 0; bn < osize; bn++) {
                _infE_::dbnOpt_.insert(*tOpts[bn], keymax);
              }
            }
          }   // end of : all threads
        }     // end of : all modalities
      }       // end of : all variables
    }
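
    // optFusion_ keeps, for each key (variable, modality, 0 = min / 1 = max), only
    // the sampled dBNs whose thread-local bound coincides with the fused global
    // bound: these are exactly the networks that realise the final lower and upper
    // probabilities, merged into the global optimal-nets map.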

    template < typename GUM_SCALAR, class BNInferenceEngine >
    void MultipleInferenceEngine< GUM_SCALAR, BNInferenceEngine >::eraseAllEvidence() {
      _infE_::eraseAllEvidence();
      Size tsize = Size(workingSet_.size());

      // delete pointers
      for (Size bn = 0; bn < tsize; bn++) {
        if (workingSet_[bn] != nullptr) delete workingSet_[bn];

        if (_infE_::storeBNOpt_)
          if (l_optimalNet_[bn] != nullptr) delete l_optimalNet_[bn];

        if (this->workingSetE_[bn] != nullptr) {
          for (const auto ev: *workingSetE_[bn])
            delete ev;

          delete workingSetE_[bn];
        }

        if (l_inferenceEngine_[bn] != nullptr) delete l_inferenceEngine_[bn];
      }

      // this is important : those will be resized with the correct number of
      // threads at the next inference
      workingSet_.clear();
      workingSetE_.clear();
      l_inferenceEngine_.clear();
      l_optimalNet_.clear();

      l_marginalMin_.clear();
      l_marginalMax_.clear();
      l_expectationMin_.clear();
      l_expectationMax_.clear();
      l_modal_.clear();
      l_marginalSets_.clear();
      l_evidence_.clear();
      l_clusters_.clear();
    }
  }   // namespace credal
}   // namespace gum