aGrUM  0.18.1
a C++ library for (probabilistic) graphical models
CNLoopyPropagation_tpl.h
Go to the documentation of this file.
1 
24 
25 namespace gum {
26  namespace credal {
27 
// Writes the current inference results to disk: replaces the 4-character
// extension of `path` with ".res"; if `path` is an ".evi" evidence file its
// content is copied verbatim before the "[RESULTATS]" section; then, for each
// node of the network, writes the posterior interval of both labels.
// NOTE(review): this listing is a doxygen source view with dropped lines —
// the GUM_ERROR(...) openers before the quoted messages below are missing
// from the extraction, so this text is not compilable as-is.
28  template < typename GUM_SCALAR >
29  void CNLoopyPropagation< GUM_SCALAR >::saveInference(const std::string& path) {
30  std::string path_name = path.substr(0, path.size() - 4);
31  path_name = path_name + ".res";
32 
33  std::ofstream res(path_name.c_str(), std::ios::out | std::ios::trunc);
34 
35  if (!res.good()) {
// NOTE(review): the GUM_ERROR(IOError, ... opener (original line 36) was
// lost in extraction; the strings below are its message argument.
37  "CNLoopyPropagation<GUM_SCALAR>::saveInference(std::"
38  "string & path) : could not open file : "
39  + path_name);
40  }
41 
42  std::string ext = path.substr(path.size() - 3, path.size());
43 
// if the input is an evidence file, copy it into the result file first
44  if (std::strcmp(ext.c_str(), "evi") == 0) {
45  std::ifstream evi(path.c_str(), std::ios::in);
46  std::string ligne;
47 
48  if (!evi.good()) {
// NOTE(review): GUM_ERROR opener (original line 49) also lost in extraction.
50  "CNLoopyPropagation<GUM_SCALAR>::saveInference(std::"
51  "string & path) : could not open file : "
52  + ext);
53  }
54 
55  while (evi.good()) {
56  getline(evi, ligne);
57  res << ligne << "\n";
58  }
59 
60  evi.close();
61  }
62 
63  res << "[RESULTATS]"
64  << "\n";
65 
66  for (auto node: bnet__->nodes()) {
67  // compute the posterior distribution
68  GUM_SCALAR msg_p_min = 1.0;
69  GUM_SCALAR msg_p_max = 0.0;
70 
71  // evidence case: the posterior is known immediately
72  if (infE__::evidence_.exists(node)) {
73  if (infE__::evidence_[node][1] == 0.) {
74  msg_p_min = 0.;
75  } else if (infE__::evidence_[node][1] == 1.) {
76  msg_p_min = 1.;
77  }
78 
79  msg_p_max = msg_p_min;
80  }
81  // otherwise combine the node's pi (P) and lambda (L) values
82  else {
83  GUM_SCALAR min = NodesP_min_[node];
84  GUM_SCALAR max;
85 
// NodesP_max_ / NodesL_max_ only hold an entry when max differs from min
86  if (NodesP_max_.exists(node)) {
87  max = NodesP_max_[node];
88  } else {
89  max = min;
90  }
91 
92  GUM_SCALAR lmin = NodesL_min_[node];
93  GUM_SCALAR lmax;
94 
95  if (NodesL_max_.exists(node)) {
96  lmax = NodesL_max_[node];
97  } else {
98  lmax = lmin;
99  }
100 
101  // limit cases for min
102  if (min == INF_ && lmin == 0.) {
103  std::cout << "proba ERR (negatif) : pi = inf, l = 0" << std::endl;
104  }
105 
106  if (lmin == INF_) { // infinite case
107  msg_p_min = GUM_SCALAR(1.);
108  } else if (min == 0. || lmin == 0.) {
109  msg_p_min = GUM_SCALAR(0.);
110  } else {
// posterior = 1 / (1 + ((1/pi - 1) / lambda))  (Pearl-style combination)
111  msg_p_min = GUM_SCALAR(1. / (1. + ((1. / min - 1.) * 1. / lmin)));
112  }
113 
114  // limit cases for max
115  if (max == INF_ && lmax == 0.) {
116  std::cout << "proba ERR (negatif) : pi = inf, l = 0" << std::endl;
117  }
118 
119  if (lmax == INF_) { // infinite case
120  msg_p_max = GUM_SCALAR(1.);
121  } else if (max == 0. || lmax == 0.) {
122  msg_p_max = GUM_SCALAR(0.);
123  } else {
124  msg_p_max = GUM_SCALAR(1. / (1. + ((1. / max - 1.) * 1. / lmax)));
125  }
126  }
127 
// `x != x` is a NaN test: repair one NaN bound from the other if possible
128  if (msg_p_min != msg_p_min && msg_p_max == msg_p_max) {
129  msg_p_min = msg_p_max;
130  }
131 
132  if (msg_p_max != msg_p_max && msg_p_min == msg_p_min) {
133  msg_p_max = msg_p_min;
134  }
135 
136  if (msg_p_max != msg_p_max && msg_p_min != msg_p_min) {
137  std::cout << std::endl;
138  std::cout << "pas de proba calculable (verifier observations)"
139  << std::endl;
140  }
141 
142  res << "P(" << bnet__->variable(node).name() << " | e) = ";
143 
144  if (infE__::evidence_.exists(node)) {
145  res << "(observe)" << std::endl;
146  } else {
147  res << std::endl;
148  }
149 
// label(0) gets the complementary interval [1 - max, 1 - min]
150  res << "\t\t" << bnet__->variable(node).label(0) << " [ "
151  << (GUM_SCALAR)1. - msg_p_max;
152 
153  if (msg_p_min != msg_p_max) {
154  res << ", " << (GUM_SCALAR)1. - msg_p_min << " ] | ";
155  } else {
156  res << " ] | ";
157  }
158 
159  res << bnet__->variable(node).label(1) << " [ " << msg_p_min;
160 
161  if (msg_p_min != msg_p_max) {
162  res << ", " << msg_p_max << " ]" << std::endl;
163  } else {
164  res << " ]" << std::endl;
165  }
166  } // end of : for each node
167 
168  res.close();
169  }
170 
// Tightens the lambda-message bounds [msg_l_min, msg_l_max]: for each
// likelihood vertex l in `lx`, evaluates the extreme values of the ratio
// (num + 1/(l-1)) / (den + 1/(l-1)) using the num/den min-max bounds, and
// widens the message interval accordingly. l == 1 forces the ratio to 1;
// l == INF_ uses the raw num/den bounds. msg_l_min == msg_l_max == -2 is
// the "not yet initialized" sentinel.
// NOTE(review): the doxygen extraction dropped original line 181 carrying
// the function name (presumably CNLoopyPropagation<GUM_SCALAR>::compute_ext_)
// — only the parameter list remains below.
180  template < typename GUM_SCALAR >
182  GUM_SCALAR& msg_l_min,
183  GUM_SCALAR& msg_l_max,
184  std::vector< GUM_SCALAR >& lx,
185  GUM_SCALAR& num_min,
186  GUM_SCALAR& num_max,
187  GUM_SCALAR& den_min,
188  GUM_SCALAR& den_max) {
189  GUM_SCALAR num_min_tmp = 1.;
190  GUM_SCALAR den_min_tmp = 1.;
191  GUM_SCALAR num_max_tmp = 1.;
192  GUM_SCALAR den_max_tmp = 1.;
193 
194  GUM_SCALAR res_min = 1.0, res_max = 0.0;
195 
196  auto lsize = lx.size();
197 
198  for (decltype(lsize) i = 0; i < lsize; i++) {
199  bool non_defini_min = false;
200  bool non_defini_max = false;
201 
// l = INF: the 1/(l-1) correction vanishes; min ratio pairs num_min with
// den_max (and conversely for the max ratio)
202  if (lx[i] == INF_) {
203  num_min_tmp = num_min;
204  den_min_tmp = den_max;
205  num_max_tmp = num_max;
206  den_max_tmp = den_min;
207  } else if (lx[i] == (GUM_SCALAR)1.) {
208  num_min_tmp = GUM_SCALAR(1.);
209  den_min_tmp = GUM_SCALAR(1.);
210  num_max_tmp = GUM_SCALAR(1.);
211  den_max_tmp = GUM_SCALAR(1.);
212  } else if (lx[i] > (GUM_SCALAR)1.) {
// l > 1: 1/(l-1) > 0, num/den pairing as in the INF case
213  GUM_SCALAR li = GUM_SCALAR(1.) / (lx[i] - GUM_SCALAR(1.));
214  num_min_tmp = num_min + li;
215  den_min_tmp = den_max + li;
216  num_max_tmp = num_max + li;
217  den_max_tmp = den_min + li;
218  } else if (lx[i] < (GUM_SCALAR)1.) {
// l < 1: 1/(l-1) < 0, which flips which num/den bounds give the extremes
219  GUM_SCALAR li = GUM_SCALAR(1.) / (lx[i] - GUM_SCALAR(1.));
220  num_min_tmp = num_max + li;
221  den_min_tmp = den_min + li;
222  num_max_tmp = num_min + li;
223  den_max_tmp = den_max + li;
224  }
225 
// 0/0 is undefined, x/0 is +inf; a/inf keeps the previous res value
226  if (den_min_tmp == 0. && num_min_tmp == 0.) {
227  non_defini_min = true;
228  } else if (den_min_tmp == 0. && num_min_tmp != 0.) {
229  res_min = INF_;
230  } else if (den_min_tmp != INF_ || num_min_tmp != INF_) {
231  res_min = num_min_tmp / den_min_tmp;
232  }
233 
234  if (den_max_tmp == 0. && num_max_tmp == 0.) {
235  non_defini_max = true;
236  } else if (den_max_tmp == 0. && num_max_tmp != 0.) {
237  res_max = INF_;
238  } else if (den_max_tmp != INF_ || num_max_tmp != INF_) {
239  res_max = num_max_tmp / den_max_tmp;
240  }
241 
242  if (non_defini_max && non_defini_min) {
243  std::cout << "undefined msg" << std::endl;
244  continue;
245  } else if (non_defini_min && !non_defini_max) {
246  res_min = res_max;
247  } else if (non_defini_max && !non_defini_min) {
248  res_max = res_min;
249  }
250 
// clamp tiny negative values caused by floating-point error
251  if (res_min < 0.) { res_min = 0.; }
252 
253  if (res_max < 0.) { res_max = 0.; }
254 
// first contribution replaces the -2 sentinel outright
255  if (msg_l_min == msg_l_max && msg_l_min == -2.) {
256  msg_l_min = res_min;
257  msg_l_max = res_max;
258  }
259 
260  if (res_max > msg_l_max) { msg_l_max = res_max; }
261 
262  if (res_min < msg_l_min) { msg_l_min = res_min; }
263 
264  } // end of : for each lx
265  }
266 
// Lambda-message version: marginalizes the credal CPT bounds of node `id`
// over one fixed combination of parent messages (`combi_msg_p`), accumulating
// numerator/denominator bounds, then delegates the ratio computation to
// compute_ext_ above. `pos` identifies the receiving parent inside the CPT
// ordering; combi_den/combi_num walk the CPT entries for that parent's two
// states — presumably value 0 (denominator) vs value 1 (numerator); the
// `% pp` skip jumps over the block belonging to the other state. TODO confirm
// the exact CPT index layout against aGrUM's CPT storage convention.
// NOTE(review): original line 271 with the function name was dropped by the
// doxygen extraction.
270  template < typename GUM_SCALAR >
272  std::vector< std::vector< GUM_SCALAR > >& combi_msg_p,
273  const NodeId& id,
274  GUM_SCALAR& msg_l_min,
275  GUM_SCALAR& msg_l_max,
276  std::vector< GUM_SCALAR >& lx,
277  const Idx& pos) {
278  GUM_SCALAR num_min = 0.;
279  GUM_SCALAR num_max = 0.;
280  GUM_SCALAR den_min = 0.;
281  GUM_SCALAR den_max = 0.;
282 
283  auto taille = combi_msg_p.size();
284 
// odometer: one iterator per parent message distribution
285  std::vector< typename std::vector< GUM_SCALAR >::iterator > it(taille);
286 
287  for (decltype(taille) i = 0; i < taille; i++) {
288  it[i] = combi_msg_p[i].begin();
289  }
290 
291  Size pp = pos;
292 
293  Size combi_den = 0;
294  Size combi_num = pp;
295 
296  // marginalization
297  while (it[taille - 1] != combi_msg_p[taille - 1].end()) {
298  GUM_SCALAR prod = 1.;
299 
// probability of this particular combination of parent values
300  for (decltype(taille) k = 0; k < taille; k++) {
301  prod *= *it[k];
302  }
303 
304  den_min += (cn__->get_CPT_min()[id][combi_den] * prod);
305  den_max += (cn__->get_CPT_max()[id][combi_den] * prod);
306 
307  num_min += (cn__->get_CPT_min()[id][combi_num] * prod);
308  num_max += (cn__->get_CPT_max()[id][combi_num] * prod);
309 
310  combi_den++;
311  combi_num++;
312 
// skip the CPT block associated with the other state of the target parent
313  if (combi_den % pp == 0) {
314  combi_den += pp;
315  combi_num += pp;
316  }
317 
318  // increment the combination iterators (odometer-style carry)
319  ++it[0];
320 
321  for (decltype(taille) i = 0;
322  (i < taille - 1) && (it[i] == combi_msg_p[i].end());
323  ++i) {
324  it[i] = combi_msg_p[i].begin();
325  ++it[i + 1];
326  }
327  } // end of : marginalisation
328 
329  compute_ext_(msg_l_min, msg_l_max, lx, num_min, num_max, den_min, den_max);
330  }
331 
// Pi-message version: for one fixed combination of parent messages,
// marginalizes the credal CPT lower/upper bounds of node `id` over all
// parent-value configurations, then widens [msg_p_min, msg_p_max] with the
// resulting min/max.
// NOTE(review): original line 337 carrying the function name was dropped by
// the doxygen extraction.
336  template < typename GUM_SCALAR >
338  std::vector< std::vector< GUM_SCALAR > >& combi_msg_p,
339  const NodeId& id,
340  GUM_SCALAR& msg_p_min,
341  GUM_SCALAR& msg_p_max) {
342  GUM_SCALAR min = 0.;
343  GUM_SCALAR max = 0.;
344 
345  auto taille = combi_msg_p.size();
346 
// odometer: one iterator per parent message distribution
347  std::vector< typename std::vector< GUM_SCALAR >::iterator > it(taille);
348 
349  for (decltype(taille) i = 0; i < taille; i++) {
350  it[i] = combi_msg_p[i].begin();
351  }
352 
353  int combi = 0;
354  auto theEnd = combi_msg_p[taille - 1].end();
355 
356  while (it[taille - 1] != theEnd) {
357  GUM_SCALAR prod = 1.;
358 
// probability of this combination of parent values
359  for (decltype(taille) k = 0; k < taille; k++) {
360  prod *= *it[k];
361  }
362 
363  min += (cn__->get_CPT_min()[id][combi] * prod);
364  max += (cn__->get_CPT_max()[id][combi] * prod);
365 
366  combi++;
367 
368  // increment the combination iterators (odometer-style carry)
369  ++it[0];
370 
371  for (decltype(taille) i = 0;
372  (i < taille - 1) && (it[i] == combi_msg_p[i].end());
373  ++i) {
374  it[i] = combi_msg_p[i].begin();
375  ++it[i + 1];
376  }
377  }
378 
// widen the caller's interval with this combination's extremes
379  if (min < msg_p_min) { msg_p_min = min; }
380 
381  if (max > msg_p_max) { msg_p_max = max; }
382  }
383 
// Pi-message enumeration: iterates (in parallel with OpenMP) over every
// combination of parent-message vertices in `msgs_p` — each parent
// contributes 1 or 2 candidate distributions — and calls compute_ext_ on
// each one to compute the extreme pi-message bounds for node `id`.
// Combinations are indexed by the binary decomposition of j.
// NOTE(review): original line 388 carrying the function name was dropped by
// the doxygen extraction.
387  template < typename GUM_SCALAR >
389  std::vector< std::vector< std::vector< GUM_SCALAR > > >& msgs_p,
390  const NodeId& id,
391  GUM_SCALAR& msg_p_min,
392  GUM_SCALAR& msg_p_max) {
393  auto taille = msgs_p.size();
394 
395  // source node
396  if (taille == 0) {
397  msg_p_min = cn__->get_CPT_min()[id][0];
398  msg_p_max = cn__->get_CPT_max()[id][0];
399  return;
400  }
401 
402  decltype(taille) msgPerm = 1;
403 #pragma omp parallel
404  {
// per-thread working copies of the bounds; merged in the critical section
405  GUM_SCALAR msg_pmin = msg_p_min;
406  GUM_SCALAR msg_pmax = msg_p_max;
407 
408  std::vector< std::vector< GUM_SCALAR > > combi_msg_p(taille);
409 
410  decltype(taille) confs = 1;
411 
// each thread multiplies its share of sizes; the atomic product below
// reassembles the total number of combinations across threads
412 #pragma omp for
413 
414  for (long i = 0; i < long(taille); i++) {
415  confs *= msgs_p[i].size();
416  }
417 
418 #pragma omp atomic
419  msgPerm *= confs;
420 #pragma omp barrier
421 #pragma omp \
422  flush // ( msgPerm ) let the compiler choose what to flush (due to mvsc)
423 
424 #pragma omp for
425 
426  for (int j = 0; j < int(msgPerm); j++) {
427  // get jth msg :
428  auto jvalue = j;
429 
// decode j in binary: each parent with 2 vertices consumes one bit
430  for (decltype(taille) i = 0; i < taille; i++) {
431  if (msgs_p[i].size() == 2) {
432  combi_msg_p[i] = (jvalue & 1) ? msgs_p[i][1] : msgs_p[i][0];
433  jvalue /= 2;
434  } else {
435  combi_msg_p[i] = msgs_p[i][0];
436  }
437  }
438 
439  compute_ext_(combi_msg_p, id, msg_pmin, msg_pmax);
440  }
441 
442 // since min is INF_ and max is 0 at init, there is no issue having more threads
443 // here
444 // than during for loop
445 #pragma omp critical(msgpminmax)
446  {
447 #pragma omp flush //( msg_p_min )
448  //#pragma omp flush ( msg_p_max ) let the compiler choose what to
449  // flush (due to mvsc)
450 
451  if (msg_p_min > msg_pmin) { msg_p_min = msg_pmin; }
452 
453  if (msg_p_max < msg_pmax) { msg_p_max = msg_pmax; }
454  }
455  }
456  return;
457  }
458 
// Lambda-message enumeration: same combination scheme as the pi version
// above, but each combination feeds the lambda compute_ext_ overload (with
// likelihood vertices `lx` and receiving-parent position `pos`). The -2
// sentinel marks "bound not yet computed", hence the positive tests when
// merging per-thread results.
// NOTE(review): original line 464 carrying the function name was dropped by
// the doxygen extraction.
463  template < typename GUM_SCALAR >
465  std::vector< std::vector< std::vector< GUM_SCALAR > > >& msgs_p,
466  const NodeId& id,
467  GUM_SCALAR& real_msg_l_min,
468  GUM_SCALAR& real_msg_l_max,
469  std::vector< GUM_SCALAR >& lx,
470  const Idx& pos) {
471  GUM_SCALAR msg_l_min = real_msg_l_min;
472  GUM_SCALAR msg_l_max = real_msg_l_max;
473 
474  auto taille = msgs_p.size();
475 
476  // one parent node, the one receiving the message
477  if (taille == 0) {
478  GUM_SCALAR num_min = cn__->get_CPT_min()[id][1];
479  GUM_SCALAR num_max = cn__->get_CPT_max()[id][1];
480  GUM_SCALAR den_min = cn__->get_CPT_min()[id][0];
481  GUM_SCALAR den_max = cn__->get_CPT_max()[id][0];
482 
483  compute_ext_(msg_l_min, msg_l_max, lx, num_min, num_max, den_min, den_max);
484 
485  real_msg_l_min = msg_l_min;
486  real_msg_l_max = msg_l_max;
487  return;
488  }
489 
490  decltype(taille) msgPerm = 1;
491 #pragma omp parallel
492  {
// per-thread working copies; merged in the critical section below
493  GUM_SCALAR msg_lmin = msg_l_min;
494  GUM_SCALAR msg_lmax = msg_l_max;
495  std::vector< std::vector< GUM_SCALAR > > combi_msg_p(taille);
496 
497  decltype(taille) confs = 1;
498 #pragma omp for
499 
500  for (int i = 0; i < int(taille); i++) {
501  confs *= msgs_p[i].size();
502  }
503 
// reassemble the total number of combinations across threads
504 #pragma omp atomic
505  msgPerm *= confs;
506 #pragma omp barrier
507 #pragma omp flush(msgPerm)
508 
509 // direct binary representation of config, no need for iterators
510 #pragma omp for
511 
512  for (long j = 0; j < long(msgPerm); j++) {
513  // get jth msg :
514  auto jvalue = j;
515 
516  for (decltype(taille) i = 0; i < taille; i++) {
517  if (msgs_p[i].size() == 2) {
518  combi_msg_p[i] = (jvalue & 1) ? msgs_p[i][1] : msgs_p[i][0];
519  jvalue /= 2;
520  } else {
521  combi_msg_p[i] = msgs_p[i][0];
522  }
523  }
524 
525  compute_ext_(combi_msg_p, id, msg_lmin, msg_lmax, lx, pos);
526  }
527 
528 // there may be more threads here than in the for loop, therefor positive test
529 // is NECESSARY (init is -2)
530 #pragma omp critical(msglminmax)
531  {
532 #pragma omp flush(msg_l_min)
533 #pragma omp flush(msg_l_max)
534 
535  if ((msg_l_min > msg_lmin || msg_l_min == -2) && msg_lmin > 0) {
536  msg_l_min = msg_lmin;
537  }
538 
539  if ((msg_l_max < msg_lmax || msg_l_max == -2) && msg_lmax > 0) {
540  msg_l_max = msg_lmax;
541  }
542  }
543  }
544 
545  real_msg_l_min = msg_l_min;
546  real_msg_l_max = msg_l_max;
547  }
548 
// Top-level inference driver: (re)initializes the message structures, runs
// the loop-propagation variant selected by inferenceType__, then refreshes
// indicator variables / marginals and the expectations.
// NOTE(review): original line 550 carrying the function name (presumably
// makeInference()) was dropped by the doxygen extraction.
549  template < typename GUM_SCALAR >
551  if (InferenceUpToDate_) { return; }
552 
553  initialize_();
554 
555  infE__::initApproximationScheme();
556 
557  switch (inferenceType__) {
558  case InferenceType::nodeToNeighbours:
559  makeInferenceNodeToNeighbours_();
560  break;
561 
562  case InferenceType::ordered: makeInferenceByOrderedArcs_(); break;
563 
564  case InferenceType::randomOrder: makeInferenceByRandomOrder_(); break;
565  }
566 
567  //_updateMarginals();
568  updateIndicatrices_(); // will call updateMarginals_()
569 
570  computeExpectations_();
571 
572  InferenceUpToDate_ = true;
573  }
574 
// Clears all inference state: evidence, arc/node message bounds, the
// heap-allocated per-node "lambda sent" parent sets, update flags and the
// active-node worklists.
// NOTE(review): original line 576 carrying the function name (presumably
// eraseAllEvidence()) was dropped by the doxygen extraction.
575  template < typename GUM_SCALAR >
577  infE__::eraseAllEvidence();
578 
579  ArcsL_min_.clear();
580  ArcsL_max_.clear();
581  ArcsP_min_.clear();
582  ArcsP_max_.clear();
583  NodesL_min_.clear();
584  NodesL_max_.clear();
585  NodesP_min_.clear();
586  NodesP_max_.clear();
587 
588  InferenceUpToDate_ = false;
589 
// msg_l_sent_ maps each node to a NodeSet allocated with new in initialize_;
// free them before clearing the map
590  if (msg_l_sent_.size() > 0) {
591  for (auto node: bnet__->nodes()) {
592  delete msg_l_sent_[node];
593  }
594  }
595 
596  msg_l_sent_.clear();
597  update_l_.clear();
598  update_p_.clear();
599 
600  active_nodes_set.clear();
601  next_active_nodes_set.clear();
602  }
603 
// Initializes all message structures in topological order: evidence nodes
// get fixed L/P values and immediate marginals; roots/leaves are activated;
// other nodes get their initial pi bounds by enumerating parent messages.
// Finally seeds the per-arc message maps from the node values.
// NOTE(review): original line 605 carrying the function name (presumably
// initialize_()) was dropped by the doxygen extraction; two more drops are
// flagged inline below.
604  template < typename GUM_SCALAR >
606  const DAG& graphe = bnet__->dag();
607 
608  // use const iterators with cbegin when available
609  for (auto node: bnet__->topologicalOrder()) {
610  update_p_.set(node, false);
611  update_l_.set(node, false);
// one heap NodeSet per node, freed in the cleanup routine
612  NodeSet* parents_ = new NodeSet();
613  msg_l_sent_.set(node, parents_);
614 
615  // fast path: initialize evidence nodes directly
616  if (infE__::evidence_.exists(node)) {
617  if (infE__::evidence_[node][1] != 0.
618  && infE__::evidence_[node][1] != 1.) {
// NOTE(review): the GUM_ERROR opener (original line 619) was dropped by the
// extraction; only its message argument remains.
620  "CNLoopyPropagation can only handle HARD evidences");
621  }
622 
623  active_nodes_set.insert(node);
624  update_l_.set(node, true);
625  update_p_.set(node, true);
626 
627  if (infE__::evidence_[node][1] == (GUM_SCALAR)1.) {
628  NodesL_min_.set(node, INF_);
629  NodesP_min_.set(node, (GUM_SCALAR)1.);
630  } else if (infE__::evidence_[node][1] == (GUM_SCALAR)0.) {
631  NodesL_min_.set(node, (GUM_SCALAR)0.);
632  NodesP_min_.set(node, (GUM_SCALAR)0.);
633  }
634 
635  std::vector< GUM_SCALAR > marg(2);
636  marg[1] = NodesP_min_[node];
637  marg[0] = 1 - marg[1];
638 
639  infE__::oldMarginalMin_.set(node, marg);
640  infE__::oldMarginalMax_.set(node, marg);
641 
642  continue;
643  }
644 
645  NodeSet par_ = graphe.parents(node);
646  NodeSet enf_ = graphe.children(node);
647 
// roots and leaves start active so messages begin flowing from them
648  if (par_.size() == 0) {
649  active_nodes_set.insert(node);
650  update_p_.set(node, true);
651  update_l_.set(node, true);
652  }
653 
654  if (enf_.size() == 0) {
655  active_nodes_set.insert(node);
656  update_p_.set(node, true);
657  update_l_.set(node, true);
658  }
659 
664  const auto parents = &bnet__->cpt(node).variablesSequence();
665 
666  std::vector< std::vector< std::vector< GUM_SCALAR > > > msgs_p;
667  std::vector< std::vector< GUM_SCALAR > > msg_p;
668  std::vector< GUM_SCALAR > distri(2);
669 
670  // +1 from start to avoid counting_ itself
671  // use const iterators when available with cbegin
672  for (auto jt = ++parents->begin(), theEnd = parents->end(); jt != theEnd;
673  ++jt) {
674  // compute probability distribution to avoid doing it multiple times
675  // (at
676  // each combination of messages)
677  distri[1] = NodesP_min_[bnet__->nodeId(**jt)];
678  distri[0] = (GUM_SCALAR)1. - distri[1];
679  msg_p.push_back(distri);
680 
// a second vertex is only pushed when the parent has distinct min/max
681  if (NodesP_max_.exists(bnet__->nodeId(**jt))) {
682  distri[1] = NodesP_max_[bnet__->nodeId(**jt)];
683  distri[0] = (GUM_SCALAR)1. - distri[1];
684  msg_p.push_back(distri);
685  }
686 
687  msgs_p.push_back(msg_p);
688  msg_p.clear();
689  }
690 
691  GUM_SCALAR msg_p_min = 1.;
692  GUM_SCALAR msg_p_max = 0.;
693 
// NOTE(review): the comparison operand of this condition (original line 695,
// presumably a NodeType check) was dropped by the extraction.
694  if (cn__->currentNodeType(node)
696  enum_combi_(msgs_p, node, msg_p_min, msg_p_max);
697  }
698 
699  if (msg_p_min <= (GUM_SCALAR)0.) { msg_p_min = (GUM_SCALAR)0.; }
700 
701  if (msg_p_max <= (GUM_SCALAR)0.) { msg_p_max = (GUM_SCALAR)0.; }
702 
703  NodesP_min_.set(node, msg_p_min);
704  std::vector< GUM_SCALAR > marg(2);
705  marg[1] = msg_p_min;
706  marg[0] = 1 - msg_p_min;
707 
708  infE__::oldMarginalMin_.set(node, marg);
709 
710  if (msg_p_min != msg_p_max) {
711  marg[1] = msg_p_max;
712  marg[0] = 1 - msg_p_max;
713  NodesP_max_.insert(node, msg_p_max);
714  }
715 
716  infE__::oldMarginalMax_.set(node, marg);
717 
718  NodesL_min_.set(node, (GUM_SCALAR)1.);
719  }
720 
// seed per-arc messages from the tail node's current values
721  for (auto arc: bnet__->arcs()) {
722  ArcsP_min_.set(arc, NodesP_min_[arc.tail()]);
723 
724  if (NodesP_max_.exists(arc.tail())) {
725  ArcsP_max_.set(arc, NodesP_max_[arc.tail()]);
726  }
727 
728  ArcsL_min_.set(arc, NodesL_min_[arc.tail()]);
729  }
730  }
731 
// Propagation loop, node-to-neighbours scheduling: every active node sends
// pi messages to its children and lambda messages to its parents; nodes
// touched by an update are queued in next_active_nodes_set for the next
// sweep. Stops when the approximation scheme converges or no node is active.
// NOTE(review): original line 733 carrying the function name was dropped by
// the doxygen extraction, as were the comparison operands of the two
// currentNodeType conditions below (original lines 744 and 753).
732  template < typename GUM_SCALAR >
734  const DAG& graphe = bnet__->dag();
735 
736  GUM_SCALAR eps;
737  // to validate TestSuite
738  infE__::continueApproximationScheme(1.);
739 
740  do {
741  for (auto node: active_nodes_set) {
742  for (auto chil: graphe.children(node)) {
// NOTE(review): truncated condition — the right-hand side is missing.
743  if (cn__->currentNodeType(chil)
745  continue;
746  }
747 
748  msgP_(node, chil);
749  }
750 
751  for (auto par: graphe.parents(node)) {
// NOTE(review): truncated condition — the right-hand side is missing.
752  if (cn__->currentNodeType(node)
754  continue;
755  }
756 
757  msgL_(node, par);
758  }
759  }
760 
761  eps = calculateEpsilon_();
762 
763  infE__::updateApproximationScheme();
764 
// swap in the worklist accumulated during this sweep
765  active_nodes_set.clear();
766  active_nodes_set = next_active_nodes_set;
767  next_active_nodes_set.clear();
768 
769  } while (infE__::continueApproximationScheme(eps)
770  && active_nodes_set.size() > 0);
771 
772  infE__::stopApproximationScheme(); // just to be sure of the
773  // approximationScheme has been notified of
774  // the end of looop
775  }
776 
// Propagation loop, random-order scheduling: every iteration partially
// shuffles the arc sequence (nbrArcs/2 random swaps via rand()), then sends
// both pi and lambda messages along each arc until convergence.
// NOTE(review): original line 778 carrying the function name was dropped by
// the doxygen extraction, as was the right-hand side of the currentNodeType
// condition (original lines 803/805).
777  template < typename GUM_SCALAR >
779  Size nbrArcs = bnet__->dag().sizeArcs();
780 
781  std::vector< cArcP > seq;
782  seq.reserve(nbrArcs);
783 
784  for (const auto& arc: bnet__->arcs()) {
785  seq.push_back(&arc);
786  }
787 
788  GUM_SCALAR eps;
789  // validate TestSuite
790  infE__::continueApproximationScheme(1.);
791 
792  do {
// partial Fisher-Yates-like shuffle: nbrArcs/2 random pair swaps
793  for (Size j = 0, theEnd = nbrArcs / 2; j < theEnd; j++) {
794  auto w1 = rand() % nbrArcs, w2 = rand() % nbrArcs;
795 
796  if (w1 == w2) { continue; }
797 
798  std::swap(seq[w1], seq[w2]);
799  }
800 
801  for (const auto it: seq) {
// NOTE(review): truncated condition — comparison operands missing.
802  if (cn__->currentNodeType(it->tail())
804  || cn__->currentNodeType(it->head())
806  continue;
807  }
808 
809  msgP_(it->tail(), it->head());
810  msgL_(it->head(), it->tail());
811  }
812 
813  eps = calculateEpsilon_();
814 
815  infE__::updateApproximationScheme();
816 
817  } while (infE__::continueApproximationScheme(eps));
818  }
819 
 820  // gives slightly worse results for some variable/modalities than other
 821  // inference
 822  // types (node D on 2U network loose 0.03 precision)
// Propagation loop, ordered-arcs scheduling: sends pi and lambda messages
// along every arc in the fixed enumeration order of bnet__->arcs(), until
// the approximation scheme converges.
// NOTE(review): original line 824 carrying the function name (presumably
// makeInferenceByOrderedArcs_()) was dropped by the doxygen extraction, as
// were the right-hand sides of the currentNodeType condition (lines 841/843).
823  template < typename GUM_SCALAR >
825  Size nbrArcs = bnet__->dag().sizeArcs();
826 
827  std::vector< cArcP > seq;
828  seq.reserve(nbrArcs);
829 
830  for (const auto& arc: bnet__->arcs()) {
831  seq.push_back(&arc);
832  }
833 
834  GUM_SCALAR eps;
835  // validate TestSuite
836  infE__::continueApproximationScheme(1.);
837 
838  do {
839  for (const auto it: seq) {
// NOTE(review): truncated condition — comparison operands missing.
840  if (cn__->currentNodeType(it->tail())
842  || cn__->currentNodeType(it->head())
844  continue;
845  }
846 
847  msgP_(it->tail(), it->head());
848  msgL_(it->head(), it->tail());
849  }
850 
851  eps = calculateEpsilon_();
852 
853  infE__::updateApproximationScheme();
854 
855  } while (infE__::continueApproximationScheme(eps));
856  }
857 
// Computes the lambda message sent from node Y to its parent X: first
// refreshes Y's own lambda bounds from its children's arc messages, then
// enumerates the pi messages of Y's other parents to derive the [min, max]
// lambda-message interval stored in ArcsL_min_/ArcsL_max_[Arc(X, Y)].
// On an effective change, X is flagged and queued for the next sweep.
// NOTE(review): original line 859 carrying the function name and parameters
// (presumably msgL_(NodeId Y, NodeId X)) was dropped by the doxygen
// extraction.
858  template < typename GUM_SCALAR >
860  NodeSet const& children = bnet__->children(Y);
861  NodeSet const& parents_ = bnet__->parents(Y);
862 
863  const auto parents = &bnet__->cpt(Y).variablesSequence();
864 
// isolated node (single neighbour, no evidence): nothing to send
865  if (((children.size() + parents->size() - 1) == 1)
866  && (!infE__::evidence_.exists(Y))) {
867  return;
868  }
869 
870  bool update_l = update_l_[Y];
871  bool update_p = update_p_[Y];
872 
873  if (!update_p && !update_l) { return; }
874 
875  msg_l_sent_[Y]->insert(X);
876 
877  // for future refresh LM/PI
878  if (msg_l_sent_[Y]->size() == parents_.size()) {
879  msg_l_sent_[Y]->clear();
880  update_l_[Y] = false;
881  }
882 
883  // refresh LM_part
884  if (update_l) {
885  if (!children.empty() && !infE__::evidence_.exists(Y)) {
886  GUM_SCALAR lmin = 1.;
887  GUM_SCALAR lmax = 1.;
888 
// multiply the lambda bounds received from every child of Y
889  for (auto chil: children) {
890  lmin *= ArcsL_min_[Arc(Y, chil)];
891 
892  if (ArcsL_max_.exists(Arc(Y, chil))) {
893  lmax *= ArcsL_max_[Arc(Y, chil)];
894  } else {
895  lmax *= ArcsL_min_[Arc(Y, chil)];
896  }
897  }
898 
// NOTE(review): this unconditional `lmin = lmax;` looks wrong — compare the
// NaN-guarded `if (lmin != lmin && lmax == lmax) lmin = lmax;` pattern used
// in msgP_; the guarding `if` may have been dropped by the extraction.
899  lmin = lmax;
900 
// `x != x` is a NaN test
901  if (lmax != lmax && lmin == lmin) { lmax = lmin; }
902 
903  if (lmax != lmax && lmin != lmin) {
904  std::cout << "no likelihood defined [lmin, lmax] (incompatibles "
905  "evidence ?)"
906  << std::endl;
907  }
908 
909  if (lmin < 0.) { lmin = 0.; }
910 
911  if (lmax < 0.) { lmax = 0.; }
912 
913  // no need to update nodeL if evidence since nodeL will never be used
914 
915  NodesL_min_[Y] = lmin;
916 
917  if (lmin != lmax) {
918  NodesL_max_.set(Y, lmax);
919  } else if (NodesL_max_.exists(Y)) {
920  NodesL_max_.erase(Y);
921  }
922 
923  } // end of : node has children & no evidence
924 
925  } // end of : if update_l
926 
927  GUM_SCALAR lmin = NodesL_min_[Y];
928  GUM_SCALAR lmax;
929 
930  if (NodesL_max_.exists(Y)) {
931  lmax = NodesL_max_[Y];
932  } else {
933  lmax = lmin;
934  }
935 
// neutral likelihood: send the trivial message [1, 1] and stop
940  if (lmin == lmax && lmin == 1.) {
941  ArcsL_min_[Arc(X, Y)] = lmin;
942 
943  if (ArcsL_max_.exists(Arc(X, Y))) { ArcsL_max_.erase(Arc(X, Y)); }
944 
945  return;
946  }
947 
948  // keep, for each node, a table of updated parents; once all are
949  // updated, stop
950  // until an L or P message notification
951 
952  if (update_p || update_l) {
953  std::vector< std::vector< std::vector< GUM_SCALAR > > > msgs_p;
954  std::vector< std::vector< GUM_SCALAR > > msg_p;
955  std::vector< GUM_SCALAR > distri(2);
956 
// position of X in the CPT variable sequence (set in the loop below)
// NOTE(review): `pos` stays uninitialized if X is never matched — presumably
// X is always among Y's parents here; confirm against callers.
957  Idx pos;
958 
959  // +1 from start to avoid counting_ itself
960  // use const iterators with cbegin when available
961  for (auto jt = ++parents->begin(), theEnd = parents->end(); jt != theEnd;
962  ++jt) {
963  if (bnet__->nodeId(**jt) == X) {
964  // remove the current variable from the size
965  pos = parents->pos(*jt) - 1;
966  continue;
967  }
968 
969  // compute probability distribution to avoid doing it multiple times
970  // (at
971  // each combination of messages)
972  distri[1] = ArcsP_min_[Arc(bnet__->nodeId(**jt), Y)];
973  distri[0] = GUM_SCALAR(1.) - distri[1];
974  msg_p.push_back(distri);
975 
976  if (ArcsP_max_.exists(Arc(bnet__->nodeId(**jt), Y))) {
977  distri[1] = ArcsP_max_[Arc(bnet__->nodeId(**jt), Y)];
978  distri[0] = GUM_SCALAR(1.) - distri[1];
979  msg_p.push_back(distri);
980  }
981 
982  msgs_p.push_back(msg_p);
983  msg_p.clear();
984  }
985 
// -2 is the "not yet computed" sentinel understood by enum_combi_
986  GUM_SCALAR min = -2.;
987  GUM_SCALAR max = -2.;
988 
989  std::vector< GUM_SCALAR > lx;
990  lx.push_back(lmin);
991 
992  if (lmin != lmax) { lx.push_back(lmax); }
993 
994  enum_combi_(msgs_p, Y, min, max, lx, pos);
995 
// repair a missing bound from the other; bail out if neither was computed
996  if (min == -2. || max == -2.) {
997  if (min != -2.) {
998  max = min;
999  } else if (max != -2.) {
1000  min = max;
1001  } else {
1002  std::cout << std::endl;
1003  std::cout << "!!!! pas de message L calculable !!!!" << std::endl;
1004  return;
1005  }
1006  }
1007 
1008  if (min < 0.) { min = 0.; }
1009 
1010  if (max < 0.) { max = 0.; }
1011 
1012  bool update = false;
1013 
1014  if (min != ArcsL_min_[Arc(X, Y)]) {
1015  ArcsL_min_[Arc(X, Y)] = min;
1016  update = true;
1017  }
1018 
// ArcsL_max_ entry exists only when max differs from min
1019  if (ArcsL_max_.exists(Arc(X, Y))) {
1020  if (max != ArcsL_max_[Arc(X, Y)]) {
1021  if (max != min) {
1022  ArcsL_max_[Arc(X, Y)] = max;
1023  } else { // if ( max == min )
1024  ArcsL_max_.erase(Arc(X, Y));
1025  }
1026 
1027  update = true;
1028  }
1029  } else {
1030  if (max != min) {
1031  ArcsL_max_.insert(Arc(X, Y), max);
1032  update = true;
1033  }
1034  }
1035 
// notify X and schedule it for the next propagation sweep
1036  if (update) {
1037  update_l_.set(X, true);
1038  next_active_nodes_set.insert(X);
1039  }
1040 
1041  } // end of update_p || update_l
1042  }
1043 
// Computes the pi message sent from node X to one child (demanding_child):
// combines X's lambda contribution from all its OTHER children with its pi
// bounds (refreshed from the parents' arc messages when needed), and stores
// the resulting interval in ArcsP_min_/ArcsP_max_[Arc(X, demanding_child)].
// On an effective change, the child is flagged and queued for the next sweep.
// NOTE(review): the first part of the signature (original line 1045,
// presumably msgP_(const NodeId X, ...) was dropped by the doxygen
// extraction — only the second parameter line remains.
1044  template < typename GUM_SCALAR >
1046  const NodeId demanding_child) {
1047  NodeSet const& children = bnet__->children(X);
1048 
1049  const auto parents = &bnet__->cpt(X).variablesSequence();
1050 
// isolated node (single neighbour, no evidence): nothing to send
1051  if (((children.size() + parents->size() - 1) == 1)
1052  && (!infE__::evidence_.exists(X))) {
1053  return;
1054  }
1055 
1056  // LM_part ---- from all children but one --- the lonely one will get the
1057  // message
1058 
// evidence on X fixes the message outright
1059  if (infE__::evidence_.exists(X)) {
1060  ArcsP_min_[Arc(X, demanding_child)] = infE__::evidence_[X][1];
1061 
1062  if (ArcsP_max_.exists(Arc(X, demanding_child))) {
1063  ArcsP_max_.erase(Arc(X, demanding_child));
1064  }
1065 
1066  return;
1067  }
1068 
1069  bool update_l = update_l_[X];
1070  bool update_p = update_p_[X];
1071 
1072  if (!update_p && !update_l) { return; }
1073 
1074  GUM_SCALAR lmin = 1.;
1075  GUM_SCALAR lmax = 1.;
1076 
1077  // use cbegin if available
// multiply lambda bounds from every child of X except the receiver
1078  for (auto chil: children) {
1079  if (chil == demanding_child) { continue; }
1080 
1081  lmin *= ArcsL_min_[Arc(X, chil)];
1082 
1083  if (ArcsL_max_.exists(Arc(X, chil))) {
1084  lmax *= ArcsL_max_[Arc(X, chil)];
1085  } else {
1086  lmax *= ArcsL_min_[Arc(X, chil)];
1087  }
1088  }
1089 
// `x != x` is a NaN test: repair one bound from the other if possible
1090  if (lmin != lmin && lmax == lmax) { lmin = lmax; }
1091 
1092  if (lmax != lmax && lmin == lmin) { lmax = lmin; }
1093 
1094  if (lmax != lmax && lmin != lmin) {
1095  std::cout << "pas de vraisemblance definie [lmin, lmax] (observations "
1096  "incompatibles ?)"
1097  << std::endl;
1098  return;
1099  }
1100 
1101  if (lmin < 0.) { lmin = 0.; }
1102 
1103  if (lmax < 0.) { lmax = 0.; }
1104 
1105  // refresh PI_part
1106  GUM_SCALAR min = INF_;
1107  GUM_SCALAR max = 0.;
1108 
1109  if (update_p) {
1110  std::vector< std::vector< std::vector< GUM_SCALAR > > > msgs_p;
1111  std::vector< std::vector< GUM_SCALAR > > msg_p;
1112  std::vector< GUM_SCALAR > distri(2);
1113 
1114  // +1 from start to avoid counting_ itself
1115  // use const_iterators if available
1116  for (auto jt = ++parents->begin(), theEnd = parents->end(); jt != theEnd;
1117  ++jt) {
1118  // compute probability distribution to avoid doing it multiple times
1119  // (at
1120  // each combination of messages)
1121  distri[1] = ArcsP_min_[Arc(bnet__->nodeId(**jt), X)];
1122  distri[0] = GUM_SCALAR(1.) - distri[1];
1123  msg_p.push_back(distri);
1124 
1125  if (ArcsP_max_.exists(Arc(bnet__->nodeId(**jt), X))) {
1126  distri[1] = ArcsP_max_[Arc(bnet__->nodeId(**jt), X)];
1127  distri[0] = GUM_SCALAR(1.) - distri[1];
1128  msg_p.push_back(distri);
1129  }
1130 
1131  msgs_p.push_back(msg_p);
1132  msg_p.clear();
1133  }
1134 
1135  enum_combi_(msgs_p, X, min, max);
1136 
1137  if (min < 0.) { min = 0.; }
1138 
1139  if (max < 0.) { max = 0.; }
1140 
1141  if (min == INF_ || max == INF_) {
1142  std::cout << " ERREUR msg P min = max = INF " << std::endl;
1143  std::cout.flush();
1144  return;
1145  }
1146 
1147  NodesP_min_[X] = min;
1148 
// NodesP_max_ entry exists only when max differs from min
1149  if (min != max) {
1150  NodesP_max_.set(X, max);
1151  } else if (NodesP_max_.exists(X)) {
1152  NodesP_max_.erase(X);
1153  }
1154 
1155  update_p_.set(X, false);
1156 
1157  } // end of update_p
1158  else {
1159  min = NodesP_min_[X];
1160 
1161  if (NodesP_max_.exists(X)) {
1162  max = NodesP_max_[X];
1163  } else {
1164  max = min;
1165  }
1166  }
1167 
1168  if (update_p || update_l) {
1169  GUM_SCALAR msg_p_min;
1170  GUM_SCALAR msg_p_max;
1171 
1172  // limit cases for min
1173  if (min == INF_ && lmin == 0.) {
1174  std::cout << "MESSAGE P ERR (negatif) : pi = inf, l = 0" << std::endl;
1175  }
1176 
1177  if (lmin == INF_) { // infinite case
1178  msg_p_min = GUM_SCALAR(1.);
1179  } else if (min == 0. || lmin == 0.) {
1180  msg_p_min = 0;
1181  } else {
// message = 1 / (1 + ((1/pi - 1) / lambda))
1182  msg_p_min = GUM_SCALAR(1. / (1. + ((1. / min - 1.) * 1. / lmin)));
1183  }
1184 
1185  // limit cases for max
1186  if (max == INF_ && lmax == 0.) {
1187  std::cout << "MESSAGE P ERR (negatif) : pi = inf, l = 0" << std::endl;
1188  }
1189 
1190  if (lmax == INF_) { // infinite case
1191  msg_p_max = GUM_SCALAR(1.);
1192  } else if (max == 0. || lmax == 0.) {
1193  msg_p_max = 0;
1194  } else {
1195  msg_p_max = GUM_SCALAR(1. / (1. + ((1. / max - 1.) * 1. / lmax)));
1196  }
1197 
// NaN repair (x != x), with diagnostics
1198  if (msg_p_min != msg_p_min && msg_p_max == msg_p_max) {
1199  msg_p_min = msg_p_max;
1200  std::cout << std::endl;
1201  std::cout << "msg_p_min is NaN" << std::endl;
1202  }
1203 
1204  if (msg_p_max != msg_p_max && msg_p_min == msg_p_min) {
1205  msg_p_max = msg_p_min;
1206  std::cout << std::endl;
1207  std::cout << "msg_p_max is NaN" << std::endl;
1208  }
1209 
1210  if (msg_p_max != msg_p_max && msg_p_min != msg_p_min) {
1211  std::cout << std::endl;
1212  std::cout << "pas de message P calculable (verifier observations)"
1213  << std::endl;
1214  return;
1215  }
1216 
1217  if (msg_p_min < 0.) { msg_p_min = 0.; }
1218 
1219  if (msg_p_max < 0.) { msg_p_max = 0.; }
1220 
1221  bool update = false;
1222 
1223  if (msg_p_min != ArcsP_min_[Arc(X, demanding_child)]) {
1224  ArcsP_min_[Arc(X, demanding_child)] = msg_p_min;
1225  update = true;
1226  }
1227 
// ArcsP_max_ entry exists only when the two bounds differ
1228  if (ArcsP_max_.exists(Arc(X, demanding_child))) {
1229  if (msg_p_max != ArcsP_max_[Arc(X, demanding_child)]) {
1230  if (msg_p_max != msg_p_min) {
1231  ArcsP_max_[Arc(X, demanding_child)] = msg_p_max;
1232  } else { // if ( msg_p_max == msg_p_min )
1233  ArcsP_max_.erase(Arc(X, demanding_child));
1234  }
1235 
1236  update = true;
1237  }
1238  } else {
1239  if (msg_p_max != msg_p_min) {
1240  ArcsP_max_.insert(Arc(X, demanding_child), msg_p_max);
1241  update = true;
1242  }
1243  }
1244 
// notify the child and schedule it for the next propagation sweep
1245  if (update) {
1246  update_p_.set(demanding_child, true);
1247  next_active_nodes_set.insert(demanding_child);
1248  }
1249 
1250  } // end of : update_l || update_p
1251  }
1252 
1253  template < typename GUM_SCALAR >
1255  for (auto node: bnet__->nodes()) {
1256  if ((!refreshIndic)
1257  && cn__->currentNodeType(node)
1259  continue;
1260  }
1261 
1262  NodeSet const& children = bnet__->children(node);
1263 
1264  auto parents = &bnet__->cpt(node).variablesSequence();
1265 
1266  if (update_l_[node]) {
1267  GUM_SCALAR lmin = 1.;
1268  GUM_SCALAR lmax = 1.;
1269 
1270  if (!children.empty() && !infE__::evidence_.exists(node)) {
1271  for (auto chil: children) {
1272  lmin *= ArcsL_min_[Arc(node, chil)];
1273 
1274  if (ArcsL_max_.exists(Arc(node, chil))) {
1275  lmax *= ArcsL_max_[Arc(node, chil)];
1276  } else {
1277  lmax *= ArcsL_min_[Arc(node, chil)];
1278  }
1279  }
1280 
1281  if (lmin != lmin && lmax == lmax) { lmin = lmax; }
1282 
1283  lmax = lmin;
1284 
1285  if (lmax != lmax && lmin != lmin) {
1286  std::cout
1287  << "pas de vraisemblance definie [lmin, lmax] (observations "
1288  "incompatibles ?)"
1289  << std::endl;
1290  return;
1291  }
1292 
1293  if (lmin < 0.) { lmin = 0.; }
1294 
1295  if (lmax < 0.) { lmax = 0.; }
1296 
1297  NodesL_min_[node] = lmin;
1298 
1299  if (lmin != lmax) {
1300  NodesL_max_.set(node, lmax);
1301  } else if (NodesL_max_.exists(node)) {
1302  NodesL_max_.erase(node);
1303  }
1304  }
1305 
1306  } // end of : update_l
1307 
1308  if (update_p_[node]) {
1309  if ((parents->size() - 1) > 0 && !infE__::evidence_.exists(node)) {
1310  std::vector< std::vector< std::vector< GUM_SCALAR > > > msgs_p;
1311  std::vector< std::vector< GUM_SCALAR > > msg_p;
1312  std::vector< GUM_SCALAR > distri(2);
1313 
1314  // +1 from start to avoid counting_ itself
1315  // cbegin
1316  for (auto jt = ++parents->begin(), theEnd = parents->end();
1317  jt != theEnd;
1318  ++jt) {
1319  // compute probability distribution to avoid doing it multiple
1320  // times
1321  // (at each combination of messages)
1322  distri[1] = ArcsP_min_[Arc(bnet__->nodeId(**jt), node)];
1323  distri[0] = GUM_SCALAR(1.) - distri[1];
1324  msg_p.push_back(distri);
1325 
1326  if (ArcsP_max_.exists(Arc(bnet__->nodeId(**jt), node))) {
1327  distri[1] = ArcsP_max_[Arc(bnet__->nodeId(**jt), node)];
1328  distri[0] = GUM_SCALAR(1.) - distri[1];
1329  msg_p.push_back(distri);
1330  }
1331 
1332  msgs_p.push_back(msg_p);
1333  msg_p.clear();
1334  }
1335 
1336  GUM_SCALAR min = INF_;
1337  GUM_SCALAR max = 0.;
1338 
1339  enum_combi_(msgs_p, node, min, max);
1340 
1341  if (min < 0.) { min = 0.; }
1342 
1343  if (max < 0.) { max = 0.; }
1344 
1345  NodesP_min_[node] = min;
1346 
1347  if (min != max) {
1348  NodesP_max_.set(node, max);
1349  } else if (NodesP_max_.exists(node)) {
1350  NodesP_max_.erase(node);
1351  }
1352 
1353  update_p_[node] = false;
1354  }
1355  } // end of update_p
1356 
1357  } // end of : for each node
1358  }
1359 
1360  template < typename GUM_SCALAR >
1362  for (auto node: bnet__->nodes()) {
1363  GUM_SCALAR msg_p_min = 1.;
1364  GUM_SCALAR msg_p_max = 0.;
1365 
1366  if (infE__::evidence_.exists(node)) {
1367  if (infE__::evidence_[node][1] == 0.) {
1368  msg_p_min = (GUM_SCALAR)0.;
1369  } else if (infE__::evidence_[node][1] == 1.) {
1370  msg_p_min = 1.;
1371  }
1372 
1373  msg_p_max = msg_p_min;
1374  } else {
1375  GUM_SCALAR min = NodesP_min_[node];
1376  GUM_SCALAR max;
1377 
1378  if (NodesP_max_.exists(node)) {
1379  max = NodesP_max_[node];
1380  } else {
1381  max = min;
1382  }
1383 
1384  GUM_SCALAR lmin = NodesL_min_[node];
1385  GUM_SCALAR lmax;
1386 
1387  if (NodesL_max_.exists(node)) {
1388  lmax = NodesL_max_[node];
1389  } else {
1390  lmax = lmin;
1391  }
1392 
1393  if (min == INF_ || max == INF_) {
1394  std::cout << " min ou max === INF_ !!!!!!!!!!!!!!!!!!!!!!!!!! "
1395  << std::endl;
1396  return;
1397  }
1398 
1399  if (min == INF_ && lmin == 0.) {
1400  std::cout << "proba ERR (negatif) : pi = inf, l = 0" << std::endl;
1401  return;
1402  }
1403 
1404  if (lmin == INF_) {
1405  msg_p_min = GUM_SCALAR(1.);
1406  } else if (min == 0. || lmin == 0.) {
1407  msg_p_min = GUM_SCALAR(0.);
1408  } else {
1409  msg_p_min = GUM_SCALAR(1. / (1. + ((1. / min - 1.) * 1. / lmin)));
1410  }
1411 
1412  if (max == INF_ && lmax == 0.) {
1413  std::cout << "proba ERR (negatif) : pi = inf, l = 0" << std::endl;
1414  return;
1415  }
1416 
1417  if (lmax == INF_) {
1418  msg_p_max = GUM_SCALAR(1.);
1419  } else if (max == 0. || lmax == 0.) {
1420  msg_p_max = GUM_SCALAR(0.);
1421  } else {
1422  msg_p_max = GUM_SCALAR(1. / (1. + ((1. / max - 1.) * 1. / lmax)));
1423  }
1424  }
1425 
1426  if (msg_p_min != msg_p_min && msg_p_max == msg_p_max) {
1427  msg_p_min = msg_p_max;
1428  std::cout << std::endl;
1429  std::cout << "msg_p_min is NaN" << std::endl;
1430  }
1431 
1432  if (msg_p_max != msg_p_max && msg_p_min == msg_p_min) {
1433  msg_p_max = msg_p_min;
1434  std::cout << std::endl;
1435  std::cout << "msg_p_max is NaN" << std::endl;
1436  }
1437 
1438  if (msg_p_max != msg_p_max && msg_p_min != msg_p_min) {
1439  std::cout << std::endl;
1440  std::cout << "Please check the observations (no proba can be computed)"
1441  << std::endl;
1442  return;
1443  }
1444 
1445  if (msg_p_min < 0.) { msg_p_min = 0.; }
1446 
1447  if (msg_p_max < 0.) { msg_p_max = 0.; }
1448 
1449  infE__::marginalMin_[node][0] = 1 - msg_p_max;
1450  infE__::marginalMax_[node][0] = 1 - msg_p_min;
1451  infE__::marginalMin_[node][1] = msg_p_min;
1452  infE__::marginalMax_[node][1] = msg_p_max;
1453  }
1454  }
1455 
1456  template < typename GUM_SCALAR >
1458  refreshLMsPIs_();
1459  updateMarginals_();
1460 
1461  return infE__::computeEpsilon_();
1462  }
1463 
1464  template < typename GUM_SCALAR >
1466  for (auto node: bnet__->nodes()) {
1467  if (cn__->currentNodeType(node)
1469  continue;
1470  }
1471 
1472  for (auto pare: bnet__->parents(node)) {
1473  msgP_(pare, node);
1474  }
1475  }
1476 
1477  refreshLMsPIs_(true);
1478  updateMarginals_();
1479  }
1480 
1481  template < typename GUM_SCALAR >
1483  if (infE__::modal_.empty()) { return; }
1484 
1485  std::vector< std::vector< GUM_SCALAR > > vertices(
1486  2, std::vector< GUM_SCALAR >(2));
1487 
1488  for (auto node: bnet__->nodes()) {
1489  vertices[0][0] = infE__::marginalMin_[node][0];
1490  vertices[0][1] = infE__::marginalMax_[node][1];
1491 
1492  vertices[1][0] = infE__::marginalMax_[node][0];
1493  vertices[1][1] = infE__::marginalMin_[node][1];
1494 
1495  for (auto vertex = 0, vend = 2; vertex != vend; vertex++) {
1496  infE__::updateExpectations_(node, vertices[vertex]);
1497  // test credal sets vertices elim
1498  // remove with L2U since variables are binary
1499  // but does the user know that ?
1500  infE__::updateCredalSets_(
1501  node,
1502  vertices[vertex]); // no redundancy elimination with 2 vertices
1503  }
1504  }
1505  }
1506 
1507  template < typename GUM_SCALAR >
1509  const CredalNet< GUM_SCALAR >& cnet) :
1510  InferenceEngine< GUM_SCALAR >::InferenceEngine(cnet) {
1511  if (!cnet.isSeparatelySpecified()) {
1513  "CNLoopyPropagation is only available "
1514  "with separately specified nets");
1515  }
1516 
1517  // test for binary cn
1518  for (auto node: cnet.current_bn().nodes())
1519  if (cnet.current_bn().variable(node).domainSize() != 2) {
1521  "CNLoopyPropagation is only available "
1522  "with binary credal networks");
1523  }
1524 
1525  // test if compute CPTMinMax has been called
1526  if (!cnet.hasComputedCPTMinMax()) {
1528  "CNLoopyPropagation only works when "
1529  "\"computeCPTMinMax()\" has been called for "
1530  "this credal net");
1531  }
1532 
1533  cn__ = &cnet;
1534  bnet__ = &cnet.current_bn();
1535 
1537  InferenceUpToDate_ = false;
1538 
1539  GUM_CONSTRUCTOR(CNLoopyPropagation);
1540  }
1541 
1542  template < typename GUM_SCALAR >
1544  InferenceUpToDate_ = false;
1545 
1546  if (msg_l_sent_.size() > 0) {
1547  for (auto node: bnet__->nodes()) {
1548  delete msg_l_sent_[node];
1549  }
1550  }
1551 
1552  //_msg_l_sent.clear();
1553  //_update_l.clear();
1554  //_update_p.clear();
1555 
1556  GUM_DESTRUCTOR(CNLoopyPropagation);
1557  }
1558 
1559  template < typename GUM_SCALAR >
1561  inferenceType__ = inft;
1562  }
1563 
1564  template < typename GUM_SCALAR >
1567  return inferenceType__;
1568  }
1569  } // namespace credal
1570 } // end of namespace gum
const bool isSeparatelySpecified() const
void refreshLMsPIs_(bool refreshIndic=false)
Get the last messages from one's parents and children.
#define INF_
bool empty() const noexcept
Indicates whether the set is the empty set.
Definition: set_tpl.h:707
NodeProperty< NodeSet *> msg_l_sent_
Used to keep track of one's messages sent to its parents.
void makeInferenceByOrderedArcs_()
Starts the inference with this inference type.
GUM_SCALAR calculateEpsilon_()
Compute epsilon.
Set< NodeId > NodeSet
Some typdefs and define for shortcuts ...
void makeInferenceNodeToNeighbours_()
Starts the inference with this inference type.
void eraseAllEvidence()
Erase all inference related data to perform another one.
NodeType currentNodeType(const NodeId &id) const
Copyright 2005-2020 Pierre-Henri WUILLEMIN() & Christophe GONZALES() info_at_agrum_dot_org.
void swap(HashTable< LpCol, double > *&a, HashTable< LpCol, double > *&b)
Swap the addresses of two pointers to hashTables.
<agrum/CN/CNLoopyPropagation.h>
void erase(const Key &k)
Erases an element from the set.
Definition: set_tpl.h:656
Copyright 2005-2020 Pierre-Henri WUILLEMIN() & Christophe GONZALES() info_at_agrum_dot_org.
Definition: agrum.h:25
void enum_combi_(std::vector< std::vector< std::vector< GUM_SCALAR > > > &msgs_p, const NodeId &id, GUM_SCALAR &msg_l_min, GUM_SCALAR &msg_l_max, std::vector< GUM_SCALAR > &lx, const Idx &pos)
Used by msgL_.
Class template representing a Credal Network.
Definition: credalNet.h:89
InferenceType inferenceType()
Get the inference type.
const IBayesNet< GUM_SCALAR > * bnet__
A pointer to its IBayesNet used as a DAG.
The base class for all directed edgesThis class is used as a basis for manipulating all directed edge...
CNLoopyPropagation(const CredalNet< GUM_SCALAR > &cnet)
Constructor.
bool InferenceUpToDate_
TRUE if inference has already been performed, FALSE otherwise.
Uses a node-set so we don't iterate on nodes that can't send a new message.
const NodeSet & parents(const NodeId id) const
returns the set of nodes with arc ingoing to a given node
void msgP_(const NodeId X, const NodeId demanding_child)
Sends a message to one's child, i.e.
void saveInference(const std::string &path)
void makeInferenceByRandomOrder_()
Starts the inference with this inference type.
void makeInference()
Starts the inference.
const BayesNet< GUM_SCALAR > & current_bn() const
const NodeSet & children(const NodeId id) const
returns the set of nodes with arc outgoing from a given node
Abstract class template representing a CredalNet inference engine.
void initialize_()
Topological forward propagation to initialize old marginals & messages.
const bool hasComputedCPTMinMax() const
void updateIndicatrices_()
Only update indicatrices variables at the end of computations ( calls msgP_ ).
void compute_ext_(GUM_SCALAR &msg_l_min, GUM_SCALAR &msg_l_max, std::vector< GUM_SCALAR > &lx, GUM_SCALAR &num_min, GUM_SCALAR &num_max, GUM_SCALAR &den_min, GUM_SCALAR &den_max)
Used by msgL_.
void updateMarginals_()
Compute marginals from up-to-date messages.
InferenceType
Inference type to be used by the algorithm.
InferenceType inferenceType__
The choosen inference type.
Size Idx
Type for indexes.
Definition: types.h:53
std::size_t Size
In aGrUM, hashed values are unsigned long int.
Definition: types.h:48
Size size() const noexcept
Returns the number of elements in the set.
Definition: set_tpl.h:701
void msgL_(const NodeId X, const NodeId demanding_parent)
Sends a message to one's parent, i.e.
const CredalNet< GUM_SCALAR > * cn__
A pointer to the CredalNet to be used.
Base class for dag.
Definition: DAG.h:102
Size NodeId
Type for node ids.
Definition: graphElements.h:98
void insert(const Key &k)
Inserts a new element into the set.
Definition: set_tpl.h:613
#define GUM_ERROR(type, msg)
Definition: exceptions.h:55
void computeExpectations_()
Since the network is binary, expectations can be computed from the final marginals which give us the ...