// aGrUM 0.16.0 — CNLoopyPropagation_tpl.h
// (listing recovered from the doxygen documentation of this file;
// see the aGrUM reference documentation for the full header.)
25 namespace gum {
26  namespace credal {
27 
28  template < typename GUM_SCALAR >
29  void CNLoopyPropagation< GUM_SCALAR >::saveInference(const std::string& path) {
30  std::string path_name = path.substr(0, path.size() - 4);
31  path_name = path_name + ".res";
32 
33  std::ofstream res(path_name.c_str(), std::ios::out | std::ios::trunc);
34 
35  if (!res.good()) {
37  "CNLoopyPropagation<GUM_SCALAR>::saveInference(std::"
38  "string & path) : could not open file : "
39  + path_name);
40  }
41 
42  std::string ext = path.substr(path.size() - 3, path.size());
43 
44  if (std::strcmp(ext.c_str(), "evi") == 0) {
45  std::ifstream evi(path.c_str(), std::ios::in);
46  std::string ligne;
47 
48  if (!evi.good()) {
50  "CNLoopyPropagation<GUM_SCALAR>::saveInference(std::"
51  "string & path) : could not open file : "
52  + ext);
53  }
54 
55  while (evi.good()) {
56  getline(evi, ligne);
57  res << ligne << "\n";
58  }
59 
60  evi.close();
61  }
62 
63  res << "[RESULTATS]"
64  << "\n";
65 
66  for (auto node : __bnet->nodes()) {
67  // calcul distri posteriori
68  GUM_SCALAR msg_p_min = 1.0;
69  GUM_SCALAR msg_p_max = 0.0;
70 
71  // cas evidence, calcul immediat
72  if (__infE::_evidence.exists(node)) {
73  if (__infE::_evidence[node][1] == 0.) {
74  msg_p_min = 0.;
75  } else if (__infE::_evidence[node][1] == 1.) {
76  msg_p_min = 1.;
77  }
78 
79  msg_p_max = msg_p_min;
80  }
81  // sinon depuis node P et node L
82  else {
83  GUM_SCALAR min = _NodesP_min[node];
84  GUM_SCALAR max;
85 
86  if (_NodesP_max.exists(node)) {
87  max = _NodesP_max[node];
88  } else {
89  max = min;
90  }
91 
92  GUM_SCALAR lmin = _NodesL_min[node];
93  GUM_SCALAR lmax;
94 
95  if (_NodesL_max.exists(node)) {
96  lmax = _NodesL_max[node];
97  } else {
98  lmax = lmin;
99  }
100 
101  // cas limites sur min
102  if (min == _INF && lmin == 0.) {
103  std::cout << "proba ERR (negatif) : pi = inf, l = 0" << std::endl;
104  }
105 
106  if (lmin == _INF) { // cas infini
107  msg_p_min = GUM_SCALAR(1.);
108  } else if (min == 0. || lmin == 0.) {
109  msg_p_min = GUM_SCALAR(0.);
110  } else {
111  msg_p_min = GUM_SCALAR(1. / (1. + ((1. / min - 1.) * 1. / lmin)));
112  }
113 
114  // cas limites sur max
115  if (max == _INF && lmax == 0.) {
116  std::cout << "proba ERR (negatif) : pi = inf, l = 0" << std::endl;
117  }
118 
119  if (lmax == _INF) { // cas infini
120  msg_p_max = GUM_SCALAR(1.);
121  } else if (max == 0. || lmax == 0.) {
122  msg_p_max = GUM_SCALAR(0.);
123  } else {
124  msg_p_max = GUM_SCALAR(1. / (1. + ((1. / max - 1.) * 1. / lmax)));
125  }
126  }
127 
128  if (msg_p_min != msg_p_min && msg_p_max == msg_p_max) {
129  msg_p_min = msg_p_max;
130  }
131 
132  if (msg_p_max != msg_p_max && msg_p_min == msg_p_min) {
133  msg_p_max = msg_p_min;
134  }
135 
136  if (msg_p_max != msg_p_max && msg_p_min != msg_p_min) {
137  std::cout << std::endl;
138  std::cout << "pas de proba calculable (verifier observations)"
139  << std::endl;
140  }
141 
142  res << "P(" << __bnet->variable(node).name() << " | e) = ";
143 
144  if (__infE::_evidence.exists(node)) {
145  res << "(observe)" << std::endl;
146  } else {
147  res << std::endl;
148  }
149 
150  res << "\t\t" << __bnet->variable(node).label(0) << " [ "
151  << (GUM_SCALAR)1. - msg_p_max;
152 
153  if (msg_p_min != msg_p_max) {
154  res << ", " << (GUM_SCALAR)1. - msg_p_min << " ] | ";
155  } else {
156  res << " ] | ";
157  }
158 
159  res << __bnet->variable(node).label(1) << " [ " << msg_p_min;
160 
161  if (msg_p_min != msg_p_max) {
162  res << ", " << msg_p_max << " ]" << std::endl;
163  } else {
164  res << " ]" << std::endl;
165  }
166  } // end of : for each node
167 
168  res.close();
169  }
170 
180  template < typename GUM_SCALAR >
182  GUM_SCALAR& msg_l_min,
183  GUM_SCALAR& msg_l_max,
184  std::vector< GUM_SCALAR >& lx,
185  GUM_SCALAR& num_min,
186  GUM_SCALAR& num_max,
187  GUM_SCALAR& den_min,
188  GUM_SCALAR& den_max) {
189  GUM_SCALAR num_min_tmp = 1.;
190  GUM_SCALAR den_min_tmp = 1.;
191  GUM_SCALAR num_max_tmp = 1.;
192  GUM_SCALAR den_max_tmp = 1.;
193 
194  GUM_SCALAR res_min = 1.0, res_max = 0.0;
195 
196  auto lsize = lx.size();
197 
198  for (decltype(lsize) i = 0; i < lsize; i++) {
199  bool non_defini_min = false;
200  bool non_defini_max = false;
201 
202  if (lx[i] == _INF) {
203  num_min_tmp = num_min;
204  den_min_tmp = den_max;
205  num_max_tmp = num_max;
206  den_max_tmp = den_min;
207  } else if (lx[i] == (GUM_SCALAR)1.) {
208  num_min_tmp = GUM_SCALAR(1.);
209  den_min_tmp = GUM_SCALAR(1.);
210  num_max_tmp = GUM_SCALAR(1.);
211  den_max_tmp = GUM_SCALAR(1.);
212  } else if (lx[i] > (GUM_SCALAR)1.) {
213  GUM_SCALAR li = GUM_SCALAR(1.) / (lx[i] - GUM_SCALAR(1.));
214  num_min_tmp = num_min + li;
215  den_min_tmp = den_max + li;
216  num_max_tmp = num_max + li;
217  den_max_tmp = den_min + li;
218  } else if (lx[i] < (GUM_SCALAR)1.) {
219  GUM_SCALAR li = GUM_SCALAR(1.) / (lx[i] - GUM_SCALAR(1.));
220  num_min_tmp = num_max + li;
221  den_min_tmp = den_min + li;
222  num_max_tmp = num_min + li;
223  den_max_tmp = den_max + li;
224  }
225 
226  if (den_min_tmp == 0. && num_min_tmp == 0.) {
227  non_defini_min = true;
228  } else if (den_min_tmp == 0. && num_min_tmp != 0.) {
229  res_min = _INF;
230  } else if (den_min_tmp != _INF || num_min_tmp != _INF) {
231  res_min = num_min_tmp / den_min_tmp;
232  }
233 
234  if (den_max_tmp == 0. && num_max_tmp == 0.) {
235  non_defini_max = true;
236  } else if (den_max_tmp == 0. && num_max_tmp != 0.) {
237  res_max = _INF;
238  } else if (den_max_tmp != _INF || num_max_tmp != _INF) {
239  res_max = num_max_tmp / den_max_tmp;
240  }
241 
242  if (non_defini_max && non_defini_min) {
243  std::cout << "undefined msg" << std::endl;
244  continue;
245  } else if (non_defini_min && !non_defini_max) {
246  res_min = res_max;
247  } else if (non_defini_max && !non_defini_min) {
248  res_max = res_min;
249  }
250 
251  if (res_min < 0.) { res_min = 0.; }
252 
253  if (res_max < 0.) { res_max = 0.; }
254 
255  if (msg_l_min == msg_l_max && msg_l_min == -2.) {
256  msg_l_min = res_min;
257  msg_l_max = res_max;
258  }
259 
260  if (res_max > msg_l_max) { msg_l_max = res_max; }
261 
262  if (res_min < msg_l_min) { msg_l_min = res_min; }
263 
264  } // end of : for each lx
265  }
266 
270  template < typename GUM_SCALAR >
272  std::vector< std::vector< GUM_SCALAR > >& combi_msg_p,
273  const NodeId& id,
274  GUM_SCALAR& msg_l_min,
275  GUM_SCALAR& msg_l_max,
276  std::vector< GUM_SCALAR >& lx,
277  const Idx& pos) {
278  GUM_SCALAR num_min = 0.;
279  GUM_SCALAR num_max = 0.;
280  GUM_SCALAR den_min = 0.;
281  GUM_SCALAR den_max = 0.;
282 
283  auto taille = combi_msg_p.size();
284 
285  std::vector< typename std::vector< GUM_SCALAR >::iterator > it(taille);
286 
287  for (decltype(taille) i = 0; i < taille; i++) {
288  it[i] = combi_msg_p[i].begin();
289  }
290 
291  Size pp = pos;
292 
293  Size combi_den = 0;
294  Size combi_num = pp;
295 
296  // marginalisation
297  while (it[taille - 1] != combi_msg_p[taille - 1].end()) {
298  GUM_SCALAR prod = 1.;
299 
300  for (decltype(taille) k = 0; k < taille; k++) {
301  prod *= *it[k];
302  }
303 
304  den_min += (__cn->get_CPT_min()[id][combi_den] * prod);
305  den_max += (__cn->get_CPT_max()[id][combi_den] * prod);
306 
307  num_min += (__cn->get_CPT_min()[id][combi_num] * prod);
308  num_max += (__cn->get_CPT_max()[id][combi_num] * prod);
309 
310  combi_den++;
311  combi_num++;
312 
313  if (combi_den % pp == 0) {
314  combi_den += pp;
315  combi_num += pp;
316  }
317 
318  // incrementation
319  ++it[0];
320 
321  for (decltype(taille) i = 0;
322  (i < taille - 1) && (it[i] == combi_msg_p[i].end());
323  ++i) {
324  it[i] = combi_msg_p[i].begin();
325  ++it[i + 1];
326  }
327  } // end of : marginalisation
328 
329  _compute_ext(msg_l_min, msg_l_max, lx, num_min, num_max, den_min, den_max);
330  }
331 
336  template < typename GUM_SCALAR >
338  std::vector< std::vector< GUM_SCALAR > >& combi_msg_p,
339  const NodeId& id,
340  GUM_SCALAR& msg_p_min,
341  GUM_SCALAR& msg_p_max) {
342  GUM_SCALAR min = 0.;
343  GUM_SCALAR max = 0.;
344 
345  auto taille = combi_msg_p.size();
346 
347  std::vector< typename std::vector< GUM_SCALAR >::iterator > it(taille);
348 
349  for (decltype(taille) i = 0; i < taille; i++) {
350  it[i] = combi_msg_p[i].begin();
351  }
352 
353  int combi = 0;
354  auto theEnd = combi_msg_p[taille - 1].end();
355 
356  while (it[taille - 1] != theEnd) {
357  GUM_SCALAR prod = 1.;
358 
359  for (decltype(taille) k = 0; k < taille; k++) {
360  prod *= *it[k];
361  }
362 
363  min += (__cn->get_CPT_min()[id][combi] * prod);
364  max += (__cn->get_CPT_max()[id][combi] * prod);
365 
366  combi++;
367 
368  // incrementation
369  ++it[0];
370 
371  for (decltype(taille) i = 0;
372  (i < taille - 1) && (it[i] == combi_msg_p[i].end());
373  ++i) {
374  it[i] = combi_msg_p[i].begin();
375  ++it[i + 1];
376  }
377  }
378 
379  if (min < msg_p_min) { msg_p_min = min; }
380 
381  if (max > msg_p_max) { msg_p_max = max; }
382  }
383 
387  template < typename GUM_SCALAR >
389  std::vector< std::vector< std::vector< GUM_SCALAR > > >& msgs_p,
390  const NodeId& id,
391  GUM_SCALAR& msg_p_min,
392  GUM_SCALAR& msg_p_max) {
393  auto taille = msgs_p.size();
394 
395  // source node
396  if (taille == 0) {
397  msg_p_min = __cn->get_CPT_min()[id][0];
398  msg_p_max = __cn->get_CPT_max()[id][0];
399  return;
400  }
401 
402  decltype(taille) msgPerm = 1;
403 #pragma omp parallel
404  {
405  GUM_SCALAR msg_pmin = msg_p_min;
406  GUM_SCALAR msg_pmax = msg_p_max;
407 
408  std::vector< std::vector< GUM_SCALAR > > combi_msg_p(taille);
409 
410  decltype(taille) confs = 1;
411 
412 #pragma omp for
413 
414  for (long i = 0; i < long(taille); i++) {
415  confs *= msgs_p[i].size();
416  }
417 
418 #pragma omp atomic
419  msgPerm *= confs;
420 #pragma omp barrier
421 #pragma omp \
422  flush // ( msgPerm ) let the compiler choose what to flush (due to mvsc)
423 
424 #pragma omp for
425 
426  for (int j = 0; j < int(msgPerm); j++) {
427  // get jth msg :
428  auto jvalue = j;
429 
430  for (decltype(taille) i = 0; i < taille; i++) {
431  if (msgs_p[i].size() == 2) {
432  combi_msg_p[i] = (jvalue & 1) ? msgs_p[i][1] : msgs_p[i][0];
433  jvalue /= 2;
434  } else {
435  combi_msg_p[i] = msgs_p[i][0];
436  }
437  }
438 
439  _compute_ext(combi_msg_p, id, msg_pmin, msg_pmax);
440  }
441 
442 // since min is _INF and max is 0 at init, there is no issue having more threads
443 // here
444 // than during for loop
445 #pragma omp critical(msgpminmax)
446  {
447 #pragma omp flush //( msg_p_min )
448  //#pragma omp flush ( msg_p_max ) let the compiler choose what to
449  // flush (due to mvsc)
450 
451  if (msg_p_min > msg_pmin) { msg_p_min = msg_pmin; }
452 
453  if (msg_p_max < msg_pmax) { msg_p_max = msg_pmax; }
454  }
455  }
456  return;
457  }
458 
463  template < typename GUM_SCALAR >
465  std::vector< std::vector< std::vector< GUM_SCALAR > > >& msgs_p,
466  const NodeId& id,
467  GUM_SCALAR& real_msg_l_min,
468  GUM_SCALAR& real_msg_l_max,
469  std::vector< GUM_SCALAR >& lx,
470  const Idx& pos) {
471  GUM_SCALAR msg_l_min = real_msg_l_min;
472  GUM_SCALAR msg_l_max = real_msg_l_max;
473 
474  auto taille = msgs_p.size();
475 
476  // one parent node, the one receiving the message
477  if (taille == 0) {
478  GUM_SCALAR num_min = __cn->get_CPT_min()[id][1];
479  GUM_SCALAR num_max = __cn->get_CPT_max()[id][1];
480  GUM_SCALAR den_min = __cn->get_CPT_min()[id][0];
481  GUM_SCALAR den_max = __cn->get_CPT_max()[id][0];
482 
483  _compute_ext(msg_l_min, msg_l_max, lx, num_min, num_max, den_min, den_max);
484 
485  real_msg_l_min = msg_l_min;
486  real_msg_l_max = msg_l_max;
487  return;
488  }
489 
490  decltype(taille) msgPerm = 1;
491 #pragma omp parallel
492  {
493  GUM_SCALAR msg_lmin = msg_l_min;
494  GUM_SCALAR msg_lmax = msg_l_max;
495  std::vector< std::vector< GUM_SCALAR > > combi_msg_p(taille);
496 
497  decltype(taille) confs = 1;
498 #pragma omp for
499 
500  for (int i = 0; i < int(taille); i++) {
501  confs *= msgs_p[i].size();
502  }
503 
504 #pragma omp atomic
505  msgPerm *= confs;
506 #pragma omp barrier
507 #pragma omp flush(msgPerm)
508 
509 // direct binary representation of config, no need for iterators
510 #pragma omp for
511 
512  for (long j = 0; j < long(msgPerm); j++) {
513  // get jth msg :
514  auto jvalue = j;
515 
516  for (decltype(taille) i = 0; i < taille; i++) {
517  if (msgs_p[i].size() == 2) {
518  combi_msg_p[i] = (jvalue & 1) ? msgs_p[i][1] : msgs_p[i][0];
519  jvalue /= 2;
520  } else {
521  combi_msg_p[i] = msgs_p[i][0];
522  }
523  }
524 
525  _compute_ext(combi_msg_p, id, msg_lmin, msg_lmax, lx, pos);
526  }
527 
528 // there may be more threads here than in the for loop, therefor positive test
529 // is NECESSARY (init is -2)
530 #pragma omp critical(msglminmax)
531  {
532 #pragma omp flush(msg_l_min)
533 #pragma omp flush(msg_l_max)
534 
535  if ((msg_l_min > msg_lmin || msg_l_min == -2) && msg_lmin > 0) {
536  msg_l_min = msg_lmin;
537  }
538 
539  if ((msg_l_max < msg_lmax || msg_l_max == -2) && msg_lmax > 0) {
540  msg_l_max = msg_lmax;
541  }
542  }
543  }
544 
545  real_msg_l_min = msg_l_min;
546  real_msg_l_max = msg_l_max;
547  }
548 
549  template < typename GUM_SCALAR >
551  if (_InferenceUpToDate) { return; }
552 
553  _initialize();
554 
555  __infE::initApproximationScheme();
556 
557  switch (__inferenceType) {
558  case InferenceType::nodeToNeighbours:
559  _makeInferenceNodeToNeighbours();
560  break;
561 
562  case InferenceType::ordered: _makeInferenceByOrderedArcs(); break;
563 
564  case InferenceType::randomOrder: _makeInferenceByRandomOrder(); break;
565  }
566 
567  //_updateMarginals();
568  _updateIndicatrices(); // will call _updateMarginals()
569 
570  _computeExpectations();
571 
572  _InferenceUpToDate = true;
573  }
574 
575  template < typename GUM_SCALAR >
577  __infE::eraseAllEvidence();
578 
579  _ArcsL_min.clear();
580  _ArcsL_max.clear();
581  _ArcsP_min.clear();
582  _ArcsP_max.clear();
583  _NodesL_min.clear();
584  _NodesL_max.clear();
585  _NodesP_min.clear();
586  _NodesP_max.clear();
587 
588  _InferenceUpToDate = false;
589 
590  if (_msg_l_sent.size() > 0) {
591  for (auto node : __bnet->nodes()) {
592  delete _msg_l_sent[node];
593  }
594  }
595 
596  _msg_l_sent.clear();
597  _update_l.clear();
598  _update_p.clear();
599 
600  active_nodes_set.clear();
601  next_active_nodes_set.clear();
602  }
603 
604  template < typename GUM_SCALAR >
606  const DAG& graphe = __bnet->dag();
607 
608  // use const iterators with cbegin when available
609  for (auto node : __bnet->topologicalOrder()) {
610  _update_p.set(node, false);
611  _update_l.set(node, false);
612  NodeSet* _parents = new NodeSet();
613  _msg_l_sent.set(node, _parents);
614 
615  // accelerer init pour evidences
616  if (__infE::_evidence.exists(node)) {
617  if (__infE::_evidence[node][1] != 0.
618  && __infE::_evidence[node][1] != 1.) {
620  "CNLoopyPropagation can only handle HARD evidences");
621  }
622 
623  active_nodes_set.insert(node);
624  _update_l.set(node, true);
625  _update_p.set(node, true);
626 
627  if (__infE::_evidence[node][1] == (GUM_SCALAR)1.) {
628  _NodesL_min.set(node, _INF);
629  _NodesP_min.set(node, (GUM_SCALAR)1.);
630  } else if (__infE::_evidence[node][1] == (GUM_SCALAR)0.) {
631  _NodesL_min.set(node, (GUM_SCALAR)0.);
632  _NodesP_min.set(node, (GUM_SCALAR)0.);
633  }
634 
635  std::vector< GUM_SCALAR > marg(2);
636  marg[1] = _NodesP_min[node];
637  marg[0] = 1 - marg[1];
638 
639  __infE::_oldMarginalMin.set(node, marg);
640  __infE::_oldMarginalMax.set(node, marg);
641 
642  continue;
643  }
644 
645  NodeSet _par = graphe.parents(node);
646  NodeSet _enf = graphe.children(node);
647 
648  if (_par.size() == 0) {
649  active_nodes_set.insert(node);
650  _update_p.set(node, true);
651  _update_l.set(node, true);
652  }
653 
654  if (_enf.size() == 0) {
655  active_nodes_set.insert(node);
656  _update_p.set(node, true);
657  _update_l.set(node, true);
658  }
659 
664  const auto parents = &__bnet->cpt(node).variablesSequence();
665 
666  std::vector< std::vector< std::vector< GUM_SCALAR > > > msgs_p;
667  std::vector< std::vector< GUM_SCALAR > > msg_p;
668  std::vector< GUM_SCALAR > distri(2);
669 
670  // +1 from start to avoid _counting itself
671  // use const iterators when available with cbegin
672  for (auto jt = ++parents->begin(), theEnd = parents->end(); jt != theEnd;
673  ++jt) {
674  // compute probability distribution to avoid doing it multiple times
675  // (at
676  // each combination of messages)
677  distri[1] = _NodesP_min[__bnet->nodeId(**jt)];
678  distri[0] = (GUM_SCALAR)1. - distri[1];
679  msg_p.push_back(distri);
680 
681  if (_NodesP_max.exists(__bnet->nodeId(**jt))) {
682  distri[1] = _NodesP_max[__bnet->nodeId(**jt)];
683  distri[0] = (GUM_SCALAR)1. - distri[1];
684  msg_p.push_back(distri);
685  }
686 
687  msgs_p.push_back(msg_p);
688  msg_p.clear();
689  }
690 
691  GUM_SCALAR msg_p_min = 1.;
692  GUM_SCALAR msg_p_max = 0.;
693 
694  if (__cn->currentNodeType(node)
696  _enum_combi(msgs_p, node, msg_p_min, msg_p_max);
697  }
698 
699  if (msg_p_min <= (GUM_SCALAR)0.) { msg_p_min = (GUM_SCALAR)0.; }
700 
701  if (msg_p_max <= (GUM_SCALAR)0.) { msg_p_max = (GUM_SCALAR)0.; }
702 
703  _NodesP_min.set(node, msg_p_min);
704  std::vector< GUM_SCALAR > marg(2);
705  marg[1] = msg_p_min;
706  marg[0] = 1 - msg_p_min;
707 
708  __infE::_oldMarginalMin.set(node, marg);
709 
710  if (msg_p_min != msg_p_max) {
711  marg[1] = msg_p_max;
712  marg[0] = 1 - msg_p_max;
713  _NodesP_max.insert(node, msg_p_max);
714  }
715 
716  __infE::_oldMarginalMax.set(node, marg);
717 
718  _NodesL_min.set(node, (GUM_SCALAR)1.);
719  }
720 
721  for (auto arc : __bnet->arcs()) {
722  _ArcsP_min.set(arc, _NodesP_min[arc.tail()]);
723 
724  if (_NodesP_max.exists(arc.tail())) {
725  _ArcsP_max.set(arc, _NodesP_max[arc.tail()]);
726  }
727 
728  _ArcsL_min.set(arc, _NodesL_min[arc.tail()]);
729  }
730  }
731 
732  template < typename GUM_SCALAR >
734  const DAG& graphe = __bnet->dag();
735 
736  GUM_SCALAR eps;
737  // to validate TestSuite
738  __infE::continueApproximationScheme(1.);
739 
740  do {
741  for (auto node : active_nodes_set) {
742  for (auto chil : graphe.children(node)) {
743  if (__cn->currentNodeType(chil)
745  continue;
746  }
747 
748  _msgP(node, chil);
749  }
750 
751  for (auto par : graphe.parents(node)) {
752  if (__cn->currentNodeType(node)
754  continue;
755  }
756 
757  _msgL(node, par);
758  }
759  }
760 
761  eps = _calculateEpsilon();
762 
763  __infE::updateApproximationScheme();
764 
765  active_nodes_set.clear();
766  active_nodes_set = next_active_nodes_set;
767  next_active_nodes_set.clear();
768 
769  } while (__infE::continueApproximationScheme(eps)
770  && active_nodes_set.size() > 0);
771 
772  __infE::stopApproximationScheme(); // just to be sure of the
773  // approximationScheme has been notified of
774  // the end of looop
775  }
776 
777  template < typename GUM_SCALAR >
779  Size nbrArcs = __bnet->dag().sizeArcs();
780 
781  std::vector< cArcP > seq;
782  seq.reserve(nbrArcs);
783 
784  for (const auto& arc : __bnet->arcs()) {
785  seq.push_back(&arc);
786  }
787 
788  GUM_SCALAR eps;
789  // validate TestSuite
790  __infE::continueApproximationScheme(1.);
791 
792  do {
793  for (Size j = 0, theEnd = nbrArcs / 2; j < theEnd; j++) {
794  auto w1 = rand() % nbrArcs, w2 = rand() % nbrArcs;
795 
796  if (w1 == w2) { continue; }
797 
798  std::swap(seq[w1], seq[w2]);
799  }
800 
801  for (const auto it : seq) {
802  if (__cn->currentNodeType(it->tail())
804  || __cn->currentNodeType(it->head())
806  continue;
807  }
808 
809  _msgP(it->tail(), it->head());
810  _msgL(it->head(), it->tail());
811  }
812 
813  eps = _calculateEpsilon();
814 
815  __infE::updateApproximationScheme();
816 
817  } while (__infE::continueApproximationScheme(eps));
818  }
819 
820  // gives slightly worse results for some variable/modalities than other
821  // inference
822  // types (node D on 2U network loose 0.03 precision)
823  template < typename GUM_SCALAR >
825  Size nbrArcs = __bnet->dag().sizeArcs();
826 
827  std::vector< cArcP > seq;
828  seq.reserve(nbrArcs);
829 
830  for (const auto& arc : __bnet->arcs()) {
831  seq.push_back(&arc);
832  }
833 
834  GUM_SCALAR eps;
835  // validate TestSuite
836  __infE::continueApproximationScheme(1.);
837 
838  do {
839  for (const auto it : seq) {
840  if (__cn->currentNodeType(it->tail())
842  || __cn->currentNodeType(it->head())
844  continue;
845  }
846 
847  _msgP(it->tail(), it->head());
848  _msgL(it->head(), it->tail());
849  }
850 
851  eps = _calculateEpsilon();
852 
853  __infE::updateApproximationScheme();
854 
855  } while (__infE::continueApproximationScheme(eps));
856  }
857 
858  template < typename GUM_SCALAR >
860  NodeSet const& children = __bnet->children(Y);
861  NodeSet const& _parents = __bnet->parents(Y);
862 
863  const auto parents = &__bnet->cpt(Y).variablesSequence();
864 
865  if (((children.size() + parents->size() - 1) == 1)
866  && (!__infE::_evidence.exists(Y))) {
867  return;
868  }
869 
870  bool update_l = _update_l[Y];
871  bool update_p = _update_p[Y];
872 
873  if (!update_p && !update_l) { return; }
874 
875  _msg_l_sent[Y]->insert(X);
876 
877  // for future refresh LM/PI
878  if (_msg_l_sent[Y]->size() == _parents.size()) {
879  _msg_l_sent[Y]->clear();
880  _update_l[Y] = false;
881  }
882 
883  // refresh LM_part
884  if (update_l) {
885  if (!children.empty() && !__infE::_evidence.exists(Y)) {
886  GUM_SCALAR lmin = 1.;
887  GUM_SCALAR lmax = 1.;
888 
889  for (auto chil : children) {
890  lmin *= _ArcsL_min[Arc(Y, chil)];
891 
892  if (_ArcsL_max.exists(Arc(Y, chil))) {
893  lmax *= _ArcsL_max[Arc(Y, chil)];
894  } else {
895  lmax *= _ArcsL_min[Arc(Y, chil)];
896  }
897  }
898 
899  lmin = lmax;
900 
901  if (lmax != lmax && lmin == lmin) { lmax = lmin; }
902 
903  if (lmax != lmax && lmin != lmin) {
904  std::cout << "no likelihood defined [lmin, lmax] (incompatibles "
905  "evidence ?)"
906  << std::endl;
907  }
908 
909  if (lmin < 0.) { lmin = 0.; }
910 
911  if (lmax < 0.) { lmax = 0.; }
912 
913  // no need to update nodeL if evidence since nodeL will never be used
914 
915  _NodesL_min[Y] = lmin;
916 
917  if (lmin != lmax) {
918  _NodesL_max.set(Y, lmax);
919  } else if (_NodesL_max.exists(Y)) {
920  _NodesL_max.erase(Y);
921  }
922 
923  } // end of : node has children & no evidence
924 
925  } // end of : if update_l
926 
927  GUM_SCALAR lmin = _NodesL_min[Y];
928  GUM_SCALAR lmax;
929 
930  if (_NodesL_max.exists(Y)) {
931  lmax = _NodesL_max[Y];
932  } else {
933  lmax = lmin;
934  }
935 
940  if (lmin == lmax && lmin == 1.) {
941  _ArcsL_min[Arc(X, Y)] = lmin;
942 
943  if (_ArcsL_max.exists(Arc(X, Y))) { _ArcsL_max.erase(Arc(X, Y)); }
944 
945  return;
946  }
947 
948  // garder pour chaque noeud un table des parents maj, une fois tous maj,
949  // stop
950  // jusque notification msg L ou P
951 
952  if (update_p || update_l) {
953  std::vector< std::vector< std::vector< GUM_SCALAR > > > msgs_p;
954  std::vector< std::vector< GUM_SCALAR > > msg_p;
955  std::vector< GUM_SCALAR > distri(2);
956 
957  Idx pos;
958 
959  // +1 from start to avoid _counting itself
960  // use const iterators with cbegin when available
961  for (auto jt = ++parents->begin(), theEnd = parents->end(); jt != theEnd;
962  ++jt) {
963  if (__bnet->nodeId(**jt) == X) {
964  // retirer la variable courante de la taille
965  pos = parents->pos(*jt) - 1;
966  continue;
967  }
968 
969  // compute probability distribution to avoid doing it multiple times
970  // (at
971  // each combination of messages)
972  distri[1] = _ArcsP_min[Arc(__bnet->nodeId(**jt), Y)];
973  distri[0] = GUM_SCALAR(1.) - distri[1];
974  msg_p.push_back(distri);
975 
976  if (_ArcsP_max.exists(Arc(__bnet->nodeId(**jt), Y))) {
977  distri[1] = _ArcsP_max[Arc(__bnet->nodeId(**jt), Y)];
978  distri[0] = GUM_SCALAR(1.) - distri[1];
979  msg_p.push_back(distri);
980  }
981 
982  msgs_p.push_back(msg_p);
983  msg_p.clear();
984  }
985 
986  GUM_SCALAR min = -2.;
987  GUM_SCALAR max = -2.;
988 
989  std::vector< GUM_SCALAR > lx;
990  lx.push_back(lmin);
991 
992  if (lmin != lmax) { lx.push_back(lmax); }
993 
994  _enum_combi(msgs_p, Y, min, max, lx, pos);
995 
996  if (min == -2. || max == -2.) {
997  if (min != -2.) {
998  max = min;
999  } else if (max != -2.) {
1000  min = max;
1001  } else {
1002  std::cout << std::endl;
1003  std::cout << "!!!! pas de message L calculable !!!!" << std::endl;
1004  return;
1005  }
1006  }
1007 
1008  if (min < 0.) { min = 0.; }
1009 
1010  if (max < 0.) { max = 0.; }
1011 
1012  bool update = false;
1013 
1014  if (min != _ArcsL_min[Arc(X, Y)]) {
1015  _ArcsL_min[Arc(X, Y)] = min;
1016  update = true;
1017  }
1018 
1019  if (_ArcsL_max.exists(Arc(X, Y))) {
1020  if (max != _ArcsL_max[Arc(X, Y)]) {
1021  if (max != min) {
1022  _ArcsL_max[Arc(X, Y)] = max;
1023  } else { // if ( max == min )
1024  _ArcsL_max.erase(Arc(X, Y));
1025  }
1026 
1027  update = true;
1028  }
1029  } else {
1030  if (max != min) {
1031  _ArcsL_max.insert(Arc(X, Y), max);
1032  update = true;
1033  }
1034  }
1035 
1036  if (update) {
1037  _update_l.set(X, true);
1038  next_active_nodes_set.insert(X);
1039  }
1040 
1041  } // end of update_p || update_l
1042  }
1043 
1044  template < typename GUM_SCALAR >
1046  const NodeId demanding_child) {
1047  NodeSet const& children = __bnet->children(X);
1048 
1049  const auto parents = &__bnet->cpt(X).variablesSequence();
1050 
1051  if (((children.size() + parents->size() - 1) == 1)
1052  && (!__infE::_evidence.exists(X))) {
1053  return;
1054  }
1055 
1056  // LM_part ---- from all children but one --- the lonely one will get the
1057  // message
1058 
1059  if (__infE::_evidence.exists(X)) {
1060  _ArcsP_min[Arc(X, demanding_child)] = __infE::_evidence[X][1];
1061 
1062  if (_ArcsP_max.exists(Arc(X, demanding_child))) {
1063  _ArcsP_max.erase(Arc(X, demanding_child));
1064  }
1065 
1066  return;
1067  }
1068 
1069  bool update_l = _update_l[X];
1070  bool update_p = _update_p[X];
1071 
1072  if (!update_p && !update_l) { return; }
1073 
1074  GUM_SCALAR lmin = 1.;
1075  GUM_SCALAR lmax = 1.;
1076 
1077  // use cbegin if available
1078  for (auto chil : children) {
1079  if (chil == demanding_child) { continue; }
1080 
1081  lmin *= _ArcsL_min[Arc(X, chil)];
1082 
1083  if (_ArcsL_max.exists(Arc(X, chil))) {
1084  lmax *= _ArcsL_max[Arc(X, chil)];
1085  } else {
1086  lmax *= _ArcsL_min[Arc(X, chil)];
1087  }
1088  }
1089 
1090  if (lmin != lmin && lmax == lmax) { lmin = lmax; }
1091 
1092  if (lmax != lmax && lmin == lmin) { lmax = lmin; }
1093 
1094  if (lmax != lmax && lmin != lmin) {
1095  std::cout << "pas de vraisemblance definie [lmin, lmax] (observations "
1096  "incompatibles ?)"
1097  << std::endl;
1098  return;
1099  }
1100 
1101  if (lmin < 0.) { lmin = 0.; }
1102 
1103  if (lmax < 0.) { lmax = 0.; }
1104 
1105  // refresh PI_part
1106  GUM_SCALAR min = _INF;
1107  GUM_SCALAR max = 0.;
1108 
1109  if (update_p) {
1110  std::vector< std::vector< std::vector< GUM_SCALAR > > > msgs_p;
1111  std::vector< std::vector< GUM_SCALAR > > msg_p;
1112  std::vector< GUM_SCALAR > distri(2);
1113 
1114  // +1 from start to avoid _counting itself
1115  // use const_iterators if available
1116  for (auto jt = ++parents->begin(), theEnd = parents->end(); jt != theEnd;
1117  ++jt) {
1118  // compute probability distribution to avoid doing it multiple times
1119  // (at
1120  // each combination of messages)
1121  distri[1] = _ArcsP_min[Arc(__bnet->nodeId(**jt), X)];
1122  distri[0] = GUM_SCALAR(1.) - distri[1];
1123  msg_p.push_back(distri);
1124 
1125  if (_ArcsP_max.exists(Arc(__bnet->nodeId(**jt), X))) {
1126  distri[1] = _ArcsP_max[Arc(__bnet->nodeId(**jt), X)];
1127  distri[0] = GUM_SCALAR(1.) - distri[1];
1128  msg_p.push_back(distri);
1129  }
1130 
1131  msgs_p.push_back(msg_p);
1132  msg_p.clear();
1133  }
1134 
1135  _enum_combi(msgs_p, X, min, max);
1136 
1137  if (min < 0.) { min = 0.; }
1138 
1139  if (max < 0.) { max = 0.; }
1140 
1141  if (min == _INF || max == _INF) {
1142  std::cout << " ERREUR msg P min = max = INF " << std::endl;
1143  std::cout.flush();
1144  return;
1145  }
1146 
1147  _NodesP_min[X] = min;
1148 
1149  if (min != max) {
1150  _NodesP_max.set(X, max);
1151  } else if (_NodesP_max.exists(X)) {
1152  _NodesP_max.erase(X);
1153  }
1154 
1155  _update_p.set(X, false);
1156 
1157  } // end of update_p
1158  else {
1159  min = _NodesP_min[X];
1160 
1161  if (_NodesP_max.exists(X)) {
1162  max = _NodesP_max[X];
1163  } else {
1164  max = min;
1165  }
1166  }
1167 
1168  if (update_p || update_l) {
1169  GUM_SCALAR msg_p_min;
1170  GUM_SCALAR msg_p_max;
1171 
1172  // cas limites sur min
1173  if (min == _INF && lmin == 0.) {
1174  std::cout << "MESSAGE P ERR (negatif) : pi = inf, l = 0" << std::endl;
1175  }
1176 
1177  if (lmin == _INF) { // cas infini
1178  msg_p_min = GUM_SCALAR(1.);
1179  } else if (min == 0. || lmin == 0.) {
1180  msg_p_min = 0;
1181  } else {
1182  msg_p_min = GUM_SCALAR(1. / (1. + ((1. / min - 1.) * 1. / lmin)));
1183  }
1184 
1185  // cas limites sur max
1186  if (max == _INF && lmax == 0.) {
1187  std::cout << "MESSAGE P ERR (negatif) : pi = inf, l = 0" << std::endl;
1188  }
1189 
1190  if (lmax == _INF) { // cas infini
1191  msg_p_max = GUM_SCALAR(1.);
1192  } else if (max == 0. || lmax == 0.) {
1193  msg_p_max = 0;
1194  } else {
1195  msg_p_max = GUM_SCALAR(1. / (1. + ((1. / max - 1.) * 1. / lmax)));
1196  }
1197 
1198  if (msg_p_min != msg_p_min && msg_p_max == msg_p_max) {
1199  msg_p_min = msg_p_max;
1200  std::cout << std::endl;
1201  std::cout << "msg_p_min is NaN" << std::endl;
1202  }
1203 
1204  if (msg_p_max != msg_p_max && msg_p_min == msg_p_min) {
1205  msg_p_max = msg_p_min;
1206  std::cout << std::endl;
1207  std::cout << "msg_p_max is NaN" << std::endl;
1208  }
1209 
1210  if (msg_p_max != msg_p_max && msg_p_min != msg_p_min) {
1211  std::cout << std::endl;
1212  std::cout << "pas de message P calculable (verifier observations)"
1213  << std::endl;
1214  return;
1215  }
1216 
1217  if (msg_p_min < 0.) { msg_p_min = 0.; }
1218 
1219  if (msg_p_max < 0.) { msg_p_max = 0.; }
1220 
1221  bool update = false;
1222 
1223  if (msg_p_min != _ArcsP_min[Arc(X, demanding_child)]) {
1224  _ArcsP_min[Arc(X, demanding_child)] = msg_p_min;
1225  update = true;
1226  }
1227 
1228  if (_ArcsP_max.exists(Arc(X, demanding_child))) {
1229  if (msg_p_max != _ArcsP_max[Arc(X, demanding_child)]) {
1230  if (msg_p_max != msg_p_min) {
1231  _ArcsP_max[Arc(X, demanding_child)] = msg_p_max;
1232  } else { // if ( msg_p_max == msg_p_min )
1233  _ArcsP_max.erase(Arc(X, demanding_child));
1234  }
1235 
1236  update = true;
1237  }
1238  } else {
1239  if (msg_p_max != msg_p_min) {
1240  _ArcsP_max.insert(Arc(X, demanding_child), msg_p_max);
1241  update = true;
1242  }
1243  }
1244 
1245  if (update) {
1246  _update_p.set(demanding_child, true);
1247  next_active_nodes_set.insert(demanding_child);
1248  }
1249 
1250  } // end of : update_l || update_p
1251  }
1252 
1253  template < typename GUM_SCALAR >
1255  for (auto node : __bnet->nodes()) {
1256  if ((!refreshIndic)
1257  && __cn->currentNodeType(node)
1259  continue;
1260  }
1261 
1262  NodeSet const& children = __bnet->children(node);
1263 
1264  auto parents = &__bnet->cpt(node).variablesSequence();
1265 
1266  if (_update_l[node]) {
1267  GUM_SCALAR lmin = 1.;
1268  GUM_SCALAR lmax = 1.;
1269 
1270  if (!children.empty() && !__infE::_evidence.exists(node)) {
1271  for (auto chil : children) {
1272  lmin *= _ArcsL_min[Arc(node, chil)];
1273 
1274  if (_ArcsL_max.exists(Arc(node, chil))) {
1275  lmax *= _ArcsL_max[Arc(node, chil)];
1276  } else {
1277  lmax *= _ArcsL_min[Arc(node, chil)];
1278  }
1279  }
1280 
1281  if (lmin != lmin && lmax == lmax) { lmin = lmax; }
1282 
1283  lmax = lmin;
1284 
1285  if (lmax != lmax && lmin != lmin) {
1286  std::cout
1287  << "pas de vraisemblance definie [lmin, lmax] (observations "
1288  "incompatibles ?)"
1289  << std::endl;
1290  return;
1291  }
1292 
1293  if (lmin < 0.) { lmin = 0.; }
1294 
1295  if (lmax < 0.) { lmax = 0.; }
1296 
1297  _NodesL_min[node] = lmin;
1298 
1299  if (lmin != lmax) {
1300  _NodesL_max.set(node, lmax);
1301  } else if (_NodesL_max.exists(node)) {
1302  _NodesL_max.erase(node);
1303  }
1304  }
1305 
1306  } // end of : update_l
1307 
1308  if (_update_p[node]) {
1309  if ((parents->size() - 1) > 0 && !__infE::_evidence.exists(node)) {
1310  std::vector< std::vector< std::vector< GUM_SCALAR > > > msgs_p;
1311  std::vector< std::vector< GUM_SCALAR > > msg_p;
1312  std::vector< GUM_SCALAR > distri(2);
1313 
1314  // +1 from start to avoid _counting itself
1315  // cbegin
1316  for (auto jt = ++parents->begin(), theEnd = parents->end();
1317  jt != theEnd;
1318  ++jt) {
1319  // compute probability distribution to avoid doing it multiple
1320  // times
1321  // (at each combination of messages)
1322  distri[1] = _ArcsP_min[Arc(__bnet->nodeId(**jt), node)];
1323  distri[0] = GUM_SCALAR(1.) - distri[1];
1324  msg_p.push_back(distri);
1325 
1326  if (_ArcsP_max.exists(Arc(__bnet->nodeId(**jt), node))) {
1327  distri[1] = _ArcsP_max[Arc(__bnet->nodeId(**jt), node)];
1328  distri[0] = GUM_SCALAR(1.) - distri[1];
1329  msg_p.push_back(distri);
1330  }
1331 
1332  msgs_p.push_back(msg_p);
1333  msg_p.clear();
1334  }
1335 
1336  GUM_SCALAR min = _INF;
1337  GUM_SCALAR max = 0.;
1338 
1339  _enum_combi(msgs_p, node, min, max);
1340 
1341  if (min < 0.) { min = 0.; }
1342 
1343  if (max < 0.) { max = 0.; }
1344 
1345  _NodesP_min[node] = min;
1346 
1347  if (min != max) {
1348  _NodesP_max.set(node, max);
1349  } else if (_NodesP_max.exists(node)) {
1350  _NodesP_max.erase(node);
1351  }
1352 
1353  _update_p[node] = false;
1354  }
1355  } // end of update_p
1356 
1357  } // end of : for each node
1358  }
1359 
1360  template < typename GUM_SCALAR >
1362  for (auto node : __bnet->nodes()) {
1363  GUM_SCALAR msg_p_min = 1.;
1364  GUM_SCALAR msg_p_max = 0.;
1365 
1366  if (__infE::_evidence.exists(node)) {
1367  if (__infE::_evidence[node][1] == 0.) {
1368  msg_p_min = (GUM_SCALAR)0.;
1369  } else if (__infE::_evidence[node][1] == 1.) {
1370  msg_p_min = 1.;
1371  }
1372 
1373  msg_p_max = msg_p_min;
1374  } else {
1375  GUM_SCALAR min = _NodesP_min[node];
1376  GUM_SCALAR max;
1377 
1378  if (_NodesP_max.exists(node)) {
1379  max = _NodesP_max[node];
1380  } else {
1381  max = min;
1382  }
1383 
1384  GUM_SCALAR lmin = _NodesL_min[node];
1385  GUM_SCALAR lmax;
1386 
1387  if (_NodesL_max.exists(node)) {
1388  lmax = _NodesL_max[node];
1389  } else {
1390  lmax = lmin;
1391  }
1392 
1393  if (min == _INF || max == _INF) {
1394  std::cout << " min ou max === _INF !!!!!!!!!!!!!!!!!!!!!!!!!! "
1395  << std::endl;
1396  return;
1397  }
1398 
1399  if (min == _INF && lmin == 0.) {
1400  std::cout << "proba ERR (negatif) : pi = inf, l = 0" << std::endl;
1401  return;
1402  }
1403 
1404  if (lmin == _INF) {
1405  msg_p_min = GUM_SCALAR(1.);
1406  } else if (min == 0. || lmin == 0.) {
1407  msg_p_min = GUM_SCALAR(0.);
1408  } else {
1409  msg_p_min = GUM_SCALAR(1. / (1. + ((1. / min - 1.) * 1. / lmin)));
1410  }
1411 
1412  if (max == _INF && lmax == 0.) {
1413  std::cout << "proba ERR (negatif) : pi = inf, l = 0" << std::endl;
1414  return;
1415  }
1416 
1417  if (lmax == _INF) {
1418  msg_p_max = GUM_SCALAR(1.);
1419  } else if (max == 0. || lmax == 0.) {
1420  msg_p_max = GUM_SCALAR(0.);
1421  } else {
1422  msg_p_max = GUM_SCALAR(1. / (1. + ((1. / max - 1.) * 1. / lmax)));
1423  }
1424  }
1425 
1426  if (msg_p_min != msg_p_min && msg_p_max == msg_p_max) {
1427  msg_p_min = msg_p_max;
1428  std::cout << std::endl;
1429  std::cout << "msg_p_min is NaN" << std::endl;
1430  }
1431 
1432  if (msg_p_max != msg_p_max && msg_p_min == msg_p_min) {
1433  msg_p_max = msg_p_min;
1434  std::cout << std::endl;
1435  std::cout << "msg_p_max is NaN" << std::endl;
1436  }
1437 
1438  if (msg_p_max != msg_p_max && msg_p_min != msg_p_min) {
1439  std::cout << std::endl;
1440  std::cout << "Please check the observations (no proba can be computed)"
1441  << std::endl;
1442  return;
1443  }
1444 
1445  if (msg_p_min < 0.) { msg_p_min = 0.; }
1446 
1447  if (msg_p_max < 0.) { msg_p_max = 0.; }
1448 
1449  __infE::_marginalMin[node][0] = 1 - msg_p_max;
1450  __infE::_marginalMax[node][0] = 1 - msg_p_min;
1451  __infE::_marginalMin[node][1] = msg_p_min;
1452  __infE::_marginalMax[node][1] = msg_p_max;
1453  }
1454  }
1455 
1456  template < typename GUM_SCALAR >
1458  _refreshLMsPIs();
1459  _updateMarginals();
1460 
1461  return __infE::_computeEpsilon();
1462  }
1463 
1464  template < typename GUM_SCALAR >
1466  for (auto node : __bnet->nodes()) {
1467  if (__cn->currentNodeType(node)
1469  continue;
1470  }
1471 
1472  for (auto pare : __bnet->parents(node)) {
1473  _msgP(pare, node);
1474  }
1475  }
1476 
1477  _refreshLMsPIs(true);
1478  _updateMarginals();
1479  }
1480 
1481  template < typename GUM_SCALAR >
1483  if (__infE::_modal.empty()) { return; }
1484 
1485  std::vector< std::vector< GUM_SCALAR > > vertices(
1486  2, std::vector< GUM_SCALAR >(2));
1487 
1488  for (auto node : __bnet->nodes()) {
1489  vertices[0][0] = __infE::_marginalMin[node][0];
1490  vertices[0][1] = __infE::_marginalMax[node][1];
1491 
1492  vertices[1][0] = __infE::_marginalMax[node][0];
1493  vertices[1][1] = __infE::_marginalMin[node][1];
1494 
1495  for (auto vertex = 0, vend = 2; vertex != vend; vertex++) {
1496  __infE::_updateExpectations(node, vertices[vertex]);
1497  // test credal sets vertices elim
1498  // remove with L2U since variables are binary
1499  // but does the user know that ?
1500  __infE::_updateCredalSets(
1501  node,
1502  vertices[vertex]); // no redundancy elimination with 2 vertices
1503  }
1504  }
1505  }
1506 
1507  template < typename GUM_SCALAR >
1509  const CredalNet< GUM_SCALAR >& cnet) :
1510  InferenceEngine< GUM_SCALAR >::InferenceEngine(cnet) {
1511  if (!cnet.isSeparatelySpecified()) {
1513  "CNLoopyPropagation is only available "
1514  "with separately specified nets");
1515  }
1516 
1517  // test for binary cn
1518  for (auto node : cnet.current_bn().nodes())
1519  if (cnet.current_bn().variable(node).domainSize() != 2) {
1521  "CNLoopyPropagation is only available "
1522  "with binary credal networks");
1523  }
1524 
1525  // test if compute CPTMinMax has been called
1526  if (!cnet.hasComputedCPTMinMax()) {
1528  "CNLoopyPropagation only works when "
1529  "\"computeCPTMinMax()\" has been called for "
1530  "this credal net");
1531  }
1532 
1533  __cn = &cnet;
1534  __bnet = &cnet.current_bn();
1535 
1537  _InferenceUpToDate = false;
1538 
1539  GUM_CONSTRUCTOR(CNLoopyPropagation);
1540  }
1541 
1542  template < typename GUM_SCALAR >
1544  _InferenceUpToDate = false;
1545 
1546  if (_msg_l_sent.size() > 0) {
1547  for (auto node : __bnet->nodes()) {
1548  delete _msg_l_sent[node];
1549  }
1550  }
1551 
1552  //_msg_l_sent.clear();
1553  //_update_l.clear();
1554  //_update_p.clear();
1555 
1556  GUM_DESTRUCTOR(CNLoopyPropagation);
1557  }
1558 
1559  template < typename GUM_SCALAR >
1561  __inferenceType = inft;
1562  }
1563 
1564  template < typename GUM_SCALAR >
1567  return __inferenceType;
1568  }
1569  } // namespace credal
1570 } // end of namespace gum
const bool isSeparatelySpecified() const
bool empty() const noexcept
Indicates whether the set is the empty set.
Definition: set_tpl.h:707
Set< NodeId > NodeSet
Some typdefs and define for shortcuts ...
void _makeInferenceByOrderedArcs()
Starts the inference with this inference type.
#define _INF
void eraseAllEvidence()
Erase all inference related data to perform another one.
InferenceType __inferenceType
The choosen inference type.
NodeType currentNodeType(const NodeId &id) const
Copyright 2005-2019 Pierre-Henri WUILLEMIN et Christophe GONZALES (LIP6) {prenom.nom}_at_lip6.fr.
void _initialize()
Topological forward propagation to initialize old marginals & messages.
void swap(HashTable< LpCol, double > *&a, HashTable< LpCol, double > *&b)
Swap the addresses of two pointers to hashTables.
const CredalNet< GUM_SCALAR > * __cn
A pointer to the CredalNet to be used.
<agrum/CN/CNLoopyPropagation.h>
void erase(const Key &k)
Erases an element from the set.
Definition: set_tpl.h:656
Copyright 2005-2019 Pierre-Henri WUILLEMIN et Christophe GONZALES (LIP6) {prenom.nom}_at_lip6.fr.
Definition: agrum.h:25
void _computeExpectations()
Since the network is binary, expectations can be computed from the final marginals which give us the ...
void _makeInferenceByRandomOrder()
Starts the inference with this inference type.
const IBayesNet< GUM_SCALAR > * __bnet
A pointer to it&#39;s IBayesNet used as a DAG.
void _updateMarginals()
Compute marginals from up-to-date messages.
Class template representing a Credal Network.
Definition: credalNet.h:89
InferenceType inferenceType()
Get the inference type.
bool _InferenceUpToDate
TRUE if inference has already been performed, FALSE otherwise.
The base class for all directed edgesThis class is used as a basis for manipulating all directed edge...
CNLoopyPropagation(const CredalNet< GUM_SCALAR > &cnet)
Constructor.
Uses a node-set so we don&#39;t iterate on nodes that can&#39;t send a new message.
const NodeSet & parents(const NodeId id) const
returns the set of nodes with arc ingoing to a given node
GUM_SCALAR _calculateEpsilon()
Compute epsilon.
void saveInference(const std::string &path)
void _msgL(const NodeId X, const NodeId demanding_parent)
Sends a message to one&#39;s parent, i.e.
void _enum_combi(std::vector< std::vector< std::vector< GUM_SCALAR > > > &msgs_p, const NodeId &id, GUM_SCALAR &msg_l_min, GUM_SCALAR &msg_l_max, std::vector< GUM_SCALAR > &lx, const Idx &pos)
Used by _msgL.
void makeInference()
Starts the inference.
NodeProperty< NodeSet *> _msg_l_sent
Used to keep track of one&#39;s messages sent to it&#39;s parents.
const BayesNet< GUM_SCALAR > & current_bn() const
const NodeSet & children(const NodeId id) const
returns the set of nodes with arc outgoing from a given node
Abstract class template representing a CredalNet inference engine.
void _refreshLMsPIs(bool refreshIndic=false)
Get the last messages from one&#39;s parents and children.
const bool hasComputedCPTMinMax() const
InferenceType
Inference type to be used by the algorithm.
Size Idx
Type for indexes.
Definition: types.h:53
void _compute_ext(GUM_SCALAR &msg_l_min, GUM_SCALAR &msg_l_max, std::vector< GUM_SCALAR > &lx, GUM_SCALAR &num_min, GUM_SCALAR &num_max, GUM_SCALAR &den_min, GUM_SCALAR &den_max)
Used by _msgL.
void _updateIndicatrices()
Only update indicatrices variables at the end of computations ( calls _msgP ).
std::size_t Size
In aGrUM, hashed values are unsigned long int.
Definition: types.h:48
void _msgP(const NodeId X, const NodeId demanding_child)
Sends a message to one&#39;s child, i.e.
Size size() const noexcept
Returns the number of elements in the set.
Definition: set_tpl.h:701
Base class for dag.
Definition: DAG.h:102
Size NodeId
Type for node ids.
Definition: graphElements.h:98
void insert(const Key &k)
Inserts a new element into the set.
Definition: set_tpl.h:613
#define GUM_ERROR(type, msg)
Definition: exceptions.h:55
void _makeInferenceNodeToNeighbours()
Starts the inference with this inference type.