aGrUM  0.13.2
ShaferShenoyInference_tpl.h
1 /***************************************************************************
2  * Copyright (C) 2005 by Christophe GONZALES et Pierre-Henri WUILLEMIN *
3  * {prenom.nom}_at_lip6.fr *
4  * *
5  * This program is free software; you can redistribute it and/or modify *
6  * it under the terms of the GNU General Public License as published by *
7  * the Free Software Foundation; either version 2 of the License, or *
8  * (at your option) any later version. *
9  * *
10  * This program is distributed in the hope that it will be useful, *
11  * but WITHOUT ANY WARRANTY; without even the implied warranty of *
12  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
13  * GNU General Public License for more details. *
14  * *
15  * You should have received a copy of the GNU General Public License *
16  * along with this program; if not, write to the *
17  * Free Software Foundation, Inc., *
18  * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. *
19  ***************************************************************************/
26 #ifndef DOXYGEN_SHOULD_SKIP_THIS
28 
36 
37 
38 namespace gum {
39  // default constructor
40  template < typename GUM_SCALAR >
41  INLINE ShaferShenoyInference< GUM_SCALAR >::ShaferShenoyInference(
42  const IBayesNet< GUM_SCALAR >* BN,
43  FindBarrenNodesType barren_type,
44  bool use_binary_join_tree) :
45  JointTargetedInference< GUM_SCALAR >(BN),
46  EvidenceInference< GUM_SCALAR >(BN),
47  __use_binary_join_tree(use_binary_join_tree) {
48  // sets the barren nodes finding algorithm
49  setFindBarrenNodesType(barren_type);
50 
51  // create a default triangulation (the user can change it afterwards)
52  __triangulation = new DefaultTriangulation;
53 
54  // for debugging purposes
55  GUM_CONSTRUCTOR(ShaferShenoyInference);
56  }
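      // [editor's illustration, not part of the original file: a minimal
      //  construction sketch, assuming a gum::BayesNet< double > named bn
      //  was built elsewhere; the arguments mirror the signature above]
      //    gum::ShaferShenoyInference< double > inf(
      //       &bn, gum::FindBarrenNodesType::FIND_BARREN_NODES, true);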
57 
58 
59  // destructor
60  template < typename GUM_SCALAR >
61  INLINE ShaferShenoyInference< GUM_SCALAR >::~ShaferShenoyInference() {
62  // remove all the potentials created during the last message passing
63  for (const auto& pots : __created_potentials)
64  for (const auto pot : pots.second)
65  delete pot;
66 
67  // remove the potentials created after removing the nodes that received
68  // hard evidence
69  for (const auto& pot : __hard_ev_projected_CPTs)
70  delete pot.second;
71 
72  // remove all the potentials in __clique_ss_potential that do not belong
73  // to __clique_potentials: in this case, those potentials have been
74  // created by combination of the corresponding list of potentials in
75  // __clique_potentials. In other words, the size of this list is strictly
76  // greater than 1.
77  for (auto pot : __clique_ss_potential) {
78  if (__clique_potentials[pot.first].size() > 1) delete pot.second;
79  }
80 
81  // remove all the posteriors computed
82  for (const auto& pot : __target_posteriors)
83  delete pot.second;
84  for (const auto& pot : __joint_target_posteriors)
85  delete pot.second;
86 
87  // remove the junction tree and the triangulation algorithm
88  if (__JT != nullptr) delete __JT;
89  if (__junctionTree != nullptr) delete __junctionTree;
90  delete __triangulation;
91 
92  // for debugging purposes
93  GUM_DESTRUCTOR(ShaferShenoyInference);
94  }
95 
96 
97  /// sets a new triangulation algorithm
98  template < typename GUM_SCALAR >
99  void ShaferShenoyInference< GUM_SCALAR >::setTriangulation(
100  const Triangulation& new_triangulation) {
101  delete __triangulation;
102  __triangulation = new_triangulation.newFactory();
103  __is_new_jt_needed = true;
104  this->_setOutdatedBNStructureState();
105  }
106 
107 
108  /// returns the join tree used for inference
109  template < typename GUM_SCALAR >
110  INLINE const JoinTree* ShaferShenoyInference< GUM_SCALAR >::joinTree() {
111  __createNewJT();
112 
113  return __JT;
114  }
115 
116  /// returns the junction tree used for inference
117  template < typename GUM_SCALAR >
118  INLINE const JunctionTree* ShaferShenoyInference< GUM_SCALAR >::junctionTree() {
119  __createNewJT();
120 
121  return __junctionTree;
122  }
123 
124 
125  /// sets the operator for performing the projections
126  template < typename GUM_SCALAR >
127  INLINE void ShaferShenoyInference< GUM_SCALAR >::__setProjectionFunction(
128  Potential< GUM_SCALAR >* (*proj)(const Potential< GUM_SCALAR >&,
129  const Set< const DiscreteVariable* >&)) {
130  __projection_op = proj;
131  }
132 
133 
134  /// sets the operator for performing the combinations
135  template < typename GUM_SCALAR >
136  INLINE void ShaferShenoyInference< GUM_SCALAR >::__setCombinationFunction(
137  Potential< GUM_SCALAR >* (*comb)(const Potential< GUM_SCALAR >&,
138  const Potential< GUM_SCALAR >&)) {
139  __combination_op = comb;
140  }
141 
142 
143  /// invalidate all messages, posteriors and created potentials
144  template < typename GUM_SCALAR >
145  void ShaferShenoyInference< GUM_SCALAR >::__invalidateAllMessages() {
146  // remove all the messages computed
147  for (auto& potset : __separator_potentials)
148  potset.second.clear();
149  for (auto& mess_computed : __messages_computed)
150  mess_computed.second = false;
151 
152  // remove all the created potentials
153  for (const auto& potset : __created_potentials)
154  for (const auto pot : potset.second)
155  delete pot;
156 
157  // remove all the posteriors
158  for (const auto& pot : __target_posteriors)
159  delete pot.second;
160  for (const auto& pot : __joint_target_posteriors)
161  delete pot.second;
162 
163  // indicate that new messages need be computed
164  if (this->isInferenceReady() || this->isDone())
165  this->_setOutdatedBNPotentialsState();
166  }
167 
168 
169  /// sets how we determine barren nodes
170  template < typename GUM_SCALAR >
171  void ShaferShenoyInference< GUM_SCALAR >::setFindBarrenNodesType(
172  FindBarrenNodesType type) {
173  if (type != __barren_nodes_type) {
174  // WARNING: if a new type is added here, method __createJT should
175  // certainly be updated as well, in particular its step 2.
176  switch (type) {
177  case FindBarrenNodesType::FIND_BARREN_NODES:
178  case FindBarrenNodesType::FIND_NO_BARREN_NODES: break;
179 
180  default:
181  GUM_ERROR(InvalidArgument,
182  "setFindBarrenNodesType for type "
183  << (unsigned int)type << " is not implemented yet");
184  }
185 
186  __barren_nodes_type = type;
187 
188  // potentially, we may need to reconstruct a junction tree
189  this->_setOutdatedBNStructureState();
190  }
191  }
192 
193 
194  /// fired after a new evidence is inserted
195  template < typename GUM_SCALAR >
196  INLINE void
197  ShaferShenoyInference< GUM_SCALAR >::_onEvidenceAdded(NodeId id,
198  bool isHardEvidence) {
199  // if we have a new hard evidence, this modifies the undigraph over which
200  // the join tree is created. This is also the case if id is not a node
201  // of the undigraph
202  if (isHardEvidence || !__graph.exists(id))
203  __is_new_jt_needed = true;
204  else {
205  try {
206  __evidence_changes.insert(id, EvidenceChangeType::EVIDENCE_ADDED);
207  } catch (DuplicateElement&) {
208  // here, the evidence change already existed. This necessarily means
209  // that the current saved change is an EVIDENCE_ERASED. So if we
210  // erased the evidence and added some again, this corresponds to an
211  // EVIDENCE_MODIFIED
212  __evidence_changes[id] = EvidenceChangeType::EVIDENCE_MODIFIED;
213  }
214  }
215  }
216 
217 
218  /// fired before an evidence is removed
219  template < typename GUM_SCALAR >
220  INLINE void
221  ShaferShenoyInference< GUM_SCALAR >::_onEvidenceErased(NodeId id,
222  bool isHardEvidence) {
223  // if we delete a hard evidence, this modifies the undigraph over which
224  // the join tree is created.
225  if (isHardEvidence)
226  __is_new_jt_needed = true;
227  else {
228  try {
229  __evidence_changes.insert(id, EvidenceChangeType::EVIDENCE_ERASED);
230  } catch (DuplicateElement&) {
231  // here, the evidence change already existed and it is necessarily an
232  // EVIDENCE_ADDED or an EVIDENCE_MODIFIED. So, if the evidence has
233  // been added and is now erased, this is similar to not having created
234  // it. If the evidence was only modified, it already existed in the
235  // last inference and we should now indicate that it has been removed.
236  if (__evidence_changes[id] == EvidenceChangeType::EVIDENCE_ADDED)
237  __evidence_changes.erase(id);
238  else
239  __evidence_changes[id] = EvidenceChangeType::EVIDENCE_ERASED;
240  }
241  }
242  }
243 
244 
245  /// fired before all the evidence are erased
246  template < typename GUM_SCALAR >
247  void ShaferShenoyInference< GUM_SCALAR >::_onAllEvidenceErased(
248  bool has_hard_evidence) {
249  if (has_hard_evidence || !this->hardEvidenceNodes().empty())
250  __is_new_jt_needed = true;
251  else {
252  for (const auto node : this->softEvidenceNodes()) {
253  try {
254  __evidence_changes.insert(node, EvidenceChangeType::EVIDENCE_ERASED);
255  } catch (DuplicateElement&) {
256  // here, the evidence change already existed and it is necessarily an
257  // EVIDENCE_ADDED or an EVIDENCE_MODIFIED. So, if the evidence has
258  // been added and is now erased, this is similar to not having created
259  // it. If the evidence was only modified, it already existed in the
260  // last inference and we should now indicate that it has been removed.
261  if (__evidence_changes[node] == EvidenceChangeType::EVIDENCE_ADDED)
262  __evidence_changes.erase(node);
263  else
264  __evidence_changes[node] = EvidenceChangeType::EVIDENCE_ERASED;
265  }
266  }
267  }
268  }
269 
270 
271  /// fired after an evidence is changed
272  template < typename GUM_SCALAR >
273  INLINE void ShaferShenoyInference< GUM_SCALAR >::_onEvidenceChanged(
274  NodeId id, bool hasChangedSoftHard) {
275  if (hasChangedSoftHard)
276  __is_new_jt_needed = true;
277  else {
278  try {
279  __evidence_changes.insert(id, EvidenceChangeType::EVIDENCE_MODIFIED);
280  } catch (DuplicateElement&) {
281  // here, the evidence change already existed and it is necessarily an
282  // EVIDENCE_ADDED. So we should keep this state to indicate that this
283  // evidence is new w.r.t. the last inference
284  }
285  }
286  }
287 
288 
289  /// fired after a new single target is inserted
290  template < typename GUM_SCALAR >
291  INLINE void
292  ShaferShenoyInference< GUM_SCALAR >::_onMarginalTargetAdded(NodeId id) {}
293 
294 
295  /// fired before a single target is removed
296  template < typename GUM_SCALAR >
297  INLINE void
298  ShaferShenoyInference< GUM_SCALAR >::_onMarginalTargetErased(NodeId id) {}
299 
300 
301  /// fired after a new joint target is inserted
302  template < typename GUM_SCALAR >
303  INLINE void
304  ShaferShenoyInference< GUM_SCALAR >::_onJointTargetAdded(const NodeSet& set) {}
305 
306 
307  /// fired before a joint target is removed
308  template < typename GUM_SCALAR >
309  INLINE void
310  ShaferShenoyInference< GUM_SCALAR >::_onJointTargetErased(const NodeSet& set) {
311  }
312 
313 
314  /// fired after all the nodes of the BN are added as single targets
315  template < typename GUM_SCALAR >
316  INLINE void ShaferShenoyInference< GUM_SCALAR >::_onAllMarginalTargetsAdded() {}
317 
318 
319  /// fired before all the single targets are removed
320  template < typename GUM_SCALAR >
321  INLINE void ShaferShenoyInference< GUM_SCALAR >::_onAllMarginalTargetsErased() {}
322 
323 
324  /// fired before all the joint targets are removed
325  template < typename GUM_SCALAR >
326  INLINE void ShaferShenoyInference< GUM_SCALAR >::_onAllJointTargetsErased() {}
327 
328 
329  /// fired before all the single and joint targets are removed
330  template < typename GUM_SCALAR >
331  INLINE void ShaferShenoyInference< GUM_SCALAR >::_onAllTargetsErased() {}
332 
333 
334  // check whether a new junction tree is really needed for the next inference
335  template < typename GUM_SCALAR >
336  bool ShaferShenoyInference< GUM_SCALAR >::__isNewJTNeeded() const {
337  // if we do not have a JT or if __new_jt_needed is set to true, then
338  // we know that we need to create a new join tree
339  if ((__JT == nullptr) || __is_new_jt_needed) return true;
340 
341  // if some targets do not belong to the join tree and, consequently,
342  // to the undigraph that was used to construct the join tree, then we need
343  // to create a new JT. This situation may occur if we constructed the
344  // join tree after pruning irrelevant/barren nodes from the BN.
345  // However, note that the nodes that received hard evidence do not belong
346  // to the graph and, therefore, should not be taken into account
347  const auto& hard_ev_nodes = this->hardEvidenceNodes();
348  for (const auto node : this->targets()) {
349  if (!__graph.exists(node) && !hard_ev_nodes.exists(node)) return true;
350  }
351  for (const auto& nodes : this->jointTargets()) {
352  // here, we need to check that at least one clique contains all the nodes.
353  bool containing_clique_found = false;
354  for (const auto node : nodes) {
355  bool found = true;
356  const NodeSet& clique = __JT->clique(__node_to_clique[node]);
357  for (const auto xnode : nodes) {
358  if (!clique.contains(xnode) && !hard_ev_nodes.exists(xnode)) {
359  found = false;
360  break;
361  }
362  }
363  if (found) {
364  containing_clique_found = true;
365  break;
366  }
367  }
368 
369  if (!containing_clique_found) return true;
370  }
371 
372  // if some new evidence have been added on nodes that do not belong
373  // to __graph, then we potentially have to reconstruct the join tree
374  for (const auto& change : __evidence_changes) {
375  if ((change.second == EvidenceChangeType::EVIDENCE_ADDED)
376  && !__graph.exists(change.first))
377  return true;
378  }
379 
380  // here, the current JT is exactly what we need for the next inference
381  return false;
382  }
383 
384 
385  /// creates a new junction tree as well as its related data structures
386  template < typename GUM_SCALAR >
387  void ShaferShenoyInference< GUM_SCALAR >::__createNewJT() {
388  // to create the JT, we first create the moral graph of the BN in the
389  // following way in order to take into account the barren nodes and the
390  // nodes that received evidence:
391  // 1/ we create an undirected graph containing only the nodes and no edge
392  // 2/ if we take into account barren nodes, remove them from the graph
393  // 3/ add edges so that each node and its parents in the BN form a clique
394  // 4/ add edges so that set targets are cliques of the moral graph
395  // 5/ remove the nodes that received hard evidence (by step 3/, their
396  // parents are linked by edges, which is necessary for inference)
397  //
398  // At the end of step 5/, we have our moral graph and we can triangulate it
399  // to get the new junction tree
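      // [editor's illustration, not part of the original file: on the
      //  v-structure A -> C <- B with hard evidence on C, step 3/ adds the
      //  moral edge A - B, so that erasing C in step 5/ keeps A and B
      //  connected, as required by the dependency that the evidence on C
      //  induces between them]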
400 
401  // 1/ create an undirected graph containing only the nodes and no edge
402  const auto& bn = this->BN();
403  __graph.clear();
404  for (auto node : bn.dag())
405  __graph.addNodeWithId(node);
406 
407  // 2/ if we wish to exploit barren nodes, we shall remove them from the
408  // BN to do so: we identify all the nodes that are not targets and have
409  // received no evidence and such that their descendants are neither
410  // targets nor evidence nodes. Such nodes can be safely discarded from
411  // the BN without altering the inference output
412  if (__barren_nodes_type == FindBarrenNodesType::FIND_BARREN_NODES) {
413  // identify the barren nodes
414  NodeSet target_nodes = this->targets();
415  for (const auto& nodeset : this->jointTargets()) {
416  target_nodes += nodeset;
417  }
418 
419  // check that at least one node is not a target; otherwise, there is
420  // no barren node
421  if (target_nodes.size() != bn.size()) {
422  BarrenNodesFinder finder(&(bn.dag()));
423  finder.setTargets(&target_nodes);
424 
425  NodeSet evidence_nodes;
426  for (const auto& pair : this->evidence()) {
427  evidence_nodes.insert(pair.first);
428  }
429  finder.setEvidence(&evidence_nodes);
430 
431  NodeSet barren_nodes = finder.barrenNodes();
432 
433  // remove the barren nodes from the moral graph
434  for (const auto node : barren_nodes) {
435  __graph.eraseNode(node);
436  }
437  }
438  }
439 
440  // 3/ add edges so that each node and its parents in the BN form a clique
441  for (const auto node : __graph) {
442  const NodeSet& parents = bn.parents(node);
443  for (auto iter1 = parents.cbegin(); iter1 != parents.cend(); ++iter1) {
444  __graph.addEdge(*iter1, node);
445  auto iter2 = iter1;
446  for (++iter2; iter2 != parents.cend(); ++iter2) {
447  __graph.addEdge(*iter1, *iter2);
448  }
449  }
450  }
451 
452  // 4/ if there exist some joint targets, we shall add new edges into the
453  // moral graph in order to ensure that there exists a clique containing
454  // each joint
455  for (const auto& nodeset : this->jointTargets()) {
456  for (auto iter1 = nodeset.cbegin(); iter1 != nodeset.cend(); ++iter1) {
457  auto iter2 = iter1;
458  for (++iter2; iter2 != nodeset.cend(); ++iter2) {
459  __graph.addEdge(*iter1, *iter2);
460  }
461  }
462  }
463 
464  // 5/ remove all the nodes that received hard evidence
465  __hard_ev_nodes = this->hardEvidenceNodes();
466  for (const auto node : __hard_ev_nodes) {
467  __graph.eraseNode(node);
468  }
469 
470 
471  // now, we can compute the new junction tree. To speed-up computations
472  // (essentially, those of a distribution phase), we construct from this
473  // junction tree a binary join tree
474  if (__JT != nullptr) delete __JT;
475  if (__junctionTree != nullptr) delete __junctionTree;
476 
477  __triangulation->setGraph(&__graph, &(this->domainSizes()));
478  const JunctionTree& triang_jt = __triangulation->junctionTree();
479  if (__use_binary_join_tree) {
480  BinaryJoinTreeConverterDefault bjt_converter;
481  NodeSet emptyset;
482  __JT = new CliqueGraph(
483  bjt_converter.convert(triang_jt, this->domainSizes(), emptyset));
484  } else {
485  __JT = new CliqueGraph(triang_jt);
486  }
487  __junctionTree = new CliqueGraph(triang_jt);
488 
489 
490  // indicate, for each node of the moral graph a clique in __JT that can
491  // contain its conditional probability table
492  __node_to_clique.clear();
493  const std::vector< NodeId >& JT_elim_order =
494  __triangulation->eliminationOrder();
495  NodeProperty< int > elim_order(Size(JT_elim_order.size()));
496  for (std::size_t i = std::size_t(0), size = JT_elim_order.size(); i < size;
497  ++i)
498  elim_order.insert(JT_elim_order[i], (int)i);
499  const DAG& dag = bn.dag();
500  for (const auto node : __graph) {
501  // get the variables in the potential of node (and its parents)
502  NodeId first_eliminated_node = node;
503  int elim_number = elim_order[first_eliminated_node];
504 
505  for (const auto parent : dag.parents(node)) {
506  if (__graph.existsNode(parent) && (elim_order[parent] < elim_number)) {
507  elim_number = elim_order[parent];
508  first_eliminated_node = parent;
509  }
510  }
511 
512  // first_eliminated_node contains the first var (node or one of its
513  // parents) eliminated => the clique created during its elimination
514  // contains node and all of its parents => it can contain the potential
515  // assigned to the node in the BN
516  __node_to_clique.insert(
517  node, __triangulation->createdJunctionTreeClique(first_eliminated_node));
518  }
519 
520  // do the same for the nodes that received evidence. Here, we only store
521  // the nodes for which at least one parent belongs to __graph (otherwise
522  // their CPT is just a constant real number).
523  for (const auto node : __hard_ev_nodes) {
524  // get the set of parents of the node that belong to __graph
525  NodeSet pars(dag.parents(node).size());
526  for (const auto par : dag.parents(node))
527  if (__graph.exists(par)) pars.insert(par);
528 
529  if (!pars.empty()) {
530  NodeId first_eliminated_node = *(pars.begin());
531  int elim_number = elim_order[first_eliminated_node];
532 
533  for (const auto parent : pars) {
534  if (elim_order[parent] < elim_number) {
535  elim_number = elim_order[parent];
536  first_eliminated_node = parent;
537  }
538  }
539 
540  // first_eliminated_node contains the first var (node or one of its
541  // parents) eliminated => the clique created during its elimination
542  // contains node and all of its parents => it can contain the potential
543  // assigned to the node in the BN
544  __node_to_clique.insert(
545  node, __triangulation->createdJunctionTreeClique(first_eliminated_node));
546  }
547  }
548 
549  // indicate for each joint_target a clique that contains it
550  __joint_target_to_clique.clear();
551  for (const auto& set : this->jointTargets()) {
552  // remove from set all the nodes that received hard evidence (since they
553  // do not belong to the join tree)
554  NodeSet nodeset = set;
555  for (const auto node : __hard_ev_nodes)
556  if (nodeset.contains(node)) nodeset.erase(node);
557 
558  if (!nodeset.empty()) {
559  // the clique we are looking for is the one that was created when
560  // the first element of nodeset was eliminated
561  NodeId first_eliminated_node = *(nodeset.begin());
562  int elim_number = elim_order[first_eliminated_node];
563  for (const auto node : nodeset) {
564  if (elim_order[node] < elim_number) {
565  elim_number = elim_order[node];
566  first_eliminated_node = node;
567  }
568  }
569 
570  __joint_target_to_clique.insert(
571  set, __triangulation->createdJunctionTreeClique(first_eliminated_node));
572  }
573  }
574 
575 
576  // compute the roots of __JT's connected components
577  __computeJoinTreeRoots();
578 
579  // remove all the Shafer-Shenoy potentials stored into the cliques
580  for (const auto& xpot : __clique_ss_potential) {
581  if (__clique_potentials[xpot.first].size() > 1) delete xpot.second;
582  }
583 
584  // create empty potential lists into the cliques of the joint tree as well
585  // as empty lists of evidence
586  __PotentialSet empty_set;
587  __clique_potentials.clear();
588  __node_to_soft_evidence.clear();
589  for (const auto node : *__JT) {
590  __clique_potentials.insert(node, empty_set);
591  }
592 
593  // remove all the potentials created during the last inference
594  for (auto& potlist : __created_potentials)
595  for (auto pot : potlist.second)
596  delete pot;
597  __created_potentials.clear();
598 
599  // remove all the potentials created to take into account hard evidence
600  // during the last inference
601  for (auto pot_pair : __hard_ev_projected_CPTs)
602  delete pot_pair.second;
603  __hard_ev_projected_CPTs.clear();
604 
605  // remove all the constants created due to projections of CPTs that were
606  // defined over only hard evidence nodes
607  __constants.clear();
608 
609  // create empty lists of potentials for the messages and indicate that no
610  // message has been computed yet
611  __separator_potentials.clear();
612  __messages_computed.clear();
613  for (const auto& edge : __JT->edges()) {
614  const Arc arc1(edge.first(), edge.second());
615  __separator_potentials.insert(arc1, empty_set);
616  __messages_computed.insert(arc1, false);
617  const Arc arc2(Arc(edge.second(), edge.first()));
618  __separator_potentials.insert(arc2, empty_set);
619  __messages_computed.insert(arc2, false);
620  }
621 
622  // remove all the posteriors computed so far
623  for (const auto& pot : __target_posteriors)
624  delete pot.second;
625  __target_posteriors.clear();
626  for (const auto& pot : __joint_target_posteriors)
627  delete pot.second;
628  __joint_target_posteriors.clear();
629 
630 
631  // put all the CPT's of the Bayes net nodes into the cliques
632  // here, beware: all the potentials that are defined over some nodes
633  // including hard evidence must be projected so that these nodes are
634  // removed from the potential
635  const auto& evidence = this->evidence();
636  const auto& hard_evidence = this->hardEvidence();
637  for (const auto node : dag) {
638  if (__graph.exists(node) || __hard_ev_nodes.contains(node)) {
639  const Potential< GUM_SCALAR >& cpt = bn.cpt(node);
640 
641  // get the list of nodes with hard evidence in cpt
642  NodeSet hard_nodes;
643  const auto& variables = cpt.variablesSequence();
644  for (const auto var : variables) {
645  NodeId xnode = bn.nodeId(*var);
646  if (__hard_ev_nodes.contains(xnode)) hard_nodes.insert(xnode);
647  }
648 
649  // if hard_nodes contains hard evidence nodes, perform a projection
650  // and insert the result into the appropriate clique, else insert
651  // directly cpt into the clique
652  if (hard_nodes.empty()) {
653  __clique_potentials[__node_to_clique[node]].insert(&cpt);
654  } else {
655  // marginalize out the hard evidence nodes: if the cpt is defined
656  // only over nodes that received hard evidence, do not consider it
657  // as a potential anymore but as a constant
658  if (hard_nodes.size() == variables.size()) {
659  Instantiation inst;
660  const auto& vars = cpt.variablesSequence();
661  for (auto var : vars)
662  inst << *var;
663  for (Size i = 0; i < hard_nodes.size(); ++i) {
664  inst.chgVal(variables[i], hard_evidence[bn.nodeId(*(variables[i]))]);
665  }
666  __constants.insert(node, cpt[inst]);
667  } else {
668  // perform the projection with a combine and project instance
669  Set< const DiscreteVariable* > hard_variables;
670  __PotentialSet marg_cpt_set{&cpt};
671  for (const auto xnode : hard_nodes) {
672  marg_cpt_set.insert(evidence[xnode]);
673  hard_variables.insert(&(bn.variable(xnode)));
674  }
675 
676  // perform the combination of those potentials and their projection
677  MultiDimCombineAndProjectDefault< GUM_SCALAR, Potential >
678  combine_and_project(__combination_op, SSNewprojPotential);
679  __PotentialSet new_cpt_list =
680  combine_and_project.combineAndProject(marg_cpt_set, hard_variables);
681 
682  // there should be only one potential in new_cpt_list
683  if (new_cpt_list.size() != 1) {
684  // remove the CPT created to avoid memory leaks
685  for (auto pot : new_cpt_list) {
686  if (!marg_cpt_set.contains(pot)) delete pot;
687  }
688  GUM_ERROR(FatalError,
689  "the projection of a potential containing "
690  << "hard evidence is empty!");
691  }
692  const Potential< GUM_SCALAR >* projected_cpt = *(new_cpt_list.begin());
693  __clique_potentials[__node_to_clique[node]].insert(projected_cpt);
694  __hard_ev_projected_CPTs.insert(node, projected_cpt);
695  }
696  }
697  }
698  }
699 
700  // we shall now add all the potentials of the soft evidence
701  for (const auto node : this->softEvidenceNodes()) {
702  __node_to_soft_evidence.insert(node, evidence[node]);
703  __clique_potentials[__node_to_clique[node]].insert(evidence[node]);
704  }
705 
706  // now, in __clique_potentials, for each clique, we have the list of
707  // potentials that must be combined in order to produce the Shafer-Shenoy
708  // potential stored into the clique. So, perform this combination and
709  // store the result in __clique_ss_potential
710  __clique_ss_potential.clear();
711  MultiDimCombinationDefault< GUM_SCALAR, Potential > fast_combination(
712  __combination_op);
713  for (const auto& xpotset : __clique_potentials) {
714  const auto& potset = xpotset.second;
715  if (potset.size() > 0) {
716  // here, there will be an entry in __clique_ss_potential
717  // If there is only one element in potset, this element shall be
718  // stored into __clique_ss_potential, else all the elements of potset
719  // shall be combined and their result shall be stored
720  if (potset.size() == 1) {
721  __clique_ss_potential.insert(xpotset.first, *(potset.cbegin()));
722  } else {
723  auto joint = fast_combination.combine(potset);
724  __clique_ss_potential.insert(xpotset.first, joint);
725  }
726  }
727  }
728 
729  // indicate that the data structures are up to date.
730  __evidence_changes.clear();
731  __is_new_jt_needed = false;
732  }
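      // [editor's illustration, not part of the original file: for a CPT
      //  P(C|A,B) with hard evidence B = b0, the factor inserted above into
      //  the clique is the two-dimensional table P(C|A,B=b0); if A, B and C
      //  all received hard evidence, the CPT reduces to the single number
      //  P(c0|a0,b0), which is stored into __constants]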
733 
734 
735  /// prepares a new inference
736  template < typename GUM_SCALAR >
737  void ShaferShenoyInference< GUM_SCALAR >::_updateOutdatedBNStructure() {
738  // check if a new JT is really needed. If so, create it
739  if (__isNewJTNeeded()) {
740  __createNewJT();
741  } else {
742  // here, we can answer the next queries without reconstructing the whole
743  // junction tree. All we need to do is to indicate that we should
744  // update the potentials and messages for these queries
745  _updateOutdatedBNPotentials();
746  }
747  }
748 
749 
750  /// invalidates all the messages sent from a given clique
751  template < typename GUM_SCALAR >
752  void ShaferShenoyInference< GUM_SCALAR >::__diffuseMessageInvalidations(
753  NodeId from_id, NodeId to_id, NodeSet& invalidated_cliques) {
754  // invalidate the current clique
755  invalidated_cliques.insert(to_id);
756 
757  // invalidate the current arc
758  const Arc arc(from_id, to_id);
759  bool& message_computed = __messages_computed[arc];
760  if (message_computed) {
761  message_computed = false;
762  __separator_potentials[arc].clear();
763  if (__created_potentials.exists(arc)) {
764  auto& arc_created_potentials = __created_potentials[arc];
765  for (auto pot : arc_created_potentials)
766  delete pot;
767  arc_created_potentials.clear();
768  }
769 
770  // go on with the diffusion
771  for (const auto node_id : __JT->neighbours(to_id)) {
772  if (node_id != from_id)
773  __diffuseMessageInvalidations(to_id, node_id, invalidated_cliques);
774  }
775  }
776  }
777 
778 
779  /// updates the potentials and messages that are now outdated because
780  /// the evidence changed since the last inference
781  template < typename GUM_SCALAR >
782  void ShaferShenoyInference< GUM_SCALAR >::_updateOutdatedBNPotentials() {
783  // for each clique, indicate whether the potential stored into
784  // __clique_ss_potential[clique] is the result of a combination. In this
785  // case, it has been allocated by the combination and will need to be
786  // deallocated if its clique has been invalidated
787  NodeProperty< bool > ss_potential_to_deallocate(__clique_potentials.size());
788  for (auto pot_iter = __clique_potentials.cbegin();
789  pot_iter != __clique_potentials.cend();
790  ++pot_iter) {
791  ss_potential_to_deallocate.insert(pot_iter.key(),
792  (pot_iter.val().size() > 1));
793  }
794 
795  // compute the set of CPTs that were projected due to hard evidence and
796  // whose hard evidence have changed, so that they need a new projection.
797  // By the way, remove these CPTs since they are no longer needed
798  // Here only the values of the hard evidence can have changed (else a
799  // fully new join tree would have been computed).
800  // Note also that we know that the CPTs still contain some variable(s) after
801  // the projection (else they should be constants)
802  NodeSet hard_nodes_changed(__hard_ev_nodes.size());
803  for (const auto node : __hard_ev_nodes)
804  if (__evidence_changes.exists(node)) hard_nodes_changed.insert(node);
805 
806  NodeSet nodes_with_projected_CPTs_changed;
807  const auto& bn = this->BN();
808  for (auto pot_iter = __hard_ev_projected_CPTs.beginSafe();
809  pot_iter != __hard_ev_projected_CPTs.endSafe();
810  ++pot_iter) {
811  for (const auto var : bn.cpt(pot_iter.key()).variablesSequence()) {
812  if (hard_nodes_changed.contains(bn.nodeId(*var))) {
813  nodes_with_projected_CPTs_changed.insert(pot_iter.key());
814  delete pot_iter.val();
815  __clique_potentials[__node_to_clique[pot_iter.key()]].erase(
816  pot_iter.val());
817  __hard_ev_projected_CPTs.erase(pot_iter);
818  break;
819  }
820  }
821  }
822 
823 
824  // invalidate all the messages that are no longer correct: start from each of
825  // the nodes whose soft evidence has changed and perform a diffusion from
826  // the clique into which the soft evidence has been entered, indicating that
827  // the messages spreading from this clique are now invalid. At the same time,
828  // if there were potentials created on the arcs over which the messages were
829  // sent, remove them from memory. For all the cliques that received some
830  // projected CPT that should now be changed, do the same.
831  NodeSet invalidated_cliques(__JT->size());
832  for (const auto& pair : __evidence_changes) {
833  if (__node_to_clique.exists(pair.first)) {
834  const auto clique = __node_to_clique[pair.first];
835  invalidated_cliques.insert(clique);
836  for (const auto neighbor : __JT->neighbours(clique)) {
837  __diffuseMessageInvalidations(clique, neighbor, invalidated_cliques);
838  }
839  }
840  }
841 
842  // now, add to the set of invalidated cliques those that contain projected
843  // CPTs that were changed.
844  for (auto node : nodes_with_projected_CPTs_changed) {
845  const auto clique = __node_to_clique[node];
846  invalidated_cliques.insert(clique);
847  for (const auto neighbor : __JT->neighbours(clique)) {
848  __diffuseMessageInvalidations(clique, neighbor, invalidated_cliques);
849  }
850  }
851 
852  // now that we know the cliques whose set of potentials have been changed,
853  // we can discard their corresponding Shafer-Shenoy potential
854  for (const auto clique : invalidated_cliques) {
855  if (__clique_ss_potential.exists(clique)
856  && ss_potential_to_deallocate[clique]) {
857  delete __clique_ss_potential[clique];
858  }
859  }
860 
861 
862  // now we shall remove all the posteriors that belong to the
863  // invalidated cliques. First, cope only with the nodes that did not
864  // receive hard evidence since the other nodes do not belong to the
865  // join tree
866  for (auto iter = __target_posteriors.beginSafe();
867  iter != __target_posteriors.endSafe();
868  ++iter) {
869  if (__graph.exists(iter.key())
870  && (invalidated_cliques.exists(__node_to_clique[iter.key()]))) {
871  delete iter.val();
872  __target_posteriors.erase(iter);
873  }
874  }
875 
876  // now cope with the nodes that received hard evidence
877  for (auto iter = __target_posteriors.beginSafe();
878  iter != __target_posteriors.endSafe();
879  ++iter) {
880  if (hard_nodes_changed.contains(iter.key())) {
881  delete iter.val();
882  __target_posteriors.erase(iter);
883  }
884  }
885 
886  // finally, cope with joint targets
887  for (auto iter = __joint_target_posteriors.beginSafe();
888  iter != __joint_target_posteriors.endSafe();
889  ++iter) {
890  if (invalidated_cliques.exists(__joint_target_to_clique[iter.key()])) {
891  delete iter.val();
892  __joint_target_posteriors.erase(iter);
893  }
894  }
895 
896 
897  // remove all the evidence that were entered into __node_to_soft_evidence
898  // and __clique_potentials and add the new soft ones
899  for (auto& pot_pair : __node_to_soft_evidence) {
900  __clique_potentials[__node_to_clique[pot_pair.first]].erase(pot_pair.second);
901  }
902  __node_to_soft_evidence.clear();
903 
904  const auto& evidence = this->evidence();
905  for (const auto node : this->softEvidenceNodes()) {
906  __node_to_soft_evidence.insert(node, evidence[node]);
907  __clique_potentials[__node_to_clique[node]].insert(evidence[node]);
908  }
909 
910 
911  // Now add the projections of the CPTs due to newly changed hard evidence:
912  // if we are performing _updateOutdatedBNPotentials, this means that the
913  // set of nodes that received hard evidence has not been changed, only
914  // their instantiations can have been changed. So, if there is an entry
915  // for node in __constants, there will still be such an entry after
916  // performing the new projections. Idem for __hard_ev_projected_CPTs
917  for (const auto node : nodes_with_projected_CPTs_changed) {
918  // perform the projection with a combine and project instance
919  const Potential< GUM_SCALAR >& cpt = bn.cpt(node);
920  const auto& variables = cpt.variablesSequence();
921  Set< const DiscreteVariable* > hard_variables;
922  __PotentialSet marg_cpt_set{&cpt};
923  for (const auto var : variables) {
924  NodeId xnode = bn.nodeId(*var);
925  if (__hard_ev_nodes.exists(xnode)) {
926  marg_cpt_set.insert(evidence[xnode]);
927  hard_variables.insert(var);
928  }
929  }
930 
931  // perform the combination of those potentials and their projection
932  MultiDimCombineAndProjectDefault< GUM_SCALAR, Potential >
933  combine_and_project(__combination_op, SSNewprojPotential);
934  __PotentialSet new_cpt_list =
935  combine_and_project.combineAndProject(marg_cpt_set, hard_variables);
936 
937  // there should be only one potential in new_cpt_list
938  if (new_cpt_list.size() != 1) {
939  // remove the CPT created to avoid memory leaks
940  for (auto pot : new_cpt_list) {
941  if (!marg_cpt_set.contains(pot)) delete pot;
942  }
943  GUM_ERROR(FatalError,
944  "the projection of a potential containing "
945  << "hard evidence is empty!");
946  }
947  const Potential< GUM_SCALAR >* projected_cpt = *(new_cpt_list.begin());
948  __clique_potentials[__node_to_clique[node]].insert(projected_cpt);
949  __hard_ev_projected_CPTs.insert(node, projected_cpt);
950  }
951 
952  // here, the list of potentials stored in the invalidated cliques have
953  // been updated. So, now, we can combine them to produce the Shafer-Shenoy
954  // potential stored into the clique
955  MultiDimCombinationDefault< GUM_SCALAR, Potential > fast_combination(
956  __combination_op);
957  for (const auto clique : invalidated_cliques) {
958  const auto& potset = __clique_potentials[clique];
959 
960  if (potset.size() > 0) {
961  // here, there will be an entry in __clique_ss_potential
962  // If there is only one element in potset, this element shall be
963  // stored into __clique_ss_potential, else all the elements of potset
964  // shall be combined and their result shall be stored
965  if (potset.size() == 1) {
966  __clique_ss_potential[clique] = *(potset.cbegin());
967  } else {
968  auto joint = fast_combination.combine(potset);
969  __clique_ss_potential[clique] = joint;
970  }
971  }
972  }
973 
974 
975  // update the constants
976  const auto& hard_evidence = this->hardEvidence();
977  for (auto& node_cst : __constants) {
978  const Potential< GUM_SCALAR >& cpt = bn.cpt(node_cst.first);
979  const auto& variables = cpt.variablesSequence();
980  Instantiation inst;
981  for (const auto var : variables)
982  inst << *var;
983  for (const auto var : variables) {
984  inst.chgVal(var, hard_evidence[bn.nodeId(*var)]);
985  }
986  node_cst.second = cpt[inst];
987  }
988 
989  // indicate that all changes have been performed
990  __evidence_changes.clear();
991  }
992 
993 
994  /// compute a root for each connected component of __JT
995  template < typename GUM_SCALAR >
996  void ShaferShenoyInference< GUM_SCALAR >::__computeJoinTreeRoots() {
997  // get the set of cliques in which we can find the targets and joint_targets
998  NodeSet clique_targets;
999  for (const auto node : this->targets()) {
1000  try {
1001  clique_targets.insert(__node_to_clique[node]);
1002  } catch (Exception&) {}
1003  }
1004  for (const auto& set : this->jointTargets()) {
1005  try {
1006  clique_targets.insert(__joint_target_to_clique[set]);
1007  } catch (Exception&) {}
1008  }
1009 
1010  // put in a vector these cliques and their size
1011  std::vector< std::pair< NodeId, Size > > possible_roots(clique_targets.size());
1012  const auto& bn = this->BN();
1013  std::size_t i = 0;
1014  for (const auto clique_id : clique_targets) {
1015  const auto& clique = __JT->clique(clique_id);
1016  Size dom_size = 1;
1017  for (const auto node : clique) {
1018  dom_size *= bn.variable(node).domainSize();
1019  }
1020  possible_roots[i] = std::pair< NodeId, Size >(clique_id, dom_size);
1021  ++i;
1022  }
1023 
1024  // sort the cliques by increasing domain size
1025  std::sort(possible_roots.begin(),
1026  possible_roots.end(),
1027  [](const std::pair< NodeId, Size >& a,
1028  const std::pair< NodeId, Size >& b) -> bool {
1029  return a.second < b.second;
1030  });
1031 
1032  // pick up the clique with the smallest size in each connected component
1033  NodeProperty< bool > marked = __JT->nodesProperty(false);
1034  std::function< void(NodeId, NodeId) > diffuse_marks =
1035  [&marked, &diffuse_marks, this](NodeId node, NodeId from) {
1036  if (!marked[node]) {
1037  marked[node] = true;
1038  for (const auto neigh : __JT->neighbours(node))
1039  if ((neigh != from) && !marked[neigh]) diffuse_marks(neigh, node);
1040  }
1041  };
1042  __roots.clear();
1043  for (const auto xclique : possible_roots) {
1044  NodeId clique = xclique.first;
1045  if (!marked[clique]) {
1046  __roots.insert(clique);
1047  diffuse_marks(clique, clique);
1048  }
1049  }
1050  }
1051 
1052 
1053  // performs the collect phase of the message passing
1054  template < typename GUM_SCALAR >
1055  INLINE void ShaferShenoyInference< GUM_SCALAR >::__collectMessage(NodeId id,
1056  NodeId from) {
1057  for (const auto other : __JT->neighbours(id)) {
1058  if ((other != from) && !__messages_computed[Arc(other, id)])
1059  __collectMessage(other, id);
1060  }
1061 
1062  if ((id != from) && !__messages_computed[Arc(id, from)]) {
1063  __produceMessage(id, from);
1064  }
1065  }
1066 
1067 
1068  // remove barren variables
1069  template < typename GUM_SCALAR >
1070  Set< const Potential< GUM_SCALAR >* >
1071  ShaferShenoyInference< GUM_SCALAR >::__removeBarrenVariables(
1072  __PotentialSet& pot_list, Set< const DiscreteVariable* >& del_vars) {
1073  // remove from del_vars the variables that received some evidence:
1074  // only those that did not receive evidence can be barren variables
1075  Set< const DiscreteVariable* > the_del_vars = del_vars;
1076  for (auto iter = the_del_vars.beginSafe(); iter != the_del_vars.endSafe();
1077  ++iter) {
1078  NodeId id = this->BN().nodeId(**iter);
1079  if (this->hardEvidenceNodes().exists(id)
1080  || this->softEvidenceNodes().exists(id)) {
1081  the_del_vars.erase(iter);
1082  }
1083  }
1084 
1085  // assign to each random variable the set of potentials that contain it
1086  HashTable< const DiscreteVariable*, __PotentialSet > var2pots;
1087  __PotentialSet empty_pot_set;
1088  for (const auto pot : pot_list) {
1089  const Sequence< const DiscreteVariable* >& vars = pot->variablesSequence();
1090  for (const auto var : vars) {
1091  if (the_del_vars.exists(var)) {
1092  if (!var2pots.exists(var)) { var2pots.insert(var, empty_pot_set); }
1093  var2pots[var].insert(pot);
1094  }
1095  }
1096  }
1097 
1098  // each variable with only one potential is a barren variable
1099  // assign to each potential with barren nodes its set of barren variables
1100  HashTable< const Potential< GUM_SCALAR >*, Set< const DiscreteVariable* > >
1101  pot2barren_var;
1102  Set< const DiscreteVariable* > empty_var_set;
1103  for (auto elt : var2pots) {
1104  if (elt.second.size() == 1) { // here we have a barren variable
1105  const Potential< GUM_SCALAR >* pot = *(elt.second.begin());
1106  if (!pot2barren_var.exists(pot)) {
1107  pot2barren_var.insert(pot, empty_var_set);
1108  }
1109  pot2barren_var[pot].insert(elt.first); // insert the barren variable
1110  }
1111  }
1112 
1113  // for each potential with barren variables, marginalize them.
1114  // if the potential has only barren variables, simply remove them from the
1115  // set of potentials, else just project the potential
1116  MultiDimProjection< GUM_SCALAR, Potential > projector(SSNewprojPotential);
1117  __PotentialSet projected_pots;
1118  for (auto elt : pot2barren_var) {
1119  // remove the current potential from pot_list as, anyway, we will change
1120  // it
1121  const Potential< GUM_SCALAR >* pot = elt.first;
1122  pot_list.erase(pot);
1123 
1124  // check whether we need to add a projected new potential or not (i.e.,
1125  // whether there exist non-barren variables or not)
1126  if (pot->variablesSequence().size() != elt.second.size()) {
1127  auto new_pot = projector.project(*pot, elt.second);
1128  pot_list.insert(new_pot);
1129  projected_pots.insert(new_pot);
1130  }
1131  }
1132 
1133  return projected_pots;
1134  }
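      // [editor's illustration, not part of the original file: if B must be
      //  eliminated, received no evidence and P(B|A) is the only potential
      //  containing B, then B is barren: summing P(B|A) over B yields a
      //  table of ones, so P(B|A) never needs to enter a product; it is
      //  removed from pot_list and only its (trivial) projection remains]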
1135 
1136 
1137  // remove variables del_vars from the list of potentials pot_list
1138  template < typename GUM_SCALAR >
1139  Set< const Potential< GUM_SCALAR >* >
1140  ShaferShenoyInference< GUM_SCALAR >::__marginalizeOut(
1141  Set< const Potential< GUM_SCALAR >* > pot_list,
1142  Set< const DiscreteVariable* >& del_vars,
1143  Set< const DiscreteVariable* >& kept_vars) {
1144  // remove the potentials corresponding to barren variables if we want
1145  // to exploit barren nodes
1146  __PotentialSet barren_projected_potentials;
1147  if (__barren_nodes_type == FindBarrenNodesType::FIND_BARREN_NODES) {
1148  barren_projected_potentials = __removeBarrenVariables(pot_list, del_vars);
1149  }
1150 
1151  // create a combine and project operator that will perform the
1152  // marginalization
1153  MultiDimCombineAndProjectDefault< GUM_SCALAR, Potential > combine_and_project(
1154  __combination_op, __projection_op);
1155  __PotentialSet new_pot_list =
1156  combine_and_project.combineAndProject(pot_list, del_vars);
1157 
1158  // remove all the potentials that were created due to projections of
1159  // barren nodes and that are not part of the new_pot_list: these
1160  // potentials were just temporary potentials
1161  for (auto iter = barren_projected_potentials.beginSafe();
1162  iter != barren_projected_potentials.endSafe();
1163  ++iter) {
1164  if (!new_pot_list.exists(*iter)) delete *iter;
1165  }
1166 
1167  // remove all the potentials that have no dimension
1168  for (auto iter_pot = new_pot_list.beginSafe();
1169  iter_pot != new_pot_list.endSafe();
1170  ++iter_pot) {
1171  if ((*iter_pot)->variablesSequence().size() == 0) {
1172  // as we have already marginalized out variables that received evidence,
1173  // it may be the case that, after combining and projecting, some
1174  // potentials might be empty. In this case, we shall keep their
1175  // constant and remove them from memory
1176  // # TODO: keep the constants!
1177  delete *iter_pot;
1178  new_pot_list.erase(iter_pot);
1179  }
1180  }
1181 
1182  return new_pot_list;
1183  }
1184 
1185 
1186  // creates the message sent by clique from_id to clique to_id
1187  template < typename GUM_SCALAR >
1188  void ShaferShenoyInference< GUM_SCALAR >::__produceMessage(NodeId from_id,
1189  NodeId to_id) {
1190  // get the potentials of the clique.
1191  __PotentialSet pot_list;
1192  if (__clique_ss_potential.exists(from_id))
1193  pot_list.insert(__clique_ss_potential[from_id]);
1194 
1195  // add the messages sent by adjacent nodes to from_id
1196  for (const auto other_id : __JT->neighbours(from_id))
1197  if (other_id != to_id)
1198  pot_list += __separator_potentials[Arc(other_id, from_id)];
1199 
1200  // get the set of variables that need be removed from the potentials
1201  const NodeSet& from_clique = __JT->clique(from_id);
1202  const NodeSet& separator = __JT->separator(from_id, to_id);
1203  Set< const DiscreteVariable* > del_vars(from_clique.size());
1204  Set< const DiscreteVariable* > kept_vars(separator.size());
1205  const auto& bn = this->BN();
1206 
1207  for (const auto node : from_clique) {
1208  if (!separator.contains(node)) {
1209  del_vars.insert(&(bn.variable(node)));
1210  } else {
1211  kept_vars.insert(&(bn.variable(node)));
1212  }
1213  }
1214 
1215  // pot_list now contains all the potentials to multiply and marginalize
1216  // => combine the messages
1217  __PotentialSet new_pot_list = __marginalizeOut(pot_list, del_vars, kept_vars);
1218 
1219  // remove all the potentials that are equal to ones (as probability
1220  // matrix multiplications are tensorial, such potentials are useless)
1221  for (auto iter = new_pot_list.beginSafe(); iter != new_pot_list.endSafe();
1222  ++iter) {
1223  const auto pot = *iter;
1224  if (pot->variablesSequence().size() == 1) {
1225  bool is_all_ones = true;
1226  for (Instantiation inst(*pot); !inst.end(); ++inst) {
1227  if ((*pot)[inst] < __1_minus_epsilon) {
1228  is_all_ones = false;
1229  break;
1230  }
1231  }
1232  if (is_all_ones) {
1233  if (!pot_list.exists(pot)) delete pot;
1234  new_pot_list.erase(iter);
1235  continue;
1236  }
1237  }
1238  }
1239 
1240  // if there are still potentials in new_pot_list, combine them to
1241  // produce the message on the separator
1242  const Arc arc(from_id, to_id);
1243  if (!new_pot_list.empty()) {
1244  if (new_pot_list.size() == 1) { // there is only one potential
1245  // in new_pot_list, so this corresponds to our message on
1246  // the separator
1247  auto pot = *(new_pot_list.begin());
1248  __separator_potentials[arc] = std::move(new_pot_list);
1249  if (!pot_list.exists(pot)) {
1250  if (!__created_potentials.exists(arc))
1251  __created_potentials.insert(arc, __PotentialSet());
1252  __created_potentials[arc].insert(pot);
1253  }
1254  } else {
1255  // create the message in the separator
1256  MultiDimCombinationDefault< GUM_SCALAR, Potential > fast_combination(
1257  __combination_op);
1258  auto joint = fast_combination.combine(new_pot_list);
1259  __separator_potentials[arc].insert(joint);
1260  if (!__created_potentials.exists(arc))
1261  __created_potentials.insert(arc, __PotentialSet());
1262  __created_potentials[arc].insert(joint);
1263 
1264  // remove the temporary messages created in new_pot_list
1265  for (const auto pot : new_pot_list) {
1266  if (!pot_list.exists(pot)) { delete pot; }
1267  }
1268  }
1269  }
1270 
1271  __messages_computed[arc] = true;
1272  }
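      // [editor's illustration, not part of the original file: in formula
      //  form, the message computed above from clique C to neighbor D is
      //    phi_{C->D} = proj_{C inter D}( phi_C * prod_{E adj C, E != D} phi_{E->C} )
      //  where phi_C is the Shafer-Shenoy potential of C and the phi_{E->C}
      //  are the messages received from the other adjacent cliques]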
1273 
1274 
1275  // performs a whole inference
1276  template < typename GUM_SCALAR >
1277  INLINE void ShaferShenoyInference< GUM_SCALAR >::_makeInference() {
1278  // collect messages for all single targets
1279  for (const auto node : this->targets()) {
1280  // perform only collects in the join tree for nodes that have
1281  // not received hard evidence (those that received hard evidence were
1282  // not included into the join tree for speed-up reasons)
1283  if (__graph.exists(node)) {
1284  __collectMessage(__node_to_clique[node], __node_to_clique[node]);
1285  }
1286  }
1287 
1288  // collect messages for all set targets
1289  // by parsing __joint_target_to_clique, we ensure that the cliques that
1290  // are referenced belong to the join tree (even if some of the nodes in
1291  // their associated joint_target do not belong to __graph)
1292  for (const auto set : __joint_target_to_clique)
1293  __collectMessage(set.second, set.second);
1294  }
1295 
1296 
1297  /// returns a fresh potential equal to P(argument, evidence)
1298  template < typename GUM_SCALAR >
1299  Potential< GUM_SCALAR >*
1300  ShaferShenoyInference< GUM_SCALAR >::_unnormalizedJointPosterior(NodeId id) {
1301  const auto& bn = this->BN();
1302 
1303  // hard evidence do not belong to the join tree
1304  // # TODO: check for sets of inconsistent hard evidence
1305  if (this->hardEvidenceNodes().contains(id)) {
1306  return new Potential< GUM_SCALAR >(*(this->evidence()[id]));
1307  }
1308 
1309  // if we still need to perform some inference task, do it (this should
1310  // already have been done by _makeInference)
1311  NodeId clique_of_id = __node_to_clique[id];
1312  __collectMessage(clique_of_id, clique_of_id);
1313 
1314  // now we just need to create the product of the potentials of the clique
1315  // containing id with the messages received by this clique and
1316  // marginalize out all variables except id
1317  __PotentialSet pot_list;
1318  if (__clique_ss_potential.exists(clique_of_id))
1319  pot_list.insert(__clique_ss_potential[clique_of_id]);
1320 
1321  // add the messages sent by adjacent nodes to the target clique
1322  for (const auto other : __JT->neighbours(clique_of_id))
1323  pot_list += __separator_potentials[Arc(other, clique_of_id)];
1324 
1325  // get the set of variables that need be removed from the potentials
1326  const NodeSet& nodes = __JT->clique(clique_of_id);
1327  Set< const DiscreteVariable* > kept_vars{&(bn.variable(id))};
1328  Set< const DiscreteVariable* > del_vars(nodes.size());
1329  for (const auto node : nodes) {
1330  if (node != id) del_vars.insert(&(bn.variable(node)));
1331  }
1332 
1333  // pot_list now contains all the potentials to multiply and marginalize
1334  // => combine the messages
1335  __PotentialSet new_pot_list = __marginalizeOut(pot_list, del_vars, kept_vars);
1336  Potential< GUM_SCALAR >* joint = nullptr;
1337 
1338  if (new_pot_list.size() == 1) {
1339  joint = const_cast< Potential< GUM_SCALAR >* >(*(new_pot_list.begin()));
1340  // if pot already existed, create a copy, so that we can put it into
1341  // the __target_posteriors property
1342  if (pot_list.exists(joint)) {
1343  joint = new Potential< GUM_SCALAR >(*joint);
1344  } else {
1345  // remove the joint from new_pot_list so that it will not be
1346  // removed just after the else block
1347  new_pot_list.clear();
1348  }
1349  } else {
1350  MultiDimCombinationDefault< GUM_SCALAR, Potential > fast_combination(
1351  __combination_op);
1352  joint = fast_combination.combine(new_pot_list);
1353  }
1354 
1355  // remove the potentials that were created in new_pot_list
1356  for (auto pot : new_pot_list)
1357  if (!pot_list.exists(pot)) delete pot;
1358 
1359  // check that the joint posterior is different from a 0 vector: this would
1360  // indicate that some hard evidence are not compatible (their joint
1361  // probability is equal to 0)
1362  bool nonzero_found = false;
1363  for (Instantiation inst(*joint); !inst.end(); ++inst) {
1364  if ((*joint)[inst]) {
1365  nonzero_found = true;
1366  break;
1367  }
1368  }
1369  if (!nonzero_found) {
1370  // remove joint from memory to avoid memory leaks
1371  delete joint;
1372  GUM_ERROR(IncompatibleEvidence,
1373  "some evidence entered into the Bayes "
1374  "net are incompatible (their joint proba = 0)");
1375  }
1376  return joint;
1377  }
1378 
1379 
1380  /// returns the posterior of a given variable
1381  template < typename GUM_SCALAR >
1382  const Potential< GUM_SCALAR >&
1383  ShaferShenoyInference< GUM_SCALAR >::_posterior(NodeId id) {
1384  // check if we have already computed the posterior
1385  if (__target_posteriors.exists(id)) { return *(__target_posteriors[id]); }
1386 
1387  // compute the joint posterior and normalize
1388  auto joint = _unnormalizedJointPosterior(id);
1389  joint->normalize();
1390  __target_posteriors.insert(id, joint);
1391 
1392  return *joint;
1393  }
1394 
1395 
1396  // returns the unnormalized joint posterior of a given set of nodes
1397  template < typename GUM_SCALAR >
1398  Potential< GUM_SCALAR >*
1399  ShaferShenoyInference< GUM_SCALAR >::_unnormalizedJointPosterior(
1400  const NodeSet& set) {
1401  // hard evidence do not belong to the join tree, so extract the nodes
1402  // from targets that are not hard evidence
1403  NodeSet targets = set, hard_ev_nodes;
1404  for (const auto node : this->hardEvidenceNodes()) {
1405  if (targets.contains(node)) {
1406  targets.erase(node);
1407  hard_ev_nodes.insert(node);
1408  }
1409  }
1410 
1411  // if all the nodes have received hard evidence, then compute the
1412  // joint posterior directly by multiplying the hard evidence potentials
1413  const auto& evidence = this->evidence();
1414  if (targets.empty()) {
1415  __PotentialSet pot_list;
1416  for (const auto node : set) {
1417  pot_list.insert(evidence[node]);
1418  }
1419  if (pot_list.size() == 1) {
1420  auto pot = new Potential< GUM_SCALAR >(**(pot_list.begin()));
1421  return pot;
1422  } else {
1423  MultiDimCombinationDefault< GUM_SCALAR, Potential > fast_combination(
1424  __combination_op);
1425  return fast_combination.combine(pot_list);
1426  }
1427  }
1428 
1429 
1430  // if we still need to perform some inference task, do it: so, first,
1431  // determine the clique on which we should perform collect to compute
1432  // the unnormalized joint posterior of a set of nodes containing "set"
1433  NodeId clique_of_set;
1434  try {
1435  clique_of_set = __joint_target_to_clique[set];
1436  } catch (NotFound&) {
1437  // here, the precise set of targets does not belong to the set of targets
1438  // defined by the user. So we will try to find a clique in the junction
1439  // tree that contains "targets":
1440 
1441  // 1/ we should check that all the nodes belong to the join tree
1442  for (const auto node : targets) {
1443  if (!__graph.exists(node)) {
1444  GUM_ERROR(UndefinedElement, node << " is not a target node");
1445  }
1446  }
1447 
1448  // 2/ the clique created by the first eliminated node among target is the
1449  // one we are looking for
1450  const std::vector< NodeId >& JT_elim_order =
1451  __triangulation->eliminationOrder();
1452  NodeProperty< int > elim_order(Size(JT_elim_order.size()));
1453  for (std::size_t i = std::size_t(0), size = JT_elim_order.size(); i < size;
1454  ++i)
1455  elim_order.insert(JT_elim_order[i], (int)i);
1456  NodeId first_eliminated_node = *(targets.begin());
1457  int elim_number = elim_order[first_eliminated_node];
1458  for (const auto node : targets) {
1459  if (elim_order[node] < elim_number) {
1460  elim_number = elim_order[node];
1461  first_eliminated_node = node;
1462  }
1463  }
1464  clique_of_set =
1465  __triangulation->createdJunctionTreeClique(first_eliminated_node);
1466 
1467  // 3/ check that clique_of_set contains all the nodes in the target
1468  const NodeSet& clique_nodes = __JT->clique(clique_of_set);
1469  for (const auto node : targets) {
1470  if (!clique_nodes.contains(node)) {
1471  GUM_ERROR(UndefinedElement, set << " is not a joint target");
1472  }
1473  }
1474 
1475  // add the discovered clique to __joint_target_to_clique
1476  __joint_target_to_clique.insert(set, clique_of_set);
1477  }
1478 
1479  // now perform a collect on the clique
1480  __collectMessage(clique_of_set, clique_of_set);
1481 
1482  // now we just need to create the product of the potentials of the clique
1483  // containing set with the messages received by this clique and
1484  // marginalize out all variables except set
1485  __PotentialSet pot_list;
1486  if (__clique_ss_potential.exists(clique_of_set))
1487  pot_list.insert(__clique_ss_potential[clique_of_set]);
1488 
1489  // add the messages sent by adjacent nodes to targetClique
1490  for (const auto other : __JT->neighbours(clique_of_set))
1491  pot_list += __separator_potentials[Arc(other, clique_of_set)];
1492 
1493  // get the set of variables that need be removed from the potentials
1494  const NodeSet& nodes = __JT->clique(clique_of_set);
1495  Set< const DiscreteVariable* > del_vars(nodes.size());
1496  Set< const DiscreteVariable* > kept_vars(targets.size());
1497  const auto& bn = this->BN();
1498  for (const auto node : nodes) {
1499  if (!targets.contains(node)) {
1500  del_vars.insert(&(bn.variable(node)));
1501  } else {
1502  kept_vars.insert(&(bn.variable(node)));
1503  }
1504  }
1505 
1506  // pot_list now contains all the potentials to multiply and marginalize
1507  // => combine the messages
1508  __PotentialSet new_pot_list = __marginalizeOut(pot_list, del_vars, kept_vars);
1509  Potential< GUM_SCALAR >* joint = nullptr;
1510 
1511  if ((new_pot_list.size() == 1) && hard_ev_nodes.empty()) {
1512  joint = const_cast< Potential< GUM_SCALAR >* >(*(new_pot_list.begin()));
1513  // if pot already existed, create a copy, so that we can put it into
1514  // the __target_posteriors property
1515  if (pot_list.exists(joint)) {
1516  joint = new Potential< GUM_SCALAR >(*joint);
1517  } else {
1518  // remove the joint from new_pot_list so that it will not be
1519  // removed just after the next else block
1520  new_pot_list.clear();
1521  }
1522  } else {
1523  // combine all the potentials in new_pot_list with all the hard evidence
1524  // of the nodes in set
1525  __PotentialSet new_new_pot_list = new_pot_list;
1526  for (const auto node : hard_ev_nodes) {
1527  new_new_pot_list.insert(evidence[node]);
1528  }
1529  MultiDimCombinationDefault< GUM_SCALAR, Potential > fast_combination(
1530  __combination_op);
1531  joint = fast_combination.combine(new_new_pot_list);
1532  }
1533 
1534  // remove the potentials that were created in new_pot_list
1535  for (auto pot : new_pot_list)
1536  if (!pot_list.exists(pot)) delete pot;
1537 
1538  // check that the joint posterior is different from a 0 vector: this would
1539  // indicate that some hard evidence are not compatible
1540  bool nonzero_found = false;
1541  for (Instantiation inst(*joint); !inst.end(); ++inst) {
1542  if ((*joint)[inst]) {
1543  nonzero_found = true;
1544  break;
1545  }
1546  }
1547  if (!nonzero_found) {
1548  // remove joint from memory to avoid memory leaks
1549  delete joint;
1550  GUM_ERROR(IncompatibleEvidence,
1551  "some evidence entered into the Bayes "
1552  "net are incompatible (their joint proba = 0)");
1553  }
1554 
1555  return joint;
1556  }
1557 
1558 
1559  /// returns the posterior of a given set of variables
1560  template < typename GUM_SCALAR >
1561  const Potential< GUM_SCALAR >&
1562  ShaferShenoyInference< GUM_SCALAR >::_jointPosterior(const NodeSet& set) {
1563  // check if we have already computed the posterior
1564  if (__joint_target_posteriors.exists(set)) {
1565  return *(__joint_target_posteriors[set]);
1566  }
1567 
1568  // compute the joint posterior and normalize
1569  auto joint = _unnormalizedJointPosterior(set);
1570  joint->normalize();
1571  __joint_target_posteriors.insert(set, joint);
1572 
1573  return *joint;
1574  }
1575 
1576 
1577  /// returns the posterior of a wanted target included in a declared joint target
1578  template < typename GUM_SCALAR >
1579  const Potential< GUM_SCALAR >&
1580  ShaferShenoyInference< GUM_SCALAR >::_jointPosterior(
1581  const NodeSet& wanted_target, const NodeSet& declared_target) {
1582  // check if we have already computed the posterior of wanted_target
1583  if (__joint_target_posteriors.exists(wanted_target))
1584  return *(__joint_target_posteriors[wanted_target]);
1585 
1586  // here, we will have to compute the posterior of declared_target and
1587  // marginalize out all the variables that do not belong to wanted_target
1588 
1589  // check if we have already computed the posterior of declared_target
1590  if (!__joint_target_posteriors.exists(declared_target)) {
1591  _jointPosterior(declared_target);
1592  }
1593 
1594  // marginalize out all the variables that do not belong to wanted_target
1595  const auto& bn = this->BN();
1596  Set< const DiscreteVariable* > del_vars;
1597  for (const auto node : declared_target)
1598  if (!wanted_target.contains(node)) del_vars.insert(&(bn.variable(node)));
1599  auto pot = new Potential< GUM_SCALAR >(
1600  __joint_target_posteriors[declared_target]->margSumOut(del_vars));
1601 
1602  // save the result into the cache
1603  __joint_target_posteriors.insert(wanted_target, pot);
1604 
1605  return *pot;
1606  }
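  // [editor's sketch, not part of the original source] this overload serves
  // queries on a strict subset of a declared joint target: the posterior of
  // declared_target is computed (or fetched from the cache), the extra
  // variables are summed out with margSumOut, and the marginal is cached in
  // turn. Continuing the sketch above, with {0, 1} declared as joint target:
  //
  //   gum::NodeSet sub;
  //   sub.insert(gum::NodeId(0));
  //   const auto& marg = ie.jointPosterior(sub);  // P(0 | e), derived from
  //                                               // the cached P(0,1 | e)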
1607 
1608 
1609  template < typename GUM_SCALAR >
1610  GUM_SCALAR ShaferShenoyInference< GUM_SCALAR >::evidenceProbability() {
1611  // perform inference in each connected component
1612  this->makeInference();
1613 
1614  // for each connected component, select a variable X and compute the
1615  // joint probability of X and the evidence e. Then marginalize X out to
1616  // get p(e) within this connected component. Finally, multiply all the
1617  // p(e) values obtained and the constants in __constants. The result is
1618  // the probability of the evidence
1619 
1620  GUM_SCALAR prob_ev = 1;
1621  for (const auto root : __roots) {
1622  // get a node in the clique
1623  NodeId node = *(__JT->clique(root).begin());
1624  Potential< GUM_SCALAR >* tmp = _unnormalizedJointPosterior(node);
1625  GUM_SCALAR sum = 0;
1626  for (Instantiation iter(*tmp); !iter.end(); ++iter)
1627  sum += tmp->get(iter);
1628  prob_ev *= sum;
1629  delete tmp;
1630  }
1631 
1632  for (const auto& projected_cpt : __constants)
1633  prob_ev *= projected_cpt.second;
1634 
1635  return prob_ev;
1636  }
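  // [editor's sketch, not part of the original source] evidenceProbability()
  // triggers makeInference() itself, so it can be read directly after the
  // evidence has been entered. Assuming the same illustrative bn:
  //
  //   gum::ShaferShenoyInference< double > ie(&bn);
  //   ie.addEvidence(gum::NodeId(0), 1);     // hard evidence on node 0
  //   double pe = ie.evidenceProbability();  // p(e): product over connected
  //                                          // components and __constants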
1637 
1638 
1639 } /* namespace gum */
1640 
1641 #endif // DOXYGEN_SHOULD_SKIP_THIS