aGrUM  0.14.2
ShaferShenoyInference_tpl.h
/***************************************************************************
 *   Copyright (C) 2005 by Christophe GONZALES et Pierre-Henri WUILLEMIN   *
 *   {prenom.nom}_at_lip6.fr                                               *
 *                                                                         *
 *   This program is free software; you can redistribute it and/or modify  *
 *   it under the terms of the GNU General Public License as published by  *
 *   the Free Software Foundation; either version 2 of the License, or     *
 *   (at your option) any later version.                                   *
 *                                                                         *
 *   This program is distributed in the hope that it will be useful,       *
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of        *
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the         *
 *   GNU General Public License for more details.                          *
 *                                                                         *
 *   You should have received a copy of the GNU General Public License     *
 *   along with this program; if not, write to the                         *
 *   Free Software Foundation, Inc.,                                       *
 *   59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.             *
 ***************************************************************************/
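
/* Added usage sketch (not part of the original file): ShaferShenoyInference is
 * driven through the generic inference API it inherits (addEvidence,
 * makeInference, posterior, ...). The snippet below follows the public aGrUM
 * 0.14 API; the BIF file name and the variable names are placeholders:
 *
 *   gum::BayesNet< double >  bn;
 *   gum::BIFReader< double > reader(&bn, "model.bif");
 *   reader.proceed();
 *
 *   gum::ShaferShenoyInference< double > ie(&bn);
 *   ie.addEvidence(bn.idFromName("smoking"), 1);   // hard evidence
 *   ie.makeInference();
 *   std::cout << ie.posterior(bn.idFromName("cancer")) << std::endl;
 */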
#ifndef DOXYGEN_SHOULD_SKIP_THIS

// includes restored for self-containedness (assumed paths, following the
// aGrUM 0.14 source layout):
#  include <agrum/BN/algorithms/barrenNodesFinder.h>
#  include <agrum/graphs/algorithms/binaryJoinTreeConverterDefault.h>
#  include <agrum/multidim/instantiation.h>
#  include <agrum/multidim/operators/multiDimCombinationDefault.h>
#  include <agrum/multidim/operators/multiDimCombineAndProjectDefault.h>
#  include <agrum/multidim/operators/multiDimProjection.h>


namespace gum {
  // default constructor
  template < typename GUM_SCALAR >
  INLINE ShaferShenoyInference< GUM_SCALAR >::ShaferShenoyInference(
     const IBayesNet< GUM_SCALAR >* BN,
     FindBarrenNodesType barren_type,
     bool use_binary_join_tree) :
      JointTargetedInference< GUM_SCALAR >(BN),
      EvidenceInference< GUM_SCALAR >(BN),
      __use_binary_join_tree(use_binary_join_tree) {
    // sets the barren nodes finding algorithm
    setFindBarrenNodesType(barren_type);

    // create a default triangulation (the user can change it afterwards)
    __triangulation = new DefaultTriangulation;

    // for debugging purposes
    GUM_CONSTRUCTOR(ShaferShenoyInference);
  }


  // destructor
  template < typename GUM_SCALAR >
  INLINE ShaferShenoyInference< GUM_SCALAR >::~ShaferShenoyInference() {
    // remove all the potentials created during the last message passing
    for (const auto& pots : __created_potentials)
      for (const auto pot : pots.second)
        delete pot;

    // remove the potentials created after removing the nodes that received
    // hard evidence
    for (const auto& pot : __hard_ev_projected_CPTs)
      delete pot.second;

    // remove all the potentials in __clique_ss_potential that do not belong
    // to __clique_potentials: in this case, those potentials have been
    // created by combining the corresponding list of potentials in
    // __clique_potentials. In other words, the size of this list is strictly
    // greater than 1.
    for (auto pot : __clique_ss_potential) {
      if (__clique_potentials[pot.first].size() > 1) delete pot.second;
    }

    // remove all the posteriors computed
    for (const auto& pot : __target_posteriors)
      delete pot.second;
    for (const auto& pot : __joint_target_posteriors)
      delete pot.second;

    // remove the junction tree and the triangulation algorithm
    if (__JT != nullptr) delete __JT;
    if (__junctionTree != nullptr) delete __junctionTree;
    delete __triangulation;

    // for debugging purposes
    GUM_DESTRUCTOR(ShaferShenoyInference);
  }


  /// sets a new triangulation algorithm
  template < typename GUM_SCALAR >
  void ShaferShenoyInference< GUM_SCALAR >::setTriangulation(
     const Triangulation& new_triangulation) {
    delete __triangulation;
    __triangulation = new_triangulation.newFactory();
    __is_new_jt_needed = true;
    this->_setOutdatedBNStructureState();
  }


  /// returns the current join tree used
  template < typename GUM_SCALAR >
  INLINE const JoinTree* ShaferShenoyInference< GUM_SCALAR >::joinTree() {
    __createNewJT();

    return __JT;
  }

  /// returns the current junction tree
  template < typename GUM_SCALAR >
  INLINE const JunctionTree* ShaferShenoyInference< GUM_SCALAR >::junctionTree() {
    __createNewJT();

    return __junctionTree;
  }


  /// sets the operator for performing the projections
  template < typename GUM_SCALAR >
  INLINE void ShaferShenoyInference< GUM_SCALAR >::__setProjectionFunction(
     Potential< GUM_SCALAR >* (*proj)(const Potential< GUM_SCALAR >&,
                                      const Set< const DiscreteVariable* >&)) {
    __projection_op = proj;
  }


  /// sets the operator for performing the combinations
  template < typename GUM_SCALAR >
  INLINE void ShaferShenoyInference< GUM_SCALAR >::__setCombinationFunction(
     Potential< GUM_SCALAR >* (*comb)(const Potential< GUM_SCALAR >&,
                                      const Potential< GUM_SCALAR >&)) {
    __combination_op = comb;
  }


  /// invalidate all messages, posteriors and created potentials
  template < typename GUM_SCALAR >
  void ShaferShenoyInference< GUM_SCALAR >::__invalidateAllMessages() {
    // remove all the messages computed
    for (auto& potset : __separator_potentials)
      potset.second.clear();
    for (auto& mess_computed : __messages_computed)
      mess_computed.second = false;

    // remove all the created potentials
    for (const auto& potset : __created_potentials)
      for (const auto pot : potset.second)
        delete pot;

    // remove all the posteriors
    for (const auto& pot : __target_posteriors)
      delete pot.second;
    for (const auto& pot : __joint_target_posteriors)
      delete pot.second;

    // indicate that new messages need to be computed
    if (this->isInferenceReady() || this->isDone())
      this->_setOutdatedBNPotentialsState();
  }


  /// sets how we determine barren nodes
  template < typename GUM_SCALAR >
  void ShaferShenoyInference< GUM_SCALAR >::setFindBarrenNodesType(
     FindBarrenNodesType type) {
    if (type != __barren_nodes_type) {
      // WARNING: if a new type is added here, method __createJT should
      // certainly be updated as well, in particular its step 2.
      switch (type) {
        case FindBarrenNodesType::FIND_BARREN_NODES:
        case FindBarrenNodesType::FIND_NO_BARREN_NODES: break;

        default:
          GUM_ERROR(InvalidArgument,
                    "setFindBarrenNodesType for type "
                       << (unsigned int)type << " is not implemented yet");
      }

      __barren_nodes_type = type;

      // potentially, we may need to reconstruct a junction tree
      this->_setOutdatedBNStructureState();
    }
  }


  /// fired when a new evidence is inserted
  template < typename GUM_SCALAR >
  INLINE void
     ShaferShenoyInference< GUM_SCALAR >::_onEvidenceAdded(const NodeId id,
                                                           bool isHardEvidence) {
    // if we have a new hard evidence, this modifies the undigraph over which
    // the join tree is created. This is also the case if id is not a node of
    // the undigraph
    if (isHardEvidence || !__graph.exists(id))
      __is_new_jt_needed = true;
    else {
      try {
        __evidence_changes.insert(id, EvidenceChangeType::EVIDENCE_ADDED);
      } catch (DuplicateElement&) {
        // here, the evidence change already existed. This necessarily means
        // that the current saved change is an EVIDENCE_ERASED. So if we
        // erased the evidence and added some again, this corresponds to an
        // EVIDENCE_MODIFIED
        __evidence_changes[id] = EvidenceChangeType::EVIDENCE_MODIFIED;
      }
    }
  }


  /// fired when an evidence is removed
  template < typename GUM_SCALAR >
  INLINE void
     ShaferShenoyInference< GUM_SCALAR >::_onEvidenceErased(const NodeId id,
                                                            bool isHardEvidence) {
    // if we delete a hard evidence, this modifies the undigraph over which
    // the join tree is created.
    if (isHardEvidence)
      __is_new_jt_needed = true;
    else {
      try {
        __evidence_changes.insert(id, EvidenceChangeType::EVIDENCE_ERASED);
      } catch (DuplicateElement&) {
        // here, the evidence change already existed and it is necessarily an
        // EVIDENCE_ADDED or an EVIDENCE_MODIFIED. So, if the evidence has
        // been added and is now erased, this is similar to not having created
        // it. If the evidence was only modified, it already existed in the
        // last inference and we should now indicate that it has been removed.
        if (__evidence_changes[id] == EvidenceChangeType::EVIDENCE_ADDED)
          __evidence_changes.erase(id);
        else
          __evidence_changes[id] = EvidenceChangeType::EVIDENCE_ERASED;
      }
    }
  }


  /// fired when all the evidence are removed
  template < typename GUM_SCALAR >
  void ShaferShenoyInference< GUM_SCALAR >::_onAllEvidenceErased(
     bool has_hard_evidence) {
    if (has_hard_evidence || !this->hardEvidenceNodes().empty())
      __is_new_jt_needed = true;
    else {
      for (const auto node : this->softEvidenceNodes()) {
        try {
          __evidence_changes.insert(node, EvidenceChangeType::EVIDENCE_ERASED);
        } catch (DuplicateElement&) {
          // here, the evidence change already existed and it is necessarily an
          // EVIDENCE_ADDED or an EVIDENCE_MODIFIED. So, if the evidence has
          // been added and is now erased, this is similar to not having created
          // it. If the evidence was only modified, it already existed in the
          // last inference and we should now indicate that it has been removed.
          if (__evidence_changes[node] == EvidenceChangeType::EVIDENCE_ADDED)
            __evidence_changes.erase(node);
          else
            __evidence_changes[node] = EvidenceChangeType::EVIDENCE_ERASED;
        }
      }
    }
  }


  /// fired when an evidence is changed
  template < typename GUM_SCALAR >
  INLINE void ShaferShenoyInference< GUM_SCALAR >::_onEvidenceChanged(
     const NodeId id, bool hasChangedSoftHard) {
    if (hasChangedSoftHard)
      __is_new_jt_needed = true;
    else {
      try {
        __evidence_changes.insert(id, EvidenceChangeType::EVIDENCE_MODIFIED);
      } catch (DuplicateElement&) {
        // here, the evidence change already existed and it is necessarily an
        // EVIDENCE_ADDED. So we should keep this state to indicate that this
        // evidence is new w.r.t. the last inference
      }
    }
  }


  /// fired after a new single target is inserted
  template < typename GUM_SCALAR >
  INLINE void
     ShaferShenoyInference< GUM_SCALAR >::_onMarginalTargetAdded(const NodeId id) {
  }


  /// fired before a single target is removed
  template < typename GUM_SCALAR >
  INLINE void ShaferShenoyInference< GUM_SCALAR >::_onMarginalTargetErased(
     const NodeId id) {}


  /// fired after a new joint target is inserted
  template < typename GUM_SCALAR >
  INLINE void
     ShaferShenoyInference< GUM_SCALAR >::_onJointTargetAdded(const NodeSet& set) {
  }


  /// fired before a joint target is removed
  template < typename GUM_SCALAR >
  INLINE void ShaferShenoyInference< GUM_SCALAR >::_onJointTargetErased(
     const NodeSet& set) {}


  /// fired after all the nodes of the BN are added as single targets
  template < typename GUM_SCALAR >
  INLINE void ShaferShenoyInference< GUM_SCALAR >::_onAllMarginalTargetsAdded() {}


  /// fired before all the single targets are removed
  template < typename GUM_SCALAR >
  INLINE void ShaferShenoyInference< GUM_SCALAR >::_onAllMarginalTargetsErased() {}

  /// fired after a new Bayes net has been assigned to the engine
  template < typename GUM_SCALAR >
  void ShaferShenoyInference< GUM_SCALAR >::_onBayesNetChanged(
     const IBayesNet< GUM_SCALAR >* bn) {}

  /// fired before all the joint targets are removed
  template < typename GUM_SCALAR >
  INLINE void ShaferShenoyInference< GUM_SCALAR >::_onAllJointTargetsErased() {}


  /// fired before all single and joint targets are removed
  template < typename GUM_SCALAR >
  INLINE void ShaferShenoyInference< GUM_SCALAR >::_onAllTargetsErased() {}

  // check whether a new junction tree is really needed for the next inference
  template < typename GUM_SCALAR >
  bool ShaferShenoyInference< GUM_SCALAR >::__isNewJTNeeded() const {
    // if we do not have a JT or if __is_new_jt_needed is set to true, then
    // we know that we need to create a new join tree
    if ((__JT == nullptr) || __is_new_jt_needed) return true;

    // if some targets do not belong to the join tree and, consequently, to
    // the undigraph that was used to construct the join tree, then we need
    // to create a new JT. This situation may occur if we constructed the
    // join tree after pruning irrelevant/barren nodes from the BN.
    // However, note that the nodes that received hard evidence do not belong
    // to the graph and, therefore, should not be taken into account
    const auto& hard_ev_nodes = this->hardEvidenceNodes();
    for (const auto node : this->targets()) {
      if (!__graph.exists(node) && !hard_ev_nodes.exists(node)) return true;
    }
    for (const auto& nodes : this->jointTargets()) {
      // here, we need to check that at least one clique contains all the nodes.
      bool containing_clique_found = false;
      for (const auto node : nodes) {
        bool found = true;
        const NodeSet& clique = __JT->clique(__node_to_clique[node]);
        for (const auto xnode : nodes) {
          if (!clique.contains(xnode) && !hard_ev_nodes.exists(xnode)) {
            found = false;
            break;
          }
        }
        if (found) {
          containing_clique_found = true;
          break;
        }
      }

      if (!containing_clique_found) return true;
    }

    // if some new evidence has been added on nodes that do not belong
    // to __graph, then we potentially have to reconstruct the join tree
    for (const auto& change : __evidence_changes) {
      if ((change.second == EvidenceChangeType::EVIDENCE_ADDED)
          && !__graph.exists(change.first))
        return true;
    }

    // here, the current JT is exactly what we need for the next inference
    return false;
  }


  /// creates a new junction tree as well as its related data structures
  template < typename GUM_SCALAR >
  void ShaferShenoyInference< GUM_SCALAR >::__createNewJT() {
    // to create the JT, we first create the moral graph of the BN in the
    // following way in order to take into account the barren nodes and the
    // nodes that received evidence:
    // 1/ we create an undirected graph containing only the nodes and no edge
    // 2/ if we take barren nodes into account, remove them from the graph
    // 3/ add edges so that each node and its parents in the BN form a clique
    // 4/ add edges so that the joint (set) targets are cliques of the moral
    //    graph
    // 5/ remove the nodes that received hard evidence (by step 3/, their
    //    parents are linked by edges, which is necessary for inference)
    //
    // At the end of step 5/, we have our moral graph and we can triangulate
    // it to get the new junction tree (see the illustrative note below)

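    // Illustrative note (added): moralization in action. For the v-structure
    // A -> C <- B, step 3/ adds the edges A-C, B-C and the "moral" edge A-B,
    // so that the family {A, B, C} of node C forms a clique of the moral
    // graph. This is what guarantees that the CPT P(C | A, B) fits whole
    // inside one clique of the junction tree built below.
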
    // 1/ create an undirected graph containing only the nodes and no edge
    const auto& bn = this->BN();
    __graph.clear();
    for (auto node : bn.dag())
      __graph.addNodeWithId(node);

    // 2/ if we wish to exploit barren nodes, we shall remove them from the
    // BN. To do so, we identify all the nodes that are not targets, have
    // received no evidence, and whose descendants are neither targets nor
    // evidence nodes. Such nodes can be safely discarded from the BN
    // without altering the inference output
    if (__barren_nodes_type == FindBarrenNodesType::FIND_BARREN_NODES) {
      // identify the barren nodes
      NodeSet target_nodes = this->targets();
      for (const auto& nodeset : this->jointTargets()) {
        target_nodes += nodeset;
      }

      // check that not all the nodes are targets: otherwise, there is no
      // barren node
      if (target_nodes.size() != bn.size()) {
        BarrenNodesFinder finder(&(bn.dag()));
        finder.setTargets(&target_nodes);

        NodeSet evidence_nodes;
        for (const auto& pair : this->evidence()) {
          evidence_nodes.insert(pair.first);
        }
        finder.setEvidence(&evidence_nodes);

        NodeSet barren_nodes = finder.barrenNodes();

        // remove the barren nodes from the moral graph
        for (const auto node : barren_nodes) {
          __graph.eraseNode(node);
        }
      }
    }

    // 3/ add edges so that each node and its parents in the BN form a clique
    for (const auto node : __graph) {
      const NodeSet& parents = bn.parents(node);
      for (auto iter1 = parents.cbegin(); iter1 != parents.cend(); ++iter1) {
        __graph.addEdge(*iter1, node);
        auto iter2 = iter1;
        for (++iter2; iter2 != parents.cend(); ++iter2) {
          __graph.addEdge(*iter1, *iter2);
        }
      }
    }

    // 4/ if there exist some joint targets, we shall add new edges into the
    // moral graph in order to ensure that there exists a clique containing
    // each joint target
    for (const auto& nodeset : this->jointTargets()) {
      for (auto iter1 = nodeset.cbegin(); iter1 != nodeset.cend(); ++iter1) {
        auto iter2 = iter1;
        for (++iter2; iter2 != nodeset.cend(); ++iter2) {
          __graph.addEdge(*iter1, *iter2);
        }
      }
    }

    // 5/ remove all the nodes that received hard evidence
    __hard_ev_nodes = this->hardEvidenceNodes();
    for (const auto node : __hard_ev_nodes) {
      __graph.eraseNode(node);
    }


    // now, we can compute the new junction tree. To speed up computations
    // (essentially, those of a distribution phase), we construct from this
    // junction tree a binary join tree
    if (__JT != nullptr) delete __JT;
    if (__junctionTree != nullptr) delete __junctionTree;

    __triangulation->setGraph(&__graph, &(this->domainSizes()));
    const JunctionTree& triang_jt = __triangulation->junctionTree();
    if (__use_binary_join_tree) {
      BinaryJoinTreeConverterDefault bjt_converter;
      NodeSet emptyset;
      __JT = new CliqueGraph(
         bjt_converter.convert(triang_jt, this->domainSizes(), emptyset));
    } else {
      __JT = new CliqueGraph(triang_jt);
    }
    __junctionTree = new CliqueGraph(triang_jt);


    // indicate, for each node of the moral graph, a clique in __JT that can
    // contain its conditional probability table
    __node_to_clique.clear();
    const std::vector< NodeId >& JT_elim_order =
       __triangulation->eliminationOrder();
    NodeProperty< int > elim_order(Size(JT_elim_order.size()));
    for (std::size_t i = std::size_t(0), size = JT_elim_order.size(); i < size;
         ++i)
      elim_order.insert(JT_elim_order[i], (int)i);
    const DAG& dag = bn.dag();
    for (const auto node : __graph) {
      // get the variables in the potential of node (and its parents)
      NodeId first_eliminated_node = node;
      int elim_number = elim_order[first_eliminated_node];

      for (const auto parent : dag.parents(node)) {
        if (__graph.existsNode(parent) && (elim_order[parent] < elim_number)) {
          elim_number = elim_order[parent];
          first_eliminated_node = parent;
        }
      }

      // first_eliminated_node contains the first var (node or one of its
      // parents) eliminated => the clique created during its elimination
      // contains node and all of its parents => it can contain the potential
      // assigned to the node in the BN
      __node_to_clique.insert(
         node, __triangulation->createdJunctionTreeClique(first_eliminated_node));
    }

    // do the same for the nodes that received evidence. Here, we only store
    // the nodes for which at least one parent belongs to __graph (otherwise
    // their CPT is just a constant real number).
    for (const auto node : __hard_ev_nodes) {
      // get the set of parents of the node that belong to __graph
      NodeSet pars(dag.parents(node).size());
      for (const auto par : dag.parents(node))
        if (__graph.exists(par)) pars.insert(par);

      if (!pars.empty()) {
        NodeId first_eliminated_node = *(pars.begin());
        int elim_number = elim_order[first_eliminated_node];

        for (const auto parent : pars) {
          if (elim_order[parent] < elim_number) {
            elim_number = elim_order[parent];
            first_eliminated_node = parent;
          }
        }

        // first_eliminated_node contains the first var (node or one of its
        // parents) eliminated => the clique created during its elimination
        // contains node and all of its parents => it can contain the potential
        // assigned to the node in the BN
        __node_to_clique.insert(
           node,
           __triangulation->createdJunctionTreeClique(first_eliminated_node));
      }
    }

    // indicate for each joint target a clique that contains it
    __joint_target_to_clique.clear();
    for (const auto& set : this->jointTargets()) {
      // remove from set all the nodes that received hard evidence (since they
      // do not belong to the join tree)
      NodeSet nodeset = set;
      for (const auto node : __hard_ev_nodes)
        if (nodeset.contains(node)) nodeset.erase(node);

      if (!nodeset.empty()) {
        // the clique we are looking for is the one that was created when
        // the first element of nodeset was eliminated
        NodeId first_eliminated_node = *(nodeset.begin());
        int elim_number = elim_order[first_eliminated_node];
        for (const auto node : nodeset) {
          if (elim_order[node] < elim_number) {
            elim_number = elim_order[node];
            first_eliminated_node = node;
          }
        }

        __joint_target_to_clique.insert(
           set, __triangulation->createdJunctionTreeClique(first_eliminated_node));
      }
    }


    // compute the roots of __JT's connected components
    __computeJoinTreeRoots();

    // remove all the Shafer-Shenoy potentials stored into the cliques
    for (const auto& xpot : __clique_ss_potential) {
      if (__clique_potentials[xpot.first].size() > 1) delete xpot.second;
    }

    // create empty potential lists into the cliques of the join tree as well
    // as empty lists of evidence
    __PotentialSet empty_set;
    __clique_potentials.clear();
    __node_to_soft_evidence.clear();
    for (const auto node : *__JT) {
      __clique_potentials.insert(node, empty_set);
    }

    // remove all the potentials created during the last inference
    for (auto& potlist : __created_potentials)
      for (auto pot : potlist.second)
        delete pot;
    __created_potentials.clear();

    // remove all the potentials created to take into account hard evidence
    // during the last inference
    for (auto pot_pair : __hard_ev_projected_CPTs)
      delete pot_pair.second;
    __hard_ev_projected_CPTs.clear();

    // remove all the constants created due to projections of CPTs that were
    // defined over only hard evidence nodes
    __constants.clear();

    // create empty lists of potentials for the messages and indicate that no
    // message has been computed yet
    __separator_potentials.clear();
    __messages_computed.clear();
    for (const auto& edge : __JT->edges()) {
      const Arc arc1(edge.first(), edge.second());
      __separator_potentials.insert(arc1, empty_set);
      __messages_computed.insert(arc1, false);
      const Arc arc2(Arc(edge.second(), edge.first()));
      __separator_potentials.insert(arc2, empty_set);
      __messages_computed.insert(arc2, false);
    }

    // remove all the posteriors computed so far
    for (const auto& pot : __target_posteriors)
      delete pot.second;
    __target_posteriors.clear();
    for (const auto& pot : __joint_target_posteriors)
      delete pot.second;
    __joint_target_posteriors.clear();


    // put all the CPTs of the Bayes net nodes into the cliques
    // here, beware: all the potentials that are defined over some nodes
    // including hard evidence must be projected so that these nodes are
    // removed from the potential
    const auto& evidence = this->evidence();
    const auto& hard_evidence = this->hardEvidence();
    for (const auto node : dag) {
      if (__graph.exists(node) || __hard_ev_nodes.contains(node)) {
        const Potential< GUM_SCALAR >& cpt = bn.cpt(node);

        // get the list of nodes with hard evidence in cpt
        NodeSet hard_nodes;
        const auto& variables = cpt.variablesSequence();
        for (const auto var : variables) {
          NodeId xnode = bn.nodeId(*var);
          if (__hard_ev_nodes.contains(xnode)) hard_nodes.insert(xnode);
        }

        // if hard_nodes contains hard evidence nodes, perform a projection
        // and insert the result into the appropriate clique, else insert
        // cpt directly into the clique
        if (hard_nodes.empty()) {
          __clique_potentials[__node_to_clique[node]].insert(&cpt);
        } else {
          // marginalize out the hard evidence nodes: if the cpt is defined
          // only over nodes that received hard evidence, do not consider it
          // as a potential anymore but as a constant
          if (hard_nodes.size() == variables.size()) {
            Instantiation inst;
            const auto& vars = cpt.variablesSequence();
            for (auto var : vars)
              inst << *var;
            for (Size i = 0; i < hard_nodes.size(); ++i) {
              inst.chgVal(variables[i], hard_evidence[bn.nodeId(*(variables[i]))]);
            }
            __constants.insert(node, cpt[inst]);
          } else {
            // perform the projection with a combine and project instance
            Set< const DiscreteVariable* > hard_variables;
            __PotentialSet marg_cpt_set{&cpt};
            for (const auto xnode : hard_nodes) {
              marg_cpt_set.insert(evidence[xnode]);
              hard_variables.insert(&(bn.variable(xnode)));
            }

            // perform the combination of those potentials and their projection
            MultiDimCombineAndProjectDefault< GUM_SCALAR, Potential >
               combine_and_project(__combination_op, SSNewprojPotential);
            __PotentialSet new_cpt_list =
               combine_and_project.combineAndProject(marg_cpt_set, hard_variables);

            // there should be only one potential in new_cpt_list
            if (new_cpt_list.size() != 1) {
              // remove the CPTs created to avoid memory leaks
              for (auto pot : new_cpt_list) {
                if (!marg_cpt_set.contains(pot)) delete pot;
              }
              GUM_ERROR(FatalError,
                        "the projection of a potential containing "
                           << "hard evidence is empty!");
            }
            const Potential< GUM_SCALAR >* projected_cpt = *(new_cpt_list.begin());
            __clique_potentials[__node_to_clique[node]].insert(projected_cpt);
            __hard_ev_projected_CPTs.insert(node, projected_cpt);
          }
        }
      }
    }

    // we shall now add all the potentials of the soft evidence
    for (const auto node : this->softEvidenceNodes()) {
      __node_to_soft_evidence.insert(node, evidence[node]);
      __clique_potentials[__node_to_clique[node]].insert(evidence[node]);
    }

    // now, in __clique_potentials, for each clique, we have the list of
    // potentials that must be combined in order to produce the Shafer-Shenoy
    // potential stored into the clique. So, perform this combination and
    // store the result in __clique_ss_potential
    __clique_ss_potential.clear();
    MultiDimCombinationDefault< GUM_SCALAR, Potential > fast_combination(
       __combination_op);
    for (const auto& xpotset : __clique_potentials) {
      const auto& potset = xpotset.second;
      if (potset.size() > 0) {
        // here, there will be an entry in __clique_ss_potential.
        // If there is only one element in potset, this element shall be
        // stored into __clique_ss_potential, else all the elements of potset
        // shall be combined and their result shall be stored
        if (potset.size() == 1) {
          __clique_ss_potential.insert(xpotset.first, *(potset.cbegin()));
        } else {
          auto joint = fast_combination.combine(potset);
          __clique_ss_potential.insert(xpotset.first, joint);
        }
      }
    }

    // indicate that the data structures are up to date.
    __evidence_changes.clear();
    __is_new_jt_needed = false;
  }


  /// prepares a new inference by computing, if needed, a new join tree
  template < typename GUM_SCALAR >
  void ShaferShenoyInference< GUM_SCALAR >::_updateOutdatedBNStructure() {
    // check if a new JT is really needed. If so, create it
    if (__isNewJTNeeded()) {
      __createNewJT();
    } else {
      // here, we can answer the next queries without reconstructing all the
      // junction tree. All we need to do is to indicate that we should
      // update the potentials and messages for these queries
      _updateOutdatedBNPotentials();
    }
  }


  /// invalidate all the messages sent from a given clique
  template < typename GUM_SCALAR >
  void ShaferShenoyInference< GUM_SCALAR >::__diffuseMessageInvalidations(
     NodeId from_id, NodeId to_id, NodeSet& invalidated_cliques) {
    // invalidate the current clique
    invalidated_cliques.insert(to_id);

    // invalidate the current arc
    const Arc arc(from_id, to_id);
    bool& message_computed = __messages_computed[arc];
    if (message_computed) {
      message_computed = false;
      __separator_potentials[arc].clear();
      if (__created_potentials.exists(arc)) {
        auto& arc_created_potentials = __created_potentials[arc];
        for (auto pot : arc_created_potentials)
          delete pot;
        arc_created_potentials.clear();
      }

      // go on with the diffusion
      for (const auto node_id : __JT->neighbours(to_id)) {
        if (node_id != from_id)
          __diffuseMessageInvalidations(to_id, node_id, invalidated_cliques);
      }
    }
  }


  /// update the potentials stored in the cliques and invalidate outdated
  /// messages
  template < typename GUM_SCALAR >
  void ShaferShenoyInference< GUM_SCALAR >::_updateOutdatedBNPotentials() {
    // for each clique, indicate whether the potential stored into
    // __clique_ss_potential[clique] is the result of a combination. In this
    // case, it has been allocated by the combination and will need to be
    // deallocated if its clique has been invalidated
    NodeProperty< bool > ss_potential_to_deallocate(__clique_potentials.size());
    for (auto pot_iter = __clique_potentials.cbegin();
         pot_iter != __clique_potentials.cend();
         ++pot_iter) {
      ss_potential_to_deallocate.insert(pot_iter.key(),
                                        (pot_iter.val().size() > 1));
    }

    // compute the set of CPTs that were projected due to hard evidence and
    // whose hard evidence has changed, so that they need a new projection.
    // By the way, remove these CPTs since they are no longer needed.
    // Here, only the values of the hard evidence can have changed (else a
    // fully new join tree would have been computed).
    // Note also that we know that the CPTs still contain some variable(s)
    // after the projection (else they would be constants)
    NodeSet hard_nodes_changed(__hard_ev_nodes.size());
    for (const auto node : __hard_ev_nodes)
      if (__evidence_changes.exists(node)) hard_nodes_changed.insert(node);

    NodeSet nodes_with_projected_CPTs_changed;
    const auto& bn = this->BN();
    for (auto pot_iter = __hard_ev_projected_CPTs.beginSafe();
         pot_iter != __hard_ev_projected_CPTs.endSafe();
         ++pot_iter) {
      for (const auto var : bn.cpt(pot_iter.key()).variablesSequence()) {
        if (hard_nodes_changed.contains(bn.nodeId(*var))) {
          nodes_with_projected_CPTs_changed.insert(pot_iter.key());
          delete pot_iter.val();
          __clique_potentials[__node_to_clique[pot_iter.key()]].erase(
             pot_iter.val());
          __hard_ev_projected_CPTs.erase(pot_iter);
          break;
        }
      }
    }


    // invalidate all the messages that are no longer correct: start from each
    // of the nodes whose soft evidence has changed and perform a diffusion from
    // the clique into which the soft evidence has been entered, indicating that
    // the messages spreading from this clique are now invalid. At the same time,
    // if there were potentials created on the arcs over which the messages were
    // sent, remove them from memory. For all the cliques that received some
    // projected CPT that should now be changed, do the same.
    NodeSet invalidated_cliques(__JT->size());
    for (const auto& pair : __evidence_changes) {
      if (__node_to_clique.exists(pair.first)) {
        const auto clique = __node_to_clique[pair.first];
        invalidated_cliques.insert(clique);
        for (const auto neighbor : __JT->neighbours(clique)) {
          __diffuseMessageInvalidations(clique, neighbor, invalidated_cliques);
        }
      }
    }

    // now, add to the set of invalidated cliques those that contain projected
    // CPTs that were changed.
    for (auto node : nodes_with_projected_CPTs_changed) {
      const auto clique = __node_to_clique[node];
      invalidated_cliques.insert(clique);
      for (const auto neighbor : __JT->neighbours(clique)) {
        __diffuseMessageInvalidations(clique, neighbor, invalidated_cliques);
      }
    }

    // now that we know the cliques whose set of potentials have been changed,
    // we can discard their corresponding Shafer-Shenoy potential
    for (const auto clique : invalidated_cliques) {
      if (__clique_ss_potential.exists(clique)
          && ss_potential_to_deallocate[clique]) {
        delete __clique_ss_potential[clique];
      }
    }


    // now we shall remove all the posteriors that belong to the
    // invalidated cliques. First, cope only with the nodes that did not
    // receive hard evidence, since the other nodes do not belong to the
    // join tree
    for (auto iter = __target_posteriors.beginSafe();
         iter != __target_posteriors.endSafe();
         ++iter) {
      if (__graph.exists(iter.key())
          && (invalidated_cliques.exists(__node_to_clique[iter.key()]))) {
        delete iter.val();
        __target_posteriors.erase(iter);
      }
    }

    // now cope with the nodes that received hard evidence
    for (auto iter = __target_posteriors.beginSafe();
         iter != __target_posteriors.endSafe();
         ++iter) {
      if (hard_nodes_changed.contains(iter.key())) {
        delete iter.val();
        __target_posteriors.erase(iter);
      }
    }

    // finally, cope with joint targets
    for (auto iter = __joint_target_posteriors.beginSafe();
         iter != __joint_target_posteriors.endSafe();
         ++iter) {
      if (invalidated_cliques.exists(__joint_target_to_clique[iter.key()])) {
        delete iter.val();
        __joint_target_posteriors.erase(iter);
      }
    }


    // remove all the evidence that was entered into __node_to_soft_evidence
    // and __clique_potentials, and add the new soft ones
    for (auto& pot_pair : __node_to_soft_evidence) {
      __clique_potentials[__node_to_clique[pot_pair.first]].erase(pot_pair.second);
    }
    __node_to_soft_evidence.clear();

    const auto& evidence = this->evidence();
    for (const auto node : this->softEvidenceNodes()) {
      __node_to_soft_evidence.insert(node, evidence[node]);
      __clique_potentials[__node_to_clique[node]].insert(evidence[node]);
    }


    // Now add the projections of the CPTs due to newly changed hard evidence:
    // if we are performing _updateOutdatedBNPotentials, this means that the
    // set of nodes that received hard evidence has not changed, only
    // their instantiations can have changed. So, if there is an entry
    // for node in __constants, there will still be such an entry after
    // performing the new projections. Idem for __hard_ev_projected_CPTs
    for (const auto node : nodes_with_projected_CPTs_changed) {
      // perform the projection with a combine and project instance
      const Potential< GUM_SCALAR >& cpt = bn.cpt(node);
      const auto& variables = cpt.variablesSequence();
      Set< const DiscreteVariable* > hard_variables;
      __PotentialSet marg_cpt_set{&cpt};
      for (const auto var : variables) {
        NodeId xnode = bn.nodeId(*var);
        if (__hard_ev_nodes.exists(xnode)) {
          marg_cpt_set.insert(evidence[xnode]);
          hard_variables.insert(var);
        }
      }

      // perform the combination of those potentials and their projection
      MultiDimCombineAndProjectDefault< GUM_SCALAR, Potential >
         combine_and_project(__combination_op, SSNewprojPotential);
      __PotentialSet new_cpt_list =
         combine_and_project.combineAndProject(marg_cpt_set, hard_variables);

      // there should be only one potential in new_cpt_list
      if (new_cpt_list.size() != 1) {
        // remove the CPTs created to avoid memory leaks
        for (auto pot : new_cpt_list) {
          if (!marg_cpt_set.contains(pot)) delete pot;
        }
        GUM_ERROR(FatalError,
                  "the projection of a potential containing "
                     << "hard evidence is empty!");
      }
      const Potential< GUM_SCALAR >* projected_cpt = *(new_cpt_list.begin());
      __clique_potentials[__node_to_clique[node]].insert(projected_cpt);
      __hard_ev_projected_CPTs.insert(node, projected_cpt);
    }

    // here, the lists of potentials stored in the invalidated cliques have
    // been updated. So, now, we can combine them to produce the Shafer-Shenoy
    // potential stored into each clique
    MultiDimCombinationDefault< GUM_SCALAR, Potential > fast_combination(
       __combination_op);
    for (const auto clique : invalidated_cliques) {
      const auto& potset = __clique_potentials[clique];

      if (potset.size() > 0) {
        // here, there will be an entry in __clique_ss_potential.
        // If there is only one element in potset, this element shall be
        // stored into __clique_ss_potential, else all the elements of potset
        // shall be combined and their result shall be stored
        if (potset.size() == 1) {
          __clique_ss_potential[clique] = *(potset.cbegin());
        } else {
          auto joint = fast_combination.combine(potset);
          __clique_ss_potential[clique] = joint;
        }
      }
    }


    // update the constants
    const auto& hard_evidence = this->hardEvidence();
    for (auto& node_cst : __constants) {
      const Potential< GUM_SCALAR >& cpt = bn.cpt(node_cst.first);
      const auto& variables = cpt.variablesSequence();
      Instantiation inst;
      for (const auto var : variables)
        inst << *var;
      for (const auto var : variables) {
        inst.chgVal(var, hard_evidence[bn.nodeId(*var)]);
      }
      node_cst.second = cpt[inst];
    }

    // indicate that all changes have been performed
    __evidence_changes.clear();
  }


  /// compute a root for each connected component of __JT
  template < typename GUM_SCALAR >
  void ShaferShenoyInference< GUM_SCALAR >::__computeJoinTreeRoots() {
    // get the set of cliques in which we can find the targets and joint targets
    NodeSet clique_targets;
    for (const auto node : this->targets()) {
      try {
        clique_targets.insert(__node_to_clique[node]);
      } catch (Exception&) {}
    }
    for (const auto& set : this->jointTargets()) {
      try {
        clique_targets.insert(__joint_target_to_clique[set]);
      } catch (Exception&) {}
    }

    // put these cliques and their domain sizes into a vector
    std::vector< std::pair< NodeId, Size > > possible_roots(clique_targets.size());
    const auto& bn = this->BN();
    std::size_t i = 0;
    for (const auto clique_id : clique_targets) {
      const auto& clique = __JT->clique(clique_id);
      Size dom_size = 1;
      for (const auto node : clique) {
        dom_size *= bn.variable(node).domainSize();
      }
      possible_roots[i] = std::pair< NodeId, Size >(clique_id, dom_size);
      ++i;
    }

    // sort the cliques by increasing domain size
    std::sort(possible_roots.begin(),
              possible_roots.end(),
              [](const std::pair< NodeId, Size >& a,
                 const std::pair< NodeId, Size >& b) -> bool {
                return a.second < b.second;
              });

    // pick up the clique with the smallest size in each connected component
    NodeProperty< bool > marked = __JT->nodesProperty(false);
    std::function< void(NodeId, NodeId) > diffuse_marks =
       [&marked, &diffuse_marks, this](NodeId node, NodeId from) {
         if (!marked[node]) {
           marked[node] = true;
           for (const auto neigh : __JT->neighbours(node))
             if ((neigh != from) && !marked[neigh]) diffuse_marks(neigh, node);
         }
       };
    __roots.clear();
    for (const auto xclique : possible_roots) {
      NodeId clique = xclique.first;
      if (!marked[clique]) {
        __roots.insert(clique);
        diffuse_marks(clique, clique);
      }
    }
  }


  // performs the collect phase of Shafer-Shenoy's propagation
  template < typename GUM_SCALAR >
  INLINE void ShaferShenoyInference< GUM_SCALAR >::__collectMessage(NodeId id,
                                                                    NodeId from) {
    for (const auto other : __JT->neighbours(id)) {
      if ((other != from) && !__messages_computed[Arc(other, id)])
        __collectMessage(other, id);
    }

    if ((id != from) && !__messages_computed[Arc(id, from)]) {
      __produceMessage(id, from);
    }
  }


  // remove barren variables
  template < typename GUM_SCALAR >
  Set< const Potential< GUM_SCALAR >* >
     ShaferShenoyInference< GUM_SCALAR >::__removeBarrenVariables(
        __PotentialSet& pot_list, Set< const DiscreteVariable* >& del_vars) {
    // remove from del_vars the variables that received some evidence:
    // only those that did not receive evidence can be barren variables
    Set< const DiscreteVariable* > the_del_vars = del_vars;
    for (auto iter = the_del_vars.beginSafe(); iter != the_del_vars.endSafe();
         ++iter) {
      NodeId id = this->BN().nodeId(**iter);
      if (this->hardEvidenceNodes().exists(id)
          || this->softEvidenceNodes().exists(id)) {
        the_del_vars.erase(iter);
      }
    }

    // assign to each random variable the set of potentials that contain it
    HashTable< const DiscreteVariable*, __PotentialSet > var2pots;
    __PotentialSet empty_pot_set;
    for (const auto pot : pot_list) {
      const Sequence< const DiscreteVariable* >& vars = pot->variablesSequence();
      for (const auto var : vars) {
        if (the_del_vars.exists(var)) {
          if (!var2pots.exists(var)) { var2pots.insert(var, empty_pot_set); }
          var2pots[var].insert(pot);
        }
      }
    }

    // each variable that belongs to only one potential is a barren variable.
    // assign to each potential with barren nodes its set of barren variables
    HashTable< const Potential< GUM_SCALAR >*, Set< const DiscreteVariable* > >
       pot2barren_var;
    Set< const DiscreteVariable* > empty_var_set;
    for (auto elt : var2pots) {
      if (elt.second.size() == 1) {   // here we have a barren variable
        const Potential< GUM_SCALAR >* pot = *(elt.second.begin());
        if (!pot2barren_var.exists(pot)) {
          pot2barren_var.insert(pot, empty_var_set);
        }
        pot2barren_var[pot].insert(elt.first);   // insert the barren variable
      }
    }

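    // Illustrative note (added): suppose del_vars = {X, Y}, X appears only in
    // phi(X, Z) while Y appears in several potentials, and neither X nor Y
    // received evidence. Then X is barren: it can be summed out of phi(X, Z)
    // locally, before any combination takes place, which keeps the
    // intermediate tables produced by message passing smaller.
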
    // for each potential with barren variables, marginalize them out.
    // if the potential has only barren variables, simply remove it from the
    // set of potentials, else just project the potential
    MultiDimProjection< GUM_SCALAR, Potential > projector(SSNewprojPotential);
    __PotentialSet projected_pots;
    for (auto elt : pot2barren_var) {
      // remove the current potential from pot_list as, anyway, we will change
      // it
      const Potential< GUM_SCALAR >* pot = elt.first;
      pot_list.erase(pot);

      // check whether we need to add a projected new potential or not (i.e.,
      // whether there exist non-barren variables or not)
      if (pot->variablesSequence().size() != elt.second.size()) {
        auto new_pot = projector.project(*pot, elt.second);
        pot_list.insert(new_pot);
        projected_pots.insert(new_pot);
      }
    }

    return projected_pots;
  }


  // remove variables del_vars from the list of potentials pot_list
  template < typename GUM_SCALAR >
  Set< const Potential< GUM_SCALAR >* >
     ShaferShenoyInference< GUM_SCALAR >::__marginalizeOut(
        Set< const Potential< GUM_SCALAR >* > pot_list,
        Set< const DiscreteVariable* >& del_vars,
        Set< const DiscreteVariable* >& kept_vars) {
    // remove the potentials corresponding to barren variables if we want
    // to exploit barren nodes
    __PotentialSet barren_projected_potentials;
    if (__barren_nodes_type == FindBarrenNodesType::FIND_BARREN_NODES) {
      barren_projected_potentials = __removeBarrenVariables(pot_list, del_vars);
    }

    // create a combine and project operator that will perform the
    // marginalization
    MultiDimCombineAndProjectDefault< GUM_SCALAR, Potential > combine_and_project(
       __combination_op, __projection_op);
    __PotentialSet new_pot_list =
       combine_and_project.combineAndProject(pot_list, del_vars);

    // remove all the potentials that were created due to projections of
    // barren nodes and that are not part of the new_pot_list: these
    // potentials were just temporary potentials
    for (auto iter = barren_projected_potentials.beginSafe();
         iter != barren_projected_potentials.endSafe();
         ++iter) {
      if (!new_pot_list.exists(*iter)) delete *iter;
    }

    // remove all the potentials that have no dimension
    for (auto iter_pot = new_pot_list.beginSafe();
         iter_pot != new_pot_list.endSafe();
         ++iter_pot) {
      if ((*iter_pot)->variablesSequence().size() == 0) {
        // as we have already marginalized out variables that received evidence,
        // it may be the case that, after combining and projecting, some
        // potentials might be empty. In this case, we shall keep their
        // constant and remove them from memory
        // # TODO: keep the constants!
        delete *iter_pot;
        new_pot_list.erase(iter_pot);
      }
    }

    return new_pot_list;
  }


  // creates the message sent by clique from_id to clique to_id
  template < typename GUM_SCALAR >
  void ShaferShenoyInference< GUM_SCALAR >::__produceMessage(NodeId from_id,
                                                             NodeId to_id) {
    // get the potentials of the clique
    __PotentialSet pot_list;
    if (__clique_ss_potential.exists(from_id))
      pot_list.insert(__clique_ss_potential[from_id]);

    // add the messages sent by adjacent nodes to from_id
    for (const auto other_id : __JT->neighbours(from_id))
      if (other_id != to_id)
        pot_list += __separator_potentials[Arc(other_id, from_id)];

    // get the set of variables that need to be removed from the potentials
    const NodeSet& from_clique = __JT->clique(from_id);
    const NodeSet& separator = __JT->separator(from_id, to_id);
    Set< const DiscreteVariable* > del_vars(from_clique.size());
    Set< const DiscreteVariable* > kept_vars(separator.size());
    const auto& bn = this->BN();

    for (const auto node : from_clique) {
      if (!separator.contains(node)) {
        del_vars.insert(&(bn.variable(node)));
      } else {
        kept_vars.insert(&(bn.variable(node)));
      }
    }

    // pot_list now contains all the potentials to multiply and marginalize
    // => combine the messages
    __PotentialSet new_pot_list = __marginalizeOut(pot_list, del_vars, kept_vars);

    // remove all the potentials that are filled with ones: multiplying by
    // such a potential does not change anything, so it is useless
    for (auto iter = new_pot_list.beginSafe(); iter != new_pot_list.endSafe();
         ++iter) {
      const auto pot = *iter;
      if (pot->variablesSequence().size() == 1) {
        bool is_all_ones = true;
        for (Instantiation inst(*pot); !inst.end(); ++inst) {
          if ((*pot)[inst] < __1_minus_epsilon) {
            is_all_ones = false;
            break;
          }
        }
        if (is_all_ones) {
          if (!pot_list.exists(pot)) delete pot;
          new_pot_list.erase(iter);
          continue;
        }
      }
    }

    // if there are still potentials in new_pot_list, combine them to
    // produce the message on the separator
    const Arc arc(from_id, to_id);
    if (!new_pot_list.empty()) {
      if (new_pot_list.size() == 1) {   // there is only one potential
        // in new_pot_list, so this corresponds to our message on
        // the separator
        auto pot = *(new_pot_list.begin());
        __separator_potentials[arc] = std::move(new_pot_list);
        if (!pot_list.exists(pot)) {
          if (!__created_potentials.exists(arc))
            __created_potentials.insert(arc, __PotentialSet());
          __created_potentials[arc].insert(pot);
        }
      } else {
        // create the message in the separator
        MultiDimCombinationDefault< GUM_SCALAR, Potential > fast_combination(
           __combination_op);
        auto joint = fast_combination.combine(new_pot_list);
        __separator_potentials[arc].insert(joint);
        if (!__created_potentials.exists(arc))
          __created_potentials.insert(arc, __PotentialSet());
        __created_potentials[arc].insert(joint);

        // remove the temporary messages created in new_pot_list
        for (const auto pot : new_pot_list) {
          if (!pot_list.exists(pot)) { delete pot; }
        }
      }
    }

    __messages_computed[arc] = true;
  }


  // performs a whole inference
  template < typename GUM_SCALAR >
  void ShaferShenoyInference< GUM_SCALAR >::_makeInference() {
    // collect messages for all single targets
    for (const auto node : this->targets()) {
      // perform only collects in the join tree for nodes that have
      // not received hard evidence (those that received hard evidence were
      // not included into the join tree, for speed-up reasons)
      if (__graph.exists(node)) {
        __collectMessage(__node_to_clique[node], __node_to_clique[node]);
      }
    }

    // collect messages for all set targets
    // by parsing __joint_target_to_clique, we ensure that the cliques that
    // are referenced belong to the join tree (even if some of the nodes in
    // their associated joint_target do not belong to __graph)
    for (const auto set : __joint_target_to_clique)
      __collectMessage(set.second, set.second);
  }


  /// returns a fresh potential equal to P(argument, evidence)
  template < typename GUM_SCALAR >
  Potential< GUM_SCALAR >*
     ShaferShenoyInference< GUM_SCALAR >::_unnormalizedJointPosterior(NodeId id) {
    const auto& bn = this->BN();

    // nodes that received hard evidence do not belong to the join tree
    // # TODO: check for sets of inconsistent hard evidence
    if (this->hardEvidenceNodes().contains(id)) {
      return new Potential< GUM_SCALAR >(*(this->evidence()[id]));
    }

    // if we still need to perform some inference task, do it (this should
    // already have been done by _makeInference)
    NodeId clique_of_id = __node_to_clique[id];
    __collectMessage(clique_of_id, clique_of_id);

    // now we just need to create the product of the potentials of the clique
    // containing id with the messages received by this clique and
    // marginalize out all variables except id
    __PotentialSet pot_list;
    if (__clique_ss_potential.exists(clique_of_id))
      pot_list.insert(__clique_ss_potential[clique_of_id]);

    // add the messages sent by adjacent nodes to clique_of_id
    for (const auto other : __JT->neighbours(clique_of_id))
      pot_list += __separator_potentials[Arc(other, clique_of_id)];

    // get the set of variables that need to be removed from the potentials
    const NodeSet& nodes = __JT->clique(clique_of_id);
    Set< const DiscreteVariable* > kept_vars{&(bn.variable(id))};
    Set< const DiscreteVariable* > del_vars(nodes.size());
    for (const auto node : nodes) {
      if (node != id) del_vars.insert(&(bn.variable(node)));
    }

    // pot_list now contains all the potentials to multiply and marginalize
    // => combine the messages
    __PotentialSet new_pot_list = __marginalizeOut(pot_list, del_vars, kept_vars);
    Potential< GUM_SCALAR >* joint = nullptr;

    if (new_pot_list.size() == 1) {
      joint = const_cast< Potential< GUM_SCALAR >* >(*(new_pot_list.begin()));
      // if pot already existed, create a copy, so that we can put it into
      // the __target_posteriors property
      if (pot_list.exists(joint)) {
        joint = new Potential< GUM_SCALAR >(*joint);
      } else {
        // remove the joint from new_pot_list so that it will not be
        // removed just after the else block
        new_pot_list.clear();
      }
    } else {
      MultiDimCombinationDefault< GUM_SCALAR, Potential > fast_combination(
         __combination_op);
      joint = fast_combination.combine(new_pot_list);
    }

    // remove the potentials that were created in new_pot_list
    for (auto pot : new_pot_list)
      if (!pot_list.exists(pot)) delete pot;

    // check that the joint posterior is different from a 0 vector: this would
    // indicate that some hard evidence are not compatible (their joint
    // probability is equal to 0)
    bool nonzero_found = false;
    for (Instantiation inst(*joint); !inst.end(); ++inst) {
      if ((*joint)[inst]) {
        nonzero_found = true;
        break;
      }
    }
    if (!nonzero_found) {
      // remove joint from memory to avoid memory leaks
      delete joint;
      GUM_ERROR(IncompatibleEvidence,
                "some evidence entered into the Bayes "
                "net are incompatible (their joint proba = 0)");
    }
    return joint;
  }


  /// returns the posterior of a given variable
  template < typename GUM_SCALAR >
  const Potential< GUM_SCALAR >&
     ShaferShenoyInference< GUM_SCALAR >::_posterior(NodeId id) {
    // check if we have already computed the posterior
    if (__target_posteriors.exists(id)) { return *(__target_posteriors[id]); }

    // compute the joint posterior and normalize
    auto joint = _unnormalizedJointPosterior(id);
    joint->normalize();
    __target_posteriors.insert(id, joint);

    return *joint;
  }


  // returns the unnormalized joint posterior of a given set of nodes
  template < typename GUM_SCALAR >
  Potential< GUM_SCALAR >*
     ShaferShenoyInference< GUM_SCALAR >::_unnormalizedJointPosterior(
        const NodeSet& set) {
    // nodes with hard evidence do not belong to the join tree, so extract
    // from targets the nodes that are not hard evidence
    NodeSet targets = set, hard_ev_nodes;
    for (const auto node : this->hardEvidenceNodes()) {
      if (targets.contains(node)) {
        targets.erase(node);
        hard_ev_nodes.insert(node);
      }
    }

    // if all the nodes have received hard evidence, then compute the
    // joint posterior directly by multiplying the hard evidence potentials
    const auto& evidence = this->evidence();
    if (targets.empty()) {
      __PotentialSet pot_list;
      for (const auto node : set) {
        pot_list.insert(evidence[node]);
      }
      if (pot_list.size() == 1) {
        auto pot = new Potential< GUM_SCALAR >(**(pot_list.begin()));
        return pot;
      } else {
        MultiDimCombinationDefault< GUM_SCALAR, Potential > fast_combination(
           __combination_op);
        return fast_combination.combine(pot_list);
      }
    }


    // if we still need to perform some inference task, do it: so, first,
    // determine the clique on which we should perform collect to compute
    // the unnormalized joint posterior of a set of nodes containing "set"
    NodeId clique_of_set;
    try {
      clique_of_set = __joint_target_to_clique[set];
    } catch (NotFound&) {
      // here, the precise set of targets does not belong to the set of targets
      // defined by the user. So we will try to find a clique in the junction
      // tree that contains "targets":

      // 1/ we should check that all the nodes belong to the join tree
      for (const auto node : targets) {
        if (!__graph.exists(node)) {
          GUM_ERROR(UndefinedElement, node << " is not a target node");
        }
      }

      // 2/ the clique created by the first eliminated node among targets is
      // the one we are looking for
      const std::vector< NodeId >& JT_elim_order =
         __triangulation->eliminationOrder();
      NodeProperty< int > elim_order(Size(JT_elim_order.size()));
      for (std::size_t i = std::size_t(0), size = JT_elim_order.size(); i < size;
           ++i)
        elim_order.insert(JT_elim_order[i], (int)i);
      NodeId first_eliminated_node = *(targets.begin());
      int elim_number = elim_order[first_eliminated_node];
      for (const auto node : targets) {
        if (elim_order[node] < elim_number) {
          elim_number = elim_order[node];
          first_eliminated_node = node;
        }
      }
      clique_of_set =
         __triangulation->createdJunctionTreeClique(first_eliminated_node);

      // 3/ check that clique_of_set contains all the nodes in the target
      const NodeSet& clique_nodes = __JT->clique(clique_of_set);
      for (const auto node : targets) {
        if (!clique_nodes.contains(node)) {
          GUM_ERROR(UndefinedElement, set << " is not a joint target");
        }
      }

      // add the discovered clique to __joint_target_to_clique
      __joint_target_to_clique.insert(set, clique_of_set);
    }

    // now perform a collect on the clique
    __collectMessage(clique_of_set, clique_of_set);

    // now we just need to create the product of the potentials of the clique
    // containing set with the messages received by this clique and
    // marginalize out all variables except set
    __PotentialSet pot_list;
    if (__clique_ss_potential.exists(clique_of_set))
      pot_list.insert(__clique_ss_potential[clique_of_set]);

    // add the messages sent by adjacent nodes to clique_of_set
    for (const auto other : __JT->neighbours(clique_of_set))
      pot_list += __separator_potentials[Arc(other, clique_of_set)];

    // get the set of variables that need to be removed from the potentials
    const NodeSet& nodes = __JT->clique(clique_of_set);
    Set< const DiscreteVariable* > del_vars(nodes.size());
    Set< const DiscreteVariable* > kept_vars(targets.size());
    const auto& bn = this->BN();
    for (const auto node : nodes) {
      if (!targets.contains(node)) {
        del_vars.insert(&(bn.variable(node)));
      } else {
        kept_vars.insert(&(bn.variable(node)));
      }
    }

    // pot_list now contains all the potentials to multiply and marginalize
    // => combine the messages
    __PotentialSet new_pot_list = __marginalizeOut(pot_list, del_vars, kept_vars);
    Potential< GUM_SCALAR >* joint = nullptr;

    if ((new_pot_list.size() == 1) && hard_ev_nodes.empty()) {
      joint = const_cast< Potential< GUM_SCALAR >* >(*(new_pot_list.begin()));
      // if pot already existed, create a copy, so that we can put it into
      // the __target_posteriors property
      if (pot_list.exists(joint)) {
        joint = new Potential< GUM_SCALAR >(*joint);
      } else {
        // remove the joint from new_pot_list so that it will not be
        // removed just after the next else block
        new_pot_list.clear();
      }
    } else {
      // combine all the potentials in new_pot_list with all the hard evidence
      // of the nodes in set
      __PotentialSet new_new_pot_list = new_pot_list;
      for (const auto node : hard_ev_nodes) {
        new_new_pot_list.insert(evidence[node]);
      }
      MultiDimCombinationDefault< GUM_SCALAR, Potential > fast_combination(
         __combination_op);
      joint = fast_combination.combine(new_new_pot_list);
    }

    // remove the potentials that were created in new_pot_list
    for (auto pot : new_pot_list)
      if (!pot_list.exists(pot)) delete pot;

    // check that the joint posterior is different from a 0 vector: this would
    // indicate that some hard evidence are not compatible
    bool nonzero_found = false;
    for (Instantiation inst(*joint); !inst.end(); ++inst) {
      if ((*joint)[inst]) {
        nonzero_found = true;
        break;
      }
    }
    if (!nonzero_found) {
      // remove joint from memory to avoid memory leaks
      delete joint;
      GUM_ERROR(IncompatibleEvidence,
                "some evidence entered into the Bayes "
                "net are incompatible (their joint proba = 0)");
    }

    return joint;
  }


  /// returns the posterior of a declared target set
  template < typename GUM_SCALAR >
  const Potential< GUM_SCALAR >&
     ShaferShenoyInference< GUM_SCALAR >::_jointPosterior(const NodeSet& set) {
    // check if we have already computed the posterior
    if (__joint_target_posteriors.exists(set)) {
      return *(__joint_target_posteriors[set]);
    }

    // compute the joint posterior and normalize
    auto joint = _unnormalizedJointPosterior(set);
    joint->normalize();
    __joint_target_posteriors.insert(set, joint);

    return *joint;
  }
1581 
1582 
1584  template < typename GUM_SCALAR >
1585  const Potential< GUM_SCALAR >&
1587  const NodeSet& wanted_target, const NodeSet& declared_target) {
1588  // check if we have already computed the posterior of wanted_target
1589  if (__joint_target_posteriors.exists(wanted_target))
1590  return *(__joint_target_posteriors[wanted_target]);
1591 
1592  // here, we will have to compute the posterior of declared_target and
1593  // marginalize out all the variables that do not belong to wanted_target
1594 
1595  // check if we have already computed the posterior of declared_target
1596  if (!__joint_target_posteriors.exists(declared_target)) {
1597  _jointPosterior(declared_target);
1598  }
1599 
1600  // marginalize out all the variables that do not belong to wanted_target
1601  const auto& bn = this->BN();
1602  Set< const DiscreteVariable* > del_vars;
1603  for (const auto node : declared_target)
1604  if (!wanted_target.contains(node)) del_vars.insert(&(bn.variable(node)));
1605  auto pot = new Potential< GUM_SCALAR >(
1606  __joint_target_posteriors[declared_target]->margSumOut(del_vars));
1607 
1608  // save the result into the cache
1609  __joint_target_posteriors.insert(wanted_target, pot);
1610 
1611  return *pot;
1612  }
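// Editor's note (hedged): the margSumOut call above is the same operation a
// user could apply to any posterior; e.g., with the hypothetical p_ab from
// the previous sketch:
//
//   gum::Set< const gum::DiscreteVariable* > del;
//   del.insert(&bn.variableFromName("B"));
//   gum::Potential< double > p_a = p_ab.margSumOut(del);   // = P(A | evidence)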
1613 
1614 
1615  template < typename GUM_SCALAR >
1616  GUM_SCALAR ShaferShenoyInference< GUM_SCALAR >::evidenceProbability() {
1617  // perform inference in each connected component
1618  this->makeInference();
1619 
1620  // for each connected component, select a variable X and compute the
1621  // joint probability of X and the evidence e. Then marginalize X out to
1622  // get p(e) in this connected component. Finally, multiply all the p(e)
1623  // obtained with the elements in __constants. The result is the
1624  // probability of the evidence
1625 
1626  GUM_SCALAR prob_ev = 1;
1627  for (const auto root : __roots) {
1628  // get a node in the clique
1629  NodeId node = *(__JT->clique(root).begin());
1630  Potential< GUM_SCALAR >* tmp = _unnormalizedJointPosterior(node);
1631  GUM_SCALAR sum = 0;
1632  for (Instantiation iter(*tmp); !iter.end(); ++iter)
1633  sum += tmp->get(iter);
1634  prob_ev *= sum;
1635  delete tmp;
1636  }
1637 
1638  for (const auto& projected_cpt : __constants)
1639  prob_ev *= projected_cpt.second;
1640 
1641  return prob_ev;
1642  }
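// Editor's usage sketch (hedged; `bn` and variable "A" are hypothetical):
//
//   gum::ShaferShenoyInference< double > ie(&bn);
//   ie.addEvidence(bn.idFromName("A"), 0);   // hard evidence: A = its first label
//   ie.makeInference();
//   double p_e = ie.evidenceProbability();   // P(evidence), computed as above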
1643 
1644 
1645 } /* namespace gum */
1646 
1647 #endif // DOXYGEN_SHOULD_SKIP_THIS