/**
 * Copyright 2005-2019 Pierre-Henri WUILLEMIN et Christophe GONZALES (LIP6)
 * {prenom.nom}_at_lip6.fr
 *
 * ShaferShenoyInference_tpl.h: implementation of Shafer-Shenoy propagation
 * for inference in Bayesian networks.
 */
#ifndef DOXYGEN_SHOULD_SKIP_THIS

#  include <agrum/BN/algorithms/barrenNodesFinder.h>
#  include <agrum/graphs/algorithms/binaryJoinTreeConverterDefault.h>
#  include <agrum/multidim/instantiation.h>
#  include <agrum/multidim/utils/operators/multiDimCombineAndProjectDefault.h>
#  include <agrum/multidim/utils/operators/multiDimProjection.h>

namespace gum {
  // default constructor
  template < typename GUM_SCALAR >
  INLINE ShaferShenoyInference< GUM_SCALAR >::ShaferShenoyInference(
     const IBayesNet< GUM_SCALAR >* BN,
     FindBarrenNodesType            barren_type,
     bool                           use_binary_join_tree) :
      JointTargetedInference< GUM_SCALAR >(BN),
      EvidenceInference< GUM_SCALAR >(BN),
      __use_binary_join_tree(use_binary_join_tree) {
    // sets the barren nodes finding algorithm
    setFindBarrenNodesType(barren_type);

    // create a default triangulation (the user can change it afterwards)
    __triangulation = new DefaultTriangulation;

    // for debugging purposes
    GUM_CONSTRUCTOR(ShaferShenoyInference);
  }

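  // A minimal usage sketch (illustrative only, not part of the original
  // file): assuming a previously filled BayesNet< double > named bn, the
  // engine is typically created and queried as follows; the variable names
  // "A" and "B" are placeholders.
  //
  //   gum::ShaferShenoyInference< double > ie(&bn);
  //   ie.addEvidence(bn.idFromName("A"), 0);   // hard evidence: A = 0
  //   ie.makeInference();
  //   std::cout << ie.posterior(bn.idFromName("B")) << std::endl;
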
  // destructor
  template < typename GUM_SCALAR >
  INLINE ShaferShenoyInference< GUM_SCALAR >::~ShaferShenoyInference() {
    // remove all the potentials created during the last message passing
    for (const auto& pots : __created_potentials)
      for (const auto pot : pots.second)
        delete pot;

    // remove the potentials created after removing the nodes that received
    // hard evidence
    for (const auto& pot : __hard_ev_projected_CPTs)
      delete pot.second;

    // remove all the potentials in __clique_ss_potential that do not belong
    // to __clique_potentials: in this case, those potentials have been
    // created by combination of the corresponding list of potentials in
    // __clique_potentials. In other words, the size of this list is strictly
    // greater than 1.
    for (auto pot : __clique_ss_potential) {
      if (__clique_potentials[pot.first].size() > 1) delete pot.second;
    }

    // remove all the posteriors computed
    for (const auto& pot : __target_posteriors)
      delete pot.second;
    for (const auto& pot : __joint_target_posteriors)
      delete pot.second;

    // remove the junction tree and the triangulation algorithm
    if (__JT != nullptr) delete __JT;
    if (__junctionTree != nullptr) delete __junctionTree;
    delete __triangulation;

    // for debugging purposes
    GUM_DESTRUCTOR(ShaferShenoyInference);
  }

  // sets a new triangulation algorithm
  template < typename GUM_SCALAR >
  void ShaferShenoyInference< GUM_SCALAR >::setTriangulation(
     const Triangulation& new_triangulation) {
    delete __triangulation;
    __triangulation = new_triangulation.newFactory();
    __is_new_jt_needed = true;
    this->_setOutdatedBNStructureState();
  }

  // returns the join tree used for the inference
  template < typename GUM_SCALAR >
  INLINE const JoinTree* ShaferShenoyInference< GUM_SCALAR >::joinTree() {
    __createNewJT();

    return __JT;
  }

  // returns the junction tree used for the inference
  template < typename GUM_SCALAR >
  INLINE const JunctionTree*
     ShaferShenoyInference< GUM_SCALAR >::junctionTree() {
    __createNewJT();

    return __junctionTree;
  }

  // sets the operator for performing the projections
  template < typename GUM_SCALAR >
  INLINE void ShaferShenoyInference< GUM_SCALAR >::__setProjectionFunction(
     Potential< GUM_SCALAR >* (*proj)(const Potential< GUM_SCALAR >&,
                                      const Set< const DiscreteVariable* >&)) {
    __projection_op = proj;
  }


  // sets the operator for performing the combinations
  template < typename GUM_SCALAR >
  INLINE void ShaferShenoyInference< GUM_SCALAR >::__setCombinationFunction(
     Potential< GUM_SCALAR >* (*comb)(const Potential< GUM_SCALAR >&,
                                      const Potential< GUM_SCALAR >&)) {
    __combination_op = comb;
  }

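  // Illustrative sketch (not in the original file): a user-defined
  // combination function matching the signature expected by
  // __setCombinationFunction; it simply multiplies two potentials through
  // Potential's operator*. The name multPotential is a placeholder.
  //
  //   template < typename GUM_SCALAR >
  //   gum::Potential< GUM_SCALAR >* multPotential(
  //      const gum::Potential< GUM_SCALAR >& t1,
  //      const gum::Potential< GUM_SCALAR >& t2) {
  //     return new gum::Potential< GUM_SCALAR >(t1 * t2);
  //   }
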
  // invalidate all messages, posteriors and created potentials
  template < typename GUM_SCALAR >
  void ShaferShenoyInference< GUM_SCALAR >::__invalidateAllMessages() {
    // remove all the messages computed
    for (auto& potset : __separator_potentials)
      potset.second.clear();
    for (auto& mess_computed : __messages_computed)
      mess_computed.second = false;

    // remove all the created potentials
    for (const auto& potset : __created_potentials)
      for (const auto pot : potset.second)
        delete pot;

    // remove all the posteriors
    for (const auto& pot : __target_posteriors)
      delete pot.second;
    for (const auto& pot : __joint_target_posteriors)
      delete pot.second;

    // indicate that new messages need be computed
    if (this->isInferenceReady() || this->isDone())
      this->_setOutdatedBNPotentialsState();
  }

  // sets how we determine barren nodes
  template < typename GUM_SCALAR >
  void ShaferShenoyInference< GUM_SCALAR >::setFindBarrenNodesType(
     FindBarrenNodesType type) {
    if (type != __barren_nodes_type) {
      // WARNING: if a new type is added here, method __createJT should
      // certainly be updated as well, in particular its step 2.
      switch (type) {
        case FindBarrenNodesType::FIND_BARREN_NODES:
        case FindBarrenNodesType::FIND_NO_BARREN_NODES: break;

        default:
          GUM_ERROR(InvalidArgument,
                    "setFindBarrenNodesType for type "
                       << (unsigned int)type << " is not implemented yet");
      }

      __barren_nodes_type = type;

      // potentially, we may need to reconstruct a junction tree
      this->_setOutdatedBNStructureState();
    }
  }

  // fired when a new evidence is inserted
  template < typename GUM_SCALAR >
  INLINE void
     ShaferShenoyInference< GUM_SCALAR >::_onEvidenceAdded(const NodeId id,
                                                           bool isHardEvidence) {
    // if we have a new hard evidence, this modifies the undigraph over which
    // the join tree is created. This is also the case if id is not a node of
    // the undigraph
    if (isHardEvidence || !__graph.exists(id))
      __is_new_jt_needed = true;
    else {
      try {
        __evidence_changes.insert(id, EvidenceChangeType::EVIDENCE_ADDED);
      } catch (DuplicateElement&) {
        // here, the evidence change already existed. This necessarily means
        // that the current saved change is an EVIDENCE_ERASED. So if we
        // erased the evidence and added some again, this corresponds to an
        // EVIDENCE_MODIFIED
        __evidence_changes[id] = EvidenceChangeType::EVIDENCE_MODIFIED;
      }
    }
  }

  // fired when an evidence is removed
  template < typename GUM_SCALAR >
  INLINE void
     ShaferShenoyInference< GUM_SCALAR >::_onEvidenceErased(const NodeId id,
                                                            bool isHardEvidence) {
    // if we delete a hard evidence, this modifies the undigraph over which
    // the join tree is created.
    if (isHardEvidence)
      __is_new_jt_needed = true;
    else {
      try {
        __evidence_changes.insert(id, EvidenceChangeType::EVIDENCE_ERASED);
      } catch (DuplicateElement&) {
        // here, the evidence change already existed and it is necessarily an
        // EVIDENCE_ADDED or an EVIDENCE_MODIFIED. So, if the evidence has
        // been added and is now erased, this is similar to not having created
        // it. If the evidence was only modified, it already existed in the
        // last inference and we should now indicate that it has been removed.
        if (__evidence_changes[id] == EvidenceChangeType::EVIDENCE_ADDED)
          __evidence_changes.erase(id);
        else
          __evidence_changes[id] = EvidenceChangeType::EVIDENCE_ERASED;
      }
    }
  }

  // fired when all the evidence are erased
  template < typename GUM_SCALAR >
  void ShaferShenoyInference< GUM_SCALAR >::_onAllEvidenceErased(
     bool has_hard_evidence) {
    if (has_hard_evidence || !this->hardEvidenceNodes().empty())
      __is_new_jt_needed = true;
    else {
      for (const auto node : this->softEvidenceNodes()) {
        try {
          __evidence_changes.insert(node, EvidenceChangeType::EVIDENCE_ERASED);
        } catch (DuplicateElement&) {
          // here, the evidence change already existed and it is necessarily an
          // EVIDENCE_ADDED or an EVIDENCE_MODIFIED. So, if the evidence has
          // been added and is now erased, this is similar to not having created
          // it. If the evidence was only modified, it already existed in the
          // last inference and we should now indicate that it has been removed.
          if (__evidence_changes[node] == EvidenceChangeType::EVIDENCE_ADDED)
            __evidence_changes.erase(node);
          else
            __evidence_changes[node] = EvidenceChangeType::EVIDENCE_ERASED;
        }
      }
    }
  }

  // fired when an evidence is changed
  template < typename GUM_SCALAR >
  INLINE void ShaferShenoyInference< GUM_SCALAR >::_onEvidenceChanged(
     const NodeId id, bool hasChangedSoftHard) {
    if (hasChangedSoftHard)
      __is_new_jt_needed = true;
    else {
      try {
        __evidence_changes.insert(id, EvidenceChangeType::EVIDENCE_MODIFIED);
      } catch (DuplicateElement&) {
        // here, the evidence change already existed and it is necessarily an
        // EVIDENCE_ADDED. So we should keep this state to indicate that this
        // evidence is new w.r.t. the last inference
      }
    }
  }

  // fired after a new single target is inserted
  template < typename GUM_SCALAR >
  INLINE void
     ShaferShenoyInference< GUM_SCALAR >::_onMarginalTargetAdded(const NodeId id) {
  }


  // fired before a single target is removed
  template < typename GUM_SCALAR >
  INLINE void ShaferShenoyInference< GUM_SCALAR >::_onMarginalTargetErased(
     const NodeId id) {}


  // fired after a new joint target is inserted
  template < typename GUM_SCALAR >
  INLINE void
     ShaferShenoyInference< GUM_SCALAR >::_onJointTargetAdded(const NodeSet& set) {
  }


  // fired before a joint target is removed
  template < typename GUM_SCALAR >
  INLINE void ShaferShenoyInference< GUM_SCALAR >::_onJointTargetErased(
     const NodeSet& set) {}


  // fired after all the nodes of the BN are added as single targets
  template < typename GUM_SCALAR >
  INLINE void ShaferShenoyInference< GUM_SCALAR >::_onAllMarginalTargetsAdded() {}


  // fired before all the single targets are removed
  template < typename GUM_SCALAR >
  INLINE void ShaferShenoyInference< GUM_SCALAR >::_onAllMarginalTargetsErased() {}

  // fired after a new Bayes net has been assigned to the inference engine
  template < typename GUM_SCALAR >
  INLINE void ShaferShenoyInference< GUM_SCALAR >::_onBayesNetChanged(
     const IBayesNet< GUM_SCALAR >* bn) {}

  // fired before all the joint targets are removed
  template < typename GUM_SCALAR >
  INLINE void ShaferShenoyInference< GUM_SCALAR >::_onAllJointTargetsErased() {}


  // fired before all the single and joint targets are removed
  template < typename GUM_SCALAR >
  INLINE void ShaferShenoyInference< GUM_SCALAR >::_onAllTargetsErased() {}

  // check whether a new junction tree is really needed for the next inference
  template < typename GUM_SCALAR >
  bool ShaferShenoyInference< GUM_SCALAR >::__isNewJTNeeded() const {
    // if we do not have a JT or if __is_new_jt_needed is set to true, then
    // we know that we need to create a new join tree
    if ((__JT == nullptr) || __is_new_jt_needed) return true;

    // if some targets do not belong to the join tree and, consequently, to
    // the undigraph that was used to construct the join tree, then we need
    // to create a new JT. This situation may occur if we constructed the
    // join tree after pruning irrelevant/barren nodes from the BN.
    // However, note that the nodes that received hard evidence do not belong
    // to the graph and, therefore, should not be taken into account
    const auto& hard_ev_nodes = this->hardEvidenceNodes();
    for (const auto node : this->targets()) {
      if (!__graph.exists(node) && !hard_ev_nodes.exists(node)) return true;
    }
    for (const auto& joint_target : this->jointTargets()) {
      // here, we need to check that at least one clique contains all the
      // nodes of the joint target.
      bool containing_clique_found = false;
      for (const auto node : joint_target) {
        bool found = true;
        try {
          const NodeSet& clique = __JT->clique(__node_to_clique[node]);
          for (const auto xnode : joint_target) {
            if (!clique.contains(xnode) && !hard_ev_nodes.exists(xnode)) {
              found = false;
              break;
            }
          }
        } catch (NotFound&) { found = false; }

        if (found) {
          containing_clique_found = true;
          break;
        }
      }

      if (!containing_clique_found) return true;
    }

    // if some new evidence have been added on nodes that do not belong
    // to __graph, then we potentially have to reconstruct the join tree
    for (const auto& change : __evidence_changes) {
      if ((change.second == EvidenceChangeType::EVIDENCE_ADDED)
          && !__graph.exists(change.first))
        return true;
    }

    // here, the current JT is exactly what we need for the next inference
    return false;
  }

  // create a new junction tree as well as its related data structures
  template < typename GUM_SCALAR >
  void ShaferShenoyInference< GUM_SCALAR >::__createNewJT() {
    // to create the JT, we first create the moral graph of the BN in the
    // following way in order to take into account the barren nodes and the
    // nodes that received evidence:
    // 1/ we create an undirected graph containing only the nodes and no edge
    // 2/ if we take into account barren nodes, remove them from the graph
    // 3/ add edges so that each node and its parents in the BN form a clique
    // 4/ add edges so that set targets are cliques of the moral graph
    // 5/ remove the nodes that received hard evidence (by step 3/, their
    //    parents are linked by edges, which is necessary for inference)
    //
    // At the end of step 5/, we have our moral graph and we can triangulate it
    // to get the new junction tree

    // 1/ create an undirected graph containing only the nodes and no edge
    const auto& bn = this->BN();
    __graph.clear();
    for (auto node : bn.dag())
      __graph.addNodeWithId(node);

    // 2/ if we wish to exploit barren nodes, we shall remove them from the
    // BN. To do so: we identify all the nodes that are not targets and have
    // received no evidence and such that their descendants are neither
    // targets nor evidence nodes. Such nodes can be safely discarded from
    // the BN without altering the inference output
    if (__barren_nodes_type == FindBarrenNodesType::FIND_BARREN_NODES) {
      // identify the barren nodes
      NodeSet target_nodes = this->targets();
      for (const auto& nodeset : this->jointTargets()) {
        target_nodes += nodeset;
      }

      // if all the nodes are targets, there can be no barren node
      if (target_nodes.size() != bn.size()) {
        BarrenNodesFinder finder(&(bn.dag()));
        finder.setTargets(&target_nodes);

        NodeSet evidence_nodes;
        for (const auto& pair : this->evidence()) {
          evidence_nodes.insert(pair.first);
        }
        finder.setEvidence(&evidence_nodes);

        NodeSet barren_nodes = finder.barrenNodes();

        // remove the barren nodes from the moral graph
        for (const auto node : barren_nodes) {
          __graph.eraseNode(node);
        }
      }
    }

    // 3/ add edges so that each node and its parents in the BN form a clique
    for (const auto node : __graph) {
      const NodeSet& parents = bn.parents(node);
      for (auto iter1 = parents.cbegin(); iter1 != parents.cend(); ++iter1) {
        __graph.addEdge(*iter1, node);
        auto iter2 = iter1;
        for (++iter2; iter2 != parents.cend(); ++iter2) {
          __graph.addEdge(*iter1, *iter2);
        }
      }
    }

    // 4/ if there exist some joint targets, we shall add new edges into the
    // moral graph in order to ensure that there exists a clique containing
    // each joint
    for (const auto& nodeset : this->jointTargets()) {
      for (auto iter1 = nodeset.cbegin(); iter1 != nodeset.cend(); ++iter1) {
        auto iter2 = iter1;
        for (++iter2; iter2 != nodeset.cend(); ++iter2) {
          __graph.addEdge(*iter1, *iter2);
        }
      }
    }

    // 5/ remove all the nodes that received hard evidence
    __hard_ev_nodes = this->hardEvidenceNodes();
    for (const auto node : __hard_ev_nodes) {
      __graph.eraseNode(node);
    }


    // now, we can compute the new junction tree. To speed-up computations
    // (essentially, those of a distribution phase), we construct from this
    // junction tree a binary join tree
    if (__JT != nullptr) delete __JT;
    if (__junctionTree != nullptr) delete __junctionTree;

    __triangulation->setGraph(&__graph, &(this->domainSizes()));
    const JunctionTree& triang_jt = __triangulation->junctionTree();
    if (__use_binary_join_tree) {
      BinaryJoinTreeConverterDefault bjt_converter;
      NodeSet emptyset;
      __JT = new CliqueGraph(
         bjt_converter.convert(triang_jt, this->domainSizes(), emptyset));
    } else {
      __JT = new CliqueGraph(triang_jt);
    }
    __junctionTree = new CliqueGraph(triang_jt);


    // indicate, for each node of the moral graph, a clique in __JT that can
    // contain its conditional probability table
    __node_to_clique.clear();
    const std::vector< NodeId >& JT_elim_order =
       __triangulation->eliminationOrder();
    NodeProperty< int > elim_order(Size(JT_elim_order.size()));
    for (std::size_t i = std::size_t(0), size = JT_elim_order.size(); i < size;
         ++i)
      elim_order.insert(JT_elim_order[i], (int)i);
    const DAG& dag = bn.dag();
    for (const auto node : __graph) {
      // get the variables in the potential of node (and its parents)
      NodeId first_eliminated_node = node;
      int    elim_number = elim_order[first_eliminated_node];

      for (const auto parent : dag.parents(node)) {
        if (__graph.existsNode(parent) && (elim_order[parent] < elim_number)) {
          elim_number = elim_order[parent];
          first_eliminated_node = parent;
        }
      }

      // first_eliminated_node contains the first var (node or one of its
      // parents) eliminated => the clique created during its elimination
      // contains node and all of its parents => it can contain the potential
      // assigned to the node in the BN
      __node_to_clique.insert(
         node, __triangulation->createdJunctionTreeClique(first_eliminated_node));
    }

    // do the same for the nodes that received evidence. Here, we only store
    // the nodes for which at least one parent belongs to __graph (otherwise
    // their CPT is just a constant real number).
    for (const auto node : __hard_ev_nodes) {
      // get the set of parents of the node that belong to __graph
      NodeSet pars(dag.parents(node).size());
      for (const auto par : dag.parents(node))
        if (__graph.exists(par)) pars.insert(par);

      if (!pars.empty()) {
        NodeId first_eliminated_node = *(pars.begin());
        int    elim_number = elim_order[first_eliminated_node];

        for (const auto parent : pars) {
          if (elim_order[parent] < elim_number) {
            elim_number = elim_order[parent];
            first_eliminated_node = parent;
          }
        }

        // first_eliminated_node contains the first var (node or one of its
        // parents) eliminated => the clique created during its elimination
        // contains node and all of its parents => it can contain the potential
        // assigned to the node in the BN
        __node_to_clique.insert(
           node,
           __triangulation->createdJunctionTreeClique(first_eliminated_node));
      }
    }

    // indicate for each joint_target a clique that contains it
    __joint_target_to_clique.clear();
    for (const auto& set : this->jointTargets()) {
      // remove from set all the nodes that received hard evidence (since they
      // do not belong to the join tree)
      NodeSet nodeset = set;
      for (const auto node : __hard_ev_nodes)
        if (nodeset.contains(node)) nodeset.erase(node);

      if (!nodeset.empty()) {
        // the clique we are looking for is the one that was created when
        // the first element of nodeset was eliminated
        NodeId first_eliminated_node = *(nodeset.begin());
        int    elim_number = elim_order[first_eliminated_node];
        for (const auto node : nodeset) {
          if (elim_order[node] < elim_number) {
            elim_number = elim_order[node];
            first_eliminated_node = node;
          }
        }

        __joint_target_to_clique.insert(
           set, __triangulation->createdJunctionTreeClique(first_eliminated_node));
      }
    }


    // compute the roots of __JT's connected components
    __computeJoinTreeRoots();

    // remove all the Shafer-Shenoy potentials stored into the cliques
    for (const auto& xpot : __clique_ss_potential) {
      if (__clique_potentials[xpot.first].size() > 1) delete xpot.second;
    }

    // create empty potential lists into the cliques of the joint tree as well
    // as empty lists of evidence
    __PotentialSet empty_set;
    __clique_potentials.clear();
    __node_to_soft_evidence.clear();
    for (const auto node : *__JT) {
      __clique_potentials.insert(node, empty_set);
    }

    // remove all the potentials created during the last inference
    for (auto& potlist : __created_potentials)
      for (auto pot : potlist.second)
        delete pot;
    __created_potentials.clear();

    // remove all the potentials created to take into account hard evidence
    // during the last inference
    for (auto pot_pair : __hard_ev_projected_CPTs)
      delete pot_pair.second;
    __hard_ev_projected_CPTs.clear();

    // remove all the constants created due to projections of CPTs that were
    // defined over only hard evidence nodes
    __constants.clear();

    // create empty lists of potentials for the messages and indicate that no
    // message has been computed yet
    __separator_potentials.clear();
    __messages_computed.clear();
    for (const auto& edge : __JT->edges()) {
      const Arc arc1(edge.first(), edge.second());
      __separator_potentials.insert(arc1, empty_set);
      __messages_computed.insert(arc1, false);
      const Arc arc2(Arc(edge.second(), edge.first()));
      __separator_potentials.insert(arc2, empty_set);
      __messages_computed.insert(arc2, false);
    }

    // remove all the posteriors computed so far
    for (const auto& pot : __target_posteriors)
      delete pot.second;
    __target_posteriors.clear();
    for (const auto& pot : __joint_target_posteriors)
      delete pot.second;
    __joint_target_posteriors.clear();


    // put all the CPTs of the Bayes net nodes into the cliques
    // here, beware: all the potentials that are defined over some nodes
    // including hard evidence must be projected so that these nodes are
    // removed from the potential
    const auto& evidence = this->evidence();
    const auto& hard_evidence = this->hardEvidence();
    for (const auto node : dag) {
      if (__graph.exists(node) || __hard_ev_nodes.contains(node)) {
        const Potential< GUM_SCALAR >& cpt = bn.cpt(node);

        // get the list of nodes with hard evidence in cpt
        NodeSet hard_nodes;
        const auto& variables = cpt.variablesSequence();
        for (const auto var : variables) {
          NodeId xnode = bn.nodeId(*var);
          if (__hard_ev_nodes.contains(xnode)) hard_nodes.insert(xnode);
        }

        // if hard_nodes contains hard evidence nodes, perform a projection
        // and insert the result into the appropriate clique, else insert
        // directly cpt into the clique
        if (hard_nodes.empty()) {
          __clique_potentials[__node_to_clique[node]].insert(&cpt);
        } else {
          // marginalize out the hard evidence nodes: if the cpt is defined
          // only over nodes that received hard evidence, do not consider it
          // as a potential anymore but as a constant
          if (hard_nodes.size() == variables.size()) {
            Instantiation inst;
            const auto& vars = cpt.variablesSequence();
            for (auto var : vars)
              inst << *var;
            for (Size i = 0; i < hard_nodes.size(); ++i) {
              inst.chgVal(variables[i], hard_evidence[bn.nodeId(*(variables[i]))]);
            }
            __constants.insert(node, cpt[inst]);
          } else {
            // perform the projection with a combine and project instance
            Set< const DiscreteVariable* > hard_variables;
            __PotentialSet marg_cpt_set{&cpt};
            for (const auto xnode : hard_nodes) {
              marg_cpt_set.insert(evidence[xnode]);
              hard_variables.insert(&(bn.variable(xnode)));
            }

            // perform the combination of those potentials and their projection
            MultiDimCombineAndProjectDefault< GUM_SCALAR, Potential >
               combine_and_project(__combination_op, SSNewprojPotential);
            __PotentialSet new_cpt_list =
               combine_and_project.combineAndProject(marg_cpt_set, hard_variables);

            // there should be only one potential in new_cpt_list
            if (new_cpt_list.size() != 1) {
              // remove the CPT created to avoid memory leaks
              for (auto pot : new_cpt_list) {
                if (!marg_cpt_set.contains(pot)) delete pot;
              }
              GUM_ERROR(FatalError,
                        "the projection of a potential containing "
                           << "hard evidence is empty!");
            }
            const Potential< GUM_SCALAR >* projected_cpt = *(new_cpt_list.begin());
            __clique_potentials[__node_to_clique[node]].insert(projected_cpt);
            __hard_ev_projected_CPTs.insert(node, projected_cpt);
          }
        }
      }
    }

    // we shall now add all the potentials of the soft evidence
    for (const auto node : this->softEvidenceNodes()) {
      __node_to_soft_evidence.insert(node, evidence[node]);
      __clique_potentials[__node_to_clique[node]].insert(evidence[node]);
    }

    // now, in __clique_potentials, for each clique, we have the list of
    // potentials that must be combined in order to produce the Shafer-Shenoy
    // potential stored into the clique. So, perform this combination and
    // store the result in __clique_ss_potential
    __clique_ss_potential.clear();
    MultiDimCombinationDefault< GUM_SCALAR, Potential > fast_combination(
       __combination_op);
    for (const auto& xpotset : __clique_potentials) {
      const auto& potset = xpotset.second;
      if (potset.size() > 0) {
        // here, there will be an entry in __clique_ss_potential
        // If there is only one element in potset, this element shall be
        // stored into __clique_ss_potential, else all the elements of potset
        // shall be combined and their result shall be stored
        if (potset.size() == 1) {
          __clique_ss_potential.insert(xpotset.first, *(potset.cbegin()));
        } else {
          auto joint = fast_combination.combine(potset);
          __clique_ss_potential.insert(xpotset.first, joint);
        }
      }
    }

    // indicate that the data structures are up to date.
    __evidence_changes.clear();
    __is_new_jt_needed = false;
  }

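  // Worked example of __createNewJT's moralization steps (illustrative, not
  // part of the original file): on the network A -> C <- B with hard
  // evidence on B and single target C, step 1/ creates the node set
  // {A, B, C}; step 3/ adds the edges A-C and B-C plus the moral edge A-B,
  // so that C and its parents form a clique; step 5/ then erases B, leaving
  // the graph over {A, C} to be triangulated, while the CPT of C, which
  // mentions B, is projected onto {A, C} by the code above.
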
  // prepare the inference structures w.r.t. new targets and evidence
  template < typename GUM_SCALAR >
  INLINE void ShaferShenoyInference< GUM_SCALAR >::_updateOutdatedBNStructure() {
    // check if a new JT is really needed. If so, create it
    if (__isNewJTNeeded()) {
      __createNewJT();
    } else {
      // here, we can answer the next queries without reconstructing all the
      // junction tree. All we need to do is to indicate that we should
      // update the potentials and messages for these queries
      _updateOutdatedBNPotentials();
    }
  }

  // invalidate all the messages sent from a given clique
  template < typename GUM_SCALAR >
  void ShaferShenoyInference< GUM_SCALAR >::__diffuseMessageInvalidations(
     NodeId from_id, NodeId to_id, NodeSet& invalidated_cliques) {
    // invalidate the current clique
    invalidated_cliques.insert(to_id);

    // invalidate the current arc
    const Arc arc(from_id, to_id);
    bool&     message_computed = __messages_computed[arc];
    if (message_computed) {
      message_computed = false;
      __separator_potentials[arc].clear();
      if (__created_potentials.exists(arc)) {
        auto& arc_created_potentials = __created_potentials[arc];
        for (auto pot : arc_created_potentials)
          delete pot;
        arc_created_potentials.clear();
      }

      // go on with the diffusion
      for (const auto node_id : __JT->neighbours(to_id)) {
        if (node_id != from_id)
          __diffuseMessageInvalidations(to_id, node_id, invalidated_cliques);
      }
    }
  }

  // update the potentials stored in the cliques and invalidate the outdated
  // messages
  template < typename GUM_SCALAR >
  void ShaferShenoyInference< GUM_SCALAR >::_updateOutdatedBNPotentials() {
    // for each clique, indicate whether the potential stored into
    // __clique_ss_potentials[clique] is the result of a combination. In this
    // case, it has been allocated by the combination and will need to be
    // deallocated if its clique has been invalidated
    NodeProperty< bool > ss_potential_to_deallocate(__clique_potentials.size());
    for (auto pot_iter = __clique_potentials.cbegin();
         pot_iter != __clique_potentials.cend();
         ++pot_iter) {
      ss_potential_to_deallocate.insert(pot_iter.key(),
                                        (pot_iter.val().size() > 1));
    }

    // compute the set of CPTs that were projected due to hard evidence and
    // whose hard evidence have changed, so that they need a new projection.
    // By the way, remove these CPTs since they are no longer needed.
    // Here only the values of the hard evidence can have changed (else a
    // fully new join tree would have been computed).
    // Note also that we know that the CPTs still contain some variable(s) after
    // the projection (else they should be constants)
    NodeSet hard_nodes_changed(__hard_ev_nodes.size());
    for (const auto node : __hard_ev_nodes)
      if (__evidence_changes.exists(node)) hard_nodes_changed.insert(node);

    NodeSet nodes_with_projected_CPTs_changed;
    const auto& bn = this->BN();
    for (auto pot_iter = __hard_ev_projected_CPTs.beginSafe();
         pot_iter != __hard_ev_projected_CPTs.endSafe();
         ++pot_iter) {
      for (const auto var : bn.cpt(pot_iter.key()).variablesSequence()) {
        if (hard_nodes_changed.contains(bn.nodeId(*var))) {
          nodes_with_projected_CPTs_changed.insert(pot_iter.key());
          delete pot_iter.val();
          __clique_potentials[__node_to_clique[pot_iter.key()]].erase(
             pot_iter.val());
          __hard_ev_projected_CPTs.erase(pot_iter);
          break;
        }
      }
    }


    // invalidate all the messages that are no longer correct: start from each
    // of the nodes whose soft evidence has changed and perform a diffusion from
    // the clique into which the soft evidence has been entered, indicating that
    // the messages spreading from this clique are now invalid. At the same time,
    // if there were potentials created on the arcs over which the messages were
    // sent, remove them from memory. For all the cliques that received some
    // projected CPT that should now be changed, do the same.
    NodeSet invalidated_cliques(__JT->size());
    for (const auto& pair : __evidence_changes) {
      if (__node_to_clique.exists(pair.first)) {
        const auto clique = __node_to_clique[pair.first];
        invalidated_cliques.insert(clique);
        for (const auto neighbor : __JT->neighbours(clique)) {
          __diffuseMessageInvalidations(clique, neighbor, invalidated_cliques);
        }
      }
    }

    // now, add to the set of invalidated cliques those that contain projected
    // CPTs that were changed.
    for (auto node : nodes_with_projected_CPTs_changed) {
      const auto clique = __node_to_clique[node];
      invalidated_cliques.insert(clique);
      for (const auto neighbor : __JT->neighbours(clique)) {
        __diffuseMessageInvalidations(clique, neighbor, invalidated_cliques);
      }
    }

    // now that we know the cliques whose set of potentials have been changed,
    // we can discard their corresponding Shafer-Shenoy potential
    for (const auto clique : invalidated_cliques) {
      if (__clique_ss_potential.exists(clique)
          && ss_potential_to_deallocate[clique]) {
        delete __clique_ss_potential[clique];
      }
    }


    // now we shall remove all the posteriors that belong to the
    // invalidated cliques. First, cope only with the nodes that did not
    // receive hard evidence since the other nodes do not belong to the
    // join tree
    for (auto iter = __target_posteriors.beginSafe();
         iter != __target_posteriors.endSafe();
         ++iter) {
      if (__graph.exists(iter.key())
          && (invalidated_cliques.exists(__node_to_clique[iter.key()]))) {
        delete iter.val();
        __target_posteriors.erase(iter);
      }
    }

    // now cope with the nodes that received hard evidence
    for (auto iter = __target_posteriors.beginSafe();
         iter != __target_posteriors.endSafe();
         ++iter) {
      if (hard_nodes_changed.contains(iter.key())) {
        delete iter.val();
        __target_posteriors.erase(iter);
      }
    }

    // finally, cope with joint targets
    for (auto iter = __joint_target_posteriors.beginSafe();
         iter != __joint_target_posteriors.endSafe();
         ++iter) {
      if (invalidated_cliques.exists(__joint_target_to_clique[iter.key()])) {
        delete iter.val();
        __joint_target_posteriors.erase(iter);
      }
    }


    // remove all the evidence that were entered into __node_to_soft_evidence
    // and __clique_potentials and add the new soft ones
    for (auto& pot_pair : __node_to_soft_evidence) {
      __clique_potentials[__node_to_clique[pot_pair.first]].erase(pot_pair.second);
    }
    __node_to_soft_evidence.clear();

    const auto& evidence = this->evidence();
    for (const auto node : this->softEvidenceNodes()) {
      __node_to_soft_evidence.insert(node, evidence[node]);
      __clique_potentials[__node_to_clique[node]].insert(evidence[node]);
    }


    // Now add the projections of the CPTs due to newly changed hard evidence:
    // if we are performing _updateOutdatedBNPotentials, this means that the
    // set of nodes that received hard evidence has not been changed, only
    // their instantiations can have been changed. So, if there is an entry
    // for node in __constants, there will still be such an entry after
    // performing the new projections. Idem for __hard_ev_projected_CPTs
    for (const auto node : nodes_with_projected_CPTs_changed) {
      // perform the projection with a combine and project instance
      const Potential< GUM_SCALAR >& cpt = bn.cpt(node);
      const auto& variables = cpt.variablesSequence();
      Set< const DiscreteVariable* > hard_variables;
      __PotentialSet marg_cpt_set{&cpt};
      for (const auto var : variables) {
        NodeId xnode = bn.nodeId(*var);
        if (__hard_ev_nodes.exists(xnode)) {
          marg_cpt_set.insert(evidence[xnode]);
          hard_variables.insert(var);
        }
      }

      // perform the combination of those potentials and their projection
      MultiDimCombineAndProjectDefault< GUM_SCALAR, Potential >
         combine_and_project(__combination_op, SSNewprojPotential);
      __PotentialSet new_cpt_list =
         combine_and_project.combineAndProject(marg_cpt_set, hard_variables);

      // there should be only one potential in new_cpt_list
      if (new_cpt_list.size() != 1) {
        // remove the CPT created to avoid memory leaks
        for (auto pot : new_cpt_list) {
          if (!marg_cpt_set.contains(pot)) delete pot;
        }
        GUM_ERROR(FatalError,
                  "the projection of a potential containing "
                     << "hard evidence is empty!");
      }
      const Potential< GUM_SCALAR >* projected_cpt = *(new_cpt_list.begin());
      __clique_potentials[__node_to_clique[node]].insert(projected_cpt);
      __hard_ev_projected_CPTs.insert(node, projected_cpt);
    }

    // here, the list of potentials stored in the invalidated cliques has
    // been updated. So, now, we can combine them to produce the Shafer-Shenoy
    // potential stored into the clique
    MultiDimCombinationDefault< GUM_SCALAR, Potential > fast_combination(
       __combination_op);
    for (const auto clique : invalidated_cliques) {
      const auto& potset = __clique_potentials[clique];

      if (potset.size() > 0) {
        // here, there will be an entry in __clique_ss_potential
        // If there is only one element in potset, this element shall be
        // stored into __clique_ss_potential, else all the elements of potset
        // shall be combined and their result shall be stored
        if (potset.size() == 1) {
          __clique_ss_potential[clique] = *(potset.cbegin());
        } else {
          auto joint = fast_combination.combine(potset);
          __clique_ss_potential[clique] = joint;
        }
      }
    }


    // update the constants
    const auto& hard_evidence = this->hardEvidence();
    for (auto& node_cst : __constants) {
      const Potential< GUM_SCALAR >& cpt = bn.cpt(node_cst.first);
      const auto& variables = cpt.variablesSequence();
      Instantiation inst;
      for (const auto var : variables)
        inst << *var;
      for (const auto var : variables) {
        inst.chgVal(var, hard_evidence[bn.nodeId(*var)]);
      }
      node_cst.second = cpt[inst];
    }

    // indicate that all changes have been performed
    __evidence_changes.clear();
  }

  // compute a root for each connected component of __JT
  template < typename GUM_SCALAR >
  void ShaferShenoyInference< GUM_SCALAR >::__computeJoinTreeRoots() {
    // get the set of cliques in which we can find the targets and joint_targets
    NodeSet clique_targets;
    for (const auto node : this->targets()) {
      try {
        clique_targets.insert(__node_to_clique[node]);
      } catch (Exception&) {}
    }
    for (const auto& set : this->jointTargets()) {
      try {
        clique_targets.insert(__joint_target_to_clique[set]);
      } catch (Exception&) {}
    }

    // put in a vector these cliques and their size
    std::vector< std::pair< NodeId, Size > > possible_roots(clique_targets.size());
    const auto& bn = this->BN();
    std::size_t i = 0;
    for (const auto clique_id : clique_targets) {
      const auto& clique = __JT->clique(clique_id);
      Size dom_size = 1;
      for (const auto node : clique) {
        dom_size *= bn.variable(node).domainSize();
      }
      possible_roots[i] = std::pair< NodeId, Size >(clique_id, dom_size);
      ++i;
    }

    // sort the cliques by increasing domain size
    std::sort(possible_roots.begin(),
              possible_roots.end(),
              [](const std::pair< NodeId, Size >& a,
                 const std::pair< NodeId, Size >& b) -> bool {
                return a.second < b.second;
              });

    // pick up the clique with the smallest size in each connected component
    NodeProperty< bool > marked = __JT->nodesProperty(false);
    std::function< void(NodeId, NodeId) > diffuse_marks =
       [&marked, &diffuse_marks, this](NodeId node, NodeId from) {
         if (!marked[node]) {
           marked[node] = true;
           for (const auto neigh : __JT->neighbours(node))
             if ((neigh != from) && !marked[neigh]) diffuse_marks(neigh, node);
         }
       };
    __roots.clear();
    for (const auto xclique : possible_roots) {
      NodeId clique = xclique.first;
      if (!marked[clique]) {
        __roots.insert(clique);
        diffuse_marks(clique, clique);
      }
    }
  }

  // performs the collect phase of the Shafer-Shenoy propagation
  template < typename GUM_SCALAR >
  void ShaferShenoyInference< GUM_SCALAR >::__collectMessage(NodeId id,
                                                             NodeId from) {
    for (const auto other : __JT->neighbours(id)) {
      if ((other != from) && !__messages_computed[Arc(other, id)])
        __collectMessage(other, id);
    }

    if ((id != from) && !__messages_computed[Arc(id, from)]) {
      __produceMessage(id, from);
    }
  }

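  // Illustrative trace (not part of the original file): on a join tree
  // shaped as a chain C1 - C2 - C3, calling __collectMessage(C2, C2) first
  // recurses on the neighbors C1 and C3, which produce the messages
  // C1 -> C2 and C3 -> C2; the top-level call has id == from, so no message
  // is produced for it, and C2 ends up with all the information needed to
  // answer queries about the variables it contains.
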
  // remove barren variables
  template < typename GUM_SCALAR >
  Set< const Potential< GUM_SCALAR >* >
     ShaferShenoyInference< GUM_SCALAR >::__removeBarrenVariables(
        __PotentialSet& pot_list, Set< const DiscreteVariable* >& del_vars) {
    // remove from del_vars the variables that received some evidence:
    // only those that did not receive evidence can be barren variables
    Set< const DiscreteVariable* > the_del_vars = del_vars;
    for (auto iter = the_del_vars.beginSafe(); iter != the_del_vars.endSafe();
         ++iter) {
      NodeId id = this->BN().nodeId(**iter);
      if (this->hardEvidenceNodes().exists(id)
          || this->softEvidenceNodes().exists(id)) {
        the_del_vars.erase(iter);
      }
    }

    // assign to each random variable the set of potentials that contain it
    HashTable< const DiscreteVariable*, __PotentialSet > var2pots;
    __PotentialSet empty_pot_set;
    for (const auto pot : pot_list) {
      const Sequence< const DiscreteVariable* >& vars = pot->variablesSequence();
      for (const auto var : vars) {
        if (the_del_vars.exists(var)) {
          if (!var2pots.exists(var)) { var2pots.insert(var, empty_pot_set); }
          var2pots[var].insert(pot);
        }
      }
    }

    // each variable with only one potential is a barren variable
    // assign to each potential with barren nodes its set of barren variables
    HashTable< const Potential< GUM_SCALAR >*, Set< const DiscreteVariable* > >
       pot2barren_var;
    Set< const DiscreteVariable* > empty_var_set;
    for (auto elt : var2pots) {
      if (elt.second.size() == 1) {   // here we have a barren variable
        const Potential< GUM_SCALAR >* pot = *(elt.second.begin());
        if (!pot2barren_var.exists(pot)) {
          pot2barren_var.insert(pot, empty_var_set);
        }
        pot2barren_var[pot].insert(elt.first);   // insert the barren variable
      }
    }

    // for each potential with barren variables, marginalize them.
    // if the potential has only barren variables, simply remove them from the
    // set of potentials, else just project the potential
    MultiDimProjection< GUM_SCALAR, Potential > projector(SSNewprojPotential);
    __PotentialSet projected_pots;
    for (auto elt : pot2barren_var) {
      // remove the current potential from pot_list as, anyway, we will change
      // it
      const Potential< GUM_SCALAR >* pot = elt.first;
      pot_list.erase(pot);

      // check whether we need to add a projected new potential or not (i.e.,
      // whether there exist non-barren variables or not)
      if (pot->variablesSequence().size() != elt.second.size()) {
        auto new_pot = projector.project(*pot, elt.second);
        pot_list.insert(new_pot);
        projected_pots.insert(new_pot);
      }
    }

    return projected_pots;
  }

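  // Illustrative example (not part of the original file): suppose pot_list
  // holds a single potential P(B|A) and del_vars = {B}, with no target and
  // no evidence on B. Then B appears in exactly one potential, hence it is
  // barren: projecting B out yields a potential over {A} alone (a table of
  // ones, since sum_B P(B|A) = 1), so P(B|A) never needs to be combined
  // with the other potentials of the clique.
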
  // remove variables del_vars from the list of potentials pot_list
  template < typename GUM_SCALAR >
  Set< const Potential< GUM_SCALAR >* >
     ShaferShenoyInference< GUM_SCALAR >::__marginalizeOut(
        Set< const Potential< GUM_SCALAR >* > pot_list,
        Set< const DiscreteVariable* >&       del_vars,
        Set< const DiscreteVariable* >&       kept_vars) {
    // remove the potentials corresponding to barren variables if we want
    // to exploit barren nodes
    __PotentialSet barren_projected_potentials;
    if (__barren_nodes_type == FindBarrenNodesType::FIND_BARREN_NODES) {
      barren_projected_potentials = __removeBarrenVariables(pot_list, del_vars);
    }

    // create a combine and project operator that will perform the
    // marginalization
    MultiDimCombineAndProjectDefault< GUM_SCALAR, Potential > combine_and_project(
       __combination_op, __projection_op);
    __PotentialSet new_pot_list =
       combine_and_project.combineAndProject(pot_list, del_vars);

    // remove all the potentials that were created due to projections of
    // barren nodes and that are not part of the new_pot_list: these
    // potentials were just temporary potentials
    for (auto iter = barren_projected_potentials.beginSafe();
         iter != barren_projected_potentials.endSafe();
         ++iter) {
      if (!new_pot_list.exists(*iter)) delete *iter;
    }

    // remove all the potentials that have no dimension
    for (auto iter_pot = new_pot_list.beginSafe();
         iter_pot != new_pot_list.endSafe();
         ++iter_pot) {
      if ((*iter_pot)->variablesSequence().size() == 0) {
        // as we have already marginalized out variables that received evidence,
        // it may be the case that, after combining and projecting, some
        // potentials might be empty. In this case, we shall keep their
        // constant and remove them from memory
        // # TODO: keep the constants!
        delete *iter_pot;
        new_pot_list.erase(iter_pot);
      }
    }

    return new_pot_list;
  }

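  // Illustrative example (not part of the original file): with
  // pot_list = { P(A,B), P(B,C) }, del_vars = {B} and kept_vars = {A, C},
  // combineAndProject first multiplies the two potentials that mention B
  // and then sums B out, returning a set holding a single new potential
  // over {A, C}; potentials that mention no deleted variable are returned
  // unchanged.
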
  // creates the message sent by clique from_id to clique to_id
  template < typename GUM_SCALAR >
  void ShaferShenoyInference< GUM_SCALAR >::__produceMessage(NodeId from_id,
                                                             NodeId to_id) {
    // get the potentials of the clique.
    __PotentialSet pot_list;
    if (__clique_ss_potential.exists(from_id))
      pot_list.insert(__clique_ss_potential[from_id]);

    // add the messages sent by adjacent nodes to from_id
    for (const auto other_id : __JT->neighbours(from_id))
      if (other_id != to_id)
        pot_list += __separator_potentials[Arc(other_id, from_id)];

    // get the set of variables that need be removed from the potentials
    const NodeSet& from_clique = __JT->clique(from_id);
    const NodeSet& separator = __JT->separator(from_id, to_id);
    Set< const DiscreteVariable* > del_vars(from_clique.size());
    Set< const DiscreteVariable* > kept_vars(separator.size());
    const auto& bn = this->BN();

    for (const auto node : from_clique) {
      if (!separator.contains(node)) {
        del_vars.insert(&(bn.variable(node)));
      } else {
        kept_vars.insert(&(bn.variable(node)));
      }
    }

    // pot_list now contains all the potentials to multiply and marginalize
    // => combine the messages
    __PotentialSet new_pot_list = __marginalizeOut(pot_list, del_vars, kept_vars);

    // remove all the potentials that are equal to ones (as probability
    // matrix multiplications are tensorial, such potentials are useless)
    for (auto iter = new_pot_list.beginSafe(); iter != new_pot_list.endSafe();
         ++iter) {
      const auto pot = *iter;
      if (pot->variablesSequence().size() == 1) {
        bool is_all_ones = true;
        for (Instantiation inst(*pot); !inst.end(); ++inst) {
          if ((*pot)[inst] < __1_minus_epsilon) {
            is_all_ones = false;
            break;
          }
        }
        if (is_all_ones) {
          if (!pot_list.exists(pot)) delete pot;
          new_pot_list.erase(iter);
          continue;
        }
      }
    }

    // if there are still potentials in new_pot_list, combine them to
    // produce the message on the separator
    const Arc arc(from_id, to_id);
    if (!new_pot_list.empty()) {
      if (new_pot_list.size() == 1) {   // there is only one potential
        // in new_pot_list, so this corresponds to our message on
        // the separator
        auto pot = *(new_pot_list.begin());
        __separator_potentials[arc] = std::move(new_pot_list);
        if (!pot_list.exists(pot)) {
          if (!__created_potentials.exists(arc))
            __created_potentials.insert(arc, __PotentialSet());
          __created_potentials[arc].insert(pot);
        }
      } else {
        // create the message in the separator
        MultiDimCombinationDefault< GUM_SCALAR, Potential > fast_combination(
           __combination_op);
        auto joint = fast_combination.combine(new_pot_list);
        __separator_potentials[arc].insert(joint);
        if (!__created_potentials.exists(arc))
          __created_potentials.insert(arc, __PotentialSet());
        __created_potentials[arc].insert(joint);

        // remove the temporary messages created in new_pot_list
        for (const auto pot : new_pot_list) {
          if (!pot_list.exists(pot)) { delete pot; }
        }
      }
    }

    __messages_computed[arc] = true;
  }

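  // Illustrative example (not part of the original file): if clique from_id
  // contains {A, B} and the separator toward to_id is {B}, then
  // del_vars = {A} and kept_vars = {B}, so the message stored into
  // __separator_potentials[Arc(from_id, to_id)] is a potential over {B}
  // obtained by multiplying the clique's Shafer-Shenoy potential with the
  // incoming messages and summing A out.
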
  // performs a whole inference
  template < typename GUM_SCALAR >
  void ShaferShenoyInference< GUM_SCALAR >::_makeInference() {
    // collect messages for all single targets
    for (const auto node : this->targets()) {
      // perform only collects in the join tree for nodes that have
      // not received hard evidence (those that received hard evidence were
      // not included into the join tree for speed-up reasons)
      if (__graph.exists(node)) {
        __collectMessage(__node_to_clique[node], __node_to_clique[node]);
      }
    }

    // collect messages for all set targets
    // by parsing __joint_target_to_clique, we ensure that the cliques that
    // are referenced belong to the join tree (even if some of the nodes in
    // their associated joint_target do not belong to __graph)
    for (const auto set : __joint_target_to_clique)
      __collectMessage(set.second, set.second);
  }

  // returns a fresh potential equal to P(argument, evidence)
  template < typename GUM_SCALAR >
  Potential< GUM_SCALAR >*
     ShaferShenoyInference< GUM_SCALAR >::_unnormalizedJointPosterior(NodeId id) {
    const auto& bn = this->BN();

    // hard evidence do not belong to the join tree
    // # TODO: check for sets of inconsistent hard evidence
    if (this->hardEvidenceNodes().contains(id)) {
      return new Potential< GUM_SCALAR >(*(this->evidence()[id]));
    }

    // if we still need to perform some inference task, do it (this should
    // already have been done by _makeInference)
    NodeId clique_of_id = __node_to_clique[id];
    __collectMessage(clique_of_id, clique_of_id);

    // now we just need to create the product of the potentials of the clique
    // containing id with the messages received by this clique and
    // marginalize out all variables except id
    __PotentialSet pot_list;
    if (__clique_ss_potential.exists(clique_of_id))
      pot_list.insert(__clique_ss_potential[clique_of_id]);

    // add the messages sent by adjacent nodes to the target clique
    for (const auto other : __JT->neighbours(clique_of_id))
      pot_list += __separator_potentials[Arc(other, clique_of_id)];

    // get the set of variables that need be removed from the potentials
    const NodeSet& nodes = __JT->clique(clique_of_id);
    Set< const DiscreteVariable* > kept_vars{&(bn.variable(id))};
    Set< const DiscreteVariable* > del_vars(nodes.size());
    for (const auto node : nodes) {
      if (node != id) del_vars.insert(&(bn.variable(node)));
    }

    // pot_list now contains all the potentials to multiply and marginalize
    // => combine the messages
    __PotentialSet new_pot_list = __marginalizeOut(pot_list, del_vars, kept_vars);
    Potential< GUM_SCALAR >* joint = nullptr;

    if (new_pot_list.size() == 1) {
      joint = const_cast< Potential< GUM_SCALAR >* >(*(new_pot_list.begin()));
      // if pot already existed, create a copy, so that we can put it into
      // the __target_posteriors property
      if (pot_list.exists(joint)) {
        joint = new Potential< GUM_SCALAR >(*joint);
      } else {
        // remove the joint from new_pot_list so that it will not be
        // removed just after the else block
        new_pot_list.clear();
      }
    } else {
      MultiDimCombinationDefault< GUM_SCALAR, Potential > fast_combination(
         __combination_op);
      joint = fast_combination.combine(new_pot_list);
    }

    // remove the potentials that were created in new_pot_list
    for (auto pot : new_pot_list)
      if (!pot_list.exists(pot)) delete pot;

    // check that the joint posterior is different from a 0 vector: this would
    // indicate that some hard evidence are not compatible (their joint
    // probability is equal to 0)
    bool nonzero_found = false;
    for (Instantiation inst(*joint); !inst.end(); ++inst) {
      if ((*joint)[inst]) {
        nonzero_found = true;
        break;
      }
    }
    if (!nonzero_found) {
      // remove joint from memory to avoid memory leaks
      delete joint;
      GUM_ERROR(IncompatibleEvidence,
                "some evidence entered into the Bayes "
                "net are incompatible (their joint proba = 0)");
    }
    return joint;
  }

  // returns the posterior of a given node
  template < typename GUM_SCALAR >
  const Potential< GUM_SCALAR >&
     ShaferShenoyInference< GUM_SCALAR >::_posterior(NodeId id) {
    // check if we have already computed the posterior
    if (__target_posteriors.exists(id)) { return *(__target_posteriors[id]); }

    // compute the joint posterior and normalize
    auto joint = _unnormalizedJointPosterior(id);
    joint->normalize();
    __target_posteriors.insert(id, joint);

    return *joint;
  }

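  // Usage sketch (illustrative, not part of the original file): _posterior
  // is reached through the public posterior() method inherited from the
  // targeted-inference interface; assuming a BayesNet< double > bn:
  //
  //   gum::ShaferShenoyInference< double > ie(&bn);
  //   ie.makeInference();
  //   const auto& p = ie.posterior(bn.idFromName("X"));   // P(X | evidence)
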
  // returns a fresh potential equal to P(set of nodes, evidence)
  template < typename GUM_SCALAR >
  Potential< GUM_SCALAR >*
     ShaferShenoyInference< GUM_SCALAR >::_unnormalizedJointPosterior(
        const NodeSet& set) {
    // hard evidence do not belong to the join tree, so extract the nodes
    // from targets that are not hard evidence
    NodeSet targets = set, hard_ev_nodes;
    for (const auto node : this->hardEvidenceNodes()) {
      if (targets.contains(node)) {
        targets.erase(node);
        hard_ev_nodes.insert(node);
      }
    }

    // if all the nodes have received hard evidence, then compute the
    // joint posterior directly by multiplying the hard evidence potentials
    const auto& evidence = this->evidence();
    if (targets.empty()) {
      __PotentialSet pot_list;
      for (const auto node : set) {
        pot_list.insert(evidence[node]);
      }
      if (pot_list.size() == 1) {
        auto pot = new Potential< GUM_SCALAR >(**(pot_list.begin()));
        return pot;
      } else {
        MultiDimCombinationDefault< GUM_SCALAR, Potential > fast_combination(
           __combination_op);
        return fast_combination.combine(pot_list);
      }
    }


    // if we still need to perform some inference task, do it: so, first,
    // determine the clique on which we should perform collect to compute
    // the unnormalized joint posterior of a set of nodes containing "set"
    NodeId clique_of_set;
    try {
      clique_of_set = __joint_target_to_clique[set];
    } catch (NotFound&) {
      // here, the precise set of targets does not belong to the set of targets
      // defined by the user. So we will try to find a clique in the junction
      // tree that contains "targets":

      // 1/ we should check that all the nodes belong to the join tree
      for (const auto node : targets) {
        if (!__graph.exists(node)) {
          GUM_ERROR(UndefinedElement, node << " is not a target node");
        }
      }

      // 2/ the clique created by the first eliminated node among target is the
      // one we are looking for
      const std::vector< NodeId >& JT_elim_order =
         __triangulation->eliminationOrder();
      NodeProperty< int > elim_order(Size(JT_elim_order.size()));
      for (std::size_t i = std::size_t(0), size = JT_elim_order.size(); i < size;
           ++i)
        elim_order.insert(JT_elim_order[i], (int)i);
      NodeId first_eliminated_node = *(targets.begin());
      int    elim_number = elim_order[first_eliminated_node];
      for (const auto node : targets) {
        if (elim_order[node] < elim_number) {
          elim_number = elim_order[node];
          first_eliminated_node = node;
        }
      }
      clique_of_set =
         __triangulation->createdJunctionTreeClique(first_eliminated_node);

      // 3/ check that clique_of_set contains all the nodes in the target
      const NodeSet& clique_nodes = __JT->clique(clique_of_set);
      for (const auto node : targets) {
        if (!clique_nodes.contains(node)) {
          GUM_ERROR(UndefinedElement, set << " is not a joint target");
        }
      }

      // add the discovered clique to __joint_target_to_clique
      __joint_target_to_clique.insert(set, clique_of_set);
    }

    // now perform a collect on the clique
    __collectMessage(clique_of_set, clique_of_set);

    // now we just need to create the product of the potentials of the clique
    // containing set with the messages received by this clique and
    // marginalize out all variables except set
    __PotentialSet pot_list;
    if (__clique_ss_potential.exists(clique_of_set))
      pot_list.insert(__clique_ss_potential[clique_of_set]);

    // add the messages sent by adjacent nodes to the target clique
    for (const auto other : __JT->neighbours(clique_of_set))
      pot_list += __separator_potentials[Arc(other, clique_of_set)];

    // get the set of variables that need be removed from the potentials
    const NodeSet& nodes = __JT->clique(clique_of_set);
    Set< const DiscreteVariable* > del_vars(nodes.size());
    Set< const DiscreteVariable* > kept_vars(targets.size());
    const auto& bn = this->BN();
    for (const auto node : nodes) {
      if (!targets.contains(node)) {
        del_vars.insert(&(bn.variable(node)));
      } else {
        kept_vars.insert(&(bn.variable(node)));
      }
    }

    // pot_list now contains all the potentials to multiply and marginalize
    // => combine the messages
    __PotentialSet new_pot_list = __marginalizeOut(pot_list, del_vars, kept_vars);
    Potential< GUM_SCALAR >* joint = nullptr;

    if ((new_pot_list.size() == 1) && hard_ev_nodes.empty()) {
      joint = const_cast< Potential< GUM_SCALAR >* >(*(new_pot_list.begin()));
      // if pot already existed, create a copy, so that we can put it into
      // the __target_posteriors property
      if (pot_list.exists(joint)) {
        joint = new Potential< GUM_SCALAR >(*joint);
      } else {
        // remove the joint from new_pot_list so that it will not be
        // removed just after the next else block
        new_pot_list.clear();
      }
    } else {
      // combine all the potentials in new_pot_list with all the hard evidence
      // of the nodes in set
      __PotentialSet new_new_pot_list = new_pot_list;
      for (const auto node : hard_ev_nodes) {
        new_new_pot_list.insert(evidence[node]);
      }
      MultiDimCombinationDefault< GUM_SCALAR, Potential > fast_combination(
         __combination_op);
      joint = fast_combination.combine(new_new_pot_list);
    }

    // remove the potentials that were created in new_pot_list
    for (auto pot : new_pot_list)
      if (!pot_list.exists(pot)) delete pot;

    // check that the joint posterior is different from a 0 vector: this would
    // indicate that some hard evidence are not compatible
    bool nonzero_found = false;
    for (Instantiation inst(*joint); !inst.end(); ++inst) {
      if ((*joint)[inst]) {
        nonzero_found = true;
        break;
      }
    }
    if (!nonzero_found) {
      // remove joint from memory to avoid memory leaks
      delete joint;
      GUM_ERROR(IncompatibleEvidence,
                "some evidence entered into the Bayes "
                "net are incompatible (their joint proba = 0)");
    }

    return joint;
  }

  // returns the posterior of a given set of variables
  template < typename GUM_SCALAR >
  const Potential< GUM_SCALAR >&
     ShaferShenoyInference< GUM_SCALAR >::_jointPosterior(const NodeSet& set) {
    // check if we have already computed the posterior
    if (__joint_target_posteriors.exists(set)) {
      return *(__joint_target_posteriors[set]);
    }

    // compute the joint posterior and normalize
    auto joint = _unnormalizedJointPosterior(set);
    joint->normalize();
    __joint_target_posteriors.insert(set, joint);

    return *joint;
  }

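  // Usage sketch (illustrative, not part of the original file): joint
  // targets are best declared before inference so that __createNewJT can
  // force a clique containing them (see step 4/ above); assuming a
  // BayesNet< double > bn with variables "X" and "Y":
  //
  //   gum::ShaferShenoyInference< double > ie(&bn);
  //   ie.addJointTarget(gum::NodeSet{ bn.idFromName("X"),
  //                                   bn.idFromName("Y") });
  //   ie.makeInference();
  //   const auto& pXY = ie.jointPosterior(
  //      gum::NodeSet{ bn.idFromName("X"), bn.idFromName("Y") });
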
  // returns the posterior of a subset of a declared joint target
  template < typename GUM_SCALAR >
  const Potential< GUM_SCALAR >&
     ShaferShenoyInference< GUM_SCALAR >::_jointPosterior(
        const NodeSet& wanted_target, const NodeSet& declared_target) {
    // check if we have already computed the posterior of wanted_target
    if (__joint_target_posteriors.exists(wanted_target))
      return *(__joint_target_posteriors[wanted_target]);

    // here, we will have to compute the posterior of declared_target and
    // marginalize out all the variables that do not belong to wanted_target

    // check if we have already computed the posterior of declared_target
    if (!__joint_target_posteriors.exists(declared_target)) {
      _jointPosterior(declared_target);
    }

    // marginalize out all the variables that do not belong to wanted_target
    const auto& bn = this->BN();
    Set< const DiscreteVariable* > del_vars;
    for (const auto node : declared_target)
      if (!wanted_target.contains(node)) del_vars.insert(&(bn.variable(node)));
    auto pot = new Potential< GUM_SCALAR >(
       __joint_target_posteriors[declared_target]->margSumOut(del_vars));

    // save the result into the cache
    __joint_target_posteriors.insert(wanted_target, pot);

    return *pot;
  }

1622  template < typename GUM_SCALAR >
1624  // perform inference in each connected component
1625  this->makeInference();
1626 
1627  // for each connected component, select a variable X and compute the
1628  // joint probability of X and the evidence e. Then marginalize X out to
1629  // get p(e) in this connected component. Finally, multiply together all
1630  // the p(e) obtained and the constants stored in __constants. The result
1631  // is the probability of the evidence
1632 
1633  GUM_SCALAR prob_ev = 1;
1634  for (const auto root : __roots) {
1635  // get a node in the clique
1636  NodeId node = *(__JT->clique(root).begin());
1637  Potential< GUM_SCALAR >* tmp = _unnormalizedJointPosterior(node);
1638  GUM_SCALAR sum = 0;
1639  for (Instantiation iter(*tmp); !iter.end(); ++iter)
1640  sum += tmp->get(iter);
1641  prob_ev *= sum;
1642  delete tmp;
1643  }
1644 
1645  for (const auto& projected_cpt : __constants)
1646  prob_ev *= projected_cpt.second;
1647 
1648  return prob_ev;
1649  }
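  // A minimal usage sketch for evidenceProbability (assuming a
  // BayesNet< double > bn in which node 0 has at least two values):
  //
  //   gum::ShaferShenoyInference< double > ie(&bn);
  //   ie.addEvidence(NodeId(0), 1);  // hard evidence: node 0 takes value 1
  //   double p_e = ie.evidenceProbability();
  //   // p_e = product of the per-component sums computed above and of
  //   // the constants stored in __constants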
1650 
1651 
1652 } /* namespace gum */
1653 
1654 #endif // DOXYGEN_SHOULD_SKIP_THIS