aGrUM  0.14.2
lazyPropagation_tpl.h
/***************************************************************************
 * Copyright (C) 2005 by Christophe GONZALES and Pierre-Henri WUILLEMIN
 * {prenom.nom}_at_lip6.fr
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the
 * Free Software Foundation, Inc.,
 * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 ***************************************************************************/
#ifndef DOXYGEN_SHOULD_SKIP_THIS

#  include <algorithm>

// aGrUM headers for the classes used below (BayesBall, dSeparation,
// BarrenNodesFinder and the multidim combine/project operators); the
// exact paths are assumed to follow aGrUM 0.14's source layout
#  include <agrum/BN/algorithms/BayesBall.h>
#  include <agrum/BN/algorithms/barrenNodesFinder.h>
#  include <agrum/BN/algorithms/dSeparation.h>
#  include <agrum/multidim/instantiation.h>
#  include <agrum/multidim/operators/multiDimCombineAndProjectDefault.h>
#  include <agrum/multidim/operators/multiDimProjection.h>

namespace gum {
  // default constructor
  template < typename GUM_SCALAR >
  INLINE LazyPropagation< GUM_SCALAR >::LazyPropagation(
     const IBayesNet< GUM_SCALAR >* BN,
     RelevantPotentialsFinderType   relevant_type,
     FindBarrenNodesType            barren_type,
     bool                           use_binary_join_tree) :
      JointTargetedInference< GUM_SCALAR >(BN),
      EvidenceInference< GUM_SCALAR >(BN),
      __use_binary_join_tree(use_binary_join_tree) {
    // sets the relevant potentials and the barren nodes finding algorithms
    setRelevantPotentialsFinderType(relevant_type);
    setFindBarrenNodesType(barren_type);

    // create a default triangulation (the user can change it afterwards)
    __triangulation = new DefaultTriangulation;

    // for debugging purposes
    GUM_CONSTRUCTOR(LazyPropagation);
  }

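  /* Minimal usage sketch (not part of this file; "bn" is a hypothetical
     BayesNet< double > built elsewhere and "A"/"B" hypothetical variable
     names):

       gum::LazyPropagation< double > inf(&bn);
       inf.addEvidence(bn.idFromName("A"), 1);   // observe A = 1
       inf.makeInference();                      // triangulation + messages
       const auto& pB = inf.posterior(bn.idFromName("B"));   // P(B | A = 1)
  */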

  // destructor
  template < typename GUM_SCALAR >
  INLINE LazyPropagation< GUM_SCALAR >::~LazyPropagation() {
    // remove all the potentials created during the last message passing
    for (const auto& pots : __created_potentials)
      for (const auto pot : pots.second)
        delete pot;

    // remove the potentials created after removing the nodes that received
    // hard evidence
    for (const auto& pot : __hard_ev_projected_CPTs)
      delete pot.second;

    // remove all the posteriors computed
    for (const auto& pot : __target_posteriors)
      delete pot.second;
    for (const auto& pot : __joint_target_posteriors)
      delete pot.second;

    // remove the junction tree and the triangulation algorithm
    if (__JT != nullptr) delete __JT;
    if (__junctionTree != nullptr) delete __junctionTree;
    delete __triangulation;

    // for debugging purposes
    GUM_DESTRUCTOR(LazyPropagation);
  }


  /// set a new triangulation algorithm
  template < typename GUM_SCALAR >
  void LazyPropagation< GUM_SCALAR >::setTriangulation(
     const Triangulation& new_triangulation) {
    delete __triangulation;
    __triangulation = new_triangulation.newFactory();
    __is_new_jt_needed = true;
    this->_setOutdatedBNStructureState();
  }


  /// returns the join tree used for the last inference
  template < typename GUM_SCALAR >
  INLINE const JoinTree* LazyPropagation< GUM_SCALAR >::joinTree() {
    if (__is_new_jt_needed) __createNewJT();

    return __JT;
  }

  /// returns the junction tree used for the last inference
  template < typename GUM_SCALAR >
  INLINE const JunctionTree* LazyPropagation< GUM_SCALAR >::junctionTree() {
    if (__is_new_jt_needed) __createNewJT();

    return __junctionTree;
  }

  /// sets how we determine the relevant potentials to combine
  template < typename GUM_SCALAR >
  void LazyPropagation< GUM_SCALAR >::setRelevantPotentialsFinderType(
     RelevantPotentialsFinderType type) {
    if (type != __find_relevant_potential_type) {
      switch (type) {
        case RelevantPotentialsFinderType::DSEP_BAYESBALL_POTENTIALS:
          __findRelevantPotentials = &LazyPropagation<
             GUM_SCALAR >::__findRelevantPotentialsWithdSeparation2;
          break;

        case RelevantPotentialsFinderType::DSEP_BAYESBALL_NODES:
          __findRelevantPotentials = &LazyPropagation<
             GUM_SCALAR >::__findRelevantPotentialsWithdSeparation;
          break;

        case RelevantPotentialsFinderType::DSEP_KOLLER_FRIEDMAN_2009:
          __findRelevantPotentials = &LazyPropagation<
             GUM_SCALAR >::__findRelevantPotentialsWithdSeparation3;
          break;

        case RelevantPotentialsFinderType::FIND_ALL:
          __findRelevantPotentials =
             &LazyPropagation< GUM_SCALAR >::__findRelevantPotentialsGetAll;
          break;

        default:
          GUM_ERROR(InvalidArgument,
                    "setRelevantPotentialsFinderType for type "
                       << (unsigned int)type << " is not implemented yet");
      }

      __find_relevant_potential_type = type;

      // indicate that all messages need be reconstructed to take into account
      // the change in d-separation analysis
      __invalidateAllMessages();
    }
  }

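  /* Sketch (hypothetical instance "inf"): restricting the potentials that
     enter each combination via d-separation analysis can speed up inference
     on large networks, e.g.:

       inf.setRelevantPotentialsFinderType(
          RelevantPotentialsFinderType::DSEP_BAYESBALL_POTENTIALS);
  */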
  /// sets the operator for performing the projections
  template < typename GUM_SCALAR >
  INLINE void LazyPropagation< GUM_SCALAR >::__setProjectionFunction(
     Potential< GUM_SCALAR >* (*proj)(const Potential< GUM_SCALAR >&,
                                      const Set< const DiscreteVariable* >&)) {
    __projection_op = proj;
  }

  /// sets the operator for performing the combinations
  template < typename GUM_SCALAR >
  INLINE void LazyPropagation< GUM_SCALAR >::__setCombinationFunction(
     Potential< GUM_SCALAR >* (*comb)(const Potential< GUM_SCALAR >&,
                                      const Potential< GUM_SCALAR >&)) {
    __combination_op = comb;
  }

  /// invalidate all the messages, posteriors and created potentials
  template < typename GUM_SCALAR >
  void LazyPropagation< GUM_SCALAR >::__invalidateAllMessages() {
    // remove all the messages computed
    for (auto& potset : __separator_potentials)
      potset.second.clear();
    for (auto& mess_computed : __messages_computed)
      mess_computed.second = false;

    // remove all the created potentials
    for (const auto& potset : __created_potentials)
      for (const auto pot : potset.second)
        delete pot;

    // remove all the posteriors
    for (const auto& pot : __target_posteriors)
      delete pot.second;
    for (const auto& pot : __joint_target_posteriors)
      delete pot.second;

    // indicate that new messages need be computed
    if (this->isInferenceReady() || this->isDone())
      this->_setOutdatedBNPotentialsState();
  }

  /// sets how we determine barren nodes
  template < typename GUM_SCALAR >
  void LazyPropagation< GUM_SCALAR >::setFindBarrenNodesType(
     FindBarrenNodesType type) {
    if (type != __barren_nodes_type) {
      // WARNING: if a new type is added here, method __createNewJT should
      // certainly be updated as well, in particular its step 2.
      switch (type) {
        case FindBarrenNodesType::FIND_BARREN_NODES:
        case FindBarrenNodesType::FIND_NO_BARREN_NODES: break;

        default:
          GUM_ERROR(InvalidArgument,
                    "setFindBarrenNodesType for type "
                       << (unsigned int)type << " is not implemented yet");
      }

      __barren_nodes_type = type;

      // potentially, we may need to reconstruct a junction tree
      this->_setOutdatedBNStructureState();
    }
  }

  /// fired when a new evidence is inserted
  template < typename GUM_SCALAR >
  INLINE void
     LazyPropagation< GUM_SCALAR >::_onEvidenceAdded(const NodeId id,
                                                     bool isHardEvidence) {
    // if we have a new hard evidence, this modifies the undigraph over which
    // the join tree is created. This is also the case if id is not a node of
    // the undigraph
    if (isHardEvidence || !__graph.exists(id))
      __is_new_jt_needed = true;
    else {
      try {
        __evidence_changes.insert(id, EvidenceChangeType::EVIDENCE_ADDED);
      } catch (DuplicateElement&) {
        // here, the evidence change already existed. This necessarily means
        // that the current saved change is an EVIDENCE_ERASED. So if we
        // erased the evidence and added some again, this corresponds to an
        // EVIDENCE_MODIFIED
        __evidence_changes[id] = EvidenceChangeType::EVIDENCE_MODIFIED;
      }
    }
  }

  /// fired when an evidence is removed
  template < typename GUM_SCALAR >
  INLINE void
     LazyPropagation< GUM_SCALAR >::_onEvidenceErased(const NodeId id,
                                                      bool isHardEvidence) {
    // if we delete a hard evidence, this modifies the undigraph over which
    // the join tree is created.
    if (isHardEvidence)
      __is_new_jt_needed = true;
    else {
      try {
        __evidence_changes.insert(id, EvidenceChangeType::EVIDENCE_ERASED);
      } catch (DuplicateElement&) {
        // here, the evidence change already existed and it is necessarily an
        // EVIDENCE_ADDED or an EVIDENCE_MODIFIED. So, if the evidence has
        // been added and is now erased, this is similar to not having created
        // it. If the evidence was only modified, it already existed in the
        // last inference and we should now indicate that it has been removed.
        if (__evidence_changes[id] == EvidenceChangeType::EVIDENCE_ADDED)
          __evidence_changes.erase(id);
        else
          __evidence_changes[id] = EvidenceChangeType::EVIDENCE_ERASED;
      }
    }
  }

  /// fired before all the evidence are erased
  template < typename GUM_SCALAR >
  void
     LazyPropagation< GUM_SCALAR >::_onAllEvidenceErased(bool has_hard_evidence) {
    if (has_hard_evidence || !this->hardEvidenceNodes().empty())
      __is_new_jt_needed = true;
    else {
      for (const auto node : this->softEvidenceNodes()) {
        try {
          __evidence_changes.insert(node, EvidenceChangeType::EVIDENCE_ERASED);
        } catch (DuplicateElement&) {
          // here, the evidence change already existed and it is necessarily an
          // EVIDENCE_ADDED or an EVIDENCE_MODIFIED. So, if the evidence has
          // been added and is now erased, this is similar to not having created
          // it. If the evidence was only modified, it already existed in the
          // last inference and we should now indicate that it has been removed.
          if (__evidence_changes[node] == EvidenceChangeType::EVIDENCE_ADDED)
            __evidence_changes.erase(node);
          else
            __evidence_changes[node] = EvidenceChangeType::EVIDENCE_ERASED;
        }
      }
    }
  }

  /// fired when an evidence is changed
  template < typename GUM_SCALAR >
  INLINE void
     LazyPropagation< GUM_SCALAR >::_onEvidenceChanged(const NodeId id,
                                                       bool hasChangedSoftHard) {
    if (hasChangedSoftHard)
      __is_new_jt_needed = true;
    else {
      try {
        __evidence_changes.insert(id, EvidenceChangeType::EVIDENCE_MODIFIED);
      } catch (DuplicateElement&) {
        // here, the evidence change already existed and it is necessarily an
        // EVIDENCE_ADDED. So we should keep this state to indicate that this
        // evidence is new w.r.t. the last inference
      }
    }
  }

  /// fired after a new Bayes net has been assigned to the inference engine
  template < typename GUM_SCALAR >
  INLINE void LazyPropagation< GUM_SCALAR >::_onBayesNetChanged(
     const IBayesNet< GUM_SCALAR >* bn) {}


  /// fired after a new single target is inserted
  template < typename GUM_SCALAR >
  INLINE void
     LazyPropagation< GUM_SCALAR >::_onMarginalTargetAdded(const NodeId id) {}


  /// fired before a single target is removed
  template < typename GUM_SCALAR >
  INLINE void
     LazyPropagation< GUM_SCALAR >::_onMarginalTargetErased(const NodeId id) {}


  /// fired after a new joint target is inserted
  template < typename GUM_SCALAR >
  INLINE void
     LazyPropagation< GUM_SCALAR >::_onJointTargetAdded(const NodeSet& set) {}


  /// fired before a joint target is removed
  template < typename GUM_SCALAR >
  INLINE void
     LazyPropagation< GUM_SCALAR >::_onJointTargetErased(const NodeSet& set) {}


  /// fired after all the nodes of the BN are added as single targets
  template < typename GUM_SCALAR >
  INLINE void LazyPropagation< GUM_SCALAR >::_onAllMarginalTargetsAdded() {}


  /// fired before all the single targets are removed
  template < typename GUM_SCALAR >
  INLINE void LazyPropagation< GUM_SCALAR >::_onAllMarginalTargetsErased() {}


  /// fired before all the joint targets are removed
  template < typename GUM_SCALAR >
  INLINE void LazyPropagation< GUM_SCALAR >::_onAllJointTargetsErased() {}


  /// fired before all the single and joint targets are removed
  template < typename GUM_SCALAR >
  INLINE void LazyPropagation< GUM_SCALAR >::_onAllTargetsErased() {}

  // check whether a new junction tree is really needed for the next inference
  template < typename GUM_SCALAR >
  bool LazyPropagation< GUM_SCALAR >::__isNewJTNeeded() const {
    // if we do not have a JT or if __is_new_jt_needed is set to true, then
    // we know that we need to create a new join tree
    if ((__JT == nullptr) || __is_new_jt_needed) return true;

    // if some targets do not belong to the join tree and, consequently, to
    // the undigraph that was used to construct the join tree, then we need
    // to create a new JT. This situation may occur if we constructed the
    // join tree after pruning irrelevant/barren nodes from the BN.
    // However, note that the nodes that received hard evidence do not belong
    // to the graph and, therefore, should not be taken into account
    const auto& hard_ev_nodes = this->hardEvidenceNodes();
    for (const auto node : this->targets()) {
      if (!__graph.exists(node) && !hard_ev_nodes.exists(node)) return true;
    }
    for (const auto& nodes : this->jointTargets()) {
      // here, we need to check that at least one clique contains all the nodes.
      bool containing_clique_found = false;
      for (const auto node : nodes) {
        bool found = true;
        const NodeSet& clique = __JT->clique(__node_to_clique[node]);
        for (const auto xnode : nodes) {
          if (!clique.contains(xnode) && !hard_ev_nodes.exists(xnode)) {
            found = false;
            break;
          }
        }
        if (found) {
          containing_clique_found = true;
          break;
        }
      }

      if (!containing_clique_found) return true;
    }

    // if new evidence has been added on nodes that do not belong to __graph,
    // then we potentially have to reconstruct the join tree
    for (const auto& change : __evidence_changes) {
      if ((change.second == EvidenceChangeType::EVIDENCE_ADDED)
          && !__graph.exists(change.first))
        return true;
    }

    // here, the current JT is exactly what we need for the next inference
    return false;
  }

  /// create a new junction tree as well as its related data structures
  template < typename GUM_SCALAR >
  void LazyPropagation< GUM_SCALAR >::__createNewJT() {
    // to create the JT, we first create the moral graph of the BN in the
    // following way in order to take into account the barren nodes and the
    // nodes that received evidence:
    // 1/ we create an undirected graph containing only the nodes and no edge
    // 2/ if we take into account barren nodes, remove them from the graph
    // 3/ add edges so that each node and its parents in the BN form a clique
    // 4/ add edges so that the joint targets are cliques of the moral graph
    // 5/ remove the nodes that received hard evidence (by step 3/, their
    //    parents are linked by edges, which is necessary for inference)
    //
    // At the end of step 5/, we have our moral graph and we can triangulate
    // it to get the new junction tree

    // 1/ create an undirected graph containing only the nodes and no edge
    const auto& bn = this->BN();
    __graph.clear();
    for (const auto node : bn.dag())
      __graph.addNodeWithId(node);

    // 2/ if we wish to exploit barren nodes, we shall remove them from the
    // BN. To do so, we identify all the nodes that are not targets, have
    // received no evidence, and whose descendants are neither targets nor
    // evidence nodes. Such nodes can be safely discarded from the BN without
    // altering the inference output
    if (__barren_nodes_type == FindBarrenNodesType::FIND_BARREN_NODES) {
      // identify the barren nodes
      NodeSet target_nodes = this->targets();
      for (const auto& nodeset : this->jointTargets()) {
        target_nodes += nodeset;
      }

      // if all the nodes are targets, there can be no barren node
      if (target_nodes.size() != bn.size()) {
        BarrenNodesFinder finder(&(bn.dag()));
        finder.setTargets(&target_nodes);

        NodeSet evidence_nodes;
        for (const auto& pair : this->evidence()) {
          evidence_nodes.insert(pair.first);
        }
        finder.setEvidence(&evidence_nodes);

        NodeSet barren_nodes = finder.barrenNodes();

        // remove the barren nodes from the moral graph
        for (const auto node : barren_nodes) {
          __graph.eraseNode(node);
        }
      }
    }

    // 3/ add edges so that each node and its parents in the BN form a clique
    for (const auto node : __graph) {
      const NodeSet& parents = bn.parents(node);
      for (auto iter1 = parents.cbegin(); iter1 != parents.cend(); ++iter1) {
        __graph.addEdge(*iter1, node);
        auto iter2 = iter1;
        for (++iter2; iter2 != parents.cend(); ++iter2) {
          __graph.addEdge(*iter1, *iter2);
        }
      }
    }

    // 4/ if there exist some joint targets, we shall add new edges into the
    // moral graph in order to ensure that there exists a clique containing
    // each joint target
    for (const auto& nodeset : this->jointTargets()) {
      for (auto iter1 = nodeset.cbegin(); iter1 != nodeset.cend(); ++iter1) {
        auto iter2 = iter1;
        for (++iter2; iter2 != nodeset.cend(); ++iter2) {
          __graph.addEdge(*iter1, *iter2);
        }
      }
    }

    // 5/ remove all the nodes that received hard evidence
    __hard_ev_nodes = this->hardEvidenceNodes();
    for (const auto node : __hard_ev_nodes) {
      __graph.eraseNode(node);
    }

    // now, we can compute the new junction tree. To speed-up computations
    // (essentially, those of a distribution phase), we construct from this
    // junction tree a binary join tree
    if (__JT != nullptr) delete __JT;
    if (__junctionTree != nullptr) delete __junctionTree;

    __triangulation->setGraph(&__graph, &(this->domainSizes()));
    const JunctionTree& triang_jt = __triangulation->junctionTree();
    if (__use_binary_join_tree) {
      BinaryJoinTreeConverterDefault bjt_converter;
      NodeSet emptyset;
      __JT = new CliqueGraph(
         bjt_converter.convert(triang_jt, this->domainSizes(), emptyset));
    } else {
      __JT = new CliqueGraph(triang_jt);
    }
    __junctionTree = new CliqueGraph(triang_jt);

    // indicate, for each node of the moral graph, a clique in __JT that can
    // contain its conditional probability table
    __node_to_clique.clear();
    const std::vector< NodeId >& JT_elim_order =
       __triangulation->eliminationOrder();
    NodeProperty< int > elim_order(Size(JT_elim_order.size()));
    for (std::size_t i = std::size_t(0), size = JT_elim_order.size(); i < size;
         ++i)
      elim_order.insert(JT_elim_order[i], (int)i);
    const DAG& dag = bn.dag();
    for (const auto node : __graph) {
      // get the variables in the potential of node (and its parents)
      NodeId first_eliminated_node = node;
      int    elim_number = elim_order[first_eliminated_node];

      for (const auto parent : dag.parents(node)) {
        if (__graph.existsNode(parent) && (elim_order[parent] < elim_number)) {
          elim_number = elim_order[parent];
          first_eliminated_node = parent;
        }
      }

      // first_eliminated_node contains the first var (node or one of its
      // parents) eliminated => the clique created during its elimination
      // contains node and all of its parents => it can contain the potential
      // assigned to the node in the BN
      __node_to_clique.insert(
         node, __triangulation->createdJunctionTreeClique(first_eliminated_node));
    }

    // do the same for the nodes that received hard evidence. Here, we only
    // store the nodes for which at least one parent belongs to __graph
    // (otherwise their CPT is just a constant real number).
    for (const auto node : __hard_ev_nodes) {
      // get the set of parents of the node that belong to __graph
      NodeSet pars(dag.parents(node).size());
      for (const auto par : dag.parents(node))
        if (__graph.exists(par)) pars.insert(par);

      if (!pars.empty()) {
        NodeId first_eliminated_node = *(pars.begin());
        int    elim_number = elim_order[first_eliminated_node];

        for (const auto parent : pars) {
          if (elim_order[parent] < elim_number) {
            elim_number = elim_order[parent];
            first_eliminated_node = parent;
          }
        }

        // first_eliminated_node contains the first var (node or one of its
        // parents) eliminated => the clique created during its elimination
        // contains node and all of its parents => it can contain the potential
        // assigned to the node in the BN
        __node_to_clique.insert(
           node,
           __triangulation->createdJunctionTreeClique(first_eliminated_node));
      }
    }

    // indicate for each joint target a clique that contains it
    __joint_target_to_clique.clear();
    for (const auto& set : this->jointTargets()) {
      // remove from set all the nodes that received hard evidence (since they
      // do not belong to the join tree)
      NodeSet nodeset = set;
      for (const auto node : __hard_ev_nodes)
        if (nodeset.contains(node)) nodeset.erase(node);

      if (!nodeset.empty()) {
        // the clique we are looking for is the one that was created when
        // the first element of nodeset was eliminated
        NodeId first_eliminated_node = *(nodeset.begin());
        int    elim_number = elim_order[first_eliminated_node];
        for (const auto node : nodeset) {
          if (elim_order[node] < elim_number) {
            elim_number = elim_order[node];
            first_eliminated_node = node;
          }
        }

        __joint_target_to_clique.insert(
           set, __triangulation->createdJunctionTreeClique(first_eliminated_node));
      }
    }


    // compute the roots of __JT's connected components
    __computeJoinTreeRoots();

    // create empty potential lists into the cliques of the joint tree as well
    // as empty lists of evidence
    __PotentialSet empty_set;
    __clique_potentials.clear();
    __node_to_soft_evidence.clear();
    for (const auto node : *__JT) {
      __clique_potentials.insert(node, empty_set);
    }

    // remove all the potentials created during the last inference
    for (const auto& potlist : __created_potentials)
      for (const auto pot : potlist.second)
        delete pot;
    __created_potentials.clear();

    // remove all the potentials created to take into account hard evidence
    // during the last inference
    for (const auto pot_pair : __hard_ev_projected_CPTs)
      delete pot_pair.second;
    __hard_ev_projected_CPTs.clear();

    // remove all the constants created due to projections of CPTs that were
    // defined over only hard evidence nodes
    __constants.clear();

    // create empty lists of potentials for the messages and indicate that no
    // message has been computed yet
    __separator_potentials.clear();
    __messages_computed.clear();
    for (const auto& edge : __JT->edges()) {
      const Arc arc1(edge.first(), edge.second());
      __separator_potentials.insert(arc1, empty_set);
      __messages_computed.insert(arc1, false);
      const Arc arc2(Arc(edge.second(), edge.first()));
      __separator_potentials.insert(arc2, empty_set);
      __messages_computed.insert(arc2, false);
    }

    // remove all the posteriors computed so far
    for (const auto& pot : __target_posteriors)
      delete pot.second;
    __target_posteriors.clear();
    for (const auto& pot : __joint_target_posteriors)
      delete pot.second;
    __joint_target_posteriors.clear();

    // put all the CPTs of the Bayes net nodes into the cliques.
    // Here, beware: all the potentials that are defined over some nodes
    // including hard evidence must be projected so that these nodes are
    // removed from the potential
    const auto& evidence = this->evidence();
    const auto& hard_evidence = this->hardEvidence();
    for (const auto node : dag) {
      if (__graph.exists(node) || __hard_ev_nodes.contains(node)) {
        const Potential< GUM_SCALAR >& cpt = bn.cpt(node);

        // get the list of nodes with hard evidence in cpt
        NodeSet hard_nodes;
        const auto& variables = cpt.variablesSequence();
        for (const auto var : variables) {
          NodeId xnode = bn.nodeId(*var);
          if (__hard_ev_nodes.contains(xnode)) hard_nodes.insert(xnode);
        }

        // if hard_nodes contains hard evidence nodes, perform a projection
        // and insert the result into the appropriate clique, else insert
        // directly cpt into the clique
        if (hard_nodes.empty()) {
          __clique_potentials[__node_to_clique[node]].insert(&cpt);
        } else {
          // marginalize out the hard evidence nodes: if the cpt is defined
          // only over nodes that received hard evidence, do not consider it
          // as a potential anymore but as a constant
          if (hard_nodes.size() == variables.size()) {
            Instantiation inst;
            const auto& vars = cpt.variablesSequence();
            for (const auto var : vars)
              inst << *var;
            for (Size i = 0; i < hard_nodes.size(); ++i) {
              inst.chgVal(variables[i], hard_evidence[bn.nodeId(*(variables[i]))]);
            }
            __constants.insert(node, cpt.get(inst));
          } else {
            // perform the projection with a combine and project instance
            Set< const DiscreteVariable* > hard_variables;
            __PotentialSet marg_cpt_set{&cpt};
            for (const auto xnode : hard_nodes) {
              marg_cpt_set.insert(evidence[xnode]);
              hard_variables.insert(&(bn.variable(xnode)));
            }

            // perform the combination of those potentials and their projection
            MultiDimCombineAndProjectDefault< GUM_SCALAR, Potential >
               combine_and_project(__combination_op, LPNewprojPotential);
            __PotentialSet new_cpt_list =
               combine_and_project.combineAndProject(marg_cpt_set, hard_variables);

            // there should be only one potential in new_cpt_list
            if (new_cpt_list.size() != 1) {
              // remove the CPTs created to avoid memory leaks
              for (const auto pot : new_cpt_list) {
                if (!marg_cpt_set.contains(pot)) delete pot;
              }
              GUM_ERROR(FatalError,
                        "the projection of a potential containing "
                           << "hard evidence is empty!");
            }
            const Potential< GUM_SCALAR >* projected_cpt = *(new_cpt_list.begin());
            __clique_potentials[__node_to_clique[node]].insert(projected_cpt);
            __hard_ev_projected_CPTs.insert(node, projected_cpt);
          }
        }
      }
    }

    // we shall now add all the potentials of the soft evidence
    for (const auto node : this->softEvidenceNodes()) {
      __node_to_soft_evidence.insert(node, evidence[node]);
      __clique_potentials[__node_to_clique[node]].insert(evidence[node]);
    }

    // indicate that the data structures are up to date.
    __evidence_changes.clear();
    __is_new_jt_needed = false;
  }

  /// prepares inference when the latter is in OutdatedBNStructure state
  template < typename GUM_SCALAR >
  INLINE void LazyPropagation< GUM_SCALAR >::_updateOutdatedBNStructure() {
    // check if a new JT is really needed. If so, create it
    if (__isNewJTNeeded()) {
      __createNewJT();
    } else {
      // here, we can answer the next queries without reconstructing all the
      // junction tree. All we need to do is to indicate that we should
      // update the potentials and messages for these queries
      _updateOutdatedBNPotentials();
    }
  }

  /// invalidate all the messages sent from a given clique
  template < typename GUM_SCALAR >
  void LazyPropagation< GUM_SCALAR >::__diffuseMessageInvalidations(
     NodeId from_id, NodeId to_id, NodeSet& invalidated_cliques) {
    // invalidate the current clique
    invalidated_cliques.insert(to_id);

    // invalidate the current arc
    const Arc arc(from_id, to_id);
    bool& message_computed = __messages_computed[arc];
    if (message_computed) {
      message_computed = false;
      __separator_potentials[arc].clear();
      if (__created_potentials.exists(arc)) {
        auto& arc_created_potentials = __created_potentials[arc];
        for (const auto pot : arc_created_potentials)
          delete pot;
        arc_created_potentials.clear();
      }

      // go on with the diffusion
      for (const auto node_id : __JT->neighbours(to_id)) {
        if (node_id != from_id)
          __diffuseMessageInvalidations(to_id, node_id, invalidated_cliques);
      }
    }
  }

  /// prepares inference when the latter is in OutdatedBNPotentials state
  template < typename GUM_SCALAR >
  void LazyPropagation< GUM_SCALAR >::_updateOutdatedBNPotentials() {
    // compute the set of CPTs that were projected due to hard evidence and
    // whose hard evidence has changed, so that they need a new projection.
    // By the way, remove these CPTs since they are no longer needed.
    // Here only the values of the hard evidence can have changed (else a
    // fully new join tree would have been computed).
    // Note also that we know that the CPTs still contain some variable(s) after
    // the projection (else they should be constants)
    NodeSet hard_nodes_changed(__hard_ev_nodes.size());
    for (const auto node : __hard_ev_nodes)
      if (__evidence_changes.exists(node)) hard_nodes_changed.insert(node);

    NodeSet nodes_with_projected_CPTs_changed;
    const auto& bn = this->BN();
    for (auto pot_iter = __hard_ev_projected_CPTs.beginSafe();
         pot_iter != __hard_ev_projected_CPTs.endSafe();
         ++pot_iter) {
      for (const auto var : bn.cpt(pot_iter.key()).variablesSequence()) {
        if (hard_nodes_changed.contains(bn.nodeId(*var))) {
          nodes_with_projected_CPTs_changed.insert(pot_iter.key());
          delete pot_iter.val();
          __clique_potentials[__node_to_clique[pot_iter.key()]].erase(
             pot_iter.val());
          __hard_ev_projected_CPTs.erase(pot_iter);
          break;
        }
      }
    }


    // invalidate all the messages that are no longer correct: start from each
    // of the nodes whose soft evidence has changed and perform a diffusion from
    // the clique into which the soft evidence has been entered, indicating that
    // the messages spreading from this clique are now invalid. At the same time,
    // if there were potentials created on the arcs over which the messages were
    // sent, remove them from memory. For all the cliques that received some
    // projected CPT that should now be changed, do the same.
    NodeSet invalidated_cliques(__JT->size());
    for (const auto& pair : __evidence_changes) {
      if (__node_to_clique.exists(pair.first)) {
        const auto clique = __node_to_clique[pair.first];
        invalidated_cliques.insert(clique);
        for (const auto neighbor : __JT->neighbours(clique)) {
          __diffuseMessageInvalidations(clique, neighbor, invalidated_cliques);
        }
      }
    }

    // now, add to the set of invalidated cliques those that contain projected
    // CPTs that were changed.
    for (const auto node : nodes_with_projected_CPTs_changed) {
      const auto clique = __node_to_clique[node];
      invalidated_cliques.insert(clique);
      for (const auto neighbor : __JT->neighbours(clique)) {
        __diffuseMessageInvalidations(clique, neighbor, invalidated_cliques);
      }
    }


    // now we shall remove all the posteriors that belong to the invalidated
    // cliques. First, cope only with the nodes that did not receive hard
    // evidence since the other nodes do not belong to the join tree
    for (auto iter = __target_posteriors.beginSafe();
         iter != __target_posteriors.endSafe();
         ++iter) {
      if (__graph.exists(iter.key())
          && (invalidated_cliques.exists(__node_to_clique[iter.key()]))) {
        delete iter.val();
        __target_posteriors.erase(iter);
      }
    }

    // now cope with the nodes that received hard evidence
    for (auto iter = __target_posteriors.beginSafe();
         iter != __target_posteriors.endSafe();
         ++iter) {
      if (hard_nodes_changed.contains(iter.key())) {
        delete iter.val();
        __target_posteriors.erase(iter);
      }
    }

    // finally, cope with joint targets
    for (auto iter = __joint_target_posteriors.beginSafe();
         iter != __joint_target_posteriors.endSafe();
         ++iter) {
      if (invalidated_cliques.exists(__joint_target_to_clique[iter.key()])) {
        delete iter.val();
        __joint_target_posteriors.erase(iter);
      }
    }


    // remove all the evidence that were entered into __node_to_soft_evidence
    // and __clique_potentials and add the new soft ones
    for (const auto& pot_pair : __node_to_soft_evidence) {
      __clique_potentials[__node_to_clique[pot_pair.first]].erase(pot_pair.second);
    }
    __node_to_soft_evidence.clear();

    const auto& evidence = this->evidence();
    for (const auto node : this->softEvidenceNodes()) {
      __node_to_soft_evidence.insert(node, evidence[node]);
      __clique_potentials[__node_to_clique[node]].insert(evidence[node]);
    }


    // Now add the projections of the CPTs due to newly changed hard evidence:
    // if we are performing _updateOutdatedBNPotentials, this means that the
    // set of nodes that received hard evidence has not changed, only
    // their instantiations can have changed. So, if there is an entry
    // for node in __constants, there will still be such an entry after
    // performing the new projections. Idem for __hard_ev_projected_CPTs
    for (const auto node : nodes_with_projected_CPTs_changed) {
      // perform the projection with a combine and project instance
      const Potential< GUM_SCALAR >& cpt = bn.cpt(node);
      const auto& variables = cpt.variablesSequence();
      Set< const DiscreteVariable* > hard_variables;
      __PotentialSet marg_cpt_set{&cpt};
      for (const auto var : variables) {
        NodeId xnode = bn.nodeId(*var);
        if (__hard_ev_nodes.exists(xnode)) {
          marg_cpt_set.insert(evidence[xnode]);
          hard_variables.insert(var);
        }
      }

      // perform the combination of those potentials and their projection
      MultiDimCombineAndProjectDefault< GUM_SCALAR, Potential >
         combine_and_project(__combination_op, LPNewprojPotential);
      __PotentialSet new_cpt_list =
         combine_and_project.combineAndProject(marg_cpt_set, hard_variables);

      // there should be only one potential in new_cpt_list
      if (new_cpt_list.size() != 1) {
        // remove the CPTs created to avoid memory leaks
        for (const auto pot : new_cpt_list) {
          if (!marg_cpt_set.contains(pot)) delete pot;
        }
        GUM_ERROR(FatalError,
                  "the projection of a potential containing "
                     << "hard evidence is empty!");
      }
      const Potential< GUM_SCALAR >* projected_cpt = *(new_cpt_list.begin());
      __clique_potentials[__node_to_clique[node]].insert(projected_cpt);
      __hard_ev_projected_CPTs.insert(node, projected_cpt);
    }

    // update the constants
    const auto& hard_evidence = this->hardEvidence();
    for (auto& node_cst : __constants) {
      const Potential< GUM_SCALAR >& cpt = bn.cpt(node_cst.first);
      const auto& variables = cpt.variablesSequence();
      Instantiation inst;
      for (const auto var : variables)
        inst << *var;
      for (const auto var : variables) {
        inst.chgVal(var, hard_evidence[bn.nodeId(*var)]);
      }
      node_cst.second = cpt.get(inst);
    }

    // indicate that all changes have been performed
    __evidence_changes.clear();
  }

  /// compute a root for each connected component of __JT
  template < typename GUM_SCALAR >
  void LazyPropagation< GUM_SCALAR >::__computeJoinTreeRoots() {
    // get the set of cliques in which we can find the targets and joint_targets
    NodeSet clique_targets;
    for (const auto node : this->targets()) {
      try {
        clique_targets.insert(__node_to_clique[node]);
      } catch (Exception&) {}
    }
    for (const auto& set : this->jointTargets()) {
      try {
        clique_targets.insert(__joint_target_to_clique[set]);
      } catch (Exception&) {}
    }

    // put these cliques and their domain sizes into a vector
    std::vector< std::pair< NodeId, Size > > possible_roots(clique_targets.size());
    const auto& bn = this->BN();
    std::size_t i = 0;
    for (const auto clique_id : clique_targets) {
      const auto& clique = __JT->clique(clique_id);
      Size dom_size = 1;
      for (const auto node : clique) {
        dom_size *= bn.variable(node).domainSize();
      }
      possible_roots[i] = std::pair< NodeId, Size >(clique_id, dom_size);
      ++i;
    }

    // sort the cliques by increasing domain size
    std::sort(possible_roots.begin(),
              possible_roots.end(),
              [](const std::pair< NodeId, Size >& a,
                 const std::pair< NodeId, Size >& b) -> bool {
                return a.second < b.second;
              });

    // pick up the clique with the smallest size in each connected component
    NodeProperty< bool > marked = __JT->nodesProperty(false);
    std::function< void(NodeId, NodeId) > diffuse_marks =
       [&marked, &diffuse_marks, this](NodeId node, NodeId from) {
         if (!marked[node]) {
           marked[node] = true;
           for (const auto neigh : __JT->neighbours(node))
             if ((neigh != from) && !marked[neigh]) diffuse_marks(neigh, node);
         }
       };
    __roots.clear();
    for (const auto xclique : possible_roots) {
      NodeId clique = xclique.first;
      if (!marked[clique]) {
        __roots.insert(clique);
        diffuse_marks(clique, clique);
      }
    }
  }

  // performs the collect phase of Lazy Propagation
  template < typename GUM_SCALAR >
  void LazyPropagation< GUM_SCALAR >::__collectMessage(NodeId id,
                                                       NodeId from) {
    for (const auto other : __JT->neighbours(id)) {
      if ((other != from) && !__messages_computed[Arc(other, id)])
        __collectMessage(other, id);
    }

    if ((id != from) && !__messages_computed[Arc(id, from)]) {
      __produceMessage(id, from);
    }
  }

  // find the potentials d-connected to a set of variables
  template < typename GUM_SCALAR >
  void LazyPropagation< GUM_SCALAR >::__findRelevantPotentialsGetAll(
     Set< const Potential< GUM_SCALAR >* >& pot_list,
     Set< const DiscreteVariable* >& kept_vars) {}

  // find the potentials d-connected to a set of variables
  template < typename GUM_SCALAR >
  void LazyPropagation< GUM_SCALAR >::__findRelevantPotentialsWithdSeparation(
     Set< const Potential< GUM_SCALAR >* >& pot_list,
     Set< const DiscreteVariable* >& kept_vars) {
    // find the node ids of the kept variables
    NodeSet kept_ids;
    const auto& bn = this->BN();
    for (const auto var : kept_vars) {
      kept_ids.insert(bn.nodeId(*var));
    }

    // determine the set of potentials d-connected with the kept variables
    NodeSet requisite_nodes;
    BayesBall::requisiteNodes(bn.dag(),
                              kept_ids,
                              this->hardEvidenceNodes(),
                              this->softEvidenceNodes(),
                              requisite_nodes);
    for (auto iter = pot_list.beginSafe(); iter != pot_list.endSafe(); ++iter) {
      const Sequence< const DiscreteVariable* >& vars =
         (**iter).variablesSequence();
      bool found = false;
      for (const auto var : vars) {
        if (requisite_nodes.exists(bn.nodeId(*var))) {
          found = true;
          break;
        }
      }

      if (!found) { pot_list.erase(iter); }
    }
  }


  // find the potentials d-connected to a set of variables
  template < typename GUM_SCALAR >
  void LazyPropagation< GUM_SCALAR >::__findRelevantPotentialsWithdSeparation2(
     Set< const Potential< GUM_SCALAR >* >& pot_list,
     Set< const DiscreteVariable* >& kept_vars) {
    // find the node ids of the kept variables
    NodeSet kept_ids;
    const auto& bn = this->BN();
    for (const auto var : kept_vars) {
      kept_ids.insert(bn.nodeId(*var));
    }

    // determine the set of potentials d-connected with the kept variables
    BayesBall::relevantPotentials(bn,
                                  kept_ids,
                                  this->hardEvidenceNodes(),
                                  this->softEvidenceNodes(),
                                  pot_list);
  }

  // find the potentials d-connected to a set of variables
  template < typename GUM_SCALAR >
  void LazyPropagation< GUM_SCALAR >::__findRelevantPotentialsWithdSeparation3(
     Set< const Potential< GUM_SCALAR >* >& pot_list,
     Set< const DiscreteVariable* >& kept_vars) {
    // find the node ids of the kept variables
    NodeSet kept_ids;
    const auto& bn = this->BN();
    for (const auto var : kept_vars) {
      kept_ids.insert(bn.nodeId(*var));
    }

    // determine the set of potentials d-connected with the kept variables
    dSeparation dsep;
    dsep.relevantPotentials(bn,
                            kept_ids,
                            this->hardEvidenceNodes(),
                            this->softEvidenceNodes(),
                            pot_list);
  }

  // find the potentials d-connected to a set of variables
  template < typename GUM_SCALAR >
  void LazyPropagation< GUM_SCALAR >::__findRelevantPotentialsXX(
     Set< const Potential< GUM_SCALAR >* >& pot_list,
     Set< const DiscreteVariable* >& kept_vars) {
    switch (__find_relevant_potential_type) {
      case RelevantPotentialsFinderType::DSEP_BAYESBALL_POTENTIALS:
        __findRelevantPotentialsWithdSeparation2(pot_list, kept_vars);
        break;

      case RelevantPotentialsFinderType::DSEP_BAYESBALL_NODES:
        __findRelevantPotentialsWithdSeparation(pot_list, kept_vars);
        break;

      case RelevantPotentialsFinderType::DSEP_KOLLER_FRIEDMAN_2009:
        __findRelevantPotentialsWithdSeparation3(pot_list, kept_vars);
        break;

      case RelevantPotentialsFinderType::FIND_ALL:
        __findRelevantPotentialsGetAll(pot_list, kept_vars);
        break;

      default: GUM_ERROR(FatalError, "not implemented yet");
    }
  }

  // remove barren variables
  template < typename GUM_SCALAR >
  Set< const Potential< GUM_SCALAR >* >
     LazyPropagation< GUM_SCALAR >::__removeBarrenVariables(
        __PotentialSet& pot_list, Set< const DiscreteVariable* >& del_vars) {
    // remove from del_vars the variables that received some evidence:
    // only those that did not receive evidence can be barren variables
    Set< const DiscreteVariable* > the_del_vars = del_vars;
    for (auto iter = the_del_vars.beginSafe(); iter != the_del_vars.endSafe();
         ++iter) {
      NodeId id = this->BN().nodeId(**iter);
      if (this->hardEvidenceNodes().exists(id)
          || this->softEvidenceNodes().exists(id)) {
        the_del_vars.erase(iter);
      }
    }

    // assign to each random variable the set of potentials that contain it
    HashTable< const DiscreteVariable*, __PotentialSet > var2pots;
    __PotentialSet empty_pot_set;
    for (const auto pot : pot_list) {
      const Sequence< const DiscreteVariable* >& vars = pot->variablesSequence();
      for (const auto var : vars) {
        if (the_del_vars.exists(var)) {
          if (!var2pots.exists(var)) { var2pots.insert(var, empty_pot_set); }
          var2pots[var].insert(pot);
        }
      }
    }

    // each variable that belongs to only one potential is a barren variable.
    // assign to each potential with barren nodes its set of barren variables
    HashTable< const Potential< GUM_SCALAR >*, Set< const DiscreteVariable* > >
       pot2barren_var;
    Set< const DiscreteVariable* > empty_var_set;
    for (const auto elt : var2pots) {
      if (elt.second.size() == 1) {   // here we have a barren variable
        const Potential< GUM_SCALAR >* pot = *(elt.second.begin());
        if (!pot2barren_var.exists(pot)) {
          pot2barren_var.insert(pot, empty_var_set);
        }
        pot2barren_var[pot].insert(elt.first);   // insert the barren variable
      }
    }

    // for each potential with barren variables, marginalize them out.
    // if the potential contains only barren variables, simply remove it from
    // the set of potentials, else just project the potential
    MultiDimProjection< GUM_SCALAR, Potential > projector(LPNewprojPotential);
    __PotentialSet projected_pots;
    for (const auto elt : pot2barren_var) {
      // remove the current potential from pot_list as, anyway, we will
      // change it
      const Potential< GUM_SCALAR >* pot = elt.first;
      pot_list.erase(pot);

      // check whether we need to add a projected new potential or not (i.e.,
      // whether there exist non-barren variables or not)
      if (pot->variablesSequence().size() != elt.second.size()) {
        auto new_pot = projector.project(*pot, elt.second);
        pot_list.insert(new_pot);
        projected_pots.insert(new_pot);
      }
    }

    return projected_pots;
  }

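  /* Illustration: in a chain A -> B where B is neither a target nor observed,
     B is barren for a query on A, since sum_B P(B | A) = 1 for every value
     of A; the potential P(B | A) can thus be dropped without changing the
     inference result. */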
  // remove variables del_vars from the list of potentials pot_list
  template < typename GUM_SCALAR >
  Set< const Potential< GUM_SCALAR >* >
     LazyPropagation< GUM_SCALAR >::__marginalizeOut(
        Set< const Potential< GUM_SCALAR >* > pot_list,
        Set< const DiscreteVariable* >& del_vars,
        Set< const DiscreteVariable* >& kept_vars) {
    // use d-separation analysis to check which potentials shall be combined
    __findRelevantPotentialsXX(pot_list, kept_vars);

    // remove the potentials corresponding to barren variables if we want
    // to exploit barren nodes
    __PotentialSet barren_projected_potentials;
    if (__barren_nodes_type == FindBarrenNodesType::FIND_BARREN_NODES) {
      barren_projected_potentials = __removeBarrenVariables(pot_list, del_vars);
    }

    // create a combine and project operator that will perform the
    // marginalization
    MultiDimCombineAndProjectDefault< GUM_SCALAR, Potential > combine_and_project(
       __combination_op, __projection_op);
    __PotentialSet new_pot_list =
       combine_and_project.combineAndProject(pot_list, del_vars);

    // remove all the potentials that were created due to projections of
    // barren nodes and that are not part of the new_pot_list: these
    // potentials were just temporary potentials
    for (auto iter = barren_projected_potentials.beginSafe();
         iter != barren_projected_potentials.endSafe();
         ++iter) {
      if (!new_pot_list.exists(*iter)) delete *iter;
    }

    // remove all the potentials that have no dimension
    for (auto iter_pot = new_pot_list.beginSafe();
         iter_pot != new_pot_list.endSafe();
         ++iter_pot) {
      if ((*iter_pot)->variablesSequence().size() == 0) {
        // as we have already marginalized out variables that received
        // evidence, it may be the case that, after combining and projecting,
        // some potentials might be empty. In this case, we shall keep their
        // constant and remove them from memory
        // # TODO: keep the constants!
        delete *iter_pot;
        new_pot_list.erase(iter_pot);
      }
    }

    return new_pot_list;
  }

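  /* In symbols (sketch): for adjacent cliques C and D with separator
     S = C intersect D, the message produced below is

        phi_{C -> D} = project_S( prod_k phi_k ),

     where the phi_k are the potentials stored in C plus the messages
     phi_{E -> C} received from every neighbour E != D, and project_S sums
     out each variable of C not in S. __marginalizeOut above implements the
     combination and projection. */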
  // creates the message sent by clique from_id to clique to_id
  template < typename GUM_SCALAR >
  void LazyPropagation< GUM_SCALAR >::__produceMessage(NodeId from_id,
                                                       NodeId to_id) {
    // get the potentials of the clique.
    __PotentialSet pot_list = __clique_potentials[from_id];

    // add the messages sent by adjacent nodes to from_id
    for (const auto other_id : __JT->neighbours(from_id))
      if (other_id != to_id)
        pot_list += __separator_potentials[Arc(other_id, from_id)];

    // get the set of variables that need be removed from the potentials
    const NodeSet& from_clique = __JT->clique(from_id);
    const NodeSet& separator = __JT->separator(from_id, to_id);
    Set< const DiscreteVariable* > del_vars(from_clique.size());
    Set< const DiscreteVariable* > kept_vars(separator.size());
    const auto& bn = this->BN();

    for (const auto node : from_clique) {
      if (!separator.contains(node)) {
        del_vars.insert(&(bn.variable(node)));
      } else {
        kept_vars.insert(&(bn.variable(node)));
      }
    }

    // pot_list now contains all the potentials to multiply and marginalize
    // => combine the messages
    __PotentialSet new_pot_list = __marginalizeOut(pot_list, del_vars, kept_vars);

    // keep track of the newly created potentials, but first remove all the
    // potentials that are equal to ones (multiplying by such potentials
    // leaves the product unchanged, so they are useless)
    const Arc arc(from_id, to_id);
    for (auto iter = new_pot_list.beginSafe(); iter != new_pot_list.endSafe();
         ++iter) {
      const auto pot = *iter;
      if (pot->variablesSequence().size() == 1) {
        bool is_all_ones = true;
        for (Instantiation inst(*pot); !inst.end(); ++inst) {
          if (pot->get(inst) < __1_minus_epsilon) {
            is_all_ones = false;
            break;
          }
        }
        if (is_all_ones) {
          if (!pot_list.exists(pot)) delete pot;
          new_pot_list.erase(iter);
          continue;
        }
      }

      if (!pot_list.exists(pot)) {
        if (!__created_potentials.exists(arc))
          __created_potentials.insert(arc, __PotentialSet());
        __created_potentials[arc].insert(pot);
      }
    }

    __separator_potentials[arc] = std::move(new_pot_list);
    __messages_computed[arc] = true;
  }

  // performs a whole inference
  template < typename GUM_SCALAR >
  INLINE void LazyPropagation< GUM_SCALAR >::_makeInference() {
    // collect messages for all single targets
    for (const auto node : this->targets()) {
      // perform only collects in the join tree for nodes that have
      // not received hard evidence (those that received hard evidence were
      // not included into the join tree for speed-up reasons)
      if (__graph.exists(node)) {
        __collectMessage(__node_to_clique[node], __node_to_clique[node]);
      }
    }

    // collect messages for all set targets
    // by parsing __joint_target_to_clique, we ensure that the cliques that
    // are referenced belong to the join tree (even if some of the nodes in
    // their associated joint_target do not belong to __graph)
    for (const auto set : __joint_target_to_clique)
      __collectMessage(set.second, set.second);
  }

  /// returns a fresh unnormalized potential equal to P(id, evidence)
  template < typename GUM_SCALAR >
  Potential< GUM_SCALAR >*
     LazyPropagation< GUM_SCALAR >::_unnormalizedJointPosterior(NodeId id) {
    const auto& bn = this->BN();

    // hard evidence do not belong to the join tree
    // # TODO: check for sets of inconsistent hard evidence
    if (this->hardEvidenceNodes().contains(id)) {
      return new Potential< GUM_SCALAR >(*(this->evidence()[id]));
    }

    // if we still need to perform some inference task, do it (this should
    // already have been done by _makeInference)
    NodeId clique_of_id = __node_to_clique[id];
    __collectMessage(clique_of_id, clique_of_id);

    // now we just need to create the product of the potentials of the clique
    // containing id with the messages received by this clique and
    // marginalize out all variables except id
    __PotentialSet pot_list = __clique_potentials[clique_of_id];

    // add the messages sent by adjacent nodes to targetClique
    for (const auto other : __JT->neighbours(clique_of_id))
      pot_list += __separator_potentials[Arc(other, clique_of_id)];

    // get the set of variables that need be removed from the potentials
    const NodeSet& nodes = __JT->clique(clique_of_id);
    Set< const DiscreteVariable* > kept_vars{&(bn.variable(id))};
    Set< const DiscreteVariable* > del_vars(nodes.size());
    for (const auto node : nodes) {
      if (node != id) del_vars.insert(&(bn.variable(node)));
    }

    // pot_list now contains all the potentials to multiply and marginalize
    // => combine the messages
    __PotentialSet new_pot_list = __marginalizeOut(pot_list, del_vars, kept_vars);
    Potential< GUM_SCALAR >* joint = nullptr;

    if (new_pot_list.size() == 1) {
      joint = const_cast< Potential< GUM_SCALAR >* >(*(new_pot_list.begin()));
      // if pot already existed, create a copy, so that we can put it into
      // the __target_posteriors property
      if (pot_list.exists(joint)) {
        joint = new Potential< GUM_SCALAR >(*joint);
      } else {
        // remove the joint from new_pot_list so that it will not be
        // removed just after the else block
        new_pot_list.clear();
      }
    } else {
      MultiDimCombinationDefault< GUM_SCALAR, Potential > fast_combination(
         __combination_op);
      joint = fast_combination.combine(new_pot_list);
    }

    // remove the potentials that were created in new_pot_list
    for (const auto pot : new_pot_list)
      if (!pot_list.exists(pot)) delete pot;

    // check that the joint posterior is different from a 0 vector: this would
    // indicate that some hard evidence are not compatible (their joint
    // probability is equal to 0)
    bool nonzero_found = false;
    for (Instantiation inst(*joint); !inst.end(); ++inst) {
      if (joint->get(inst)) {
        nonzero_found = true;
        break;
      }
    }
    if (!nonzero_found) {
      // remove joint from memory to avoid memory leaks
      delete joint;
      GUM_ERROR(IncompatibleEvidence,
                "some evidence entered into the Bayes "
                "net are incompatible (their joint proba = 0)");
    }
    return joint;
  }

  /// returns the posterior of a given variable
  template < typename GUM_SCALAR >
  const Potential< GUM_SCALAR >&
     LazyPropagation< GUM_SCALAR >::_posterior(NodeId id) {
    // check if we have already computed the posterior
    if (__target_posteriors.exists(id)) { return *(__target_posteriors[id]); }

    // compute the joint posterior and normalize
    auto joint = _unnormalizedJointPosterior(id);
    joint->normalize();
    __target_posteriors.insert(id, joint);

    return *joint;
  }

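  /* Usage note (sketch, with "inf" a hypothetical LazyPropagation instance):
     posteriors are cached in __target_posteriors, so with unchanged evidence
     only the first query pays for the computation:

       const auto& p1 = inf.posterior(id);   // collect + combination
       const auto& p2 = inf.posterior(id);   // cache lookup, same table
  */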
  // returns a fresh unnormalized potential equal to P(set, evidence)
  template < typename GUM_SCALAR >
  Potential< GUM_SCALAR >*
     LazyPropagation< GUM_SCALAR >::_unnormalizedJointPosterior(
        const NodeSet& set) {
    // hard evidence do not belong to the join tree, so extract the nodes
    // from targets that are not hard evidence
    NodeSet targets = set, hard_ev_nodes;
    for (const auto node : this->hardEvidenceNodes()) {
      if (targets.contains(node)) {
        targets.erase(node);
        hard_ev_nodes.insert(node);
      }
    }

    // if all the nodes have received hard evidence, then compute the
    // joint posterior directly by multiplying the hard evidence potentials
    const auto& evidence = this->evidence();
    if (targets.empty()) {
      __PotentialSet pot_list;
      for (const auto node : set) {
        pot_list.insert(evidence[node]);
      }
      if (pot_list.size() == 1) {
        auto pot = new Potential< GUM_SCALAR >(**(pot_list.begin()));
        return pot;
      } else {
        MultiDimCombinationDefault< GUM_SCALAR, Potential > fast_combination(
           __combination_op);
        return fast_combination.combine(pot_list);
      }
    }


    // if we still need to perform some inference task, do it: so, first,
    // determine the clique on which we should perform collect to compute
    // the unnormalized joint posterior of a set of nodes containing "targets"
    NodeId clique_of_set;
    try {
      clique_of_set = __joint_target_to_clique[set];
    } catch (NotFound&) {
      // here, the precise set of targets does not belong to the set of targets
      // defined by the user. So we will try to find a clique in the junction
      // tree that contains "targets":

      // 1/ we should check that all the nodes belong to the join tree
      for (const auto node : targets) {
        if (!__graph.exists(node)) {
          GUM_ERROR(UndefinedElement, node << " is not a target node");
        }
      }

      // 2/ the clique created by the first eliminated node among target is the
      // one we are looking for
      const std::vector< NodeId >& JT_elim_order =
         __triangulation->eliminationOrder();
      NodeProperty< int > elim_order(Size(JT_elim_order.size()));
      for (std::size_t i = std::size_t(0), size = JT_elim_order.size(); i < size;
           ++i)
        elim_order.insert(JT_elim_order[i], (int)i);
      NodeId first_eliminated_node = *(targets.begin());
      int elim_number = elim_order[first_eliminated_node];
      for (const auto node : targets) {
        if (elim_order[node] < elim_number) {
          elim_number = elim_order[node];
          first_eliminated_node = node;
        }
      }

      clique_of_set =
         __triangulation->createdJunctionTreeClique(first_eliminated_node);


      // 3/ check that clique_of_set contains all the nodes in the target
      const NodeSet& clique_nodes = __JT->clique(clique_of_set);
      for (const auto node : targets) {
        if (!clique_nodes.contains(node)) {
          GUM_ERROR(UndefinedElement, set << " is not a joint target");
        }
      }

      // add the discovered clique to __joint_target_to_clique
      __joint_target_to_clique.insert(set, clique_of_set);
    }

    // now perform a collect on the clique
    __collectMessage(clique_of_set, clique_of_set);

    // now we just need to create the product of the potentials of the clique
    // containing set with the messages received by this clique and
    // marginalize out all variables except set
    __PotentialSet pot_list = __clique_potentials[clique_of_set];

    // add the messages sent by adjacent nodes to targetClique
    for (const auto other : __JT->neighbours(clique_of_set))
      pot_list += __separator_potentials[Arc(other, clique_of_set)];

    // get the set of variables that need be removed from the potentials
    const NodeSet& nodes = __JT->clique(clique_of_set);
    Set< const DiscreteVariable* > del_vars(nodes.size());
    Set< const DiscreteVariable* > kept_vars(targets.size());
    const auto& bn = this->BN();
    for (const auto node : nodes) {
      if (!targets.contains(node)) {
        del_vars.insert(&(bn.variable(node)));
      } else {
        kept_vars.insert(&(bn.variable(node)));
      }
    }

    // pot_list now contains all the potentials to multiply and marginalize
    // => combine the messages
    __PotentialSet new_pot_list = __marginalizeOut(pot_list, del_vars, kept_vars);
    Potential< GUM_SCALAR >* joint = nullptr;

    if ((new_pot_list.size() == 1) && hard_ev_nodes.empty()) {
      joint = const_cast< Potential< GUM_SCALAR >* >(*(new_pot_list.begin()));

      // if pot already existed, create a copy, so that we can put it into
      // the __target_posteriors property
      if (pot_list.exists(joint)) {
        joint = new Potential< GUM_SCALAR >(*joint);
      } else {
        // remove the joint from new_pot_list so that it will not be
        // removed just after the next else block
        new_pot_list.clear();
      }
    } else {
      // combine all the potentials in new_pot_list with all the hard evidence
      // of the nodes in set
      __PotentialSet new_new_pot_list = new_pot_list;
      for (const auto node : hard_ev_nodes) {
        new_new_pot_list.insert(evidence[node]);
      }
      MultiDimCombinationDefault< GUM_SCALAR, Potential > fast_combination(
         __combination_op);
      joint = fast_combination.combine(new_new_pot_list);
    }

    // remove the potentials that were created in new_pot_list
    for (const auto pot : new_pot_list)
      if (!pot_list.exists(pot)) delete pot;

    // check that the joint posterior is different from a 0 vector: this would
    // indicate that some hard evidence are not compatible
    bool nonzero_found = false;
    for (Instantiation inst(*joint); !inst.end(); ++inst) {
      if ((*joint)[inst]) {
        nonzero_found = true;
        break;
      }
    }
    if (!nonzero_found) {
      // remove joint from memory to avoid memory leaks
      delete joint;
      GUM_ERROR(IncompatibleEvidence,
                "some evidence entered into the Bayes "
                "net are incompatible (their joint proba = 0)");
    }

    return joint;
  }

  /// returns the posterior of a declared target set
  template < typename GUM_SCALAR >
  const Potential< GUM_SCALAR >&
     LazyPropagation< GUM_SCALAR >::_jointPosterior(const NodeSet& set) {
    // check if we have already computed the posterior
    if (__joint_target_posteriors.exists(set)) {
      return *(__joint_target_posteriors[set]);
    }

    // compute the joint posterior and normalize
    auto joint = _unnormalizedJointPosterior(set);
    joint->normalize();
    __joint_target_posteriors.insert(set, joint);

    return *joint;
  }

1642  template < typename GUM_SCALAR >
1643  const Potential< GUM_SCALAR >& LazyPropagation< GUM_SCALAR >::_jointPosterior(
1644  const NodeSet& wanted_target, const NodeSet& declared_target) {
1645  // check if we have already computed the posterior of wanted_target
1646  if (__joint_target_posteriors.exists(wanted_target))
1647  return *(__joint_target_posteriors[wanted_target]);
1648 
1649  // here, we will have to compute the posterior of declared_target and
1650  // marginalize out all the variables that do not belong to wanted_target
1651 
1652  // check if we have already computed the posterior of declared_target
1653  if (!__joint_target_posteriors.exists(declared_target)) {
1654  _jointPosterior(declared_target);
1655  }
1656 
1657  // marginalize out all the variables that do not belong to wanted_target
1658  const auto& bn = this->BN();
1659  Set< const DiscreteVariable* > del_vars;
1660  for (const auto node : declared_target)
1661  if (!wanted_target.contains(node)) del_vars.insert(&(bn.variable(node)));
1662  Potential< GUM_SCALAR >* pot = new Potential< GUM_SCALAR >(
1663  __joint_target_posteriors[declared_target]->margSumOut(del_vars));
1664 
1665  // save the result into the cache
1666  __joint_target_posteriors.insert(wanted_target, pot);
1667 
1668  return *pot;
1669  }
1670 
1671 
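  // The marginalization above relies on Potential::margSumOut, which sums a
  // potential over a given set of variables. A minimal sketch (joint and the
  // variable name "C" are illustrative):
  //   Set< const DiscreteVariable* > del_vars;
  //   del_vars.insert(&bn.variable(bn.idFromName("C")));
  //   Potential< double > marg = joint.margSumOut(del_vars);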
1672  template < typename GUM_SCALAR >
1673  GUM_SCALAR LazyPropagation< GUM_SCALAR >::evidenceProbability() {
1674  // here, we should check that __find_relevant_potential_type is equal to
1675  // FIND_ALL. Otherwise, the computations could be wrong.
1676  RelevantPotentialsFinderType old_relevant_type =
1677  __find_relevant_potential_type;
1678 
1679  // if the relevant potentials finder is not equal to FIND_ALL, all the
1680  // current computations may lead to incorrect results, so we shall
1681  // discard them
1682  if (old_relevant_type != RelevantPotentialsFinderType::FIND_ALL) {
1683  __find_relevant_potential_type = RelevantPotentialsFinderType::FIND_ALL;
1684  __is_new_jt_needed = true;
1685  _setOutdatedBNStructureState();
1686  }
1687 
1688  // perform inference in each connected component
1689  this->makeInference();
1690 
1691  // for each connected component, select a variable X and compute the
1692  // joint probability of X and the evidence e. Then marginalize X out to
1693  // get p(e) in this connected component. Finally, multiply all the p(e)
1694  // values obtained and the constants stored in __constants: the result
1695  // is the probability of the evidence
1696 
1697  GUM_SCALAR prob_ev = 1;
1698  for (const auto root : __roots) {
1699  // get a node in the clique
1700  NodeId node = *(__JT->clique(root).begin());
1701  Potential< GUM_SCALAR >* tmp = _unnormalizedJointPosterior(node);
1702  GUM_SCALAR sum = 0;
1703  for (Instantiation iter(*tmp); !iter.end(); ++iter)
1704  sum += tmp->get(iter);
1705  prob_ev *= sum;
1706  delete tmp;
1707  }
1708 
1709  for (const auto& projected_cpt : __constants)
1710  prob_ev *= projected_cpt.second;
1711 
1712  // put back the relevant potential type selected by the user
1713  __find_relevant_potential_type = old_relevant_type;
1714 
1715  return prob_ev;
1716  }
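  // Usage sketch for evidenceProbability; the variable name "A" and the
  // evidence value are illustrative, not taken from this file:
  //   LazyPropagation< double > ie(&bn);
  //   ie.addEvidence(bn.idFromName("A"), 1);  // hard evidence: A = label 1
  //   double p_e = ie.evidenceProbability();  // probability of all evidence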
1717 
1718 } /* namespace gum */
1719 
1720 #endif // DOXYGEN_SHOULD_SKIP_THIS