/**
 * aGrUM 0.16.0: lazyPropagation_tpl.h
 *
 * Copyright 2005-2019 Pierre-Henri WUILLEMIN et Christophe GONZALES (LIP6)
 * {prenom.nom}_at_lip6.fr
 */
#ifndef DOXYGEN_SHOULD_SKIP_THIS

#  include <algorithm>


namespace gum {
  // default constructor
  template < typename GUM_SCALAR >
  INLINE LazyPropagation< GUM_SCALAR >::LazyPropagation(
     const IBayesNet< GUM_SCALAR >* BN,
     RelevantPotentialsFinderType   relevant_type,
     FindBarrenNodesType            barren_type,
     bool                           use_binary_join_tree) :
      JointTargetedInference< GUM_SCALAR >(BN),
      EvidenceInference< GUM_SCALAR >(BN),
      __use_binary_join_tree(use_binary_join_tree) {
    // sets the relevant potential and the barren nodes finding algorithm
    setRelevantPotentialsFinderType(relevant_type);
    setFindBarrenNodesType(barren_type);

    // create a default triangulation (the user can change it afterwards)
    __triangulation = new DefaultTriangulation;

    // for debugging purposes
    GUM_CONSTRUCTOR(LazyPropagation);
  }
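
  // A minimal usage sketch (illustrative, not part of the original file; the
  // network content and the node names "A" and "B" are assumptions):
  //
  //   gum::BayesNet< double > bn;
  //   // ... fill bn with variables, arcs and CPTs ...
  //   gum::LazyPropagation< double > inf(&bn);
  //   inf.addEvidence(bn.idFromName("A"), 0);   // hypothetical node "A"
  //   inf.makeInference();
  //   std::cout << inf.posterior(bn.idFromName("B")) << std::endl;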


  // destructor
  template < typename GUM_SCALAR >
  INLINE LazyPropagation< GUM_SCALAR >::~LazyPropagation() {
    // remove all the potentials created during the last message passing
    for (const auto& pots : __created_potentials)
      for (const auto pot : pots.second)
        delete pot;

    // remove the potentials created after removing the nodes that received
    // hard evidence
    for (const auto& pot : __hard_ev_projected_CPTs)
      delete pot.second;

    // remove all the posteriors computed
    for (const auto& pot : __target_posteriors)
      delete pot.second;
    for (const auto& pot : __joint_target_posteriors)
      delete pot.second;

    // remove the junction tree and the triangulation algorithm
    if (__JT != nullptr) delete __JT;
    if (__junctionTree != nullptr) delete __junctionTree;
    delete __triangulation;

    // for debugging purposes
    GUM_DESTRUCTOR(LazyPropagation);
  }


  /// use a new triangulation algorithm
  template < typename GUM_SCALAR >
  INLINE void LazyPropagation< GUM_SCALAR >::setTriangulation(
     const Triangulation& new_triangulation) {
    delete __triangulation;
    __triangulation = new_triangulation.newFactory();
    __is_new_jt_needed = true;
    this->_setOutdatedBNStructureState();
  }
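
  // Sketch: swapping the triangulation heuristic on an engine "inf" as above
  // (the engine keeps its own copy, obtained through newFactory()):
  //
  //   gum::DefaultTriangulation triangulation;
  //   inf.setTriangulation(triangulation);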


  /// returns the join tree used for the last inference
  template < typename GUM_SCALAR >
  INLINE const JoinTree* LazyPropagation< GUM_SCALAR >::joinTree() {
    __createNewJT();

    return __JT;
  }

  /// returns the junction tree used for the last inference
  template < typename GUM_SCALAR >
  INLINE const JunctionTree* LazyPropagation< GUM_SCALAR >::junctionTree() {
    __createNewJT();

    return __junctionTree;
  }
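
  // Sketch: both accessors rebuild the tree before returning it, so they can
  // be called at any time (toString() is assumed from the usual gum graph
  // API):
  //
  //   const auto* jt = inf.joinTree();            // possibly binary join tree
  //   const auto* junction = inf.junctionTree();  // raw junction tree
  //   std::cout << junction->toString() << std::endl;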


  /// sets how we determine the relevant potentials to combine
  template < typename GUM_SCALAR >
  void LazyPropagation< GUM_SCALAR >::setRelevantPotentialsFinderType(
     RelevantPotentialsFinderType type) {
    if (type != __find_relevant_potential_type) {
      switch (type) {
        case RelevantPotentialsFinderType::DSEP_BAYESBALL_POTENTIALS:
          __findRelevantPotentials = &LazyPropagation<
             GUM_SCALAR >::__findRelevantPotentialsWithdSeparation2;
          break;

        case RelevantPotentialsFinderType::DSEP_BAYESBALL_NODES:
          __findRelevantPotentials = &LazyPropagation<
             GUM_SCALAR >::__findRelevantPotentialsWithdSeparation;
          break;

        case RelevantPotentialsFinderType::DSEP_KOLLER_FRIEDMAN_2009:
          __findRelevantPotentials = &LazyPropagation<
             GUM_SCALAR >::__findRelevantPotentialsWithdSeparation3;
          break;

        case RelevantPotentialsFinderType::FIND_ALL:
          __findRelevantPotentials =
             &LazyPropagation< GUM_SCALAR >::__findRelevantPotentialsGetAll;
          break;

        default:
          GUM_ERROR(InvalidArgument,
                    "setRelevantPotentialsFinderType for type "
                       << (unsigned int)type << " is not implemented yet");
      }

      __find_relevant_potential_type = type;

      // indicate that all messages need be reconstructed to take into account
      // the change in d-separation analysis
      __invalidateAllMessages();
    }
  }
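
  // Sketch: selecting the d-separation analysis used to discard potentials
  // (FIND_ALL disables the analysis; the enum members mirror the cases
  // dispatched in __findRelevantPotentialsXX below):
  //
  //   inf.setRelevantPotentialsFinderType(
  //      gum::RelevantPotentialsFinderType::FIND_ALL);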


  /// sets the operator for performing the projections
  template < typename GUM_SCALAR >
  INLINE void LazyPropagation< GUM_SCALAR >::__setProjectionFunction(
     Potential< GUM_SCALAR >* (*proj)(const Potential< GUM_SCALAR >&,
                                      const Set< const DiscreteVariable* >&)) {
    __projection_op = proj;
  }


  /// sets the operator for performing the combinations
  template < typename GUM_SCALAR >
  INLINE void LazyPropagation< GUM_SCALAR >::__setCombinationFunction(
     Potential< GUM_SCALAR >* (*comb)(const Potential< GUM_SCALAR >&,
                                      const Potential< GUM_SCALAR >&)) {
    __combination_op = comb;
  }


  /// invalidate all the messages, posteriors and created potentials
  template < typename GUM_SCALAR >
  void LazyPropagation< GUM_SCALAR >::__invalidateAllMessages() {
    // remove all the messages computed
    for (auto& potset : __separator_potentials)
      potset.second.clear();
    for (auto& mess_computed : __messages_computed)
      mess_computed.second = false;

    // remove all the created potentials
    for (const auto& potset : __created_potentials)
      for (const auto pot : potset.second)
        delete pot;

    // remove all the posteriors
    for (const auto& pot : __target_posteriors)
      delete pot.second;
    for (const auto& pot : __joint_target_posteriors)
      delete pot.second;

    // indicate that new messages need be computed
    if (this->isInferenceReady() || this->isDone())
      this->_setOutdatedBNPotentialsState();
  }


  /// sets how we determine barren nodes
  template < typename GUM_SCALAR >
  void LazyPropagation< GUM_SCALAR >::setFindBarrenNodesType(
     FindBarrenNodesType type) {
    if (type != __barren_nodes_type) {
      // WARNING: if a new type is added here, method __createJT should
      // certainly be updated as well, in particular its step 2.
      switch (type) {
        case FindBarrenNodesType::FIND_NO_BARREN_NODES:
        case FindBarrenNodesType::FIND_BARREN_NODES: break;

        default:
          GUM_ERROR(InvalidArgument,
                    "setFindBarrenNodesType for type "
                       << (unsigned int)type << " is not implemented yet");
      }

      __barren_nodes_type = type;

      // potentially, we may need to reconstruct a junction tree
      this->_setOutdatedBNStructureState();
    }
  }
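
  // Sketch: enabling or disabling the removal of barren nodes (the enum
  // members are assumed from the cases validated above):
  //
  //   inf.setFindBarrenNodesType(gum::FindBarrenNodesType::FIND_BARREN_NODES);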


  /// fired after a new evidence is inserted
  template < typename GUM_SCALAR >
  INLINE void
     LazyPropagation< GUM_SCALAR >::_onEvidenceAdded(const NodeId id,
                                                     bool isHardEvidence) {
    // if we have a new hard evidence, this modifies the undigraph over which
    // the join tree is created. This is also the case if id is not a node of
    // the undigraph
    if (isHardEvidence || !__graph.exists(id))
      __is_new_jt_needed = true;
    else {
      try {
        __evidence_changes.insert(id, EvidenceChangeType::EVIDENCE_ADDED);
      } catch (DuplicateElement&) {
        // here, the evidence change already existed. This necessarily means
        // that the currently saved change is an EVIDENCE_ERASED. So if we
        // erased the evidence and added some again, this corresponds to an
        // EVIDENCE_MODIFIED
        __evidence_changes[id] = EvidenceChangeType::EVIDENCE_MODIFIED;
      }
    }
  }


  /// fired before an evidence is removed
  template < typename GUM_SCALAR >
  INLINE void
     LazyPropagation< GUM_SCALAR >::_onEvidenceErased(const NodeId id,
                                                      bool isHardEvidence) {
    // if we delete a hard evidence, this modifies the undigraph over which
    // the join tree is created.
    if (isHardEvidence)
      __is_new_jt_needed = true;
    else {
      try {
        __evidence_changes.insert(id, EvidenceChangeType::EVIDENCE_ERASED);
      } catch (DuplicateElement&) {
        // here, the evidence change already existed and it is necessarily an
        // EVIDENCE_ADDED or an EVIDENCE_MODIFIED. So, if the evidence has
        // been added and is now erased, this is similar to not having created
        // it. If the evidence was only modified, it already existed in the
        // last inference and we should now indicate that it has been removed.
        if (__evidence_changes[id] == EvidenceChangeType::EVIDENCE_ADDED)
          __evidence_changes.erase(id);
        else
          __evidence_changes[id] = EvidenceChangeType::EVIDENCE_ERASED;
      }
    }
  }


  /// fired before all the evidence are erased
  template < typename GUM_SCALAR >
  void
     LazyPropagation< GUM_SCALAR >::_onAllEvidenceErased(bool has_hard_evidence) {
    if (has_hard_evidence || !this->hardEvidenceNodes().empty())
      __is_new_jt_needed = true;
    else {
      for (const auto node : this->softEvidenceNodes()) {
        try {
          __evidence_changes.insert(node, EvidenceChangeType::EVIDENCE_ERASED);
        } catch (DuplicateElement&) {
          // here, the evidence change already existed and it is necessarily an
          // EVIDENCE_ADDED or an EVIDENCE_MODIFIED. So, if the evidence has
          // been added and is now erased, this is similar to not having created
          // it. If the evidence was only modified, it already existed in the
          // last inference and we should now indicate that it has been removed.
          if (__evidence_changes[node] == EvidenceChangeType::EVIDENCE_ADDED)
            __evidence_changes.erase(node);
          else
            __evidence_changes[node] = EvidenceChangeType::EVIDENCE_ERASED;
        }
      }
    }
  }


  /// fired after an evidence is changed
  template < typename GUM_SCALAR >
  INLINE void
     LazyPropagation< GUM_SCALAR >::_onEvidenceChanged(const NodeId id,
                                                       bool hasChangedSoftHard) {
    if (hasChangedSoftHard)
      __is_new_jt_needed = true;
    else {
      try {
        __evidence_changes.insert(id, EvidenceChangeType::EVIDENCE_MODIFIED);
      } catch (DuplicateElement&) {
        // here, the evidence change already existed and it is necessarily an
        // EVIDENCE_ADDED. So we should keep this state to indicate that this
        // evidence is new w.r.t. the last inference
      }
    }
  }


  /// fired after a new Bayes net has been assigned to the engine
  template < typename GUM_SCALAR >
  INLINE void LazyPropagation< GUM_SCALAR >::_onBayesNetChanged(
     const IBayesNet< GUM_SCALAR >* bn) {}


  /// fired after a new joint target is inserted
  template < typename GUM_SCALAR >
  INLINE void
     LazyPropagation< GUM_SCALAR >::_onJointTargetAdded(const NodeSet& set) {}


  /// fired before a joint target is removed
  template < typename GUM_SCALAR >
  INLINE void
     LazyPropagation< GUM_SCALAR >::_onJointTargetErased(const NodeSet& set) {}


  /// fired after a new marginal target is inserted
  template < typename GUM_SCALAR >
  INLINE void
     LazyPropagation< GUM_SCALAR >::_onMarginalTargetAdded(const NodeId id) {}


  /// fired before a marginal target is removed
  template < typename GUM_SCALAR >
  INLINE void
     LazyPropagation< GUM_SCALAR >::_onMarginalTargetErased(const NodeId id) {}


  /// fired after all the nodes of the BN are added as marginal targets
  template < typename GUM_SCALAR >
  INLINE void LazyPropagation< GUM_SCALAR >::_onAllMarginalTargetsAdded() {}


  /// fired before all the marginal targets are removed
  template < typename GUM_SCALAR >
  INLINE void LazyPropagation< GUM_SCALAR >::_onAllMarginalTargetsErased() {}


  /// fired before all the joint targets are removed
  template < typename GUM_SCALAR >
  INLINE void LazyPropagation< GUM_SCALAR >::_onAllJointTargetsErased() {}


  /// fired before all the marginal and joint targets are removed
  template < typename GUM_SCALAR >
  INLINE void LazyPropagation< GUM_SCALAR >::_onAllTargetsErased() {}

  // check whether a new junction tree is really needed for the next inference
  template < typename GUM_SCALAR >
  bool LazyPropagation< GUM_SCALAR >::__isNewJTNeeded() const {
    // if we do not have a JT or if __is_new_jt_needed is set to true, then
    // we know that we need to create a new join tree
    if ((__JT == nullptr) || __is_new_jt_needed) return true;

    // if some targets do not belong to the join tree and, consequently, to
    // the undigraph that was used to construct the join tree, then we need
    // to create a new JT. This situation may occur if we constructed the
    // join tree after pruning irrelevant/barren nodes from the BN.
    // However, note that the nodes that received hard evidence do not belong
    // to the graph and, therefore, should not be taken into account
    const auto& hard_ev_nodes = this->hardEvidenceNodes();
    for (const auto node : this->targets()) {
      if (!__graph.exists(node) && !hard_ev_nodes.exists(node)) return true;
    }
    for (const auto& joint_target : this->jointTargets()) {
      // here, we need to check that at least one clique contains all the
      // nodes of the joint target.
      bool containing_clique_found = false;
      for (const auto node : joint_target) {
        bool found = true;
        try {
          const NodeSet& clique = __JT->clique(__node_to_clique[node]);
          for (const auto xnode : joint_target) {
            if (!clique.contains(xnode) && !hard_ev_nodes.exists(xnode)) {
              found = false;
              break;
            }
          }
        } catch (NotFound&) { found = false; }

        if (found) {
          containing_clique_found = true;
          break;
        }
      }

      if (!containing_clique_found) return true;
    }

    // if some new evidence has been added on nodes that do not belong
    // to __graph, then we potentially have to reconstruct the join tree
    for (const auto& change : __evidence_changes) {
      if ((change.second == EvidenceChangeType::EVIDENCE_ADDED)
          && !__graph.exists(change.first))
        return true;
    }

    // here, the current JT is exactly what we need for the next inference
    return false;
  }


  /// creates a new junction tree as well as its related data structures
  template < typename GUM_SCALAR >
  void LazyPropagation< GUM_SCALAR >::__createNewJT() {
    // to create the JT, we first create the moral graph of the BN in the
    // following way in order to take into account the barren nodes and the
    // nodes that received evidence:
    // 1/ we create an undirected graph containing only the nodes and no edge
    // 2/ if we take into account barren nodes, remove them from the graph
    // 3/ add edges so that each node and its parents in the BN form a clique
    // 4/ add edges so that set targets are cliques of the moral graph
    // 5/ remove the nodes that received hard evidence (by step 3/, their
    //    parents are linked by edges, which is necessary for inference)
    //
    // At the end of step 5/, we have our moral graph and we can triangulate
    // it to get the new junction tree

    // 1/ create an undirected graph containing only the nodes and no edge
    const auto& bn = this->BN();
    __graph.clear();
    for (const auto node : bn.dag())
      __graph.addNodeWithId(node);

    // 2/ if we wish to exploit barren nodes, we shall remove them from the
    // BN. To do so, we identify all the nodes that are not targets, that
    // received no evidence, and whose descendants are neither targets nor
    // evidence nodes. Such nodes can be safely discarded from the BN without
    // altering the inference output
    if (__barren_nodes_type == FindBarrenNodesType::FIND_BARREN_NODES) {
      // identify the barren nodes
      NodeSet target_nodes = this->targets();
      for (const auto& nodeset : this->jointTargets()) {
        target_nodes += nodeset;
      }

      // check that not all the nodes are targets, otherwise, there is no
      // barren node
      if (target_nodes.size() != bn.size()) {
        BarrenNodesFinder finder(&(bn.dag()));
        finder.setTargets(&target_nodes);

        NodeSet evidence_nodes;
        for (const auto& pair : this->evidence()) {
          evidence_nodes.insert(pair.first);
        }
        finder.setEvidence(&evidence_nodes);

        NodeSet barren_nodes = finder.barrenNodes();

        // remove the barren nodes from the moral graph
        for (const auto node : barren_nodes) {
          __graph.eraseNode(node);
        }
      }
    }

    // 3/ add edges so that each node and its parents in the BN form a clique
    for (const auto node : __graph) {
      const NodeSet& parents = bn.parents(node);
      for (auto iter1 = parents.cbegin(); iter1 != parents.cend(); ++iter1) {
        __graph.addEdge(*iter1, node);
        auto iter2 = iter1;
        for (++iter2; iter2 != parents.cend(); ++iter2) {
          __graph.addEdge(*iter1, *iter2);
        }
      }
    }

    // 4/ if there exist some joint targets, we shall add new edges into the
    // moral graph in order to ensure that there exists a clique containing
    // each joint target
    for (const auto& nodeset : this->jointTargets()) {
      for (auto iter1 = nodeset.cbegin(); iter1 != nodeset.cend(); ++iter1) {
        auto iter2 = iter1;
        for (++iter2; iter2 != nodeset.cend(); ++iter2) {
          __graph.addEdge(*iter1, *iter2);
        }
      }
    }

    // 5/ remove all the nodes that received hard evidence
    __hard_ev_nodes = this->hardEvidenceNodes();
    for (const auto node : __hard_ev_nodes) {
      __graph.eraseNode(node);
    }


    // now, we can compute the new junction tree. To speed-up computations
    // (essentially, those of a distribution phase), we construct from this
    // junction tree a binary join tree
    if (__JT != nullptr) delete __JT;
    if (__junctionTree != nullptr) delete __junctionTree;

    __triangulation->setGraph(&__graph, &(this->domainSizes()));
    const JunctionTree& triang_jt = __triangulation->junctionTree();
    if (__use_binary_join_tree) {
      BinaryJoinTreeConverterDefault bjt_converter;
      NodeSet emptyset;
      __JT = new CliqueGraph(
         bjt_converter.convert(triang_jt, this->domainSizes(), emptyset));
    } else {
      __JT = new CliqueGraph(triang_jt);
    }
    __junctionTree = new CliqueGraph(triang_jt);


    // indicate, for each node of the moral graph, a clique in __JT that can
    // contain its conditional probability table
    __node_to_clique.clear();
    const std::vector< NodeId >& JT_elim_order =
       __triangulation->eliminationOrder();
    NodeProperty< int > elim_order(Size(JT_elim_order.size()));
    for (std::size_t i = std::size_t(0), size = JT_elim_order.size(); i < size;
         ++i)
      elim_order.insert(JT_elim_order[i], (int)i);
    const DAG& dag = bn.dag();
    for (const auto node : __graph) {
      // get the variables in the potential of node (and its parents)
      NodeId first_eliminated_node = node;
      int    elim_number = elim_order[first_eliminated_node];

      for (const auto parent : dag.parents(node)) {
        if (__graph.existsNode(parent) && (elim_order[parent] < elim_number)) {
          elim_number = elim_order[parent];
          first_eliminated_node = parent;
        }
      }

      // first_eliminated_node contains the first var (node or one of its
      // parents) eliminated => the clique created during its elimination
      // contains node and all of its parents => it can contain the potential
      // assigned to the node in the BN
      __node_to_clique.insert(
         node,
         __triangulation->createdJunctionTreeClique(first_eliminated_node));
    }

    // do the same for the nodes that received evidence. Here, we only store
    // the nodes for which at least one parent belongs to __graph (otherwise
    // their CPT is just a constant real number).
    for (const auto node : __hard_ev_nodes) {
      // get the set of parents of the node that belong to __graph
      NodeSet pars(dag.parents(node).size());
      for (const auto par : dag.parents(node))
        if (__graph.exists(par)) pars.insert(par);

      if (!pars.empty()) {
        NodeId first_eliminated_node = *(pars.begin());
        int    elim_number = elim_order[first_eliminated_node];

        for (const auto parent : pars) {
          if (elim_order[parent] < elim_number) {
            elim_number = elim_order[parent];
            first_eliminated_node = parent;
          }
        }

        // first_eliminated_node contains the first var (node or one of its
        // parents) eliminated => the clique created during its elimination
        // contains node and all of its parents => it can contain the
        // potential assigned to the node in the BN
        __node_to_clique.insert(
           node,
           __triangulation->createdJunctionTreeClique(first_eliminated_node));
      }
    }

    // indicate for each joint target a clique that contains it
    __joint_target_to_clique.clear();
    for (const auto& set : this->jointTargets()) {
      // remove from set all the nodes that received hard evidence (since
      // they do not belong to the join tree)
      NodeSet nodeset = set;
      for (const auto node : __hard_ev_nodes)
        if (nodeset.contains(node)) nodeset.erase(node);

      if (!nodeset.empty()) {
        // the clique we are looking for is the one that was created when
        // the first element of nodeset was eliminated
        NodeId first_eliminated_node = *(nodeset.begin());
        int    elim_number = elim_order[first_eliminated_node];
        for (const auto node : nodeset) {
          if (elim_order[node] < elim_number) {
            elim_number = elim_order[node];
            first_eliminated_node = node;
          }
        }

        __joint_target_to_clique.insert(
           set,
           __triangulation->createdJunctionTreeClique(first_eliminated_node));
      }
    }


    // compute the roots of __JT's connected components
    __computeJoinTreeRoots();

    // create empty potential lists into the cliques of the join tree as well
    // as empty lists of evidence
    __PotentialSet empty_set;
    __clique_potentials.clear();
    __node_to_soft_evidence.clear();
    for (const auto node : *__JT) {
      __clique_potentials.insert(node, empty_set);
    }

    // remove all the potentials created during the last inference
    for (const auto& potlist : __created_potentials)
      for (const auto pot : potlist.second)
        delete pot;
    __created_potentials.clear();

    // remove all the potentials created to take into account hard evidence
    // during the last inference
    for (const auto pot_pair : __hard_ev_projected_CPTs)
      delete pot_pair.second;
    __hard_ev_projected_CPTs.clear();

    // remove all the constants created due to projections of CPTs that were
    // defined over only hard evidence nodes
    __constants.clear();

    // create empty lists of potentials for the messages and indicate that no
    // message has been computed yet
    __separator_potentials.clear();
    __messages_computed.clear();
    for (const auto& edge : __JT->edges()) {
      const Arc arc1(edge.first(), edge.second());
      __separator_potentials.insert(arc1, empty_set);
      __messages_computed.insert(arc1, false);
      const Arc arc2(Arc(edge.second(), edge.first()));
      __separator_potentials.insert(arc2, empty_set);
      __messages_computed.insert(arc2, false);
    }

    // remove all the posteriors computed so far
    for (const auto& pot : __target_posteriors)
      delete pot.second;
    __target_posteriors.clear();
    for (const auto& pot : __joint_target_posteriors)
      delete pot.second;
    __joint_target_posteriors.clear();


    // put all the CPTs of the Bayes net nodes into the cliques
    // here, beware: all the potentials that are defined over some nodes
    // including hard evidence must be projected so that these nodes are
    // removed from the potential
    const auto& evidence = this->evidence();
    const auto& hard_evidence = this->hardEvidence();
    for (const auto node : dag) {
      if (__graph.exists(node) || __hard_ev_nodes.contains(node)) {
        const Potential< GUM_SCALAR >& cpt = bn.cpt(node);

        // get the list of nodes with hard evidence in cpt
        NodeSet hard_nodes;
        const auto& variables = cpt.variablesSequence();
        for (const auto var : variables) {
          NodeId xnode = bn.nodeId(*var);
          if (__hard_ev_nodes.contains(xnode)) hard_nodes.insert(xnode);
        }

        // if hard_nodes contains hard evidence nodes, perform a projection
        // and insert the result into the appropriate clique, else insert
        // directly cpt into the clique
        if (hard_nodes.empty()) {
          __clique_potentials[__node_to_clique[node]].insert(&cpt);
        } else {
          // marginalize out the hard evidence nodes: if the cpt is defined
          // only over nodes that received hard evidence, do not consider it
          // as a potential anymore but as a constant
          if (hard_nodes.size() == variables.size()) {
            Instantiation inst;
            const auto& vars = cpt.variablesSequence();
            for (const auto var : vars)
              inst << *var;
            for (Size i = 0; i < hard_nodes.size(); ++i) {
              inst.chgVal(variables[i],
                          hard_evidence[bn.nodeId(*(variables[i]))]);
            }
            __constants.insert(node, cpt.get(inst));
          } else {
            // perform the projection with a combine and project instance
            Set< const DiscreteVariable* > hard_variables;
            __PotentialSet                 marg_cpt_set{&cpt};
            for (const auto xnode : hard_nodes) {
              marg_cpt_set.insert(evidence[xnode]);
              hard_variables.insert(&(bn.variable(xnode)));
            }

            // perform the combination of those potentials and their
            // projection
            MultiDimCombineAndProjectDefault< GUM_SCALAR, Potential >
               combine_and_project(__combination_op, LPNewprojPotential);
            __PotentialSet new_cpt_list =
               combine_and_project.combineAndProject(marg_cpt_set,
                                                     hard_variables);

            // there should be only one potential in new_cpt_list
            if (new_cpt_list.size() != 1) {
              // remove the CPT created to avoid memory leaks
              for (const auto pot : new_cpt_list) {
                if (!marg_cpt_set.contains(pot)) delete pot;
              }
              GUM_ERROR(FatalError,
                        "the projection of a potential containing "
                           << "hard evidence is empty!");
            }
            const Potential< GUM_SCALAR >* projected_cpt =
               *(new_cpt_list.begin());
            __clique_potentials[__node_to_clique[node]].insert(projected_cpt);
            __hard_ev_projected_CPTs.insert(node, projected_cpt);
          }
        }
      }
    }

    // we shall now add all the potentials of the soft evidence
    for (const auto node : this->softEvidenceNodes()) {
      __node_to_soft_evidence.insert(node, evidence[node]);
      __clique_potentials[__node_to_clique[node]].insert(evidence[node]);
    }

    // indicate that the data structures are up to date.
    __evidence_changes.clear();
    __is_new_jt_needed = false;
  }


  /// prepares inference when the latter is in OutdatedBNStructure state
  template < typename GUM_SCALAR >
  INLINE void LazyPropagation< GUM_SCALAR >::_updateOutdatedBNStructure() {
    // check if a new JT is really needed. If so, create it
    if (__isNewJTNeeded()) {
      __createNewJT();
    } else {
      // here, we can answer the next queries without reconstructing all the
      // junction tree. All we need to do is to indicate that we should
      // update the potentials and messages for these queries
      _updateOutdatedBNPotentials();
    }
  }


  /// invalidate all the messages sent from a given clique
  template < typename GUM_SCALAR >
  void LazyPropagation< GUM_SCALAR >::__diffuseMessageInvalidations(
     NodeId from_id, NodeId to_id, NodeSet& invalidated_cliques) {
    // invalidate the current clique
    invalidated_cliques.insert(to_id);

    // invalidate the current arc
    const Arc arc(from_id, to_id);
    bool&     message_computed = __messages_computed[arc];
    if (message_computed) {
      message_computed = false;
      __separator_potentials[arc].clear();
      if (__created_potentials.exists(arc)) {
        auto& arc_created_potentials = __created_potentials[arc];
        for (const auto pot : arc_created_potentials)
          delete pot;
        arc_created_potentials.clear();
      }

      // go on with the diffusion
      for (const auto node_id : __JT->neighbours(to_id)) {
        if (node_id != from_id)
          __diffuseMessageInvalidations(to_id, node_id, invalidated_cliques);
      }
    }
  }


  /// prepares inference when the latter is in OutdatedBNPotentials state
  template < typename GUM_SCALAR >
  void LazyPropagation< GUM_SCALAR >::_updateOutdatedBNPotentials() {
    // compute the set of CPTs that were projected due to hard evidence and
    // whose hard evidence has changed, so that they need a new projection.
    // By the way, remove these CPTs since they are no longer needed.
    // Here, only the values of the hard evidence can have changed (else a
    // fully new join tree would have been computed).
    // Note also that we know that the CPTs still contain some variable(s)
    // after the projection (else they should be constants)
    NodeSet hard_nodes_changed(__hard_ev_nodes.size());
    for (const auto node : __hard_ev_nodes)
      if (__evidence_changes.exists(node)) hard_nodes_changed.insert(node);

    NodeSet     nodes_with_projected_CPTs_changed;
    const auto& bn = this->BN();
    for (auto pot_iter = __hard_ev_projected_CPTs.beginSafe();
         pot_iter != __hard_ev_projected_CPTs.endSafe();
         ++pot_iter) {
      for (const auto var : bn.cpt(pot_iter.key()).variablesSequence()) {
        if (hard_nodes_changed.contains(bn.nodeId(*var))) {
          nodes_with_projected_CPTs_changed.insert(pot_iter.key());
          delete pot_iter.val();
          __clique_potentials[__node_to_clique[pot_iter.key()]].erase(
             pot_iter.val());
          __hard_ev_projected_CPTs.erase(pot_iter);
          break;
        }
      }
    }


    // invalidate all the messages that are no longer correct: start from each
    // of the nodes whose soft evidence has changed and perform a diffusion
    // from the clique into which the soft evidence has been entered,
    // indicating that the messages spreading from this clique are now
    // invalid. At the same time, if there were potentials created on the arcs
    // over which the messages were sent, remove them from memory. For all the
    // cliques that received some projected CPT that should now be changed,
    // do the same.
    NodeSet invalidated_cliques(__JT->size());
    for (const auto& pair : __evidence_changes) {
      if (__node_to_clique.exists(pair.first)) {
        const auto clique = __node_to_clique[pair.first];
        invalidated_cliques.insert(clique);
        for (const auto neighbor : __JT->neighbours(clique)) {
          __diffuseMessageInvalidations(clique, neighbor, invalidated_cliques);
        }
      }
    }

    // now, add to the set of invalidated cliques those that contain projected
    // CPTs that were changed.
    for (const auto node : nodes_with_projected_CPTs_changed) {
      const auto clique = __node_to_clique[node];
      invalidated_cliques.insert(clique);
      for (const auto neighbor : __JT->neighbours(clique)) {
        __diffuseMessageInvalidations(clique, neighbor, invalidated_cliques);
      }
    }


    // now we shall remove all the posteriors that belong to the invalidated
    // cliques. First, cope only with the nodes that did not receive hard
    // evidence, since the other nodes do not belong to the join tree
    for (auto iter = __target_posteriors.beginSafe();
         iter != __target_posteriors.endSafe();
         ++iter) {
      if (__graph.exists(iter.key())
          && (invalidated_cliques.exists(__node_to_clique[iter.key()]))) {
        delete iter.val();
        __target_posteriors.erase(iter);
      }
    }

    // now cope with the nodes that received hard evidence
    for (auto iter = __target_posteriors.beginSafe();
         iter != __target_posteriors.endSafe();
         ++iter) {
      if (hard_nodes_changed.contains(iter.key())) {
        delete iter.val();
        __target_posteriors.erase(iter);
      }
    }

    // finally, cope with joint targets
    for (auto iter = __joint_target_posteriors.beginSafe();
         iter != __joint_target_posteriors.endSafe();
         ++iter) {
      if (invalidated_cliques.exists(__joint_target_to_clique[iter.key()])) {
        delete iter.val();
        __joint_target_posteriors.erase(iter);
      }
    }


    // remove all the evidence that were entered into __node_to_soft_evidence
    // and __clique_potentials and add the new soft ones
    for (const auto& pot_pair : __node_to_soft_evidence) {
      __clique_potentials[__node_to_clique[pot_pair.first]].erase(pot_pair.second);
    }
    __node_to_soft_evidence.clear();

    const auto& evidence = this->evidence();
    for (const auto node : this->softEvidenceNodes()) {
      __node_to_soft_evidence.insert(node, evidence[node]);
      __clique_potentials[__node_to_clique[node]].insert(evidence[node]);
    }


    // Now add the projections of the CPTs due to newly changed hard evidence:
    // if we are performing _updateOutdatedBNPotentials, this means that the
    // set of nodes that received hard evidence has not changed, only
    // their instantiations can have changed. So, if there is an entry
    // for node in __constants, there will still be such an entry after
    // performing the new projections. Idem for __hard_ev_projected_CPTs
    for (const auto node : nodes_with_projected_CPTs_changed) {
      // perform the projection with a combine and project instance
      const Potential< GUM_SCALAR >& cpt = bn.cpt(node);
      const auto& variables = cpt.variablesSequence();
      Set< const DiscreteVariable* > hard_variables;
      __PotentialSet                 marg_cpt_set{&cpt};
      for (const auto var : variables) {
        NodeId xnode = bn.nodeId(*var);
        if (__hard_ev_nodes.exists(xnode)) {
          marg_cpt_set.insert(evidence[xnode]);
          hard_variables.insert(var);
        }
      }

      // perform the combination of those potentials and their projection
      MultiDimCombineAndProjectDefault< GUM_SCALAR, Potential >
         combine_and_project(__combination_op, LPNewprojPotential);
      __PotentialSet new_cpt_list =
         combine_and_project.combineAndProject(marg_cpt_set, hard_variables);

      // there should be only one potential in new_cpt_list
      if (new_cpt_list.size() != 1) {
        // remove the CPT created to avoid memory leaks
        for (const auto pot : new_cpt_list) {
          if (!marg_cpt_set.contains(pot)) delete pot;
        }
        GUM_ERROR(FatalError,
                  "the projection of a potential containing "
                     << "hard evidence is empty!");
      }
      const Potential< GUM_SCALAR >* projected_cpt = *(new_cpt_list.begin());
      __clique_potentials[__node_to_clique[node]].insert(projected_cpt);
      __hard_ev_projected_CPTs.insert(node, projected_cpt);
    }

    // update the constants
    const auto& hard_evidence = this->hardEvidence();
    for (auto& node_cst : __constants) {
      const Potential< GUM_SCALAR >& cpt = bn.cpt(node_cst.first);
      const auto& variables = cpt.variablesSequence();
      Instantiation inst;
      for (const auto var : variables)
        inst << *var;
      for (const auto var : variables) {
        inst.chgVal(var, hard_evidence[bn.nodeId(*var)]);
      }
      node_cst.second = cpt.get(inst);
    }

    // indicate that all changes have been performed
    __evidence_changes.clear();
  }


  /// compute a root for each connected component of __JT
  template < typename GUM_SCALAR >
  void LazyPropagation< GUM_SCALAR >::__computeJoinTreeRoots() {
    // get the set of cliques in which we can find the targets and
    // joint targets
    NodeSet clique_targets;
    for (const auto node : this->targets()) {
      try {
        clique_targets.insert(__node_to_clique[node]);
      } catch (Exception&) {}
    }
    for (const auto& set : this->jointTargets()) {
      try {
        clique_targets.insert(__joint_target_to_clique[set]);
      } catch (Exception&) {}
    }

    // put in a vector these cliques and their size
    std::vector< std::pair< NodeId, Size > > possible_roots(
       clique_targets.size());
    const auto& bn = this->BN();
    std::size_t i = 0;
    for (const auto clique_id : clique_targets) {
      const auto& clique = __JT->clique(clique_id);
      Size        dom_size = 1;
      for (const auto node : clique) {
        dom_size *= bn.variable(node).domainSize();
      }
      possible_roots[i] = std::pair< NodeId, Size >(clique_id, dom_size);
      ++i;
    }

    // sort the cliques by increasing domain size
    std::sort(possible_roots.begin(),
              possible_roots.end(),
              [](const std::pair< NodeId, Size >& a,
                 const std::pair< NodeId, Size >& b) -> bool {
                return a.second < b.second;
              });

    // pick up the clique with the smallest size in each connected component
    NodeProperty< bool > marked = __JT->nodesProperty(false);
    std::function< void(NodeId, NodeId) > diffuse_marks =
       [&marked, &diffuse_marks, this](NodeId node, NodeId from) {
         if (!marked[node]) {
           marked[node] = true;
           for (const auto neigh : __JT->neighbours(node))
             if ((neigh != from) && !marked[neigh]) diffuse_marks(neigh, node);
         }
       };
    __roots.clear();
    for (const auto xclique : possible_roots) {
      NodeId clique = xclique.first;
      if (!marked[clique]) {
        __roots.insert(clique);
        diffuse_marks(clique, clique);
      }
    }
  }


  // performs the collect phase of Lazy Propagation
  template < typename GUM_SCALAR >
  INLINE void LazyPropagation< GUM_SCALAR >::__collectMessage(NodeId id,
                                                              NodeId from) {
    for (const auto other : __JT->neighbours(id)) {
      if ((other != from) && !__messages_computed[Arc(other, id)])
        __collectMessage(other, id);
    }

    if ((id != from) && !__messages_computed[Arc(id, from)]) {
      __produceMessage(id, from);
    }
  }


  // find the potentials d-connected to a set of variables
  template < typename GUM_SCALAR >
  void LazyPropagation< GUM_SCALAR >::__findRelevantPotentialsGetAll(
     Set< const Potential< GUM_SCALAR >* >& pot_list,
     Set< const DiscreteVariable* >&        kept_vars) {}


  // find the potentials d-connected to a set of variables
  template < typename GUM_SCALAR >
  void LazyPropagation< GUM_SCALAR >::__findRelevantPotentialsWithdSeparation(
     Set< const Potential< GUM_SCALAR >* >& pot_list,
     Set< const DiscreteVariable* >&        kept_vars) {
    // find the node ids of the kept variables
    NodeSet kept_ids;
    const auto& bn = this->BN();
    for (const auto var : kept_vars) {
      kept_ids.insert(bn.nodeId(*var));
    }

    // determine the set of potentials d-connected with the kept variables
    NodeSet requisite_nodes;
    BayesBall::requisiteNodes(bn.dag(),
                              kept_ids,
                              this->hardEvidenceNodes(),
                              this->softEvidenceNodes(),
                              requisite_nodes);
    for (auto iter = pot_list.beginSafe(); iter != pot_list.endSafe(); ++iter) {
      const Sequence< const DiscreteVariable* >& vars =
         (**iter).variablesSequence();
      bool found = false;
      for (const auto var : vars) {
        if (requisite_nodes.exists(bn.nodeId(*var))) {
          found = true;
          break;
        }
      }

      if (!found) { pot_list.erase(iter); }
    }
  }


  // find the potentials d-connected to a set of variables
  template < typename GUM_SCALAR >
  void LazyPropagation< GUM_SCALAR >::__findRelevantPotentialsWithdSeparation2(
     Set< const Potential< GUM_SCALAR >* >& pot_list,
     Set< const DiscreteVariable* >&        kept_vars) {
    // find the node ids of the kept variables
    NodeSet kept_ids;
    const auto& bn = this->BN();
    for (const auto var : kept_vars) {
      kept_ids.insert(bn.nodeId(*var));
    }

    // determine the set of potentials d-connected with the kept variables
    BayesBall::relevantPotentials(bn,
                                  kept_ids,
                                  this->hardEvidenceNodes(),
                                  this->softEvidenceNodes(),
                                  pot_list);
  }


  // find the potentials d-connected to a set of variables
  template < typename GUM_SCALAR >
  void LazyPropagation< GUM_SCALAR >::__findRelevantPotentialsWithdSeparation3(
     Set< const Potential< GUM_SCALAR >* >& pot_list,
     Set< const DiscreteVariable* >&        kept_vars) {
    // find the node ids of the kept variables
    NodeSet kept_ids;
    const auto& bn = this->BN();
    for (const auto var : kept_vars) {
      kept_ids.insert(bn.nodeId(*var));
    }

    // determine the set of potentials d-connected with the kept variables
    dSeparation dsep;
    dsep.relevantPotentials(bn,
                            kept_ids,
                            this->hardEvidenceNodes(),
                            this->softEvidenceNodes(),
                            pot_list);
  }


  // find the potentials d-connected to a set of variables
  template < typename GUM_SCALAR >
  void LazyPropagation< GUM_SCALAR >::__findRelevantPotentialsXX(
     Set< const Potential< GUM_SCALAR >* >& pot_list,
     Set< const DiscreteVariable* >&        kept_vars) {
    switch (__find_relevant_potential_type) {
      case RelevantPotentialsFinderType::DSEP_BAYESBALL_POTENTIALS:
        __findRelevantPotentialsWithdSeparation2(pot_list, kept_vars);
        break;

      case RelevantPotentialsFinderType::DSEP_BAYESBALL_NODES:
        __findRelevantPotentialsWithdSeparation(pot_list, kept_vars);
        break;

      case RelevantPotentialsFinderType::DSEP_KOLLER_FRIEDMAN_2009:
        __findRelevantPotentialsWithdSeparation3(pot_list, kept_vars);
        break;

      case RelevantPotentialsFinderType::FIND_ALL:
        __findRelevantPotentialsGetAll(pot_list, kept_vars);
        break;

      default: GUM_ERROR(FatalError, "not implemented yet");
    }
  }


  // remove barren variables
  template < typename GUM_SCALAR >
  Set< const Potential< GUM_SCALAR >* >
     LazyPropagation< GUM_SCALAR >::__removeBarrenVariables(
        __PotentialSet& pot_list, Set< const DiscreteVariable* >& del_vars) {
    // remove from del_vars the variables that received some evidence:
    // only those that did not receive evidence can be barren variables
    Set< const DiscreteVariable* > the_del_vars = del_vars;
    for (auto iter = the_del_vars.beginSafe(); iter != the_del_vars.endSafe();
         ++iter) {
      NodeId id = this->BN().nodeId(**iter);
      if (this->hardEvidenceNodes().exists(id)
          || this->softEvidenceNodes().exists(id)) {
        the_del_vars.erase(iter);
      }
    }

    // assign to each random variable the set of potentials that contain it
    HashTable< const DiscreteVariable*, __PotentialSet > var2pots;
    __PotentialSet empty_pot_set;
    for (const auto pot : pot_list) {
      const Sequence< const DiscreteVariable* >& vars = pot->variablesSequence();
      for (const auto var : vars) {
        if (the_del_vars.exists(var)) {
          if (!var2pots.exists(var)) { var2pots.insert(var, empty_pot_set); }
          var2pots[var].insert(pot);
        }
      }
    }

    // each variable with only one potential is a barren variable
    // assign to each potential with barren nodes its set of barren variables
    HashTable< const Potential< GUM_SCALAR >*, Set< const DiscreteVariable* > >
       pot2barren_var;
    Set< const DiscreteVariable* > empty_var_set;
    for (const auto elt : var2pots) {
      if (elt.second.size() == 1) {   // here we have a barren variable
        const Potential< GUM_SCALAR >* pot = *(elt.second.begin());
        if (!pot2barren_var.exists(pot)) {
          pot2barren_var.insert(pot, empty_var_set);
        }
        pot2barren_var[pot].insert(elt.first);   // insert the barren variable
      }
    }

    // for each potential with barren variables, marginalize them.
    // if the potential has only barren variables, simply remove it from the
    // set of potentials, else just project the potential
    MultiDimProjection< GUM_SCALAR, Potential > projector(LPNewprojPotential);
    __PotentialSet projected_pots;
    for (const auto elt : pot2barren_var) {
      // remove the current potential from pot_list as, anyway, we will
      // change it
      const Potential< GUM_SCALAR >* pot = elt.first;
      pot_list.erase(pot);

      // check whether we need to add a projected new potential or not (i.e.,
      // whether there exist non-barren variables or not)
      if (pot->variablesSequence().size() != elt.second.size()) {
        auto new_pot = projector.project(*pot, elt.second);
        pot_list.insert(new_pot);
        projected_pots.insert(new_pot);
      }
    }

    return projected_pots;
  }


  // remove variables del_vars from the list of potentials pot_list
  template < typename GUM_SCALAR >
  Set< const Potential< GUM_SCALAR >* >
     LazyPropagation< GUM_SCALAR >::__marginalizeOut(
        Set< const Potential< GUM_SCALAR >* > pot_list,
        Set< const DiscreteVariable* >&       del_vars,
        Set< const DiscreteVariable* >&       kept_vars) {
    // use d-separation analysis to check which potentials shall be combined
    __findRelevantPotentialsXX(pot_list, kept_vars);

    // remove the potentials corresponding to barren variables if we want
    // to exploit barren nodes
    __PotentialSet barren_projected_potentials;
    if (__barren_nodes_type == FindBarrenNodesType::FIND_BARREN_NODES) {
      barren_projected_potentials = __removeBarrenVariables(pot_list, del_vars);
    }

    // create a combine and project operator that will perform the
    // marginalization
    MultiDimCombineAndProjectDefault< GUM_SCALAR, Potential > combine_and_project(
       __combination_op, __projection_op);
    __PotentialSet new_pot_list =
       combine_and_project.combineAndProject(pot_list, del_vars);

    // remove all the potentials that were created due to projections of
    // barren nodes and that are not part of the new_pot_list: these
    // potentials were just temporary potentials
    for (auto iter = barren_projected_potentials.beginSafe();
         iter != barren_projected_potentials.endSafe();
         ++iter) {
      if (!new_pot_list.exists(*iter)) delete *iter;
    }

    // remove all the potentials that have no dimension
    for (auto iter_pot = new_pot_list.beginSafe();
         iter_pot != new_pot_list.endSafe();
         ++iter_pot) {
      if ((*iter_pot)->variablesSequence().size() == 0) {
        // as we have already marginalized out variables that received
        // evidence, it may be the case that, after combining and projecting,
        // some potentials might be empty. In this case, we shall keep their
        // constant and remove them from memory
        // # TODO: keep the constants!
        delete *iter_pot;
        new_pot_list.erase(iter_pot);
      }
    }

    return new_pot_list;
  }


  // creates the message sent by clique from_id to clique to_id
  template < typename GUM_SCALAR >
  void LazyPropagation< GUM_SCALAR >::__produceMessage(NodeId from_id,
                                                       NodeId to_id) {
    // get the potentials of the clique.
    __PotentialSet pot_list = __clique_potentials[from_id];

    // add the messages sent by adjacent nodes to from_id
    for (const auto other_id : __JT->neighbours(from_id))
      if (other_id != to_id)
        pot_list += __separator_potentials[Arc(other_id, from_id)];

    // get the set of variables that need be removed from the potentials
    const NodeSet& from_clique = __JT->clique(from_id);
    const NodeSet& separator = __JT->separator(from_id, to_id);
    Set< const DiscreteVariable* > del_vars(from_clique.size());
    Set< const DiscreteVariable* > kept_vars(separator.size());
    const auto& bn = this->BN();

    for (const auto node : from_clique) {
      if (!separator.contains(node)) {
        del_vars.insert(&(bn.variable(node)));
      } else {
        kept_vars.insert(&(bn.variable(node)));
      }
    }

    // pot_list now contains all the potentials to multiply and marginalize
    // => combine the messages
    __PotentialSet new_pot_list = __marginalizeOut(pot_list, del_vars, kept_vars);

    // keep track of the newly created potentials, but first remove all the
    // potentials that are equal to ones (multiplying by a potential filled
    // with ones does not change anything, so such potentials are useless)
    const Arc arc(from_id, to_id);
    for (auto iter = new_pot_list.beginSafe(); iter != new_pot_list.endSafe();
         ++iter) {
      const auto pot = *iter;
      if (pot->variablesSequence().size() == 1) {
        bool is_all_ones = true;
        for (Instantiation inst(*pot); !inst.end(); ++inst) {
          if (pot->get(inst) < __1_minus_epsilon) {
            is_all_ones = false;
            break;
          }
        }
        if (is_all_ones) {
          if (!pot_list.exists(pot)) delete pot;
          new_pot_list.erase(iter);
          continue;
        }
      }

      if (!pot_list.exists(pot)) {
        if (!__created_potentials.exists(arc))
          __created_potentials.insert(arc, __PotentialSet());
        __created_potentials[arc].insert(pot);
      }
    }

    __separator_potentials[arc] = std::move(new_pot_list);
    __messages_computed[arc] = true;
  }


  // performs a whole inference
  template < typename GUM_SCALAR >
  INLINE void LazyPropagation< GUM_SCALAR >::_makeInference() {
    // collect messages for all single targets
    for (const auto node : this->targets()) {
      // perform only collects in the join tree for nodes that have
      // not received hard evidence (those that received hard evidence were
      // not included into the join tree for speed-up reasons)
      if (__graph.exists(node)) {
        __collectMessage(__node_to_clique[node], __node_to_clique[node]);
      }
    }

    // collect messages for all set targets
    // by parsing __joint_target_to_clique, we ensure that the cliques that
    // are referenced belong to the join tree (even if some of the nodes in
    // their associated joint_target do not belong to __graph)
    for (const auto set : __joint_target_to_clique)
      __collectMessage(set.second, set.second);
  }


  /// returns a fresh potential equal to P(argument, evidence)
  template < typename GUM_SCALAR >
  Potential< GUM_SCALAR >*
     LazyPropagation< GUM_SCALAR >::_unnormalizedJointPosterior(NodeId id) {
    const auto& bn = this->BN();

    // hard evidence do not belong to the join tree
    // # TODO: check for sets of inconsistent hard evidence
    if (this->hardEvidenceNodes().contains(id)) {
      return new Potential< GUM_SCALAR >(*(this->evidence()[id]));
    }

    // if we still need to perform some inference task, do it (this should
    // already have been done by _makeInference)
    NodeId clique_of_id = __node_to_clique[id];
    __collectMessage(clique_of_id, clique_of_id);

    // now we just need to create the product of the potentials of the clique
    // containing id with the messages received by this clique and
    // marginalize out all variables except id
    __PotentialSet pot_list = __clique_potentials[clique_of_id];

    // add the messages sent by adjacent nodes to targetClique
    for (const auto other : __JT->neighbours(clique_of_id))
      pot_list += __separator_potentials[Arc(other, clique_of_id)];

    // get the set of variables that need be removed from the potentials
    const NodeSet& nodes = __JT->clique(clique_of_id);
    Set< const DiscreteVariable* > kept_vars{&(bn.variable(id))};
    Set< const DiscreteVariable* > del_vars(nodes.size());
    for (const auto node : nodes) {
      if (node != id) del_vars.insert(&(bn.variable(node)));
    }

    // pot_list now contains all the potentials to multiply and marginalize
    // => combine the messages
    __PotentialSet new_pot_list = __marginalizeOut(pot_list, del_vars, kept_vars);
    Potential< GUM_SCALAR >* joint = nullptr;

    if (new_pot_list.size() == 1) {
      joint = const_cast< Potential< GUM_SCALAR >* >(*(new_pot_list.begin()));
      // if pot already existed, create a copy, so that we can put it into
      // the __target_posteriors property
      if (pot_list.exists(joint)) {
        joint = new Potential< GUM_SCALAR >(*joint);
      } else {
        // remove the joint from new_pot_list so that it will not be
        // removed just after the else block
        new_pot_list.clear();
      }
    } else {
      MultiDimCombinationDefault< GUM_SCALAR, Potential > fast_combination(
         __combination_op);
      joint = fast_combination.combine(new_pot_list);
    }

    // remove the potentials that were created in new_pot_list
    for (const auto pot : new_pot_list)
      if (!pot_list.exists(pot)) delete pot;

    // check that the joint posterior is different from a 0 vector: this
    // would indicate that some hard evidence are not compatible (their joint
    // probability is equal to 0)
    bool nonzero_found = false;
    for (Instantiation inst(*joint); !inst.end(); ++inst) {
      if (joint->get(inst)) {
        nonzero_found = true;
        break;
      }
    }
    if (!nonzero_found) {
      // remove joint from memory to avoid memory leaks
      delete joint;
      GUM_ERROR(IncompatibleEvidence,
                "some evidence entered into the Bayes "
                "net are incompatible (their joint proba = 0)");
    }
    return joint;
  }


  /// returns the posterior of a given variable
  template < typename GUM_SCALAR >
  const Potential< GUM_SCALAR >&
     LazyPropagation< GUM_SCALAR >::_posterior(NodeId id) {
    // check if we have already computed the posterior
    if (__target_posteriors.exists(id)) { return *(__target_posteriors[id]); }

    // compute the joint posterior and normalize
    auto joint = _unnormalizedJointPosterior(id);
    joint->normalize();
    __target_posteriors.insert(id, joint);

    return *joint;
  }
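
  // Sketch: posteriors are cached, so a second call with the same id is
  // essentially free until the evidence or the targets change ("inf" and
  // "id" are assumed from the earlier sketches):
  //
  //   const gum::Potential< double >& p1 = inf.posterior(id);
  //   const gum::Potential< double >& p2 = inf.posterior(id);   // cache hit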


  // returns a fresh potential equal to P(set of targets, evidence)
  template < typename GUM_SCALAR >
  Potential< GUM_SCALAR >*
     LazyPropagation< GUM_SCALAR >::_unnormalizedJointPosterior(
        const NodeSet& set) {
    // hard evidence do not belong to the join tree, so extract the nodes
    // from targets that are not hard evidence
    NodeSet targets = set, hard_ev_nodes;
    for (const auto node : this->hardEvidenceNodes()) {
      if (targets.contains(node)) {
        targets.erase(node);
        hard_ev_nodes.insert(node);
      }
    }

    // if all the nodes have received hard evidence, then compute the
    // joint posterior directly by multiplying the hard evidence potentials
    const auto& evidence = this->evidence();
    if (targets.empty()) {
      __PotentialSet pot_list;
      for (const auto node : set) {
        pot_list.insert(evidence[node]);
      }
      if (pot_list.size() == 1) {
        auto pot = new Potential< GUM_SCALAR >(**(pot_list.begin()));
        return pot;
      } else {
        MultiDimCombinationDefault< GUM_SCALAR, Potential > fast_combination(
           __combination_op);
        return fast_combination.combine(pot_list);
      }
    }


    // if we still need to perform some inference task, do it: so, first,
    // determine the clique on which we should perform collect to compute
    // the unnormalized joint posterior of a set of nodes containing "targets"
    NodeId clique_of_set;
    try {
      clique_of_set = __joint_target_to_clique[set];
    } catch (NotFound&) {
      // here, the precise set of targets does not belong to the set of
      // targets defined by the user. So we will try to find a clique in the
      // junction tree that contains "targets":

      // 1/ we should check that all the nodes belong to the join tree
      for (const auto node : targets) {
        if (!__graph.exists(node)) {
          GUM_ERROR(UndefinedElement, node << " is not a target node");
        }
      }

      // 2/ the clique created by the first eliminated node among target is
      // the one we are looking for
      const std::vector< NodeId >& JT_elim_order =
         __triangulation->eliminationOrder();

      NodeProperty< int > elim_order(Size(JT_elim_order.size()));
      for (std::size_t i = std::size_t(0), size = JT_elim_order.size();
           i < size;
           ++i)
        elim_order.insert(JT_elim_order[i], (int)i);
      NodeId first_eliminated_node = *(targets.begin());
      int    elim_number = elim_order[first_eliminated_node];
      for (const auto node : targets) {
        if (elim_order[node] < elim_number) {
          elim_number = elim_order[node];
          first_eliminated_node = node;
        }
      }

      clique_of_set =
         __triangulation->createdJunctionTreeClique(first_eliminated_node);


      // 3/ check that clique_of_set contains all the nodes in the target
      const NodeSet& clique_nodes = __JT->clique(clique_of_set);
      for (const auto node : targets) {
        if (!clique_nodes.contains(node)) {
          GUM_ERROR(UndefinedElement, set << " is not a joint target");
        }
      }

      // add the discovered clique to __joint_target_to_clique
      __joint_target_to_clique.insert(set, clique_of_set);
    }

    // now perform a collect on the clique
    __collectMessage(clique_of_set, clique_of_set);

    // now we just need to create the product of the potentials of the clique
    // containing set with the messages received by this clique and
    // marginalize out all variables except set
    __PotentialSet pot_list = __clique_potentials[clique_of_set];

    // add the messages sent by adjacent nodes to targetClique
    for (const auto other : __JT->neighbours(clique_of_set))
      pot_list += __separator_potentials[Arc(other, clique_of_set)];

    // get the set of variables that need be removed from the potentials
    const NodeSet& nodes = __JT->clique(clique_of_set);
    Set< const DiscreteVariable* > del_vars(nodes.size());
    Set< const DiscreteVariable* > kept_vars(targets.size());
    const auto& bn = this->BN();
    for (const auto node : nodes) {
      if (!targets.contains(node)) {
        del_vars.insert(&(bn.variable(node)));
      } else {
        kept_vars.insert(&(bn.variable(node)));
      }
    }

    // pot_list now contains all the potentials to multiply and marginalize
    // => combine the messages
    __PotentialSet new_pot_list = __marginalizeOut(pot_list, del_vars, kept_vars);
    Potential< GUM_SCALAR >* joint = nullptr;

    if ((new_pot_list.size() == 1) && hard_ev_nodes.empty()) {
      joint = const_cast< Potential< GUM_SCALAR >* >(*(new_pot_list.begin()));

      // if pot already existed, create a copy, so that we can put it into
      // the __target_posteriors property
      if (pot_list.exists(joint)) {
        joint = new Potential< GUM_SCALAR >(*joint);
      } else {
        // remove the joint from new_pot_list so that it will not be
        // removed just after the next else block
        new_pot_list.clear();
      }
    } else {
      // combine all the potentials in new_pot_list with all the hard
      // evidence of the nodes in set
      __PotentialSet new_new_pot_list = new_pot_list;
      for (const auto node : hard_ev_nodes) {
        new_new_pot_list.insert(evidence[node]);
      }
      MultiDimCombinationDefault< GUM_SCALAR, Potential > fast_combination(
         __combination_op);
      joint = fast_combination.combine(new_new_pot_list);
    }

    // remove the potentials that were created in new_pot_list
    for (const auto pot : new_pot_list)
      if (!pot_list.exists(pot)) delete pot;

    // check that the joint posterior is different from a 0 vector: this
    // would indicate that some hard evidence are not compatible
    bool nonzero_found = false;
    for (Instantiation inst(*joint); !inst.end(); ++inst) {
      if ((*joint)[inst]) {
        nonzero_found = true;
        break;
      }
    }
    if (!nonzero_found) {
      // remove joint from memory to avoid memory leaks
      delete joint;
      GUM_ERROR(IncompatibleEvidence,
                "some evidence entered into the Bayes "
                "net are incompatible (their joint proba = 0)");
    }

    return joint;
  }


  /// returns the posterior of a declared target set
  template < typename GUM_SCALAR >
  const Potential< GUM_SCALAR >&
     LazyPropagation< GUM_SCALAR >::_jointPosterior(const NodeSet& set) {
    // check if we have already computed the posterior
    if (__joint_target_posteriors.exists(set)) {
      return *(__joint_target_posteriors[set]);
    }

    // compute the joint posterior and normalize
    auto joint = _unnormalizedJointPosterior(set);
    joint->normalize();
    __joint_target_posteriors.insert(set, joint);

    return *joint;
  }
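
  // Sketch: querying a joint posterior over a declared joint target (the
  // node ids id1 and id2 are assumptions):
  //
  //   gum::NodeSet target{id1, id2};
  //   inf.addJointTarget(target);
  //   inf.makeInference();
  //   const gum::Potential< double >& pxy = inf.jointPosterior(target);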


  /// returns the posterior of a subset of a declared joint target
  template < typename GUM_SCALAR >
  const Potential< GUM_SCALAR >& LazyPropagation< GUM_SCALAR >::_jointPosterior(
     const NodeSet& wanted_target, const NodeSet& declared_target) {
    // check if we have already computed the posterior of wanted_target
    if (__joint_target_posteriors.exists(wanted_target))
      return *(__joint_target_posteriors[wanted_target]);

    // here, we will have to compute the posterior of declared_target and
    // marginalize out all the variables that do not belong to wanted_target

    // check if we have already computed the posterior of declared_target
    if (!__joint_target_posteriors.exists(declared_target)) {
      _jointPosterior(declared_target);
    }

    // marginalize out all the variables that do not belong to wanted_target
    const auto& bn = this->BN();
    Set< const DiscreteVariable* > del_vars;
    for (const auto node : declared_target)
      if (!wanted_target.contains(node)) del_vars.insert(&(bn.variable(node)));
    Potential< GUM_SCALAR >* pot = new Potential< GUM_SCALAR >(
       __joint_target_posteriors[declared_target]->margSumOut(del_vars));

    // save the result into the cache
    __joint_target_posteriors.insert(wanted_target, pot);

    return *pot;
  }

1678 
1679  template < typename GUM_SCALAR >
1681  // here, we should check that __find_relevant_potential_type is equal to
1682  // FIND_ALL. Otherwise, the computations could be wrong.
1683  RelevantPotentialsFinderType old_relevant_type =
1685 
1686  // if the relevant potentials finder is not equal to FIND_ALL, all the
1687  // current computations may lead to incorrect results, so we shall
1688  // discard them
1689  if (old_relevant_type != RelevantPotentialsFinderType::FIND_ALL) {
1691  __is_new_jt_needed = true;
1693  }
1694 
1695  // perform inference in each connected component
1696  this->makeInference();
1697 
1698  // for each connected component, select a variable X and compute the
1699  // joint probability of X and the evidence e. Then marginalize X out to
1700  // get p(e) in this connected component. Finally, multiply all these
1701  // p(e) together with the constants in __constants. The result is the
1702  // probability of evidence
1703 
1704  GUM_SCALAR prob_ev = 1;
1705  for (const auto root : __roots) {
1706  // get a node in the clique
1707  NodeId node = *(__JT->clique(root).begin());
1708  Potential< GUM_SCALAR >* tmp = _unnormalizedJointPosterior(node);
1709  GUM_SCALAR sum = 0;
1710  for (Instantiation iter(*tmp); !iter.end(); ++iter)
1711  sum += tmp->get(iter);
1712  prob_ev *= sum;
1713  delete tmp;
1714  }
1715 
1716  for (const auto& projected_cpt : __constants)
1717  prob_ev *= projected_cpt.second;
1718 
1719  // put back the relevant potential type selected by the user
1720  __find_relevant_potential_type = old_relevant_type;
1721 
1722  return prob_ev;
1723  }
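  // ---- editorial example (not part of lazyPropagation_tpl.h) ----
  // Caller-side sketch of evidenceProbability(): no explicit makeInference()
  // call is needed, since the method above performs it itself. bn, idA and
  // the evidence value are hypothetical.
  template < typename GUM_SCALAR >
  static GUM_SCALAR exampleProbEvidence(const IBayesNet< GUM_SCALAR >& bn,
                                        NodeId idA) {
    LazyPropagation< GUM_SCALAR > ie(&bn);
    ie.addEvidence(idA, Idx(0));       // hard evidence on a single node
    return ie.evidenceProbability();   // P(e), in [0, 1]
  }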
1724 
1725 } /* namespace gum */
1726 
1727 #endif // DOXYGEN_SHOULD_SKIP_THIS