aGrUM  0.13.2
lazyPropagation_tpl.h
/***************************************************************************
 *   Copyright (C) 2005 by Christophe GONZALES et Pierre-Henri WUILLEMIN   *
 *   {prenom.nom}_at_lip6.fr                                               *
 *                                                                         *
 *   This program is free software; you can redistribute it and/or modify  *
 *   it under the terms of the GNU General Public License as published by  *
 *   the Free Software Foundation; either version 2 of the License, or     *
 *   (at your option) any later version.                                   *
 *                                                                         *
 *   This program is distributed in the hope that it will be useful,       *
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of        *
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the          *
 *   GNU General Public License for more details.                          *
 *                                                                         *
 *   You should have received a copy of the GNU General Public License     *
 *   along with this program; if not, write to the                         *
 *   Free Software Foundation, Inc.,                                       *
 *   59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.              *
 ***************************************************************************/
#ifndef DOXYGEN_SHOULD_SKIP_THIS

#  include <algorithm>

namespace gum {

  // default constructor
  template < typename GUM_SCALAR >
  INLINE LazyPropagation< GUM_SCALAR >::LazyPropagation(
    const IBayesNet< GUM_SCALAR >* BN,
    RelevantPotentialsFinderType   relevant_type,
    FindBarrenNodesType            barren_type,
    bool                           use_binary_join_tree) :
      JointTargetedInference< GUM_SCALAR >(BN),
      EvidenceInference< GUM_SCALAR >(BN),
      __use_binary_join_tree(use_binary_join_tree) {
    // sets the relevant potentials and the barren nodes finding algorithms
    setRelevantPotentialsFinderType(relevant_type);
    setFindBarrenNodesType(barren_type);

    // create a default triangulation (the user can change it afterwards)
    __triangulation = new DefaultTriangulation;

    // for debugging purposes
    GUM_CONSTRUCTOR(LazyPropagation);
  }
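
  // Usage sketch (illustrative only, not part of this file): a typical
  // inference session with this engine. The BayesNet `bn`, `evidence_node`
  // and `target_node` are assumed to have been created beforehand.
  //
  //   gum::LazyPropagation< double > inf(&bn);
  //   inf.addEvidence(evidence_node, 1);   // hard evidence: value index 1
  //   inf.makeInference();
  //   const auto& posterior = inf.posterior(target_node);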

  // destructor
  template < typename GUM_SCALAR >
  INLINE LazyPropagation< GUM_SCALAR >::~LazyPropagation() {
    // remove all the potentials created during the last message passing
    for (const auto& pots : __created_potentials)
      for (const auto pot : pots.second)
        delete pot;

    // remove the potentials created after removing the nodes that received
    // hard evidence
    for (const auto& pot : __hard_ev_projected_CPTs)
      delete pot.second;

    // remove all the posteriors computed
    for (const auto& pot : __target_posteriors)
      delete pot.second;
    for (const auto& pot : __joint_target_posteriors)
      delete pot.second;

    // remove the junction tree and the triangulation algorithm
    if (__JT != nullptr) delete __JT;
    if (__junctionTree != nullptr) delete __junctionTree;
    delete __triangulation;

    // for debugging purposes
    GUM_DESTRUCTOR(LazyPropagation);
  }

  /// set a new triangulation algorithm
  template < typename GUM_SCALAR >
  void LazyPropagation< GUM_SCALAR >::setTriangulation(
    const Triangulation& new_triangulation) {
    delete __triangulation;
    __triangulation = new_triangulation.newFactory();
    __is_new_jt_needed = true;
    this->_setOutdatedBNStructureState();
  }

  /// returns the join tree used for inference
  template < typename GUM_SCALAR >
  INLINE const JoinTree* LazyPropagation< GUM_SCALAR >::joinTree() {
    __createNewJT();

    return __JT;
  }

  /// returns the junction tree used to answer the last inference query
  template < typename GUM_SCALAR >
  INLINE const JunctionTree* LazyPropagation< GUM_SCALAR >::junctionTree() {
    __createNewJT();

    return __junctionTree;
  }

  /// sets how we determine the relevant potentials to combine
  template < typename GUM_SCALAR >
  void LazyPropagation< GUM_SCALAR >::setRelevantPotentialsFinderType(
    RelevantPotentialsFinderType type) {
    if (type != __find_relevant_potential_type) {
      switch (type) {
        case RelevantPotentialsFinderType::DSEP_BAYESBALL_POTENTIALS:
          __findRelevantPotentials = &LazyPropagation<
            GUM_SCALAR >::__findRelevantPotentialsWithdSeparation2;
          break;

        case RelevantPotentialsFinderType::DSEP_BAYESBALL_NODES:
          __findRelevantPotentials = &LazyPropagation<
            GUM_SCALAR >::__findRelevantPotentialsWithdSeparation;
          break;

        case RelevantPotentialsFinderType::DSEP_KOLLER_FRIEDMAN_2009:
          __findRelevantPotentials = &LazyPropagation<
            GUM_SCALAR >::__findRelevantPotentialsWithdSeparation3;
          break;

        case RelevantPotentialsFinderType::FIND_ALL:
          __findRelevantPotentials =
            &LazyPropagation< GUM_SCALAR >::__findRelevantPotentialsGetAll;
          break;

        default:
          GUM_ERROR(InvalidArgument,
                    "setRelevantPotentialsFinderType for type "
                      << (unsigned int)type << " is not implemented yet");
      }

      __find_relevant_potential_type = type;

      // indicate that all messages need be reconstructed to take into account
      // the change in d-separation analysis
      __invalidateAllMessages();
    }
  }

  /// sets the operator for performing the projections
  template < typename GUM_SCALAR >
  INLINE void LazyPropagation< GUM_SCALAR >::__setProjectionFunction(
    Potential< GUM_SCALAR >* (*proj)(const Potential< GUM_SCALAR >&,
                                     const Set< const DiscreteVariable* >&)) {
    __projection_op = proj;
  }

  /// sets the operator for performing the combinations
  template < typename GUM_SCALAR >
  INLINE void LazyPropagation< GUM_SCALAR >::__setCombinationFunction(
    Potential< GUM_SCALAR >* (*comb)(const Potential< GUM_SCALAR >&,
                                     const Potential< GUM_SCALAR >&)) {
    __combination_op = comb;
  }

  /// invalidate all the messages, posteriors and created potentials
  template < typename GUM_SCALAR >
  void LazyPropagation< GUM_SCALAR >::__invalidateAllMessages() {
    // remove all the messages computed
    for (auto& potset : __separator_potentials)
      potset.second.clear();
    for (auto& mess_computed : __messages_computed)
      mess_computed.second = false;

    // remove all the created potentials
    for (const auto& potset : __created_potentials)
      for (const auto pot : potset.second)
        delete pot;

    // remove all the posteriors
    for (const auto& pot : __target_posteriors)
      delete pot.second;
    for (const auto& pot : __joint_target_posteriors)
      delete pot.second;

    // indicate that new messages need be computed
    if (this->isInferenceReady() || this->isDone())
      this->_setOutdatedBNPotentialsState();
  }

  /// sets how we determine barren nodes
  template < typename GUM_SCALAR >
  void LazyPropagation< GUM_SCALAR >::setFindBarrenNodesType(
    FindBarrenNodesType type) {
    if (type != __barren_nodes_type) {
      // WARNING: if a new type is added here, method __createJT should
      // certainly be updated as well, in particular its step 2.
      switch (type) {
        case FindBarrenNodesType::FIND_BARREN_NODES:
        case FindBarrenNodesType::FIND_NO_BARREN_NODES: break;

        default:
          GUM_ERROR(InvalidArgument,
                    "setFindBarrenNodesType for type "
                      << (unsigned int)type << " is not implemented yet");
      }

      __barren_nodes_type = type;

      // potentially, we may need to reconstruct a junction tree
      this->_setOutdatedBNStructureState();
    }
  }
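
  // Illustrative sketch (not part of this file): both strategies can be
  // tuned on an existing engine; the enum values are those used in the
  // switches above.
  //
  //   inf.setRelevantPotentialsFinderType(
  //     gum::RelevantPotentialsFinderType::DSEP_BAYESBALL_POTENTIALS);
  //   inf.setFindBarrenNodesType(gum::FindBarrenNodesType::FIND_BARREN_NODES);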

  /// fired when new evidence is inserted
  template < typename GUM_SCALAR >
  INLINE void
    LazyPropagation< GUM_SCALAR >::_onEvidenceAdded(const NodeId id,
                                                    bool isHardEvidence) {
    // if we have a new hard evidence, this modifies the undigraph over which
    // the join tree is created. This is also the case if id is not a node of
    // the undigraph
    if (isHardEvidence || !__graph.exists(id))
      __is_new_jt_needed = true;
    else {
      try {
        __evidence_changes.insert(id, EvidenceChangeType::EVIDENCE_ADDED);
      } catch (DuplicateElement&) {
        // here, the evidence change already existed. This necessarily means
        // that the current saved change is an EVIDENCE_ERASED. So if we
        // erased the evidence and added some again, this corresponds to an
        // EVIDENCE_MODIFIED
        __evidence_changes[id] = EvidenceChangeType::EVIDENCE_MODIFIED;
      }
    }
  }

  /// fired when evidence is removed
  template < typename GUM_SCALAR >
  INLINE void
    LazyPropagation< GUM_SCALAR >::_onEvidenceErased(const NodeId id,
                                                     bool isHardEvidence) {
    // if we delete a hard evidence, this modifies the undigraph over which
    // the join tree is created.
    if (isHardEvidence)
      __is_new_jt_needed = true;
    else {
      try {
        __evidence_changes.insert(id, EvidenceChangeType::EVIDENCE_ERASED);
      } catch (DuplicateElement&) {
        // here, the evidence change already existed and it is necessarily an
        // EVIDENCE_ADDED or an EVIDENCE_MODIFIED. So, if the evidence has
        // been added and is now erased, this is similar to not having created
        // it. If the evidence was only modified, it already existed in the
        // last inference and we should now indicate that it has been removed.
        if (__evidence_changes[id] == EvidenceChangeType::EVIDENCE_ADDED)
          __evidence_changes.erase(id);
        else
          __evidence_changes[id] = EvidenceChangeType::EVIDENCE_ERASED;
      }
    }
  }

  /// fired before all the evidence are erased
  template < typename GUM_SCALAR >
  void
    LazyPropagation< GUM_SCALAR >::_onAllEvidenceErased(bool has_hard_evidence) {
    if (has_hard_evidence || !this->hardEvidenceNodes().empty())
      __is_new_jt_needed = true;
    else {
      for (const auto node : this->softEvidenceNodes()) {
        try {
          __evidence_changes.insert(node, EvidenceChangeType::EVIDENCE_ERASED);
        } catch (DuplicateElement&) {
          // here, the evidence change already existed and it is necessarily an
          // EVIDENCE_ADDED or an EVIDENCE_MODIFIED. So, if the evidence has
          // been added and is now erased, this is similar to not having created
          // it. If the evidence was only modified, it already existed in the
          // last inference and we should now indicate that it has been removed.
          if (__evidence_changes[node] == EvidenceChangeType::EVIDENCE_ADDED)
            __evidence_changes.erase(node);
          else
            __evidence_changes[node] = EvidenceChangeType::EVIDENCE_ERASED;
        }
      }
    }
  }

  /// fired when evidence is changed
  template < typename GUM_SCALAR >
  INLINE void
    LazyPropagation< GUM_SCALAR >::_onEvidenceChanged(const NodeId id,
                                                      bool hasChangedSoftHard) {
    if (hasChangedSoftHard)
      __is_new_jt_needed = true;
    else {
      try {
        __evidence_changes.insert(id, EvidenceChangeType::EVIDENCE_MODIFIED);
      } catch (DuplicateElement&) {
        // here, the evidence change already existed and it is necessarily an
        // EVIDENCE_ADDED. So we should keep this state to indicate that this
        // evidence is new w.r.t. the last inference
      }
    }
  }

  /// fired after a new marginal target is inserted
  template < typename GUM_SCALAR >
  INLINE void LazyPropagation< GUM_SCALAR >::_onMarginalTargetAdded(const NodeId id) {}

  /// fired before a marginal target is removed
  template < typename GUM_SCALAR >
  INLINE void LazyPropagation< GUM_SCALAR >::_onMarginalTargetErased(const NodeId id) {}

  /// fired after a new joint target is inserted
  template < typename GUM_SCALAR >
  INLINE void
    LazyPropagation< GUM_SCALAR >::_onJointTargetAdded(const NodeSet& set) {}

  /// fired before a joint target is removed
  template < typename GUM_SCALAR >
  INLINE void
    LazyPropagation< GUM_SCALAR >::_onJointTargetErased(const NodeSet& set) {}

  /// fired after all the nodes of the BN are added as marginal targets
  template < typename GUM_SCALAR >
  INLINE void LazyPropagation< GUM_SCALAR >::_onAllMarginalTargetsAdded() {}

  /// fired before all the marginal targets are removed
  template < typename GUM_SCALAR >
  INLINE void LazyPropagation< GUM_SCALAR >::_onAllMarginalTargetsErased() {}

  /// fired before all the joint targets are removed
  template < typename GUM_SCALAR >
  INLINE void LazyPropagation< GUM_SCALAR >::_onAllJointTargetsErased() {}

  /// fired before all the single and joint targets are removed
  template < typename GUM_SCALAR >
  INLINE void LazyPropagation< GUM_SCALAR >::_onAllTargetsErased() {}

  // check whether a new junction tree is really needed for the next inference
  template < typename GUM_SCALAR >
  bool LazyPropagation< GUM_SCALAR >::__isNewJTNeeded() const {
    // if we do not have a JT or if __is_new_jt_needed is set to true, then
    // we know that we need to create a new join tree
    if ((__JT == nullptr) || __is_new_jt_needed) return true;

    // if some targets do not belong to the join tree and, consequently, to
    // the undigraph that was used to construct the join tree, then we need
    // to create a new JT. This situation may occur if we constructed the
    // join tree after pruning irrelevant/barren nodes from the BN.
    // However, note that the nodes that received hard evidence do not belong
    // to the graph and, therefore, should not be taken into account
    const auto& hard_ev_nodes = this->hardEvidenceNodes();
    for (const auto node : this->targets()) {
      if (!__graph.exists(node) && !hard_ev_nodes.exists(node)) return true;
    }
    for (const auto& nodes : this->jointTargets()) {
      // here, we need to check that at least one clique contains all the nodes
      bool containing_clique_found = false;
      for (const auto node : nodes) {
        bool found = true;
        const NodeSet& clique = __JT->clique(__node_to_clique[node]);
        for (const auto xnode : nodes) {
          if (!clique.contains(xnode) && !hard_ev_nodes.exists(xnode)) {
            found = false;
            break;
          }
        }
        if (found) {
          containing_clique_found = true;
          break;
        }
      }

      if (!containing_clique_found) return true;
    }

    // if some new evidence has been added on nodes that do not belong
    // to __graph, then we potentially have to reconstruct the join tree
    for (const auto& change : __evidence_changes) {
      if ((change.second == EvidenceChangeType::EVIDENCE_ADDED)
          && !__graph.exists(change.first))
        return true;
    }

    // here, the current JT is exactly what we need for the next inference
    return false;
  }

  /// create a new junction tree as well as its related data structures
  template < typename GUM_SCALAR >
  void LazyPropagation< GUM_SCALAR >::__createNewJT() {
    // to create the JT, we first create the moral graph of the BN in the
    // following way in order to take into account the barren nodes and the
    // nodes that received evidence:
    // 1/ we create an undirected graph containing only the nodes and no edge
    // 2/ if we take into account barren nodes, remove them from the graph
    // 3/ add edges so that each node and its parents in the BN form a clique
    // 4/ add edges so that the joint targets are cliques of the moral graph
    // 5/ remove the nodes that received hard evidence (by step 3/, their
    //    parents are linked by edges, which is necessary for inference)
    //
    // At the end of step 5/, we have our moral graph and we can triangulate
    // it to get the new junction tree

    // 1/ create an undirected graph containing only the nodes and no edge
    const auto& bn = this->BN();
    __graph.clear();
    for (const auto node : bn.dag())
      __graph.addNodeWithId(node);

    // 2/ if we wish to exploit barren nodes, we shall remove them from the
    // BN. To do so, we identify all the nodes that are not targets, have
    // received no evidence and whose descendants are neither targets nor
    // evidence nodes. Such nodes can be safely discarded from the BN without
    // altering the inference output
    if (__barren_nodes_type == FindBarrenNodesType::FIND_BARREN_NODES) {
      // identify the barren nodes
      NodeSet target_nodes = this->targets();
      for (const auto& nodeset : this->jointTargets()) {
        target_nodes += nodeset;
      }

      // check that not all the nodes are targets; otherwise, there is no
      // barren node
      if (target_nodes.size() != bn.size()) {
        BarrenNodesFinder finder(&(bn.dag()));
        finder.setTargets(&target_nodes);

        NodeSet evidence_nodes;
        for (const auto& pair : this->evidence()) {
          evidence_nodes.insert(pair.first);
        }
        finder.setEvidence(&evidence_nodes);

        NodeSet barren_nodes = finder.barrenNodes();

        // remove the barren nodes from the moral graph
        for (const auto node : barren_nodes) {
          __graph.eraseNode(node);
        }
      }
    }

    // 3/ add edges so that each node and its parents in the BN form a clique
    for (const auto node : __graph) {
      const NodeSet& parents = bn.parents(node);
      for (auto iter1 = parents.cbegin(); iter1 != parents.cend(); ++iter1) {
        __graph.addEdge(*iter1, node);
        auto iter2 = iter1;
        for (++iter2; iter2 != parents.cend(); ++iter2) {
          __graph.addEdge(*iter1, *iter2);
        }
      }
    }

    // 4/ if there exist some joint targets, we shall add new edges into the
    // moral graph in order to ensure that there exists a clique containing
    // each joint target
    for (const auto& nodeset : this->jointTargets()) {
      for (auto iter1 = nodeset.cbegin(); iter1 != nodeset.cend(); ++iter1) {
        auto iter2 = iter1;
        for (++iter2; iter2 != nodeset.cend(); ++iter2) {
          __graph.addEdge(*iter1, *iter2);
        }
      }
    }

    // 5/ remove all the nodes that received hard evidence
    __hard_ev_nodes = this->hardEvidenceNodes();
    for (const auto node : __hard_ev_nodes) {
      __graph.eraseNode(node);
    }


    // now, we can compute the new junction tree. To speed-up computations
    // (essentially, those of a distribution phase), we construct from this
    // junction tree a binary join tree
    if (__JT != nullptr) delete __JT;
    if (__junctionTree != nullptr) delete __junctionTree;

    __triangulation->setGraph(&__graph, &(this->domainSizes()));
    const JunctionTree& triang_jt = __triangulation->junctionTree();
    if (__use_binary_join_tree) {
      BinaryJoinTreeConverterDefault bjt_converter;
      NodeSet emptyset;
      __JT = new CliqueGraph(
        bjt_converter.convert(triang_jt, this->domainSizes(), emptyset));
    } else {
      __JT = new CliqueGraph(triang_jt);
    }
    __junctionTree = new CliqueGraph(triang_jt);


    // indicate, for each node of the moral graph, a clique in __JT that can
    // contain its conditional probability table
    __node_to_clique.clear();
    const std::vector< NodeId >& JT_elim_order =
      __triangulation->eliminationOrder();
    NodeProperty< int > elim_order(Size(JT_elim_order.size()));
    for (std::size_t i = std::size_t(0), size = JT_elim_order.size(); i < size;
         ++i)
      elim_order.insert(JT_elim_order[i], (int)i);
    const DAG& dag = bn.dag();
    for (const auto node : __graph) {
      // get the variables in the potential of node (and its parents)
      NodeId first_eliminated_node = node;
      int    elim_number = elim_order[first_eliminated_node];

      for (const auto parent : dag.parents(node)) {
        if (__graph.existsNode(parent) && (elim_order[parent] < elim_number)) {
          elim_number = elim_order[parent];
          first_eliminated_node = parent;
        }
      }

      // first_eliminated_node contains the first var (node or one of its
      // parents) eliminated => the clique created during its elimination
      // contains node and all of its parents => it can contain the potential
      // assigned to the node in the BN
      __node_to_clique.insert(
        node, __triangulation->createdJunctionTreeClique(first_eliminated_node));
    }

    // do the same for the nodes that received evidence. Here, we only store
    // the nodes for which at least one parent belongs to __graph (otherwise
    // their CPT is just a constant real number).
    for (const auto node : __hard_ev_nodes) {
      // get the set of parents of the node that belong to __graph
      NodeSet pars(dag.parents(node).size());
      for (const auto par : dag.parents(node))
        if (__graph.exists(par)) pars.insert(par);

      if (!pars.empty()) {
        NodeId first_eliminated_node = *(pars.begin());
        int    elim_number = elim_order[first_eliminated_node];

        for (const auto parent : pars) {
          if (elim_order[parent] < elim_number) {
            elim_number = elim_order[parent];
            first_eliminated_node = parent;
          }
        }

        // first_eliminated_node contains the first var (node or one of its
        // parents) eliminated => the clique created during its elimination
        // contains node and all of its parents => it can contain the potential
        // assigned to the node in the BN
        __node_to_clique.insert(
          node, __triangulation->createdJunctionTreeClique(first_eliminated_node));
      }
    }

    // indicate for each joint target a clique that contains it
    __joint_target_to_clique.clear();
    for (const auto& set : this->jointTargets()) {
      // remove from set all the nodes that received hard evidence (since they
      // do not belong to the join tree)
      NodeSet nodeset = set;
      for (const auto node : __hard_ev_nodes)
        if (nodeset.contains(node)) nodeset.erase(node);

      if (!nodeset.empty()) {
        // the clique we are looking for is the one that was created when
        // the first element of nodeset was eliminated
        NodeId first_eliminated_node = *(nodeset.begin());
        int    elim_number = elim_order[first_eliminated_node];
        for (const auto node : nodeset) {
          if (elim_order[node] < elim_number) {
            elim_number = elim_order[node];
            first_eliminated_node = node;
          }
        }

        __joint_target_to_clique.insert(
          set, __triangulation->createdJunctionTreeClique(first_eliminated_node));
      }
    }


    // compute the roots of __JT's connected components
    __computeJoinTreeRoots();

    // create empty potential lists into the cliques of the join tree as well
    // as empty lists of evidence
    __PotentialSet empty_set;
    __clique_potentials.clear();
    __node_to_soft_evidence.clear();
    for (const auto node : *__JT) {
      __clique_potentials.insert(node, empty_set);
    }

    // remove all the potentials created during the last inference
    for (const auto& potlist : __created_potentials)
      for (const auto pot : potlist.second)
        delete pot;
    __created_potentials.clear();

    // remove all the potentials created to take into account hard evidence
    // during the last inference
    for (const auto pot_pair : __hard_ev_projected_CPTs)
      delete pot_pair.second;
    __hard_ev_projected_CPTs.clear();

    // remove all the constants created due to projections of CPTs that were
    // defined over only hard evidence nodes
    __constants.clear();

    // create empty lists of potentials for the messages and indicate that no
    // message has been computed yet
    __separator_potentials.clear();
    __messages_computed.clear();
    for (const auto& edge : __JT->edges()) {
      const Arc arc1(edge.first(), edge.second());
      __separator_potentials.insert(arc1, empty_set);
      __messages_computed.insert(arc1, false);
      const Arc arc2(Arc(edge.second(), edge.first()));
      __separator_potentials.insert(arc2, empty_set);
      __messages_computed.insert(arc2, false);
    }

    // remove all the posteriors computed so far
    for (const auto& pot : __target_posteriors)
      delete pot.second;
    __target_posteriors.clear();
    for (const auto& pot : __joint_target_posteriors)
      delete pot.second;
    __joint_target_posteriors.clear();


    // put all the CPTs of the Bayes net nodes into the cliques. Here,
    // beware: all the potentials that are defined over some nodes including
    // hard evidence must be projected so that these nodes are removed from
    // the potential
    const auto& evidence = this->evidence();
    const auto& hard_evidence = this->hardEvidence();
    for (const auto node : dag) {
      if (__graph.exists(node) || __hard_ev_nodes.contains(node)) {
        const Potential< GUM_SCALAR >& cpt = bn.cpt(node);

        // get the list of nodes with hard evidence in cpt
        NodeSet hard_nodes;
        const auto& variables = cpt.variablesSequence();
        for (const auto var : variables) {
          NodeId xnode = bn.nodeId(*var);
          if (__hard_ev_nodes.contains(xnode)) hard_nodes.insert(xnode);
        }

        // if hard_nodes contains hard evidence nodes, perform a projection
        // and insert the result into the appropriate clique, else insert
        // directly cpt into the clique
        if (hard_nodes.empty()) {
          __clique_potentials[__node_to_clique[node]].insert(&cpt);
        } else {
          // marginalize out the hard evidence nodes: if the cpt is defined
          // only over nodes that received hard evidence, do not consider it
          // as a potential anymore but as a constant
          if (hard_nodes.size() == variables.size()) {
            Instantiation inst;
            const auto& vars = cpt.variablesSequence();
            for (const auto var : vars)
              inst << *var;
            for (Size i = 0; i < hard_nodes.size(); ++i) {
              inst.chgVal(variables[i], hard_evidence[bn.nodeId(*(variables[i]))]);
            }
            __constants.insert(node, cpt.get(inst));
          } else {
            // perform the projection with a combine and project instance
            Set< const DiscreteVariable* > hard_variables;
            __PotentialSet                 marg_cpt_set{&cpt};
            for (const auto xnode : hard_nodes) {
              marg_cpt_set.insert(evidence[xnode]);
              hard_variables.insert(&(bn.variable(xnode)));
            }

            // perform the combination of those potentials and their projection
            MultiDimCombineAndProjectDefault< GUM_SCALAR, Potential >
              combine_and_project(__combination_op, LPNewprojPotential);
            __PotentialSet new_cpt_list =
              combine_and_project.combineAndProject(marg_cpt_set, hard_variables);

            // there should be only one potential in new_cpt_list
            if (new_cpt_list.size() != 1) {
              // remove the CPTs created to avoid memory leaks
              for (const auto pot : new_cpt_list) {
                if (!marg_cpt_set.contains(pot)) delete pot;
              }
              GUM_ERROR(FatalError,
                        "the projection of a potential containing "
                          << "hard evidence is empty!");
            }
            const Potential< GUM_SCALAR >* projected_cpt = *(new_cpt_list.begin());
            __clique_potentials[__node_to_clique[node]].insert(projected_cpt);
            __hard_ev_projected_CPTs.insert(node, projected_cpt);
          }
        }
      }
    }

    // we shall now add all the potentials of the soft evidence
    for (const auto node : this->softEvidenceNodes()) {
      __node_to_soft_evidence.insert(node, evidence[node]);
      __clique_potentials[__node_to_clique[node]].insert(evidence[node]);
    }

    // indicate that the data structures are up to date
    __evidence_changes.clear();
    __is_new_jt_needed = false;
  }
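
  // Worked example (comments only, illustrative): for a BN A -> C <- B with
  // target C and hard evidence on B, the construction above proceeds as:
  //   1/ graph = {A, B, C}, no edge
  //   2/ no barren node (C is a target)
  //   3/ moralization adds A-C and B-C, and marries the parents: A-B
  //   4/ no joint target, hence no extra edge
  //   5/ B (hard evidence) is removed; the remaining graph {A, C} with edge
  //      A-C is then triangulated to produce the join tree used for inference.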

  /// prepares inference when the latter is in OutdatedBNStructure state
  template < typename GUM_SCALAR >
  INLINE void LazyPropagation< GUM_SCALAR >::_updateOutdatedBNStructure() {
    // check if a new JT is really needed. If so, create it
    if (__isNewJTNeeded()) {
      __createNewJT();
    } else {
      // here, we can answer the next queries without reconstructing all the
      // junction tree. All we need to do is to indicate that we should
      // update the potentials and messages for these queries
      _updateOutdatedBNPotentials();
    }
  }

  /// invalidate all the messages sent from a given clique
  template < typename GUM_SCALAR >
  void LazyPropagation< GUM_SCALAR >::__diffuseMessageInvalidations(
    NodeId from_id, NodeId to_id, NodeSet& invalidated_cliques) {
    // invalidate the current clique
    invalidated_cliques.insert(to_id);

    // invalidate the current arc
    const Arc arc(from_id, to_id);
    bool&     message_computed = __messages_computed[arc];
    if (message_computed) {
      message_computed = false;
      __separator_potentials[arc].clear();
      if (__created_potentials.exists(arc)) {
        auto& arc_created_potentials = __created_potentials[arc];
        for (const auto pot : arc_created_potentials)
          delete pot;
        arc_created_potentials.clear();
      }

      // go on with the diffusion
      for (const auto node_id : __JT->neighbours(to_id)) {
        if (node_id != from_id)
          __diffuseMessageInvalidations(to_id, node_id, invalidated_cliques);
      }
    }
  }

  /// prepares inference when the latter is in OutdatedBNPotentials state
  template < typename GUM_SCALAR >
  void LazyPropagation< GUM_SCALAR >::_updateOutdatedBNPotentials() {
    // compute the set of CPTs that were projected due to hard evidence and
    // whose hard evidence has changed, so that they need a new projection.
    // By the way, remove these CPTs since they are no longer needed.
    // Here only the values of the hard evidence can have changed (else a
    // fully new join tree would have been computed).
    // Note also that we know that the CPTs still contain some variable(s)
    // after the projection (else they should be constants)
    NodeSet hard_nodes_changed(__hard_ev_nodes.size());
    for (const auto node : __hard_ev_nodes)
      if (__evidence_changes.exists(node)) hard_nodes_changed.insert(node);

    NodeSet     nodes_with_projected_CPTs_changed;
    const auto& bn = this->BN();
    for (auto pot_iter = __hard_ev_projected_CPTs.beginSafe();
         pot_iter != __hard_ev_projected_CPTs.endSafe();
         ++pot_iter) {
      for (const auto var : bn.cpt(pot_iter.key()).variablesSequence()) {
        if (hard_nodes_changed.contains(bn.nodeId(*var))) {
          nodes_with_projected_CPTs_changed.insert(pot_iter.key());
          delete pot_iter.val();
          __clique_potentials[__node_to_clique[pot_iter.key()]].erase(
            pot_iter.val());
          __hard_ev_projected_CPTs.erase(pot_iter);
          break;
        }
      }
    }


    // invalidate all the messages that are no longer correct: start from each
    // of the nodes whose soft evidence has changed and perform a diffusion
    // from the clique into which the soft evidence has been entered,
    // indicating that the messages spreading from this clique are now
    // invalid. At the same time, if there were potentials created on the arcs
    // over which the messages were sent, remove them from memory. For all the
    // cliques that received some projected CPT that should now be changed,
    // do the same.
    NodeSet invalidated_cliques(__JT->size());
    for (const auto& pair : __evidence_changes) {
      if (__node_to_clique.exists(pair.first)) {
        const auto clique = __node_to_clique[pair.first];
        invalidated_cliques.insert(clique);
        for (const auto neighbor : __JT->neighbours(clique)) {
          __diffuseMessageInvalidations(clique, neighbor, invalidated_cliques);
        }
      }
    }

    // now, add to the set of invalidated cliques those that contain projected
    // CPTs that were changed
    for (const auto node : nodes_with_projected_CPTs_changed) {
      const auto clique = __node_to_clique[node];
      invalidated_cliques.insert(clique);
      for (const auto neighbor : __JT->neighbours(clique)) {
        __diffuseMessageInvalidations(clique, neighbor, invalidated_cliques);
      }
    }


    // now we shall remove all the posteriors that belong to the
    // invalidated cliques. First, cope only with the nodes that did not
    // receive hard evidence since the other nodes do not belong to the
    // join tree
    for (auto iter = __target_posteriors.beginSafe();
         iter != __target_posteriors.endSafe();
         ++iter) {
      if (__graph.exists(iter.key())
          && (invalidated_cliques.exists(__node_to_clique[iter.key()]))) {
        delete iter.val();
        __target_posteriors.erase(iter);
      }
    }

    // now cope with the nodes that received hard evidence
    for (auto iter = __target_posteriors.beginSafe();
         iter != __target_posteriors.endSafe();
         ++iter) {
      if (hard_nodes_changed.contains(iter.key())) {
        delete iter.val();
        __target_posteriors.erase(iter);
      }
    }

    // finally, cope with joint targets
    for (auto iter = __joint_target_posteriors.beginSafe();
         iter != __joint_target_posteriors.endSafe();
         ++iter) {
      if (invalidated_cliques.exists(__joint_target_to_clique[iter.key()])) {
        delete iter.val();
        __joint_target_posteriors.erase(iter);
      }
    }


    // remove all the evidence that was entered into __node_to_soft_evidence
    // and __clique_potentials, and add the new soft evidence
    for (const auto& pot_pair : __node_to_soft_evidence) {
      __clique_potentials[__node_to_clique[pot_pair.first]].erase(pot_pair.second);
    }
    __node_to_soft_evidence.clear();

    const auto& evidence = this->evidence();
    for (const auto node : this->softEvidenceNodes()) {
      __node_to_soft_evidence.insert(node, evidence[node]);
      __clique_potentials[__node_to_clique[node]].insert(evidence[node]);
    }


    // Now add the projections of the CPTs due to newly changed hard evidence:
    // if we are performing _updateOutdatedBNPotentials, this means that the
    // set of nodes that received hard evidence has not changed, only
    // their instantiations can have changed. So, if there is an entry
    // for node in __constants, there will still be such an entry after
    // performing the new projections. Idem for __hard_ev_projected_CPTs
    for (const auto node : nodes_with_projected_CPTs_changed) {
      // perform the projection with a combine and project instance
      const Potential< GUM_SCALAR >& cpt = bn.cpt(node);
      const auto& variables = cpt.variablesSequence();
      Set< const DiscreteVariable* > hard_variables;
      __PotentialSet                 marg_cpt_set{&cpt};
      for (const auto var : variables) {
        NodeId xnode = bn.nodeId(*var);
        if (__hard_ev_nodes.exists(xnode)) {
          marg_cpt_set.insert(evidence[xnode]);
          hard_variables.insert(var);
        }
      }

      // perform the combination of those potentials and their projection
      MultiDimCombineAndProjectDefault< GUM_SCALAR, Potential >
        combine_and_project(__combination_op, LPNewprojPotential);
      __PotentialSet new_cpt_list =
        combine_and_project.combineAndProject(marg_cpt_set, hard_variables);

      // there should be only one potential in new_cpt_list
      if (new_cpt_list.size() != 1) {
        // remove the CPTs created to avoid memory leaks
        for (const auto pot : new_cpt_list) {
          if (!marg_cpt_set.contains(pot)) delete pot;
        }
        GUM_ERROR(FatalError,
                  "the projection of a potential containing "
                    << "hard evidence is empty!");
      }
      const Potential< GUM_SCALAR >* projected_cpt = *(new_cpt_list.begin());
      __clique_potentials[__node_to_clique[node]].insert(projected_cpt);
      __hard_ev_projected_CPTs.insert(node, projected_cpt);
    }

    // update the constants
    const auto& hard_evidence = this->hardEvidence();
    for (auto& node_cst : __constants) {
      const Potential< GUM_SCALAR >& cpt = bn.cpt(node_cst.first);
      const auto& variables = cpt.variablesSequence();
      Instantiation inst;
      for (const auto var : variables)
        inst << *var;
      for (const auto var : variables) {
        inst.chgVal(var, hard_evidence[bn.nodeId(*var)]);
      }
      node_cst.second = cpt.get(inst);
    }

    // indicate that all changes have been performed
    __evidence_changes.clear();
  }

  /// compute a root for each connected component of __JT
  template < typename GUM_SCALAR >
  void LazyPropagation< GUM_SCALAR >::__computeJoinTreeRoots() {
    // get the set of cliques in which we can find the targets and
    // joint targets
    NodeSet clique_targets;
    for (const auto node : this->targets()) {
      try {
        clique_targets.insert(__node_to_clique[node]);
      } catch (Exception&) {}
    }
    for (const auto& set : this->jointTargets()) {
      try {
        clique_targets.insert(__joint_target_to_clique[set]);
      } catch (Exception&) {}
    }

    // put these cliques and their domain sizes in a vector
    std::vector< std::pair< NodeId, Size > > possible_roots(clique_targets.size());
    const auto& bn = this->BN();
    std::size_t i = 0;
    for (const auto clique_id : clique_targets) {
      const auto& clique = __JT->clique(clique_id);
      Size        dom_size = 1;
      for (const auto node : clique) {
        dom_size *= bn.variable(node).domainSize();
      }
      possible_roots[i] = std::pair< NodeId, Size >(clique_id, dom_size);
      ++i;
    }

    // sort the cliques by increasing domain size
    std::sort(possible_roots.begin(),
              possible_roots.end(),
              [](const std::pair< NodeId, Size >& a,
                 const std::pair< NodeId, Size >& b) -> bool {
                return a.second < b.second;
              });

    // pick up the clique with the smallest size in each connected component
    NodeProperty< bool > marked = __JT->nodesProperty(false);
    std::function< void(NodeId, NodeId) > diffuse_marks =
      [&marked, &diffuse_marks, this](NodeId node, NodeId from) {
        if (!marked[node]) {
          marked[node] = true;
          for (const auto neigh : __JT->neighbours(node))
            if ((neigh != from) && !marked[neigh]) diffuse_marks(neigh, node);
        }
      };
    __roots.clear();
    for (const auto xclique : possible_roots) {
      NodeId clique = xclique.first;
      if (!marked[clique]) {
        __roots.insert(clique);
        diffuse_marks(clique, clique);
      }
    }
  }

  // performs the collect phase of Lazy Propagation
  template < typename GUM_SCALAR >
  void LazyPropagation< GUM_SCALAR >::__collectMessage(NodeId id,
                                                       NodeId from) {
    for (const auto other : __JT->neighbours(id)) {
      if ((other != from) && !__messages_computed[Arc(other, id)])
        __collectMessage(other, id);
    }

    if ((id != from) && !__messages_computed[Arc(id, from)]) {
      __produceMessage(id, from);
    }
  }
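
  // Illustration (comments only): on a chain of cliques C1 - C2 - C3, calling
  // __collectMessage(C3, C3) first recurses down to C1, then produces the
  // messages C1 -> C2 and C2 -> C3 on the way back; messages that were
  // already computed are skipped, which is what makes the propagation "lazy".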

  // find the potentials d-connected to a set of variables
  template < typename GUM_SCALAR >
  void LazyPropagation< GUM_SCALAR >::__findRelevantPotentialsGetAll(
    Set< const Potential< GUM_SCALAR >* >& pot_list,
    Set< const DiscreteVariable* >&        kept_vars) {}

  // find the potentials d-connected to a set of variables
  template < typename GUM_SCALAR >
  void LazyPropagation< GUM_SCALAR >::__findRelevantPotentialsWithdSeparation(
    Set< const Potential< GUM_SCALAR >* >& pot_list,
    Set< const DiscreteVariable* >&        kept_vars) {
    // find the node ids of the kept variables
    NodeSet     kept_ids;
    const auto& bn = this->BN();
    for (const auto var : kept_vars) {
      kept_ids.insert(bn.nodeId(*var));
    }

    // determine the set of potentials d-connected with the kept variables
    NodeSet requisite_nodes;
    BayesBall::requisiteNodes(bn.dag(),
                              kept_ids,
                              this->hardEvidenceNodes(),
                              this->softEvidenceNodes(),
                              requisite_nodes);
    for (auto iter = pot_list.beginSafe(); iter != pot_list.endSafe(); ++iter) {
      const Sequence< const DiscreteVariable* >& vars =
        (**iter).variablesSequence();
      bool found = false;
      for (const auto var : vars) {
        if (requisite_nodes.exists(bn.nodeId(*var))) {
          found = true;
          break;
        }
      }

      if (!found) { pot_list.erase(iter); }
    }
  }

  // find the potentials d-connected to a set of variables
  template < typename GUM_SCALAR >
  void LazyPropagation< GUM_SCALAR >::__findRelevantPotentialsWithdSeparation2(
    Set< const Potential< GUM_SCALAR >* >& pot_list,
    Set< const DiscreteVariable* >&        kept_vars) {
    // find the node ids of the kept variables
    NodeSet     kept_ids;
    const auto& bn = this->BN();
    for (const auto var : kept_vars) {
      kept_ids.insert(bn.nodeId(*var));
    }

    // determine the set of potentials d-connected with the kept variables
    BayesBall::relevantPotentials(bn,
                                  kept_ids,
                                  this->hardEvidenceNodes(),
                                  this->softEvidenceNodes(),
                                  pot_list);
  }

  // find the potentials d-connected to a set of variables
  template < typename GUM_SCALAR >
  void LazyPropagation< GUM_SCALAR >::__findRelevantPotentialsWithdSeparation3(
    Set< const Potential< GUM_SCALAR >* >& pot_list,
    Set< const DiscreteVariable* >&        kept_vars) {
    // find the node ids of the kept variables
    NodeSet     kept_ids;
    const auto& bn = this->BN();
    for (const auto var : kept_vars) {
      kept_ids.insert(bn.nodeId(*var));
    }

    // determine the set of potentials d-connected with the kept variables
    dSeparation dsep;
    dsep.relevantPotentials(bn,
                            kept_ids,
                            this->hardEvidenceNodes(),
                            this->softEvidenceNodes(),
                            pot_list);
  }

  // find the potentials d-connected to a set of variables
  template < typename GUM_SCALAR >
  void LazyPropagation< GUM_SCALAR >::__findRelevantPotentialsXX(
    Set< const Potential< GUM_SCALAR >* >& pot_list,
    Set< const DiscreteVariable* >&        kept_vars) {
    switch (__find_relevant_potential_type) {
      case RelevantPotentialsFinderType::DSEP_BAYESBALL_POTENTIALS:
        __findRelevantPotentialsWithdSeparation2(pot_list, kept_vars);
        break;

      case RelevantPotentialsFinderType::DSEP_BAYESBALL_NODES:
        __findRelevantPotentialsWithdSeparation(pot_list, kept_vars);
        break;

      case RelevantPotentialsFinderType::DSEP_KOLLER_FRIEDMAN_2009:
        __findRelevantPotentialsWithdSeparation3(pot_list, kept_vars);
        break;

      case RelevantPotentialsFinderType::FIND_ALL:
        __findRelevantPotentialsGetAll(pot_list, kept_vars);
        break;

      default: GUM_ERROR(FatalError, "not implemented yet");
    }
  }

  // remove barren variables
  template < typename GUM_SCALAR >
  Set< const Potential< GUM_SCALAR >* >
    LazyPropagation< GUM_SCALAR >::__removeBarrenVariables(
      __PotentialSet& pot_list, Set< const DiscreteVariable* >& del_vars) {
    // remove from del_vars the variables that received some evidence:
    // only those that did not receive evidence can be barren variables
    Set< const DiscreteVariable* > the_del_vars = del_vars;
    for (auto iter = the_del_vars.beginSafe(); iter != the_del_vars.endSafe();
         ++iter) {
      NodeId id = this->BN().nodeId(**iter);
      if (this->hardEvidenceNodes().exists(id)
          || this->softEvidenceNodes().exists(id)) {
        the_del_vars.erase(iter);
      }
    }

    // assign to each random variable the set of potentials that contain it
    HashTable< const DiscreteVariable*, __PotentialSet > var2pots;
    __PotentialSet empty_pot_set;
    for (const auto pot : pot_list) {
      const Sequence< const DiscreteVariable* >& vars = pot->variablesSequence();
      for (const auto var : vars) {
        if (the_del_vars.exists(var)) {
          if (!var2pots.exists(var)) { var2pots.insert(var, empty_pot_set); }
          var2pots[var].insert(pot);
        }
      }
    }

    // each variable with only one potential is a barren variable
    // assign to each potential with barren nodes its set of barren variables
    HashTable< const Potential< GUM_SCALAR >*, Set< const DiscreteVariable* > >
      pot2barren_var;
    Set< const DiscreteVariable* > empty_var_set;
    for (const auto elt : var2pots) {
      if (elt.second.size() == 1) {   // here we have a barren variable
        const Potential< GUM_SCALAR >* pot = *(elt.second.begin());
        if (!pot2barren_var.exists(pot)) {
          pot2barren_var.insert(pot, empty_var_set);
        }
        pot2barren_var[pot].insert(elt.first);   // insert the barren variable
      }
    }

    // for each potential with barren variables, marginalize them.
    // if the potential contains only barren variables, simply remove it from
    // the set of potentials, else just project the potential
    MultiDimProjection< GUM_SCALAR, Potential > projector(LPNewprojPotential);
    __PotentialSet projected_pots;
    for (const auto elt : pot2barren_var) {
      // remove the current potential from pot_list as, anyway, we will
      // change it
      const Potential< GUM_SCALAR >* pot = elt.first;
      pot_list.erase(pot);

      // check whether we need to add a projected new potential or not (i.e.,
      // whether there exist non-barren variables or not)
      if (pot->variablesSequence().size() != elt.second.size()) {
        auto new_pot = projector.project(*pot, elt.second);
        pot_list.insert(new_pot);
        projected_pots.insert(new_pot);
      }
    }

    return projected_pots;
  }
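
  // Illustration (comments only): if del_vars = {A, B} and the only potential
  // mentioning B is P(B | A), then B is barren: summing P(B | A) over B yields
  // a table of ones, so the potential can be dropped (or merely projected)
  // without changing the marginals of the remaining variables.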

  // remove variables del_vars from the list of potentials pot_list
  template < typename GUM_SCALAR >
  Set< const Potential< GUM_SCALAR >* >
    LazyPropagation< GUM_SCALAR >::__marginalizeOut(
      Set< const Potential< GUM_SCALAR >* > pot_list,
      Set< const DiscreteVariable* >&       del_vars,
      Set< const DiscreteVariable* >&       kept_vars) {
    // use d-separation analysis to check which potentials shall be combined
    __findRelevantPotentialsXX(pot_list, kept_vars);

    // remove the potentials corresponding to barren variables if we want
    // to exploit barren nodes
    __PotentialSet barren_projected_potentials;
    if (__barren_nodes_type == FindBarrenNodesType::FIND_BARREN_NODES) {
      barren_projected_potentials = __removeBarrenVariables(pot_list, del_vars);
    }

    // create a combine and project operator that will perform the
    // marginalization
    MultiDimCombineAndProjectDefault< GUM_SCALAR, Potential > combine_and_project(
      __combination_op, __projection_op);
    __PotentialSet new_pot_list =
      combine_and_project.combineAndProject(pot_list, del_vars);

    // remove all the potentials that were created due to projections of
    // barren nodes and that are not part of the new_pot_list: these
    // potentials were just temporary potentials
    for (auto iter = barren_projected_potentials.beginSafe();
         iter != barren_projected_potentials.endSafe();
         ++iter) {
      if (!new_pot_list.exists(*iter)) delete *iter;
    }

    // remove all the potentials that have no dimension
    for (auto iter_pot = new_pot_list.beginSafe();
         iter_pot != new_pot_list.endSafe();
         ++iter_pot) {
      if ((*iter_pot)->variablesSequence().size() == 0) {
        // as we have already marginalized out variables that received
        // evidence, it may be the case that, after combining and projecting,
        // some potentials might be empty. In this case, we shall keep their
        // constant and remove them from memory
        // # TODO: keep the constants!
        delete *iter_pot;
        new_pot_list.erase(iter_pot);
      }
    }

    return new_pot_list;
  }

  // creates the message sent by clique from_id to clique to_id
  template < typename GUM_SCALAR >
  void LazyPropagation< GUM_SCALAR >::__produceMessage(NodeId from_id,
                                                       NodeId to_id) {
    // get the potentials of the clique
    __PotentialSet pot_list = __clique_potentials[from_id];

    // add the messages sent by adjacent nodes to from_id
    for (const auto other_id : __JT->neighbours(from_id))
      if (other_id != to_id)
        pot_list += __separator_potentials[Arc(other_id, from_id)];

    // get the set of variables that need be removed from the potentials
    const NodeSet& from_clique = __JT->clique(from_id);
    const NodeSet& separator = __JT->separator(from_id, to_id);
    Set< const DiscreteVariable* > del_vars(from_clique.size());
    Set< const DiscreteVariable* > kept_vars(separator.size());
    const auto& bn = this->BN();

    for (const auto node : from_clique) {
      if (!separator.contains(node)) {
        del_vars.insert(&(bn.variable(node)));
      } else {
        kept_vars.insert(&(bn.variable(node)));
      }
    }

    // pot_list now contains all the potentials to multiply and marginalize
    // => combine the messages
    __PotentialSet new_pot_list = __marginalizeOut(pot_list, del_vars, kept_vars);

    // keep track of the newly created potentials, but first remove all the
    // potentials that are equal to ones (as probability matrix multiplications
    // are tensorial, such potentials are useless)
    const Arc arc(from_id, to_id);
    for (auto iter = new_pot_list.beginSafe(); iter != new_pot_list.endSafe();
         ++iter) {
      const auto pot = *iter;
      if (pot->variablesSequence().size() == 1) {
        bool is_all_ones = true;
        for (Instantiation inst(*pot); !inst.end(); ++inst) {
          if (pot->get(inst) < __1_minus_epsilon) {
            is_all_ones = false;
            break;
          }
        }
        if (is_all_ones) {
          if (!pot_list.exists(pot)) delete pot;
          new_pot_list.erase(iter);
          continue;
        }
      }

      if (!pot_list.exists(pot)) {
        if (!__created_potentials.exists(arc))
          __created_potentials.insert(arc, __PotentialSet());
        __created_potentials[arc].insert(pot);
      }
    }

    __separator_potentials[arc] = std::move(new_pot_list);
    __messages_computed[arc] = true;
  }
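
  // Illustration (comments only): a message from clique {A, B} to a neighbor
  // whose separator is {B} is obtained by combining the clique's potentials
  // and incoming messages, then summing A out; only the potentials found
  // relevant by the d-separation analysis above take part in the combination.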

  // performs a whole inference
  template < typename GUM_SCALAR >
  void LazyPropagation< GUM_SCALAR >::_makeInference() {
    // collect messages for all single targets
    for (const auto node : this->targets()) {
      // perform only collects in the join tree for nodes that have
      // not received hard evidence (those that received hard evidence were
      // not included into the join tree for speed-up reasons)
      if (__graph.exists(node)) {
        __collectMessage(__node_to_clique[node], __node_to_clique[node]);
      }
    }

    // collect messages for all set targets
    // by parsing __joint_target_to_clique, we ensure that the cliques that
    // are referenced belong to the join tree (even if some of the nodes in
    // their associated joint_target do not belong to __graph)
    for (const auto set : __joint_target_to_clique)
      __collectMessage(set.second, set.second);
  }

  /// returns a fresh potential equal to P(argument, evidence)
  template < typename GUM_SCALAR >
  Potential< GUM_SCALAR >*
    LazyPropagation< GUM_SCALAR >::_unnormalizedJointPosterior(NodeId id) {
    const auto& bn = this->BN();

    // nodes that received hard evidence do not belong to the join tree
    // # TODO: check for sets of inconsistent hard evidence
    if (this->hardEvidenceNodes().contains(id)) {
      return new Potential< GUM_SCALAR >(*(this->evidence()[id]));
    }

    // if we still need to perform some inference task, do it (this should
    // already have been done by _makeInference)
    NodeId clique_of_id = __node_to_clique[id];
    __collectMessage(clique_of_id, clique_of_id);

    // now we just need to create the product of the potentials of the clique
    // containing id with the messages received by this clique and
    // marginalize out all variables except id
    __PotentialSet pot_list = __clique_potentials[clique_of_id];

    // add the messages sent by adjacent nodes to targetClique
    for (const auto other : __JT->neighbours(clique_of_id))
      pot_list += __separator_potentials[Arc(other, clique_of_id)];

    // get the set of variables that need be removed from the potentials
    const NodeSet& nodes = __JT->clique(clique_of_id);
    Set< const DiscreteVariable* > kept_vars{&(bn.variable(id))};
    Set< const DiscreteVariable* > del_vars(nodes.size());
    for (const auto node : nodes) {
      if (node != id) del_vars.insert(&(bn.variable(node)));
    }

    // pot_list now contains all the potentials to multiply and marginalize
    // => combine the messages
    __PotentialSet new_pot_list = __marginalizeOut(pot_list, del_vars, kept_vars);
    Potential< GUM_SCALAR >* joint = nullptr;

    if (new_pot_list.size() == 1) {
      joint = const_cast< Potential< GUM_SCALAR >* >(*(new_pot_list.begin()));
      // if pot already existed, create a copy, so that we can put it into
      // the __target_posteriors property
      if (pot_list.exists(joint)) {
        joint = new Potential< GUM_SCALAR >(*joint);
      } else {
        // remove the joint from new_pot_list so that it will not be
        // removed just after the else block
        new_pot_list.clear();
      }
    } else {
      MultiDimCombinationDefault< GUM_SCALAR, Potential > fast_combination(
        __combination_op);
      joint = fast_combination.combine(new_pot_list);
    }

    // remove the potentials that were created in new_pot_list
    for (const auto pot : new_pot_list)
      if (!pot_list.exists(pot)) delete pot;

    // check that the joint posterior is different from a 0 vector: this
    // would indicate that some hard evidence are not compatible (their joint
    // probability is equal to 0)
    bool nonzero_found = false;
    for (Instantiation inst(*joint); !inst.end(); ++inst) {
      if (joint->get(inst)) {
        nonzero_found = true;
        break;
      }
    }
    if (!nonzero_found) {
      // remove joint from memory to avoid memory leaks
      delete joint;
      GUM_ERROR(IncompatibleEvidence,
                "some evidence entered into the Bayes "
                "net are incompatible (their joint proba = 0)");
    }
    return joint;
  }

  /// returns the posterior of a given variable
  template < typename GUM_SCALAR >
  const Potential< GUM_SCALAR >&
    LazyPropagation< GUM_SCALAR >::_posterior(NodeId id) {
    // check if we have already computed the posterior
    if (__target_posteriors.exists(id)) { return *(__target_posteriors[id]); }

    // compute the joint posterior and normalize
    auto joint = _unnormalizedJointPosterior(id);
    joint->normalize();
    __target_posteriors.insert(id, joint);

    return *joint;
  }
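
  // Usage sketch (illustrative only): posteriors are cached, so a second call
  // with the same id costs nothing until the evidence or targets change.
  //
  //   inf.addEvidence(evidence_node, std::vector< double >{0.2, 0.8});  // soft
  //   inf.makeInference();
  //   std::cout << inf.posterior(target_node) << std::endl;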

  // returns the unnormalized joint posterior of a given set of nodes
  template < typename GUM_SCALAR >
  Potential< GUM_SCALAR >*
    LazyPropagation< GUM_SCALAR >::_unnormalizedJointPosterior(
      const NodeSet& set) {
    // nodes that received hard evidence do not belong to the join tree, so
    // extract from targets the nodes that are not hard evidence
    NodeSet targets = set, hard_ev_nodes;
    for (const auto node : this->hardEvidenceNodes()) {
      if (targets.contains(node)) {
        targets.erase(node);
        hard_ev_nodes.insert(node);
      }
    }

    // if all the nodes have received hard evidence, then compute the
    // joint posterior directly by multiplying the hard evidence potentials
    const auto& evidence = this->evidence();
    if (targets.empty()) {
      __PotentialSet pot_list;
      for (const auto node : set) {
        pot_list.insert(evidence[node]);
      }
      if (pot_list.size() == 1) {
        auto pot = new Potential< GUM_SCALAR >(**(pot_list.begin()));
        return pot;
      } else {
        MultiDimCombinationDefault< GUM_SCALAR, Potential > fast_combination(
          __combination_op);
        return fast_combination.combine(pot_list);
      }
    }


    // if we still need to perform some inference task, do it: so, first,
    // determine the clique on which we should perform collect to compute
    // the unnormalized joint posterior of a set of nodes containing "targets"
    NodeId clique_of_set;
    try {
      clique_of_set = __joint_target_to_clique[set];
    } catch (NotFound&) {
      // here, the precise set of targets does not belong to the set of targets
      // defined by the user. So we will try to find a clique in the junction
      // tree that contains "targets":

      // 1/ we should check that all the nodes belong to the join tree
      for (const auto node : targets) {
        if (!__graph.exists(node)) {
          GUM_ERROR(UndefinedElement, node << " is not a target node");
        }
      }

      // 2/ the clique created by the first eliminated node among the targets
      // is the one we are looking for
      const std::vector< NodeId >& JT_elim_order =
        __triangulation->eliminationOrder();

      NodeProperty< int > elim_order(Size(JT_elim_order.size()));
      for (std::size_t i = std::size_t(0), size = JT_elim_order.size(); i < size;
           ++i)
        elim_order.insert(JT_elim_order[i], (int)i);
      NodeId first_eliminated_node = *(targets.begin());
      int    elim_number = elim_order[first_eliminated_node];
      for (const auto node : targets) {
        if (elim_order[node] < elim_number) {
          elim_number = elim_order[node];
          first_eliminated_node = node;
        }
      }

      clique_of_set =
        __triangulation->createdJunctionTreeClique(first_eliminated_node);


      // 3/ check that clique_of_set contains all the nodes in the target
      const NodeSet& clique_nodes = __JT->clique(clique_of_set);
      for (const auto node : targets) {
        if (!clique_nodes.contains(node)) {
          GUM_ERROR(UndefinedElement, set << " is not a joint target");
        }
      }

      // add the discovered clique to __joint_target_to_clique
      __joint_target_to_clique.insert(set, clique_of_set);
    }

    // now perform a collect on the clique
    __collectMessage(clique_of_set, clique_of_set);

    // now we just need to create the product of the potentials of the clique
    // containing set with the messages received by this clique and
    // marginalize out all variables except set
    __PotentialSet pot_list = __clique_potentials[clique_of_set];

    // add the messages sent by adjacent nodes to targetClique
    for (const auto other : __JT->neighbours(clique_of_set))
      pot_list += __separator_potentials[Arc(other, clique_of_set)];

    // get the set of variables that need be removed from the potentials
    const NodeSet& nodes = __JT->clique(clique_of_set);
    Set< const DiscreteVariable* > del_vars(nodes.size());
    Set< const DiscreteVariable* > kept_vars(targets.size());
    const auto& bn = this->BN();
    for (const auto node : nodes) {
      if (!targets.contains(node)) {
        del_vars.insert(&(bn.variable(node)));
      } else {
        kept_vars.insert(&(bn.variable(node)));
      }
    }

    // pot_list now contains all the potentials to multiply and marginalize
    // => combine the messages
    __PotentialSet new_pot_list = __marginalizeOut(pot_list, del_vars, kept_vars);
    Potential< GUM_SCALAR >* joint = nullptr;

    if ((new_pot_list.size() == 1) && hard_ev_nodes.empty()) {
      joint = const_cast< Potential< GUM_SCALAR >* >(*(new_pot_list.begin()));

      // if pot already existed, create a copy, so that we can put it into
      // the __target_posteriors property
      if (pot_list.exists(joint)) {
        joint = new Potential< GUM_SCALAR >(*joint);
      } else {
        // remove the joint from new_pot_list so that it will not be
        // removed just after the next else block
        new_pot_list.clear();
      }
    } else {
      // combine all the potentials in new_pot_list with all the hard evidence
      // of the nodes in set
      __PotentialSet new_new_pot_list = new_pot_list;
      for (const auto node : hard_ev_nodes) {
        new_new_pot_list.insert(evidence[node]);
      }
      MultiDimCombinationDefault< GUM_SCALAR, Potential > fast_combination(
        __combination_op);
      joint = fast_combination.combine(new_new_pot_list);
    }

    // remove the potentials that were created in new_pot_list
    for (const auto pot : new_pot_list)
      if (!pot_list.exists(pot)) delete pot;

    // check that the joint posterior is different from a 0 vector: this
    // would indicate that some hard evidence are not compatible
    bool nonzero_found = false;
    for (Instantiation inst(*joint); !inst.end(); ++inst) {
      if ((*joint)[inst]) {
        nonzero_found = true;
        break;
      }
    }
    if (!nonzero_found) {
      // remove joint from memory to avoid memory leaks
      delete joint;
      GUM_ERROR(IncompatibleEvidence,
                "some evidence entered into the Bayes "
                "net are incompatible (their joint proba = 0)");
    }

    return joint;
  }

  /// returns the posterior of a declared joint target
  template < typename GUM_SCALAR >
  const Potential< GUM_SCALAR >&
    LazyPropagation< GUM_SCALAR >::_jointPosterior(const NodeSet& set) {
    // check if we have already computed the posterior
    if (__joint_target_posteriors.exists(set)) {
      return *(__joint_target_posteriors[set]);
    }

    // compute the joint posterior and normalize
    auto joint = _unnormalizedJointPosterior(set);
    joint->normalize();
    __joint_target_posteriors.insert(set, joint);

    return *joint;
  }
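
  // Usage sketch (illustrative only): joint targets should be declared before
  // inference so that a clique containing them exists in the join tree.
  //
  //   inf.addJointTarget(gum::NodeSet{id_a, id_b});
  //   inf.makeInference();
  //   const auto& joint = inf.jointPosterior(gum::NodeSet{id_a, id_b});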

  /// returns the posterior of a subset of a declared joint target
  template < typename GUM_SCALAR >
  const Potential< GUM_SCALAR >& LazyPropagation< GUM_SCALAR >::_jointPosterior(
    const NodeSet& wanted_target, const NodeSet& declared_target) {
    // check if we have already computed the posterior of wanted_target
    if (__joint_target_posteriors.exists(wanted_target))
      return *(__joint_target_posteriors[wanted_target]);

    // here, we will have to compute the posterior of declared_target and
    // marginalize out all the variables that do not belong to wanted_target

    // check if we have already computed the posterior of declared_target
    if (!__joint_target_posteriors.exists(declared_target)) {
      _jointPosterior(declared_target);
    }

    // marginalize out all the variables that do not belong to wanted_target
    const auto& bn = this->BN();
    Set< const DiscreteVariable* > del_vars;
    for (const auto node : declared_target)
      if (!wanted_target.contains(node)) del_vars.insert(&(bn.variable(node)));
    Potential< GUM_SCALAR >* pot = new Potential< GUM_SCALAR >(
      __joint_target_posteriors[declared_target]->margSumOut(del_vars));

    // save the result into the cache
    __joint_target_posteriors.insert(wanted_target, pot);

    return *pot;
  }
1661 
1662 
1663  template < typename GUM_SCALAR >
1664  GUM_SCALAR LazyPropagation< GUM_SCALAR >::evidenceProbability() {
1665  // here, we should check that __find_relevant_potential_type is equal to
1666  // FIND_ALL. Otherwise, the computations could be wrong.
1667  RelevantPotentialsFinderType old_relevant_type =
1668  __find_relevant_potential_type;
1669 
1670  // if the relevant potentials finder is not equal to FIND_ALL, all the
1671  // current computations may lead to incorrect results, so we shall
1672  // discard them
1673  if (old_relevant_type != RelevantPotentialsFinderType::FIND_ALL) {
1674  __find_relevant_potential_type = RelevantPotentialsFinderType::FIND_ALL;
1675  __is_new_jt_needed = true;
1676  __invalidateAllMessages();
1677  }
1678 
1679  // perform inference in each connected component
1680  this->makeInference();
1681 
1682  // for each connected component, select a variable X and compute the
1683  // joint probability of X and evidence e. Then marginalize X out to get
1684  // p(e) in this connected component. Finally, multiply all the p(e) we
1685  // obtained with the elements in __constants. The result is the
1686  // probability of evidence
1687 
1688  GUM_SCALAR prob_ev = 1;
1689  for (const auto root : __roots) {
1690  // get a node in the clique
1691  NodeId node = *(__JT->clique(root).begin());
1692  Potential< GUM_SCALAR >* tmp = _unnormalizedJointPosterior(node);
1693  GUM_SCALAR sum = 0;
1694  for (Instantiation iter(*tmp); !iter.end(); ++iter)
1695  sum += tmp->get(iter);
1696  prob_ev *= sum;
1697  delete tmp;
1698  }
1699 
1700  for (const auto& projected_cpt : __constants)
1701  prob_ev *= projected_cpt.second;
1702 
1703  // put back the relevant potential type selected by the user
1704  __find_relevant_potential_type = old_relevant_type;
1705 
1706  return prob_ev;
1707  }
1708 
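To summarize the loop above: for each connected component with root clique r, a single variable X of r is picked, P(X, e) is computed and summed out, and the resulting p(e) factors are multiplied together with the constants obtained by projecting the CPTs defined only over hard evidence nodes. From user code this reduces to one call, sketched here with placeholder evidence:

  gum::LazyPropagation< double > ie(&bn);
  ie.addEvidence(0, 1);                    // placeholder hard evidence
  double p_e = ie.evidenceProbability();   // P(e); FIND_ALL is forced internally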
1709 } /* namespace gum */
1710 
1711 #endif // DOXYGEN_SHOULD_SKIP_THIS