aGrUM  0.20.3
a C++ library for (probabilistic) graphical models
ShaferShenoyMNInference_tpl.h
/**
 *
 * Copyright (c) 2005-2021 by Pierre-Henri WUILLEMIN (@LIP6) and Christophe GONZALES (@AMU)
 * info_at_agrum_dot_org
 *
 * This library is free software: you can redistribute it and/or modify
 * it under the terms of the GNU Lesser General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public License
 * along with this library. If not, see <http://www.gnu.org/licenses/>.
 *
 */


/**
 * @file
 * @brief Implementation of Shafer-Shenoy's propagation for inference in
 * Markov Networks.
 */
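//
// Typical usage (illustrative sketch; the small model below and its labels are
// hypothetical, and MarkovNet::fastPrototype is assumed to be available, as in
// the aGrUM test suite):
//
//   auto mn = gum::MarkovNet< double >::fastPrototype("A--B;B--C");
//   gum::ShaferShenoyMNInference< double > ie(&mn);
//   ie.addEvidence(mn.idFromName("A"), 0);
//   ie.makeInference();
//   std::cout << ie.posterior(mn.idFromName("C")) << std::endl;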

#ifndef DOXYGEN_SHOULD_SKIP_THIS
# include <agrum/MN/inference/ShaferShenoyMNInference.h>

# include <agrum/tools/graphs/algorithms/binaryJoinTreeConverterDefault.h>
# include <agrum/tools/multidim/instantiation.h>
# include <agrum/tools/multidim/utils/operators/multiDimCombineAndProjectDefault.h>
# include <agrum/tools/multidim/utils/operators/multiDimProjection.h>


namespace gum {
  // default constructor
  template < typename GUM_SCALAR >
  INLINE ShaferShenoyMNInference< GUM_SCALAR >::ShaferShenoyMNInference(
     const IMarkovNet< GUM_SCALAR >* MN,
     bool                            use_binary_join_tree) :
      JointTargetedMNInference< GUM_SCALAR >(MN),
      _use_binary_join_tree_(use_binary_join_tree) {
    // create a default triangulation (the user can change it afterwards)
    _triangulation_ = new DefaultTriangulation;

    // for debugging purposes
    GUM_CONSTRUCTOR(ShaferShenoyMNInference);
  }


  // destructor
  template < typename GUM_SCALAR >
  INLINE ShaferShenoyMNInference< GUM_SCALAR >::~ShaferShenoyMNInference() {
    // remove all the potentials created during the last message passing
    for (const auto& pots: _created_messages_) {
      for (const auto pot: pots.second)
        delete pot;
    }

    // remove the potentials created after removing the nodes that received
    // hard evidence
    for (const auto& pot: _hard_ev_projected_factors_) {
      delete pot.second;
    }

    // remove all the potentials in _clique_potentials_ that do not belong
    // to _clique_potentials_list_: in this case, those potentials have been
    // created by combination of the corresponding list of potentials in
    // _clique_potentials_list_. In other words, the size of this list is strictly
    // greater than 1.
    for (auto pot: _clique_potentials_) {
      if (_clique_potentials_list_[pot.first].size() > 1) { delete pot.second; }
    }

    // remove all the posteriors computed
    for (const auto& pot: _target_posteriors_)
      delete pot.second;
    for (const auto& pot: _joint_target_posteriors_)
      delete pot.second;

    // remove the junction tree and the triangulation algorithm
    if (_propagator_ != nullptr) delete _propagator_;
    if (_junctionTree_ != nullptr) delete _junctionTree_;
    delete _triangulation_;

    // for debugging purposes
    GUM_DESTRUCTOR(ShaferShenoyMNInference);
  }


  /// set a new triangulation algorithm
  template < typename GUM_SCALAR >
  void ShaferShenoyMNInference< GUM_SCALAR >::setTriangulation(
     const Triangulation& new_triangulation) {
    delete _triangulation_;
    _triangulation_    = new_triangulation.newFactory();
    _is_new_jt_needed_ = true;
    this->setOutdatedStructureState_();
  }
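
  // Usage sketch (assuming, as the code above suggests, that the engine keeps
  // its own copy of the triangulation, so a stack object can be passed):
  //   gum::DefaultTriangulation triangulation;
  //   ie.setTriangulation(triangulation);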


  /// returns the current join tree used
  template < typename GUM_SCALAR >
  INLINE const JoinTree* ShaferShenoyMNInference< GUM_SCALAR >::joinTree() {
    _createNewJT_();

    return _propagator_;
  }

  /// returns the current junction tree
  template < typename GUM_SCALAR >
  INLINE const JunctionTree* ShaferShenoyMNInference< GUM_SCALAR >::junctionTree() {
    _createNewJT_();

    return _junctionTree_;
  }
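
  // Sketch: the returned graph can be inspected, e.g. when debugging a model
  // (toDot() renders it in Graphviz format):
  //   std::cout << ie.junctionTree()->toDot() << std::endl;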


  /// sets the operator for performing the projections
  template < typename GUM_SCALAR >
  INLINE void ShaferShenoyMNInference< GUM_SCALAR >::setProjectionFunction(
     Potential< GUM_SCALAR >* (*proj)(const Potential< GUM_SCALAR >&,
                                      const Set< const DiscreteVariable* >&)) {
    _projection_op_ = proj;
  }


  /// sets the operator for performing the combinations
  template < typename GUM_SCALAR >
  INLINE void ShaferShenoyMNInference< GUM_SCALAR >::setCombinationFunction(
     Potential< GUM_SCALAR >* (*comb)(const Potential< GUM_SCALAR >&,
                                      const Potential< GUM_SCALAR >&)) {
    _combination_op_ = comb;
  }
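
  // Illustrative sketch of a compatible combination operator (multiplication
  // is the usual choice; the helper below is hypothetical):
  //   template < typename GUM_SCALAR >
  //   gum::Potential< GUM_SCALAR >* myCombine(const gum::Potential< GUM_SCALAR >& p1,
  //                                           const gum::Potential< GUM_SCALAR >& p2) {
  //     return new gum::Potential< GUM_SCALAR >(p1 * p2);
  //   }
  //   ...
  //   ie.setCombinationFunction(myCombine< double >);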


  /// invalidate all messages, posteriors and created potentials
  template < typename GUM_SCALAR >
  void ShaferShenoyMNInference< GUM_SCALAR >::_invalidateAllMessages_() {
    // remove all the messages computed
    for (auto& potset: _separator_potentials_)
      potset.second.clear();
    for (auto& mess_computed: _messages_computed_)
      mess_computed.second = false;

    // remove all the created potentials
    for (const auto& potset: _created_messages_)
      for (const auto pot: potset.second)
        delete pot;

    // remove all the posteriors
    for (const auto& pot: _target_posteriors_)
      delete pot.second;
    for (const auto& pot: _joint_target_posteriors_)
      delete pot.second;

    // indicate that new messages need be computed
    if (this->isInferenceReady() || this->isInferenceDone()) this->setOutdatedPotentialsState_();
  }

  /// fired when a new evidence is inserted
  template < typename GUM_SCALAR >
  void ShaferShenoyMNInference< GUM_SCALAR >::onEvidenceAdded_(const NodeId id,
                                                               bool         isHardEvidence) {
    // if we have a new hard evidence, this modifies the undigraph over which
    // the join tree is created. This is also the case if id is not a node
    // of the undigraph
    if (isHardEvidence || !_reduced_graph_.exists(id))
      _is_new_jt_needed_ = true;
    else {
      try {
        _evidence_changes_.insert(id, EvidenceChangeType::EVIDENCE_ADDED);
      } catch (DuplicateElement&) {
        // here, the evidence change already existed. This necessarily means
        // that the current saved change is an EVIDENCE_ERASED. So if we
        // erased the evidence and added some again, this corresponds to an
        // EVIDENCE_MODIFIED
        _evidence_changes_[id] = EvidenceChangeType::EVIDENCE_MODIFIED;
      }
    }
  }


  /// fired when an evidence is removed
  template < typename GUM_SCALAR >
  void ShaferShenoyMNInference< GUM_SCALAR >::onEvidenceErased_(const NodeId id,
                                                                bool         isHardEvidence) {
    // if we delete a hard evidence, this modifies the undigraph over which
    // the join tree is created.
    if (isHardEvidence)
      _is_new_jt_needed_ = true;
    else {
      try {
        _evidence_changes_.insert(id, EvidenceChangeType::EVIDENCE_ERASED);
      } catch (DuplicateElement&) {
        // here, the evidence change already existed and it is necessarily an
        // EVIDENCE_ADDED or an EVIDENCE_MODIFIED. So, if the evidence has
        // been added and is now erased, this is similar to not having created
        // it. If the evidence was only modified, it already existed in the
        // last inference and we should now indicate that it has been removed.
        if (_evidence_changes_[id] == EvidenceChangeType::EVIDENCE_ADDED)
          _evidence_changes_.erase(id);
        else
          _evidence_changes_[id] = EvidenceChangeType::EVIDENCE_ERASED;
      }
    }
  }


  /// fired when all the evidence are erased
  template < typename GUM_SCALAR >
  void ShaferShenoyMNInference< GUM_SCALAR >::onAllEvidenceErased_(bool has_hard_evidence) {
    if (has_hard_evidence || !this->hardEvidenceNodes().empty())
      _is_new_jt_needed_ = true;
    else {
      for (const auto node: this->softEvidenceNodes()) {
        try {
          _evidence_changes_.insert(node, EvidenceChangeType::EVIDENCE_ERASED);
        } catch (DuplicateElement&) {
          // here, the evidence change already existed and it is necessarily an
          // EVIDENCE_ADDED or an EVIDENCE_MODIFIED. So, if the evidence has
          // been added and is now erased, this is similar to not having created
          // it. If the evidence was only modified, it already existed in the
          // last inference and we should now indicate that it has been removed.
          if (_evidence_changes_[node] == EvidenceChangeType::EVIDENCE_ADDED)
            _evidence_changes_.erase(node);
          else
            _evidence_changes_[node] = EvidenceChangeType::EVIDENCE_ERASED;
        }
      }
    }
  }


  /// fired when an evidence is changed
  template < typename GUM_SCALAR >
  void ShaferShenoyMNInference< GUM_SCALAR >::onEvidenceChanged_(const NodeId id,
                                                                 bool hasChangedSoftHard) {
    if (hasChangedSoftHard)
      _is_new_jt_needed_ = true;
    else {
      try {
        _evidence_changes_.insert(id, EvidenceChangeType::EVIDENCE_MODIFIED);
      } catch (DuplicateElement&) {
        // here, the evidence change already existed and it is necessarily an
        // EVIDENCE_ADDED. So we should keep this state to indicate that this
        // evidence is new w.r.t. the last inference
      }
    }
  }


  /// fired after a new target is inserted
  template < typename GUM_SCALAR >
  void ShaferShenoyMNInference< GUM_SCALAR >::onMarginalTargetAdded_(const NodeId id) {}


  /// fired before a target is removed
  template < typename GUM_SCALAR >
  void ShaferShenoyMNInference< GUM_SCALAR >::onMarginalTargetErased_(const NodeId id) {}


  /// fired after a new set target is inserted
  template < typename GUM_SCALAR >
  void ShaferShenoyMNInference< GUM_SCALAR >::onJointTargetAdded_(const NodeSet& set) {}


  /// fired before a set target is removed
  template < typename GUM_SCALAR >
  void ShaferShenoyMNInference< GUM_SCALAR >::onJointTargetErased_(const NodeSet& set) {}


  /// fired after all the nodes of the MN are added as single targets
  template < typename GUM_SCALAR >
  void ShaferShenoyMNInference< GUM_SCALAR >::onAllMarginalTargetsAdded_() {}


  /// fired before all the single targets are removed
  template < typename GUM_SCALAR >
  void ShaferShenoyMNInference< GUM_SCALAR >::onAllMarginalTargetsErased_() {}

  /// fired after a new Markov net has been assigned to the engine
  template < typename GUM_SCALAR >
  void ShaferShenoyMNInference< GUM_SCALAR >::onMarkovNetChanged_(
     const IMarkovNet< GUM_SCALAR >* mn) {}

  /// fired before all the joint targets are removed
  template < typename GUM_SCALAR >
  void ShaferShenoyMNInference< GUM_SCALAR >::onAllJointTargetsErased_() {}


  /// fired before all the single and joint targets are removed
  template < typename GUM_SCALAR >
  void ShaferShenoyMNInference< GUM_SCALAR >::onAllTargetsErased_() {}


  // check whether a new junction tree is really needed for the next inference
  template < typename GUM_SCALAR >
  bool ShaferShenoyMNInference< GUM_SCALAR >::_isNewJTNeeded_() const {
    // if we do not have a JT or if _is_new_jt_needed_ is set to true, then
    // we know that we need to create a new join tree
    if ((_propagator_ == nullptr) || _is_new_jt_needed_) return true;

    // if some targets do not belong to the join tree and, consequently,
    // to the undigraph that was used to construct the join tree, then we need
    // to create a new JT. This situation may occur if we constructed the
    // join tree after pruning irrelevant/barren nodes from the MN.
    // However, note that the nodes that received hard evidence do not belong to
    // the graph and, therefore, should not be taken into account
    const auto& hard_ev_nodes = this->hardEvidenceNodes();
    for (const auto node: this->targets()) {
      if (!_reduced_graph_.exists(node) && !hard_ev_nodes.exists(node)) return true;
    }
    for (const auto& joint_target: this->jointTargets()) {
      // here, we need to check that at least one clique contains all the
      // nodes of the joint target.
      bool containing_clique_found = false;
      for (const auto node: joint_target) {
        bool found = true;
        try {
          const NodeSet& clique = _propagator_->clique(_node_to_clique_[node]);
          for (const auto xnode: joint_target) {
            if (!clique.contains(xnode) && !hard_ev_nodes.exists(xnode)) {
              found = false;
              break;
            }
          }
        } catch (NotFound&) { found = false; }

        if (found) {
          containing_clique_found = true;
          break;
        }
      }

      if (!containing_clique_found) return true;
    }

    // if some new evidence have been added on nodes that do not belong
    // to _reduced_graph_, then we potentially have to reconstruct the join tree
    for (const auto& change: _evidence_changes_) {
      if ((change.second == EvidenceChangeType::EVIDENCE_ADDED)
          && !_reduced_graph_.exists(change.first))
        return true;
    }

    // here, the current JT is exactly what we need for the next inference
    return false;
  }


  /// create a new junction tree as well as its related data structures
  template < typename GUM_SCALAR >
  void ShaferShenoyMNInference< GUM_SCALAR >::_createNewJT_() {
    // to create the JT, we first create the moral graph of the MN in the
    // following way in order to take into account the barren nodes and the
    // nodes that received evidence:
    // 1/ we create an undirected graph containing the nodes and edges of the MN
    // 2/ add edges so that set targets are cliques of the moral graph
    // 3/ remove the nodes that received hard evidence (the nodes that co-occur
    // with them in a factor remain linked by edges, which is necessary for
    // inference)
    //
    // At the end of step 3/, we have our moral graph and we can triangulate it
    // to get the new junction tree

    // 1/ create the undirected graph of the MN
    const auto& mn  = this->MN();
    _reduced_graph_ = mn.graph();

    // 2/ if there exists some joint targets, we shall add new edges into the
    // moral graph in order to ensure that there exists a clique containing
    // each joint
    for (const auto& nodeset: this->jointTargets())
      for (const auto n1: nodeset)
        for (const auto n2: nodeset)
          if (n1 < n2) _reduced_graph_.addEdge(n1, n2);

    // 3/ remove all the nodes that received hard evidence
    _hard_ev_nodes_ = this->hardEvidenceNodes();
    for (const auto node: _hard_ev_nodes_) {
      _reduced_graph_.eraseNode(node);
    }

    // now, we can compute the new junction tree. To speed-up computations
    // (essentially, those of a distribution phase), we construct from this
    // junction tree a binary join tree
    if (_propagator_ != nullptr) delete _propagator_;
    if (_junctionTree_ != nullptr) delete _junctionTree_;

    _triangulation_->setGraph(&_reduced_graph_, &(this->domainSizes()));
    const JunctionTree& triang_jt = _triangulation_->junctionTree();
    if (_use_binary_join_tree_) {
      BinaryJoinTreeConverterDefault bjt_converter;
      NodeSet                        emptyset;
      _propagator_
         = new CliqueGraph(bjt_converter.convert(triang_jt, this->domainSizes(), emptyset));
    } else {
      _propagator_ = new CliqueGraph(triang_jt);
    }
    _junctionTree_ = new CliqueGraph(triang_jt);

    const std::vector< NodeId >& JT_elim_order   = _triangulation_->eliminationOrder();
    const Idx                    size_elim_order = JT_elim_order.size();
    NodeProperty< Idx >          elim_order(size_elim_order);
    for (Idx i = Idx(0); i < size_elim_order; ++i)
      elim_order.insert(JT_elim_order[i], i);

    // indicate, for each factor of the markov network, a clique in _propagator_
    // that can contain its potential
    _factor_to_clique_.clear();
    for (const auto& kv: mn.factors()) {
      const auto& factor = kv.first;   // kv.second is the Potential()
      // first_eliminated_node is not a true id yet: smallest_elim_number is
      // initialized to size_elim_order+100, an impossible value
      // (smallest_elim_number \in [0...size_elim_order-1])
      NodeId first_eliminated_node = NodeId(0);
      Idx    smallest_elim_number  = size_elim_order + 100;
      for (const auto nod: factor) {
        if (_reduced_graph_.exists(nod)) {
          if (elim_order[nod] < smallest_elim_number) {
            smallest_elim_number  = elim_order[nod];
            first_eliminated_node = nod;
          }
        }
      }

      if (smallest_elim_number != size_elim_order + 100) {
        // first_eliminated_node contains the first variable of the factor
        // eliminated => the clique created during its elimination contains
        // all the variables of the factor => it can contain the potential
        // assigned to the factor in the MN
        _factor_to_clique_.insert(
           factor,
           _triangulation_->createdJunctionTreeClique(first_eliminated_node));
      }
    }

    // attribute a clique in the join tree for each node in the MN
    // (we use a NodeProperty rather than calling mn.smallestFactorFromNode()
    // on the fly; could there be a better choice?)
    _node_to_clique_.clear();
    for (const NodeId node: mn.nodes()) {
      if (_reduced_graph_.exists(node)) {
        _node_to_clique_.insert(node, _factor_to_clique_[mn.smallestFactorFromNode(node)]);
      }
    }

    // indicate for each joint_target a clique that contains it
    _joint_target_to_clique_.clear();
    for (const auto& target: this->jointTargets()) {
      // remove from target all the nodes that received hard evidence (since they
      // do not belong to the join tree)
      NodeSet nodeset = target;
      for (const auto node: _hard_ev_nodes_)
        if (nodeset.contains(node)) nodeset.erase(node);

      if (!nodeset.empty()) {
        // first_eliminated_node is not a true id yet: smallest_elim_number is
        // initialized to size_elim_order, an impossible value
        // (smallest_elim_number \in [0...size_elim_order-1])
        NodeId first_eliminated_node = NodeId(0);
        Idx    smallest_elim_number  = size_elim_order;
        for (const auto nod: nodeset) {
          if (_reduced_graph_.exists(nod)) {
            if (elim_order[nod] < smallest_elim_number) {
              smallest_elim_number  = elim_order[nod];
              first_eliminated_node = nod;
            }
          }
        }

        if (smallest_elim_number != size_elim_order) {
          // first_eliminated_node contains the first eliminated node of the
          // target => the clique created during its elimination contains
          // all the nodes of the target => it can be associated with the target
          _joint_target_to_clique_.insert(
             nodeset,
             _triangulation_->createdJunctionTreeClique(first_eliminated_node));
        }
      }
    }

    // compute the roots of _propagator_'s connected components
    _computeJoinTreeRoots_();

    // remove all the Shafer-Shenoy potentials stored into the cliques
    for (const auto& xpot: _clique_potentials_) {
      if (_clique_potentials_list_[xpot.first].size() > 1) delete xpot.second;
    }
    _clique_potentials_.clear();

    // create empty potential lists into the cliques of the joint tree as well
    // as empty lists of evidence
    _clique_potentials_list_.clear();
    _node_to_soft_evidence_.clear();
    for (const auto clik: *_propagator_) {
      _clique_potentials_list_.insert(clik, _PotentialSet_());
    }

    // remove all the potentials created during the last inference
    for (const auto& potlist: _created_messages_)
      for (auto pot: potlist.second)
        delete pot;
    _created_messages_.clear();

    // remove all the potentials created to take into account hard evidence
    // during the last inference
    for (const auto& pot_pair: _hard_ev_projected_factors_)
      delete pot_pair.second;
    _hard_ev_projected_factors_.clear();

    // remove all the constants created due to projections of factors that were
    // defined over only hard evidence nodes
    // __constants.clear();

    // create empty lists of potentials for the messages and indicate that no
    // message has been computed yet
    _separator_potentials_.clear();
    _messages_computed_.clear();
    for (const auto& edge: _propagator_->edges()) {
      const Arc arc1(edge.first(), edge.second());
      _separator_potentials_.insert(arc1, _PotentialSet_());
      _messages_computed_.insert(arc1, false);
      const Arc arc2(Arc(edge.second(), edge.first()));
      _separator_potentials_.insert(arc2, _PotentialSet_());
      _messages_computed_.insert(arc2, false);
    }

    // remove all the posteriors computed so far
    for (const auto& pot: _target_posteriors_)
      delete pot.second;
    _target_posteriors_.clear();
    for (const auto& pot: _joint_target_posteriors_)
      delete pot.second;
    _joint_target_posteriors_.clear();


    // put all the factors of the Markov net nodes into the cliques
    // here, beware: all the potentials that are defined over some nodes
    // including hard evidence must be projected so that these nodes are
    // removed from the potential
    const auto& evidence = this->evidence();
    for (const auto& kv: mn.factors()) {
      const auto& factor = kv.first;
      const auto& pot    = *(kv.second);

      NodeSet hard_nodes_in_factor;
      for (const auto xnode: factor) {
        if (_hard_ev_nodes_.contains(xnode)) hard_nodes_in_factor.insert(xnode);
      }

      // if hard_nodes_in_factor contains hard evidence nodes, perform a projection
      // and insert the result into the appropriate clique, else insert
      // directly factor into the clique
      if (hard_nodes_in_factor.empty()) {
        _clique_potentials_list_[_factor_to_clique_[factor]].insert(&pot);
      } else {
        // marginalize out the hard evidence nodes: if the factor is defined
        // only over nodes that received hard evidence, do not consider it
        // as a potential anymore but as a constant
        if (hard_nodes_in_factor.size() != factor.size()) {
          // perform the projection with a combine and project instance
          Set< const DiscreteVariable* > hard_variables;
          _PotentialSet_                 marg_factor_set{&pot};
          for (const auto xnode: hard_nodes_in_factor) {
            marg_factor_set.insert(evidence[xnode]);
            hard_variables.insert(&(mn.variable(xnode)));
          }

          // perform the combination of those potentials and their projection
          MultiDimCombineAndProjectDefault< GUM_SCALAR, Potential > combine_and_project(
             _combination_op_, _projection_op_);
          _PotentialSet_ new_factor_list
             = combine_and_project.combineAndProject(marg_factor_set, hard_variables);

          // there should be only one potential in new_factor_list
          if (new_factor_list.size() != 1) {
            for (auto pot: new_factor_list) {
              if (!marg_factor_set.contains(pot)) delete pot;
            }
            GUM_ERROR(FatalError,
                      "the projection of a potential containing "
                      << "hard evidence is empty!");
          }
          const Potential< GUM_SCALAR >* projected_factor = *(new_factor_list.begin());
          _clique_potentials_list_[_factor_to_clique_[factor]].insert(projected_factor);
          _hard_ev_projected_factors_.insert(factor, projected_factor);
        }
      }
    }

    // we shall now add all the potentials of the soft evidence
    for (const auto node: this->softEvidenceNodes()) {
      _node_to_soft_evidence_.insert(node, evidence[node]);
      _clique_potentials_list_[_node_to_clique_[node]].insert(evidence[node]);
    }

    // now, in _clique_potentials_list_, for each clique, we have the list of
    // potentials that must be combined in order to produce the Shafer-Shenoy's
    // potential stored into the clique. So, perform this combination and
    // store the result in _clique_potentials_
    MultiDimCombinationDefault< GUM_SCALAR, Potential > fast_combination(_combination_op_);
    for (const auto& xpotset: _clique_potentials_list_) {
      const auto& potset = xpotset.second;
      if (potset.size() > 0) {
        // here, there will be an entry in _clique_potentials_
        // If there is only one element in potset, this element shall be
        // stored into _clique_potentials_, else all the elements of potset
        // shall be combined and their result shall be stored
        if (potset.size() == 1) {
          _clique_potentials_.insert(xpotset.first, *(potset.cbegin()));
        } else {
          auto joint = fast_combination.combine(potset);
          _clique_potentials_.insert(xpotset.first, joint);
        }
      }
    }

    // indicate that the data structures are up to date.
    _evidence_changes_.clear();
    _is_new_jt_needed_ = false;
  }


  /// prepare the inference structures w.r.t. new targets, soft/hard evidence
  template < typename GUM_SCALAR >
  void ShaferShenoyMNInference< GUM_SCALAR >::updateOutdatedStructure_() {
    // check if a new JT is really needed. If so, create it
    if (_isNewJTNeeded_()) {
      _createNewJT_();
    } else {
      // here, we can answer the next queries without reconstructing all the
      // junction tree. All we need to do is to indicate that we should
      // update the potentials and messages for these queries
      this->setOutdatedPotentialsState_();
    }
  }


  /// invalidate all the messages sent from a given clique
  template < typename GUM_SCALAR >
  void ShaferShenoyMNInference< GUM_SCALAR >::_diffuseMessageInvalidations_(
     NodeId   from_id,
     NodeId   to_id,
     NodeSet& invalidated_cliques) {
    // invalidate the current clique
    invalidated_cliques.insert(to_id);

    // invalidate the current arc
    const Arc arc(from_id, to_id);
    bool&     message_computed = _messages_computed_[arc];
    if (message_computed) {
      message_computed = false;
      _separator_potentials_[arc].clear();
      if (_created_messages_.exists(arc)) {
        auto& arc_created_potentials = _created_messages_[arc];
        for (auto pot: arc_created_potentials)
          delete pot;
        arc_created_potentials.clear();
      }

      // go on with the diffusion
      for (const auto node_id: _propagator_->neighbours(to_id)) {
        if (node_id != from_id) _diffuseMessageInvalidations_(to_id, node_id, invalidated_cliques);
      }
    }
  }


  /// update the potentials stored in the cliques and invalidate outdated
  /// messages
  template < typename GUM_SCALAR >
  void ShaferShenoyMNInference< GUM_SCALAR >::updateOutdatedPotentials_() {
    // for each clique, indicate whether the potential stored into
    // _clique_potentials_[clique] is the result of a combination. In this
    // case, it has been allocated by the combination and will need to be
    // deallocated if its clique has been invalidated
    const auto& mn = this->MN();

    NodeProperty< bool > ss_potential_to_deallocate(_clique_potentials_.size());
    for (auto pot_iter = _clique_potentials_.cbegin(); pot_iter != _clique_potentials_.cend();
         ++pot_iter) {
      ss_potential_to_deallocate.insert(pot_iter.key(),
                                        _clique_potentials_list_[pot_iter.key()].size() > 1);
    }

    // compute the set of factors that were projected due to hard evidence and
    // whose hard evidence have changed, so that they need a new projection.
    // By the way, remove these factors since they are no more needed
    // Here only the values of the hard evidence can have changed (else a
    // fully new join tree would have been computed).
    // Note also that we know that the factors still contain some variable(s) after
    // the projection (else they should be constants)
    NodeSet        hard_nodes_changed(_hard_ev_nodes_.size());
    Set< NodeId >  hard_cliques_changed;
    Set< NodeSet > hard_factors_changed;
    for (const auto node: _hard_ev_nodes_) {
      if (_evidence_changes_.exists(node)) {
        hard_nodes_changed.insert(node);
        for (const auto& elt: mn.factors()) {
          const auto& chgFactor = elt.first;
          if (chgFactor.contains(node)) {
            if (!hard_factors_changed.contains(chgFactor)) {
              hard_factors_changed.insert(chgFactor);

              const auto chgPot    = _hard_ev_projected_factors_[chgFactor];
              const auto chgClique = _factor_to_clique_[chgFactor];
              _clique_potentials_list_[chgClique].erase(chgPot);
              delete chgPot;
              _hard_ev_projected_factors_.erase(chgFactor);

              if (!hard_cliques_changed.contains(chgClique)) hard_cliques_changed.insert(chgClique);
            }
          }
        }
      }
    }

    // invalidate all the messages that are no more correct: start from each of
    // the nodes whose soft evidence has changed and perform a diffusion from
    // the clique into which the soft evidence has been entered, indicating that
    // the messages spreading from this clique are now invalid. At the same time,
    // if there were potentials created on the arcs over which the messages were
    // sent, remove them from memory. For all the cliques that received some
    // projected factors that should now be changed, do the same.
    NodeSet invalidated_cliques(_propagator_->size());
    for (const auto& pair: _evidence_changes_) {
      if (_node_to_clique_.exists(pair.first)) {
        const auto clique = _node_to_clique_[pair.first];
        invalidated_cliques.insert(clique);
        for (const auto neighbor: _propagator_->neighbours(clique)) {
          _diffuseMessageInvalidations_(clique, neighbor, invalidated_cliques);
        }
      }
    }

    // now, add to the set of invalidated cliques those that contain projected
    // factors that were changed.
    for (const auto clique: hard_cliques_changed) {
      invalidated_cliques.insert(clique);
      for (const auto neighbor: _propagator_->neighbours(clique)) {
        _diffuseMessageInvalidations_(clique, neighbor, invalidated_cliques);
      }
    }

    // now that we know the cliques whose set of potentials have been changed,
    // we can discard their corresponding Shafer-Shenoy potential
    for (const auto clique: invalidated_cliques)
      if (_clique_potentials_.exists(clique) && ss_potential_to_deallocate[clique])
        delete _clique_potentials_[clique];

    // now we shall remove all the posteriors that belong to the
    // invalidated cliques. First, cope only with the nodes that did not
    // receive hard evidence since the other nodes do not belong to the
    // join tree
    if (!_target_posteriors_.empty()) {
      for (auto iter = _target_posteriors_.beginSafe(); iter != _target_posteriors_.endSafe();
           ++iter) {
        if (_reduced_graph_.exists(iter.key())
            && invalidated_cliques.exists(_node_to_clique_[iter.key()])) {
          delete iter.val();
          _target_posteriors_.erase(iter);
        }
      }

      // now cope with the nodes that received hard evidence
      for (auto iter = _target_posteriors_.beginSafe(); iter != _target_posteriors_.endSafe();
           ++iter) {
        if (hard_nodes_changed.contains(iter.key())) {
          delete iter.val();
          _target_posteriors_.erase(iter);
        }
      }
    }

    // finally, cope with joint targets
    for (auto iter = _joint_target_posteriors_.beginSafe();
         iter != _joint_target_posteriors_.endSafe();
         ++iter) {
      if (invalidated_cliques.exists(_joint_target_to_clique_[iter.key()])) {
        delete iter.val();
        _joint_target_posteriors_.erase(iter);
      }
    }

    // remove all the evidence that were entered into _node_to_soft_evidence_
    // and _clique_potentials_list_ and add the new soft ones
    for (auto& pot_pair: _node_to_soft_evidence_)
      _clique_potentials_list_[_node_to_clique_[pot_pair.first]].erase(pot_pair.second);
    _node_to_soft_evidence_.clear();

    const auto& evidence = this->evidence();
    for (const auto node: this->softEvidenceNodes()) {
      _node_to_soft_evidence_.insert(node, evidence[node]);
      _clique_potentials_list_[_node_to_clique_[node]].insert(evidence[node]);
    }


    // Now add the projections of the factors due to newly changed hard evidence:
    // if we are performing updateOutdatedPotentials_, this means that the
    // set of nodes that received hard evidence has not been changed, only
    // their instantiations can have been changed. So, if there is an entry
    // for node in _constants_, there will still be such an entry after
    // performing the new projections. Idem for _hard_ev_projected_factors_

    // for (const auto node: factors_with_projected_factors_changed) {
    for (const auto& iter_factor: mn.factors()) {
      NodeSet hard_nodes = hard_nodes_changed * iter_factor.first;
      if (hard_nodes.empty()) continue;   // no change in iter_factor

      // perform the projection with a combine and project instance
      MultiDimCombineAndProjectDefault< GUM_SCALAR, Potential > combine_and_project(
         _combination_op_, _projection_op_);
      _PotentialSet_                 marg_factor_set{iter_factor.second};
      Set< const DiscreteVariable* > hard_variables;
      for (const auto xnode: hard_nodes) {
        marg_factor_set.insert(evidence[xnode]);
        hard_variables.insert(&(mn.variable(xnode)));
      }

      // perform the combination of those potentials and their projection
      _PotentialSet_ new_potentials_list
         = combine_and_project.combineAndProject(marg_factor_set, hard_variables);

      // there should be only one potential in new_potentials_list
      if (new_potentials_list.size() != 1) {
        // remove the potentials created to avoid memory leaks
        for (auto pot: new_potentials_list) {
          if (!marg_factor_set.contains(pot)) delete pot;
        }
        GUM_ERROR(FatalError,
                  "the projection of a potential containing "
                  << "hard evidence is empty!");
      }

      const Potential< GUM_SCALAR >* projected_factor = *(new_potentials_list.begin());
      _clique_potentials_list_[_factor_to_clique_[iter_factor.first]].insert(projected_factor);
      _hard_ev_projected_factors_.insert(iter_factor.first, projected_factor);
    }

    // here, the list of potentials stored in the invalidated cliques have
    // been updated. So, now, we can combine them to produce the Shafer-Shenoy
    // potential stored into the clique
    MultiDimCombinationDefault< GUM_SCALAR, Potential > fast_combination(_combination_op_);
    for (const auto clique: invalidated_cliques) {
      const auto& potset = _clique_potentials_list_[clique];

      if (potset.size() > 0) {
        // here, there will be an entry in _clique_potentials_
        // If there is only one element in potset, this element shall be
        // stored into _clique_potentials_, else all the elements of potset
        // shall be combined and their result shall be stored
        if (potset.size() == 1) {
          _clique_potentials_.set(clique, *(potset.cbegin()));
        } else {
          _clique_potentials_.set(clique, fast_combination.combine(potset));
        }
      }
    }

    // update the constants
    /*const auto& hard_evidence = this->hardEvidence();
    for (auto& node_cst: _constants_) {
      const Potential< GUM_SCALAR >& cpt = mn.cpt(node_cst.first);
      const auto& variables = cpt.variablesSequence();
      Instantiation inst;
      for (const auto var: variables)
        inst << *var;
      for (const auto var: variables) {
        inst.chgVal(var, hard_evidence[mn.nodeId(*var)]);
      }
      node_cst.second = cpt[inst];
    }*/

    // indicate that all changes have been performed
    _evidence_changes_.clear();
  }


  /// compute a root for each connected component of _propagator_
  template < typename GUM_SCALAR >
  void ShaferShenoyMNInference< GUM_SCALAR >::_computeJoinTreeRoots_() {
    const auto& mn = this->MN();

    // get the set of cliques in which we can find the targets and joint_targets
    NodeSet clique_targets;
    for (const auto node: this->targets()) {
      try {
        clique_targets.insert(_node_to_clique_[node]);
      } catch (Exception&) {}
    }
    for (const auto& set: this->jointTargets()) {
      try {
        clique_targets.insert(_joint_target_to_clique_[set]);
      } catch (Exception&) {}
    }

    // put in a vector these cliques and their size
    std::vector< std::pair< NodeId, Size > > possible_roots(clique_targets.size());
    std::size_t                              i = 0;
    for (const auto clique_id: clique_targets) {
      const auto& clique   = _propagator_->clique(clique_id);
      Size        dom_size = 1;
      for (const auto node: clique) {
        dom_size *= mn.variable(node).domainSize();
      }
      possible_roots[i] = std::pair< NodeId, Size >(clique_id, dom_size);
      ++i;
    }

    // sort the cliques by increasing domain size
    std::sort(possible_roots.begin(),
              possible_roots.end(),
              [](const std::pair< NodeId, Size >& a, const std::pair< NodeId, Size >& b) -> bool {
                return a.second < b.second;
              });

    // pick up the clique with the smallest size in each connected component
    NodeProperty< bool >                  marked = _propagator_->nodesProperty(false);
    std::function< void(NodeId, NodeId) > diffuse_marks
       = [&marked, &diffuse_marks, this](NodeId node, NodeId from) {
           if (!marked[node]) {
             marked[node] = true;
             for (const auto neigh: _propagator_->neighbours(node))
               if ((neigh != from) && !marked[neigh]) diffuse_marks(neigh, node);
           }
         };
    _roots_.clear();
    for (const auto xclique: possible_roots) {
      const NodeId clique = xclique.first;
      if (!marked[clique]) {
        _roots_.insert(clique);
        diffuse_marks(clique, clique);
      }
    }
  }


  // performs the collect phase of Shafer-Shenoy's propagation
  template < typename GUM_SCALAR >
  void ShaferShenoyMNInference< GUM_SCALAR >::_collectMessage_(NodeId id, NodeId from) {
    for (const auto other: _propagator_->neighbours(id)) {
      if ((other != from) && !_messages_computed_[Arc(other, id)]) _collectMessage_(other, id);
    }

    if ((id != from) && !_messages_computed_[Arc(id, from)]) { _produceMessage_(id, from); }
  }

  // remove variables del_vars from the list of potentials pot_list
  template < typename GUM_SCALAR >
  Set< const Potential< GUM_SCALAR >* > ShaferShenoyMNInference< GUM_SCALAR >::_marginalizeOut_(
     Set< const Potential< GUM_SCALAR >* > pot_list,
     Set< const DiscreteVariable* >&       del_vars,
     Set< const DiscreteVariable* >&       kept_vars) {
    // remove the potentials corresponding to barren variables if we want
    // to exploit barren nodes
    /* _PotentialSet_ barren_projected_potentials;
    if ( _barren_nodes_type_ == FindBarrenNodesType::FIND_BARREN_NODES) {
      barren_projected_potentials = _removeBarrenVariables_(pot_list, del_vars);
    }*/

    // create a combine and project operator that will perform the
    // marginalization
    MultiDimCombineAndProjectDefault< GUM_SCALAR, Potential > combine_and_project(
       _combination_op_, _projection_op_);
    _PotentialSet_ new_pot_list = combine_and_project.combineAndProject(pot_list, del_vars);

    // remove all the potentials that were created due to projections of
    // barren nodes and that are not part of the new_pot_list: these
    // potentials were just temporary potentials
    /*for (auto iter = barren_projected_potentials.beginSafe();
         iter != barren_projected_potentials.endSafe();
         ++iter) {
      if (!new_pot_list.exists(*iter)) delete *iter;
    }*/

    // remove all the potentials that have no dimension
    for (auto iter_pot = new_pot_list.beginSafe(); iter_pot != new_pot_list.endSafe();
         ++iter_pot) {
      if ((*iter_pot)->variablesSequence().size() == 0) {
        // as we have already marginalized out variables that received evidence,
        // it may be the case that, after combining and projecting, some
        // potentials might be empty. In this case, we shall keep their
        // constant and remove them from memory
        // # TODO: keep the constants!
        delete *iter_pot;
        new_pot_list.erase(iter_pot);
      }
    }

    return new_pot_list;
  }


  // creates the message sent by clique from_id to clique to_id
  template < typename GUM_SCALAR >
  void ShaferShenoyMNInference< GUM_SCALAR >::_produceMessage_(NodeId from_id, NodeId to_id) {
    // get the potentials of the clique.
    _PotentialSet_ pot_list;
    if (_clique_potentials_.exists(from_id)) pot_list.insert(_clique_potentials_[from_id]);

    // add the messages sent by adjacent nodes to from_id
    for (const auto other_id: _propagator_->neighbours(from_id))
      if (other_id != to_id) pot_list += _separator_potentials_[Arc(other_id, from_id)];

    // get the set of variables that need be removed from the potentials
    const NodeSet&                 from_clique = _propagator_->clique(from_id);
    const NodeSet&                 separator   = _propagator_->separator(from_id, to_id);
    Set< const DiscreteVariable* > del_vars(from_clique.size());
    Set< const DiscreteVariable* > kept_vars(separator.size());
    const auto&                    mn = this->MN();

    for (const auto node: from_clique) {
      if (!separator.contains(node)) {
        del_vars.insert(&(mn.variable(node)));
      } else {
        kept_vars.insert(&(mn.variable(node)));
      }
    }

    // pot_list now contains all the potentials to multiply and marginalize
    // => combine the messages
    _PotentialSet_ new_pot_list = _marginalizeOut_(pot_list, del_vars, kept_vars);

    /*
      for the moment, remove this test: due to some optimizations, some
      potentials might have all their cells greater than 1.

    // remove all the potentials that are equal to ones (as probability
    // matrix multiplications are tensorial, such potentials are useless)
    for (auto iter = new_pot_list.beginSafe(); iter != new_pot_list.endSafe();
         ++iter) {
      const auto pot = *iter;
      if (pot->variablesSequence().size() == 1) {
        bool is_all_ones = true;
        for (Instantiation inst(*pot); !inst.end(); ++inst) {
          if ((*pot)[inst] < _one_minus_epsilon_) {
            is_all_ones = false;
            break;
          }
        }
        if (is_all_ones) {
          if (!pot_list.exists(pot)) delete pot;
          new_pot_list.erase(iter);
          continue;
        }
      }
    }
    */

    // if there are still potentials in new_pot_list, combine them to
    // produce the message on the separator
    const Arc arc(from_id, to_id);
    if (!new_pot_list.empty()) {
      if (new_pot_list.size() == 1) {   // there is only one potential
        // in new_pot_list, so this corresponds to our message on
        // the separator
        auto pot                    = *(new_pot_list.begin());
        _separator_potentials_[arc] = std::move(new_pot_list);
        if (!pot_list.exists(pot)) {
          if (!_created_messages_.exists(arc)) _created_messages_.insert(arc, _PotentialSet_());
          _created_messages_[arc].insert(pot);
        }
      } else {
        // create the message in the separator
        MultiDimCombinationDefault< GUM_SCALAR, Potential > fast_combination(_combination_op_);
        auto joint = fast_combination.combine(new_pot_list);
        _separator_potentials_[arc].insert(joint);
        if (!_created_messages_.exists(arc)) _created_messages_.insert(arc, _PotentialSet_());
        _created_messages_[arc].insert(joint);

        // remove the temporary messages created in new_pot_list
        for (const auto pot: new_pot_list) {
          if (!pot_list.exists(pot)) { delete pot; }
        }
      }
    }

    _messages_computed_[arc] = true;
  }


  // performs a whole inference
  template < typename GUM_SCALAR >
  INLINE void ShaferShenoyMNInference< GUM_SCALAR >::makeInference_() {
    // collect messages for all single targets
    for (const auto node: this->targets()) {
      // perform only collects in the join tree for nodes that have
      // not received hard evidence (those that received hard evidence were
      // not included into the join tree for speed-up reasons)
      if (_reduced_graph_.exists(node)) {
        _collectMessage_(_node_to_clique_[node], _node_to_clique_[node]);
      }
    }

    // collect messages for all set targets
    // by parsing _joint_target_to_clique_, we ensure that the cliques that
    // are referenced belong to the join tree (even if some of the nodes in
    // their associated joint_target do not belong to _reduced_graph_)
    for (const auto set: _joint_target_to_clique_)
      _collectMessage_(set.second, set.second);
  }


  /// returns a fresh potential equal to P(1st arg,evidence)
  template < typename GUM_SCALAR >
  Potential< GUM_SCALAR >*
     ShaferShenoyMNInference< GUM_SCALAR >::unnormalizedJointPosterior_(NodeId id) {
    const auto& mn = this->MN();

    // hard evidence do not belong to the join tree
    // # TODO: check for sets of inconsistent hard evidence
    if (this->hardEvidenceNodes().contains(id)) {
      return new Potential< GUM_SCALAR >(*(this->evidence()[id]));
    }

    // if we still need to perform some inference task, do it (this should
    // already have been done by makeInference_)
    NodeId clique_of_id = _node_to_clique_[id];
    _collectMessage_(clique_of_id, clique_of_id);

    // now we just need to create the product of the potentials of the clique
    // containing id with the messages received by this clique and
    // marginalize out all variables except id
    _PotentialSet_ pot_list;
    if (_clique_potentials_.exists(clique_of_id)) pot_list.insert(_clique_potentials_[clique_of_id]);

    // add the messages sent by adjacent nodes to targetClique
    for (const auto other: _propagator_->neighbours(clique_of_id))
      pot_list += _separator_potentials_[Arc(other, clique_of_id)];

    // get the set of variables that need be removed from the potentials
    const NodeSet&                 nodes = _propagator_->clique(clique_of_id);
    Set< const DiscreteVariable* > kept_vars{&(mn.variable(id))};
    Set< const DiscreteVariable* > del_vars(nodes.size());
    for (const auto node: nodes) {
      if (node != id) del_vars.insert(&(mn.variable(node)));
    }

    // pot_list now contains all the potentials to multiply and marginalize
    // => combine the messages
    _PotentialSet_           new_pot_list = _marginalizeOut_(pot_list, del_vars, kept_vars);
    Potential< GUM_SCALAR >* joint        = nullptr;

    if (new_pot_list.size() == 1) {
      joint = const_cast< Potential< GUM_SCALAR >* >(*(new_pot_list.begin()));
      // if pot already existed, create a copy, so that we can put it into
      // the _target_posteriors_ property
      if (pot_list.exists(joint)) {
        joint = new Potential< GUM_SCALAR >(*joint);
      } else {
        // remove the joint from new_pot_list so that it will not be
        // removed just after the else block
        new_pot_list.clear();
      }
    } else {
      MultiDimCombinationDefault< GUM_SCALAR, Potential > fast_combination(_combination_op_);
      joint = fast_combination.combine(new_pot_list);
    }

    // remove the potentials that were created in new_pot_list
    for (auto pot: new_pot_list)
      if (!pot_list.exists(pot)) delete pot;

    // check that the joint posterior is different from a 0 vector: this would
    // indicate that some hard evidence are not compatible (their joint
    // probability is equal to 0)
    bool nonzero_found = false;
    for (Instantiation inst(*joint); !inst.end(); ++inst) {
      if ((*joint)[inst]) {
        nonzero_found = true;
        break;
      }
    }
    if (!nonzero_found) {
      // remove joint from memory to avoid memory leaks
      delete joint;
      GUM_ERROR(IncompatibleEvidence,
                "some evidence entered into the Markov "
                "net are incompatible (their joint proba = 0)");
    }
    return joint;
  }


  /// returns the posterior of a given variable
  template < typename GUM_SCALAR >
  const Potential< GUM_SCALAR >& ShaferShenoyMNInference< GUM_SCALAR >::posterior_(NodeId id) {
    // check if we have already computed the posterior
    if (_target_posteriors_.exists(id)) { return *(_target_posteriors_[id]); }

    // compute the joint posterior and normalize
    auto joint = unnormalizedJointPosterior_(id);
    joint->normalize();
    _target_posteriors_.insert(id, joint);

    return *joint;
  }
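
  // Usage sketch (from user code; the model and its labels are hypothetical):
  //   gum::ShaferShenoyMNInference< double > ie(&mn);
  //   ie.makeInference();
  //   std::cout << ie.posterior(mn.idFromName("A")) << std::endl;  // normalized marginal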


  // returns the marginal a posteriori proba of a given node
  template < typename GUM_SCALAR >
  Potential< GUM_SCALAR >*
     ShaferShenoyMNInference< GUM_SCALAR >::unnormalizedJointPosterior_(const NodeSet& set) {
    // hard evidence do not belong to the join tree, so extract the nodes
    // from targets that are not hard evidence
    NodeSet targets = set, hard_ev_nodes;
    for (const auto node: this->hardEvidenceNodes()) {
      if (targets.contains(node)) {
        targets.erase(node);
        hard_ev_nodes.insert(node);
      }
    }

    // if all the nodes have received hard evidence, then compute the
    // joint posterior directly by multiplying the hard evidence potentials
    const auto& evidence = this->evidence();
    if (targets.empty()) {
      _PotentialSet_ pot_list;
      for (const auto node: set) {
        pot_list.insert(evidence[node]);
      }
      if (pot_list.size() == 1) {
        auto pot = new Potential< GUM_SCALAR >(**(pot_list.begin()));
        return pot;
      } else {
        MultiDimCombinationDefault< GUM_SCALAR, Potential > fast_combination(_combination_op_);
        return fast_combination.combine(pot_list);
      }
    }


    // if we still need to perform some inference task, do it: so, first,
    // determine the clique on which we should perform collect to compute
    // the unnormalized joint posterior of a set of nodes containing "set"
    NodeId clique_of_set;
    try {
      clique_of_set = _joint_target_to_clique_[set];
    } catch (NotFound&) {
      // here, the precise set of targets does not belong to the set of targets
      // defined by the user. So we will try to find a clique in the junction
      // tree that contains "targets":

      // 1/ we should check that all the nodes belong to the join tree
      for (const auto node: targets) {
        if (!_reduced_graph_.exists(node)) {
          GUM_ERROR(UndefinedElement, node << " is not a target node")
        }
      }

      // 2/ the clique created by the first eliminated node among target is the
      // one we are looking for
      const std::vector< NodeId >& JT_elim_order = _triangulation_->eliminationOrder();
      NodeProperty< int >          elim_order(Size(JT_elim_order.size()));
      for (std::size_t i = std::size_t(0), size = JT_elim_order.size(); i < size; ++i)
        elim_order.insert(JT_elim_order[i], (int)i);
      NodeId first_eliminated_node = *(targets.begin());
      int    elim_number           = elim_order[first_eliminated_node];
      for (const auto node: targets) {
        if (elim_order[node] < elim_number) {
          elim_number           = elim_order[node];
          first_eliminated_node = node;
        }
      }
      clique_of_set = _triangulation_->createdJunctionTreeClique(first_eliminated_node);

      // 3/ check that clique_of_set contains all the nodes in the target
      const NodeSet& clique_nodes = _propagator_->clique(clique_of_set);
      for (const auto node: targets) {
        if (!clique_nodes.contains(node)) {
          GUM_ERROR(UndefinedElement, set << " is not a joint target")
        }
      }

      // add the discovered clique to _joint_target_to_clique_
      _joint_target_to_clique_.insert(set, clique_of_set);
    }

    // now perform a collect on the clique
    _collectMessage_(clique_of_set, clique_of_set);

    // now we just need to create the product of the potentials of the clique
    // containing set with the messages received by this clique and
    // marginalize out all variables except set
    _PotentialSet_ pot_list;
    if (_clique_potentials_.exists(clique_of_set)) pot_list.insert(_clique_potentials_[clique_of_set]);

    // add the messages sent by adjacent nodes to targetClique
    for (const auto other: _propagator_->neighbours(clique_of_set))
      pot_list += _separator_potentials_[Arc(other, clique_of_set)];

    // get the set of variables that need be removed from the potentials
    const NodeSet&                 nodes = _propagator_->clique(clique_of_set);
    Set< const DiscreteVariable* > del_vars(nodes.size());
    Set< const DiscreteVariable* > kept_vars(targets.size());
    const auto&                    mn = this->MN();
    for (const auto node: nodes) {
      if (!targets.contains(node)) {
        del_vars.insert(&(mn.variable(node)));
      } else {
        kept_vars.insert(&(mn.variable(node)));
      }
    }

    // pot_list now contains all the potentials to multiply and marginalize
    // => combine the messages
    _PotentialSet_           new_pot_list = _marginalizeOut_(pot_list, del_vars, kept_vars);
    Potential< GUM_SCALAR >* joint        = nullptr;

    if ((new_pot_list.size() == 1) && hard_ev_nodes.empty()) {
      joint = const_cast< Potential< GUM_SCALAR >* >(*(new_pot_list.begin()));
      // if pot already existed, create a copy, so that we can put it into
      // the _target_posteriors_ property
      if (pot_list.exists(joint)) {
        joint = new Potential< GUM_SCALAR >(*joint);
      } else {
        // remove the joint from new_pot_list so that it will not be
        // removed just after the next else block
        new_pot_list.clear();
      }
    } else {
      // combine all the potentials in new_pot_list with all the hard evidence
      // of the nodes in set
      _PotentialSet_ new_new_pot_list = new_pot_list;
      for (const auto node: hard_ev_nodes) {
        new_new_pot_list.insert(evidence[node]);
      }
      MultiDimCombinationDefault< GUM_SCALAR, Potential > fast_combination(_combination_op_);
      joint = fast_combination.combine(new_new_pot_list);
    }

    // remove the potentials that were created in new_pot_list
    for (auto pot: new_pot_list)
      if (!pot_list.exists(pot)) delete pot;

    // check that the joint posterior is different from a 0 vector: this would
    // indicate that some hard evidence are not compatible
    bool nonzero_found = false;
    for (Instantiation inst(*joint); !inst.end(); ++inst) {
      if ((*joint)[inst]) {
        nonzero_found = true;
        break;
      }
    }
    if (!nonzero_found) {
      // remove joint from memory to avoid memory leaks
      delete joint;
      GUM_ERROR(IncompatibleEvidence,
                "some evidence entered into the Markov "
                "net are incompatible (their joint proba = 0)");
    }

    return joint;
  }


  /// returns the posterior of a given set of variables
  template < typename GUM_SCALAR >
  const Potential< GUM_SCALAR >&
     ShaferShenoyMNInference< GUM_SCALAR >::jointPosterior_(const NodeSet& set) {
    // check if we have already computed the posterior
    if (_joint_target_posteriors_.exists(set)) { return *(_joint_target_posteriors_[set]); }

    // compute the joint posterior and normalize
    auto joint = unnormalizedJointPosterior_(set);
    joint->normalize();
    _joint_target_posteriors_.insert(set, joint);

    return *joint;
  }
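
  // Usage sketch: joint targets must be declared before inference so that the
  // join tree contains a clique covering them (identifiers are hypothetical):
  //   ie.addJointTarget({mn.idFromName("A"), mn.idFromName("B")});
  //   ie.makeInference();
  //   const auto& pAB = ie.jointPosterior({mn.idFromName("A"), mn.idFromName("B")});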


  /// returns the posterior of a given set of variables
  template < typename GUM_SCALAR >
  const Potential< GUM_SCALAR >&
     ShaferShenoyMNInference< GUM_SCALAR >::jointPosterior_(const NodeSet& wanted_target,
                                                            const NodeSet& declared_target) {
    // check if we have already computed the posterior of wanted_target
    if (_joint_target_posteriors_.exists(wanted_target))
      return *(_joint_target_posteriors_[wanted_target]);

    // here, we will have to compute the posterior of declared_target and
    // marginalize out all the variables that do not belong to wanted_target

    // check if we have already computed the posterior of declared_target
    if (!_joint_target_posteriors_.exists(declared_target)) { jointPosterior_(declared_target); }

    // marginalize out all the variables that do not belong to wanted_target
    const auto&                    mn = this->MN();
    Set< const DiscreteVariable* > del_vars;
    for (const auto node: declared_target)
      if (!wanted_target.contains(node)) del_vars.insert(&(mn.variable(node)));
    auto pot = new Potential< GUM_SCALAR >(
       _joint_target_posteriors_[declared_target]->margSumOut(del_vars));

    // save the result into the cache
    _joint_target_posteriors_.insert(wanted_target, pot);

    return *pot;
  }


  template < typename GUM_SCALAR >
  GUM_SCALAR ShaferShenoyMNInference< GUM_SCALAR >::evidenceProbability() {
    // perform inference in each connected component
    this->makeInference();

    // for each connected component, select a variable X and compute the
    // joint probability of X and evidence e. Then marginalize-out X to get
    // p(e) in this connected component. Finally, multiply all the p(e) that
    // we got and the elements in _constants_. The result is the probability
    // of evidence

    GUM_SCALAR prob_ev = 1;
    for (const auto root: _roots_) {
      // get a node in the clique
      NodeId                   node = *(_propagator_->clique(root).begin());
      Potential< GUM_SCALAR >* tmp  = unnormalizedJointPosterior_(node);
      GUM_SCALAR               sum  = 0;
      for (Instantiation iter(*tmp); !iter.end(); ++iter)
        sum += tmp->get(iter);
      prob_ev *= sum;
      delete tmp;
    }

    // for (const auto& projected_cpt: _constants_)
    //   prob_ev *= projected_cpt.second;

    return prob_ev;
  }
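
  // Usage sketch (hard evidence value and labels are hypothetical):
  //   ie.addEvidence(mn.idFromName("A"), 1);
  //   ie.makeInference();
  //   double p_e = ie.evidenceProbability();  // P(A = 1) under the model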

  template < typename GUM_SCALAR >
  bool ShaferShenoyMNInference< GUM_SCALAR >::isExactJointComputable_(const NodeSet& vars) {
    if (JointTargetedMNInference< GUM_SCALAR >::isExactJointComputable_(vars)) return true;

    this->prepareInference();

    for (const auto& cliknode: this->_propagator_->nodes()) {
      const auto clique = _propagator_->clique(cliknode);
      if (vars == clique) return true;
    }
    return false;
  }

  template < typename GUM_SCALAR >
  NodeSet ShaferShenoyMNInference< GUM_SCALAR >::superForJointComputable_(const NodeSet& vars) {
    const auto superset = JointTargetedMNInference< GUM_SCALAR >::superForJointComputable_(vars);
    if (!superset.empty()) return superset;

    this->prepareInference();

    for (const auto& cliknode: _propagator_->nodes()) {
      const auto clique = _propagator_->clique(cliknode);
      if (vars.isProperSubsetOf(clique)) return clique;
    }


    return NodeSet();
  }

} /* namespace gum */

#endif   // DOXYGEN_SHOULD_SKIP_THIS