aGrUM  0.20.2
a C++ library for (probabilistic) graphical models
loopyBeliefPropagation_tpl.h
/**
 *
 * Copyright 2005-2020 Pierre-Henri WUILLEMIN(@LIP6) & Christophe GONZALES(@AMU)
 * info_at_agrum_dot_org
 *
 * This library is free software: you can redistribute it and/or modify
 * it under the terms of the GNU Lesser General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public License
 * along with this library. If not, see <http://www.gnu.org/licenses/>.
 *
 */


/**
 * @file
 * @brief Implementation of Loopy Belief Propagation in Bayesian networks.
 */
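
/**
 * A minimal usage sketch (not part of this file; the network and the node
 * names below are purely illustrative assumptions):
 * @code
 * #include <agrum/BN/BayesNet.h>
 * #include <agrum/BN/inference/loopyBeliefPropagation.h>
 *
 * // build a small discrete network with the "fast" syntax
 * auto bn = gum::BayesNet< double >::fastPrototype("A->B->C;A->C");
 *
 * gum::LoopyBeliefPropagation< double > lbp(&bn);
 * lbp.addEvidence(bn.idFromName("A"), 1);   // observe A = 1
 * lbp.makeInference();                      // iterate until convergence
 * const auto& pC = lbp.posterior(bn.idFromName("C"));
 * @endcode
 */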
#ifndef DOXYGEN_SHOULD_SKIP_THIS

# include <algorithm>
# include <random>
# include <sstream>
# include <string>
# include <vector>

# define LBP_DEFAULT_MAXITER 100
# define LBP_DEFAULT_EPSILON 1e-8
# define LBP_DEFAULT_MIN_EPSILON_RATE 1e-10
# define LBP_DEFAULT_PERIOD_SIZE 1
# define LBP_DEFAULT_VERBOSITY false


// to ease parsing for IDE
# include <agrum/BN/inference/loopyBeliefPropagation.h>


namespace gum {

  /// default constructor
  template < typename GUM_SCALAR >
  LoopyBeliefPropagation< GUM_SCALAR >::LoopyBeliefPropagation(
     const IBayesNet< GUM_SCALAR >* bn) :
      ApproximateInference< GUM_SCALAR >(bn) {
    // for debugging purposes
    GUM_CONSTRUCTOR(LoopyBeliefPropagation);

    this->setEpsilon(LBP_DEFAULT_EPSILON);
    this->setMinEpsilonRate(LBP_DEFAULT_MIN_EPSILON_RATE);
    this->setMaxIter(LBP_DEFAULT_MAXITER);
    this->setVerbosity(LBP_DEFAULT_VERBOSITY);
    this->setPeriodSize(LBP_DEFAULT_PERIOD_SIZE);

    init_messages__();
  }

  /// destructor
  template < typename GUM_SCALAR >
  INLINE LoopyBeliefPropagation< GUM_SCALAR >::~LoopyBeliefPropagation() {
    GUM_DESTRUCTOR(LoopyBeliefPropagation);
  }


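  /// (re)initializes all messages: for every arc of the network, a constant
  /// message equal to 1 is stored in both directions (parent -> child and
  /// child -> parent)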
  template < typename GUM_SCALAR >
  void LoopyBeliefPropagation< GUM_SCALAR >::init_messages__() {
    messages__.clear();
    for (const auto& tail: this->BN().nodes()) {
      Potential< GUM_SCALAR > p;
      p.add(this->BN().variable(tail));
      p.fill(static_cast< GUM_SCALAR >(1));

      for (const auto& head: this->BN().children(tail)) {
        messages__.insert(Arc(head, tail), p);
        messages__.insert(Arc(tail, head), p);
      }
    }
  }

  template < typename GUM_SCALAR >
  void LoopyBeliefPropagation< GUM_SCALAR >::updateOutdatedStructure_() {
    init_messages__();
  }


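  /// computes pi(X): the CPT of X multiplied by all the pi-messages received
  /// from the parents of X, with every variable but X summed out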
  template < typename GUM_SCALAR >
  Potential< GUM_SCALAR >
     LoopyBeliefPropagation< GUM_SCALAR >::computeProdPi__(NodeId X) {
    const auto& varX = this->BN().variable(X);

    auto piX = this->BN().cpt(X);
    for (const auto& U: this->BN().parents(X)) {
      piX *= messages__[Arc(U, X)];
    }
    piX = piX.margSumIn({&varX});

    return piX;
  }

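  /// same as computeProdPi__(X) except that the pi-message coming from the
  /// parent `except` is not used and the variable of `except` is kept in the
  /// result; this is the quantity needed to build the lambda-message sent
  /// from X to that parent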
  template < typename GUM_SCALAR >
  Potential< GUM_SCALAR >
     LoopyBeliefPropagation< GUM_SCALAR >::computeProdPi__(NodeId X,
                                                           NodeId except) {
    const auto& varX      = this->BN().variable(X);
    const auto& varExcept = this->BN().variable(except);
    auto        piXexcept = this->BN().cpt(X);
    for (const auto& U: this->BN().parents(X)) {
      if (U != except) { piXexcept *= messages__[Arc(U, X)]; }
    }
    piXexcept = piXexcept.margSumIn({&varX, &varExcept});
    return piXexcept;
  }


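  /// computes lambda(X): the evidence on X (or a constant 1 if there is none)
  /// multiplied by all the lambda-messages received from the children of X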
  template < typename GUM_SCALAR >
  Potential< GUM_SCALAR >
     LoopyBeliefPropagation< GUM_SCALAR >::computeProdLambda__(NodeId X) {
    Potential< GUM_SCALAR > lamX;
    if (this->hasEvidence(X)) {
      lamX = *(this->evidence()[X]);
    } else {
      lamX.add(this->BN().variable(X));
      lamX.fill(1);
    }
    for (const auto& Y: this->BN().children(X)) {
      lamX *= messages__[Arc(Y, X)];
    }

    return lamX;
  }

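  /// same as computeProdLambda__(X) except that the lambda-message coming
  /// from the child `except` is not used; this is the quantity needed to
  /// build the pi-message sent from X to that child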
  template < typename GUM_SCALAR >
  Potential< GUM_SCALAR >
     LoopyBeliefPropagation< GUM_SCALAR >::computeProdLambda__(NodeId X,
                                                               NodeId except) {
    Potential< GUM_SCALAR > lamXexcept;
    if (this->hasEvidence(X)) {
      lamXexcept = *this->evidence()[X];
    } else {
      lamXexcept.add(this->BN().variable(X));
      lamXexcept.fill(1);
    }
    for (const auto& Y: this->BN().children(X)) {
      if (Y != except) { lamXexcept *= messages__[Arc(Y, X)]; }
    }

    return lamXexcept;
  }


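  /// recomputes every message sent by X (lambda-messages to its parents and
  /// pi-messages to its children) and returns the largest KL divergence
  /// between an old message and its new value, used as the error measure of
  /// the approximation scheme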
  template < typename GUM_SCALAR >
  GUM_SCALAR LoopyBeliefPropagation< GUM_SCALAR >::updateNodeMessage__(NodeId X) {
    auto piX  = computeProdPi__(X);
    auto lamX = computeProdLambda__(X);

    GUM_SCALAR KL = 0;
    Arc        argKL(0, 0);

    // update lambda_par (for arc U->x)
    for (const auto& U: this->BN().parents(X)) {
      auto newLambda
         = (computeProdPi__(X, U) * lamX).margSumIn({&this->BN().variable(U)});
      newLambda.normalize();
      auto ekl = static_cast< GUM_SCALAR >(0);
      try {
        ekl = messages__[Arc(X, U)].KL(newLambda);
      } catch (InvalidArgument&) {
        GUM_ERROR(InvalidArgument, "Not compatible pi during computation");
      } catch (FatalError&) {   // 0 misplaced
        ekl = std::numeric_limits< GUM_SCALAR >::infinity();
      }
      if (ekl > KL) {
        KL    = ekl;
        argKL = Arc(X, U);
      }
      messages__.set(Arc(X, U), newLambda);
    }

    // update pi_child (for arc x->child)
    for (const auto& Y: this->BN().children(X)) {
      auto newPi = (piX * computeProdLambda__(X, Y));
      newPi.normalize();
      GUM_SCALAR ekl = KL;
      try {
        ekl = messages__[Arc(X, Y)].KL(newPi);
      } catch (InvalidArgument&) {
        GUM_ERROR(InvalidArgument, "Not compatible pi during computation");
      } catch (FatalError&) {   // 0 misplaced
        ekl = std::numeric_limits< GUM_SCALAR >::infinity();
      }
      if (ekl > KL) {
        KL    = ekl;
        argKL = Arc(X, Y);
      }
      messages__.set(Arc(X, Y), newPi);
    }

    return KL;
  }

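  /// initializes the messages with a single sweep over the nodes in
  /// topological order before the iterative scheme starts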
  template < typename GUM_SCALAR >
  INLINE void LoopyBeliefPropagation< GUM_SCALAR >::initStats__() {
    init_messages__();
    for (const auto& node: this->BN().topologicalOrder()) {
      updateNodeMessage__(node);
    }
  }


  /// performs the whole inference by loopy belief propagation
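  /// (each iteration updates the messages of every node in a freshly shuffled
  /// order and keeps the largest KL divergence between old and new messages
  /// as the error driving the stopping criterion)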
  template < typename GUM_SCALAR >
  void LoopyBeliefPropagation< GUM_SCALAR >::makeInference_() {
    initStats__();
    this->initApproximationScheme();

    std::vector< NodeId > shuffleIds;
    for (const auto& node: this->BN().nodes())
      shuffleIds.push_back(node);

    auto engine = std::default_random_engine{};

    GUM_SCALAR error = 0.0;
    do {
      std::shuffle(std::begin(shuffleIds), std::end(shuffleIds), engine);
      this->updateApproximationScheme();
      for (const auto& node: shuffleIds) {
        GUM_SCALAR e = updateNodeMessage__(node);
        if (e > error) error = e;
      }
    } while (this->continueApproximationScheme(error));
  }


  /// Returns the posterior of the variable: the normalized product of pi(id)
  /// and lambda(id).
  template < typename GUM_SCALAR >
  INLINE const Potential< GUM_SCALAR >&
     LoopyBeliefPropagation< GUM_SCALAR >::posterior_(NodeId id) {
    auto p = computeProdPi__(id) * computeProdLambda__(id);
    p.normalize();
    posteriors__.set(id, p);

    return posteriors__[id];
  }
}   /* namespace gum */

#endif   // DOXYGEN_SHOULD_SKIP_THIS