aGrUM
0.20.3
a C++ library for (probabilistic) graphical models
ShaferShenoyMNInference_tpl.h
/**
 *
 *  Copyright (c) 2005-2021 by Pierre-Henri WUILLEMIN(@LIP6) and Christophe GONZALES(@AMU)
 *  info_at_agrum_dot_org
 *
 *  This library is free software: you can redistribute it and/or modify
 *  it under the terms of the GNU Lesser General Public License as published by
 *  the Free Software Foundation, either version 3 of the License, or
 *  (at your option) any later version.
 *
 *  This library is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 *  GNU Lesser General Public License for more details.
 *
 *  You should have received a copy of the GNU Lesser General Public License
 *  along with this library. If not, see <http://www.gnu.org/licenses/>.
 *
 */


/**
 * @file
 * @brief Implementation of Shafer-Shenoy's propagation for inference in
 * Markov Networks.
 */
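/*
 * A minimal usage sketch (illustrative only): it assumes the usual aGrUM
 * inference API (addEvidence, makeInference, posterior) and a Markov network
 * "mn" already built by the caller; the node names are hypothetical.
 *
 *   gum::ShaferShenoyMNInference< double > ie(&mn);
 *   ie.addEvidence(mn.idFromName("A"), 1);   // hard evidence: A takes its second label
 *   ie.makeInference();
 *   const auto& posterior_B = ie.posterior(mn.idFromName("B"));
 */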
#ifndef DOXYGEN_SHOULD_SKIP_THIS
#  include <agrum/MN/inference/ShaferShenoyMNInference.h>

#  include <agrum/tools/graphs/algorithms/binaryJoinTreeConverterDefault.h>
#  include <agrum/tools/multidim/instantiation.h>
#  include <agrum/tools/multidim/utils/operators/multiDimCombineAndProjectDefault.h>
#  include <agrum/tools/multidim/utils/operators/multiDimProjection.h>


namespace gum {
  // default constructor
  template < typename GUM_SCALAR >
  INLINE ShaferShenoyMNInference< GUM_SCALAR >::ShaferShenoyMNInference(
     const IMarkovNet< GUM_SCALAR >* MN,
     bool                            use_binary_join_tree) :
      JointTargetedMNInference< GUM_SCALAR >(MN),
      EvidenceMNInference< GUM_SCALAR >(MN), _use_binary_join_tree_(use_binary_join_tree) {
    // create a default triangulation (the user can change it afterwards)
    _triangulation_ = new DefaultTriangulation;

    // for debugging purposes
    GUM_CONSTRUCTOR(ShaferShenoyMNInference);
  }


  // destructor
  template < typename GUM_SCALAR >
  INLINE ShaferShenoyMNInference< GUM_SCALAR >::~ShaferShenoyMNInference() {
    // remove all the potentials created during the last message passing
    for (const auto& pots: _created_messages_) {
      for (const auto pot: pots.second)
        delete pot;
    }

    // remove the potentials created after removing the nodes that received
    // hard evidence
    for (const auto& pot: _hard_ev_projected_factors_) {
      delete pot.second;
    }

    // remove all the potentials in _clique_potentials_ that do not belong
    // to _clique_potentials_list_: in this case, those potentials have been
    // created by combination of the corresponding list of potentials in
    // _clique_potentials_list_. In other words, the size of this list is strictly
    // greater than 1.
    for (auto pot: _clique_potentials_) {
      if (_clique_potentials_list_[pot.first].size() > 1) { delete pot.second; }
    }

    // remove all the posteriors computed
    for (const auto& pot: _target_posteriors_)
      delete pot.second;
    for (const auto& pot: _joint_target_posteriors_)
      delete pot.second;

    // remove the junction tree and the triangulation algorithm
    if (_propagator_ != nullptr) delete _propagator_;
    if (_junctionTree_ != nullptr) delete _junctionTree_;
    delete _triangulation_;

    // for debugging purposes
    GUM_DESTRUCTOR(ShaferShenoyMNInference);
  }
  /// set a new triangulation algorithm
  template < typename GUM_SCALAR >
  void ShaferShenoyMNInference< GUM_SCALAR >::setTriangulation(
     const Triangulation& new_triangulation) {
    delete _triangulation_;
    _triangulation_    = new_triangulation.newFactory();
    _is_new_jt_needed_ = true;
    this->setOutdatedStructureState_();
  }
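  /*
   * Illustrative sketch: the triangulation strategy used to build the join tree
   * can be replaced at any time ("ie" is a hypothetical engine; DefaultTriangulation
   * is the strategy already installed by the constructor):
   *
   *   gum::DefaultTriangulation triangulation;
   *   ie.setTriangulation(triangulation);   // the join tree is rebuilt at the next inference
   */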
  /// returns the current join tree used
  template < typename GUM_SCALAR >
  INLINE const JoinTree* ShaferShenoyMNInference< GUM_SCALAR >::joinTree() {
    _createNewJT_();

    return _propagator_;
  }

  /// returns the current junction tree
  template < typename GUM_SCALAR >
  INLINE const JunctionTree* ShaferShenoyMNInference< GUM_SCALAR >::junctionTree() {
    _createNewJT_();

    return _junctionTree_;
  }


  /// sets the operator for performing the projections
  template < typename GUM_SCALAR >
  INLINE void ShaferShenoyMNInference< GUM_SCALAR >::_setProjectionFunction_(
     Potential< GUM_SCALAR >* (*proj)(const Potential< GUM_SCALAR >&,
                                      const Set< const DiscreteVariable* >&)) {
    _projection_op_ = proj;
  }


  /// sets the operator for performing the combinations
  template < typename GUM_SCALAR >
  INLINE void ShaferShenoyMNInference< GUM_SCALAR >::_setCombinationFunction_(
     Potential< GUM_SCALAR >* (*comb)(const Potential< GUM_SCALAR >&,
                                      const Potential< GUM_SCALAR >&)) {
    _combination_op_ = comb;
  }


  /// invalidate all messages, posteriors and created potentials
  template < typename GUM_SCALAR >
  void ShaferShenoyMNInference< GUM_SCALAR >::_invalidateAllMessages_() {
    // remove all the messages computed
    for (auto& potset: _separator_potentials_)
      potset.second.clear();
    for (auto& mess_computed: _messages_computed_)
      mess_computed.second = false;

    // remove all the created potentials
    for (const auto& potset: _created_messages_)
      for (const auto pot: potset.second)
        delete pot;

    // remove all the posteriors
    for (const auto& pot: _target_posteriors_)
      delete pot.second;
    for (const auto& pot: _joint_target_posteriors_)
      delete pot.second;

    // indicate that new messages need be computed
    if (this->isInferenceReady() || this->isInferenceDone())
      this->setOutdatedPotentialsState_();
  }
  /// fired when a new evidence is inserted
  template < typename GUM_SCALAR >
  INLINE void ShaferShenoyMNInference< GUM_SCALAR >::onEvidenceAdded_(const NodeId id,
                                                                      bool         isHardEvidence) {
    // if we have a new hard evidence, this modifies the undigraph over which
    // the join tree is created. This is also the case if id is not a node of
    // the undigraph
    if (isHardEvidence || !_reduced_graph_.exists(id))
      _is_new_jt_needed_ = true;
    else {
      try {
        _evidence_changes_.insert(id, EvidenceChangeType::EVIDENCE_ADDED);
      } catch (DuplicateElement&) {
        // here, the evidence change already existed. This necessarily means
        // that the current saved change is an EVIDENCE_ERASED. So if we
        // erased the evidence and added some again, this corresponds to an
        // EVIDENCE_MODIFIED
        _evidence_changes_[id] = EvidenceChangeType::EVIDENCE_MODIFIED;
      }
    }
  }


  /// fired when an evidence is removed
  template < typename GUM_SCALAR >
  INLINE void ShaferShenoyMNInference< GUM_SCALAR >::onEvidenceErased_(const NodeId id,
                                                                       bool         isHardEvidence) {
    // if we delete a hard evidence, this modifies the undigraph over which
    // the join tree is created.
    if (isHardEvidence)
      _is_new_jt_needed_ = true;
    else {
      try {
        _evidence_changes_.insert(id, EvidenceChangeType::EVIDENCE_ERASED);
      } catch (DuplicateElement&) {
        // here, the evidence change already existed and it is necessarily an
        // EVIDENCE_ADDED or an EVIDENCE_MODIFIED. So, if the evidence has
        // been added and is now erased, this is similar to not having created
        // it. If the evidence was only modified, it already existed in the
        // last inference and we should now indicate that it has been removed.
        if (_evidence_changes_[id] == EvidenceChangeType::EVIDENCE_ADDED)
          _evidence_changes_.erase(id);
        else
          _evidence_changes_[id] = EvidenceChangeType::EVIDENCE_ERASED;
      }
    }
  }


  /// fired when all the evidence are erased
  template < typename GUM_SCALAR >
  void ShaferShenoyMNInference< GUM_SCALAR >::onAllEvidenceErased_(bool has_hard_evidence) {
    if (has_hard_evidence || !this->hardEvidenceNodes().empty())
      _is_new_jt_needed_ = true;
    else {
      for (const auto node: this->softEvidenceNodes()) {
        try {
          _evidence_changes_.insert(node, EvidenceChangeType::EVIDENCE_ERASED);
        } catch (DuplicateElement&) {
          // here, the evidence change already existed and it is necessarily an
          // EVIDENCE_ADDED or an EVIDENCE_MODIFIED. So, if the evidence has
          // been added and is now erased, this is similar to not having created
          // it. If the evidence was only modified, it already existed in the
          // last inference and we should now indicate that it has been removed.
          if (_evidence_changes_[node] == EvidenceChangeType::EVIDENCE_ADDED)
            _evidence_changes_.erase(node);
          else
            _evidence_changes_[node] = EvidenceChangeType::EVIDENCE_ERASED;
        }
      }
    }
  }


  /// fired when an evidence is changed
  template < typename GUM_SCALAR >
  INLINE void ShaferShenoyMNInference< GUM_SCALAR >::onEvidenceChanged_(const NodeId id,
                                                                        bool hasChangedSoftHard) {
    if (hasChangedSoftHard)
      _is_new_jt_needed_ = true;
    else {
      try {
        _evidence_changes_.insert(id, EvidenceChangeType::EVIDENCE_MODIFIED);
      } catch (DuplicateElement&) {
        // here, the evidence change already existed and it is necessarily an
        // EVIDENCE_ADDED. So we should keep this state to indicate that this
        // evidence is new w.r.t. the last inference
      }
    }
  }
  /// fired after a new target is inserted
  template < typename GUM_SCALAR >
  INLINE void ShaferShenoyMNInference< GUM_SCALAR >::onMarginalTargetAdded_(const NodeId id) {}


  /// fired before a target is removed
  template < typename GUM_SCALAR >
  INLINE void ShaferShenoyMNInference< GUM_SCALAR >::onMarginalTargetErased_(const NodeId id) {}


  /// fired after a new set target is inserted
  template < typename GUM_SCALAR >
  INLINE void ShaferShenoyMNInference< GUM_SCALAR >::onJointTargetAdded_(const NodeSet& set) {}


  /// fired before a set target is removed
  template < typename GUM_SCALAR >
  INLINE void ShaferShenoyMNInference< GUM_SCALAR >::onJointTargetErased_(const NodeSet& set) {}


  /// fired after all the nodes of the MN are added as single targets
  template < typename GUM_SCALAR >
  INLINE void ShaferShenoyMNInference< GUM_SCALAR >::onAllMarginalTargetsAdded_() {}


  /// fired before all the single targets are removed
  template < typename GUM_SCALAR >
  INLINE void ShaferShenoyMNInference< GUM_SCALAR >::onAllMarginalTargetsErased_() {}

  /// fired after a new Markov net has been assigned to the engine
  template < typename GUM_SCALAR >
  INLINE void ShaferShenoyMNInference< GUM_SCALAR >::onMarkovNetChanged_(
     const IMarkovNet< GUM_SCALAR >* mn) {}

  /// fired before all the joint targets are removed
  template < typename GUM_SCALAR >
  INLINE void ShaferShenoyMNInference< GUM_SCALAR >::onAllJointTargetsErased_() {}


  /// fired before all the single and joint targets are removed
  template < typename GUM_SCALAR >
  INLINE void ShaferShenoyMNInference< GUM_SCALAR >::onAllTargetsErased_() {}
  // check whether a new junction tree is really needed for the next inference
  template < typename GUM_SCALAR >
  bool ShaferShenoyMNInference< GUM_SCALAR >::_isNewJTNeeded_() const {
    // if we do not have a JT or if _is_new_jt_needed_ is set to true, then
    // we know that we need to create a new join tree
    if ((_propagator_ == nullptr) || _is_new_jt_needed_) return true;

    // if some targets do not belong to the join tree and, consequently,
    // to the undigraph that was used to construct the join tree, then we need
    // to create a new JT. This situation may occur if we constructed the
    // join tree after pruning irrelevant/barren nodes from the MN.
    // However, note that the nodes that received hard evidence do not belong to
    // the graph and, therefore, should not be taken into account
    const auto& hard_ev_nodes = this->hardEvidenceNodes();
    for (const auto node: this->targets()) {
      if (!_reduced_graph_.exists(node) && !hard_ev_nodes.exists(node)) return true;
    }
    for (const auto& joint_target: this->jointTargets()) {
      // here, we need to check that at least one clique contains all the
      // nodes of the joint target.
      bool containing_clique_found = false;
      for (const auto node: joint_target) {
        bool found = true;
        try {
          const NodeSet& clique = _propagator_->clique(_node_to_clique_[node]);
          for (const auto xnode: joint_target) {
            if (!clique.contains(xnode) && !hard_ev_nodes.exists(xnode)) {
              found = false;
              break;
            }
          }
        } catch (NotFound&) { found = false; }

        if (found) {
          containing_clique_found = true;
          break;
        }
      }

      if (!containing_clique_found) return true;
    }

    // if some new evidence have been added on nodes that do not belong
    // to _reduced_graph_, then we potentially have to reconstruct the join tree
    for (const auto& change: _evidence_changes_) {
      if ((change.second == EvidenceChangeType::EVIDENCE_ADDED)
          && !_reduced_graph_.exists(change.first))
        return true;
    }

    // here, the current JT is exactly what we need for the next inference
    return false;
  }
  /// create a new junction tree as well as its related data structures
  template < typename GUM_SCALAR >
  void ShaferShenoyMNInference< GUM_SCALAR >::_createNewJT_() {
    // to create the JT, we first create the moral graph of the MN in the
    // following way in order to take into account the barren nodes and the
    // nodes that received evidence:
    // 1/ we create an undirected graph containing only the nodes and no edge
    // 2/ add edges so that set targets are cliques of the moral graph
    // 3/ remove the nodes that received hard evidence (by step 2/, their
    //    neighbours are linked by edges, which is necessary for inference)
    //
    // At the end of step 3/, we have our moral graph and we can triangulate it
    // to get the new junction tree

    // 1/ create an undirected graph containing only the nodes and no edge
    const auto& mn  = this->MN();
    _reduced_graph_ = mn.graph();

    // 2/ if there exists some joint targets, we shall add new edges into the
    // moral graph in order to ensure that there exists a clique containing
    // each joint
    for (const auto& nodeset: this->jointTargets())
      for (const auto n1: nodeset)
        for (const auto n2: nodeset)
          if (n1 < n2) _reduced_graph_.addEdge(n1, n2);

    // 3/ remove all the nodes that received hard evidence
    _hard_ev_nodes_ = this->hardEvidenceNodes();
    for (const auto node: _hard_ev_nodes_) {
      _reduced_graph_.eraseNode(node);
    }

    // now, we can compute the new junction tree. To speed-up computations
    // (essentially, those of a distribution phase), we construct from this
    // junction tree a binary join tree
    if (_propagator_ != nullptr) delete _propagator_;
    if (_junctionTree_ != nullptr) delete _junctionTree_;

    _triangulation_->setGraph(&_reduced_graph_, &(this->domainSizes()));
    const JunctionTree& triang_jt = _triangulation_->junctionTree();
    if (_use_binary_join_tree_) {
      BinaryJoinTreeConverterDefault bjt_converter;
      NodeSet                        emptyset;
      _propagator_
         = new CliqueGraph(bjt_converter.convert(triang_jt, this->domainSizes(), emptyset));
    } else {
      _propagator_ = new CliqueGraph(triang_jt);
    }
    _junctionTree_ = new CliqueGraph(triang_jt);

    const std::vector< NodeId >& JT_elim_order   = _triangulation_->eliminationOrder();
    Size                         size_elim_order = JT_elim_order.size();
    NodeProperty< Idx >          elim_order(size_elim_order);
    for (Idx i = Idx(0); i < size_elim_order; ++i)
      elim_order.insert(JT_elim_order[i], i);

    // indicate, for each factor of the Markov network, a clique in _propagator_
    // that can contain its potential
    _factor_to_clique_.clear();
    // TO REMOVE const UndiGraph& graph = mn.graph();
    for (const auto& kv: mn.factors()) {
      const auto& factor = kv.first;   // kv.second is the Potential()
      // not a true id since smallest_elim_number == size_elim_order
      NodeId first_eliminated_node = 0;
      // (impossible value: smallest_elim_number \in [0...size_elim_order-1])
      Idx smallest_elim_number = size_elim_order;
      for (const auto nod: factor) {
        if (_reduced_graph_.exists(nod)) {
          if (elim_order[nod] < smallest_elim_number) {
            smallest_elim_number  = elim_order[nod];
            first_eliminated_node = nod;
          }
        }
      }

      if (smallest_elim_number != size_elim_order) {
        // first_eliminated_node contains the first var (node or one of its
        // parents) eliminated => the clique created during its elimination
        // contains node and all of its parents => it can contain the potential
        // assigned to the node in the MN
        _factor_to_clique_.insert(
           factor,
           _triangulation_->createdJunctionTreeClique(first_eliminated_node));
      }
    }

    // attribute a clique in the join tree for each node in the MN
    // (using a NodeProperty instead of mn.smallestFactorFromNode():
    // could this be a better choice?)
    _node_to_clique_.clear();
    for (const NodeId node: mn.nodes()) {
      if (_reduced_graph_.exists(node)) {
        _node_to_clique_.insert(node, _factor_to_clique_[mn.smallestFactorFromNode(node)]);
      }
    }

    // indicate for each joint_target a clique that contains it
    _joint_target_to_clique_.clear();
    for (const auto& target: this->jointTargets()) {
      // remove from target all the nodes that received hard evidence (since they
      // do not belong to the join tree)
      NodeSet nodeset = target;
      for (const auto node: _hard_ev_nodes_)
        if (nodeset.contains(node)) nodeset.erase(node);

      if (!nodeset.empty()) {
        // not a true id since smallest_elim_number == size_elim_order
        NodeId first_eliminated_node = 0;
        // (smallest_elim_number \in [0...size_elim_order-1])
        Idx smallest_elim_number = size_elim_order;
        for (const auto nod: nodeset) {
          if (_reduced_graph_.exists(nod)) {
            if (elim_order[nod] < smallest_elim_number) {
              smallest_elim_number  = elim_order[nod];
              first_eliminated_node = nod;
            }
          }
        }

        if (smallest_elim_number != size_elim_order) {
          // first_eliminated_node contains the first var (node or one of its
          // parents) eliminated => the clique created during its elimination
          // contains node and all of its parents => it can contain the potential
          // assigned to the node in the MN
          _factor_to_clique_.insert(
             nodeset,
             _triangulation_->createdJunctionTreeClique(first_eliminated_node));
        }
      }
    }

    // compute the roots of _propagator_'s connected components
    _computeJoinTreeRoots_();

    // remove all the Shafer-Shenoy potentials stored into the cliques
    for (const auto& xpot: _clique_potentials_) {
      if (_clique_potentials_list_[xpot.first].size() > 1) delete xpot.second;
    }
    _clique_potentials_list_.clear();

    // create empty potential lists into the cliques of the joint tree as well
    // as empty lists of evidence
    _PotentialSet_ empty_set;
    _node_to_soft_evidence_.clear();
    for (const auto clik: *_propagator_) {
      _clique_potentials_list_.insert(clik, empty_set);
    }

    // remove all the potentials created during the last inference
    for (const auto& potlist: _created_messages_)
      for (auto pot: potlist.second)
        delete pot;
    _created_messages_.clear();

    // remove all the potentials created to take into account hard evidence
    // during the last inference
    for (auto pot_pair: _hard_ev_projected_factors_)
      delete pot_pair.second;
    _hard_ev_projected_factors_.clear();

    // remove all the constants created due to projections of factors that were
    // defined over only hard evidence nodes
    //  __constants.clear();

    // create empty lists of potentials for the messages and indicate that no
    // message has been computed yet
    _separator_potentials_.clear();
    _messages_computed_.clear();
    for (const auto& edge: _propagator_->edges()) {
      const Arc arc1(edge.first(), edge.second());
      _separator_potentials_.insert(arc1, empty_set);
      _messages_computed_.insert(arc1, false);
      const Arc arc2(Arc(edge.second(), edge.first()));
      _separator_potentials_.insert(arc2, empty_set);
      _messages_computed_.insert(arc2, false);
    }

    // remove all the posteriors computed so far
    for (const auto& pot: _target_posteriors_)
      delete pot.second;
    _target_posteriors_.clear();
    for (const auto& pot: _joint_target_posteriors_)
      delete pot.second;
    _joint_target_posteriors_.clear();


    // put all the factors of the Markov net nodes into the cliques
    // here, beware: all the potentials that are defined over some nodes
    // including hard evidence must be projected so that these nodes are
    // removed from the potential
    const auto& evidence = this->evidence();
    for (const auto& kv: mn.factors()) {
      const auto& factor = kv.first;
      const auto& pot    = *(kv.second);

      NodeSet hard_nodes_in_factor;
      for (const auto xnode: factor) {
        if (_hard_ev_nodes_.contains(xnode)) hard_nodes_in_factor.insert(xnode);
      }

      // if hard_nodes_in_factor contains hard evidence nodes, perform a projection
      // and insert the result into the appropriate clique, else insert the
      // factor directly into the clique
      if (hard_nodes_in_factor.empty()) {
        _clique_potentials_list_[_factor_to_clique_[factor]].insert(&pot);
      } else {
        // marginalize out the hard evidence nodes: if the factor is defined
        // only over nodes that received hard evidence, do not consider it
        // as a potential anymore but as a constant
        if (hard_nodes_in_factor.size() != factor.size()) {
          // perform the projection with a combine and project instance
          Set< const DiscreteVariable* > hard_variables;
          _PotentialSet_                 marg_factor_set;
          marg_factor_set.insert(&pot);
          for (const auto xnode: hard_nodes_in_factor) {
            marg_factor_set.insert(evidence[xnode]);
            hard_variables.insert(&(mn.variable(xnode)));
          }

          // perform the combination of those potentials and their projection
          MultiDimCombineAndProjectDefault< GUM_SCALAR, Potential > combine_and_project(
             _combination_op_,
             SSNewMNprojPotential);
          _PotentialSet_ new_factor_list
             = combine_and_project.combineAndProject(marg_factor_set, hard_variables);

          // there should be only one potential in new_factor_list
          if (new_factor_list.size() != 1) {
            for (auto pot: new_factor_list) {
              if (!marg_factor_set.contains(pot)) delete pot;
            }
            GUM_ERROR(FatalError,
                      "the projection of a potential containing "
                         << "hard evidence is empty!");
          }
          const Potential< GUM_SCALAR >* projected_factor = *(new_factor_list.begin());
          _clique_potentials_list_[_factor_to_clique_[factor]].insert(projected_factor);
          _hard_ev_projected_factors_.insert(factor, projected_factor);
        }
      }
    }

    // we shall now add all the potentials of the soft evidence
    for (const auto node: this->softEvidenceNodes()) {
      _node_to_soft_evidence_.insert(node, evidence[node]);
      _clique_potentials_list_[_node_to_clique_[node]].insert(evidence[node]);
    }

    // now, in _clique_potentials_list_, for each clique, we have the list of
    // potentials that must be combined in order to produce the Shafer-Shenoy's
    // potential stored into the clique. So, perform this combination and
    // store the result in _clique_potentials_
    _clique_potentials_.clear();
    MultiDimCombinationDefault< GUM_SCALAR, Potential > fast_combination(_combination_op_);
    for (const auto& xpotset: _clique_potentials_list_) {
      const auto& potset = xpotset.second;
      if (potset.size() > 0) {
        // here, there will be an entry in _clique_potentials_.
        // If there is only one element in potset, this element shall be
        // stored into _clique_potentials_, else all the elements of potset
        // shall be combined and their result shall be stored
        if (potset.size() == 1) {
          _clique_potentials_.insert(xpotset.first, *(potset.cbegin()));
        } else {
          auto joint = fast_combination.combine(potset);
          _clique_potentials_.insert(xpotset.first, joint);
        }
      }
    }

    // indicate that the data structures are up to date.
    _evidence_changes_.clear();
    _is_new_jt_needed_ = false;
  }
  /// prepare the inference structures w.r.t. new targets, soft/hard evidence
  template < typename GUM_SCALAR >
  void ShaferShenoyMNInference< GUM_SCALAR >::updateOutdatedStructure_() {
    // check if a new JT is really needed. If so, create it
    if (_isNewJTNeeded_()) {
      _createNewJT_();
    } else {
      // here, we can answer the next queries without reconstructing all the
      // junction tree. All we need to do is to indicate that we should
      // update the potentials and messages for these queries
      updateOutdatedPotentials_();
    }
  }


  /// invalidate all the messages sent from a given clique
  template < typename GUM_SCALAR >
  void ShaferShenoyMNInference< GUM_SCALAR >::_diffuseMessageInvalidations_(
     NodeId   from_id,
     NodeId   to_id,
     NodeSet& invalidated_cliques) {
    // invalidate the current clique
    invalidated_cliques.insert(to_id);

    // invalidate the current arc
    const Arc arc(from_id, to_id);
    bool&     message_computed = _messages_computed_[arc];
    if (message_computed) {
      message_computed = false;
      _separator_potentials_[arc].clear();
      if (_created_messages_.exists(arc)) {
        auto& arc_created_potentials = _created_messages_[arc];
        for (auto pot: arc_created_potentials)
          delete pot;
        arc_created_potentials.clear();
      }

      // go on with the diffusion
      for (const auto node_id: _propagator_->neighbours(to_id)) {
        if (node_id != from_id)
          _diffuseMessageInvalidations_(to_id, node_id, invalidated_cliques);
      }
    }
  }
  /// update the potentials stored in the cliques and invalidate outdated
  /// messages
  template < typename GUM_SCALAR >
  void ShaferShenoyMNInference< GUM_SCALAR >::updateOutdatedPotentials_() {
    // for each clique, indicate whether the potential stored into
    // _clique_potentials_[clique] is the result of a combination. In this
    // case, it has been allocated by the combination and will need to be
    // deallocated if its clique has been invalidated
    const auto& mn = this->MN();

    NodeProperty< bool > to_deallocate(_clique_potentials_list_.size());
    for (auto pot_iter = _clique_potentials_list_.cbegin();
         pot_iter != _clique_potentials_list_.cend();
         ++pot_iter) {
      to_deallocate.insert(pot_iter.key(), (pot_iter.val().size() > 1));
    }

    // compute the set of factors that were projected due to hard evidence and
    // whose hard evidence has changed, so that they need a new projection.
    // By the way, remove these factors since they are no longer needed.
    // Here only the values of the hard evidence can have changed (else a
    // fully new join tree would have been computed).
    // Note also that we know that the factors still contain some variable(s) after
    // the projection (else they would be constants)
    NodeSet        hard_nodes_changed(_hard_ev_nodes_.size());
    Set< NodeId >  hard_cliques_changed;
    Set< NodeSet > hard_factors_changed;
    for (const auto node: _hard_ev_nodes_) {
      if (_evidence_changes_.exists(node)) {
        hard_nodes_changed.insert(node);
        for (const auto& elt: mn.factors()) {
          const auto& chgFactor = elt.first;
          if (chgFactor.contains(node)) {
            if (!hard_factors_changed.contains(chgFactor)) {
              hard_factors_changed.insert(chgFactor);

              const auto&  chgPot    = _hard_ev_projected_factors_[chgFactor];
              const NodeId chgClique = _factor_to_clique_[chgFactor];
              _clique_potentials_list_[chgClique].erase(chgPot);
              delete chgPot;
              _hard_ev_projected_factors_.erase(chgFactor);

              if (!hard_cliques_changed.contains(chgClique))
                hard_cliques_changed.insert(chgClique);
            }
          }
        }
      }
    }

    // invalidate all the messages that are no longer correct: start from each of
    // the nodes whose soft evidence has changed and perform a diffusion from
    // the clique into which the soft evidence has been entered, indicating that
    // the messages spreading from this clique are now invalid. At the same time,
    // if there were potentials created on the arcs over which the messages were
    // sent, remove them from memory. For all the cliques that received some
    // projected factors that should now be changed, do the same.
    NodeSet invalidated_cliques(_propagator_->size());
    for (const auto& pair: _evidence_changes_) {
      if (_node_to_clique_.exists(pair.first)) {
        const auto clique = _node_to_clique_[pair.first];
        invalidated_cliques.insert(clique);
        for (const auto neighbor: _propagator_->neighbours(clique)) {
          _diffuseMessageInvalidations_(clique, neighbor, invalidated_cliques);
        }
      }
    }

    // now, add to the set of invalidated cliques those that contain projected
    // factors that were changed.
    for (const auto clique: hard_cliques_changed) {
      invalidated_cliques.insert(clique);
      for (const auto neighbor: _propagator_->neighbours(clique)) {
        _diffuseMessageInvalidations_(clique, neighbor, invalidated_cliques);
      }
    }

    // now that we know the cliques whose set of potentials have been changed,
    // we can discard their corresponding Shafer-Shenoy potential
    for (const auto clique: invalidated_cliques)
      if (_clique_potentials_.exists(clique) && to_deallocate[clique])
        delete _clique_potentials_[clique];

    // now we shall remove all the posteriors that belong to the
    // invalidated cliques. First, cope only with the nodes that did not
    // receive hard evidence since the other nodes do not belong to the
    // join tree
    if (!_target_posteriors_.empty()) {
      for (auto iter = _target_posteriors_.beginSafe(); iter != _target_posteriors_.endSafe();
           ++iter) {
        if (_reduced_graph_.exists(iter.key())
            && (invalidated_cliques.exists(_node_to_clique_[iter.key()]))) {
          delete iter.val();
          _target_posteriors_.erase(iter);
        }
      }

      // now cope with the nodes that received hard evidence
      for (auto iter = _target_posteriors_.beginSafe(); iter != _target_posteriors_.endSafe();
           ++iter) {
        if (hard_nodes_changed.contains(iter.key())) {
          delete iter.val();
          _target_posteriors_.erase(iter);
        }
      }
    }

    // finally, cope with joint targets
    for (auto iter = _joint_target_posteriors_.beginSafe();
         iter != _joint_target_posteriors_.endSafe();
         ++iter) {
      if (invalidated_cliques.exists(_joint_target_to_clique_[iter.key()])) {
        delete iter.val();
        _joint_target_posteriors_.erase(iter);
      }
    }

    // remove all the evidence that were entered into _node_to_soft_evidence_
    // and _clique_potentials_list_ and add the new soft ones
    for (auto& pot_pair: _node_to_soft_evidence_)
      _clique_potentials_list_[_node_to_clique_[pot_pair.first]].erase(pot_pair.second);
    _node_to_soft_evidence_.clear();

    const auto& evidence = this->evidence();
    for (const auto node: this->softEvidenceNodes()) {
      _node_to_soft_evidence_.insert(node, evidence[node]);
      _clique_potentials_list_[_node_to_clique_[node]].insert(evidence[node]);
    }


    // Now add the projections of the factors due to newly changed hard evidence:
    // if we are performing updateOutdatedPotentials_, this means that the
    // set of nodes that received hard evidence has not been changed, only
    // their instantiations can have been changed. So, if there is an entry
    // for node in _constants_, there will still be such an entry after
    // performing the new projections. Idem for _hard_ev_projected_factors_

    // for (const auto node: factors_with_projected_factors_changed) {
    for (const auto& iter_factor: mn.factors()) {
      NodeSet hard_nodes = hard_nodes_changed * iter_factor.first;
      if (hard_nodes.empty()) continue;   // no change in iter_factor

      // perform the projection with a combine and project instance
      _PotentialSet_                 marg_factor_set{iter_factor.second};
      Set< const DiscreteVariable* > hard_variables;
      for (const auto xnode: hard_nodes) {
        marg_factor_set.insert(evidence[xnode]);
        hard_variables.insert(&mn.variable(xnode));
      }

      // perform the combination of those potentials and their projection
      MultiDimCombineAndProjectDefault< GUM_SCALAR, Potential > combine_and_project(
         _combination_op_,
         SSNewMNprojPotential);
      _PotentialSet_ new_potentials_list
         = combine_and_project.combineAndProject(marg_factor_set, hard_variables);

      // there should be only one potential in new_potentials_list
      if (new_potentials_list.size() != 1) {
        // remove the potentials created to avoid memory leaks
        for (auto pot: new_potentials_list) {
          if (!marg_factor_set.contains(pot)) delete pot;
        }
        GUM_ERROR(FatalError,
                  "the projection of a potential containing "
                     << "hard evidence is empty!");
      }

      const Potential< GUM_SCALAR >* projected_factor = *(new_potentials_list.begin());
      _clique_potentials_list_[_factor_to_clique_[iter_factor.first]].insert(projected_factor);
      _hard_ev_projected_factors_.insert(iter_factor.first, projected_factor);
    }

    // here, the list of potentials stored in the invalidated cliques have
    // been updated. So, now, we can combine them to produce the Shafer-Shenoy
    // potential stored into the clique
    MultiDimCombinationDefault< GUM_SCALAR, Potential > fast_combination(_combination_op_);
    for (const auto clique: invalidated_cliques) {
      const auto& potset = _clique_potentials_list_[clique];

      if (potset.size() > 0) {
        // here, there will be an entry in _clique_potentials_.
        // If there is only one element in potset, this element shall be
        // stored into _clique_potentials_, else all the elements of potset
        // shall be combined and their result shall be stored
        if (potset.size() == 1) {
          _clique_potentials_[clique] = *(potset.cbegin());
        } else {
          auto joint = fast_combination.combine(potset);
          _clique_potentials_[clique] = joint;
        }
      }
    }

    // update the constants
    /*const auto& hard_evidence = this->hardEvidence();
    for (auto& node_cst: _constants_) {
      const Potential< GUM_SCALAR >& cpt       = mn.cpt(node_cst.first);
      const auto&                    variables = cpt.variablesSequence();
      Instantiation                  inst;
      for (const auto var: variables)
        inst << *var;
      for (const auto var: variables) {
        inst.chgVal(var, hard_evidence[mn.nodeId(*var)]);
      }
      node_cst.second = cpt[inst];
    }*/

    // indicate that all changes have been performed
    _evidence_changes_.clear();
  }
  /// compute a root for each connected component of _propagator_
  template < typename GUM_SCALAR >
  void ShaferShenoyMNInference< GUM_SCALAR >::_computeJoinTreeRoots_() {
    const auto& mn = this->MN();

    // get the set of cliques in which we can find the targets and joint_targets
    NodeSet clique_targets;
    for (const auto node: this->targets()) {
      try {
        clique_targets.insert(_node_to_clique_[node]);
      } catch (Exception&) {}
    }
    for (const auto& set: this->jointTargets()) {
      try {
        clique_targets.insert(_joint_target_to_clique_[set]);
      } catch (Exception&) {}
    }

    // put in a vector these cliques and their size
    std::vector< std::pair< NodeId, Size > > possible_roots(clique_targets.size());
    std::size_t                              i = 0;
    for (const auto clique_id: clique_targets) {
      const auto& clique   = _propagator_->clique(clique_id);
      Size        dom_size = 1;
      for (const auto node: clique) {
        dom_size *= mn.variable(node).domainSize();
      }
      possible_roots[i] = std::pair< NodeId, Size >(clique_id, dom_size);
      ++i;
    }

    // sort the cliques by increasing domain size
    std::sort(possible_roots.begin(),
              possible_roots.end(),
              [](const std::pair< NodeId, Size >& a, const std::pair< NodeId, Size >& b) -> bool {
                return a.second < b.second;
              });

    // pick up the clique with the smallest size in each connected component
    NodeProperty< bool >                  marked = _propagator_->nodesProperty(false);
    std::function< void(NodeId, NodeId) > diffuse_marks
       = [&marked, &diffuse_marks, this](NodeId node, NodeId from) {
           if (!marked[node]) {
             marked[node] = true;
             for (const auto neigh: _propagator_->neighbours(node))
               if ((neigh != from) && !marked[neigh]) diffuse_marks(neigh, node);
           }
         };
    _roots_.clear();
    for (const auto xclique: possible_roots) {
      NodeId clique = xclique.first;
      if (!marked[clique]) {
        _roots_.insert(clique);
        diffuse_marks(clique, clique);
      }
    }
  }
  // performs the collect phase of the Shafer-Shenoy propagation
  template < typename GUM_SCALAR >
  INLINE void ShaferShenoyMNInference< GUM_SCALAR >::_collectMessage_(NodeId id, NodeId from) {
    for (const auto other: _propagator_->neighbours(id)) {
      if ((other != from) && !_messages_computed_[Arc(other, id)]) _collectMessage_(other, id);
    }

    if ((id != from) && !_messages_computed_[Arc(id, from)]) { _produceMessage_(id, from); }
  }

  // remove variables del_vars from the list of potentials pot_list
  template < typename GUM_SCALAR >
  Set< const Potential< GUM_SCALAR >* > ShaferShenoyMNInference< GUM_SCALAR >::_marginalizeOut_(
     Set< const Potential< GUM_SCALAR >* > pot_list,
     Set< const DiscreteVariable* >&       del_vars,
     Set< const DiscreteVariable* >&       kept_vars) {
    // remove the potentials corresponding to barren variables if we want
    // to exploit barren nodes
    /* _PotentialSet_ barren_projected_potentials;
    if ( _barren_nodes_type_ == FindBarrenNodesType::FIND_BARREN_NODES) {
      barren_projected_potentials = _removeBarrenVariables_(pot_list, del_vars);
    }*/

    // create a combine and project operator that will perform the
    // marginalization
    MultiDimCombineAndProjectDefault< GUM_SCALAR, Potential > combine_and_project(_combination_op_,
                                                                                  _projection_op_);
    _PotentialSet_ new_pot_list = combine_and_project.combineAndProject(pot_list, del_vars);

    // remove all the potentials that were created due to projections of
    // barren nodes and that are not part of the new_pot_list: these
    // potentials were just temporary potentials
    /*for (auto iter = barren_projected_potentials.beginSafe();
         iter != barren_projected_potentials.endSafe();
         ++iter) {
      if (!new_pot_list.exists(*iter)) delete *iter;
    }*/

    // remove all the potentials that have no dimension
    for (auto iter_pot = new_pot_list.beginSafe(); iter_pot != new_pot_list.endSafe();
         ++iter_pot) {
      if ((*iter_pot)->variablesSequence().size() == 0) {
        // as we have already marginalized out variables that received evidence,
        // it may be the case that, after combining and projecting, some
        // potentials might be empty. In this case, we shall keep their
        // constant and remove them from memory
        // # TODO: keep the constants!
        delete *iter_pot;
        new_pot_list.erase(iter_pot);
      }
    }

    return new_pot_list;
  }
  // creates the message sent by clique from_id to clique to_id
  template < typename GUM_SCALAR >
  void ShaferShenoyMNInference< GUM_SCALAR >::_produceMessage_(NodeId from_id, NodeId to_id) {
    // get the potentials of the clique.
    _PotentialSet_ pot_list;
    if (_clique_potentials_.exists(from_id)) pot_list.insert(_clique_potentials_[from_id]);

    // add the messages sent by adjacent nodes to from_id
    for (const auto other_id: _propagator_->neighbours(from_id))
      if (other_id != to_id) pot_list += _separator_potentials_[Arc(other_id, from_id)];

    // get the set of variables that need be removed from the potentials
    const NodeSet&                 from_clique = _propagator_->clique(from_id);
    const NodeSet&                 separator   = _propagator_->separator(from_id, to_id);
    Set< const DiscreteVariable* > del_vars(from_clique.size());
    Set< const DiscreteVariable* > kept_vars(separator.size());
    const auto&                    mn = this->MN();

    for (const auto node: from_clique) {
      if (!separator.contains(node)) {
        del_vars.insert(&(mn.variable(node)));
      } else {
        kept_vars.insert(&(mn.variable(node)));
      }
    }

    // pot_list now contains all the potentials to multiply and marginalize
    // => combine the messages
    _PotentialSet_ new_pot_list = _marginalizeOut_(pot_list, del_vars, kept_vars);

    /*
      for the moment, remove this test: due to some optimizations, some
      potentials might have all their cells greater than 1.

    // remove all the potentials that are equal to ones (as probability
    // matrix multiplications are tensorial, such potentials are useless)
    for (auto iter = new_pot_list.beginSafe(); iter != new_pot_list.endSafe(); ++iter) {
      const auto pot = *iter;
      if (pot->variablesSequence().size() == 1) {
        bool is_all_ones = true;
        for (Instantiation inst(*pot); !inst.end(); ++inst) {
          if ((*pot)[inst] < _one_minus_epsilon_) {
            is_all_ones = false;
            break;
          }
        }
        if (is_all_ones) {
          if (!pot_list.exists(pot)) delete pot;
          new_pot_list.erase(iter);
          continue;
        }
      }
    }
    */

    // if there are still potentials in new_pot_list, combine them to
    // produce the message on the separator
    const Arc arc(from_id, to_id);
    if (!new_pot_list.empty()) {
      if (new_pot_list.size() == 1) {   // there is only one potential
        // in new_pot_list, so this corresponds to our message on
        // the separator
        auto pot                    = *(new_pot_list.begin());
        _separator_potentials_[arc] = std::move(new_pot_list);
        if (!pot_list.exists(pot)) {
          if (!_created_messages_.exists(arc)) _created_messages_.insert(arc, _PotentialSet_());
          _created_messages_[arc].insert(pot);
        }
      } else {
        // create the message in the separator
        MultiDimCombinationDefault< GUM_SCALAR, Potential > fast_combination(_combination_op_);
        auto joint = fast_combination.combine(new_pot_list);
        _separator_potentials_[arc].insert(joint);
        if (!_created_messages_.exists(arc)) _created_messages_.insert(arc, _PotentialSet_());
        _created_messages_[arc].insert(joint);

        // remove the temporary messages created in new_pot_list
        for (const auto pot: new_pot_list) {
          if (!pot_list.exists(pot)) { delete pot; }
        }
      }
    }

    _messages_computed_[arc] = true;
  }
  // performs a whole inference
  template < typename GUM_SCALAR >
  INLINE void ShaferShenoyMNInference< GUM_SCALAR >::makeInference_() {
    // collect messages for all single targets
    for (const auto node: this->targets()) {
      // perform only collects in the join tree for nodes that have
      // not received hard evidence (those that received hard evidence were
      // not included into the join tree for speed-up reasons)
      if (_reduced_graph_.exists(node)) {
        _collectMessage_(_node_to_clique_[node], _node_to_clique_[node]);
      }
    }

    // collect messages for all set targets
    // by parsing _joint_target_to_clique_, we ensure that the cliques that
    // are referenced belong to the join tree (even if some of the nodes in
    // their associated joint_target do not belong to _reduced_graph_)
    for (const auto set: _joint_target_to_clique_)
      _collectMessage_(set.second, set.second);
  }


  /// returns a fresh potential equal to P(1st arg, evidence)
  template < typename GUM_SCALAR >
  Potential< GUM_SCALAR >*
     ShaferShenoyMNInference< GUM_SCALAR >::unnormalizedJointPosterior_(NodeId id) {
    const auto& mn = this->MN();

    // hard evidence do not belong to the join tree
    // # TODO: check for sets of inconsistent hard evidence
    if (this->hardEvidenceNodes().contains(id)) {
      return new Potential< GUM_SCALAR >(*(this->evidence()[id]));
    }

    // if we still need to perform some inference task, do it (this should
    // already have been done by makeInference_)
    NodeId clique_of_id = _node_to_clique_[id];
    _collectMessage_(clique_of_id, clique_of_id);

    // now we just need to create the product of the potentials of the clique
    // containing id with the messages received by this clique and
    // marginalize out all variables except id
    _PotentialSet_ pot_list;
    if (_clique_potentials_.exists(clique_of_id))
      pot_list.insert(_clique_potentials_[clique_of_id]);

    // add the messages sent by adjacent nodes to targetClique
    for (const auto other: _propagator_->neighbours(clique_of_id))
      pot_list += _separator_potentials_[Arc(other, clique_of_id)];

    // get the set of variables that need be removed from the potentials
    const NodeSet&                 nodes = _propagator_->clique(clique_of_id);
    Set< const DiscreteVariable* > kept_vars{&(mn.variable(id))};
    Set< const DiscreteVariable* > del_vars(nodes.size());
    for (const auto node: nodes) {
      if (node != id) del_vars.insert(&(mn.variable(node)));
    }

    // pot_list now contains all the potentials to multiply and marginalize
    // => combine the messages
    _PotentialSet_           new_pot_list = _marginalizeOut_(pot_list, del_vars, kept_vars);
    Potential< GUM_SCALAR >* joint        = nullptr;

    if (new_pot_list.size() == 1) {
      joint = const_cast< Potential< GUM_SCALAR >* >(*(new_pot_list.begin()));
      // if pot already existed, create a copy, so that we can put it into
      // the _target_posteriors_ property
      if (pot_list.exists(joint)) {
        joint = new Potential< GUM_SCALAR >(*joint);
      } else {
        // remove the joint from new_pot_list so that it will not be
        // removed just after the else block
        new_pot_list.clear();
      }
    } else {
      MultiDimCombinationDefault< GUM_SCALAR, Potential > fast_combination(_combination_op_);
      joint = fast_combination.combine(new_pot_list);
    }

    // remove the potentials that were created in new_pot_list
    for (auto pot: new_pot_list)
      if (!pot_list.exists(pot)) delete pot;

    // check that the joint posterior is different from a 0 vector: this would
    // indicate that some hard evidence are not compatible (their joint
    // probability is equal to 0)
    bool nonzero_found = false;
    for (Instantiation inst(*joint); !inst.end(); ++inst) {
      if ((*joint)[inst]) {
        nonzero_found = true;
        break;
      }
    }
    if (!nonzero_found) {
      // remove joint from memory to avoid memory leaks
      delete joint;
      GUM_ERROR(IncompatibleEvidence,
                "some evidence entered into the Markov "
                "net are incompatible (their joint proba = 0)");
    }
    return joint;
  }


  /// returns the posterior of a given variable
  template < typename GUM_SCALAR >
  const Potential< GUM_SCALAR >& ShaferShenoyMNInference< GUM_SCALAR >::posterior_(NodeId id) {
    // check if we have already computed the posterior
    if (_target_posteriors_.exists(id)) { return *(_target_posteriors_[id]); }

    // compute the joint posterior and normalize
    auto joint = unnormalizedJointPosterior_(id);
    joint->normalize();
    _target_posteriors_.insert(id, joint);

    return *joint;
  }
  // returns the marginal a posteriori proba of a given node
  template < typename GUM_SCALAR >
  Potential< GUM_SCALAR >*
     ShaferShenoyMNInference< GUM_SCALAR >::unnormalizedJointPosterior_(const NodeSet& set) {
    // hard evidence do not belong to the join tree, so extract the nodes
    // from targets that are not hard evidence
    NodeSet targets = set, hard_ev_nodes;
    for (const auto node: this->hardEvidenceNodes()) {
      if (targets.contains(node)) {
        targets.erase(node);
        hard_ev_nodes.insert(node);
      }
    }

    // if all the nodes have received hard evidence, then compute the
    // joint posterior directly by multiplying the hard evidence potentials
    const auto& evidence = this->evidence();
    if (targets.empty()) {
      _PotentialSet_ pot_list;
      for (const auto node: set) {
        pot_list.insert(evidence[node]);
      }
      if (pot_list.size() == 1) {
        auto pot = new Potential< GUM_SCALAR >(**(pot_list.begin()));
        return pot;
      } else {
        MultiDimCombinationDefault< GUM_SCALAR, Potential > fast_combination(_combination_op_);
        return fast_combination.combine(pot_list);
      }
    }


    // if we still need to perform some inference task, do it: so, first,
    // determine the clique on which we should perform collect to compute
    // the unnormalized joint posterior of a set of nodes containing "set"
    NodeId clique_of_set;
    try {
      clique_of_set = _joint_target_to_clique_[set];
    } catch (NotFound&) {
      // here, the precise set of targets does not belong to the set of targets
      // defined by the user. So we will try to find a clique in the junction
      // tree that contains "targets":

      // 1/ we should check that all the nodes belong to the join tree
      for (const auto node: targets) {
        if (!_reduced_graph_.exists(node)) {
          GUM_ERROR(UndefinedElement, node << " is not a target node")
        }
      }

      // 2/ the clique created by the first eliminated node among target is the
      // one we are looking for
      const std::vector< NodeId >& JT_elim_order = _triangulation_->eliminationOrder();
      NodeProperty< int >          elim_order(Size(JT_elim_order.size()));
      for (std::size_t i = std::size_t(0), size = JT_elim_order.size(); i < size; ++i)
        elim_order.insert(JT_elim_order[i], (int)i);
      NodeId first_eliminated_node = *(targets.begin());
      int    elim_number           = elim_order[first_eliminated_node];
      for (const auto node: targets) {
        if (elim_order[node] < elim_number) {
          elim_number           = elim_order[node];
          first_eliminated_node = node;
        }
      }
      clique_of_set = _triangulation_->createdJunctionTreeClique(first_eliminated_node);

      // 3/ check that clique_of_set contains all the nodes in the target
      const NodeSet& clique_nodes = _propagator_->clique(clique_of_set);
      for (const auto node: targets) {
        if (!clique_nodes.contains(node)) {
          GUM_ERROR(UndefinedElement, set << " is not a joint target")
        }
      }

      // add the discovered clique to _joint_target_to_clique_
      _joint_target_to_clique_.insert(set, clique_of_set);
    }

    // now perform a collect on the clique
    _collectMessage_(clique_of_set, clique_of_set);

    // now we just need to create the product of the potentials of the clique
    // containing set with the messages received by this clique and
    // marginalize out all variables except set
    _PotentialSet_ pot_list;
    if (_clique_potentials_.exists(clique_of_set))
      pot_list.insert(_clique_potentials_[clique_of_set]);

    // add the messages sent by adjacent nodes to targetClique
    for (const auto other: _propagator_->neighbours(clique_of_set))
      pot_list += _separator_potentials_[Arc(other, clique_of_set)];

    // get the set of variables that need be removed from the potentials
    const NodeSet&                 nodes = _propagator_->clique(clique_of_set);
    Set< const DiscreteVariable* > del_vars(nodes.size());
    Set< const DiscreteVariable* > kept_vars(targets.size());
    const auto&                    mn = this->MN();
    for (const auto node: nodes) {
      if (!targets.contains(node)) {
        del_vars.insert(&(mn.variable(node)));
      } else {
        kept_vars.insert(&(mn.variable(node)));
      }
    }

    // pot_list now contains all the potentials to multiply and marginalize
    // => combine the messages
    _PotentialSet_           new_pot_list = _marginalizeOut_(pot_list, del_vars, kept_vars);
    Potential< GUM_SCALAR >* joint        = nullptr;

    if ((new_pot_list.size() == 1) && hard_ev_nodes.empty()) {
      joint = const_cast< Potential< GUM_SCALAR >* >(*(new_pot_list.begin()));
      // if pot already existed, create a copy, so that we can put it into
      // the _target_posteriors_ property
      if (pot_list.exists(joint)) {
        joint = new Potential< GUM_SCALAR >(*joint);
      } else {
        // remove the joint from new_pot_list so that it will not be
        // removed just after the next else block
        new_pot_list.clear();
      }
    } else {
      // combine all the potentials in new_pot_list with all the hard evidence
      // of the nodes in set
      _PotentialSet_ new_new_pot_list = new_pot_list;
      for (const auto node: hard_ev_nodes) {
        new_new_pot_list.insert(evidence[node]);
      }
      MultiDimCombinationDefault< GUM_SCALAR, Potential > fast_combination(_combination_op_);
      joint = fast_combination.combine(new_new_pot_list);
    }

    // remove the potentials that were created in new_pot_list
    for (auto pot: new_pot_list)
      if (!pot_list.exists(pot)) delete pot;

    // check that the joint posterior is different from a 0 vector: this would
    // indicate that some hard evidence are not compatible
    bool nonzero_found = false;
    for (Instantiation inst(*joint); !inst.end(); ++inst) {
      if ((*joint)[inst]) {
        nonzero_found = true;
        break;
      }
    }
    if (!nonzero_found) {
      // remove joint from memory to avoid memory leaks
      delete joint;
      GUM_ERROR(IncompatibleEvidence,
                "some evidence entered into the Markov "
                "net are incompatible (their joint proba = 0)");
    }

    return joint;
  }
  /// returns the posterior of a given set of variables
  template < typename GUM_SCALAR >
  const Potential< GUM_SCALAR >&
     ShaferShenoyMNInference< GUM_SCALAR >::jointPosterior_(const NodeSet& set) {
    // check if we have already computed the posterior
    if (_joint_target_posteriors_.exists(set)) { return *(_joint_target_posteriors_[set]); }

    // compute the joint posterior and normalize
    auto joint = unnormalizedJointPosterior_(set);
    joint->normalize();
    _joint_target_posteriors_.insert(set, joint);

    return *joint;
  }


  /// returns the posterior of a given set of variables
  template < typename GUM_SCALAR >
  const Potential< GUM_SCALAR >&
     ShaferShenoyMNInference< GUM_SCALAR >::jointPosterior_(const NodeSet& wanted_target,
                                                            const NodeSet& declared_target) {
    // check if we have already computed the posterior of wanted_target
    if (_joint_target_posteriors_.exists(wanted_target))
      return *(_joint_target_posteriors_[wanted_target]);

    // here, we will have to compute the posterior of declared_target and
    // marginalize out all the variables that do not belong to wanted_target

    // check if we have already computed the posterior of declared_target
    if (!_joint_target_posteriors_.exists(declared_target)) { jointPosterior_(declared_target); }

    // marginalize out all the variables that do not belong to wanted_target
    const auto&                    mn = this->MN();
    Set< const DiscreteVariable* > del_vars;
    for (const auto node: declared_target)
      if (!wanted_target.contains(node)) del_vars.insert(&(mn.variable(node)));
    auto pot = new Potential< GUM_SCALAR >(
       _joint_target_posteriors_[declared_target]->margSumOut(del_vars));

    // save the result into the cache
    _joint_target_posteriors_.insert(wanted_target, pot);

    return *pot;
  }
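  /*
   * Illustrative sketch of a joint-target query (it assumes the joint-target API
   * inherited from JointTargetedMNInference; "ie", id1 and id2 are hypothetical):
   *
   *   ie.addJointTarget(gum::NodeSet{id1, id2});
   *   ie.makeInference();
   *   const auto& p12 = ie.jointPosterior(gum::NodeSet{id1, id2});
   */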
  template < typename GUM_SCALAR >
  GUM_SCALAR ShaferShenoyMNInference< GUM_SCALAR >::evidenceProbability() {
    // perform inference in each connected component
    this->makeInference();

    // for each connected component, select a variable X and compute the
    // joint probability of X and evidence e. Then marginalize-out X to get
    // p(e) in this connected component. Finally, multiply all the p(e) that
    // we got and the elements in _constants_. The result is the probability
    // of evidence

    GUM_SCALAR prob_ev = 1;
    for (const auto root: _roots_) {
      // get a node in the clique
      NodeId                   node = *(_propagator_->clique(root).begin());
      Potential< GUM_SCALAR >* tmp  = unnormalizedJointPosterior_(node);
      GUM_SCALAR               sum  = 0;
      for (Instantiation iter(*tmp); !iter.end(); ++iter)
        sum += tmp->get(iter);
      prob_ev *= sum;
      delete tmp;
    }

    // for (const auto& projected_cpt: _constants_)
    //   prob_ev *= projected_cpt.second;

    return prob_ev;
  }

  template < typename GUM_SCALAR >
  bool ShaferShenoyMNInference< GUM_SCALAR >::isExactJointComputable_(const NodeSet& vars) {
    if (JointTargetedMNInference< GUM_SCALAR >::isExactJointComputable_(vars)) return true;

    this->prepareInference();

    for (const auto& cliknode: this->_propagator_->nodes()) {
      const auto clique = _propagator_->clique(cliknode);
      if (vars == clique) return true;
    }
    return false;
  }

  template < typename GUM_SCALAR >
  NodeSet ShaferShenoyMNInference< GUM_SCALAR >::superForJointComputable_(const NodeSet& vars) {
    const auto superset = JointTargetedMNInference< GUM_SCALAR >::superForJointComputable_(vars);
    if (!superset.empty()) return superset;

    this->prepareInference();

    for (const auto& cliknode: _propagator_->nodes()) {
      const auto clique = _propagator_->clique(cliknode);
      if (vars.isProperSubsetOf(clique)) return clique;
    }


    return NodeSet();
  }

} /* namespace gum */

#endif   // DOXYGEN_SHOULD_SKIP_THIS