aGrUM 0.20.2: a C++ library for (probabilistic) graphical models
multipleInferenceEngine_tpl.h

/**
 *
 * Copyright 2005-2020 Pierre-Henri WUILLEMIN(@LIP6) & Christophe GONZALES(@AMU)
 * info_at_agrum_dot_org
 *
 * This library is free software: you can redistribute it and/or modify
 * it under the terms of the GNU Lesser General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public License
 * along with this library. If not, see <http://www.gnu.org/licenses/>.
 *
 */

#include <agrum/CN/inference/inferenceEngine.h>
#include <agrum/CN/inference/multipleInferenceEngine.h>

namespace gum {
  namespace credal {

    template < typename GUM_SCALAR, class BNInferenceEngine >
    MultipleInferenceEngine< GUM_SCALAR, BNInferenceEngine >::
       MultipleInferenceEngine(const CredalNet< GUM_SCALAR >& credalNet) :
        InferenceEngine< GUM_SCALAR >::InferenceEngine(credalNet) {
      GUM_CONSTRUCTOR(MultipleInferenceEngine);
    }

    template < typename GUM_SCALAR, class BNInferenceEngine >
    MultipleInferenceEngine< GUM_SCALAR,
                             BNInferenceEngine >::~MultipleInferenceEngine() {
      GUM_DESTRUCTOR(MultipleInferenceEngine);
    }

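    // Initialise the per-thread data structures: working copies of the network
    // and evidence, thread-local marginal bounds, expectations, clusters and,
    // when requested, the per-thread credal sets and optimal-network storage.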
    template < typename GUM_SCALAR, class BNInferenceEngine >
    inline void
       MultipleInferenceEngine< GUM_SCALAR, BNInferenceEngine >::initThreadsData_(
          const Size& num_threads,
          const bool  storeVertices__,
          const bool  storeBNOpt__) {
      workingSet_.clear();
      workingSet_.resize(num_threads, nullptr);
      workingSetE_.clear();
      workingSetE_.resize(num_threads, nullptr);

      l_marginalMin_.clear();
      l_marginalMin_.resize(num_threads);
      l_marginalMax_.clear();
      l_marginalMax_.resize(num_threads);
      l_expectationMin_.clear();
      l_expectationMin_.resize(num_threads);
      l_expectationMax_.clear();
      l_expectationMax_.resize(num_threads);

      l_clusters_.clear();
      l_clusters_.resize(num_threads);

      if (storeVertices__) {
        l_marginalSets_.clear();
        l_marginalSets_.resize(num_threads);
      }

      if (storeBNOpt__) {
        for (Size ptr = 0; ptr < this->l_optimalNet_.size(); ptr++)
          if (this->l_optimalNet_[ptr] != nullptr) delete l_optimalNet_[ptr];

        l_optimalNet_.clear();
        l_optimalNet_.resize(num_threads);
      }

      l_modal_.clear();
      l_modal_.resize(num_threads);

      infE__::oldMarginalMin_.clear();
      this->oldMarginalMin_ = this->marginalMin_;
      this->oldMarginalMax_.clear();
      this->oldMarginalMax_ = this->marginalMax_;
    }

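    // Update the current thread's local marginal bounds (and, if enabled, its
    // credal set and optimal-network storage) with a newly computed vertex for
    // node id. Returns true only when optimal networks are stored and this
    // vertex was recorded as attaining a bound.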
    template < typename GUM_SCALAR, class BNInferenceEngine >
    inline bool
       MultipleInferenceEngine< GUM_SCALAR, BNInferenceEngine >::updateThread_(
          const NodeId&                    id,
          const std::vector< GUM_SCALAR >& vertex,
          const bool&                      elimRedund) {
      int tId = getThreadNumber();

      // save E(X) if we don't save vertices
      if (!infE__::storeVertices_ && !l_modal_[tId].empty()) {
        std::string var_name = workingSet_[tId]->variable(id).name();
        auto        delim    = var_name.find_first_of("_");
        var_name             = var_name.substr(0, delim);

        if (l_modal_[tId].exists(var_name)) {
          GUM_SCALAR exp   = 0;
          Size       vsize = Size(vertex.size());

          for (Size mod = 0; mod < vsize; mod++)
            exp += vertex[mod] * l_modal_[tId][var_name][mod];

          if (exp > l_expectationMax_[tId][id]) l_expectationMax_[tId][id] = exp;

          if (exp < l_expectationMin_[tId][id]) l_expectationMin_[tId][id] = exp;
        }
      }   // end of : if modal (map) not empty

      bool newOne = false;
      bool added  = false;
      bool result = false;
      // for burn in, we need to keep checking on local marginals and not global
      // ones (faster inference)
      // we also don't want to store dbn for observed variables since there will
      // be a huge number of them (probably all of them).
      Size vsize = Size(vertex.size());

      for (Size mod = 0; mod < vsize; mod++) {
        if (vertex[mod] < l_marginalMin_[tId][id][mod]) {
          l_marginalMin_[tId][id][mod] = vertex[mod];
          newOne                       = true;

          if (infE__::storeBNOpt_ && !infE__::evidence_.exists(id)) {
            std::vector< Size > key(3);
            key[0] = id;
            key[1] = mod;
            key[2] = 0;

            if (l_optimalNet_[tId]->insert(key, true)) result = true;
          }
        }

        if (vertex[mod] > l_marginalMax_[tId][id][mod]) {
          l_marginalMax_[tId][id][mod] = vertex[mod];
          newOne                       = true;

          if (infE__::storeBNOpt_ && !infE__::evidence_.exists(id)) {
            std::vector< Size > key(3);
            key[0] = id;
            key[1] = mod;
            key[2] = 1;

            if (l_optimalNet_[tId]->insert(key, true)) result = true;
          }
        } else if (vertex[mod] == l_marginalMin_[tId][id][mod]
                   || vertex[mod] == l_marginalMax_[tId][id][mod]) {
          newOne = true;

          if (infE__::storeBNOpt_ && vertex[mod] == l_marginalMin_[tId][id][mod]
              && !infE__::evidence_.exists(id)) {
            std::vector< Size > key(3);
            key[0] = id;
            key[1] = mod;
            key[2] = 0;

            if (l_optimalNet_[tId]->insert(key, false)) result = true;
          }

          if (infE__::storeBNOpt_ && vertex[mod] == l_marginalMax_[tId][id][mod]
              && !infE__::evidence_.exists(id)) {
            std::vector< Size > key(3);
            key[0] = id;
            key[1] = mod;
            key[2] = 1;

            if (l_optimalNet_[tId]->insert(key, false)) result = true;
          }
        }

        // store point to compute credal set vertices.
        // check for redundancy at each step or at the end ?
        if (infE__::storeVertices_ && !added && newOne) {
          updateThreadCredalSets__(id, vertex, elimRedund);
          added = true;
        }
      }

      // if all variables didn't get better marginals, we will delete
      if (infE__::storeBNOpt_ && result) return true;

      return false;
    }

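    // Add the vertex to the thread-local credal set of node id, unless an
    // identical vertex (within 1e-6 per component) is already stored.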
    template < typename GUM_SCALAR, class BNInferenceEngine >
    inline void MultipleInferenceEngine< GUM_SCALAR, BNInferenceEngine >::
       updateThreadCredalSets__(const NodeId&                    id,
                                const std::vector< GUM_SCALAR >& vertex,
                                const bool&                      elimRedund) {
      int   tId           = getThreadNumber();
      auto& nodeCredalSet = l_marginalSets_[tId][id];
      Size  dsize         = Size(vertex.size());

      bool eq = true;

      for (auto it = nodeCredalSet.cbegin(), itEnd = nodeCredalSet.cend();
           it != itEnd;
           ++it) {
        eq = true;

        for (Size i = 0; i < dsize; i++) {
          if (std::fabs(vertex[i] - (*it)[i]) > 1e-6) {
            eq = false;
            break;
          }
        }

        if (eq) break;
      }

      if (!eq || nodeCredalSet.size() == 0) {
        nodeCredalSet.push_back(vertex);
        return;
      } else
        return;

      /// we need this because of the next lambda return condition
      /// fabs ( *minIt - *maxIt ) > 1e-6 which never happens if there is only
      /// one vertex
      if (nodeCredalSet.size() == 1) return;

      // check that the point and all previously added ones are not inside the
      // actual polytope
      auto itEnd = std::remove_if(
         nodeCredalSet.begin(),
         nodeCredalSet.end(),
         [&](const std::vector< GUM_SCALAR >& v) -> bool {
           for (auto jt = v.cbegin(), jtEnd = v.cend(),
                     minIt    = l_marginalMin_[tId][id].cbegin(),
                     minItEnd = l_marginalMin_[tId][id].cend(),
                     maxIt    = l_marginalMax_[tId][id].cbegin(),
                     maxItEnd = l_marginalMax_[tId][id].cend();
                jt != jtEnd && minIt != minItEnd && maxIt != maxItEnd;
                ++jt, ++minIt, ++maxIt) {
             if ((std::fabs(*jt - *minIt) < 1e-6 || std::fabs(*jt - *maxIt) < 1e-6)
                 && std::fabs(*minIt - *maxIt) > 1e-6)
               return false;
           }
           return true;
         });

      nodeCredalSet.erase(itEnd, nodeCredalSet.end());

      // we need at least 2 points to make a convex combination
      if (!elimRedund || nodeCredalSet.size() <= 2) return;

      // there may be points not inside the polytope but on one of its facets,
      // meaning it's still a convex combination of vertices of this facet.
      // Here we need lrs.
      Size setSize = Size(nodeCredalSet.size());

      LRSWrapper< GUM_SCALAR > lrsWrapper;
      lrsWrapper.setUpV(dsize, setSize);

      for (const auto& vtx: nodeCredalSet)
        lrsWrapper.fillV(vtx);

      lrsWrapper.elimRedundVrep();

      l_marginalSets_[tId][id] = lrsWrapper.getOutput();
    }

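    // Merge the thread-local marginal bounds into the global marginalMin_ /
    // marginalMax_ (one OpenMP thread per working set).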
    template < typename GUM_SCALAR, class BNInferenceEngine >
    inline void MultipleInferenceEngine< GUM_SCALAR,
                                         BNInferenceEngine >::updateMarginals_() {
#pragma omp parallel
      {
        int  threadId = getThreadNumber();
        long nsize    = long(workingSet_[threadId]->size());

#pragma omp for

        for (long i = 0; i < nsize; i++) {
          Size dSize = Size(l_marginalMin_[threadId][i].size());

          for (Size j = 0; j < dSize; j++) {
            Size tsize = Size(l_marginalMin_.size());

            // go through all threads
            for (Size tId = 0; tId < tsize; tId++) {
              if (l_marginalMin_[tId][i][j] < this->marginalMin_[i][j])
                this->marginalMin_[i][j] = l_marginalMin_[tId][i][j];

              if (l_marginalMax_[tId][i][j] > this->marginalMax_[i][j])
                this->marginalMax_[i][j] = l_marginalMax_[tId][i][j];
            }   // end of : all threads
          }     // end of : all modalities
        }       // end of : all variables
      }         // end of : parallel region
    }

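    // Compute the convergence criterion: the largest absolute change between
    // the current and previous global marginal bounds, updating the old bounds
    // along the way.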
    template < typename GUM_SCALAR, class BNInferenceEngine >
    inline const GUM_SCALAR
       MultipleInferenceEngine< GUM_SCALAR,
                                BNInferenceEngine >::computeEpsilon_() {
      GUM_SCALAR eps = 0;
#pragma omp parallel
      {
        GUM_SCALAR tEps = 0;
        GUM_SCALAR delta;

        int  tId   = getThreadNumber();
        long nsize = long(workingSet_[tId]->size());

#pragma omp for

        for (long i = 0; i < nsize; i++) {
          Size dSize = Size(l_marginalMin_[tId][i].size());

          for (Size j = 0; j < dSize; j++) {
            // on min
            delta = this->marginalMin_[i][j] - this->oldMarginalMin_[i][j];
            delta = (delta < 0) ? (-delta) : delta;
            tEps  = (tEps < delta) ? delta : tEps;

            // on max
            delta = this->marginalMax_[i][j] - this->oldMarginalMax_[i][j];
            delta = (delta < 0) ? (-delta) : delta;
            tEps  = (tEps < delta) ? delta : tEps;

            this->oldMarginalMin_[i][j] = this->marginalMin_[i][j];
            this->oldMarginalMax_[i][j] = this->marginalMax_[i][j];
          }
        }   // end of : all variables

#pragma omp critical(epsilon_max)
        {
#pragma omp flush(eps)
          eps = (eps < tEps) ? tEps : eps;
        }

      }   // end of : parallel region
      return eps;
    }

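    // Merge the thread-local marginal bounds into the stored old bounds
    // oldMarginalMin_ / oldMarginalMax_.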
    template < typename GUM_SCALAR, class BNInferenceEngine >
    void MultipleInferenceEngine< GUM_SCALAR,
                                  BNInferenceEngine >::updateOldMarginals_() {
#pragma omp parallel
      {
        int  threadId = getThreadNumber();
        long nsize    = long(workingSet_[threadId]->size());

#pragma omp for

        for (long i = 0; i < nsize; i++) {
          Size dSize = Size(l_marginalMin_[threadId][i].size());

          for (Size j = 0; j < dSize; j++) {
            Size tsize = Size(l_marginalMin_.size());

            // go through all threads
            for (Size tId = 0; tId < tsize; tId++) {
              if (l_marginalMin_[tId][i][j] < this->oldMarginalMin_[i][j])
                this->oldMarginalMin_[i][j] = l_marginalMin_[tId][i][j];

              if (l_marginalMax_[tId][i][j] > this->oldMarginalMax_[i][j])
                this->oldMarginalMax_[i][j] = l_marginalMax_[tId][i][j];
            }   // end of : all threads
          }     // end of : all modalities
        }       // end of : all variables
      }         // end of : parallel region
    }

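    // Merge the per-thread credal sets into the global ones; redundancy
    // elimination is requested only for vertices of dimension greater than 2.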
    template < typename GUM_SCALAR, class BNInferenceEngine >
    void MultipleInferenceEngine< GUM_SCALAR,
                                  BNInferenceEngine >::verticesFusion_() {
      // don't create threads if there are no vertices saved
      if (!infE__::storeVertices_) return;

#pragma omp parallel
      {
        int  threadId = getThreadNumber();
        Size nsize    = Size(workingSet_[threadId]->size());

#pragma omp for

        for (long i = 0; i < long(nsize); i++) {
          Size tsize = Size(l_marginalMin_.size());

          // go through all threads
          for (long tId = 0; tId < long(tsize); tId++) {
            auto& nodeThreadCredalSet = l_marginalSets_[tId][i];

            // for each vertex, if we are at any opt marginal, add it to the set
            for (const auto& vtx: nodeThreadCredalSet) {
              // we run redundancy elimination at each step because there could
              // be 100000 threads and the set will be so huge ...
              // BUT not if vertices are of dimension 2 ! opt check and equality
              // should be enough
              infE__::updateCredalSets_(i, vtx, (vtx.size() > 2) ? true : false);
            }   // end of : nodeThreadCredalSet
          }     // end of : all threads
        }       // end of : all variables
      }         // end of : parallel region
    }

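    // Merge expectations: either recompute them from the fused credal set
    // vertices (when vertices are stored) or take the extrema of the
    // thread-local expectation bounds.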
    template < typename GUM_SCALAR, class BNInferenceEngine >
    void MultipleInferenceEngine< GUM_SCALAR, BNInferenceEngine >::expFusion_() {
      // don't create threads if there are no modalities to compute expectations
      if (this->modal_.empty()) return;

      // we can compute expectations from vertices of the final credal set
      if (infE__::storeVertices_) {
#pragma omp parallel
        {
          int threadId = getThreadNumber();

          if (!this->l_modal_[threadId].empty()) {
            Size nsize = Size(workingSet_[threadId]->size());

#pragma omp for

            for (long i = 0; i < long(nsize);
                 i++) {   // i needs to be signed (due to omp with visual c++ 15)
              std::string var_name = workingSet_[threadId]->variable(i).name();
              auto        delim    = var_name.find_first_of("_");
              var_name             = var_name.substr(0, delim);

              if (!l_modal_[threadId].exists(var_name)) continue;

              for (const auto& vertex: infE__::marginalSets_[i]) {
                GUM_SCALAR exp   = 0;
                Size       vsize = Size(vertex.size());

                for (Size mod = 0; mod < vsize; mod++)
                  exp += vertex[mod] * l_modal_[threadId][var_name][mod];

                if (exp > infE__::expectationMax_[i])
                  infE__::expectationMax_[i] = exp;

                if (exp < infE__::expectationMin_[i])
                  infE__::expectationMin_[i] = exp;
              }
            }   // end of : each variable parallel for
          }     // end of : if this thread has modals
        }       // end of parallel region
        return;
      }

#pragma omp parallel
      {
        int threadId = getThreadNumber();

        if (!this->l_modal_[threadId].empty()) {
          Size nsize = Size(workingSet_[threadId]->size());
#pragma omp for
          for (long i = 0; i < long(nsize);
               i++) {   // long instead of Idx due to omp for visual C++ 15
            std::string var_name = workingSet_[threadId]->variable(i).name();
            auto        delim    = var_name.find_first_of("_");
            var_name             = var_name.substr(0, delim);

            if (!l_modal_[threadId].exists(var_name)) continue;

            Size tsize = Size(l_expectationMax_.size());

            for (Idx tId = 0; tId < tsize; tId++) {
              if (l_expectationMax_[tId][i] > this->expectationMax_[i])
                this->expectationMax_[i] = l_expectationMax_[tId][i];

              if (l_expectationMin_[tId][i] < this->expectationMin_[i])
                this->expectationMin_[i] = l_expectationMin_[tId][i];
            }   // end of : each thread
          }     // end of : each variable
        }       // end of : if modals not empty
      }         // end of : parallel region
    }

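    // Merge the per-thread optimal networks: for every unobserved node and
    // modality, keep the networks that attain the global marginal bounds.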
    template < typename GUM_SCALAR, class BNInferenceEngine >
    void MultipleInferenceEngine< GUM_SCALAR, BNInferenceEngine >::optFusion_() {
      typedef std::vector< bool > dBN;

      Size nsize = Size(workingSet_[0]->size());

      // no parallel insert in hash-tables (OptBN)
      for (Idx i = 0; i < nsize; i++) {
        // we don't store anything for observed variables
        if (infE__::evidence_.exists(i)) continue;

        Size dSize = Size(l_marginalMin_[0][i].size());

        for (Size j = 0; j < dSize; j++) {
          // go through all threads
          std::vector< Size > keymin(3);
          keymin[0] = i;
          keymin[1] = j;
          keymin[2] = 0;
          std::vector< Size > keymax(keymin);
          keymax[2] = 1;

          Size tsize = Size(l_marginalMin_.size());

          for (Size tId = 0; tId < tsize; tId++) {
            if (l_marginalMin_[tId][i][j] == this->marginalMin_[i][j]) {
              const std::vector< dBN* >& tOpts
                 = l_optimalNet_[tId]->getBNOptsFromKey(keymin);
              Size osize = Size(tOpts.size());

              for (Size bn = 0; bn < osize; bn++) {
                infE__::dbnOpt_.insert(*tOpts[bn], keymin);
              }
            }

            if (l_marginalMax_[tId][i][j] == this->marginalMax_[i][j]) {
              const std::vector< dBN* >& tOpts
                 = l_optimalNet_[tId]->getBNOptsFromKey(keymax);
              Size osize = Size(tOpts.size());

              for (Size bn = 0; bn < osize; bn++) {
                infE__::dbnOpt_.insert(*tOpts[bn], keymax);
              }
            }
          }   // end of : all threads
        }     // end of : all modalities
      }       // end of : all variables
    }

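    // Clear all evidence and free every per-thread structure; the containers
    // will be resized again with the correct number of threads on the next
    // call to initThreadsData_.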
    template < typename GUM_SCALAR, class BNInferenceEngine >
    void MultipleInferenceEngine< GUM_SCALAR,
                                  BNInferenceEngine >::eraseAllEvidence() {
      infE__::eraseAllEvidence();
      Size tsize = Size(workingSet_.size());

      // delete pointers
      for (Size bn = 0; bn < tsize; bn++) {
        if (infE__::storeVertices_) l_marginalSets_[bn].clear();

        if (workingSet_[bn] != nullptr) delete workingSet_[bn];

        if (infE__::storeBNOpt_)
          if (l_inferenceEngine_[bn] != nullptr) delete l_optimalNet_[bn];

        if (this->workingSetE_[bn] != nullptr) {
          for (const auto ev: *workingSetE_[bn])
            delete ev;

          delete workingSetE_[bn];
        }

        if (l_inferenceEngine_[bn] != nullptr) delete l_inferenceEngine_[bn];
      }

      // this is important, those will be resized with the correct number of
      // threads.

      workingSet_.clear();
      workingSetE_.clear();
      l_inferenceEngine_.clear();
      l_optimalNet_.clear();

      l_marginalMin_.clear();
      l_marginalMax_.clear();
      l_expectationMin_.clear();
      l_expectationMax_.clear();
      l_modal_.clear();
      l_marginalSets_.clear();
      l_evidence_.clear();
      l_clusters_.clear();
    }
  }   // namespace credal
}   // namespace gum