aGrUM
0.20.3
a C++ library for (probabilistic) graphical models
multipleInferenceEngine_tpl.h
Go to the documentation of this file.
1
/**
2
*
3
* Copyright (c) 2005-2021 by Pierre-Henri WUILLEMIN(@LIP6) & Christophe GONZALES(@AMU)
4
* info_at_agrum_dot_org
5
*
6
* This library is free software: you can redistribute it and/or modify
7
* it under the terms of the GNU Lesser General Public License as published by
8
* the Free Software Foundation, either version 3 of the License, or
9
* (at your option) any later version.
10
*
11
* This library is distributed in the hope that it will be useful,
12
* but WITHOUT ANY WARRANTY; without even the implied warranty of
13
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14
* GNU Lesser General Public License for more details.
15
*
16
* You should have received a copy of the GNU Lesser General Public License
17
* along with this library. If not, see <http://www.gnu.org/licenses/>.
18
*
19
*/
20
21
22
#
include
<
agrum
/
CN
/
inference
/
inferenceEngine
.
h
>
23
#
include
<
agrum
/
CN
/
inference
/
multipleInferenceEngine
.
h
>
24
25
namespace
gum
{
26
namespace
credal
{
27
28
template < typename GUM_SCALAR, class BNInferenceEngine >
MultipleInferenceEngine< GUM_SCALAR, BNInferenceEngine >::MultipleInferenceEngine(
   const CredalNet< GUM_SCALAR >& credalNet) :
    // delegate all common initialization to the base inference engine
    InferenceEngine< GUM_SCALAR >::InferenceEngine(credalNet) {
  GUM_CONSTRUCTOR(MultipleInferenceEngine);
}
34
35
template < typename GUM_SCALAR, class BNInferenceEngine >
MultipleInferenceEngine< GUM_SCALAR, BNInferenceEngine >::~MultipleInferenceEngine() {
  // NOTE(review): no cleanup is done here; the per-thread raw pointers
  // (workingSet_, workingSetE_, l_optimalNet_, l_inferenceEngine_) are
  // released in eraseAllEvidence() — confirm callers always invoke it.
  GUM_DESTRUCTOR(MultipleInferenceEngine);
}
39
40
template < typename GUM_SCALAR, class BNInferenceEngine >
inline void MultipleInferenceEngine< GUM_SCALAR, BNInferenceEngine >::initThreadsData_(
   const Size& num_threads,
   const bool  _storeVertices_,
   const bool  _storeBNOpt_) {
  // (Re)size every thread-local structure to one slot per thread.
  // Pointer vectors are filled with nullptr; ownership is assigned later.
  workingSet_.clear();
  workingSet_.resize(num_threads, nullptr);
  workingSetE_.clear();
  workingSetE_.resize(num_threads, nullptr);

  // per-thread marginal and expectation bounds
  l_marginalMin_.clear();
  l_marginalMin_.resize(num_threads);
  l_marginalMax_.clear();
  l_marginalMax_.resize(num_threads);
  l_expectationMin_.clear();
  l_expectationMin_.resize(num_threads);
  l_expectationMax_.clear();
  l_expectationMax_.resize(num_threads);

  l_clusters_.clear();
  l_clusters_.resize(num_threads);

  // per-thread credal-set vertices, only kept when requested
  if (_storeVertices_) {
    l_marginalSets_.clear();
    l_marginalSets_.resize(num_threads);
  }

  if (_storeBNOpt_) {
    // free any optimal nets left over from a previous run before resizing
    for (Size ptr = 0; ptr < this->l_optimalNet_.size(); ptr++)
      if (this->l_optimalNet_[ptr] != nullptr) delete l_optimalNet_[ptr];

    l_optimalNet_.clear();
    l_optimalNet_.resize(num_threads);
  }

  l_modal_.clear();
  l_modal_.resize(num_threads);

  // seed the "old" marginal bounds with the current global ones so that the
  // first computeEpsilon_() measures progress from this starting point
  _infE_::oldMarginalMin_.clear();
  this->oldMarginalMin_ = this->marginalMin_;
  this->oldMarginalMax_.clear();
  this->oldMarginalMax_ = this->marginalMax_;
}
83
84
template < typename GUM_SCALAR, class BNInferenceEngine >
inline bool MultipleInferenceEngine< GUM_SCALAR, BNInferenceEngine >::updateThread_(
   const NodeId&                    id,
   const std::vector< GUM_SCALAR >& vertex,
   const bool&                      elimRedund) {
  // every thread-local structure is indexed by this thread's id
  int tId = getThreadNumber();

  // save E(X) if we don't save vertices
  if (!_infE_::storeVertices_ && !l_modal_[tId].empty()) {
    // modalities are keyed by the variable-name prefix before the first '_'
    std::string var_name = workingSet_[tId]->variable(id).name();
    auto        delim    = var_name.find_first_of("_");
    var_name             = var_name.substr(0, delim);

    if (l_modal_[tId].exists(var_name)) {
      // expectation of this vertex under the modality weights
      GUM_SCALAR exp   = 0;
      Size       vsize = Size(vertex.size());

      for (Size mod = 0; mod < vsize; mod++)
        exp += vertex[mod] * l_modal_[tId][var_name][mod];

      // widen this thread's expectation bounds
      if (exp > l_expectationMax_[tId][id]) l_expectationMax_[tId][id] = exp;

      if (exp < l_expectationMin_[tId][id]) l_expectationMin_[tId][id] = exp;
    }
  }   // end of : if modal (map) not empty

  bool newOne = false;   // did any modality improve (or tie) a local bound?
  bool added  = false;   // vertex already forwarded to the credal set?
  bool result = false;   // did any insertion into the optimal-net store succeed?
  // for burn in, we need to keep checking on local marginals and not global
  // ones (faster inference)
  // we also don't want to store dbn for observed variables since there will
  // be a huge number of them (probably all of them).
  Size vsize = Size(vertex.size());

  for (Size mod = 0; mod < vsize; mod++) {
    // strict improvement of the local lower bound
    if (vertex[mod] < l_marginalMin_[tId][id][mod]) {
      l_marginalMin_[tId][id][mod] = vertex[mod];
      newOne                       = true;

      if (_infE_::storeBNOpt_ && !_infE_::evidence_.exists(id)) {
        // key encodes { node id, modality, 0 = min bound }
        std::vector< Size > key(3);
        key[0] = id;
        key[1] = mod;
        key[2] = 0;

        if (l_optimalNet_[tId]->insert(key, true)) result = true;
      }
    }

    // strict improvement of the local upper bound
    if (vertex[mod] > l_marginalMax_[tId][id][mod]) {
      l_marginalMax_[tId][id][mod] = vertex[mod];
      newOne                       = true;

      if (_infE_::storeBNOpt_ && !_infE_::evidence_.exists(id)) {
        // key encodes { node id, modality, 1 = max bound }
        std::vector< Size > key(3);
        key[0] = id;
        key[1] = mod;
        key[2] = 1;

        if (l_optimalNet_[tId]->insert(key, true)) result = true;
      }
    } else if (vertex[mod] == l_marginalMin_[tId][id][mod]
               || vertex[mod] == l_marginalMax_[tId][id][mod]) {
      // tie with an existing bound: still a candidate optimum, inserted
      // with the "not new" flag (false)
      newOne = true;

      if (_infE_::storeBNOpt_ && vertex[mod] == l_marginalMin_[tId][id][mod]
          && !_infE_::evidence_.exists(id)) {
        std::vector< Size > key(3);
        key[0] = id;
        key[1] = mod;
        key[2] = 0;

        if (l_optimalNet_[tId]->insert(key, false)) result = true;
      }

      if (_infE_::storeBNOpt_ && vertex[mod] == l_marginalMax_[tId][id][mod]
          && !_infE_::evidence_.exists(id)) {
        std::vector< Size > key(3);
        key[0] = id;
        key[1] = mod;
        key[2] = 1;

        if (l_optimalNet_[tId]->insert(key, false)) result = true;
      }
    }

    // store point to compute credal set vertices.
    // check for redundancy at each step or at the end ?
    // (the `added` flag guarantees the vertex is forwarded at most once)
    if (_infE_::storeVertices_ && !added && newOne) {
      _updateThreadCredalSets_(id, vertex, elimRedund);
      added = true;
    }
  }

  // if all variables didn't get better marginals, we will delete
  if (_infE_::storeBNOpt_ && result) return true;

  return false;
}
186
187
template < typename GUM_SCALAR, class BNInferenceEngine >
inline void
   MultipleInferenceEngine< GUM_SCALAR, BNInferenceEngine >::_updateThreadCredalSets_(
      const NodeId&                    id,
      const std::vector< GUM_SCALAR >& vertex,
      const bool&                      elimRedund) {
  int   tId           = getThreadNumber();
  auto& nodeCredalSet = l_marginalSets_[tId][id];
  Size  dsize         = Size(vertex.size());

  // look for a vertex already in the set that equals `vertex`
  // component-wise (within 1e-6)
  bool eq = true;

  for (auto it = nodeCredalSet.cbegin(), itEnd = nodeCredalSet.cend(); it != itEnd;
       ++it) {
    eq = true;

    for (Size i = 0; i < dsize; i++) {
      if (std::fabs(vertex[i] - (*it)[i]) > 1e-6) {
        eq = false;
        break;
      }
    }

    if (eq) break;
  }

  // append only if no (approximately) equal vertex is present
  if (!eq || nodeCredalSet.size() == 0) {
    nodeCredalSet.push_back(vertex);
    return;
  } else
    return;

  // NOTE(review): both branches above return unconditionally, so ALL of the
  // code below — the single-vertex guard, the remove_if polytope pruning and
  // the LRS redundancy elimination (which is what `elimRedund` controls) —
  // is unreachable dead code. Confirm whether the early `else return;` is
  // intentional before removing or re-enabling this logic.

  /// we need this because of the next lambda return condition fabs ( *minIt
  /// -
  /// *maxIt ) > 1e-6 which never happens if there is only one vertice
  if (nodeCredalSet.size() == 1) return;

  // check that the point and all previously added ones are not inside the
  // actual
  // polytope
  auto itEnd
     = std::remove_if(nodeCredalSet.begin(),
                      nodeCredalSet.end(),
                      [&](const std::vector< GUM_SCALAR >& v) -> bool {
                        // keep v only if it touches a current bound on some
                        // modality whose interval is non-degenerate
                        for (auto jt       = v.cbegin(),
                                  jtEnd    = v.cend(),
                                  minIt    = l_marginalMin_[tId][id].cbegin(),
                                  minItEnd = l_marginalMin_[tId][id].cend(),
                                  maxIt    = l_marginalMax_[tId][id].cbegin(),
                                  maxItEnd = l_marginalMax_[tId][id].cend();
                             jt != jtEnd && minIt != minItEnd && maxIt != maxItEnd;
                             ++jt, ++minIt, ++maxIt) {
                          if ((std::fabs(*jt - *minIt) < 1e-6
                               || std::fabs(*jt - *maxIt) < 1e-6)
                              && std::fabs(*minIt - *maxIt) > 1e-6)
                            return false;
                        }
                        return true;
                      });

  nodeCredalSet.erase(itEnd, nodeCredalSet.end());

  // we need at least 2 points to make a convex combination
  if (!elimRedund || nodeCredalSet.size() <= 2) return;

  // there may be points not inside the polytope but on one of it's facet,
  // meaning it's still a convex combination of vertices of this facet. Here
  // we
  // need lrs.
  Size setSize = Size(nodeCredalSet.size());

  LRSWrapper< GUM_SCALAR > lrsWrapper;
  lrsWrapper.setUpV(dsize, setSize);

  for (const auto& vtx: nodeCredalSet)
    lrsWrapper.fillV(vtx);

  lrsWrapper.elimRedundVrep();

  l_marginalSets_[tId][id] = lrsWrapper.getOutput();
}
265
266
template
<
typename
GUM_SCALAR
,
class
BNInferenceEngine
>
267
inline
void
MultipleInferenceEngine
<
GUM_SCALAR
,
BNInferenceEngine
>::
updateMarginals_
() {
268
#
pragma
omp
parallel
269
{
270
int
threadId
=
getThreadNumber
();
271
long
nsize
=
long
(
workingSet_
[
threadId
]->
size
());
272
273
#
pragma
omp
for
274
275
for
(
long
i
= 0;
i
<
nsize
;
i
++) {
276
Size
dSize
=
Size
(
l_marginalMin_
[
threadId
][
i
].
size
());
277
278
for
(
Size
j
= 0;
j
<
dSize
;
j
++) {
279
Size
tsize
=
Size
(
l_marginalMin_
.
size
());
280
281
// go through all threads
282
for
(
Size
tId
= 0;
tId
<
tsize
;
tId
++) {
283
if
(
l_marginalMin_
[
tId
][
i
][
j
] <
this
->
marginalMin_
[
i
][
j
])
284
this
->
marginalMin_
[
i
][
j
] =
l_marginalMin_
[
tId
][
i
][
j
];
285
286
if
(
l_marginalMax_
[
tId
][
i
][
j
] >
this
->
marginalMax_
[
i
][
j
])
287
this
->
marginalMax_
[
i
][
j
] =
l_marginalMax_
[
tId
][
i
][
j
];
288
}
// end of : all threads
289
}
// end of : all modalities
290
}
// end of : all variables
291
}
// end of : parallel region
292
}
293
294
template < typename GUM_SCALAR, class BNInferenceEngine >
inline const GUM_SCALAR
   MultipleInferenceEngine< GUM_SCALAR, BNInferenceEngine >::computeEpsilon_() {
  // epsilon = the largest absolute change of any global marginal bound since
  // the previous call; the "old" bounds are refreshed along the way
  GUM_SCALAR eps = 0;
#pragma omp parallel
  {
    GUM_SCALAR tEps = 0;   // per-thread maximum change
    GUM_SCALAR delta;

    int  tId   = getThreadNumber();
    long nsize = long(workingSet_[tId]->size());

#pragma omp for

    for (long i = 0; i < nsize; i++) {
      Size dSize = Size(l_marginalMin_[tId][i].size());

      for (Size j = 0; j < dSize; j++) {
        // on min
        delta = this->marginalMin_[i][j] - this->oldMarginalMin_[i][j];
        delta = (delta < 0) ? (-delta) : delta;   // |delta|
        tEps  = (tEps < delta) ? delta : tEps;

        // on max
        delta = this->marginalMax_[i][j] - this->oldMarginalMax_[i][j];
        delta = (delta < 0) ? (-delta) : delta;   // |delta|
        tEps  = (tEps < delta) ? delta : tEps;

        // remember current bounds for the next epsilon computation
        this->oldMarginalMin_[i][j] = this->marginalMin_[i][j];
        this->oldMarginalMax_[i][j] = this->marginalMax_[i][j];
      }
    }   // end of : all variables

    // reduce the per-thread maxima into the shared `eps`
#pragma omp critical(epsilon_max)
    {
#pragma omp flush(eps)
      eps = (eps < tEps) ? tEps : eps;
    }

  }   // end of : parallel region
  return eps;
}
336
337
template
<
typename
GUM_SCALAR
,
class
BNInferenceEngine
>
338
void
MultipleInferenceEngine
<
GUM_SCALAR
,
BNInferenceEngine
>::
updateOldMarginals_
() {
339
#
pragma
omp
parallel
340
{
341
int
threadId
=
getThreadNumber
();
342
long
nsize
=
long
(
workingSet_
[
threadId
]->
size
());
343
344
#
pragma
omp
for
345
346
for
(
long
i
= 0;
i
<
nsize
;
i
++) {
347
Size
dSize
=
Size
(
l_marginalMin_
[
threadId
][
i
].
size
());
348
349
for
(
Size
j
= 0;
j
<
dSize
;
j
++) {
350
Size
tsize
=
Size
(
l_marginalMin_
.
size
());
351
352
// go through all threads
353
for
(
Size
tId
= 0;
tId
<
tsize
;
tId
++) {
354
if
(
l_marginalMin_
[
tId
][
i
][
j
] <
this
->
oldMarginalMin_
[
i
][
j
])
355
this
->
oldMarginalMin_
[
i
][
j
] =
l_marginalMin_
[
tId
][
i
][
j
];
356
357
if
(
l_marginalMax_
[
tId
][
i
][
j
] >
this
->
oldMarginalMax_
[
i
][
j
])
358
this
->
oldMarginalMax_
[
i
][
j
] =
l_marginalMax_
[
tId
][
i
][
j
];
359
}
// end of : all threads
360
}
// end of : all modalities
361
}
// end of : all variables
362
}
// end of : parallel region
363
}
364
365
template < typename GUM_SCALAR, class BNInferenceEngine >
void MultipleInferenceEngine< GUM_SCALAR, BNInferenceEngine >::verticesFusion_() {
  // don't create threads if there are no vertices saved
  if (!_infE_::storeVertices_) return;

#pragma omp parallel
  {
    int  threadId = getThreadNumber();
    Size nsize    = Size(workingSet_[threadId]->size());

#pragma omp for

    for (long i = 0; i < long(nsize); i++) {
      Size tsize = Size(l_marginalMin_.size());

      // go through all threads
      for (long tId = 0; tId < long(tsize); tId++) {
        auto& nodeThreadCredalSet = l_marginalSets_[tId][i];

        // for each vertex, if we are at any opt marginal, add it to the set
        for (const auto& vtx: nodeThreadCredalSet) {
          // we run redundancy elimination at each step
          // because there could be 100000 threads and the set will be so
          // huge
          // ...
          // BUT not if vertices are of dimension 2 ! opt check and equality
          // should be enough
          _infE_::updateCredalSets_(i, vtx, (vtx.size() > 2) ? true : false);
        }   // end of : nodeThreadCredalSet
      }     // end of : all threads
    }       // end of : all variables
  }         // end of : parallel region
}
398
399
template < typename GUM_SCALAR, class BNInferenceEngine >
void MultipleInferenceEngine< GUM_SCALAR, BNInferenceEngine >::expFusion_() {
  // don't create threads if there are no modalities to compute expectations
  if (this->modal_.empty()) return;

  // we can compute expectations from vertices of the final credal set
  if (_infE_::storeVertices_) {
#pragma omp parallel
    {
      int threadId = getThreadNumber();

      if (!this->l_modal_[threadId].empty()) {
        Size nsize = Size(workingSet_[threadId]->size());

#pragma omp for

        for (long i = 0; i < long(nsize); i++) {   // i needs to be signed (due to omp with
                                                   // visual c++ 15)
          // modality key = variable-name prefix before the first '_'
          std::string var_name = workingSet_[threadId]->variable(i).name();
          auto        delim    = var_name.find_first_of("_");
          var_name             = var_name.substr(0, delim);

          if (!l_modal_[threadId].exists(var_name)) continue;

          // one expectation per vertex of the fused credal set of node i
          for (const auto& vertex: _infE_::marginalSets_[i]) {
            GUM_SCALAR exp   = 0;
            Size       vsize = Size(vertex.size());

            for (Size mod = 0; mod < vsize; mod++)
              exp += vertex[mod] * l_modal_[threadId][var_name][mod];

            // widen the global expectation bounds
            if (exp > _infE_::expectationMax_[i]) _infE_::expectationMax_[i] = exp;

            if (exp < _infE_::expectationMin_[i]) _infE_::expectationMin_[i] = exp;
          }
        }   // end of : each variable parallel for
      }     // end of : if this thread has modals
    }       // end of parallel region
    return;
  }

  // no vertices stored: fuse the per-thread expectation bounds instead
#pragma omp parallel
  {
    int threadId = getThreadNumber();

    if (!this->l_modal_[threadId].empty()) {
      Size nsize = Size(workingSet_[threadId]->size());
#pragma omp for
      for (long i = 0; i < long(nsize);
           i++) {   // long instead of Idx due to omp for visual C++15
        std::string var_name = workingSet_[threadId]->variable(i).name();
        auto        delim    = var_name.find_first_of("_");
        var_name             = var_name.substr(0, delim);

        if (!l_modal_[threadId].exists(var_name)) continue;

        Size tsize = Size(l_expectationMax_.size());

        // take the extrema over every thread's local expectations
        for (Idx tId = 0; tId < tsize; tId++) {
          if (l_expectationMax_[tId][i] > this->expectationMax_[i])
            this->expectationMax_[i] = l_expectationMax_[tId][i];

          if (l_expectationMin_[tId][i] < this->expectationMin_[i])
            this->expectationMin_[i] = l_expectationMin_[tId][i];
        }   // end of : each thread
      }     // end of : each variable
    }       // end of : if modals not empty
  }         // end of : parallel region
}
468
469
template < typename GUM_SCALAR, class BNInferenceEngine >
void MultipleInferenceEngine< GUM_SCALAR, BNInferenceEngine >::optFusion_() {
  // a "dBN" is the bit-vector encoding of one optimal net candidate
  typedef std::vector< bool > dBN;

  Size nsize = Size(workingSet_[0]->size());

  // no parallel insert in hash-tables (OptBN)
  for (Idx i = 0; i < nsize; i++) {
    // we don't store anything for observed variables
    if (_infE_::evidence_.exists(i)) continue;

    Size dSize = Size(l_marginalMin_[0][i].size());

    for (Size j = 0; j < dSize; j++) {
      // go through all threads
      // keys encode { node, modality, 0 = min / 1 = max } — must match the
      // encoding used by updateThread_
      std::vector< Size > keymin(3);
      keymin[0] = i;
      keymin[1] = j;
      keymin[2] = 0;
      std::vector< Size > keymax(keymin);
      keymax[2] = 1;

      Size tsize = Size(l_marginalMin_.size());

      for (Size tId = 0; tId < tsize; tId++) {
        // this thread achieved the global lower bound: keep its optimal nets
        if (l_marginalMin_[tId][i][j] == this->marginalMin_[i][j]) {
          const std::vector< dBN* >& tOpts
             = l_optimalNet_[tId]->getBNOptsFromKey(keymin);
          Size osize = Size(tOpts.size());

          for (Size bn = 0; bn < osize; bn++) {
            _infE_::dbnOpt_.insert(*tOpts[bn], keymin);
          }
        }

        // this thread achieved the global upper bound: keep its optimal nets
        if (l_marginalMax_[tId][i][j] == this->marginalMax_[i][j]) {
          const std::vector< dBN* >& tOpts
             = l_optimalNet_[tId]->getBNOptsFromKey(keymax);
          Size osize = Size(tOpts.size());

          for (Size bn = 0; bn < osize; bn++) {
            _infE_::dbnOpt_.insert(*tOpts[bn], keymax);
          }
        }
      }   // end of : all threads
    }     // end of : all modalities
  }       // end of : all variables
}
515
516
template
<
typename
GUM_SCALAR
,
class
BNInferenceEngine
>
517
void
MultipleInferenceEngine
<
GUM_SCALAR
,
BNInferenceEngine
>::
eraseAllEvidence
() {
518
_infE_
::
eraseAllEvidence
();
519
Size
tsize
=
Size
(
workingSet_
.
size
());
520
521
// delete pointers
522
for
(
Size
bn
= 0;
bn
<
tsize
;
bn
++) {
523
if
(
_infE_
::
storeVertices_
)
l_marginalSets_
[
bn
].
clear
();
524
525
if
(
workingSet_
[
bn
] !=
nullptr
)
delete
workingSet_
[
bn
];
526
527
if
(
_infE_
::
storeBNOpt_
)
528
if
(
l_inferenceEngine_
[
bn
] !=
nullptr
)
delete
l_optimalNet_
[
bn
];
529
530
if
(
this
->
workingSetE_
[
bn
] !=
nullptr
) {
531
for
(
const
auto
ev
: *
workingSetE_
[
bn
])
532
delete
ev
;
533
534
delete
workingSetE_
[
bn
];
535
}
536
537
if
(
l_inferenceEngine_
[
bn
] !=
nullptr
)
delete
l_inferenceEngine_
[
bn
];
538
}
539
540
// this is important, those will be resized with the correct number of
541
// threads.
542
543
workingSet_
.
clear
();
544
workingSetE_
.
clear
();
545
l_inferenceEngine_
.
clear
();
546
l_optimalNet_
.
clear
();
547
548
l_marginalMin_
.
clear
();
549
l_marginalMax_
.
clear
();
550
l_expectationMin_
.
clear
();
551
l_expectationMax_
.
clear
();
552
l_modal_
.
clear
();
553
l_marginalSets_
.
clear
();
554
l_evidence_
.
clear
();
555
l_clusters_
.
clear
();
556
}
557
}
// namespace credal
558
}
// namespace gum
gum::Set::emplace
INLINE void emplace(Args &&... args)
Definition:
set_tpl.h:643
gum::credal
namespace for all credal networks entities
Definition:
LpInterface.cpp:37