29 template <
typename GUM_SCALAR,
class BNInferenceEngine >
36 template <
typename GUM_SCALAR,
class BNInferenceEngine >
42 template <
typename GUM_SCALAR,
class BNInferenceEngine >
45 const Size& num_threads,
46 const bool __storeVertices,
47 const bool __storeBNOpt) {
65 if (__storeVertices) {
87 template <
typename GUM_SCALAR,
class BNInferenceEngine >
91 const std::vector< GUM_SCALAR >& vertex,
92 const bool& elimRedund) {
97 std::string var_name =
_workingSet[tId]->variable(
id).name();
98 auto delim = var_name.find_first_of(
"_");
99 var_name = var_name.substr(0, delim);
101 if (
_l_modal[tId].exists(var_name)) {
105 for (
Size mod = 0; mod < vsize; mod++)
106 exp += vertex[mod] *
_l_modal[tId][var_name][mod];
125 for (
Size mod = 0; mod < vsize; mod++) {
131 std::vector< Size > key(3);
145 std::vector< Size > key(3);
158 std::vector< Size > key(3);
168 std::vector< Size > key(3);
191 template <
typename GUM_SCALAR,
class BNInferenceEngine >
194 const std::vector< GUM_SCALAR >& vertex,
195 const bool& elimRedund) {
202 for (
auto it = nodeCredalSet.cbegin(), itEnd = nodeCredalSet.cend();
207 for (
Size i = 0; i < dsize; i++) {
208 if (std::fabs(vertex[i] - (*it)[i]) > 1e-6) {
217 if (!eq || nodeCredalSet.size() == 0) {
218 nodeCredalSet.push_back(vertex);
226 if (nodeCredalSet.size() == 1)
return;
231 auto itEnd = std::remove_if(
232 nodeCredalSet.begin(),
234 [&](
const std::vector< GUM_SCALAR >& v) ->
bool {
235 for (
auto jt = v.cbegin(),
241 jt != jtEnd && minIt != minItEnd && maxIt != maxItEnd;
242 ++jt, ++minIt, ++maxIt) {
243 if ((std::fabs(*jt - *minIt) < 1e-6 || std::fabs(*jt - *maxIt) < 1e-6)
244 && std::fabs(*minIt - *maxIt) > 1e-6)
250 nodeCredalSet.erase(itEnd, nodeCredalSet.end());
253 if (!elimRedund || nodeCredalSet.size() <= 2)
return;
259 Size setSize =
Size(nodeCredalSet.size());
262 lrsWrapper.
setUpV(dsize, setSize);
264 for (
const auto& vtx : nodeCredalSet)
265 lrsWrapper.fillV(vtx);
267 lrsWrapper.elimRedundVrep();
272 template <
typename GUM_SCALAR,
class BNInferenceEngine >
282 for (
long i = 0; i < nsize; i++) {
285 for (
Size j = 0; j < dSize; j++) {
289 for (
Size tId = 0; tId < tsize; tId++) {
301 template <
typename GUM_SCALAR,
class BNInferenceEngine >
302 inline const GUM_SCALAR
316 for (
long i = 0; i < nsize; i++) {
319 for (
Size j = 0; j < dSize; j++) {
322 delta = (delta < 0) ? (-delta) : delta;
323 tEps = (tEps < delta) ? delta : tEps;
327 delta = (delta < 0) ? (-delta) : delta;
328 tEps = (tEps < delta) ? delta : tEps;
335 #pragma omp critical(epsilon_max) 337 #pragma omp flush(eps) 338 eps = (eps < tEps) ? tEps : eps;
345 template <
typename GUM_SCALAR,
class BNInferenceEngine >
355 for (
long i = 0; i < nsize; i++) {
358 for (
Size j = 0; j < dSize; j++) {
362 for (
Size tId = 0; tId < tsize; tId++) {
374 template <
typename GUM_SCALAR,
class BNInferenceEngine >
387 for (
long i = 0; i < long(nsize); i++) {
391 for (
long tId = 0; tId < long(tsize); tId++) {
395 for (
const auto& vtx : nodeThreadCredalSet) {
409 template <
typename GUM_SCALAR,
class BNInferenceEngine >
412 if (this->
_modal.empty())
return;
420 if (!this->
_l_modal[threadId].empty()) {
425 for (
long i = 0; i < long(nsize);
428 std::string var_name =
_workingSet[threadId]->variable(i).name();
429 auto delim = var_name.find_first_of(
"_");
430 var_name = var_name.substr(0, delim);
432 if (!
_l_modal[threadId].exists(var_name))
continue;
438 for (
Size mod = 0; mod < vsize; mod++)
439 exp += vertex[mod] *
_l_modal[threadId][var_name][mod];
457 if (!this->
_l_modal[threadId].empty()) {
460 for (
long i = 0; i < long(nsize);
462 std::string var_name =
_workingSet[threadId]->variable(i).name();
463 auto delim = var_name.find_first_of(
"_");
464 var_name = var_name.substr(0, delim);
466 if (!
_l_modal[threadId].exists(var_name))
continue;
470 for (
Idx tId = 0; tId < tsize; tId++) {
482 template <
typename GUM_SCALAR,
class BNInferenceEngine >
484 typedef std::vector< bool > dBN;
489 for (
Idx i = 0; i < nsize; i++) {
495 for (
Size j = 0; j < dSize; j++) {
497 std::vector< Size > keymin(3);
501 std::vector< Size > keymax(keymin);
506 for (
Size tId = 0; tId < tsize; tId++) {
508 const std::vector< dBN* >& tOpts =
512 for (
Size bn = 0; bn < osize; bn++) {
518 const std::vector< dBN* >& tOpts =
522 for (
Size bn = 0; bn < osize; bn++) {
531 template <
typename GUM_SCALAR,
class BNInferenceEngine >
538 for (
Size bn = 0; bn < tsize; bn++) {
std::vector< BNInferenceEngine *> _l_inferenceEngine
Threads BNInferenceEngine.
Copyright 2005-2019 Pierre-Henri WUILLEMIN et Christophe GONZALES (LIP6) {prenom.nom}_at_lip6.fr.
void _initThreadsData(const Size &num_threads, const bool __storeVertices, const bool __storeBNOpt)
Initialize threads data.
margi _oldMarginalMin
Old lower marginals used to compute epsilon.
__expes _l_expectationMin
Threads lower expectations, one per thread.
unsigned int getThreadNumber()
Get the calling thread id.
bool _storeBNOpt
True if optimal IBayesNets are stored during inference, False otherwise.
credalSet _marginalSets
Credal sets vertices, if enabled.
margi _marginalMin
Lower marginals.
void _optFusion()
Fusion of threads optimal IBayesNet.
__margis _l_marginalMin
Threads lower marginals, one per thread.
void _expFusion()
Fusion of threads expectations.
margi _oldMarginalMax
Old upper marginals used to compute epsilon.
Copyright 2005-2019 Pierre-Henri WUILLEMIN et Christophe GONZALES (LIP6) {prenom.nom}_at_lip6.fr.
std::vector< List< const Potential< GUM_SCALAR > *> *> _workingSetE
Threads evidence.
Copyright 2005-2019 Pierre-Henri WUILLEMIN et Christophe GONZALES (LIP6) {prenom.nom}_at_lip6.fr.
MultipleInferenceEngine(const CredalNet< GUM_SCALAR > &credalNet)
Constructor.
expe _expectationMax
Upper expectations, if some variables modalities were inserted.
virtual void eraseAllEvidence()
Erase all inference related data to perform another one.
std::vector< VarMod2BNsMap< GUM_SCALAR > *> _l_optimalNet
Threads optimal IBayesNet.
std::vector< __bnet *> _workingSet
Threads IBayesNet.
Class template representing a Credal Network.
Class template acting as a wrapper for Lexicographic Reverse Search by David Avis.
__margis _l_evidence
Threads evidence.
void _updateMarginals()
Fusion of threads marginals.
VarMod2BNsMap< GUM_SCALAR > _dbnOpt
Object used to efficiently store optimal bayes net during inference, for some algorithms.
void _updateOldMarginals()
Update old marginals (from current marginals).
__expes _l_expectationMax
Threads upper expectations, one per thread.
void setUpV(const Size &card, const Size &vertices)
Sets up a V-representation.
dynExpe _modal
Variables modalities used to compute expectations.
void _updateCredalSets(const NodeId &id, const std::vector< GUM_SCALAR > &vertex, const bool &elimRedund=false)
Given a node id and one of its possible vertices, update its credal set.
__clusters _l_clusters
Threads clusters.
Abstract class template representing a CredalNet inference engine.
margi _evidence
Holds observed variables states.
void clear()
Removes all the elements in the hash table.
expe _expectationMin
Lower expectations, if some variables modalities were inserted.
bool _updateThread(const NodeId &id, const std::vector< GUM_SCALAR > &vertex, const bool &elimRedund=false)
Update thread information (marginals, expectations, IBayesNet, vertices) for a given node id...
virtual ~MultipleInferenceEngine()
Destructor.
__modals _l_modal
Threads modalities.
Size Idx
Type for indexes.
Class template representing a CredalNet inference engine using one or more IBayesNet inference engine...
__margis _l_marginalMax
Threads upper marginals, one per thread.
bool _storeVertices
True if credal sets vertices are stored, False otherwise.
std::size_t Size
In aGrUM, hashed values are unsigned long int.
__credalSets _l_marginalSets
Threads vertices.
void __updateThreadCredalSets(const NodeId &id, const std::vector< GUM_SCALAR > &vertex, const bool &elimRedund)
Ask for redundancy elimination of a node credal set of a calling thread.
Size NodeId
Type for node ids.
margi _marginalMax
Upper marginals.
virtual void eraseAllEvidence()
Erase all inference related data to perform another one.
const GUM_SCALAR _computeEpsilon()
Compute epsilon and update old marginals.