#include <agrum/tools/core/exceptions.h>

namespace gum {
  namespace credal {

    template < typename GUM_SCALAR, class BNInferenceEngine >
    CNMonteCarloSampling< GUM_SCALAR, BNInferenceEngine >::CNMonteCarloSampling(
       const CredalNet< GUM_SCALAR >& credalNet) :
        MultipleInferenceEngine< GUM_SCALAR, BNInferenceEngine >(credalNet) {
      // default settings: no repetitive independence, no storage of the
      // credal sets' vertices, no storage of the optimal IBayesNets
      infEs__::repetitiveInd_ = false;
      infEs__::storeVertices_ = false;
      infEs__::storeBNOpt_    = false;

      // the approximation scheme stops on timeout or on epsilon, with a
      // sampling period of 1000 iterations
      this->enableMaxTime();
      this->setPeriodSize(1000);

      GUM_CONSTRUCTOR(CNMonteCarloSampling);
    }
    template < typename GUM_SCALAR, class BNInferenceEngine >
    CNMonteCarloSampling< GUM_SCALAR, BNInferenceEngine >::~CNMonteCarloSampling() {
      GUM_DESTRUCTOR(CNMonteCarloSampling);
    }
    template < typename GUM_SCALAR, class BNInferenceEngine >
    void CNMonteCarloSampling< GUM_SCALAR, BNInferenceEngine >::makeInference() {
      if (infEs__::repetitiveInd_) {
        try {
          this->repetitiveInit_();
        } catch (InvalidArgument& err) {
          // the network has no repetitive structure: fall back to the
          // non-repetitive case
          infEs__::repetitiveInd_ = false;
        }
      }

      mcInitApproximationScheme__();
      mcThreadDataCopy__();

      // run at least one period before testing the stopping criteria
      GUM_SCALAR eps = 1.;

      auto psize = this->periodSize();

      do {
        if (this->continueApproximationScheme(eps)) {
#pragma omp parallel for
          for (int iter = 0; iter < int(psize); iter++) {
            // each thread samples an IBayesNet from the CredalNet, runs an
            // exact inference on it and updates its local bounds
            threadInference__();
            threadUpdate__();
          }   // end of : parallel periodSize

          this->updateApproximationScheme(int(psize));

          this->updateMarginals_();   // fusion of the threads' marginals

          eps = this->computeEpsilon_();   // also updates the old marginals
        }
      } while (this->continueApproximationScheme(eps));

      if (!this->modal_.empty()) { this->expFusion_(); }

      if (infEs__::storeBNOpt_) { this->optFusion_(); }

      if (infEs__::storeVertices_) { this->verticesFusion_(); }

      if (!this->modal_.empty()) {
        this->dynamicExpectations_();
      }
    }
    template < typename GUM_SCALAR, class BNInferenceEngine >
    inline void
       CNMonteCarloSampling< GUM_SCALAR, BNInferenceEngine >::threadUpdate__() {
      int tId = getThreadNumber();

      if (this->l_inferenceEngine_[tId]->evidenceProbability() > 0) {
        const DAG& tDag = this->workingSet_[tId]->dag();

        for (auto node: tDag.nodes()) {
          const Potential< GUM_SCALAR >& potential(
             this->l_inferenceEngine_[tId]->posterior(node));
          Instantiation             ins(potential);
          std::vector< GUM_SCALAR > vertex;

          for (ins.setFirst(); !ins.end(); ++ins) {
            vertex.push_back(potential[ins]);
          }

          // no redundancy elimination here: the global marginals are only
          // updated at the end of each period of the approximation scheme
          this->updateThread_(node, vertex, false);
        }   // end of : for all nodes
      }     // end of : if p(e) > 0
    }
    template < typename GUM_SCALAR, class BNInferenceEngine >
    inline void
       CNMonteCarloSampling< GUM_SCALAR, BNInferenceEngine >::threadInference__() {
      int tId = getThreadNumber();
      verticesSampling__();

      this->l_inferenceEngine_[tId]->eraseAllEvidence();
      insertEvidence__();
      this->l_inferenceEngine_[tId]->makeInference();
    }
    template < typename GUM_SCALAR, class BNInferenceEngine >
    void CNMonteCarloSampling< GUM_SCALAR,
                               BNInferenceEngine >::mcInitApproximationScheme__() {
      this->setEpsilon(0.);
      this->enableEpsilon();   // to be sure

      this->disableMinEpsilonRate();
      this->disableMaxIter();

      this->initApproximationScheme();
    }
    template < typename GUM_SCALAR, class BNInferenceEngine >
    void CNMonteCarloSampling< GUM_SCALAR,
                               BNInferenceEngine >::mcThreadDataCopy__() {
      int num_threads;
#pragma omp parallel
      {
        int this_thread = getThreadNumber();

#pragma omp single
        {
          num_threads = getNumberOfRunningThreads();

          this->initThreadsData_(num_threads,
                                 infEs__::storeVertices_,
                                 infEs__::storeBNOpt_);
          this->l_inferenceEngine_.resize(num_threads, nullptr);
        }   // end of : single region

        // each thread works on its own copy of the current IBayesNet
        BayesNet< GUM_SCALAR >* thread_bn = new BayesNet< GUM_SCALAR >();
#pragma omp critical(Init)
        {
          *thread_bn = this->credalNet_->current_bn();
        }
        this->workingSet_[this_thread] = thread_bn;

        this->l_marginalMin_[this_thread]    = this->marginalMin_;
        this->l_marginalMax_[this_thread]    = this->marginalMax_;
        this->l_expectationMin_[this_thread] = this->expectationMin_;
        this->l_expectationMax_[this_thread] = this->expectationMax_;
        this->l_modal_[this_thread]          = this->modal_;

        infEs__::l_clusters_[this_thread].resize(2);
        infEs__::l_clusters_[this_thread][0] = infEs__::t0_;
        infEs__::l_clusters_[this_thread][1] = infEs__::t1_;

        if (infEs__::storeVertices_) {
          this->l_marginalSets_[this_thread] = this->marginalSets_;
        }

        List< const Potential< GUM_SCALAR >* >* evi_list
           = new List< const Potential< GUM_SCALAR >* >();
        this->workingSetE_[this_thread] = evi_list;

        BNInferenceEngine* inference_engine
           = new BNInferenceEngine((this->workingSet_[this_thread]),
                                   RelevantPotentialsFinderType::FIND_ALL);

        this->l_inferenceEngine_[this_thread] = inference_engine;

        if (infEs__::storeBNOpt_) {
          VarMod2BNsMap< GUM_SCALAR >* threadOpt
             = new VarMod2BNsMap< GUM_SCALAR >(*this->credalNet_);
          this->l_optimalNet_[this_thread] = threadOpt;
        }
      }   // end of : parallel region
    }
    template < typename GUM_SCALAR, class BNInferenceEngine >
    inline void CNMonteCarloSampling< GUM_SCALAR, BNInferenceEngine >::binaryRep__(
       std::vector< bool >& toFill,
       const Idx            value) const {
      // store the binary representation of value (the index of the chosen
      // vertex) into toFill
      Idx n = value;
      for (Size i = 0, end = Size(toFill.size()); i < end; i++) {
        toFill[i] = n & 1;
        n /= 2;
      }
    }

    template < typename GUM_SCALAR, class BNInferenceEngine >
    inline void
       CNMonteCarloSampling< GUM_SCALAR, BNInferenceEngine >::verticesSampling__() {
      int                      this_thread = getThreadNumber();
      IBayesNet< GUM_SCALAR >* working_bn  = this->workingSet_[this_thread];

      const auto cpt = &this->credalNet_->credalNet_currentCpt();

      using dBN = std::vector< std::vector< std::vector< bool > > >;

      dBN sample;

      if (infEs__::storeBNOpt_) {
        sample = dBN(this->l_optimalNet_[this_thread]->getSampleDef());
      }

      if (infEs__::repetitiveInd_) {
        const auto& t0 = infEs__::l_clusters_[this_thread][0];
        const auto& t1 = infEs__::l_clusters_[this_thread][1];

        for (const auto& elt: t0) {
          auto dSize = working_bn->variable(elt.first).domainSize();
          Potential< GUM_SCALAR >* potential(
             const_cast< Potential< GUM_SCALAR >* >(&working_bn->cpt(elt.first)));
          std::vector< GUM_SCALAR > var_cpt(potential->domainSize());

          Size pconfs = Size((*cpt)[elt.first].size());

          for (Size pconf = 0; pconf < pconfs; pconf++) {
            // pick one vertex of the pconf-th credal set at random
            Size choosen_vertex = rand() % (*cpt)[elt.first][pconf].size();

            if (infEs__::storeBNOpt_) {
              binaryRep__(sample[elt.first][pconf], choosen_vertex);
            }

            for (Size mod = 0; mod < dSize; mod++) {
              var_cpt[pconf * dSize + mod]
                 = (*cpt)[elt.first][pconf][choosen_vertex][mod];
            }
          }   // end of : pconf

          potential->fillWith(var_cpt);

          Size t0esize = Size(elt.second.size());

          // the nodes of the cluster share the same sampled CPT
          for (Size pos = 0; pos < t0esize; pos++) {
            if (infEs__::storeBNOpt_) {
              sample[elt.second[pos]] = sample[elt.first];
            }

            Potential< GUM_SCALAR >* potential2(
               const_cast< Potential< GUM_SCALAR >* >(
                  &working_bn->cpt(elt.second[pos])));
            potential2->fillWith(var_cpt);
          }
        }   // end of : t0 clusters

        for (const auto& elt: t1) {
          auto dSize = working_bn->variable(elt.first).domainSize();
          Potential< GUM_SCALAR >* potential(
             const_cast< Potential< GUM_SCALAR >* >(&working_bn->cpt(elt.first)));
          std::vector< GUM_SCALAR > var_cpt(potential->domainSize());

          for (Size pconf = 0; pconf < (*cpt)[elt.first].size(); pconf++) {
            Idx choosen_vertex = Idx(rand() % (*cpt)[elt.first][pconf].size());

            if (infEs__::storeBNOpt_) {
              binaryRep__(sample[elt.first][pconf], choosen_vertex);
            }

            for (decltype(dSize) mod = 0; mod < dSize; mod++) {
              var_cpt[pconf * dSize + mod]
                 = (*cpt)[elt.first][pconf][choosen_vertex][mod];
            }
          }   // end of : pconf

          potential->fillWith(var_cpt);

          auto t1esize = elt.second.size();

          for (decltype(t1esize) pos = 0; pos < t1esize; pos++) {
            if (infEs__::storeBNOpt_) {
              sample[elt.second[pos]] = sample[elt.first];
            }

            Potential< GUM_SCALAR >* potential2(
               const_cast< Potential< GUM_SCALAR >* >(
                  &working_bn->cpt(elt.second[pos])));
            potential2->fillWith(var_cpt);
          }
        }   // end of : t1 clusters

        if (infEs__::storeBNOpt_) {
          this->l_optimalNet_[this_thread]->setCurrentSample(sample);
        }
      } else {
        // no repetitive independence: sample every node independently
        for (auto node: working_bn->nodes()) {
          auto dSize = working_bn->variable(node).domainSize();
          Potential< GUM_SCALAR >* potential(
             const_cast< Potential< GUM_SCALAR >* >(&working_bn->cpt(node)));
          std::vector< GUM_SCALAR > var_cpt(potential->domainSize());

          auto pConfs = (*cpt)[node].size();

          for (decltype(pConfs) pconf = 0; pconf < pConfs; pconf++) {
            Size nVertices      = Size((*cpt)[node][pconf].size());
            Idx  choosen_vertex = Idx(rand() % nVertices);

            if (infEs__::storeBNOpt_) {
              binaryRep__(sample[node][pconf], choosen_vertex);
            }

            for (decltype(dSize) mod = 0; mod < dSize; mod++) {
              var_cpt[pconf * dSize + mod]
                 = (*cpt)[node][pconf][choosen_vertex][mod];
            }
          }   // end of : pconf

          potential->fillWith(var_cpt);
        }   // end of : for all nodes

        if (infEs__::storeBNOpt_) {
          this->l_optimalNet_[this_thread]->setCurrentSample(sample);
        }
      }
    }
    template < typename GUM_SCALAR, class BNInferenceEngine >
    inline void
       CNMonteCarloSampling< GUM_SCALAR, BNInferenceEngine >::insertEvidence__() {
      if (this->evidence_.size() == 0) { return; }

      int this_thread = getThreadNumber();

      BNInferenceEngine* inference_engine = this->l_inferenceEngine_[this_thread];

      IBayesNet< GUM_SCALAR >* working_bn = this->workingSet_[this_thread];

      List< const Potential< GUM_SCALAR >* >* evi_list
         = this->workingSetE_[this_thread];

      // the evidence potentials of this thread were already built: just add them
      if (evi_list->size() > 0) {
        for (const auto pot: *evi_list)
          inference_engine->addEvidence(*pot);
        return;
      }

      // first call: build one potential per evidence entry
      for (const auto& elt: this->evidence_) {
        Potential< GUM_SCALAR >* p = new Potential< GUM_SCALAR >;
        (*p) << working_bn->variable(elt.first);

        try {
          p->fillWith(elt.second);
        } catch (Exception& err) {
          throw(err);
        }

        evi_list->insert(p);
      }

      if (evi_list->size() > 0) {
        for (const auto pot: *evi_list)
          inference_engine->addEvidence(*pot);
      }
    }

  }   // namespace credal
}   // namespace gum
Member summary (from <agrum/CN/CNMonteCarloSampling.h>):

void threadUpdate__()
    Updates the thread data after an IBayesNet inference.
void verticesSampling__()
    Thread samples an IBayesNet from the CredalNet.
void mcThreadDataCopy__()
    Initializes the threads' data.
virtual ~CNMonteCarloSampling()
    Destructor.
void threadInference__()
    Thread performs an inference using BNInferenceEngine.
void mcInitApproximationScheme__()
    Initializes the approximation scheme.
CNMonteCarloSampling(const CredalNet< GUM_SCALAR > &credalNet)
    Constructor.
void makeInference()
    Starts the inference.
void insertEvidence__()
    Inserts the CredalNet evidence into a thread's BNInferenceEngine.

gum::credal: the namespace for all credal networks entities.
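
For orientation, here is a minimal usage sketch (not part of this source file). It assumes a CredalNet built from two BIF files holding the lower and upper bounds of each CPT ("cn_min.bif" and "cn_max.bif" are placeholder names), LazyPropagation as the per-sample BNInferenceEngine, and the include path <agrum/CN/credalNet.h>; setRepetitiveInd() and setMaxTime() come from the base inference and approximation-scheme classes, and the exact return type of marginalMin()/marginalMax() may differ between aGrUM versions.

#include <agrum/CN/credalNet.h>                   // assumed header path for CredalNet
#include <agrum/CN/CNMonteCarloSampling.h>
#include <agrum/BN/inference/lazyPropagation.h>

int main() {
  // placeholder file names: one BN for the lower bounds, one for the upper bounds
  gum::credal::CredalNet< double > cn("cn_min.bif", "cn_max.bif");
  cn.intervalToCredal();   // turn the interval bounds into credal sets

  // Monte Carlo sampling, running LazyPropagation on every sampled BayesNet
  gum::credal::CNMonteCarloSampling< double, gum::LazyPropagation< double > >
     mc(cn);

  mc.setRepetitiveInd(false);   // same default as the constructor above
  mc.setMaxTime(1);             // stop after about one second (timeout is enabled by default)
  mc.makeInference();

  // lower and upper posterior bounds of node 0
  const auto& pMin = mc.marginalMin(0);
  const auto& pMax = mc.marginalMax(0);

  return 0;
}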