aGrUM  0.13.2
gum::credal::CNLoopyPropagation< GUM_SCALAR > Class Template Reference

<agrum/CN/CNLoopyPropagation.h> More...

#include <CNLoopyPropagation.h>


Public Attributes

Signaler3< Size, double, double > onProgress
 Progression, error and time. More...
 
Signaler1< std::string > onStop
 Criteria messageApproximationScheme. More...
 

Public Member Functions

virtual void insertEvidenceFile (const std::string &path)
 Insert evidence from file. More...
 
Public algorithm methods
void makeInference ()
 Starts the inference. More...
 
Getters and setters
void inferenceType (InferenceType inft)
 Set the inference type. More...
 
InferenceType inferenceType ()
 Get the inference type. More...
 
Post-inference methods
void eraseAllEvidence ()
 Erase all inference related data to perform another one. More...
 
void saveInference (const std::string &path)
 
Constructors / Destructors
 CNLoopyPropagation (const CredalNet< GUM_SCALAR > &cnet)
 Constructor. More...
 
virtual ~CNLoopyPropagation ()
 Destructor. More...
 
Getters and setters
VarMod2BNsMap< GUM_SCALAR > * getVarMod2BNsMap ()
 Get optimum IBayesNet. More...
 
const CredalNet< GUM_SCALAR > & credalNet ()
 Get this credal network. More...
 
const NodeProperty< std::vector< NodeId > > & getT0Cluster () const
 Get the _t0 cluster. More...
 
const NodeProperty< std::vector< NodeId > > & getT1Cluster () const
 Get the _t1 cluster. More...
 
void setRepetitiveInd (const bool repetitive)
 
void storeVertices (const bool value)
 
bool storeVertices () const
 Get whether credal set vertices are stored during inference. More...
 
void storeBNOpt (const bool value)
 
bool storeBNOpt () const
 
bool repetitiveInd () const
 Get the current independence status. More...
 
Pre-inference initialization methods
void insertModalsFile (const std::string &path)
 Insert variables modalities from file to compute expectations. More...
 
void insertModals (const std::map< std::string, std::vector< GUM_SCALAR > > &modals)
 Insert variables modalities from map to compute expectations. More...
 
void insertEvidence (const std::map< std::string, std::vector< GUM_SCALAR > > &eviMap)
 Insert evidence from map. More...
 
void insertEvidence (const NodeProperty< std::vector< GUM_SCALAR > > &evidence)
 Insert evidence from Property. More...
 
void insertQueryFile (const std::string &path)
 Insert query variables states from file. More...
 
void insertQuery (const NodeProperty< std::vector< bool > > &query)
 Insert query variables and states from Property. More...
 
Post-inference methods
const std::vector< GUM_SCALAR > & marginalMin (const NodeId id) const
 Get the lower marginals of a given node id. More...
 
const std::vector< GUM_SCALAR > & marginalMin (const std::string &varName) const
 Get the lower marginals of a given variable name. More...
 
const std::vector< GUM_SCALAR > & marginalMax (const NodeId id) const
 Get the upper marginals of a given node id. More...
 
const std::vector< GUM_SCALAR > & marginalMax (const std::string &varName) const
 Get the upper marginals of a given variable name. More...
 
const GUM_SCALAR & expectationMin (const NodeId id) const
 Get the lower expectation of a given node id. More...
 
const GUM_SCALAR & expectationMin (const std::string &varName) const
 Get the lower expectation of a given variable name. More...
 
const GUM_SCALAR & expectationMax (const NodeId id) const
 Get the upper expectation of a given node id. More...
 
const GUM_SCALAR & expectationMax (const std::string &varName) const
 Get the upper expectation of a given variable name. More...
 
const std::vector< GUM_SCALAR > & dynamicExpMin (const std::string &varName) const
 Get the lower dynamic expectation of a given variable prefix (without the time step included, i.e. More...
 
const std::vector< GUM_SCALAR > & dynamicExpMax (const std::string &varName) const
 Get the upper dynamic expectation of a given variable prefix (without the time step included, i.e. More...
 
const std::vector< std::vector< GUM_SCALAR > > & vertices (const NodeId id) const
 Get the vertices of a given node id. More...
 
void saveMarginals (const std::string &path) const
 Saves marginals to file. More...
 
void saveExpectations (const std::string &path) const
 Saves expectations to file. More...
 
void saveVertices (const std::string &path) const
 Saves vertices to file. More...
 
void dynamicExpectations ()
 Compute dynamic expectations. More...
 
std::string toString () const
 Print all node marginals to standard output. More...
 
const std::string getApproximationSchemeMsg ()
 Get approximation scheme state. More...
 
Getters and setters
void setEpsilon (double eps)
 Given that we approximate f(t), stopping criterion on |f(t+1)-f(t)|. More...
 
double epsilon () const
 Returns the value of epsilon. More...
 
void disableEpsilon ()
 Disable stopping criterion on epsilon. More...
 
void enableEpsilon ()
 Enable stopping criterion on epsilon. More...
 
bool isEnabledEpsilon () const
 Returns true if stopping criterion on epsilon is enabled, false otherwise. More...
 
void setMinEpsilonRate (double rate)
 Given that we approximate f(t), stopping criterion on d/dt(|f(t+1)-f(t)|). More...
 
double minEpsilonRate () const
 Returns the value of the minimal epsilon rate. More...
 
void disableMinEpsilonRate ()
 Disable stopping criterion on epsilon rate. More...
 
void enableMinEpsilonRate ()
 Enable stopping criterion on epsilon rate. More...
 
bool isEnabledMinEpsilonRate () const
 Returns true if stopping criterion on epsilon rate is enabled, false otherwise. More...
 
void setMaxIter (Size max)
 Stopping criterion on number of iterations. More...
 
Size maxIter () const
 Returns the criterion on number of iterations. More...
 
void disableMaxIter ()
 Disable stopping criterion on max iterations. More...
 
void enableMaxIter ()
 Enable stopping criterion on max iterations. More...
 
bool isEnabledMaxIter () const
 Returns true if stopping criterion on max iterations is enabled, false otherwise. More...
 
void setMaxTime (double timeout)
 Stopping criterion on timeout. More...
 
double maxTime () const
 Returns the timeout (in seconds). More...
 
double currentTime () const
 Returns the current running time in seconds. More...
 
void disableMaxTime ()
 Disable stopping criterion on timeout. More...
 
void enableMaxTime ()
 Enable stopping criterion on timeout. More...
 
bool isEnabledMaxTime () const
 Returns true if stopping criterion on timeout is enabled, false otherwise. More...
 
void setPeriodSize (Size p)
 Number of samples between two checks of the stopping criteria. More...
 
Size periodSize () const
 Returns the period size. More...
 
void setVerbosity (bool v)
 Set the verbosity on (true) or off (false). More...
 
bool verbosity () const
 Returns true if verbosity is enabled. More...
 
ApproximationSchemeSTATE stateApproximationScheme () const
 Returns the approximation scheme state. More...
 
Size nbrIterations () const
 Returns the number of iterations. More...
 
const std::vector< double > & history () const
 Returns the scheme history. More...
 
void initApproximationScheme ()
 Initialise the scheme. More...
 
bool startOfPeriod ()
 Returns true if we are at the beginning of a period (computing the error is then mandatory). More...
 
void updateApproximationScheme (unsigned int incr=1)
 Update the scheme w.r.t the new error and increment steps. More...
 
Size remainingBurnIn ()
 Returns the remaining burn in. More...
 
void stopApproximationScheme ()
 Stop the approximation scheme. More...
 
bool continueApproximationScheme (double error)
 Update the scheme w.r.t the new error. More...
 
Getters and setters
std::string messageApproximationScheme () const
 Returns the approximation scheme message. More...
 

Public Types

enum  InferenceType : char { InferenceType::nodeToNeighbours, InferenceType::ordered, InferenceType::randomOrder }
 Inference type to be used by the algorithm. More...
 
using msg = std::vector< Potential< GUM_SCALAR > * >
 
using cArcP = const Arc *
 
enum  ApproximationSchemeSTATE : char {
  ApproximationSchemeSTATE::Undefined, ApproximationSchemeSTATE::Continue, ApproximationSchemeSTATE::Epsilon, ApproximationSchemeSTATE::Rate,
  ApproximationSchemeSTATE::Limit, ApproximationSchemeSTATE::TimeLimit, ApproximationSchemeSTATE::Stopped
}
 The different states of an approximation scheme. More...
 

Protected Attributes

NodeProperty< bool > _update_p
 Used to keep track of which node needs to update its information coming from its parents. More...
 
NodeProperty< bool > _update_l
 Used to keep track of which node needs to update its information coming from its children. More...
 
NodeSet active_nodes_set
 The current node-set to iterate through at this current step. More...
 
NodeSet next_active_nodes_set
 The next node-set, i.e. More...
 
NodeProperty< NodeSet * > _msg_l_sent
 Used to keep track of one's messages sent to its parents. More...
 
ArcProperty< GUM_SCALAR > _ArcsL_min
 "Lower" information \( \Lambda \) coming from one's children. More...
 
ArcProperty< GUM_SCALAR > _ArcsP_min
 "Lower" information \( \pi \) coming from one's parent. More...
 
NodeProperty< GUM_SCALAR > _NodesL_min
 "Lower" node information \( \Lambda \) obtained by combination of the children's messages. More...
 
NodeProperty< GUM_SCALAR > _NodesP_min
 "Lower" node information \( \pi \) obtained by combination of the parents' messages. More...
 
ArcProperty< GUM_SCALAR > _ArcsL_max
 "Upper" information \( \Lambda \) coming from one's children. More...
 
ArcProperty< GUM_SCALAR > _ArcsP_max
 "Upper" information \( \pi \) coming from one's parent. More...
 
NodeProperty< GUM_SCALAR > _NodesL_max
 "Upper" node information \( \Lambda \) obtained by combination of the children's messages. More...
 
NodeProperty< GUM_SCALAR > _NodesP_max
 "Upper" node information \( \pi \) obtained by combination of the parents' messages. More...
 
bool _InferenceUpToDate
 TRUE if inference has already been performed, FALSE otherwise. More...
 
const CredalNet< GUM_SCALAR > * _credalNet
 A pointer to the Credal Net used. More...
 
margi _oldMarginalMin
 Old lower marginals used to compute epsilon. More...
 
margi _oldMarginalMax
 Old upper marginals used to compute epsilon. More...
 
margi _marginalMin
 Lower marginals. More...
 
margi _marginalMax
 Upper marginals. More...
 
credalSet _marginalSets
 Credal sets vertices, if enabled. More...
 
expe _expectationMin
 Lower expectations, if some variables modalities were inserted. More...
 
expe _expectationMax
 Upper expectations, if some variables modalities were inserted. More...
 
dynExpe _dynamicExpMin
 Lower dynamic expectations. More...
 
dynExpe _dynamicExpMax
 Upper dynamic expectations. More...
 
dynExpe _modal
 Variables modalities used to compute expectations. More...
 
margi _evidence
 Holds observed variables states. More...
 
query _query
 Holds the query nodes states. More...
 
cluster _t0
 Clusters of nodes used with dynamic networks. More...
 
cluster _t1
 Clusters of nodes used with dynamic networks. More...
 
bool _storeVertices
 True if credal sets vertices are stored, False otherwise. More...
 
bool _repetitiveInd
 True if using repetitive independence (dynamic networks only), False otherwise. More...
 
bool _storeBNOpt
 True if optimal IBayesNets are stored during inference, False otherwise. More...
 
VarMod2BNsMap< GUM_SCALAR > _dbnOpt
 Object used to efficiently store optimal Bayesian networks during inference, for some algorithms. More...
 
int _timeSteps
 The number of time steps of this network (only useful for dynamic networks). More...
 
double _current_epsilon
 Current epsilon. More...
 
double _last_epsilon
 Last epsilon value. More...
 
double _current_rate
 Current rate. More...
 
Size _current_step
 The current step. More...
 
Timer _timer
 The timer. More...
 
ApproximationSchemeSTATE _current_state
 The current state. More...
 
std::vector< double > _history
 The scheme history, used only if verbosity == true. More...
 
double _eps
 Threshold for convergence. More...
 
bool _enabled_eps
 If true, the convergence threshold on epsilon is enabled. More...
 
double _min_rate_eps
 Threshold for the epsilon rate. More...
 
bool _enabled_min_rate_eps
 If true, the minimal threshold for epsilon rate is enabled. More...
 
double _max_time
 The timeout. More...
 
bool _enabled_max_time
 If true, the timeout is enabled. More...
 
Size _max_iter
 The maximum iterations. More...
 
bool _enabled_max_iter
 If true, the maximum iterations stopping criterion is enabled. More...
 
Size _burn_in
 Number of iterations before checking stopping criteria. More...
 
Size _period_size
 Checking criteria frequency. More...
 
bool _verbosity
 If true, verbosity is enabled. More...
 

Protected Member Functions

Protected initialization methods
void _initialize ()
 Topological forward propagation to initialize old marginals & messages. More...
 
Protected algorithm methods
void _makeInferenceNodeToNeighbours ()
 Starts the inference with this inference type. More...
 
void _makeInferenceByOrderedArcs ()
 Starts the inference with this inference type. More...
 
void _makeInferenceByRandomOrder ()
 Starts the inference with this inference type. More...
 
void _updateMarginals ()
 Compute marginals from up-to-date messages. More...
 
void _msgL (const NodeId X, const NodeId demanding_parent)
 Sends a message to one's parent, i.e. More...
 
void _compute_ext (GUM_SCALAR &msg_l_min, GUM_SCALAR &msg_l_max, std::vector< GUM_SCALAR > &lx, GUM_SCALAR &num_min, GUM_SCALAR &num_max, GUM_SCALAR &den_min, GUM_SCALAR &den_max)
 Used by _msgL. More...
 
void _compute_ext (std::vector< std::vector< GUM_SCALAR > > &combi_msg_p, const NodeId &id, GUM_SCALAR &msg_l_min, GUM_SCALAR &msg_l_max, std::vector< GUM_SCALAR > &lx, const Idx &pos)
 Used by _msgL. More...
 
void _enum_combi (std::vector< std::vector< std::vector< GUM_SCALAR > > > &msgs_p, const NodeId &id, GUM_SCALAR &msg_l_min, GUM_SCALAR &msg_l_max, std::vector< GUM_SCALAR > &lx, const Idx &pos)
 Used by _msgL. More...
 
void _msgP (const NodeId X, const NodeId demanding_child)
 Sends a message to one's child, i.e. More...
 
void _enum_combi (std::vector< std::vector< std::vector< GUM_SCALAR > > > &msgs_p, const NodeId &id, GUM_SCALAR &msg_p_min, GUM_SCALAR &msg_p_max)
 Used by _msgP. More...
 
void _compute_ext (std::vector< std::vector< GUM_SCALAR > > &combi_msg_p, const NodeId &id, GUM_SCALAR &msg_p_min, GUM_SCALAR &msg_p_max)
 Used by _msgP. More...
 
void _refreshLMsPIs (bool refreshIndic=false)
 Get the last messages from one's parents and children. More...
 
GUM_SCALAR _calculateEpsilon ()
 Compute epsilon. More...
 
Post-inference protected methods
void _computeExpectations ()
 Since the network is binary, expectations can be computed from the final marginals which give us the credal set vertices. More...
 
void _updateIndicatrices ()
 Only update indicator variables at the end of the computations (calls _msgP). More...
 
Protected initialization methods
void _repetitiveInit ()
 Initialize _t0 and _t1 clusters. More...
 
void _initExpectations ()
 Initialize lower and upper expectations before inference, with the lower expectation being initialized on the highest modality and the upper expectation being initialized on the lowest modality. More...
 
void _initMarginals ()
 Initialize lower and upper old marginals and marginals before inference, with the lower marginal being 1 and the upper 0. More...
 
void _initMarginalSets ()
 Initialize credal set vertices with empty sets. More...
 
Protected algorithms methods
const GUM_SCALAR _computeEpsilon ()
 Compute approximation scheme epsilon using the old marginals and the new ones. More...
 
void _updateExpectations (const NodeId &id, const std::vector< GUM_SCALAR > &vertex)
 Given a node id and one of its possible vertices obtained during inference, update this node's lower and upper expectations. More...
 
void _updateCredalSets (const NodeId &id, const std::vector< GUM_SCALAR > &vertex, const bool &elimRedund=false)
 Given a node id and one of its possible vertices, update its credal set. More...
 
Protected post-inference methods
void _dynamicExpectations ()
 Rearrange lower and upper expectations to suit dynamic networks. More...
 

Detailed Description

template<typename GUM_SCALAR>
class gum::credal::CNLoopyPropagation< GUM_SCALAR >

<agrum/CN/CNLoopyPropagation.h>

Class implementing loopy propagation for binary credal networks - the L2U algorithm.

Template Parameters
GUM_SCALAR    A floating type (float, double, long double, ...).
Author
Matthieu HOURBRACQ and Pierre-Henri WUILLEMIN

Definition at line 56 of file CNLoopyPropagation.h.
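
A minimal usage sketch (the credal network `cn`, the evidence file name and the queried variable name are illustrative assumptions, not taken from this documentation); the network must be binary and separately specified, and computeCPTMinMax() must have been called before construction:

#include <agrum/CN/CNLoopyPropagation.h>

// `cn` is an existing binary, separately specified gum::credal::CredalNet< double >
// (its construction is not shown here).
cn.computeCPTMinMax();                      // required by the CNLoopyPropagation constructor

gum::credal::CNLoopyPropagation< double > lp(cn);
lp.insertEvidenceFile("evidence.evi");      // hypothetical evidence file
lp.makeInference();

const auto& low = lp.marginalMin("A");      // lower marginal of a variable named "A"
const auto& up  = lp.marginalMax("A");      // upper marginal of the same variable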

Member Typedef Documentation

template<typename GUM_SCALAR >
using gum::credal::CNLoopyPropagation< GUM_SCALAR >::__infE = InferenceEngine< GUM_SCALAR >
private

To easily access InferenceEngine< GUM_SCALAR > methods.

Definition at line 366 of file CNLoopyPropagation.h.

template<typename GUM_SCALAR >
using gum::credal::CNLoopyPropagation< GUM_SCALAR >::cArcP = const Arc*

Definition at line 59 of file CNLoopyPropagation.h.

template<typename GUM_SCALAR >
using gum::credal::CNLoopyPropagation< GUM_SCALAR >::msg = std::vector< Potential< GUM_SCALAR >* >

Definition at line 58 of file CNLoopyPropagation.h.

Member Enumeration Documentation

The different states of an approximation scheme.

Enumerator
Undefined 
Continue 
Epsilon 
Rate 
Limit 
TimeLimit 
Stopped 

Definition at line 64 of file IApproximationSchemeConfiguration.h.

enum ApproximationSchemeSTATE : char {
  Undefined,
  Continue,
  Epsilon,
  Rate,
  Limit,
  TimeLimit,
  Stopped
};
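
The state returned by stateApproximationScheme() after inference reflects which of the stopping criteria above ended the scheme. A short configuration sketch using the setters documented above (`lp` and all values are illustrative):

lp.setEpsilon(1e-6);    // stop once the largest marginal change drops below 1e-6
lp.setMaxIter(1000);    // ... or after 1000 iterations
lp.setMaxTime(10.);     // ... or after 10 seconds
lp.setVerbosity(true);  // keep the error history (see history())
lp.makeInference();

if (lp.stateApproximationScheme()
    == gum::credal::CNLoopyPropagation< double >::ApproximationSchemeSTATE::Epsilon) {
  // convergence on epsilon was reached
}
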
template<typename GUM_SCALAR >
enum gum::credal::CNLoopyPropagation::InferenceType : char
strong

Inference type to be used by the algorithm.

Enumerator
nodeToNeighbours 

Uses a node-set so we don't iterate on nodes that can't send a new message.

Should be the fastest inference type. A step is going through the node-set.

ordered 

Chooses an arc ordering and sends messages accordingly at all steps.

Avoid it since it can give slightly worse results than other inference types. A step is going through all arcs.

randomOrder 

Chooses a random arc ordering and sends messages accordingly.

A new order is set at each step. A step is going through all arcs.

Definition at line 64 of file CNLoopyPropagation.h.

enum InferenceType : char {
  nodeToNeighbours,
  ordered,
  randomOrder
};
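
A short sketch of selecting the message-passing schedule before running the inference (`lp` is an existing CNLoopyPropagation< double > instance, assumed for illustration):

using CNLP = gum::credal::CNLoopyPropagation< double >;
lp.inferenceType(CNLP::InferenceType::ordered);    // switch to the arc-ordered schedule
lp.makeInference();
CNLP::InferenceType current = lp.inferenceType();  // read the current schedule back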

Constructor & Destructor Documentation

template<typename GUM_SCALAR >
gum::credal::CNLoopyPropagation< GUM_SCALAR >::CNLoopyPropagation ( const CredalNet< GUM_SCALAR > &  cnet)
explicit

Constructor.

Parameters
cnet    The CredalNet to be used with this algorithm.

Definition at line 1512 of file CNLoopyPropagation_tpl.h.

References gum::credal::CNLoopyPropagation< GUM_SCALAR >::__bnet, gum::credal::CNLoopyPropagation< GUM_SCALAR >::__cn, gum::credal::CNLoopyPropagation< GUM_SCALAR >::__inferenceType, gum::credal::CNLoopyPropagation< GUM_SCALAR >::_InferenceUpToDate, gum::credal::CredalNet< GUM_SCALAR >::current_bn(), GUM_ERROR, gum::credal::CredalNet< GUM_SCALAR >::hasComputedCPTMinMax(), gum::credal::CredalNet< GUM_SCALAR >::isSeparatelySpecified(), and gum::credal::CNLoopyPropagation< GUM_SCALAR >::nodeToNeighbours.

1513  :
1514  InferenceEngine< GUM_SCALAR >::InferenceEngine(cnet) {
1515  if (!cnet.isSeparatelySpecified()) {
1516  GUM_ERROR(OperationNotAllowed,
1517  "CNLoopyPropagation is only available "
1518  "with separately specified nets");
1519  }
1520 
1521  // test for binary cn
1522  for (auto node : cnet.current_bn().nodes())
1523  if (cnet.current_bn().variable(node).domainSize() != 2) {
1524  GUM_ERROR(OperationNotAllowed,
1525  "CNLoopyPropagation is only available "
1526  "with binary credal networks");
1527  }
1528 
1529  // test if compute CPTMinMax has been called
1530  if (!cnet.hasComputedCPTMinMax()) {
1531  GUM_ERROR(OperationNotAllowed,
1532  "CNLoopyPropagation only works when "
1533  "\"computeCPTMinMax()\" has been called for "
1534  "this credal net");
1535  }
1536 
1537  __cn = &cnet;
1538  __bnet = &cnet.current_bn();
1539 
1540  __inferenceType = InferenceType::nodeToNeighbours;
1541  _InferenceUpToDate = false;
1542 
1543  GUM_CONSTRUCTOR(CNLoopyPropagation);
1544  }


template<typename GUM_SCALAR >
gum::credal::CNLoopyPropagation< GUM_SCALAR >::~CNLoopyPropagation ( )
virtual

Destructor.

Definition at line 1547 of file CNLoopyPropagation_tpl.h.

References gum::credal::CNLoopyPropagation< GUM_SCALAR >::__bnet, gum::credal::CNLoopyPropagation< GUM_SCALAR >::_InferenceUpToDate, and gum::credal::CNLoopyPropagation< GUM_SCALAR >::_msg_l_sent.

1547  {
1548  _InferenceUpToDate = false;
1549 
1550  if (_msg_l_sent.size() > 0) {
1551  for (auto node : __bnet->nodes()) {
1552  delete _msg_l_sent[node];
1553  }
1554  }
1555 
1556  //_msg_l_sent.clear();
1557  //_update_l.clear();
1558  //_update_p.clear();
1559 
1560  GUM_DESTRUCTOR(CNLoopyPropagation);
1561  }

Member Function Documentation

template<typename GUM_SCALAR >
GUM_SCALAR gum::credal::CNLoopyPropagation< GUM_SCALAR >::_calculateEpsilon ( )
protected

Compute epsilon.

Returns
Epsilon.

Definition at line 1461 of file CNLoopyPropagation_tpl.h.

1461  {
1462  _refreshLMsPIs();
1463  _updateMarginals();
1464 
1465  return __infE::_computeEpsilon();
1466  }
template<typename GUM_SCALAR >
void gum::credal::CNLoopyPropagation< GUM_SCALAR >::_compute_ext ( GUM_SCALAR &  msg_l_min,
GUM_SCALAR &  msg_l_max,
std::vector< GUM_SCALAR > &  lx,
GUM_SCALAR &  num_min,
GUM_SCALAR &  num_max,
GUM_SCALAR &  den_min,
GUM_SCALAR &  den_max 
)
protected

Used by _msgL.

For the following functions, the GUM_SCALAR min/max must be initialized (min to 1 and max to 0) in order to compare them with the intermediate results.

Compute the final message for the given parent's message and likelihood (children's messages), numerators & denominators.

Parameters
msg_l_min    The reference to the current lower value of the message to be sent.
msg_l_max    The reference to the current upper value of the message to be sent.
lx    The lower and upper likelihood.
num_min    The reference to the previously computed lower numerator.
num_max    The reference to the previously computed upper numerator.
den_min    The reference to the previously computed lower denominator.
den_max    The reference to the previously computed upper denominator.

Once the CPTs are marginalised over X and Ui, the min/max are computed.

Definition at line 182 of file CNLoopyPropagation_tpl.h.

References _INF.

188  {
189  GUM_SCALAR old_msg_min = msg_l_min;
190  GUM_SCALAR old_msg_max = msg_l_max;
191 
192  GUM_SCALAR num_min_tmp = 1.;
193  GUM_SCALAR den_min_tmp = 1.;
194  GUM_SCALAR num_max_tmp = 1.;
195  GUM_SCALAR den_max_tmp = 1.;
196 
197  GUM_SCALAR res_min = 1.0, res_max = 0.0;
198 
199  auto lsize = lx.size();
200 
201  for (decltype(lsize) i = 0; i < lsize; i++) {
202  bool non_defini_min = false;
203  bool non_defini_max = false;
204 
205  if (lx[i] == _INF) {
206  num_min_tmp = num_min;
207  den_min_tmp = den_max;
208  num_max_tmp = num_max;
209  den_max_tmp = den_min;
210  } else if (lx[i] == (GUM_SCALAR)1.) {
211  num_min_tmp = GUM_SCALAR(1.);
212  den_min_tmp = GUM_SCALAR(1.);
213  num_max_tmp = GUM_SCALAR(1.);
214  den_max_tmp = GUM_SCALAR(1.);
215  } else if (lx[i] > (GUM_SCALAR)1.) {
216  GUM_SCALAR li = GUM_SCALAR(1.) / (lx[i] - GUM_SCALAR(1.));
217  num_min_tmp = num_min + li;
218  den_min_tmp = den_max + li;
219  num_max_tmp = num_max + li;
220  den_max_tmp = den_min + li;
221  } else if (lx[i] < (GUM_SCALAR)1.) {
222  GUM_SCALAR li = GUM_SCALAR(1.) / (lx[i] - GUM_SCALAR(1.));
223  num_min_tmp = num_max + li;
224  den_min_tmp = den_min + li;
225  num_max_tmp = num_min + li;
226  den_max_tmp = den_max + li;
227  }
228 
229  if (den_min_tmp == 0. && num_min_tmp == 0.) {
230  non_defini_min = true;
231  } else if (den_min_tmp == 0. && num_min_tmp != 0.) {
232  res_min = _INF;
233  } else if (den_min_tmp != _INF || num_min_tmp != _INF) {
234  res_min = num_min_tmp / den_min_tmp;
235  }
236 
237  if (den_max_tmp == 0. && num_max_tmp == 0.) {
238  non_defini_max = true;
239  } else if (den_max_tmp == 0. && num_max_tmp != 0.) {
240  res_max = _INF;
241  } else if (den_max_tmp != _INF || num_max_tmp != _INF) {
242  res_max = num_max_tmp / den_max_tmp;
243  }
244 
245  if (non_defini_max && non_defini_min) {
246  std::cout << "undefined msg" << std::endl;
247  continue;
248  } else if (non_defini_min && !non_defini_max) {
249  res_min = res_max;
250  } else if (non_defini_max && !non_defini_min) {
251  res_max = res_min;
252  }
253 
254  if (res_min < 0.) { res_min = 0.; }
255 
256  if (res_max < 0.) { res_max = 0.; }
257 
258  if (msg_l_min == msg_l_max && msg_l_min == -2.) {
259  msg_l_min = res_min;
260  msg_l_max = res_max;
261  }
262 
263  if (res_max > msg_l_max) { msg_l_max = res_max; }
264 
265  if (res_min < msg_l_min) { msg_l_min = res_min; }
266 
267  } // end of : for each lx
268  }
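
As a summary of the listing above (not additional documentation): for a finite likelihood value \( \lambda = lx[i] \) different from 1, each pass of the loop evaluates

\[ \mathrm{res}(\lambda) \;=\; \frac{\mathrm{num} + \frac{1}{\lambda - 1}}{\mathrm{den} + \frac{1}{\lambda - 1}}, \]

pairing the lower/upper numerators and denominators so as to minimize (resp. maximize) the ratio; \( \lambda = 1 \) yields 1 and \( \lambda = \infty \) (the _INF case) yields the limit \( \mathrm{num} / \mathrm{den} \). The bounds msg_l_min and msg_l_max are then the minimum and maximum of these values over all entries of lx.
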
template<typename GUM_SCALAR >
void gum::credal::CNLoopyPropagation< GUM_SCALAR >::_compute_ext ( std::vector< std::vector< GUM_SCALAR > > &  combi_msg_p,
const NodeId id,
GUM_SCALAR &  msg_l_min,
GUM_SCALAR &  msg_l_max,
std::vector< GUM_SCALAR > &  lx,
const Idx pos 
)
protected

Used by _msgL.

Extrema for one combination of the parents' messages, for a message sent to a parent.

Compute the numerators & denominators for the given parent's message and likelihood (children's messages). Marginalisation.

Parameters
combi_msg_p    The chosen combination of the parents' messages.
id    The constant id of the node sending the message.
msg_l_min    The reference to the current lower value of the message to be sent.
msg_l_max    The reference to the current upper value of the message to be sent.
lx    The lower and upper likelihood.
pos    The position of the parent node to receive the message in the CPT of the one sending the message (first parent, second, ...).

Definition at line 274 of file CNLoopyPropagation_tpl.h.

References gum::intPow().

280  {
281  GUM_SCALAR num_min = 0.;
282  GUM_SCALAR num_max = 0.;
283  GUM_SCALAR den_min = 0.;
284  GUM_SCALAR den_max = 0.;
285 
286  auto taille = combi_msg_p.size();
287 
288  std::vector< typename std::vector< GUM_SCALAR >::iterator > it(taille);
289 
290  for (decltype(taille) i = 0; i < taille; i++) {
291  it[i] = combi_msg_p[i].begin();
292  }
293 
294  Size pp = pos;
295  Size pas = Size(intPow(2, pp));
296 
297  Size combi_den = 0;
298  Size combi_num = pp;
299 
300  // marginalisation
301  while (it[taille - 1] != combi_msg_p[taille - 1].end()) {
302  GUM_SCALAR prod = 1.;
303 
304  for (decltype(taille) k = 0; k < taille; k++) {
305  prod *= *it[k];
306  }
307 
308  den_min += (__cn->get_CPT_min()[id][combi_den] * prod);
309  den_max += (__cn->get_CPT_max()[id][combi_den] * prod);
310 
311  num_min += (__cn->get_CPT_min()[id][combi_num] * prod);
312  num_max += (__cn->get_CPT_max()[id][combi_num] * prod);
313 
314  combi_den++;
315  combi_num++;
316 
317  if (combi_den % pp == 0) {
318  combi_den += pp;
319  combi_num += pp;
320  }
321 
322  // incrementation
323  ++it[0];
324 
325  for (decltype(taille) i = 0;
326  (i < taille - 1) && (it[i] == combi_msg_p[i].end());
327  ++i) {
328  it[i] = combi_msg_p[i].begin();
329  ++it[i + 1];
330  }
331  } // end of : marginalisation
332 
333  _compute_ext(msg_l_min, msg_l_max, lx, num_min, num_max, den_min, den_max);
334  }


template<typename GUM_SCALAR >
void gum::credal::CNLoopyPropagation< GUM_SCALAR >::_compute_ext ( std::vector< std::vector< GUM_SCALAR > > &  combi_msg_p,
const NodeId id,
GUM_SCALAR &  msg_p_min,
GUM_SCALAR &  msg_p_max 
)
protected

Used by _msgP.

Extrema for one combination of the parents' messages, for a message sent to a child; marginalisation of the CPTs.

Marginalisation.

Parameters
combi_msg_p    The chosen combination of the parents' messages.
id    The constant id of the node sending the message.
msg_p_min    The reference to the current lower value of the message to be sent.
msg_p_max    The reference to the current upper value of the message to be sent.

Definition at line 341 of file CNLoopyPropagation_tpl.h.

345  {
346  GUM_SCALAR min = 0.;
347  GUM_SCALAR max = 0.;
348 
349  auto taille = combi_msg_p.size();
350 
351  std::vector< typename std::vector< GUM_SCALAR >::iterator > it(taille);
352 
353  for (decltype(taille) i = 0; i < taille; i++) {
354  it[i] = combi_msg_p[i].begin();
355  }
356 
357  int combi = 0;
358  auto theEnd = combi_msg_p[taille - 1].end();
359 
360  while (it[taille - 1] != theEnd) {
361  GUM_SCALAR prod = 1.;
362 
363  for (decltype(taille) k = 0; k < taille; k++) {
364  prod *= *it[k];
365  }
366 
367  min += (__cn->get_CPT_min()[id][combi] * prod);
368  max += (__cn->get_CPT_max()[id][combi] * prod);
369 
370  combi++;
371 
372  // incrementation
373  ++it[0];
374 
375  for (decltype(taille) i = 0;
376  (i < taille - 1) && (it[i] == combi_msg_p[i].end());
377  ++i) {
378  it[i] = combi_msg_p[i].begin();
379  ++it[i + 1];
380  }
381  }
382 
383  if (min < msg_p_min) { msg_p_min = min; }
384 
385  if (max > msg_p_max) { msg_p_max = max; }
386  }
template<typename GUM_SCALAR >
const GUM_SCALAR gum::credal::InferenceEngine< GUM_SCALAR >::_computeEpsilon ( )
inline protected inherited

Compute approximation scheme epsilon using the old marginals and the new ones.

Highest delta on either lower or upper marginal is epsilon.

Also updates oldMarginals to current marginals.

Returns
Epsilon.

Definition at line 1012 of file inferenceEngine_tpl.h.

References gum::credal::InferenceEngine< GUM_SCALAR >::_marginalMax, gum::credal::InferenceEngine< GUM_SCALAR >::_marginalMin, gum::credal::InferenceEngine< GUM_SCALAR >::_oldMarginalMax, gum::credal::InferenceEngine< GUM_SCALAR >::_oldMarginalMin, and gum::HashTable< Key, Val, Alloc >::size().

1012  {
1013  GUM_SCALAR eps = 0;
1014 #pragma omp parallel
1015  {
1016  GUM_SCALAR tEps = 0;
1017  GUM_SCALAR delta;
1018 
1020  int nsize = _marginalMin.size();
1021 
1022 #pragma omp for
1023 
1024  for (int i = 0; i < nsize; i++) {
1025  auto dSize = _marginalMin[i].size();
1026 
1027  for (Size j = 0; j < dSize; j++) {
1028  // on min
1029  delta = _marginalMin[i][j] - _oldMarginalMin[i][j];
1030  delta = (delta < 0) ? (-delta) : delta;
1031  tEps = (tEps < delta) ? delta : tEps;
1032 
1033  // on max
1034  delta = _marginalMax[i][j] - _oldMarginalMax[i][j];
1035  delta = (delta < 0) ? (-delta) : delta;
1036  tEps = (tEps < delta) ? delta : tEps;
1037 
1038  _oldMarginalMin[i][j] = _marginalMin[i][j];
1039  _oldMarginalMax[i][j] = _marginalMax[i][j];
1040  }
1041  } // end of : all variables
1042 
1043 #pragma omp critical(epsilon_max)
1044  {
1045 #pragma omp flush(eps)
1046  eps = (eps < tEps) ? tEps : eps;
1047  }
1048  }
1049 
1050  return eps;
1051  }
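
In formula form, the listing above computes

\[ \epsilon \;=\; \max_{X} \ \max_{x} \ \max\Bigl( \bigl|\underline{P}_{t}(x) - \underline{P}_{t-1}(x)\bigr|,\ \bigl|\overline{P}_{t}(x) - \overline{P}_{t-1}(x)\bigr| \Bigr), \]

where \( \underline{P}_{t}, \overline{P}_{t} \) are the lower and upper marginals at the current check and \( \underline{P}_{t-1}, \overline{P}_{t-1} \) the ones stored at the previous check.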


template<typename GUM_SCALAR >
void gum::credal::CNLoopyPropagation< GUM_SCALAR >::_computeExpectations ( )
protected

Since the network is binary, expectations can be computed from the final marginals which give us the credal set vertices.

Definition at line 1486 of file CNLoopyPropagation_tpl.h.

1486  {
1487  if (__infE::_modal.empty()) { return; }
1488 
1489  std::vector< std::vector< GUM_SCALAR > > vertices(
1490  2, std::vector< GUM_SCALAR >(2));
1491 
1492  for (auto node : __bnet->nodes()) {
1493  vertices[0][0] = __infE::_marginalMin[node][0];
1494  vertices[0][1] = __infE::_marginalMax[node][1];
1495 
1496  vertices[1][0] = __infE::_marginalMax[node][0];
1497  vertices[1][1] = __infE::_marginalMin[node][1];
1498 
1499  for (auto vertex = 0, vend = 2; vertex != vend; vertex++) {
1500  __infE::_updateExpectations(node, vertices[vertex]);
1501  // test credal sets vertices elim
1502  // remove with L2U since variables are binary
1503  // but does the user know that ?
1504  __infE::_updateCredalSets(
1505  node,
1506  vertices[vertex]); // no redundancy elimination with 2 vertices
1507  }
1508  }
1509  }
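
With binary variables, the two credal set vertices used in the listing above are built directly from the final marginal bounds:

\[ v_{0} = \bigl( \underline{P}(X{=}0),\ \overline{P}(X{=}1) \bigr), \qquad v_{1} = \bigl( \overline{P}(X{=}0),\ \underline{P}(X{=}1) \bigr), \]

and each vertex is passed to _updateExpectations and _updateCredalSets.
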
template<typename GUM_SCALAR >
void gum::credal::InferenceEngine< GUM_SCALAR >::_dynamicExpectations ( )
protected inherited

Rearrange lower and upper expectations to suit dynamic networks.

Definition at line 718 of file inferenceEngine_tpl.h.

References gum::credal::InferenceEngine< GUM_SCALAR >::_credalNet, gum::credal::InferenceEngine< GUM_SCALAR >::_dynamicExpMax, gum::credal::InferenceEngine< GUM_SCALAR >::_dynamicExpMin, gum::credal::InferenceEngine< GUM_SCALAR >::_expectationMax, gum::credal::InferenceEngine< GUM_SCALAR >::_expectationMin, gum::credal::InferenceEngine< GUM_SCALAR >::_modal, and gum::HashTable< Key, Val, Alloc >::empty().

Referenced by gum::credal::InferenceEngine< GUM_SCALAR >::dynamicExpectations(), and gum::credal::CNMonteCarloSampling< GUM_SCALAR, BNInferenceEngine >::makeInference().

718  {
719  // no modals, no expectations computed during inference
720  if (_expectationMin.empty() || _modal.empty()) return;
721 
722  // already called by the algorithm or the user
723  if (_dynamicExpMax.size() > 0 && _dynamicExpMin.size() > 0) return;
724 
725  // typedef typename std::map< int, GUM_SCALAR > innerMap;
726  using innerMap = typename gum::HashTable< int, GUM_SCALAR >;
727 
728  // typedef typename std::map< std::string, innerMap > outerMap;
729  using outerMap = typename gum::HashTable< std::string, innerMap >;
730 
731  // typedef typename std::map< std::string, std::vector< GUM_SCALAR > >
732  // mod;
733 
734  // si non dynamique, sauver directement _expectationMin et Max (revient au
735  // meme
736  // mais plus rapide)
737  outerMap expectationsMin, expectationsMax;
738 
739  for (const auto& elt : _expectationMin) {
740  std::string var_name, time_step;
741 
742  var_name = _credalNet->current_bn().variable(elt.first).name();
743  auto delim = var_name.find_first_of("_");
744  time_step = var_name.substr(delim + 1, var_name.size());
745  var_name = var_name.substr(0, delim);
746 
747  // to be sure (don't store not monitored variables' expectations)
748  // although it
749  // should be taken care of before this point
750  if (!_modal.exists(var_name)) continue;
751 
752  expectationsMin.getWithDefault(var_name, innerMap())
753  .getWithDefault(atoi(time_step.c_str()), 0) =
754  elt.second; // we iterate with min iterators
755  expectationsMax.getWithDefault(var_name, innerMap())
756  .getWithDefault(atoi(time_step.c_str()), 0) = _expectationMax[elt.first];
757  }
758 
759  for (const auto& elt : expectationsMin) {
760  typename std::vector< GUM_SCALAR > dynExp(elt.second.size());
761 
762  for (const auto& elt2 : elt.second)
763  dynExp[elt2.first] = elt2.second;
764 
765  _dynamicExpMin.insert(elt.first, dynExp);
766  }
767 
768  for (const auto& elt : expectationsMax) {
769  typename std::vector< GUM_SCALAR > dynExp(elt.second.size());
770 
771  for (const auto& elt2 : elt.second) {
772  dynExp[elt2.first] = elt2.second;
773  }
774 
775  _dynamicExpMax.insert(elt.first, dynExp);
776  }
777  }
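
The regrouping above assumes dynamic-network variable names of the form prefix_timestep (the name is split at its first '_'). A small illustrative sketch (the variable names and the instance `lp` are assumptions):

// With variables named "temp_0", "temp_1", "temp_2" and modalities inserted for "temp",
// the per-time-step bounds can be read back after inference:
lp.dynamicExpectations();                        // may also be called by the algorithm itself
const auto& lowTemp = lp.dynamicExpMin("temp");  // lowTemp[t] : lower expectation at time step t
const auto& upTemp  = lp.dynamicExpMax("temp");  // upTemp[t]  : upper expectation at time step t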


template<typename GUM_SCALAR >
void gum::credal::CNLoopyPropagation< GUM_SCALAR >::_enum_combi ( std::vector< std::vector< std::vector< GUM_SCALAR > > > &  msgs_p,
const NodeId id,
GUM_SCALAR &  msg_l_min,
GUM_SCALAR &  msg_l_max,
std::vector< GUM_SCALAR > &  lx,
const Idx pos 
)
protected

Used by _msgL.

As before, but for a message to a parent; the likelihood is taken into account.

Enumerate parent's messages.

Parameters
msgs_p    All the messages from the parents which will be enumerated.
id    The constant id of the node sending the message.
msg_l_min    The reference to the current lower value of the message to be sent.
msg_l_max    The reference to the current upper value of the message to be sent.
lx    The lower and upper likelihood.
pos    The position of the parent node to receive the message in the CPT of the one sending the message (first parent, second, ...).

Definition at line 468 of file CNLoopyPropagation_tpl.h.

474  {
475  GUM_SCALAR msg_l_min = real_msg_l_min;
476  GUM_SCALAR msg_l_max = real_msg_l_max;
477 
478  auto taille = msgs_p.size();
479 
480  // one parent node, the one receiving the message
481  if (taille == 0) {
482  GUM_SCALAR num_min = __cn->get_CPT_min()[id][1];
483  GUM_SCALAR num_max = __cn->get_CPT_max()[id][1];
484  GUM_SCALAR den_min = __cn->get_CPT_min()[id][0];
485  GUM_SCALAR den_max = __cn->get_CPT_max()[id][0];
486 
487  _compute_ext(msg_l_min, msg_l_max, lx, num_min, num_max, den_min, den_max);
488 
489  real_msg_l_min = msg_l_min;
490  real_msg_l_max = msg_l_max;
491  return;
492  }
493 
494  decltype(taille) msgPerm = 1;
495 #pragma omp parallel
496  {
497  GUM_SCALAR msg_lmin = msg_l_min;
498  GUM_SCALAR msg_lmax = msg_l_max;
499  std::vector< std::vector< GUM_SCALAR > > combi_msg_p(taille);
500 
501  decltype(taille) confs = 1;
502 #pragma omp for
503 
504  for (int i = 0; i < int(taille); i++) {
505  confs *= msgs_p[i].size();
506  }
507 
508 #pragma omp atomic
509  msgPerm *= confs;
510 #pragma omp barrier
511 #pragma omp flush(msgPerm)
512 
513 // direct binary representation of config, no need for iterators
514 #pragma omp for
515 
516  for (long j = 0; j < long(msgPerm); j++) {
517  // get jth msg :
518  auto jvalue = j;
519 
520  for (decltype(taille) i = 0; i < taille; i++) {
521  if (msgs_p[i].size() == 2) {
522  combi_msg_p[i] = (jvalue & 1) ? msgs_p[i][1] : msgs_p[i][0];
523  jvalue /= 2;
524  } else {
525  combi_msg_p[i] = msgs_p[i][0];
526  }
527  }
528 
529  _compute_ext(combi_msg_p, id, msg_lmin, msg_lmax, lx, pos);
530  }
531 
532 // there may be more threads here than in the for loop, therefor positive test
533 // is NECESSARY (init is -2)
534 #pragma omp critical(msglminmax)
535  {
536 #pragma omp flush(msg_l_min)
537 #pragma omp flush(msg_l_max)
538 
539  if ((msg_l_min > msg_lmin || msg_l_min == -2) && msg_lmin > 0) {
540  msg_l_min = msg_lmin;
541  }
542 
543  if ((msg_l_max < msg_lmax || msg_l_max == -2) && msg_lmax > 0) {
544  msg_l_max = msg_lmax;
545  }
546  }
547  }
548 
549  real_msg_l_min = msg_l_min;
550  real_msg_l_max = msg_l_max;
551  }
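
The number of parent-message combinations enumerated by the parallel loop above is

\[ \mathrm{msgPerm} \;=\; \prod_{i} \bigl|\mathrm{msgs\_p}[i]\bigr|, \qquad \bigl|\mathrm{msgs\_p}[i]\bigr| \in \{1, 2\}, \]

each parent contributing a single message when its lower and upper values coincide and two messages otherwise; the j-th combination is decoded from the binary representation of j.
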
template<typename GUM_SCALAR >
void gum::credal::CNLoopyPropagation< GUM_SCALAR >::_enum_combi ( std::vector< std::vector< std::vector< GUM_SCALAR > > > &  msgs_p,
const NodeId id,
GUM_SCALAR &  msg_p_min,
GUM_SCALAR &  msg_p_max 
)
protected

Used by _msgP.

Enumerates the combinations of the parents' messages, for a message sent to a child.

Enumerate parent's messages.

Parameters
msgs_p    All the messages from the parents which will be enumerated.
id    The constant id of the node sending the message.
msg_p_min    The reference to the current lower value of the message to be sent.
msg_p_max    The reference to the current upper value of the message to be sent.

Definition at line 392 of file CNLoopyPropagation_tpl.h.

396  {
397  auto taille = msgs_p.size();
398 
399  // source node
400  if (taille == 0) {
401  msg_p_min = __cn->get_CPT_min()[id][0];
402  msg_p_max = __cn->get_CPT_max()[id][0];
403  return;
404  }
405 
406  decltype(taille) msgPerm = 1;
407 #pragma omp parallel
408  {
409  GUM_SCALAR msg_pmin = msg_p_min;
410  GUM_SCALAR msg_pmax = msg_p_max;
411 
412  std::vector< std::vector< GUM_SCALAR > > combi_msg_p(taille);
413 
414  decltype(taille) confs = 1;
415 
416 #pragma omp for
417 
418  for (long i = 0; i < long(taille); i++) {
419  confs *= msgs_p[i].size();
420  }
421 
422 #pragma omp atomic
423  msgPerm *= confs;
424 #pragma omp barrier
425 #pragma omp \
426  flush // ( msgPerm ) let the compiler choose what to flush (due to mvsc)
427 
428 #pragma omp for
429 
430  for (int j = 0; j < int(msgPerm); j++) {
431  // get jth msg :
432  auto jvalue = j;
433 
434  for (decltype(taille) i = 0; i < taille; i++) {
435  if (msgs_p[i].size() == 2) {
436  combi_msg_p[i] = (jvalue & 1) ? msgs_p[i][1] : msgs_p[i][0];
437  jvalue /= 2;
438  } else {
439  combi_msg_p[i] = msgs_p[i][0];
440  }
441  }
442 
443  _compute_ext(combi_msg_p, id, msg_pmin, msg_pmax);
444  }
445 
446 // since min is _INF and max is 0 at init, there is no issue having more threads
447 // here
448 // than during for loop
449 #pragma omp critical(msgpminmax)
450  {
451 #pragma omp flush //( msg_p_min )
452  //#pragma omp flush ( msg_p_max ) let the compiler choose what to
453  // flush (due to mvsc)
454 
455  if (msg_p_min > msg_pmin) { msg_p_min = msg_pmin; }
456 
457  if (msg_p_max < msg_pmax) { msg_p_max = msg_pmax; }
458  }
459  }
460  return;
461  }
template<typename GUM_SCALAR >
void gum::credal::InferenceEngine< GUM_SCALAR >::_initExpectations ( )
protected inherited

Initialize lower and upper expectations before inference, with the lower expectation being initialized on the highest modality and the upper expectation being initialized on the lowest modality.

Definition at line 692 of file inferenceEngine_tpl.h.

References gum::credal::InferenceEngine< GUM_SCALAR >::_credalNet, gum::credal::InferenceEngine< GUM_SCALAR >::_expectationMax, gum::credal::InferenceEngine< GUM_SCALAR >::_expectationMin, gum::credal::InferenceEngine< GUM_SCALAR >::_modal, gum::HashTable< Key, Val, Alloc >::clear(), and gum::HashTable< Key, Val, Alloc >::insert().

Referenced by gum::credal::InferenceEngine< GUM_SCALAR >::eraseAllEvidence(), gum::credal::InferenceEngine< GUM_SCALAR >::insertModals(), and gum::credal::InferenceEngine< GUM_SCALAR >::insertModalsFile().

692  {
695 
696  if (_modal.empty()) return;
697 
698  for (auto node : _credalNet->current_bn().nodes()) {
699  std::string var_name, time_step;
700 
701  var_name = _credalNet->current_bn().variable(node).name();
702  auto delim = var_name.find_first_of("_");
703  var_name = var_name.substr(0, delim);
704 
705  if (!_modal.exists(var_name)) continue;
706 
707  _expectationMin.insert(node, _modal[var_name].back());
708  _expectationMax.insert(node, _modal[var_name].front());
709  }
710  }
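
Assuming the modality values of a variable are provided in increasing order, the initialization in the listing amounts to

\[ \underline{E}^{(0)}[X] \;=\; \max_{x} m_{X}(x), \qquad \overline{E}^{(0)}[X] \;=\; \min_{x} m_{X}(x), \]

where \( m_{X} \) are the modality values inserted for X, so that any expectation computed during inference can only tighten these bounds.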


template<typename GUM_SCALAR >
void gum::credal::CNLoopyPropagation< GUM_SCALAR >::_initialize ( )
protected

Topological forward propagation to initialize old marginals & messages.

Definition at line 609 of file CNLoopyPropagation_tpl.h.

References _INF, gum::ArcGraphPart::children(), GUM_ERROR, gum::Set< Key, Alloc >::insert(), gum::ArcGraphPart::parents(), and gum::Set< Key, Alloc >::size().

609  {
610  const DAG& graphe = __bnet->dag();
611 
612  // use const iterators with cbegin when available
613  for (auto node : __bnet->topologicalOrder()) {
614  _update_p.set(node, false);
615  _update_l.set(node, false);
616  NodeSet* _parents = new NodeSet();
617  _msg_l_sent.set(node, _parents);
618 
619  // accelerer init pour evidences
620  if (__infE::_evidence.exists(node)) {
621  if (__infE::_evidence[node][1] != 0.
622  && __infE::_evidence[node][1] != 1.) {
623  GUM_ERROR(OperationNotAllowed,
624  "CNLoopyPropagation can only handle HARD evidences");
625  }
626 
627  active_nodes_set.insert(node);
628  _update_l.set(node, true);
629  _update_p.set(node, true);
630 
631  if (__infE::_evidence[node][1] == (GUM_SCALAR)1.) {
632  _NodesL_min.set(node, _INF);
633  _NodesP_min.set(node, (GUM_SCALAR)1.);
634  } else if (__infE::_evidence[node][1] == (GUM_SCALAR)0.) {
635  _NodesL_min.set(node, (GUM_SCALAR)0.);
636  _NodesP_min.set(node, (GUM_SCALAR)0.);
637  }
638 
639  std::vector< GUM_SCALAR > marg(2);
640  marg[1] = _NodesP_min[node];
641  marg[0] = 1 - marg[1];
642 
643  __infE::_oldMarginalMin.set(node, marg);
644  __infE::_oldMarginalMax.set(node, marg);
645 
646  continue;
647  }
648 
649  NodeSet _par = graphe.parents(node);
650  NodeSet _enf = graphe.children(node);
651 
652  if (_par.size() == 0) {
653  active_nodes_set.insert(node);
654  _update_p.set(node, true);
655  _update_l.set(node, true);
656  }
657 
658  if (_enf.size() == 0) {
659  active_nodes_set.insert(node);
660  _update_p.set(node, true);
661  _update_l.set(node, true);
662  }
663 
668  const auto parents = &__bnet->cpt(node).variablesSequence();
669 
670  std::vector< std::vector< std::vector< GUM_SCALAR > > > msgs_p;
671  std::vector< std::vector< GUM_SCALAR > > msg_p;
672  std::vector< GUM_SCALAR > distri(2);
673 
674  // +1 from start to avoid _counting itself
675  // use const iterators when available with cbegin
676  for (auto jt = ++parents->begin(), theEnd = parents->end(); jt != theEnd;
677  ++jt) {
678  // compute probability distribution to avoid doing it multiple times
679  // (at
680  // each combination of messages)
681  distri[1] = _NodesP_min[__bnet->nodeId(**jt)];
682  distri[0] = (GUM_SCALAR)1. - distri[1];
683  msg_p.push_back(distri);
684 
685  if (_NodesP_max.exists(__bnet->nodeId(**jt))) {
686  distri[1] = _NodesP_max[__bnet->nodeId(**jt)];
687  distri[0] = (GUM_SCALAR)1. - distri[1];
688  msg_p.push_back(distri);
689  }
690 
691  msgs_p.push_back(msg_p);
692  msg_p.clear();
693  }
694 
695  GUM_SCALAR msg_p_min = 1.;
696  GUM_SCALAR msg_p_max = 0.;
697 
698  if (__cn->currentNodeType(node)
700  _enum_combi(msgs_p, node, msg_p_min, msg_p_max);
701  }
702 
703  if (msg_p_min <= (GUM_SCALAR)0.) { msg_p_min = (GUM_SCALAR)0.; }
704 
705  if (msg_p_max <= (GUM_SCALAR)0.) { msg_p_max = (GUM_SCALAR)0.; }
706 
707  _NodesP_min.set(node, msg_p_min);
708  std::vector< GUM_SCALAR > marg(2);
709  marg[1] = msg_p_min;
710  marg[0] = 1 - msg_p_min;
711 
712  __infE::_oldMarginalMin.set(node, marg);
713 
714  if (msg_p_min != msg_p_max) {
715  marg[1] = msg_p_max;
716  marg[0] = 1 - msg_p_max;
717  _NodesP_max.insert(node, msg_p_max);
718  }
719 
720  __infE::_oldMarginalMax.set(node, marg);
721 
722  _NodesL_min.set(node, (GUM_SCALAR)1.);
723  }
724 
725  for (auto arc : __bnet->arcs()) {
726  _ArcsP_min.set(arc, _NodesP_min[arc.tail()]);
727 
728  if (_NodesP_max.exists(arc.tail())) {
729  _ArcsP_max.set(arc, _NodesP_max[arc.tail()]);
730  }
731 
732  _ArcsL_min.set(arc, _NodesL_min[arc.tail()]);
733  }
734  }
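
As the listing shows, only hard evidence is accepted. A minimal sketch of inserting such evidence through the public interface (the node id, the values and the instance `lp` are illustrative assumptions):

gum::NodeProperty< std::vector< double > > evidence;
evidence.insert(observedNode, {0., 1.});  // observedNode is a gum::NodeId obtained elsewhere;
                                          // the binary node is fixed to state 1 (hard evidence)
lp.insertEvidence(evidence);
lp.makeInference();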


template<typename GUM_SCALAR >
void gum::credal::InferenceEngine< GUM_SCALAR >::_initMarginals ( )
protected inherited

Initialize lower and upper old marginals and marginals before inference, with the lower marginal being 1 and the upper 0.

Definition at line 660 of file inferenceEngine_tpl.h.

References gum::credal::InferenceEngine< GUM_SCALAR >::_credalNet, gum::credal::InferenceEngine< GUM_SCALAR >::_marginalMax, gum::credal::InferenceEngine< GUM_SCALAR >::_marginalMin, gum::credal::InferenceEngine< GUM_SCALAR >::_oldMarginalMax, gum::credal::InferenceEngine< GUM_SCALAR >::_oldMarginalMin, gum::HashTable< Key, Val, Alloc >::clear(), and gum::HashTable< Key, Val, Alloc >::insert().

Referenced by gum::credal::InferenceEngine< GUM_SCALAR >::eraseAllEvidence(), and gum::credal::InferenceEngine< GUM_SCALAR >::InferenceEngine().

660  {
665 
666  for (auto node : _credalNet->current_bn().nodes()) {
667  auto dSize = _credalNet->current_bn().variable(node).domainSize();
668  _marginalMin.insert(node, std::vector< GUM_SCALAR >(dSize, 1));
669  _oldMarginalMin.insert(node, std::vector< GUM_SCALAR >(dSize, 1));
670 
671  _marginalMax.insert(node, std::vector< GUM_SCALAR >(dSize, 0));
672  _oldMarginalMax.insert(node, std::vector< GUM_SCALAR >(dSize, 0));
673  }
674  }
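
In other words, for every node X and every state x,

\[ \underline{P}^{(0)}(x) \;=\; 1, \qquad \overline{P}^{(0)}(x) \;=\; 0, \]

so that the first bounds computed during inference always replace these initial values.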


template<typename GUM_SCALAR >
void gum::credal::InferenceEngine< GUM_SCALAR >::_initMarginalSets ( )
protected inherited

Initialize credal set vertices with empty sets.

Definition at line 677 of file inferenceEngine_tpl.h.

References gum::credal::InferenceEngine< GUM_SCALAR >::_credalNet, gum::credal::InferenceEngine< GUM_SCALAR >::_marginalSets, gum::credal::InferenceEngine< GUM_SCALAR >::_storeVertices, gum::HashTable< Key, Val, Alloc >::clear(), and gum::HashTable< Key, Val, Alloc >::insert().

Referenced by gum::credal::InferenceEngine< GUM_SCALAR >::eraseAllEvidence(), and gum::credal::InferenceEngine< GUM_SCALAR >::storeVertices().

677  {
679 
680  if (!_storeVertices) return;
681 
682  for (auto node : _credalNet->current_bn().nodes())
683  _marginalSets.insert(node, std::vector< std::vector< GUM_SCALAR > >());
684  }


template<typename GUM_SCALAR >
void gum::credal::CNLoopyPropagation< GUM_SCALAR >::_makeInferenceByOrderedArcs ( )
protected

Starts the inference with this inference type.

Definition at line 828 of file CNLoopyPropagation_tpl.h.

References gum::credal::CredalNet< GUM_SCALAR >::currentNodeType().

828  {
829  Size nbrArcs = __bnet->dag().sizeArcs();
830 
831  std::vector< cArcP > seq;
832  seq.reserve(nbrArcs);
833 
834  for (const auto& arc : __bnet->arcs()) {
835  seq.push_back(&arc);
836  }
837 
838  GUM_SCALAR eps;
839  // validate TestSuite
841 
842  do {
843  for (const auto it : seq) {
844  if (__cn->currentNodeType(it->tail())
846  || __cn->currentNodeType(it->head())
848  continue;
849  }
850 
851  _msgP(it->tail(), it->head());
852  _msgL(it->head(), it->tail());
853  }
854 
855  eps = _calculateEpsilon();
856 
858 
860  }

+ Here is the call graph for this function:

template<typename GUM_SCALAR >
void gum::credal::CNLoopyPropagation< GUM_SCALAR >::_makeInferenceByRandomOrder ( )
protected

Starts the inference with this inference type.

Definition at line 782 of file CNLoopyPropagation_tpl.h.

References gum::credal::CredalNet< GUM_SCALAR >::currentNodeType(), and gum::credal::lp::swap().

782  {
783  Size nbrArcs = __bnet->dag().sizeArcs();
784 
785  std::vector< cArcP > seq;
786  seq.reserve(nbrArcs);
787 
788  for (const auto& arc : __bnet->arcs()) {
789  seq.push_back(&arc);
790  }
791 
792  GUM_SCALAR eps;
793  // validate TestSuite
794  __infE::continueApproximationScheme(1.);
795 
796  do {
797  for (Size j = 0, theEnd = nbrArcs / 2; j < theEnd; j++) {
798  auto w1 = rand() % nbrArcs, w2 = rand() % nbrArcs;
799 
800  if (w1 == w2) { continue; }
801 
802  std::swap(seq[w1], seq[w2]);
803  }
804 
805  for (const auto it : seq) {
806  if (__cn->currentNodeType(it->tail())
807  == CredalNet< GUM_SCALAR >::NodeType::Indic
808  || __cn->currentNodeType(it->head())
809  == CredalNet< GUM_SCALAR >::NodeType::Indic) {
810  continue;
811  }
812 
813  _msgP(it->tail(), it->head());
814  _msgL(it->head(), it->tail());
815  }
816 
817  eps = _calculateEpsilon();
818 
819  __infE::updateApproximationScheme();
820 
821  } while (__infE::continueApproximationScheme(eps));
822  }

+ Here is the call graph for this function:

template<typename GUM_SCALAR >
void gum::credal::CNLoopyPropagation< GUM_SCALAR >::_makeInferenceNodeToNeighbours ( )
protected

Starts the inference with this inference type.

Definition at line 737 of file CNLoopyPropagation_tpl.h.

References gum::ArcGraphPart::children(), and gum::ArcGraphPart::parents().

737  {
738  const DAG& graphe = __bnet->dag();
739 
740  GUM_SCALAR eps;
741  // to validate TestSuite
742  __infE::continueApproximationScheme(1.);
743 
744  do {
745  for (auto node : active_nodes_set) {
746  for (auto chil : graphe.children(node)) {
747  if (__cn->currentNodeType(chil)
748  == CredalNet< GUM_SCALAR >::NodeType::Indic) {
749  continue;
750  }
751 
752  _msgP(node, chil);
753  }
754 
755  for (auto par : graphe.parents(node)) {
756  if (__cn->currentNodeType(node)
757  == CredalNet< GUM_SCALAR >::NodeType::Indic) {
758  continue;
759  }
760 
761  _msgL(node, par);
762  }
763  }
764 
765  eps = _calculateEpsilon();
766 
767  __infE::updateApproximationScheme();
768 
769  active_nodes_set.clear();
770  active_nodes_set = next_active_nodes_set;
771  next_active_nodes_set.clear();
772 
773  } while (__infE::continueApproximationScheme(eps)
774  && active_nodes_set.size() > 0);
775 
776  __infE::stopApproximationScheme(); // just to be sure the approximation
777  // scheme has been notified of
778  // the end of the loop
779  }

+ Here is the call graph for this function:
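Each of the three _makeInference* routines implements one scan order for the same message-passing sweep; which one runs is selected through inferenceType() before calling makeInference(). A hedged sketch (the enumerator name nodeToNeighbours is an assumption about CNLoopyPropagation::InferenceType; the member functions are the documented ones):

// cn : a gum::credal::CredalNet< double > built elsewhere
gum::credal::CNLoopyPropagation< double > cnlp(cn);
// assumed enumerator name; ordered-arc and random-order variants exist as well
cnlp.inferenceType(
    gum::credal::CNLoopyPropagation< double >::InferenceType::nodeToNeighbours);
cnlp.makeInference();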

template<typename GUM_SCALAR >
void gum::credal::CNLoopyPropagation< GUM_SCALAR >::_msgL ( const NodeId  X,
const NodeId  demanding_parent 
)
protected

Sends a message to one's parent, i.e. X is sending a message to a demanding_parent.

Parameters
X The constant node id of the node sending the message.
demanding_parent The constant node id of the node receiving the message.

Definition at line 863 of file CNLoopyPropagation_tpl.h.

References gum::Set< Key, Alloc >::empty(), gum::Set< Key, Alloc >::insert(), and gum::Set< Key, Alloc >::size().

863  {
864  NodeSet const& children = __bnet->children(Y);
865  NodeSet const& _parents = __bnet->parents(Y);
866 
867  const auto parents = &__bnet->cpt(Y).variablesSequence();
868 
869  if (((children.size() + parents->size() - 1) == 1)
870  && (!__infE::_evidence.exists(Y))) {
871  return;
872  }
873 
874  bool update_l = _update_l[Y];
875  bool update_p = _update_p[Y];
876 
877  if (!update_p && !update_l) { return; }
878 
879  _msg_l_sent[Y]->insert(X);
880 
881  // for future refresh LM/PI
882  if (_msg_l_sent[Y]->size() == _parents.size()) {
883  _msg_l_sent[Y]->clear();
884  _update_l[Y] = false;
885  }
886 
887  // refresh LM_part
888  if (update_l) {
889  if (!children.empty() && !__infE::_evidence.exists(Y)) {
890  GUM_SCALAR lmin = 1.;
891  GUM_SCALAR lmax = 1.;
892 
893  for (auto chil : children) {
894  lmin *= _ArcsL_min[Arc(Y, chil)];
895 
896  if (_ArcsL_max.exists(Arc(Y, chil))) {
897  lmax *= _ArcsL_max[Arc(Y, chil)];
898  } else {
899  lmax *= _ArcsL_min[Arc(Y, chil)];
900  }
901  }
902 
903  lmin = lmax;
904 
905  if (lmax != lmax && lmin == lmin) { lmax = lmin; }
906 
907  if (lmax != lmax && lmin != lmin) {
908  std::cout << "no likelihood defined [lmin, lmax] (incompatibles "
909  "evidence ?)"
910  << std::endl;
911  }
912 
913  if (lmin < 0.) { lmin = 0.; }
914 
915  if (lmax < 0.) { lmax = 0.; }
916 
917  // no need to update nodeL if evidence since nodeL will never be used
918 
919  _NodesL_min[Y] = lmin;
920 
921  if (lmin != lmax) {
922  _NodesL_max.set(Y, lmax);
923  } else if (_NodesL_max.exists(Y)) {
924  _NodesL_max.erase(Y);
925  }
926 
927  } // end of : node has children & no evidence
928 
929  } // end of : if update_l
930 
931  GUM_SCALAR lmin = _NodesL_min[Y];
932  GUM_SCALAR lmax;
933 
934  if (_NodesL_max.exists(Y)) {
935  lmax = _NodesL_max[Y];
936  } else {
937  lmax = lmin;
938  }
939 
944  if (lmin == lmax && lmin == 1.) {
945  _ArcsL_min[Arc(X, Y)] = lmin;
946 
947  if (_ArcsL_max.exists(Arc(X, Y))) { _ArcsL_max.erase(Arc(X, Y)); }
948 
949  return;
950  }
951 
952  // for each node, keep a table of the parents already updated; once all
953  // are updated, stop
954  // until an L or P message notification arrives
955 
956  if (update_p || update_l) {
957  std::vector< std::vector< std::vector< GUM_SCALAR > > > msgs_p;
958  std::vector< std::vector< GUM_SCALAR > > msg_p;
959  std::vector< GUM_SCALAR > distri(2);
960 
961  Idx pos;
962 
963  // +1 from start to avoid _counting itself
964  // use const iterators with cbegin when available
965  for (auto jt = ++parents->begin(), theEnd = parents->end(); jt != theEnd;
966  ++jt) {
967  if (__bnet->nodeId(**jt) == X) {
968  // remove the current variable from the size
969  pos = parents->pos(*jt) - 1;
970  continue;
971  }
972 
973  // compute probability distribution to avoid doing it multiple times
974  // (at
975  // each combination of messages)
976  distri[1] = _ArcsP_min[Arc(__bnet->nodeId(**jt), Y)];
977  distri[0] = GUM_SCALAR(1.) - distri[1];
978  msg_p.push_back(distri);
979 
980  if (_ArcsP_max.exists(Arc(__bnet->nodeId(**jt), Y))) {
981  distri[1] = _ArcsP_max[Arc(__bnet->nodeId(**jt), Y)];
982  distri[0] = GUM_SCALAR(1.) - distri[1];
983  msg_p.push_back(distri);
984  }
985 
986  msgs_p.push_back(msg_p);
987  msg_p.clear();
988  }
989 
990  GUM_SCALAR min = -2.;
991  GUM_SCALAR max = -2.;
992 
993  std::vector< GUM_SCALAR > lx;
994  lx.push_back(lmin);
995 
996  if (lmin != lmax) { lx.push_back(lmax); }
997 
998  _enum_combi(msgs_p, Y, min, max, lx, pos);
999 
1000  if (min == -2. || max == -2.) {
1001  if (min != -2.) {
1002  max = min;
1003  } else if (max != -2.) {
1004  min = max;
1005  } else {
1006  std::cout << std::endl;
1007  std::cout << "!!!! pas de message L calculable !!!!" << std::endl;
1008  return;
1009  }
1010  }
1011 
1012  if (min < 0.) { min = 0.; }
1013 
1014  if (max < 0.) { max = 0.; }
1015 
1016  bool update = false;
1017 
1018  if (min != _ArcsL_min[Arc(X, Y)]) {
1019  _ArcsL_min[Arc(X, Y)] = min;
1020  update = true;
1021  }
1022 
1023  if (_ArcsL_max.exists(Arc(X, Y))) {
1024  if (max != _ArcsL_max[Arc(X, Y)]) {
1025  if (max != min) {
1026  _ArcsL_max[Arc(X, Y)] = max;
1027  } else { // if ( max == min )
1028  _ArcsL_max.erase(Arc(X, Y));
1029  }
1030 
1031  update = true;
1032  }
1033  } else {
1034  if (max != min) {
1035  _ArcsL_max.insert(Arc(X, Y), max);
1036  update = true;
1037  }
1038  }
1039 
1040  if (update) {
1041  _update_l.set(X, true);
1042  next_active_nodes_set.insert(X);
1043  }
1044 
1045  } // end of update_p || update_l
1046  }

+ Here is the call graph for this function:
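Throughout _msgL (and the other message routines) tests such as lmin != lmin are the IEEE-754 idiom for detecting NaN: a NaN is the only floating-point value that compares unequal to itself. A self-contained illustration:

#include <cassert>
#include <limits>

int main() {
  double ok  = 0.5;
  double bad = std::numeric_limits< double >::quiet_NaN();
  assert(!(ok != ok));   // an ordinary value equals itself
  assert(bad != bad);    // only NaN satisfies x != x
}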

template<typename GUM_SCALAR >
void gum::credal::CNLoopyPropagation< GUM_SCALAR >::_msgP ( const NodeId  X,
const NodeId  demanding_child 
)
protected

Sends a message to one's child, i.e. X is sending a message to a demanding_child.

Parameters
X The constant node id of the node sending the message.
demanding_child The constant node id of the node receiving the message.

Definition at line 1049 of file CNLoopyPropagation_tpl.h.

References _INF, gum::Set< Key, Alloc >::erase(), and gum::Set< Key, Alloc >::size().

1050  {
1051  NodeSet const& children = __bnet->children(X);
1052 
1053  const auto parents = &__bnet->cpt(X).variablesSequence();
1054 
1055  if (((children.size() + parents->size() - 1) == 1)
1056  && (!__infE::_evidence.exists(X))) {
1057  return;
1058  }
1059 
1060  // LM_part ---- from all children but one --- the lonely one will get the
1061  // message
1062 
1063  if (__infE::_evidence.exists(X)) {
1064  _ArcsP_min[Arc(X, demanding_child)] = __infE::_evidence[X][1];
1065 
1066  if (_ArcsP_max.exists(Arc(X, demanding_child))) {
1067  _ArcsP_max.erase(Arc(X, demanding_child));
1068  }
1069 
1070  return;
1071  }
1072 
1073  bool update_l = _update_l[X];
1074  bool update_p = _update_p[X];
1075 
1076  if (!update_p && !update_l) { return; }
1077 
1078  GUM_SCALAR lmin = 1.;
1079  GUM_SCALAR lmax = 1.;
1080 
1081  // use cbegin if available
1082  for (auto chil : children) {
1083  if (chil == demanding_child) { continue; }
1084 
1085  lmin *= _ArcsL_min[Arc(X, chil)];
1086 
1087  if (_ArcsL_max.exists(Arc(X, chil))) {
1088  lmax *= _ArcsL_max[Arc(X, chil)];
1089  } else {
1090  lmax *= _ArcsL_min[Arc(X, chil)];
1091  }
1092  }
1093 
1094  if (lmin != lmin && lmax == lmax) { lmin = lmax; }
1095 
1096  if (lmax != lmax && lmin == lmin) { lmax = lmin; }
1097 
1098  if (lmax != lmax && lmin != lmin) {
1099  std::cout << "pas de vraisemblance definie [lmin, lmax] (observations "
1100  "incompatibles ?)"
1101  << std::endl;
1102  return;
1103  }
1104 
1105  if (lmin < 0.) { lmin = 0.; }
1106 
1107  if (lmax < 0.) { lmax = 0.; }
1108 
1109  // refresh PI_part
1110  GUM_SCALAR min = _INF;
1111  GUM_SCALAR max = 0.;
1112 
1113  if (update_p) {
1114  std::vector< std::vector< std::vector< GUM_SCALAR > > > msgs_p;
1115  std::vector< std::vector< GUM_SCALAR > > msg_p;
1116  std::vector< GUM_SCALAR > distri(2);
1117 
1118  // +1 from start to avoid _counting itself
1119  // use const_iterators if available
1120  for (auto jt = ++parents->begin(), theEnd = parents->end(); jt != theEnd;
1121  ++jt) {
1122  // compute probability distribution to avoid doing it multiple times
1123  // (at
1124  // each combination of messages)
1125  distri[1] = _ArcsP_min[Arc(__bnet->nodeId(**jt), X)];
1126  distri[0] = GUM_SCALAR(1.) - distri[1];
1127  msg_p.push_back(distri);
1128 
1129  if (_ArcsP_max.exists(Arc(__bnet->nodeId(**jt), X))) {
1130  distri[1] = _ArcsP_max[Arc(__bnet->nodeId(**jt), X)];
1131  distri[0] = GUM_SCALAR(1.) - distri[1];
1132  msg_p.push_back(distri);
1133  }
1134 
1135  msgs_p.push_back(msg_p);
1136  msg_p.clear();
1137  }
1138 
1139  _enum_combi(msgs_p, X, min, max);
1140 
1141  if (min < 0.) { min = 0.; }
1142 
1143  if (max < 0.) { max = 0.; }
1144 
1145  if (min == _INF || max == _INF) {
1146  std::cout << " ERREUR msg P min = max = INF " << std::endl;
1147  std::cout.flush();
1148  return;
1149  }
1150 
1151  _NodesP_min[X] = min;
1152 
1153  if (min != max) {
1154  _NodesP_max.set(X, max);
1155  } else if (_NodesP_max.exists(X)) {
1156  _NodesP_max.erase(X);
1157  }
1158 
1159  _update_p.set(X, false);
1160 
1161  } // end of update_p
1162  else {
1163  min = _NodesP_min[X];
1164 
1165  if (_NodesP_max.exists(X)) {
1166  max = _NodesP_max[X];
1167  } else {
1168  max = min;
1169  }
1170  }
1171 
1172  if (update_p || update_l) {
1173  GUM_SCALAR msg_p_min;
1174  GUM_SCALAR msg_p_max;
1175 
1176  // limit cases for min
1177  if (min == _INF && lmin == 0.) {
1178  std::cout << "MESSAGE P ERR (negatif) : pi = inf, l = 0" << std::endl;
1179  }
1180 
1181  if (lmin == _INF) { // infinite case
1182  msg_p_min = GUM_SCALAR(1.);
1183  } else if (min == 0. || lmin == 0.) {
1184  msg_p_min = 0;
1185  } else {
1186  msg_p_min = GUM_SCALAR(1. / (1. + ((1. / min - 1.) * 1. / lmin)));
1187  }
1188 
1189  // limit cases for max
1190  if (max == _INF && lmax == 0.) {
1191  std::cout << "MESSAGE P ERR (negatif) : pi = inf, l = 0" << std::endl;
1192  }
1193 
1194  if (lmax == _INF) { // infinite case
1195  msg_p_max = GUM_SCALAR(1.);
1196  } else if (max == 0. || lmax == 0.) {
1197  msg_p_max = 0;
1198  } else {
1199  msg_p_max = GUM_SCALAR(1. / (1. + ((1. / max - 1.) * 1. / lmax)));
1200  }
1201 
1202  if (msg_p_min != msg_p_min && msg_p_max == msg_p_max) {
1203  msg_p_min = msg_p_max;
1204  std::cout << std::endl;
1205  std::cout << "msg_p_min is NaN" << std::endl;
1206  }
1207 
1208  if (msg_p_max != msg_p_max && msg_p_min == msg_p_min) {
1209  msg_p_max = msg_p_min;
1210  std::cout << std::endl;
1211  std::cout << "msg_p_max is NaN" << std::endl;
1212  }
1213 
1214  if (msg_p_max != msg_p_max && msg_p_min != msg_p_min) {
1215  std::cout << std::endl;
1216  std::cout << "pas de message P calculable (verifier observations)"
1217  << std::endl;
1218  return;
1219  }
1220 
1221  if (msg_p_min < 0.) { msg_p_min = 0.; }
1222 
1223  if (msg_p_max < 0.) { msg_p_max = 0.; }
1224 
1225  bool update = false;
1226 
1227  if (msg_p_min != _ArcsP_min[Arc(X, demanding_child)]) {
1228  _ArcsP_min[Arc(X, demanding_child)] = msg_p_min;
1229  update = true;
1230  }
1231 
1232  if (_ArcsP_max.exists(Arc(X, demanding_child))) {
1233  if (msg_p_max != _ArcsP_max[Arc(X, demanding_child)]) {
1234  if (msg_p_max != msg_p_min) {
1235  _ArcsP_max[Arc(X, demanding_child)] = msg_p_max;
1236  } else { // if ( msg_p_max == msg_p_min )
1237  _ArcsP_max.erase(Arc(X, demanding_child));
1238  }
1239 
1240  update = true;
1241  }
1242  } else {
1243  if (msg_p_max != msg_p_min) {
1244  _ArcsP_max.insert(Arc(X, demanding_child), msg_p_max);
1245  update = true;
1246  }
1247  }
1248 
1249  if (update) {
1250  _update_p.set(demanding_child, true);
1251  next_active_nodes_set.insert(demanding_child);
1252  }
1253 
1254  } // end of : update_l || update_p
1255  }

+ Here is the call graph for this function:
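The value finally written on the arc by _msgP (and reused in _updateMarginals) combines a prior-like component pi (min or max above) with a likelihood-like component l (lmin or lmax) as msg = 1 / (1 + (1/pi - 1) / l), which is algebraically the same as pi*l / (pi*l + (1 - pi)). A small numeric check of that identity:

#include <cassert>
#include <cmath>

int main() {
  double pi = 0.3;   // prior-like component
  double l  = 4.0;   // likelihood-like component

  double msg   = 1. / (1. + ((1. / pi - 1.) * 1. / l));   // form used in the code
  double check = (pi * l) / (pi * l + (1. - pi));         // rewritten form

  assert(std::fabs(msg - check) < 1e-12);   // both give ~0.6316 here
}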

template<typename GUM_SCALAR >
void gum::credal::CNLoopyPropagation< GUM_SCALAR >::_refreshLMsPIs ( bool  refreshIndic = false)
protected

Get the last messages from one's parents and children.

Definition at line 1258 of file CNLoopyPropagation_tpl.h.

References _INF, gum::Set< Key, Alloc >::empty(), and gum::Set< Key, Alloc >::erase().

1258  {
1259  for (auto node : __bnet->nodes()) {
1260  if ((!refreshIndic)
1261  && __cn->currentNodeType(node)
1262  == CredalNet< GUM_SCALAR >::NodeType::Indic) {
1263  continue;
1264  }
1265 
1266  NodeSet const& children = __bnet->children(node);
1267 
1268  auto parents = &__bnet->cpt(node).variablesSequence();
1269 
1270  if (_update_l[node]) {
1271  GUM_SCALAR lmin = 1.;
1272  GUM_SCALAR lmax = 1.;
1273 
1274  if (!children.empty() && !__infE::_evidence.exists(node)) {
1275  for (auto chil : children) {
1276  lmin *= _ArcsL_min[Arc(node, chil)];
1277 
1278  if (_ArcsL_max.exists(Arc(node, chil))) {
1279  lmax *= _ArcsL_max[Arc(node, chil)];
1280  } else {
1281  lmax *= _ArcsL_min[Arc(node, chil)];
1282  }
1283  }
1284 
1285  if (lmin != lmin && lmax == lmax) { lmin = lmax; }
1286 
1287  lmax = lmin;
1288 
1289  if (lmax != lmax && lmin != lmin) {
1290  std::cout
1291  << "pas de vraisemblance definie [lmin, lmax] (observations "
1292  "incompatibles ?)"
1293  << std::endl;
1294  return;
1295  }
1296 
1297  if (lmin < 0.) { lmin = 0.; }
1298 
1299  if (lmax < 0.) { lmax = 0.; }
1300 
1301  _NodesL_min[node] = lmin;
1302 
1303  if (lmin != lmax) {
1304  _NodesL_max.set(node, lmax);
1305  } else if (_NodesL_max.exists(node)) {
1306  _NodesL_max.erase(node);
1307  }
1308  }
1309 
1310  } // end of : update_l
1311 
1312  if (_update_p[node]) {
1313  if ((parents->size() - 1) > 0 && !__infE::_evidence.exists(node)) {
1314  std::vector< std::vector< std::vector< GUM_SCALAR > > > msgs_p;
1315  std::vector< std::vector< GUM_SCALAR > > msg_p;
1316  std::vector< GUM_SCALAR > distri(2);
1317 
1318  // +1 from start to avoid _counting itself
1319  // cbegin
1320  for (auto jt = ++parents->begin(), theEnd = parents->end();
1321  jt != theEnd;
1322  ++jt) {
1323  // compute probability distribution to avoid doing it multiple
1324  // times
1325  // (at each combination of messages)
1326  distri[1] = _ArcsP_min[Arc(__bnet->nodeId(**jt), node)];
1327  distri[0] = GUM_SCALAR(1.) - distri[1];
1328  msg_p.push_back(distri);
1329 
1330  if (_ArcsP_max.exists(Arc(__bnet->nodeId(**jt), node))) {
1331  distri[1] = _ArcsP_max[Arc(__bnet->nodeId(**jt), node)];
1332  distri[0] = GUM_SCALAR(1.) - distri[1];
1333  msg_p.push_back(distri);
1334  }
1335 
1336  msgs_p.push_back(msg_p);
1337  msg_p.clear();
1338  }
1339 
1340  GUM_SCALAR min = _INF;
1341  GUM_SCALAR max = 0.;
1342 
1343  _enum_combi(msgs_p, node, min, max);
1344 
1345  if (min < 0.) { min = 0.; }
1346 
1347  if (max < 0.) { max = 0.; }
1348 
1349  _NodesP_min[node] = min;
1350 
1351  if (min != max) {
1352  _NodesP_max.set(node, max);
1353  } else if (_NodesP_max.exists(node)) {
1354  _NodesP_max.erase(node);
1355  }
1356 
1357  _update_p[node] = false;
1358  }
1359  } // end of update_p
1360 
1361  } // end of : for each node
1362  }

+ Here is the call graph for this function:

template<typename GUM_SCALAR >
void gum::credal::InferenceEngine< GUM_SCALAR >::_repetitiveInit ( )
protectedinherited

Initialize _t0 and _t1 clusters.

Definition at line 780 of file inferenceEngine_tpl.h.

References gum::credal::InferenceEngine< GUM_SCALAR >::_credalNet, gum::credal::InferenceEngine< GUM_SCALAR >::_t0, gum::credal::InferenceEngine< GUM_SCALAR >::_t1, gum::credal::InferenceEngine< GUM_SCALAR >::_timeSteps, gum::HashTable< Key, Val, Alloc >::clear(), GUM_ERROR, and gum::HashTable< Key, Val, Alloc >::insert().

Referenced by gum::credal::CNMonteCarloSampling< GUM_SCALAR, BNInferenceEngine >::makeInference(), and gum::credal::InferenceEngine< GUM_SCALAR >::setRepetitiveInd().

780  {
781  _timeSteps = 0;
782  _t0.clear();
783  _t1.clear();
784 
785  // t = 0 vars belongs to _t0 as keys
786  for (auto node : _credalNet->current_bn().dag().nodes()) {
787  std::string var_name = _credalNet->current_bn().variable(node).name();
788  auto delim = var_name.find_first_of("_");
789 
790  if (delim > var_name.size()) {
791  GUM_ERROR(InvalidArgument,
792  "void InferenceEngine< GUM_SCALAR "
793  ">::_repetitiveInit() : the network does not "
794  "appear to be dynamic");
795  }
796 
797  std::string time_step = var_name.substr(delim + 1, 1);
798 
799  if (time_step.compare("0") == 0) _t0.insert(node, std::vector< NodeId >());
800  }
801 
802  // t = 1 vars belongs to either _t0 as member value or _t1 as keys
803  for (const auto& node : _credalNet->current_bn().dag().nodes()) {
804  std::string var_name = _credalNet->current_bn().variable(node).name();
805  auto delim = var_name.find_first_of("_");
806  std::string time_step = var_name.substr(delim + 1, var_name.size());
807  var_name = var_name.substr(0, delim);
808  delim = time_step.find_first_of("_");
809  time_step = time_step.substr(0, delim);
810 
811  if (time_step.compare("1") == 0) {
812  bool found = false;
813 
814  for (const auto& elt : _t0) {
815  std::string var_0_name =
816  _credalNet->current_bn().variable(elt.first).name();
817  delim = var_0_name.find_first_of("_");
818  var_0_name = var_0_name.substr(0, delim);
819 
820  if (var_name.compare(var_0_name) == 0) {
821  const Potential< GUM_SCALAR >* potential(
822  &_credalNet->current_bn().cpt(node));
823  const Potential< GUM_SCALAR >* potential2(
824  &_credalNet->current_bn().cpt(elt.first));
825 
826  if (potential->domainSize() == potential2->domainSize())
827  _t0[elt.first].push_back(node);
828  else
829  _t1.insert(node, std::vector< NodeId >());
830 
831  found = true;
832  break;
833  }
834  }
835 
836  if (!found) { _t1.insert(node, std::vector< NodeId >()); }
837  }
838  }
839 
840  // t > 1 vars belongs to either _t0 or _t1 as member value
841  // remember _timeSteps
842  for (auto node : _credalNet->current_bn().dag().nodes()) {
843  std::string var_name = _credalNet->current_bn().variable(node).name();
844  auto delim = var_name.find_first_of("_");
845  std::string time_step = var_name.substr(delim + 1, var_name.size());
846  var_name = var_name.substr(0, delim);
847  delim = time_step.find_first_of("_");
848  time_step = time_step.substr(0, delim);
849 
850  if (time_step.compare("0") != 0 && time_step.compare("1") != 0) {
851  // keep max time_step
852  if (atoi(time_step.c_str()) > _timeSteps)
853  _timeSteps = atoi(time_step.c_str());
854 
855  std::string var_0_name;
856  bool found = false;
857 
858  for (const auto& elt : _t0) {
859  std::string var_0_name =
860  _credalNet->current_bn().variable(elt.first).name();
861  delim = var_0_name.find_first_of("_");
862  var_0_name = var_0_name.substr(0, delim);
863 
864  if (var_name.compare(var_0_name) == 0) {
865  const Potential< GUM_SCALAR >* potential(
866  &_credalNet->current_bn().cpt(node));
867  const Potential< GUM_SCALAR >* potential2(
868  &_credalNet->current_bn().cpt(elt.first));
869 
870  if (potential->domainSize() == potential2->domainSize()) {
871  _t0[elt.first].push_back(node);
872  found = true;
873  break;
874  }
875  }
876  }
877 
878  if (!found) {
879  for (const auto& elt : _t1) {
880  std::string var_0_name =
881  _credalNet->current_bn().variable(elt.first).name();
882  auto delim = var_0_name.find_first_of("_");
883  var_0_name = var_0_name.substr(0, delim);
884 
885  if (var_name.compare(var_0_name) == 0) {
886  const Potential< GUM_SCALAR >* potential(
887  &_credalNet->current_bn().cpt(node));
888  const Potential< GUM_SCALAR >* potential2(
889  &_credalNet->current_bn().cpt(elt.first));
890 
891  if (potential->domainSize() == potential2->domainSize()) {
892  _t1[elt.first].push_back(node);
893  break;
894  }
895  }
896  }
897  }
898  }
899  }
900  }

+ Here is the call graph for this function:

+ Here is the caller graph for this function:
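_repetitiveInit assumes the dynamic-network naming convention <name>_<timestep>: the prefix before the first underscore identifies the variable, the part after it the time slice, and an InvalidArgument is raised when no underscore is present. A small stand-alone sketch of the parsing done above for a name such as "temp_0":

#include <cassert>
#include <string>

int main() {
  std::string var_name = "temp_0";                 // time slice 0 of variable "temp"
  auto        delim    = var_name.find_first_of("_");

  std::string prefix    = var_name.substr(0, delim);       // "temp"
  std::string time_step = var_name.substr(delim + 1, 1);   // "0"

  assert(prefix == "temp" && time_step == "0");
}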

template<typename GUM_SCALAR >
void gum::credal::InferenceEngine< GUM_SCALAR >::_updateCredalSets ( const NodeId id,
const std::vector< GUM_SCALAR > &  vertex,
const bool elimRedund = false 
)
inlineprotectedinherited

Given a node id and one of its possible vertices, update its credal set.

To maximise efficiency, don't pass a vertex we know is inside the polytope (i.e. not at an extreme value for any modality)

Parameters
id The id of the node to be updated
vertex A (potential) vertex of the node credal set
elimRedund remove redundant vertices (inside a facet)

Definition at line 924 of file inferenceEngine_tpl.h.

References gum::credal::InferenceEngine< GUM_SCALAR >::_marginalMax, gum::credal::InferenceEngine< GUM_SCALAR >::_marginalMin, gum::credal::InferenceEngine< GUM_SCALAR >::_marginalSets, gum::HashTable< Key, Val, Alloc >::cbegin(), gum::HashTable< Key, Val, Alloc >::cend(), gum::credal::LRSWrapper< GUM_SCALAR >::elimRedundVrep(), gum::credal::LRSWrapper< GUM_SCALAR >::fillV(), gum::credal::LRSWrapper< GUM_SCALAR >::getOutput(), and gum::credal::LRSWrapper< GUM_SCALAR >::setUpV().

Referenced by gum::credal::MultipleInferenceEngine< GUM_SCALAR, BNInferenceEngine >::_verticesFusion().

927  {
928  auto& nodeCredalSet = _marginalSets[id];
929  auto dsize = vertex.size();
930 
931  bool eq = true;
932 
933  for (auto it = nodeCredalSet.cbegin(), itEnd = nodeCredalSet.cend();
934  it != itEnd;
935  ++it) {
936  eq = true;
937 
938  for (Size i = 0; i < dsize; i++) {
939  if (std::fabs(vertex[i] - (*it)[i]) > 1e-6) {
940  eq = false;
941  break;
942  }
943  }
944 
945  if (eq) break;
946  }
947 
948  if (!eq || nodeCredalSet.size() == 0) {
949  nodeCredalSet.push_back(vertex);
950  return;
951  } else
952  return;
953 
954  // because of next lambda return condition
955  if (nodeCredalSet.size() == 1) return;
956 
957  // check that the point and all previously added ones are not inside the
958  // actual
959  // polytope
960  auto itEnd = std::remove_if(
961  nodeCredalSet.begin(),
962  nodeCredalSet.end(),
963  [&](const std::vector< GUM_SCALAR >& v) -> bool {
964  for (auto jt = v.cbegin(),
965  jtEnd = v.cend(),
966  minIt = _marginalMin[id].cbegin(),
967  minItEnd = _marginalMin[id].cend(),
968  maxIt = _marginalMax[id].cbegin(),
969  maxItEnd = _marginalMax[id].cend();
970  jt != jtEnd && minIt != minItEnd && maxIt != maxItEnd;
971  ++jt, ++minIt, ++maxIt) {
972  if ((std::fabs(*jt - *minIt) < 1e-6 || std::fabs(*jt - *maxIt) < 1e-6)
973  && std::fabs(*minIt - *maxIt) > 1e-6)
974  return false;
975  }
976  return true;
977  });
978 
979  nodeCredalSet.erase(itEnd, nodeCredalSet.end());
980 
981  // we need at least 2 points to make a convex combination
982  if (!elimRedund || nodeCredalSet.size() <= 2) return;
983 
984  // there may be points not inside the polytope but on one of it's facet,
985  // meaning it's still a convex combination of vertices of this facet. Here
986  // we
987  // need lrs.
988  LRSWrapper< GUM_SCALAR > lrsWrapper;
989  lrsWrapper.setUpV((unsigned int)dsize, (unsigned int)(nodeCredalSet.size()));
990 
991  for (const auto& vtx : nodeCredalSet)
992  lrsWrapper.fillV(vtx);
993 
994  lrsWrapper.elimRedundVrep();
995 
996  _marginalSets[id] = lrsWrapper.getOutput();
997  }

+ Here is the call graph for this function:

+ Here is the caller graph for this function:
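The remove_if lambda above discards a point unless at least one of its coordinates lies (within 1e-6) on the current lower or upper marginal while that marginal interval is not degenerate: interior points cannot be vertices of the credal set. A stand-alone version of that test, with illustrative names that are not part of the API:

#include <cmath>
#include <vector>

// true when v touches a non-degenerate marginal bound on some modality (point is kept)
bool touchesBounds(const std::vector< double >& v,
                   const std::vector< double >& marginalMin,
                   const std::vector< double >& marginalMax) {
  for (std::size_t i = 0; i < v.size(); ++i) {
    bool onBound = std::fabs(v[i] - marginalMin[i]) < 1e-6
                   || std::fabs(v[i] - marginalMax[i]) < 1e-6;
    if (onBound && std::fabs(marginalMin[i] - marginalMax[i]) > 1e-6) return true;
  }
  return false;
}

// with marginalMin = {0.2, 0.6} and marginalMax = {0.4, 0.8},
// {0.2, 0.8} is kept while {0.3, 0.7} would be removed.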

template<typename GUM_SCALAR >
void gum::credal::InferenceEngine< GUM_SCALAR >::_updateExpectations ( const NodeId id,
const std::vector< GUM_SCALAR > &  vertex 
)
inlineprotectedinherited

Given a node id and one of its possible vertices obtained during inference, update this node's lower and upper expectations.

Parameters
id The id of the node to be updated
vertex A (potential) vertex of the node credal set

Definition at line 903 of file inferenceEngine_tpl.h.

References gum::credal::InferenceEngine< GUM_SCALAR >::_credalNet, gum::credal::InferenceEngine< GUM_SCALAR >::_expectationMax, gum::credal::InferenceEngine< GUM_SCALAR >::_expectationMin, and gum::credal::InferenceEngine< GUM_SCALAR >::_modal.

904  {
905  std::string var_name = _credalNet->current_bn().variable(id).name();
906  auto delim = var_name.find_first_of("_");
907 
908  var_name = var_name.substr(0, delim);
909 
910  if (_modal.exists(var_name) /*_modal.find(var_name) != _modal.end()*/) {
911  GUM_SCALAR exp = 0;
912  auto vsize = vertex.size();
913 
914  for (Size mod = 0; mod < vsize; mod++)
915  exp += vertex[mod] * _modal[var_name][mod];
916 
917  if (exp > _expectationMax[id]) _expectationMax[id] = exp;
918 
919  if (exp < _expectationMin[id]) _expectationMin[id] = exp;
920  }
921  }
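_updateExpectations simply takes the inner product of the vertex with the modality values registered for the variable (see insertModals / insertModalsFile) and widens the stored expectation interval when the result falls outside it. A tiny worked example of the inner product:

#include <cassert>
#include <cmath>
#include <vector>

int main() {
  std::vector< double > modal  = {0., 1., 2.};      // modality values of the variable
  std::vector< double > vertex = {0.2, 0.5, 0.3};   // one credal-set vertex

  double exp = 0.;
  for (std::size_t mod = 0; mod < vertex.size(); ++mod)
    exp += vertex[mod] * modal[mod];

  assert(std::fabs(exp - 1.1) < 1e-12);   // 0*0.2 + 1*0.5 + 2*0.3 = 1.1
}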
template<typename GUM_SCALAR >
void gum::credal::CNLoopyPropagation< GUM_SCALAR >::_updateIndicatrices ( )
protected

Only update indicator variables ("indicatrices") at the end of the computations (calls _msgP).

Definition at line 1469 of file CNLoopyPropagation_tpl.h.

1469  {
1470  for (auto node : __bnet->nodes()) {
1471  if (__cn->currentNodeType(node)
1472  != CredalNet< GUM_SCALAR >::NodeType::Indic) {
1473  continue;
1474  }
1475 
1476  for (auto pare : __bnet->parents(node)) {
1477  _msgP(pare, node);
1478  }
1479  }
1480 
1481  _refreshLMsPIs(true);
1482  _updateMarginals();
1483  }
template<typename GUM_SCALAR >
void gum::credal::CNLoopyPropagation< GUM_SCALAR >::_updateMarginals ( )
protected

Compute marginals from up-to-date messages.

Definition at line 1365 of file CNLoopyPropagation_tpl.h.

References _INF.

1365  {
1366  for (auto node : __bnet->nodes()) {
1367  GUM_SCALAR msg_p_min = 1.;
1368  GUM_SCALAR msg_p_max = 0.;
1369 
1370  if (__infE::_evidence.exists(node)) {
1371  if (__infE::_evidence[node][1] == 0.) {
1372  msg_p_min = (GUM_SCALAR)0.;
1373  } else if (__infE::_evidence[node][1] == 1.) {
1374  msg_p_min = 1.;
1375  }
1376 
1377  msg_p_max = msg_p_min;
1378  } else {
1379  GUM_SCALAR min = _NodesP_min[node];
1380  GUM_SCALAR max;
1381 
1382  if (_NodesP_max.exists(node)) {
1383  max = _NodesP_max[node];
1384  } else {
1385  max = min;
1386  }
1387 
1388  GUM_SCALAR lmin = _NodesL_min[node];
1389  GUM_SCALAR lmax;
1390 
1391  if (_NodesL_max.exists(node)) {
1392  lmax = _NodesL_max[node];
1393  } else {
1394  lmax = lmin;
1395  }
1396 
1397  if (min == _INF || max == _INF) {
1398  std::cout << " min ou max === _INF !!!!!!!!!!!!!!!!!!!!!!!!!! "
1399  << std::endl;
1400  return;
1401  }
1402 
1403  if (min == _INF && lmin == 0.) {
1404  std::cout << "proba ERR (negatif) : pi = inf, l = 0" << std::endl;
1405  return;
1406  }
1407 
1408  if (lmin == _INF) {
1409  msg_p_min = GUM_SCALAR(1.);
1410  } else if (min == 0. || lmin == 0.) {
1411  msg_p_min = GUM_SCALAR(0.);
1412  } else {
1413  msg_p_min = GUM_SCALAR(1. / (1. + ((1. / min - 1.) * 1. / lmin)));
1414  }
1415 
1416  if (max == _INF && lmax == 0.) {
1417  std::cout << "proba ERR (negatif) : pi = inf, l = 0" << std::endl;
1418  return;
1419  }
1420 
1421  if (lmax == _INF) {
1422  msg_p_max = GUM_SCALAR(1.);
1423  } else if (max == 0. || lmax == 0.) {
1424  msg_p_max = GUM_SCALAR(0.);
1425  } else {
1426  msg_p_max = GUM_SCALAR(1. / (1. + ((1. / max - 1.) * 1. / lmax)));
1427  }
1428  }
1429 
1430  if (msg_p_min != msg_p_min && msg_p_max == msg_p_max) {
1431  msg_p_min = msg_p_max;
1432  std::cout << std::endl;
1433  std::cout << "msg_p_min is NaN" << std::endl;
1434  }
1435 
1436  if (msg_p_max != msg_p_max && msg_p_min == msg_p_min) {
1437  msg_p_max = msg_p_min;
1438  std::cout << std::endl;
1439  std::cout << "msg_p_max is NaN" << std::endl;
1440  }
1441 
1442  if (msg_p_max != msg_p_max && msg_p_min != msg_p_min) {
1443  std::cout << std::endl;
1444  std::cout << "Please check the observations (no proba can be computed)"
1445  << std::endl;
1446  return;
1447  }
1448 
1449  if (msg_p_min < 0.) { msg_p_min = 0.; }
1450 
1451  if (msg_p_max < 0.) { msg_p_max = 0.; }
1452 
1453  __infE::_marginalMin[node][0] = 1 - msg_p_max;
1454  __infE::_marginalMax[node][0] = 1 - msg_p_min;
1455  __infE::_marginalMin[node][1] = msg_p_min;
1456  __infE::_marginalMax[node][1] = msg_p_max;
1457  }
1458  }
INLINE bool gum::ApproximationScheme::continueApproximationScheme ( double  error)
inherited

Update the scheme w.r.t the new error.

Test the stopping criteria that are enabled.

Parameters
error The new error value.
Returns
false if the state becomes != ApproximationSchemeSTATE::Continue
Exceptions
OperationNotAllowed Raised if state != ApproximationSchemeSTATE::Continue.

Definition at line 225 of file approximationScheme_inl.h.

References gum::ApproximationScheme::_current_epsilon, gum::ApproximationScheme::_current_rate, gum::ApproximationScheme::_current_state, gum::ApproximationScheme::_current_step, gum::ApproximationScheme::_enabled_eps, gum::ApproximationScheme::_enabled_max_iter, gum::ApproximationScheme::_enabled_max_time, gum::ApproximationScheme::_enabled_min_rate_eps, gum::ApproximationScheme::_eps, gum::ApproximationScheme::_history, gum::ApproximationScheme::_last_epsilon, gum::ApproximationScheme::_max_iter, gum::ApproximationScheme::_max_time, gum::ApproximationScheme::_min_rate_eps, gum::ApproximationScheme::_stopScheme(), gum::ApproximationScheme::_timer, gum::IApproximationSchemeConfiguration::Continue, gum::IApproximationSchemeConfiguration::Epsilon, GUM_EMIT3, GUM_ERROR, gum::IApproximationSchemeConfiguration::Limit, gum::IApproximationSchemeConfiguration::messageApproximationScheme(), gum::IApproximationSchemeConfiguration::onProgress, gum::IApproximationSchemeConfiguration::Rate, gum::ApproximationScheme::startOfPeriod(), gum::ApproximationScheme::stateApproximationScheme(), gum::Timer::step(), gum::IApproximationSchemeConfiguration::TimeLimit, and gum::ApproximationScheme::verbosity().

Referenced by gum::GibbsKL< GUM_SCALAR >::_computeKL(), gum::SamplingInference< GUM_SCALAR >::_loopApproxInference(), gum::learning::GreedyHillClimbing::learnStructure(), gum::learning::LocalSearchWithTabuList::learnStructure(), and gum::credal::CNMonteCarloSampling< GUM_SCALAR, BNInferenceEngine >::makeInference().

225  {
226  // For coherence, we fix the time used in the method
227 
228  double timer_step = _timer.step();
229 
230  if (_enabled_max_time) {
231  if (timer_step > _max_time) {
232  _stopScheme(ApproximationSchemeSTATE::TimeLimit);
233  return false;
234  }
235  }
236 
237  if (!startOfPeriod()) { return true; }
238 
239  if (stateApproximationScheme() != ApproximationSchemeSTATE::Continue) {
240  GUM_ERROR(OperationNotAllowed,
241  "state of the approximation scheme is not correct : "
242  + messageApproximationScheme());
243  }
244 
245  if (verbosity()) { _history.push_back(error); }
246 
247  if (_enabled_max_iter) {
248  if (_current_step > _max_iter) {
249  _stopScheme(ApproximationSchemeSTATE::Limit);
250  return false;
251  }
252  }
253 
254  _last_epsilon = _current_epsilon;
255  _current_epsilon = error; // the eps rate criterion needs it, so the
256  // assignment was moved up from the eps block below
257 
258  if (_enabled_eps) {
259  if (_current_epsilon <= _eps) {
260  _stopScheme(ApproximationSchemeSTATE::Epsilon);
261  return false;
262  }
263  }
264 
265  if (_last_epsilon >= 0.) {
266  if (_current_epsilon > .0) {
267  // ! _current_epsilon can be 0. AND epsilon
268  // isEnabled can be disabled !
269  _current_rate =
270  std::fabs((_current_epsilon - _last_epsilon) / _current_epsilon);
271  }
272  // limit with current eps ---> 0 is | 1 - ( last_eps / 0 ) | --->
273  // infinity; the else means a return false if the rate criterion below is
274  // enabled, as we would have returned false if epsilon was enabled
275  else {
276  _current_rate = _min_rate_eps;
277  }
278 
279  if (_enabled_min_rate_eps) {
280  if (_current_rate <= _min_rate_eps) {
281  _stopScheme(ApproximationSchemeSTATE::Rate);
282  return false;
283  }
284  }
285  }
286 
287  if (stateApproximationScheme() == ApproximationSchemeSTATE::Continue) {
288  if (onProgress.hasListener()) {
289  GUM_EMIT3(onProgress, _current_step, _current_epsilon, timer_step);
290  }
291 
292  return true;
293  } else {
294  return false;
295  }
296  }

+ Here is the call graph for this function:

+ Here is the caller graph for this function:
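continueApproximationScheme is the per-sweep test used by the _makeInference* loops above: the algorithm computes its current error, bumps the scheme with updateApproximationScheme(), and keeps iterating while the call returns true (it returns false as soon as an enabled criterion, epsilon, rate, iteration count or timeout, fires). Stripped of the message passing, the driver skeleton used by this class looks like:

GUM_SCALAR eps;
do {
  // ... one full message-passing sweep ...
  eps = _calculateEpsilon();               // current error of the sweep
  __infE::updateApproximationScheme();     // count the step
} while (__infE::continueApproximationScheme(eps));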

template<typename GUM_SCALAR >
const CredalNet< GUM_SCALAR > & gum::credal::InferenceEngine< GUM_SCALAR >::credalNet ( )
inherited

Get this credal network.

Returns
A constant reference to this CredalNet.

Definition at line 56 of file inferenceEngine_tpl.h.

References gum::credal::InferenceEngine< GUM_SCALAR >::_credalNet.

Referenced by gum::credal::InferenceEngine< GUM_SCALAR >::InferenceEngine().

56  {
57  return *_credalNet;
58  }

+ Here is the caller graph for this function:

INLINE double gum::ApproximationScheme::currentTime ( ) const
virtualinherited

Returns the current running time in seconds.

Returns
Returns the current running time in seconds.

Implements gum::IApproximationSchemeConfiguration.

Definition at line 126 of file approximationScheme_inl.h.

References gum::ApproximationScheme::_timer, and gum::Timer::step().

Referenced by gum::learning::genericBNLearner::currentTime().

126 { return _timer.step(); }

+ Here is the call graph for this function:

+ Here is the caller graph for this function:

INLINE void gum::ApproximationScheme::disableEpsilon ( )
virtualinherited

Disable stopping criterion on epsilon.

Implements gum::IApproximationSchemeConfiguration.

Definition at line 52 of file approximationScheme_inl.h.

References gum::ApproximationScheme::_enabled_eps.

Referenced by gum::learning::genericBNLearner::disableEpsilon().

52 { _enabled_eps = false; }

+ Here is the caller graph for this function:

INLINE void gum::ApproximationScheme::disableMaxIter ( )
virtualinherited

Disable stopping criterion on max iterations.

Implements gum::IApproximationSchemeConfiguration.

Definition at line 103 of file approximationScheme_inl.h.

References gum::ApproximationScheme::_enabled_max_iter.

Referenced by gum::credal::CNMonteCarloSampling< GUM_SCALAR, BNInferenceEngine >::__mcInitApproximationScheme(), gum::learning::genericBNLearner::disableMaxIter(), and gum::learning::GreedyHillClimbing::GreedyHillClimbing().

103 { _enabled_max_iter = false; }

+ Here is the caller graph for this function:

INLINE void gum::ApproximationScheme::disableMaxTime ( )
virtualinherited

Disable stopping criterion on timeout.

Returns
Disable stopping criterion on timeout.

Implements gum::IApproximationSchemeConfiguration.

Definition at line 129 of file approximationScheme_inl.h.

References gum::ApproximationScheme::_enabled_max_time.

Referenced by gum::learning::genericBNLearner::disableMaxTime(), and gum::learning::GreedyHillClimbing::GreedyHillClimbing().

129 { _enabled_max_time = false; }

+ Here is the caller graph for this function:

INLINE void gum::ApproximationScheme::disableMinEpsilonRate ( )
virtualinherited

Disable stopping criterion on epsilon rate.

Implements gum::IApproximationSchemeConfiguration.

Definition at line 77 of file approximationScheme_inl.h.

References gum::ApproximationScheme::_enabled_min_rate_eps.

Referenced by gum::credal::CNMonteCarloSampling< GUM_SCALAR, BNInferenceEngine >::__mcInitApproximationScheme(), gum::GibbsKL< GUM_SCALAR >::_computeKL(), gum::learning::genericBNLearner::disableMinEpsilonRate(), and gum::learning::GreedyHillClimbing::GreedyHillClimbing().

77  {
78  _enabled_min_rate_eps = false;
79  }

+ Here is the caller graph for this function:

template<typename GUM_SCALAR >
void gum::credal::InferenceEngine< GUM_SCALAR >::dynamicExpectations ( )
inherited

Compute dynamic expectations.

See also
_dynamicExpectations Only call this if an algorithm does not call it by itself.

Definition at line 713 of file inferenceEngine_tpl.h.

References gum::credal::InferenceEngine< GUM_SCALAR >::_dynamicExpectations().

713  {
714  _dynamicExpectations();
715  }

+ Here is the call graph for this function:

template<typename GUM_SCALAR >
const std::vector< GUM_SCALAR > & gum::credal::InferenceEngine< GUM_SCALAR >::dynamicExpMax ( const std::string &  varName) const
inherited

Get the upper dynamic expectation of a given variable prefix (without the time step included, i.e. call with "temp" to get "temp_0", ..., "temp_T").

Parameters
varName The variable name prefix whose upper expectation we want.
Returns
A constant reference to the variable upper expectation over all time steps.

Definition at line 501 of file inferenceEngine_tpl.h.

References gum::credal::InferenceEngine< GUM_SCALAR >::_dynamicExpMax, and GUM_ERROR.

502  {
503  std::string errTxt = "const std::vector< GUM_SCALAR > & InferenceEngine< "
504  "GUM_SCALAR >::dynamicExpMax ( const std::string & "
505  "varName ) const : ";
506 
507  if (_dynamicExpMax.empty())
508  GUM_ERROR(OperationNotAllowed,
509  errTxt + "_dynamicExpectations() needs to be called before");
510 
511  if (!_dynamicExpMax.exists(
512  varName) /*_dynamicExpMin.find(varName) == _dynamicExpMin.end()*/)
513  GUM_ERROR(NotFound, errTxt + "variable name not found : " << varName);
514 
515  return _dynamicExpMax[varName];
516  }
template<typename GUM_SCALAR >
const std::vector< GUM_SCALAR > & gum::credal::InferenceEngine< GUM_SCALAR >::dynamicExpMin ( const std::string &  varName) const
inherited

Get the lower dynamic expectation of a given variable prefix (without the time step included, i.e. call with "temp" to get "temp_0", ..., "temp_T").

Parameters
varName The variable name prefix whose lower expectation we want.
Returns
A constant reference to the variable lower expectation over all time steps.

Definition at line 483 of file inferenceEngine_tpl.h.

References gum::credal::InferenceEngine< GUM_SCALAR >::_dynamicExpMin, and GUM_ERROR.

484  {
485  std::string errTxt = "const std::vector< GUM_SCALAR > & InferenceEngine< "
486  "GUM_SCALAR >::dynamicExpMin ( const std::string & "
487  "varName ) const : ";
488 
489  if (_dynamicExpMin.empty())
490  GUM_ERROR(OperationNotAllowed,
491  errTxt + "_dynamicExpectations() needs to be called before");
492 
493  if (!_dynamicExpMin.exists(
494  varName) /*_dynamicExpMin.find(varName) == _dynamicExpMin.end()*/)
495  GUM_ERROR(NotFound, errTxt + "variable name not found : " << varName);
496 
497  return _dynamicExpMin[varName];
498  }
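For dynamic networks the per-time-step expectations can be regrouped by variable prefix once inference is done. A hedged usage sketch restricted to members documented here (the file name is a placeholder):

// cnlp : an inference engine on a dynamic credal network, e.g. CNLoopyPropagation< double >
cnlp.insertModalsFile("modalities.modal");   // placeholder path
cnlp.makeInference();
cnlp.dynamicExpectations();                  // regroups expectations per variable prefix

const auto& tMin = cnlp.dynamicExpMin("temp");   // lower expectations of temp_0 ... temp_T
const auto& tMax = cnlp.dynamicExpMax("temp");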
INLINE void gum::ApproximationScheme::enableEpsilon ( )
virtualinherited

Enable stopping criterion on epsilon.

Implements gum::IApproximationSchemeConfiguration.

Definition at line 55 of file approximationScheme_inl.h.

References gum::ApproximationScheme::_enabled_eps.

Referenced by gum::credal::CNMonteCarloSampling< GUM_SCALAR, BNInferenceEngine >::__mcInitApproximationScheme(), and gum::learning::genericBNLearner::enableEpsilon().

55 { _enabled_eps = true; }

+ Here is the caller graph for this function:

INLINE void gum::ApproximationScheme::enableMaxIter ( )
virtualinherited

Enable stopping criterion on max iterations.

Implements gum::IApproximationSchemeConfiguration.

Definition at line 106 of file approximationScheme_inl.h.

References gum::ApproximationScheme::_enabled_max_iter.

Referenced by gum::learning::genericBNLearner::enableMaxIter().

106 { _enabled_max_iter = true; }

+ Here is the caller graph for this function:

INLINE void gum::ApproximationScheme::enableMaxTime ( )
virtualinherited

Enable stopping criterion on timeout.

Implements gum::IApproximationSchemeConfiguration.

Definition at line 132 of file approximationScheme_inl.h.

References gum::ApproximationScheme::_enabled_max_time.

Referenced by gum::credal::CNMonteCarloSampling< GUM_SCALAR, BNInferenceEngine >::CNMonteCarloSampling(), and gum::learning::genericBNLearner::enableMaxTime().

132 { _enabled_max_time = true; }

+ Here is the caller graph for this function:

INLINE void gum::ApproximationScheme::enableMinEpsilonRate ( )
virtualinherited

Enable stopping criterion on epsilon rate.

Implements gum::IApproximationSchemeConfiguration.

Definition at line 82 of file approximationScheme_inl.h.

References gum::ApproximationScheme::_enabled_min_rate_eps.

Referenced by gum::GibbsKL< GUM_SCALAR >::_computeKL(), and gum::learning::genericBNLearner::enableMinEpsilonRate().

82  {
83  _enabled_min_rate_eps = true;
84  }

+ Here is the caller graph for this function:

INLINE double gum::ApproximationScheme::epsilon ( ) const
virtualinherited

Returns the value of epsilon.

Returns
Returns the value of epsilon.

Implements gum::IApproximationSchemeConfiguration.

Definition at line 49 of file approximationScheme_inl.h.

References gum::ApproximationScheme::_eps.

Referenced by gum::ImportanceSampling< GUM_SCALAR >::_onContextualize(), and gum::learning::genericBNLearner::epsilon().

49 { return _eps; }

+ Here is the caller graph for this function:

template<typename GUM_SCALAR >
void gum::credal::CNLoopyPropagation< GUM_SCALAR >::eraseAllEvidence ( )
virtual

Erase all inference related data to perform another one.

You need to insert evidence again if needed but modalities are kept. You can insert new ones by using the appropriate method which will delete the old ones.

Reimplemented from gum::credal::InferenceEngine< GUM_SCALAR >.

Definition at line 580 of file CNLoopyPropagation_tpl.h.

580  {
581  __infE::eraseAllEvidence();
582 
583  _ArcsL_min.clear();
584  _ArcsL_max.clear();
585  _ArcsP_min.clear();
586  _ArcsP_max.clear();
587  _NodesL_min.clear();
588  _NodesL_max.clear();
589  _NodesP_min.clear();
590  _NodesP_max.clear();
591 
592  _InferenceUpToDate = false;
593 
594  if (_msg_l_sent.size() > 0) {
595  for (auto node : __bnet->nodes()) {
596  delete _msg_l_sent[node];
597  }
598  }
599 
600  _msg_l_sent.clear();
601  _update_l.clear();
602  _update_p.clear();
603 
604  active_nodes_set.clear();
605  next_active_nodes_set.clear();
606  }
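Since eraseAllEvidence() drops the evidence and all inference data while keeping the modalities, the same engine can be reused for several evidence sets. A minimal hedged sketch (file and variable names are placeholders):

// cn : a gum::credal::CredalNet< double > built elsewhere
gum::credal::CNLoopyPropagation< double > cnlp(cn);

cnlp.insertEvidenceFile("case1.evi");            // placeholder path
cnlp.makeInference();
auto lower1 = cnlp.marginalMin("someVariable");  // copy the bounds before resetting

cnlp.eraseAllEvidence();                         // evidence and messages are cleared
cnlp.insertEvidenceFile("case2.evi");
cnlp.makeInference();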
template<typename GUM_SCALAR >
const GUM_SCALAR & gum::credal::InferenceEngine< GUM_SCALAR >::expectationMax ( const NodeId  id) const
inherited

Get the upper expectation of a given node id.

Parameters
id The node id whose upper expectation we want.
Returns
A constant reference to this node upper expectation.

Definition at line 476 of file inferenceEngine_tpl.h.

References gum::credal::InferenceEngine< GUM_SCALAR >::_expectationMax.

476  {
477  try {
478  return _expectationMax[id];
479  } catch (NotFound& err) { throw(err); }
480  }
template<typename GUM_SCALAR >
const GUM_SCALAR & gum::credal::InferenceEngine< GUM_SCALAR >::expectationMax ( const std::string &  varName) const
inherited

Get the upper expectation of a given variable name.

Parameters
varName The variable name whose upper expectation we want.
Returns
A constant reference to this variable upper expectation.

Definition at line 459 of file inferenceEngine_tpl.h.

References gum::credal::InferenceEngine< GUM_SCALAR >::_credalNet, and gum::credal::InferenceEngine< GUM_SCALAR >::_expectationMax.

460  {
461  try {
462  return _expectationMax[_credalNet->current_bn().idFromName(varName)];
463  } catch (NotFound& err) { throw(err); }
464  }
expe _expectationMax
Upper expectations, if some variables modalities were inserted.
const CredalNet< GUM_SCALAR > * _credalNet
A pointer to the Credal Net used.
template<typename GUM_SCALAR >
const GUM_SCALAR & gum::credal::InferenceEngine< GUM_SCALAR >::expectationMin ( const NodeId  id) const
inherited

Get the lower expectation of a given node id.

Parameters
id: The node id whose lower expectation we want.
Returns
A constant reference to this node's lower expectation.

Definition at line 468 of file inferenceEngine_tpl.h.

References gum::credal::InferenceEngine< GUM_SCALAR >::_expectationMin.

468  {
469  try {
470  return _expectationMin[id];
471  } catch (NotFound& err) { throw(err); }
472  }
expe _expectationMin
Lower expectations, if some variables modalities were inserted.
template<typename GUM_SCALAR >
const GUM_SCALAR & gum::credal::InferenceEngine< GUM_SCALAR >::expectationMin ( const std::string &  varName) const
inherited

Get the lower expectation of a given variable name.

Parameters
varName: The variable name whose lower expectation we want.
Returns
A constant reference to this variable's lower expectation.

Definition at line 451 of file inferenceEngine_tpl.h.

References gum::credal::InferenceEngine< GUM_SCALAR >::_credalNet, and gum::credal::InferenceEngine< GUM_SCALAR >::_expectationMin.

452  {
453  try {
454  return _expectationMin[_credalNet->current_bn().idFromName(varName)];
455  } catch (NotFound& err) { throw(err); }
456  }
const CredalNet< GUM_SCALAR > * _credalNet
A pointer to the Credal Net used.
expe _expectationMin
Lower expectations, if some variables modalities were inserted.
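A short sketch reading the expectation bounds after inference; it assumes modalities were inserted beforehand (insertModals or insertModalsFile), and the variable name is illustrative:

#include <iostream>
#include <string>
#include <agrum/CN/CNLoopyPropagation.h>

// Hypothetical helper: prints [E_min, E_max] for one variable.
void printExpectation(const gum::credal::CNLoopyPropagation< double >& lp,
                      const std::string& varName) {
  std::cout << "E[" << varName << "] in [ " << lp.expectationMin(varName)
            << ", " << lp.expectationMax(varName) << " ]" << std::endl;
}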
template<typename GUM_SCALAR >
const std::string gum::credal::InferenceEngine< GUM_SCALAR >::getApproximationSchemeMsg ( )
inlineinherited

Get approximation scheme state.

Returns
A constant string about approximation scheme state.

Definition at line 513 of file inferenceEngine.h.

References gum::IApproximationSchemeConfiguration::messageApproximationScheme().

513  {
514  return this->messageApproximationScheme();
515  }
std::string messageApproximationScheme() const
Returns the approximation scheme message.

+ Here is the call graph for this function:

template<typename GUM_SCALAR >
const NodeProperty< std::vector< NodeId > > & gum::credal::InferenceEngine< GUM_SCALAR >::getT0Cluster ( ) const
inherited

Get the _t0 cluster.

Returns
A constant reference to the _t0 cluster.

Definition at line 1001 of file inferenceEngine_tpl.h.

References gum::credal::InferenceEngine< GUM_SCALAR >::_t0.

1001  {
1002  return _t0;
1003  }
cluster _t0
Clusters of nodes used with dynamic networks.
template<typename GUM_SCALAR >
const NodeProperty< std::vector< NodeId > > & gum::credal::InferenceEngine< GUM_SCALAR >::getT1Cluster ( ) const
inherited

Get the _t1 cluster.

Returns
A constant reference to the _t1 cluster.

Definition at line 1007 of file inferenceEngine_tpl.h.

References gum::credal::InferenceEngine< GUM_SCALAR >::_t1.

1007  {
1008  return _t1;
1009  }
cluster _t1
Clusters of nodes used with dynamic networks.
template<typename GUM_SCALAR >
VarMod2BNsMap< GUM_SCALAR > * gum::credal::InferenceEngine< GUM_SCALAR >::getVarMod2BNsMap ( )
inherited

Get optimum IBayesNet.

Returns
A pointer to the optimal net object.

Definition at line 138 of file inferenceEngine_tpl.h.

References gum::credal::InferenceEngine< GUM_SCALAR >::_dbnOpt.

138  {
139  return &_dbnOpt;
140  }
VarMod2BNsMap< GUM_SCALAR > _dbnOpt
Object used to efficiently store optimal bayes net during inference, for some algorithms.
INLINE const std::vector< double > & gum::ApproximationScheme::history ( ) const
virtualinherited

Returns the scheme history.

Returns
Returns the scheme history.
Exceptions
OperationNotAllowed: Raised if the scheme has not been performed or if verbosity is set to false.

Implements gum::IApproximationSchemeConfiguration.

Definition at line 171 of file approximationScheme_inl.h.

References gum::ApproximationScheme::_history, GUM_ERROR, gum::ApproximationScheme::stateApproximationScheme(), gum::IApproximationSchemeConfiguration::Undefined, and gum::ApproximationScheme::verbosity().

Referenced by gum::learning::genericBNLearner::history().

171  {
172  if (stateApproximationScheme() == ApproximationSchemeSTATE::Undefined) {
173  GUM_ERROR(OperationNotAllowed,
174  "state of the approximation scheme is undefined");
175  }
176 
177  if (verbosity() == false) {
178  GUM_ERROR(OperationNotAllowed, "No history when verbosity=false");
179  }
180 
181  return _history;
182  }
ApproximationSchemeSTATE stateApproximationScheme() const
Returns the approximation scheme state.
bool verbosity() const
Returns true if verbosity is enabled.
std::vector< double > _history
The scheme history, used only if verbosity == true.
#define GUM_ERROR(type, msg)
Definition: exceptions.h:66

+ Here is the call graph for this function:

+ Here is the caller graph for this function:
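A sketch reading the convergence history; verbosity must be enabled before inference, otherwise history() raises OperationNotAllowed. The engine lp is assumed to be already configured:

#include <iostream>
#include <agrum/CN/CNLoopyPropagation.h>

void traceConvergence(gum::credal::CNLoopyPropagation< double >& lp) {
  lp.setVerbosity(true);   // history is only recorded when verbosity is on
  lp.makeInference();

  std::cout << lp.messageApproximationScheme() << " after " << lp.nbrIterations()
            << " iterations" << std::endl;
  for (double err : lp.history()) { std::cout << err << std::endl; }
}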

template<typename GUM_SCALAR >
void gum::credal::CNLoopyPropagation< GUM_SCALAR >::inferenceType ( InferenceType  inft)

Set the inference type.

Parameters
inft: The chosen InferenceType.

Definition at line 1564 of file CNLoopyPropagation_tpl.h.

References gum::credal::CNLoopyPropagation< GUM_SCALAR >::__inferenceType.

1564  {
1565  __inferenceType = inft;
1566  }
InferenceType __inferenceType
The chosen inference type.
template<typename GUM_SCALAR >
CNLoopyPropagation< GUM_SCALAR >::InferenceType gum::credal::CNLoopyPropagation< GUM_SCALAR >::inferenceType ( )

Get the inference type.

Returns
The inference type.

Definition at line 1570 of file CNLoopyPropagation_tpl.h.

References gum::credal::CNLoopyPropagation< GUM_SCALAR >::__inferenceType.

1570  {
1571  return __inferenceType;
1572  }
InferenceType __inferenceType
The chosen inference type.
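A sketch selecting the message-passing scheme before inference; nodeToNeighbours is the default enumerator named below, the other enumerators are not listed here:

#include <agrum/CN/CNLoopyPropagation.h>

void useExplicitOrdering(gum::credal::CNLoopyPropagation< double >& lp) {
  using LP = gum::credal::CNLoopyPropagation< double >;

  lp.inferenceType(LP::InferenceType::nodeToNeighbours);   // explicit, same as the default
  lp.makeInference();
}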
INLINE void gum::ApproximationScheme::initApproximationScheme ( )
inherited

Initialise the scheme.

Definition at line 185 of file approximationScheme_inl.h.

References gum::ApproximationScheme::_current_epsilon, gum::ApproximationScheme::_current_rate, gum::ApproximationScheme::_current_state, gum::ApproximationScheme::_current_step, gum::ApproximationScheme::_history, gum::ApproximationScheme::_timer, gum::IApproximationSchemeConfiguration::Continue, and gum::Timer::reset().

Referenced by gum::credal::CNMonteCarloSampling< GUM_SCALAR, BNInferenceEngine >::__mcInitApproximationScheme(), gum::GibbsKL< GUM_SCALAR >::_computeKL(), gum::SamplingInference< GUM_SCALAR >::_loopApproxInference(), gum::SamplingInference< GUM_SCALAR >::_onStateChanged(), gum::learning::GreedyHillClimbing::learnStructure(), and gum::learning::LocalSearchWithTabuList::learnStructure().

185  {
187  _current_step = 0;
189  _history.clear();
190  _timer.reset();
191  }
double _current_epsilon
Current epsilon.
void reset()
Reset the timer.
Definition: timer_inl.h:29
double _current_rate
Current rate.
Size _current_step
The current step.
std::vector< double > _history
The scheme history, used only if verbosity == true.
ApproximationSchemeSTATE _current_state
The current state.

+ Here is the call graph for this function:

+ Here is the caller graph for this function:

template<typename GUM_SCALAR >
void gum::credal::InferenceEngine< GUM_SCALAR >::insertEvidence ( const std::map< std::string, std::vector< GUM_SCALAR > > &  eviMap)
inherited

Insert evidence from map.

Parameters
eviMap: The map of variable names to likelihoods.

Definition at line 226 of file inferenceEngine_tpl.h.

References gum::credal::InferenceEngine< GUM_SCALAR >::_credalNet, gum::credal::InferenceEngine< GUM_SCALAR >::_evidence, gum::HashTable< Key, Val, Alloc >::clear(), gum::HashTable< Key, Val, Alloc >::empty(), GUM_SHOWERROR, and gum::HashTable< Key, Val, Alloc >::insert().

227  {
228  if (!_evidence.empty()) _evidence.clear();
229 
230  for (auto it = eviMap.cbegin(), theEnd = eviMap.cend(); it != theEnd; ++it) {
231  NodeId id;
232 
233  try {
234  id = _credalNet->current_bn().idFromName(it->first);
235  } catch (NotFound& err) {
236  GUM_SHOWERROR(err);
237  continue;
238  }
239 
240  _evidence.insert(id, it->second);
241  }
242  }
unsigned int NodeId
Type for node ids.
Definition: graphElements.h:97
#define GUM_SHOWERROR(e)
Definition: exceptions.h:73
const CredalNet< GUM_SCALAR > * _credalNet
A pointer to the Credal Net used.
margi _evidence
Holds observed variables states.
void clear()
Removes all the elements in the hash table.
value_type & insert(const Key &key, const Val &val)
Adds a new element (actually a copy of this element) into the hash table.
bool empty() const noexcept
Indicates whether the hash table is empty.

+ Here is the call graph for this function:
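A sketch building the evidence map; the variable names and binary likelihoods are illustrative, and names unknown to the credal net are reported and skipped, as the loop above shows:

#include <map>
#include <string>
#include <vector>
#include <agrum/CN/CNLoopyPropagation.h>

void observeNodes(gum::credal::CNLoopyPropagation< double >& lp) {
  std::map< std::string, std::vector< double > > evi;
  evi["smoker"]     = {0., 1.};   // hypothetical binary variable observed in state 1
  evi["bronchitis"] = {1., 0.};   // hypothetical binary variable observed in state 0

  lp.insertEvidence(evi);
  lp.makeInference();
}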

template<typename GUM_SCALAR >
void gum::credal::InferenceEngine< GUM_SCALAR >::insertEvidence ( const NodeProperty< std::vector< GUM_SCALAR > > &  evidence)
inherited

Insert evidence from Property.

Parameters
evidence: The Property on nodes containing the likelihoods.

Definition at line 248 of file inferenceEngine_tpl.h.

References gum::credal::InferenceEngine< GUM_SCALAR >::_credalNet, gum::credal::InferenceEngine< GUM_SCALAR >::_evidence, gum::HashTable< Key, Val, Alloc >::clear(), gum::HashTable< Key, Val, Alloc >::empty(), GUM_SHOWERROR, and gum::HashTable< Key, Val, Alloc >::insert().

249  {
250  if (!_evidence.empty()) _evidence.clear();
251 
252  // use cbegin() to get const_iterator when available in aGrUM hashtables
253  for (const auto& elt : evidence) {
254  try {
255  _credalNet->current_bn().variable(elt.first);
256  } catch (NotFound& err) {
257  GUM_SHOWERROR(err);
258  continue;
259  }
260 
261  _evidence.insert(elt.first, elt.second);
262  }
263  }
#define GUM_SHOWERROR(e)
Definition: exceptions.h:73
const CredalNet< GUM_SCALAR > * _credalNet
A pointer to the Credal Net used.
margi _evidence
Holds observed variables states.
void clear()
Removes all the elements in the hash table.
value_type & insert(const Key &key, const Val &val)
Adds a new element (actually a copy of this element) into the hash table.
bool empty() const noexcept
Indicates whether the hash table is empty.

+ Here is the call graph for this function:

template<typename GUM_SCALAR >
virtual void gum::credal::CNLoopyPropagation< GUM_SCALAR >::insertEvidenceFile ( const std::string &  path)
inlinevirtual

Insert evidence from file.

Parameters
path: The path to the evidence file.

Reimplemented from gum::credal::InferenceEngine< GUM_SCALAR >.

Definition at line 380 of file CNLoopyPropagation.h.

References gum::credal::InferenceEngine< GUM_SCALAR >::insertEvidenceFile().

380  {
381  InferenceEngine< GUM_SCALAR >::insertEvidenceFile(path);
382  };
virtual void insertEvidenceFile(const std::string &path)
Insert evidence from file.

+ Here is the call graph for this function:

template<typename GUM_SCALAR >
void gum::credal::InferenceEngine< GUM_SCALAR >::insertModals ( const std::map< std::string, std::vector< GUM_SCALAR > > &  modals)
inherited

Insert variables modalities from map to compute expectations.

Parameters
modals: The map of variable names to modalities.

Definition at line 190 of file inferenceEngine_tpl.h.

References gum::credal::InferenceEngine< GUM_SCALAR >::_credalNet, gum::credal::InferenceEngine< GUM_SCALAR >::_initExpectations(), gum::credal::InferenceEngine< GUM_SCALAR >::_modal, and GUM_SHOWERROR.

191  {
192  if (!_modal.empty()) _modal.clear();
193 
194  for (auto it = modals.cbegin(), theEnd = modals.cend(); it != theEnd; ++it) {
195  NodeId id;
196 
197  try {
198  id = _credalNet->current_bn().idFromName(it->first);
199  } catch (NotFound& err) {
200  GUM_SHOWERROR(err);
201  continue;
202  }
203 
204  // check that modals are net compatible
205  auto dSize = _credalNet->current_bn().variable(id).domainSize();
206 
207  if (dSize != it->second.size()) continue;
208 
209  // GUM_ERROR(OperationNotAllowed, "void InferenceEngine< GUM_SCALAR
210  // >::insertModals( const std::map< std::string, std::vector< GUM_SCALAR
211  // > >
212  // &modals) : modalities does not respect variable cardinality : " <<
213  // _credalNet->current_bn().variable( id ).name() << " : " << dSize << "
214  // != "
215  // << it->second.size());
216 
217  _modal.insert(it->first, it->second); //[ it->first ] = it->second;
218  }
219 
220  //_modal = modals;
221 
222  _initExpectations();
223  }
unsigned int NodeId
Type for node ids.
Definition: graphElements.h:97
#define GUM_SHOWERROR(e)
Definition: exceptions.h:73
const CredalNet< GUM_SCALAR > * _credalNet
A pointer to the Credal Net used.
dynExpe _modal
Variables modalities used to compute expectations.
void _initExpectations()
Initialize lower and upper expectations before inference, with the lower expectation being initialize...

+ Here is the call graph for this function:
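A sketch inserting modalities from a map so that expectation bounds become available; the variable name and values are illustrative, and vectors whose size does not match the variable's domain size are skipped, as the check above shows:

#include <map>
#include <string>
#include <vector>
#include <agrum/CN/CNLoopyPropagation.h>

void addModalities(gum::credal::CNLoopyPropagation< double >& lp) {
  std::map< std::string, std::vector< double > > modals;
  modals["temperature"] = {10., 20., 30.};   // hypothetical 3-state variable

  lp.insertModals(modals);   // expectations are (re)initialized from these values
  lp.makeInference();        // expectationMin/Max("temperature") can then be queried
}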

template<typename GUM_SCALAR >
void gum::credal::InferenceEngine< GUM_SCALAR >::insertModalsFile ( const std::string &  path)
inherited

Insert variables modalities from file to compute expectations.

Parameters
path: The path to the modalities file.

Definition at line 143 of file inferenceEngine_tpl.h.

References gum::credal::InferenceEngine< GUM_SCALAR >::_initExpectations(), gum::credal::InferenceEngine< GUM_SCALAR >::_modal, and GUM_ERROR.

143  {
144  std::ifstream mod_stream(path.c_str(), std::ios::in);
145 
146  if (!mod_stream.good()) {
147  GUM_ERROR(OperationNotAllowed,
148  "void InferenceEngine< GUM_SCALAR "
149  ">::insertModals(const std::string & path) : "
150  "could not open input file : "
151  << path);
152  }
153 
154  if (!_modal.empty()) _modal.clear();
155 
156  std::string line, tmp;
157  char * cstr, *p;
158 
159  while (mod_stream.good()) {
160  getline(mod_stream, line);
161 
162  if (line.size() == 0) continue;
163 
164  cstr = new char[line.size() + 1];
165  strcpy(cstr, line.c_str());
166 
167  p = strtok(cstr, " ");
168  tmp = p;
169 
170  std::vector< GUM_SCALAR > values;
171  p = strtok(nullptr, " ");
172 
173  while (p != nullptr) {
174  values.push_back(GUM_SCALAR(atof(p)));
175  p = strtok(nullptr, " ");
176  } // end of : line
177 
178  _modal.insert(tmp, values); //[tmp] = values;
179 
180  delete[] p;
181  delete[] cstr;
182  } // end of : file
183 
184  mod_stream.close();
185 
186  _initExpectations();
187  }
dynExpe _modal
Variables modalities used to compute expectations.
void _initExpectations()
Initialize lower and upper expectations before inference, with the lower expectation being initialize...
#define GUM_ERROR(type, msg)
Definition: exceptions.h:66

+ Here is the call graph for this function:
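The parser above reads one variable per line: the name followed by its space-separated numeric values. A hedged sketch writing such a file and loading it (file name and contents are illustrative):

#include <fstream>
#include <agrum/CN/CNLoopyPropagation.h>

void loadModalitiesFromFile(gum::credal::CNLoopyPropagation< double >& lp) {
  std::ofstream mods("modalities.modal");   // hypothetical file name
  mods << "temperature 10 20 30\n";         // variable name, then one value per state
  mods << "pressure 1 2\n";
  mods.close();

  lp.insertModalsFile("modalities.modal");
}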

template<typename GUM_SCALAR >
void gum::credal::InferenceEngine< GUM_SCALAR >::insertQuery ( const NodeProperty< std::vector< bool > > &  query)
inherited

Insert query variables and states from Property.

Parameters
query: The Property on nodes containing the queried variables' states.

Definition at line 328 of file inferenceEngine_tpl.h.

References gum::credal::InferenceEngine< GUM_SCALAR >::_credalNet, gum::credal::InferenceEngine< GUM_SCALAR >::_query, gum::HashTable< Key, Val, Alloc >::clear(), gum::HashTable< Key, Val, Alloc >::empty(), GUM_SHOWERROR, and gum::HashTable< Key, Val, Alloc >::insert().

329  {
330  if (!_query.empty()) _query.clear();
331 
332  for (const auto& elt : query) {
333  try {
334  _credalNet->current_bn().variable(elt.first);
335  } catch (NotFound& err) {
336  GUM_SHOWERROR(err);
337  continue;
338  }
339 
340  _query.insert(elt.first, elt.second);
341  }
342  }
#define GUM_SHOWERROR(e)
Definition: exceptions.h:73
const CredalNet< GUM_SCALAR > * _credalNet
A pointer to the Credal Net used.
query _query
Holds the query nodes states.
void clear()
Removes all the elements in the hash table.
NodeProperty< std::vector< bool > > query
value_type & insert(const Key &key, const Val &val)
Adds a new element (actually a copy of this element) into the hash table.
bool empty() const noexcept
Indicates whether the hash table is empty.

+ Here is the call graph for this function:

template<typename GUM_SCALAR >
void gum::credal::InferenceEngine< GUM_SCALAR >::insertQueryFile ( const std::string &  path)
inherited

Insert query variables states from file.

Parameters
path: The path to the query file.

Definition at line 345 of file inferenceEngine_tpl.h.

References gum::credal::InferenceEngine< GUM_SCALAR >::_credalNet, gum::credal::InferenceEngine< GUM_SCALAR >::_query, gum::HashTable< Key, Val, Alloc >::clear(), gum::HashTable< Key, Val, Alloc >::empty(), GUM_ERROR, GUM_SHOWERROR, and gum::HashTable< Key, Val, Alloc >::insert().

345  {
346  std::ifstream evi_stream(path.c_str(), std::ios::in);
347 
348  if (!evi_stream.good()) {
349  GUM_ERROR(IOError,
350  "void InferenceEngine< GUM_SCALAR >::insertQuery(const "
351  "std::string & path) : could not open input file : "
352  << path);
353  }
354 
355  if (!_query.empty()) _query.clear();
356 
357  std::string line, tmp;
358  char * cstr, *p;
359 
360  while (evi_stream.good() && std::strcmp(line.c_str(), "[QUERY]") != 0) {
361  getline(evi_stream, line);
362  }
363 
364  while (evi_stream.good()) {
365  getline(evi_stream, line);
366 
367  if (std::strcmp(line.c_str(), "[EVIDENCE]") == 0) break;
368 
369  if (line.size() == 0) continue;
370 
371  cstr = new char[line.size() + 1];
372  strcpy(cstr, line.c_str());
373 
374  p = strtok(cstr, " ");
375  tmp = p;
376 
377  // if user input is wrong
378  NodeId node = -1;
379 
380  try {
381  node = _credalNet->current_bn().idFromName(tmp);
382  } catch (NotFound& err) {
383  GUM_SHOWERROR(err);
384  continue;
385  }
386 
387  auto dSize = _credalNet->current_bn().variable(node).domainSize();
388 
389  p = strtok(nullptr, " ");
390 
391  if (p == nullptr) {
392  _query.insert(node, std::vector< bool >(dSize, true));
393  } else {
394  std::vector< bool > values(dSize, false);
395 
396  while (p != nullptr) {
397  if ((Size)atoi(p) >= dSize)
398  GUM_ERROR(OutOfBounds,
399  "void InferenceEngine< GUM_SCALAR "
400  ">::insertQuery(const std::string & path) : "
401  "query modality is higher or equal to "
402  "cardinality");
403 
404  values[atoi(p)] = true;
405  p = strtok(nullptr, " ");
406  } // end of : line
407 
408  _query.insert(node, values);
409  }
410 
411  delete[] p;
412  delete[] cstr;
413  } // end of : file
414 
415  evi_stream.close();
416  }
unsigned long Size
In aGrUM, hashed values are unsigned long int.
Definition: types.h:50
unsigned int NodeId
Type for node ids.
Definition: graphElements.h:97
#define GUM_SHOWERROR(e)
Definition: exceptions.h:73
const CredalNet< GUM_SCALAR > * _credalNet
A pointer to the Credal Net used.
query _query
Holds the query nodes states.
void clear()
Removes all the elements in the hash table.
value_type & insert(const Key &key, const Val &val)
Adds a new element (actually a copy of this element) into the hash table.
bool empty() const noexcept
Indicates whether the hash table is empty.
#define GUM_ERROR(type, msg)
Definition: exceptions.h:66

+ Here is the call graph for this function:
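From the parser above, a query file starts with a [QUERY] marker and lists one variable per line: a bare name queries every state, a name followed by state indices queries only those states, and an optional [EVIDENCE] marker ends the section. A hedged sketch (file name and variables are illustrative):

#include <fstream>
#include <agrum/CN/CNLoopyPropagation.h>

void loadQueryFromFile(gum::credal::CNLoopyPropagation< double >& lp) {
  std::ofstream q("query.evi");   // hypothetical file name
  q << "[QUERY]\n";
  q << "smoker\n";                // every state of this variable is queried
  q << "bronchitis 1\n";          // only state 1 of this variable is queried
  q << "[EVIDENCE]\n";            // ends the query section
  q.close();

  lp.insertQueryFile("query.evi");
}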

INLINE bool gum::ApproximationScheme::isEnabledEpsilon ( ) const
virtualinherited

Returns true if stopping criterion on epsilon is enabled, false otherwise.

Returns
Returns true if stopping criterion on epsilon is enabled, false otherwise.

Implements gum::IApproximationSchemeConfiguration.

Definition at line 59 of file approximationScheme_inl.h.

References gum::ApproximationScheme::_enabled_eps.

Referenced by gum::learning::genericBNLearner::isEnabledEpsilon().

59  {
60  return _enabled_eps;
61  }
bool _enabled_eps
If true, the threshold convergence is enabled.

+ Here is the caller graph for this function:

INLINE bool gum::ApproximationScheme::isEnabledMaxIter ( ) const
virtualinherited

Returns true if stopping criterion on max iterations is enabled, false otherwise.

Returns
Returns true if stopping criterion on max iterations is enabled, false otherwise.

Implements gum::IApproximationSchemeConfiguration.

Definition at line 110 of file approximationScheme_inl.h.

References gum::ApproximationScheme::_enabled_max_iter.

Referenced by gum::learning::genericBNLearner::isEnabledMaxIter().

110  {
111  return _enabled_max_iter;
112  }
bool _enabled_max_iter
If true, the maximum iterations stopping criterion is enabled.

+ Here is the caller graph for this function:

INLINE bool gum::ApproximationScheme::isEnabledMaxTime ( ) const
virtualinherited

Returns true if stopping criterion on timeout is enabled, false otherwise.

Returns
Returns true if stopping criterion on timeout is enabled, false otherwise.

Implements gum::IApproximationSchemeConfiguration.

Definition at line 136 of file approximationScheme_inl.h.

References gum::ApproximationScheme::_enabled_max_time.

Referenced by gum::learning::genericBNLearner::isEnabledMaxTime().

136  {
137  return _enabled_max_time;
138  }
bool _enabled_max_time
If true, the timeout is enabled.

+ Here is the caller graph for this function:

INLINE bool gum::ApproximationScheme::isEnabledMinEpsilonRate ( ) const
virtualinherited

Returns true if stopping criterion on epsilon rate is enabled, false otherwise.

Returns
Returns true if stopping criterion on epsilon rate is enabled, false otherwise.

Implements gum::IApproximationSchemeConfiguration.

Definition at line 88 of file approximationScheme_inl.h.

References gum::ApproximationScheme::_enabled_min_rate_eps.

Referenced by gum::GibbsKL< GUM_SCALAR >::_computeKL(), and gum::learning::genericBNLearner::isEnabledMinEpsilonRate().

88  {
89  return _enabled_min_rate_eps;
90  }
bool _enabled_min_rate_eps
If true, the minimal threshold for epsilon rate is enabled.

+ Here is the caller graph for this function:

template<typename GUM_SCALAR >
void gum::credal::CNLoopyPropagation< GUM_SCALAR >::makeInference ( )
virtual

Starts the inference.

Implements gum::credal::InferenceEngine< GUM_SCALAR >.

Definition at line 554 of file CNLoopyPropagation_tpl.h.

554  {
555  if (_InferenceUpToDate) { return; }
556 
557  _initialize();
558 
560 
561  switch (__inferenceType) {
564  break;
565 
567 
569  }
570 
571  //_updateMarginals();
572  _updateIndicatrices(); // will call _updateMarginals()
573 
575 
576  _InferenceUpToDate = true;
577  }
void _makeInferenceByOrderedArcs()
Starts the inference with this inference type.
InferenceType __inferenceType
The chosen inference type.
void _initialize()
Topological forward propagation to initialize old marginals & messages.
void initApproximationScheme()
Initialise the scheme.
Chooses an arc ordering and sends messages accordingly at all steps.
void _computeExpectations()
Since the network is binary, expectations can be computed from the final marginals which give us the ...
void _makeInferenceByRandomOrder()
Starts the inference with this inference type.
bool _InferenceUpToDate
TRUE if inference has already been performed, FALSE otherwise.
Uses a node-set so we don't iterate on nodes that can't send a new message.
void _updateIndicatrices()
Only update indicatrices variables at the end of computations ( calls _msgP ).
Chooses a random arc ordering and sends messages accordingly.
void _makeInferenceNodeToNeighbours()
Starts the inference with this inference type.
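A minimal end-to-end sketch: build the engine on an existing credal net, insert evidence, run makeInference() and read the marginal bounds (the evidence file and variable name are hypothetical):

#include <cstddef>
#include <iostream>
#include <agrum/CN/CNLoopyPropagation.h>

void runLoopyPropagation(const gum::credal::CredalNet< double >& cn) {
  gum::credal::CNLoopyPropagation< double > lp(cn);

  lp.insertEvidenceFile("observations.evi");     // hypothetical evidence file
  lp.makeInference();

  const auto& pMin = lp.marginalMin("smoker");   // hypothetical variable name
  const auto& pMax = lp.marginalMax("smoker");

  for (std::size_t mod = 0; mod < pMin.size(); ++mod)
    std::cout << "P(smoker=" << mod << "|e) in [ " << pMin[mod] << ", "
              << pMax[mod] << " ]" << std::endl;
}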
template<typename GUM_SCALAR >
const std::vector< GUM_SCALAR > & gum::credal::InferenceEngine< GUM_SCALAR >::marginalMax ( const NodeId  id) const
inherited

Get the upper marginals of a given node id.

Parameters
id: The node id whose upper marginals we want.
Returns
A constant reference to this node's upper marginals.

Definition at line 444 of file inferenceEngine_tpl.h.

References gum::credal::InferenceEngine< GUM_SCALAR >::_marginalMax.

444  {
445  try {
446  return _marginalMax[id];
447  } catch (NotFound& err) { throw(err); }
448  }
margi _marginalMax
Upper marginals.
template<typename GUM_SCALAR >
const std::vector< GUM_SCALAR > & gum::credal::InferenceEngine< GUM_SCALAR >::marginalMax ( const std::string &  varName) const
inherited

Get the upper marginals of a given variable name.

Parameters
varName: The variable name whose upper marginals we want.
Returns
A constant reference to this variable's upper marginals.

Definition at line 427 of file inferenceEngine_tpl.h.

References gum::credal::InferenceEngine< GUM_SCALAR >::_credalNet, and gum::credal::InferenceEngine< GUM_SCALAR >::_marginalMax.

428  {
429  try {
430  return _marginalMax[_credalNet->current_bn().idFromName(varName)];
431  } catch (NotFound& err) { throw(err); }
432  }
const CredalNet< GUM_SCALAR > * _credalNet
A pointer to the Credal Net used.
margi _marginalMax
Upper marginals.
template<typename GUM_SCALAR >
const std::vector< GUM_SCALAR > & gum::credal::InferenceEngine< GUM_SCALAR >::marginalMin ( const NodeId  id) const
inherited

Get the lower marginals of a given node id.

Parameters
id: The node id whose lower marginals we want.
Returns
A constant reference to this node's lower marginals.

Definition at line 436 of file inferenceEngine_tpl.h.

References gum::credal::InferenceEngine< GUM_SCALAR >::_marginalMin.

436  {
437  try {
438  return _marginalMin[id];
439  } catch (NotFound& err) { throw(err); }
440  }
margi _marginalMin
Lower marginals.
template<typename GUM_SCALAR >
const std::vector< GUM_SCALAR > & gum::credal::InferenceEngine< GUM_SCALAR >::marginalMin ( const std::string &  varName) const
inherited

Get the lower marginals of a given variable name.

Parameters
varName: The variable name whose lower marginals we want.
Returns
A constant reference to this variable's lower marginals.

Definition at line 419 of file inferenceEngine_tpl.h.

References gum::credal::InferenceEngine< GUM_SCALAR >::_credalNet, and gum::credal::InferenceEngine< GUM_SCALAR >::_marginalMin.

420  {
421  try {
422  return _marginalMin[_credalNet->current_bn().idFromName(varName)];
423  } catch (NotFound& err) { throw(err); }
424  }
margi _marginalMin
Lower marginals.
const CredalNet< GUM_SCALAR > * _credalNet
A pointer to the Credal Net used.
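The id-based and name-based accessors return the same vectors; a short sketch (the variable name is illustrative):

#include <agrum/CN/CNLoopyPropagation.h>

void readMarginals(gum::credal::CNLoopyPropagation< double >& lp) {
  // name-based access
  const auto& lowByName = lp.marginalMin("smoker");   // hypothetical name

  // id-based access through the credal net's current BayesNet
  gum::NodeId id = lp.credalNet().current_bn().idFromName("smoker");
  const auto& lowById = lp.marginalMin(id);

  // both references point to the same lower marginals
  (void)lowByName;
  (void)lowById;
}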
INLINE Size gum::ApproximationScheme::maxIter ( ) const
virtualinherited

Returns the criterion on number of iterations.

Returns
Returns the criterion on number of iterations.

Implements gum::IApproximationSchemeConfiguration.

Definition at line 100 of file approximationScheme_inl.h.

References gum::ApproximationScheme::_max_iter.

Referenced by gum::learning::genericBNLearner::maxIter().

100 { return _max_iter; }
Size _max_iter
The maximum iterations.

+ Here is the caller graph for this function:

INLINE double gum::ApproximationScheme::maxTime ( ) const
virtualinherited

Returns the timeout (in seconds).

Returns
Returns the timeout (in seconds).

Implements gum::IApproximationSchemeConfiguration.

Definition at line 123 of file approximationScheme_inl.h.

References gum::ApproximationScheme::_max_time.

Referenced by gum::learning::genericBNLearner::maxTime().

123 { return _max_time; }
double _max_time
The timeout.

+ Here is the caller graph for this function:

INLINE std::string gum::IApproximationSchemeConfiguration::messageApproximationScheme ( ) const
inherited

Returns the approximation scheme message.

Returns
Returns the approximation scheme message.

Definition at line 38 of file IApproximationSchemeConfiguration_inl.h.

References gum::IApproximationSchemeConfiguration::Continue, gum::IApproximationSchemeConfiguration::Epsilon, gum::IApproximationSchemeConfiguration::epsilon(), gum::IApproximationSchemeConfiguration::Limit, gum::IApproximationSchemeConfiguration::maxIter(), gum::IApproximationSchemeConfiguration::maxTime(), gum::IApproximationSchemeConfiguration::minEpsilonRate(), gum::IApproximationSchemeConfiguration::Rate, gum::IApproximationSchemeConfiguration::stateApproximationScheme(), gum::IApproximationSchemeConfiguration::Stopped, gum::IApproximationSchemeConfiguration::TimeLimit, and gum::IApproximationSchemeConfiguration::Undefined.

Referenced by gum::ApproximationScheme::_stopScheme(), gum::ApproximationScheme::continueApproximationScheme(), and gum::credal::InferenceEngine< GUM_SCALAR >::getApproximationSchemeMsg().

38  {
39  std::stringstream s;
40 
41  switch (stateApproximationScheme()) {
42  case ApproximationSchemeSTATE::Continue: s << "in progress"; break;
43 
44  case ApproximationSchemeSTATE::Epsilon:
45  s << "stopped with epsilon=" << epsilon();
46  break;
47 
48  case ApproximationSchemeSTATE::Rate:
49  s << "stopped with rate=" << minEpsilonRate();
50  break;
51 
52  case ApproximationSchemeSTATE::Limit:
53  s << "stopped with max iteration=" << maxIter();
54  break;
55 
56  case ApproximationSchemeSTATE::TimeLimit:
57  s << "stopped with timeout=" << maxTime();
58  break;
59 
60  case ApproximationSchemeSTATE::Stopped: s << "stopped on request"; break;
61 
62  case ApproximationSchemeSTATE::Undefined: s << "undefined state"; break;
63  };
64 
65  return s.str();
66  }
virtual double epsilon() const =0
Returns the value of epsilon.
virtual ApproximationSchemeSTATE stateApproximationScheme() const =0
Returns the approximation scheme state.
virtual double maxTime() const =0
Returns the timeout (in seconds).
virtual Size maxIter() const =0
Returns the criterion on number of iterations.
virtual double minEpsilonRate() const =0
Returns the value of the minimal epsilon rate.

+ Here is the call graph for this function:

+ Here is the caller graph for this function:

INLINE double gum::ApproximationScheme::minEpsilonRate ( ) const
virtualinherited

Returns the value of the minimal epsilon rate.

Returns
Returns the value of the minimal epsilon rate.

Implements gum::IApproximationSchemeConfiguration.

Definition at line 72 of file approximationScheme_inl.h.

References gum::ApproximationScheme::_min_rate_eps.

Referenced by gum::learning::genericBNLearner::minEpsilonRate().

72  {
73  return _min_rate_eps;
74  }
double _min_rate_eps
Threshold for the epsilon rate.

+ Here is the caller graph for this function:

INLINE Size gum::ApproximationScheme::nbrIterations ( ) const
virtualinherited

Returns the number of iterations.

Returns
Returns the number of iterations.
Exceptions
OperationNotAllowed: Raised if the scheme has not been performed.

Implements gum::IApproximationSchemeConfiguration.

Definition at line 161 of file approximationScheme_inl.h.

References gum::ApproximationScheme::_current_step, GUM_ERROR, gum::ApproximationScheme::stateApproximationScheme(), and gum::IApproximationSchemeConfiguration::Undefined.

Referenced by gum::GibbsKL< GUM_SCALAR >::_computeKL(), and gum::learning::genericBNLearner::nbrIterations().

161  {
162  if (stateApproximationScheme() == ApproximationSchemeSTATE::Undefined) {
163  GUM_ERROR(OperationNotAllowed,
164  "state of the approximation scheme is undefined");
165  }
166 
167  return _current_step;
168  }
ApproximationSchemeSTATE stateApproximationScheme() const
Returns the approximation scheme state.
Size _current_step
The current step.
#define GUM_ERROR(type, msg)
Definition: exceptions.h:66

+ Here is the call graph for this function:

+ Here is the caller graph for this function:

INLINE Size gum::ApproximationScheme::periodSize ( ) const
virtualinherited

Returns the period size.

Returns
Returns the period size.

Implements gum::IApproximationSchemeConfiguration.

Definition at line 147 of file approximationScheme_inl.h.

References gum::ApproximationScheme::_period_size.

Referenced by gum::credal::CNMonteCarloSampling< GUM_SCALAR, BNInferenceEngine >::makeInference(), and gum::learning::genericBNLearner::periodSize().

147 { return _period_size; }
Size _period_size
Checking criteria frequency.

+ Here is the caller graph for this function:

INLINE Size gum::ApproximationScheme::remainingBurnIn ( )
inherited

Returns the remaining burn in.

Returns
Returns the remaining burn in.

Definition at line 208 of file approximationScheme_inl.h.

References gum::ApproximationScheme::_burn_in, and gum::ApproximationScheme::_current_step.

208  {
209  if (_burn_in > _current_step) {
210  return _burn_in - _current_step;
211  } else {
212  return 0;
213  }
214  }
Size _burn_in
Number of iterations before checking stopping criteria.
Size _current_step
The current step.
template<typename GUM_SCALAR >
bool gum::credal::InferenceEngine< GUM_SCALAR >::repetitiveInd ( ) const
inherited

Get the current independence status.

Returns
True if repetitive, False otherwise.

Definition at line 117 of file inferenceEngine_tpl.h.

References gum::credal::InferenceEngine< GUM_SCALAR >::_repetitiveInd.

117  {
118  return _repetitiveInd;
119  }
bool _repetitiveInd
True if using repetitive independence ( dynamic network only ), False otherwise.
template<typename GUM_SCALAR >
void gum::credal::InferenceEngine< GUM_SCALAR >::saveExpectations ( const std::string &  path) const
inherited

Saves expectations to file.

Parameters
path: The path to the file to be used.

Definition at line 551 of file inferenceEngine_tpl.h.

References gum::credal::InferenceEngine< GUM_SCALAR >::_dynamicExpMax, gum::credal::InferenceEngine< GUM_SCALAR >::_dynamicExpMin, and GUM_ERROR.

552  {
553  if (_dynamicExpMin.empty()) //_modal.empty())
554  return;
555 
556  // else not here, to keep the const (natural with a saving process)
557  // else if(_dynamicExpMin.empty() || _dynamicExpMax.empty())
558  //_dynamicExpectations(); // works with or without a dynamic network
559 
560  std::ofstream m_stream(path.c_str(), std::ios::out | std::ios::trunc);
561 
562  if (!m_stream.good()) {
563  GUM_ERROR(IOError,
564  "void InferenceEngine< GUM_SCALAR "
565  ">::saveExpectations(const std::string & path) : could "
566  "not open output file : "
567  << path);
568  }
569 
570  for (const auto& elt : _dynamicExpMin) {
571  m_stream << elt.first; // it->first;
572 
573  // iterates over a vector
574  for (const auto& elt2 : elt.second) {
575  m_stream << " " << elt2;
576  }
577 
578  m_stream << std::endl;
579  }
580 
581  for (const auto& elt : _dynamicExpMax) {
582  m_stream << elt.first;
583 
584  // iterates over a vector
585  for (const auto& elt2 : elt.second) {
586  m_stream << " " << elt2;
587  }
588 
589  m_stream << std::endl;
590  }
591 
592  m_stream.close();
593  }
dynExpe _dynamicExpMin
Lower dynamic expectations.
dynExpe _dynamicExpMax
Upper dynamic expectations.
#define GUM_ERROR(type, msg)
Definition: exceptions.h:66
template<typename GUM_SCALAR >
void gum::credal::CNLoopyPropagation< GUM_SCALAR >::saveInference ( const std::string &  path)
Deprecated:
Use saveMarginals() from InferenceEngine instead.

This one is easier to read but harder for scripts to parse.

Parameters
path: The path to the file to save marginals.

Definition at line 29 of file CNLoopyPropagation_tpl.h.

References _INF, and GUM_ERROR.

29  {
30  std::string path_name = path.substr(0, path.size() - 4);
31  path_name = path_name + ".res";
32 
33  std::ofstream res(path_name.c_str(), std::ios::out | std::ios::trunc);
34 
35  if (!res.good()) {
36  GUM_ERROR(NotFound,
37  "CNLoopyPropagation<GUM_SCALAR>::saveInference(std::"
38  "string & path) : could not open file : "
39  + path_name);
40  }
41 
42  std::string ext = path.substr(path.size() - 3, path.size());
43 
44  if (std::strcmp(ext.c_str(), "evi") == 0) {
45  std::ifstream evi(path.c_str(), std::ios::in);
46  std::string ligne;
47 
48  if (!evi.good()) {
49  GUM_ERROR(NotFound,
50  "CNLoopyPropagation<GUM_SCALAR>::saveInference(std::"
51  "string & path) : could not open file : "
52  + ext);
53  }
54 
55  while (evi.good()) {
56  getline(evi, ligne);
57  res << ligne << "\n";
58  }
59 
60  evi.close();
61  }
62 
63  res << "[RESULTATS]"
64  << "\n";
65 
66  for (auto node : __bnet->nodes()) {
67  // compute the posterior distribution
68  GUM_SCALAR msg_p_min = 1.0;
69  GUM_SCALAR msg_p_max = 0.0;
70 
71  // evidence case: immediate computation
72  if (__infE::_evidence.exists(node)) {
73  if (__infE::_evidence[node][1] == 0.) {
74  msg_p_min = 0.;
75  } else if (__infE::_evidence[node][1] == 1.) {
76  msg_p_min = 1.;
77  }
78 
79  msg_p_max = msg_p_min;
80  }
81  // otherwise, from node P and node L
82  else {
83  GUM_SCALAR min = _NodesP_min[node];
84  GUM_SCALAR max;
85 
86  if (_NodesP_max.exists(node)) {
87  max = _NodesP_max[node];
88  } else {
89  max = min;
90  }
91 
92  GUM_SCALAR lmin = _NodesL_min[node];
93  GUM_SCALAR lmax;
94 
95  if (_NodesL_max.exists(node)) {
96  lmax = _NodesL_max[node];
97  } else {
98  lmax = lmin;
99  }
100 
101  // limit cases for min
102  if (min == _INF && lmin == 0.) {
103  std::cout << "proba ERR (negatif) : pi = inf, l = 0" << std::endl;
104  }
105 
106  if (lmin == _INF) { // infinite case
107  msg_p_min = GUM_SCALAR(1.);
108  } else if (min == 0. || lmin == 0.) {
109  msg_p_min = GUM_SCALAR(0.);
110  } else {
111  msg_p_min = GUM_SCALAR(1. / (1. + ((1. / min - 1.) * 1. / lmin)));
112  }
113 
114  // limit cases for max
115  if (max == _INF && lmax == 0.) {
116  std::cout << "proba ERR (negatif) : pi = inf, l = 0" << std::endl;
117  }
118 
119  if (lmax == _INF) { // infinite case
120  msg_p_max = GUM_SCALAR(1.);
121  } else if (max == 0. || lmax == 0.) {
122  msg_p_max = GUM_SCALAR(0.);
123  } else {
124  msg_p_max = GUM_SCALAR(1. / (1. + ((1. / max - 1.) * 1. / lmax)));
125  }
126  }
127 
128  if (msg_p_min != msg_p_min && msg_p_max == msg_p_max) {
129  msg_p_min = msg_p_max;
130  }
131 
132  if (msg_p_max != msg_p_max && msg_p_min == msg_p_min) {
133  msg_p_max = msg_p_min;
134  }
135 
136  if (msg_p_max != msg_p_max && msg_p_min != msg_p_min) {
137  std::cout << std::endl;
138  std::cout << "pas de proba calculable (verifier observations)"
139  << std::endl;
140  }
141 
142  res << "P(" << __bnet->variable(node).name() << " | e) = ";
143 
144  if (__infE::_evidence.exists(node)) {
145  res << "(observe)" << std::endl;
146  } else {
147  res << std::endl;
148  }
149 
150  res << "\t\t" << __bnet->variable(node).label(0) << " [ "
151  << (GUM_SCALAR)1. - msg_p_max;
152 
153  if (msg_p_min != msg_p_max) {
154  res << ", " << (GUM_SCALAR)1. - msg_p_min << " ] | ";
155  } else {
156  res << " ] | ";
157  }
158 
159  res << __bnet->variable(node).label(1) << " [ " << msg_p_min;
160 
161  if (msg_p_min != msg_p_max) {
162  res << ", " << msg_p_max << " ]" << std::endl;
163  } else {
164  res << " ]" << std::endl;
165  }
166  } // end of : for each node
167 
168  res.close();
169  }
NodeProperty< GUM_SCALAR > _NodesP_min
"Lower" node information obtained by combination of parents' messages.
#define _INF
const IBayesNet< GUM_SCALAR > * __bnet
A pointer to its IBayesNet used as a DAG.
margi _evidence
Holds observed variables states.
NodeProperty< GUM_SCALAR > _NodesL_min
"Lower" node information obtained by combinaison of children messages.
NodeProperty< GUM_SCALAR > _NodesP_max
"Upper" node information obtained by combination of parents' messages.
NodeProperty< GUM_SCALAR > _NodesL_max
"Upper" node information obtained by combination of children's messages.
#define GUM_ERROR(type, msg)
Definition: exceptions.h:66
template<typename GUM_SCALAR >
void gum::credal::InferenceEngine< GUM_SCALAR >::saveMarginals ( const std::string &  path) const
inherited

Saves marginals to file.

Parameters
path: The path to the file to be used.

Definition at line 526 of file inferenceEngine_tpl.h.

References gum::credal::InferenceEngine< GUM_SCALAR >::_credalNet, gum::credal::InferenceEngine< GUM_SCALAR >::_marginalMax, gum::credal::InferenceEngine< GUM_SCALAR >::_marginalMin, and GUM_ERROR.

526  {
527  std::ofstream m_stream(path.c_str(), std::ios::out | std::ios::trunc);
528 
529  if (!m_stream.good()) {
530  GUM_ERROR(IOError,
531  "void InferenceEngine< GUM_SCALAR >::saveMarginals(const "
532  "std::string & path) const : could not open output file "
533  ": "
534  << path);
535  }
536 
537  for (const auto& elt : _marginalMin) {
538  Size esize = Size(elt.second.size());
539 
540  for (Size mod = 0; mod < esize; mod++) {
541  m_stream << _credalNet->current_bn().variable(elt.first).name() << " "
542  << mod << " " << (elt.second)[mod] << " "
543  << _marginalMax[elt.first][mod] << std::endl;
544  }
545  }
546 
547  m_stream.close();
548  }
unsigned long Size
In aGrUM, hashed values are unsigned long int.
Definition: types.h:50
margi _marginalMin
Lower marginals.
const CredalNet< GUM_SCALAR > * _credalNet
A pointer to the Credal Net used.
margi _marginalMax
Upper marginals.
#define GUM_ERROR(type, msg)
Definition: exceptions.h:66
template<typename GUM_SCALAR >
void gum::credal::InferenceEngine< GUM_SCALAR >::saveVertices ( const std::string &  path) const
inherited

Saves vertices to file.

Parameters
path: The path to the file to be used.

Definition at line 625 of file inferenceEngine_tpl.h.

References gum::credal::InferenceEngine< GUM_SCALAR >::_credalNet, gum::credal::InferenceEngine< GUM_SCALAR >::_marginalSets, and GUM_ERROR.

625  {
626  std::ofstream m_stream(path.c_str(), std::ios::out | std::ios::trunc);
627 
628  if (!m_stream.good()) {
629  GUM_ERROR(IOError,
630  "void InferenceEngine< GUM_SCALAR >::saveVertices(const "
631  "std::string & path) : could not open output file : "
632  << path);
633  }
634 
635  for (const auto& elt : _marginalSets) {
636  m_stream << _credalNet->current_bn().variable(elt.first).name()
637  << std::endl;
638 
639  for (const auto& elt2 : elt.second) {
640  m_stream << "[";
641  bool first = true;
642 
643  for (const auto& elt3 : elt2) {
644  if (!first) {
645  m_stream << ",";
646  first = false;
647  }
648 
649  m_stream << elt3;
650  }
651 
652  m_stream << "]\n";
653  }
654  }
655 
656  m_stream.close();
657  }
credalSet _marginalSets
Credal sets vertices, if enabled.
const CredalNet< GUM_SCALAR > * _credalNet
A pointer to the Credal Net used.
#define GUM_ERROR(type, msg)
Definition: exceptions.h:66
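A sketch chaining the save helpers after an inference (the output paths are hypothetical); saveExpectations() may write nothing unless modalities were inserted, and saveVertices() only has content if vertex storage was enabled:

#include <agrum/CN/CNLoopyPropagation.h>

void saveResults(gum::credal::CNLoopyPropagation< double >& lp) {
  lp.makeInference();

  lp.saveMarginals("marginals.txt");         // hypothetical output paths
  lp.saveExpectations("expectations.txt");
  lp.saveVertices("vertices.txt");
}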
INLINE void gum::ApproximationScheme::setEpsilon ( double  eps)
virtualinherited

Given that we approximate f(t), stopping criterion on |f(t+1)-f(t)|.

If the criterion was disabled it will be enabled.

Parameters
eps: The new epsilon value.
Exceptions
OutOfLowerBound: Raised if eps < 0.

Implements gum::IApproximationSchemeConfiguration.

Definition at line 41 of file approximationScheme_inl.h.

References gum::ApproximationScheme::_enabled_eps, gum::ApproximationScheme::_eps, and GUM_ERROR.

Referenced by gum::credal::CNMonteCarloSampling< GUM_SCALAR, BNInferenceEngine >::__mcInitApproximationScheme(), gum::GibbsKL< GUM_SCALAR >::GibbsKL(), gum::GibbsSampling< GUM_SCALAR >::GibbsSampling(), gum::learning::GreedyHillClimbing::GreedyHillClimbing(), gum::SamplingInference< GUM_SCALAR >::SamplingInference(), and gum::learning::genericBNLearner::setEpsilon().

41  {
42  if (eps < 0.) { GUM_ERROR(OutOfLowerBound, "eps should be >=0"); }
43 
44  _eps = eps;
45  _enabled_eps = true;
46  }
bool _enabled_eps
If true, the threshold convergence is enabled.
double _eps
Threshold for convergence.
#define GUM_ERROR(type, msg)
Definition: exceptions.h:66

+ Here is the caller graph for this function:

INLINE void gum::ApproximationScheme::setMaxIter ( Size  max)
virtualinherited

Stopping criterion on number of iterations.

If the criterion was disabled it will be enabled.

Parameters
max: The maximum number of iterations.
Exceptions
OutOfLowerBound: Raised if max < 1.

Implements gum::IApproximationSchemeConfiguration.

Definition at line 93 of file approximationScheme_inl.h.

References gum::ApproximationScheme::_enabled_max_iter, gum::ApproximationScheme::_max_iter, and GUM_ERROR.

Referenced by gum::GibbsKL< GUM_SCALAR >::GibbsKL(), gum::SamplingInference< GUM_SCALAR >::SamplingInference(), and gum::learning::genericBNLearner::setMaxIter().

93  {
94  if (max < 1) { GUM_ERROR(OutOfLowerBound, "max should be >=1"); }
95  _max_iter = max;
96  _enabled_max_iter = true;
97  }
bool _enabled_max_iter
If true, the maximum iterations stopping criterion is enabled.
Size _max_iter
The maximum iterations.
#define GUM_ERROR(type, msg)
Definition: exceptions.h:66

+ Here is the caller graph for this function:

INLINE void gum::ApproximationScheme::setMaxTime ( double  timeout)
virtualinherited

Stopping criterion on timeout.

If the criterion was disabled it will be enabled.

Parameters
timeout: The timeout value in seconds.
Exceptions
OutOfLowerBound: Raised if timeout <= 0.0.

Implements gum::IApproximationSchemeConfiguration.

Definition at line 116 of file approximationScheme_inl.h.

References gum::ApproximationScheme::_enabled_max_time, gum::ApproximationScheme::_max_time, and GUM_ERROR.

Referenced by gum::credal::CNMonteCarloSampling< GUM_SCALAR, BNInferenceEngine >::CNMonteCarloSampling(), gum::GibbsKL< GUM_SCALAR >::GibbsKL(), gum::SamplingInference< GUM_SCALAR >::SamplingInference(), and gum::learning::genericBNLearner::setMaxTime().

116  {
117  if (timeout <= 0.) { GUM_ERROR(OutOfLowerBound, "timeout should be >0."); }
118  _max_time = timeout;
119  _enabled_max_time = true;
120  }
bool _enabled_max_time
If true, the timeout is enabled.
double _max_time
The timeout.
#define GUM_ERROR(type, msg)
Definition: exceptions.h:66

+ Here is the caller graph for this function:

INLINE void gum::ApproximationScheme::setMinEpsilonRate ( double  rate)
virtualinherited

Given that we approximate f(t), stopping criterion on d/dt(|f(t+1)-f(t)|).

If the criterion was disabled it will be enabled.

Parameters
rate: The minimal epsilon rate.
Exceptions
OutOfLowerBound: Raised if rate < 0.

Implements gum::IApproximationSchemeConfiguration.

Definition at line 64 of file approximationScheme_inl.h.

References gum::ApproximationScheme::_enabled_min_rate_eps, gum::ApproximationScheme::_min_rate_eps, and GUM_ERROR.

Referenced by gum::GibbsKL< GUM_SCALAR >::GibbsKL(), gum::GibbsSampling< GUM_SCALAR >::GibbsSampling(), gum::SamplingInference< GUM_SCALAR >::SamplingInference(), and gum::learning::genericBNLearner::setMinEpsilonRate().

64  {
65  if (rate < 0) { GUM_ERROR(OutOfLowerBound, "rate should be >=0"); }
66 
67  _min_rate_eps = rate;
68  _enabled_min_rate_eps = true;
69  }
bool _enabled_min_rate_eps
If true, the minimal threshold for epsilon rate is enabled.
double _min_rate_eps
Threshold for the epsilon rate.
#define GUM_ERROR(type, msg)
Definition: exceptions.h:66

+ Here is the caller graph for this function:

INLINE void gum::ApproximationScheme::setPeriodSize ( Size  p)
virtualinherited

Sets the number of samples between two tests of the stopping criteria.

Parameters
p: The new period value.
Exceptions
OutOfLowerBound: Raised if p < 1.

Implements gum::IApproximationSchemeConfiguration.

Definition at line 141 of file approximationScheme_inl.h.

References gum::ApproximationScheme::_period_size, and GUM_ERROR.

Referenced by gum::credal::CNMonteCarloSampling< GUM_SCALAR, BNInferenceEngine >::CNMonteCarloSampling(), gum::GibbsKL< GUM_SCALAR >::GibbsKL(), gum::SamplingInference< GUM_SCALAR >::SamplingInference(), and gum::learning::genericBNLearner::setPeriodSize().

141  {
142  if (p < 1) { GUM_ERROR(OutOfLowerBound, "p should be >=1"); }
143 
144  _period_size = p;
145  }
Size _period_size
Checking criteria frequency.
#define GUM_ERROR(type, msg)
Definition: exceptions.h:66

+ Here is the caller graph for this function:
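A sketch configuring the inherited approximation-scheme stopping criteria before running the inference (the numeric values are illustrative):

#include <agrum/CN/CNLoopyPropagation.h>

void configureStoppingRules(gum::credal::CNLoopyPropagation< double >& lp) {
  lp.setEpsilon(1e-6);          // stop when |f(t+1) - f(t)| drops below 1e-6
  lp.setMinEpsilonRate(1e-8);   // ... or when its rate of change does
  lp.setMaxIter(1000);          // ... or after 1000 iterations
  lp.setMaxTime(10.);           // ... or after 10 seconds
  lp.setPeriodSize(1);          // test the criteria at every iteration

  lp.makeInference();
}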

template<typename GUM_SCALAR >
void gum::credal::InferenceEngine< GUM_SCALAR >::setRepetitiveInd ( const bool  repetitive)
inherited
Parameters
repetitive: True if repetitive independence is to be used, false otherwise. Only useful with dynamic networks.

Definition at line 108 of file inferenceEngine_tpl.h.

References gum::credal::InferenceEngine< GUM_SCALAR >::_repetitiveInd, and gum::credal::InferenceEngine< GUM_SCALAR >::_repetitiveInit().

108  {
109  bool oldValue = _repetitiveInd;
110  _repetitiveInd = repetitive;
111 
112  // do not compute clusters more than once
113  if (_repetitiveInd && !oldValue) _repetitiveInit();
114  }
void _repetitiveInit()
Initialize _t0 and _t1 clusters.
bool _repetitiveInd
True if using repetitive independence ( dynamic network only ), False otherwise.

+ Here is the call graph for this function:

INLINE void gum::ApproximationScheme::setVerbosity ( bool  v)
virtualinherited

Set the verbosity on (true) or off (false).

Parameters
v: If true, then verbosity is turned on.

Implements gum::IApproximationSchemeConfiguration.

Definition at line 150 of file approximationScheme_inl.h.

References gum::ApproximationScheme::_verbosity.

Referenced by gum::GibbsKL< GUM_SCALAR >::GibbsKL(), gum::SamplingInference< GUM_SCALAR >::SamplingInference(), and gum::learning::genericBNLearner::setVerbosity().

150 { _verbosity = v; }
bool _verbosity
If true, verbosity is enabled.

+ Here is the caller graph for this function:

INLINE bool gum::ApproximationScheme::startOfPeriod ( )
inherited

Returns true if we are at the beginning of a period (compute error is mandatory).

Returns
Returns true if we are at the beginning of a period (compute error is mandatory).

Definition at line 195 of file approximationScheme_inl.h.

References gum::ApproximationScheme::_burn_in, gum::ApproximationScheme::_current_step, and gum::ApproximationScheme::_period_size.

Referenced by gum::ApproximationScheme::continueApproximationScheme().

195  {
196  if (_current_step < _burn_in) { return false; }
197 
198  if (_period_size == 1) { return true; }
199 
200  return ((_current_step - _burn_in) % _period_size == 0);
201  }
Size _burn_in
Number of iterations before checking stopping criteria.
Size _current_step
The current step.
Size _period_size
Checking criteria frequency.

+ Here is the caller graph for this function:

INLINE IApproximationSchemeConfiguration::ApproximationSchemeSTATE gum::ApproximationScheme::stateApproximationScheme ( ) const
virtualinherited

Returns the approximation scheme state.

Returns
Returns the approximation scheme state.

Implements gum::IApproximationSchemeConfiguration.

Definition at line 156 of file approximationScheme_inl.h.

References gum::ApproximationScheme::_current_state.

Referenced by gum::ApproximationScheme::continueApproximationScheme(), gum::ApproximationScheme::history(), gum::ApproximationScheme::nbrIterations(), and gum::learning::genericBNLearner::stateApproximationScheme().

156  {
157  return _current_state;
158  }
ApproximationSchemeSTATE _current_state
The current state.

+ Here is the caller graph for this function:

INLINE void gum::ApproximationScheme::stopApproximationScheme ( )
inherited

Stop the approximation scheme.

Definition at line 217 of file approximationScheme_inl.h.

References gum::ApproximationScheme::_current_state, gum::ApproximationScheme::_stopScheme(), gum::IApproximationSchemeConfiguration::Continue, and gum::IApproximationSchemeConfiguration::Stopped.

Referenced by gum::learning::GreedyHillClimbing::learnStructure(), and gum::learning::LocalSearchWithTabuList::learnStructure().

+ Here is the call graph for this function:

+ Here is the caller graph for this function:

template<typename GUM_SCALAR >
void gum::credal::InferenceEngine< GUM_SCALAR >::storeBNOpt ( const bool  value)
inherited
Parameters
value: True if optimal Bayesian networks are to be stored for each variable and each modality.

Definition at line 96 of file inferenceEngine_tpl.h.

References gum::credal::InferenceEngine< GUM_SCALAR >::_storeBNOpt.

96  {
97  _storeBNOpt = value;
98  }
bool _storeBNOpt
True if optimal Bayesian networks are stored for each variable and each modality, False otherwise.
template<typename GUM_SCALAR >
bool gum::credal::InferenceEngine< GUM_SCALAR >::storeBNOpt ( ) const
inherited
Returns
True if optimal Bayes nets are stored for each variable and each modality, False otherwise.

Definition at line 132 of file inferenceEngine_tpl.h.

References gum::credal::InferenceEngine< GUM_SCALAR >::_storeBNOpt.

132  {
133  return _storeBNOpt;
134  }
bool _storeBNOpt
True if optimal Bayesian networks are stored for each variable and each modality, False otherwise.
template<typename GUM_SCALAR >
void gum::credal::InferenceEngine< GUM_SCALAR >::storeVertices ( const bool  value)
inherited
Parameters
value: True if vertices are to be stored, false otherwise.

Definition at line 101 of file inferenceEngine_tpl.h.

References gum::credal::InferenceEngine< GUM_SCALAR >::_initMarginalSets(), and gum::credal::InferenceEngine< GUM_SCALAR >::_storeVertices.

101  {
102  _storeVertices = value;
103 
104  if (value) _initMarginalSets();
105  }
void _initMarginalSets()
Initialize credal set vertices with empty sets.
bool _storeVertices
True if credal sets vertices are stored, False otherwise.

+ Here is the call graph for this function:

template<typename GUM_SCALAR >
bool gum::credal::InferenceEngine< GUM_SCALAR >::storeVertices ( ) const
inherited

Get the vertices storage status.

Returns
True if vertices are stored, False otherwise.

Definition at line 127 of file inferenceEngine_tpl.h.

References gum::credal::InferenceEngine< GUM_SCALAR >::_storeVertices.

127  {
128  return _storeVertices;
129  }
bool _storeVertices
True if credal sets vertices are stored, False otherwise.
template<typename GUM_SCALAR >
std::string gum::credal::InferenceEngine< GUM_SCALAR >::toString ( ) const
inherited

Print all node marginals to standard output.

Definition at line 596 of file inferenceEngine_tpl.h.

References gum::credal::InferenceEngine< GUM_SCALAR >::_credalNet, gum::credal::InferenceEngine< GUM_SCALAR >::_marginalMax, gum::credal::InferenceEngine< GUM_SCALAR >::_marginalMin, gum::credal::InferenceEngine< GUM_SCALAR >::_query, gum::HashTable< Key, Val, Alloc >::empty(), and gum::HashTable< Key, Val, Alloc >::exists().

596  {
597  std::stringstream output;
598  output << std::endl;
599 
600  // use cbegin() when available
601  for (const auto& elt : _marginalMin) {
602  Size esize = Size(elt.second.size());
603 
604  for (Size mod = 0; mod < esize; mod++) {
605  output << "P(" << _credalNet->current_bn().variable(elt.first).name()
606  << "=" << mod << "|e) = [ ";
607  output << _marginalMin[elt.first][mod] << ", "
608  << _marginalMax[elt.first][mod] << " ]";
609 
610  if (!_query.empty())
611  if (_query.exists(elt.first) && _query[elt.first][mod])
612  output << " QUERY";
613 
614  output << std::endl;
615  }
616 
617  output << std::endl;
618  }
619 
620  return output.str();
621  }
unsigned long Size
In aGrUM, hashed values are unsigned long int.
Definition: types.h:50
bool exists(const Key &key) const
Checks whether there exists an element with a given key in the hashtable.
margi _marginalMin
Lower marginals.
const CredalNet< GUM_SCALAR > * _credalNet
A pointer to the Credal Net used.
query _query
Holds the query nodes states.
bool empty() const noexcept
Indicates whether the hash table is empty.
margi _marginalMax
Upper marginals.

+ Here is the call graph for this function:

INLINE void gum::ApproximationScheme::updateApproximationScheme ( unsigned int  incr = 1)
inherited

Update the scheme w.r.t the new error and increment steps.

Parameters
incr: The new increment steps.

Definition at line 204 of file approximationScheme_inl.h.

References gum::ApproximationScheme::_current_step.

Referenced by gum::GibbsKL< GUM_SCALAR >::_computeKL(), gum::SamplingInference< GUM_SCALAR >::_loopApproxInference(), gum::learning::GreedyHillClimbing::learnStructure(), gum::learning::LocalSearchWithTabuList::learnStructure(), and gum::credal::CNMonteCarloSampling< GUM_SCALAR, BNInferenceEngine >::makeInference().

204  {
205  _current_step += incr;
206  }
Size _current_step
The current step.

+ Here is the caller graph for this function:

INLINE bool gum::ApproximationScheme::verbosity ( ) const
virtualinherited

Returns true if verbosity is enabled.

Returns
Returns true if verbosity is enabled.

Implements gum::IApproximationSchemeConfiguration.

Definition at line 152 of file approximationScheme_inl.h.

References gum::ApproximationScheme::_verbosity.

Referenced by gum::ApproximationScheme::continueApproximationScheme(), gum::ApproximationScheme::history(), and gum::learning::genericBNLearner::verbosity().

152 { return _verbosity; }
bool _verbosity
If true, verbosity is enabled.

+ Here is the caller graph for this function:

template<typename GUM_SCALAR >
const std::vector< std::vector< GUM_SCALAR > > & gum::credal::InferenceEngine< GUM_SCALAR >::vertices ( const NodeId  id) const
inherited

Get the vertices of a given node id.

Parameters
id: The node id whose vertices we want.
Returns
A constant reference to this node's vertices.

Definition at line 520 of file inferenceEngine_tpl.h.

References gum::credal::InferenceEngine< GUM_SCALAR >::_marginalSets.

520  {
521  return _marginalSets[id];
522  }
credalSet _marginalSets
Credal sets vertices, if enabled.
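A sketch enabling vertex storage before inference and reading the stored credal set of one node afterwards (the variable name is illustrative; whether a given engine actually fills the vertex sets depends on the algorithm):

#include <iostream>
#include <agrum/CN/CNLoopyPropagation.h>

void inspectVertices(gum::credal::CNLoopyPropagation< double >& lp) {
  lp.storeVertices(true);   // must be enabled before makeInference()
  lp.makeInference();

  gum::NodeId id = lp.credalNet().current_bn().idFromName("smoker");   // hypothetical
  for (const auto& vertex : lp.vertices(id)) {   // each vertex is one distribution
    for (double coord : vertex) { std::cout << coord << " "; }
    std::cout << std::endl;
  }
}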

Member Data Documentation

template<typename GUM_SCALAR >
const IBayesNet< GUM_SCALAR >* gum::credal::CNLoopyPropagation< GUM_SCALAR >::__bnet
private
template<typename GUM_SCALAR >
const CredalNet< GUM_SCALAR >* gum::credal::CNLoopyPropagation< GUM_SCALAR >::__cn
private

A pointer to the CredalNet to be used.

Definition at line 372 of file CNLoopyPropagation.h.

Referenced by gum::credal::CNLoopyPropagation< GUM_SCALAR >::CNLoopyPropagation().

template<typename GUM_SCALAR >
InferenceType gum::credal::CNLoopyPropagation< GUM_SCALAR >::__inferenceType
private

The chosen inference type.

nodeToNeighbours by default.

Definition at line 369 of file CNLoopyPropagation.h.

Referenced by gum::credal::CNLoopyPropagation< GUM_SCALAR >::CNLoopyPropagation(), and gum::credal::CNLoopyPropagation< GUM_SCALAR >::inferenceType().
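The type is changed through the inferenceType() setter before calling makeInference(). A fragment, assuming cn is an existing CredalNet< double >; nodeToNeighbours is the only value quoted by this documentation:

using CNLP = gum::credal::CNLoopyPropagation< double >;

CNLP lp( cn );
lp.inferenceType( CNLP::InferenceType::nodeToNeighbours );   // the default, set explicitly
lp.makeInference();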

template<typename GUM_SCALAR >
ArcProperty< GUM_SCALAR > gum::credal::CNLoopyPropagation< GUM_SCALAR >::_ArcsL_max
protected

"Upper" information \( \Lambda \) coming from one's children.

Definition at line 349 of file CNLoopyPropagation.h.

template<typename GUM_SCALAR >
ArcProperty< GUM_SCALAR > gum::credal::CNLoopyPropagation< GUM_SCALAR >::_ArcsL_min
protected

"Lower" information \( \Lambda \) coming from one's children.

Definition at line 337 of file CNLoopyPropagation.h.

template<typename GUM_SCALAR >
ArcProperty< GUM_SCALAR > gum::credal::CNLoopyPropagation< GUM_SCALAR >::_ArcsP_max
protected

"Upper" information \( \pi \) coming from one's parent.

Definition at line 351 of file CNLoopyPropagation.h.

template<typename GUM_SCALAR >
ArcProperty< GUM_SCALAR > gum::credal::CNLoopyPropagation< GUM_SCALAR >::_ArcsP_min
protected

"Lower" information \( \pi \) coming from one's parent.

Definition at line 339 of file CNLoopyPropagation.h.
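Read in terms of standard Pearl propagation, the _ArcsP_* tables hold the \( \pi \) messages a node receives from its parents and the _ArcsL_* tables the \( \lambda \) messages it receives from its children. In the precise case these combine into a node's posterior as \( P(x \mid e) \propto \pi(x)\, \lambda(x) \); the credal variant propagates a lower and an upper bound for each such message, which is what the _min / _max pairs store.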

template<typename GUM_SCALAR >
const CredalNet< GUM_SCALAR >* gum::credal::InferenceEngine< GUM_SCALAR >::_credalNet
protected, inherited

A pointer to the Credal Net used.

Definition at line 72 of file inferenceEngine.h.

Referenced by gum::credal::CNMonteCarloSampling< GUM_SCALAR, BNInferenceEngine >::__mcThreadDataCopy(), gum::credal::CNMonteCarloSampling< GUM_SCALAR, BNInferenceEngine >::__verticesSampling(), gum::credal::InferenceEngine< GUM_SCALAR >::_dynamicExpectations(), gum::credal::InferenceEngine< GUM_SCALAR >::_initExpectations(), gum::credal::InferenceEngine< GUM_SCALAR >::_initMarginals(), gum::credal::InferenceEngine< GUM_SCALAR >::_initMarginalSets(), gum::credal::InferenceEngine< GUM_SCALAR >::_repetitiveInit(), gum::credal::InferenceEngine< GUM_SCALAR >::_updateExpectations(), gum::credal::InferenceEngine< GUM_SCALAR >::credalNet(), gum::credal::InferenceEngine< GUM_SCALAR >::expectationMax(), gum::credal::InferenceEngine< GUM_SCALAR >::expectationMin(), gum::credal::InferenceEngine< GUM_SCALAR >::InferenceEngine(), gum::credal::InferenceEngine< GUM_SCALAR >::insertEvidence(), gum::credal::InferenceEngine< GUM_SCALAR >::insertEvidenceFile(), gum::credal::InferenceEngine< GUM_SCALAR >::insertModals(), gum::credal::InferenceEngine< GUM_SCALAR >::insertQuery(), gum::credal::InferenceEngine< GUM_SCALAR >::insertQueryFile(), gum::credal::InferenceEngine< GUM_SCALAR >::marginalMax(), gum::credal::InferenceEngine< GUM_SCALAR >::marginalMin(), gum::credal::InferenceEngine< GUM_SCALAR >::saveMarginals(), gum::credal::InferenceEngine< GUM_SCALAR >::saveVertices(), and gum::credal::InferenceEngine< GUM_SCALAR >::toString().

double gum::ApproximationScheme::_current_epsilon
protected, inherited
double gum::ApproximationScheme::_current_rate
protected, inherited
template<typename GUM_SCALAR >
VarMod2BNsMap< GUM_SCALAR > gum::credal::InferenceEngine< GUM_SCALAR >::_dbnOpt
protected, inherited
template<typename GUM_SCALAR >
dynExpe gum::credal::InferenceEngine< GUM_SCALAR >::_dynamicExpMax
protected, inherited
template<typename GUM_SCALAR >
dynExpe gum::credal::InferenceEngine< GUM_SCALAR >::_dynamicExpMin
protected, inherited
bool gum::ApproximationScheme::_enabled_max_iter
protected, inherited
bool gum::ApproximationScheme::_enabled_min_rate_eps
protected, inherited
double gum::ApproximationScheme::_eps
protected, inherited