"""Probability models. (Chapter 13-15)
"""

from utils import *  # noqa
from logic import extend
import random
from collections import defaultdict
from functools import reduce
# ______________________________________________________________________________

def DTAgentProgram(belief_state):
    """A decision-theoretic agent. [Fig. 13.1]
    Updates the belief state from each percept, then picks the action
    with the highest expected outcome utility."""
    def program(percept):
        belief_state.observe(program.action, percept)
        # argmax(seq, fn) from utils: action maximizing expected utility.
        program.action = argmax(belief_state.actions(),
                                belief_state.expected_outcome_utility)
        return program.action
    program.action = None  # no action taken yet on the first percept
    return program
# ______________________________________________________________________________
class ProbDist:

    """A discrete probability distribution.  You name the random variable
    in the constructor, then assign and query probability of values.
    >>> P = ProbDist('Flip'); P['H'], P['T'] = 0.25, 0.75; P['H']
    0.25
    >>> P = ProbDist('X', {'lo': 125, 'med': 375, 'hi': 500})
    >>> P['lo'], P['med'], P['hi']
    (0.125, 0.375, 0.5)
    """

    def __init__(self, varname='?', freqs=None):
        """If freqs is given, it is a dictionary of value: frequency pairs,
        and the ProbDist then is normalized."""
        self.prob = {}
        self.varname = varname
        self.values = []
        if freqs:
            for (v, p) in freqs.items():
                self[v] = p
            self.normalize()

    def __getitem__(self, val):
        "Given a value, return P(value)."
        try:
            return self.prob[val]
        except KeyError:
            # Unseen values have probability 0 rather than raising.
            return 0

    def __setitem__(self, val, p):
        "Set P(val) = p."
        if val not in self.values:
            self.values.append(val)
        self.prob[val] = p

    def normalize(self):
        """Make sure the probabilities of all values sum to 1.
        Returns the normalized distribution.
        Raises a ZeroDivisionError if the sum of the values is 0.
        >>> P = ProbDist('Flip'); P['H'], P['T'] = 35, 65
        >>> P = P.normalize()
        >>> print('%5.3f %5.3f' % (P.prob['H'], P.prob['T']))
        0.350 0.650
        """
        total = float(sum(self.prob.values()))
        # epsilon comes from utils; skip the division if already normalized.
        if not (1.0 - epsilon < total < 1.0 + epsilon):
            for val in self.prob:
                self.prob[val] /= total
        return self

    def show_approx(self, numfmt='%.3g'):
        """Show the probabilities rounded and sorted by key, for the
        sake of portable doctests."""
        return ', '.join([('%s: ' + numfmt) % (v, p)
                          for (v, p) in sorted(self.prob.items())])

class JointProbDist(ProbDist):

    """A discrete probability distribution over a set of variables.
    >>> P = JointProbDist(['X', 'Y']); P[1, 1] = 0.25
    >>> P[1, 1]
    0.25
    >>> P[dict(X=0, Y=1)] = 0.5
    >>> P[dict(X=0, Y=1)]
    0.5
    """

    def __init__(self, variables):
        self.prob = {}
        self.variables = variables
        # Values observed so far for each variable, in insertion order.
        self.vals = defaultdict(list)

    def __getitem__(self, values):
        "Given a tuple or dict of values, return P(values)."
        values = event_values(values, self.variables)
        return ProbDist.__getitem__(self, values)

    def __setitem__(self, values, p):
        """Set P(values) = p.  Values can be a tuple or a dict; it must
        have a value for each of the variables in the joint. Also keep track
        of the values we have seen so far for each variable."""
        values = event_values(values, self.variables)
        self.prob[values] = p
        for var, val in zip(self.variables, values):
            if val not in self.vals[var]:
                self.vals[var].append(val)

    def values(self, var):
        "Return the set of possible values for a variable."
        return self.vals[var]

    def __repr__(self):
        return "P(%s)" % self.variables

def event_values(event, variables):
    """Return a tuple of the values of variables variables in event.
    >>> event_values ({'A': 10, 'B': 9, 'C': 8}, ['C', 'A'])
    (8, 10)
    >>> event_values ((1, 2), ['C', 'A'])
    (1, 2)
    """
    # A tuple of the right length is taken to already be in variable order.
    already_ordered = isinstance(event, tuple) and len(event) == len(variables)
    if already_ordered:
        return event
    return tuple(event[var] for var in variables)
# ______________________________________________________________________________
def enumerate_joint_ask(X, e, P):
    """Return a probability distribution over the values of the variable X,
    given the {var:val} observations e, in the JointProbDist P. [Section 13.3]
    >>> P = JointProbDist(['X', 'Y'])
    >>> P[0,0] = 0.25; P[0,1] = 0.5; P[1,1] = P[2,1] = 0.125
    >>> enumerate_joint_ask('X', dict(Y=1), P).show_approx()
    '0: 0.667, 1: 0.167, 2: 0.167'
    """
    assert X not in e, "Query variable must be distinct from evidence"
    Q = ProbDist(X)  # probability distribution for X, initially empty
    Y = [v for v in P.variables if v != X and v not in e]  # hidden variables.
    for xi in P.values(X):
        Q[xi] = enumerate_joint(Y, extend(e, X, xi), P)
    return Q.normalize()

def enumerate_joint(variables, e, P):
    """Return the sum of those entries in P consistent with e,
    provided variables is P's remaining variables (the ones not in e)."""
    if not variables:
        # No hidden variables left: e pins down a single joint entry.
        return P[e]
    Y, rest = variables[0], variables[1:]
    # Sum out Y over every value it has been observed to take.
    return sum([enumerate_joint(rest, extend(e, Y, y), P)
                for y in P.values(Y)])
# ______________________________________________________________________________
class BayesNet:

    "Bayesian network containing only boolean-variable nodes."

    def __init__(self, node_specs=None):
        """Initialize from a list of node specs (each a tuple of BayesNode
        constructor arguments); nodes must be ordered with parents before
        children."""
        self.nodes = []
        self.variables = []
        # Avoid a mutable default argument; None means "empty network".
        for node_spec in (node_specs or []):
            self.add(node_spec)

    def add(self, node_spec):
        """Add a node to the net. Its parents must already be in the
        net, and its variable must not."""
        node = BayesNode(*node_spec)
        assert node.variable not in self.variables
        assert all(parent in self.variables for parent in node.parents)
        self.nodes.append(node)
        self.variables.append(node.variable)
        # Register this node as a child of each of its parents.
        for parent in node.parents:
            self.variable_node(parent).children.append(node)

    def variable_node(self, var):
        """Return the node for the variable named var.
        >>> burglary.variable_node('Burglary').variable
        'Burglary'"""
        for n in self.nodes:
            if n.variable == var:
                return n
        raise Exception("No such variable: %s" % var)

    def variable_values(self, var):
        "Return the domain of var."
        return [True, False]  # boolean-only network

    def __repr__(self):
        return 'BayesNet(%r)' % self.nodes
    """A conditional probability distribution for a boolean variable,
    P(X | parents). Part of a BayesNet."""

    def __init__(self, X, parents, cpt):
        """X is a variable name, and parents a sequence of variable
        names or a space-separated string.  cpt, the conditional
        probability table, takes one of these forms:

        * A number, the unconditional probability P(X=true). You can
          use this form when there are no parents.

        * A dict {v: p, ...}, the conditional probability distribution
          P(X=true | parent=v) = p. When there's just one parent.

        * A dict {(v1, v2, ...): p, ...}, the distribution P(X=true |
          parent1=v1, parent2=v2, ...) = p. Each key must have as many
          values as there are parents. You can use this form always;
          the first two are just conveniences.

        In all cases the probability of X being false is left implicit,
        since it follows from P(X=true).

        >>> X = BayesNode('X', '', 0.2)
        >>> Y = BayesNode('Y', 'P', {T: 0.2, F: 0.7})
withal's avatar
withal a validé
        >>> Z = BayesNode('Z', 'P Q',
        ...    {(T, T): 0.2, (T, F): 0.3, (F, T): 0.5, (F, F): 0.7})
        """
MircoT's avatar
MircoT a validé
        if isinstance(parents, str):
            parents = parents.split()

        # We store the table always in the third form above.
MircoT's avatar
MircoT a validé
        if isinstance(cpt, (float, int)):  # no parents, 0-tuple
            cpt = {(): cpt}
        elif isinstance(cpt, dict):
MircoT's avatar
MircoT a validé
            # one parent, 1-tuple
            if cpt and isinstance(list(cpt.keys())[0], bool):
MircoT's avatar
MircoT a validé
                cpt = dict(((v,), p) for v, p in list(cpt.items()))
MircoT's avatar
MircoT a validé
        for vs, p in list(cpt.items()):
            assert isinstance(vs, tuple) and len(vs) == len(parents)
            assert every(lambda v: isinstance(v, bool), vs)
            assert 0 <= p <= 1
        self.variable = X
        self.parents = parents
        self.cpt = cpt
        self.children = []

    def p(self, value, event):
withal's avatar
withal a validé
        """Return the conditional probability
        P(X=value | parents=parent_values), where parent_values
        are the values of parents in event. (event must assign each
        parent a value.)
        >>> bn = BayesNode('X', 'Burglary', {T: 0.2, F: 0.625})
        >>> bn.p(False, {'Burglary': False, 'Earthquake': True})
        0.375"""
        assert isinstance(value, bool)
        ptrue = self.cpt[event_values(event, self.parents)]
        return (ptrue if value else 1 - ptrue)

    def sample(self, event):
        """Sample from the distribution for this variable conditioned
        on event's values for parent_variables. That is, return True/False
        at random according with the conditional probability given the
        parents."""
withal's avatar
withal a validé
        return probability(self.p(True, event))
withal's avatar
withal a validé
    def __repr__(self):
        return repr((self.variable, ' '.join(self.parents)))
# Burglary example [Fig. 14.2]

T, F = True, False

burglary = BayesNet([
    ('Burglary', '', 0.001),
    ('Earthquake', '', 0.002),
    ('Alarm', 'Burglary Earthquake',
     {(T, T): 0.95, (T, F): 0.94, (F, T): 0.29, (F, F): 0.001}),
    ('JohnCalls', 'Alarm', {T: 0.90, F: 0.05}),
    ('MaryCalls', 'Alarm', {T: 0.70, F: 0.01})
])
# ______________________________________________________________________________
def enumeration_ask(X, e, bn):
    """Return the conditional probability distribution of variable X
    given evidence e, from BayesNet bn. [Fig. 14.9]
    >>> enumeration_ask('Burglary', dict(JohnCalls=T, MaryCalls=T), burglary
    ...  ).show_approx()
    'False: 0.716, True: 0.284'"""
    assert X not in e, "Query variable must be distinct from evidence"
    Q = ProbDist(X)
    for xi in bn.variable_values(X):
        Q[xi] = enumerate_all(bn.variables, extend(e, X, xi), bn)
    return Q.normalize()
def enumerate_all(variables, e, bn):
    """Return the sum of those entries in P(variables | e{others})
    consistent with e, where P is the joint distribution represented
    by bn, and e{others} means e restricted to bn's other variables
    (the ones other than variables). Parents must precede children in variables."""
    if not variables:
        # Base case: the empty product.
        return 1.0
    Y, rest = variables[0], variables[1:]
    Ynode = bn.variable_node(Y)
    if Y in e:
        # Y is evidence: take its probability and recurse.
        return Ynode.p(e[Y], e) * enumerate_all(rest, e, bn)
    else:
        # Y is hidden: sum over both of its values.
        return sum(Ynode.p(y, e) * enumerate_all(rest, extend(e, Y, y), bn)
                   for y in bn.variable_values(Y))
# ______________________________________________________________________________
def elimination_ask(X, e, bn):
    """Compute bn's P(X|e) by variable elimination. [Fig. 14.11]
    >>> elimination_ask('Burglary', dict(JohnCalls=T, MaryCalls=T), burglary
    ...  ).show_approx()
    'False: 0.716, True: 0.284'"""
    assert X not in e, "Query variable must be distinct from evidence"
    factors = []
    # Work from children toward parents, eliminating hidden variables as we go.
    for var in reversed(bn.variables):
        factors.append(make_factor(var, e, bn))
        if is_hidden(var, X, e):
            factors = sum_out(var, factors, bn)
    return pointwise_product(factors, bn).normalize()
def is_hidden(var, X, e):
    "Is var a hidden variable when querying P(X|e)?"
    return var != X and var not in e

def make_factor(var, e, bn):
    """Return the factor for var in bn's joint distribution given e.
    That is, bn's full joint distribution, projected to accord with e,
    is the pointwise product of these factors for bn's variables."""
    node = bn.variable_node(var)
    # The factor ranges over var and its parents, minus anything fixed by e.
    variables = [X for X in [var] + node.parents if X not in e]
    cpt = dict((event_values(e1, variables), node.p(e1[var], e1))
               for e1 in all_events(variables, bn, e))
    return Factor(variables, cpt)
def pointwise_product(factors, bn):
    "Fold a sequence of factors into a single factor by pointwise product."
    return reduce(lambda acc, factor: acc.pointwise_product(factor, bn),
                  factors)

def sum_out(var, factors, bn):
    "Eliminate var from all factors by summing over its values."
    result, var_factors = [], []
    for f in factors:
        # Factors not mentioning var pass through untouched.
        (var_factors if var in f.variables else result).append(f)
    result.append(pointwise_product(var_factors, bn).sum_out(var, bn))
    return result

class Factor:

    "A factor in a joint distribution."

    def __init__(self, variables, cpt):
        self.variables = variables
        # Restored: the original dropped this assignment, so every method
        # below failed with AttributeError.
        self.cpt = cpt

    def pointwise_product(self, other, bn):
        "Multiply two factors, combining their variables."
        variables = list(set(self.variables) | set(other.variables))
        cpt = dict((event_values(e, variables), self.p(e) * other.p(e))
                   for e in all_events(variables, bn, {}))
        return Factor(variables, cpt)

    def sum_out(self, var, bn):
        "Make a factor eliminating var by summing over its values."
        variables = [X for X in self.variables if X != var]
        cpt = dict((event_values(e, variables),
                    sum(self.p(extend(e, var, val))
                        for val in bn.variable_values(var)))
                   for e in all_events(variables, bn, {}))
        return Factor(variables, cpt)

    def normalize(self):
        "Return my probabilities; must be down to one variable."
        assert len(self.variables) == 1
        return ProbDist(self.variables[0],
                        dict((k, v) for ((k,), v) in self.cpt.items()))

    def p(self, e):
        "Look up my value tabulated for e."
        return self.cpt[event_values(e, self.variables)]
def all_events(variables, bn, e):
    "Yield every way of extending e with values for all variables."
    if not variables:
        yield e
    else:
        X, rest = variables[0], variables[1:]
        for e1 in all_events(rest, bn, e):
            for x in bn.variable_values(X):
                yield extend(e1, X, x)
# ______________________________________________________________________________
# Fig. 14.12a: sprinkler network

sprinkler = BayesNet([
    ('Cloudy', '', 0.5),
    ('Sprinkler', 'Cloudy', {T: 0.10, F: 0.50}),
    ('Rain', 'Cloudy', {T: 0.80, F: 0.20}),
    ('WetGrass', 'Sprinkler Rain',
     {(T, T): 0.99, (T, F): 0.90, (F, T): 0.90, (F, F): 0.00})])
# ______________________________________________________________________________
def prior_sample(bn):
    """Randomly sample from bn's full joint distribution. The result
    is a {variable: value} dict. [Fig. 14.13]"""
    event = {}
    # bn.nodes is topologically ordered, so parents are sampled first.
    for node in bn.nodes:
        event[node.variable] = node.sample(event)
    return event
# _________________________________________________________________________
def rejection_sampling(X, e, bn, N):
    """Estimate the probability distribution of variable X given
    evidence e in BayesNet bn, using N samples.  [Fig. 14.14]
    Raises a ZeroDivisionError if all the N samples are rejected,
    i.e., inconsistent with e.
    >>> random.seed(47)
    >>> rejection_sampling('Burglary', dict(JohnCalls=T, MaryCalls=T),
    ...   burglary, 10000).show_approx()
    'False: 0.7, True: 0.3'
    """
    counts = dict((x, 0)
                  for x in bn.variable_values(X))  # bold N in Fig. 14.14
    for j in range(N):
        sample = prior_sample(bn)  # boldface x in Fig. 14.14
        # Keep only samples that agree with the evidence.
        if consistent_with(sample, e):
            counts[sample[X]] += 1
    return ProbDist(X, counts)
def consistent_with(event, evidence):
    "Is event consistent with the given evidence?"
    # Variables absent from evidence are unconstrained (default to their
    # own value, so the comparison trivially succeeds).
    return all(evidence.get(k, v) == v
               for k, v in event.items())
# _________________________________________________________________________
def likelihood_weighting(X, e, bn, N):
    """Estimate the probability distribution of variable X given
    evidence e in BayesNet bn.  [Fig. 14.15]
    >>> random.seed(1017)
    >>> likelihood_weighting('Burglary', dict(JohnCalls=T, MaryCalls=T),
    ...   burglary, 10000).show_approx()
    'False: 0.702, True: 0.298'
    """
    W = dict((x, 0) for x in bn.variable_values(X))
    for j in range(N):
        sample, weight = weighted_sample(bn, e)  # boldface x, w in Fig. 14.15
        W[sample[X]] += weight
    return ProbDist(X, W)
def weighted_sample(bn, e):
    """Sample an event from bn that's consistent with the evidence e;
    return the event and its weight, the likelihood that the event
    accords to the evidence."""
    w = 1
    event = dict(e)  # boldface x in Fig. 14.15
    for node in bn.nodes:
        Xi = node.variable
        if Xi in e:
            # Evidence variable: keep its fixed value, weight by likelihood.
            w *= node.p(e[Xi], event)
        else:
            # Non-evidence variable: sample it given its parents.
            event[Xi] = node.sample(event)
    return event, w
# _________________________________________________________________________
def gibbs_ask(X, e, bn, N):
    """[Fig. 14.16]
    >>> random.seed(1017)
    >>> gibbs_ask('Burglary', dict(JohnCalls=T, MaryCalls=T), burglary, 1000
    ...  ).show_approx()
    'False: 0.738, True: 0.262'
    """
    assert X not in e, "Query variable must be distinct from evidence"
    counts = dict((x, 0)
                  for x in bn.variable_values(X))  # bold N in Fig. 14.16
    Z = [var for var in bn.variables if var not in e]  # non-evidence variables
    state = dict(e)  # boldface x in Fig. 14.16
    # Initialize the non-evidence variables at random.
    for Zi in Z:
        state[Zi] = random.choice(bn.variable_values(Zi))
    for j in range(N):
        for Zi in Z:
            state[Zi] = markov_blanket_sample(Zi, state, bn)
            counts[state[X]] += 1
    return ProbDist(X, counts)

def markov_blanket_sample(X, e, bn):
    """Return a sample from P(X | mb) where mb denotes that the
    variables in the Markov blanket of X take their values from event
    e (which must assign a value to each). The Markov blanket of X is
    X's parents, children, and children's parents."""
    Xnode = bn.variable_node(X)
    Q = ProbDist(X)
    for xi in bn.variable_values(X):
        ei = extend(e, X, xi)
        # [Equation 14.12:]
        Q[xi] = Xnode.p(xi, e) * product(Yj.p(ei[Yj.variable], ei)
                                         for Yj in Xnode.children)
    # (assuming a Boolean variable here)
    return probability(Q.normalize()[True])

# _________________________________________________________________________
class HiddenMarkovModel:

    """A hidden Markov model, holding a transition model and a sensor model.

    transition_model: transition_model[i] is the next-state distribution
        given current state i (a 2x2 matrix as used by forward/backward).
    sensor_model: sensor_model[0] / sensor_model[1] are the per-state
        likelihood vectors for evidence True / False respectively.
    prior: initial state distribution; defaults to uniform [0.5, 0.5].
    """

    def __init__(self, transition_model, sensor_model, prior=None):
        self.transition_model = transition_model
        self.sensor_model = sensor_model
        # Avoid a shared mutable default argument for the prior.
        self.prior = [0.5, 0.5] if prior is None else prior

    # NOTE: the original also defined a transition_model() accessor, but the
    # instance attribute of the same name set in __init__ shadowed it, making
    # it unreachable; callers read self.transition_model directly.

    def sensor_dist(self, ev):
        "Return the sensor likelihood vector for evidence value ev."
        if ev is True:
            return self.sensor_model[0]
        else:
            return self.sensor_model[1]


def forward(HMM, fv, ev):
    """One filtering step: propagate the forward message fv through the
    transition model, then condition on the new evidence ev and normalize."""
    prediction = vector_add(scalar_vector_product(fv[0], HMM.transition_model[0]),
                            scalar_vector_product(fv[1], HMM.transition_model[1]))
    sensor_dist = HMM.sensor_dist(ev)
    return normalize(element_wise_product(sensor_dist, prediction))

def backward(HMM, b, ev):
    """One backward-message step: condition b on the evidence ev, then pass
    the result back through the transition model and normalize."""
    likelihood = element_wise_product(HMM.sensor_dist(ev), b)
    message = vector_add(scalar_vector_product(likelihood[0], HMM.transition_model[0]),
                         scalar_vector_product(likelihood[1], HMM.transition_model[1]))
    return normalize(message)


def forward_backward(HMM, ev, prior):
    """[Fig. 15.4]
    Forward-Backward algorithm for smoothing. Computes posterior probabilities
    of a sequence of states given a sequence of observations.

    umbrella_evidence = [T, T, F, T, T]
    umbrella_prior = [0.5, 0.5]
    umbrella_transition = [[0.7, 0.3], [0.3, 0.7]]
    umbrella_sensor = [[0.9, 0.2], [0.1, 0.8]]
    umbrellaHMM = HiddenMarkovModel(umbrella_transition, umbrella_sensor)

    >>> forward_backward(umbrellaHMM, umbrella_evidence, umbrella_prior)
    [[0.6469, 0.3531], [0.8673, 0.1327], [0.8204, 0.1796],
     [0.3075, 0.6925], [0.8204, 0.1796], [0.8673, 0.1327]]
    """
    t = len(ev)
    ev.insert(0, None)  # to make the code look similar to pseudo code

    fv = [[0.0, 0.0] for i in range(len(ev))]
    b = [1.0, 1.0]
    bv = [b]    # we don't need bv; but we will have a list of all backward messages here
    sv = [[0, 0] for i in range(len(ev))]

    # Seed the forward pass with the prior (the prior parameter is unused
    # otherwise).
    fv[0] = prior
    for i in range(1, t + 1):
        fv[i] = forward(HMM, fv[i - 1], ev[i])
    for i in range(t, -1, -1):
        sv[i - 1] = normalize(element_wise_product(fv[i], b))
        b = backward(HMM, b, ev[i])
        bv.append(b)

    sv = sv[::-1]

    return sv

# _________________________________________________________________________
Sidharth Sindhra's avatar
Sidharth Sindhra a validé

def fixed_lag_smoothing(e_t, HMM, d, ev, t):
    """[Fig. 15.6]
    Smoothing algorithm with a fixed time lag of 'd' steps.
    Online algorithm that outputs the new smoothed estimate if observation
    for new time step is given.

    umbrella_evidence = [T, T, F, T, T]
    e_t = T
    t = 4
    d = 3
    umbrella_transition = [[0.7, 0.3], [0.3, 0.7]]
    umbrella_sensor = [[0.9, 0.2], [0.1, 0.8]]
    umbrellaHMM = HiddenMarkovModel(umbrella_transition, umbrella_sensor)

    >>> fixed_lag_smoothing(T, umbrellaHMM, d)
    """
    # NOTE(review): this mutates the caller's ev list by prepending None
    # (1-based indexing mirroring the pseudocode) — confirm callers expect it.
    ev.insert(0, None)

    T_model = HMM.transition_model
    f = HMM.prior  # forward message
    B = [[1, 0], [0, 1]]  # 2x2 identity; accumulates the lag-window product
    evidence = []

    # NOTE(review): `evidence` collects e_t but is never read afterwards.
    evidence.append(e_t)
    # Diagonal observation matrix O_t built from the sensor likelihoods.
    O_t = vector_to_diagonal(HMM.sensor_dist(e_t))
    if t > d:
        # Window is full: advance f one step and roll the oldest observation
        # matrix O_{t-d} out of B while multiplying the newest one in.
        f = forward(HMM, f, e_t)
        O_tmd = vector_to_diagonal(HMM.sensor_dist(ev[t- d]))
        B = matrix_multiplication(inverse_matrix(O_tmd), inverse_matrix(T_model), B, T_model, O_t)
    else:
        # Still filling the lag window: just extend B.
        B = matrix_multiplication(B, T_model, O_t)
    t = t + 1

    if t > d:
        # always returns a 1x2 matrix
        return([normalize(i) for i in matrix_multiplication([f], B)][0])
    else:
        # Not enough observations yet to emit a smoothed estimate.
        return None

# _________________________________________________________________________
def particle_filtering(e, N, HMM):
    """
    Particle filtering considering two state variables ('A' and 'B').
    N = 10
    umbrella_evidence = T
    umbrella_prior = [0.5, 0.5]
    umbrella_transition = [[0.7, 0.3], [0.3, 0.7]]
    umbrella_sensor = [[0.9, 0.2], [0.1, 0.8]]
    umbrellaHMM = HiddenMarkovModel(umbrella_transition, umbrella_sensor)

    >>> particle_filtering(umbrella_evidence, N, umbrellaHMM)
    ['A', 'A', 'A', 'B', 'A', 'A', 'B', 'A', 'A', 'A', 'B']

    NOTE: Output is a probabilistic answer, therefore it can vary
    """
    dist = [0.5, 0.5]
    # State Initialization
    s = ['A' if probability(dist[0]) else 'B' for i in range(N)]
    # Weight Initialization
    w = [0 for i in range(N)]
    # STEP 1
    # Propagate one step using transition model given prior state
    dist = vector_add(scalar_vector_product(dist[0], HMM.transition_model[0]),
                      scalar_vector_product(dist[1], HMM.transition_model[1]))
    # Assign state according to probability
    s = ['A' if probability(dist[0]) else 'B' for i in range(N)]
    w_tot = 0
    # Calculate importance weight given evidence e
    for i in range(N):
        if s[i] == 'A':
            # P(U|A)*P(A)
            w_i = HMM.sensor_dist(e)[0] * dist[0]
        elif s[i] == 'B':
            # P(U|B)*P(B)
            w_i = HMM.sensor_dist(e)[1] * dist[1]
        w[i] = w_i
        w_tot += w_i

    # Normalize all the weights
    for i in range(N):
        w[i] = w[i] / w_tot

    # Limit weights to 4 digits
    for i in range(N):
        w[i] = float("{0:.4f}".format(w[i]))

    # STEP 2
    # Resample particles in proportion to their weights.
    s = weighted_sample_with_replacement(N, s, w)
    return s

def weighted_sample_with_replacement(N, s, w):
    """
    Performs weighted sampling over the particles given the weight of each
    particle. We keep picking random particles until we have filled N slots
    in the new distribution.
    """
    s_wtd = []
    cnt = 0
    # Fixed off-by-one: the original looped `while cnt <= N`, which produced
    # N+1 samples instead of the documented N.
    while cnt < N:
        # Pick a random particle index and accept it with probability w[i]
        # (rejection sampling against the weights).
        i = random.randint(0, N - 1)
        if probability(w[i]):
            s_wtd.append(s[i])
            cnt += 1
    return s_wtd
# _________________________________________________________________________
__doc__ += """
# We can build up a probability distribution like this (p. 469):
>>> P = ProbDist()
>>> P['sunny'] = 0.7
>>> P['rain'] = 0.2
>>> P['cloudy'] = 0.08
>>> P['snow'] = 0.02

# and query it like this:  (Never mind this ELLIPSIS option
#                           added to make the doctest portable.)
>>> P['rain']               #doctest:+ELLIPSIS
0.2...

# A Joint Probability Distribution is dealt with like this (Fig. 13.3):  # noqa
>>> P = JointProbDist(['Toothache', 'Cavity', 'Catch'])
>>> T, F = True, False
>>> P[T, T, T] = 0.108; P[T, T, F] = 0.012; P[F, T, T] = 0.072; P[F, T, F] = 0.008
>>> P[T, F, T] = 0.016; P[T, F, F] = 0.064; P[F, F, T] = 0.144; P[F, F, F] = 0.576

>>> P[T, T, T]
0.108

# Ask for P(Cavity|Toothache=T)
>>> PC = enumerate_joint_ask('Cavity', {'Toothache': T}, P)
>>> PC.show_approx()
'False: 0.4, True: 0.6'

>>> 0.6-epsilon < PC[T] < 0.6+epsilon
True

>>> 0.4-epsilon < PC[F] < 0.4+epsilon
True
"""