# Example: the "occasionally dishonest casino" HMM (fair vs. biased coin)

# Import numpy and hmmlearn

import numpy as np

from hmmlearn import hmm

import matplotlib.pyplot as plt

# create a discrete HMM for the unfair casino with 2 states (fair and unfair coin)

# Discrete-output HMM for the unfair casino: two hidden states
# (fair coin vs. biased coin) emitting two symbols (Head=0, Tail=1).
unfair_casino = hmm.MultinomialHMM(n_components=2)

# Two emission symbols per state (Head=0, Tail=1).
unfair_casino.n_features_ = 2

# Uniform probability of starting in either hidden state.
unfair_casino.startprob_ = np.full(2, 0.5)

# Transition matrix A: each state persists with probability 0.9.
unfair_casino.transmat_ = np.array([[0.9, 0.1],
                                    [0.1, 0.9]])

# Emission matrix B: the fair coin is 50/50; the biased coin
# produces Head 90% of the time.
unfair_casino.emissionprob_ = np.array([[0.5, 0.5],
                                        [0.9, 0.1]])

#evaluate sequence with results (HTHHHHHHHHTTHTHT) 0

# Observed coin-flip sequence HTHHHHHHHHTTHTHT encoded as Head=0, Tail=1.
example = [0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 0, 1]

# Decode the most probable hidden-state path with the Viterbi algorithm.
# decode() expects a (n_samples, 1) column of observations and returns
# a pair (log-probability, state_sequence).
res = unfair_casino.decode(np.transpose([example]))

# Plot the decoded state sequence (0 = fair coin, 1 = biased coin).
# NOTE: this is the Viterbi path, not the posterior distribution —
# use predict_proba() if true posterior state probabilities are wanted.
# (Fixed: the original line used curly "smart" quotes, a SyntaxError,
# and labeled the decoded states as "observations".)
plt.plot(range(len(example)), res[1], ".-", label="decoded states",
         ms=6, mfc="orange", alpha=0.7, lw=2)
plt.axis([0, len(example), -0.01, 1.01])
plt.show()

# sample data from model

# Sample 100 draws from the model. sample() returns a pair
# (observations, hidden_states).
# (Fixed: the original decoded res[1] — the hidden-state sequence —
# instead of the emitted observations res[0].)
observations, hidden_states = unfair_casino.sample(100)
example = observations.ravel()

# Decode the most probable hidden-state path with the Viterbi algorithm;
# decode() returns (log-probability, state_sequence).
res = unfair_casino.decode(np.transpose([example]))

# Plot the decoded state sequence (0 = fair coin, 1 = biased coin).
# NOTE: this is the Viterbi path, not a posterior distribution.
# (Fixed: the original line used curly "smart" quotes, a SyntaxError.)
plt.plot(range(len(example)), res[1], ".-", label="decoded states",
         ms=6, mfc="orange", alpha=0.7, lw=2)
plt.axis([0, len(example), -0.01, 1.01])
plt.show()

# Exercises:

# 1 – Train model using Baum-welch

# 2 – Use Posterior Decoding instead of viterbi

# 3 – Increase the number of coin changes and print the posterior.

# see here for documentation:

# http://hmmlearn.readthedocs.io/en/stable/