# A simple implementation of the forward-backward algorithm for a hidden
# Markov model (written for illustration, not for computational optimality).

# Starting probabilities for x_1: the chain starts in state 1 with certainty.
startprobs <- c(1, 0, 0, 0, 0, 0, 0, 0, 0, 0)
M <- length(startprobs)

# Hidden Markov model with each x_i taking values 1, ..., M and transition
# probabilities:
#   1/3 increase by 1 (if possible)
#   1/3 decrease by 1 (if possible)
#   remaining probability: stay at the same value.
# Matrix of transition probabilities (rows = current state, cols = next state):
transprobs <- diag(M) / 3 +
  rbind(cbind(0, diag(M - 1) / 3), 0) +
  rbind(0, cbind(diag(M - 1) / 3, 0))
transprobs[1, 1] <- transprobs[M, M] <- 2 / 3

# Observed data, modeled as y_i | x_i ~ Poisson(x_i).
y <- c(3, 5, 8, 8, 8, 8, 4, 3, 4, 2, 1, 1, 1, 0, 1,
       3, 2, 1, 3, 8, 12, 14, 11, 10)
N <- length(y)

# Forward pass: probsForward[i, ] is proportional to P(x_i = k, y_1..y_i).
start <- startprobs * dpois(y[1], 1:M)
probsForward <- matrix(start / sum(start), N, M, byrow = TRUE)
for (i in 2:N) {
  res <- dpois(y[i], 1:M) * (probsForward[i - 1, ] %*% transprobs)
  probsForward[i, ] <- res / sum(res)  # normalization only to improve numerics
}

# Backward pass: probsBackward[i, ] is proportional to P(y_{i+1}..y_N | x_i = k).
# The emission probability of y_{i+1} must multiply the NEXT state's backward
# value inside the matrix product:
#   beta_i = transprobs %*% (dpois(y[i+1], 1:M) * beta_{i+1})
# (Note: %*% binds tighter than * in R, so the parentheses are essential;
# writing transprobs %*% beta * e would index the emission by the current
# state instead, which is incorrect.)
probsBackward <- matrix(1, N, M)
for (i in (N - 1):1) {
  res <- transprobs %*% (dpois(y[i + 1], 1:M) * probsBackward[i + 1, ])
  probsBackward[i, ] <- res / sum(res)  # normalization only to improve numerics
}

# Marginal posterior probabilities P(x_i = k | y_1..y_N), row-normalized.
result <- probsForward * probsBackward
result <- result / rowSums(result)

# Posterior expected value E[x_i | y_1..y_N] = sum_k k * P(x_i = k | y).
expected <- as.vector(result %*% (1:M))

plot(y)
points(expected, col = "red")