## Note the start time
start.time <- proc.time()[3]
figurepath <- "figures/chap5figures/"
filepath <- "txttables/"
library(bmstdr)
## Warning in .recacheSubclasses(def@className, def, env): undefined subclass
## "numericVector" of class "Mnumeric"; definition not updated
library(ggplot2)
library(ggpubr)
1 Code to reproduce Figure 5.1
n <- 2000
x <- seq(from=-6, to=7, length=n)
y <- dnorm(x, mean=-2)
y1 <- dnorm(x, mean=2.5, sd=1.5)
y2 <- dnorm(x, mean=-3)
y3 <- dnorm(x, mean=3)
y4 <- dnorm(x, sd=0.6)
y5 <- dt(x, df=4)
y6 <- dt(x, df=5)
y7 <- dt(x, df=1, ncp=1)
par(mfrow=c(2,2))
plot(x, y, type="l")
lines(x, y1, lty=2)
plot(x, y2, type="l")
lines(x, y3, lty=2)
plot(x, y4, type="l")
lines(x, y5, lty=2)
plot(x, y6, type="l")
lines(x, y7, lty=2)
<- c( "Target", "Importance")
a <- rep(a, each=n)
b #u <- c(1, 2)
<- data.frame(Type=as.factor(b), x=rep(x, 2), Density=c(y, y1))
dp $Type <- factor(b, levels=rev(levels(dp$Type)))
dp
<- data.frame(Type=as.factor(b), x=rep(x, 2), Density=c(y2, y3))
dp1 $Type <- factor(b, levels=rev(levels(dp1$Type)))
dp1
<- data.frame(Type=as.factor(b), x=rep(x, 2), Density=c(y4, y5))
dp2 $Type <- factor(b, levels=rev(levels(dp2$Type)))
dp2
<- data.frame(Type=as.factor(b), x=rep(x, 2), Density=c(y6, y7))
dp3 $Type <- factor(b, levels=rev(levels(dp3$Type)))
dp3
# head(dp)
p1 <- ggplot(data=dp, aes(x=x, y=Density, group=Type, col=Type)) +
  geom_line(aes(linetype=Type), size=0.8) +
theme(axis.line=element_blank(),
axis.text.x=element_blank(),
axis.text.y=element_blank(),
axis.ticks=element_blank(),
axis.title.x=element_blank(),
axis.title.y=element_blank(),
legend.position=c(0.85, 0.85),
panel.background=element_blank(),
panel.border=element_blank(),
panel.grid.major=element_blank(),
panel.grid.minor=element_blank(),
plot.background=element_blank())
p1
p2 <- ggplot(data=dp1, aes(x=x, y=Density, group=Type, col=Type)) +
  geom_line(aes(linetype=Type), size=0.8) +
theme(axis.line=element_blank(),
axis.text.x=element_blank(),
axis.text.y=element_blank(),
axis.ticks=element_blank(),
axis.title.x=element_blank(),
axis.title.y=element_blank(),
legend.position="none",
panel.background=element_blank(),
panel.border=element_blank(),
panel.grid.major=element_blank(),
panel.grid.minor=element_blank(),
plot.background=element_blank())
p2
p3 <- ggplot(data=dp2, aes(x=x, y=Density, group=Type, col=Type)) +
  geom_line(aes(linetype=Type), size=0.8) +
theme(axis.line=element_blank(),
axis.text.x=element_blank(),
axis.text.y=element_blank(),
axis.ticks=element_blank(),
axis.title.x=element_blank(),
axis.title.y=element_blank(),
legend.position="none",
panel.background=element_blank(),
panel.border=element_blank(),
panel.grid.major=element_blank(),
panel.grid.minor=element_blank(),
plot.background=element_blank())
p3
p4 <- ggplot(data=dp3, aes(x=x, y=Density, group=Type, col=Type)) +
  geom_line(aes(linetype=Type), size=0.8) +
theme(axis.line=element_blank(),
axis.text.x=element_blank(),
axis.text.y=element_blank(),
axis.ticks=element_blank(),
axis.title.x=element_blank(),
axis.title.y=element_blank(),
legend.position="none",
panel.background=element_blank(),
panel.border=element_blank(),
panel.grid.major=element_blank(),
panel.grid.minor=element_blank(),
plot.background=element_blank())
p4
ggarrange(p1, p2, p3, p4,
labels = c("Bad", "Very Bad", "Good", "May be"),
ncol = 2, nrow = 2)
ggsave(file=paste0(figurepath, "target_importance.png"))
## Saving 7 x 5 in image
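As a quick numerical companion to the "Good" panel of Figure 5.1, the short sketch below (illustrative only; none of these object names are used elsewhere in this script or in bmstdr) draws from the heavy-tailed t_4 importance density, estimates the mean of the lighter-tailed N(0, 0.6^2) target with normalised importance weights, and reports the effective sample size of the weights.
# Illustrative importance sampling check for the "Good" target/importance pair
set.seed(1)
m <- 10000
z <- rt(m, df=4)                           # draws from the importance density t_4
w <- dnorm(z, sd=0.6)/dt(z, df=4)          # unnormalised importance weights
c(est.mean = sum(w * z)/sum(w),            # weighted estimate of the target mean (true value 0)
  ess = sum(w)^2/sum(w^2))                 # effective sample size of the weights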
2 Reproduce the results for the Cauchy prior example
BCauchy(method="importance", true.theta = 1, n=25, N=10000)
## Results from using importance sampling.
## $is_estimate
## [1] 0.8110299
##
## $exact_estimate
## [1] 0.8091696
##
## $ybar
## [1] 0.846953
BCauchy(method="rejection", true.theta = 1, n=25, N=10000)
## Results from using rejection sampling.
## $rejection_estimate
## [1] 0.816424
##
## $exact_estimate
## [1] 0.8091696
##
## $acceptance_rate
## [1] 9.37
##
## $interval
## 2.5% 97.5%
## 0.4335218 1.2221992
##
## $ybar
## [1] 0.846953
BCauchy(method="independence", true.theta = 1, n=25, N=10000)
## Results from independence sampler.
## $MH_ind_samp_estimate
## [1] 0.8064928
##
## $exact_estimate
## [1] 0.8091696
##
## $acceptance_rate
## [1] 12.03
##
## $interval
## 2.5% 97.5%
## 0.4187873 1.2261501
##
## $ybar
## [1] 0.846953
BCauchy(method="randomwalk", true.theta = 1, n=25, tuning.sd =1)
## Results from the random walk sampler.
## $MH_rand_walk_estimate
## [1] 0.8012946
##
## $exact_estimate
## [1] 0.8091696
##
## $acceptance_rate
## [1] 24.2
##
## $interval
## 2.5% 97.5%
## 0.4109837 1.1928867
##
## $ybar
## [1] 0.846953
##
## $tuning.sd
## [1] 1
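The exact_estimate reported by BCauchy can also be checked by direct numerical integration. The sketch below assumes the standard setup behind this example, namely a random sample of size 25 from N(theta, 1) with a Cauchy(0, 1) prior on theta, so that the sample mean is sufficient; the value of ybar0 is taken from the output above and all object names are introduced only for this sketch.
# Numerical-integration check of the exact posterior mean (illustrative sketch)
ybar0 <- 0.846953                          # $ybar reported above
n0 <- 25
post.kernel <- function(theta) dnorm(ybar0, mean=theta, sd=1/sqrt(n0)) * dcauchy(theta)
num <- integrate(function(theta) theta * post.kernel(theta), -Inf, Inf)$value
den <- integrate(post.kernel, -Inf, Inf)$value
num/den                                    # should be close to the exact_estimate above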
3 Reproduce Table 5.1
y <- ydata
mu0 <- mean(y)
kprior <- 1
prior.M <- 1
prior.sigma2 <- c(2, 1)
N <- 10000
eresults <- Bnormal(package="exact", y=y, mu0=mu0,
    kprior=kprior, prior.M=prior.M, prior.sigma2=prior.sigma2)
## Results from exact methods.
# Run Gibbs sampling
samps <- Bnormal(package="RGibbs", y=y, mu0=mu0, kprior=kprior,
    prior.M=prior.M, prior.sigma2=prior.sigma2, N=N)
## $mean_theta
## [1] 47.90894
##
## $var_theta_estimate
## [1] 0.6875541
##
## $mean_sigma2
## [1] 19.93586
##
## $var_sigma2_estimate
## [1] 27.93716
## Results from Gibbs sampler coded in R.
gres <- list(mean_theta=mean(samps[,1]), var_theta=var(samps[,1]),
    mean_sigma2=mean(samps[,2]), var_sigma2=var(samps[,2]))
glow <- list(theta_low=quantile(samps[,1], probs=0.025), var_theta=NA,
    sigma2_low=quantile(samps[,2], probs=0.025), var_sigma2=NA)
gupp <- list(theta_upp=quantile(samps[,1], probs=0.975), var_theta=NA,
    sigma2_upp=quantile(samps[,2], probs=0.975), var_sigma2=NA)
a <- rbind(unlist(eresults), unlist(gres), unlist(glow), unlist(gupp))
cvsamp <- sqrt(samps[,2])/samps[,1]
cv <- c(NA, mean(cvsamp), quantile(cvsamp, probs=c(0.025, 0.975)))
table5.1 <- data.frame(a, cv)
rownames(table5.1) <- c("Exact", "Estimate", "2.5%", "97.5%")
print(round(table5.1, 3))
## mean_theta var_theta mean_sigma2 var_sigma2 cv
## Exact 47.906 0.686 19.901 28.289 NA
## Estimate 47.909 0.688 19.936 27.937 0.092
## 2.5% 46.290 NA 12.099 NA 0.072
## 97.5% 49.551 NA 32.747 NA 0.120
dput(table5.1, file=paste0(filepath, "table5.1.txt"))
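For readers who want to see the kind of update the RGibbs option iterates, here is a minimal hand-coded Gibbs sampler sketch for the conjugate model y_i ~ N(theta, sigma2), theta | sigma2 ~ N(mu0, sigma2/kprior), 1/sigma2 ~ Gamma(a, b). The parameterisation and the function name gibbs.normal are assumptions made for this sketch, not taken from the bmstdr source, so its draws need not match Bnormal exactly.
# Illustrative Gibbs sampler sketch (assumed parameterisation; not bmstdr code)
gibbs.normal <- function(y, mu0, kprior, a, b, N=10000) {
  n <- length(y); ybar <- mean(y)
  theta <- ybar; sigma2 <- var(y)                  # starting values
  out <- matrix(NA, N, 2, dimnames=list(NULL, c("theta", "sigma2")))
  for (i in 1:N) {
    # theta | sigma2, y ~ N((kprior*mu0 + n*ybar)/(kprior+n), sigma2/(kprior+n))
    theta <- rnorm(1, mean=(kprior*mu0 + n*ybar)/(kprior+n),
                   sd=sqrt(sigma2/(kprior+n)))
    # 1/sigma2 | theta, y ~ Gamma(a + (n+1)/2, b + 0.5*(SS + kprior*(theta-mu0)^2))
    rate <- b + 0.5*(sum((y - theta)^2) + kprior*(theta - mu0)^2)
    sigma2 <- 1/rgamma(1, shape=a + (n+1)/2, rate=rate)
    out[i, ] <- c(theta, sigma2)
  }
  out
}
# e.g. gsamp <- gibbs.normal(y, mu0=mean(y), kprior=1, a=2, b=1)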
4 Reproduce Table 5.2
set.seed(44)
table5.2 <- Bnormal(package="stan", kprior=1, prior.M=1, prior.sigma=c(2, 1), N=10000)
##
## SAMPLING FOR MODEL 'normal' NOW (CHAIN 1).
## Chain 1:
## Chain 1: Gradient evaluation took 5e-06 seconds
## Chain 1: 1000 transitions using 10 leapfrog steps per transition would take 0.05 seconds.
## Chain 1: Adjust your expectations accordingly!
## Chain 1:
## Chain 1:
## Chain 1: Iteration: 1 / 10000 [ 0%] (Warmup)
## Chain 1: Iteration: 1000 / 10000 [ 10%] (Warmup)
## Chain 1: Iteration: 1001 / 10000 [ 10%] (Sampling)
## Chain 1: Iteration: 2000 / 10000 [ 20%] (Sampling)
## Chain 1: Iteration: 3000 / 10000 [ 30%] (Sampling)
## Chain 1: Iteration: 4000 / 10000 [ 40%] (Sampling)
## Chain 1: Iteration: 5000 / 10000 [ 50%] (Sampling)
## Chain 1: Iteration: 6000 / 10000 [ 60%] (Sampling)
## Chain 1: Iteration: 7000 / 10000 [ 70%] (Sampling)
## Chain 1: Iteration: 8000 / 10000 [ 80%] (Sampling)
## Chain 1: Iteration: 9000 / 10000 [ 90%] (Sampling)
## Chain 1: Iteration: 10000 / 10000 [100%] (Sampling)
## Chain 1:
## Chain 1: Elapsed Time: 0.008387 seconds (Warm-up)
## Chain 1: 0.076652 seconds (Sampling)
## Chain 1: 0.085039 seconds (Total)
## Chain 1:
## Results from Hamiltonian Monte Carlo in Stan.
print(round(table5.2, 3))
## mean_theta var_theta mean_sigma2 var_sigma2 cv
## Exact 47.906 0.686 19.901 28.289 NA
## Estimate 47.915 0.678 19.779 27.726 0.092
## 2.5% 46.297 NA 12.076 NA 0.072
## 97.5% 49.558 NA 32.447 NA 0.119
dput(table5.2, file=paste0(filepath, "table5.2.txt"))
5 Reproduce Table 5.3
table5.3 <- Bnormal(package="inla", kprior=1, prior.M=1, prior.sigma=c(2, 1))
## Results from INLA.
print(round(table5.3, 3))
## mean_theta var_theta mean_sigma2 var_sigma2 cv
## Exact 47.906 0.686 19.901 28.289 NA
## Estimate 48.700 0.043 20.665 29.313 0.093
## 2.5% 48.292 NA 12.574 NA 0.073
## 97.5% 49.114 NA 33.528 NA 0.119
dput(table5.3, file=paste0(filepath, "table5.3.txt"))
6 Reproduce Table 5.4
<- Bmchoice(case="Exact.sigma2.known")
a1 ## $posterior.mean
## [1] 47.90599
##
## $posterior.var
## [1] 0.7586207
##
## $data.mean
## [1] 47.87542
##
## $sigma2byn
## [1] 0.7857143
##
## $aic
## [1] 167.0221
##
## $bic
## [1] 168.3543
## Results by using exact theoretical calculations assuming known sigma2.
<- Bmchoice(case="MC.sigma2.known")
a2 ## $posterior.mean
## [1] 47.90599
##
## $posterior.var
## [1] 0.7586207
##
## $data.mean
## [1] 47.87542
##
## $sigma2byn
## [1] 0.7857143
## Results by using Monte Carlo Samples from the posterior distribution of theta for known sigma2.
<- Bmchoice(case="MC.sigma2.unknown")
a3 ## $mean_theta
## [1] 47.90894
##
## $var_theta_estimate
## [1] 0.6875541
##
## $mean_sigma2
## [1] 19.93586
##
## $var_sigma2_estimate
## [1] 27.93716
## Results by using Monte Carlo Samples from the joint posterior distribution of theta and sigma2.
table5.4 <- cbind(unlist(a1), unlist(a2), unlist(a3))
print(round(table5.4, 2))
## [,1] [,2] [,3]
## pdic 0.97 0.96 2.07
## pdicalt 0.93 0.95 13.58
## dic 166.95 166.94 169.20
## dicalt 166.89 166.93 192.22
## pwaic1 0.92 0.91 1.82
## pwaic2 0.96 0.94 2.52
## waic1 166.91 166.90 168.95
## waic2 167.00 166.96 170.35
## gof 594.30 595.44 591.82
## penalty 637.24 634.73 577.13
## pmcc 1231.54 1230.17 1168.95
# xtable(mc, digits=2)
dput(table5.4, file=paste0(filepath, "table5.4.txt"))
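Table 5.4 collects model choice criteria (DIC, WAIC, and the goodness of fit, penalty and PMCC values). As a rough illustration of how such criteria arise from posterior samples, the sketch below computes pDIC, DIC, pWAIC1 and WAIC for the known-sigma2 normal model; the function name dic.waic.sketch and its inputs are invented for this illustration, and the exact definitions and defaults used inside Bmchoice may differ.
# Illustrative sketch: DIC and WAIC from posterior draws of theta, sigma2 known
dic.waic.sketch <- function(y, theta.samp, sigma2) {
  # pointwise log-likelihood matrix: rows = posterior draws, columns = observations
  loglik <- sapply(y, function(yi) dnorm(yi, mean=theta.samp, sd=sqrt(sigma2), log=TRUE))
  lp.at.mean <- sum(dnorm(y, mean=mean(theta.samp), sd=sqrt(sigma2), log=TRUE))
  p.dic <- 2*(lp.at.mean - mean(rowSums(loglik)))   # effective number of parameters
  dic <- -2*lp.at.mean + 2*p.dic
  lppd <- sum(log(colMeans(exp(loglik))))           # log pointwise predictive density
  p.waic1 <- 2*sum(log(colMeans(exp(loglik))) - colMeans(loglik))
  waic1 <- -2*(lppd - p.waic1)
  c(p.dic=p.dic, dic=dic, p.waic1=p.waic1, waic1=waic1)
}
# e.g. dic.waic.sketch(y, theta.samp=samps[,1], sigma2=22)   # illustrative inputs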
# All done
end.time <- proc.time()[3]
comp.time <- (end.time - start.time)/60
# comp.time <- fancy.time(comp.time)
print(comp.time)
## elapsed
## 0.1433333