
In this edition, several tools in the rethinking package have new names: the quadratic approximation function map is now called quap, and map2stan has been superseded by ulam (map2stan still works). Helper functions such as extract.prior have also been added. The text occasionally compares this approach to packages like brms and rstanarm, which specify models through compact formulas such as y ~ (1|x) + z rather than explicit distributional statements. A reminder on R basics used throughout: y <- 7 assigns the value 7 to the symbol y, and entering y alone prints that value.

print( "All models are wrong, but some are useful." )

[1] "All models are wrong, but some are useful."


x <- 1:2
x <- x*10
x <- log(x)
x <- sum(x)
x <- exp(x)
x

[1] 200

$$\log\!\left(0.01^{200}\right) = 200 \times \log(0.01)$$

( log( 0.01^200 ) )
( 200 * log(0.01) )

[1] -Inf
[1] -921.034
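As a small aside (a sketch with made-up numbers, not from the text; log_p1 and log_p2 are names of my own choosing): when comparing two very small probabilities, subtract their logarithms and exponentiate only the difference, so the intermediate values never underflow.

# hypothetical example: the ratio 0.02^200 / 0.01^200 computed safely on the log scale
log_p1 <- 200 * log(0.01)
log_p2 <- 200 * log(0.02)
exp( log_p2 - log_p1 )   # equals 2^200, even though both raw probabilities underflow to zero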
# Load the data:
# car braking distances in feet paired with speeds in km/h
# see ?cars for details
data(cars)

# fit a linear regression of distance on speed
m <- lm( dist ~ speed , data=cars )

# estimated coefficients from the model
coef(m)

# plot residuals against speed
plot( resid(m) ~ speed , data=cars )
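As a follow-on sketch (not part of the original example), the same base R workflow can also draw the fitted line and report interval estimates for the coefficients:

# plot the data with the fitted regression line
plot( dist ~ speed , data=cars )
abline( m )

# 89% confidence intervals for the coefficients (the 89% level is just an illustrative choice)
confint( m , level=0.89 )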

This workflow will look familiar from other statistical software; in SAS, for example, the same regression could be fit with PROC GLM.

The code in this book relies on the rethinking R package. The rethinking package in turn depends on rstan, the R interface to Stan. Install rstan first, following the instructions at mc-stan.org, and then install rethinking and its remaining dependencies:

install.packages(c("coda","mvtnorm","devtools","dagitty"))
library(devtools)
devtools::install_github("rmcelreath/rethinking")

If something goes wrong with the rethinking installation, the package source and up-to-date installation instructions are at github.com/rmcelreath/rethinking.
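After installation, a quick sanity check (a sketch; the reported version will differ by machine) is to load the package and print its version:

# load rethinking and confirm which version is installed
library(rethinking)
packageVersion("rethinking")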
[Figure: The relations among hypotheses, process models, and statistical models. The hypothesis H0, "Evolution is neutral," corresponds to process models P0A (neutral, equilibrium) and P0B (neutral, non-equilibrium); the hypothesis H1, "Selection matters," corresponds to P1A (constant selection) and P1B (fluctuating selection). These process models map onto statistical models MI, MII, and MIII.]
Counting the ways each conjecture about the bag's contents could produce the observed draws B W B:

  Conjecture     Ways to produce B W B
  [W W W W]      0 × 4 × 0 = 0
  [B W W W]      1 × 3 × 1 = 3
  [B B W W]      2 × 2 × 2 = 8
  [B B B W]      3 × 1 × 3 = 9
  [B B B B]      4 × 0 × 4 = 0

Drawing one more marble, which turns out to be blue, updates these counts by multiplication:

  Conjecture     Ways to produce B    Prior counts    New count
  [W W W W]      0                    0               0 × 0 = 0
  [B W W W]      1                    3               3 × 1 = 3
  [B B W W]      2                    8               8 × 2 = 16
  [B B B W]      3                    9               9 × 3 = 27
  [B B B B]      4                    0               0 × 4 = 0

Prior information about the conjectures themselves, such as counts of how often the factory produces each kind of bag, enters the same way: multiply the prior counts by the factory counts.

  Conjecture     Prior count    Factory count    New count
  [W W W W]      0              0                0 × 0 = 0
  [B W W W]      3              3                3 × 3 = 9
  [B B W W]      16             2                16 × 2 = 32
  [B B B W]      27             1                27 × 1 = 27
  [B B B B]      0              0                0 × 0 = 0


For example, the plausibility of [B W W W] after seeing B W B is its count divided by the sum of counts: 3/(0 + 3 + 8 + 9 + 0) = 3/20 = 0.15. In general, letting p stand for a conjectured proportion of blue marbles and D_new for the new data:

$$\text{plausibility of } p \text{ after } D_{\text{new}} \propto \text{ways } p \text{ can produce } D_{\text{new}} \times \text{prior plausibility of } p$$

Standardizing so the plausibilities sum to one:

$$\text{plausibility of } p \text{ after } D_{\text{new}} = \frac{\text{ways } p \text{ can produce } D_{\text{new}} \times \text{prior plausibility of } p}{\text{sum of products}}$$

  Possible composition    p       Ways to produce data    Plausibility
  [W W W W]               0       0                       0
  [B W W W]               0.25    3                       0.15
  [B B W W]               0.5     8                       0.40
  [B B B W]               0.75    9                       0.45
  [B B B B]               1       0                       0

ways <- c( 0 , 3 , 8 , 9 , 0 )
ways/sum(ways)

[1] 0.00 0.15 0.40 0.45 0.00


These are the plausibilities: each count in ways is divided by sum(ways), so the results sum to one.
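The updating tables above can be mirrored in code. Here is a minimal sketch (the names new_ways and updated are mine) that multiplies the prior counts by the number of ways each conjecture could produce one more blue marble:

ways <- c( 0 , 3 , 8 , 9 , 0 )        # prior counts, one per conjecture
new_ways <- c( 0 , 1 , 2 , 3 , 4 )    # blue marbles in each conjectured bag
updated <- ways * new_ways            # updated counts after drawing another blue marble
updated / sum(updated)                # updated plausibilities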



The observed sequence of the first nine globe tosses is: W L W W W L W L W.

[Figure: Bayesian updating after each toss of the globe, one panel per observation from n = 1 to n = 9. In each panel, the horizontal axis is the proportion of water and the vertical axis is plausibility; the dashed curve is the previous plausibility and the solid curve is the updated plausibility after the new observation.]
With N = W + L tosses, the probability of observing W water samples and L land samples, given a proportion p of water, is binomial:

$$\Pr(W, L \mid p) = \frac{(W + L)!}{W!\,L!}\, p^{W} (1 - p)^{L}$$

dbinom( 6 , size=9 , prob=0.5 )

[1] 0.1640625
This number is the relative number of ways to get six water observations, holding p at 0.5 and the number of tosses N = W + L at nine. Changing the value 0.5 shows how the likelihood varies with p.

The d in dbinom stands for density. Functions with an r prefix, like rbinom, generate random samples, and functions with a p prefix, like pbinom, return cumulative probabilities. See ?dbinom for details.
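A brief sketch of the other two prefixes (the random draws will differ on each run):

# r: simulate 10 random counts of water out of 9 tosses, with p = 0.7
rbinom( 10 , size=9 , prob=0.7 )
# p: cumulative probability of 6 or fewer water observations in 9 tosses, with p = 0.5
pbinom( 6 , size=9 , prob=0.5 )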




The uniform prior assigns the same probability density to every value of p between 0 and 1:

$$\Pr(p) = \frac{1}{1 - 0} = 1$$

Putting the likelihood and the prior together, the full model is:

$$W \sim \text{Binomial}(N, p), \quad \text{where } N = W + L$$

$$p \sim \text{Uniform}(0, 1)$$
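This model definition can be read generatively. A minimal sketch (not code from the text; p_sim and W_sim are names of my own) samples p from its uniform prior and then simulates W from the binomial, giving a prior predictive distribution for the count of water:

# simulate the model forward: prior draws of p, then binomial counts of water
N <- 9
p_sim <- runif( 1e4 , min=0 , max=1 )
W_sim <- rbinom( 1e4 , size=N , prob=p_sim )
table( W_sim ) / 1e4   # proportion of simulations producing each count of water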

The joint probability of the data W and L and the parameter p can be written as the likelihood times the prior:

$$\Pr(W, L, p) = \Pr(W, L \mid p)\,\Pr(p)$$

It can just as well be written the other way around:

$$\Pr(W, L, p) = \Pr(p \mid W, L)\,\Pr(W, L)$$

Since both expressions equal the same joint probability Pr(W, L, p), they equal one another:

$$\Pr(W, L \mid p)\,\Pr(p) = \Pr(p \mid W, L)\,\Pr(W, L)$$

Solving for the posterior Pr(p | W, L) gives Bayes' theorem:

$$\Pr(p \mid W, L) = \frac{\Pr(W, L \mid p)\,\Pr(p)}{\Pr(W, L)}$$

In words:

$$\text{Posterior} = \frac{\text{Likelihood} \times \text{Prior}}{\text{Average probability of the data}}$$

The denominator Pr(W, L) is the average probability of the data, averaged over the prior:

$$\Pr(W, L) = \operatorname{E}\!\big[\Pr(W, L \mid p)\big] = \int \Pr(W, L \mid p)\,\Pr(p)\,dp$$

Its job is to standardize the posterior so that it integrates to one.
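The pieces of Bayes' theorem can be checked numerically. The sketch below (using R's integrate; not code from the text, and avg_prob is a name of my own) computes the average probability of the data for W = 6 and L = 3, then verifies that the theorem reproduces the known Beta posterior density at p = 0.5:

W <- 6
L <- 3
# average probability of the data: likelihood averaged over the uniform prior
avg_prob <- integrate( function(p) dbinom( W , W+L , p ) * dunif( p , 0 , 1 ) ,
    lower=0 , upper=1 )$value
# posterior density at p = 0.5 from Bayes' theorem
dbinom( W , W+L , 0.5 ) * dunif( 0.5 , 0 , 1 ) / avg_prob
# the same density from the exact Beta(W+1, L+1) posterior
dbeta( 0.5 , W+1 , L+1 )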
[Figure: The posterior as the product of the prior and the likelihood. Three rows show three different priors (left column); multiplying each by the same likelihood (middle column) and standardizing yields the posterior (right column). Horizontal axes run from 0 to 1 over the proportion of water.]




# define grid
p_grid <- seq( from=0 , to=1 , length.out=20 )

# define prior
prior <- rep( 1 , 20 )

# compute likelihood at each value in grid
likelihood <- dbinom( 6 , size=9 , prob=p_grid )

# compute product of likelihood and prior
unstd.posterior <- likelihood * prior

# standardize the posterior, so it sums to 1
posterior <- unstd.posterior / sum(unstd.posterior)

plot( p_grid , posterior , type="b" ,
    xlab="probability of water" , ylab="posterior probability" )
mtext( "20 points" )
[Figure: Grid approximation of the posterior using 5 grid points (left) and 20 grid points (right). Horizontal axis: probability of water; vertical axis: posterior probability.]
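The coarser five-point grid in the left panel can be reproduced by repeating the same steps with length.out=5 (a sketch; the variable names with a 5 suffix are mine):

p_grid5 <- seq( from=0 , to=1 , length.out=5 )
prior5 <- rep( 1 , 5 )
likelihood5 <- dbinom( 6 , size=9 , prob=p_grid5 )
posterior5 <- likelihood5 * prior5 / sum( likelihood5 * prior5 )
plot( p_grid5 , posterior5 , type="b" ,
    xlab="probability of water" , ylab="posterior probability" )
mtext( "5 points" )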

# a step prior: zero below 0.5, constant above
prior <- ifelse( p_grid < 0.5 , 0 , 1 )

# a peaked prior, concentrated around 0.5
prior <- exp( -5*abs( p_grid - 0.5 ) )

To try these, assign one prior at a time to prior and then repeat the rest of the grid computation over p_grid, as in the sketch below.
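For instance, a sketch rerunning the grid computation with the step prior (it reuses p_grid and likelihood from the code above):

prior <- ifelse( p_grid < 0.5 , 0 , 1 )            # step prior
posterior <- likelihood * prior / sum( likelihood * prior )
plot( p_grid , posterior , type="b" ,
    xlab="probability of water" , ylab="posterior probability" )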

In the rethinking package, the function quap computes the quadratic approximation of the posterior. You provide a formula defining the likelihood and prior, along with a list of data, and quap climbs to the posterior mode and estimates the curvature there.

library(rethinking)
globe.qa <- quap(
    alist(
        W ~ dbinom( W+L , p ) ,  # binomial likelihood
        p ~ dunif( 0 , 1 )       # uniform prior
    ) ,
    data=list( W=6 , L=3 ) )

# display summary of quadratic approximation
precis( globe.qa )

The precis output summarizes the quadratic approximation:

   Mean StdDev 5.5% 94.5%
 p 0.67   0.16 0.42  0.92
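As a short sketch not shown in the text at this point: the rethinking function extract.samples draws samples from the quadratic approximation, which precis can then summarize directly (the name post is mine).

# sample from the quadratic approximation and summarize the samples
post <- extract.samples( globe.qa , n=1e4 )
precis( post )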


[Figure: The quadratic approximation (dashed curve) compared to the exact posterior (solid curve) for n = 9 tosses (left), n = 18 (middle), and n = 36 (right). Horizontal axis: proportion water; vertical axis: density. As the amount of data grows, the quadratic approximation improves.]

Read the precis output as: assuming the posterior is Gaussian, it is maximized at 0.67 and its standard deviation is 0.16. Since the analytical posterior for the globe tossing data is known, in this case a Beta distribution, we can compare it to the quadratic approximation with dbeta:

# analytical calculation
W <- 6
L <- 3
curve( dbeta( x , W+1 , L+1 ) , from=0 , to=1 )
# quadratic approximation
curve( dnorm( x , 0.67 , 0.16 ) , lty=2 , add=TRUE )
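A small sketch comparing the 89% intervals implied by the two curves, using the quantile functions qbeta and qnorm (the Gaussian mean and standard deviation come from the precis output above):

# 89% interval from the exact Beta posterior
qbeta( c(0.055,0.945) , W+1 , L+1 )
# 89% interval from the quadratic (Gaussian) approximation
qnorm( c(0.055,0.945) , mean=0.67 , sd=0.16 )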

Finally, here is a simple Markov chain Monte Carlo estimate of the same posterior, using the Metropolis algorithm:
n_samples <- 1000
p <- rep( NA , n_samples )   # storage for the chain
p[1] <- 0.5                  # starting value
W <- 6
L <- 3
for ( i in 2:n_samples ) {
    # propose a new value near the current one
    p_new <- rnorm( 1 , p[i-1] , 0.1 )
    # reflect proposals that land outside [0,1] back inside
    if ( p_new < 0 ) p_new <- abs( p_new )
    if ( p_new > 1 ) p_new <- 2 - p_new
    # likelihoods of the data at the current and proposed values
    q0 <- dbinom( W , W+L , p[i-1] )
    q1 <- dbinom( W , W+L , p_new )
    # move to the proposal with probability q1/q0, otherwise stay put
    p[i] <- ifelse( runif(1) < q1/q0 , p_new , p[i-1] )
}

dens( p , xlim=c(0,1) )
curve( dbeta( x , W+1 , L+1 ) , lty=2 , add=TRUE )
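A sketch summarizing the chain numerically, for comparison with the earlier approximations:

# posterior mean and 89% interval estimated from the Markov chain samples
mean( p )
quantile( p , c(0.055,0.945) )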
The end-of-chapter practice problems turn on expressions like these. One problem asks which of Pr(rain), Pr(rain | Monday), Pr(Monday | rain), and Pr(rain, Monday)/Pr(Monday) corresponds to "the probability of rain on Monday." Another asks for the plain-language meaning of Pr(Monday | rain). A third asks which of Pr(Monday | rain), Pr(rain | Monday), Pr(rain | Monday) Pr(Monday), Pr(rain | Monday) Pr(Monday)/Pr(rain), and Pr(Monday | rain) Pr(rain)/Pr(Monday) corresponds to "the probability that it is Monday, given that it is raining." The medium problems repeat the globe tossing grid approximation with a prior that is zero when p < 0.5 and a positive constant when p ≥ 0.5, and ask for a posterior probability of the form Pr(Earth | land).


Note that in non-Bayesian statistics the likelihood is conventionally written L(θ | data), even though it is computed as Pr(data | θ). For the globe tossing model with a uniform prior, the exact posterior density can be computed with dbeta(p, w+1, n-w+1), where p is the proportion of water, w is the observed number of water samples, and n is the total number of tosses.



