# Edps 590BAY ---------- post this one ----------- # Spring 2019 # C.J.Anderson # # Nels 23 schools and HLM --- Baysian and lmer equivalent # # Random intercept math~ 1 + homew + (1 |school.id) # Random intercept & slope (un-correlated): # math~ 1 + homew + (1 |school.id) + (0 + homew | school.id # Random intercept & slope (correlated): # math~ 1 + homew + (1 + homew |school.id) # Random intercept & slope (correlated) with more predictors: # math~ 1 + homew + ses + public + homew*public + (1 + homew|school.id) # # # What the variables are (data from Kreft & DeLeeuw's text): # SCHOOL = SCHOOL ID # STUDENT = STUDENT ID # SEX = STUDENT SEX # RACE = STUDENT RACE # HOMEW = TIME ON MATH HOMEWORK # SCHTYPE = SCHOOL TYPE # SES = SOCIOECONOMIC STATUS # PARED = PARENTAL EDUCATION # MATH = MATH SCORE # CLASSSTR = CLASS STRUCTURE # SCHSIZE = SCHOOL SIZE # URBAN = URBANICITY # GEO = GEOGRAPHIC REGION # MINORITY = PERCENT MINORITY # RATIO = STUDENT-TEACHER RATIO # PUBLIC = PUBLIC SCHOOL BINARY # WHITE = WHITE RACE BINARY # # library(lme4) library(lmerTest) library(coda) library(rjags) library(runjags) library(mvtnorm) library(lattice) #setwd("C:\\Users\\cja\\Dropbox\\edps 590BAY\\Lectures\\10 Multilevel models") setwd("C:\\Users\\cja\\Documents\\Dropbox\\edps 590BAY\\Lectures\\10 Multilevel models") # Read in data: nels <- read.table("school23_data.txt",header=TRUE) head(nels) tail(nels) # sort by schools: nels <- nels[order(nels$school,nels$student) , ] # Get information on number schools & create consecutive integer school.id school <- unique(nels$school) ## N <- length(school) ## need N and school.id school.int <- as.data.frame(cbind(school,seq(1:N))) ## for dataList & model names(school.int) <- c("school", "school.id") ## nels <- merge(nels,school.int,by="school") ## # total number of students n <- length(nels$math) ## need n for dataList & model # Double-check set-up head(nels,n=50) # You don't need this but might want it nj <- matrix(999,nrow=N,ncol=1) for (j in 1:N) { nj[j] <- nrow(subset(nels,nels$school.id==j)) } # Lattice plot with regressions xyplot(math ~ homew | school.id, data=nels, col.line='black', type=c('p','r'), main='Variability in math ~ homew| school', xlab="Time Spent Doing Homework", ylab="Math Scores") # Graph to illustrate need for random intercept and slope plot(nels$homew,nels$math, type="n", col="blue", lwd=2, main="NELS: Linear Regression by School \n(for where there is data)", xlab="Time Spent Doing Homework", ylab="Math Scores" ) for (j in 1:N) { sub <- subset(nels,nels$school.id==j) fitted <- fitted(lm(math~homew,sub)) lines(sub$homew,fitted,col=j) } ################################################################### # Random intercept math~ 1 + homew + (1 |school.id) # ################################################################### ri1.lmer <- lmer(math~ 1 + homew + (1 |school.id), data=nels, REML=TRUE) summary(ri1.lmer) N = 23 n = length(nels$math) dataList <- list( y = nels$math, hmwk = nels$homew, school.id = nels$school.id, N = N, n = n, sdY = sd(nels$math) ) ri.mod1 <- "model { for (i in 1:n) { y[i] ~ dnorm(mu[i],precision) mu[i] <- g0 + U0j[school.id[i]] + g1*hmwk[i] } for (j in 1:N) { U0j[j] ~ dnorm(0,ptau) } g0 ~ dnorm(0,1/(100*sdY^2)) g1 ~ dnorm(0,1/(100*sdY^2)) tau ~ dunif(0.0001,200) ptau <- 1/tau^2 sigma ~ dunif(0.0001,2000) precision <- 1/sigma^2 }" writeLines(ri.mod1, con="ri.mod1.txt") start1 = list("g0"=mean(nels$math), "g1"=rnorm(1,1,3), "sigma"=sd(nels$math), "tau"=.5, .RNG.name="base::Wichmann-Hill", .RNG.seed=523) start2 = list("g0"=rnorm(1,0,3), "g1"=rnorm(1,-2,3), "sigma"=runif(1,.001,10), "tau"=runif(1,0.0001,10), .RNG.name="base::Marsaglia-Multicarry", .RNG.seed=57) start3 = list("g0"=rnorm(1,3,4), "g1"=rnorm(1,0,3), "sigma"=runif(1,.001,10), "tau"=runif(1,0.0001,10), .RNG.name="base::Super-Duper", .RNG.seed=24) start4 = list("g0"=rnorm(1,-3,10), "g1"=rnorm(1,5,3), "sigma"=runif(1,.001,10), "tau"=runif(1,0.0001,10), .RNG.name="base::Mersenne-Twister", .RNG.seed=72100) start <- list(start1,start2,start3,start4) ################################################################################## # Fast way to check your model: rjags and then run for sampling runjags with parallel. ri.mod1x <- jags.model(file="ri.mod1.txt", # compiles and intializes model data=dataList, inits=start1, n.chains=4, n.adapt=500) ################################################################################### ri.mod1.runjags <- run.jags(model=ri.mod1, method="parallel", monitor=c("g0","g1","sigma", "tau"), data=dataList, sample=10000, n.chains=4, inits=start) add.summary(ri.mod1.runjags) summary(ri1.lmer) plot(ri.mod1.runjags) #------------- # Graph to illustrate need for random intercept and slope par(mfrow=c(2,2)) plot(nels$homew,nels$math, type="n", lwd=2, main="NELS: Linear Regression by School \n(for where there is data)", xlab="Time Spent Doing Homework", ylab="Math Scores" ) for (j in 1:N) { sub <- subset(nels,nels$school.id==j) fitted <- fitted(lm(math~homew,sub)) lines(sub$homew,fitted,col=j) } # Compute conditional means (regressions) S<-1 math.post <- matrix(999,nrow=S,ncol=n) homework <- matrix(nels$homew,nrow=n,ncol=1) for (s in 1:S) { pointer <- 1 for (j in 1:N) { U0 <-rnorm(1, 46.32, sd=4.9156) for (i in 1:nj[j]) { math.post[s,pointer] = U0 + 2.4006*homework[pointer] pointer <- pointer + 1 } } } nels$math.ri1 <- matrix(math.post,nrow=n,ncol=1) plot(nels$homew,nels$math, type="n", lwd=2, main="Fitted from Random Intercept \nmath~ 1 + homew + (1 |school.id)", xlab="Time Spent Doing Homework", ylab="Math Scores" ) for (j in 1:N) { sub <- subset(nels,nels$school.id==j) lines(sub$homew,sub$math.ri1,col=j) } ################################################################### # Random intercept & slope math~ 1 + homew + (1 |school.id) # # Un-correlated random effects ################################################################### ris1.lmer <- lmer(math~ 1 + homew + (1 |school.id) + (0 + homew | school.id), data=nels, REML=TRUE) summary(ris1.lmer) dataList <- list( math = nels$math, hmwk = nels$homew, school.id = nels$school.id, N = N, n = n, sdY = sd(nels$math) ) ris.mod1 <- "model { for (i in 1:n) { math[i] ~ dnorm(mu[i],precision) mu[i] <- g0 + g1*hmwk[i] + b0j[school.id[i]] + b1j[school.id[i]]*hmwk[i] } for (j in 1:N) { b0j[j] ~ dnorm(0,ptau0) b1j[j] ~ dnorm(0,ptau1) } g0 ~ dnorm(0,1/(100*sdY^2)) g1 ~ dnorm(0,1/(100*sdY^2)) tau0 ~ dunif(0.0001,200) ptau0 <- 1/tau0^2 tau1 ~ dunif(0.0001,200) ptau1 <- 1/tau1^2 sigma ~ dunif(0.0001,2000) precision <- 1/sigma^2 }" writeLines(ris.mod1, con="ris.mod1.txt") start1 = list("g0"=mean(nels$math), "g1"=rnorm(1,1,3), "sigma"=sd(nels$math), "tau0"=.5, "tau1"=.5, .RNG.name="base::Wichmann-Hill", .RNG.seed=523) start2 = list("g0"=rnorm(1,0,3), "g1"=rnorm(1,-2,3), "sigma"=runif(1,.001,10), "tau0"=runif(1,0.0001,10), "tau1"=runif(1,0.0001,10), .RNG.name="base::Marsaglia-Multicarry", .RNG.seed=57) start3 = list("g0"=rnorm(1,3,4), "g1"=rnorm(1,0,3), "sigma"=runif(1,.001,10), "tau0"=runif(1,0.0001,10), "tau1"=runif(1,0.0001,10), .RNG.name="base::Super-Duper", .RNG.seed=24) start4 = list("g0"=rnorm(1,-3,10), "g1"=rnorm(1,5,3), "sigma"=runif(1,.001,10), "tau0"=runif(1,0.0001,10), "tau1"=runif(1,0.0001,10), .RNG.name="base::Mersenne-Twister", .RNG.seed=72100) start <- list(start1,start2,start3,start4) ################################################################################## # Fast way to check your model: rjags and then run for sampling runjags with parallel. ris.mod1 <- jags.model(file="ris.mod1.txt", # compiles and initializes model data=dataList, inits=start1, n.chains=4, n.adapt=500) ################################################################################### ris.mod1.runjags <- run.jags(model=ris.mod1, method="parallel", monitor=c("g0","g1","sigma", "tau0","tau1"), data=dataList, sample=10000, n.chains=4, inits=start, thin=10) add.summary(ris.mod1.runjags) plot(ris.mod1.runjags) summary(ris1.lmer) ###### To get idea of what school conditional regressions would look like ######## S<-1 math.post <- matrix(999,nrow=S,ncol=n) homework <- matrix(nels$homew,nrow=n,ncol=1) for (s in 1:S) { pointer <- 1 for (j in 1:N) { U0 <-rnorm(1, 46.443, sd=7.5859) U1 <-rnorm(1, 1.9677, sd=3.9674) for (i in 1:nj[j]) { math.post[s,pointer] = U0 + U1*homework[pointer] # for estimates include + rnorm(1,g0,sigma) pointer <- pointer + 1 } } } nels$math.ris12 <- matrix(math.post,nrow=n,ncol=1) plot(nels$homew,nels$math, type="n", lwd=2, main="Fitted from Random Intercept & Slope \nmath~1+homew+(1|school.id)+(0+homew|school.id)", xlab="Time Spent Doing Homework", ylab="Math Scores", ylim=c(30,70) ) for (j in 1:N) { sub <- subset(nels,nels$school.id==j) lines(sub$homew,sub$math.ris1,col=j) } ################################################################### # Random intercept & slope Wishart # # math~ 1 + homew + (1 + homew|school.id) # # with correlated random effects: use wishart # ################################################################### ris2.lmer <- lmer(math~ 1 + homew + (1 + homew|school.id), data=nels, REML=TRUE) summary(ris2.lmer) dataList <- list( y = nels$math, hmwk = nels$homew, school.id=nels$school.id, n = n, N = N, sdY = sd(nels$math) ) re.mod2 <- "model { # Likelihood: the data model for (i in 1:n) { y[i] ~ dnorm(meanY[i],precision) meanY[i] <- betaj[school.id[i],1] + betaj[school.id[i],2]*hmwk[i] } # Random Effects -- note multivariate normal density is used for (j in 1:N) { betaj[j,1:2] ~ dmnorm(mu[1:2],Omega[1:2,1:2]) } # Priors precision ~ dgamma(0.01,0.01) sigma <- 1/sqrt(precision) mu[1] ~ dnorm(0,1/(100*sdY^2)) # or mu[i]<- 0 and add g0 & g1 to meanY mu[2] ~ dnorm(0,1/(100*sdY^2)) # and add g0<- dnorm(0,1,(100*sdY^2)) # & for g1. This will make it easier # to add models for intercept & slopes. # See next model. Omega[1:2,1:2] ~ dwish(R[,],2.1) R[1,1] <- 1/2.1 R[1,2] <- 0 # Un-correlated--there are alternative R[2,1] <- 0 # ways of setting this up by just adding R[2,2] <- 1/2.1 # uni-variate distribution for each tau Tau <- inverse(Omega) # This turn into our taus. }" writeLines(re.mod2, con="re.mod2.txt") start1 = list("precision"=1, .RNG.name="base::Wichmann-Hill", .RNG.seed=523) start2 = list("precision"=1, .RNG.name="base::Marsaglia-Multicarry", .RNG.seed=57) start3 = list("precision"=1, .RNG.name="base::Super-Duper", .RNG.seed=24) start4 = list("precision"=1, .RNG.name="base::Mersenne-Twister", .RNG.seed=72100) start <- list(start1,start2,start3,start4) ################################################################################## # Fast way to check your model: rjags and then run for sampling runjags with parallel. re.mod2x <- jags.model(file="re.mod2.txt", # compiles and initializes model data=dataList, inits=start1, n.chains=4, n.adapt=500) ################################################################################### re.mod2.runjags <- run.jags(model=re.mod2, method="parallel", monitor=c("mu","sigma","Tau"), data=dataList, sample=20000, n.chains=4, inits=start) add.summary(re.mod2.runjags) plot(re.mod2.runjags) summary(ris2.lmer) ########### What conditional regressions might look like ############ S<- 1 math.post <- matrix(999,nrow=S,ncol=n) homework <- matrix(nels$homew,nrow=n,ncol=1) Tau <- matrix(c(63.01,-27.717,-27.717,17.804),nrow=2,ncol=2) for (s in 1:S) { pointer <- 1 for (j in 1:N) { Uj <- rmvnorm(1,mean=c(46.296,2.0062),sigma=Tau) for (i in 1:nj[j]) { math.post[s,pointer] = Uj[1,1] + Uj[1,2]*homework[pointer] # for estimates include + rnorm(1,g0,sigma) pointer <- pointer + 1 } } } nels$math.ris2 <- matrix(math.post,nrow=n,ncol=1) plot(nels$homew,nels$math, type="n", col="blue", lwd=2, main="Fitted from Random Intercept \nmath~ 1 + homew + (1 + homew|school.id)", xlab="Time Spent Doing Homework", ylab="Math Scores", ylim=c(30,70) ) for (j in 1:N) { sub <- subset(nels,nels$school.id==j) lines(sub$homew,sub$math.ris2,col=j) } ############################################################################# # Random intercept & slope with more predictors including a cross-level # # interaction # # math~ 1 + homew + ses + public + homew*public +(1 + homew|school.id) # # # ############################################################################# # Turn into dummy codes nels$public <- ifelse(nels$schtype==1,1,0) ris3.lmer <- lmer(math~ 1 + homew + ses + public + homew*public + (1 + homew|school.id), data=nels, REML=TRUE) summary(ris3.lmer) dataList <- list( math = nels$math, hmwk = nels$homew, ses = nels$ses, public=nels$public, school.id=nels$school.id, n = n, N = N, sdY = sd(nels$math) ) re.mod3 <- "model { # Likelihood: the data model for (i in 1:n) { math[i] ~ dnorm(mmath[i],precision) mmath[i] <- g0 + Uj[school.id[i],1] + g3*public[i] + (g10 + Uj[school.id[i],2] + g11*public[i])*hmwk[i] + g2*ses[i] } # Random Effects -- note multivariate normal density is used for (j in 1:N) { Uj[j,1:2] ~ dmnorm(mu[1:2],Omega[1:2,1:2]) } # Priors precision ~ dgamma(0.01,0.01) sigma <- 1/sqrt(precision) g0 ~ dnorm(0,1/(100*sdY^2)) g2 ~ dnorm(0,1/(100*sdY^2)) g3 ~ dnorm(0,1/(100*sdY^2)) g10 ~ dnorm(0,1/(100*sdY^2)) g11 ~ dnorm(0,1/(100*sdY^2)) mu[1] <- 0 # This identifies the model mu[2] <- 0 # Omega[1:2,1:2] ~ dwish(R[,],2.1) R[1,1] <- 1/2.1 R[1,2] <- 0 R[2,1] <- 0 R[2,2] <- 1/2.1 Tau <- inverse(Omega) }" writeLines(re.mod3, con="re.mod3.txt") start1 = list("precision"=1, "g0"=rnorm(1,0,10),"g2"=rnorm(1,0,10), "g3"=rnorm(1,0,10),"g10"=rnorm(1,0,10), "g11"=rnorm(1,0,10), .RNG.name="base::Wichmann-Hill", .RNG.seed=523) start2 = list("precision"=0.5, "g0"=rnorm(1,0,10),"g2"=rnorm(1,0,10), "g3"=rnorm(1,0,10),"g10"=rnorm(1,0,10), "g11"=rnorm(1,0,10), .RNG.name="base::Marsaglia-Multicarry", .RNG.seed=57) start3 = list("precision"=0.05,"g0"=rnorm(1,0,10),"g2"=rnorm(1,0,10), "g3"=rnorm(1,0,10),"g10"=rnorm(1,0,10), "g11"=rnorm(1,0,10),.RNG.name="base::Super-Duper", .RNG.seed=24) start4 = list("precision"=.002,"g0"=rnorm(1,0,10),"g2"=rnorm(1,0,10), "g3"=rnorm(1,0,10),"g10"=rnorm(1,0,10), "g11"=rnorm(1,0,10),.RNG.name="base::Mersenne-Twister", .RNG.seed=72100) start <- list(start1,start2,start3,start4) ################################################################################## # Fast way to check your model: rjags and then run for sampling runjags with parallel. re.mod3 <- jags.model(file="re.mod3.txt", # compiles and initializes model data=dataList, inits=start1, n.chains=4, n.adapt=500) ################################################################################### re.mod3.runjags <- run.jags(model=re.mod3, method="parallel", monitor=c("g0", "g10", "g11","g2","g3","sigma","Tau"), data=dataList, sample=10000, n.chains=4, inits=start, thin=10) add.summary(re.mod3.runjags) plot(re.mod3.runjags) summary(ris3.lmer) ################################## ############################################### ################################################################################# # Model evaluation --- some ################################################################################# ################################################################################ dataList <- list( math = nels$math, hmwk = nels$homew, ses = nels$ses, public=nels$public, school.id=nels$school.id, n = n, N = N, sdY = sd(nels$math) ) re.mod4 <- "model { # Likelihood: the data model for (i in 1:n) { math[i] ~ dnorm(mmath[i],precision) mmath[i] <- g00 + Uj[school.id[i],1] + g3*public[i] + (g10 + Uj[school.id[i],2] + g11*public[i])*hmwk[i] + g2*ses[i] marg.post[i] ~ dnorm(mmath[i],precision) intercept.blup[i] <- Uj[school.id[i],1] # intercept rand effect(posterior) slope.blup[i] <- Uj[school.id[i],2] # slope random effect } # Random Effects -- note multivariate normal density is used for (j in 1:N) { Uj[j,1:2] ~ dmnorm(mu[1:2],Omega[1:2,1:2]) } # Priors precision ~ dgamma(0.01,0.01) sigma <- 1/sqrt(precision) g00 ~ dnorm(0,1/(100*sdY^2)) g2 ~ dnorm(0,1/(100*sdY^2)) g3 ~ dnorm(0,1/(100*sdY^2)) g10 ~ dnorm(0,1/(100*sdY^2)) g11 ~ dnorm(0,1/(100*sdY^2)) mu[1] <- 0 # This identifies the model mu[2] <- 0 # Omega[1:2,1:2] ~ dwish(R[,],2.1) R[1,1] <- 1/2.1 R[1,2] <- 0 R[2,1] <- 0 R[2,2] <- 1/2.1 Tau <- inverse(Omega) }" writeLines(re.mod4, con="re.mod4.txt") start1 = list("precision"=1, "g00"=rnorm(1,0,10),"g2"=rnorm(1,0,10), "g3"=rnorm(1,0,10),"g10"=rnorm(1,0,10), "g11"=rnorm(1,0,10), .RNG.name="base::Wichmann-Hill", .RNG.seed=523) start2 = list("precision"=0.5, "g00"=rnorm(1,0,10),"g2"=rnorm(1,0,10), "g3"=rnorm(1,0,10),"g10"=rnorm(1,0,10), "g11"=rnorm(1,0,10), .RNG.name="base::Marsaglia-Multicarry", .RNG.seed=57) start3 = list("precision"=0.05,"g00"=rnorm(1,0,10),"g2"=rnorm(1,0,10), "g3"=rnorm(1,0,10),"g10"=rnorm(1,0,10), "g11"=rnorm(1,0,10),.RNG.name="base::Super-Duper", .RNG.seed=24) start4 = list("precision"=.002,"g00"=rnorm(1,0,10),"g2"=rnorm(1,0,10), "g3"=rnorm(1,0,10),"g10"=rnorm(1,0,10), "g11"=rnorm(1,0,10),.RNG.name="base::Mersenne-Twister", .RNG.seed=72100) start <- list(start1,start2,start3,start4) re.mod4.runjags <- run.jags(model=re.mod4, method="parallel", monitor=c("g00", "g2","g3","g10","g11","sigma","Tau","marg.post","intercept.blup","slope.blup"), data=dataList, sample=10000, n.chains=4, inits=start, thin=10) add.summary(re.mod4.runjags) plot(re.mod4.runjags) summary(ris4.lmer) ########################################################################## ## Posterior predictive check stuff ---- not sure about this yet. ## Needs some work to put into format that can work with, which is ## shown below. ########################################################################## ffit <- mcmc(re.mod4.runjags) names(ffit) # lets you know what is available # posterior samples are in ffit$mcmc ---> there are separate columns for each chain, i.e., 10000 x 4 g00 <- as.array(ffit$mcmc[,1]) g10 <- as.array(ffit$mcmc[,2]) g11 <- as.array(ffit$mcmc[,3]) g2 <- as.array(ffit$mcmc[,4]) g3 <- as.array(ffit$mcmc[,5]) sigma <- as.array(ffit$mcmc[,6]) tau11 <- as.array(ffit$mcmc[,7]) tau12 <- as.array(ffit$mcmc[,8]) tau22 <- as.array(ffit$mcmc[,10]) margin.post <- as.array(ffit$mcmc[,11:529]) # separate matrices for each chain, i.e. 10000 x 519 x 4 intercept.blup <- as.array(ffit$mcmc[,530:1048]) slope.blup <- as.array(ffit$mcmc[,1049:1567]) # just want fit and fit.new # to get 40,000 x 519: marginal.means <- rbind(margin.post[,1:519,1],margin.post[,1:519,2],margin.post[,1:519,3],margin.post[,1:519,4]) int.blup <- rbind(intercept.blup[,1:519,1],intercept.blup[,1:519,2],intercept.blup[,1:519,3],intercept.blup[,1:519,4]) slp.blup <- rbind(slope.blup[,1:519,1],slope.blup[,1:519,2],slope.blup[,1:519,3],slope.blup[,1:519,4]) # Sum over rows to get fitted values (marginal) for each student nels$student.margin <- apply(marginal.means,2,"mean") # Data & fitted margin par(mfrow=c(2,2)) hist(nels$math,breaks=20,main="Data: Math margin") hist(nels$student.margin,breaks=20,main="Marginal Distribution") # to get the conditional margin: cond.mean <- marginal.means + int.blup + slp.blup nels$conditional.means <- apply(cond.mean,2,"mean") hist(nels$conditional.mean,breaks=20,main="Conditional Means") cor(nels$conditional.mean,nels$math) # Compute statistics over columns (students) ymeans <- apply(cond.mean,1,"mean") ymed <- apply(cond.mean,1,"median") ymax <- apply(cond.mean,1,"max") ysd <- apply(cond.mean,1,"sd") # Bayesian p-values pmean <- mean(mean(nels$math) < ymeans) pmed <- mean(median(nels$math) < ymed) pmax <- mean(max(nels$math) < ymax) psd <- mean(sd(nels$math) < ysd) # Graphs of descriptive statistics from posterior predictive distribution par(mfrow=c(2,2)) hist(ymed,breaks=10,main=paste("Conditional Medians", "\nBayesian P-value =", round(pmed, 2))) lines(c(median(nels$math),median(nels$math)),c(0,10000),col="blue",lwd=2) hist(ymax,breaks=10,main=paste("Conditional Maximums", "\nBayesian P-value =", round(pmax, 2))) lines(c(max(nels$math),max(nels$math)),c(0,10000),col="blue",lwd=2) hist(ymeans,breaks=10,main=paste("Conditional Means", "\nBayesian P-value =", round(pmean, 2))) lines(c(mean(nels$math),mean(nels$math)),c(0,10000),col="blue",lwd=2) hist(ysd,breaks=10,main=paste("Conditional SDs", "\nBayesian P-value =", round(psd, 2))) lines(c(sd(nels$math),sd(nels$math)),c(0,10000),col="blue",lwd=2) plot(nels$math,nels$conditional.means,type="p") plot(nels$math,nels$student.margin,type="p") hist(int.blup) hist(slp.blup)