
Time series modeling with MLM

library(dplyr)
library(ggplot2)
library(tidyr)
library(nlme)
library(knitr)

# Make sure you set your working dir to the location of the csv
# setwd('C:\\Users\\flournoy\\Downloads') 
aDF<-read.csv('timeseries_DanceRandom_RAW_forRClub_28Apr2015.csv')

First, we’ll lengthen the data set so we get a row for every dwell time observation, using tidyr::gather and some dplyr functions.

We also want to know which slide number each dwell time observation comes from, as well as which within-element position it represents.

aDF_l<-aDF %>%
  dplyr::select(-TDT,-Order) %>%
  gather(slide,dt,-SubjID) %>% # one row per subject-by-slide dwell time
  mutate(slide_num=as.numeric(sub('X([0-9]+)','\\1',slide))) %>% # strip the 'X' prefix
  arrange(SubjID,slide_num) %>%
  mutate(
        element_position=rep(1:4,n()/4), # slides cycle through 4 within-element positions
        slide_num=slide_num-min(slide_num)) # 0-index the slide number

kable(head(aDF_l))
| SubjID | slide |    dt | slide_num | element_position |
|-------:|:------|------:|----------:|-----------------:|
|     10 | X18   | 0.186 |         0 |                1 |
|     10 | X19   | 0.796 |         1 |                2 |
|     10 | X20   | 0.216 |         2 |                3 |
|     10 | X21   | 0.211 |         3 |                4 |
|     10 | X22   | 0.453 |         4 |                1 |
|     10 | X23   | 0.352 |         5 |                2 |

We can plot each participant’s dwell time curve:

ggplot(aDF_l,aes(x=slide_num,y=dt))+
  geom_line(aes(group=SubjID),alpha=.05)+
  coord_trans(ytrans='log10')+
  geom_smooth(aes(group=NULL),method=loess,se=F)+
  theme(panel.background=element_rect(fill='white'))

[Plot: each participant’s dwell time by slide number (log y scale), with a group-level loess smooth]

To test autocorrelation at different lags, we’re going to specify an expected correlation structure for the residuals. For example, a lag-1 autocorrelation structure imposes the constraint that the correlation between the residuals for slide 0 and slide 1 equals the correlation between the residuals for slide 1 and slide 2, and so on down the series.

We’re really interested in the lag-4 correlation – that is, the correlation between the residuals for slide i and slide i+4. In our case, the residual is whatever is left over after we account for the mean (i.e., intercept) and the linear trend across the entire experiment of 720 slides.
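To build intuition for what an AR(4) residual structure implies, here’s a quick self-contained sketch (the coefficients are made up for illustration, not estimates from these data): simulate an AR(4) series and inspect its sample autocorrelations.

# Simulate an AR(4) process with hypothetical coefficients and plot
# its autocorrelation function out to lag 8.
set.seed(1)
sim_ar4<-arima.sim(model=list(ar=c(.2,.1,.1,.2)),n=720)
acf(sim_ar4,lag.max=8)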

First, set our autoregressive moving average options:

lag_4_pos_by_subj<-corARMA(
    value=c(0,0,0,.2), # Initial values for lag 1-4
    p=4,q=0, # We want 4 lags, and 0 moving averages estimated
    form=~slide_num|SubjID) # slide_num is our time variable, grouped by SubjID

This uses our data to initialize a correlation matrix, which we do here only for illustration; nlme will do it automatically later on. The figure shows what our expected residual correlation matrix looks like.

lag_4_pos_by_subj_initd<-Initialize(lag_4_pos_by_subj,data=aDF_l)
aMat<-corMatrix(lag_4_pos_by_subj_initd)
single_mat<-aMat$`10`
single_mat_l<-single_mat %>% as.data.frame %>%
    mutate(y=1:n()) %>%
    gather(x_col,value,-y,convert=T) %>% 
    mutate(
        x=as.numeric(sub('V([0-9]+)','\\1',x_col)),
        group=rep(1:10,each=51840))
single_mat_l %>% 
    filter(value > .01) %>%
    ggplot(aes(x=x,y=y))+
        geom_tile(aes(fill=(value)))+
        # facet_wrap(~group,scales='free',shrink=T) +
        theme(
            line=element_line(color='white'),
            panel.background=element_rect(fill='white'))

[Plot: expected residual correlation matrix for one subject under the initialized lag-4 AR structure]

Now we can build and compare our models. We’ll test models with lag-1, -2, -3, and -4 AR structures.

Create our different residual correlation matrices:

lag_1_pos_by_subj<-corARMA(
    value=c(0), 
    p=1,q=0, 
    form=~slide_num|SubjID)
lag_2_pos_by_subj<-corARMA(
    value=c(0,0), 
    p=2,q=0, 
    form=~slide_num|SubjID)
lag_3_pos_by_subj<-corARMA(
    value=c(0,0,0), 
    p=3,q=0, 
    form=~slide_num|SubjID)
lag_4_pos_by_subj<-corARMA(
    value=c(0,0,0,0), 
    p=4,q=0, 
    form=~slide_num|SubjID)

Build our null model (which includes a linear effect of time to account for that long linear trend we saw in the above plot).

nullModel<-lme(
    dt~1+slide_num,
    aDF_l,
    random=~1|SubjID)
summary(nullModel)
## Linear mixed-effects model fit by REML
##  Data: aDF_l 
##        AIC      BIC    logLik
##   31533.89 31566.97 -15762.95
## 
## Random effects:
##  Formula: ~1 | SubjID
##         (Intercept)  Residual
## StdDev:   0.4327605 0.4161845
## 
## Fixed effects: dt ~ 1 + slide_num 
##                  Value  Std.Error    DF   t-value p-value
## (Intercept)  0.7002197 0.06860064 28759  10.20719       0
## slide_num   -0.0005525 0.00001180 28759 -46.82514       0
##  Correlation: 
##           (Intr)
## slide_num -0.062
## 
## Standardized Within-Group Residuals:
##         Min          Q1         Med          Q3         Max 
## -2.83833764 -0.30453025 -0.06975014  0.16771644 88.41714587 
## 
## Number of Observations: 28800
## Number of Groups: 40
lag1Model<-lme(
    dt~1+slide_num,
    aDF_l,
    random=~1|SubjID,
    correlation=lag_1_pos_by_subj)
summary(lag1Model)
## Linear mixed-effects model fit by REML
##  Data: aDF_l 
##        AIC      BIC    logLik
##   29359.82 29401.16 -14674.91
## 
## Random effects:
##  Formula: ~1 | SubjID
##         (Intercept) Residual
## StdDev:   0.4325128 0.416731
## 
## Correlation Structure: AR(1)
##  Formula: ~slide_num | SubjID 
##  Parameter estimate(s):
##       Phi 
## 0.2730642 
## Fixed effects: dt ~ 1 + slide_num 
##                  Value  Std.Error    DF   t-value p-value
## (Intercept)  0.7026000 0.06869298 28759  10.22812       0
## slide_num   -0.0005573 0.00001561 28759 -35.69883       0
##  Correlation: 
##           (Intr)
## slide_num -0.082
## 
## Standardized Within-Group Residuals:
##         Min          Q1         Med          Q3         Max 
## -2.83058935 -0.30722993 -0.07147524  0.16737678 88.29390180 
## 
## Number of Observations: 28800
## Number of Groups: 40
anova(nullModel,lag1Model)
##           Model df      AIC      BIC    logLik   Test  L.Ratio p-value
## nullModel     1  4 31533.90 31566.97 -15762.95                        
## lag1Model     2  5 29359.82 29401.16 -14674.91 1 vs 2 2176.075  <.0001

Computing each of the next three models takes a really long time (90 minutes for the lag-4 model), so I’ll parallelize them.

ar_lags<-list(
    lag1=lag_1_pos_by_subj,
    lag2=lag_2_pos_by_subj,
    lag3=lag_3_pos_by_subj,
    lag4=lag_4_pos_by_subj)

library(doParallel)
registerDoParallel(cores=4)
lag_models<-foreach(
    corstruct=ar_lags,
    .packages=c('nlme','dplyr'),
    .export=c('nullModel'),
    .combine=bind_rows,
    .multicombine=T
    ) %dopar% {
        aModel<-update(
            nullModel,
            correlation=corstruct)
        aSummary<-summary(aModel)
        as_data_frame(list(
            model=list(aModel),
            summary=list(aSummary)))
}
save(lag_models,file='lag_models.RData',compress=T)
print(lag_models)
## Source: local data frame [4 x 2]
## 
##      model               summary
## 1 <S3:lme> <S3:summary.lme, lme>
## 2 <S3:lme> <S3:summary.lme, lme>
## 3 <S3:lme> <S3:summary.lme, lme>
## 4 <S3:lme> <S3:summary.lme, lme>

Now we can compare them…

lag_models %>%
    mutate(Total_Lags=1:4) %>%
    group_by(Total_Lags) %>%
    do({
        phis<-coef(.$model[[1]]$modelStruct$corStruct,unconstrained=F)
        nphis<-paste('lag',1:length(phis))
        data.frame(
            lag=nphis,
            phi=phis
            )
    }) %>%
    spread(lag,phi) %>% 
    kable(digits=2)
## Warning in rbind_all(out[[1]]): Unequal factor levels: coercing to
## character
| Total_Lags | lag 1 | lag 2 | lag 3 | lag 4 |
|-----------:|------:|------:|------:|------:|
|          1 |  0.27 |    NA |    NA |    NA |
|          2 |  0.23 |  0.18 |    NA |    NA |
|          3 |  0.20 |  0.14 |  0.15 |    NA |
|          4 |  0.18 |  0.12 |  0.11 |  0.19 |
kable(
    cbind(
        list(Lags=c(1,2)),
        anova(lag_models$model[[1]],lag_models$model[[2]])[,c(-1,-2)]),
    row.names=F)
| Lags | df |      AIC |      BIC |    logLik | Test   |  L.Ratio | p-value |
|-----:|---:|---------:|---------:|----------:|:-------|---------:|--------:|
|    1 |  5 | 29359.82 | 29401.16 | -14674.91 |        |       NA |      NA |
|    2 |  6 | 28486.57 | 28536.17 | -14237.28 | 1 vs 2 | 875.2539 |       0 |
kable(
    cbind(
        list(Lags=c(2,3)),
        anova(lag_models$model[[2]],lag_models$model[[3]])[,c(-1,-2)]),
    row.names=F)
| Lags | df |      AIC |      BIC |    logLik | Test   |  L.Ratio | p-value |
|-----:|---:|---------:|---------:|----------:|:-------|---------:|--------:|
|    2 |  6 | 28486.57 | 28536.17 | -14237.28 |        |       NA |      NA |
|    3 |  7 | 27892.35 | 27950.22 | -13939.17 | 1 vs 2 | 596.2176 |       0 |
kable(
    cbind(
        list(Lags=c(3,4)),
        anova(lag_models$model[[3]],lag_models$model[[4]])[,c(-1,-2)]),
    row.names=F)
| Lags | df |      AIC |      BIC |    logLik | Test   |  L.Ratio | p-value |
|-----:|---:|---------:|---------:|----------:|:-------|---------:|--------:|
|    3 |  7 | 27892.35 | 27950.22 | -13939.17 |        |       NA |      NA |
|    4 |  8 | 26898.26 | 26964.40 | -13441.13 | 1 vs 2 | 996.0902 |       0 |

We can look at the predicted correlation matrix:

predicted_ARMA<-corARMA(value=c(.18,.12,.11,.19),p=4,form=~slide_num)

predict_df<-data.frame(slide_num=1:720) # define the time grid before Initialize uses it
predicted_ARMA_init<-Initialize(predicted_ARMA,data=predict_df)
corMatARMA<-corMatrix(predicted_ARMA_init)

corMatARMA %>%
    as.data.frame %>%
    mutate(x=1:n()) %>%
    gather(y,value,-x,convert=T) %>%
    mutate(y=as.numeric(sub('V(.*)','\\1',y))) %>%
    filter(value > .01) %>%
    ggplot(aes(x=x,y=y))+
        geom_tile(aes(fill=(value)))+
        theme(
            line=element_line(color='white'),
            panel.background=element_rect(fill='white'))

[Plot: model-implied residual correlation matrix from the estimated lag-4 coefficients]

And we can produce simulated data, using our fixed-effects estimates for the linear trend as mu and our correlation matrix as Sigma for MASS::mvrnorm.

predict_df$dt<-predict( # predict_df was created above, before Initialize
    lag_models$model[[4]],
    predict_df,
    level=0)

library(MASS)

predict_df$dt_err<-mvrnorm(1,mu=scale(predict_df$dt),Sigma=corMatARMA)

Our predictions look like this:

predict_df %>% 
    ggplot(aes(x=slide_num))+
    geom_line(aes(y=dt_err))+
    theme(panel.background=element_rect(fill='white'))

[Plot: a simulated dwell-time series across the 720 slides]

Our data look like this:

aDF_l %>%
    ggplot(aes(x=slide_num,y=dt))+
    geom_line()+
    facet_wrap(~SubjID,scales='free_y')+
    theme(panel.background=element_rect(fill='white'))

[Plot: observed dwell-time series, one panel per participant]

Not bad, visually speaking. I suspect we could do better if we modeled the underlying rate parameter of a Poisson process, but that’s a whole other can of worms.
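For the curious, here’s a minimal sketch of one way that might look, treating dwell times as waiting times whose rate we model on the log scale with a Gamma GLMM via lme4::glmer (an assumption-laden sketch, not something fit in this post):

# Hypothetical sketch only: model the dwell-time rate with a Gamma GLMM
# (log link), with random intercepts by subject.
library(lme4)
rateModel<-glmer(
    dt~1+slide_num+(1|SubjID),
    data=aDF_l,
    family=Gamma(link='log'))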

Here’s what happens if we just dummy-code each element position:


summary(update(nullModel,.~.+as.factor(element_position)))

## Linear mixed-effects model fit by REML
##  Data: aDF_l 
##        AIC      BIC    logLik
##   31554.64 31612.51 -15770.32
## 
## Random effects:
##  Formula: ~1 | SubjID
##         (Intercept)  Residual
## StdDev:   0.4327605 0.4161319
## 
## Fixed effects: dt ~ slide_num + as.factor(element_position) 
##                                   Value  Std.Error    DF   t-value p-value
## (Intercept)                   0.7135225 0.06873087 28756  10.38140  0.0000
## slide_num                    -0.0005524 0.00001180 28756 -46.81881  0.0000
## as.factor(element_position)2 -0.0202103 0.00693554 28756  -2.91402  0.0036
## as.factor(element_position)3 -0.0161110 0.00693557 28756  -2.32295  0.0202
## as.factor(element_position)4 -0.0170860 0.00693562 28756  -2.46351  0.0138
##  Correlation: 
##                              (Intr) sld_nm a.(_)2 a.(_)3
## slide_num                    -0.061                     
## as.factor(element_position)2 -0.050 -0.002              
## as.factor(element_position)3 -0.050 -0.003  0.500       
## as.factor(element_position)4 -0.050 -0.005  0.500  0.500
## 
## Standardized Within-Group Residuals:
##         Min          Q1         Med          Q3         Max 
## -2.82219320 -0.30539798 -0.07029029  0.16628406 88.39624069 
## 
## Number of Observations: 28800
## Number of Groups: 40 

Latent Growth Curves in R

This is based in part on Nicole’s code found in this post; the full script, sem_in_r.r, is reproduced at the end of this section.
First, reading in the data:

library(foreign)
library(dplyr)
library(tidyr)
library(ggplot2)
library(lavaan)
library(semPlot)
library(knitr)

setwd('/home/jflournoy/code/sem_in_r/')

pdr2<-read.spss("PDR Wave 2.sav", to.data.frame=T)
# pdr4<-read.spss("PDR Wave 4.sav", to.data.frame=T)

Generate a time variable

This indexes each call for each family.

pdr2_time <- pdr2 %>%
group_by(FAMILY) %>% #do the count by family
  arrange(YEAR,MONTH,DAY) %>% #sort by date
  mutate(callindex=1:n()) #create call index that's 1:end for each family

head(pdr2_time[,c('FAMILY','callindex')])
## Source: local data frame [6 x 2]
## Groups: FAMILY
## 
##   FAMILY callindex
## 1  TP001         1
## 2  TP001         2
## 3  TP001         3
## 4  TP001         4
## 5  TP001         5
## 6  TP001         6

Create composite score of kid bex

pdr2_time_bxtrans<-pdr2_time %>% ungroup %>% 
  mutate_each(
    funs(as.numeric(!(.=='DID NOT OCCUR'))),
    P31201:P31240)

To break down the above statement:

pdr2_time gets sent to ungroup, which removes the grouping by FAMILY we did above, and that gets sent to mutate_each. This takes a range of columns that we define in the second argument as P31201:P31240, which reads ‘from P31201 to P31240’.

The meat of mutate_each is what goes in the first argument, within the function funs(). You can list a bunch of functions here if you want to mutate all the columns in a number of different ways. The period character, ., represents the column that will be passed to each function. In this case, we just check whether each element of the column is ‘DID NOT OCCUR’, and if so, we negate it (giving us FALSE) and then as.numeric it (giving us 0). Any other response gives us a 1, which is what we want. Importantly, this returns NA if the value is NA.
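A toy illustration of that logic on a single vector (the level names here follow the comments in the script at the end of this post):

x<-c('DID NOT OCCUR','OCCURRED, STRESSED',NA)
as.numeric(!(x=='DID NOT OCCUR'))
## [1]  0  1 NA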

If you want to learn more, see ?mutate_each. Moving on now…

head(pdr2_time_bxtrans)
## Source: local data frame [6 x 63]
## 
##   FAMILY RESP MONTH DAY YEAR INT   WEEKDAY P31201 P31202 P31203 P31204
## 1  TP001    3     7  22 2004  4H WEDNESDAY      0      1      1      0
## 2  TP001    3     7  23 2004  4H   THURSAY      0      1      1      0
## 3  TP001    3     7  27 2004  4H    MONDAY      0      1      1      0
## 4  TP001    3     7  28 2004  4H   TUESDAY      0      1      1      0
## 5  TP001    3     8   3 2004  4H    MONDAY      0      1      1      0
## 6  TP001    3     8   4 2004  4H   TUESDAY      0      1      1      0
## Variables not shown: P31205 (dbl), P31206 (dbl), P31207 (dbl), P31208
##   (dbl), P31209 (dbl), P31210 (dbl), P31211 (dbl), P31212 (dbl), P31213
##   (dbl), P31214 (dbl), P31215 (dbl), P31216 (dbl), P31217 (dbl), P31218
##   (dbl), P31219 (dbl), P31220 (dbl), P31221 (dbl), P31222 (dbl), P31223
##   (dbl), P31224 (dbl), P31225 (dbl), P31226 (dbl), P31227 (dbl), P31228
##   (dbl), P31229 (dbl), P31230 (dbl), P31231 (dbl), P31232 (dbl), P31233
##   (dbl), P31234 (dbl), P31235 (dbl), P31236 (dbl), P31237 (dbl), P31238
##   (dbl), P31239 (dbl), P31240 (dbl), P31241 (fctr), P31242 (fctr), P31242A
##   (fctr), P31242B (fctr), P31242C (fctr), P31242D (fctr), P31243 (fctr),
##   P31243A (fctr), P31243B (fctr), P31243C (fctr), P31243D (fctr), P31244
##   (dbl), P31245 (dbl), WAVE (dbl), PILOT1 (fctr), callindex (int)

Now we can create the composite variable using a sum. There is missing data, so that should be dealt with, but we’ll ignore that for now.
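Note that with na.rm=T a missing item simply contributes 0 to the row total, which deflates scores for rows with missingness. A quick demonstration on made-up values:

# With na.rm=TRUE, an NA item counts as 0 in the row total.
rowSums(data.frame(a=c(1,NA),b=c(2,3)),na.rm=TRUE)
## [1] 3 3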

pdr2_time_bxtrans$bextot<-pdr2_time_bxtrans %>%
  select(P31201:P31240) %>% rowSums(na.rm=T)

summary(pdr2_time_bxtrans$bextot)
##    Min. 1st Qu.  Median    Mean 3rd Qu.    Max. 
##   0.000   1.000   4.000   5.307   8.000  26.000
hist(pdr2_time_bxtrans$bextot)

We have a call-index variable, but if you want a time index that is really the number of days since the family was first contacted, here’s how to do that.

pdr2_time_bxtrans_t2<-
  pdr2_time_bxtrans %>% 
  mutate(formd_date=as.Date(paste(MONTH,DAY,YEAR,sep='/'),"%m/%d/%Y")) %>% 
  group_by(FAMILY) %>%
  arrange(YEAR,MONTH,DAY) %>%
  mutate(days_since_1st=formd_date-min(formd_date))

Some descriptive plots

Let’s see what we’re working with.

First, the raw data – a line for every family:

pdr2_time_bxtrans_t2 %>% filter(callindex < 33) %>%
  ggplot(aes(x=callindex,y=bextot))+
    geom_line(aes(group=FAMILY),alpha=.1)+
    theme(panel.background=element_rect(fill='white'))

Next, a smoothed loess curve for every family:

pdr2_time_bxtrans_t2 %>% filter(callindex < 33) %>%
  ggplot(aes(x=callindex,y=bextot))+
    geom_smooth(aes(group=FAMILY),method=loess,se=F)+
    theme(panel.background=element_rect(fill='white'))

Finally, the first plot again, but with group-level smoothed curves – a line, plus second- and third-degree polynomials, just to see what those look like:

pdr2_time_bxtrans_t2 %>% filter(callindex < 33) %>%
  ggplot(aes(x=callindex,y=bextot))+
    geom_line(aes(group=FAMILY),alpha=.05)+
    geom_smooth(method=lm,formula=y~poly(x,1),color='dark orange',se=F)+
    geom_smooth(method=lm,formula=y~poly(x,2),color='red',se=F)+
    geom_smooth(method=lm,formula=y~poly(x,3),color='black',se=F)+
    theme(panel.background=element_rect(fill='white'))

From the plot it looks like either a linear or quadratic would capture this pretty well.

Widen the data by call index

Now we can make a wide data file to examine the trend of bextot.

pdr2_bex_w_by_index<-
  pdr2_time_bxtrans_t2 %>% select(FAMILY,bextot,callindex) %>% 
  filter(callindex<33) %>% # just the first 32 to minimize missingness
  mutate(callindex=paste('call',formatC(callindex, width = 2, format = "d", flag = "0"),sep='_')) %>%
  spread(callindex,bextot)

Make R make the lavaan model code

Let’s make the lavaan code. We don’t want to write out each ‘call_N’ variable by hand, so we’ll do it programmatically.

(allthecallnames<-grep('call',names(pdr2_bex_w_by_index),value=T))
##  [1] "call_01" "call_02" "call_03" "call_04" "call_05" "call_06" "call_07"
##  [8] "call_08" "call_09" "call_10" "call_11" "call_12" "call_13" "call_14"
## [15] "call_15" "call_16" "call_17" "call_18" "call_19" "call_20" "call_21"
## [22] "call_22" "call_23" "call_24" "call_25" "call_26" "call_27" "call_28"
## [29] "call_29" "call_30" "call_31" "call_32"
(callnameswith_i_weights<-paste('1*',allthecallnames,sep=''))
##  [1] "1*call_01" "1*call_02" "1*call_03" "1*call_04" "1*call_05"
##  [6] "1*call_06" "1*call_07" "1*call_08" "1*call_09" "1*call_10"
## [11] "1*call_11" "1*call_12" "1*call_13" "1*call_14" "1*call_15"
## [16] "1*call_16" "1*call_17" "1*call_18" "1*call_19" "1*call_20"
## [21] "1*call_21" "1*call_22" "1*call_23" "1*call_24" "1*call_25"
## [26] "1*call_26" "1*call_27" "1*call_28" "1*call_29" "1*call_30"
## [31] "1*call_31" "1*call_32"

Centering is really important for interpreting the linear effect in the presence of a quadratic. The intercept and linear slope term here are interpreted only at the first timepoint, though one can easily calculate the expected value at any timepoint.
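As a small illustration of that calculation, the model-implied mean at 0-indexed call t is i + s*t + q*t^2; here’s a sketch that plugs in the latent-mean estimates from the growth model summary further below:

# Model-implied mean bextot at (0-indexed) call t; the defaults are the
# latent-mean estimates reported in the summary below.
implied_mean<-function(t,i=7.402,s=-0.261,q=0.005) i+s*t+q*t^2
implied_mean(c(0,15,31))
## [1] 7.402 4.612 4.116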

(callnameswith_s_weights<-paste(0:31,'*',allthecallnames,sep=''))
##  [1] "0*call_01"  "1*call_02"  "2*call_03"  "3*call_04"  "4*call_05" 
##  [6] "5*call_06"  "6*call_07"  "7*call_08"  "8*call_09"  "9*call_10" 
## [11] "10*call_11" "11*call_12" "12*call_13" "13*call_14" "14*call_15"
## [16] "15*call_16" "16*call_17" "17*call_18" "18*call_19" "19*call_20"
## [21] "20*call_21" "21*call_22" "22*call_23" "23*call_24" "24*call_25"
## [26] "25*call_26" "26*call_27" "27*call_28" "28*call_29" "29*call_30"
## [31] "30*call_31" "31*call_32"
(callnameswith_q_weights<-paste((0:31)^2,'*',allthecallnames,sep='')) 
##  [1] "0*call_01"   "1*call_02"   "4*call_03"   "9*call_04"   "16*call_05" 
##  [6] "25*call_06"  "36*call_07"  "49*call_08"  "64*call_09"  "81*call_10" 
## [11] "100*call_11" "121*call_12" "144*call_13" "169*call_14" "196*call_15"
## [16] "225*call_16" "256*call_17" "289*call_18" "324*call_19" "361*call_20"
## [21] "400*call_21" "441*call_22" "484*call_23" "529*call_24" "576*call_25"
## [26] "625*call_26" "676*call_27" "729*call_28" "784*call_29" "841*call_30"
## [31] "900*call_31" "961*call_32"
(i_weights_collapsed<-paste(callnameswith_i_weights,collapse=' + '))
## [1] "1*call_01 + 1*call_02 + 1*call_03 + 1*call_04 + 1*call_05 + 1*call_06 + 1*call_07 + 1*call_08 + 1*call_09 + 1*call_10 + 1*call_11 + 1*call_12 + 1*call_13 + 1*call_14 + 1*call_15 + 1*call_16 + 1*call_17 + 1*call_18 + 1*call_19 + 1*call_20 + 1*call_21 + 1*call_22 + 1*call_23 + 1*call_24 + 1*call_25 + 1*call_26 + 1*call_27 + 1*call_28 + 1*call_29 + 1*call_30 + 1*call_31 + 1*call_32"
(s_weights_collapsed<-paste(callnameswith_s_weights,collapse=' + '))
## [1] "0*call_01 + 1*call_02 + 2*call_03 + 3*call_04 + 4*call_05 + 5*call_06 + 6*call_07 + 7*call_08 + 8*call_09 + 9*call_10 + 10*call_11 + 11*call_12 + 12*call_13 + 13*call_14 + 14*call_15 + 15*call_16 + 16*call_17 + 17*call_18 + 18*call_19 + 19*call_20 + 20*call_21 + 21*call_22 + 22*call_23 + 23*call_24 + 24*call_25 + 25*call_26 + 26*call_27 + 27*call_28 + 28*call_29 + 29*call_30 + 30*call_31 + 31*call_32"
(q_weights_collapsed<-paste(callnameswith_q_weights,collapse=' + '))
## [1] "0*call_01 + 1*call_02 + 4*call_03 + 9*call_04 + 16*call_05 + 25*call_06 + 36*call_07 + 49*call_08 + 64*call_09 + 81*call_10 + 100*call_11 + 121*call_12 + 144*call_13 + 169*call_14 + 196*call_15 + 225*call_16 + 256*call_17 + 289*call_18 + 324*call_19 + 361*call_20 + 400*call_21 + 441*call_22 + 484*call_23 + 529*call_24 + 576*call_25 + 625*call_26 + 676*call_27 + 729*call_28 + 784*call_29 + 841*call_30 + 900*call_31 + 961*call_32"
model <- paste(
  ' i =~ ',i_weights_collapsed,'\n\n',
  ' s =~ ',s_weights_collapsed,'\n\n',
  ' q =~ ',q_weights_collapsed,sep='')
cat(model)
##  i =~ 1*call_01 + 1*call_02 + 1*call_03 + 1*call_04 + 1*call_05 + 1*call_06 + 1*call_07 + 1*call_08 + 1*call_09 + 1*call_10 + 1*call_11 + 1*call_12 + 1*call_13 + 1*call_14 + 1*call_15 + 1*call_16 + 1*call_17 + 1*call_18 + 1*call_19 + 1*call_20 + 1*call_21 + 1*call_22 + 1*call_23 + 1*call_24 + 1*call_25 + 1*call_26 + 1*call_27 + 1*call_28 + 1*call_29 + 1*call_30 + 1*call_31 + 1*call_32
## 
##  s =~ 0*call_01 + 1*call_02 + 2*call_03 + 3*call_04 + 4*call_05 + 5*call_06 + 6*call_07 + 7*call_08 + 8*call_09 + 9*call_10 + 10*call_11 + 11*call_12 + 12*call_13 + 13*call_14 + 14*call_15 + 15*call_16 + 16*call_17 + 17*call_18 + 18*call_19 + 19*call_20 + 20*call_21 + 21*call_22 + 22*call_23 + 23*call_24 + 24*call_25 + 25*call_26 + 26*call_27 + 27*call_28 + 28*call_29 + 29*call_30 + 30*call_31 + 31*call_32
## 
##  q =~ 0*call_01 + 1*call_02 + 4*call_03 + 9*call_04 + 16*call_05 + 25*call_06 + 36*call_07 + 49*call_08 + 64*call_09 + 81*call_10 + 100*call_11 + 121*call_12 + 144*call_13 + 169*call_14 + 196*call_15 + 225*call_16 + 256*call_17 + 289*call_18 + 324*call_19 + 361*call_20 + 400*call_21 + 441*call_22 + 484*call_23 + 529*call_24 + 576*call_25 + 625*call_26 + 676*call_27 + 729*call_28 + 784*call_29 + 841*call_30 + 900*call_31 + 961*call_32

Fit the model!

fit <- growth(model, data=pdr2_bex_w_by_index)

Here’s the model we fit:

semPaths(fit)

And here’s the summary:

summary(fit)
## lavaan (0.5-18) converged normally after 169 iterations
## 
##                                                   Used       Total
##   Number of observations                            72         100
## 
##   Estimator                                         ML
##   Minimum Function Test Statistic              838.225
##   Degrees of freedom                               519
##   P-value (Chi-square)                           0.000
## 
## Parameter estimates:
## 
##   Information                                 Expected
##   Standard Errors                             Standard
## 
##                    Estimate  Std.err  Z-value  P(>|z|)
## Latent variables:
##   i =~
##     call_01           1.000
##     call_02           1.000
##     call_03           1.000
##     call_04           1.000
##     call_05           1.000
##     call_06           1.000
##     call_07           1.000
##     call_08           1.000
##     call_09           1.000
##     call_10           1.000
##     call_11           1.000
##     call_12           1.000
##     call_13           1.000
##     call_14           1.000
##     call_15           1.000
##     call_16           1.000
##     call_17           1.000
##     call_18           1.000
##     call_19           1.000
##     call_20           1.000
##     call_21           1.000
##     call_22           1.000
##     call_23           1.000
##     call_24           1.000
##     call_25           1.000
##     call_26           1.000
##     call_27           1.000
##     call_28           1.000
##     call_29           1.000
##     call_30           1.000
##     call_31           1.000
##     call_32           1.000
##   s =~
##     call_01           0.000
##     call_02           1.000
##     call_03           2.000
##     call_04           3.000
##     call_05           4.000
##     call_06           5.000
##     call_07           6.000
##     call_08           7.000
##     call_09           8.000
##     call_10           9.000
##     call_11          10.000
##     call_12          11.000
##     call_13          12.000
##     call_14          13.000
##     call_15          14.000
##     call_16          15.000
##     call_17          16.000
##     call_18          17.000
##     call_19          18.000
##     call_20          19.000
##     call_21          20.000
##     call_22          21.000
##     call_23          22.000
##     call_24          23.000
##     call_25          24.000
##     call_26          25.000
##     call_27          26.000
##     call_28          27.000
##     call_29          28.000
##     call_30          29.000
##     call_31          30.000
##     call_32          31.000
##   q =~
##     call_01           0.000
##     call_02           1.000
##     call_03           4.000
##     call_04           9.000
##     call_05          16.000
##     call_06          25.000
##     call_07          36.000
##     call_08          49.000
##     call_09          64.000
##     call_10          81.000
##     call_11         100.000
##     call_12         121.000
##     call_13         144.000
##     call_14         169.000
##     call_15         196.000
##     call_16         225.000
##     call_17         256.000
##     call_18         289.000
##     call_19         324.000
##     call_20         361.000
##     call_21         400.000
##     call_22         441.000
##     call_23         484.000
##     call_24         529.000
##     call_25         576.000
##     call_26         625.000
##     call_27         676.000
##     call_28         729.000
##     call_29         784.000
##     call_30         841.000
##     call_31         900.000
##     call_32         961.000
## 
## Covariances:
##   i ~~
##     s                -0.266    0.154   -1.721    0.085
##     q                 0.004    0.004    1.008    0.313
##   s ~~
##     q                -0.001    0.000   -2.018    0.044
## 
## Intercepts:
##     call_01           0.000
##     call_02           0.000
##     call_03           0.000
##     call_04           0.000
##     call_05           0.000
##     call_06           0.000
##     call_07           0.000
##     call_08           0.000
##     call_09           0.000
##     call_10           0.000
##     call_11           0.000
##     call_12           0.000
##     call_13           0.000
##     call_14           0.000
##     call_15           0.000
##     call_16           0.000
##     call_17           0.000
##     call_18           0.000
##     call_19           0.000
##     call_20           0.000
##     call_21           0.000
##     call_22           0.000
##     call_23           0.000
##     call_24           0.000
##     call_25           0.000
##     call_26           0.000
##     call_27           0.000
##     call_28           0.000
##     call_29           0.000
##     call_30           0.000
##     call_31           0.000
##     call_32           0.000
##     i                 7.402    0.417   17.760    0.000
##     s                -0.261    0.038   -6.913    0.000
##     q                 0.005    0.001    5.015    0.000
## 
## Variances:
##     call_01          16.731    3.107
##     call_02           9.807    1.915
##     call_03          11.202    2.087
##     call_04           9.089    1.700
##     call_05          17.265    3.023
##     call_06          15.249    2.667
##     call_07          18.412    3.178
##     call_08          15.511    2.684
##     call_09          16.776    2.886
##     call_10          10.061    1.764
##     call_11          12.751    2.208
##     call_12          14.664    2.524
##     call_13          12.104    2.097
##     call_14          13.257    2.289
##     call_15          11.621    2.017
##     call_16           7.045    1.257
##     call_17          10.488    1.828
##     call_18          11.085    1.927
##     call_19           9.291    1.628
##     call_20           8.794    1.545
##     call_21          11.203    1.945
##     call_22          11.655    2.020
##     call_23           7.416    1.316
##     call_24           7.003    1.249
##     call_25          10.300    1.800
##     call_26          13.740    2.377
##     call_27          13.083    2.275
##     call_28           8.501    1.526
##     call_29           8.287    1.507
##     call_30           8.291    1.533
##     call_31           7.677    1.469
##     call_32          10.034    1.899
##     i                 9.498    2.095
##     s                 0.042    0.017
##     q                 0.000    0.000

Starting with this growth model, we could now add in predictors and outcomes for the latent i, s, and q variables.
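For instance, here’s a hedged sketch of what adding a predictor might look like, assuming some family-level covariate were merged into pdr2_bex_w_by_index (the treat variable below is hypothetical, standing in for any real predictor):

# Hypothetical extension: regress the growth factors on a covariate.
# 'treat' does not exist in these data; it stands in for any predictor.
model_ext<-paste(model,'
  i ~ treat
  s ~ treat
  q ~ treat
')
fit_ext<-growth(model_ext,data=pdr2_bex_w_by_index)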



SEM in R: Adding a time index and recoding the Behavioral Scores (sem_in_r.r)

#' First, reading in the data:
#' 
library(foreign)
setwd('C:/Users/flournoy/Downloads')

pdr2<-read.spss("PDR Wave 2.sav", to.data.frame=T)
pdr4<-read.spss("PDR Wave 4.sav", to.data.frame=T)
head(pdr2)
summary(pdr2)

#' ## Generate a time variable
#' 
#' This indexes each call for each family. This time variable
#' will be important later for widening the data for SEM.
#' 
library(dplyr)

pdr2_time <- pdr2 %>%
group_by(FAMILY) %>% #do the count by family
 arrange(YEAR,MONTH,DAY) %>% #sort by date
 mutate(callindex=1:n()) #create call index that's 1:end for each family

head(pdr2_time)

#' ## Create composite score of kid bex
#' 
#' Scores are found in cols 8:47.

library(car)
#?recode
## recode all of the kid behavior items into numeric variables in one go
# ignore difference between 'occurred, stressed' and 'occurred, not stressed'
# (if it occurred at all, it gets a 1; if it didn't occur, it's a 0)
# the defaults for as.XX.result are both TRUE; if you leave it that way,
# it will return character variables.

pdr2_time[,8:47]<-sapply(pdr2_time[,8:47],
 function(x)
 {x<-recode(x,"'DID NOT OCCUR'='0'; else = '1'",
 as.factor.result=F, as.numeric.result=T)})

head(pdr2_time)
str(pdr2_time)

#' Now we can create the composite (sum) variable
#' 
pdr2_time$bextot<-rowSums(pdr2_time[,8:47],na.rm=F)

summary(pdr2_time$bextot)