## Theorem 6.3 (for monotone missing data) ##

## (a) (delta is MSE)
test.CI.Phi <- function(data, covariate, i, j, alpha){
  data <- as.matrix(data)
  Z <- as.matrix(covariate)
  m <- ncol(Z)
  ## keep the Ni subjects still observed at time i (monotone pattern)
  Z <- Z[!is.na(data[,i]),]
  data <- data[!is.na(data[,i]),]
  N <- nrow(data)  # Ni
  ## regress Y_i on the covariates and its predecessors Y_1, ..., Y_{i-1}
  A <- data[, i]
  B <- cbind(Z, as.matrix(data[, 1:(i-1)]))
  coef <- summary(lm(A~B))$coefficients
  ## estimate, standard error, t statistic and p-value of phi_ij
  ## (column-major indexing of the i x 4 coefficient matrix)
  phi <- coef[j+1]
  sd <- coef[i+j+1]
  stat <- coef[i*2+j+1]
  pvalue <- coef[i*3+j+1]
  df <- N-m-i+1
  CI.phi <- c(phi+qt(alpha/2, df)*sd, phi-qt(alpha/2, df)*sd)
  list(stat=stat, pvalue=pvalue, CI.phi=CI.phi)
}

## Conclusion: "r" = reject H0: phi_ij = 0, "a" = accept (do not reject)
flag.ar <- function(data, covariate, alpha){
  n <- ncol(data)
  r.test <- matrix(rep(0, n*n), nrow = n)
  r.CI <- matrix(rep(0, n*n), nrow = n)
  for(i in 2:n){
    for(j in 1:(i-1)){
      res <- test.CI.Phi(data, covariate, i, j, alpha)
      if(res$pvalue < alpha) r.test[i, j] <- "r" else r.test[i, j] <- "a"
      CI <- res$CI.phi
      if(0 > CI[1] && 0 < CI[2]) r.CI[i, j] <- "a" else r.CI[i, j] <- "r"
    }
  }
  list(flag.ar.test = r.test, flag.ar.CI = r.CI)
}

## (b) (delta is MSE): calls the function of Theorem 5.8(a)
CI.Delta <- function(data, covariate, i, alpha){
  data <- as.matrix(data)
  Z <- as.matrix(covariate)
  N <- nrow(data[!is.na(data[,i]),])  # Ni
  n <- ncol(data)
  m <- ncol(Z)
  ## Theorem 5.8(a)
  delta.MSE <- REMLE.phi.delta(data, Z, n-1, REMLE=TRUE)[i,i]*(N-1)/(N-m-i+1)
  CI.delta <- c((N-m-i+1)*delta.MSE/qchisq(1-alpha/2, N-m-i+1),
                (N-m-i+1)*delta.MSE/qchisq(alpha/2, N-m-i+1))
  list(CI.delta=CI.delta)
}

## (c): calls the functions of Theorem 5.8(a)(b)
test.Phi_i <- function(data, covariate, i, alpha){
  data <- as.matrix(data)
  Z <- as.matrix(covariate)
  N <- nrow(data[!is.na(data[,i]),])  # Ni
  n <- ncol(data)
  m <- ncol(Z)
  phi_i.MLE <- REMLE.phi.delta(data, Z, n-1, REMLE=FALSE)[i,1:(i-1)]  ## Theorem 5.8(a)
  cov.A <- REMLE.cov(data, Z, n-1, REMLE=FALSE)                       ## Theorem 5.8(b)
  R.sq <- t(phi_i.MLE) %*% cov.A[1:(i-1), 1:(i-1)] %*% phi_i.MLE/cov.A[i,i]
  stat <- ((N-m-i+1)/(i-1))*R.sq/(1-R.sq)
  df1 <- i-1
  df2 <- N-m-i+1
  pvalue <- 1-pf(stat, df1, df2)
  list(stat=stat, pvalue=pvalue)
}

####################################################################################
## the following functions implement Theorem 5.8(a)(b)

############### Theorem 5.8(a) ###############
REMLE.phi.delta <- function(data, covariate, p, REMLE){
  data <- as.matrix(data)
  Z <- as.matrix(covariate)
  n <- ncol(data)
  m <- ncol(Z)
  V <- matrix(rep(0,n*n), nrow = n)
  ## expand a scalar maximal order p to the vector of orders min(i-1, p), i = 1, ..., n
  if(length(p)==1) p <- rep(0:p, c(rep(1,p), (n-p)))
  for(i in 1:n){
    ## keep the Ni subjects still observed at time i (monotone pattern)
    Z <- as.matrix(Z[!is.na(data[,i]),])
    data <- data[!is.na(data[,i]),]
    N <- nrow(data)
    if(m==1){
      A <- cov(matrix(data[, 1:i], ncol=i))*(N-m)/N
    } else{
      Y <- NULL
      for(j in 1:N) Y <- cbind(Y, t(data[j,1:i]))
      Y <- t(Y)
      ## p.147
      beta.bar <- kronecker(solve(t(Z)%*%Z)%*%t(Z), diag(i))%*%Y
      A <- matrix(rep(0,i*i), nrow = i)
      for(s in 1:N){
        M <- matrix(data[s,1:i], nrow=i)-kronecker(matrix(Z[s,], nrow=1), diag(i))%*%beta.bar
        A <- A + M %*% t(M)
      }
      A <- A/N
    }
    if(REMLE==TRUE) cov <- (N/(N-m))*A else cov <- A
    if(i==1){
      ## innovation variance (2.18) for i = 1
      V[i,i] <- cov
    } else{
      ## innovation variances (2.18)
      V[i,i] <- cov[i,i] - t(cov[(i-min(p[i],(i-1))):(i-1), i]) %*%
        solve(cov[(i-min(p[i],(i-1))):(i-1), (i-min(p[i],(i-1))):(i-1)]) %*%
        cov[(i-min(p[i],(i-1))):(i-1), i]
      ## autoregressive coefficients (2.19)
      phi <- as.vector(t(cov[(i-min(p[i],(i-1))):(i-1), i]) %*%
        solve(cov[(i-min(p[i],(i-1))):(i-1), (i-min(p[i],(i-1))):(i-1)]))
      for(j in (i-min(p[i],(i-1))):(i-1)) V[i,j] <- phi[j+1-(i-min(p[i],(i-1)))]
    }
  }
  V
}
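## Illustration only: a minimal sketch of how the output of REMLE.phi.delta might be
## read, assuming an intercept-only covariate (m = 1) and hypothetical simulated data.
## Innovation variances appear on the diagonal and autoregressive coefficients below
## it, as in (2.18)-(2.19). The object names y.sim, ones and V.sim are illustrative.
set.seed(1)
y.sim <- matrix(rnorm(50*4), ncol = 4)     # 50 subjects, 4 times, complete data
ones  <- matrix(1, nrow = 50, ncol = 1)    # intercept-only covariate (m = 1)
V.sim <- REMLE.phi.delta(y.sim, ones, p = 3, REMLE = TRUE)
diag(V.sim)     # REML estimates of the innovation variances delta_1, ..., delta_4
V.sim[4, 1:3]   # estimated coefficients of Y4 on Y1, Y2, Y3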
############### Theorem 5.8(b) ###############
## calculate the correlations below and above the main diagonal
## (off-diagonal entries become correlations; the diagonal keeps the variances)
cov.cor <- function(V){
  for(i in 2:nrow(V)){
    for(j in 1:(i-1)) V[j,i] <- V[i,j] <- V[i,j]/sqrt(V[i,i]*V[j,j])
  }
  V
}

## covariance matrix Sigma = T^{-1} D T'^{-1} built from the autoregressive
## coefficients (below the diagonal of REMLE.phi.delta) and the innovation variances
REMLE.cov <- function(data, covariate, p, REMLE){
  n <- ncol(data)
  T <- -REMLE.phi.delta(data, covariate, p, REMLE=REMLE)
  D <- matrix(rep(0,n*n), nrow = n)
  diag(D) <- -diag(T)
  diag(T) <- rep(1,n)
  solve(T) %*% D %*% solve(t(T))
}
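## Usage sketch with hypothetical data: a simulated monotone dropout data set, used
## only to show how the Theorem 6.3 functions might be called; the simulated values
## and object names are illustrative, not the data analysed in the text. An
## intercept-only covariate (a column of ones) is assumed throughout.
set.seed(123)
y <- matrix(rnorm(60*4), ncol = 4)       # 60 subjects, 4 measurement times
y[41:60, 4] <- NA                        # 20 subjects unobserved at time 4
y[51:60, 3] <- NA                        # 10 of them already unobserved at time 3 (monotone)
ones <- matrix(1, nrow = 60, ncol = 1)   # intercept-only covariate (m = 1)

test.CI.Phi(y, ones, i = 3, j = 1, alpha = 0.05)  # part (a): test and CI for phi_31
flag.ar(y, ones, alpha = 0.05)                    # accept/reject flags for all phi_ij
CI.Delta(y, ones, i = 3, alpha = 0.05)            # part (b): CI for delta_3
test.Phi_i(y, ones, i = 3, alpha = 0.05)          # part (c): F test of phi_31 = phi_32 = 0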