### Clearing all of the data so that I am starting with a clean set of data.
rm(list=ls())

### Setting my working directory to where I want to save results
setwd("~/Dropbox/vigh/d43-nigeria/workshops/workshop2023/analyses")

### Reading in the AKTH dataset
d<-read.csv("Nigeria-R3-Data-Simple.csv")
head(d)

######### Let's do some basic descriptives of the data
with(d, summary(age))
with(d, hist(age))
mean(d$male)
with(d, summary(bmi))
with(d, hist(bmi))
mean(d$smoke)
with(d, summary(cd4))
with(d, hist(cd4))
mean(d$tdf)
mean(d$dtg)
table(d$risk.alleles)
mean(d$htn)
d$jnc_bp<-factor(d$jnc_bp)
table(d$jnc_bp)
summary(d$years.ART)
summary(d$uACR)
with(d, hist(uACR))
with(d, boxplot(uACR))
d$log10uACR<-log10(d$uACR)
with(d, hist(log10uACR))
with(d, boxplot(log10uACR))
summary(d$eGFR)
with(d, hist(eGFR))
summary(d$potassium)
with(d, hist(potassium))
summary(d$sodium)
with(d, hist(sodium))
summary(d$bicarbonate)
with(d, hist(bicarbonate))
summary(d$chloride)
with(d, hist(chloride))
summary(d$urea)
with(d, hist(urea))
summary(d$SBP)
with(d, hist(SBP))
summary(d$DBP)
with(d, hist(DBP))
hist(d$bmi)
summary(d$bmi)
boxplot(d$bmi)
head(d)
summary(d$cd4)
mean(d$tdf)
table(d$male,d$smoke)
plot(d$age,d$bmi)
cor(d$age,d$bmi)
boxplot(d$bmi ~ d$male)
sum(is.na(d$cd4))
for (i in names(d)){
  print(paste(i,sum(is.na(d[i]))))
}
mean(d$age)
mean(d$cd4,na.rm=TRUE)
summary(d$cd4)
quantile(d$cd4, c(.10,.25,.99), na.rm=TRUE)
quantile(d$age, c(.10,.25,.99))
hist(d$cd4)
hist(log(d$uACR))
with(d, plot(log(uACR),eGFR))
with(d, cor(log(uACR),eGFR))
mean(d$htn)

##### Univariate Associations with htn (self-reported hypertension)

#### Logistic regression model looking at the association between age and hypertension
## We obtain the model coefficients and p-values, and then we compute odds ratios
## and 95% confidence intervals for the odds ratios.
mod.age<-glm(htn ~ age, data=d, family=binomial)
summary(mod.age)
exp(mod.age$coefficients)
exp(confint.default(mod.age))

#### Logistic regression model looking at the association between cd4 and hypertension
mod.cd4<-glm(htn ~ cd4, data=d, family=binomial)
summary(mod.cd4)
exp(mod.cd4$coefficients*100)
exp(confint.default(mod.cd4)*100)

mod.dm<-glm(htn ~ DM, data=d, family=binomial)
summary(mod.dm)

mod.tdf<-glm(htn ~ tdf, data=d, family=binomial)
summary(mod.tdf)

mod.ra<-glm(htn ~ risk.alleles, data=d, family=binomial)
summary(mod.ra)
exp(mod.ra$coefficients)
exp(confint.default(mod.ra))

mod.raf<-glm(htn ~ factor(risk.alleles), data=d, family=binomial)
summary(mod.raf)
exp(mod.raf$coefficients)
exp(confint.default(mod.raf))

d$hr.risk.alleles<-ifelse(d$risk.alleles==2,1,0)
table(d$hr.risk.alleles,d$risk.alleles)
mod.hra<-glm(htn ~ hr.risk.alleles, data=d, family=binomial)
summary(mod.hra)
exp(mod.hra$coefficients)
exp(confint.default(mod.hra))

#### Chi-square test looking at the association between sex and hypertension
## The first uses a continuity correction, the second does not.
## We compare this with logistic regression, which results in a p-value that is
## the same as the one that does not use the continuity correction.
with(d, chisq.test(htn,male))
with(d, chisq.test(htn,male, correct=FALSE))
mod.male<-glm(htn~male, data=d, family=binomial)
summary(mod.male)
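#### A compact alternative (sketch): the same univariable screening can be run in a loop
#### over a vector of candidate predictor names. This assumes each name below is a column
#### of d, as in the models fit above.
uni.vars<-c("age", "male", "bmi", "smoke", "cd4", "tdf", "dtg", "DM")
for (v in uni.vars){
  m<-glm(as.formula(paste("htn ~", v)), data=d, family=binomial)
  print(paste(v,
              "OR =", round(exp(coef(m)[2]), 3),
              "p =", round(summary(m)$coefficients[2, 4], 4)))
}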
#### I created this function that takes the fitted model and extracts the odds ratio,
## 95% confidence interval for the odds ratio, and the p-value. It also allows me to
## quickly multiply the coefficient by a number, "multiplier", in case I want to
## interpret an association on a different scale (e.g., instead of a 1-unit increase
## in CD4, maybe a 100-unit increase in CD4).
or.function<-function(coeff.info, multiplier=1){
  or<-exp(coeff.info[1]*multiplier)
  lower<-exp((coeff.info[1]-1.96*coeff.info[2])*multiplier)
  upper<-exp((coeff.info[1]+1.96*coeff.info[2])*multiplier)
  pval<-coeff.info[4]
  names(or)<-"Odds Ratio"
  names(lower)<-"95% Lower CI"
  names(upper)<-"95% Upper CI"
  names(pval)<-"P-Value"
  print(c(or, lower, upper, pval))
}
or.function(summary(mod.male)$coefficients[2,])

### Now I am fitting logistic regression using the lrm function of the rms package.
## Everything is the same, but the functions are slightly different.
library(rms)
dd<-datadist(d)       ## This line and the next are used to help with summarizing lrm(.) results
options(datadist=dd)
fit.male<-lrm(htn ~ male, data=d)
summary(fit.male)     ## This gives estimates for contrasts
anova(fit.male)       ## This computes p-values for the model

mod.age<-glm(htn~age, data=d, family=binomial)
summary(mod.age)
or.function(summary(mod.age)$coefficients[2,])

mod.bmi<-glm(htn~bmi, data=d, family=binomial)
summary(mod.bmi)
or.function(summary(mod.bmi)$coefficients[2,])

mod.smoke<-glm(htn~smoke, data=d, family=binomial)
summary(mod.smoke)
or.function(summary(mod.smoke)$coefficients[2,])

mod.cd4<-glm(htn~cd4, data=d, family=binomial)
summary(mod.cd4)
or.function(summary(mod.cd4)$coefficients[2,], multiplier=100)

mod.tdf<-glm(htn~tdf, data=d, family=binomial)
summary(mod.tdf)
or.function(summary(mod.tdf)$coefficients[2,])

mod.dtg<-glm(htn~dtg, data=d, family=binomial)
summary(mod.dtg)
or.function(summary(mod.dtg)$coefficients[2,])

### jnc_bp is a categorical variable, so there are multiple odds ratios comparing
## to the reference category
mod.jnc<-glm(htn~jnc_bp, data=d, family=binomial)
summary(mod.jnc)
or.function(summary(mod.jnc)$coefficients[2,])
or.function(summary(mod.jnc)$coefficients[3,])
or.function(summary(mod.jnc)$coefficients[4,])

mod.art<-glm(htn~years.ART, data=d, family=binomial)
summary(mod.art)
or.function(summary(mod.art)$coefficients[2,])

mod.uacr<-glm(htn~uACR, data=d, family=binomial)
summary(mod.uacr)
or.function(summary(mod.uacr)$coefficients[2,])

### Analyzing log-transformed uACR, because it is so skewed.
mod.luacr<-glm(htn~log10uACR, data=d, family=binomial)
summary(mod.luacr)
or.function(summary(mod.luacr)$coefficients[2,])

#############################################################################
##### Day 2
#############################################################################

####### Now let's do multivariable logistic regression analysis
length(d$htn)   # number of records in the database
sum(d$htn)      # number of people with hypertension

## Clinically relevant variables from Muktar:
##   age, male, bmi, smoke, eGFR, potassium, sodium, DM
##   maybe an interaction between male and age
## Let's also see if CD4 is a good predictor

## Logistic regression model with all predictor variables suggested by Muktar
mod1<-glm(htn ~ age + male + bmi + smoke + eGFR + potassium + sodium + DM,
          data=d, family=binomial)
summary(mod1)

## DM was not a good predictor of hypertension, despite being a good predictor
## in the univariate analyses. Very few participants had DM.
sum(d$DM)
exp(mod1$coefficients)
mod.htn<-glm(htn ~ DM, data=d, family=binomial)
summary(mod.htn)
exp(mod.htn$coeff)
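#### Optional sketch: odds ratios, 95% CIs, and p-values for every coefficient in mod1,
#### collected in one table (the same idea as or.function, applied model-wide). This only
#### reuses coef(), confint.default(), and summary() on the model fit above.
or.table<-data.frame(OR=exp(coef(mod1)),
                     exp(confint.default(mod1)),
                     p.value=summary(mod1)$coefficients[,4])
names(or.table)<-c("OR", "Lower 95% CI", "Upper 95% CI", "P-Value")
round(or.table, 4)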
#### Examining the interaction between sex and age. The interaction was not statistically
## significant, and interpretation is more difficult with an interaction term,
## so from this point forward we will not include an interaction.
mod.inter<-glm(htn ~ age * male + bmi + smoke + eGFR + potassium + sodium + DM,
               data=d, family=binomial)
summary(mod.inter)
exp(mod.inter$coeff)

#### Checking to see if CD4 is a good predictor. It is. Notice that 162 records were
## not included in this analysis because they were missing CD4 count.
mod.cd4<-glm(htn ~ age + male + bmi + smoke + eGFR + potassium + sodium + DM + cd4,
             data=d, family=binomial)
summary(mod.cd4)
exp(mod.cd4$coeff)

#### I am looking at the distribution of continuous variables again. If they are highly
## skewed, I might want to transform them first. Only potassium seems highly skewed,
## so we log-transform it.
hist(d$age)
hist(d$bmi)
hist(d$eGFR)
hist(d$potassium)
summary(d$potassium)
hist(log(d$potassium))
hist(d$sodium)
summary(d$sodium)
hist(d$cd4)
hist(sqrt(d$cd4))

#### Fitting mod.cd4 again except with potassium log-transformed
mod2<-glm(htn ~ age + male + bmi + smoke + eGFR + I(log(potassium)) + sodium + DM + cd4,
          data=d, family=binomial)
summary(mod2)

#### I am expanding all continuous variables using natural splines with 3 df each to avoid
## assuming linearity. The ns() function comes from the splines package.
library(splines)
mod.nonlin<-glm(htn ~ ns(age, df=3) + male + ns(bmi, df=3) + smoke + ns(eGFR, df=3) +
                  ns(I(log(potassium)), df=3) + ns(sodium, df=3) + DM + ns(cd4, df=3),
                data=d, family=binomial)
mod.nonlin

#### This does a test for non-linearity by comparing the model with all of the non-linear
## terms to the model in which everything is assumed linear. I need to calculate the
## p-value myself by comparing the change in deviance to a chi-square distribution with
## 12 degrees of freedom.
anova(mod2, mod.nonlin)
1-pchisq(anova(mod2, mod.nonlin)$Deviance, df=anova(mod2, mod.nonlin)$Df)

#############################################################################
##### Day 3
#############################################################################

### Now repeating mod2 using lrm() to fit logistic regression instead of glm()
dd<-datadist(d)
options(datadist="dd")
fit2<-lrm(htn ~ age + male + bmi + smoke + eGFR + I(log(potassium)) + sodium + DM + cd4,
          data=d, x=TRUE, y=TRUE)

### I get the same beta coefficient estimates with both approaches
fit2$coefficients
mod2$coefficients

### Now fitting a nonlinear model with 3 df (4 knots) allocated to each continuous variable
fit.nonlin<-lrm(htn ~ rcs(age,parms=4) + male + rcs(bmi,parms=4) + smoke + rcs(eGFR,parms=4) +
                  rcs(I(log(potassium)),parms=4) + rcs(sodium,parms=4) + DM + rcs(cd4,parms=4),
                data=d, x=TRUE, y=TRUE)
fit.nonlin

### This allows us to see tests for non-linearity:
anova(fit.nonlin)

### Because the non-linear terms for BMI, sodium, and CD4 were far from statistically significant,
### I remove them and refit the model:
fit.nonlin1<-lrm(htn ~ rcs(age,parms=4) + male + bmi + smoke + rcs(eGFR,parms=4) +
                   rcs(I(log(potassium)),parms=4) + sodium + DM + cd4,
                 data=d, x=TRUE, y=TRUE)
anova(fit.nonlin1)

### Let's try it again, but only using 2 df for potassium instead of 3:
fit.nonlin2<-lrm(htn ~ rcs(age,parms=4) + male + bmi + smoke + rcs(eGFR,parms=4) +
                   rcs(I(log(potassium)),parms=3) + sodium + DM + cd4,
                 data=d, x=TRUE, y=TRUE)
anova(fit.nonlin2)

### Let's try it again using 2 df for all remaining variables with non-linear terms instead of 3:
fit.nonlin3<-lrm(htn ~ rcs(age,parms=3) + male + bmi + smoke + rcs(eGFR,parms=3) +
                   rcs(I(log(potassium)),parms=3) + sodium + DM + cd4,
                 data=d, x=TRUE, y=TRUE)
anova(fit.nonlin3)
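#### Optional sketch: before comparing models by AIC below, it can also be useful to compare
#### discrimination. The c-statistic is stored in the $stats component of each lrm fit.
c(fit.nonlin  = fit.nonlin$stats["C"],
  fit.nonlin1 = fit.nonlin1$stats["C"],
  fit.nonlin2 = fit.nonlin2$stats["C"],
  fit.nonlin3 = fit.nonlin3$stats["C"])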
#### Comparing the AIC between models. fit.nonlin2 appears to be the best (lowest AIC).
AIC(fit.nonlin)
AIC(fit.nonlin1)
AIC(fit.nonlin2)
AIC(fit.nonlin3)
fit.nonlin2

### The summary(.) function, when applied to a model fit using lrm(), shows contrasts -- i.e., odds ratios
### comparing specific values. The default is to compare the 75th and 25th percentiles of continuous variables.
### We can change which values of variables we want to compare by specifying the covariate and the values.
summary(fit.nonlin2)
summary(fit.nonlin2, age=c(40,40,20), cd4=c(200,100,300))
anova(fit.nonlin2)

###### This is the start of a table that one might want to create to present the results in an
###### interpretable manner.
########################################
####                          OR         95% CI               p-value
#### Age                                                      <0.0001
####   20 vs. 40              0.230880   0.056849, 0.937630
####   30 vs. 40
####   40 vs. 40
####   50 vs. 40              2.635300   2.000600, 3.471300
####   60 vs. 40
#### CD4 (per 100 cells)      1.065900   1.017200, 1.116800
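#### Optional sketch: a quick way to visualize the fitted non-linear relationships from the
#### selected model on the log-odds scale. Predict() varies one covariate while holding the
#### others at the adjust-to values from datadist, so this assumes the datadist set-up above
#### is still in effect.
plot(Predict(fit.nonlin2, age))
plot(Predict(fit.nonlin2, eGFR))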