library(FactoMineR); library(reshape); library(psych); library(polycor); library(lmerTest); library(car); library(mice); library(forestplot); library(AICcmodavg); library(extrafont); library(lm.beta)

## Variance inflation factors for mixed-effects models fit with nlme or lme4; adapted from rms::vif,
## courtesy of aufrank and denohora: https://github.com/aufrank/R-hacks/blob/master/mer-utils.R
vif.mer <- function (fit) {
  v <- vcov(fit)
  nam <- names(fixef(fit))
  ## exclude intercepts
  ns <- sum(1 * (nam == "Intercept" | nam == "(Intercept)"))
  if (ns > 0) {
    v <- v[-(1:ns), -(1:ns), drop = FALSE]
    nam <- nam[-(1:ns)]
  }
  d <- diag(v)^0.5
  v <- diag(solve(v/(d %o% d)))
  names(v) <- nam
  v
}

dat <- read.csv("[Insert path here]/Fessler et al. Study 1 Data.csv", header=TRUE)

##### Rename #####
## Values scale
dat <- rename(dat, c(q0158_0001="WP_SP", q0158_0002="WP_Pac", q0158_0003="WP_Soc", q0158_0004="WP_Por",
                     q0158_0005="WP_II", q0158_0006="WP_WE", q0158_0007="WP_DP", q0158_0008="WP_PA",
                     q0158_0009="WP_PS", q0158_0010="WP_GM", q0158_0011="WP_AR", q0158_0012="WP_Evo",
                     q0158_0013="WP_Pat", q0158_0014="WP_BT", q0159_0001="WP_Ira", q0159_0002="WP_WS",
                     q0159_0003="WP_Tax", q0159_0004="WP_GC", q0159_0005="WP_MS", q0159_0006="WP_War",
                     q0159_0007="WP_Glo", q0159_0008="WP_PC", q0159_0009="WP_SG", q0159_0010="WP_CS",
                     q0159_0011="WP_FA", q0159_0012="WP_FT", q0159_0013="WP_Obe", q0159_0014="WP_Com")) # See Appendix for items.

## Society Works scale
dat <- rename(dat, c(q0160="SW_Val", q0161="SW_Beh", q0162="SW_LB", q0163="SW_Hel", q0164="SW_Bre",
                     q0165="SW_Con", q0166="SW_Rew", q0167="SW_Res", q0168="SW_Soc", q0169="SW_LQ",
                     q0170="SW_LL", q0171="SW_Rec", q0172="SW_LC")) # See Appendix for items.

## Other naming
dat <- rename(dat, c(q0178="Inc", q0179="Edu", q0184="Par", q0191="YAg")) # Income, education, parent status, and age of youngest child.

## Recode the categorical variable indicating whether a participant's height included a half inch (e.g., 69.5 inches tall).
dat$HaInch <- rep(NA, nrow(dat))
dat$HaInch[dat$Part_Half_Inch==1 | is.na(dat$Part_Half_Inch)] <- 0
dat$HaInch[dat$Part_Half_Inch==2] <- 0.5

##### Exclusions #####
### Identify participants who met the exclusion criteria. ###
# We imputed for individuals missing less than 10% of the questions per section; those missing more are flagged as incomplete below.
tmpc <- subset(dat, select=grep("_Belief", colnames(dat)))
maxi_c <- 0.1*length(tmpc)
dat$Cred_Incomplete[apply(apply(tmpc, 2, is.na), 1, sum, na.rm=TRUE) > maxi_c] <- 1
tmpv <- subset(dat, select=grep("WP_", colnames(dat)))
maxi_v <- 0.1*length(tmpv)
dat$Values_Incomplete[apply(apply(tmpv, 2, is.na), 1, sum, na.rm=TRUE) > maxi_v] <- 1
tmps <- subset(dat, select=grep("SW_", colnames(dat)))
maxi_s <- 0.1*length(tmps)
dat$SW_Incomplete[apply(apply(tmps, 2, is.na), 1, sum, na.rm=TRUE) > maxi_s] <- 1

## Time spent on the survey
dat$Time_Spent <- as.POSIXlt(dat$Time_Spent, format="%T")
dat$ThreeMins <- (as.POSIXlt(dat$Time_Spent, format="%T")-180) > as.POSIXlt("00:00:00", format="%T")
tmp <- strftime(as.POSIXlt(dat$StartDate, format="%T"), "%T", usetz=FALSE)

# We allowed participants some leniency as to the number of letters in the alphabet. We excluded participants
# who spent less than 3 minutes on the survey, having determined that one could not pay attention to the survey
# and complete it in under 3 minutes.
excluded <- subset(dat, (Cred_Incomplete == 1 | Values_Incomplete == 1 | SW_Incomplete == 1 |
                         !(Catch_1_Letters %in% seq(23, 36, by=1)) | is.na(Catch_2_1_is_Correct) |
                         !(Catch_3_Letters_in_Obligatory %in% c(9, 10)) | is.na(Consider_Self_American) |
                         Consider_Self_American == 2 | English_First_Language == 2 |
                         is.na(English_First_Language) | ThreeMins == FALSE))
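## Optional sanity check (not part of the reported analyses): tally how many participants each criterion
## catches on its own before they are combined above. The list name "crit" is illustrative; column names
## are as defined earlier in this script.
crit <- list(
  incomplete_cred   = dat$Cred_Incomplete == 1,
  incomplete_values = dat$Values_Incomplete == 1,
  incomplete_sw     = dat$SW_Incomplete == 1,
  failed_catch_1    = !(dat$Catch_1_Letters %in% seq(23, 36, by=1)),
  failed_catch_2    = is.na(dat$Catch_2_1_is_Correct),
  failed_catch_3    = !(dat$Catch_3_Letters_in_Obligatory %in% c(9, 10)),
  not_american      = is.na(dat$Consider_Self_American) | dat$Consider_Self_American == 2,
  english_not_first = dat$English_First_Language == 2 | is.na(dat$English_First_Language),
  under_three_mins  = dat$ThreeMins == FALSE
)
sapply(crit, sum, na.rm=TRUE) # Per-criterion counts; criteria overlap, so these can sum to more than nrow(excluded).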
fun.exc <- function(x.1, x.2, ...){
  x.1p <- do.call("paste", x.1)
  x.2p <- do.call("paste", x.2)
  x.1p %in% x.2p
}
dat$Excluded <- fun.exc(dat, excluded)
# Code that excludes. Run only to exclude participants who met the exclusion criteria.
dat1 <- dat[dat$Excluded==FALSE,]
# Code that includes. Run only to include participants who met the exclusion criteria.
#dat1<-dat

## IP addresses where a second survey was begun within a minute of finishing the previous one.
dat1 <- dat1[dat1$PID!=132,]
dat1 <- dat1[dat1$PID!=95,]
dat1 <- dat1[dat1$PID!=87,]

## Remove variables no longer in use.
dat1 <- subset(dat1, select=c(-Cred_Complete, -Cred_Incomplete, -Values_Complete, -Values_Incomplete,
                              -Society_Works_Complete, -SW_Incomplete, -Catch_1_Letters, -Catch_2_1_is_Correct,
                              -Catch_3_Letters_in_Obligatory, -Consider_Self_American, -English_First_Language,
                              -ThreeMins, -Part_Half_Inch, -Part_Political_Category_other, -Time_Spent,
                              -Part_Defensive_Fighting_Ability, -StartDate, -EndDate))

############################# Impute missing values using predictive mean matching ##################################
len <- rep('', length(dat1))
nms <- which(colSums(is.na(dat1)) > 0) # Columns that contain at least one NA. We want to impute those, but...
# ...we don't want to impute height or age of youngest child (which are not used in these analyses).
nms <- setdiff(nms, which(colnames(dat1) %in% c("Part_Height", "Part_Inches", "HaInch", "YAg", "Excluded")))
len[nms] <- 'pmm' # 'pmm' signals "predictive mean matching" to the mice function.

### Set seed for mice to stabilize results across re-runs of this code. ###
#x1 <- runif(1, 0, 100000000) # 45575935 on Oct 8 2016. Results are robust to the seed selected.
x1 <- 45575935; x2 <- 4990306; x3 <- 96058240; x4 <- 54475644; x5 <- 49487042
set.seed(x3)
imp <- mice(dat1, method=len)
dat2 <- complete(imp) # mice produces five imputed datasets by default; complete() returns the first of them as a data frame with imputed values filled in.
# To examine the sample without imputation, as we did for Table S5, run the line immediately below to override imputation.
#dat1->dat2

########################### Prepare variables ####################################
##### Values scale, created by Wilson & Patterson, modified by Dodd et al. #####
### Code responses that are "liberal" -1, "conservative" 1, and unknown 0. ###
## First, take items geared toward conservatives and code agreement as 1.
cons <- subset(dat2, select=c(WP_SP, WP_DP, WP_PA, WP_Pat, WP_BT, WP_Ira, WP_Tax, WP_MS, WP_SG, WP_Obe)) # Excludes free trade, charter schools, and warrantless search. See Appendix 2 for explanation.
adjcons <- matrix(rep(NA, nrow(cons)*length(cons)), ncol=length(cons))
colnames(adjcons) <- colnames(cons)
for (i in 1:length(cons)){
  adjcons[,i][cons[,i]==1] <- 1
  adjcons[,i][cons[,i]==2] <- -1
  adjcons[,i][cons[,i]==3] <- 0
}
adjcons <- as.data.frame(adjcons)

## Second, take items geared toward liberals and code agreement as -1.
lib <- subset(dat2, select=c(WP_Pac, WP_Soc, WP_Por, WP_II, WP_WE, WP_PS, WP_GM, WP_AR, WP_Evo, WP_WS, WP_GC, WP_Glo, WP_PC, WP_FA, WP_Com))
adjlib <- matrix(rep(NA, nrow(lib)*length(lib)), ncol=length(lib))
colnames(adjlib) <- colnames(lib)
for (i in 1:length(lib)){
  adjlib[,i][lib[,i]==1] <- -1
  adjlib[,i][lib[,i]==2] <- 1
  adjlib[,i][lib[,i]==3] <- 0
}
adjlib <- as.data.frame(adjlib)
adj <- cbind(adjcons, adjlib)
dat2$WiPa <- rowSums(adj) # Sum of responses to the scale, per Dodd et al. Higher values mean more conservative.
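## Optional diagnostics for the steps above (not reported in the manuscript): mice's built-in checks that
## the imputations are plausible, and the internal consistency of the 25 coded Wilson & Patterson items.
## These calls use standard mice and psych functions on objects already defined above.
plot(imp)        # Trace plots of means/SDs of imputed values across iterations; should show no trend.
densityplot(imp) # Densities of imputed vs. observed values for each imputed variable.
alpha(adj)$total # Cronbach's alpha for the coded items (psych was loaded above).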
### Break the items into those concerning social, economic, and military conservatism ###
soccons <- subset(adj, select=c(WP_GC, WP_SP, WP_BT, WP_Por, WP_II, WP_WE, WP_PS, WP_GM, WP_AR, WP_Evo))
dat2$SocCons <- PCA(soccons, graph=FALSE)$ind$coord[,1] # Extract scores on the first principal component.
ecocons <- subset(adj, select=c(WP_Tax, WP_SG, WP_Soc, WP_WS, WP_Glo, WP_PC, WP_FA))
dat2$EcoCons <- PCA(ecocons, graph=FALSE)$ind$coord[,1]
milcons <- subset(adj, select=c(WP_DP, WP_Obe, WP_PA, WP_Pat, WP_Ira, WP_MS, WP_Pac, WP_Com))
dat2$MilCons <- PCA(milcons, graph=FALSE)$ind$coord[,1]

##### Dodd et al. Society Works scale #####
dat2$SW_LC <- c(reverse.code(dat2$SW_LC, keys=c(-1))) # Reverse-code one item accidentally reversed by Dodd et al.
# Code responses that are "liberal" -1 and "conservative" 1.
dodd <- dat2[, grep("SW_", colnames(dat2))]
adjdodd <- matrix(rep(NA, nrow(dodd)*length(dodd)), ncol=length(dodd))
colnames(adjdodd) <- colnames(dodd)
for (i in 1:length(dodd)){
  adjdodd[,i][dodd[,i]==1] <- 1
  adjdodd[,i][dodd[,i]==2] <- -1
}
adjdodd <- as.data.frame(adjdodd)
dat2$Dodd <- rowSums(adjdodd) # Sum of responses to the scale, per Dodd et al. Higher values mean more conservative.

##### Political category #####
## Classify party affiliations as conservative, liberal, or neither.
# 1=Republican, 2=Democratic, 3=Tea Party, 4=Libertarian, 5=Green, 6=none.
# As before, -1 is a liberal affiliation, 1 is a conservative affiliation, and 0 is none or Libertarian.
dat2$NumPolCat <- rep(NA, nrow(dat2))
dat2$NumPolCat[as.numeric(dat2$Part_Political_Category)==1] <- 1
dat2$NumPolCat[as.numeric(dat2$Part_Political_Category)==2] <- -1
dat2$NumPolCat[as.numeric(dat2$Part_Political_Category)==3] <- 1
dat2$NumPolCat[as.numeric(dat2$Part_Political_Category)==4] <- 0
dat2$NumPolCat[as.numeric(dat2$Part_Political_Category)==5] <- -1
dat2$NumPolCat[as.numeric(dat2$Part_Political_Category)==6] <- 0

##### Create summary political orientation measures #####
dat2$PoliOr <- dat2$Part_Political_Orientation # Shorten the name.
polit <- subset(dat2, select=c(PoliOr, WiPa, Dodd, NumPolCat))
polit <- sapply(polit, scale, center=TRUE, scale=TRUE) # Z-score each of the four political orientation measures so that they are on comparable scales.
## First principal component of the four political orientation measures.
dat2$PolPCA <- PCA(polit, graph=FALSE)$ind$coord[,1]
## Dodd et al. summed the four political orientation measures. We use this summed summary measure as a robusticity check (Table S7).
dat2$PolSum <- rowSums(polit, na.rm=FALSE)

################ Adjust other variables of interest ################
dat2$Part_sex <- as.factor(dat2$Part_sex) # 1 = female, 2 = male
## Bin ethnicity because the sample is majority white.
dat2$EthBin <- rep(NA, nrow(dat2))
dat2$EthBin[dat2$Part_Ethnicity==7] <- "w" # white
dat2$EthBin[dat2$Part_Ethnicity!=7] <- "o" # other
dat2$EthBin <- as.factor(dat2$EthBin)
## Round extreme values of education inward to avoid undue influence of outliers. All rounded variables are listed in Appendix 2.
hi <- quantile(dat2$Edu, 0.975, na.rm=TRUE)
lo <- quantile(dat2$Edu, 0.025, na.rm=TRUE)
dat2$Edu[dat2$Edu>=hi] <- hi
dat2$Edu[dat2$Edu<=lo] <- lo
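## Optional check (not reported): how much variance each subscale's first principal component captures,
## and whether items load on it with consistent signs. Shown for the social items; the same calls apply
## to ecocons and milcons. The object name "pca_soc" is illustrative.
pca_soc <- PCA(soccons, graph=FALSE)
pca_soc$eig[1,]                   # Eigenvalue and percentage of variance for component 1.
round(pca_soc$var$coord[,1], 2)   # Item coordinates (loadings) on component 1.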
## Convert education numeric values to the categories they represent.
dat2$EduCat <- rep(NA, nrow(dat2))
dat2$EduCat[dat2$Edu==1] <- "Middle"
dat2$EduCat[dat2$Edu==2] <- "Some HS"
dat2$EduCat[dat2$Edu==3] <- "HS Grad"
dat2$EduCat[dat2$Edu==4] <- "Some AA"
dat2$EduCat[dat2$Edu==5] <- "AA Grad"
dat2$EduCat[dat2$Edu==6] <- "BA Grad"
dat2$EduCat[dat2$Edu==7] <- "Some Adv"
dat2$EduCat[dat2$Edu==8] <- "Adv Grad"
dat2$EduCat <- as.factor(dat2$EduCat)
dat2$EduCat[dat2$EduCat=="Adv Grad"] <- "Some Adv" # Binned because there are so few values (14) in the advanced-degree category.
dat2$Par <- as.factor(dat2$Par)
levels(dat2$Par) <- c("Parous", "Nullip") # Parous and nulliparous, i.e., have had kids and haven't had kids.

## Calculate participant height. Orthogonal to the present hypotheses; included here because it appeared in the survey and in exploratory analyses.
dat2$FeetInInch <- dat2$Part_Height*12
z <- c(which(colnames(dat2)=="FeetInInch"), which(colnames(dat2)=="HaInch"), which(colnames(dat2)=="Part_Inches"))
dat2$Height <- ifelse(is.na(dat2$Part_Height), NA, rowSums(dat2[,z], na.rm=TRUE))

## Age of participant's youngest child; not used in the present analyses.
dat2$YAg <- as.character(dat2$YAg)
dat2$YAg[dat2$YAg=="18 months"] <- as.character(18/12)
dat2$YAg[dat2$YAg=="2 WEEKS"] <- as.character(2/52)
dat2$YAg[dat2$YAg=="6 months"] <- as.character(6/12)
dat2$YAg[dat2$YAg=="10 months"] <- as.character(10/12)
dat2$YAg <- as.numeric(dat2$YAg)

## Center participant age at the youngest age in the sample (19).
minz <- min(dat2$Part_age, na.rm=TRUE)
dat2$Part_age <- dat2$Part_age - minz

## Center income so that under $20k (coded 1) is the baseline. Because there are so many bins for income, we treat it as continuous.
dat2$Inc <- dat2$Inc - 1

### Center Likert responses so that intercepts are interpretable ###
for (i in 1:length(dat2[, grep("_Belief", colnames(dat2))])) {
  dat2[, grep("_Belief", colnames(dat2))[i]] <- dat2[, grep("_Belief", colnames(dat2))[i]] - 1
}

### Round additional outliers down (and, for height, also round low outliers up). ###
dat2$Inc[dat2$Inc>=quantile(dat2$Inc, 0.975, na.rm=TRUE)] <- quantile(dat2$Inc, 0.975, na.rm=TRUE) # Extreme values rounded. See Appendix 2.
dat2$Part_age[dat2$Part_age>=quantile(dat2$Part_age, 0.975, na.rm=TRUE)] <- quantile(dat2$Part_age, 0.975, na.rm=TRUE)
dat2$SocCons[dat2$SocCons>quantile(dat2$SocCons, 0.975, na.rm=TRUE)] <- quantile(dat2$SocCons, 0.975, na.rm=TRUE)
dat2$WiPa[dat2$WiPa>quantile(dat2$WiPa, 0.975, na.rm=TRUE)] <- quantile(dat2$WiPa, 0.975, na.rm=TRUE)
# Height and child age are included only in exploratory analyses.
hi <- quantile(dat2$Height, 0.975, na.rm=TRUE)
lo <- quantile(dat2$Height, 0.025, na.rm=TRUE)
dat2$Height[dat2$Height>=hi] <- hi
dat2$Height[dat2$Height<=lo] <- lo
dat2$YAg[dat2$YAg>quantile(dat2$YAg, 0.975, na.rm=TRUE)] <- quantile(dat2$YAg, 0.975, na.rm=TRUE)

### Relevel two factors so that the values held at zero offer a more interpretable contrast ###
dat2$EduCat <- relevel(dat2$EduCat, "HS Grad")
dat2$Par <- relevel(dat2$Par, "Nullip")
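## The quantile-capping pattern above recurs for several variables. The helper below is a hypothetical
## refactoring (not used elsewhere in this script) that expresses the same operation once; it produces
## identical results to the per-variable lines above.
cap_quantile <- function(x, upper=0.975, lower=NULL) {
  hi <- quantile(x, upper, na.rm=TRUE)
  x[x >= hi] <- hi # Values at or above the upper quantile are rounded down to it.
  if (!is.null(lower)) {
    lo <- quantile(x, lower, na.rm=TRUE)
    x[x <= lo] <- lo # Optionally round extreme low values up, as done for Edu and Height.
  }
  x
}
# e.g., dat2$Inc <- cap_quantile(dat2$Inc); dat2$Height <- cap_quantile(dat2$Height, lower=0.025)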
c("Battery_Belief","Carrots_Belief","Stomach_Belief","CreditCards_Belief","Cats_Belief","Stockwood_Belief","Airline_Belief","Car_Belief")->bennm grep("_Benefit",colnames(dat2))->gravnm c("CellPhone_Belief","Kale_Belief","Knees_Belief","Keycards_Belief","Sharks_Belief","Terrorism_Belief","Masks_Belief","Lightning_Belief")->haznm grep("_Risk",colnames(dat2))->rsknm #### Calculate and analyze bias in hazard relative to benefit credulity, weighted by individual perceptions of question gravity #### toMatch<-c("_Belief","_Risk","_Benefit") dat3<-dat2[rowSums(is.na(dat2[,grep(paste(toMatch,collapse="|"),colnames(dat2))])) == 0,] #NAs exist for credulity and gravity measures when imputation is not performed. Individuals with NAs need to be excluded to avoid biasing their difference scores. This function will not eliminate anyone when imputation has been performed (as there will be no NAs). crhaz<-dat3[,haznm] crben<-dat3[,bennm] perben<-subset(dat3,select=c(grep("_Benefit",colnames(dat2)))) perrsk<-subset(dat3,select=c(grep("_Risk",colnames(dat2)))) # Here, we weight a participant's response for each credulity item by his or her own perceived gravity (either in terms of risks or benefits) for that item. for (i in 1:16) {dat3[,paste(grep("_Belief",colnames(dat3),value=TRUE)[i],"_adj",sep="")]<-rep(NA,nrow(dat3))} for (i in 1:nrow(dat3)){ for (j in 1:length(crben)){ stm<-sub("_Belief.*", "", colnames(crben))[j] k<-which(sub("_Benefit.*", "", colnames(perben)) %in% stm) perben[i,k]*crben[i,j]->dat3[i,paste0(stm,"_Belief_adj")] rm(stm,k) } } for (i in 1:nrow(dat3)){ for (j in 1:length(crhaz)){ stm<-sub("_Belief.*", "", colnames(crhaz))[j] k<-which(sub("_Risk.*", "", colnames(perrsk)) %in% stm) perrsk[i,k]*crhaz[i,j]->dat3[i,paste0(stm,"_Belief_adj")] rm(stm,k) } } ################### Main analysis #################### hadj<-paste0(haznm,"_adj");badj<-paste0(bennm,"_adj") dat3$Difference<-rowMeans(dat3[,paste0(haznm,"_adj")])-rowMeans(dat3[,paste0(bennm,"_adj")]) #The difference between the average of hazard credulity, each item weighted by perceived gravity, and the average of benefit credulity, each item weighted by perceived gravity. ### Table 1 (and Table S4) model ### sa_diff<-lm(Difference~PolPCA+Part_sex+Part_age+EthBin+Inc+EduCat+Par,dat3) vif(sa_diff) #Variance inflation factor function used with all LMs. Just fill in the appropriate model name. t(t(lm.beta(sa_diff)$standardized.coefficients)) #Provides the standardized beta for each model reported in the manuscript. ### Table S12 ### sa_sex<-lm(Difference~PolPCA*Part_sex+Part_age+EthBin+Inc+EduCat+Par,dat3) ### Table S13. Investigated because age has a significant main effect in some models. ### sa_age<-lm(Difference~PolPCA*Part_age+Part_sex+EthBin+Inc+EduCat+Par,dat3) # Do interaction models provide a better fit? Examine weighted AIC comparison. aictab(list(sa_sex,sa_diff)) aictab(list(sa_age,sa_diff)) ### Table 2 ### #Values (Wilson & Patterson) sa_wp<-lm(Difference~WiPa+Part_sex+Part_age+EthBin+Inc+EduCat+Par,data=dat3) #Society Works (Dodd et al.) 
### Table 2 ###
# Values (Wilson & Patterson)
sa_wp <- lm(Difference~WiPa+Part_sex+Part_age+EthBin+Inc+EduCat+Par, data=dat3)
# Society Works (Dodd et al.)
sa_dd <- lm(Difference~Dodd+Part_sex+Part_age+EthBin+Inc+EduCat+Par, data=dat3)
# Political Likert
sa_po <- lm(Difference~PoliOr+Part_sex+Part_age+EthBin+Inc+EduCat+Par, data=dat3)
# Political category
sa_cat <- lm(Difference~as.factor(NumPolCat)+Part_sex+Part_age+EthBin+Inc+EduCat+Par, data=dat3)

### Table 3 ###
sa_wpall <- lm(Difference~SocCons+EcoCons+MilCons+Part_sex+Part_age+EthBin+Inc+EduCat+Par, data=dat3)

### Exclusion of one item (and its gravity) at a time (Figure S1) ###
exclusions <- list()
alladj <- c(hadj, badj)
for (i in 1:length(alladj)) {
  tmp <- dat3[, alladj[-i]]
  bennm1 <- badj[badj %in% alladj[-i]]
  haznm1 <- hadj[hadj %in% alladj[-i]]
  oneout <- rowMeans(tmp[, haznm1]) - rowMeans(tmp[, bennm1])
  exclusions[[i]] <- summary(lm(oneout~PolPCA+Part_sex+Part_age+EthBin+Inc+EduCat+Par, dat3))
}
exclusions[[6]]$coef[c(2, 11*1+2, 11*3+2)] # Extracts the parameter estimate, s.e., and p value for the terrorism-excluded fit. These are then used in Figure S1 (see below).

### Hazards and benefits separately (Tables S6a-b) ###
dat3$HazWtd <- rowMeans(dat3[, hadj])
dat3$BenWtd <- rowMeans(dat3[, badj])
sa_haz <- lm(HazWtd~PolPCA+Part_sex+Part_age+EthBin+Inc+EduCat+Par, dat3)
sa_ben <- lm(BenWtd~PolPCA+Part_sex+Part_age+EthBin+Inc+EduCat+Par, dat3)

### Exclusion of one item (and its gravity) at a time from HAZARD credulity (Figure S2) ###
exclusions_h <- list()
for (i in 1:length(hadj)) {
  tmp <- dat3[, hadj[-i]]
  haznm1 <- hadj[-i]
  oneout <- rowMeans(tmp[, haznm1])
  exclusions_h[[i]] <- summary(lm(oneout~PolPCA+Part_sex+Part_age+EthBin+Inc+EduCat+Par, dat3))
}
exclusions_h[[6]]$coef[c(2, 11*1+2, 11*3+2)] # Again, the parameter estimate, s.e., and p value for the fit excluding the terrorism item.

### Exclusion of one item (and its gravity) at a time from BENEFIT credulity ###
exclusions_b <- list()
for (i in 1:length(badj)) {
  tmp <- dat3[, badj[-i]]
  bennm1 <- badj[-i]
  oneout <- rowMeans(tmp[, bennm1])
  exclusions_b[[i]] <- summary(lm(oneout~PolPCA+Part_sex+Part_age+EthBin+Inc+EduCat+Par, dat3))
}

### Robusticity check: political summary as a sum (Table S7) ###
sa_podiff <- lm(Difference~PolSum+Part_sex+Part_age+EthBin+Inc+EduCat+Par, dat3)

### Robusticity check: multilevel model with random effects for participant and question (Tables S9a-b) ###
# Switch responses from wide format (one participant per row) to long format (one item per participant per row).
# This creates an ID for each question ("Quest") and an "id" for each participant (which we ignore, as we already have participant IDs).
datlh <- reshape(dat2, varying=list(haznm, rsknm), v.names=c("HazResp", "Gravity"), timevar="Quest", times=haznm, direction="long")
datlb <- reshape(dat2, varying=list(bennm, gravnm), v.names=c("BenResp", "Gravity"), timevar="Quest", times=bennm, direction="long")
multib <- subset(datlb, select=c(BenResp, PolPCA, Gravity, Part_sex, Part_age, EthBin, Inc, EduCat, Par, WiPa, Dodd, PoliOr, NumPolCat, SocCons, EcoCons, MilCons, PolSum, Quest, PID))
multih <- subset(datlh, select=c(HazResp, PolPCA, Gravity, Part_sex, Part_age, EthBin, Inc, EduCat, Par, WiPa, Dodd, PoliOr, NumPolCat, SocCons, EcoCons, MilCons, PolSum, Quest, PID))
multib1 <- na.omit(multib)
multih1 <- na.omit(multih) # NAs must be removed to fit a linear mixed-effects model with lmer.
me_hpc <- lmer(HazResp~PolPCA+Part_sex+Part_age+EthBin+Inc+EduCat+Par+Gravity+(1|PID)+(1|Quest), data=multih1) # Crossed random effects for participant and question.
me_bpc <- lmer(BenResp~PolPCA+Part_sex+Part_age+EthBin+Inc+EduCat+Par+Gravity+(1|PID)+(1|Quest), data=multib1)
vif.mer(me_hpc) # Check the variance inflation factors of an lmer model.
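## Optional inspection of the mixed models (not reported): with lmerTest loaded, summary() supplies
## Satterthwaite-based p values, and the variance components show how much variation sits at the
## participant (PID) versus question (Quest) level. The object name "vc" is illustrative.
summary(me_hpc)
vc <- as.data.frame(VarCorr(me_hpc))
vc$vcov / sum(vc$vcov) # Proportion of total variance per component (PID, Quest, residual).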
### The difference between unweighted average credulity for hazards and for benefits (Table S8) ###
dat3$Unwtd_diff <- rowMeans(dat3[, haznm]) - rowMeans(dat3[, bennm])
nc_diff <- lm(Unwtd_diff~PolPCA+Part_sex+Part_age+EthBin+Inc+EduCat+Par, data=dat3)

#################### SUMMARY STATS ###################
## Tables S1a and S1b report on participants without imputation. (Return to the code immediately following mice and overwrite the results produced by mice.)
dat4 <- dat3
## Use the line below to explore descriptive stats for excluded participants.
#dat4<-dat3[dat3$Excluded==TRUE,]
summs <- subset(dat4, select=c(Unwtd_diff, Difference, HazWtd, BenWtd, PolPCA, WiPa, Dodd, PoliOr, NumPolCat, SocCons, EcoCons, MilCons, Part_age, Inc, Part_sex, EthBin, EduCat, Par, PolSum))
matnum <- subset(summs, select=c(-EthBin, -EduCat, -Part_sex, -Par, -NumPolCat))
matint <- subset(summs, select=c(NumPolCat, Part_sex, EthBin, EduCat, Par))
numdes <- matrix(rep(NA, length(matnum)*7), ncol=7, byrow=FALSE)
colnames(numdes) <- c("Variable", "Mean", "SD", "Median", "Min", "Max", "N")
for (i in 1:length(matnum)) {
  numdes[i,1] <- colnames(matnum[i])
  numdes[i,2] <- mean(matnum[,i], na.rm=TRUE)
  numdes[i,3] <- sd(matnum[,i], na.rm=TRUE)
  numdes[i,4] <- median(matnum[,i], na.rm=TRUE)
  numdes[i,5] <- min(matnum[,i], na.rm=TRUE)
  numdes[i,6] <- max(matnum[,i], na.rm=TRUE)
  numdes[i,7] <- nrow(matnum[!is.na(matnum[,i]),])
}
facdes <- matrix(rep(NA, length(matint)*7), ncol=7, byrow=FALSE)
colnames(facdes) <- c("Variable", "Mean", "SD", "Median", "Min", "Max", "N")
for (i in 1:length(matint)) {
  facdes[i,1] <- colnames(matint[i])
  if (class(matint[,i])=="integer") facdes[i,4] <- median(matint[,i], na.rm=TRUE) else facdes[i,4] <- median(as.numeric(matint[,i]), na.rm=TRUE)
  facdes[i,7] <- nrow(matint[!is.na(matint[,i]),])
}
i <- 1
summary(as.factor(matint[,i][!is.na(matint[,i])]))/length(as.factor(matint[,i][!is.na(matint[,i])]))
# Gives the percentage breakdown per level for each of the categorical variables as you change the value of i
# (i=1 just above; this contextualizes the "medians" provided by the facdes table).
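## Cross-check (not reported): psych::describe() reproduces the numdes table above in a single call,
## and table() summarizes the categorical variables level by level.
describe(matnum)      # n, mean, sd, median, min, max (and more) per numeric column; should match numdes.
lapply(matint, table) # Counts per level for each categorical variable.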
### Any differences in demographics or political orientation between excluded and included participants?
### Must be run on the sample with no exclusions: go back to where participants were excluded (near the top)
### and overwrite the exclusion using the commented-out inclusion code there.
summary(demodiffs <- glm(Excluded~PolPCA+Part_sex+Part_age+EthBin+Inc+EduCat+Par, data=dat3, family=binomial))

################## Figure 1 Part A #####################
betas <- sa_wpall$coef[c("SocCons", "MilCons", "EcoCons")]
sds <- coef(summary(sa_wpall))[, 2][c("SocCons", "MilCons", "EcoCons")]
lci <- betas-1.96*sds; hci <- betas+1.96*sds
betas.cis <- data.frame(betas, lci, hci)
loadfonts(device="postscript")
cairo_ps("Figure 1b.eps", width=6.5, height=4, family="Neue Helvetica Condensed BQ", pointsize=90, fallback_resolution=400, onefile=TRUE)
options(forestplot_new_page = FALSE)
xticks <- seq(-0.4, 0.6, by=0.1)
xtlab <- rep(c(TRUE, FALSE), length.out=length(xticks))
attr(xticks, "labels") <- xtlab
attr(xticks, "cex") <- 5
forestplot(labeltext=c("Social", "Military", "Economic"), mean=betas.cis$betas, lower=betas.cis$lci, upper=betas.cis$hci,
           hrzl_lines=TRUE, zero=0, xticks.digits=1, grid=FALSE, lwd.zero=1.5, lwd.ci=3, lwd.xaxis=1.5, lty.ci=1,
           txt_gp=fpTxtGp(label=gpar(fontfamily="", cex=0.11), ticks=gpar(fontfamily="", cex=0.10)),
           col=fpColors(zero="black", lines="lightseagreen", box="lightseagreen"), boxsize=0.15, xticks=xticks,
           new_page=FALSE, graphwidth=unit(2.5, "inches"), mar=unit(c(0, 0, 0, 2.7), "inches"), graph.pos="right",
           colgap=unit(0.03, "npc"), lineheight=unit(0.2, "npc"))
x1 <- unit(.035, 'npc')
y1 <- unit(0.85, 'npc')
grid.text('a', x1, y1, gp=gpar(fontsize=18, font=2))
x2 <- unit(.625, 'npc')
y2 <- unit(0.85, 'npc')
grid.text('b', x2, y2, gp=gpar(fontsize=18, font=2))
dev.off() # Close the EPS device so that "Figure 1b.eps" is written to disk.
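## The CIs above use the normal approximation (estimate +/- 1.96*s.e.); as a cross-check, confint()
## gives t-based intervals directly, which should be nearly identical at this sample size.
confint(sa_wpall)[c("SocCons", "MilCons", "EcoCons"),]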
################## Figure S1 (difference) FOR BOTH STUDIES #############################
### Part A ###
mn1.seed <- c(0.34371283, 0.3384891, 0.34756092, 0.35608085, 0.35238371)
sd1.seed <- c(0.14799232, 0.1481123, 0.14761449, 0.14785464, 0.14832646)
## Create 95% CIs.
for.forest1 <- data.frame(matrix(rep(NA, length(mn1.seed)*3), ncol=3))
colnames(for.forest1) <- c("Mean", "Lower95", "Upper95")
for (i in 1:length(mn1.seed)) {
  for.forest1$Mean[i] <- mn1.seed[i]
  for.forest1$Upper95[i] <- mn1.seed[i]+1.96*sd1.seed[i]
  for.forest1$Lower95[i] <- mn1.seed[i]-1.96*sd1.seed[i]
}
pdf("Figure S1.pdf", width=14, height=12)
options(forestplot_new_page = FALSE)
xticks <- seq(-0.2, 0.7, by=0.1)
xtlab <- rep(c(TRUE, FALSE), length.out=length(xticks))
attr(xticks, "labels") <- xtlab
attr(xticks, "cex") <- 5
forestplot(labeltext=c("Seed 1", "Seed 2", "Seed 3", "Seed 4", "Seed 5"), mean=for.forest1$Mean, lower=for.forest1$Lower95, upper=for.forest1$Upper95,
           hrzl_lines=TRUE, zero=0, xticks.digits=1, grid=FALSE, lwd.zero=3, lty.zero=3, lwd.ci=3, lwd.xaxis=2, lty.ci=1,
           txt_gp=fpTxtGp(label=gpar(fontfamily="", cex=3), ticks=gpar(fontfamily="", cex=2.5)),
           col=fpColors(zero="black", lines="dodgerblue3", box="dodgerblue3"), clip=c(-.2, 0.7), boxsize=0.1, xticks=xticks,
           new_page=FALSE, graphwidth=unit(4, "inches"), mar=unit(c(.1, .1, .1, 7), "inches"), ticks=gpar(fontfamily="", cex=8))
### Part B ###
mn2.seed <- c(0.1635170, 0.2053504, 0.2738185, 0.2681585, 0.24880972)
sd2.seed <- c(0.1455440, 0.1424563, 0.1398130, 0.1426056, 0.14474512)
for.forest2 <- data.frame(matrix(rep(NA, length(mn2.seed)*3), ncol=3))
colnames(for.forest2) <- c("Mean", "Lower95", "Upper95")
for (i in 1:length(mn2.seed)) {
  for.forest2$Mean[i] <- mn2.seed[i]
  for.forest2$Upper95[i] <- mn2.seed[i]+1.96*sd2.seed[i]
  for.forest2$Lower95[i] <- mn2.seed[i]-1.96*sd2.seed[i]
}
forestplot(labeltext=c("Seed 1", "Seed 2", "Seed 3", "Seed 4", "Seed 5"), mean=for.forest2$Mean, lower=for.forest2$Lower95, upper=for.forest2$Upper95,
           hrzl_lines=TRUE, zero=0, xticks.digits=1, grid=FALSE, lwd.zero=3, lty.zero=3, lwd.ci=3, lwd.xaxis=2, lty.ci=1,
           txt_gp=fpTxtGp(label=gpar(fontfamily="", cex=3), ticks=gpar(fontfamily="", cex=2.5)),
           col=fpColors(zero="black", lines="dodgerblue3", box="dodgerblue3"), clip=c(-.2, 0.7), boxsize=0.1, xticks=xticks,
           new_page=FALSE, graphwidth=unit(4, "inches"), mar=unit(c(.1, 7, .1, .1), "inches"))
x1 <- unit(.025, 'npc')
y1 <- unit(.95, 'npc')
grid.text('A', x1, y1, gp=gpar(fontsize=40, font=2))
x2 <- unit(.525, 'npc')
y2 <- unit(.95, 'npc')
grid.text('B', x2, y2, gp=gpar(fontsize=40, font=2))
dev.off()

#################### Figure S2 ######################
### Part A ###
mn3.seed <- c(0.448617492, 0.455101610, 0.4688799668, 0.4683771607, 0.4625074206)
sd3.seed <- c(0.138363075, 0.138020240, 0.1379706453, 0.1378186898, 0.1381462809)
for.forest3 <- data.frame(matrix(rep(NA, length(mn3.seed)*3), ncol=3))
colnames(for.forest3) <- c("Mean", "Lower95", "Upper95")
for (i in 1:length(mn3.seed)) {
  for.forest3$Mean[i] <- mn3.seed[i]
  for.forest3$Upper95[i] <- mn3.seed[i]+1.96*sd3.seed[i]
  for.forest3$Lower95[i] <- mn3.seed[i]-1.96*sd3.seed[i]
}
pdf("Figure S2.pdf", width=14, height=12)
options(forestplot_new_page = FALSE)
xticks <- seq(-0.2, 0.8, by=0.1)
xtlab <- rep(c(TRUE, FALSE), length.out=length(xticks))
attr(xticks, "labels") <- xtlab
attr(xticks, "cex") <- 5
forestplot(labeltext=c("Seed 1", "Seed 2", "Seed 3", "Seed 4", "Seed 5"), mean=for.forest3$Mean, lower=for.forest3$Lower95, upper=for.forest3$Upper95,
           hrzl_lines=TRUE, zero=0, xticks.digits=1, grid=FALSE, lwd.zero=3, lty.zero=3, lwd.ci=3, lwd.xaxis=2, lty.ci=1,
           txt_gp=fpTxtGp(label=gpar(fontfamily="", cex=3), ticks=gpar(fontfamily="", cex=2.5)),
           col=fpColors(zero="black", lines="lightseagreen", box="lightseagreen"), clip=c(-.2, 0.8), boxsize=0.1, xticks=xticks,
           new_page=FALSE, graphwidth=unit(4, "inches"), mar=unit(c(.1, .1, .1, 7), "inches"), ticks=gpar(fontfamily="", cex=8))
### Part B ###
mn4.seed <- c(0.24984745, 0.32961018, 0.2134822, 0.28430382, 0.2125608)
sd4.seed <- c(0.13883124, 0.13811373, 0.1444514, 0.14271048, 0.1402058)
for.forest4 <- data.frame(matrix(rep(NA, length(mn4.seed)*3), ncol=3))
colnames(for.forest4) <- c("Mean", "Lower95", "Upper95")
for (i in 1:length(mn4.seed)) {
  for.forest4$Mean[i] <- mn4.seed[i]
  for.forest4$Upper95[i] <- mn4.seed[i]+1.96*sd4.seed[i]
  for.forest4$Lower95[i] <- mn4.seed[i]-1.96*sd4.seed[i]
}
forestplot(labeltext=c("Seed 1", "Seed 2", "Seed 3", "Seed 4", "Seed 5"), mean=for.forest4$Mean, lower=for.forest4$Lower95, upper=for.forest4$Upper95,
           hrzl_lines=TRUE, zero=0, xticks.digits=1, grid=FALSE, lwd.zero=3, lty.zero=3, lwd.ci=3, lwd.xaxis=2, lty.ci=1,
           txt_gp=fpTxtGp(label=gpar(fontfamily="", cex=3), ticks=gpar(fontfamily="", cex=2.5)),
           col=fpColors(zero="black", lines="lightseagreen", box="lightseagreen"), clip=c(-.2, 0.7), boxsize=0.1, xticks=xticks,
           new_page=FALSE, graphwidth=unit(4, "inches"), mar=unit(c(.1, 7, .1, .1), "inches"))
x1 <- unit(.025, 'npc')
y1 <- unit(.95, 'npc')
grid.text('A', x1, y1, gp=gpar(fontsize=40, font=2))
x2 <- unit(.525, 'npc')
y2 <- unit(.95, 'npc')
grid.text('B', x2, y2, gp=gpar(fontsize=40, font=2))
dev.off()

#################### Figure S3a #####################
par(mar=c(5, 6, 0, 1))
scatter.smooth(dat3$Part_age, dat3$Difference, xlab="Age", ylab="Difference in hazard-benefit credulity",
               cex.axis=2, cex.lab=2, cex=1.5, span=0.5, mgp=c(3.5, 1.5, 0), lwd=2)