# Load analysis packages.
# NOTE(review): forestplot is called near the end of this script (Figure 1
# Part B) but was never loaded; added here so the figure code runs.
library(FactoMineR); library(reshape); library(psych); library(polycor)
library(lmerTest); library(car); library(mice); library(AICcmodavg)
library(extrafont); library(lm.beta); library(forestplot)

# Variance inflation factors for mixed-effect models generated with nlme or
# lme4. Adapted from rms::vif, courtesy of aufrank and denohora:
# https://github.com/aufrank/R-hacks/blob/master/mer-utils.R
vif.mer <- function (fit) {
  v <- vcov(fit)
  nam <- names(fixef(fit))
  ## exclude intercepts before inverting the scaled covariance matrix
  ns <- sum(1 * (nam == "Intercept" | nam == "(Intercept)"))
  if (ns > 0) {
    v <- v[-(1:ns), -(1:ns), drop = FALSE]
    nam <- nam[-(1:ns)]
  }
  d <- diag(v)^0.5
  v <- diag(solve(v / (d %o% d)))
  names(v) <- nam
  v
}

dat <- read.csv("[Insert path here]/Fessler et al. Study 2 Data.csv", header = TRUE)

##### Exclusions #####
## Prepare to impute values for individuals who were missing less than 10% of
## the questions in each of the following sections; flag the rest as incomplete.

# Flag rows of `d` in which more than 10% of the columns whose names match
# `pattern` are NA. Returns a vector with 1 where incomplete and NA elsewhere
# (matching the original flag columns, which were only ever assigned 1).
flag_incomplete <- function(d, pattern) {
  tmp <- subset(d, select = grep(pattern, colnames(d)))
  maxi <- 0.1 * length(tmp)  # length() of a data.frame = number of columns
  out <- rep(NA, nrow(d))
  out[rowSums(is.na(tmp)) > maxi] <- 1
  out
}

dat$Cred_Incomplete      <- flag_incomplete(dat, "_Belief")    # Credulity
dat$Ravens_Incomplete    <- flag_incomplete(dat, "Raven_")     # Raven's matrices
dat$Wonderlic_Incomplete <- flag_incomplete(dat, "Wonderlic_") # Wonderlic test
dat$Values_Incomplete    <- flag_incomplete(dat, "WP_")        # Values
dat$SW_Incomplete        <- flag_incomplete(dat, "SW_")        # Society works
### Exclusion criteria. ###
# We only exclude people who failed to answer "letters in obligatory" because
# those who did answer were within a reasonable range of miscounting,
# especially counting i and l as one letter. As RAs assigned to pilot the
# survey spent far longer than 10 minutes, we decided that one could not
# complete the survey in less than 10 and have paid enough attention.
excluded <- subset(dat,
                   (Cred_Incomplete == 1 | Ravens_Incomplete == 1 |
                    Wonderlic_Incomplete == 1 | Values_Incomplete == 1 |
                    SW_Incomplete == 1 | is.na(LettersInObligatory) |
                    Less_than_10_minutes == 1 | American == 2 | is.na(American) |
                    EnglishFirst == 2 | is.na(EnglishFirst)))

# TRUE for each row of x.1 that also appears -- as a pasted-together string of
# all its fields -- among the rows of x.2.
fun.exc <- function(x.1, x.2, ...) {
  x.1p <- do.call("paste", x.1)
  x.2p <- do.call("paste", x.2)
  x.1p %in% x.2p
}
dat$Excluded <- fun.exc(dat, excluded)

## Code that excludes. Run only to exclude participants who did not meet exclusion criteria.
dat1 <- dat[dat$Excluded == FALSE, ]
## Code that includes. Run to include participants who met the exclusion criteria.
#dat1<-dat

################################# Balancing the sample ############################################
### Randomly eliminate liberals to approach a balanced sample (i.e., mean political orientation 5 or greater). ###
# Gallup 2016 degree of social conservatism: originally a 1-5 scale, converted
# to 1-9 here for comparability. See citation in the main text. Unsures are
# excluded; numbers here are the proportion of the remaining 98% of the sample
# in each category.
mericans <- c(rep(1, 10), rep(3, 22), rep(5, 31), rep(7, 27), rep(9, 7))
m_gal <- mean(mericans)
s_gal <- sd(mericans)
# For our own distribution of social conservatism on a Likert scale of 1-9,
# eliminate individuals randomly to approximate the Gallup distribution.
# There are two parts to this: first, individuals are eliminated one by one
# until the mean of our sample (on a 1-9 scale) matches the mean of the
# Gallup poll (also on a 1-9 scale).
# Second, we preferentially eliminate participants in the most liberal
# categories -- 1 and 2 -- because that's where we have particular
# overrepresentation. The loop eliminates people in those categories until the
# two together represent 10% of the sample or less, then it begins to eliminate
# participants from category 3. For the Likert scale and question stem, see the SOM.
#
# NOTE(review): the loop condition in the source was garbled
# ("as.logical(mean(dat2$PoliOr,na.rm=TRUE).100)", a syntax error). It has
# been reconstructed from the description above -- eliminate until the sample
# mean reaches the Gallup mean, drawing from categories 1-2 while they exceed
# 10% of the remaining sample -- confirm against the authors' original script.
dat2 <- dat1
for (i in 1:nrow(dat2)) {
  if (mean(dat2$PoliOr, na.rm = TRUE) < m_gal) {
    if (sum(dat2$PoliOr < 3, na.rm = TRUE) / sum(!is.na(dat2$PoliOr)) > 0.100) {
      tmp <- dat2[dat2$PoliOr < 3, ]
    } else {
      tmp <- dat2[dat2$PoliOr == 3, ]
    }
    z <- sample(tmp$ID, 1)
    dat2[dat2$ID %in% z, c("PoliOr")] <- NA
    rm(tmp, z)
  } else {
    break
  }
}
# Elimination above worked by setting the participant's value for the political
# orientation Likert to NA. This gets rid of those individuals.
dat2 <- dat2[!is.na(dat2$PoliOr), ]

## To avoid randomly eliminating liberals, e.g. to replicate Table S11, use the
## line of code immediately below. It will overwrite the dataframe on which the
## loop operated above.
#dat2<-dat1

## Remove variables that are no longer being used.
dat2 <- subset(dat2, select = c(-American, -EnglishFirst, -LettersInObligatory,
                                -PayAttn, -Less_than_10_minutes, -SM_time,
                                -time_taken, -Cred_Incomplete, -Ravens_Incomplete,
                                -Wonderlic_Incomplete, -Values_Incomplete,
                                -SW_Incomplete))
dat2$Part_sex[dat2$Part_sex == ""] <- NA     # People who failed to report their sex become NA.
dat2$Part_sex <- droplevels(dat2$Part_sex)   # Remove the now-empty level of "".

############################# Impute missing values using predictive mean matching ##################################
len <- rep('', length(dat2))
# Columns that contain at least one NA: candidates for imputation...
nms <- which(colnames(dat2) %in% colnames(dat2)[colSums(is.na(dat2)) > 0])
# ...but we don't want to impute height (not used in these analyses) or
# parenthood (which much of the sample failed to answer), nor the exclusion flag.
# NOTE(review): the source blanked these positions with "" and later coerced
# the vector with as.integer(), producing NA subscripts -- an error in
# subscripted assignment. Dropping them from the index vector instead.
nms <- nms[!nms %in% which(colnames(dat2) %in% c("Feet", "Inches", "HalfInch", "Excluded", "Par"))]
len[as.integer(nms)] <- 'pmm'  # "pmm" signals predictive mean matching to the mice function.

### Set seed to stabilize results across re-runs of this code. ###
#x1 <- runif(1, 0, 100000000) #We did this 5 times to randomly select each of the below seeds.
x1 <- 45575935; x2 <- 4990306; x3 <- 96058240; x4 <- 54475644; x5 <- 49487042
set.seed(x3)
imp <- mice(dat2, method = len)
# mice imputes five times by default; complete() returns the data frame with
# imputed values filled in.
dat3 <- complete(imp)
# To examine the sample without imputation, as we did for Table S5, run the
# code immediately below to override imputation.
#dat2->dat3

########################### Prepare variables ####################################
##### Values scale, created by Wilson & Patterson, modified by Dodd et al. #####
### Code responses that are "liberal" -1, "conservative" 1, and unknown 0.
## First, take responses geared towards conservatives and make agreement 1.
# Excludes warrantless search and use of nuclear weapons. See Appendix 2 for explanation.
cons <- subset(dat3, select = c(WP_SP, WP_DP, WP_Pat, WP_BT, WP_BC, WP_Tax,
                                WP_WB, WP_MS, WP_SG, WP_DS, WP_Obe, WP_CS))
adjcons <- matrix(NA, nrow = nrow(cons), ncol = length(cons))
colnames(adjcons) <- colnames(cons)
for (j in seq_len(length(cons))) {
  adjcons[, j][cons[, j] == 1] <- 1    # agree     -> conservative (+1)
  adjcons[, j][cons[, j] == 2] <- -1   # disagree  -> liberal (-1)
  adjcons[, j][cons[, j] == 3] <- 0    # uncertain -> 0
}
adjcons <- as.data.frame(adjcons)

## Second, take responses geared towards liberals and make agreement -1.
# Excludes free trade and globalization. See Appendix 2 for an explanation.
lib <- subset(dat3, select = c(WP_Pac, WP_Soc, WP_Por, WP_II, WP_WE, WP_PS,
                               WP_GM, WP_AR, WP_Evo, WP_WS, WP_GC, WP_PC,
                               WP_FA, WP_Com))
adjlib <- matrix(NA, nrow = nrow(lib), ncol = length(lib))
colnames(adjlib) <- colnames(lib)
for (j in seq_len(length(lib))) {
  adjlib[, j][lib[, j] == 1] <- -1   # agree     -> liberal (-1)
  adjlib[, j][lib[, j] == 2] <- 1    # disagree  -> conservative (+1)
  adjlib[, j][lib[, j] == 3] <- 0    # uncertain -> 0
}
adjlib <- as.data.frame(adjlib)
adj <- cbind(adjcons, adjlib)
# Sum of responses to the scale, per Dodd et al. Higher values mean more conservative.
dat3$WiPa <- rowSums(adj)

### Break the items into those concerning social conservatism, economic conservatism, and military conservatism ###
soccons <- subset(adj, select = c(WP_GC, WP_SP, WP_BT, WP_Por, WP_II, WP_WE,
                                  WP_PS, WP_GM, WP_AR, WP_Evo))
dat3$SocCons <- PCA(soccons, graph = FALSE)$ind$coord[, 1]  # first principal component
ecocons <- subset(adj, select = c(WP_Tax, WP_SG, WP_Soc, WP_WS, WP_PC, WP_FA, WP_CS))
dat3$EcoCons <- PCA(ecocons, graph = FALSE)$ind$coord[, 1]
milcons <- subset(adj, select = c(WP_DP, WP_Obe, WP_Pat, WP_BC, WP_WB, WP_MS,
                                  WP_Pac, WP_DS, WP_Com))
dat3$MilCons <- PCA(milcons, graph = FALSE)$ind$coord[, 1]

##### Dodd et al. Society Works scale #####
# Reverse code one item accidentally reversed by Dodd et al.
dat3$SW_LC <- c(reverse.code(dat3$SW_LC, keys = c(-1)))
# Code responses that are "liberal" as -1, "conservative" 1.
dodd <- dat3[, grep("SW_", colnames(dat3))]
adjdodd <- matrix(NA, nrow = nrow(dodd), ncol = length(dodd))
colnames(adjdodd) <- colnames(dodd)
for (j in seq_len(length(dodd))) {
  adjdodd[, j][dodd[, j] == 1] <- 1
  adjdodd[, j][dodd[, j] == 2] <- -1
}
adjdodd <- as.data.frame(adjdodd)
# Sum of responses to the scale, per Dodd et al. Higher values mean more conservative.
dat3$Dodd <- rowSums(adjdodd)

##### Political category #####
dat3$NumPolCat <- rep(NA, nrow(dat3))
## Break parties into conservative, liberal, and neither.
# 1=Republican, 2=Democratic, 3=Tea Party, 4=Libertarian, 5=Green, 6=none.
# As before, -1 is liberal affiliation, 1 is conservative affiliation, 0 is none or libertarian.
party <- as.numeric(dat3$Party)
dat3$NumPolCat[party == 1] <- 1    # Republican
dat3$NumPolCat[party == 2] <- -1   # Democratic
dat3$NumPolCat[party == 3] <- 1    # Tea Party
dat3$NumPolCat[party == 4] <- 0    # Libertarian
dat3$NumPolCat[party == 5] <- -1   # Green
dat3$NumPolCat[party == 6] <- 0    # none

##### Create summary political orientation measures #####
polit <- subset(dat3, select = c(PoliOr, WiPa, Dodd, NumPolCat))
# Z-score each of the four political orientation measures so that they have comparable scales.
polit <- sapply(polit, scale, center = TRUE, scale = TRUE)
# PCA first component for the four political orientation measures.
dat3$PolPCA <- PCA(polit, graph = FALSE)$ind$coord[, 1]
# Dodd et al. summed the four political orientation measures. We use this
# summed summary measure as a robusticity check (Table S7).
dat3$PolSum <- rowSums(polit, na.rm = FALSE)

################ Adjust other variables of interest ################
## Bin ethnicity because the sample is majority white.
dat3$EthBin <- rep(NA, nrow(dat3))
dat3$EthBin[dat3$Part_Ethnicity == 7] <- "w"  # white
dat3$EthBin[dat3$Part_Ethnicity != 7] <- "o"  # other
dat3$EthBin <- as.factor(dat3$EthBin)

## Round extreme values of education to avoid undue influence of outliers.
## All rounded variables listed in Appendix 2.
hi <- quantile(dat3$Edu, 0.975, na.rm = TRUE)
lo <- quantile(dat3$Edu, 0.025, na.rm = TRUE)
dat3$Edu[dat3$Edu >= hi] <- hi
dat3$Edu[dat3$Edu <= lo] <- lo

## Convert education numeric values to what they represent.
dat3$EduCat <- rep(NA, nrow(dat3))
dat3$EduCat[dat3$Edu == 3] <- "HS Grad"
dat3$EduCat[dat3$Edu == 4] <- "Some AA"
dat3$EduCat[dat3$Edu == 5] <- "AA Grad"
dat3$EduCat[dat3$Edu == 6] <- "BA Grad"
dat3$EduCat[dat3$Edu == 7] <- "Some Adv"
dat3$EduCat[dat3$Edu == 8] <- "Adv Grad"
dat3$EduCat <- as.factor(dat3$EduCat)

## Most people for whom child data are missing also didn't answer questions
## about religion, marital status, etc. -- in other words, we can't infer that
## they didn't have kids and thus didn't answer.
dat3$Par[dat3$Par==""]<-NA;droplevels(dat3$Par)->dat3$Par ## Calculate participant height - orthogonal to present hypotheses; included here because included in the survey and exploratory analyses. dat3$HaInch<-rep(NA,nrow(dat3));dat3$HaInch[dat3$HalfInch==1]<-0;dat3$HaInch[dat3$HalfInch==2]<-0.5 dat3$Feet*12->dat3$FeetInInch c(which(colnames(dat3)=="FeetInInch"),which(colnames(dat3)=="HaInch"),which(colnames(dat3)=="Inches"))->z dat3$Height<-ifelse(is.na(dat3$Feet),NA,rowSums(dat3[,z],na.rm=TRUE)) ### Generalized reasoning ability metrics ### # Award points to correct answers dat3$Ravens1<-ifelse(dat3$Raven_1_Correct_is_2==2,1,0) dat3$Ravens2<-ifelse(dat3$Raven_2_Correct_is_2==2,1,0) dat3$Ravens3<-ifelse(dat3$Raven_3_Correct_is_4==4,1,0) dat3$Ravens4<-ifelse(dat3$Raven_4_Correct_is_2==2,1,0) dat3$Ravens5<-ifelse(dat3$Raven_5_Correct_is_5==5,1,0) dat3$Ravens6<-ifelse(dat3$Raven_6_Correct_is_8==8,1,0) dat3$Ravens7<-ifelse(dat3$Raven_7_Correct_is_8==8,1,0) dat3$Ravens8<-ifelse(dat3$Raven_8_Correct_is_1==1,1,0) dat3$Ravens9<-ifelse(dat3$Raven_9_Correct_is_7==7,1,0) dat3$Ravens10<-ifelse(dat3$Raven_10_Correct_is_4==4,1,0) dat3$Ravens11<-ifelse(dat3$Raven_11_Correct_is_6==6,1,0) dat3$Ravens12<-ifelse(dat3$Raven_12_Correct_is_6==6,1,0) dat3$Ravens13<-ifelse(dat3$Raven_13_Correct_is_8==8,1,0) dat3$Ravens14<-ifelse(dat3$Raven_14_Correct_is_1==1,1,0) dat3$Ravens15<-ifelse(dat3$Raven_15_Correct_is_5==5,1,0) # We first explored the PCA of these measures. The first PC captures only 25.52% of variation. Instead, we sum the responses across all items, then convert this distribution to z-scores to make it more comparable to the Wonderlic test, which has a different number of items. This also makes it such that participants are scored relative to each other, where higher values are higher reasoning capabilities relative to others in the sample. 
scale(rowSums(dat3[,grep("Ravens",names(dat3))]),center=TRUE,scale=TRUE)[,1]->dat3$RavenSum dat3$Wonderlics1<-ifelse(dat3$Wonderlic_1_Correct_is_40==40,1,0) dat3$Wonderlics2<-ifelse(dat3$Wonderlic_2_Correct_is_4==4,1,0) dat3$Wonderlics3<-ifelse(dat3$Wonderlic_3_Correct_is_3==3,1,0) dat3$Wonderlics4<-ifelse(dat3$Wonderlic_4_Correct_is_65==65,1,0) dat3$Wonderlics5<-ifelse(dat3$Wonderlic_5_Correct_is_2==2,1,0) dat3$Wonderlics6<-ifelse(dat3$Wonderlic_6_Correct_is_192==192,1,0) dat3$Wonderlics7<-ifelse(dat3$Wonderlic_7_Correct_is_3==3,1,0) dat3$Wonderlics8<-ifelse(dat3$Wonderlic_8_Correct_is_17==17,1,0) dat3$Wonderlics9<-ifelse(dat3$Wonderlic_9_Correct_is_1==1,1,0) dat3$Wonderlics10<-ifelse(dat3$Wonderlic_10_Correct_is_9==9,1,0) # First PC summarizes just 25.04%. Summary measure again. scale(c(rowSums(dat3[,grep("Wonderlics",names(dat3))])),center=TRUE,scale=TRUE)[,1]->dat3$WonderSum # 53% correlated to Raven score. However, they exhibit no collinearity in later models. ## Center age at age of youngest participant so that intercept in models is interpretable minz<-min(dat3$Part_age,na.rm=TRUE) dat3$Part_age<-dat3$Part_age-minz ## Likewise, center income such that under 20k (represented by #1) is the baseline. There are so many bins for income, we treat this as continuous. dat3$Inc<-dat3$Inc-1 ### Center Likert responses so that intercepts are interpretable ### for (i in 1:length(dat3[,grep("_Belief",colnames(dat3))])) {dat3[,grep("_Belief",colnames(dat3))[i]]<-dat3[,grep("_Belief",colnames(dat3))[i]]-1} ### Round down (and, for height, round up) additional outliers. ### dat3$Inc[dat3$Inc>=quantile(dat3$Inc,0.975,na.rm=TRUE)]<-quantile(dat3$Inc,0.975,na.rm=TRUE) #Extreme values rounded. See Appendix 2. 
raven_lo <- quantile(dat3$RavenSum, 0.025, na.rm = TRUE)
dat3$RavenSum[dat3$RavenSum <= raven_lo] <- raven_lo
wonder_lo <- quantile(dat3$WonderSum, 0.025, na.rm = TRUE)
dat3$WonderSum[dat3$WonderSum <= wonder_lo] <- wonder_lo

### Relevel two factors such that the values held at zero offer a more interpretable contrast ###
dat3$EduCat <- relevel(dat3$EduCat, "HS Grad")
dat3$Par <- relevel(dat3$Par, "No")

######################################################
##################### ANALYSES #######################
# Separate out benefit credulity, benefit gravity, hazard credulity, hazard gravity.
bennm <- c("Battery_Belief", "Carrots_Belief", "Stomach_Belief", "CreditCards_Belief",
           "Cats_Belief", "Stockwood_Belief", "Airline_Belief", "Car_Belief")
gravnm <- grep("_Benefit", colnames(dat3))
haznm <- c("CellPhone_Belief", "Kale_Belief", "Knees_Belief", "Keycards_Belief",
           "Sharks_Belief", "Terrorism_Belief", "Masks_Belief", "Lightning_Belief")
rsknm <- grep("_Risk", colnames(dat3))

#### Calculate and analyze bias in hazard relative to benefit credulity,
#### weighted by individual perceptions of question gravity ####
toMatch <- c("_Belief", "_Risk", "_Benefit")
# NAs exist for credulity and gravity measures when imputation is not
# performed. Individuals with NAs need to be excluded to avoid biasing their
# difference scores. This eliminates nobody when imputation has been performed
# (as there will be no NAs).
dat4 <- dat3[rowSums(is.na(dat3[, grep(paste(toMatch, collapse = "|"), colnames(dat3))])) == 0, ]
crhaz <- dat4[, haznm]
crben <- dat4[, bennm]
perben <- subset(dat4, select = c(grep("_Benefit", colnames(dat3))))
perrsk <- subset(dat4, select = c(grep("_Risk", colnames(dat3))))
# Below, we weight a participant's response for each credulity item by his or
# her own perceived gravity (either in terms of risks or benefits) for that item.
# Create the *_Belief_adj columns (initialized to NA).
for (i in 1:16) {
  dat4[, paste(grep("_Belief", colnames(dat4), value = TRUE)[i], "_adj", sep = "")] <- rep(NA, nrow(dat4))
}
# Weight benefit credulity by perceived benefit gravity. Vectorized over
# participants: the source looped row by row multiplying scalars, which is
# equivalent to multiplying the two columns once per item.
for (j in 1:length(crben)) {
  stm <- sub("_Belief.*", "", colnames(crben))[j]
  k <- which(sub("_Benefit.*", "", colnames(perben)) %in% stm)
  dat4[, paste0(stm, "_Belief_adj")] <- perben[, k] * crben[, j]
  rm(stm, k)
}
# Weight hazard credulity by perceived risk gravity (same vectorization).
for (j in 1:length(crhaz)) {
  stm <- sub("_Belief.*", "", colnames(crhaz))[j]
  k <- which(sub("_Risk.*", "", colnames(perrsk)) %in% stm)
  dat4[, paste0(stm, "_Belief_adj")] <- perrsk[, k] * crhaz[, j]
  rm(stm, k)
}

################### Main analysis ####################
hadj <- paste0(haznm, "_adj"); badj <- paste0(bennm, "_adj")
# The difference between the average of hazard credulity, each item weighted by
# perceived gravity, and the average of benefit credulity, each item weighted
# by perceived gravity.
dat4$Difference <- rowMeans(dat4[, paste0(haznm, "_adj")]) - rowMeans(dat4[, paste0(bennm, "_adj")])

### Table 1 (and Table S4) model ###
sa_diff <- lm(Difference ~ PolPCA + Part_sex + Part_age + EthBin + Inc + EduCat + RavenSum + WonderSum, dat4)
# Variance inflation factor function used with all LMs. Just fill in the
# appropriate model name.
vif(sa_diff)
# Provides the standardized beta for each model.
t(t(lm.beta(sa_diff)$standardized.coefficients))

### Table S12 ###
sa_sex <- lm(Difference ~ PolPCA * Part_sex + Part_age + EthBin + Inc + EduCat + RavenSum + WonderSum, dat4)
### Table S13. Investigated because age has a significant main effect in some models with Study 1 data. ###
sa_age <- lm(Difference ~ PolPCA * Part_age + Part_sex + EthBin + Inc + EduCat + RavenSum + WonderSum, dat4)
# Do interaction models provide a better fit? Examine weighted AIC comparison.
aictab(list(sa_sex, sa_diff))
aictab(list(sa_age, sa_diff))

### Table 2 ###
# Values (Wilson & Patterson)
sa_wp <- lm(Difference ~ WiPa + Part_sex + Part_age + EthBin + Inc + EduCat + RavenSum + WonderSum, data = dat4)
# Society Works (Dodd et al.)
sa_dd <- lm(Difference ~ Dodd + Part_sex + Part_age + EthBin + Inc + EduCat + RavenSum + WonderSum, data = dat4)
# Political Likert
sa_po <- lm(Difference ~ PoliOr + Part_sex + Part_age + EthBin + Inc + EduCat + RavenSum + WonderSum, data = dat4)
# Political category
sa_cat <- lm(Difference ~ as.factor(NumPolCat) + Part_sex + Part_age + EthBin + Inc + EduCat + RavenSum + WonderSum, data = dat4)

### Table 3 ###
sa_wpall1 <- lm(Difference ~ SocCons + EcoCons + MilCons + Part_sex + Part_age + EthBin + Inc + EduCat + RavenSum + WonderSum, data = dat4)

### Exclusion of one item (and its gravity) at a time (Figure S1) ###
exclusions <- list()
alladj <- c(hadj, badj)
for (i in seq_along(alladj)) {
  tmp <- dat4[, alladj[-i]]
  bennm1 <- badj[badj %in% alladj[-i]]
  haznm1 <- hadj[hadj %in% alladj[-i]]
  oneout <- rowMeans(tmp[, haznm1]) - rowMeans(tmp[, bennm1])
  exclusions[[i]] <- summary(lm(oneout ~ PolPCA + Part_sex + Part_age + EthBin + Inc + EduCat + RavenSum + WonderSum, dat4))
}
# Extracts parameter, s.e., and p value for the terrorism-excluded fit (the
# coefficient matrix has 13 rows). These are then used in Figure S1 (see code
# for Study 1).
exclusions[[6]]$coef[c(2, 13 * 1 + 2, 13 * 3 + 2)]

### Hazards and benefits separately (Tables S6a-b) ###
dat4$HazWtd <- rowMeans(dat4[, paste0(haznm, "_adj")])
dat4$BenWtd <- rowMeans(dat4[, paste0(bennm, "_adj")])
sa_haz <- lm(HazWtd ~ PolPCA + Part_sex + Part_age + EthBin + Inc + EduCat + RavenSum + WonderSum, dat4)
sa_ben <- lm(BenWtd ~ PolPCA + Part_sex + Part_age + EthBin + Inc + EduCat + RavenSum + WonderSum, dat4)

### Exclusion of one item (and its gravity) at a time from HAZARDS credulity (Figure S2) ###
exclusions_h <- list()
for (i in seq_along(hadj)) {
  tmp <- dat4[, hadj[-i]]
  haznm1 <- hadj[-i]
  oneout <- rowMeans(tmp[, haznm1])
  exclusions_h[[i]] <- summary(lm(oneout ~ PolPCA + Part_sex + Part_age + EthBin + Inc + EduCat + RavenSum + WonderSum, dat4))
}
# Again, provides the parameter, s.e., and p value for the fit excluding the
# terrorism item.
exclusions_h[[6]]$coef[c(2, 13 * 1 + 2, 13 * 3 + 2)]
### Exclusion of one item (and its gravity) at a time from BENEFIT credulity ###
exclusions_b <- list()
for (i in seq_along(badj)) {
  tmp <- dat4[, badj[-i]]
  bennm1 <- badj[-i]
  oneout <- rowMeans(tmp[, bennm1])
  exclusions_b[[i]] <- summary(lm(oneout ~ PolPCA + Part_sex + Part_age + EthBin + Inc + EduCat + RavenSum + WonderSum, dat4))
}

### Robusticity check: political summary as a sum (Table S7) ###
sa_podiff <- lm(Difference ~ PolSum + Part_sex + Part_age + EthBin + Inc + EduCat + RavenSum + WonderSum, dat4)

### Robusticity check: multilevel model with random effects for individual and question (Table S9a-b) ###
# Switch responses from wide format (one participant per row) to long format
# (one item for each participant in each row). Creates an ID for each question
# ("Quest") and an "id" for each participant (unnecessary, because we already
# have participant IDs, so we ignore these).
datlh <- reshape(dat3, varying = list(haznm, rsknm), v.names = c("HazResp", "Gravity"),
                 timevar = "Quest", times = haznm, direction = "long")
datlb <- reshape(dat3, varying = list(bennm, gravnm), v.names = c("BenResp", "Gravity"),
                 timevar = "Quest", times = bennm, direction = "long")
multib <- subset(datlb, select = c(BenResp, PolPCA, Gravity, Part_sex, Part_age, EthBin, Inc,
                                   EduCat, WiPa, Dodd, PoliOr, NumPolCat, SocCons, EcoCons,
                                   MilCons, PolSum, Quest, ID, RavenSum, WonderSum))
multih <- subset(datlh, select = c(HazResp, PolPCA, Gravity, Part_sex, Part_age, EthBin, Inc,
                                   EduCat, WiPa, Dodd, PoliOr, NumPolCat, SocCons, EcoCons,
                                   MilCons, PolSum, Quest, ID, RavenSum, WonderSum))
# NAs must be removed to fit a linear mixed-effect model with lmer.
multib1 <- na.omit(multib)
multih1 <- na.omit(multih)
# Crossed random effects for participant (ID) and question (Quest).
me_hpc <- lmer(HazResp ~ PolPCA + Part_sex + Part_age + EthBin + Inc + EduCat + RavenSum + WonderSum + Gravity + (1 | ID) + (1 | Quest), data = multih1)
me_bpc <- lmer(BenResp ~ PolPCA + Part_sex + Part_age + EthBin + Inc + EduCat + RavenSum + WonderSum + Gravity + (1 | ID) + (1 | Quest), data = multib1)
vif.mer(me_hpc)  # Check the variance inflation of a lmer model.
### The difference between unweighted average credulity for hazards and for benefits (Table S8) ###
dat4$Unwtd_diff <- rowMeans(dat4[, haznm]) - rowMeans(dat4[, bennm])
nc_diff <- lm(Unwtd_diff ~ PolPCA + Part_sex + Part_age + EthBin + Inc + EduCat + RavenSum + WonderSum, data = dat4)

### Robusticity check: parenthood included (Table S10) ###
dat4$Par[dat4$Par == "MISSING"] <- NA
dat4$Par[dat4$Par == "N/A"] <- NA
dat4$Par <- droplevels(dat4$Par)
sa_par <- lm(Difference ~ PolPCA + Part_sex + Part_age + EthBin + Inc + EduCat + RavenSum + WonderSum + Par, dat4)

#################### SUMMARY STATS ###################
## In Tables S1a and b, reported on participants without imputation. (Return to
## the code immediately following mice and overwrite results produced by mice.)
dat5 <- dat4
## Use the code below to explore descriptive stats for excluded participants.
#dat5<-dat4[dat4$Excluded==TRUE,]
summs <- subset(dat5, select = c(Unwtd_diff, Difference, HazWtd, BenWtd, PolPCA, WiPa, Dodd,
                                 PoliOr, NumPolCat, SocCons, EcoCons, MilCons, Part_age, Inc,
                                 Part_sex, EthBin, EduCat, Par, PolSum, RavenSum, WonderSum))
matnum <- subset(summs, select = c(-EthBin, -EduCat, -Part_sex, -NumPolCat, -Par))
matint <- subset(summs, select = c(NumPolCat, Part_sex, EthBin, EduCat, Par))

# Descriptives for the numeric variables. (Values end up stored as characters
# because the matrix also holds the variable names; unchanged from the source.)
numdes <- matrix(NA, nrow = length(matnum), ncol = 7)
colnames(numdes) <- c("Variable", "Mean", "SD", "Median", "Min", "Max", "N")
for (i in seq_len(length(matnum))) {
  numdes[i, 1] <- colnames(matnum[i])
  numdes[i, 2] <- mean(matnum[, i], na.rm = TRUE)
  numdes[i, 3] <- sd(matnum[, i], na.rm = TRUE)
  numdes[i, 4] <- median(matnum[, i], na.rm = TRUE)
  numdes[i, 5] <- min(matnum[, i], na.rm = TRUE)
  numdes[i, 6] <- max(matnum[, i], na.rm = TRUE)
  numdes[i, 7] <- sum(!is.na(matnum[, i]))  # non-missing N (same as the source's nrow() on the subsetted frame)
}

# Descriptives for the categorical variables: only median (of the underlying
# level codes, for factors) and N apply.
facdes <- matrix(NA, nrow = length(matint), ncol = 7)
colnames(facdes) <- c("Variable", "Mean", "SD", "Median", "Min", "Max", "N")
for (i in seq_len(length(matint))) {
  facdes[i, 1] <- colnames(matint[i])
  if (is.integer(matint[, i])) {  # idiomatic form of class(x) == "integer"
    facdes[i, 4] <- median(matint[, i], na.rm = TRUE)
  } else {
    facdes[i, 4] <- median(as.numeric(matint[, i]), na.rm = TRUE)
  }
  facdes[i, 7] <- sum(!is.na(matint[, i]))
}
i <- 1
# Gives the breakdown of percentage per level for each of the categorical
# variables if you change the value of i (i = 1, just above; contextualizes the
# "medians" provided by the facdes table).
summary(as.factor(matint[, i][!is.na(matint[, i])])) / length(as.factor(matint[, i][!is.na(matint[, i])]))

### Any differences in demographics or political orientation for excluded vs.
### included people? Must be run on the sample with no exclusion: go back to
### where participants were excluded (near the top) and overwrite the exclusion
### using the code provided there.
summary(demodiffs <- glm(Excluded ~ PolPCA + Part_sex + Part_age + EthBin + Inc + EduCat + RavenSum + WonderSum, data = dat4, family = binomial))

################## Figure 1 Part B #####################
# NOTE(review): requires the forestplot package (forestplot, fpTxtGp, fpColors),
# which is not among the libraries loaded at the top of the source file.
betas1 <- sa_wpall1$coef[c("SocCons", "MilCons", "EcoCons")]
sds1 <- coef(summary(sa_wpall1))[, 2][c("SocCons", "MilCons", "EcoCons")]
lci1 <- betas1 - 1.96 * sds1
hci1 <- betas1 + 1.96 * sds1
betas.cis1 <- data.frame(betas1, lci1, hci1)
xticks <- seq(-0.4, 0.6, by = 0.1)
xtlab <- rep(c(TRUE, FALSE), length.out = length(xticks))
attr(xticks, "labels") <- xtlab
attr(xticks, "cex") <- 5
forestplot(labeltext = c("", "", ""),
           mean = betas.cis1$betas1, lower = betas.cis1$lci1, upper = betas.cis1$hci1,
           hrzl_lines = TRUE, zero = 0, xticks.digits = 1, grid = FALSE,
           lwd.zero = 1.5, lwd.ci = 3, lwd.xaxis = 1.5, lty.ci = 1,
           txt_gp = fpTxtGp(label = gpar(fontfamily = "", cex = 0.11),
                            ticks = gpar(fontfamily = "", cex = 0.10)),
           col = fpColors(zero = "black", lines = "dodgerblue3", box = "dodgerblue3"),
           boxsize = 0.15, xticks = xticks, new_page = FALSE,
           graphwidth = unit(2.5, "inches"), mar = unit(c(0, 3.55, 0, 0), "inches"),
           graph.pos = "right", lineheight = unit(0.2, "npc"))

################ Figure S3b ################
par(mar = c(5, 6, 0, 1))
scatter.smooth(dat4$Part_age, dat4$Difference, xlab = "Age",
               ylab = "Difference in hazard-benefit credulity",
               cex.axis = 2, cex.lab = 2, cex = 1.5, span = 0.5,
               mgp = c(3.5, 1.5, 0), lwd = 2)