library(xlsx)
library(xlsxjars)
library(sna)
library(statnet)
library(igraph)
library(car)
library(foreign)
library(lattice)
library(lme4)
library(network)
library(MASS)

##############################################################################################################
##############################################################################################################
#                                                                                                            #
#                                   LOADING DATA AND CREATING MATRICES                                       #
#                                                                                                            #
##############################################################################################################
##############################################################################################################

#SET WORKING DIRECTORY AND LOAD IN FULL DATASET
#############################################################
setwd("C:/Users/joeha/Dropbox (ASU)/Hruschka Lab/Bangladesh Network Data and Paper/Network Data Files/Master Data Files and Final Analysis/Data Files and SPSS Syntax")

#load in the cleaned data for R file
net1<-read.csv('net1_1.csv')

#DEFINING THE FUNCTION TO TURN EDGELISTS INTO MATRICES
##############################################################
edge_to_adjacency<-function(edgemat,egocol,altercol,tiecol){
  egoids <- unique(edgemat[,egocol])
  alterids <- unique(edgemat[,altercol])
  allids <- unique(c(egoids,alterids))
  n <- length(allids)
  adjmat <- matrix(ncol=n,nrow=n,0)
  colnames(adjmat) <- allids
  rownames(adjmat) <- allids
  for (i in 1:length(edgemat[,1])){
    ego_id <- as.character(edgemat[i,egocol])
    alter_id <- as.character(edgemat[i,altercol])
    adjmat[ego_id,alter_id] <- as.numeric(edgemat[i,tiecol])
  }
  return(adjmat)
}
##############################################################

#DUMMY CODING CLOSENESS
net1$C1<-0
net1$C1[which(net1$Close==1)]<-1
net1$C2<-0
net1$C2[which(net1$Close==2)]<-1
net1$C3<-0
net1$C3[which(net1$Close==3)]<-1
net1$C4<-0
net1$C4[which(net1$Close==4)]<-1
net1$C5<-0
net1$C5[which(net1$Close==5)]<-1

#DUMMY CODING REVERSE CLOSENESS
net1$RC1<-0
net1$RC1[which(net1$RClose==1)]<-1
net1$RC2<-0
net1$RC2[which(net1$RClose==2)]<-1
net1$RC3<-0
net1$RC3[which(net1$RClose==3)]<-1
net1$RC4<-0
net1$RC4[which(net1$RClose==4)]<-1
net1$RC5<-0
net1$RC5[which(net1$RClose==5)]<-1

#DUMMY CODING RELATEDNESS
net1$R5<-0
net1$R5[which(net1$DescentRelatedness3>=0.5)]<-1
net1$R25<-0
net1$R25[which(net1$DescentRelatedness3>=0.25 & net1$DescentRelatedness3<0.5)]<-1
net1$R125<-0
net1$R125[which(net1$DescentRelatedness3>=0.125 & net1$DescentRelatedness3<0.25)]<-1
net1$R0625<-0
#note: the lower bound of this category is 0.015625 (1/64), not 0.0625
net1$R0625[which(net1$DescentRelatedness3>=0.015625 & net1$DescentRelatedness3<0.125)]<-1

#DUMMY CODING DISTANCE BETWEEN HOUSEHOLDS
net1$dist0<-0
net1$dist0[which(net1$distance==0)]<-1
net1$dist1<-0
net1$dist1[which(net1$distance>0 & net1$distance<=0.05)]<-1
net1$dist2<-0
net1$dist2[which(net1$distance>0.05 & net1$distance<=0.1)]<-1
net1$dist3<-0
net1$dist3[which(net1$distance>0.1 & net1$distance<=0.15)]<-1
net1$dist4<-0
net1$dist4[which(net1$distance>0.15 & net1$distance<=0.2)]<-1
net1$dist5<-0
net1$dist5[which(net1$distance>0.2)]<-1

#DUMMY CODING GROUP (PARA)
net1$group1<-0
net1$group1[which(net1$group==1)]<-1
net1$group2<-0
net1$group2[which(net1$group==2)]<-1
net1$group3<-0
net1$group3[which(net1$group==3)]<-1
net1$group4<-0
net1$group4[which(net1$group==4)]<-1

#DUMMY CODING GENDER COMPOSITION OF DYADS
net1$MM<-0
net1$MM[which(net1$egender==1 & net1$agender==1)]<-1
net1$MF<-0
net1$MF[which(net1$egender==1 & net1$agender==0)]<-1
net1$FM<-0
net1$FM[which(net1$egender==0 & net1$agender==1)]<-1
net1$FF<-0
net1$FF[which(net1$egender==0 & net1$agender==0)]<-1
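#OPTIONAL CHECK (not part of the original analysis): a minimal toy example of
#edge_to_adjacency() above, using three hypothetical dyads, to show the expected
#output format. The IDs and tie values below are made up for illustration only.
toy_edges <- data.frame(ego   = c("A","A","B"),
                        alter = c("B","C","C"),
                        tie   = c(1, 0, 1),
                        stringsAsFactors = FALSE)
edge_to_adjacency(toy_edges, "ego", "alter", "tie")
#returns a 3 x 3 matrix with rows/columns A, B, C; cells [A,B] and [B,C] are 1,
#cell [A,C] is 0, and all unlisted dyads (including the diagonal) stay 0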
#RECODING AGE SIMILARITY (ego age minus alter age)
net1$agesim<-net1$EgoAgey-net1$AlterAgey

#CREATING THE MATRICES
###############################################################
GROUP<-edge_to_adjacency(net1,"EUNIQUEID","AUNIQUEID","group")
CLOSE<-edge_to_adjacency(net1,"EUNIQUEID","AUNIQUEID","Close")
RCLOSE<-edge_to_adjacency(net1,"EUNIQUEID","AUNIQUEID","RClose")
R5<-edge_to_adjacency(net1,"EUNIQUEID","AUNIQUEID","R5")
R25<-edge_to_adjacency(net1,"EUNIQUEID","AUNIQUEID","R25")
R125<-edge_to_adjacency(net1,"EUNIQUEID","AUNIQUEID","R125")
R0625<-edge_to_adjacency(net1,"EUNIQUEID","AUNIQUEID","R0625")
VFRIEND<-edge_to_adjacency(net1,"EUNIQUEID","AUNIQUEID","vfriend")
CFRIEND<-edge_to_adjacency(net1,"EUNIQUEID","AUNIQUEID","cfriend")
RFRIEND<-edge_to_adjacency(net1,"EUNIQUEID","AUNIQUEID","rfriend")
SPOUSE<-edge_to_adjacency(net1,"EUNIQUEID","AUNIQUEID","Spouse")
SAMEHH<-edge_to_adjacency(net1,"EUNIQUEID","AUNIQUEID","SameHH")
DIST0<-edge_to_adjacency(net1,"EUNIQUEID","AUNIQUEID","dist0")
DIST1<-edge_to_adjacency(net1,"EUNIQUEID","AUNIQUEID","dist1")
DIST2<-edge_to_adjacency(net1,"EUNIQUEID","AUNIQUEID","dist2")
DIST3<-edge_to_adjacency(net1,"EUNIQUEID","AUNIQUEID","dist3")
DIST4<-edge_to_adjacency(net1,"EUNIQUEID","AUNIQUEID","dist4")
DIST5<-edge_to_adjacency(net1,"EUNIQUEID","AUNIQUEID","dist5")
GROUP1<-edge_to_adjacency(net1,"EUNIQUEID","AUNIQUEID","group1")
GROUP2<-edge_to_adjacency(net1,"EUNIQUEID","AUNIQUEID","group2")
GROUP3<-edge_to_adjacency(net1,"EUNIQUEID","AUNIQUEID","group3")
GROUP4<-edge_to_adjacency(net1,"EUNIQUEID","AUNIQUEID","group4")
CLOSE1<-edge_to_adjacency(net1,"EUNIQUEID","AUNIQUEID","C1")
CLOSE2<-edge_to_adjacency(net1,"EUNIQUEID","AUNIQUEID","C2")
CLOSE3<-edge_to_adjacency(net1,"EUNIQUEID","AUNIQUEID","C3")
CLOSE4<-edge_to_adjacency(net1,"EUNIQUEID","AUNIQUEID","C4")
CLOSE5<-edge_to_adjacency(net1,"EUNIQUEID","AUNIQUEID","C5")
RCLOSE1<-edge_to_adjacency(net1,"EUNIQUEID","AUNIQUEID","RC1")
RCLOSE2<-edge_to_adjacency(net1,"EUNIQUEID","AUNIQUEID","RC2")
RCLOSE3<-edge_to_adjacency(net1,"EUNIQUEID","AUNIQUEID","RC3")
RCLOSE4<-edge_to_adjacency(net1,"EUNIQUEID","AUNIQUEID","RC4")
RCLOSE5<-edge_to_adjacency(net1,"EUNIQUEID","AUNIQUEID","RC5")
MM<-edge_to_adjacency(net1,"EUNIQUEID","AUNIQUEID","MM")
MF<-edge_to_adjacency(net1,"EUNIQUEID","AUNIQUEID","MF")
FF<-edge_to_adjacency(net1,"EUNIQUEID","AUNIQUEID","FF")
FM<-edge_to_adjacency(net1,"EUNIQUEID","AUNIQUEID","FM")
DISCRETE_GANY<-edge_to_adjacency(net1,"EUNIQUEID","AUNIQUEID","discrete_gany")
DISCRETE_RANY<-edge_to_adjacency(net1,"EUNIQUEID","AUNIQUEID","discrete_rany")
ALTER_GIVE<-edge_to_adjacency(net1,"EUNIQUEID","AUNIQUEID","Alt_give")
ALTER_RECEIVE<-edge_to_adjacency(net1,"EUNIQUEID","AUNIQUEID","Alt_recieve")   #column name in the data is spelled "Alt_recieve"
RELATEDNESS<-edge_to_adjacency(net1,"EUNIQUEID","AUNIQUEID","DescentRelatedness3")
DISTANCE<-edge_to_adjacency(net1,"EUNIQUEID","AUNIQUEID","distance")
ALTER_AGE<-edge_to_adjacency(net1,"EUNIQUEID","AUNIQUEID","AlterAgey")
EGO_AGE<-edge_to_adjacency(net1,"EUNIQUEID","AUNIQUEID","EgoAgey")
AGE_SIM<-edge_to_adjacency(net1,"EUNIQUEID","AUNIQUEID","agesim")
###############################################################

#COMPUTING NEW VARIABLE MATRICES
###############################################################
#NEED A CONSTANT MATRIX FOR RUNNING THE MSMRQAP CODE
CONSTANT<-GROUP
CONSTANT<-replace(CONSTANT, CONSTANT>1,1)

#MERGING FRIEND MATRICES
###############################################################
FRIEND<-VFRIEND+CFRIEND+RFRIEND
FRIEND_WITHR<-VFRIEND+CFRIEND+RFRIEND   #identical to FRIEND as written; the no-ritual-friend version is built below
DISCRETE_FRIEND<-FRIEND
DISCRETE_FRIEND[DISCRETE_FRIEND>0]<-1
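#OPTIONAL SANITY CHECK (not part of the original analysis): element-wise sums such as
#VFRIEND + CFRIEND + RFRIEND assume that every matrix built by edge_to_adjacency()
#uses the same row/column ordering. Because all matrices come from the same net1
#edgelist this should hold, but it can be verified explicitly:
stopifnot(identical(rownames(VFRIEND), rownames(CFRIEND)),
          identical(rownames(VFRIEND), rownames(RFRIEND)),
          identical(colnames(VFRIEND), colnames(CFRIEND)))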
#REMOVING ANYONE MARKED AS RITUAL FRIEND FROM THE FRIEND VARIABLE
DISCRETE_FRIEND_NOR<-DISCRETE_FRIEND
for (i in 1:length(DISCRETE_FRIEND[,1])){
  for (j in 1:length(DISCRETE_FRIEND[1,])){
    if (RFRIEND[i,j] == 1) DISCRETE_FRIEND_NOR[i,j] <- 0
  }
}
###############################################################
###############################################################

#NEW CODE FOR REVIEWER COMMENTS
###############################################################
#The friendship variable without ritual friends has already been computed,
#as have the age and age-difference variables above.
#Below we remove the same-household ties and create the affine matrices.

#REPLACING THE SAME HOUSEHOLD TIES WITH NA'S
#############################################################
DV<-ALTER_RECEIVE
for (i in 1:length(ALTER_RECEIVE[,1])){
  for (j in 1:length(ALTER_RECEIVE[1,])){
    if (SAMEHH[i,j] == 1) DV[i,j] <- NA
  }
}

ALT_DV<-DISCRETE_GANY
for (i in 1:length(ALTER_RECEIVE[,1])){
  for (j in 1:length(ALTER_RECEIVE[1,])){
    if (SAMEHH[i,j] == 1) ALT_DV[i,j] <- NA
  }
}
#############################################################

#CREATING THE AFFINE MATRIX
#############################################################
AFFINE_R5<- matrix(ncol=length(SPOUSE[1,]),nrow=length(SPOUSE[,1]),0)
AFFINE_R25<- matrix(ncol=length(SPOUSE[1,]),nrow=length(SPOUSE[,1]),0)
AFFINE_R125<- matrix(ncol=length(SPOUSE[1,]),nrow=length(SPOUSE[,1]),0)
AFFINE_R0625<- matrix(ncol=length(SPOUSE[1,]),nrow=length(SPOUSE[,1]),0)

#each ego inherits the genetic-kin dummies of his or her spouse
for (i in 1:length(SPOUSE[,1])){
  for (j in 1:length(SPOUSE[1,])){
    if (SPOUSE[i,j]==1){
      for (k in 1:length(SPOUSE[1,])){
        AFFINE_R5[i,k]<-R5[j,k]
        AFFINE_R25[i,k]<-R25[j,k]
        AFFINE_R125[i,k]<-R125[j,k]
        AFFINE_R0625[i,k]<-R0625[j,k]
      }
    }
  }
}

#ADDING AFFINE MATRIX AND TRANSPOSE AND SETTING VALUES TO 1
A_R5<-1*(AFFINE_R5 | t(AFFINE_R5))
A_R25<-1*(AFFINE_R25 | t(AFFINE_R25))
A_R125<-1*(AFFINE_R125 | t(AFFINE_R125))
A_R0625<-1*(AFFINE_R0625 | t(AFFINE_R0625))

#keep only the closest affinal category for each dyad
for (i in 1:length(A_R5[1,])){
  for (j in 1:length(A_R5[,1])){
    if (A_R5[i,j]==1){
      A_R25[i,j]<-0
      A_R125[i,j]<-0
      A_R0625[i,j]<-0
    }
    if (A_R25[i,j]==1){
      A_R125[i,j]<-0
      A_R0625[i,j]<-0
    }
    if (A_R125[i,j]==1){
      A_R0625[i,j]<-0
    }
  }
}
table(A_R5)

#drop affinal ties between dyads that are already close genetic kin (r >= 0.25)
for (i in 1:length(A_R5[1,])){
  for (j in 1:length(A_R5[,1])){
    if (RELATEDNESS[i,j]>=0.25){
      A_R5[i,j]<-0
      A_R25[i,j]<-0
      A_R125[i,j]<-0
      A_R0625[i,j]<-0
    }
  }
}
table(A_R5)
##############################################################################################################

#CREATING THE AFFINE_R MATRIX
#############################################################
AFFINE_R<- matrix(ncol=length(SPOUSE[1,]),nrow=length(SPOUSE[,1]),0)
for (i in 1:length(SPOUSE[,1])){
  for (j in 1:length(SPOUSE[1,])){
    if (SPOUSE[i,j]==1){
      for (k in 1:length(SPOUSE[1,])){
        AFFINE_R[i,k]<-RELATEDNESS[j,k]
      }
    }
  }
}
AFFINE_R<-pmax(AFFINE_R,t(AFFINE_R))

#cross-check of genetic vs. affinal kin categories; makevect(), gsize, and parray1 are
#defined below, so run this line only after the array and MRQAP setup have been built
#table(makevect(parray1[,,4],gsize), makevect(A_R5,gsize))

##############################################################################################################
##############################################################################################################
#                                                                                                            #
#                                      CREATING AN ARRAY FOR ANALYSIS                                        #
#                                                                                                            #
##############################################################################################################
##############################################################################################################

#appending the array of matrices
g1<-dim(CLOSE)[1]
parray1<-array(dim=c(g1,g1,62))
parray1[,,1] <- GROUP
parray1[,,2] <- CLOSE
parray1[,,3] <- RCLOSE
parray1[,,4] <- R5
parray1[,,5] <- R25
parray1[,,6] <- R125
parray1[,,7] <- R0625
parray1[,,8] <- VFRIEND
parray1[,,9] <- CFRIEND
parray1[,,10] <- RFRIEND
parray1[,,11] <- SPOUSE
parray1[,,12] <- SAMEHH
parray1[,,13] <- DIST0
parray1[,,14] <- DIST1
parray1[,,15] <- DIST2
parray1[,,16] <- DIST3
parray1[,,17] <- DIST4
parray1[,,18] <- DIST5
parray1[,,19] <- GROUP1
parray1[,,20] <- GROUP2
parray1[,,21] <- GROUP3
parray1[,,22] <- GROUP4
parray1[,,23] <- CLOSE1
parray1[,,24] <- CLOSE2
parray1[,,25] <- CLOSE3
parray1[,,26] <- CLOSE4
parray1[,,27] <- CLOSE5
parray1[,,28] <- RCLOSE1
parray1[,,29] <- RCLOSE2
parray1[,,30] <- RCLOSE3
parray1[,,31] <- RCLOSE4
parray1[,,32] <- RCLOSE5
parray1[,,33] <- GROUP   #slices 33 and 34 are repeated to maintain the structure of the array;
parray1[,,34] <- GROUP   #they used to hold data that has been removed
parray1[,,35] <- MM
parray1[,,36] <- MF
parray1[,,37] <- FF
parray1[,,38] <- FM
parray1[,,39] <- DISCRETE_GANY
parray1[,,40] <- DISCRETE_RANY
parray1[,,41] <- ALTER_GIVE
parray1[,,42] <- ALTER_RECEIVE
parray1[,,43] <- RELATEDNESS
parray1[,,44] <- DISTANCE
parray1[,,45] <- ALTER_AGE
parray1[,,46] <- EGO_AGE
parray1[,,47] <- AGE_SIM
parray1[,,48] <- CONSTANT
parray1[,,49] <- FRIEND
parray1[,,50] <- FRIEND_WITHR
parray1[,,51] <- DISCRETE_FRIEND
parray1[,,52] <- A_R5
parray1[,,53] <- A_R25
parray1[,,54] <- A_R125
parray1[,,55] <- A_R0625
parray1[,,56] <- DV
parray1[,,57] <- DISCRETE_FRIEND_NOR
parray1[,,58] <- ALT_DV
###############################################################

#cells that are 0 in the group matrix (unrecorded dyads, i.e. the diagonal and
#cross-para pairs) are set to NA in every other slice
for (i in 2:length(parray1[1,1,])){
  for (j in 1:length(parray1[1,,1])){
    for (k in 1:length(parray1[,1,1])){
      if (parray1[k,j,1]==0) parray1[k,j,i]<-NA
    }
  }
}

##############################################################################################################
##############################################################################################################
#                                                                                                            #
#                                         DEFINING MRQAP FUNCTIONS                                           #
#                                                                                                            #
##############################################################################################################
##############################################################################################################

#----MSMRQAP FUNCTION FOR LINEAR REGRESSION----#
###############################################################
#Code for running MRQAP on multiple samples.
#Data should be in an array of adjacency matrices, including a matrix whose
#values indicate the group ID (each group should be assigned a unique numeric value).
#These matrices should have all groups combined into a single matrix.
#The adjacency matrices of each group will be referenced
#as a sub-section of this larger combined matrix.
###############################################################

#specifying datasets and setting up functions
###############################################################
#specify data array and location of group id matrix in that array
dats<-parray1
grp<-1

#Using the 'table' function to calculate the number of groups, the size of each group,
#and the size of each group's submatrix.
#groupnum - number of groups (submatrices) in combined matrix
groupnum<-nrow(table(unique(dats[,,grp]))[-1])
#matsize - size of each submatrix minus diagonal (i.e. number of dyads in each submatrix)
matsize<-(table(unique(dats[,,grp]))[])[-1]
#gsize - number of individuals in each submatrix
gsize<-(1+sqrt(1+4*matsize))/2
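#NOTE (added explanation, not original code): matsize holds n*(n-1) for each group,
#the number of off-diagonal dyads in that group's submatrix. Solving
#n^2 - n - matsize = 0 for n gives n = (1 + sqrt(1 + 4*matsize)) / 2, which is the
#gsize formula above. A quick check with a hypothetical group of 10 people:
(1 + sqrt(1 + 4 * (10 * 9))) / 2   #returns 10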
#Creating a function that turns a matrix into a vector, dropping the diagonal
#and the meaningless null ties between members of different groups.

#creating the matrix -> vector function
makevect<-function(mat,gsize) {
  groupnum<-length(gsize)
  casevector<-vector(length=sum(gsize*(gsize-1)))
  groupstart<-0
  cvectorcounter<-1
  for(m in 1:groupnum) {
    for (i in 1:gsize[m]) {
      for (j in 1:gsize[m]) {
        if(i!=j) {
          casevector[cvectorcounter]<-mat[i+groupstart,j+groupstart]
          cvectorcounter=cvectorcounter+1
        }
      }
    }
    groupstart<-groupstart+gsize[m]
  }
  return(casevector)
}
###############################################################

#Creating a function that builds a matrix back out of the vector. This puts the
#diagonal and the meaningless null ties back into the matrix, which is needed for
#the permutation procedure.

#creating the vector -> matrix function (note: uses the global g1 for the matrix dimension)
makemat<-function(vctr,gsize) {
  groupnum<-length(gsize)
  mat<-matrix(0,nrow=g1,ncol=g1,byrow=T)
  gstart<-0
  cmatrixcounter<-1
  for (m in 1:groupnum) {
    for (i in 1:gsize[m]) {
      for (j in 1:gsize[m]) {
        if(i!=j) {
          mat[gstart+i,gstart+j]<-vctr[cmatrixcounter]
          cmatrixcounter=cmatrixcounter+1
        }
      }
    }
    gstart<-gstart+gsize[m]
  }
  return(mat)
}

#Creating a function that runs the Multi-Sample MRQAP for linear regression.
MSMRQAP<-function(dats,Y,Xs,group1,constant,reps) {
  #Using the 'table' function to calculate the number of groups, the size of each group,
  #and the size of each group's submatrix.
  #groupnum - number of groups (submatrices) in combined matrix
  groupnum<-nrow(table(unique(dats[,,group1]))[-1])
  #matsize - size of each submatrix minus diagonal (i.e. number of dyads in each submatrix)
  matsize<-(table(unique(dats[,,group1]))[])[-1]
  #gsize - number of individuals in each submatrix
  gsize<-(1+sqrt(1+4*matsize))/2

  nxs <- length(Xs)               # count number of ivs
  g <- dim(dats[1,,])[1]          # size of network
  y <- makevect(dats[,,Y],gsize)
  results <- matrix(nrow=(nxs+2),ncol=7)
  colnames(results) <- c('coeff','std_err','onetail','twotail','lower025','upper975','upper95')
  rownames(results) <- c(Xs,'intercept','r-squared')

  ####################
  # new code to test standard errors
  simcoef<-matrix(nrow=(nxs),ncol=reps)
  #####################

  # convert the independent variables to vectors
  xs <- matrix(0,nrow=sum(gsize*(gsize-1)),ncol=nxs)
  for (v in 1:nxs) {              # cycle through each independent variable
    xs[,v] <- makevect((dats[,,Xs[v]]),gsize)
  }

  for (v in 1:nxs) {              # cycle through each independent variable
    x <- xs[,v]                   # coefficient to test
    if (nxs>1) {z <- xs[,-v]} else {z<- makevect((dats[,,constant]),gsize)}  # other ivs; if only 1 iv, run quasi-QAP
    # regress y on the other ivs; the residuals are permuted below
    xz <- lm(y ~ z,na.action=na.exclude)
    # CONVERT RESIDUALS TO MATRIX, commands are likely redundant
    resids <- array(dim=c(g,g,1))
    resids[,,1] <- makemat(residuals(xz),gsize)
    matResid <- array(dim=c(g,g,1))
    matResid[1:g,1:g,1] <- resids[,,1]
    # permutation procedure
    out <- rep(0,reps)
    for (i in 1:reps) {
      # permute the residuals matrix within each group (neighborhood) block
      pR<-matResid
      start<-1
      stp<-0
      for(m in 1:groupnum) {
        stp<-gsize[m]+stp
        no<-sample(1:gsize[m],replace=F)
        pR[start:stp,start:stp,1]<-matResid[start:stp,start:stp,1][no,no]
        start<-start+gsize[m]
      }
      pR <- makevect(pR[,,1],gsize)                    # convert to vector
      model <- lm(y ~ pR + z, na.action=na.exclude)    # regress y on the permuted residuals plus the other ivs
      out[i] <- coef(model)[2]                         # store the coefficient on the permuted term (not the intercept)
      ###############
      simcoef[v,i]<-out[i]
      ################
    } # end of permutation
    modeltv <- lm(y ~ x + z, na.action=na.exclude)
    print(results[v,1])
    results[v,1] <- coef(modeltv)[2]
    meanOut <- mean(out)
    results[v,2] <- ((sum((out-meanOut)**2))/reps)**.5                 # calculate standard error
    pgreq <- mean(as.numeric(simcoef[v,] >= results[v,1]))             # proportion greater than or equal to observed coefficient
    pleeq <- mean(as.numeric(simcoef[v,] <= results[v,1]))             # proportion less than or equal to observed coefficient
    results[v,4] <- (mean(as.numeric(simcoef[v,] >= abs(results[v,1]))) +
                     mean(as.numeric(simcoef[v,] <= -abs(results[v,1]))))
    if (results[v,1] > 0) {results[v,3]<-pgreq} else {results[v,3]<-pleeq}
    results[v,5] <- quantile(simcoef[v,],0.025)    # lower 2.5% quantile of simulated distribution
    results[v,6] <- quantile(simcoef[v,],0.975)    # upper 97.5% quantile of simulated distribution
    results[v,7] <- quantile(simcoef[v,],0.95)     # upper 95.0% quantile of simulated distribution
  } # end of cycle through ivs

  # store the results
  results[(nxs+1),1] <- modeltv$coeff[1]                          # intercept
  results[(nxs+1),2] <- summary.lm(modeltv)$coefficients[1,2]     # standard error of intercept
  results[(nxs+1),4] <- summary.lm(modeltv)$coefficients[1,4]     # p-value of intercept
  results[(nxs+2),1] <- summary.lm(modeltv)$r.squared             # r-squared
  print(results)
  return(results)
} #end of MSMRQAP Function
###############################################################

#----MSMRQAP FUNCTION FOR LOGISTIC REGRESSION----#
#######################################################
#Creating a function that runs the Multi-Sample MRQAP for logistic regression.
LOGIT_MSMRQAP<-function(dats,Y,Xs,group1,constant,reps,outfile) {
  #Using the 'table' function to calculate the number of groups, the size of each group,
  #and the size of each group's submatrix.
  #groupnum - number of groups (submatrices) in combined matrix
  groupnum<-nrow(table(unique(dats[,,group1]))[-1])
  #matsize - size of each submatrix minus diagonal (i.e. number of dyads in each submatrix)
  matsize<-(table(unique(dats[,,group1]))[])[-1]
  #gsize - number of individuals in each submatrix
  gsize<-(1+sqrt(1+4*matsize))/2

  nxs <- length(Xs)               # count number of ivs

  ####################
  # new code to test standard errors
  simcoef<-matrix(nrow=(nxs),ncol=reps)
  #####################

  g <- dim(dats[1,,])[1]          # size of network
  y <- makevect(dats[,,Y],gsize)
  #y<-dats[,,Y][row(dats[Y,,]) != col(dats[Y,,])]
  results <- matrix(nrow=(nxs+2),ncol=7)
  colnames(results) <- c('coeff','std_err','onetail','twotail','lower025','upper975','upper95')
  rownames(results) <- c(Xs,'intercept','r-squared')

  # convert the independent variables to vectors
  xs <- matrix(0,nrow=sum(gsize*(gsize-1)),ncol=nxs)
  for (v in 1:nxs) {              # cycle through each independent variable
    xs[,v] <- makevect((dats[,,Xs[v]]),gsize)
  }

  for (v in 1:nxs) {              # cycle through each independent variable
    x <- xs[,v]                   # coefficient to test
    if (nxs>1) {z <- xs[,-v]} else {z<- makevect((dats[,,constant]),gsize)}  # other ivs; if only 1 iv, run quasi-QAP
    # regress x on the other ivs; the residuals are permuted below
    xz <- lm(x ~ z,na.action=na.exclude)
    # CONVERT RESIDUALS TO MATRIX, commands are likely redundant
    resids <- array(dim=c(g,g,1))
    resids[,,1] <- makemat(residuals(xz),gsize)
    matResid <- array(dim=c(g,g,1))
    matResid[1:g,1:g,1] <- resids[,,1]
    # permutation procedure
    out <- rep(0,reps)
    for (i in 1:reps) {
      # permute the residuals matrix within each group (neighborhood) block
      pR<-matResid
      start<-1
      stp<-0
      for(m in 1:groupnum) {
        stp<-gsize[m]+stp
        no<-sample(1:gsize[m],replace=F)
        pR[start:stp,start:stp,1]<-matResid[start:stp,start:stp,1][no,no]
        start<-start+gsize[m]
      }
      pR <- makevect(pR[,,1],gsize)                    # convert to vector
      model <- glm(y ~ pR + z, family=binomial,na.action=na.exclude)   # regress y on the permuted residuals plus the other ivs
      out[i] <- coef(model)[2]                                         # store the coefficient on the permuted term (not the intercept)
      ###############
      simcoef[v,i]<-out[i]
      ################
    } # end of permutation
    modeltv <- glm(y ~ x + z, family=binomial,na.action=na.exclude)
    results[v,1] <- coef(modeltv)[2]
    print(results[v,1])
    meanOut <- mean(out)
    results[v,2] <- ((sum((out-meanOut)**2))/reps)**.5                 # calculate standard error
    pgreq <- mean(as.numeric(simcoef[v,] >= results[v,1]))             # proportion greater than or equal to observed coefficient
    pleeq <- mean(as.numeric(simcoef[v,] <= results[v,1]))             # proportion less than or equal to observed coefficient
    results[v,4] <- (mean(as.numeric(simcoef[v,] >= abs(results[v,1]))) +
                     mean(as.numeric(simcoef[v,] <= -abs(results[v,1]))))
    if (results[v,1] > 0) {results[v,3]<-pgreq} else {results[v,3]<-pleeq}
    results[v,5] <- quantile(simcoef[v,],0.025)    # lower 2.5% quantile of simulated distribution
    results[v,6] <- quantile(simcoef[v,],0.975)    # upper 97.5% quantile of simulated distribution
    results[v,7] <- quantile(simcoef[v,],0.95)     # upper 95.0% quantile of simulated distribution
  } # end of cycle through ivs

  # store the results (note: summary.lm() applied to a glm object returns lm-style statistics)
  results[(nxs+1),1] <- modeltv$coeff[1]                          # intercept
  results[(nxs+1),2] <- summary.lm(modeltv)$coefficients[1,2]     # standard error of intercept
  results[(nxs+1),4] <- summary.lm(modeltv)$coefficients[1,4]     # p-value of intercept
  results[(nxs+2),1] <- summary.lm(modeltv)$r.squared             # r-squared
  print(results)
  #print(simcoef)
  #write.xlsx(results, outfile, sheetName= "results")
  #write.xlsx(simcoef, outfile, sheetName= "simcoef", append=TRUE)
  return(results)
} #end of LOGIT_MSMRQAP Function
###############################################################

##############################################################################################################
##############################################################################################################
#                                                                                                            #
#                                               ANALYSIS                                                     #
#                                                                                                            #
##############################################################################################################
##############################################################################################################

#LOGIT_MSMRQAP<-function(dats,Y,Xs,group1,constant,reps,outfile)
#parray1 slice indices used below:
#received help WITH SAME HH REMOVED (56), closeness (24:27), relatedness (4:7), friend (51),
#distance (13:17), gender (35:37), constant (48), age (45:47), affines (52:55),
#RECIPROCITY - discrete rany (40) - this is the one that was missing
#ALL MODELS FOR MAIN TEXT THAT INCLUDE FRIENDS
#HAVE THE FULL FRIENDSHIP VARIABLE CF+VF+RF (51)
#######################################################

#HELPING MODELS FOR MAIN TEXT
#######################################################
#BASELINE FOR ALL PREDICTORS
NumParam <-21
NumOut <- 7
Iterations <-10
begin <-0
nx<-1
GroupIndex <-48
Outcome <- 56
ParameterStorage <- matrix(nrow = NumParam,ncol=NumOut,0)
Covariates <- c(35:37,45:46,20:22)
ivs<-list(29:32)

#LOGIT_MSMRQAP LOOPING THROUGH ALL INDEPENDENT VARIABLES FOR THE BASELINE
for (i in 1:length(ivs)){
  out <- LOGIT_MSMRQAP(parray1,Outcome,c(ivs[[i]],Covariates),1,GroupIndex,Iterations,"test.xlsx")
  begin<-begin+nx
  nx<-length(out[,1])-2-length(Covariates)
  ParameterStorage[begin:(begin+nx-1),] <- out[1:nx,]
  print(c(begin,begin+nx-1))
}

#B
LOGIT_MSMRQAP(parray1,56,c(45:46,35:37,20:22),1,48,1000,"B.xlsx")
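#OPTIONAL HELPER (not part of the original analysis): the model calls below refer to
#parray1 slices by number. A named lookup vector built from the slice list documented
#above can make those index vectors easier to read; the labels here are descriptive
#shorthand only, not variable names used elsewhere in the script.
slice_labels <- c("56" = "help received (same-HH ties set to NA)",
                  "40" = "reciprocity (discrete_rany)",
                  "48" = "constant",
                  "51" = "friendship (CF+VF+RF, discrete)")
#example: label the indices used in a model specification
slice_labels[as.character(c(56, 40, 48, 51))]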
#B+HBE
LOGIT_MSMRQAP(parray1,56,c(45:46,35:37,20:22,4:6,13:17,52:54,40),1,48,1000,"B_HBE.xlsx")
#B+C+CF+VF
LOGIT_MSMRQAP(parray1,56,c(45:46,35:37,20:22,51,24:27),1,48,10,"B_C_CF_VF.xlsx")
#B+HBE+C
LOGIT_MSMRQAP(parray1,56,c(45:46,35:37,20:22,4:6,13:17,52:54,40,24:27),1,48,1000,"B_HBE_C.xlsx")
#B+HBE+C+CF+VF
LOGIT_MSMRQAP(parray1,56,c(45:46,35:37,20:22,4:6,13:17,52:54,40,51,24:27),1,48,1000,"B_HBE_C_CF_VF.xlsx")

###############################################
#SUPPLEMENTAL MATERIALS
###############################################
#B+C
LOGIT_MSMRQAP(parray1,56,c(45:46,35:37,20:22,24:27),1,48,1000,"B.xlsx")
#B+HBE+C+CF+VF-RF - ANALYSIS REMOVING RITUAL FRIENDS USING DISCRETE_FRIEND_NOR
LOGIT_MSMRQAP(parray1,56,c(45:46,35:37,20:22,4:6,13:17,52:54,40,57,24:27),1,48,10,"B_HBE_C_CF_VF_RF.xlsx")
#B+HBE+CF+VF+RF - ANALYSIS REMOVING ALL CLOSENESS VARIABLES TO SHOW JUST THE EFFECTS OF FRIENDSHIP
LOGIT_MSMRQAP(parray1,56,c(45:46,35:37,20:22,4:6,13:17,52:54,40,51),1,48,1000,"B_HBE_C.xlsx")

#ADDING REVERSE CLOSENESS
###############################################
#B+HBE+C
LOGIT_MSMRQAP(parray1,56,c(45:46,35:37,20:22,4:6,13:17,52:54,40,24:27,29:32),1,48,1000,"B_HBE_C.xlsx")
#B+HBE+C+F
LOGIT_MSMRQAP(parray1,56,c(45:46,35:37,20:22,4:6,13:17,52:54,40,24:27,29:32,51),1,48,1000,"B_HBE_C.xlsx")

#ADDING EGO AND ALTER WEALTH AND FS SCORES
#note: slices 59:62 (ego/alter wealth and food security) are not filled in this script;
#they must be added to parray1 before these models will run
###############################################
#WEALTH
LOGIT_MSMRQAP(parray1,56,c(45:46,35:37,20:22,4:6,13:17,52:54,40,24:27,59,60),1,48,1000,"B_HBE_C.xlsx")
#FS
LOGIT_MSMRQAP(parray1,56,c(45:46,35:37,20:22,4:6,13:17,52:54,40,24:27,61,62),1,48,1000,"B_HBE_C.xlsx")
#BOTH
LOGIT_MSMRQAP(parray1,56,c(45:46,35:37,20:22,4:6,13:17,52:54,40,24:27,59:62),1,48,1000,"B_HBE_C.xlsx")

#######################################################
#GETTING THE AIC'S
#######################################################
y <- makevect(parray1[,,56],gsize)
x45 <- makevect(parray1[,,45],gsize)
x46 <- makevect(parray1[,,46],gsize)
x35 <- makevect(parray1[,,35],gsize)
x36 <- makevect(parray1[,,36],gsize)
x37 <- makevect(parray1[,,37],gsize)
x20 <- makevect(parray1[,,20],gsize)
x21 <- makevect(parray1[,,21],gsize)
x22 <- makevect(parray1[,,22],gsize)
x4 <- makevect(parray1[,,4],gsize)
x5 <- makevect(parray1[,,5],gsize)
x6 <- makevect(parray1[,,6],gsize)
x13 <- makevect(parray1[,,13],gsize)
x14 <- makevect(parray1[,,14],gsize)
x15 <- makevect(parray1[,,15],gsize)
x16 <- makevect(parray1[,,16],gsize)
x17 <- makevect(parray1[,,17],gsize)
x52 <- makevect(parray1[,,52],gsize)
x53 <- makevect(parray1[,,53],gsize)
x54 <- makevect(parray1[,,54],gsize)
x40 <- makevect(parray1[,,40],gsize)
x24 <- makevect(parray1[,,24],gsize)
x25 <- makevect(parray1[,,25],gsize)
x26 <- makevect(parray1[,,26],gsize)
x27 <- makevect(parray1[,,27],gsize)
x29 <- makevect(parray1[,,29],gsize)
x30 <- makevect(parray1[,,30],gsize)
x31 <- makevect(parray1[,,31],gsize)
x32 <- makevect(parray1[,,32],gsize)
x51 <- makevect(parray1[,,51],gsize)   #discrete friend with ritual friends included

#Baseline
k <-glm(y~x45 +x46 +x35 +x36 +x37 +x20 +x21 +x22,family=binomial)
k
logLik(k)
#HBE
k <-glm(y~x45 +x46 +x35 +x36 +x37 +x20 +x21 +x22 +x4 +x5 +x6 +x13 +x14 +x15 +x16 +x17 +x52 +x53 +x54 +x40,family=binomial)
k
logLik(k)
#HBE+C
k <-glm(y~x45 +x46 +x35 +x36 +x37 +x20 +x21 +x22 +x4 +x5 +x6 +x13 +x14 +x15 +x16 +x17 +x52 +x53 +x54 +x40 +x24 +x25 +x26 +x27,family=binomial)
k
logLik(k)
#HBE+CVF
k <-glm(y~x45 +x46 +x35 +x36 +x37 +x20 +x21 +x22 +x4 +x5 +x6 +x13 +x14 +x15 +x16 +x17 +x52 +x53 +x54 +x40 +x51,family=binomial)
k
logLik(k)
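#NOTE (added, not original code): base R's AIC() can also be applied to these fitted
#glm objects directly, e.g. for the model just fitted above, rather than converting the
#log-likelihoods by hand; AIC = -2*logLik + 2*(number of estimated parameters).
AIC(k)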
#HBE+CVF+C
k <-glm(y~x45 +x46 +x35 +x36 +x37 +x20 +x21 +x22 +x4 +x5 +x6 +x13 +x14 +x15 +x16 +x17 +x52 +x53 +x54 +x40 +x24 +x25 +x26 +x27 +x51,family=binomial)
k
logLik(k)
#HBE+C+RC
k <-glm(y~x45 +x46 +x35 +x36 +x37 +x20 +x21 +x22 +x4 +x5 +x6 +x13 +x14 +x15 +x16 +x17 +x52 +x53 +x54 +x40 +x24 +x25 +x26 +x27 +x29 +x30 +x31 +x32,family=binomial)
k
logLik(k)
#HBE+CVF+C+RC
k <-glm(y~x45 +x46 +x35 +x36 +x37 +x20 +x21 +x22 +x4 +x5 +x6 +x13 +x14 +x15 +x16 +x17 +x52 +x53 +x54 +x40 +x24 +x25 +x26 +x27 +x51 +x29 +x30 +x31 +x32,family=binomial)
k
logLik(k)
#Baseline+C
k <-glm(y~x45 +x46 +x35 +x36 +x37 +x20 +x21 +x22 +x24 +x25 +x26 +x27,family=binomial)
k
logLik(k)
#Baseline+C+reciprocity (x40)
k <-glm(y~x45 +x46 +x35 +x36 +x37 +x20 +x21 +x22 +x24 +x25 +x26 +x27 +x40,family=binomial)
k
logLik(k)
###################################

#######################################################
#ANALYSIS OF CLOSENESS FOR MAIN AND SUPPLEMENTAL TEXT
#######################################################
#MSMRQAP<-function(dats,Y,Xs,group1,constant,reps)
#MSMRQAP(parray1,2,c(),1,48,10)

#RELATEDNESS
MSMRQAP(parray1,2,c(4:7),1,48,1000)
#FRIENDSHIP
MSMRQAP(parray1,2,c(8,9,10),1,48,1000)
#SPOUSE
MSMRQAP(parray1,2,c(11),1,48,1000)
#SAME HH
MSMRQAP(parray1,2,c(12),1,48,1000)
#GEOGRAPHIC DISTANCE
MSMRQAP(parray1,2,c(13:17),1,48,1000)
#ALTER CLOSENESS
MSMRQAP(parray1,2,c(29:32),1,48,1000)
#GENDER SIM
MSMRQAP(parray1,2,c(35:37),1,48,1000)
#AFFINE RELATEDNESS
MSMRQAP(parray1,2,c(52:55),1,48,1000)
#AGE SIM
MSMRQAP(parray1,2,c(45:46),1,48,1000)
#FULL MODEL
MSMRQAP(parray1,2,c(4:7,8,9,10,11,12,13:17,29:32,35:37,52:55,45:46),1,48,1000)

##############################################################################################################
##############################################################################################################
#                                                                                                            #
#                                             DESCRIPTIVES                                                   #
#                                                                                                            #
##############################################################################################################
##############################################################################################################

#CREATING A MATRIX WITH SAMEHH HELP TIES SET TO 0 FOR DESCRIPTIVES
R_TIES<-DISCRETE_RANY
for (i in 1:length(ALTER_RECEIVE[,1])){
  for (j in 1:length(ALTER_RECEIVE[1,])){
    if (SAMEHH[i,j] == 1) R_TIES[i,j] <- 0
  }
}
H_TIES<-DISCRETE_GANY
for (i in 1:length(ALTER_RECEIVE[,1])){
  for (j in 1:length(ALTER_RECEIVE[1,])){
    if (SAMEHH[i,j] == 1) H_TIES[i,j] <- 0
  }
}

#TOTAL NUMBER OF PEOPLE EXPRESSING HELPING TIES
table(colSums(R_TIES))
103+42+20+4+2
table(colSums(H_TIES))
115+(43*2)+(21*3)+(6*4)+(1*5)+(2*6)+(1*7)+(2*20)

#TOTAL NUMBER OF HELPING TIES
table(DV)
#OR
(103*1)+(42*2)+(20*3)+(4*4)+(5*2)
171/444

#SPLITTING DATASET INTO GROUPS TO RUN DESCRIPTIVES
##################################################
gr1<-net1[which(net1$group==1),]
table(gr1$group)
gr2<-net1[which(net1$group==2),]
table(gr2$group)
gr3<-net1[which(net1$group==3),]
table(gr3$group)
gr4<-net1[which(net1$group==4),]
table(gr4$group)

#CALCULATING GENDER BY PARA
##############################################
sum(table(gr1$egender, gr1$EUNIQUEID)[1,]>0)
sum(table(gr1$egender, gr1$EUNIQUEID)[2,]>0)
#PARA 1
64/117
#WOMEN=64
#MEN=53
sum(table(gr2$egender, gr2$EUNIQUEID)[1,]>0)
sum(table(gr2$egender, gr2$EUNIQUEID)[2,]>0)
#PARA 2
73/136
#WOMEN=73
#MEN=63
sum(table(gr3$egender, gr3$EUNIQUEID)[1,]>0)
sum(table(gr3$egender, gr3$EUNIQUEID)[2,]>0)
#PARA 3
47/92
#WOMEN=47
#MEN=45
sum(table(gr4$egender, gr4$EUNIQUEID)[1,]>0)
sum(table(gr4$egender, gr4$EUNIQUEID)[2,]>0)
#PARA 4
55/99
#WOMEN=55
#MEN=44
#NUMBER AND PERCENT OF SPOUSE TIES
##############################################
sum(table(gr1$Spouse,gr1$EUNIQUEID)[2,]>0)
#PARA1
85/117
sum(table(gr2$Spouse,gr2$EUNIQUEID)[2,]>0)
#PARA2
97/136
sum(table(gr3$Spouse,gr3$EUNIQUEID)[2,]>0)
#PARA3
74/92
sum(table(gr4$Spouse,gr4$EUNIQUEID)[2,]>0)
#PARA4
78/99

#MEAN NUMBER OF YEARS IN THE PARA
##############################################
mean(gr1$Egoyinpara, na.rm=TRUE)   #para 1 = 31.1
sd(gr1$Egoyinpara, na.rm=TRUE)     #para 1 = 18.5
mean(gr2$Egoyinpara, na.rm=TRUE)   #para 2 = 30.6 - 2 individuals were missing years in para for para 2
sd(gr2$Egoyinpara, na.rm=TRUE)     #para 2 = 17.1
mean(gr3$Egoyinpara, na.rm=TRUE)   #para 3 = 31.9
sd(gr3$Egoyinpara, na.rm=TRUE)     #para 3 = 17.4
mean(gr4$Egoyinpara, na.rm=TRUE)   #para 4 = 23.3
sd(gr4$Egoyinpara, na.rm=TRUE)     #para 4 = 20.2
#double check missing data for para 2
table(gr2$Egoyinpara, useNA="ifany")
#no missing data for paras 1, 3, and 4
table(gr1$Egoyinpara, useNA="ifany")
table(gr3$Egoyinpara, useNA="ifany")
table(gr4$Egoyinpara, useNA="ifany")
##############################################

#MEAN EGO AGE IN YEARS
##############################################
mean(gr1$EgoAgey, na.rm=TRUE)   #para 1 = 39.5
sd(gr1$EgoAgey, na.rm=TRUE)     #para 1 = 15.7
mean(gr2$EgoAgey, na.rm=TRUE)   #para 2 = 40.3 - 2 individuals were missing age in years for para 2
sd(gr2$EgoAgey, na.rm=TRUE)     #para 2 = 15.0
mean(gr3$EgoAgey, na.rm=TRUE)   #para 3 = 42.2
sd(gr3$EgoAgey, na.rm=TRUE)     #para 3 = 15.8
mean(gr4$EgoAgey, na.rm=TRUE)   #para 4 = 40.4
sd(gr4$EgoAgey, na.rm=TRUE)     #para 4 = 17.8
#double check missing data for para 2
table(gr2$EgoAgey, useNA="ifany")
#no missing data for paras 1, 3, and 4
table(gr1$EgoAgey, useNA="ifany")
table(gr3$EgoAgey, useNA="ifany")
table(gr4$EgoAgey, useNA="ifany")
##############################################

############################################################################################
#MEAN NUMBER OF TIE TYPES PER INDIVIDUAL PER PARA
############################################################################################
#GENETIC KIN TIES ARE TIES WITH R>0

#PARA 1 Kin ties=1290
##############################################
sum(table(GROUP,RELATEDNESS)[2,2:17])
#mean ties per individual
1290/117
mean(colSums(R5)[1:117]+colSums(R25)[1:117]+colSums(R125)[1:117]+colSums(R0625)[1:117])
sd(colSums(R5)[1:117]+colSums(R25)[1:117]+colSums(R125)[1:117]+colSums(R0625)[1:117])
#Para 1
#mean = 11.0
#sd = 12.1

#PARA 2 Kin ties=408
##############################################
sum(table(GROUP,RELATEDNESS)[3,2:17])
#mean ties per individual
408/136
mean(colSums(R5)[118:253]+colSums(R25)[118:253]+colSums(R125)[118:253]+colSums(R0625)[118:253])
sd(colSums(R5)[118:253]+colSums(R25)[118:253]+colSums(R125)[118:253]+colSums(R0625)[118:253])
#Para 2
#mean = 3
#sd = 2.8

#PARA 3 Kin ties=660
##############################################
sum(table(GROUP,RELATEDNESS)[4,2:17])
#mean ties per individual
660/92
mean(colSums(R5)[254:345]+colSums(R25)[254:345]+colSums(R125)[254:345]+colSums(R0625)[254:345])
sd(colSums(R5)[254:345]+colSums(R25)[254:345]+colSums(R125)[254:345]+colSums(R0625)[254:345])
#Para 3
#mean = 7.2
#sd = 7.1

#PARA 4 Kin ties=190
##############################################
sum(table(GROUP,RELATEDNESS)[5,2:17])
#mean ties per individual
190/99
mean(colSums(R5)[346:444]+colSums(R25)[346:444]+colSums(R125)[346:444]+colSums(R0625)[346:444])
sd(colSums(R5)[346:444]+colSums(R25)[346:444]+colSums(R125)[346:444]+colSums(R0625)[346:444])
#Para 4
#mean = 1.9
#sd = 2.7
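#OPTIONAL HELPER (not part of the original analysis): the hard-coded index ranges used
#in these per-para summaries (1:117, 118:253, 254:345, 346:444) correspond to the para
#sizes reported above (117, 136, 92, 99). They could equivalently be built once and reused:
para_sizes <- c(117, 136, 92, 99)                           #sizes taken from the counts above
para_index <- split(seq_len(sum(para_sizes)), rep(1:4, para_sizes))
#example: mean genetic-kin ties per individual in para 2, as computed above
mean(colSums(R5)[para_index[[2]]] + colSums(R25)[para_index[[2]]] +
     colSums(R125)[para_index[[2]]] + colSums(R0625)[para_index[[2]]])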
#AFFINAL KIN TIES ARE TIES WITH AFFINE R>0

#PARA 1 Affine Kin ties=1811
##############################################
sum(colSums(A_R5)[1:117]+colSums(A_R25)[1:117]+colSums(A_R125)[1:117]+colSums(A_R0625)[1:117])
mean(colSums(A_R5)[1:117]+colSums(A_R25)[1:117]+colSums(A_R125)[1:117]+colSums(A_R0625)[1:117])
sd(colSums(A_R5)[1:117]+colSums(A_R25)[1:117]+colSums(A_R125)[1:117]+colSums(A_R0625)[1:117])
#Para 1
#mean = 15.5
#sd = 11.5

#PARA 2 Affine Kin ties=532
##############################################
sum(colSums(A_R5)[118:253]+colSums(A_R25)[118:253]+colSums(A_R125)[118:253]+colSums(A_R0625)[118:253])
mean(colSums(A_R5)[118:253]+colSums(A_R25)[118:253]+colSums(A_R125)[118:253]+colSums(A_R0625)[118:253])
sd(colSums(A_R5)[118:253]+colSums(A_R25)[118:253]+colSums(A_R125)[118:253]+colSums(A_R0625)[118:253])
#Para 2
#mean = 3.9
#sd = 2.5

#PARA 3 Affine Kin ties=1052
##############################################
sum(colSums(A_R5)[254:345]+colSums(A_R25)[254:345]+colSums(A_R125)[254:345]+colSums(A_R0625)[254:345])
mean(colSums(A_R5)[254:345]+colSums(A_R25)[254:345]+colSums(A_R125)[254:345]+colSums(A_R0625)[254:345])
sd(colSums(A_R5)[254:345]+colSums(A_R25)[254:345]+colSums(A_R125)[254:345]+colSums(A_R0625)[254:345])
#Para 3
#mean = 11.4
#sd = 7.5

#PARA 4 Affine Kin ties=272
##############################################
sum(colSums(A_R5)[346:444]+colSums(A_R25)[346:444]+colSums(A_R125)[346:444]+colSums(A_R0625)[346:444])
mean(colSums(A_R5)[346:444]+colSums(A_R25)[346:444]+colSums(A_R125)[346:444]+colSums(A_R0625)[346:444])
sd(colSums(A_R5)[346:444]+colSums(A_R25)[346:444]+colSums(A_R125)[346:444]+colSums(A_R0625)[346:444])
#Para 4
#mean = 2.7
#sd = 2.3

#SPOUSE TIES
#PARA 1 SPOUSE ties= 86
##############################################
sum(colSums(SPOUSE)[1:117])
mean(colSums(SPOUSE)[1:117])
sd(colSums(SPOUSE)[1:117])
#Para 1
#mean = 0.7
#sd = 0.5

#PARA 2 SPOUSE ties= 98
##############################################
sum(colSums(SPOUSE)[118:253])
mean(colSums(SPOUSE)[118:253])
sd(colSums(SPOUSE)[118:253])
#Para 2
#mean = 0.7
#sd = 0.5

#PARA 3 SPOUSE ties= 76
##############################################
sum(colSums(SPOUSE)[254:345])
mean(colSums(SPOUSE)[254:345])
sd(colSums(SPOUSE)[254:345])
#Para 3
#mean = 0.8
#sd = 0.4

#PARA 4 SPOUSE ties= 78
##############################################
sum(colSums(SPOUSE)[346:444])
mean(colSums(SPOUSE)[346:444])
sd(colSums(SPOUSE)[346:444])
#Para 4
#mean = 0.8
#sd = 0.4

#CLOSE FRIEND TIES
#PARA 1 CFRIEND ties= 125
##############################################
sum(colSums(CFRIEND)[1:117])
mean(colSums(CFRIEND)[1:117])
sd(colSums(CFRIEND)[1:117])
#Para 1
#mean = 1.1
#sd = 1.4

#PARA 2 CFRIEND ties= 199
##############################################
sum(colSums(CFRIEND)[118:253])
mean(colSums(CFRIEND)[118:253])
sd(colSums(CFRIEND)[118:253])
#Para 2
#mean = 1.5
#sd = 1.5

#PARA 3 CFRIEND ties= 115
##############################################
sum(colSums(CFRIEND)[254:345])
mean(colSums(CFRIEND)[254:345])
sd(colSums(CFRIEND)[254:345])
#Para 3
#mean = 1.25
#sd = 1.25

#PARA 4 CFRIEND ties= 127
##############################################
sum(colSums(CFRIEND)[346:444])
mean(colSums(CFRIEND)[346:444])
sd(colSums(CFRIEND)[346:444])
#Para 4
#mean = 1.3
#sd = 1.2

#VISITING FRIEND TIES
#PARA 1 VFRIEND ties= 127
##############################################
sum(colSums(VFRIEND)[1:117])
mean(colSums(VFRIEND)[1:117])
sd(colSums(VFRIEND)[1:117])
#Para 1
#mean = 1.5
#sd = 2.2

#PARA 2 VFRIEND ties= 251
##############################################
sum(colSums(VFRIEND)[118:253])
mean(colSums(VFRIEND)[118:253])
sd(colSums(VFRIEND)[118:253])
#Para 2
#mean = 1.8
#sd = 2.1

#PARA 3 VFRIEND ties= 160
##############################################
sum(colSums(VFRIEND)[254:345])
mean(colSums(VFRIEND)[254:345])
sd(colSums(VFRIEND)[254:345])
#Para 3
#mean = 1.7
#sd = 1.6

#PARA 4 VFRIEND ties= 176
##############################################
sum(colSums(VFRIEND)[346:444])
mean(colSums(VFRIEND)[346:444])
sd(colSums(VFRIEND)[346:444])
#Para 4
#mean = 1.8
#sd = 2.0

#RITUAL FRIEND TIES
#PARA 1 RFRIEND ties= 4
##############################################
sum(colSums(RFRIEND)[1:117])
mean(colSums(RFRIEND)[1:117])
sd(colSums(RFRIEND)[1:117])
#Para 1
#mean = 0.03
#sd = 0.2

#PARA 2 RFRIEND ties= 36
##############################################
sum(colSums(RFRIEND)[118:253])
mean(colSums(RFRIEND)[118:253])
sd(colSums(RFRIEND)[118:253])
#Para 2
#mean = 0.3
#sd = 0.7

#PARA 3 RFRIEND ties= 5
##############################################
sum(colSums(RFRIEND)[254:345])
mean(colSums(RFRIEND)[254:345])
sd(colSums(RFRIEND)[254:345])
#Para 3
#mean = 0.05
#sd = 0.27

#PARA 4 RFRIEND ties= 20
##############################################
sum(colSums(RFRIEND)[346:444])
mean(colSums(RFRIEND)[346:444])
sd(colSums(RFRIEND)[346:444])
#Para 4
#mean = 0.2
#sd = 0.6

#TABLE OF DISTRIBUTION OF HELP TIES BY PREDICTORS
##########################################
x<-56
Ys<-c(58,4:7,52:55,13:18,51,23:27,35:38)
T2<-matrix(ncol=3, nrow=length(Ys),0)
colnames(T2) <- c("Total","N","%")
rownames(T2)<-Ys
for (i in 1:length(Ys)){
  T2[i,1]<-(table(parray1[,,x],parray1[,,Ys[i]])[2,2]+
            table(parray1[,,x],parray1[,,Ys[i]])[2,1])
  T2[i,2]<-table(parray1[,,x],parray1[,,Ys[i]])[2,2]
  T2[i,3]<-round((table(parray1[,,x],parray1[,,Ys[i]])[2,2]/
                  (table(parray1[,,x],parray1[,,Ys[i]])[2,2]+
                   table(parray1[,,x],parray1[,,Ys[i]])[2,1]))*100, digits=3)
}
T2

#IN-TEXT DESCRIPTIVES
#############################################################
#CROSSTABS AND ODDS RATIO OF HELP GIVEN / HELP RECEIVED
ct<-table(parray1[,,58],parray1[,,56])
or<- (ct[1,1]/ct[1,2])/(ct[2,1]/ct[2,2])   #odds ratio by hand
fisher.test(ct)                            #odds ratio
over_report<-(ct[2,1]+ct[2,2])/(ct[1,2]+ct[2,2])

#MOST FREQUENT HELP TIES ARE BETWEEN EMOTIONALLY AND GEOGRAPHICALLY CLOSE NON-RELATIVES
DIST_BIN<-matrix(ncol=444,nrow=444,0)
for (i in 1:length(DIST_BIN[1,])){
  for (j in 1:length(DIST_BIN[,1])){
    if (DIST1[i,j]==1) DIST_BIN[i,j]<-1
    if (DIST2[i,j]==1) DIST_BIN[i,j]<-2
    if (DIST3[i,j]==1) DIST_BIN[i,j]<-3
    if (DIST4[i,j]==1) DIST_BIN[i,j]<-4
    if (DIST5[i,j]==1) DIST_BIN[i,j]<-5
  }
}
R_BIN<-matrix(ncol=444,nrow=444,0)
for (i in 1:length(R_BIN[1,])){
  for (j in 1:length(R_BIN[,1])){
    if (R5[i,j]==1) R_BIN[i,j]<-1
    if (R25[i,j]==1) R_BIN[i,j]<-2
    if (R125[i,j]==1) R_BIN[i,j]<-3
    if (R0625[i,j]==1) R_BIN[i,j]<-4
  }
}
table(DIST_BIN,R_BIN,parray1[,,56],parray1[,,2])

#DISTRIBUTION OF CLOSENESS
table(DIST_BIN,parray1[,,2])

#CORRELATIONS
##########################################################
#CORRELATION BETWEEN ALTER AND EGO CLOSENESS
cor.test(makevect(parray1[,,3], gsize), makevect(parray1[,,2], gsize))
#CLOSENESS AND GEO DISTANCE
cor.test(makevect(parray1[,,2], gsize), makevect(parray1[,,44], gsize))
#CLOSENESS AND RELATEDNESS
cor.test(makevect(parray1[,,2], gsize), makevect(parray1[,,43], gsize))
#CREATING THE AFFINE MATRIX (same construction as in the data-setup section above)
#############################################################
AFFINE_R<- matrix(ncol=length(SPOUSE[1,]),nrow=length(SPOUSE[,1]),0)
for (i in 1:length(SPOUSE[,1])){
  for (j in 1:length(SPOUSE[1,])){
    if (SPOUSE[i,j]==1){
      for (k in 1:length(SPOUSE[1,])){
        AFFINE_R[i,k]<-RELATEDNESS[j,k]
      }
    }
  }
}
table(AFFINE_R)
AFFINE_R<-pmax(AFFINE_R,t(AFFINE_R))
table(AFFINE_R)
table(makevect(parray1[,,43],gsize), makevect(AFFINE_R,gsize))

#CLOSENESS AND AFFINE RELATEDNESS
cor.test(makevect(parray1[,,2], gsize), makevect(AFFINE_R, gsize))

#DISTRIBUTION OF FRIEND CATEGORIES
########################################################################
#CLOSE FRIEND
dist_cfriend<-matrix(ncol=10,nrow=4,0)
colnames(dist_cfriend)<-c("Para1","%","Para2","%","Para3","%","Para4","%","Total","%")
dist_cfriend[,1]<-table(table(gr1$cfriend,gr1$EUNIQUEID)[2,])
dist_cfriend[,2]<-round((table(table(gr1$cfriend,gr1$EUNIQUEID)[2,])/sum(table(gr1$cfriend,gr1$EUNIQUEID)[2,]))*100,1)
dist_cfriend[,3]<-table(table(gr2$cfriend,gr2$EUNIQUEID)[2,])
dist_cfriend[,4]<-round((table(table(gr2$cfriend,gr2$EUNIQUEID)[2,])/sum(table(gr2$cfriend,gr2$EUNIQUEID)[2,]))*100,1)
dist_cfriend[,5]<-table(table(gr3$cfriend,gr3$EUNIQUEID)[2,])
dist_cfriend[,6]<-round((table(table(gr3$cfriend,gr3$EUNIQUEID)[2,])/sum(table(gr3$cfriend,gr3$EUNIQUEID)[2,]))*100,1)
dist_cfriend[,7]<-table(table(gr4$cfriend,gr4$EUNIQUEID)[2,])
dist_cfriend[,8]<-round((table(table(gr4$cfriend,gr4$EUNIQUEID)[2,])/sum(table(gr4$cfriend,gr4$EUNIQUEID)[2,]))*100,1)
dist_cfriend[,9]<-table(table(net1$cfriend,net1$EUNIQUEID)[2,])
dist_cfriend[,10]<-round((table(table(net1$cfriend,net1$EUNIQUEID)[2,])/sum(table(net1$cfriend,net1$EUNIQUEID)[2,]))*100,1)
dist_cfriend

#VISITING FRIEND
dist_vfriend<-matrix(ncol=10,nrow=4,0)
colnames(dist_vfriend)<-c("Para1","%","Para2","%","Para3","%","Para4","%","Total","%")
dist_vfriend[,1]<-table(table(gr1$vfriend,gr1$EUNIQUEID)[2,])
dist_vfriend[,2]<-round((table(table(gr1$vfriend,gr1$EUNIQUEID)[2,])/sum(table(gr1$vfriend,gr1$EUNIQUEID)[2,]))*100,1)
dist_vfriend[,3]<-table(table(gr2$vfriend,gr2$EUNIQUEID)[2,])
dist_vfriend[,4]<-round((table(table(gr2$vfriend,gr2$EUNIQUEID)[2,])/sum(table(gr2$vfriend,gr2$EUNIQUEID)[2,]))*100,1)
dist_vfriend[,5]<-table(table(gr3$vfriend,gr3$EUNIQUEID)[2,])
dist_vfriend[,6]<-round((table(table(gr3$vfriend,gr3$EUNIQUEID)[2,])/sum(table(gr3$vfriend,gr3$EUNIQUEID)[2,]))*100,1)
dist_vfriend[,7]<-table(table(gr4$vfriend,gr4$EUNIQUEID)[2,])
dist_vfriend[,8]<-round((table(table(gr4$vfriend,gr4$EUNIQUEID)[2,])/sum(table(gr4$vfriend,gr4$EUNIQUEID)[2,]))*100,1)
dist_vfriend[,9]<-table(table(net1$vfriend,net1$EUNIQUEID)[2,])
dist_vfriend[,10]<-round((table(table(net1$vfriend,net1$EUNIQUEID)[2,])/sum(table(net1$vfriend,net1$EUNIQUEID)[2,]))*100,1)
dist_vfriend
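#OPTIONAL SKETCH (not part of the original analysis): the dist_cfriend / dist_vfriend
#blocks above repeat the same count-and-percentage computation for each para. A small
#hypothetical helper could build one count/percent pair; it reproduces the Para1
#columns of dist_cfriend when applied to gr1.
count_pct <- function(dat, var) {
  counts <- table(table(dat[[var]], dat$EUNIQUEID)[2, ])   #number of egos with each tie count
  cbind(N = counts, pct = round(100 * counts / sum(counts), 1))
}
count_pct(gr1, "cfriend")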
#DISTRIBUTION OF CLOSENESS CATEGORIES
########################################################################
#CLOSENESS
#note: as written, column 1 holds Para 1 means and column 2 holds Para 1 SDs (despite the
#"%" label), and only Paras 1 and 2 are filled; the binned version below covers all paras
dist_Close<-matrix(ncol=10,nrow=5,0)
colnames(dist_Close)<-c("Para1","%","Para2","%","Para3","%","Para4","%","Total","%")
dist_Close[1,1]<-mean(table(gr1$Close,gr1$EUNIQUEID)[1,])
dist_Close[2,1]<-mean(table(gr1$Close,gr1$EUNIQUEID)[2,])
dist_Close[3,1]<-mean(table(gr1$Close,gr1$EUNIQUEID)[3,])
dist_Close[4,1]<-mean(table(gr1$Close,gr1$EUNIQUEID)[4,])
dist_Close[5,1]<-mean(table(gr1$Close,gr1$EUNIQUEID)[5,])
dist_Close[1,2]<-sd(table(gr1$Close,gr1$EUNIQUEID)[1,])
dist_Close[2,2]<-sd(table(gr1$Close,gr1$EUNIQUEID)[2,])
dist_Close[3,2]<-sd(table(gr1$Close,gr1$EUNIQUEID)[3,])
dist_Close[4,2]<-sd(table(gr1$Close,gr1$EUNIQUEID)[4,])
dist_Close[5,2]<-sd(table(gr1$Close,gr1$EUNIQUEID)[5,])
dist_Close[1,3]<-mean(table(gr2$Close,gr2$EUNIQUEID)[1,])
dist_Close[2,3]<-mean(table(gr2$Close,gr2$EUNIQUEID)[2,])
dist_Close[3,3]<-mean(table(gr2$Close,gr2$EUNIQUEID)[3,])
dist_Close[4,3]<-mean(table(gr2$Close,gr2$EUNIQUEID)[4,])
dist_Close[5,3]<-mean(table(gr2$Close,gr2$EUNIQUEID)[5,])
dist_Close

#DISTRIBUTION OF CLOSENESS CATEGORIES (COUNTS AND PERCENTAGES)
########################################################################
#CLOSENESS
dist_Close_bin<-matrix(ncol=10,nrow=5,0)
colnames(dist_Close_bin)<-c("Para1","%","Para2","%","Para3","%","Para4","%","Total","%")
dist_Close_bin[,1]<-rowSums(table(gr1$Close,gr1$EUNIQUEID))
dist_Close_bin[,2]<-round((rowSums(table(gr1$Close,gr1$EUNIQUEID))/sum(table(gr1$Close,gr1$EUNIQUEID)))*100,1)
dist_Close_bin[,3]<-rowSums(table(gr2$Close,gr2$EUNIQUEID))
dist_Close_bin[,4]<-round((rowSums(table(gr2$Close,gr2$EUNIQUEID))/sum(table(gr2$Close,gr2$EUNIQUEID)))*100,1)
dist_Close_bin[,5]<-rowSums(table(gr3$Close,gr3$EUNIQUEID))
dist_Close_bin[,6]<-round((rowSums(table(gr3$Close,gr3$EUNIQUEID))/sum(table(gr3$Close,gr3$EUNIQUEID)))*100,1)
dist_Close_bin[,7]<-rowSums(table(gr4$Close,gr4$EUNIQUEID))
dist_Close_bin[,8]<-round((rowSums(table(gr4$Close,gr4$EUNIQUEID))/sum(table(gr4$Close,gr4$EUNIQUEID)))*100,1)
dist_Close_bin[,9]<-rowSums(table(net1$Close,net1$EUNIQUEID))
dist_Close_bin[,10]<-round((rowSums(table(net1$Close,net1$EUNIQUEID))/sum(table(net1$Close,net1$EUNIQUEID)))*100,1)
dist_Close_bin

#DISTRIBUTION OF GEOGRAPHIC PROXIMITY
###########################################################
#MEAN DISTANCE IN EACH PARA
mean_dist<-matrix(ncol=2, nrow=5,0)
colnames(mean_dist)<-c("Mean","SD")
rownames(mean_dist)<-c("Para1","Para2","Para3","Para4","Total")
mean_dist[1,1]<-mean(gr1$distance)
mean_dist[1,2]<-sd(gr1$distance)
mean_dist[2,1]<-mean(gr2$distance)
mean_dist[2,2]<-sd(gr2$distance)
mean_dist[3,1]<-mean(gr3$distance)
mean_dist[3,2]<-sd(gr3$distance)
mean_dist[4,1]<-mean(gr4$distance)
mean_dist[4,2]<-sd(gr4$distance)
mean_dist[5,1]<-mean(net1$distance)
mean_dist[5,2]<-sd(net1$distance)
mean_dist