diff --git a/diary.md b/diary.md
index 861d410..bccd86c 100644
--- a/diary.md
+++ b/diary.md
@@ -49,3 +49,26 @@ Decisionmaking in humans seems to be more driven by a bimodal choose-self or ave
 The model should be run on the more basic manipulations to begin with.
+## 25/02/2019
+
+Using the choose-self-or-average strategy under the basic manipulations of advice noise and bad advice, the agents show extreme sensitivity to the parameter values. It is not yet clear whether they can remain stable at an outcome between 0 and 1 for some parameter values, or whether 0 and 1 are the only stable points at all parameter values. There is potentially a saddle effect whereby parameter values below some critical number eventually stabilise at 0, while values above it eventually stabilise at 1.
+
+Note that 0 and 1 refer to probabilities of choosing self rather than averaging, not to the resultant mean advice weight. Because simple averaging always occurs where the probability is 0, the effective weight on an agent's own estimate ranges from .5 to 1 for these models.
+
+![Noisy advice](results/2019-02-21_23-50-17_d1-a2_graph.png "Note the remarkable sensitivity to parameter values")
+
+![Bad advice](results/2019-02-23_15-15-17_Uncapped_bad-advice_graph.png "An even closer examination of the potential saddle point.")
+
+## 01/03/2019
+
+Further exploration in Model 19 demonstrates that the relationship is not as clear as previously thought. Over 3000 generations it is plausible that the stability-at-extremes effect emerges, but simulation over more generations is required.
+
+![Weighted-average strategy](results/2019-03-01_01-57-22_Weighted-Average-with bad advice_graph.png "The existence of a fairly stable mid-point solution seems to be detectable at .98 weighting")
+
+![Pick-or-average strategy](results/2019-03-01_05-55-08_Pick-or-Average-with bad advice_graph.png "Plausibly even the most midrange value (.98) is tending towards a stable extreme value.")
+
+For computational reasons these longer runs need to be broken up into multiple rounds. Each generation acts as a save point for the models, so they can be resumed from previous saves without compromise. The core package needs to be updated to accept an existing data frame as input.
+
+## 04/03/2019
+
+A **critical** mistake in Model 20 means the previous results all used the pick-or-average strategy. The models must be rerun now that Model 20 has been fixed to use the correct weighted-average function where applicable.
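To make the 25/02/2019 distinction concrete, here is a minimal sketch of the two final-decision rules. The helper names are illustrative only; the actual implementations are `uncappedPickOrAverageFun` and `uncappedWeightedAvgFun` in model19.R/model20.R, which additionally add advice noise and handle missing advice.

```r
# Minimal illustration of the two strategies; egoBias plays a different role in each.
pick_or_average <- function(initial, advice, egoBias) {
  # keep the initial estimate with probability egoBias, otherwise simple (.5/.5) averaging
  keep <- runif(length(initial)) < egoBias
  ifelse(keep, initial, (initial + advice) / 2)
}

weighted_average <- function(initial, advice, egoBias) {
  # continuous weighting of the initial estimate against the advice
  egoBias * initial + (1 - egoBias) * advice
}

# Even at egoBias = 0 the pick-or-average agent never weights advice above .5,
# which is why its outcome scale runs from .5 to 1 rather than 0 to 1.
pick_or_average(initial = 60, advice = 40, egoBias = 0)   # always 50
weighted_average(initial = 60, advice = 40, egoBias = 0)  # 40
```
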
diff --git a/evoSim/evoSim/R/evoSim.R b/evoSim/evoSim/R/evoSim.R index 6cc5c4d..13336b6 100644 --- a/evoSim/evoSim/R/evoSim.R +++ b/evoSim/evoSim/R/evoSim.R @@ -303,14 +303,14 @@ selectParents <- function(modelParams, agents, world, ties) { # the others get weighted by relative fitness which are transformed to +ve values tmp$fitness <- tmp$fitness - min(tmp$fitness) + 1 # scale appropriately - while(any(tmp$fitness < 10)) + while (any(tmp$fitness < 10)) tmp$fitness <- tmp$fitness * 10 # and round off tmp$fitness <- round(tmp$fitness) tickets <- vector(length = sum(tmp$fitness)) # each success buys a ticket in the draw i <- 0 - for(a in 1:nrow(tmp)) { - tickets[(i+1):(i+1+tmp$fitness[a])] <- a + for (a in 1:nrow(tmp)) { + tickets[(i + 1):(i + 1 + tmp$fitness[a])] <- a i <- i + 1 + tmp$fitness[a] } winners <- sample(tickets, modelParams$agentCount, replace = T) @@ -326,6 +326,8 @@ selectParents <- function(modelParams, agents, world, ties) { #' @param decisionCount number of decisions made each generation #' @param generationCount number of generations (duration of model) #' @param mutationChance probability of a mutation occuring +#' @param genZero data frame of agents to be used as the starting generation. If +#' this is NULL the first generation is created using the makeAgentsFun #' @param learnRate scaling factor for updating connection weights #' @param other list used for passing other arguments to custom functions. #' Accessed by \code{modelParams$other} @@ -376,6 +378,7 @@ evoSim <- function(agentCount, decisionCount, generationCount, mutationChance, + genZero = NULL, learnRate = 0.00, other = NULL, makeAgentsFun = NULL, @@ -413,6 +416,7 @@ evoSim <- function(agentCount, recordDecisions = recordDecisions) n <- modelParams$agentCount * modelParams$generationCount + agents <- data.frame(id = 1:n, genId = rep(1:modelParams$agentCount, modelParams$generationCount), generation = rep(1:modelParams$generationCount, each = modelParams$agentCount), @@ -423,8 +427,39 @@ evoSim <- function(agentCount, advice = rep(NA, n), finalDecision = rep(NA, n)) + tStart <- Sys.time() + + # overwrite generation by spawning from genZero if necessary + # N.B. 
the ties are regenerated, so this will need rewriting if ties are + # important to remember over cycle breaks + if (!is.null(genZero)) { + lastGen <- max(genZero$generation) + parents <- modelParams$selectParents(modelParams, genZero, + list(generation = lastGen, + decision = 0), + NULL) + + tmp <- modelParams$makeAgents(modelParams, genZero, parents = parents) + + # Update the generation values for compatability + agents$generation <- rep((lastGen + 1):(lastGen + modelParams$generationCount), + each = modelParams$agentCount) + } else { + tmp <- modelParams$makeAgents(modelParams) + } + + tmp$agents$fitness <- 0 + + # ensure all column names appear in both hardcoded and user-supplied data frames + agents[, names(tmp$agents)[which(!(names(tmp$agents) %in% names(agents)))]] <- NA + + # fill in the initial generation data in the correct columns + agents[agents$generation == min(agents$generation), names(agents) %in% names(tmp$agents)] <- + tmp$agents[,names(tmp$agents)[order(match(names(tmp$agents), names(agents)))]] + ties <- tmp$ties + n <- n * modelParams$decisionCount - if(recordDecisions) + if (recordDecisions) decisions <- data.frame(id = rep(agents$id, each = modelParams$decisionCount), genId = rep(agents$genId, each = modelParams$decisionCount), generation = rep(agents$generation, each = modelParams$decisionCount), @@ -436,22 +471,13 @@ evoSim <- function(agentCount, advisor = rep(NA, n), advice = rep(NA, n), finalDecision = rep(NA, n)) - - tStart <- Sys.time() - tmp <- modelParams$makeAgents(modelParams) - tmp$agents$fitness <- 0 - # ensure all column names appear in both hardcoded and user-supplied data frames - agents[, names(tmp$agents)[which(!(names(tmp$agents) %in% names(agents)))]] <- NA - - # fill in the initial generation data in the correct columns - agents[agents$generation==1, names(agents) %in% names(tmp$agents)] <- - tmp$agents[,names(tmp$agents)[order(match(names(tmp$agents), names(agents)))]] - ties <- tmp$ties - - for(g in 1:modelParams$generationCount) { + + genStart <- min(agents$generation) + + for (g in genStart:max(agents$generation)) { # Make decisions - mask <- which(agents$generation==g) - for(d in 1:modelParams$decisionCount) { + mask <- which(agents$generation == g) + for (d in 1:modelParams$decisionCount) { # Correct answer (same for all agents) world <- list(generation = g, decision = d) world$state <- modelParams$getWorldState(modelParams, world) @@ -466,20 +492,20 @@ evoSim <- function(agentCount, # Update connections ties <- modelParams$updateConnections(modelParams, agents, world, ties) # Record decision - if(recordDecisions) + if (recordDecisions & (g %% recordDecisions == 0)) decisions[decisions$generation == g & decisions$decision == d, c('id', 'genId', 'generation', 'fitness', 'egoBias', 'initialDecision', 'advisor', 'advice', 'finalDecision')] <- agents[mask, c('id', 'genId', 'generation', 'fitness', 'egoBias', 'initialDecision', 'advisor', 'advice', 'finalDecision')] } - if(g==modelParams$generationCount) + if (g == modelParams$generationCount) next() # Evolve parents <- modelParams$selectParents(modelParams, agents, world, ties) tmp <- modelParams$makeAgents(modelParams, agents[agents$generation == g, ], parents) # combine new generation into full data frame while allowing for mismatched column orders - agents[agents$generation==g+1, names(agents) %in% names(tmp$agents)] <- + agents[agents$generation == g + 1, names(agents) %in% names(tmp$agents)] <- tmp$agents[,names(tmp$agents)[order(match(names(tmp$agents), names(agents)))]] ties <- tmp$ties 
} @@ -487,12 +513,16 @@ evoSim <- function(agentCount, tEnd <- Sys.time() tElapsed <- as.numeric(format(tEnd,"%s")) - as.numeric(format(tStart,"%s")) # Return an output containing inputs and the agents data frame - modelData <- list(model = modelParams, - # to avoid duplicating info we reduce agents dataframe to remove columns about decisions - agents = agents[,-which(names(agents) %in% - c('initialDecision', 'advisor', 'advice', 'finalDecsision'))], - duration = tElapsed) - if(recordDecisions) - modelData <- c(modelData, list(decisions = decisions)) + + if (recordDecisions) { + modelData <- list(model = modelParams, + # to avoid duplicating info we reduce agents dataframe to remove columns about decisions + agents = agents[,-which(names(agents) %in% + c('initialDecision', 'advisor', 'advice', 'finalDecsision'))], + decisions = decisions, + duration = tElapsed) + } else { + modelData <- list(model = modelParams, agents = agents, duration = tElapsed) + } return(modelData) } diff --git a/model18.R b/model18.R index d7d62a2..2de6367 100644 --- a/model18.R +++ b/model18.R @@ -47,8 +47,9 @@ runModel <- function(spec) { data <- evoSim(agentCount = spec$agents, agentDegree = spec$degree, decisionCount = spec$decisions, - generationCount = 3000, + generationCount = 1000, mutationChance = 0.01, + # recordDecisions = 100, other = list(sensitivity = spec$sensitivity, sensitivitySD = spec$sensitivitySD, startingEgoBias = spec$startingEgoBias, @@ -79,13 +80,10 @@ runModel <- function(spec) { adviceNoise = rep(NA, modelParams$agentCount), confidenceScaling = sample(1:5, 1)) - # Overwrite the agents' egobias and sensitivity by + # Overwrite the agents' egobias by # inheritance from parents where applicable if (!is.null(parents)) { - if (modelParams$other$manipulation) - evolveCols <- c('egoBias', 'sensitivity') - else - evolveCols <- c('egoBias') + evolveCols <- c('egoBias') makeAgents.agents[ , evolveCols] <- previousGeneration[parents, evolveCols] # mutants inherit from a normal distribution for (x in evolveCols) { @@ -138,7 +136,9 @@ runModel <- function(spec) { # Decision functions - uncapped continuous (default), capped continuous, and discrete #### uncappedDecisionFun <- function(modelParams, agents, world, ties, initial = F) { - adviceNoise <- ifelse(modelParams$other$manipulation, modelParams$other$adviceNoise, 0) + adviceNoise <- ifelse(modelParams$other$manipulation > 0, + modelParams$other$adviceNoise[modelParams$other$manipulation], + 0) mask <- which(agents$generation == world$generation) if (initial) { # initial decision - look and see @@ -166,7 +166,9 @@ uncappedDecisionFun <- function(modelParams, agents, world, ties, initial = F) { } cappedDecisionFun <- function(modelParams, agents, world, ties, initial = F) { - adviceNoise <- ifelse(modelParams$other$manipulation, modelParams$other$adviceNoise, 0) + adviceNoise <- ifelse(modelParams$other$manipulation > 0, + modelParams$other$adviceNoise[modelParams$other$manipulation], + 0) mask <- which(agents$generation == world$generation) if (initial) { # initial decision - look and see @@ -245,7 +247,9 @@ categoricalFitnessFun <- function(modelParams, agents, world, ties) { # Advice functions - there's noisy advice and bad advice #### noisyAdviceFun <- function(modelParams, agents, world, ties) { - adviceNoise <- ifelse(modelParams$other$manipulation, modelParams$other$adviceNoise, 0) + adviceNoise <- ifelse(modelParams$other$manipulation > 0, + modelParams$other$adviceNoise[modelParams$other$manipulation], + 0) mask <- which(agents$generation == 
world$generation) agents$advisor[mask] <- apply(ties, 1, function(x) sample(which(x != 0),1)) # Fetch advice as a vector @@ -260,7 +264,9 @@ noisyAdviceFun <- function(modelParams, agents, world, ties) { } badAdviceFun <- function(modelParams, agents, world, ties) { - badAdviceProb <- ifelse(modelParams$other$manipulation, modelParams$other$adviceNoise/100, 0) + badAdviceProb <- ifelse(modelParams$other$manipulation > 0, + modelParams$other$adviceNoise[modelParams$other$manipulation]/100, + 0) mask <- which(agents$generation == world$generation) agents$advisor[mask] <- apply(ties, 1, function(x) sample(which(x != 0),1)) # Fetch advice as a vector @@ -291,12 +297,11 @@ confAdviceFun <- function(modelParams, agents, world, ties) { return(agents) } -for (decisionType in 3) { - for (adviceType in 4) { +for (decisionType in 1) { + for (adviceType in 2) { # Storage path for results resultsPath <- ifelse(ARC,'results/','results/') time <- format(Sys.time(), "%F_%H-%M-%S") - resultsPath <- paste0(resultsPath,'d',decisionType,'a',adviceType,'_',time) # Clear the result storage variables suppressWarnings(rm('rawdata')) @@ -304,13 +309,14 @@ for (decisionType in 3) { # Parameter space to explore specs <- list() for (s in c(1, 10)) - for (x in c(F, T)) - specs[[length(specs) + 1]] <- list(agents = 1000, degree = 10,decisions = 30, - sensitivity = s,sensitivitySD = s, - startingEgoBias = .45, # .45 is neither optimal nor extreme - adviceNoise = 5, - manipulation = x, - wd = getwd()) + for (x in 1:5) + specs[[length(specs) + 1]] <- list(agents = 1000, degree = 10, + decisions = 30, + sensitivity = s, sensitivitySD = s, + startingEgoBias = .45, # .45 is neither optimal nor extreme + adviceNoise = seq(.9, 1, .02), + manipulation = x, + wd = getwd()) if (decisionType == 1) { for (i in 1:length(specs)) { @@ -370,6 +376,9 @@ for (decisionType in 3) { # Run parallel repetitions of the model with these settings startTime <- Sys.time() + + print(paste('Processing models:', specs[[1]]$shortDesc)) + if (!ARC) { degreeResults <- lapply(specs, runModel) } else { @@ -387,12 +396,14 @@ for (decisionType in 3) { print(paste0('...complete.')) print(Sys.time() - startTime) - print('Estimated data size:') - print(object.size(rawdata), units = 'auto') + print(paste0('Estimated data size: ', object.size(rawdata) * 1e-6, 'MB')) print('Saving data...') - # Save data - save(rawdata, file = paste(resultsPath, 'rawdata.Rdata')) - print('...saved rawdata...') + + # Save data with a nice name + resultsPath <- paste0(resultsPath, time, '_', + sub(" ", "-", + sub(" decisions with ", "_", specs[[1]]$shortDesc))) + save(rawdata, file = paste0(resultsPath, '_rawdata.Rdata')) # Smaller datafile for stopping me running out of memory during analysis allAgents <- NULL #allDecisions <- NULL @@ -405,6 +416,7 @@ for (decisionType in 3) { rd$agents$sdSensitivity <- rep(rd$model$other$sensitivitySD,nrow(rd$agents)) rd$agents$startingEgoBias <- rep(rd$model$other$startingEgoBias,nrow(rd$agents)) rd$agents$manipulation <- rep(rd$model$other$manipulation,nrow(rd$agents)) + rd$agents$manipulationValue <- rd$model$other$adviceNoise[rd$agents$manipulation] rd$agents$description <- rep(rd$model$other$shortDesc, nrow(rd$agents)) # only take a subset because of memory limitations # allAgents <- rbind(allAgents, rd$agents) @@ -412,15 +424,13 @@ for (decisionType in 3) { | (rd$agents$generation %% 25 == 1 & rd$agents$generation < 250), ]) #allDecisions <- rbind(allDecisions, rd$decisions[rd$decisions$generation %in% allAgents$generation, ]) } - #toSave 
<- list(allAgents, allDecisions) - save(allAgents, file = paste(resultsPath, 'rawdata_subset.Rdata')) - #rm('toSave') - print('...saved subset...') + + save(allAgents, file = paste0(resultsPath, '_rawdata-subset.Rdata')) # Plot ggplot(allAgents, aes(x = generation, y = egoBias, - colour = manipulation)) + + colour = factor(manipulationValue))) + geom_hline(yintercept = 0.5, linetype = 'dashed') + stat_summary(geom = 'point', fun.y = mean, size = 3, alpha = 0.25) + stat_summary(fun.data = mean_cl_boot, fun.args = (conf.int = .99), geom = 'errorbar', size = 1) + @@ -428,19 +438,22 @@ for (decisionType in 3) { facet_grid(~meanSensitivity, labeller = label_both) + labs(title = allAgents$description[1]) + style - ggsave(paste(resultsPath, 'graph.png')) + ggsave(paste0(resultsPath, '_graph.png')) - print('...saved graph...') print('...data saved.') } } -ggplot(allAgents[allAgents$meanSensitivity == 1 & allAgents$generation %% 1000 == 1, ], - aes(x = egoBias, y = sensitivity, colour = fitness)) + - geom_point() + - facet_wrap(~generation, labeller = label_both) + - # stat_summary(geom = 'point', fun.y = mean, size = 3) + - style +# For all of the agents, look at how the egocentric bias affects the fitness. +# If there really is a step function, we might expect a non-normal distribution, +# and that distribution's peak should shift as the parameter value changes. +allAgents$egoBiasBin <- cut(allAgents$egoBias, 10) +ggplot(allAgents, aes(x = egoBiasBin, y = fitness, colour = factor(meanSensitivity))) + + stat_summary(geom = 'point', fun.y = mean) + + stat_summary(geom = 'errorbar', fun.data = mean_cl_normal) + + labs(title = "Fitness by egoBias") + + facet_wrap(~manipulationValue, labeller = label_both) + + style # Cleanup if (ARC) diff --git a/model19.R b/model19.R new file mode 100644 index 0000000..1ad17a5 --- /dev/null +++ b/model19.R @@ -0,0 +1,382 @@ +# Model 18 #### + +#' Here we allow sensitivity to evolve (up to a maximum (in)sensitivity of .1). +#' We expect this senstivity evolution to initially suppress egocentric bias, +#' and then increase it, although there are various reasons why this may not +#' happen. +#' +#' The outcome measure has changed from a self/other weighting to a probability +#' of averaging (weighting = .5) as opposed to simply sticking with the initial +#' response. 
+ + +# Libraries +library(parallel) +library(ggplot2) + +style <- theme_light() + + theme(legend.position = 'top', + panel.grid.major.x = element_blank(), + panel.grid.minor = element_blank()) + +parallel <- T + + +ARC <- Sys.info()[[1]] != 'Windows' +if (ARC) + setwd(paste0(getwd(), '/EvoEgoBias')) +if (parallel) + ARC <- T + +if (ARC) { + print(Sys.info()) + # Set up the parallel execution capabilities + nCores <- detectCores() + cl <- makeCluster(nCores) + print(paste('Running in parallel on', nCores, 'cores.')) + reps <- nCores +} else { + reps <- 1 +} + +# Define the function +runModel <- function(spec) { + print(spec) + setwd(spec$wd) + source('evoSim/evoSim/R/evoSim.R') + data <- evoSim(agentCount = spec$agents, + agentDegree = spec$degree, + decisionCount = spec$decisions, + generationCount = 3000, + mutationChance = 0.01, + # recordDecisions = 100, + other = list(sensitivity = spec$sensitivity, + sensitivitySD = spec$sensitivitySD, + startingEgoBias = spec$startingEgoBias, + adviceNoise = spec$adviceNoise, + manipulation = spec$manipulation, + shortDesc = spec$shortDesc), + makeAgentsFun = function(modelParams, previousGeneration = NULL, parents = NULL) { + # Population size and generation extracted from current population or by + # modelParams$agentCount + if (is.null(previousGeneration)) { + n <- modelParams$agentCount + g <- 0 + } + else { + n <- dim(previousGeneration)[1] + g <- previousGeneration$generation[1] + } + + g <- g + 1 # increment generation + + # mutants and fresh spawns get randomly assigned egoBias + makeAgents.agents <- data.frame(egoBias = rep(modelParams$other$startingEgoBias, + modelParams$agentCount), + sensitivity = abs(rnorm(modelParams$agentCount, + modelParams$other$sensitivity, + modelParams$other$sensitivitySD)), + generation = rep(g, modelParams$agentCount), + adviceNoise = rep(NA, modelParams$agentCount), + confidenceScaling = sample(1:5, 1)) + + # Overwrite the agents' egobias by + # inheritance from parents where applicable + if (!is.null(parents)) { + evolveCols <- c('egoBias') + makeAgents.agents[ , evolveCols] <- previousGeneration[parents, evolveCols] + # mutants inherit from a normal distribution + for (x in evolveCols) { + mutants <- runif(modelParams$agentCount) < modelParams$mutationChance + makeAgents.agents[mutants, x] <- rnorm(sum(mutants), + previousGeneration[parents[mutants], x], + 0.1) + } + } + + # Keep values in sensible ranges + makeAgents.agents$egoBias <- clamp(makeAgents.agents$egoBias, maxVal = 1, minVal = 0) + makeAgents.agents$sensitivity <- clamp(makeAgents.agents$sensitivity, maxVal = 1, minVal = 0.1) + + # Connect agents together + ties <- modelParams$connectAgents(modelParams, makeAgents.agents) + makeAgents.agents$degree <- sapply(1:nrow(makeAgents.agents), getDegree, ties) + return(list(agents = makeAgents.agents, ties = ties)) + }, + selectParentsFun = function(modelParams, agents, world, ties) { + tmp <- agents[which(agents$generation == world$generation),] + tmp <- tmp[order(tmp$fitness, decreasing = T),] + # the others get weighted by relative fitness which are transformed to +ve values + tmp$fitness <- tmp$fitness - min(tmp$fitness) + 1 + # scale appropriately + while (any(abs(tmp$fitness) < 10)) + tmp$fitness <- tmp$fitness * 10 + # and round off + tmp$fitness <- round(tmp$fitness) + tickets <- vector(length = sum(tmp$fitness)) # each success buys a ticket in the draw + i <- 0 + for (a in 1:nrow(tmp)) { + tickets[(i + 1):(i + 1 + tmp$fitness[a])] <- a + i <- i + 1 + tmp$fitness[a] + } + winners <- sample(tickets, 
modelParams$agentCount, replace = T) + # The winners clone their egocentric discounting + winners <- tmp[winners,'genId'] + return(winners) + }, + getAdviceFun = spec$getAdviceFun, + getWorldStateFun = spec$getWorldStateFun, + getDecisionFun = spec$getDecisionFun, + getFitnessFun = spec$getFitnessFun) + # save results + rawdata <- data + rawdata$rep <- 1 + return(list(rawdata = rawdata)) +} + +# Decision functions - uncapped continuous (default), capped continuous, and discrete #### +uncappedPickOrAverageFun <- function(modelParams, agents, world, ties, initial = F) { + adviceNoise <- ifelse(modelParams$other$manipulation > 0, + modelParams$other$adviceNoise[modelParams$other$manipulation], + 0) + mask <- which(agents$generation == world$generation) + if (initial) { + # initial decision - look and see + n <- length(mask) + agents$initialDecision[mask] <- rnorm(n, + rep(world$state, n), + clamp(agents$sensitivity[mask],Inf)) + } else { + # Final decision - take advice with probability (1 - egoBias) + # Taking advice equates to simple averaging + # Use vector math to do the advice taking + out <- NULL + noise <- rnorm(length(mask), 0, adviceNoise) + roll <- runif(length(mask)) + agents$adviceNoise[mask] <- noise + out <- ifelse(roll >= agents$egoBias[mask], + (agents$initialDecision[mask] + agents$advice[mask] + noise) / 2, + agents$initialDecision[mask]) + #out <- clamp(out, 100) + out[is.na(out)] <- agents$initialDecision[mask][is.na(out)] + agents$roll[mask] <- roll + agents$finalDecision[mask] <- out + } + return(agents) +} + +uncappedWeightedAvgFun <- function(modelParams, agents, world, ties, initial = F) { + adviceNoise <- ifelse(modelParams$other$manipulation > 0, + modelParams$other$adviceNoise[modelParams$other$manipulation], + 0) + mask <- which(agents$generation == world$generation) + if (initial) { + # initial decision - look and see + n <- length(mask) + agents$initialDecision[mask] <- rnorm(n, + rep(world$state, n), + clamp(agents$sensitivity[mask],Inf)) + } else { + # Final decision - take advice with probability (1 - egoBias) + # Taking advice equates to simple averaging + # Use vector math to do the advice taking + out <- NULL + noise <- rnorm(length(mask), 0, adviceNoise) + roll <- runif(length(mask)) + agents$adviceNoise[mask] <- noise + out <- ifelse(roll >= agents$egoBias[mask], + (agents$initialDecision[mask] + agents$advice[mask] + noise) / 2, + agents$initialDecision[mask]) + #out <- clamp(out, 100) + out[is.na(out)] <- agents$initialDecision[mask][is.na(out)] + agents$roll[mask] <- roll + agents$finalDecision[mask] <- out + } + return(agents) +} + +# World state functions - return 50 and return 0-49|51-100 #### +staticWorldStateFun = function(modelParams, world) { + return(50) +} + +# Advice functions - there's noisy advice and bad advice #### +noisyAdviceFun <- function(modelParams, agents, world, ties) { + adviceNoise <- ifelse(modelParams$other$manipulation > 0, + modelParams$other$adviceNoise[modelParams$other$manipulation], + 0) + mask <- which(agents$generation == world$generation) + agents$advisor[mask] <- apply(ties, 1, function(x) sample(which(x != 0),1)) + # Fetch advice as a vector + n <- length(mask) + agents$advice[mask] <- rnorm(n, + rep(world$state, n), + clamp(agents$sensitivity[mask][agents$advisor[mask]] + + adviceNoise, + Inf)) + agents$advice[mask] <- clamp(agents$advice[mask], 100) + return(agents) +} + +badAdviceFun <- function(modelParams, agents, world, ties) { + badAdviceProb <- ifelse(modelParams$other$manipulation > 0, + 
modelParams$other$adviceNoise[modelParams$other$manipulation]/100, + 0) + mask <- which(agents$generation == world$generation) + agents$advisor[mask] <- apply(ties, 1, function(x) sample(which(x != 0),1)) + # Fetch advice as a vector + n <- length(mask) + agents$advice[mask] <- rnorm(n, + rep(world$state, n), + clamp(agents$sensitivity[mask][agents$advisor[mask]],Inf)) + badActors <- mask & (runif(n) < badAdviceProb) + # bad actors give advice as certain in the other direction + agents$advice[badActors] <- ifelse(agents$advice[badActors] < 50, + 50 + 3*modelParams$other$sensitivity, + 50 - 3*modelParams$other$sensitivity) + return(agents) +} + +for (decisionType in c(2,1)) { + for (adviceType in 2) { + # Storage path for results + resultsPath <- ifelse(ARC,'results/','results/') + time <- format(Sys.time(), "%F_%H-%M-%S") + + # Clear the result storage variables + suppressWarnings(rm('rawdata')) + + # Parameter space to explore + specs <- list() + for (s in 1) + for (x in 1:5) + specs[[length(specs) + 1]] <- list(agents = 750, degree = 10, + decisions = 30, + sensitivity = s, sensitivitySD = s, + startingEgoBias = .45, # .45 is neither optimal nor extreme + adviceNoise = c(.5, .94, .96, .98, 1.5), + manipulation = x, + wd = getwd()) + + if (decisionType == 1) { + for (i in 1:length(specs)) { + specs[[i]]$getWorldStateFun <- staticWorldStateFun + specs[[i]]$getDecisionFun <- uncappedPickOrAverageFun + specs[[i]]$shortDesc <- 'Pick-or-Average' + } + } else if (decisionType == 2) { + for (i in 1:length(specs)) { + specs[[i]]$getWorldStateFun <- staticWorldStateFun + specs[[i]]$getDecisionFun <- uncappedWeightedAvgFun + specs[[i]]$shortDesc <- 'Weighted-Average' + } + } + + # Noisy advice = advice made with +adviceNoise sd on error + if (adviceType == 1) { + for (i in 1:length(specs)) { + specs[[i]]$getAdviceFun <- noisyAdviceFun + specs[[i]]$shortDesc <- paste(specs[[i]]$shortDesc, 'with noisy advice') + } + # Bad advice = advice which is 3*mean sensitivity in the opposite direction + } else if (adviceType == 2) { + for (i in 1:length(specs)) { + specs[[i]]$getAdviceFun <- badAdviceFun + specs[[i]]$shortDesc <- paste(specs[[i]]$shortDesc, 'with bad advice') + } + # Noisy communication means noise is added at evalutation time rather than decision time + } else if (adviceType == 3) { + for (i in 1:length(specs)) { + # getAdviceFun is NULL + specs[[i]]$shortDesc <- paste(specs[[i]]$shortDesc, 'with noisy communication') + } + } + + # Run the models + + # Run parallel repetitions of the model with these settings + startTime <- Sys.time() + + print(paste('Processing models:', specs[[1]]$shortDesc)) + + if (!ARC) { + degreeResults <- lapply(specs, runModel) + } else { + print('Executing parallel operations...') + degreeResults <- parLapply(cl, specs, runModel) + } + print('...combining results...') + # Join up results + for (res in degreeResults) { + if (!exists('rawdata')) + rawdata <- list(res$rawdata) + else + rawdata[[length(rawdata) + 1]] <- res$rawdata + } + print(paste0('...complete.')) + print(Sys.time() - startTime) + + print(paste0('Estimated data size: ', object.size(rawdata) * 1e-6, 'MB')) + print('Saving data...') + + # Save data with a nice name + resultsPath <- paste0(resultsPath, time, '_', + sub(" ", "-", + sub(" decisions with ", "_", specs[[1]]$shortDesc))) + save(rawdata, file = paste0(resultsPath, '_rawdata.Rdata')) + # Smaller datafile for stopping me running out of memory during analysis + allAgents <- NULL + #allDecisions <- NULL + for (rd in rawdata) { + 
rd$agents$agentCount <- rep(rd$model$agentCount,nrow(rd$agents)) + rd$agents$agentDegree <- rep(rd$model$agentDegree,nrow(rd$agents)) + rd$agents$decisionCount <- rep(rd$model$decisionCount,nrow(rd$agents)) + rd$agents$modelDuration <- rep(rd$duration,nrow(rd$agents)) + rd$agents$meanSensitivity <- rep(rd$model$other$sensitivity,nrow(rd$agents)) + rd$agents$sdSensitivity <- rep(rd$model$other$sensitivitySD,nrow(rd$agents)) + rd$agents$startingEgoBias <- rep(rd$model$other$startingEgoBias,nrow(rd$agents)) + rd$agents$manipulation <- rep(rd$model$other$manipulation,nrow(rd$agents)) + rd$agents$manipulationValue <- rd$model$other$adviceNoise[rd$agents$manipulation] + rd$agents$description <- rep(rd$model$other$shortDesc, nrow(rd$agents)) + # only take a subset because of memory limitations + # allAgents <- rbind(allAgents, rd$agents) + allAgents <- rbind(allAgents, rd$agents[rd$agents$generation %% 50 == 1 + | (rd$agents$generation %% 25 == 1 & rd$agents$generation < 250), ]) + #allDecisions <- rbind(allDecisions, rd$decisions[rd$decisions$generation %in% allAgents$generation, ]) + } + + save(allAgents, file = paste0(resultsPath, '_rawdata-subset.Rdata')) + + # Plot + ggplot(allAgents, + aes(x = generation, y = egoBias, + colour = factor(manipulationValue))) + + geom_hline(yintercept = 0.5, linetype = 'dashed') + + stat_summary(geom = 'point', fun.y = mean, size = 3, alpha = 0.25) + + stat_summary(fun.data = mean_cl_boot, fun.args = (conf.int = .99), geom = 'errorbar', size = 1) + + scale_y_continuous(limits = c(0,1)) + + facet_grid(~meanSensitivity, labeller = label_both) + + labs(title = allAgents$description[1]) + + style + ggsave(paste0(resultsPath, '_graph.png')) + + print('...data saved.') + } +} + +# For all of the agents, look at how the egocentric bias affects the fitness. +# If there really is a step function, we might expect a non-normal distribution, +# and that distribution's peak should shift as the parameter value changes. +allAgents$egoBiasBin <- cut(allAgents$egoBias, 10) +ggplot(allAgents, aes(x = egoBiasBin, y = fitness, colour = factor(meanSensitivity))) + + stat_summary(geom = 'point', fun.y = mean) + + stat_summary(geom = 'errorbar', fun.data = mean_cl_normal) + + labs(title = "Fitness by egoBias") + + facet_wrap(~manipulationValue, labeller = label_both) + + style + +# Cleanup +if (ARC) + stopCluster(cl) +print('Complete.') + diff --git a/model20.R b/model20.R new file mode 100644 index 0000000..734a376 --- /dev/null +++ b/model20.R @@ -0,0 +1,434 @@ +# Model 18 #### + +#' Here we allow sensitivity to evolve (up to a maximum (in)sensitivity of .1). +#' We expect this senstivity evolution to initially suppress egocentric bias, +#' and then increase it, although there are various reasons why this may not +#' happen. +#' +#' The outcome measure has changed from a self/other weighting to a probability +#' of averaging (weighting = .5) as opposed to simply sticking with the initial +#' response. 
+ + +# Libraries +library(parallel) +library(ggplot2) + +style <- theme_light() + + theme(legend.position = 'top', + panel.grid.major.x = element_blank(), + panel.grid.minor = element_blank()) + +parallel <- T + + +ARC <- Sys.info()[[1]] != 'Windows' +if (ARC) + setwd(paste0(getwd(), '/EvoEgoBias')) +if (parallel) + ARC <- T + +if (ARC) { + print(Sys.info()) + # Set up the parallel execution capabilities + nCores <- detectCores() + cl <- makeCluster(nCores) + print(paste('Running in parallel on', nCores, 'cores.')) + reps <- nCores +} else { + reps <- 1 +} + +# Define the function +runModel <- function(spec) { + + tmpName <- paste0(spec$shortDesc, '-tmpfile-', + runif(1, max = 1000000), '-') + + i <- 0 + + # Break up very long models into sequential shorter models + genCount <- spec$generations + + generationZero <- NULL # model's seed generation data frame + + while (genCount > 0) { + + i <- i + 1 + + print(paste0(spec$shortDesc, " [", i, "]genCount = ", genCount)) + + setwd(spec$wd) + + # Load the evoSim package + source('evoSim/evoSim/R/evoSim.R') + + # Run the model for spec$maxGenerations + data <- evoSim(genZero = generationZero, + agentCount = spec$agents, + agentDegree = spec$degree, + decisionCount = spec$decisions, + generationCount = min(genCount, spec$maxGenerations), + mutationChance = 0.01, + # recordDecisions = 100, + other = list(sensitivity = spec$sensitivity, + sensitivitySD = spec$sensitivitySD, + startingEgoBias = spec$startingEgoBias, + adviceNoise = spec$adviceNoise, + manipulation = spec$manipulation, + shortDesc = spec$shortDesc), + makeAgentsFun = function(modelParams, previousGeneration = NULL, parents = NULL) { + # Population size and generation extracted from current population or by + # modelParams$agentCount + if (is.null(previousGeneration)) { + n <- modelParams$agentCount + g <- 0 + } + else { + n <- dim(previousGeneration)[1] + g <- previousGeneration$generation[1] + } + + g <- g + 1 # increment generation + + # mutants and fresh spawns get randomly assigned egoBias + makeAgents.agents <- data.frame(egoBias = rep(modelParams$other$startingEgoBias, + modelParams$agentCount), + sensitivity = abs(rnorm(modelParams$agentCount, + modelParams$other$sensitivity, + modelParams$other$sensitivitySD)), + generation = rep(g, modelParams$agentCount), + adviceNoise = rep(NA, modelParams$agentCount), + confidenceScaling = sample(1:5, 1)) + + # Overwrite the agents' egobias by + # inheritance from parents where applicable + if (!is.null(parents)) { + evolveCols <- c('egoBias') + makeAgents.agents[ , evolveCols] <- previousGeneration[parents, evolveCols] + # mutants inherit from a normal distribution + for (x in evolveCols) { + mutants <- runif(modelParams$agentCount) < modelParams$mutationChance + makeAgents.agents[mutants, x] <- rnorm(sum(mutants), + previousGeneration[parents[mutants], x], + 0.1) + } + } + + # Keep values in sensible ranges + makeAgents.agents$egoBias <- clamp(makeAgents.agents$egoBias, maxVal = 1, minVal = 0) + makeAgents.agents$sensitivity <- clamp(makeAgents.agents$sensitivity, maxVal = 1, minVal = 0.1) + + # Connect agents together + ties <- modelParams$connectAgents(modelParams, makeAgents.agents) + makeAgents.agents$degree <- sapply(1:nrow(makeAgents.agents), getDegree, ties) + return(list(agents = makeAgents.agents, ties = ties)) + }, + selectParentsFun = function(modelParams, agents, world, ties) { + tmp <- agents[which(agents$generation == world$generation),] + tmp <- tmp[order(tmp$fitness, decreasing = T),] + # the others get weighted by 
relative fitness which are transformed to +ve values + tmp$fitness <- tmp$fitness - min(tmp$fitness) + 1 + # scale appropriately + while (any(abs(tmp$fitness) < 10)) + tmp$fitness <- tmp$fitness * 10 + # and round off + tmp$fitness <- round(tmp$fitness) + tickets <- vector(length = sum(tmp$fitness)) # each success buys a ticket in the draw + i <- 0 + for (a in 1:nrow(tmp)) { + tickets[(i + 1):(i + 1 + tmp$fitness[a])] <- a + i <- i + 1 + tmp$fitness[a] + } + winners <- sample(tickets, modelParams$agentCount, replace = T) + # The winners clone their egocentric discounting + winners <- tmp[winners,'genId'] + return(winners) + }, + getAdviceFun = spec$getAdviceFun, + getWorldStateFun = spec$getWorldStateFun, + getDecisionFun = spec$getDecisionFun, + getFitnessFun = spec$getFitnessFun) + + # update the seed generation + generationZero <- data$agents[data$agents$generation == + max(data$agents$generation), ] + + # Slim down the results if required + if (!is.null(spec$saveEveryNthGeneration)) + data$agents <- data$agents[data$agents$generation %% + spec$saveEveryNthGeneration == 0, ] + + # Save temporary results + saveRDS(data, file = paste0(tmpName, i)) + data <- NULL + + genCount <- genCount - spec$maxGenerations + } + + # Collate and save full results + rawdata <- list() + + for (n in 1:i) { + tmp <- readRDS(paste0(tmpName, n)) + rawdata$agents <- rbind(rawdata$agents, tmp$agents) + if (is.null(rawdata$model)) + rawdata$model <- tmp$model + rawdata$duration <- c(rawdata$duration, tmp$duration) + + file.remove(paste0(tmpName, n)) + } + + rawdata$rep <- 1 + + return(list(rawdata = rawdata)) +} + +# Decision functions - uncapped continuous (default), capped continuous, and discrete #### +uncappedPickOrAverageFun <- function(modelParams, agents, world, ties, initial = F) { + adviceNoise <- ifelse(modelParams$other$manipulation > 0, + modelParams$other$adviceNoise[modelParams$other$manipulation], + 0) + mask <- which(agents$generation == world$generation) + if (initial) { + # initial decision - look and see + n <- length(mask) + agents$initialDecision[mask] <- rnorm(n, + rep(world$state, n), + clamp(agents$sensitivity[mask],Inf)) + } else { + # Final decision - take advice with probability (1 - egoBias) + # Taking advice equates to simple averaging + # Use vector math to do the advice taking + out <- NULL + noise <- rnorm(length(mask), 0, adviceNoise) + roll <- runif(length(mask)) + agents$adviceNoise[mask] <- noise + out <- ifelse(roll >= agents$egoBias[mask], + (agents$initialDecision[mask] + agents$advice[mask] + noise) / 2, + agents$initialDecision[mask]) + #out <- clamp(out, 100) + out[is.na(out)] <- agents$initialDecision[mask][is.na(out)] + agents$roll[mask] <- roll + agents$finalDecision[mask] <- out + } + return(agents) +} + +uncappedWeightedAvgFun <- function(modelParams, agents, world, ties, initial = F) { + adviceNoise <- ifelse(modelParams$other$manipulation > 0, + modelParams$other$adviceNoise[modelParams$other$manipulation], + 0) + mask <- which(agents$generation == world$generation) + if (initial) { + # initial decision - look and see + n <- length(mask) + agents$initialDecision[mask] <- rnorm(n, + rep(world$state, n), + clamp(agents$sensitivity[mask],Inf)) + } else { + # Final decision - take advice with probability (1 - egoBias) + # Taking advice equates to simple averaging + # Use vector math to do the advice taking + out <- NULL + noise <- rnorm(length(mask), 0, adviceNoise) + roll <- runif(length(mask)) + agents$adviceNoise[mask] <- noise + out <- (agents$initialDecision[mask] 
* agents$egoBias[mask]) + + ((1 - agents$egoBias[mask]) * (agents$advice[mask] + noise)) + #out <- clamp(out, 100) + out[is.na(out)] <- agents$initialDecision[mask][is.na(out)] + agents$roll[mask] <- roll + agents$finalDecision[mask] <- out + } + return(agents) +} + +# World state functions - return 50 and return 0-49|51-100 #### +staticWorldStateFun = function(modelParams, world) { + return(50) +} + +# Advice functions - there's noisy advice and bad advice #### +noisyAdviceFun <- function(modelParams, agents, world, ties) { + adviceNoise <- ifelse(modelParams$other$manipulation > 0, + modelParams$other$adviceNoise[modelParams$other$manipulation], + 0) + mask <- which(agents$generation == world$generation) + agents$advisor[mask] <- apply(ties, 1, function(x) sample(which(x != 0),1)) + # Fetch advice as a vector + n <- length(mask) + agents$advice[mask] <- rnorm(n, + rep(world$state, n), + clamp(agents$sensitivity[mask][agents$advisor[mask]] + + adviceNoise, + Inf)) + agents$advice[mask] <- clamp(agents$advice[mask], 100) + return(agents) +} + +badAdviceFun <- function(modelParams, agents, world, ties) { + badAdviceProb <- ifelse(modelParams$other$manipulation > 0, + modelParams$other$adviceNoise[modelParams$other$manipulation]/100, + 0) + mask <- which(agents$generation == world$generation) + agents$advisor[mask] <- apply(ties, 1, function(x) sample(which(x != 0),1)) + # Fetch advice as a vector + n <- length(mask) + agents$advice[mask] <- rnorm(n, + rep(world$state, n), + clamp(agents$sensitivity[mask][agents$advisor[mask]],Inf)) + badActors <- mask & (runif(n) < badAdviceProb) + # bad actors give advice as certain in the other direction + agents$advice[badActors] <- ifelse(agents$advice[badActors] < 50, + 50 + 3*modelParams$other$sensitivity, + 50 - 3*modelParams$other$sensitivity) + return(agents) +} + +for (decisionType in c(2)) { + for (adviceType in 2) { + # Storage path for results + resultsPath <- ifelse(ARC,'results/','results/') + time <- format(Sys.time(), "%F_%H-%M-%S") + + # Clear the result storage variables + suppressWarnings(rm('rawdata')) + + # Parameter space to explore + specs <- list() + for (s in 1) + for (x in 1:5) + specs[[length(specs) + 1]] <- list(agents = 750, degree = 10, + generations = 3000, + decisions = 30, + sensitivity = s, sensitivitySD = s, + startingEgoBias = .45, # .45 is neither optimal nor extreme + adviceNoise = c(.5, .94, .96, .98, 1.5), + manipulation = x, + wd = getwd(), + saveEveryNthGeneration = 50, + maxGenerations = 1500) + + if (decisionType == 1) { + for (i in 1:length(specs)) { + specs[[i]]$getWorldStateFun <- staticWorldStateFun + specs[[i]]$getDecisionFun <- uncappedPickOrAverageFun + specs[[i]]$shortDesc <- 'Pick-or-Average' + } + } else if (decisionType == 2) { + for (i in 1:length(specs)) { + specs[[i]]$getWorldStateFun <- staticWorldStateFun + specs[[i]]$getDecisionFun <- uncappedWeightedAvgFun + specs[[i]]$shortDesc <- 'Weighted-Average' + } + } + + # Noisy advice = advice made with +adviceNoise sd on error + if (adviceType == 1) { + for (i in 1:length(specs)) { + specs[[i]]$getAdviceFun <- noisyAdviceFun + specs[[i]]$shortDesc <- paste(specs[[i]]$shortDesc, 'with noisy advice') + } + # Bad advice = advice which is 3*mean sensitivity in the opposite direction + } else if (adviceType == 2) { + for (i in 1:length(specs)) { + specs[[i]]$getAdviceFun <- badAdviceFun + specs[[i]]$shortDesc <- paste(specs[[i]]$shortDesc, 'with bad advice') + } + # Noisy communication means noise is added at evalutation time rather than decision time + } 
else if (adviceType == 3) { + for (i in 1:length(specs)) { + # getAdviceFun is NULL + specs[[i]]$shortDesc <- paste(specs[[i]]$shortDesc, 'with noisy communication') + } + } + + # Run the models + + # Run parallel repetitions of the model with these settings + startTime <- Sys.time() + + print(paste('Processing models:', specs[[1]]$shortDesc)) + + if (!ARC) { + degreeResults <- lapply(specs, runModel) + } else { + print('Executing parallel operations...') + degreeResults <- parLapply(cl, specs, runModel) + } + print('...combining results...') + # Join up results + for (res in degreeResults) { + if (!exists('rawdata')) + rawdata <- list(res$rawdata) + else + rawdata[[length(rawdata) + 1]] <- res$rawdata + } + print(paste0('...complete.')) + print(Sys.time() - startTime) + + print(paste0('Estimated data size: ', object.size(rawdata) * 1e-6, 'MB')) + print('Saving data...') + + # Save data with a nice name + resultsPath <- paste0(resultsPath, time, '_', + sub(" ", "-", + sub(" decisions with ", "_", specs[[1]]$shortDesc))) + save(rawdata, file = paste0(resultsPath, '_rawdata.Rdata')) + # Smaller datafile for stopping me running out of memory during analysis + allAgents <- NULL + #allDecisions <- NULL + for (rd in rawdata) { + rd$agents$agentCount <- rep(rd$model$agentCount,nrow(rd$agents)) + rd$agents$agentDegree <- rep(rd$model$agentDegree,nrow(rd$agents)) + rd$agents$decisionCount <- rep(rd$model$decisionCount,nrow(rd$agents)) + rd$agents$modelDuration <- rep(sum(rd$duration),nrow(rd$agents)) + rd$agents$meanSensitivity <- rep(rd$model$other$sensitivity,nrow(rd$agents)) + rd$agents$sdSensitivity <- rep(rd$model$other$sensitivitySD,nrow(rd$agents)) + rd$agents$startingEgoBias <- rep(rd$model$other$startingEgoBias,nrow(rd$agents)) + rd$agents$manipulation <- rep(rd$model$other$manipulation,nrow(rd$agents)) + rd$agents$manipulationValue <- rd$model$other$adviceNoise[rd$agents$manipulation] + rd$agents$description <- rep(rd$model$other$shortDesc, nrow(rd$agents)) + # only take a subset because of memory limitations + allAgents <- rbind(allAgents, rd$agents) + # allAgents <- rbind(allAgents, rd$agents[rd$agents$generation %% 50 == 1 + # | (rd$agents$generation %% 25 == 1 & rd$agents$generation < 250), ]) + #allDecisions <- rbind(allDecisions, rd$decisions[rd$decisions$generation %in% allAgents$generation, ]) + } + + save(allAgents, file = paste0(resultsPath, '_rawdata-subset.Rdata')) + + # Plot + ggplot(allAgents, + aes(x = generation, y = egoBias, + colour = factor(manipulationValue))) + + geom_hline(yintercept = 0.5, linetype = 'dashed') + + stat_summary(geom = 'point', fun.y = mean, size = 3, alpha = 0.25) + + stat_summary(fun.data = mean_cl_boot, fun.args = (conf.int = .99), geom = 'errorbar', size = 1) + + scale_y_continuous(limits = c(0,1)) + + facet_grid(~meanSensitivity, labeller = label_both) + + labs(title = allAgents$description[1], y = 'egocentricity') + + style + ggsave(paste0(resultsPath, '_graph.png')) + + print('...data saved.') + } +} + +# For all of the agents, look at how the egocentric bias affects the fitness. +# If there really is a step function, we might expect a non-normal distribution, +# and that distribution's peak should shift as the parameter value changes. 
+allAgents$egoBiasBin <- cut(allAgents$egoBias, 10) +ggplot(allAgents, aes(x = egoBiasBin, y = fitness, colour = factor(meanSensitivity))) + + stat_summary(geom = 'point', fun.y = mean) + + stat_summary(geom = 'errorbar', fun.data = mean_cl_normal) + + labs(title = "Fitness by egoBias") + + facet_wrap(~manipulationValue, labeller = label_both) + + style + +# Cleanup +if (ARC) + stopCluster(cl) +print('Complete.') +
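
The diary note about save points and resuming from an existing data frame corresponds to the new `genZero` argument of `evoSim()` and the chunked loop in model20.R's `runModel()`. Below is a rough usage sketch under the assumption that the package defaults are acceptable; the real calls also pass the custom make/select/advice/decision functions, the `other` list, and write each chunk to a temporary file.

```r
# Run a long model as shorter cycles, seeding each cycle with the previous
# cycle's final generation via genZero. Values here are illustrative only.
source('evoSim/evoSim/R/evoSim.R')

seed <- NULL          # NULL lets evoSim create generation 1 itself
remaining <- 3000     # total generations required
chunkSize <- 1500     # maximum generations per cycle (spec$maxGenerations)

while (remaining > 0) {
  run <- evoSim(agentCount = 750,
                agentDegree = 10,
                decisionCount = 30,
                generationCount = min(remaining, chunkSize),
                mutationChance = 0.01,
                genZero = seed)
  # the final generation of this chunk becomes the seed for the next one;
  # note that ties are regenerated across cycle breaks, as flagged in evoSim.R
  seed <- run$agents[run$agents$generation == max(run$agents$generation), ]
  remaining <- remaining - chunkSize
}
```

Seeding each cycle from the last completed generation keeps the per-run memory footprint bounded while preserving the evolutionary lineage, at the cost of the regenerated ties noted in the evoSim.R comment.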