diff --git a/ccn2019.Rmd b/ccn2019.Rmd
index 7301fc5..51b0168 100644
--- a/ccn2019.Rmd
+++ b/ccn2019.Rmd
@@ -1,6 +1,5 @@
 ---
 title: "Unbiased N-Back"
-date: "5/12/2019"
 output:
   ioslides_presentation: default
   slidy_presentation: default
@@ -16,26 +15,30 @@
 load('./data/CL2015.RData')
 ```
 
-## Intro
-
-### Problem:
+## Problem
 
 - local statistical properties of the n-back affect how we respond.
 - local vs. global properties, what matters the most?
 
-### By-products
+### By-products
 
 - an agent to replicate behavioral data sets
 - an online service to generate and evaluate n-back sequences
 
 ## Method
 
-  - create history window (a.k.a, contiguous subsequences)
-  - calculate local T, L, S, U, $RT_{mean}$, $Accuracy_{mean}$ for each subsequence
+  - create a history window for each trial (a.k.a. contiguous subsequences)
+  - calculate local $T$, $L$, $S$, $U$, $RT_{mean}$, $Accuracy_{mean}$ for each subsequence
   - Model RT/Acc (response vars) with local properties (exp. vars)
   - Cluster responses (or exp. vars?)
   - Investigate if extracted clusters are statistically different
 
-## Modeling
-  - Create two models for local and global features as explanatory vars
-  - Continue with modeling RT and Accuracy based upon local and global feats and compare them. Which model provides a better description of the recoreded RT and Accuracy vars? (model comparasion, model selection, etc)
+### Explanatory Variables
+- $T$: number of targets
+- $L$: number of lures
+- $S$: skewness score
+- $U$: uniformity (non-repetition)
+
+### Response Variables
+- $RT_{mean}$
+- $Accuracy_{mean}$
 
 ## Constraints
 
@@ -44,6 +47,10 @@
 - uniform distribution of choices
 - controlled local lumpiness
 
+## Modeling
+  - Create two models, one with local and one with global features as explanatory vars
+  - Model RT and Accuracy based on the local and global features and compare the two models. Which provides a better description of the recorded RT and Accuracy vars? (model comparison, model selection, etc.)
+
 
 ```{r}
 trials <- c('a','b','c','d','c','d','b','a','a','d','b','a','c','c','a','c')
@@ -65,9 +72,11 @@
 ```
 
-Each constraint is a cost function to minimize for each sequence of stimuli
+Each constraint is a cost function to minimize for each sequence of stimuli.
-```
+```{r, eval=F}
+
+# Code for the fitness and loss functions
 history <- contig_seqs
 targets <- 4
 lures <- 2
@@ -106,47 +115,56 @@
 
 ```{r}
-#NB %>%
-#  filter(participant=='P1') %>%
-#  group_by(block) %>%
-#  summarise(trials=n_distinct(trial))
-
-with_lures <- function(stim, stim_type, history) {
+# Code to calculate local statistical properties
+with_lures <- function(condition, stim, stim_type, history = NA) {
+  # handles both 2-back and 3-back conditions
-  
-  res <- sapply(
+  sapply(
     1:length(stim), 
     function(i) {
-      ifelse(
-        stim[i]==stri_sub(history[i],-2,-2) || stim[i]==stri_sub(history[i],-4,-4),
-        'lure', 
-        as.character(stim_type[i])
+      switch(condition[i],
+        "2-back" = {
+          ifelse(
+            stim[i]==stri_sub(history[i],-2,-2) || stim[i]==stri_sub(history[i],-4,-4),
+            'lure', 
+            as.character(stim_type[i])
+          )},
+        "3-back" = {
+          ifelse(
+            stim[i]==stri_sub(history[i],-3,-3) || stim[i]==stri_sub(history[i],-5,-5),
+            'lure', 
+            as.character(stim_type[i])
+          )}
      )
+      
    })
-  as.factor(res)
 }
 
-with_targets_ratio <- function(correct, history = c(), block_size=NA) {
-  if (is.na(block_size)) block_size = str_length(history)
-  sapply(1:length(correct), function(i) {
-    0 #TODO
+with_targets_ratio <- function(stimulus_type, history) {
+  sapply(1:length(history), function(i) {
+    trials <- stimulus_type[(i-str_length(history[i])):i]
+    trials <- unlist(trials, use.names=FALSE)
+    length(trials[trials=="target"])
  })
 }
 
 with_lures_ratio <- function(stimulus_type, history) {
-  res <- sapply(1:length(history), function(i) {
+  sapply(1:length(history), function(i) {
     trials <- stimulus_type[(i-str_length(history[i])):i]
     trials <- unlist(trials, use.names=FALSE)
     length(trials[trials=="lure"])
  })
-  res
 }
 
-with_skewness_score <- function(history) {
-  sapply(1:length(history), function(i) 0)
+with_lumpiness_score <- function(stimulus, history) {
+  sapply(1:length(history), function(i) {
+    trials <- stimulus[(i-str_length(history[i])):i]
+    trials <- unlist(trials, use.names=FALSE)
+    max(table(trials)) - 1
+  })
 }
 
-with_lumpiness_score <- function(history) {
+with_skewness_score <- function(stimulus, history) {
   sapply(1:length(history), function(i) 0)
 }
 
@@ -159,6 +177,7 @@
 }
 
 normalize_scores <- function(targets_ratio, lures_ratio, skewness, lumpiness) {
+  #TODO
   sapply(1:length(targets_ratio), function(i) 0)
 }
 
@@ -166,25 +185,27 @@
   group_by(participant, condition, block) %>%
   mutate(history = with_history(stimulus)) %>%
   #mutate(stimulus_type = map_chr(.x=stimulus, stim_type=stimulus_type, history=history,.f=with_lures)) 
-  mutate(stimulus_type_2 = with_lures(stimulus, stimulus_type, history)) %>%
-  mutate(targets_ratio = with_targets_ratio(correct)) %>%
-  mutate(lures_ratio = with_lures_ratio(stimulus_type_2, history)) %>%
-  mutate(skewness = with_skewness_score(history)) %>%
-  mutate(lumpiness = with_lumpiness_score(history)) %>%
+  mutate(stimulus_type_2 = with_lures(condition, stimulus, stimulus_type, history)) %>%
+  mutate(t = with_targets_ratio(stimulus_type_2, history)) %>%
+  mutate(l = with_lures_ratio(stimulus_type_2, history)) %>%
+  mutate(s = with_skewness_score(stimulus, history)) %>%
+  mutate(u = with_lumpiness_score(stimulus, history)) %>%
   #normalize_scores(targets_ratio, lures_ratio, skewness, lumpiness) %>%
   ungroup()
 
 # print
-NB_modified %>%
-  filter(participant=='P1', lures_ratio>0) %>%
-  View()
+# NB_modified %>%
+#   filter(participant=='P1') %>%
+#   View()
+# 
 
 ```
 
-#TODO
-modified_NB <- NB %>% 
-  mutate(constraint1=fitness1(history), constrain2=fitness2(history), constraint3=fitness(history))
-kmeans(NB)
-ggplot(kmeans$accuracy)
-ggplot(kmeans$rt)
+## TODO
+  - data %>% mutate(constraint1=fitness1(history), constraint2=fitness2(history),
+    constraint3=fitness(history))
+  - kmeans(NB)
+  - ggplot(kmeans_clusters$accuracy)
+  - ggplot(kmeans_clusters$rt)
+
-
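The skewness helper and the score normalization are still stubs in the chunk above. Below is a minimal sketch of one way the skewness score could be filled in, assuming $S$ is meant to measure how unevenly the stimuli are distributed within the history window; this definition is an assumption, not the original one.

```{r, eval=F}
# Sketch only (assumed definition): skewness of the per-stimulus counts
# inside each history window; returns 0 when the window is perfectly uniform.
library(stringr)

with_skewness_score <- function(stimulus, history) {
  sapply(1:length(history), function(i) {
    trials <- unlist(stimulus[(i - str_length(history[i])):i], use.names = FALSE)
    counts <- as.numeric(table(trials))
    m <- mean(counts)
    s <- sd(counts)
    if (is.na(s) || s == 0) 0 else mean((counts - m)^3) / s^3
  })
}
```

The TODO list ends with a clustering and plotting step. A rough sketch of how that step might look, assuming `NB_modified` carries the local measures `t`, `l`, `s`, `u` plus per-trial `rt` and `correct` columns (the column names and `k = 3` are placeholders, not part of the original analysis):

```{r, eval=F}
# Sketch only: cluster trials by their local properties, then compare
# RT and accuracy across the extracted clusters.
library(dplyr)
library(ggplot2)

features <- NB_modified %>%
  select(t, l, s, u) %>%
  scale()                             # assumes no missing values in the local measures

set.seed(1)
km <- kmeans(features, centers = 3)   # k = 3 is an arbitrary placeholder

NB_clustered <- NB_modified %>%
  mutate(cluster = factor(km$cluster))

# accuracy per cluster (assumes `correct` is logical or 0/1)
ggplot(NB_clustered, aes(x = cluster, y = as.numeric(correct))) +
  stat_summary(fun = mean, geom = "bar")

# RT per cluster
ggplot(NB_clustered, aes(x = cluster, y = rt)) +
  geom_boxplot()
```

Scaling the features before `kmeans` keeps the clustering from being dominated by whichever local measure happens to have the largest range.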