diff --git a/ccn2019/ccn2019.rev3.Rmd b/ccn2019/ccn2019.rev3.Rmd
index d8c501e..0db19ef 100644
--- a/ccn2019/ccn2019.rev3.Rmd
+++ b/ccn2019/ccn2019.rev3.Rmd
@@ -4,8 +4,6 @@
 chunk_output_type: inline
 ---
 
-$P=\langle V,D,C,W \rangle$
-
 ```{r setup, message=FALSE, include=FALSE, paged.print=FALSE}
 #! ===============================================
 #! load required packages
@@ -54,7 +52,7 @@
 The new data set is then saved in a file for further analysis.
 One final chuck of codes clean up the dataset by removing outlier participants and adding RT_CAT which shows high or low response times.
 
-```{r preprocessing}
+```{r preprocessing, message=FALSE, warning=FALSE, include=FALSE}
 #! ===============================================
 #! A function to mark lures in a sequence
@@ -100,6 +98,7 @@
 save(seqs.raw,file=here("data/nback_seqs.Rd"))
 #load(here("notebooks/data/nback_seqs.Rd"))
 
+# seqs is the main dataset of sequences including all stats extracted from a block or the sliding history window.
 ## classify RTs (mid and high), low RTs will be removed for now
 seqs <- seqs.raw %>%
   mutate(
@@ -115,11 +114,11 @@
 The following chunk marks highly correlated predictors, but does not work anymore.
 
 ```{r remove_highly_correlated_predictors}
 # WIP: This is an extra step for non-pls methods to remove highly correlated predictors
-cor_matrix <- cor(seqs[,-1])
-cor_high <- findCorrelation(cor_matrix, 0.8)
-high_cor_remove <- row.names(cor_matrix)[cor_high]
+#cor_matrix <- cor(seqs[,-1])
+#cor_high <- findCorrelation(cor_matrix, 0.8)
+#high_cor_remove <- row.names(cor_matrix)[cor_high]
 #FIXME remove by column name
-seqs.uncorr <- seqs %>% select(-high_cor_remove)
+#seqs.uncorr <- seqs %>% select(-high_cor_remove)
 ```
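For context on the RT_CAT step described in the preprocessing prose above (classify RTs as mid/high and drop low RTs), here is a minimal sketch of the kind of `mutate()`/`filter()` logic that chunk performs; the `rt` column name and the cut-off values are hypothetical placeholders, not the notebook's actual thresholds.

```{r rt_cat_sketch, eval=FALSE}
# Minimal sketch, not the notebook's actual chunk: classify response times into
# mid/high categories and remove low RTs. The `rt` column and the 200/500 ms
# cut-offs are hypothetical placeholders.
library(dplyr)

seqs <- seqs.raw %>%
  mutate(
    rt_cat = case_when(
      rt < 200 ~ "low",   # implausibly fast responses
      rt < 500 ~ "mid",
      TRUE     ~ "high"
    )
  ) %>%
  filter(rt_cat != "low")  # low RTs are removed for now, as the chunk comment notes
```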
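Since the `remove_highly_correlated_predictors` chunk is now fully commented out ("does not work anymore"), one possible repair, assuming the failure comes from non-numeric columns in `seqs`, is to restrict the correlation matrix to numeric predictors and drop the flagged columns by name, which would also address the `#FIXME`. A hedged sketch, not the notebook's original code:

```{r remove_correlated_sketch, eval=FALSE}
# Minimal sketch of one way to revive the commented-out chunk; it assumes the
# chunk broke because cor() was applied to non-numeric columns of seqs.
library(dplyr)
library(caret)

# keep only numeric predictors so cor() does not fail on factor/character columns
numeric_preds <- seqs %>% select(where(is.numeric))

cor_matrix     <- cor(numeric_preds, use = "pairwise.complete.obs")
high_cor_names <- findCorrelation(cor_matrix, cutoff = 0.8, names = TRUE)

# drop highly correlated predictors by column name
seqs.uncorr <- seqs %>% select(-all_of(high_cor_names))
```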