diff --git a/Pipfile b/Pipfile
index 5703ad7..277029e 100644
--- a/Pipfile
+++ b/Pipfile
@@ -12,6 +12,7 @@
 Numberjack = "*"
 expyriment = "*"
 pygame = "==1.9.5.dev0"
+scipy = "*"
 
 [requires]
 python_version = "3.7"
diff --git a/demo/progressive_random.py b/demo/progressive_random.py
new file mode 100644
index 0000000..d2aba0f
--- /dev/null
+++ b/demo/progressive_random.py
@@ -0,0 +1,47 @@
+import expyriment
+from expyriment import design, control, stimuli, misc
+from generators import progressive_random
+
+num_of_blocks = 2
+trials_per_block = 16
+n = 2
+choices = ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H']
+
+generators = [progressive_random.SequenceGenerator(choices, trials=trials_per_block, n=n) for b in range(0, num_of_blocks)]
+
+exp = design.Experiment("Progressive N-Back Task")
+control.initialize(exp)
+exp.data_variable_names = ["block", "stimulus", "pressed_key", "rt"]
+
+fixation = stimuli.FixCross()
+fixation.preload()
+
+# left and right arrow keys for responses
+response_keys = [misc.constants.K_LEFT, misc.constants.K_RIGHT]
+
+expyriment.control.start(exp)
+
+
+# initialize three blocks
+for b in range(0, num_of_blocks):
+    block = design.Block()
+    block.set_factor("block", b)
+    block.set_factor("trials", trials_per_block)
+    exp.add_block(block)
+
+for block in exp.blocks:
+    trials = block.get_factor("trials")
+    block_index = block.get_factor("block")
+    for i in range(0, trials):
+        trial = design.Trial()
+        item = generators[block_index].next_trial()
+        trial.set_factor("stimulus", item)
+        stim = stimuli.TextLine(item, text_size=200)
+        exp.clock.wait(1000 - fixation.present() - stim.preload())
+        stim.present()
+        pressed_key, rt = exp.keyboard.wait(response_keys)
+        exp.data.add([block.get_factor("block"), trial.get_factor("stimulus"), pressed_key, rt])
+        exp.clock.wait(1000 - expyriment.stimuli.BlankScreen().present() - stim.unload())
+
+expyriment.control.end("Goodbye!", goodbye_delay=1000)
+
diff --git a/demo/skewed_nback.py b/demo/skewed_nback.py
index 1395b7c..61e32ba 100644
--- a/demo/skewed_nback.py
+++ b/demo/skewed_nback.py
@@ -1,8 +1,8 @@
 import expyriment
 from expyriment import design, control, stimuli, misc
-from skewed_random import SequenceGenerator
+from generators import skewed_random
 
-nback_sequence = SequenceGenerator().generate()
+nback_sequence = skewed_random.SequenceGenerator().generate()
 
 exp = design.Experiment("Skewed N-Back Task")
 control.initialize(exp)
@@ -34,7 +34,7 @@ for trial in block.trials:
     exp.clock.wait(1000 - fixation.present())
     trial.stimuli[0].present()
 
-    pressed_key, rt = exp.keyboard.wait([expyriment.misc.constants.K_LEFT, expyriment.misc.constants.K_RIGHT])
+    pressed_key, rt = exp.keyboard.wait(response_keys)
 
     exp.data.add([block.get_factor("block"), trial.get_factor("stimulus"), pressed_key, rt])
     exp.clock.wait(1000 - expyriment.stimuli.BlankScreen().present() - trial.stimuli[0].unload())
diff --git a/even_random_generator.py b/even_random_generator.py
deleted file mode 100644
index fda40d8..0000000
--- a/even_random_generator.py
+++ /dev/null
@@ -1,75 +0,0 @@
-from constraint import *
-
-import itertools as it
-
-import Numberjack as nj
-
-class EvenRandomGenerator:
-    """Generate even random sequences according to a predefined TL ration (Ralph, 2014)"""
-
-    def __init__(self, choices, trials=64, tl=1):
-        self.tl, self.trials, self.choices = tl, trials, choices
-
-    def generate(self):
-        seqs = self._generate_initial_sequences()
-        return self._find_optimal_sequence(seqs, 0.2)
-
-    def _generate_initial_sequences(self):
-        """
-        Generates initial sequence of items based on choices and number of desired trials.
-        In EvenRandom sequences, all stimuli have same number of appearances.
-        """
-
-        pool = it.product(self.choices, repeat=self.trials)
-        return pool
-
-    def _find_optimal_sequence_numberjack(self,tl_ratio):
-        """Optimize with Numberjack"""
-
-        cost = nj.Variable(0, 100, 'cost')
-        seqs = nj.Variable([]) # all sequences
-
-        model = nj.Model(
-            seqs.,
-            cost == self.calculate_tl_ratio(seq) - tl_ratio,
-            nj.Minimise(cost)
-        )
-        solver = model.load('Mistral')
-        if solver.solve():
-            solver.printStatistics()
-        else:
-            print("No solution with Numberjack")
-
-
-    def _find_optimal_sequence(self, sequences, tl_ratio):
-        """Optimize a sequence to match a desired tl ratio with python-constraints"""
-
-        p = Problem()
-
-        # TODO add all possible values for seq (its domain)
-        p.addVariable("seq", list(sequences))
-
-        p.addConstraint(lambda s: self.calculate_tl_ratio(s) - tl_ratio < 0.05)
-
-        return p.getSolution()
-
-    @staticmethod
-    def calculate_tl_ratio(seq):
-        """Calculates the T:L ratio of a sequence."""
-        targets = 0
-        lures = 0
-        for index, item in seq:
-            if item == seq[index-2]:
-                targets += 1
-            elif item == seq[index-1] or item == seq[index-3]:
-                lures += 1
-        # avoid division by zero
-        if lures == 0:
-            lures = 1
-        return targets/lures
-
-
-if __name__ == '__main__':
-    generator = EvenRandomGenerator(['a', 'b', 'c'], trials = 4)
-    generated_seq = generator.generate()
-    print('Even Random Sequence: %s' % ''.join(generated_seq))
- """ - - pool = it.product(self.choices, repeat=self.trials) - return pool - - def _find_optimal_sequence_numberjack(self,tl_ratio): - """Optimize with Numberjack""" - - cost = nj.Variable(0, 100, 'cost') - seqs = nj.Variable([]) # all sequences - - model = nj.Model( - seqs., - cost == self.calculate_tl_ratio(seq) - tl_ratio, - nj.Minimise(cost) - ) - solver = model.load('Mistral') - if solver.solve(): - solver.printStatistics() - else: - print("No solution with Numberjack") - - - def _find_optimal_sequence(self, sequences, tl_ratio): - """Optimize a sequence to match a desired tl ratio with python-constraints""" - - p = Problem() - - # TODO add all possible values for seq (its domain) - p.addVariable("seq", list(sequences)) - - p.addConstraint(lambda s: self.calculate_tl_ratio(s) - tl_ratio < 0.05) - - return p.getSolution() - - @staticmethod - def calculate_tl_ratio(seq): - """Calculates the T:L ratio of a sequence.""" - targets = 0 - lures = 0 - for index, item in seq: - if item == seq[index-2]: - targets += 1 - elif item == seq[index-1] or item == seq[index-3]: - lures += 1 - # avoid division by zero - if lures == 0: - lures = 1 - return targets/lures - - -if __name__ == '__main__': - generator = EvenRandomGenerator(['a', 'b', 'c'], trials = 4) - generated_seq = generator.generate() - print('Even Random Sequence: %s' % ''.join(generated_seq)) diff --git a/ga_optimized_generator.py b/ga_optimized_generator.py deleted file mode 100644 index 56b7cc0..0000000 --- a/ga_optimized_generator.py +++ /dev/null @@ -1,141 +0,0 @@ -import itertools as it -import random - - -class GAOptimizedRandomGenerator: - """Generate even random sequences according to a predefined TL ration (Ralph, 2014)""" - - def __init__(self, choices, trials, tl=2.0, pool_size=100, n=3): - """Initialize the genetic algorithm optimizer for n-back sequences. - :param choices: - :param trials: - :param tl: - :param pool_size: - :param n: - """ - self.tl, self.trials, self.choices, self.pool_size, self.n = tl, trials, choices, pool_size, n - self.pool = [] - self.__init_pool(pool_size) - - def generate(self): - """Generate a sequence of trials based on passed parameters. TL ratio and distribution are expected to be - close to the desired ones but not exactly the same. - :return: a sequence of items in "list" format. - """ - generation = 0 - best_parent = self.__find_best_parents(1)[0] - while self.cost(best_parent) > 0.1 and generation < 1000: - generation += 1 - if random.random() > 0.5: - self.pool = list(map(lambda s: self.mutate(s), self.pool)) - self.pool = self.crossover_all() - best_parent = self.__find_best_parents(1)[0] - print(best_parent, 'cost=%f' % self.cost(best_parent)) - return best_parent - - def __append_chunk(self, prefix="", chunk_size=8): - chunk_generation = 0 - pool = [] - - def __init_pool(self, pool_size, chunk_size = 8) -> list: - """ - Initialize solution pool. - :param pool_size: Num of initial random solutions - :return: initial pool of - """ - print("Initializing the pool...") - population = it.combinations_with_replacement(self.choices, chunk_size) - sample = random.sample(list(population), pool_size) - self.pool = list(map(lambda _: ''.join(_), sample)) - return self.pool - - def __find_best_parents(self, pool: list, count=1) -> list: - """ - Find best available sequences from the current pool based on the cost function. - :param count: Number of desired best sequences to be returned. Default is 1. - :return: A list of most fit sequences. 
- """ - sorted_pool = sorted(pool, key=lambda _: self.cost(_)) - return sorted_pool[:count] - - def distribution_cost(self, seq): - """ - Calculate fitness according to the similarity to the desired uniform distribution. - :param seq: a string - :return: - """ - costs = {c: 0.0 for c in self.choices} - for c in list(seq): - costs[c] += 1.0 if costs.__contains__(c) else 0.0 - - # TODO instead of normalizing all, only normalize the max value - costs = {k: abs(1.0 - v*len(self.choices)/self.trials) for k, v in costs.items()} - return max(list(costs.values())) - - def cost(self, seq): - """ - Calculate overall fitness (or cost) of a sequence. - It's a cost function, so we try to minimize this cost. - :param seq: - :return: - """ - # add fitness for uniform distribution of all stimuli - # TODO merge different cost functions with weights - return abs(self.calculate_tl_ratio(seq, self.n) - self.tl) + self.__distribution_cost(seq) - - def crossover_all(self): - """ - Perform random crossover for all pairs. - :return: new pool - """ - new_pool = [] - for i in range(int(self.pool_size/2)): - seq1 = self.pool[i*2] # change to weighted random - seq2 = self.pool[i*2 + 1] # change to weighted random - new_pool.extend(self.crossover(seq1, seq2)) - - return new_pool - - def crossover(self, seq1, seq2): - """ - Crossover two sequences. - :param seq1: - :param seq2: - :return: - """ - pos = random.randint(0, self.trials) - return [seq1[:pos] + seq2[pos:], seq2[:pos] + seq1[pos:]] - - def mutate(self, seq): - if random.random() > 0.5: - pos = random.randint(0, len(seq)-1) - seq_list = list(seq) - seq_list[pos] = random.choice(self.choices) - return ''.join(seq_list) - return seq - - @staticmethod - def calculate_tl_ratio(seq, n: int): - """Calculates the T/L ratio in a block of trials.""" - targets = 0.0 - lures = 0.0 - for index in range(n, len(seq)): - item = seq[index] - if item == seq[index - n]: - targets += 1.0 - elif item == seq[index - (n-1)] or item == seq[index - (n+1)]: - lures += 1.0 - if lures - 0.0 < 0.001: # avoid division by zero - lures = 0.001 - return targets/lures - - -# Demo -if __name__ == '__main__': - - generator = GAOptimizedRandomGenerator(['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h'], trials=16, n=2) - sq = generator.generate() - tl_ratio = generator.calculate_tl_ratio(sq, n=2) - even_dist = generator.distribution_cost(sq) - - print('GA-Optimized Sequence: %s' % sq, 'with tl_ratio=%f' % tl_ratio, 'and even_dist_cost=%f' % even_dist) diff --git a/Pipfile b/Pipfile index 5703ad7..277029e 100644 --- a/Pipfile +++ b/Pipfile @@ -12,6 +12,7 @@ Numberjack = "*" expyriment = "*" pygame = "==1.9.5.dev0" +scipy = "*" [requires] python_version = "3.7" diff --git a/demo/progressive_random.py b/demo/progressive_random.py new file mode 100644 index 0000000..d2aba0f --- /dev/null +++ b/demo/progressive_random.py @@ -0,0 +1,47 @@ +import expyriment +from expyriment import design, control, stimuli, misc +from generators import progressive_random + +num_of_blocks = 2 +trials_per_block = 16 +n = 2 +choices = ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H'] + +generators = [progressive_random.SequenceGenerator(choices, trials=trials_per_block, n=n) for b in range(0, num_of_blocks)] + +exp = design.Experiment("Progressive N-Back Task") +control.initialize(exp) +exp.data_variable_names = ["block", "stimulus", "pressed_key", "rt"] + +fixation = stimuli.FixCross() +fixation.preload() + +# left and right arrow keys for responses +response_keys = [misc.constants.K_LEFT, misc.constants.K_RIGHT] + 
- """ - sorted_pool = sorted(pool, key=lambda _: self.cost(_)) - return sorted_pool[:count] - - def distribution_cost(self, seq): - """ - Calculate fitness according to the similarity to the desired uniform distribution. - :param seq: a string - :return: - """ - costs = {c: 0.0 for c in self.choices} - for c in list(seq): - costs[c] += 1.0 if costs.__contains__(c) else 0.0 - - # TODO instead of normalizing all, only normalize the max value - costs = {k: abs(1.0 - v*len(self.choices)/self.trials) for k, v in costs.items()} - return max(list(costs.values())) - - def cost(self, seq): - """ - Calculate overall fitness (or cost) of a sequence. - It's a cost function, so we try to minimize this cost. - :param seq: - :return: - """ - # add fitness for uniform distribution of all stimuli - # TODO merge different cost functions with weights - return abs(self.calculate_tl_ratio(seq, self.n) - self.tl) + self.__distribution_cost(seq) - - def crossover_all(self): - """ - Perform random crossover for all pairs. - :return: new pool - """ - new_pool = [] - for i in range(int(self.pool_size/2)): - seq1 = self.pool[i*2] # change to weighted random - seq2 = self.pool[i*2 + 1] # change to weighted random - new_pool.extend(self.crossover(seq1, seq2)) - - return new_pool - - def crossover(self, seq1, seq2): - """ - Crossover two sequences. - :param seq1: - :param seq2: - :return: - """ - pos = random.randint(0, self.trials) - return [seq1[:pos] + seq2[pos:], seq2[:pos] + seq1[pos:]] - - def mutate(self, seq): - if random.random() > 0.5: - pos = random.randint(0, len(seq)-1) - seq_list = list(seq) - seq_list[pos] = random.choice(self.choices) - return ''.join(seq_list) - return seq - - @staticmethod - def calculate_tl_ratio(seq, n: int): - """Calculates the T/L ratio in a block of trials.""" - targets = 0.0 - lures = 0.0 - for index in range(n, len(seq)): - item = seq[index] - if item == seq[index - n]: - targets += 1.0 - elif item == seq[index - (n-1)] or item == seq[index - (n+1)]: - lures += 1.0 - if lures - 0.0 < 0.001: # avoid division by zero - lures = 0.001 - return targets/lures - - -# Demo -if __name__ == '__main__': - - generator = GAOptimizedRandomGenerator(['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h'], trials=16, n=2) - sq = generator.generate() - tl_ratio = generator.calculate_tl_ratio(sq, n=2) - even_dist = generator.distribution_cost(sq) - - print('GA-Optimized Sequence: %s' % sq, 'with tl_ratio=%f' % tl_ratio, 'and even_dist_cost=%f' % even_dist) diff --git a/generators/__init__.py b/generators/__init__.py new file mode 100644 index 0000000..e69de29 --- /dev/null +++ b/generators/__init__.py diff --git a/Pipfile b/Pipfile index 5703ad7..277029e 100644 --- a/Pipfile +++ b/Pipfile @@ -12,6 +12,7 @@ Numberjack = "*" expyriment = "*" pygame = "==1.9.5.dev0" +scipy = "*" [requires] python_version = "3.7" diff --git a/demo/progressive_random.py b/demo/progressive_random.py new file mode 100644 index 0000000..d2aba0f --- /dev/null +++ b/demo/progressive_random.py @@ -0,0 +1,47 @@ +import expyriment +from expyriment import design, control, stimuli, misc +from generators import progressive_random + +num_of_blocks = 2 +trials_per_block = 16 +n = 2 +choices = ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H'] + +generators = [progressive_random.SequenceGenerator(choices, trials=trials_per_block, n=n) for b in range(0, num_of_blocks)] + +exp = design.Experiment("Progressive N-Back Task") +control.initialize(exp) +exp.data_variable_names = ["block", "stimulus", "pressed_key", "rt"] + +fixation = stimuli.FixCross() 
- """ - sorted_pool = sorted(pool, key=lambda _: self.cost(_)) - return sorted_pool[:count] - - def distribution_cost(self, seq): - """ - Calculate fitness according to the similarity to the desired uniform distribution. - :param seq: a string - :return: - """ - costs = {c: 0.0 for c in self.choices} - for c in list(seq): - costs[c] += 1.0 if costs.__contains__(c) else 0.0 - - # TODO instead of normalizing all, only normalize the max value - costs = {k: abs(1.0 - v*len(self.choices)/self.trials) for k, v in costs.items()} - return max(list(costs.values())) - - def cost(self, seq): - """ - Calculate overall fitness (or cost) of a sequence. - It's a cost function, so we try to minimize this cost. - :param seq: - :return: - """ - # add fitness for uniform distribution of all stimuli - # TODO merge different cost functions with weights - return abs(self.calculate_tl_ratio(seq, self.n) - self.tl) + self.__distribution_cost(seq) - - def crossover_all(self): - """ - Perform random crossover for all pairs. - :return: new pool - """ - new_pool = [] - for i in range(int(self.pool_size/2)): - seq1 = self.pool[i*2] # change to weighted random - seq2 = self.pool[i*2 + 1] # change to weighted random - new_pool.extend(self.crossover(seq1, seq2)) - - return new_pool - - def crossover(self, seq1, seq2): - """ - Crossover two sequences. - :param seq1: - :param seq2: - :return: - """ - pos = random.randint(0, self.trials) - return [seq1[:pos] + seq2[pos:], seq2[:pos] + seq1[pos:]] - - def mutate(self, seq): - if random.random() > 0.5: - pos = random.randint(0, len(seq)-1) - seq_list = list(seq) - seq_list[pos] = random.choice(self.choices) - return ''.join(seq_list) - return seq - - @staticmethod - def calculate_tl_ratio(seq, n: int): - """Calculates the T/L ratio in a block of trials.""" - targets = 0.0 - lures = 0.0 - for index in range(n, len(seq)): - item = seq[index] - if item == seq[index - n]: - targets += 1.0 - elif item == seq[index - (n-1)] or item == seq[index - (n+1)]: - lures += 1.0 - if lures - 0.0 < 0.001: # avoid division by zero - lures = 0.001 - return targets/lures - - -# Demo -if __name__ == '__main__': - - generator = GAOptimizedRandomGenerator(['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h'], trials=16, n=2) - sq = generator.generate() - tl_ratio = generator.calculate_tl_ratio(sq, n=2) - even_dist = generator.distribution_cost(sq) - - print('GA-Optimized Sequence: %s' % sq, 'with tl_ratio=%f' % tl_ratio, 'and even_dist_cost=%f' % even_dist) diff --git a/generators/__init__.py b/generators/__init__.py new file mode 100644 index 0000000..e69de29 --- /dev/null +++ b/generators/__init__.py diff --git a/generators/even_random.py b/generators/even_random.py new file mode 100644 index 0000000..fda40d8 --- /dev/null +++ b/generators/even_random.py @@ -0,0 +1,75 @@ +from constraint import * + +import itertools as it + +import Numberjack as nj + +class EvenRandomGenerator: + """Generate even random sequences according to a predefined TL ration (Ralph, 2014)""" + + def __init__(self, choices, trials=64, tl=1): + self.tl, self.trials, self.choices = tl, trials, choices + + def generate(self): + seqs = self._generate_initial_sequences() + return self._find_optimal_sequence(seqs, 0.2) + + def _generate_initial_sequences(self): + """ + Generates initial sequence of items based on choices and number of desired trials. + In EvenRandom sequences, all stimuli have same number of appearances. 
+ """ + + pool = it.product(self.choices, repeat=self.trials) + return pool + + def _find_optimal_sequence_numberjack(self,tl_ratio): + """Optimize with Numberjack""" + + cost = nj.Variable(0, 100, 'cost') + seqs = nj.Variable([]) # all sequences + + model = nj.Model( + seqs., + cost == self.calculate_tl_ratio(seq) - tl_ratio, + nj.Minimise(cost) + ) + solver = model.load('Mistral') + if solver.solve(): + solver.printStatistics() + else: + print("No solution with Numberjack") + + + def _find_optimal_sequence(self, sequences, tl_ratio): + """Optimize a sequence to match a desired tl ratio with python-constraints""" + + p = Problem() + + # TODO add all possible values for seq (its domain) + p.addVariable("seq", list(sequences)) + + p.addConstraint(lambda s: self.calculate_tl_ratio(s) - tl_ratio < 0.05) + + return p.getSolution() + + @staticmethod + def calculate_tl_ratio(seq): + """Calculates the T:L ratio of a sequence.""" + targets = 0 + lures = 0 + for index, item in seq: + if item == seq[index-2]: + targets += 1 + elif item == seq[index-1] or item == seq[index-3]: + lures += 1 + # avoid division by zero + if lures == 0: + lures = 1 + return targets/lures + + +if __name__ == '__main__': + generator = EvenRandomGenerator(['a', 'b', 'c'], trials = 4) + generated_seq = generator.generate() + print('Even Random Sequence: %s' % ''.join(generated_seq)) diff --git a/Pipfile b/Pipfile index 5703ad7..277029e 100644 --- a/Pipfile +++ b/Pipfile @@ -12,6 +12,7 @@ Numberjack = "*" expyriment = "*" pygame = "==1.9.5.dev0" +scipy = "*" [requires] python_version = "3.7" diff --git a/demo/progressive_random.py b/demo/progressive_random.py new file mode 100644 index 0000000..d2aba0f --- /dev/null +++ b/demo/progressive_random.py @@ -0,0 +1,47 @@ +import expyriment +from expyriment import design, control, stimuli, misc +from generators import progressive_random + +num_of_blocks = 2 +trials_per_block = 16 +n = 2 +choices = ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H'] + +generators = [progressive_random.SequenceGenerator(choices, trials=trials_per_block, n=n) for b in range(0, num_of_blocks)] + +exp = design.Experiment("Progressive N-Back Task") +control.initialize(exp) +exp.data_variable_names = ["block", "stimulus", "pressed_key", "rt"] + +fixation = stimuli.FixCross() +fixation.preload() + +# left and right arrow keys for responses +response_keys = [misc.constants.K_LEFT, misc.constants.K_RIGHT] + +expyriment.control.start(exp) + + +# initialize three blocks +for b in range(0, num_of_blocks): + block = design.Block() + block.set_factor("block", b) + block.set_factor("trials", trials_per_block) + exp.add_block(block) + +for block in exp.blocks: + trials = block.get_factor("trials") + block_index = block.get_factor("block") + for i in range(0, trials): + trial = design.Trial() + item = generators[block_index].next_trial() + trial.set_factor("stimulus", item) + stim = stimuli.TextLine(item, text_size=200) + exp.clock.wait(1000 - fixation.present() - stim.preload()) + stim.present() + pressed_key, rt = exp.keyboard.wait(response_keys) + exp.data.add([block.get_factor("block"), trial.get_factor("stimulus"), pressed_key, rt]) + exp.clock.wait(1000 - expyriment.stimuli.BlankScreen().present() - stim.unload()) + +expyriment.control.end("Goodbye!", goodbye_delay=1000) + diff --git a/demo/skewed_nback.py b/demo/skewed_nback.py index 1395b7c..61e32ba 100644 --- a/demo/skewed_nback.py +++ b/demo/skewed_nback.py @@ -1,8 +1,8 @@ import expyriment from expyriment import design, control, stimuli, misc -from skewed_random 
+ """ + + pool = it.product(self.choices, repeat=self.trials) + return pool + + def _find_optimal_sequence_numberjack(self,tl_ratio): + """Optimize with Numberjack""" + + cost = nj.Variable(0, 100, 'cost') + seqs = nj.Variable([]) # all sequences + + model = nj.Model( + seqs., + cost == self.calculate_tl_ratio(seq) - tl_ratio, + nj.Minimise(cost) + ) + solver = model.load('Mistral') + if solver.solve(): + solver.printStatistics() + else: + print("No solution with Numberjack") + + + def _find_optimal_sequence(self, sequences, tl_ratio): + """Optimize a sequence to match a desired tl ratio with python-constraints""" + + p = Problem() + + # TODO add all possible values for seq (its domain) + p.addVariable("seq", list(sequences)) + + p.addConstraint(lambda s: self.calculate_tl_ratio(s) - tl_ratio < 0.05) + + return p.getSolution() + + @staticmethod + def calculate_tl_ratio(seq): + """Calculates the T:L ratio of a sequence.""" + targets = 0 + lures = 0 + for index, item in seq: + if item == seq[index-2]: + targets += 1 + elif item == seq[index-1] or item == seq[index-3]: + lures += 1 + # avoid division by zero + if lures == 0: + lures = 1 + return targets/lures + + +if __name__ == '__main__': + generator = EvenRandomGenerator(['a', 'b', 'c'], trials = 4) + generated_seq = generator.generate() + print('Even Random Sequence: %s' % ''.join(generated_seq)) diff --git a/generators/progressive_random.py b/generators/progressive_random.py new file mode 100644 index 0000000..9914b47 --- /dev/null +++ b/generators/progressive_random.py @@ -0,0 +1,100 @@ +import random +import scipy.stats + + +class SequenceGenerator: + """Generate a sequence progressively according to a predefined TL ratio and an even distribution""" + + def __init__(self, choices, trials, tl=4.0, n=3, targets_ratio=0.2): + """Initialize the genetic algorithm optimizer for n-back sequences. + :param choices: + :param trials: + :param tl: + :param n: + """ + self.tl, self.trials, self.choices, self.n, self.targets_ratio = tl, trials, choices, n, targets_ratio + self.sequence = list() + self.norm_even_dist = scipy.stats.norm(0, trials/2) + self.norm_targets_ratio_dist = scipy.stats.norm(targets_ratio, 0.5) + self.norm_tl_ratio_dist = scipy.stats.norm(tl, trials/2) + + def generate(self): + while not self.sequence or len(self.sequence) < self.trials: + self.sequence = self.__find_best_next_sequence(self.sequence, self.choices) + return self.sequence + + def next_trial(self): + if self.sequence and len(self.sequence) >= self.trials: + return None + self.sequence = self.__find_best_next_sequence(self.sequence, self.choices) + return self.sequence[-1] + + def __find_best_next_sequence(self, seq: list, choices: list) -> list: + import sys + min_cost = sys.float_info.max + best_seq = seq + random.shuffle(choices) # to avoid ordering effect + for choice in choices: + tmp_seq = seq + list(choice) + cost = self.cost(tmp_seq) + if cost < min_cost: + min_cost = cost + best_seq = tmp_seq + return best_seq + + def calc_even_distribution_distance(self, seq): + """ + Calculate fitness according to the similarity to the desired uniform distribution. + :param seq: a string + :return: + """ + costs = {c: 0.0 for c in self.choices} + for c in list(seq): + costs[c] += (1.0 if costs.__contains__(c) else 0.0) + even_ratio = self.trials / len(self.choices) + costs = {k: abs(v - even_ratio)/self.trials for k,v in costs.items()} + return max(list(costs.values())) + + def cost(self, seq): + """ + Calculate overall fitness of a sequence (block of trials). 
+ Right now it's a cost function, so we try to minimize this cost. + :param seq: + :return: + """ + + targets, lures = self.count_targets_and_lures(seq, self.n) + targets_ratio_cost = 1.0 - self.norm_targets_ratio_dist.pdf(targets/self.trials) + tl_ratio_cost = 1.0 - self.norm_tl_ratio_dist.pdf(self.calc_tl_ratio(seq, self.n)) + even_dist_cost = 1.0 - self.norm_even_dist.pdf(self.calc_even_distribution_distance(seq)) + # print(targets_ratio_cost, tl_ratio_cost, even_dist_cost) + return targets_ratio_cost + tl_ratio_cost + even_dist_cost + + @staticmethod + def count_targets_and_lures(seq, n: int): + targets = 0.0 + lures = 0.0 + for index in range(n, len(seq)): + if seq[index] == seq[index - n]: + targets += 1.0 + elif seq[index] == seq[index - (n-1)] or seq[index] == seq[index - (n+1)]: + lures += 1.0 + return targets, lures + + def calc_tl_ratio(self, seq, n: int): + """Calculates the T/L ratio in a block of trials.""" + targets, lures = self.count_targets_and_lures(seq, n) + if lures < 0.01: # avoid division by zero + lures = 0.01 + return targets/lures + + +if __name__ == '__main__': + + n = 3 + generator = SequenceGenerator(['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h'], trials=128, n=n) + sq = generator.generate() + tl_ratio = generator.calc_tl_ratio(sq, n=n) + even_dist_distance = generator.calc_even_distribution_distance(sq) + + print('Progressively-Optimized Sequence: targets=%d, lures=%d' % generator.count_targets_and_lures(sq, n=n), 'with tl_ratio=%f' % tl_ratio, 'and even_dist_cost=%f' % even_dist_distance) diff --git a/Pipfile b/Pipfile index 5703ad7..277029e 100644 --- a/Pipfile +++ b/Pipfile @@ -12,6 +12,7 @@ Numberjack = "*" expyriment = "*" pygame = "==1.9.5.dev0" +scipy = "*" [requires] python_version = "3.7" diff --git a/demo/progressive_random.py b/demo/progressive_random.py new file mode 100644 index 0000000..d2aba0f --- /dev/null +++ b/demo/progressive_random.py @@ -0,0 +1,47 @@ +import expyriment +from expyriment import design, control, stimuli, misc +from generators import progressive_random + +num_of_blocks = 2 +trials_per_block = 16 +n = 2 +choices = ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H'] + +generators = [progressive_random.SequenceGenerator(choices, trials=trials_per_block, n=n) for b in range(0, num_of_blocks)] + +exp = design.Experiment("Progressive N-Back Task") +control.initialize(exp) +exp.data_variable_names = ["block", "stimulus", "pressed_key", "rt"] + +fixation = stimuli.FixCross() +fixation.preload() + +# left and right arrow keys for responses +response_keys = [misc.constants.K_LEFT, misc.constants.K_RIGHT] + +expyriment.control.start(exp) + + +# initialize three blocks +for b in range(0, num_of_blocks): + block = design.Block() + block.set_factor("block", b) + block.set_factor("trials", trials_per_block) + exp.add_block(block) + +for block in exp.blocks: + trials = block.get_factor("trials") + block_index = block.get_factor("block") + for i in range(0, trials): + trial = design.Trial() + item = generators[block_index].next_trial() + trial.set_factor("stimulus", item) + stim = stimuli.TextLine(item, text_size=200) + exp.clock.wait(1000 - fixation.present() - stim.preload()) + stim.present() + pressed_key, rt = exp.keyboard.wait(response_keys) + exp.data.add([block.get_factor("block"), trial.get_factor("stimulus"), pressed_key, rt]) + exp.clock.wait(1000 - expyriment.stimuli.BlankScreen().present() - stim.unload()) + +expyriment.control.end("Goodbye!", goodbye_delay=1000) + diff --git a/demo/skewed_nback.py b/demo/skewed_nback.py index 1395b7c..61e32ba 
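Usage sketch (illustrative, not part of the patch): the demo above drives the new generators.progressive_random.SequenceGenerator one item per trial, and the same class can also build a whole block up front. The parameter values below are examples only.

    from generators import progressive_random

    # Example parameters: eight letter stimuli, a 16-trial block, 2-back.
    gen = progressive_random.SequenceGenerator(['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H'], trials=16, n=2)

    # As in demo/progressive_random.py, items can be drawn one per trial;
    # next_trial() returns None once `trials` items have been produced.
    first_item = gen.next_trial()

    # generate() extends the same internal sequence to the full block length.
    seq = gen.generate()
    print(''.join(seq), 'T/L ratio: %f' % gen.calc_tl_ratio(seq, n=2))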
diff --git a/generators/progressive_random_block_ga.py b/generators/progressive_random_block_ga.py new file mode 100644 index 0000000..c07ac49 --- /dev/null +++ b/generators/progressive_random_block_ga.py @@ -0,0 +1,134 @@ +import itertools as it +import random + + +class ProgressiveGAGenerator: + """Generate a sequence progressively according to a predefined TL ratio and an even distribution""" + + def __init__(self, choices, trials, tl=2.0, pool_size=100, n=3): + """Initialize the genetic algorithm optimizer for n-back sequences. + :param choices: + :param trials: + :param tl: + :param pool_size: + :param n: + """ + self.tl, self.trials, self.choices, self.pool_size, self.n = tl, trials, choices, pool_size, n + self.pool = [] + self.__init_pool(pool_size) + + def generate(self): + """Generate a sequence of trials based on passed parameters. TL ratio and distribution are expected to be + close to the desired ones but not exactly the same. + :return: a sequence of items in "list" format. + """ + generation = 0 + best_parent = self.__find_best_parents(1)[0] + while self.cost(best_parent) > 0.1 and generation < 1000: + generation += 1 + if random.random() > 0.5: + self.pool = list(map(lambda s: self.mutate(s), self.pool)) + self.pool = self.crossover_all() + best_parent = self.__find_best_parents(1)[0] + print(best_parent, 'cost=%f' % self.cost(best_parent)) + return best_parent + + def __init_pool(self, pool_size) -> list: + """ + Initialize solution pool. + :param pool_size: Num of initial random solutions + :return: initial pool of + """ + print("Initializing the pool...") + self.pool.clear() + all_comb = it.combinations_with_replacement(self.choices, self.trials) + sample = random.sample(list(all_comb), pool_size) + self.pool.extend(map(lambda _: ''.join(_), sample)) + return self.pool + + def __find_best_parents(self, count=1): + """ + Find best gene(s) or parent(s) from the current pool. + :param count: Number of desired best parents to be returned. Default is 1.
+ :return: A list of most fit sequences. + """ + sorted_pool = sorted(self.pool, key=lambda ss: self.cost(ss)) + return sorted_pool[:count] + + def even_dist_cost(self, seq): + """ + Calculate fitness according to the similarity to the desired uniform distribution. + :param seq: a string + :return: + """ + costs = {c: 0.0 for c in self.choices} + for c in list(seq): + costs[c] += (1.0 if costs.__contains__(c) else 0.0) + costs = {k: abs(1.0 - v*len(self.choices)/self.trials) for k,v in costs.items()} + return max(list(costs.values())) + + def cost(self, seq): + """ + Calculate overall fitness of a sequence (block of trials). + Right now it's a cost function, so we try to minimize this cost. + :param seq: + :return: + """ + # add fitness for uniform distribution of all stimuli + return abs(self.calculate_tl_ratio(seq, self.n) - self.tl) + self.even_dist_cost(seq) + + def crossover_all(self): + """ + Perform random crossover for all pairs. + :return: new pool + """ + new_pool = [] + for i in range(int(self.pool_size/2)): + seq1 = self.pool[i*2] # change to weighted random + seq2 = self.pool[i*2 + 1] # change to weighted random + new_pool.extend(self.crossover(seq1, seq2)) + + return new_pool + + def crossover(self, seq1, seq2): + """ + Crossover two sequences. + :param seq1: + :param seq2: + :return: + """ + pos = random.randint(0, self.trials) + return [seq1[:pos] + seq2[pos:], seq2[:pos] + seq1[pos:]] + + def mutate(self, seq): + if random.random() > 0.5: + pos = random.randint(0, len(seq)-1) + seq_list = list(seq) + seq_list[pos] = random.choice(self.choices) + return ''.join(seq_list) + return seq + + @staticmethod + def calculate_tl_ratio(seq, n: int): + """Calculates the T/L ratio in a block of trials.""" + targets = 0.0 + lures = 0.0 + for index in range(n, len(seq)): + item = seq[index] + if item == seq[index - n]: + targets += 1.0 + elif item == seq[index - (n-1)] or item == seq[index - (n+1)]: + lures += 1.0 + if lures - 0.0 < 0.001: # avoid division by zero + lures = 0.001 + return targets/lures + + +if __name__ == '__main__': + + generator = ProgressiveGAGenerator(['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h'], trials=16, n=2) + sq = generator.generate() + tl_ratio = generator.calculate_tl_ratio(sq, n=2) + even_dist = generator.even_dist_cost(sq) + + print('Progressively-Optimized Sequence: %s' % sq, 'with tl_ratio=%f' % tl_ratio, 'and even_dist_cost=%f' % even_dist)
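A hedged sketch of exercising the block-level GA generator added above on its own; the small parameter values are assumptions chosen only to keep the search cheap and are not used anywhere in the repository.

from generators.progressive_random_block_ga import ProgressiveGAGenerator

# Evolve one 8-trial block over a small pool, then report how close it gets
# to the requested TL ratio and to an even stimulus distribution.
gen = ProgressiveGAGenerator(['a', 'b', 'c', 'd'], trials=8, tl=2.0, pool_size=50, n=2)
block = gen.generate()

print('block:', block)
print('cost:', gen.cost(block))
print('T/L ratio:', gen.calculate_tl_ratio(block, n=2))
print('even-distribution cost:', gen.even_dist_cost(block))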
diff --git a/generators/random_block_ga.py b/generators/random_block_ga.py new file mode 100644 index 0000000..56b7cc0 --- /dev/null +++ b/generators/random_block_ga.py @@ -0,0 +1,141 @@ +import itertools as it +import random + + +class GAOptimizedRandomGenerator: + """Generate even random sequences according to a predefined TL ratio (Ralph, 2014)""" + + def __init__(self, choices, trials, tl=2.0, pool_size=100, n=3): + """Initialize the genetic algorithm optimizer for n-back sequences. + :param choices: + :param trials: + :param tl: + :param pool_size: + :param n: + """ + self.tl, self.trials, self.choices, self.pool_size, self.n = tl, trials, choices, pool_size, n + self.pool = [] + self.__init_pool(pool_size) + + def generate(self): + """Generate a sequence of trials based on passed parameters. TL ratio and distribution are expected to be + close to the desired ones but not exactly the same. + :return: a sequence of items in "list" format.
+ """ + generation = 0 + best_parent = self.__find_best_parents(1)[0] + while self.cost(best_parent) > 0.1 and generation < 1000: + generation += 1 + if random.random() > 0.5: + self.pool = list(map(lambda s: self.mutate(s), self.pool)) + self.pool = self.crossover_all() + best_parent = self.__find_best_parents(1)[0] + print(best_parent, 'cost=%f' % self.cost(best_parent)) + return best_parent + + def __append_chunk(self, prefix="", chunk_size=8): + chunk_generation = 0 + pool = [] + + def __init_pool(self, pool_size, chunk_size = 8) -> list: + """ + Initialize solution pool. + :param pool_size: Num of initial random solutions + :return: initial pool of + """ + print("Initializing the pool...") + population = it.combinations_with_replacement(self.choices, chunk_size) + sample = random.sample(list(population), pool_size) + self.pool = list(map(lambda _: ''.join(_), sample)) + return self.pool + + def __find_best_parents(self, pool: list, count=1) -> list: + """ + Find best available sequences from the current pool based on the cost function. + :param count: Number of desired best sequences to be returned. Default is 1. + :return: A list of most fit sequences. + """ + sorted_pool = sorted(pool, key=lambda _: self.cost(_)) + return sorted_pool[:count] + + def distribution_cost(self, seq): + """ + Calculate fitness according to the similarity to the desired uniform distribution. + :param seq: a string + :return: + """ + costs = {c: 0.0 for c in self.choices} + for c in list(seq): + costs[c] += 1.0 if costs.__contains__(c) else 0.0 + + # TODO instead of normalizing all, only normalize the max value + costs = {k: abs(1.0 - v*len(self.choices)/self.trials) for k, v in costs.items()} + return max(list(costs.values())) + + def cost(self, seq): + """ + Calculate overall fitness (or cost) of a sequence. + It's a cost function, so we try to minimize this cost. + :param seq: + :return: + """ + # add fitness for uniform distribution of all stimuli + # TODO merge different cost functions with weights + return abs(self.calculate_tl_ratio(seq, self.n) - self.tl) + self.__distribution_cost(seq) + + def crossover_all(self): + """ + Perform random crossover for all pairs. + :return: new pool + """ + new_pool = [] + for i in range(int(self.pool_size/2)): + seq1 = self.pool[i*2] # change to weighted random + seq2 = self.pool[i*2 + 1] # change to weighted random + new_pool.extend(self.crossover(seq1, seq2)) + + return new_pool + + def crossover(self, seq1, seq2): + """ + Crossover two sequences. 
+ :param seq1: + :param seq2: + :return: + """ + pos = random.randint(0, self.trials) + return [seq1[:pos] + seq2[pos:], seq2[:pos] + seq1[pos:]] + + def mutate(self, seq): + if random.random() > 0.5: + pos = random.randint(0, len(seq)-1) + seq_list = list(seq) + seq_list[pos] = random.choice(self.choices) + return ''.join(seq_list) + return seq + + @staticmethod + def calculate_tl_ratio(seq, n: int): + """Calculates the T/L ratio in a block of trials.""" + targets = 0.0 + lures = 0.0 + for index in range(n, len(seq)): + item = seq[index] + if item == seq[index - n]: + targets += 1.0 + elif item == seq[index - (n-1)] or item == seq[index - (n+1)]: + lures += 1.0 + if lures - 0.0 < 0.001: # avoid division by zero + lures = 0.001 + return targets/lures + + +# Demo +if __name__ == '__main__': + + generator = GAOptimizedRandomGenerator(['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h'], trials=16, n=2) + sq = generator.generate() + tl_ratio = generator.calculate_tl_ratio(sq, n=2) + even_dist = generator.distribution_cost(sq) + + print('GA-Optimized Sequence: %s' % sq, 'with tl_ratio=%f' % tl_ratio, 'and even_dist_cost=%f' % even_dist)
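Finally, a pytest-style sanity check (a sketch; the test framework and the hand-built sequences are assumptions, not part of the patch) for the target/lure counting convention shared by the generators above, verified on tiny 2-back sequences.

from generators.progressive_random import SequenceGenerator

def test_count_targets_and_lures_2back():
    # 'aba': position 2 matches position 0, i.e. one 2-back target and no lures
    assert SequenceGenerator.count_targets_and_lures('aba', n=2) == (1.0, 0.0)
    # 'abb': position 2 matches position 1 (an n-1 lure) and no targets
    assert SequenceGenerator.count_targets_and_lures('abb', n=2) == (0.0, 1.0)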
+        """
+        generation = 0
+        best_parent = self.__find_best_parents(self.pool, 1)[0]
+        while self.cost(best_parent) > 0.1 and generation < 1000:
+            generation += 1
+            if random.random() > 0.5:
+                self.pool = list(map(lambda s: self.mutate(s), self.pool))
+            self.pool = self.crossover_all()
+            best_parent = self.__find_best_parents(self.pool, 1)[0]
+            print(best_parent, 'cost=%f' % self.cost(best_parent))
+        return best_parent
+
+    def __append_chunk(self, prefix="", chunk_size=8):
+        # unfinished: chunk-wise generation is not implemented yet
+        chunk_generation = 0
+        pool = []
+
+    def __init_pool(self, pool_size, chunk_size=None) -> list:
+        """
+        Initialize solution pool.
+        :param pool_size: Num of initial random solutions
+        :return: initial pool of
+        """
+        print("Initializing the pool...")
+        # default to full-length blocks when no chunk size is given
+        chunk_size = chunk_size or self.trials
+        population = it.combinations_with_replacement(self.choices, chunk_size)
+        sample = random.sample(list(population), pool_size)
+        self.pool = list(map(lambda _: ''.join(_), sample))
+        return self.pool
+
+    def __find_best_parents(self, pool: list, count=1) -> list:
+        """
+        Find best available sequences from the current pool based on the cost function.
+        :param count: Number of desired best sequences to be returned. Default is 1.
+        :return: A list of most fit sequences.
+        """
+        sorted_pool = sorted(pool, key=lambda _: self.cost(_))
+        return sorted_pool[:count]
+
+    def distribution_cost(self, seq):
+        """
+        Calculate fitness according to the similarity to the desired uniform distribution.
+        :param seq: a string
+        :return:
+        """
+        costs = {c: 0.0 for c in self.choices}
+        for c in list(seq):
+            costs[c] += 1.0 if costs.__contains__(c) else 0.0
+
+        # TODO instead of normalizing all, only normalize the max value
+        costs = {k: abs(1.0 - v*len(self.choices)/self.trials) for k, v in costs.items()}
+        return max(list(costs.values()))
+
+    def cost(self, seq):
+        """
+        Calculate overall fitness (or cost) of a sequence.
+        It's a cost function, so we try to minimize this cost.
+        :param seq:
+        :return:
+        """
+        # add fitness for uniform distribution of all stimuli
+        # TODO merge different cost functions with weights
+        return abs(self.calculate_tl_ratio(seq, self.n) - self.tl) + self.distribution_cost(seq)
+
+    def crossover_all(self):
+        """
+        Perform random crossover for all pairs.
+        :return: new pool
+        """
+        new_pool = []
+        for i in range(int(self.pool_size/2)):
+            seq1 = self.pool[i*2]  # change to weighted random
+            seq2 = self.pool[i*2 + 1]  # change to weighted random
+            new_pool.extend(self.crossover(seq1, seq2))
+
+        return new_pool
+
+    def crossover(self, seq1, seq2):
+        """
+        Crossover two sequences.
+        :param seq1:
+        :param seq2:
+        :return:
+        """
+        pos = random.randint(0, self.trials)
+        return [seq1[:pos] + seq2[pos:], seq2[:pos] + seq1[pos:]]
+
+    def mutate(self, seq):
+        if random.random() > 0.5:
+            pos = random.randint(0, len(seq)-1)
+            seq_list = list(seq)
+            seq_list[pos] = random.choice(self.choices)
+            return ''.join(seq_list)
+        return seq
+
+    @staticmethod
+    def calculate_tl_ratio(seq, n: int):
+        """Calculates the T/L ratio in a block of trials."""
+        targets = 0.0
+        lures = 0.0
+        for index in range(n, len(seq)):
+            item = seq[index]
+            if item == seq[index - n]:
+                targets += 1.0
+            elif item == seq[index - (n-1)] or item == seq[index - (n+1)]:
+                lures += 1.0
+        if lures - 0.0 < 0.001:  # avoid division by zero
+            lures = 0.001
+        return targets/lures
+
+
+# Demo
+if __name__ == '__main__':
+
+    generator = GAOptimizedRandomGenerator(['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h'], trials=16, n=2)
+    sq = generator.generate()
+    tl_ratio = generator.calculate_tl_ratio(sq, n=2)
+    even_dist = generator.distribution_cost(sq)
+
+    print('GA-Optimized Sequence: %s' % sq, 'with tl_ratio=%f' % tl_ratio, 'and even_dist_cost=%f' % even_dist)
diff --git a/generators/skewed_random.py b/generators/skewed_random.py
new file mode 100644
index 0000000..561f2b4
--- /dev/null
+++ b/generators/skewed_random.py
@@ -0,0 +1,51 @@
+import logging
+import random
+
+
+class SequenceGenerator:
+    """Generates random sequence of stimuli for the n-back task. Implementation is based on Ralph (2014)."""
+
+    seq = []
+
+    def __init__(self,
+                 N=2,
+                 trials=16,  # Number of total trials
+                 alphabet=['A', 'B', 'C', 'D', 'E', 'F'],
+                 T=2,   # Number of targets
+                 L1=1,  # Number of lures (foil) similar to the (N+1)-back
+                 L2=1   # Number of lures (foil) similar to the (N-1)-back
+                 ):
+        self.N, self.alphabet, self.trials, self.T, self.L1, self.L2 = N, alphabet, trials, T, L1, L2
+        self.D = trials - T - L1 - L2
+
+    def generate(self) -> list:
+        trial = 1
+        self.seq = []
+        while trial <= self.trials:
+            self.seq += self.random_stimulus(trial)
+            trial += 1
+        return self.seq
+
+    def random_stimulus(self, trial):
+        rnd = random.randint(1, self.trials - trial + 1)
+        T, L1, L2 = self.T, self.L1, self.L2
+        if rnd <= T and len(self.seq) >= self.N:
+            self.T -= 1
+            return self.seq[-self.N]
+        elif T < rnd <= T + L1 and len(self.seq) >= self.N+1:
+            self.L1 -= 1
+            return self.seq[-(self.N+1)]
+        elif T + L1 < rnd <= T + L1 + L2 and len(self.seq) >= self.N-1:
+            self.L2 -= 1
+            return self.seq[-(self.N-1)]
+
+        # distract
+        self.D -= 1
+        alphabet = [item for item in self.alphabet if item not in self.seq[-self.N-1:-self.N+1]]
+        return random.choice(alphabet)
+
+
+if __name__ == '__main__':
+    generator = SequenceGenerator()
+    seq = generator.generate()
+    print('Skewed Random Sequence: %s' % ''.join(seq))
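Usage sketch, not part of the diff: the hunks above move all sequence generators into the new generators package. The snippet below shows how two of the added modules can be driven together from that package; the choice list and trial counts are illustrative values picked for this example only.

from generators import progressive_random, skewed_random

choices = ['A', 'B', 'C', 'D', 'E', 'F']

# Skewed-random block: fixed numbers of targets (T) and lures (L1, L2),
# the remaining trials are filled with distractors (Ralph, 2014).
skewed = skewed_random.SequenceGenerator(N=2, trials=16, alphabet=choices, T=2, L1=1, L2=1)
skewed_block = skewed.generate()

# Progressive block: the sequence is grown one trial at a time, always
# taking the stimulus that minimizes the combined cost.
progressive = progressive_random.SequenceGenerator(choices, trials=16, n=2)
progressive_block = progressive.generate()

# The static helper from progressive_random can score any block after the fact.
targets, lures = progressive_random.SequenceGenerator.count_targets_and_lures(progressive_block, 2)

print('skewed     :', ''.join(skewed_block))
print('progressive:', ''.join(progressive_block),
      'targets=%d lures=%d T/L=%.2f' % (targets, lures, progressive.calc_tl_ratio(progressive_block, n=2)))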
diff --git a/progressive_ga_optimized_generator.py b/progressive_ga_optimized_generator.py
deleted file mode 100644
index c07ac49..0000000
--- a/progressive_ga_optimized_generator.py
+++ /dev/null
@@ -1,134 +0,0 @@
-import itertools as it
-import random
-
-
-class ProgressiveGAGenerator:
-    """Generate a sequence progressively according to a predefined TL ratio and an even distribution"""
-
-    def __init__(self, choices, trials, tl=2.0, pool_size=100, n=3):
-        """Initialize the genetic algorithm optimizer for n-back sequences.
- :param choices: - :param trials: - :param tl: - :param pool_size: - :param n: - """ - self.tl, self.trials, self.choices, self.pool_size, self.n = tl, trials, choices, pool_size, n - self.pool = [] - self.__init_pool(pool_size) - - def generate(self): - """Generate a sequence of trials based on passed parameters. TL ratio and distribution are expected to be - close to the desired ones but not exactly the same. - :return: a sequence of items in "list" format. - """ - generation = 0 - best_parent = self.__find_best_parents(1)[0] - while self.cost(best_parent) > 0.1 and generation < 1000: - generation += 1 - if random.random() > 0.5: - self.pool = list(map(lambda s: self.mutate(s), self.pool)) - self.pool = self.crossover_all() - best_parent = self.__find_best_parents(1)[0] - print(best_parent, 'cost=%f' % self.cost(best_parent)) - return best_parent - - def __init_pool(self, pool_size) -> list: - """ - Initialize solution pool. - :param pool_size: Num of initial random solutions - :return: initial pool of - """ - print("Initializing the pool...") - self.pool.clear() - all_comb = it.combinations_with_replacement(self.choices, self.trials) - sample = random.sample(list(all_comb), pool_size) - self.pool.extend(map(lambda _: ''.join(_), sample)) - return self.pool - - def __find_best_parents(self, count=1): - """ - Find best gene(s) or parent(s) from the current pool. - :param count: Number of desired best parents to be returned. Default is 1. - :return: A list of most fit sequences. - """ - sorted_pool = sorted(self.pool, key=lambda ss: self.cost(ss)) - return sorted_pool[:count] - - def even_dist_cost(self, seq): - """ - Calculate fitness according to the similarity to the desired uniform distribution. - :param seq: a string - :return: - """ - costs = {c: 0.0 for c in self.choices} - for c in list(seq): - costs[c] += (1.0 if costs.__contains__(c) else 0.0) - costs = {k: abs(1.0 - v*len(self.choices)/self.trials) for k,v in costs.items()} - return max(list(costs.values())) - - def cost(self, seq): - """ - Calculate overall fitness of a sequence (block of trials). - Right now it's a cost function, so we try to minimize this cost. - :param seq: - :return: - """ - # add fitness for uniform distribution of all stimuli - return abs(self.calculate_tl_ratio(seq, self.n) - self.tl) + self.even_dist_cost(seq) - - def crossover_all(self): - """ - Perform random crossover for all pairs. - :return: new pool - """ - new_pool = [] - for i in range(int(self.pool_size/2)): - seq1 = self.pool[i*2] # change to weighted random - seq2 = self.pool[i*2 + 1] # change to weighted random - new_pool.extend(self.crossover(seq1, seq2)) - - return new_pool - - def crossover(self, seq1, seq2): - """ - Crossover two sequences. 
-        :param seq1:
-        :param seq2:
-        :return:
-        """
-        pos = random.randint(0, self.trials)
-        return [seq1[:pos] + seq2[pos:], seq2[:pos] + seq1[pos:]]
-
-    def mutate(self, seq):
-        if random.random() > 0.5:
-            pos = random.randint(0, len(seq)-1)
-            seq_list = list(seq)
-            seq_list[pos] = random.choice(self.choices)
-            return ''.join(seq_list)
-        return seq
-
-    @staticmethod
-    def calculate_tl_ratio(seq, n: int):
-        """Calculates the T/L ratio in a block of trials."""
-        targets = 0.0
-        lures = 0.0
-        for index in range(n, len(seq)):
-            item = seq[index]
-            if item == seq[index - n]:
-                targets += 1.0
-            elif item == seq[index - (n-1)] or item == seq[index - (n+1)]:
-                lures += 1.0
-        if lures - 0.0 < 0.001:  # avoid division by zero
-            lures = 0.001
-        return targets/lures
-
-
-if __name__ == '__main__':
-
-    generator = ProgressiveGAGenerator(['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h'], trials=16, n=2)
-    sq = generator.generate()
-    tl_ratio = generator.calculate_tl_ratio(sq, n=2)
-    even_dist = generator.even_dist_cost(sq)
-
-    print('Progressively-Optimized Sequence: %s' % sq, 'with tl_ratio=%f' % tl_ratio, 'and even_dist_cost=%f' % even_dist)
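The deletion above removes the old root-level module; its ProgressiveGAGenerator class now lives in generators/progressive_random_block_ga.py, added earlier in this diff. A minimal migration sketch for callers, with the old import shown only as a comment since that module no longer exists:

# Before this change (root-level module, deleted above):
#   from progressive_ga_optimized_generator import ProgressiveGAGenerator

# After this change, the identical class is imported from the package:
from generators.progressive_random_block_ga import ProgressiveGAGenerator

generator = ProgressiveGAGenerator(['a', 'b', 'c', 'd'], trials=8, n=2)
block = generator.generate()
print(block, 'T/L=%.2f' % ProgressiveGAGenerator.calculate_tl_ratio(block, n=2))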
control.initialize(exp) @@ -34,7 +34,7 @@ for trial in block.trials: exp.clock.wait(1000 - fixation.present()) trial.stimuli[0].present() - pressed_key, rt = exp.keyboard.wait([expyriment.misc.constants.K_LEFT, expyriment.misc.constants.K_RIGHT]) + pressed_key, rt = exp.keyboard.wait(response_keys) exp.data.add([block.get_factor("block"), trial.get_factor("stimulus"), pressed_key, rt]) exp.clock.wait(1000 - expyriment.stimuli.BlankScreen().present() - trial.stimuli[0].unload()) diff --git a/even_random_generator.py b/even_random_generator.py deleted file mode 100644 index fda40d8..0000000 --- a/even_random_generator.py +++ /dev/null @@ -1,75 +0,0 @@ -from constraint import * - -import itertools as it - -import Numberjack as nj - -class EvenRandomGenerator: - """Generate even random sequences according to a predefined TL ration (Ralph, 2014)""" - - def __init__(self, choices, trials=64, tl=1): - self.tl, self.trials, self.choices = tl, trials, choices - - def generate(self): - seqs = self._generate_initial_sequences() - return self._find_optimal_sequence(seqs, 0.2) - - def _generate_initial_sequences(self): - """ - Generates initial sequence of items based on choices and number of desired trials. - In EvenRandom sequences, all stimuli have same number of appearances. - """ - - pool = it.product(self.choices, repeat=self.trials) - return pool - - def _find_optimal_sequence_numberjack(self,tl_ratio): - """Optimize with Numberjack""" - - cost = nj.Variable(0, 100, 'cost') - seqs = nj.Variable([]) # all sequences - - model = nj.Model( - seqs., - cost == self.calculate_tl_ratio(seq) - tl_ratio, - nj.Minimise(cost) - ) - solver = model.load('Mistral') - if solver.solve(): - solver.printStatistics() - else: - print("No solution with Numberjack") - - - def _find_optimal_sequence(self, sequences, tl_ratio): - """Optimize a sequence to match a desired tl ratio with python-constraints""" - - p = Problem() - - # TODO add all possible values for seq (its domain) - p.addVariable("seq", list(sequences)) - - p.addConstraint(lambda s: self.calculate_tl_ratio(s) - tl_ratio < 0.05) - - return p.getSolution() - - @staticmethod - def calculate_tl_ratio(seq): - """Calculates the T:L ratio of a sequence.""" - targets = 0 - lures = 0 - for index, item in seq: - if item == seq[index-2]: - targets += 1 - elif item == seq[index-1] or item == seq[index-3]: - lures += 1 - # avoid division by zero - if lures == 0: - lures = 1 - return targets/lures - - -if __name__ == '__main__': - generator = EvenRandomGenerator(['a', 'b', 'c'], trials = 4) - generated_seq = generator.generate() - print('Even Random Sequence: %s' % ''.join(generated_seq)) diff --git a/ga_optimized_generator.py b/ga_optimized_generator.py deleted file mode 100644 index 56b7cc0..0000000 --- a/ga_optimized_generator.py +++ /dev/null @@ -1,141 +0,0 @@ -import itertools as it -import random - - -class GAOptimizedRandomGenerator: - """Generate even random sequences according to a predefined TL ration (Ralph, 2014)""" - - def __init__(self, choices, trials, tl=2.0, pool_size=100, n=3): - """Initialize the genetic algorithm optimizer for n-back sequences. - :param choices: - :param trials: - :param tl: - :param pool_size: - :param n: - """ - self.tl, self.trials, self.choices, self.pool_size, self.n = tl, trials, choices, pool_size, n - self.pool = [] - self.__init_pool(pool_size) - - def generate(self): - """Generate a sequence of trials based on passed parameters. 
TL ratio and distribution are expected to be - close to the desired ones but not exactly the same. - :return: a sequence of items in "list" format. - """ - generation = 0 - best_parent = self.__find_best_parents(1)[0] - while self.cost(best_parent) > 0.1 and generation < 1000: - generation += 1 - if random.random() > 0.5: - self.pool = list(map(lambda s: self.mutate(s), self.pool)) - self.pool = self.crossover_all() - best_parent = self.__find_best_parents(1)[0] - print(best_parent, 'cost=%f' % self.cost(best_parent)) - return best_parent - - def __append_chunk(self, prefix="", chunk_size=8): - chunk_generation = 0 - pool = [] - - def __init_pool(self, pool_size, chunk_size = 8) -> list: - """ - Initialize solution pool. - :param pool_size: Num of initial random solutions - :return: initial pool of - """ - print("Initializing the pool...") - population = it.combinations_with_replacement(self.choices, chunk_size) - sample = random.sample(list(population), pool_size) - self.pool = list(map(lambda _: ''.join(_), sample)) - return self.pool - - def __find_best_parents(self, pool: list, count=1) -> list: - """ - Find best available sequences from the current pool based on the cost function. - :param count: Number of desired best sequences to be returned. Default is 1. - :return: A list of most fit sequences. - """ - sorted_pool = sorted(pool, key=lambda _: self.cost(_)) - return sorted_pool[:count] - - def distribution_cost(self, seq): - """ - Calculate fitness according to the similarity to the desired uniform distribution. - :param seq: a string - :return: - """ - costs = {c: 0.0 for c in self.choices} - for c in list(seq): - costs[c] += 1.0 if costs.__contains__(c) else 0.0 - - # TODO instead of normalizing all, only normalize the max value - costs = {k: abs(1.0 - v*len(self.choices)/self.trials) for k, v in costs.items()} - return max(list(costs.values())) - - def cost(self, seq): - """ - Calculate overall fitness (or cost) of a sequence. - It's a cost function, so we try to minimize this cost. - :param seq: - :return: - """ - # add fitness for uniform distribution of all stimuli - # TODO merge different cost functions with weights - return abs(self.calculate_tl_ratio(seq, self.n) - self.tl) + self.__distribution_cost(seq) - - def crossover_all(self): - """ - Perform random crossover for all pairs. - :return: new pool - """ - new_pool = [] - for i in range(int(self.pool_size/2)): - seq1 = self.pool[i*2] # change to weighted random - seq2 = self.pool[i*2 + 1] # change to weighted random - new_pool.extend(self.crossover(seq1, seq2)) - - return new_pool - - def crossover(self, seq1, seq2): - """ - Crossover two sequences. 
- :param seq1: - :param seq2: - :return: - """ - pos = random.randint(0, self.trials) - return [seq1[:pos] + seq2[pos:], seq2[:pos] + seq1[pos:]] - - def mutate(self, seq): - if random.random() > 0.5: - pos = random.randint(0, len(seq)-1) - seq_list = list(seq) - seq_list[pos] = random.choice(self.choices) - return ''.join(seq_list) - return seq - - @staticmethod - def calculate_tl_ratio(seq, n: int): - """Calculates the T/L ratio in a block of trials.""" - targets = 0.0 - lures = 0.0 - for index in range(n, len(seq)): - item = seq[index] - if item == seq[index - n]: - targets += 1.0 - elif item == seq[index - (n-1)] or item == seq[index - (n+1)]: - lures += 1.0 - if lures - 0.0 < 0.001: # avoid division by zero - lures = 0.001 - return targets/lures - - -# Demo -if __name__ == '__main__': - - generator = GAOptimizedRandomGenerator(['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h'], trials=16, n=2) - sq = generator.generate() - tl_ratio = generator.calculate_tl_ratio(sq, n=2) - even_dist = generator.distribution_cost(sq) - - print('GA-Optimized Sequence: %s' % sq, 'with tl_ratio=%f' % tl_ratio, 'and even_dist_cost=%f' % even_dist) diff --git a/generators/__init__.py b/generators/__init__.py new file mode 100644 index 0000000..e69de29 --- /dev/null +++ b/generators/__init__.py diff --git a/generators/even_random.py b/generators/even_random.py new file mode 100644 index 0000000..fda40d8 --- /dev/null +++ b/generators/even_random.py @@ -0,0 +1,75 @@ +from constraint import * + +import itertools as it + +import Numberjack as nj + +class EvenRandomGenerator: + """Generate even random sequences according to a predefined TL ration (Ralph, 2014)""" + + def __init__(self, choices, trials=64, tl=1): + self.tl, self.trials, self.choices = tl, trials, choices + + def generate(self): + seqs = self._generate_initial_sequences() + return self._find_optimal_sequence(seqs, 0.2) + + def _generate_initial_sequences(self): + """ + Generates initial sequence of items based on choices and number of desired trials. + In EvenRandom sequences, all stimuli have same number of appearances. 
+ """ + + pool = it.product(self.choices, repeat=self.trials) + return pool + + def _find_optimal_sequence_numberjack(self,tl_ratio): + """Optimize with Numberjack""" + + cost = nj.Variable(0, 100, 'cost') + seqs = nj.Variable([]) # all sequences + + model = nj.Model( + seqs., + cost == self.calculate_tl_ratio(seq) - tl_ratio, + nj.Minimise(cost) + ) + solver = model.load('Mistral') + if solver.solve(): + solver.printStatistics() + else: + print("No solution with Numberjack") + + + def _find_optimal_sequence(self, sequences, tl_ratio): + """Optimize a sequence to match a desired tl ratio with python-constraints""" + + p = Problem() + + # TODO add all possible values for seq (its domain) + p.addVariable("seq", list(sequences)) + + p.addConstraint(lambda s: self.calculate_tl_ratio(s) - tl_ratio < 0.05) + + return p.getSolution() + + @staticmethod + def calculate_tl_ratio(seq): + """Calculates the T:L ratio of a sequence.""" + targets = 0 + lures = 0 + for index, item in seq: + if item == seq[index-2]: + targets += 1 + elif item == seq[index-1] or item == seq[index-3]: + lures += 1 + # avoid division by zero + if lures == 0: + lures = 1 + return targets/lures + + +if __name__ == '__main__': + generator = EvenRandomGenerator(['a', 'b', 'c'], trials = 4) + generated_seq = generator.generate() + print('Even Random Sequence: %s' % ''.join(generated_seq)) diff --git a/generators/progressive_random.py b/generators/progressive_random.py new file mode 100644 index 0000000..9914b47 --- /dev/null +++ b/generators/progressive_random.py @@ -0,0 +1,100 @@ +import random +import scipy.stats + + +class SequenceGenerator: + """Generate a sequence progressively according to a predefined TL ratio and an even distribution""" + + def __init__(self, choices, trials, tl=4.0, n=3, targets_ratio=0.2): + """Initialize the genetic algorithm optimizer for n-back sequences. + :param choices: + :param trials: + :param tl: + :param n: + """ + self.tl, self.trials, self.choices, self.n, self.targets_ratio = tl, trials, choices, n, targets_ratio + self.sequence = list() + self.norm_even_dist = scipy.stats.norm(0, trials/2) + self.norm_targets_ratio_dist = scipy.stats.norm(targets_ratio, 0.5) + self.norm_tl_ratio_dist = scipy.stats.norm(tl, trials/2) + + def generate(self): + while not self.sequence or len(self.sequence) < self.trials: + self.sequence = self.__find_best_next_sequence(self.sequence, self.choices) + return self.sequence + + def next_trial(self): + if self.sequence and len(self.sequence) >= self.trials: + return None + self.sequence = self.__find_best_next_sequence(self.sequence, self.choices) + return self.sequence[-1] + + def __find_best_next_sequence(self, seq: list, choices: list) -> list: + import sys + min_cost = sys.float_info.max + best_seq = seq + random.shuffle(choices) # to avoid ordering effect + for choice in choices: + tmp_seq = seq + list(choice) + cost = self.cost(tmp_seq) + if cost < min_cost: + min_cost = cost + best_seq = tmp_seq + return best_seq + + def calc_even_distribution_distance(self, seq): + """ + Calculate fitness according to the similarity to the desired uniform distribution. + :param seq: a string + :return: + """ + costs = {c: 0.0 for c in self.choices} + for c in list(seq): + costs[c] += (1.0 if costs.__contains__(c) else 0.0) + even_ratio = self.trials / len(self.choices) + costs = {k: abs(v - even_ratio)/self.trials for k,v in costs.items()} + return max(list(costs.values())) + + def cost(self, seq): + """ + Calculate overall fitness of a sequence (block of trials). 
+        Right now it's a cost function, so we try to minimize this cost.
+        :param seq:
+        :return:
+        """
+
+        targets, lures = self.count_targets_and_lures(seq, self.n)
+        targets_ratio_cost = 1.0 - self.norm_targets_ratio_dist.pdf(targets/self.trials)
+        tl_ratio_cost = 1.0 - self.norm_tl_ratio_dist.pdf(self.calc_tl_ratio(seq, self.n))
+        even_dist_cost = 1.0 - self.norm_even_dist.pdf(self.calc_even_distribution_distance(seq))
+        # print(targets_ratio_cost, tl_ratio_cost, even_dist_cost)
+        return targets_ratio_cost + tl_ratio_cost + even_dist_cost
+
+    @staticmethod
+    def count_targets_and_lures(seq, n: int):
+        targets = 0.0
+        lures = 0.0
+        for index in range(n, len(seq)):
+            if seq[index] == seq[index - n]:
+                targets += 1.0
+            elif seq[index] == seq[index - (n-1)] or seq[index] == seq[index - (n+1)]:
+                lures += 1.0
+        return targets, lures
+
+    def calc_tl_ratio(self, seq, n: int):
+        """Calculates the T/L ratio in a block of trials."""
+        targets, lures = self.count_targets_and_lures(seq, n)
+        if lures < 0.01:  # avoid division by zero
+            lures = 0.01
+        return targets/lures
+
+
+if __name__ == '__main__':
+
+    n = 3
+    generator = SequenceGenerator(['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h'], trials=128, n=n)
+    sq = generator.generate()
+    tl_ratio = generator.calc_tl_ratio(sq, n=n)
+    even_dist_distance = generator.calc_even_distribution_distance(sq)
+
+    print('Progressively-Optimized Sequence: targets=%d, lures=%d' % generator.count_targets_and_lures(sq, n=n), 'with tl_ratio=%f' % tl_ratio, 'and even_dist_cost=%f' % even_dist_distance)
diff --git a/generators/progressive_random_block_ga.py b/generators/progressive_random_block_ga.py
new file mode 100644
index 0000000..c07ac49
--- /dev/null
+++ b/generators/progressive_random_block_ga.py
@@ -0,0 +1,134 @@
+import itertools as it
+import random
+
+
+class ProgressiveGAGenerator:
+    """Generate a sequence progressively according to a predefined TL ratio and an even distribution"""
+
+    def __init__(self, choices, trials, tl=2.0, pool_size=100, n=3):
+        """Initialize the genetic algorithm optimizer for n-back sequences.
+        :param choices:
+        :param trials:
+        :param tl:
+        :param pool_size:
+        :param n:
+        """
+        self.tl, self.trials, self.choices, self.pool_size, self.n = tl, trials, choices, pool_size, n
+        self.pool = []
+        self.__init_pool(pool_size)
+
+    def generate(self):
+        """Generate a sequence of trials based on passed parameters. TL ratio and distribution are expected to be
+        close to the desired ones but not exactly the same.
+        :return: the best sequence found, as a string of stimuli.
+        """
+        generation = 0
+        best_parent = self.__find_best_parents(1)[0]
+        while self.cost(best_parent) > 0.1 and generation < 1000:
+            generation += 1
+            if random.random() > 0.5:
+                self.pool = list(map(lambda s: self.mutate(s), self.pool))
+            self.pool = self.crossover_all()
+            best_parent = self.__find_best_parents(1)[0]
+            print(best_parent, 'cost=%f' % self.cost(best_parent))
+        return best_parent
+
+    def __init_pool(self, pool_size) -> list:
+        """
+        Initialize solution pool.
+        :param pool_size: Num of initial random solutions
+        :return: initial pool of candidate sequences
+        """
+        print("Initializing the pool...")
+        self.pool.clear()
+        all_comb = it.combinations_with_replacement(self.choices, self.trials)
+        sample = random.sample(list(all_comb), pool_size)
+        self.pool.extend(map(lambda _: ''.join(_), sample))
+        return self.pool
+
+    def __find_best_parents(self, count=1):
+        """
+        Find best gene(s) or parent(s) from the current pool.
+        :param count: Number of desired best parents to be returned. Default is 1.
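+        Parents are ranked by ascending cost, so the fittest (lowest-cost) sequences come first.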
+        :return: A list of most fit sequences.
+        """
+        sorted_pool = sorted(self.pool, key=lambda ss: self.cost(ss))
+        return sorted_pool[:count]
+
+    def even_dist_cost(self, seq):
+        """
+        Calculate fitness according to the similarity to the desired uniform distribution.
+        :param seq: a string
+        :return:
+        """
+        costs = {c: 0.0 for c in self.choices}
+        for c in list(seq):
+            costs[c] += (1.0 if costs.__contains__(c) else 0.0)
+        costs = {k: abs(1.0 - v*len(self.choices)/self.trials) for k,v in costs.items()}
+        return max(list(costs.values()))
+
+    def cost(self, seq):
+        """
+        Calculate overall fitness of a sequence (block of trials).
+        Right now it's a cost function, so we try to minimize this cost.
+        :param seq:
+        :return:
+        """
+        # add fitness for uniform distribution of all stimuli
+        return abs(self.calculate_tl_ratio(seq, self.n) - self.tl) + self.even_dist_cost(seq)
+
+    def crossover_all(self):
+        """
+        Perform random crossover for all pairs.
+        :return: new pool
+        """
+        new_pool = []
+        for i in range(int(self.pool_size/2)):
+            seq1 = self.pool[i*2]  # change to weighted random
+            seq2 = self.pool[i*2 + 1]  # change to weighted random
+            new_pool.extend(self.crossover(seq1, seq2))
+
+        return new_pool
+
+    def crossover(self, seq1, seq2):
+        """
+        Crossover two sequences.
+        :param seq1:
+        :param seq2:
+        :return:
+        """
+        pos = random.randint(0, self.trials)
+        return [seq1[:pos] + seq2[pos:], seq2[:pos] + seq1[pos:]]
+
+    def mutate(self, seq):
+        if random.random() > 0.5:
+            pos = random.randint(0, len(seq)-1)
+            seq_list = list(seq)
+            seq_list[pos] = random.choice(self.choices)
+            return ''.join(seq_list)
+        return seq
+
+    @staticmethod
+    def calculate_tl_ratio(seq, n: int):
+        """Calculates the T/L ratio in a block of trials."""
+        targets = 0.0
+        lures = 0.0
+        for index in range(n, len(seq)):
+            item = seq[index]
+            if item == seq[index - n]:
+                targets += 1.0
+            elif item == seq[index - (n-1)] or item == seq[index - (n+1)]:
+                lures += 1.0
+        if lures < 0.001:  # avoid division by zero
+            lures = 0.001
+        return targets/lures
+
+
+if __name__ == '__main__':
+
+    generator = ProgressiveGAGenerator(['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h'], trials=16, n=2)
+    sq = generator.generate()
+    tl_ratio = generator.calculate_tl_ratio(sq, n=2)
+    even_dist = generator.even_dist_cost(sq)
+
+    print('Progressively-Optimized Sequence: %s' % sq, 'with tl_ratio=%f' % tl_ratio, 'and even_dist_cost=%f' % even_dist)
diff --git a/generators/random_block_ga.py b/generators/random_block_ga.py
new file mode 100644
index 0000000..56b7cc0
--- /dev/null
+++ b/generators/random_block_ga.py
@@ -0,0 +1,141 @@
+import itertools as it
+import random
+
+
+class GAOptimizedRandomGenerator:
+    """Generate even random sequences according to a predefined TL ratio (Ralph, 2014)"""
+
+    def __init__(self, choices, trials, tl=2.0, pool_size=100, n=3):
+        """Initialize the genetic algorithm optimizer for n-back sequences.
+        :param choices:
+        :param trials:
+        :param tl:
+        :param pool_size:
+        :param n:
+        """
+        self.tl, self.trials, self.choices, self.pool_size, self.n = tl, trials, choices, pool_size, n
+        self.pool = []
+        self.__init_pool(pool_size)
+
+    def generate(self):
+        """Generate a sequence of trials based on passed parameters. TL ratio and distribution are expected to be
+        close to the desired ones but not exactly the same.
+        :return: the best sequence found, as a string of stimuli.
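+        The search runs for at most 1000 generations and stops early once the best candidate's cost drops to 0.1 or below.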
+ """ + generation = 0 + best_parent = self.__find_best_parents(1)[0] + while self.cost(best_parent) > 0.1 and generation < 1000: + generation += 1 + if random.random() > 0.5: + self.pool = list(map(lambda s: self.mutate(s), self.pool)) + self.pool = self.crossover_all() + best_parent = self.__find_best_parents(1)[0] + print(best_parent, 'cost=%f' % self.cost(best_parent)) + return best_parent + + def __append_chunk(self, prefix="", chunk_size=8): + chunk_generation = 0 + pool = [] + + def __init_pool(self, pool_size, chunk_size = 8) -> list: + """ + Initialize solution pool. + :param pool_size: Num of initial random solutions + :return: initial pool of + """ + print("Initializing the pool...") + population = it.combinations_with_replacement(self.choices, chunk_size) + sample = random.sample(list(population), pool_size) + self.pool = list(map(lambda _: ''.join(_), sample)) + return self.pool + + def __find_best_parents(self, pool: list, count=1) -> list: + """ + Find best available sequences from the current pool based on the cost function. + :param count: Number of desired best sequences to be returned. Default is 1. + :return: A list of most fit sequences. + """ + sorted_pool = sorted(pool, key=lambda _: self.cost(_)) + return sorted_pool[:count] + + def distribution_cost(self, seq): + """ + Calculate fitness according to the similarity to the desired uniform distribution. + :param seq: a string + :return: + """ + costs = {c: 0.0 for c in self.choices} + for c in list(seq): + costs[c] += 1.0 if costs.__contains__(c) else 0.0 + + # TODO instead of normalizing all, only normalize the max value + costs = {k: abs(1.0 - v*len(self.choices)/self.trials) for k, v in costs.items()} + return max(list(costs.values())) + + def cost(self, seq): + """ + Calculate overall fitness (or cost) of a sequence. + It's a cost function, so we try to minimize this cost. + :param seq: + :return: + """ + # add fitness for uniform distribution of all stimuli + # TODO merge different cost functions with weights + return abs(self.calculate_tl_ratio(seq, self.n) - self.tl) + self.__distribution_cost(seq) + + def crossover_all(self): + """ + Perform random crossover for all pairs. + :return: new pool + """ + new_pool = [] + for i in range(int(self.pool_size/2)): + seq1 = self.pool[i*2] # change to weighted random + seq2 = self.pool[i*2 + 1] # change to weighted random + new_pool.extend(self.crossover(seq1, seq2)) + + return new_pool + + def crossover(self, seq1, seq2): + """ + Crossover two sequences. 
+        :param seq1:
+        :param seq2:
+        :return:
+        """
+        pos = random.randint(0, self.trials)
+        return [seq1[:pos] + seq2[pos:], seq2[:pos] + seq1[pos:]]
+
+    def mutate(self, seq):
+        if random.random() > 0.5:
+            pos = random.randint(0, len(seq)-1)
+            seq_list = list(seq)
+            seq_list[pos] = random.choice(self.choices)
+            return ''.join(seq_list)
+        return seq
+
+    @staticmethod
+    def calculate_tl_ratio(seq, n: int):
+        """Calculates the T/L ratio in a block of trials."""
+        targets = 0.0
+        lures = 0.0
+        for index in range(n, len(seq)):
+            item = seq[index]
+            if item == seq[index - n]:
+                targets += 1.0
+            elif item == seq[index - (n-1)] or item == seq[index - (n+1)]:
+                lures += 1.0
+        if lures < 0.001:  # avoid division by zero
+            lures = 0.001
+        return targets/lures
+
+
+# Demo
+if __name__ == '__main__':
+
+    generator = GAOptimizedRandomGenerator(['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h'], trials=16, n=2)
+    sq = generator.generate()
+    tl_ratio = generator.calculate_tl_ratio(sq, n=2)
+    even_dist = generator.distribution_cost(sq)
+
+    print('GA-Optimized Sequence: %s' % sq, 'with tl_ratio=%f' % tl_ratio, 'and even_dist_cost=%f' % even_dist)
diff --git a/generators/skewed_random.py b/generators/skewed_random.py
new file mode 100644
index 0000000..561f2b4
--- /dev/null
+++ b/generators/skewed_random.py
@@ -0,0 +1,51 @@
+import logging
+import random
+
+
+class SequenceGenerator:
+    """Generates random sequence of stimuli for the n-back task. Implementation is based on Ralph (2014)."""
+
+    seq = []
+
+    def __init__(self,
+                 N=2,
+                 trials=16,  # Number of total trials
+                 alphabet=['A', 'B', 'C', 'D', 'E', 'F'],
+                 T=2,  # Number of targets
+                 L1=1,  # Number of lures (foil) similar to the (N+1)-back
+                 L2=1   # Number of lures (foil) similar to the (N-1)-back
+                 ):
+        self.N, self.alphabet, self.trials, self.T, self.L1, self.L2 = N, alphabet, trials, T, L1, L2
+        self.D = trials - T - L1 - L2
+
+    def generate(self) -> list:
+        trial = 1
+        self.seq = []
+        while trial <= self.trials:
+            self.seq += self.random_stimulus(trial)
+            trial += 1
+        return self.seq
+
+    def random_stimulus(self, trial):
+        rnd = random.randint(1, self.trials - trial + 1)
+        T, L1, L2 = self.T, self.L1, self.L2
+        if rnd <= T and len(self.seq) >= self.N:
+            self.T -= 1
+            return self.seq[-self.N]
+        elif T < rnd <= T + L1 and len(self.seq) >= self.N+1:
+            self.L1 -= 1
+            return self.seq[-(self.N+1)]
+        elif T + L1 < rnd <= T + L1 + L2 and len(self.seq) >= self.N-1:
+            self.L2 -= 1
+            return self.seq[-(self.N-1)]
+
+        # distract
+        self.D -= 1
+        alphabet = [item for item in self.alphabet if item not in self.seq[-self.N-1:-self.N+1]]
+        return random.choice(alphabet)
+
+
+if __name__ == '__main__':
+    generator = SequenceGenerator()
+    seq = generator.generate()
+    print('Skewed Random Sequence: %s' % ''.join(seq))
diff --git a/progressive_ga_optimized_generator.py b/progressive_ga_optimized_generator.py
deleted file mode 100644
index c07ac49..0000000
--- a/progressive_ga_optimized_generator.py
+++ /dev/null
@@ -1,134 +0,0 @@
-import itertools as it
-import random
-
-
-class ProgressiveGAGenerator:
-    """Generate a sequence progressively according to a predefined TL ratio and an even distribution"""
-
-    def __init__(self, choices, trials, tl=2.0, pool_size=100, n=3):
-        """Initialize the genetic algorithm optimizer for n-back sequences.
-        :param choices:
-        :param trials:
-        :param tl:
-        :param pool_size:
-        :param n:
-        """
-        self.tl, self.trials, self.choices, self.pool_size, self.n = tl, trials, choices, pool_size, n
-        self.pool = []
-        self.__init_pool(pool_size)
-
-    def generate(self):
-        """Generate a sequence of trials based on passed parameters. TL ratio and distribution are expected to be
-        close to the desired ones but not exactly the same.
-        :return: a sequence of items in "list" format.
-        """
-        generation = 0
-        best_parent = self.__find_best_parents(1)[0]
-        while self.cost(best_parent) > 0.1 and generation < 1000:
-            generation += 1
-            if random.random() > 0.5:
-                self.pool = list(map(lambda s: self.mutate(s), self.pool))
-            self.pool = self.crossover_all()
-            best_parent = self.__find_best_parents(1)[0]
-            print(best_parent, 'cost=%f' % self.cost(best_parent))
-        return best_parent
-
-    def __init_pool(self, pool_size) -> list:
-        """
-        Initialize solution pool.
-        :param pool_size: Num of initial random solutions
-        :return: initial pool of
-        """
-        print("Initializing the pool...")
-        self.pool.clear()
-        all_comb = it.combinations_with_replacement(self.choices, self.trials)
-        sample = random.sample(list(all_comb), pool_size)
-        self.pool.extend(map(lambda _: ''.join(_), sample))
-        return self.pool
-
-    def __find_best_parents(self, count=1):
-        """
-        Find best gene(s) or parent(s) from the current pool.
-        :param count: Number of desired best parents to be returned. Default is 1.
-        :return: A list of most fit sequences.
-        """
-        sorted_pool = sorted(self.pool, key=lambda ss: self.cost(ss))
-        return sorted_pool[:count]
-
-    def even_dist_cost(self, seq):
-        """
-        Calculate fitness according to the similarity to the desired uniform distribution.
-        :param seq: a string
-        :return:
-        """
-        costs = {c: 0.0 for c in self.choices}
-        for c in list(seq):
-            costs[c] += (1.0 if costs.__contains__(c) else 0.0)
-        costs = {k: abs(1.0 - v*len(self.choices)/self.trials) for k,v in costs.items()}
-        return max(list(costs.values()))
-
-    def cost(self, seq):
-        """
-        Calculate overall fitness of a sequence (block of trials).
-        Right now it's a cost function, so we try to minimize this cost.
-        :param seq:
-        :return:
-        """
-        # add fitness for uniform distribution of all stimuli
-        return abs(self.calculate_tl_ratio(seq, self.n) - self.tl) + self.even_dist_cost(seq)
-
-    def crossover_all(self):
-        """
-        Perform random crossover for all pairs.
-        :return: new pool
-        """
-        new_pool = []
-        for i in range(int(self.pool_size/2)):
-            seq1 = self.pool[i*2]  # change to weighted random
-            seq2 = self.pool[i*2 + 1]  # change to weighted random
-            new_pool.extend(self.crossover(seq1, seq2))
-
-        return new_pool
-
-    def crossover(self, seq1, seq2):
-        """
-        Crossover two sequences.
-        :param seq1:
-        :param seq2:
-        :return:
-        """
-        pos = random.randint(0, self.trials)
-        return [seq1[:pos] + seq2[pos:], seq2[:pos] + seq1[pos:]]
-
-    def mutate(self, seq):
-        if random.random() > 0.5:
-            pos = random.randint(0, len(seq)-1)
-            seq_list = list(seq)
-            seq_list[pos] = random.choice(self.choices)
-            return ''.join(seq_list)
-        return seq
-
-    @staticmethod
-    def calculate_tl_ratio(seq, n: int):
-        """Calculates the T/L ratio in a block of trials."""
-        targets = 0.0
-        lures = 0.0
-        for index in range(n, len(seq)):
-            item = seq[index]
-            if item == seq[index - n]:
-                targets += 1.0
-            elif item == seq[index - (n-1)] or item == seq[index - (n+1)]:
-                lures += 1.0
-        if lures - 0.0 < 0.001:  # avoid division by zero
-            lures = 0.001
-        return targets/lures
-
-
-if __name__ == '__main__':
-
-    generator = ProgressiveGAGenerator(['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h'], trials=16, n=2)
-    sq = generator.generate()
-    tl_ratio = generator.calculate_tl_ratio(sq, n=2)
-    even_dist = generator.even_dist_cost(sq)
-
-    print('Progressively-Optimized Sequence: %s' % sq, 'with tl_ratio=%f' % tl_ratio, 'and even_dist_cost=%f' % even_dist)
diff --git a/skewed_random.py b/skewed_random.py
deleted file mode 100644
index a6e4ea7..0000000
--- a/skewed_random.py
+++ /dev/null
@@ -1,51 +0,0 @@
-import logging
-import random
-
-
-class SequenceGenerator:
-    """Generates random sequence of stimuli for the n-back task. Implementation is based on Ralph (2014)."""
-
-    seq = []
-
-    def __init__(self,
-                 N=2,
-                 trials=10,  # Number of total trials
-                 alphabet=['A', 'B', 'C', 'D', 'E', 'F'],
-                 T=2,  # Number of targets
-                 L1=1,  # Number of lures (foil) similar to the (N+1)-back
-                 L2=1   # Number of lures (foil) similar to the (N-1)-back
-                 ):
-        self.N, self.alphabet, self.trials, self.T, self.L1, self.L2 = N, alphabet, trials, T, L1, L2
-        self.D = trials - T - L1 - L2
-
-    def generate(self) -> list:
-        trial = 1
-        self.seq = []
-        while trial <= self.trials:
-            self.seq += self.random_stimulus(trial)
-            trial += 1
-        return self.seq
-
-    def random_stimulus(self, trial):
-        rnd = random.randint(1, self.trials - trial + 1)
-        T, L1, L2 = self.T, self.L1, self.L2
-        if rnd <= T and len(self.seq) >= self.N:
-            self.T -= 1
-            return self.seq[-self.N]
-        elif T < rnd <= T + L1 and len(self.seq) >= self.N+1:
-            self.L1 -= 1
-            return self.seq[-(self.N+1)]
-        elif T + L1 < rnd <= T + L1 + L2 and len(self.seq) >= self.N-1:
-            self.L2 -= 1
-            return self.seq[-(self.N-1)]
-
-        # distract
-        self.D -= 1
-        alphabet = [item for item in self.alphabet if item not in self.seq[-self.N-1:-self.N+1]]
-        return random.choice(alphabet)
-
-
-if __name__ == '__main__':
-    generator = SequenceGenerator()
-    seq = generator.generate()
-    print('Skewed Random Sequence: %s' % ''.join(seq))