Wrappers

block

ScheduleAttr

ScheduleAttr(env, schedule, attr_list)

Bases: TrialWrapper

Schedule attributes.

Parameters:

env: TrialEnv object. Required.
schedule: Required.
attr_list: Required.
Source code in neurogym/wrappers/block.py
def __init__(self, env, schedule, attr_list) -> None:
    super().__init__(env)
    self.schedule = schedule
    self.attr_list = attr_list
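
Example (a minimal sketch, not from the library docs): it assumes attr_list is a list of keyword dictionaries, one per schedule index, that the wrapper forwards to the task's new_trial(), and that the RandomSchedule scheduler and task name below exist in your installation.

import neurogym as ngym
from neurogym.utils import scheduler
from neurogym.wrappers.block import ScheduleAttr

env = ngym.make("PerceptualDecisionMaking-v0")  # illustrative task name
# Assumed format: one kwargs dict per schedule index, passed on to new_trial().
attr_list = [{"coh": 6.4}, {"coh": 51.2}]  # "coh" is a hypothetical task attribute
schedule = scheduler.RandomSchedule(len(attr_list))
env = ScheduleAttr(env, schedule, attr_list=attr_list)
env.reset()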

MultiEnvs

MultiEnvs(envs, env_input=False)

Bases: TrialWrapper

Wrap multiple environments.

Parameters:

envs: list of env objects. Required.
env_input: bool; if True, add scalar inputs indicating the current environment. (def: False)
Source code in neurogym/wrappers/block.py
def __init__(self, envs, env_input=False) -> None:
    super().__init__(envs[0])
    for env in envs:
        env.unwrapped.set_top(self)
    self.envs = envs
    self.i_env = 0

    self.env_input = env_input
    if env_input:
        env_shape = envs[0].observation_space.shape
        if len(env_shape) > 1:
            msg = f"Env must have 1-D Box shape but got {env_shape}."
            raise ValueError(msg)
        _have_equal_shape(envs)
        self.observation_space: spaces.Box = spaces.Box(
            -np.inf,
            np.inf,
            shape=(env_shape[0] + len(self.envs),),
            dtype=self.envs[0].observation_space.dtype,
        )

set_i

set_i(i) -> None

Set the current environment to the i-th environment in envs.

Source code in neurogym/wrappers/block.py
def set_i(self, i) -> None:
    """Set the i-th environment."""
    self.i_env = i
    self.env = self.envs[self.i_env]
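
Example (a usage sketch; the task names are illustrative, and any registered tasks with equal 1-D observation shapes work). With env_input=True, the observation grows by len(envs) scalars indicating the current environment, which is why the shapes must match.

import neurogym as ngym
from neurogym.wrappers.block import MultiEnvs

envs = [
    ngym.make("PerceptualDecisionMaking-v0"),
    ngym.make("PerceptualDecisionMakingDelayResponse-v0"),
]
env = MultiEnvs(envs, env_input=True)
env.set_i(1)  # manually switch to the second environment
env.reset()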

ScheduleEnvs

ScheduleEnvs(envs, schedule, env_input=False)

Bases: TrialWrapper

Schedule environments.

Parameters:

envs: list of env objects. Required.
schedule: utils.scheduler.BaseSchedule object. Required.
env_input: bool; if True, add scalar inputs indicating the current environment. (def: False)
Source code in neurogym/wrappers/block.py
def __init__(self, envs, schedule, env_input=False) -> None:
    super().__init__(envs[0])
    for env in envs:
        env.unwrapped.set_top(self)
    self.envs = envs
    self.schedule = schedule
    self.i_env = self.next_i_env = 0

    self.env_input = env_input
    if env_input:
        env_shape = envs[0].observation_space.shape
        if len(env_shape) > 1:
            msg = f"Env must have 1-D Box shape but got {env_shape}."
            raise ValueError(msg)
        _have_equal_shape(envs)
        self.observation_space: spaces.Box = spaces.Box(
            -np.inf,
            np.inf,
            shape=(env_shape[0] + len(self.envs),),
            dtype=np.float32,
        )

reset

reset(**kwargs)

Resets environments.

Reset each environment in self.envs, then use the scheduler to select the environment whose initial observation is returned. That environment also becomes the current environment self.env.

Source code in neurogym/wrappers/block.py
def reset(self, **kwargs):
    # TODO: kwargs to specify the condition for new_trial
    """Resets environments.

    Reset each environment in self.envs and use the scheduler to select the environment returning
    the initial observation. This environment is also used to set the current environment self.env.
    """
    self.schedule.reset()
    return_i_env = self.schedule()

    # first reset all envs except return_i_env
    for i, env in enumerate(self.envs):
        if i == return_i_env:
            continue

        # change the current env so that calling _top.new_trial() in env.reset() will generate a trial for the env
        # being currently reset (and not an env that is not yet reset)
        self.set_i(i)
        # same env used here and in the first call to new_trial()
        self.next_i_env = self.i_env

        env.reset(**kwargs)

    # then reset return_i_env and return the result
    self.set_i(return_i_env)
    self.next_i_env = self.i_env
    return self.env.reset()

set_i

set_i(i) -> None

Set the current environment to the i-th environment in the list envs.

Source code in neurogym/wrappers/block.py
def set_i(self, i) -> None:
    """Set the current environment to the i-th environment in the list envs."""
    self.i_env = i
    self.env = self.envs[self.i_env]
    self.schedule.i = i
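
Example (a sketch assuming the RandomSchedule scheduler from neurogym.utils.scheduler and illustrative task names; any BaseSchedule object works). Unlike MultiEnvs, the scheduler, rather than manual set_i calls, decides which environment generates each trial.

import neurogym as ngym
from neurogym.utils import scheduler
from neurogym.wrappers.block import ScheduleEnvs

envs = [ngym.make("GoNogo-v0"), ngym.make("PerceptualDecisionMaking-v0")]
schedule = scheduler.RandomSchedule(len(envs))
env = ScheduleEnvs(envs, schedule, env_input=False)
env.reset()  # resets every env, then returns the scheduled env's observation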

TrialHistoryV2

TrialHistoryV2(env, probs=None)

Bases: TrialWrapper

Change ground truth probability based on previous outcome.

Parameters:

probs: matrix of probabilities of the current choice conditioned on the previous one; shape (num_choices, num_choices). (def: None, which yields a uniform matrix)
Source code in neurogym/wrappers/block.py
def __init__(self, env, probs=None) -> None:
    super().__init__(env)
    try:
        self.n_ch = len(self.choices)  # max num of choices
    except AttributeError as e:
        msg = "TrialHistory requires task to have attribute choices."
        raise AttributeError(msg) from e
    if probs is None:
        probs = np.ones((self.n_ch, self.n_ch)) / self.n_ch  # uniform
    self.probs = probs
    if self.probs.shape != (self.n_ch, self.n_ch):
        msg = f"{self.probs.shape=} should be {self.n_ch, self.n_ch=}."
        raise ValueError(msg)
    self.prev_trial = self.rng.choice(self.n_ch)  # random initialization
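
Example: with two choices, a diagonal-heavy probs matrix produces a repeating bias, i.e. after ground truth i the next ground truth is i again with probability 0.8. The task name is illustrative; the wrapped task must expose a choices attribute.

import numpy as np
import neurogym as ngym
from neurogym.wrappers.block import TrialHistoryV2

# Row i gives the distribution of the next ground truth given previous ground truth i.
probs = np.array([
    [0.8, 0.2],  # after choice 0, mostly repeat
    [0.2, 0.8],  # after choice 1, mostly repeat
])
env = TrialHistoryV2(ngym.make("PerceptualDecisionMaking-v0"), probs=probs)
env.reset()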

monitor

Monitor

Monitor(env, folder=None, sv_per=100000, sv_stp='trial', verbose=False, sv_fig=False, num_stps_sv_fig=100, name='', fig_type='png', step_fn=None)

Bases: Wrapper

Monitor task.

Saves relevant behavioral information: rewards, actions, observations, new trial, ground truth.

Parameters:

folder: Folder where the data will be saved. (def: None, str)
sv_per: Data will be saved every sv_per sv_stp's. (def: 100000, int)
sv_stp: Unit for sv_per, either "trial" or "timestep". (def: "trial", str)
verbose: Whether to print information about average reward and number of trials. (def: False, bool)
sv_fig: Whether to save a figure of the experiment structure. If True, the figure is updated every sv_per. (def: False, bool)
num_stps_sv_fig: Number of trial steps to include in the figure. (def: 100, int)
Source code in neurogym/wrappers/monitor.py
def __init__(
    self,
    env,
    folder=None,
    sv_per=100000,
    sv_stp="trial",
    verbose=False,
    sv_fig=False,
    num_stps_sv_fig=100,
    name="",
    fig_type="png",
    step_fn=None,
) -> None:
    super().__init__(env)
    self.env = env
    self.num_tr = 0
    self.step_fn = step_fn
    # data to save
    self.data: dict[str, list] = {"action": [], "reward": []}
    self.sv_per = sv_per
    self.sv_stp = sv_stp
    self.fig_type = fig_type
    if self.sv_stp == "timestep":
        self.t = 0
    self.verbose = verbose
    if folder is None:
        # FIXME is it ok to use tempfile.TemporaryDirectory instead or does this need to be stored locally always?
        folder = "tmp"
    self.folder = folder
    Path(self.folder).mkdir(parents=True, exist_ok=True)
    # seeding
    self.sv_name = self.folder + self.env.__class__.__name__ + "_bhvr_data_" + name + "_"  # FIXME: use pathlib
    # figure
    self.sv_fig = sv_fig
    if self.sv_fig:
        self.num_stps_sv_fig = num_stps_sv_fig
        self.stp_counter = 0
        self.ob_mat: list = []
        self.act_mat: list = []
        self.rew_mat: list = []
        self.gt_mat: list = []
        self.perf_mat: list = []
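
Example (a sketch; the task name is illustrative). Since sv_name is built by plain string concatenation above, pass folder with a trailing separator.

import neurogym as ngym
from neurogym.wrappers.monitor import Monitor

env = Monitor(
    ngym.make("PerceptualDecisionMaking-v0"),
    folder="data/",   # trailing slash matters; see the sv_name concatenation above
    sv_per=1000,      # save behavioral data every 1000 trials
    sv_stp="trial",
    verbose=True,
)
env.reset()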

noise

Noise wrapper.

Created on Thu Feb 28 15:07:21 2019

@author: molano

Noise

Noise(env, std_noise=0.1)

Bases: Wrapper

Add Gaussian noise to the observations.

Parameters:

std_noise: Standard deviation of the noise. (def: 0.1)
perf_th: If not None, the wrapper adjusts the noise so that the mean performance is not larger than perf_th. (def: None, float)
w: Window used to compute the mean performance. (def: 100, int)
step_noise: Step used to increase or decrease the standard deviation. (def: 0.001, float)
Source code in neurogym/wrappers/noise.py
def __init__(self, env, std_noise=0.1) -> None:
    super().__init__(env)
    self.env = env
    self.std_noise = std_noise
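
Example (a sketch; the task name is illustrative). Only std_noise is accepted by the constructor shown above.

import neurogym as ngym
from neurogym.wrappers.noise import Noise

# Gaussian noise with standard deviation 0.2 is added to the observations.
env = Noise(ngym.make("PerceptualDecisionMaking-v0"), std_noise=0.2)
env.reset()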

pass_action

PassAction

PassAction(env)

Bases: Wrapper

Modifies observation by adding the previous action.

Source code in neurogym/wrappers/pass_action.py
def __init__(self, env) -> None:
    super().__init__(env)
    self.env = env
    # TODO: This is not adding one-hot
    env_oss = env.observation_space.shape[0]
    self.observation_space = spaces.Box(
        -np.inf,
        np.inf,
        shape=(env_oss + 1,),
        dtype=np.float32,
    )
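
Example (a sketch; the task name is illustrative). The previous action is appended as a single scalar, so the observation length grows by one (not one-hot, per the TODO above).

import neurogym as ngym
from neurogym.wrappers.pass_action import PassAction

env = PassAction(ngym.make("PerceptualDecisionMaking-v0"))
print(env.observation_space.shape)  # original length + 1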

pass_reward

PassReward

PassReward(env)

Bases: Wrapper

Modifies observation by adding the previous reward.

Source code in neurogym/wrappers/pass_reward.py
def __init__(self, env) -> None:
    """Modifies observation by adding the previous reward."""
    super().__init__(env)
    env_oss = env.observation_space.shape[0]
    self.observation_space = spaces.Box(
        -np.inf,
        np.inf,
        shape=(env_oss + 1,),
        dtype=np.float32,
    )
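
Example (a sketch; the task name is illustrative). PassReward composes with PassAction, each wrapper adding one scalar to the observation.

import neurogym as ngym
from neurogym.wrappers.pass_action import PassAction
from neurogym.wrappers.pass_reward import PassReward

env = ngym.make("PerceptualDecisionMaking-v0")
env = PassAction(env)   # appends the previous action
env = PassReward(env)   # appends the previous reward
print(env.observation_space.shape)  # original length + 2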

reaction_time

Reaction time wrapper.

Created on Thu Feb 28 15:07:21 2019

@author: molano

ReactionTime

ReactionTime(env, urgency=0.0)

Bases: Wrapper

Allow reaction time response.

Modifies a given environment by allowing the network to act at any time after the fixation period.

Source code in neurogym/wrappers/reaction_time.py
def __init__(self, env, urgency=0.0) -> None:
    super().__init__(env)
    self.env = env
    self.urgency = urgency
    self.tr_dur = 0
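
Example (a sketch; the task name is illustrative). Wrapping a fixed-duration decision task lets the agent respond at any step after the fixation period.

import neurogym as ngym
from neurogym.wrappers.reaction_time import ReactionTime

env = ReactionTime(ngym.make("PerceptualDecisionMaking-v0"), urgency=0.0)
env.reset()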

side_bias

SideBias

SideBias(env, probs=None, block_dur=200)

Bases: TrialWrapper

Changes the probability of ground truth.

Parameters:

probs: Specifies the probability of each choice. Within each block, the probabilities should sum to 1. (def: None, numpy array of shape (n_block, n_choices)) Required.
block_dur: Number of trials per block. (def: 200, int)
Source code in neurogym/wrappers/side_bias.py
def __init__(self, env, probs=None, block_dur=200) -> None:
    super().__init__(env)
    try:
        self.choices = self.task.choices
    except AttributeError as e:
        msg = "SideBias requires task to have attribute choices."
        raise AttributeError(msg) from e
    if not isinstance(self.task, ngym.TrialEnv):
        msg = "Task has to be TrialEnv."
        raise TypeError(msg)
    if probs is None:
        msg = "Please provide choices probabilities."
        raise ValueError(msg)
    if isinstance(probs, float | int):
        mat = np.eye(len(self.choices)) * probs
        mat[mat == 0] = 1 - probs
        self.choice_prob = mat
    else:
        self.choice_prob = np.array(probs)
    if self.choice_prob.shape[1] != len(self.choices):
        msg = (
            f"The number of choices {self.choice_prob.shape[1]} inferred from prob mismatches "
            f"{len(self.choices)} inferred from choices."
        )
        raise ValueError(msg)

    self.n_block = self.choice_prob.shape[0]
    self.curr_block = self.task.rng.choice(range(self.n_block))
    self.block_dur = block_dur
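
Example: two mirror-image blocks give blocks of 200 trials, each biased toward one side; every row of probs must sum to 1. The task name is illustrative; the wrapped task must expose a choices attribute.

import numpy as np
import neurogym as ngym
from neurogym.wrappers.side_bias import SideBias

# One row per block; columns give the probability of each choice within the block.
probs = np.array([
    [0.9, 0.1],  # block 0: first choice most likely
    [0.1, 0.9],  # block 1: second choice most likely
])
env = SideBias(ngym.make("PerceptualDecisionMaking-v0"), probs=probs, block_dur=200)
env.reset()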