From d7e4b6380f59e84069b224e7e78337dbb991ee28 Mon Sep 17 00:00:00 2001 From: Micah Woodard Date: Thu, 19 Dec 2024 14:33:16 -0800 Subject: [PATCH 01/23] create model and connect widgets --- src/foraging_gui/Dialogs.py | 8 +- src/foraging_gui/Foraging.py | 190 ++++++++++++++++++++++++++++++++--- 2 files changed, 182 insertions(+), 16 deletions(-) diff --git a/src/foraging_gui/Dialogs.py b/src/foraging_gui/Dialogs.py index f785937e0..d5d6e5c66 100644 --- a/src/foraging_gui/Dialogs.py +++ b/src/foraging_gui/Dialogs.py @@ -15,7 +15,7 @@ from PyQt5.QtWidgets import QApplication, QDialog, QVBoxLayout, QHBoxLayout, QMessageBox, QGridLayout from PyQt5.QtWidgets import QLabel, QDialogButtonBox,QFileDialog,QInputDialog, QLineEdit from PyQt5 import QtWidgets, uic, QtGui -from PyQt5.QtCore import QThreadPool,Qt, QAbstractTableModel, QItemSelectionModel, QObject, QTimer +from PyQt5.QtCore import QThreadPool,Qt, QAbstractTableModel, QItemSelectionModel, QObject, QTimer, pyqtSignal from PyQt5.QtSvg import QSvgWidget from foraging_gui.MyFunctions import Worker @@ -2215,7 +2215,7 @@ def _SelectRigMetadata(self,rig_metadata_file=None): class AutoTrainDialog(QDialog): '''For automatic training''' - + trainingStageChanged = pyqtSignal(TrainingStage) # signal to indicate training stage has changed def __init__(self, MainWindow, parent=None): super().__init__(parent) uic.loadUi('AutoTrain.ui', self) @@ -2651,12 +2651,12 @@ def _override_curriculum_clicked(self, state): def _update_stage_to_apply(self): if self.checkBox_override_stage.isChecked(): self.stage_in_use = self.comboBox_override_stage.currentText() + logger.info(f"Stage overridden to: {self.stage_in_use}") elif self.last_session is not None: self.stage_in_use = self.last_session['next_stage_suggested'] else: self.stage_in_use = 'unknown training stage' - self.pushButton_apply_auto_train_paras.setText( f"Apply and lock\n" + '\n'.join(get_curriculum_string(self.curriculum_in_use).split('(')).strip(')') @@ -2665,7 +2665,7 @@ def 
_update_stage_to_apply(self): logger.info(f"Current stage to apply: {self.stage_in_use} @" f"{get_curriculum_string(self.curriculum_in_use)}") - + self.trainingStageChanged.emit(self.stage_in_use) def _apply_curriculum(self): # Check if a curriculum is selected if not hasattr(self, 'selected_curriculum') or self.selected_curriculum is None: diff --git a/src/foraging_gui/Foraging.py b/src/foraging_gui/Foraging.py index e30abd628..c6ed40a72 100644 --- a/src/foraging_gui/Foraging.py +++ b/src/foraging_gui/Foraging.py @@ -9,7 +9,7 @@ import logging from hashlib import md5 -import logging_loki +#import logging_loki import socket import harp import threading @@ -25,7 +25,7 @@ import serial import numpy as np import pandas as pd -from pykeepass import PyKeePass +#from pykeepass import PyKeePass from matplotlib.backends.backend_qt5agg import NavigationToolbar2QT as NavigationToolbar from scipy.io import savemat, loadmat from PyQt5.QtWidgets import QApplication, QMainWindow, QMessageBox, QSizePolicy @@ -54,7 +54,9 @@ from aind_data_schema.core.session import Session from aind_data_schema_models.modalities import Modality from aind_behavior_services.session import AindBehaviorSessionModel -from aind_auto_train.schema.task import TrainingStage +from aind_behavior_services.task_logic import AindBehaviorTaskLogicModel +from aind_auto_train.schema.task import TrainingStage, DynamicForagingParas, AdvancedBlockMode +import aind_auto_train logger = logging.getLogger(__name__) logger.root.handlers.clear() # clear handlers so console output can be configured @@ -128,11 +130,17 @@ def __init__(self, parent=None,box_number=1,start_bonsai_ide=True): subject=self.ID.text(), experiment_version=foraging_gui.__version__, notes=self.ShowNotes.toPlainText(), - commit_hash= subprocess.check_output(['git', 'rev-parse', 'HEAD']).decode('ascii').strip(), + commit_hash=subprocess.check_output(['git', 'rev-parse', 'HEAD']).decode('ascii').strip(), allow_dirty_repo= 
subprocess.check_output(['git','diff-index','--name-only', 'HEAD']).decode('ascii').strip() != '', skip_hardware_validation=True ) + # create AindBehaviorTaskLogicModel to be used and referenced for task parameter info + self.behavior_task_logic_model = AindBehaviorTaskLogicModel( + name=self.Task.currentText(), + task_parameters=self.initialize_task_parameters().dict(), + version=aind_auto_train.__version__ + ) # add warning_widget to layout and set color self.warning_widget = WarningWidget(log_tag=self.warning_log_tag, @@ -213,6 +221,8 @@ def __init__(self, parent=None,box_number=1,start_bonsai_ide=True): self._ShowRewardPairs() # show reward pairs self._GetTrainingParameters() # get initial training parameters self.connectSignalsSlots() + self.connect_session_model() # connect relevant widgets to update session model + self.connect_task_parameters() # connect relevant widgets to update task parameters self._Task() self.keyPressEvent() self._WaterVolumnManage2() @@ -243,6 +253,170 @@ def __init__(self, parent=None,box_number=1,start_bonsai_ide=True): self._ReconnectBonsai() logging.info('Start up complete') + def initialize_task_parameters(self) -> DynamicForagingParas: + """ + initialize schema of task parameters based on widgets + """ + + return DynamicForagingParas( + training_stage=TrainingStage.STAGE_1, # dummy value + task=self.Task.currentText(), + task_schema_version=aind_auto_train.__version__, + BaseRewardSum=float(self.BaseRewardSum.text()), + RewardFamily=int(self.RewardFamily.text()), + RewardPairsN=int(self.RewardPairsN.text()), + UncoupledReward=self.UncoupledReward.text(), + # Randomness + Randomness=self.Randomness.currentText(), + # Block length + BlockMin=int(self.BlockMin.text()), + BlockMax=int(self.BlockMax.text()), + BlockBeta=int(self.BlockBeta.text()), + BlockMinReward=int(self.BlockMinReward.text()), + # Delay period + DelayMin=float(self.DelayMin.text()), + DelayMax=float(self.DelayMax.text()), + DelayBeta=float(self.DelayBeta.text()), 
+ # Reward delay + RewardDelay=float(self.RewardDelay.text()), + # Auto water + AutoReward=self.AutoReward.isChecked(), + AutoWaterType=self.AutoWaterType.currentText(), + Multiplier=float(self.Multiplier.text()), + Unrewarded=int(self.Unrewarded.text()), + Ignored=int(self.Ignored.text()), + # ITI + ITIMin=float(self.ITIMin.text()), + ITIMax=float(self.ITIMax.text()), + ITIBeta=float(self.ITIBeta.text()), + ITIIncrease=float(self.ITIIncrease.text()), + # Response time + ResponseTime=float(self.ResponseTime.text()), + RewardConsumeTime=float(self.RewardConsumeTime.text()), + StopIgnores=round(int(self.auto_stop_ignore_win.text())*float(self.auto_stop_ignore_ratio_threshold.text())), + # Auto block + AdvancedBlockAuto=AdvancedBlockMode.OFF, + SwitchThr=float(self.SwitchThr.text()), + PointsInARow=int(self.PointsInARow.text()), + # Auto stop + MaxTrial=int(self.MaxTrial.text()), + MaxTime=int(self.MaxTime.text()), + # Reward size + RightValue_volume=float(self.RightValue_volume.text()), + LeftValue_volume=float(self.LeftValue_volume.text()), + # Warmup + warmup=self.warmup.currentText(), + warm_min_trial=int(self.warm_min_trial.text()), + warm_max_choice_ratio_bias=float(self.warm_max_choice_ratio_bias.text()), + warm_min_finish_ratio=float(self.warm_min_finish_ratio.text()), + warm_windowsize=int(self.warm_windowsize.text()) + ) + + def connect_task_parameters(self) -> None: + """ + Connect relevant widgets to update task parameters in task logic model + """ + + # update parameters in behavior task logic model + self.AutoTrain_dialog.trainingStageChanged.connect( + lambda stage: setattr(self.behavior_task_logic_model.task_parameters, 'training_stage', stage)) + self.Task.currentIndexChanged.connect( + lambda task: setattr(self.behavior_task_logic_model.task_parameters, 'task', task)) + self.BaseRewardSum.textChanged.connect( + lambda text: setattr(self.behavior_task_logic_model.task_parameters, 'BaseRewardSum', float(text))) + self.RewardFamily.textChanged.connect( 
+ lambda text: setattr(self.behavior_task_logic_model.task_parameters, 'RewardFamily', int(text))) + self.RewardPairsN.textChanged.connect( + lambda text: setattr(self.behavior_task_logic_model.task_parameters, 'RewardPairsN', int(text))) + self.UncoupledReward.textChanged.connect( + lambda text: setattr(self.behavior_task_logic_model.task_parameters, 'UncoupledReward', text)) + self.Randomness.textChanged.connect( + lambda text: setattr(self.behavior_task_logic_model.task_parameters, 'Randomness', text)) + self.BlockMin.textChanged.connect( + lambda text: setattr(self.behavior_task_logic_model.task_parameters, 'BlockMin', int(text))) + self.BlockMax.textChanged.connect( + lambda text: setattr(self.behavior_task_logic_model.task_parameters, 'BlockMax', int(text))) + self.BlockBeta.textChanged.connect( + lambda text: setattr(self.behavior_task_logic_model.task_parameters, 'BlockBeta', int(text))) + self.BlockMinReward.textChanged.connect( + lambda text: setattr(self.behavior_task_logic_model.task_parameters, 'BlockMinReward', int(text))) + self.DelayMin.textChanged.connect( + lambda text: setattr(self.behavior_task_logic_model.task_parameters, 'DelayMin', float(text))) + self.DelayMax.textChanged.connect( + lambda text: setattr(self.behavior_task_logic_model.task_parameters, 'DelayMax', float(text))) + self.DelayBeta.textChanged.connect( + lambda text: setattr(self.behavior_task_logic_model.task_parameters, 'DelayBeta', float(text))) + self.RewardDelay.textChanged.connect( + lambda text: setattr(self.behavior_task_logic_model.task_parameters, 'RewardDelay', float(text))) + self.AutoReward.toggled.connect( + lambda checked: setattr(self.behavior_task_logic_model.task_parameters, 'RewardDelay', checked)) + self.AutoWaterType.currentIndexChanged.connect( + lambda water: setattr(self.behavior_task_logic_model.task_parameters, 'AutoWaterType', water)) + self.Multiplier.textChanged.connect( + lambda text: setattr(self.behavior_task_logic_model.task_parameters, 
'Multiplier', float(text))) + self.Unrewarded.textChanged.connect( + lambda text: setattr(self.behavior_task_logic_model.task_parameters, 'Unrewarded', int(text))) + self.Ignored.textChanged.connect( + lambda text: setattr(self.behavior_task_logic_model.task_parameters, 'Ignored', int(text))) + self.ITIMin.textChanged.connect( + lambda text: setattr(self.behavior_task_logic_model.task_parameters, 'ITIMin', float(text))) + self.ITIMax.textChanged.connect( + lambda text: setattr(self.behavior_task_logic_model.task_parameters, 'ITIMax', float(text))) + self.ITIBeta.textChanged.connect( + lambda text: setattr(self.behavior_task_logic_model.task_parameters, 'ITIBeta', float(text))) + self.ITIIncrease.textChanged.connect( + lambda text: setattr(self.behavior_task_logic_model.task_parameters, 'ITIIncrease', float(text))) + self.ResponseTime.textChanged.connect( + lambda text: setattr(self.behavior_task_logic_model.task_parameters, 'ResponseTime', float(text))) + self.RewardConsumeTime.textChanged.connect( + lambda text: setattr(self.behavior_task_logic_model.task_parameters, 'RewardConsumeTime', float(text))) + self.auto_stop_ignore_win.textChanged.connect( + lambda win: setattr(self.behavior_task_logic_model.task_parameters, 'StopIgnores', + round(int(win) * float(self.auto_stop_ignore_ratio_threshold.text())))) + self.auto_stop_ignore_ratio_threshold.textChanged.connect( + lambda ratio: setattr(self.behavior_task_logic_model.task_parameters, 'StopIgnores', + round(int(self.auto_stop_ignore_win.text()) * float(ratio)))) + self.SwitchThr.textChanged.connect( + lambda text: setattr(self.behavior_task_logic_model.task_parameters, 'SwitchThr', float(text))) + self.PointsInARow.textChanged.connect( + lambda text: setattr(self.behavior_task_logic_model.task_parameters, 'PointsInARow', int(text))) + self.MaxTrial.textChanged.connect( + lambda text: setattr(self.behavior_task_logic_model.task_parameters, 'MaxTrial', int(text))) + self.MaxTime.textChanged.connect( + lambda text: 
setattr(self.behavior_task_logic_model.task_parameters, 'MaxTime', int(text))) + self.RightValue_volume.textChanged.connect( + lambda text: setattr(self.behavior_task_logic_model.task_parameters, 'RightValue_volume', float(text))) + self.LeftValue_volume.textChanged.connect( + lambda text: setattr(self.behavior_task_logic_model.task_parameters, 'LeftValue_volume', float(text))) + self.warmup.textChanged.connect( + lambda text: setattr(self.behavior_task_logic_model.task_parameters, 'warmup', text)) + self.warm_min_trial.textChanged.connect( + lambda text: setattr(self.behavior_task_logic_model.task_parameters, 'warm_min_trial', int(text))) + self.warm_max_choice_ratio_bias.textChanged.connect( + lambda text: setattr(self.behavior_task_logic_model.task_parameters, 'warm_max_choice_ratio_bias', + float(text))) + self.warm_min_finish_ratio.textChanged.connect( + lambda text: setattr(self.behavior_task_logic_model.task_parameters, 'warm_min_finish_ratio', float(text))) + self.warm_windowsize.textChanged.connect( + lambda text: setattr(self.behavior_task_logic_model.task_parameters, 'warm_windowsize', float(text))) + + # def add_task_parameter_validators(self) -> None: + # """ + # Add validators for widget within + # """ + + def connect_session_model(self) -> None: + """ + Connect relevant widgets to update session model + """ + + # update parameters in behavior session model if widgets change + self.Task.currentTextChanged.connect(lambda task: setattr(self.behavior_session_model, 'experiment', task)) + self.Experimenter.textChanged.connect(lambda text: setattr(self.behavior_session_model, 'experimenter', [text])) + self.ID.textChanged.connect(lambda subject: setattr(self.behavior_session_model, 'subject', subject)) + self.ShowNotes.textChanged.connect(lambda: setattr(self.behavior_session_model, 'notes', + self.ShowNotes.toPlainText())) + def _load_rig_metadata(self): '''Load the latest rig metadata''' @@ -407,14 +581,6 @@ def connectSignalsSlots(self): 
self.Opto_dialog.laser_1_calibration_power.textChanged.connect(self._toggle_save_color) self.Opto_dialog.laser_2_calibration_power.textChanged.connect(self._toggle_save_color) - # update parameters in behavior session model if widgets change - self.Task.currentTextChanged.connect(lambda task: setattr(self.behavior_session_model, 'experiment', task)) - self.Experimenter.textChanged.connect(lambda text: setattr(self.behavior_session_model, 'experimenter', [text])) - self.ID.textChanged.connect(lambda subject: setattr(self.behavior_session_model, 'subject', subject)) - self.ShowNotes.textChanged.connect(lambda: setattr(self.behavior_session_model, 'notes', - self.ShowNotes.toPlainText())) - - # Set manual water volume to earned reward and trigger update if changed for side in ['Left', 'Right']: reward_volume_widget = getattr(self, f'{side}Value_volume') From bedf67c8a4e54fd2c7b3a43f8e71825a5bd449a9 Mon Sep 17 00:00:00 2001 From: Micah Woodard Date: Thu, 19 Dec 2024 14:41:45 -0800 Subject: [PATCH 02/23] fixing connection errors --- src/foraging_gui/Dialogs.py | 2 +- src/foraging_gui/Foraging.py | 17 +++++++---------- 2 files changed, 8 insertions(+), 11 deletions(-) diff --git a/src/foraging_gui/Dialogs.py b/src/foraging_gui/Dialogs.py index d5d6e5c66..1fcc699f0 100644 --- a/src/foraging_gui/Dialogs.py +++ b/src/foraging_gui/Dialogs.py @@ -2215,7 +2215,7 @@ def _SelectRigMetadata(self,rig_metadata_file=None): class AutoTrainDialog(QDialog): '''For automatic training''' - trainingStageChanged = pyqtSignal(TrainingStage) # signal to indicate training stage has changed + trainingStageChanged = pyqtSignal(str) # signal to indicate training stage has changed def __init__(self, MainWindow, parent=None): super().__init__(parent) uic.loadUi('AutoTrain.ui', self) diff --git a/src/foraging_gui/Foraging.py b/src/foraging_gui/Foraging.py index c6ed40a72..b10be4cf9 100644 --- a/src/foraging_gui/Foraging.py +++ b/src/foraging_gui/Foraging.py @@ -318,8 +318,8 @@ def 
connect_task_parameters(self) -> None: """ # update parameters in behavior task logic model - self.AutoTrain_dialog.trainingStageChanged.connect( - lambda stage: setattr(self.behavior_task_logic_model.task_parameters, 'training_stage', stage)) + # self.AutoTrain_dialog.trainingStageChanged.connect( + # lambda stage: setattr(self.behavior_task_logic_model.task_parameters, 'training_stage', stage)) self.Task.currentIndexChanged.connect( lambda task: setattr(self.behavior_task_logic_model.task_parameters, 'task', task)) self.BaseRewardSum.textChanged.connect( @@ -330,7 +330,7 @@ def connect_task_parameters(self) -> None: lambda text: setattr(self.behavior_task_logic_model.task_parameters, 'RewardPairsN', int(text))) self.UncoupledReward.textChanged.connect( lambda text: setattr(self.behavior_task_logic_model.task_parameters, 'UncoupledReward', text)) - self.Randomness.textChanged.connect( + self.Randomness.currentIndexChanged.connect( lambda text: setattr(self.behavior_task_logic_model.task_parameters, 'Randomness', text)) self.BlockMin.textChanged.connect( lambda text: setattr(self.behavior_task_logic_model.task_parameters, 'BlockMin', int(text))) @@ -388,7 +388,7 @@ def connect_task_parameters(self) -> None: lambda text: setattr(self.behavior_task_logic_model.task_parameters, 'RightValue_volume', float(text))) self.LeftValue_volume.textChanged.connect( lambda text: setattr(self.behavior_task_logic_model.task_parameters, 'LeftValue_volume', float(text))) - self.warmup.textChanged.connect( + self.warmup.currentIndexChanged.connect( lambda text: setattr(self.behavior_task_logic_model.task_parameters, 'warmup', text)) self.warm_min_trial.textChanged.connect( lambda text: setattr(self.behavior_task_logic_model.task_parameters, 'warm_min_trial', int(text))) @@ -400,11 +400,6 @@ def connect_task_parameters(self) -> None: self.warm_windowsize.textChanged.connect( lambda text: setattr(self.behavior_task_logic_model.task_parameters, 'warm_windowsize', float(text))) - # def 
add_task_parameter_validators(self) -> None: - # """ - # Add validators for widget within - # """ - def connect_session_model(self) -> None: """ Connect relevant widgets to update session model @@ -5142,6 +5137,8 @@ def log_subprocess_output(process, prefix): # Move creating AutoTrain here to catch any AWS errors win.create_auto_train_dialog() - + win.AutoTrain_dialog.trainingStageChanged.connect( + lambda stage: setattr(win.behavior_task_logic_model.task_parameters, 'training_stage', stage)) + # TODO: Feels weird doing it this way? I don't know the AWS errors reasoning though # Run your application's event loop and stop after closing all windows sys.exit(app.exec()) From bd4566b8e6d33213637c52b90e9ae7be2c9e7b94 Mon Sep 17 00:00:00 2001 From: Micah Woodard Date: Thu, 19 Dec 2024 14:54:30 -0800 Subject: [PATCH 03/23] saving at end of trial --- src/foraging_gui/Foraging.py | 14 ++++++++++++-- 1 file changed, 12 insertions(+), 2 deletions(-) diff --git a/src/foraging_gui/Foraging.py b/src/foraging_gui/Foraging.py index b10be4cf9..29ae24d80 100644 --- a/src/foraging_gui/Foraging.py +++ b/src/foraging_gui/Foraging.py @@ -2997,7 +2997,8 @@ def _get_folder_structure_new(self): self.SaveFileMat=os.path.join(self.behavior_session_model.root_path,f'{id_name}.mat') self.SaveFileJson=os.path.join(self.behavior_session_model.root_path,f'{id_name}.json') self.SaveFileParJson=os.path.join(self.behavior_session_model.root_path,f'{id_name}_par.json') - self.behavior_session_modelJson = os.path.join(self.behavior_session_model.root_path,f'behavior_session_model_{id_name}.json') + self.behavior_session_model_json = os.path.join(self.behavior_session_model.root_path, f'behavior_session_model_{id_name}.json') + self.behavior_task_logic_model_json = os.path.join(self.behavior_session_model.root_path, f'behavior_task_logic_model_{id_name}.json') self.HarpFolder=os.path.join(self.behavior_session_model.root_path,'raw.harp') 
self.VideoFolder=os.path.join(self.SessionFolder,'behavior-videos') self.PhotometryFolder=os.path.join(self.SessionFolder,'fib') @@ -4206,9 +4207,18 @@ def _Start(self): except ValidationError as e: logging.error(str(e), extra={'tags': [self.warning_log_tag]}) # save behavior session model - with open(self.behavior_session_modelJson, "w") as outfile: + with open(self.behavior_session_model_json, "w") as outfile: outfile.write(self.behavior_session_model.model_dump_json()) + # validate behavior session task logic model and document validation errors if any + try: + AindBehaviorTaskLogicModel(**self.behavior_task_logic_model.model_dump()) + except ValidationError as e: + logging.error(str(e), extra={'tags': [self.warning_log_tag]}) + # save behavior session model + with open(self.behavior_task_logic_model_json, "w") as outfile: + outfile.write(self.behavior_task_logic_model.model_dump_json()) + if (self.StartANewSession == 1) and (self.ANewTrial == 0): # If we are starting a new session, we should wait for the last trial to finish From 297d9f94791d6c2596069d3714f79b8887d656c1 Mon Sep 17 00:00:00 2001 From: Micah Woodard Date: Thu, 19 Dec 2024 15:19:59 -0800 Subject: [PATCH 04/23] reference model instead of widgets --- src/foraging_gui/Dialogs.py | 4 ++-- src/foraging_gui/Foraging.py | 25 ++++++++++++++----------- src/foraging_gui/MyFunctions.py | 11 ++++++----- src/foraging_gui/Visualization.py | 20 ++++++++++---------- 4 files changed, 32 insertions(+), 28 deletions(-) diff --git a/src/foraging_gui/Dialogs.py b/src/foraging_gui/Dialogs.py index 1fcc699f0..6ed55fdee 100644 --- a/src/foraging_gui/Dialogs.py +++ b/src/foraging_gui/Dialogs.py @@ -2885,10 +2885,10 @@ def _set_training_parameters(self, paras_dict, if_apply_and_lock=False): # Set warmup to off first so that all AutoTrain parameters # can be correctly registered in WarmupBackup if warmup is turned on later - if paras_dict and paras_dict['warmup'] != self.MainWindow.warmup.currentText(): + if paras_dict and 
paras_dict['warmup'] != self.MainWindow.behavior_task_logic_model.task_parameters.warmup: widgets_changed.update( {self.MainWindow.warmup: - self.MainWindow.warmup.currentText() + self.MainWindow.behavior_task_logic_model.task_parameters.warmup } ) # Track the changes diff --git a/src/foraging_gui/Foraging.py b/src/foraging_gui/Foraging.py index 29ae24d80..b2a09282a 100644 --- a/src/foraging_gui/Foraging.py +++ b/src/foraging_gui/Foraging.py @@ -295,7 +295,7 @@ def initialize_task_parameters(self) -> DynamicForagingParas: RewardConsumeTime=float(self.RewardConsumeTime.text()), StopIgnores=round(int(self.auto_stop_ignore_win.text())*float(self.auto_stop_ignore_ratio_threshold.text())), # Auto block - AdvancedBlockAuto=AdvancedBlockMode.OFF, + AdvancedBlockAuto=self.AdvancedBlockAuto.currentText(), SwitchThr=float(self.SwitchThr.text()), PointsInARow=int(self.PointsInARow.text()), # Auto stop @@ -376,6 +376,8 @@ def connect_task_parameters(self) -> None: self.auto_stop_ignore_ratio_threshold.textChanged.connect( lambda ratio: setattr(self.behavior_task_logic_model.task_parameters, 'StopIgnores', round(int(self.auto_stop_ignore_win.text()) * float(ratio)))) + self.AdvancedBlockAuto.currentIndexChanged.connect( + lambda text: setattr(self.behavior_task_logic_model.task_parameters, 'AdvancedBlockAuto', text)) self.SwitchThr.textChanged.connect( lambda text: setattr(self.behavior_task_logic_model.task_parameters, 'SwitchThr', float(text))) self.PointsInARow.textChanged.connect( @@ -884,7 +886,7 @@ def _warmup(self): to some incorrect parameters when it was turned off. ''' # set warm up parameters - if self.warmup.currentText()=='on': + if self.behavior_task_logic_model.task_parameters.warmup=='on': # get parameters before the warm up is on;WarmupBackup_ stands for Warmup backup, which are parameters before warm-up. 
self._GetTrainingParameters(prefix='WarmupBackup_') self.warm_min_trial.setEnabled(True) @@ -916,7 +918,7 @@ def _warmup(self): # turn advanced block auto off self.AdvancedBlockAuto.setCurrentIndex(self.AdvancedBlockAuto.findText('off')) self._ShowRewardPairs() - elif self.warmup.currentText()=='off': + elif self.behavior_task_logic_model.task_parameters.warmup=='off': # set parameters back to the previous parameters before warm up self._revert_to_previous_parameters() self.warm_min_trial.setEnabled(False) @@ -1994,7 +1996,7 @@ def _set_parameters(self,key,widget_dict,parameters): def _Randomness(self): '''enable/disable some fields in the Block/Delay Period/ITI''' - if self.Randomness.currentText()=='Exponential': + if self.behavior_task_logic_model.task_parameters.Randomness == 'Exponential': self.label_14.setEnabled(True) self.label_18.setEnabled(True) self.label_39.setEnabled(True) @@ -2003,7 +2005,7 @@ def _Randomness(self): self.ITIBeta.setEnabled(True) # if self.Task.currentText()!='RewardN': # self.BlockBeta.setStyleSheet("color: black;border: 1px solid gray;background-color: white;") - elif self.Randomness.currentText()=='Even': + elif self.behavior_task_logic_model.task_parameters.Randomness == 'Even': self.label_14.setEnabled(False) self.label_18.setEnabled(False) self.label_39.setEnabled(False) @@ -2017,7 +2019,7 @@ def _Randomness(self): def _AdvancedBlockAuto(self): '''enable/disable some fields in the AdvancedBlockAuto''' - if self.AdvancedBlockAuto.currentText()=='off': + if self.behavior_task_logic_model.task_parameters.AdvancedBlockAuto=='off': self.label_54.setEnabled(False) self.label_60.setEnabled(False) self.SwitchThr.setEnabled(False) @@ -2212,7 +2214,7 @@ def _CheckFormat(self,child): return 0 if child.objectName()=='UncoupledReward': try: - input_string=self.UncoupledReward.text() + input_string = self.behavior_task_logic_model.task_parameters.UncoupledReward if input_string=='': # do not permit empty uncoupled reward return 0 # remove any 
square brackets and spaces from the string @@ -2390,10 +2392,11 @@ def _ShowRewardPairs(self): '''Show reward pairs''' try: if self.behavior_session_model.experiment in ['Coupled Baiting','Coupled Without Baiting','RewardN']: - self.RewardPairs=self.RewardFamilies[int(self.RewardFamily.text())-1][:int(self.RewardPairsN.text())] - self.RewardProb=np.array(self.RewardPairs)/np.expand_dims(np.sum(self.RewardPairs,axis=1),axis=1)*float(self.BaseRewardSum.text()) + self.RewardPairs=self.RewardFamilies[int(self.behavior_task_logic_model.task_parameters.RewardFamily)-1][:int(self.behavior_task_logic_model.task_parameters.RewardPairsN)] + self.RewardProb=np.array(self.RewardPairs)/np.expand_dims(np.sum(self.RewardPairs,axis=1),axis=1)*\ + float(self.behavior_task_logic_model.task_parameters.BaseRewardSum) elif self.behavior_session_model.experiment in ['Uncoupled Baiting','Uncoupled Without Baiting']: - input_string=self.UncoupledReward.text() + input_string = self.behavior_task_logic_model.task_parameters.UncoupledReward # remove any square brackets and spaces from the string input_string = input_string.replace('[','').replace(']','').replace(',', ' ') # split the remaining string into a list of individual numbers @@ -3708,7 +3711,7 @@ def _StopPhotometry(self,closing=False): QMessageBox.Ok) def _AutoReward(self): - if self.AutoReward.isChecked(): + if self.behavior_task_logic_model.task_parameters.AutoReward: self.AutoReward.setStyleSheet("background-color : green;") self.AutoReward.setText('Auto water On') for widget in ['AutoWaterType', 'Multiplier', 'Unrewarded', 'Ignored']: diff --git a/src/foraging_gui/MyFunctions.py b/src/foraging_gui/MyFunctions.py index 266b8dcb1..893c1bda9 100644 --- a/src/foraging_gui/MyFunctions.py +++ b/src/foraging_gui/MyFunctions.py @@ -278,7 +278,7 @@ def _get_uncoupled_reward_prob_pool(self): def _CheckWarmUp(self): '''Check if we should turn on warm up''' - if self.win.warmup.currentText()=='off': + if 
self.win.behavior_task_logic_model.task_parameters.warmup=='off': return warmup=self._get_warmup_state() if warmup==0 and self.TP_warmup=='on': @@ -1647,10 +1647,11 @@ def _GetAnimalResponse(self,Channel1,Channel3,Channel4): self._SimulateResponse() return # set the valve time of auto water + multiplier = self.win.behavior_task_logic_model.task_parameters.Multiplier if self.CurrentAutoRewardTrial[0]==1: - self._set_valve_time_left(Channel3,float(self.win.LeftValue.text()),float(self.win.Multiplier.text())) + self._set_valve_time_left(Channel3,float(self.win.LeftValue.text()),multiplier) if self.CurrentAutoRewardTrial[1]==1: - self._set_valve_time_right(Channel3,float(self.win.RightValue.text()),float(self.win.Multiplier.text())) + self._set_valve_time_right(Channel3,float(self.win.RightValue.text()),multiplier) if self.CurrentStartType==3: # no delay timestamp ReceiveN=9 @@ -1794,14 +1795,14 @@ def _set_valve_time_right(self,channel3,RightValue=0.01,Multiplier=1): def _GiveLeft(self,channel3): '''manually give left water''' - channel3.LeftValue1(float(self.win.LeftValue.text())*1000*float(self.win.Multiplier.text())) + channel3.LeftValue1(float(self.win.LeftValue.text())*1000*self.win.behavior_task_logic_model.task_parameters.Multiplier) time.sleep(0.01) channel3.ManualWater_Left(int(1)) channel3.LeftValue1(float(self.win.LeftValue.text())*1000) def _GiveRight(self,channel3): '''manually give right water''' - channel3.RightValue1(float(self.win.RightValue.text())*1000*float(self.win.Multiplier.text())) + channel3.RightValue1(float(self.win.RightValue.text())*1000*self.win.behavior_task_logic_model.task_parameters.Multiplier) time.sleep(0.01) channel3.ManualWater_Right(int(1)) channel3.RightValue1(float(self.win.RightValue.text())*1000) diff --git a/src/foraging_gui/Visualization.py b/src/foraging_gui/Visualization.py index d64f256b5..5afbb30d1 100644 --- a/src/foraging_gui/Visualization.py +++ b/src/foraging_gui/Visualization.py @@ -522,23 +522,23 @@ def 
__init__(self,GeneratedTrials=None,dpi=100,width=5, height=4): FigureCanvas.__init__(self, self.fig) def _Update(self,win): # randomly draw a block length between Min and Max - SampleMethods=win.Randomness.currentText() + SampleMethods=win.behavior_task_logic_model.task_parameters.Randomness # block length - Min=float(win.BlockMin.text()) - Max=float(win.BlockMax.text()) - Beta=float(win.BlockBeta.text()) + Min=win.behavior_task_logic_model.task_parameters.BlockMin + Max=win.behavior_task_logic_model.task_parameters.BlockMax + Beta=win.behavior_task_logic_model.task_parameters.BlockBeta DataType='int' SampledBlockLen=self._Sample(Min=Min,Max=Max,SampleMethods=SampleMethods,Beta=Beta,DataType=DataType) # ITI - Min=float(win.ITIMin.text()) - Max=float(win.ITIMax.text()) - Beta=float(win.ITIBeta.text()) + Min=win.behavior_task_logic_model.task_parameters.ITIMin + Max=win.behavior_task_logic_model.task_parameters.ITIMax + Beta=win.behavior_task_logic_model.task_parameters.ITIBeta DataType='float' SampledITI=self._Sample(Min=Min,Max=Max,SampleMethods=SampleMethods,Beta=Beta,DataType=DataType) # Delay - Min=float(win.DelayMin.text()) - Max=float(win.DelayMax.text()) - Beta=float(win.DelayBeta.text()) + Min=win.behavior_task_logic_model.task_parameters.DelayMin + Max=win.behavior_task_logic_model.task_parameters.DelayMax + Beta=win.behavior_task_logic_model.task_parameters.DelayBeta DataType='float' SampledDelay=self._Sample(Min=Min,Max=Max,SampleMethods=SampleMethods,Beta=Beta,DataType=DataType) self.ax1.cla() From 5f914a5fa94bda3c92203befef5134d742adcde3 Mon Sep 17 00:00:00 2001 From: Micah Woodard Date: Thu, 19 Dec 2024 15:21:59 -0800 Subject: [PATCH 05/23] add back logging --- src/foraging_gui/Foraging.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/foraging_gui/Foraging.py b/src/foraging_gui/Foraging.py index b2a09282a..5ce71a86d 100644 --- a/src/foraging_gui/Foraging.py +++ b/src/foraging_gui/Foraging.py @@ -9,7 +9,7 @@ import logging 
from hashlib import md5 -#import logging_loki +import logging_loki import socket import harp import threading @@ -25,7 +25,7 @@ import serial import numpy as np import pandas as pd -#from pykeepass import PyKeePass +from pykeepass import PyKeePass from matplotlib.backends.backend_qt5agg import NavigationToolbar2QT as NavigationToolbar from scipy.io import savemat, loadmat from PyQt5.QtWidgets import QApplication, QMainWindow, QMessageBox, QSizePolicy From 6bae4973c27f942e0c075512699d594e1f451420 Mon Sep 17 00:00:00 2001 From: Micah Woodard Date: Fri, 20 Dec 2024 14:15:11 -0800 Subject: [PATCH 06/23] adding validators and checks in signals --- src/foraging_gui/Foraging.py | 158 +++++++++++++++++++++++++---------- 1 file changed, 112 insertions(+), 46 deletions(-) diff --git a/src/foraging_gui/Foraging.py b/src/foraging_gui/Foraging.py index 5ce71a86d..6d749e4d1 100644 --- a/src/foraging_gui/Foraging.py +++ b/src/foraging_gui/Foraging.py @@ -28,10 +28,11 @@ from pykeepass import PyKeePass from matplotlib.backends.backend_qt5agg import NavigationToolbar2QT as NavigationToolbar from scipy.io import savemat, loadmat -from PyQt5.QtWidgets import QApplication, QMainWindow, QMessageBox, QSizePolicy +from PyQt5.QtWidgets import QApplication, QMainWindow, QMessageBox, QSizePolicy, QLineEdit, QComboBox, QPushButton, QDoubleSpinBox from PyQt5.QtWidgets import QFileDialog,QVBoxLayout, QGridLayout, QLabel from PyQt5 import QtWidgets,QtGui,QtCore, uic from PyQt5.QtCore import QThreadPool,Qt,QThread +from PyQt5.QtGui import QIntValidator, QDoubleValidator from pyOSC3.OSC3 import OSCStreamingClient import webbrowser from pydantic import ValidationError @@ -314,93 +315,159 @@ def initialize_task_parameters(self) -> DynamicForagingParas: def connect_task_parameters(self) -> None: """ - Connect relevant widgets to update task parameters in task logic model + Connect relevant widgets to update task parameters in task logic model and add validators """ - # update parameters in behavior 
task logic model # self.AutoTrain_dialog.trainingStageChanged.connect( # lambda stage: setattr(self.behavior_task_logic_model.task_parameters, 'training_stage', stage)) - self.Task.currentIndexChanged.connect( + self.Task.currentTextChanged.connect( lambda task: setattr(self.behavior_task_logic_model.task_parameters, 'task', task)) self.BaseRewardSum.textChanged.connect( - lambda text: setattr(self.behavior_task_logic_model.task_parameters, 'BaseRewardSum', float(text))) + lambda text: None if text in ['', '.'] else setattr(self.behavior_task_logic_model.task_parameters, 'BaseRewardSum', float(text))) + self.BaseRewardSum.setValidator(QDoubleValidator()) + self.RewardFamily.textChanged.connect( - lambda text: setattr(self.behavior_task_logic_model.task_parameters, 'RewardFamily', int(text))) + lambda text: None if text == '' else setattr(self.behavior_task_logic_model.task_parameters, 'RewardFamily', int(text))) + self.RewardFamily.setValidator(QIntValidator()) + self.RewardPairsN.textChanged.connect( - lambda text: setattr(self.behavior_task_logic_model.task_parameters, 'RewardPairsN', int(text))) + lambda text: None if text == '' else setattr(self.behavior_task_logic_model.task_parameters, 'RewardPairsN', int(text))) + self.RewardPairsN.setValidator(QIntValidator()) + self.UncoupledReward.textChanged.connect( lambda text: setattr(self.behavior_task_logic_model.task_parameters, 'UncoupledReward', text)) + self.Randomness.currentIndexChanged.connect( lambda text: setattr(self.behavior_task_logic_model.task_parameters, 'Randomness', text)) + self.BlockMin.textChanged.connect( - lambda text: setattr(self.behavior_task_logic_model.task_parameters, 'BlockMin', int(text))) + lambda text: None if text == '' else setattr(self.behavior_task_logic_model.task_parameters, 'BlockMin', int(text))) + self.BlockMin.setValidator(QIntValidator()) + self.BlockMax.textChanged.connect( - lambda text: setattr(self.behavior_task_logic_model.task_parameters, 'BlockMax', int(text))) + 
lambda text: None if text == '' else setattr(self.behavior_task_logic_model.task_parameters, 'BlockMax', int(text))) + self.BlockMax.setValidator(QIntValidator()) + self.BlockBeta.textChanged.connect( - lambda text: setattr(self.behavior_task_logic_model.task_parameters, 'BlockBeta', int(text))) + lambda text: None if text == '' else setattr(self.behavior_task_logic_model.task_parameters, 'BlockBeta', int(text))) + self.BlockBeta.setValidator(QIntValidator()) + self.BlockMinReward.textChanged.connect( - lambda text: setattr(self.behavior_task_logic_model.task_parameters, 'BlockMinReward', int(text))) + lambda text: None if text == '' else setattr(self.behavior_task_logic_model.task_parameters, 'BlockMinReward', int(text))) + self.BlockMinReward.setValidator(QIntValidator()) + self.DelayMin.textChanged.connect( - lambda text: setattr(self.behavior_task_logic_model.task_parameters, 'DelayMin', float(text))) + lambda text: None if text in ['', '.'] else setattr(self.behavior_task_logic_model.task_parameters, 'DelayMin', float(text))) + self.DelayMin.setValidator(QDoubleValidator()) + self.DelayMax.textChanged.connect( - lambda text: setattr(self.behavior_task_logic_model.task_parameters, 'DelayMax', float(text))) + lambda text: None if text in ['', '.'] else setattr(self.behavior_task_logic_model.task_parameters, 'DelayMax', float(text))) + self.DelayMax.setValidator(QDoubleValidator()) + self.DelayBeta.textChanged.connect( - lambda text: setattr(self.behavior_task_logic_model.task_parameters, 'DelayBeta', float(text))) + lambda text: None if text in ['', '.'] else setattr(self.behavior_task_logic_model.task_parameters, 'DelayBeta', float(text))) + self.DelayBeta.setValidator(QDoubleValidator()) + self.RewardDelay.textChanged.connect( - lambda text: setattr(self.behavior_task_logic_model.task_parameters, 'RewardDelay', float(text))) + lambda text: None if text in ['', '.'] else setattr(self.behavior_task_logic_model.task_parameters, 'RewardDelay', float(text))) + 
self.RewardDelay.setValidator(QDoubleValidator()) + self.AutoReward.toggled.connect( lambda checked: setattr(self.behavior_task_logic_model.task_parameters, 'RewardDelay', checked)) - self.AutoWaterType.currentIndexChanged.connect( + + self.AutoWaterType.currentTextChanged.connect( lambda water: setattr(self.behavior_task_logic_model.task_parameters, 'AutoWaterType', water)) + self.Multiplier.textChanged.connect( - lambda text: setattr(self.behavior_task_logic_model.task_parameters, 'Multiplier', float(text))) + lambda text: None if text in ['', '.'] else setattr(self.behavior_task_logic_model.task_parameters, 'Multiplier', float(text))) + self.Multiplier.setValidator(QDoubleValidator()) + self.Unrewarded.textChanged.connect( - lambda text: setattr(self.behavior_task_logic_model.task_parameters, 'Unrewarded', int(text))) + lambda text: None if text == '' else setattr(self.behavior_task_logic_model.task_parameters, 'Unrewarded', int(text))) + self.Unrewarded.setValidator(QIntValidator()) + self.Ignored.textChanged.connect( - lambda text: setattr(self.behavior_task_logic_model.task_parameters, 'Ignored', int(text))) + lambda text: None if text == '' else setattr(self.behavior_task_logic_model.task_parameters, 'Ignored', int(text))) + self.Ignored.setValidator(QIntValidator()) + self.ITIMin.textChanged.connect( - lambda text: setattr(self.behavior_task_logic_model.task_parameters, 'ITIMin', float(text))) + lambda text: None if text in ['', '.'] else setattr(self.behavior_task_logic_model.task_parameters, 'ITIMin', float(text))) + self.ITIMin.setValidator(QDoubleValidator()) + self.ITIMax.textChanged.connect( - lambda text: setattr(self.behavior_task_logic_model.task_parameters, 'ITIMax', float(text))) + lambda text: None if text in ['', '.'] else setattr(self.behavior_task_logic_model.task_parameters, 'ITIMax', float(text))) + self.ITIMax.setValidator(QDoubleValidator()) + self.ITIBeta.textChanged.connect( - lambda text: 
setattr(self.behavior_task_logic_model.task_parameters, 'ITIBeta', float(text))) + lambda text: None if text in ['', '.'] else setattr(self.behavior_task_logic_model.task_parameters, 'ITIBeta', float(text))) + self.ITIBeta.setValidator(QDoubleValidator()) + self.ITIIncrease.textChanged.connect( - lambda text: setattr(self.behavior_task_logic_model.task_parameters, 'ITIIncrease', float(text))) + lambda text: None if text in ['', '.'] else setattr(self.behavior_task_logic_model.task_parameters, 'ITIIncrease', float(text))) + self.ITIIncrease.setValidator(QDoubleValidator()) + self.ResponseTime.textChanged.connect( - lambda text: setattr(self.behavior_task_logic_model.task_parameters, 'ResponseTime', float(text))) + lambda text: None if text in ['', '.'] else setattr(self.behavior_task_logic_model.task_parameters, 'ResponseTime', float(text))) + self.ResponseTime.setValidator(QDoubleValidator()) + self.RewardConsumeTime.textChanged.connect( - lambda text: setattr(self.behavior_task_logic_model.task_parameters, 'RewardConsumeTime', float(text))) + lambda text: None if text in ['', '.'] else setattr(self.behavior_task_logic_model.task_parameters, 'RewardConsumeTime', float(text))) + self.RewardConsumeTime.setValidator(QDoubleValidator()) + self.auto_stop_ignore_win.textChanged.connect( - lambda win: setattr(self.behavior_task_logic_model.task_parameters, 'StopIgnores', + lambda win: None if win == '' else setattr(self.behavior_task_logic_model.task_parameters, 'StopIgnores', round(int(win) * float(self.auto_stop_ignore_ratio_threshold.text())))) + self.auto_stop_ignore_win.setValidator(QIntValidator()) + self.auto_stop_ignore_ratio_threshold.textChanged.connect( - lambda ratio: setattr(self.behavior_task_logic_model.task_parameters, 'StopIgnores', - round(int(self.auto_stop_ignore_win.text()) * float(ratio)))) - self.AdvancedBlockAuto.currentIndexChanged.connect( + lambda ratio: None if ratio in ['', '.'] else setattr(self.behavior_task_logic_model.task_parameters, 
'StopIgnores', + round(int(self.auto_stop_ignore_win.text()) * (float(ratio))))) + self.auto_stop_ignore_ratio_threshold.setValidator(QDoubleValidator()) + + self.AdvancedBlockAuto.currentTextChanged.connect( lambda text: setattr(self.behavior_task_logic_model.task_parameters, 'AdvancedBlockAuto', text)) + self.SwitchThr.textChanged.connect( - lambda text: setattr(self.behavior_task_logic_model.task_parameters, 'SwitchThr', float(text))) + lambda text: None if text in ['', '.'] else setattr(self.behavior_task_logic_model.task_parameters, 'SwitchThr', float(text))) + self.SwitchThr.setValidator(QDoubleValidator()) + self.PointsInARow.textChanged.connect( - lambda text: setattr(self.behavior_task_logic_model.task_parameters, 'PointsInARow', int(text))) + lambda text: None if text == '' else setattr(self.behavior_task_logic_model.task_parameters, 'PointsInARow', int(text))) + self.PointsInARow.setValidator(QIntValidator()) + self.MaxTrial.textChanged.connect( - lambda text: setattr(self.behavior_task_logic_model.task_parameters, 'MaxTrial', int(text))) + lambda text: None if text == '' else setattr(self.behavior_task_logic_model.task_parameters, 'MaxTrial', int(text))) + self.MaxTrial.setValidator(QIntValidator()) + self.MaxTime.textChanged.connect( - lambda text: setattr(self.behavior_task_logic_model.task_parameters, 'MaxTime', int(text))) + lambda text: None if text == '' else setattr(self.behavior_task_logic_model.task_parameters, 'MaxTime', int(text))) + self.MaxTime.setValidator(QIntValidator()) + self.RightValue_volume.textChanged.connect( - lambda text: setattr(self.behavior_task_logic_model.task_parameters, 'RightValue_volume', float(text))) + lambda text: None if text in ['', '.'] else setattr(self.behavior_task_logic_model.task_parameters, 'RightValue_volume', float(text))) + self.LeftValue_volume.textChanged.connect( - lambda text: setattr(self.behavior_task_logic_model.task_parameters, 'LeftValue_volume', float(text))) - 
self.warmup.currentIndexChanged.connect( + lambda text: None if text in ['', '.'] else setattr(self.behavior_task_logic_model.task_parameters, 'LeftValue_volume', float(text))) + + self.warmup.currentTextChanged.connect( lambda text: setattr(self.behavior_task_logic_model.task_parameters, 'warmup', text)) + self.warm_min_trial.textChanged.connect( - lambda text: setattr(self.behavior_task_logic_model.task_parameters, 'warm_min_trial', int(text))) + lambda text: None if text == '' else setattr(self.behavior_task_logic_model.task_parameters, 'warm_min_trial', int(text))) + self.warm_min_trial.setValidator(QIntValidator()) + self.warm_max_choice_ratio_bias.textChanged.connect( - lambda text: setattr(self.behavior_task_logic_model.task_parameters, 'warm_max_choice_ratio_bias', + lambda text: None if text in ['', '.'] else setattr(self.behavior_task_logic_model.task_parameters, 'warm_max_choice_ratio_bias', float(text))) + self.warm_max_choice_ratio_bias.setValidator(QDoubleValidator()) + self.warm_min_finish_ratio.textChanged.connect( - lambda text: setattr(self.behavior_task_logic_model.task_parameters, 'warm_min_finish_ratio', float(text))) + lambda text: None if text in ['', '.'] else setattr(self.behavior_task_logic_model.task_parameters, 'warm_min_finish_ratio', float(text))) + self.warm_min_finish_ratio.setValidator(QDoubleValidator()) + self.warm_windowsize.textChanged.connect( - lambda text: setattr(self.behavior_task_logic_model.task_parameters, 'warm_windowsize', float(text))) + lambda text: None if text == '' else setattr(self.behavior_task_logic_model.task_parameters, 'warm_windowsize', int(text))) + self.warm_windowsize.setValidator(QIntValidator()) def connect_session_model(self) -> None: """ @@ -2195,17 +2262,16 @@ def _CheckFormat(self,child): '''Check if the input format is correct''' if child.objectName()=='RewardFamily': # When we change the RewardFamily, sometimes the RewardPairsN is larger than available reward pairs in this family. 
try: - self.RewardFamilies[int(self.RewardFamily.text())-1] - if int(self.RewardPairsN.text())>len(self.RewardFamilies[int(self.RewardFamily.text())-1]): - self.RewardPairsN.setText(str(len(self.RewardFamilies[int(self.RewardFamily.text())-1]))) + if self.behavior_task_logic_model.task_parameters.RewardPairsN>len(self.RewardFamilies[self.behavior_task_logic_model.task_parameters.RewardFamily-1]): + self.RewardPairsN.setText(str(len(self.RewardFamilies[self.behavior_task_logic_model.task_parameters.RewardFamily-1]))) return 1 except Exception as e: logging.error(traceback.format_exc()) return 0 if child.objectName()=='RewardFamily' or child.objectName()=='RewardPairsN' or child.objectName()=='BaseRewardSum': try: - self.RewardPairs=self.RewardFamilies[int(self.RewardFamily.text())-1][:int(self.RewardPairsN.text())] - if int(self.RewardPairsN.text())>len(self.RewardFamilies[int(self.RewardFamily.text())-1]): + self.RewardPairs=self.RewardFamilies[self.behavior_task_logic_model.task_parameters.RewardFamily-1][:self.behavior_task_logic_model.task_parameters.RewardPairsN] + if self.behavior_task_logic_model.task_parameters.RewardPairsN>len(self.RewardFamilies[self.behavior_task_logic_model.task_parameters.RewardFamily-1]): return 0 else: return 1 @@ -2394,7 +2460,7 @@ def _ShowRewardPairs(self): if self.behavior_session_model.experiment in ['Coupled Baiting','Coupled Without Baiting','RewardN']: self.RewardPairs=self.RewardFamilies[int(self.behavior_task_logic_model.task_parameters.RewardFamily)-1][:int(self.behavior_task_logic_model.task_parameters.RewardPairsN)] self.RewardProb=np.array(self.RewardPairs)/np.expand_dims(np.sum(self.RewardPairs,axis=1),axis=1)*\ - float(self.behavior_task_logic_model.task_parameters.BaseRewardSum) + self.behavior_task_logic_model.task_parameters.BaseRewardSum elif self.behavior_session_model.experiment in ['Uncoupled Baiting','Uncoupled Without Baiting']: input_string = self.behavior_task_logic_model.task_parameters.UncoupledReward # 
remove any square brackets and spaces from the string From 537352dc91d3bc0c21e79e2218ee973782ec1271 Mon Sep 17 00:00:00 2001 From: Micah Woodard Date: Thu, 16 Jan 2025 13:33:02 -0800 Subject: [PATCH 07/23] chaning qlineedits to spinboxes --- pyproject.toml | 1 + src/foraging_gui/Dialogs.py | 8 +- src/foraging_gui/Foraging.py | 338 +++++++++----------------- src/foraging_gui/ForagingGUI.ui | 120 ++++----- src/foraging_gui/ForagingGUI_Ephys.ui | 116 ++++----- src/foraging_gui/MyFunctions.py | 8 +- src/foraging_gui/Visualization.py | 20 +- 7 files changed, 257 insertions(+), 354 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index ea007f52f..ec63ca451 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -28,6 +28,7 @@ dependencies = [ "aind-auto-train@git+https://github.com/AllenNeuralDynamics/aind-foraging-behavior-bonsai-automatic-training.git@main", "aind-slims-api@git+https://github.com/AllenNeuralDynamics/aind-slims-api@main", "aind-dynamic-foraging-models@git+https://github.com/AllenNeuralDynamics/aind-dynamic-foraging-models@main", + "aind-behavior-dynamic-foraging@git+https://github.com/AllenNeuralDynamics/Aind.Behavior.DynamicForaging@main", "aind-behavior-services >=0.8, <0.9", "pynwb >=2, <3", "requests >=2, <3", diff --git a/src/foraging_gui/Dialogs.py b/src/foraging_gui/Dialogs.py index 6ed55fdee..03e9cd448 100644 --- a/src/foraging_gui/Dialogs.py +++ b/src/foraging_gui/Dialogs.py @@ -2215,7 +2215,7 @@ def _SelectRigMetadata(self,rig_metadata_file=None): class AutoTrainDialog(QDialog): '''For automatic training''' - trainingStageChanged = pyqtSignal(str) # signal to indicate training stage has changed + def __init__(self, MainWindow, parent=None): super().__init__(parent) uic.loadUi('AutoTrain.ui', self) @@ -2665,7 +2665,7 @@ def _update_stage_to_apply(self): logger.info(f"Current stage to apply: {self.stage_in_use} @" f"{get_curriculum_string(self.curriculum_in_use)}") - self.trainingStageChanged.emit(self.stage_in_use) + def 
_apply_curriculum(self): # Check if a curriculum is selected if not hasattr(self, 'selected_curriculum') or self.selected_curriculum is None: @@ -2885,10 +2885,10 @@ def _set_training_parameters(self, paras_dict, if_apply_and_lock=False): # Set warmup to off first so that all AutoTrain parameters # can be correctly registered in WarmupBackup if warmup is turned on later - if paras_dict and paras_dict['warmup'] != self.MainWindow.behavior_task_logic_model.task_parameters.warmup: + if paras_dict and paras_dict['warmup'] != self.MainWindow.task_logic.task_parameters.warmup: widgets_changed.update( {self.MainWindow.warmup: - self.MainWindow.behavior_task_logic_model.task_parameters.warmup + self.MainWindow.task_logic.task_parameters.warmup } ) # Track the changes diff --git a/src/foraging_gui/Foraging.py b/src/foraging_gui/Foraging.py index 6d749e4d1..9b97878d8 100644 --- a/src/foraging_gui/Foraging.py +++ b/src/foraging_gui/Foraging.py @@ -9,7 +9,7 @@ import logging from hashlib import md5 -import logging_loki +#import logging_loki import socket import harp import threading @@ -25,7 +25,7 @@ import serial import numpy as np import pandas as pd -from pykeepass import PyKeePass +#from pykeepass import PyKeePass from matplotlib.backends.backend_qt5agg import NavigationToolbar2QT as NavigationToolbar from scipy.io import savemat, loadmat from PyQt5.QtWidgets import QApplication, QMainWindow, QMessageBox, QSizePolicy, QLineEdit, QComboBox, QPushButton, QDoubleSpinBox @@ -55,10 +55,14 @@ from aind_data_schema.core.session import Session from aind_data_schema_models.modalities import Modality from aind_behavior_services.session import AindBehaviorSessionModel -from aind_behavior_services.task_logic import AindBehaviorTaskLogicModel -from aind_auto_train.schema.task import TrainingStage, DynamicForagingParas, AdvancedBlockMode -import aind_auto_train - +from aind_auto_train.schema.task import TrainingStage +from aind_behavior_dynamic_foraging import ( + 
AindDynamicForagingTaskLogic, + AindDynamicForagingTaskParameters, + AutoWaterMode, + AdvancedBlockMode +) +import aind_behavior_dynamic_foraging logger = logging.getLogger(__name__) logger.root.handlers.clear() # clear handlers so console output can be configured logging.raiseExceptions = os.getenv('FORAGING_DEV_MODE', False) @@ -136,11 +140,10 @@ def __init__(self, parent=None,box_number=1,start_bonsai_ide=True): subprocess.check_output(['git','diff-index','--name-only', 'HEAD']).decode('ascii').strip() != '', skip_hardware_validation=True ) - # create AindBehaviorTaskLogicModel to be used and referenced for task parameter info - self.behavior_task_logic_model = AindBehaviorTaskLogicModel( - name=self.Task.currentText(), + # create AindDynamicForagingTaskLogic to be used and referenced for task parameter info + self.task_logic = AindDynamicForagingTaskLogic( task_parameters=self.initialize_task_parameters().dict(), - version=aind_auto_train.__version__ + version=aind_behavior_dynamic_foraging.__version__ ) # add warning_widget to layout and set color @@ -254,220 +257,122 @@ def __init__(self, parent=None,box_number=1,start_bonsai_ide=True): self._ReconnectBonsai() logging.info('Start up complete') - def initialize_task_parameters(self) -> DynamicForagingParas: + def initialize_task_parameters(self) -> AindDynamicForagingTaskParameters: """ initialize schema of task parameters based on widgets """ - return DynamicForagingParas( - training_stage=TrainingStage.STAGE_1, # dummy value - task=self.Task.currentText(), - task_schema_version=aind_auto_train.__version__, - BaseRewardSum=float(self.BaseRewardSum.text()), - RewardFamily=int(self.RewardFamily.text()), - RewardPairsN=int(self.RewardPairsN.text()), + return AindDynamicForagingTaskParameters( + BaseRewardSum=self.BaseRewardSum.value(), + RewardFamily=self.RewardFamily.value(), + RewardPairsN=self.RewardPairsN.value(), UncoupledReward=self.UncoupledReward.text(), # Randomness 
Randomness=self.Randomness.currentText(), # Block length - BlockMin=int(self.BlockMin.text()), - BlockMax=int(self.BlockMax.text()), - BlockBeta=int(self.BlockBeta.text()), - BlockMinReward=int(self.BlockMinReward.text()), + BlockMin=self.BlockMin.value(), + BlockMax=self.BlockMax.value(), + BlockBeta=self.BlockBeta.value(), + BlockMinReward=self.BlockMinReward.value(), # Delay period - DelayMin=float(self.DelayMin.text()), - DelayMax=float(self.DelayMax.text()), - DelayBeta=float(self.DelayBeta.text()), + DelayMin=self.DelayMin.value(), + DelayMax=self.DelayMax.value(), + DelayBeta=self.DelayBeta.value(), # Reward delay - RewardDelay=float(self.RewardDelay.text()), + RewardDelay=self.RewardDelay.value(), # Auto water AutoReward=self.AutoReward.isChecked(), - AutoWaterType=self.AutoWaterType.currentText(), - Multiplier=float(self.Multiplier.text()), - Unrewarded=int(self.Unrewarded.text()), - Ignored=int(self.Ignored.text()), + AutoWaterType=AutoWaterMode(self.AutoWaterType.currentText()), + Multiplier=self.Multiplier.value(), + Unrewarded=self.Unrewarded.value(), + Ignored=self.Ignored.value(), # ITI - ITIMin=float(self.ITIMin.text()), - ITIMax=float(self.ITIMax.text()), - ITIBeta=float(self.ITIBeta.text()), - ITIIncrease=float(self.ITIIncrease.text()), + ITIMin=self.ITIMin.value(), + ITIMax=self.ITIMax.value(), + ITIBeta=self.ITIBeta.value(), + ITIIncrease=self.ITIIncrease.value(), # Response time - ResponseTime=float(self.ResponseTime.text()), - RewardConsumeTime=float(self.RewardConsumeTime.text()), - StopIgnores=round(int(self.auto_stop_ignore_win.text())*float(self.auto_stop_ignore_ratio_threshold.text())), + ResponseTime=self.ResponseTime.value(), + RewardConsumeTime=self.RewardConsumeTime.value(), + StopIgnores=self.auto_stop_ignore_win.value()*self.auto_stop_ignore_ratio_threshold.value(), # Auto block - AdvancedBlockAuto=self.AdvancedBlockAuto.currentText(), - SwitchThr=float(self.SwitchThr.text()), - PointsInARow=int(self.PointsInARow.text()), + 
AdvancedBlockAuto=AdvancedBlockMode(self.AdvancedBlockAuto.currentText()), + SwitchThr=self.SwitchThr.value(), + PointsInARow=self.PointsInARow.value(), # Auto stop - MaxTrial=int(self.MaxTrial.text()), - MaxTime=int(self.MaxTime.text()), + MaxTrial=self.MaxTrial.value(), + MaxTime=self.MaxTime.value(), # Reward size - RightValue_volume=float(self.RightValue_volume.text()), - LeftValue_volume=float(self.LeftValue_volume.text()), + RightValue_volume=self.RightValue_volume.value(), + LeftValue_volume=self.LeftValue_volume.value(), # Warmup warmup=self.warmup.currentText(), - warm_min_trial=int(self.warm_min_trial.text()), - warm_max_choice_ratio_bias=float(self.warm_max_choice_ratio_bias.text()), - warm_min_finish_ratio=float(self.warm_min_finish_ratio.text()), - warm_windowsize=int(self.warm_windowsize.text()) + warm_min_trial=self.warm_min_trial.value(), + warm_max_choice_ratio_bias=self.warm_max_choice_ratio_bias.value(), + warm_min_finish_ratio=self.warm_min_finish_ratio.value(), + warm_windowsize=self.warm_windowsize.value() ) def connect_task_parameters(self) -> None: """ - Connect relevant widgets to update task parameters in task logic model and add validators + Connect relevant widgets to update task parameters in task logic model """ - # update parameters in behavior task logic model - # self.AutoTrain_dialog.trainingStageChanged.connect( - # lambda stage: setattr(self.behavior_task_logic_model.task_parameters, 'training_stage', stage)) - self.Task.currentTextChanged.connect( - lambda task: setattr(self.behavior_task_logic_model.task_parameters, 'task', task)) - self.BaseRewardSum.textChanged.connect( - lambda text: None if text in ['', '.'] else setattr(self.behavior_task_logic_model.task_parameters, 'BaseRewardSum', float(text))) - self.BaseRewardSum.setValidator(QDoubleValidator()) - - self.RewardFamily.textChanged.connect( - lambda text: None if text == '' else setattr(self.behavior_task_logic_model.task_parameters, 'RewardFamily', int(text))) - 
self.RewardFamily.setValidator(QIntValidator()) - - self.RewardPairsN.textChanged.connect( - lambda text: None if text == '' else setattr(self.behavior_task_logic_model.task_parameters, 'RewardPairsN', int(text))) - self.RewardPairsN.setValidator(QIntValidator()) - - self.UncoupledReward.textChanged.connect( - lambda text: setattr(self.behavior_task_logic_model.task_parameters, 'UncoupledReward', text)) - - self.Randomness.currentIndexChanged.connect( - lambda text: setattr(self.behavior_task_logic_model.task_parameters, 'Randomness', text)) - - self.BlockMin.textChanged.connect( - lambda text: None if text == '' else setattr(self.behavior_task_logic_model.task_parameters, 'BlockMin', int(text))) - self.BlockMin.setValidator(QIntValidator()) - - self.BlockMax.textChanged.connect( - lambda text: None if text == '' else setattr(self.behavior_task_logic_model.task_parameters, 'BlockMax', int(text))) - self.BlockMax.setValidator(QIntValidator()) - - self.BlockBeta.textChanged.connect( - lambda text: None if text == '' else setattr(self.behavior_task_logic_model.task_parameters, 'BlockBeta', int(text))) - self.BlockBeta.setValidator(QIntValidator()) - - self.BlockMinReward.textChanged.connect( - lambda text: None if text == '' else setattr(self.behavior_task_logic_model.task_parameters, 'BlockMinReward', int(text))) - self.BlockMinReward.setValidator(QIntValidator()) - - self.DelayMin.textChanged.connect( - lambda text: None if text in ['', '.'] else setattr(self.behavior_task_logic_model.task_parameters, 'DelayMin', float(text))) - self.DelayMin.setValidator(QDoubleValidator()) - - self.DelayMax.textChanged.connect( - lambda text: None if text in ['', '.'] else setattr(self.behavior_task_logic_model.task_parameters, 'DelayMax', float(text))) - self.DelayMax.setValidator(QDoubleValidator()) - - self.DelayBeta.textChanged.connect( - lambda text: None if text in ['', '.'] else setattr(self.behavior_task_logic_model.task_parameters, 'DelayBeta', float(text))) - 
self.DelayBeta.setValidator(QDoubleValidator()) - - self.RewardDelay.textChanged.connect( - lambda text: None if text in ['', '.'] else setattr(self.behavior_task_logic_model.task_parameters, 'RewardDelay', float(text))) - self.RewardDelay.setValidator(QDoubleValidator()) - - self.AutoReward.toggled.connect( - lambda checked: setattr(self.behavior_task_logic_model.task_parameters, 'RewardDelay', checked)) - - self.AutoWaterType.currentTextChanged.connect( - lambda water: setattr(self.behavior_task_logic_model.task_parameters, 'AutoWaterType', water)) - - self.Multiplier.textChanged.connect( - lambda text: None if text in ['', '.'] else setattr(self.behavior_task_logic_model.task_parameters, 'Multiplier', float(text))) - self.Multiplier.setValidator(QDoubleValidator()) - - self.Unrewarded.textChanged.connect( - lambda text: None if text == '' else setattr(self.behavior_task_logic_model.task_parameters, 'Unrewarded', int(text))) - self.Unrewarded.setValidator(QIntValidator()) - - self.Ignored.textChanged.connect( - lambda text: None if text == '' else setattr(self.behavior_task_logic_model.task_parameters, 'Ignored', int(text))) - self.Ignored.setValidator(QIntValidator()) - - self.ITIMin.textChanged.connect( - lambda text: None if text in ['', '.'] else setattr(self.behavior_task_logic_model.task_parameters, 'ITIMin', float(text))) - self.ITIMin.setValidator(QDoubleValidator()) - - self.ITIMax.textChanged.connect( - lambda text: None if text in ['', '.'] else setattr(self.behavior_task_logic_model.task_parameters, 'ITIMax', float(text))) - self.ITIMax.setValidator(QDoubleValidator()) - - self.ITIBeta.textChanged.connect( - lambda text: None if text in ['', '.'] else setattr(self.behavior_task_logic_model.task_parameters, 'ITIBeta', float(text))) - self.ITIBeta.setValidator(QDoubleValidator()) - - self.ITIIncrease.textChanged.connect( - lambda text: None if text in ['', '.'] else setattr(self.behavior_task_logic_model.task_parameters, 'ITIIncrease', float(text))) - 
self.ITIIncrease.setValidator(QDoubleValidator()) - - self.ResponseTime.textChanged.connect( - lambda text: None if text in ['', '.'] else setattr(self.behavior_task_logic_model.task_parameters, 'ResponseTime', float(text))) - self.ResponseTime.setValidator(QDoubleValidator()) - - self.RewardConsumeTime.textChanged.connect( - lambda text: None if text in ['', '.'] else setattr(self.behavior_task_logic_model.task_parameters, 'RewardConsumeTime', float(text))) - self.RewardConsumeTime.setValidator(QDoubleValidator()) - - self.auto_stop_ignore_win.textChanged.connect( - lambda win: None if win == '' else setattr(self.behavior_task_logic_model.task_parameters, 'StopIgnores', - round(int(win) * float(self.auto_stop_ignore_ratio_threshold.text())))) - self.auto_stop_ignore_win.setValidator(QIntValidator()) - - self.auto_stop_ignore_ratio_threshold.textChanged.connect( - lambda ratio: None if ratio in ['', '.'] else setattr(self.behavior_task_logic_model.task_parameters, 'StopIgnores', - round(int(self.auto_stop_ignore_win.text()) * (float(ratio))))) - self.auto_stop_ignore_ratio_threshold.setValidator(QDoubleValidator()) - + # update parameters in task logic model + self.BaseRewardSum.valueChanged.connect(lambda v: setattr(self.task_logic.task_parameters, 'BaseRewardSum', v)) + self.RewardFamily.valueChanged.connect(lambda v: setattr(self.task_logic.task_parameters, 'RewardFamily', v)) + self.RewardPairsN.valueChanged.connect(lambda v: setattr(self.task_logic.task_parameters, 'RewardPairsN', v)) + self.UncoupledReward.textChanged.connect(lambda t: + setattr(self.task_logic.task_parameters, 'UncoupledReward', t)) + self.Randomness.currentIndexChanged.connect(lambda t: setattr(self.task_logic.task_parameters, 'Randomness', t)) + self.BlockMin.valueChanged.connect(lambda v: setattr(self.task_logic.task_parameters, 'BlockMin', v)) + self.BlockMax.valueChanged.connect(lambda v: setattr(self.task_logic.task_parameters, 'BlockMax', v)) + 
self.BlockBeta.valueChanged.connect(lambda v: setattr(self.task_logic.task_parameters, 'BlockBeta', v)) + self.BlockMinReward.valueChanged.connect(lambda v: + setattr(self.task_logic.task_parameters, 'BlockMinReward', v)) + self.DelayMin.valueChanged.connect(lambda v: setattr(self.task_logic.task_parameters, 'DelayMin', v)) + self.DelayMax.valueChanged.connect(lambda v: setattr(self.task_logic.task_parameters, 'DelayMax', v)) + self.DelayBeta.valueChanged.connect(lambda v: setattr(self.task_logic.task_parameters, 'DelayBeta', v)) + self.RewardDelay.valueChanged.connect(lambda v: setattr(self.task_logic.task_parameters, 'RewardDelay', v)) + self.AutoReward.toggled.connect(lambda c: setattr(self.task_logic.task_parameters, 'RewardDelay', c)) + self.AutoWaterType.currentTextChanged.connect(lambda water: + setattr(self.task_logic.task_parameters, 'AutoWaterType', + AutoWaterMode(water))) + self.Multiplier.valueChanged.connect(lambda v: setattr(self.task_logic.task_parameters, 'Multiplier', v)) + self.Unrewarded.valueChanged.connect(lambda v: setattr(self.task_logic.task_parameters, 'Unrewarded', v)) + self.Ignored.valueChanged.connect(lambda v: setattr(self.task_logic.task_parameters, 'Ignored', v)) + self.ITIMin.valueChanged.connect(lambda v: setattr(self.task_logic.task_parameters, 'ITIMin', v)) + self.ITIMax.valueChanged.connect(lambda v: setattr(self.task_logic.task_parameters, 'ITIMax', v)) + self.ITIBeta.valueChanged.connect(lambda v: setattr(self.task_logic.task_parameters, 'ITIBeta', v)) + self.ITIIncrease.valueChanged.connect(lambda v: setattr(self.task_logic.task_parameters, 'ITIIncrease', v)) + self.ResponseTime.valueChanged.connect(lambda v: setattr(self.task_logic.task_parameters, 'ResponseTime', v)) + self.RewardConsumeTime.valueChanged.connect(lambda v: setattr(self.task_logic.task_parameters, 'RewardConsumeTime', v)) + self.auto_stop_ignore_win.valueChanged.connect(lambda v: setattr(self.task_logic.task_parameters, 'StopIgnores', + round(v * 
self.auto_stop_ignore_ratio_threshold.value()))) + self.auto_stop_ignore_ratio_threshold.valueChanged.connect(lambda v: setattr(self.task_logic.task_parameters, + 'StopIgnores', + round(self.auto_stop_ignore_win.value() * v))) self.AdvancedBlockAuto.currentTextChanged.connect( - lambda text: setattr(self.behavior_task_logic_model.task_parameters, 'AdvancedBlockAuto', text)) - - self.SwitchThr.textChanged.connect( - lambda text: None if text in ['', '.'] else setattr(self.behavior_task_logic_model.task_parameters, 'SwitchThr', float(text))) - self.SwitchThr.setValidator(QDoubleValidator()) - - self.PointsInARow.textChanged.connect( - lambda text: None if text == '' else setattr(self.behavior_task_logic_model.task_parameters, 'PointsInARow', int(text))) - self.PointsInARow.setValidator(QIntValidator()) - - self.MaxTrial.textChanged.connect( - lambda text: None if text == '' else setattr(self.behavior_task_logic_model.task_parameters, 'MaxTrial', int(text))) - self.MaxTrial.setValidator(QIntValidator()) - - self.MaxTime.textChanged.connect( - lambda text: None if text == '' else setattr(self.behavior_task_logic_model.task_parameters, 'MaxTime', int(text))) - self.MaxTime.setValidator(QIntValidator()) - - self.RightValue_volume.textChanged.connect( - lambda text: None if text in ['', '.'] else setattr(self.behavior_task_logic_model.task_parameters, 'RightValue_volume', float(text))) - - self.LeftValue_volume.textChanged.connect( - lambda text: None if text in ['', '.'] else setattr(self.behavior_task_logic_model.task_parameters, 'LeftValue_volume', float(text))) - - self.warmup.currentTextChanged.connect( - lambda text: setattr(self.behavior_task_logic_model.task_parameters, 'warmup', text)) - - self.warm_min_trial.textChanged.connect( - lambda text: None if text == '' else setattr(self.behavior_task_logic_model.task_parameters, 'warm_min_trial', int(text))) - self.warm_min_trial.setValidator(QIntValidator()) - - self.warm_max_choice_ratio_bias.textChanged.connect( - 
lambda text: None if text in ['', '.'] else setattr(self.behavior_task_logic_model.task_parameters, 'warm_max_choice_ratio_bias', - float(text))) - self.warm_max_choice_ratio_bias.setValidator(QDoubleValidator()) - - self.warm_min_finish_ratio.textChanged.connect( - lambda text: None if text in ['', '.'] else setattr(self.behavior_task_logic_model.task_parameters, 'warm_min_finish_ratio', float(text))) - self.warm_min_finish_ratio.setValidator(QDoubleValidator()) - - self.warm_windowsize.textChanged.connect( - lambda text: None if text == '' else setattr(self.behavior_task_logic_model.task_parameters, 'warm_windowsize', int(text))) - self.warm_windowsize.setValidator(QIntValidator()) + lambda text: setattr(self.task_logic.task_parameters, 'AdvancedBlockAuto', AdvancedBlockMode(text))) + self.SwitchThr.valueChanged.connect(lambda v:setattr(self.task_logic.task_parameters, 'SwitchThr', v)) + self.PointsInARow.valueChanged.connect(lambda v: setattr(self.task_logic.task_parameters, 'PointsInARow', v)) + self.MaxTrial.valueChanged.connect(lambda v: setattr(self.task_logic.task_parameters, 'MaxTrial', v)) + self.MaxTime.valueChanged.connect(lambda v: setattr(self.task_logic.task_parameters, 'MaxTime', v)) + self.RightValue_volume.valueChanged.connect(lambda v: + setattr(self.task_logic.task_parameters, 'RightValue_volume', v)) + + self.LeftValue_volume.valueChanged.connect(lambda v: + setattr(self.task_logic.task_parameters, 'LeftValue_volume', v)) + + self.warmup.currentTextChanged.connect(lambda text: setattr(self.task_logic.task_parameters, 'warmup', text)) + self.warm_min_trial.valueChanged.connect(lambda v: + setattr(self.task_logic.task_parameters, 'warm_min_trial', v)) + self.warm_max_choice_ratio_bias.valueChanged.connect( + lambda v: setattr(self.task_logic.task_parameters, 'warm_max_choice_ratio_bias', v)) + self.warm_min_finish_ratio.valueChanged.connect(lambda v: + setattr(self.task_logic.task_parameters, + 'warm_min_finish_ratio', v)) + 
self.warm_windowsize.valueChanged.connect(lambda v: + setattr(self.task_logic.task_parameters, 'warm_windowsize', v)) def connect_session_model(self) -> None: """ @@ -953,7 +858,7 @@ def _warmup(self): to some incorrect parameters when it was turned off. ''' # set warm up parameters - if self.behavior_task_logic_model.task_parameters.warmup=='on': + if self.task_logic.task_parameters.warmup== 'on': # get parameters before the warm up is on;WarmupBackup_ stands for Warmup backup, which are parameters before warm-up. self._GetTrainingParameters(prefix='WarmupBackup_') self.warm_min_trial.setEnabled(True) @@ -985,7 +890,7 @@ def _warmup(self): # turn advanced block auto off self.AdvancedBlockAuto.setCurrentIndex(self.AdvancedBlockAuto.findText('off')) self._ShowRewardPairs() - elif self.behavior_task_logic_model.task_parameters.warmup=='off': + elif self.task_logic.task_parameters.warmup== 'off': # set parameters back to the previous parameters before warm up self._revert_to_previous_parameters() self.warm_min_trial.setEnabled(False) @@ -2063,7 +1968,7 @@ def _set_parameters(self,key,widget_dict,parameters): def _Randomness(self): '''enable/disable some fields in the Block/Delay Period/ITI''' - if self.behavior_task_logic_model.task_parameters.Randomness == 'Exponential': + if self.task_logic.task_parameters.Randomness == 'Exponential': self.label_14.setEnabled(True) self.label_18.setEnabled(True) self.label_39.setEnabled(True) @@ -2072,7 +1977,7 @@ def _Randomness(self): self.ITIBeta.setEnabled(True) # if self.Task.currentText()!='RewardN': # self.BlockBeta.setStyleSheet("color: black;border: 1px solid gray;background-color: white;") - elif self.behavior_task_logic_model.task_parameters.Randomness == 'Even': + elif self.task_logic.task_parameters.Randomness == 'Even': self.label_14.setEnabled(False) self.label_18.setEnabled(False) self.label_39.setEnabled(False) @@ -2086,7 +1991,7 @@ def _Randomness(self): def _AdvancedBlockAuto(self): '''enable/disable some fields 
in the AdvancedBlockAuto''' - if self.behavior_task_logic_model.task_parameters.AdvancedBlockAuto=='off': + if self.task_logic.task_parameters.AdvancedBlockAuto== 'off': self.label_54.setEnabled(False) self.label_60.setEnabled(False) self.SwitchThr.setEnabled(False) @@ -2262,16 +2167,16 @@ def _CheckFormat(self,child): '''Check if the input format is correct''' if child.objectName()=='RewardFamily': # When we change the RewardFamily, sometimes the RewardPairsN is larger than available reward pairs in this family. try: - if self.behavior_task_logic_model.task_parameters.RewardPairsN>len(self.RewardFamilies[self.behavior_task_logic_model.task_parameters.RewardFamily-1]): - self.RewardPairsN.setText(str(len(self.RewardFamilies[self.behavior_task_logic_model.task_parameters.RewardFamily-1]))) + if self.task_logic.task_parameters.RewardPairsN>len(self.RewardFamilies[self.task_logic.task_parameters.RewardFamily - 1]): + self.RewardPairsN.setText(str(len(self.RewardFamilies[self.task_logic.task_parameters.RewardFamily - 1]))) return 1 except Exception as e: logging.error(traceback.format_exc()) return 0 if child.objectName()=='RewardFamily' or child.objectName()=='RewardPairsN' or child.objectName()=='BaseRewardSum': try: - self.RewardPairs=self.RewardFamilies[self.behavior_task_logic_model.task_parameters.RewardFamily-1][:self.behavior_task_logic_model.task_parameters.RewardPairsN] - if self.behavior_task_logic_model.task_parameters.RewardPairsN>len(self.RewardFamilies[self.behavior_task_logic_model.task_parameters.RewardFamily-1]): + self.RewardPairs= self.RewardFamilies[self.task_logic.task_parameters.RewardFamily - 1][:self.task_logic.task_parameters.RewardPairsN] + if self.task_logic.task_parameters.RewardPairsN>len(self.RewardFamilies[self.task_logic.task_parameters.RewardFamily - 1]): return 0 else: return 1 @@ -2280,7 +2185,7 @@ def _CheckFormat(self,child): return 0 if child.objectName()=='UncoupledReward': try: - input_string = 
self.behavior_task_logic_model.task_parameters.UncoupledReward + input_string = self.task_logic.task_parameters.UncoupledReward if input_string=='': # do not permit empty uncoupled reward return 0 # remove any square brackets and spaces from the string @@ -2458,11 +2363,11 @@ def _ShowRewardPairs(self): '''Show reward pairs''' try: if self.behavior_session_model.experiment in ['Coupled Baiting','Coupled Without Baiting','RewardN']: - self.RewardPairs=self.RewardFamilies[int(self.behavior_task_logic_model.task_parameters.RewardFamily)-1][:int(self.behavior_task_logic_model.task_parameters.RewardPairsN)] + self.RewardPairs= self.RewardFamilies[int(self.task_logic.task_parameters.RewardFamily) - 1][:int(self.task_logic.task_parameters.RewardPairsN)] self.RewardProb=np.array(self.RewardPairs)/np.expand_dims(np.sum(self.RewardPairs,axis=1),axis=1)*\ - self.behavior_task_logic_model.task_parameters.BaseRewardSum + self.task_logic.task_parameters.BaseRewardSum elif self.behavior_session_model.experiment in ['Uncoupled Baiting','Uncoupled Without Baiting']: - input_string = self.behavior_task_logic_model.task_parameters.UncoupledReward + input_string = self.task_logic.task_parameters.UncoupledReward # remove any square brackets and spaces from the string input_string = input_string.replace('[','').replace(']','').replace(',', ' ') # split the remaining string into a list of individual numbers @@ -3777,7 +3682,7 @@ def _StopPhotometry(self,closing=False): QMessageBox.Ok) def _AutoReward(self): - if self.behavior_task_logic_model.task_parameters.AutoReward: + if self.task_logic.task_parameters.AutoReward: self.AutoReward.setStyleSheet("background-color : green;") self.AutoReward.setText('Auto water On') for widget in ['AutoWaterType', 'Multiplier', 'Unrewarded', 'Ignored']: @@ -4281,12 +4186,12 @@ def _Start(self): # validate behavior session task logic model and document validation errors if any try: - 
AindBehaviorTaskLogicModel(**self.behavior_task_logic_model.model_dump()) + AindDynamicForagingTaskLogic(**self.task_logic.model_dump()) except ValidationError as e: logging.error(str(e), extra={'tags': [self.warning_log_tag]}) # save behavior session model with open(self.behavior_task_logic_model_json, "w") as outfile: - outfile.write(self.behavior_task_logic_model.model_dump_json()) + outfile.write(self.task_logic.model_dump_json()) if (self.StartANewSession == 1) and (self.ANewTrial == 0): @@ -5216,8 +5121,5 @@ def log_subprocess_output(process, prefix): # Move creating AutoTrain here to catch any AWS errors win.create_auto_train_dialog() - win.AutoTrain_dialog.trainingStageChanged.connect( - lambda stage: setattr(win.behavior_task_logic_model.task_parameters, 'training_stage', stage)) - # TODO: Feels weird doing it this way? I don't know the AWS errors reasoning though # Run your application's event loop and stop after closing all windows sys.exit(app.exec()) diff --git a/src/foraging_gui/ForagingGUI.ui b/src/foraging_gui/ForagingGUI.ui index ccf1dc1bb..f53e62558 100644 --- a/src/foraging_gui/ForagingGUI.ui +++ b/src/foraging_gui/ForagingGUI.ui @@ -2373,7 +2373,7 @@ Double dipping: - + 0 @@ -2386,13 +2386,13 @@ Double dipping: 16777215 - + 1 - + 0 @@ -2405,7 +2405,7 @@ Double dipping: 16777215 - + 0 @@ -2506,7 +2506,7 @@ Double dipping: - + 0 @@ -2519,7 +2519,7 @@ Double dipping: 16777215 - + 1 @@ -2544,7 +2544,7 @@ Double dipping: - + 0 @@ -2557,7 +2557,7 @@ Double dipping: 16777215 - + 1 @@ -2642,7 +2642,7 @@ Double dipping: - + 7 @@ -2655,7 +2655,7 @@ Double dipping: 16777215 - + 0 @@ -2788,7 +2788,7 @@ Double dipping: - + false @@ -2798,7 +2798,7 @@ Double dipping: 0 - + 0 @@ -2888,7 +2888,7 @@ Double dipping: - + 7 @@ -2901,7 +2901,7 @@ Double dipping: 16777215 - + 60 @@ -2920,7 +2920,7 @@ Double dipping: - + 7 @@ -2933,13 +2933,13 @@ Double dipping: 16777215 - + 0 - + false @@ -2955,7 +2955,7 @@ Double dipping: 16777215 - + 0.5 @@ -3015,7 +3015,7 @@ 
Double dipping: - + 0 @@ -3028,13 +3028,13 @@ Double dipping: 16777215 - + 8 - + 0 @@ -3047,7 +3047,7 @@ Double dipping: 16777215 - + 2 @@ -3072,7 +3072,7 @@ Double dipping: - + 0 @@ -3085,7 +3085,7 @@ Double dipping: 16777215 - + 1 @@ -3110,7 +3110,7 @@ Double dipping: - + false @@ -3120,7 +3120,7 @@ Double dipping: 0 - + 5 @@ -3239,7 +3239,7 @@ Double dipping: - + 0 @@ -3252,7 +3252,7 @@ Double dipping: 16777215 - + 100 @@ -3306,7 +3306,7 @@ Double dipping: - + 7 @@ -3319,7 +3319,7 @@ Double dipping: 16777215 - + 20 @@ -3395,7 +3395,7 @@ Double dipping: - + 7 @@ -3408,13 +3408,13 @@ Double dipping: 16777215 - + 20 - + 0 @@ -3427,7 +3427,7 @@ Double dipping: 16777215 - + 30 @@ -3452,7 +3452,7 @@ Double dipping: - + 0 @@ -3465,7 +3465,7 @@ Double dipping: 16777215 - + 1 @@ -3478,7 +3478,7 @@ Double dipping: - + 0 @@ -3491,13 +3491,13 @@ Double dipping: 16777215 - + 120 - + 0 @@ -3510,7 +3510,7 @@ Double dipping: 16777215 - + 3 @@ -3673,7 +3673,7 @@ Double dipping: - + 0 @@ -3686,13 +3686,13 @@ Double dipping: 16777215 - + 0.8 - + 0 @@ -3705,7 +3705,7 @@ Double dipping: 16777215 - + 1 @@ -3923,7 +3923,7 @@ Double dipping: - + 0 @@ -3936,7 +3936,7 @@ Double dipping: 16777215 - + 0.8 @@ -3983,7 +3983,7 @@ Double dipping: - + 7 @@ -3996,13 +3996,13 @@ Double dipping: 16777215 - + 1000 - + 7 @@ -4015,7 +4015,7 @@ Double dipping: 16777215 - + 200 @@ -4065,27 +4065,27 @@ Double dipping: - + 0 0 - + 20 - + 0 0 - + 0.1 @@ -4152,27 +4152,27 @@ Double dipping: - + 0 0 - + 0.8 - + 0 0 - + 60 @@ -4254,7 +4254,7 @@ Double dipping: - + 0 @@ -4267,7 +4267,7 @@ Double dipping: 16777215 - + .8 diff --git a/src/foraging_gui/ForagingGUI_Ephys.ui b/src/foraging_gui/ForagingGUI_Ephys.ui index ce38a260a..e497940f7 100644 --- a/src/foraging_gui/ForagingGUI_Ephys.ui +++ b/src/foraging_gui/ForagingGUI_Ephys.ui @@ -287,7 +287,7 @@ Qt::AlignRight|Qt::AlignTrailing|Qt::AlignVCenter - + 191 @@ -302,7 +302,7 @@ 0 - + 0.8 @@ -331,7 +331,7 @@ Qt::AlignRight|Qt::AlignTrailing|Qt::AlignVCenter - + 
403 @@ -346,7 +346,7 @@ 0 - + 1 @@ -375,7 +375,7 @@ Qt::AlignRight|Qt::AlignTrailing|Qt::AlignVCenter - + 614 @@ -390,7 +390,7 @@ 0 - + 1 @@ -441,7 +441,7 @@ Qt::AlignRight|Qt::AlignTrailing|Qt::AlignVCenter - + 191 @@ -456,7 +456,7 @@ 0 - + 20 @@ -485,7 +485,7 @@ Qt::AlignRight|Qt::AlignTrailing|Qt::AlignVCenter - + 403 @@ -500,7 +500,7 @@ 0 - + 20 @@ -529,7 +529,7 @@ Qt::AlignRight|Qt::AlignTrailing|Qt::AlignVCenter - + 614 @@ -544,7 +544,7 @@ 0 - + 60 @@ -573,7 +573,7 @@ Qt::AlignRight|Qt::AlignTrailing|Qt::AlignVCenter - + 863 @@ -588,7 +588,7 @@ 0 - + 0 @@ -639,7 +639,7 @@ Qt::AlignRight|Qt::AlignTrailing|Qt::AlignVCenter - + 191 @@ -654,7 +654,7 @@ 0 - + 1 @@ -683,7 +683,7 @@ Qt::AlignRight|Qt::AlignTrailing|Qt::AlignVCenter - + 403 @@ -698,7 +698,7 @@ 0 - + 0 @@ -727,7 +727,7 @@ Qt::AlignRight|Qt::AlignTrailing|Qt::AlignVCenter - + 614 @@ -742,7 +742,7 @@ 0 - + 1 @@ -793,7 +793,7 @@ Qt::AlignRight|Qt::AlignTrailing|Qt::AlignVCenter - + 191 @@ -808,7 +808,7 @@ 0 - + 2 @@ -837,7 +837,7 @@ Qt::AlignRight|Qt::AlignTrailing|Qt::AlignVCenter - + 403 @@ -852,7 +852,7 @@ 0 - + 1 @@ -881,7 +881,7 @@ Qt::AlignRight|Qt::AlignTrailing|Qt::AlignVCenter - + 614 @@ -896,7 +896,7 @@ 0 - + 8 @@ -928,7 +928,7 @@ Qt::AlignRight|Qt::AlignTrailing|Qt::AlignVCenter - + false @@ -946,7 +946,7 @@ 0 - + 0 @@ -997,7 +997,7 @@ Qt::AlignRight|Qt::AlignTrailing|Qt::AlignVCenter - + 614 @@ -1012,7 +1012,7 @@ 0 - + 0.8 @@ -1041,7 +1041,7 @@ Qt::AlignRight|Qt::AlignTrailing|Qt::AlignVCenter - + 863 @@ -1056,7 +1056,7 @@ 0 - + 200 @@ -1085,7 +1085,7 @@ Qt::AlignRight|Qt::AlignTrailing|Qt::AlignVCenter - + 1080 @@ -1100,7 +1100,7 @@ 0 - + 100 @@ -1151,7 +1151,7 @@ Qt::AlignRight|Qt::AlignTrailing|Qt::AlignVCenter - + 191 @@ -1166,7 +1166,7 @@ 0 - + 1 @@ -1279,7 +1279,7 @@ Qt::AlignRight|Qt::AlignTrailing|Qt::AlignVCenter - + false @@ -1297,7 +1297,7 @@ 0 - + 0.5 @@ -1520,7 +1520,7 @@ Qt::AlignRight|Qt::AlignTrailing|Qt::AlignVCenter - + 403 @@ -1535,7 +1535,7 @@ 0 - + 3 @@ -1667,7 +1667,7 @@ 
Qt::AlignRight|Qt::AlignTrailing|Qt::AlignVCenter - + false @@ -1685,7 +1685,7 @@ 0 - + 5 @@ -2022,7 +2022,7 @@ Qt::AlignRight|Qt::AlignTrailing|Qt::AlignVCenter - + 862 @@ -2037,7 +2037,7 @@ 0 - + 0 @@ -2113,7 +2113,7 @@ Qt::AlignRight|Qt::AlignTrailing|Qt::AlignVCenter - + 1081 @@ -2128,11 +2128,11 @@ 0 - + 1000 - + 613 @@ -2147,7 +2147,7 @@ 0 - + 120 @@ -2245,7 +2245,7 @@ Qt::AlignRight|Qt::AlignTrailing|Qt::AlignVCenter - + 190 @@ -2260,7 +2260,7 @@ 0 - + 30 @@ -2409,7 +2409,7 @@ Qt::AlignRight|Qt::AlignTrailing|Qt::AlignVCenter - + 402 @@ -2424,11 +2424,11 @@ 0 - + 60 - + 613 @@ -2443,7 +2443,7 @@ 0 - + 0.8 @@ -2497,7 +2497,7 @@ Qt::AlignRight|Qt::AlignTrailing|Qt::AlignVCenter - + 863 @@ -2512,11 +2512,11 @@ 0 - + 0.1 - + 1081 @@ -2531,7 +2531,7 @@ 0 - + 20 diff --git a/src/foraging_gui/MyFunctions.py b/src/foraging_gui/MyFunctions.py index 893c1bda9..afb1cf44d 100644 --- a/src/foraging_gui/MyFunctions.py +++ b/src/foraging_gui/MyFunctions.py @@ -278,7 +278,7 @@ def _get_uncoupled_reward_prob_pool(self): def _CheckWarmUp(self): '''Check if we should turn on warm up''' - if self.win.behavior_task_logic_model.task_parameters.warmup=='off': + if self.win.task_logic.task_parameters.warmup== 'off': return warmup=self._get_warmup_state() if warmup==0 and self.TP_warmup=='on': @@ -1647,7 +1647,7 @@ def _GetAnimalResponse(self,Channel1,Channel3,Channel4): self._SimulateResponse() return # set the valve time of auto water - multiplier = self.win.behavior_task_logic_model.task_parameters.Multiplier + multiplier = self.win.task_logic.task_parameters.Multiplier if self.CurrentAutoRewardTrial[0]==1: self._set_valve_time_left(Channel3,float(self.win.LeftValue.text()),multiplier) if self.CurrentAutoRewardTrial[1]==1: @@ -1795,14 +1795,14 @@ def _set_valve_time_right(self,channel3,RightValue=0.01,Multiplier=1): def _GiveLeft(self,channel3): '''manually give left water''' - 
channel3.LeftValue1(float(self.win.LeftValue.text())*1000*self.win.behavior_task_logic_model.task_parameters.Multiplier) + channel3.LeftValue1(float(self.win.LeftValue.text()) * 1000 * self.win.task_logic.task_parameters.Multiplier) time.sleep(0.01) channel3.ManualWater_Left(int(1)) channel3.LeftValue1(float(self.win.LeftValue.text())*1000) def _GiveRight(self,channel3): '''manually give right water''' - channel3.RightValue1(float(self.win.RightValue.text())*1000*self.win.behavior_task_logic_model.task_parameters.Multiplier) + channel3.RightValue1(float(self.win.RightValue.text()) * 1000 * self.win.task_logic.task_parameters.Multiplier) time.sleep(0.01) channel3.ManualWater_Right(int(1)) channel3.RightValue1(float(self.win.RightValue.text())*1000) diff --git a/src/foraging_gui/Visualization.py b/src/foraging_gui/Visualization.py index 5afbb30d1..928392180 100644 --- a/src/foraging_gui/Visualization.py +++ b/src/foraging_gui/Visualization.py @@ -522,23 +522,23 @@ def __init__(self,GeneratedTrials=None,dpi=100,width=5, height=4): FigureCanvas.__init__(self, self.fig) def _Update(self,win): # randomly draw a block length between Min and Max - SampleMethods=win.behavior_task_logic_model.task_parameters.Randomness + SampleMethods=win.task_logic.task_parameters.Randomness # block length - Min=win.behavior_task_logic_model.task_parameters.BlockMin - Max=win.behavior_task_logic_model.task_parameters.BlockMax - Beta=win.behavior_task_logic_model.task_parameters.BlockBeta + Min=win.task_logic.task_parameters.BlockMin + Max=win.task_logic.task_parameters.BlockMax + Beta=win.task_logic.task_parameters.BlockBeta DataType='int' SampledBlockLen=self._Sample(Min=Min,Max=Max,SampleMethods=SampleMethods,Beta=Beta,DataType=DataType) # ITI - Min=win.behavior_task_logic_model.task_parameters.ITIMin - Max=win.behavior_task_logic_model.task_parameters.ITIMax - Beta=win.behavior_task_logic_model.task_parameters.ITIBeta + Min=win.task_logic.task_parameters.ITIMin + 
Max=win.task_logic.task_parameters.ITIMax + Beta=win.task_logic.task_parameters.ITIBeta DataType='float' SampledITI=self._Sample(Min=Min,Max=Max,SampleMethods=SampleMethods,Beta=Beta,DataType=DataType) # Delay - Min=win.behavior_task_logic_model.task_parameters.DelayMin - Max=win.behavior_task_logic_model.task_parameters.DelayMax - Beta=win.behavior_task_logic_model.task_parameters.DelayBeta + Min=win.task_logic.task_parameters.DelayMin + Max=win.task_logic.task_parameters.DelayMax + Beta=win.task_logic.task_parameters.DelayBeta DataType='float' SampledDelay=self._Sample(Min=Min,Max=Max,SampleMethods=SampleMethods,Beta=Beta,DataType=DataType) self.ax1.cla() From 1164af89f6e11b43396b05021af1a833570fa48c Mon Sep 17 00:00:00 2001 From: Micah Woodard Date: Thu, 16 Jan 2025 13:36:33 -0800 Subject: [PATCH 08/23] fixing ephys ui file --- src/foraging_gui/Foraging.py | 2 +- src/foraging_gui/ForagingGUI_Ephys.ui | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/src/foraging_gui/Foraging.py b/src/foraging_gui/Foraging.py index 9b97878d8..757fcc4d3 100644 --- a/src/foraging_gui/Foraging.py +++ b/src/foraging_gui/Foraging.py @@ -294,7 +294,7 @@ def initialize_task_parameters(self) -> AindDynamicForagingTaskParameters: # Response time ResponseTime=self.ResponseTime.value(), RewardConsumeTime=self.RewardConsumeTime.value(), - StopIgnores=self.auto_stop_ignore_win.value()*self.auto_stop_ignore_ratio_threshold.value(), + StopIgnores=round(self.auto_stop_ignore_win.value()*self.auto_stop_ignore_ratio_threshold.value()), # Auto block AdvancedBlockAuto=AdvancedBlockMode(self.AdvancedBlockAuto.currentText()), SwitchThr=self.SwitchThr.value(), diff --git a/src/foraging_gui/ForagingGUI_Ephys.ui b/src/foraging_gui/ForagingGUI_Ephys.ui index e497940f7..d486fd407 100644 --- a/src/foraging_gui/ForagingGUI_Ephys.ui +++ b/src/foraging_gui/ForagingGUI_Ephys.ui @@ -2289,7 +2289,7 @@ Qt::AlignRight|Qt::AlignTrailing|Qt::AlignVCenter - + 402 @@ -2304,7 +2304,7 @@ 0 - + .8 
From c4d10404a0a17054005396c3a6d2169cf7aa5002 Mon Sep 17 00:00:00 2001 From: Micah Woodard Date: Thu, 16 Jan 2025 15:41:49 -0800 Subject: [PATCH 09/23] replacing tp references with logic model --- src/foraging_gui/Dialogs.py | 4 +- src/foraging_gui/Foraging.py | 168 ++++++++++++++++--------------- src/foraging_gui/MyFunctions.py | 170 +++++++++++++++++--------------- 3 files changed, 180 insertions(+), 162 deletions(-) diff --git a/src/foraging_gui/Dialogs.py b/src/foraging_gui/Dialogs.py index 03e9cd448..2241b6c09 100644 --- a/src/foraging_gui/Dialogs.py +++ b/src/foraging_gui/Dialogs.py @@ -15,7 +15,7 @@ from PyQt5.QtWidgets import QApplication, QDialog, QVBoxLayout, QHBoxLayout, QMessageBox, QGridLayout from PyQt5.QtWidgets import QLabel, QDialogButtonBox,QFileDialog,QInputDialog, QLineEdit from PyQt5 import QtWidgets, uic, QtGui -from PyQt5.QtCore import QThreadPool,Qt, QAbstractTableModel, QItemSelectionModel, QObject, QTimer, pyqtSignal +from PyQt5.QtCore import QThreadPool,Qt, QAbstractTableModel, QItemSelectionModel, QObject, QTimer from PyQt5.QtSvg import QSvgWidget from foraging_gui.MyFunctions import Worker @@ -2651,12 +2651,12 @@ def _override_curriculum_clicked(self, state): def _update_stage_to_apply(self): if self.checkBox_override_stage.isChecked(): self.stage_in_use = self.comboBox_override_stage.currentText() - logger.info(f"Stage overridden to: {self.stage_in_use}") elif self.last_session is not None: self.stage_in_use = self.last_session['next_stage_suggested'] else: self.stage_in_use = 'unknown training stage' + self.pushButton_apply_auto_train_paras.setText( f"Apply and lock\n" + '\n'.join(get_curriculum_string(self.curriculum_in_use).split('(')).strip(')') diff --git a/src/foraging_gui/Foraging.py b/src/foraging_gui/Foraging.py index 757fcc4d3..8be684245 100644 --- a/src/foraging_gui/Foraging.py +++ b/src/foraging_gui/Foraging.py @@ -263,48 +263,48 @@ def initialize_task_parameters(self) -> AindDynamicForagingTaskParameters: """ return 
AindDynamicForagingTaskParameters( - BaseRewardSum=self.BaseRewardSum.value(), - RewardFamily=self.RewardFamily.value(), - RewardPairsN=self.RewardPairsN.value(), - UncoupledReward=self.UncoupledReward.text(), + base_reward_sum=self.BaseRewardSum.value(), + reward_family=self.RewardFamily.value(), + reward_pairs_n=self.RewardPairsN.value(), + uncoupled_reward=self.UncoupledReward.text(), # Randomness - Randomness=self.Randomness.currentText(), + randomness=self.Randomness.currentText(), # Block length - BlockMin=self.BlockMin.value(), - BlockMax=self.BlockMax.value(), - BlockBeta=self.BlockBeta.value(), - BlockMinReward=self.BlockMinReward.value(), + block_min=self.BlockMin.value(), + block_max=self.BlockMax.value(), + block_beta=self.BlockBeta.value(), + block_min_reward=self.BlockMinReward.value(), # Delay period - DelayMin=self.DelayMin.value(), - DelayMax=self.DelayMax.value(), - DelayBeta=self.DelayBeta.value(), + delay_min=self.DelayMin.value(), + delay_max=self.DelayMax.value(), + delay_beta=self.DelayBeta.value(), # Reward delay - RewardDelay=self.RewardDelay.value(), + reward_delay=self.RewardDelay.value(), # Auto water - AutoReward=self.AutoReward.isChecked(), - AutoWaterType=AutoWaterMode(self.AutoWaterType.currentText()), - Multiplier=self.Multiplier.value(), - Unrewarded=self.Unrewarded.value(), - Ignored=self.Ignored.value(), + auto_reward=self.AutoReward.isChecked(), + auto_water_type=AutoWaterMode(self.AutoWaterType.currentText()), + multiplier=self.Multiplier.value(), + unrewarded=self.Unrewarded.value(), + ignored=self.Ignored.value(), # ITI - ITIMin=self.ITIMin.value(), - ITIMax=self.ITIMax.value(), - ITIBeta=self.ITIBeta.value(), - ITIIncrease=self.ITIIncrease.value(), + iti_min=self.ITIMin.value(), + iti_max=self.ITIMax.value(), + iti_beta=self.ITIBeta.value(), + iti_increase=self.ITIIncrease.value(), # Response time - ResponseTime=self.ResponseTime.value(), - RewardConsumeTime=self.RewardConsumeTime.value(), - 
StopIgnores=round(self.auto_stop_ignore_win.value()*self.auto_stop_ignore_ratio_threshold.value()), + response_time=self.ResponseTime.value(), + reward_consume_time=self.RewardConsumeTime.value(), + stop_ignores=round(self.auto_stop_ignore_win.value()*self.auto_stop_ignore_ratio_threshold.value()), # Auto block - AdvancedBlockAuto=AdvancedBlockMode(self.AdvancedBlockAuto.currentText()), - SwitchThr=self.SwitchThr.value(), - PointsInARow=self.PointsInARow.value(), + advanced_block_auto=AdvancedBlockMode(self.AdvancedBlockAuto.currentText()), + switch_thr=self.SwitchThr.value(), + points_in_a_row=self.PointsInARow.value(), # Auto stop - MaxTrial=self.MaxTrial.value(), - MaxTime=self.MaxTime.value(), + max_trial=self.MaxTrial.value(), + max_time=self.MaxTime.value(), # Reward size - RightValue_volume=self.RightValue_volume.value(), - LeftValue_volume=self.LeftValue_volume.value(), + right_value_volume=self.RightValue_volume.value(), + left_value_volume=self.LeftValue_volume.value(), # Warmup warmup=self.warmup.currentText(), warm_min_trial=self.warm_min_trial.value(), @@ -318,50 +318,52 @@ def connect_task_parameters(self) -> None: Connect relevant widgets to update task parameters in task logic model """ # update parameters in task logic model - self.BaseRewardSum.valueChanged.connect(lambda v: setattr(self.task_logic.task_parameters, 'BaseRewardSum', v)) - self.RewardFamily.valueChanged.connect(lambda v: setattr(self.task_logic.task_parameters, 'RewardFamily', v)) - self.RewardPairsN.valueChanged.connect(lambda v: setattr(self.task_logic.task_parameters, 'RewardPairsN', v)) + self.BaseRewardSum.valueChanged.connect(lambda v: + setattr(self.task_logic.task_parameters, 'base_reward_sum', v)) + self.RewardFamily.valueChanged.connect(lambda v: setattr(self.task_logic.task_parameters, 'reward_family', v)) + self.RewardPairsN.valueChanged.connect(lambda v: setattr(self.task_logic.task_parameters, 'reward_pairs_n', v)) self.UncoupledReward.textChanged.connect(lambda t: - 
setattr(self.task_logic.task_parameters, 'UncoupledReward', t)) - self.Randomness.currentIndexChanged.connect(lambda t: setattr(self.task_logic.task_parameters, 'Randomness', t)) - self.BlockMin.valueChanged.connect(lambda v: setattr(self.task_logic.task_parameters, 'BlockMin', v)) - self.BlockMax.valueChanged.connect(lambda v: setattr(self.task_logic.task_parameters, 'BlockMax', v)) - self.BlockBeta.valueChanged.connect(lambda v: setattr(self.task_logic.task_parameters, 'BlockBeta', v)) + setattr(self.task_logic.task_parameters, 'uncoupled_reward', t)) + self.Randomness.currentTextChanged.connect(lambda t: setattr(self.task_logic.task_parameters, 'randomness', t)) + self.BlockMin.valueChanged.connect(lambda v: setattr(self.task_logic.task_parameters, 'block_min', v)) + self.BlockMax.valueChanged.connect(lambda v: setattr(self.task_logic.task_parameters, 'block_max', v)) + self.BlockBeta.valueChanged.connect(lambda v: setattr(self.task_logic.task_parameters, 'block_beta', v)) self.BlockMinReward.valueChanged.connect(lambda v: - setattr(self.task_logic.task_parameters, 'BlockMinReward', v)) + setattr(self.task_logic.task_parameters, 'block_min_reward', v)) - self.DelayMin.valueChanged.connect(lambda v: setattr(self.task_logic.task_parameters, 'DelayMin', v)) - self.DelayMax.valueChanged.connect(lambda v: setattr(self.task_logic.task_parameters, 'DelayMax', v)) - self.DelayBeta.valueChanged.connect(lambda v: 
setattr(self.task_logic.task_parameters, 'delay_beta', v)) + self.RewardDelay.valueChanged.connect(lambda v: setattr(self.task_logic.task_parameters, 'reward_delay', v)) + self.AutoReward.toggled.connect(lambda c: setattr(self.task_logic.task_parameters, 'auto_reward', c)) self.AutoWaterType.currentTextChanged.connect(lambda water: - setattr(self.task_logic.task_parameters, 'AutoWaterType', + setattr(self.task_logic.task_parameters, 'auto_water_type', AutoWaterMode(water))) - self.Multiplier.valueChanged.connect(lambda v: setattr(self.task_logic.task_parameters, 'Multiplier', v)) - self.Unrewarded.valueChanged.connect(lambda v: setattr(self.task_logic.task_parameters, 'Unrewarded', v)) - self.Ignored.valueChanged.connect(lambda v: setattr(self.task_logic.task_parameters, 'Ignored', v)) - self.ITIMin.valueChanged.connect(lambda v: setattr(self.task_logic.task_parameters, 'ITIMin', v)) - self.ITIMax.valueChanged.connect(lambda v: setattr(self.task_logic.task_parameters, 'ITIMax', v)) - self.ITIBeta.valueChanged.connect(lambda v: setattr(self.task_logic.task_parameters, 'ITIBeta', v)) - self.ITIIncrease.valueChanged.connect(lambda v: setattr(self.task_logic.task_parameters, 'ITIIncrease', v)) - self.ResponseTime.valueChanged.connect(lambda v: setattr(self.task_logic.task_parameters, 'ResponseTime', v)) - self.RewardConsumeTime.valueChanged.connect(lambda v: setattr(self.task_logic.task_parameters, 'RewardConsumeTime', v)) - self.auto_stop_ignore_win.valueChanged.connect(lambda v: setattr(self.task_logic.task_parameters, 'StopIgnores', + self.Multiplier.valueChanged.connect(lambda v: setattr(self.task_logic.task_parameters, 'multiplier', v)) + self.Unrewarded.valueChanged.connect(lambda v: setattr(self.task_logic.task_parameters, 'unrewarded', v)) + self.Ignored.valueChanged.connect(lambda v: setattr(self.task_logic.task_parameters, 'ignored', v)) + self.ITIMin.valueChanged.connect(lambda v: setattr(self.task_logic.task_parameters, 'iti_min', v)) + 
self.ITIMax.valueChanged.connect(lambda v: setattr(self.task_logic.task_parameters, 'iti_max', v)) + self.ITIBeta.valueChanged.connect(lambda v: setattr(self.task_logic.task_parameters, 'iti_beta', v)) + self.ITIIncrease.valueChanged.connect(lambda v: setattr(self.task_logic.task_parameters, 'iti_increase', v)) + self.ResponseTime.valueChanged.connect(lambda v: setattr(self.task_logic.task_parameters, 'response_time', v)) + self.RewardConsumeTime.valueChanged.connect(lambda v: + setattr(self.task_logic.task_parameters, 'reward_consume_time', v)) + self.auto_stop_ignore_win.valueChanged.connect(lambda v: setattr(self.task_logic.task_parameters, 'stop_ignores', round(v * self.auto_stop_ignore_ratio_threshold.value()))) self.auto_stop_ignore_ratio_threshold.valueChanged.connect(lambda v: setattr(self.task_logic.task_parameters, - 'StopIgnores', + 'stop_ignores', round(self.auto_stop_ignore_win.value() * v))) self.AdvancedBlockAuto.currentTextChanged.connect( - lambda text: setattr(self.task_logic.task_parameters, 'AdvancedBlockAuto', AdvancedBlockMode(text))) - self.SwitchThr.valueChanged.connect(lambda v:setattr(self.task_logic.task_parameters, 'SwitchThr', v)) - self.PointsInARow.valueChanged.connect(lambda v: setattr(self.task_logic.task_parameters, 'PointsInARow', v)) - self.MaxTrial.valueChanged.connect(lambda v: setattr(self.task_logic.task_parameters, 'MaxTrial', v)) - self.MaxTime.valueChanged.connect(lambda v: setattr(self.task_logic.task_parameters, 'MaxTime', v)) + lambda text: setattr(self.task_logic.task_parameters, 'advanced_block_auto', AdvancedBlockMode(text))) + self.SwitchThr.valueChanged.connect(lambda v:setattr(self.task_logic.task_parameters, 'switch_thr', v)) + self.PointsInARow.valueChanged.connect(lambda v: setattr(self.task_logic.task_parameters, 'points_in_a_row', v)) + self.MaxTrial.valueChanged.connect(lambda v: setattr(self.task_logic.task_parameters, 'max_trial', v)) + self.MaxTime.valueChanged.connect(lambda v: 
setattr(self.task_logic.task_parameters, 'max_time', v)) self.RightValue_volume.valueChanged.connect(lambda v: - setattr(self.task_logic.task_parameters, 'RightValue_volume', v)) + setattr(self.task_logic.task_parameters, 'right_value_volume', v)) self.LeftValue_volume.valueChanged.connect(lambda v: - setattr(self.task_logic.task_parameters, 'LeftValue_volume', v)) + setattr(self.task_logic.task_parameters, 'left_value_volume', v)) self.warmup.currentTextChanged.connect(lambda text: setattr(self.task_logic.task_parameters, 'warmup', text)) self.warm_min_trial.valueChanged.connect(lambda v: @@ -872,21 +874,21 @@ def _warmup(self): # set warm up default parameters self.Task.setCurrentIndex(self.Task.findText('Coupled Baiting')) - self.BaseRewardSum.setText('1') - self.RewardFamily.setText('3') - self.RewardPairsN.setText('1') + self.BaseRewardSum.setValue(1) + self.RewardFamily.setValue(3) + self.RewardPairsN.setValue(1) - self.BlockBeta.setText('1') - self.BlockMin.setText('1') - self.BlockMax.setText('1') - self.BlockMinReward.setText('1') + self.BlockBeta.setValue(1) + self.BlockMin.setValue(1) + self.BlockMax.setValue(1) + self.BlockMinReward.setValue(1) self.AutoReward.setChecked(True) self._AutoReward() self.AutoWaterType.setCurrentIndex(self.AutoWaterType.findText('Natural')) - self.Multiplier.setText('0.8') - self.Unrewarded.setText('0') - self.Ignored.setText('0') + self.Multiplier.setValue(0.8) + self.Unrewarded.setValue(0) + self.Ignored.setValue(0) # turn advanced block auto off self.AdvancedBlockAuto.setCurrentIndex(self.AdvancedBlockAuto.findText('off')) self._ShowRewardPairs() @@ -2356,16 +2358,17 @@ def _Task(self): self.IncludeAutoReward.setGeometry(QtCore.QRect(614, 128, 80, 20)) self.label_26.setGeometry(QtCore.QRect(460, 128, 146, 16)) # set block length to be 1 - self.BlockMin.setText('1') - self.BlockMax.setText('1') + self.BlockMin.setValue(1) + self.BlockMax.setValue(1) def _ShowRewardPairs(self): '''Show reward pairs''' + tp = 
self.task_logic.task_parameters try: if self.behavior_session_model.experiment in ['Coupled Baiting','Coupled Without Baiting','RewardN']: - self.RewardPairs= self.RewardFamilies[int(self.task_logic.task_parameters.RewardFamily) - 1][:int(self.task_logic.task_parameters.RewardPairsN)] + self.RewardPairs= self.RewardFamilies[tp.reward_family - 1][:tp.reward_pairs_n] self.RewardProb=np.array(self.RewardPairs)/np.expand_dims(np.sum(self.RewardPairs,axis=1),axis=1)*\ - self.task_logic.task_parameters.BaseRewardSum + tp.base_reward_sum elif self.behavior_session_model.experiment in ['Uncoupled Baiting','Uncoupled Without Baiting']: input_string = self.task_logic.task_parameters.UncoupledReward # remove any square brackets and spaces from the string @@ -4400,8 +4403,9 @@ def _StartTrialLoop(self,GeneratedTrials,worker1,worker_save): GeneratedTrials.B_CurrentTrialN+=1 print('Current trial: '+str(GeneratedTrials.B_CurrentTrialN+1)) logging.info('Current trial: '+str(GeneratedTrials.B_CurrentTrialN+1)) - if (self.GeneratedTrials.TP_AutoReward or int(self.GeneratedTrials.TP_BlockMinReward)>0 - or self.GeneratedTrials.TP_Task in ['Uncoupled Baiting','Uncoupled Without Baiting']) or self.AddOneTrialForNoresponse.currentText()=='Yes': + if (self.task_logic.task_parameters.auto_reward or self.task_logic.task_parameters.block_min_reward>0 + or self.GeneratedTrials.TP_Task in ['Uncoupled Baiting','Uncoupled Without Baiting']) or \ + self.AddOneTrialForNoresponse.currentText()=='Yes': # The next trial parameters must be dependent on the current trial's choice # get animal response and then generate a new trial self.NewTrialRewardOrder=0 @@ -4564,7 +4568,7 @@ def _StartTrialLoop1(self,GeneratedTrials,worker1,workerPlot,workerGenerateAtria GeneratedTrials.B_CurrentTrialN+=1 print('Current trial: '+str(GeneratedTrials.B_CurrentTrialN+1)) logging.info('Current trial: '+str(GeneratedTrials.B_CurrentTrialN+1)) - if not (self.GeneratedTrials.TP_AutoReward or 
int(self.GeneratedTrials.TP_BlockMinReward)>0): + if not (self.task_logic.task_parameters.auto_reward or self.task_logic.task_parameters.block_min_reward>0): # generate new trial and get reward self.NewTrialRewardOrder=1 else: diff --git a/src/foraging_gui/MyFunctions.py b/src/foraging_gui/MyFunctions.py index afb1cf44d..6789f9b3d 100644 --- a/src/foraging_gui/MyFunctions.py +++ b/src/foraging_gui/MyFunctions.py @@ -15,6 +15,7 @@ from PyQt5 import QtCore from foraging_gui.reward_schedules.uncoupled_block import UncoupledBlocks +from aind_behavior_dynamic_foraging import AindDynamicForagingTaskLogic if PLATFORM == 'win32': from newscale.usbxpress import USBXpressLib, USBXpressDevice @@ -23,8 +24,9 @@ class GenerateTrials(): - def __init__(self,win): + def __init__(self,win, task_logic: AindDynamicForagingTaskLogic): self.win=win + self.task_logic = task_logic self.B_LeftLickIntervalPercent = None # percentage of left lick intervals under 100ms self.B_RightLickIntervalPercent = None # percentage of right lick intervals under 100ms self.B_CrossSideIntervalPercent = None # percentage of cross side lick intervals under 100ms @@ -142,8 +144,8 @@ def _GenerateATrial(self,Channel4): # Initialize the UncoupledBlocks object and generate the first trial self.uncoupled_blocks = UncoupledBlocks( rwd_prob_array=self.RewardProbPoolUncoupled, - block_min=int(self.TP_BlockMin), - block_max=int(self.TP_BlockMax), + block_min=self.task_logic.task_parameters.block_min, + block_max=self.task_logic.task_parameters.block_max, persev_add=True, # Hard-coded to True for now perseverative_limit=4, # Hard-coded to 4 for now max_block_tally=3, # Hard-coded to 3 for now @@ -239,13 +241,13 @@ def _PerformOptogenetics(self,Channel4): def _CheckSessionControl(self): '''Check if the session control is on''' if self.TP_SessionWideControl=='on': - session_control_block_length=float(self.TP_MaxTrial)*float(self.TP_FractionOfSession) + 
session_control_block_length=self.task_logic.task_parameters.max_trial*float(self.TP_FractionOfSession) if self.TP_SessionStartWith=='on': initial_state=1 elif self.TP_SessionStartWith=='off': initial_state=0 - calculated_state=np.zeros(int(self.TP_MaxTrial)) - numbers=np.arange(int(self.TP_MaxTrial)) + calculated_state=np.zeros(self.task_logic.task_parameters.max_trial) + numbers=np.arange(self.task_logic.task_parameters.max_trial) numbers_floor=np.floor(numbers/session_control_block_length) # Find odd values: A value is odd if value % 2 != 0 odd_values_mask = (numbers_floor % 2 != 0) @@ -266,7 +268,7 @@ def _CheckSessionControl(self): def _get_uncoupled_reward_prob_pool(self): # Get reward prob pool from the input string (e.g., ["0.1", "0.5", "0.9"]) - input_string = self.win.UncoupledReward.text() + input_string = self.task_logic.task_parameters.uncoupled_reward # remove any square brackets and spaces from the string input_string = input_string.replace('[','').replace(']','').replace(',', ' ') # split the remaining string into a list of individual numbers @@ -278,10 +280,10 @@ def _get_uncoupled_reward_prob_pool(self): def _CheckWarmUp(self): '''Check if we should turn on warm up''' - if self.win.task_logic.task_parameters.warmup== 'off': + if self.task_logic.task_parameters.warmup== 'off': return warmup=self._get_warmup_state() - if warmup==0 and self.TP_warmup=='on': + if warmup==0 and self.task_logic.task_parameters.warmup == 'on': # set warm up to off index=self.win.warmup.findText('off') self.win.warmup.setCurrentIndex(index) @@ -293,8 +295,8 @@ def _CheckWarmUp(self): def _get_warmup_state(self): '''calculate the metrics related to the warm up and decide if we should turn on the warm up''' - TP_warm_windowsize=int(self.TP_warm_windowsize) - B_AnimalResponseHistory_window=self.B_AnimalResponseHistory[-TP_warm_windowsize:] + warm_windowsize=self.task_logic.task_parameters.warm_windowsize + 
B_AnimalResponseHistory_window=self.B_AnimalResponseHistory[-warm_windowsize:] finish_trial=self.B_AnimalResponseHistory.shape[0] # the warmup is only turned on at the beginning of the session, thus the number of finished trials is equal to the number of trials with warmup on left_choices = np.count_nonzero(B_AnimalResponseHistory_window == 0) right_choices = np.count_nonzero(B_AnimalResponseHistory_window == 1) @@ -307,7 +309,9 @@ def _get_warmup_state(self): choice_ratio=0 else: choice_ratio=right_choices/(left_choices+right_choices) - if finish_trial>=float(self.TP_warm_min_trial) and finish_ratio>=float(self.TP_warm_min_finish_ratio) and abs(choice_ratio-0.5)<=float(self.TP_warm_max_choice_ratio_bias): + if finish_trial>=self.task_logic.task_parameters.warm_min_trial and \ + finish_ratio>=self.task_logic.task_parameters.warm_min_finish_ratio\ + and abs(choice_ratio-0.5)<=self.task_logic.task_parameters.warm_max_choice_ratio_bias: # turn off the warm up warmup=0 else: @@ -362,8 +366,11 @@ def _GetMaximumConSelection(self): def _generate_next_coupled_block(self): '''Generate the next block reward probability and block length (coupled task only)''' # determine the reward probability of the next trial based on tasks - self.RewardPairs=self.B_RewardFamilies[int(self.TP_RewardFamily)-1][:int(self.TP_RewardPairsN)] - self.RewardProb=np.array(self.RewardPairs)/np.expand_dims(np.sum(self.RewardPairs,axis=1),axis=1)*float(self.TP_BaseRewardSum) + + tp = self.task_logic.task_parameters + + self.RewardPairs=self.B_RewardFamilies[tp.reward_family-1][:tp.reward_pairs_n] + self.RewardProb=np.array(self.RewardPairs)/np.expand_dims(np.sum(self.RewardPairs,axis=1),axis=1)*tp.base_reward_sum # get the reward probabilities pool RewardProbPool=np.append(self.RewardProb,np.fliplr(self.RewardProb),axis=0) if self.B_RewardProHistory.size!=0: @@ -377,12 +384,12 @@ def _generate_next_coupled_block(self): # get the reward probabilities of the current block 
self.B_CurrentRewardProb=RewardProbPool[random.choice(range(np.shape(RewardProbPool)[0]))] # randomly draw a block length between Min and Max - if self.TP_Randomness=='Exponential': - self.BlockLen = np.array(int(np.random.exponential(float(self.TP_BlockBeta),1)+float(self.TP_BlockMin))) - elif self.TP_Randomness=='Even': - self.BlockLen= np.array(np.random.randint(float(self.TP_BlockMin), float(self.TP_BlockMax)+1)) - if self.BlockLen>float(self.TP_BlockMax): - self.BlockLen=int(self.TP_BlockMax) + if tp.randomness=='Exponential': + self.BlockLen = np.array(int(np.random.exponential(tp.block_beta,1)+tp.block_min)) + elif tp.randomness=='Even': + self.BlockLen= np.array(np.random.randint(tp.block_min, tp.block_max+1)) + if self.BlockLen>tp.block_max: + self.BlockLen=tp.block_max for i in range(len(self.B_ANewBlock)): self.BlockLenHistory[i].append(self.BlockLen) self.B_ANewBlock=np.array([0,0]) @@ -390,18 +397,26 @@ def _generate_next_coupled_block(self): def _generate_next_trial_other_paras(self): # get the ITI time and delay time - if self.TP_Randomness=='Exponential': - self.CurrentITI = float(np.random.exponential(float(self.TP_ITIBeta),1)+float(self.TP_ITIMin)) - elif self.TP_Randomness=='Even': - self.CurrentITI = random.uniform(float(self.TP_ITIMin),float(self.TP_ITIMax)) - if self.CurrentITI>float(self.TP_ITIMax): - self.CurrentITI=float(self.TP_ITIMax) - if self.TP_Randomness=='Exponential': - self.CurrentDelay = float(np.random.exponential(float(self.TP_DelayBeta),1)+float(self.TP_DelayMin)) - elif self.TP_Randomness=='Even': - self.CurrentDelay=random.uniform(float(self.TP_DelayMin),float(self.TP_DelayMax)) - if self.CurrentDelay>float(self.TP_DelayMax): - self.CurrentDelay=float(self.TP_DelayMax) + randomness = self.task_logic.task_parameters.randomness + iti_min = self.task_logic.task_parameters.iti_min + iti_max = self.task_logic.task_parameters.iti_max + iti_beta = self.task_logic.task_parameters.iti_beta + delay_beta = 
self.task_logic.task_parameters.delay_beta + delay_min = self.task_logic.task_parameters.delay_min + delay_max = self.task_logic.task_parameters.delay_max + response_time = self.task_logic.task_parameters.response_time + if randomness=='Exponential': + self.CurrentITI = float(np.random.exponential(iti_beta,1)+iti_min) + elif randomness=='Even': + self.CurrentITI = random.uniform(iti_min,iti_max) + if self.CurrentITI>iti_max: + self.CurrentITI=iti_max + if randomness=='Exponential': + self.CurrentDelay = float(np.random.exponential(delay_beta,1)+delay_min) + elif randomness=='Even': + self.CurrentDelay=random.uniform(delay_min, delay_max) + if self.CurrentDelay>delay_max: + self.CurrentDelay=delay_max # extremely important. Currently, the shaders timer does not allow delay close to zero. if self.CurrentITI<0.05: self.CurrentITI=0.05 @@ -409,7 +424,7 @@ def _generate_next_trial_other_paras(self): self.CurrentDelay=0.05 self.B_ITIHistory.append(self.CurrentITI) self.B_DelayHistory.append(self.CurrentDelay) - self.B_ResponseTimeHistory.append(float(self.TP_ResponseTime)) + self.B_ResponseTimeHistory.append(response_time) def _check_coupled_block_transition(self): '''Check if we should perform a block change for the next trial. 
@@ -449,7 +464,7 @@ def _check_coupled_block_transition(self): # or advanced block switch is not permitted # For the coupled task, hold block switch on both sides if np.all(self.B_ANewBlock==1) and self.AllRewardThisBlock!=-1: - if self.AllRewardThisBlock < float(self.TP_BlockMinReward) \ + if self.AllRewardThisBlock < self.task_logic.task_parameters.block_min_reward \ or self.AdvancedBlockSwitchPermitted==0: self.B_ANewBlock=np.zeros_like(self.B_ANewBlock) self._override_block_len(range(len(self.B_ANewBlock))) @@ -474,7 +489,7 @@ def _override_block_len(self,ind): def _check_advanced_block_switch(self): '''Check if we can switch to a different block''' - if self.TP_AdvancedBlockAuto=='off': + if self.task_logic.task_parameters.advanced_block_auto.value == 'off': self.AdvancedBlockSwitchPermitted=1 return kernel_size=2 @@ -490,7 +505,7 @@ def _check_advanced_block_switch(self): return ChoiceFractionCurrentBlock=ChoiceFraction[-CurrentEffectiveBlockLen:] # decide the current high rewrad side and threshold(for 2 reward probability) - Delta=abs((self.B_CurrentRewardProb[0]-self.B_CurrentRewardProb[1])*float(self.TP_SwitchThr)) + Delta=abs((self.B_CurrentRewardProb[0]-self.B_CurrentRewardProb[1])*self.task_logic.task_parameters.switch_thr) if self.B_CurrentRewardProb[0]>self.B_CurrentRewardProb[1]: # it's the left side with high reward probability # decide the threshold @@ -510,19 +525,15 @@ def _check_advanced_block_switch(self): if consecutive_lengths.size==0: self.AdvancedBlockSwitchPermitted=0 return - # determine if we can switch - if self.TP_PointsInARow=='': - self.AdvancedBlockSwitchPermitted=1 - return - if self.TP_AdvancedBlockAuto=='now': + if self.task_logic.task_parameters.advanced_block_auto.value == 'now': # the courrent condition is qualified - if len(OkPoints) in consecutive_indices[consecutive_lengths>float(self.TP_PointsInARow)][:,1]+1: + if len(OkPoints) in 
consecutive_indices[consecutive_lengths>self.task_logic.task_parameters.points_in_a_row][:,1]+1: self.AdvancedBlockSwitchPermitted=1 else: self.AdvancedBlockSwitchPermitted=0 - elif self.TP_AdvancedBlockAuto=='once': + elif self.task_logic.task_parameters.advanced_block_auto.value == 'once': # it happens before - if np.any(consecutive_lengths>float(self.TP_PointsInARow)): + if np.any(consecutive_lengths>self.task_logic.task_parameters.points_in_a_row): self.AdvancedBlockSwitchPermitted=1 else: self.AdvancedBlockSwitchPermitted=0 @@ -1011,7 +1022,8 @@ def _ShowInformation(self): # Performance info # 1. essential info # left side in the GUI - if (self.TP_AutoReward or int(self.TP_BlockMinReward)>0) and self.win.Start.isChecked(): + if (self.task_logic.task_parameters.auto_reward or + self.task_logic.task_parameters.block_min_reward>0) and self.win.Start.isChecked(): # show the next trial self.win.info_performance_essential_1 = f'Current trial: {self.B_CurrentTrialN + 2}\n' @@ -1078,7 +1090,8 @@ def _ShowInformation(self): self.win.label_info_performance_others.setText(self.win.info_performance_others) elif self.win.default_ui=='ForagingGUI_Ephys.ui': self.win.Other_inforTitle='Session started: '+SessionStartTimeHM+ ' Current: '+CurrentTimeHM+ ' Run: '+str(self.win.Other_RunningTime)+'m' - if (self.TP_AutoReward or int(self.TP_BlockMinReward)>0) and self.win.Start.isChecked(): + if (self.task_logic.task_parameters.auto_reward or + self.task_logic.task_parameters.block_min_reward>0) and self.win.Start.isChecked(): # show the next trial self.win.Other_BasicTitle='Current trial: ' + str(self.B_CurrentTrialN+2) else: @@ -1154,9 +1167,9 @@ def _ShowInformation(self): def _CheckStop(self): '''Stop if there are many ingoral trials or if the maximam trial is exceeded MaxTrial''' - StopIgnore=round(float(self.TP_auto_stop_ignore_ratio_threshold)*int(self.TP_auto_stop_ignore_win)) - MaxTrial=int(self.TP_MaxTrial)-2 # trial number starts from 0 - 
MaxTime=float(self.TP_MaxTime)*60 # convert minutes to seconds + stop_ignore=self.task_logic.task_parameters.stop_ignores + max_trial=self.task_logic.task_parameters.max_trial-2 # trial number starts from 0 + max_time=self.task_logic.task_parameters.max_time*60 # convert minutes to seconds if hasattr(self, 'BS_CurrentRunningTime'): pass else: @@ -1171,7 +1184,7 @@ def _CheckStop(self): non_auto_reward = self.B_AnimalResponseHistory[np.where(~auto_rewards.astype(bool))] # isolate non-auto-reward win_sz = int(self.TP_auto_stop_ignore_win) min_time = int(self.TP_min_time) - if self.BS_CurrentRunningTime/60 >= min_time and len(np.where(non_auto_reward[-win_sz:] == 2)[0]) >= StopIgnore: + if self.BS_CurrentRunningTime/60 >= min_time and len(np.where(non_auto_reward[-win_sz:] == 2)[0]) >= stop_ignore: stop=True threshold = float(self.TP_auto_stop_ignore_ratio_threshold)*100 msg = f'Stopping the session because the mouse has ignored at least ' \ f'consecutive trials' warning_label_text = 'Stop because ignore trials exceed or equal: '+\ f'{threshold}% of {self.TP_auto_stop_ignore_win}' - elif self.B_CurrentTrialN>MaxTrial: + elif self.B_CurrentTrialN>max_trial: stop=True - msg = 'Stopping the session because the mouse has reached the maximum trial count: {}'.format(self.TP_MaxTrial) - warning_label_text = 'Stop because maximum trials exceed or equal: '+self.TP_MaxTrial - elif self.BS_CurrentRunningTime>MaxTime: + msg = 'Stopping the session because the mouse has reached the maximum trial count: {}'.format(max_trial) + warning_label_text = f'Stop because maximum trials exceed or equal: {max_trial}' + elif self.BS_CurrentRunningTime>max_time: stop=True - msg = 'Stopping the session because the session running time has reached {} minutes'.format(self.TP_MaxTime) - warning_label_text = 'Stop because running time exceeds or equals: '+self.TP_MaxTime+'m' + msg = 'Stopping the session because the session running time has reached {} 
minutes'.format(max_time) + warning_label_text = f'Stop because running time exceeds or equals: {max_time}m' else: stop=False @@ -1217,31 +1230,34 @@ def _CheckStop(self): def _CheckAutoWater(self): '''Check if it should be an auto water trial''' - if self.TP_AutoReward: - UnrewardedN=int(self.TP_Unrewarded) - IgnoredN=int(self.TP_Ignored) - if UnrewardedN<=0: + if self.task_logic.task_parameters.auto_reward: + unrewarded = self.task_logic.task_parameters.unrewarded + ignored=self.task_logic.task_parameters.ignored + if unrewarded<=0: self.CurrentAutoReward=1 - logging.warning('Auto water because unrewarded trials exceed: '+self.TP_Unrewarded, + logging.warning(f'Auto water because unrewarded trials exceed: {unrewarded}', extra={'tags': [self.win.warning_log_tag]}) - elif IgnoredN <=0: - logging.warning('Auto water because ignored trials exceed: '+self.TP_Ignored, + elif ignored <=0: + logging.warning(f'Auto water because ignored trials exceed: {ignored}', extra={'tags': [self.win.warning_log_tag]}) self.CurrentAutoReward=1 else: - if np.shape(self.B_AnimalResponseHistory)[0]>=IgnoredN or np.shape(self.B_RewardedHistory[0])[0]>=UnrewardedN: + if np.shape(self.B_AnimalResponseHistory)[0]>=ignored or \ + np.shape(self.B_RewardedHistory[0])[0]>=unrewarded: # auto reward is considered as reward B_RewardedHistory=self.B_RewardedHistory.copy() Ind=range(len(self.B_RewardedHistory[0])) for i in range(len(self.B_RewardedHistory)): B_RewardedHistory[i]=np.logical_or(self.B_RewardedHistory[i],self.B_AutoWaterTrial[i][Ind]) - if np.all(self.B_AnimalResponseHistory[-IgnoredN:]==2) and np.shape(self.B_AnimalResponseHistory)[0]>=IgnoredN: + if np.all(self.B_AnimalResponseHistory[-ignored:]==2) and np.shape(self.B_AnimalResponseHistory)[0]>=ignored: self.CurrentAutoReward=1 - logging.warning('Auto water because ignored trials exceed: '+self.TP_Ignored, + logging.warning(f'Auto water because ignored trials exceed: {ignored}', extra={'tags': [self.win.warning_log_tag]}) - elif 
(np.all(B_RewardedHistory[0][-UnrewardedN:]==False) and np.all(B_RewardedHistory[1][-UnrewardedN:]==False) and np.shape(B_RewardedHistory[0])[0]>=UnrewardedN): + elif (np.all(B_RewardedHistory[0][-unrewarded:]==False) + and np.all(B_RewardedHistory[1][-unrewarded:]==False) + and np.shape(B_RewardedHistory[0])[0]>=unrewarded): self.CurrentAutoReward=1 - logging.warning('Auto water because unrewarded trials exceed: '+self.TP_Unrewarded, + logging.warning(f'Auto water because unrewarded trials exceed: {unrewarded}', extra={'tags': [self.win.warning_log_tag]}) else: self.CurrentAutoReward=0 @@ -1283,7 +1299,7 @@ def _GetLaserWaveForm(self): elif self.CLP_LaserStart=='Go cue' and self.CLP_LaserEnd=='Trial start': # The duration is inaccurate as it doesn't account for time outside of bonsai (can be solved in Bonsai) # the duration is determined by TP_ResponseTime, self.CLP_OffsetStart, self.CLP_OffsetEnd - self.CLP_CurrentDuration=float(self.TP_ResponseTime)-self.CLP_OffsetStart+self.CLP_OffsetEnd + self.CLP_CurrentDuration=self.task_logic.task_parameters.response_time-self.CLP_OffsetStart+self.CLP_OffsetEnd else: pass self.B_LaserDuration.append(self.CLP_CurrentDuration) @@ -1469,13 +1485,13 @@ def _InitiateATrial(self,Channel1,Channel4): # determine auto water if self.CurrentAutoReward==1: self.CurrentAutoRewardTrial=[0,0] - if self.TP_AutoWaterType=='Natural': + if self.task_logic.task_parameters.auto_water_type.value == 'Natural': for i in range(len(self.CurrentBait)): if self.CurrentBait[i]==True: self.CurrentAutoRewardTrial[i]=1 - if self.TP_AutoWaterType=='Both': + if self.task_logic.task_parameters.auto_water_type.value == 'Both': self.CurrentAutoRewardTrial=[1,1] - if self.TP_AutoWaterType=='High pro': + if self.task_logic.task_parameters.auto_water_type.value == 'High pro': if self.B_CurrentRewardProb[0]>self.B_CurrentRewardProb[1]: self.CurrentAutoRewardTrial=[1,0] elif self.B_CurrentRewardProb[0] Date: Thu, 16 Jan 2025 15:54:59 -0800 Subject: [PATCH 10/23] 
renaming variables --- src/foraging_gui/Foraging.py | 21 +++++++++++---------- src/foraging_gui/MyFunctions.py | 4 ++-- src/foraging_gui/Visualization.py | 20 ++++++++++---------- 3 files changed, 23 insertions(+), 22 deletions(-) diff --git a/src/foraging_gui/Foraging.py b/src/foraging_gui/Foraging.py index 8be684245..73500aea8 100644 --- a/src/foraging_gui/Foraging.py +++ b/src/foraging_gui/Foraging.py @@ -1970,7 +1970,7 @@ def _set_parameters(self,key,widget_dict,parameters): def _Randomness(self): '''enable/disable some fields in the Block/Delay Period/ITI''' - if self.task_logic.task_parameters.Randomness == 'Exponential': + if self.task_logic.task_parameters.randomness == 'Exponential': self.label_14.setEnabled(True) self.label_18.setEnabled(True) self.label_39.setEnabled(True) @@ -1979,7 +1979,7 @@ def _Randomness(self): self.ITIBeta.setEnabled(True) # if self.Task.currentText()!='RewardN': # self.BlockBeta.setStyleSheet("color: black;border: 1px solid gray;background-color: white;") - elif self.task_logic.task_parameters.Randomness == 'Even': + elif self.task_logic.task_parameters.randomness == 'Even': self.label_14.setEnabled(False) self.label_18.setEnabled(False) self.label_39.setEnabled(False) @@ -1993,7 +1993,7 @@ def _Randomness(self): def _AdvancedBlockAuto(self): '''enable/disable some fields in the AdvancedBlockAuto''' - if self.task_logic.task_parameters.AdvancedBlockAuto== 'off': + if self.task_logic.task_parameters.advanced_block_auto == 'off': self.label_54.setEnabled(False) self.label_60.setEnabled(False) self.SwitchThr.setEnabled(False) @@ -2167,18 +2167,19 @@ def _CheckTextChange(self): def _CheckFormat(self,child): '''Check if the input format is correct''' + tp = self.task_logic.task_parameters if child.objectName()=='RewardFamily': # When we change the RewardFamily, sometimes the RewardPairsN is larger than available reward pairs in this family. 
try: - if self.task_logic.task_parameters.RewardPairsN>len(self.RewardFamilies[self.task_logic.task_parameters.RewardFamily - 1]): - self.RewardPairsN.setText(str(len(self.RewardFamilies[self.task_logic.task_parameters.RewardFamily - 1]))) + if tp.reward_pairs_n>len(self.RewardFamilies[tp.reward_family - 1]): + self.RewardPairsN.setValue(len(self.RewardFamilies[tp.reward_family - 1])) return 1 except Exception as e: logging.error(traceback.format_exc()) return 0 if child.objectName()=='RewardFamily' or child.objectName()=='RewardPairsN' or child.objectName()=='BaseRewardSum': try: - self.RewardPairs= self.RewardFamilies[self.task_logic.task_parameters.RewardFamily - 1][:self.task_logic.task_parameters.RewardPairsN] - if self.task_logic.task_parameters.RewardPairsN>len(self.RewardFamilies[self.task_logic.task_parameters.RewardFamily - 1]): + self.RewardPairs= self.RewardFamilies[tp.reward_family - 1][:tp.reward_pairs_n] + if tp.reward_pairs_n>len(self.RewardFamilies[tp.reward_family - 1]): return 0 else: return 1 @@ -2187,7 +2188,7 @@ def _CheckFormat(self,child): return 0 if child.objectName()=='UncoupledReward': try: - input_string = self.task_logic.task_parameters.UncoupledReward + input_string = tp.uncoupled_reward if input_string=='': # do not permit empty uncoupled reward return 0 # remove any square brackets and spaces from the string @@ -2370,7 +2371,7 @@ def _ShowRewardPairs(self): self.RewardProb=np.array(self.RewardPairs)/np.expand_dims(np.sum(self.RewardPairs,axis=1),axis=1)*\ tp.base_reward_sum elif self.behavior_session_model.experiment in ['Uncoupled Baiting','Uncoupled Without Baiting']: - input_string = self.task_logic.task_parameters.UncoupledReward + input_string = tp.uncoupled_reward # remove any square brackets and spaces from the string @@ -3685,7 +3686,7 @@
QMessageBox.Ok) def _AutoReward(self): - if self.task_logic.task_parameters.AutoReward: + if self.task_logic.task_parameters.auto_reward: self.AutoReward.setStyleSheet("background-color : green;") self.AutoReward.setText('Auto water On') for widget in ['AutoWaterType', 'Multiplier', 'Unrewarded', 'Ignored']: diff --git a/src/foraging_gui/MyFunctions.py b/src/foraging_gui/MyFunctions.py index 6789f9b3d..050ba1dca 100644 --- a/src/foraging_gui/MyFunctions.py +++ b/src/foraging_gui/MyFunctions.py @@ -1809,14 +1809,14 @@ def _set_valve_time_right(self,channel3,RightValue=0.01,Multiplier=1): def _GiveLeft(self,channel3): '''manually give left water''' - channel3.LeftValue1(float(self.win.LeftValue.text()) * 1000 * self.win.task_logic.task_parameters.Multiplier) + channel3.LeftValue1(float(self.win.LeftValue.text()) * 1000 * self.win.task_logic.task_parameters.multiplier) time.sleep(0.01) channel3.ManualWater_Left(int(1)) channel3.LeftValue1(float(self.win.LeftValue.text())*1000) def _GiveRight(self,channel3): '''manually give right water''' - channel3.RightValue1(float(self.win.RightValue.text()) * 1000 * self.win.task_logic.task_parameters.Multiplier) + channel3.RightValue1(float(self.win.RightValue.text()) * 1000 * self.win.task_logic.task_parameters.multiplier) time.sleep(0.01) channel3.ManualWater_Right(int(1)) channel3.RightValue1(float(self.win.RightValue.text())*1000) diff --git a/src/foraging_gui/Visualization.py b/src/foraging_gui/Visualization.py index 928392180..433fbcda6 100644 --- a/src/foraging_gui/Visualization.py +++ b/src/foraging_gui/Visualization.py @@ -522,23 +522,23 @@ def __init__(self,GeneratedTrials=None,dpi=100,width=5, height=4): FigureCanvas.__init__(self, self.fig) def _Update(self,win): # randomly draw a block length between Min and Max - SampleMethods=win.task_logic.task_parameters.Randomness + SampleMethods=win.task_logic.task_parameters.randomness # block length - Min=win.task_logic.task_parameters.BlockMin - 
Max=win.task_logic.task_parameters.BlockMax - Beta=win.task_logic.task_parameters.BlockBeta + Min=win.task_logic.task_parameters.block_min + Max=win.task_logic.task_parameters.block_max + Beta=win.task_logic.task_parameters.block_beta DataType='int' SampledBlockLen=self._Sample(Min=Min,Max=Max,SampleMethods=SampleMethods,Beta=Beta,DataType=DataType) # ITI - Min=win.task_logic.task_parameters.ITIMin - Max=win.task_logic.task_parameters.ITIMax - Beta=win.task_logic.task_parameters.ITIBeta + Min=win.task_logic.task_parameters.iti_min + Max=win.task_logic.task_parameters.iti_max + Beta=win.task_logic.task_parameters.iti_beta DataType='float' SampledITI=self._Sample(Min=Min,Max=Max,SampleMethods=SampleMethods,Beta=Beta,DataType=DataType) # Delay - Min=win.task_logic.task_parameters.DelayMin - Max=win.task_logic.task_parameters.DelayMax - Beta=win.task_logic.task_parameters.DelayBeta + Min=win.task_logic.task_parameters.delay_min + Max=win.task_logic.task_parameters.delay_max + Beta=win.task_logic.task_parameters.delay_beta DataType='float' SampledDelay=self._Sample(Min=Min,Max=Max,SampleMethods=SampleMethods,Beta=Beta,DataType=DataType) self.ax1.cla() From b460da749ae78e7a32c393d2471c2f0e9d9a67de Mon Sep 17 00:00:00 2001 From: Micah Woodard Date: Thu, 16 Jan 2025 15:57:00 -0800 Subject: [PATCH 11/23] adding in task logic argument --- src/foraging_gui/Foraging.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/foraging_gui/Foraging.py b/src/foraging_gui/Foraging.py index 73500aea8..13bacd07e 100644 --- a/src/foraging_gui/Foraging.py +++ b/src/foraging_gui/Foraging.py @@ -3396,7 +3396,7 @@ def _LoadVisualization(self): '''To visulize the training when loading a session''' self.ToInitializeVisual=1 Obj=self.Obj - self.GeneratedTrials=GenerateTrials(self) + self.GeneratedTrials=GenerateTrials(self, self.task_logic) # Iterate over all attributes of the GeneratedTrials object for attr_name in dir(self.GeneratedTrials): if attr_name in Obj.keys(): @@ 
-4240,7 +4240,7 @@ def _Start(self): self.Camera_dialog.StartRecording.setChecked(True) self.SessionStartTime=datetime.now() self.Other_SessionStartTime=str(self.SessionStartTime) # for saving - GeneratedTrials=GenerateTrials(self) + GeneratedTrials=GenerateTrials(self, self.task_logic) self.GeneratedTrials=GeneratedTrials self.StartANewSession=0 PlotM=PlotV(win=self,GeneratedTrials=GeneratedTrials,width=5, height=4) From 73c1c9e8d6ae565c4b712ae1ab51880dac84d6f5 Mon Sep 17 00:00:00 2001 From: Micah Woodard Date: Fri, 17 Jan 2025 07:37:28 -0800 Subject: [PATCH 12/23] fixing imports --- src/foraging_gui/Foraging.py | 9 +++------ 1 file changed, 3 insertions(+), 6 deletions(-) diff --git a/src/foraging_gui/Foraging.py b/src/foraging_gui/Foraging.py index 13bacd07e..37c393976 100644 --- a/src/foraging_gui/Foraging.py +++ b/src/foraging_gui/Foraging.py @@ -9,30 +9,27 @@ import logging from hashlib import md5 -#import logging_loki +import logging_loki import socket import harp import threading -from random import randint import yaml import copy import shutil from pathlib import Path from datetime import date, datetime, timezone, timedelta -import csv from aind_slims_api import SlimsClient from aind_slims_api import models import serial import numpy as np import pandas as pd -#from pykeepass import PyKeePass +from pykeepass import PyKeePass from matplotlib.backends.backend_qt5agg import NavigationToolbar2QT as NavigationToolbar from scipy.io import savemat, loadmat -from PyQt5.QtWidgets import QApplication, QMainWindow, QMessageBox, QSizePolicy, QLineEdit, QComboBox, QPushButton, QDoubleSpinBox +from PyQt5.QtWidgets import QApplication, QMainWindow, QMessageBox, QSizePolicy from PyQt5.QtWidgets import QFileDialog,QVBoxLayout, QGridLayout, QLabel from PyQt5 import QtWidgets,QtGui,QtCore, uic from PyQt5.QtCore import QThreadPool,Qt,QThread -from PyQt5.QtGui import QIntValidator, QDoubleValidator from pyOSC3.OSC3 import OSCStreamingClient import webbrowser from pydantic 
import ValidationError From 156d40ecbc91bb660fd1f2b5cc1c3d5055be5f76 Mon Sep 17 00:00:00 2001 From: Micah Woodard Date: Fri, 17 Jan 2025 07:54:37 -0800 Subject: [PATCH 13/23] better variable calling --- src/foraging_gui/MyFunctions.py | 35 +++++++++++++-------------------- 1 file changed, 14 insertions(+), 21 deletions(-) diff --git a/src/foraging_gui/MyFunctions.py b/src/foraging_gui/MyFunctions.py index 050ba1dca..09693ec6e 100644 --- a/src/foraging_gui/MyFunctions.py +++ b/src/foraging_gui/MyFunctions.py @@ -397,26 +397,19 @@ def _generate_next_coupled_block(self): def _generate_next_trial_other_paras(self): # get the ITI time and delay time - randomness = self.task_logic.task_parameters.randomness - iti_min = self.task_logic.task_parameters.iti_min - iti_max = self.task_logic.task_parameters.iti_max - iti_beta = self.task_logic.task_parameters.iti_beta - delay_beta = self.task_logic.task_parameters.delay_beta - delay_min = self.task_logic.task_parameters.delay_min - delay_max = self.task_logic.task_parameters.delay_max - response_time = self.task_logic.task_parameters.response_time - if randomness=='Exponential': - self.CurrentITI = float(np.random.exponential(iti_beta,1)+iti_min) - elif randomness=='Even': - self.CurrentITI = random.uniform(iti_min,iti_max) - if self.CurrentITI>iti_max: - self.CurrentITI=iti_max - if randomness=='Exponential': - self.CurrentDelay = float(np.random.exponential(delay_beta,1)+delay_min) - elif randomness=='Even': - self.CurrentDelay=random.uniform(delay_min, delay_max) - if self.CurrentDelay>delay_max: - self.CurrentDelay=delay_max + tp = self.task_logic.task_parameters + if tp.randomness=='Exponential': + self.CurrentITI = float(np.random.exponential(tp.iti_beta,1)+tp.iti_min) + elif tp.randomness=='Even': + self.CurrentITI = random.uniform(tp.iti_min,tp.iti_max) + if self.CurrentITI>tp.iti_max: + self.CurrentITI=tp.iti_max + if tp.randomness=='Exponential': + self.CurrentDelay = 
float(np.random.exponential(tp.delay_beta,1)+tp.delay_min) + elif tp.randomness=='Even': + self.CurrentDelay=random.uniform(tp.delay_min, tp.delay_max) + if self.CurrentDelay>tp.delay_max: + self.CurrentDelay=tp.delay_max # extremely important. Currently, the shaders timer does not allow delay close to zero. if self.CurrentITI<0.05: self.CurrentITI=0.05 @@ -424,7 +417,7 @@ def _generate_next_trial_other_paras(self): self.CurrentDelay=0.05 self.B_ITIHistory.append(self.CurrentITI) self.B_DelayHistory.append(self.CurrentDelay) - self.B_ResponseTimeHistory.append(response_time) + self.B_ResponseTimeHistory.append(tp.response_time) def _check_coupled_block_transition(self): '''Check if we should perform a block change for the next trial. From e0ee5f82f93d9032254a85959e27afa905b9abe6 Mon Sep 17 00:00:00 2001 From: Micah Woodard Date: Fri, 17 Jan 2025 08:00:52 -0800 Subject: [PATCH 14/23] fixed task logic reference --- src/foraging_gui/MyFunctions.py | 32 ++++++++++++++++---------------- 1 file changed, 16 insertions(+), 16 deletions(-) diff --git a/src/foraging_gui/MyFunctions.py b/src/foraging_gui/MyFunctions.py index 09693ec6e..607111606 100644 --- a/src/foraging_gui/MyFunctions.py +++ b/src/foraging_gui/MyFunctions.py @@ -398,23 +398,23 @@ def _generate_next_coupled_block(self): def _generate_next_trial_other_paras(self): # get the ITI time and delay time tp = self.task_logic.task_parameters - if tp.randomness=='Exponential': + if tp.randomness == 'Exponential': self.CurrentITI = float(np.random.exponential(tp.iti_beta,1)+tp.iti_min) - elif tp.randomness=='Even': + elif tp.randomness == 'Even': self.CurrentITI = random.uniform(tp.iti_min,tp.iti_max) - if self.CurrentITI>tp.iti_max: - self.CurrentITI=tp.iti_max - if tp.randomness=='Exponential': + if self.CurrentITI > tp.iti_max: + self.CurrentITI = tp.iti_max + if tp.randomness == 'Exponential': self.CurrentDelay = float(np.random.exponential(tp.delay_beta,1)+tp.delay_min) - elif tp.randomness=='Even': - 
self.CurrentDelay=random.uniform(tp.delay_min, tp.delay_max) - if self.CurrentDelay>tp.delay_max: - self.CurrentDelay=tp.delay_max - # extremely important. Currently, the shaders timer does not allow delay close to zero. - if self.CurrentITI<0.05: - self.CurrentITI=0.05 - if self.CurrentDelay<0.05: - self.CurrentDelay=0.05 + elif tp.randomness == 'Even': + self.CurrentDelay = random.uniform(tp.delay_min, tp.delay_max) + if self.CurrentDelay > tp.delay_max: + self.CurrentDelay = tp.delay_max + # extremely important. Currently, the shaders timer does not allow delay close to zero. + if self.CurrentITI < 0.05: + self.CurrentITI = 0.05 + if self.CurrentDelay < 0.05: + self.CurrentDelay = 0.05 self.B_ITIHistory.append(self.CurrentITI) self.B_DelayHistory.append(self.CurrentDelay) self.B_ResponseTimeHistory.append(tp.response_time) @@ -1802,14 +1802,14 @@ def _set_valve_time_right(self,channel3,RightValue=0.01,Multiplier=1): def _GiveLeft(self,channel3): '''manually give left water''' - channel3.LeftValue1(float(self.win.LeftValue.text()) * 1000 * self.win.task_logic.task_parameters.multiplier) + channel3.LeftValue1(float(self.win.LeftValue.text()) * 1000 * self.task_logic.task_parameters.multiplier) time.sleep(0.01) channel3.ManualWater_Left(int(1)) channel3.LeftValue1(float(self.win.LeftValue.text())*1000) def _GiveRight(self,channel3): '''manually give right water''' - channel3.RightValue1(float(self.win.RightValue.text()) * 1000 * self.win.task_logic.task_parameters.multiplier) + channel3.RightValue1(float(self.win.RightValue.text()) * 1000 * self.task_logic.task_parameters.multiplier) time.sleep(0.01) channel3.ManualWater_Right(int(1)) channel3.RightValue1(float(self.win.RightValue.text())*1000) From 63e07269a9e2db102ae86add468f53f85e620499 Mon Sep 17 00:00:00 2001 From: Micah Woodard Date: Fri, 17 Jan 2025 09:22:18 -0800 Subject: [PATCH 15/23] fixing ignores --- src/foraging_gui/MyFunctions.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git 
a/src/foraging_gui/MyFunctions.py b/src/foraging_gui/MyFunctions.py index 646be6851..15534543b 100644 --- a/src/foraging_gui/MyFunctions.py +++ b/src/foraging_gui/MyFunctions.py @@ -1161,7 +1161,7 @@ def _ShowInformation(self): def _CheckStop(self): '''Stop if there are many ingoral trials or if the maximam trial is exceeded MaxTrial''' - stop_ignore=self.task_logic.task_parameters.stop_ignore + stop_ignore=self.task_logic.task_parameters.stop_ignores max_trial=self.task_logic.task_parameters.max_trial-2 # trial number starts from 0 max_time=self.task_logic.task_parameters.max_time*60 # convert minutes to seconds if hasattr(self, 'BS_CurrentRunningTime'): From f1a6ce79b43e2be3335d7723a9c5952c5c85baea Mon Sep 17 00:00:00 2001 From: Micah Woodard Date: Fri, 17 Jan 2025 14:16:01 -0800 Subject: [PATCH 16/23] updating ignore stopping parameters --- src/foraging_gui/Foraging.py | 14 ++++++++------ src/foraging_gui/MyFunctions.py | 16 ++++++++-------- 2 files changed, 16 insertions(+), 14 deletions(-) diff --git a/src/foraging_gui/Foraging.py b/src/foraging_gui/Foraging.py index ff28cb7df..fff21c9c7 100644 --- a/src/foraging_gui/Foraging.py +++ b/src/foraging_gui/Foraging.py @@ -291,7 +291,8 @@ def initialize_task_parameters(self) -> AindDynamicForagingTaskParameters: # Response time response_time=self.ResponseTime.value(), reward_consume_time=self.RewardConsumeTime.value(), - stop_ignores=round(self.auto_stop_ignore_win.value()*self.auto_stop_ignore_ratio_threshold.value()), + auto_stop_ignore_window=self.auto_stop_ignore_win.value(), + auto_stop_ignore_ratio_threshold=self.auto_stop_ignore_ratio_threshold.value(), # Auto block advanced_block_auto=AdvancedBlockMode(self.AdvancedBlockAuto.currentText()), switch_thr=self.SwitchThr.value(), @@ -345,11 +346,12 @@ def connect_task_parameters(self) -> None: self.ResponseTime.valueChanged.connect(lambda v: setattr(self.task_logic.task_parameters, 'response_time', v)) self.RewardConsumeTime.valueChanged.connect(lambda v: 
setattr(self.task_logic.task_parameters, 'reward_consume_time', v)) - self.auto_stop_ignore_win.valueChanged.connect(lambda v: setattr(self.task_logic.task_parameters, 'stop_ignores', - round(v * self.auto_stop_ignore_ratio_threshold.value()))) - self.auto_stop_ignore_ratio_threshold.valueChanged.connect(lambda v: setattr(self.task_logic.task_parameters, - 'stop_ignores', - round(self.auto_stop_ignore_win.value() * v))) + self.auto_stop_ignore_win.valueChanged.connect(lambda v: + setattr(self.task_logic.task_parameters, + 'auto_stop_ignore_window', v)) + self.auto_stop_ignore_ratio_threshold.valueChanged.connect(lambda v: + setattr(self.task_logic.task_parameters, + 'auto_stop_ignore_ratio_threshold', v)) self.AdvancedBlockAuto.currentTextChanged.connect( lambda text: setattr(self.task_logic.task_parameters, 'advanced_block_auto', AdvancedBlockMode(text))) self.SwitchThr.valueChanged.connect(lambda v:setattr(self.task_logic.task_parameters, 'switch_thr', v)) diff --git a/src/foraging_gui/MyFunctions.py b/src/foraging_gui/MyFunctions.py index 15534543b..835354afc 100644 --- a/src/foraging_gui/MyFunctions.py +++ b/src/foraging_gui/MyFunctions.py @@ -1161,9 +1161,10 @@ def _ShowInformation(self): def _CheckStop(self): '''Stop if there are many ingoral trials or if the maximam trial is exceeded MaxTrial''' - stop_ignore=self.task_logic.task_parameters.stop_ignores - max_trial=self.task_logic.task_parameters.max_trial-2 # trial number starts from 0 - max_time=self.task_logic.task_parameters.max_time*60 # convert minutes to seconds + tp = self.task_logic.task_parameters + stop_ignore=round(tp.auto_stop_ignore_win*tp.auto_stop_ignore_ratio_threshold) + max_trial=tp.max_trial-2 # trial number starts from 0 + max_time=tp.max_time*60 # convert minutes to seconds if hasattr(self, 'BS_CurrentRunningTime'): pass else: @@ -1176,16 +1177,15 @@ def _CheckStop(self): # Check for reasons to stop early auto_rewards = np.array([any(x) for x in 
np.column_stack(self.B_AutoWaterTrial.astype(bool))]) non_auto_reward = self.B_AnimalResponseHistory[np.where(~auto_rewards.astype(bool))] # isolate non-auto-reward - win_sz = int(self.TP_auto_stop_ignore_win) min_time = int(self.TP_min_time) - if self.BS_CurrentRunningTime/60 >= min_time and len(np.where(non_auto_reward[-win_sz:] == 2)[0]) >= stop_ignore: + if self.BS_CurrentRunningTime/60 >= min_time and len(np.where(non_auto_reward[-tp.auto_stop_ignore_win:] == 2)[0]) >= stop_ignore: stop=True - threshold = float(self.TP_auto_stop_ignore_ratio_threshold)*100 + threshold = tp.auto_stop_ignore_ratio_threshold*100 msg = f'Stopping the session because the mouse has ignored at least ' \ - f'{threshold}% of {self.TP_auto_stop_ignore_win} ' \ + f'{threshold}% of {tp.auto_stop_ignore_win} ' \ f'consecutive trials' warning_label_text = 'Stop because ignore trials exceed or equal: '+\ - f'{threshold}% of {self.TP_auto_stop_ignore_win}' + f'{threshold}% of {tp.auto_stop_ignore_win}' elif self.B_CurrentTrialN>max_trial: stop=True msg = 'Stopping the session because the mouse has reached the maximum trial count: {}'.format(max_trial) From cecd667d8a8dec2cc0b07562ee6ebc2ea36bbd6c Mon Sep 17 00:00:00 2001 From: Micah Woodard Date: Fri, 24 Jan 2025 07:46:52 -0800 Subject: [PATCH 17/23] loosening package requirements --- pyproject.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pyproject.toml b/pyproject.toml index cf0da7f1b..2a7eb87dc 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -29,7 +29,7 @@ dependencies = [ "aind-slims-api@git+https://github.com/AllenNeuralDynamics/aind-slims-api@main", "aind-dynamic-foraging-models@git+https://github.com/AllenNeuralDynamics/aind-dynamic-foraging-models@main", "aind-behavior-dynamic-foraging@git+https://github.com/AllenNeuralDynamics/Aind.Behavior.DynamicForaging@main", - "aind-behavior-services >=0.8, <0.9", + "aind-behavior-services", "pynwb >=2, <3", "requests >=2, <3", "harp-python >=0.3, <2", From 
25a5a4630176219aed77cdcc7c33ef9fc9f51cae Mon Sep 17 00:00:00 2001 From: Micah Woodard Date: Fri, 24 Jan 2025 08:27:16 -0800 Subject: [PATCH 18/23] different dynamic foraging branch --- pyproject.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pyproject.toml b/pyproject.toml index 2a7eb87dc..7e1b372ec 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -28,7 +28,7 @@ dependencies = [ "aind-auto-train@git+https://github.com/AllenNeuralDynamics/aind-foraging-behavior-bonsai-automatic-training.git@main", "aind-slims-api@git+https://github.com/AllenNeuralDynamics/aind-slims-api@main", "aind-dynamic-foraging-models@git+https://github.com/AllenNeuralDynamics/aind-dynamic-foraging-models@main", - "aind-behavior-dynamic-foraging@git+https://github.com/AllenNeuralDynamics/Aind.Behavior.DynamicForaging@main", + "aind-behavior-dynamic-foraging@git+https://github.com/AllenNeuralDynamics/Aind.Behavior.DynamicForaging@test-remove-curriculum", "aind-behavior-services", "pynwb >=2, <3", "requests >=2, <3", From 7ecc112c129353a46829ab8d98d40a8f62c96755 Mon Sep 17 00:00:00 2001 From: Micah Woodard Date: Fri, 24 Jan 2025 08:36:41 -0800 Subject: [PATCH 19/23] use main --- pyproject.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pyproject.toml b/pyproject.toml index 7e1b372ec..2a7eb87dc 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -28,7 +28,7 @@ dependencies = [ "aind-auto-train@git+https://github.com/AllenNeuralDynamics/aind-foraging-behavior-bonsai-automatic-training.git@main", "aind-slims-api@git+https://github.com/AllenNeuralDynamics/aind-slims-api@main", "aind-dynamic-foraging-models@git+https://github.com/AllenNeuralDynamics/aind-dynamic-foraging-models@main", - "aind-behavior-dynamic-foraging@git+https://github.com/AllenNeuralDynamics/Aind.Behavior.DynamicForaging@test-remove-curriculum", + "aind-behavior-dynamic-foraging@git+https://github.com/AllenNeuralDynamics/Aind.Behavior.DynamicForaging@main", "aind-behavior-services", 
"pynwb >=2, <3", "requests >=2, <3", From 6ceb5fbc98be436c82ed17feb76d5d1595564a6b Mon Sep 17 00:00:00 2001 From: Micah Woodard Date: Fri, 24 Jan 2025 08:42:13 -0800 Subject: [PATCH 20/23] fixing auto_stop_ignore_win --- src/foraging_gui/Foraging.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/foraging_gui/Foraging.py b/src/foraging_gui/Foraging.py index fff21c9c7..9339e31f7 100644 --- a/src/foraging_gui/Foraging.py +++ b/src/foraging_gui/Foraging.py @@ -291,7 +291,7 @@ def initialize_task_parameters(self) -> AindDynamicForagingTaskParameters: # Response time response_time=self.ResponseTime.value(), reward_consume_time=self.RewardConsumeTime.value(), - auto_stop_ignore_window=self.auto_stop_ignore_win.value(), + auto_stop_ignore_win=self.auto_stop_ignore_win.value(), auto_stop_ignore_ratio_threshold=self.auto_stop_ignore_ratio_threshold.value(), # Auto block advanced_block_auto=AdvancedBlockMode(self.AdvancedBlockAuto.currentText()), From fdb9e8316859a9af8bde8293e4e01e19a8c8ecb2 Mon Sep 17 00:00:00 2001 From: Micah Woodard Date: Fri, 24 Jan 2025 08:43:49 -0800 Subject: [PATCH 21/23] fixing setting of auto_stop_win --- src/foraging_gui/Foraging.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/foraging_gui/Foraging.py b/src/foraging_gui/Foraging.py index 9339e31f7..a1b67d031 100644 --- a/src/foraging_gui/Foraging.py +++ b/src/foraging_gui/Foraging.py @@ -348,7 +348,7 @@ def connect_task_parameters(self) -> None: setattr(self.task_logic.task_parameters, 'reward_consume_time', v)) self.auto_stop_ignore_win.valueChanged.connect(lambda v: setattr(self.task_logic.task_parameters, - 'auto_stop_ignore_window', v)) + 'auto_stop_ignore_win', v)) self.auto_stop_ignore_ratio_threshold.valueChanged.connect(lambda v: setattr(self.task_logic.task_parameters, 'auto_stop_ignore_ratio_threshold', v)) From 893f61af133001f51b7032f6b98bd00a1ad3bdb0 Mon Sep 17 00:00:00 2001 From: Micah Woodard Date: Fri, 24 Jan 2025 08:49:22 -0800 Subject: 
[PATCH 22/23] fix typo --- src/foraging_gui/Foraging.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/foraging_gui/Foraging.py b/src/foraging_gui/Foraging.py index a1b67d031..9e0028d98 100644 --- a/src/foraging_gui/Foraging.py +++ b/src/foraging_gui/Foraging.py @@ -2361,7 +2361,7 @@ def _Task(self): self.label_26.setGeometry(QtCore.QRect(460, 128, 146, 16)) # set block length to be 1 self.BlockMin.setValue(1) - self.BlockMax.setVlue(1) + self.BlockMax.setValue(1) def _ShowRewardPairs(self): '''Show reward pairs''' From d09f84ba5beb29ff629ca7adc66206c5ce73ecac Mon Sep 17 00:00:00 2001 From: Micah Woodard Date: Fri, 24 Jan 2025 10:00:52 -0800 Subject: [PATCH 23/23] pin aind-behavior-service version --- pyproject.toml | 2 +- src/foraging_gui/Foraging.py | 1 - 2 files changed, 1 insertion(+), 2 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index 2a7eb87dc..ae6477f33 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -29,7 +29,7 @@ dependencies = [ "aind-slims-api@git+https://github.com/AllenNeuralDynamics/aind-slims-api@main", "aind-dynamic-foraging-models@git+https://github.com/AllenNeuralDynamics/aind-dynamic-foraging-models@main", "aind-behavior-dynamic-foraging@git+https://github.com/AllenNeuralDynamics/Aind.Behavior.DynamicForaging@main", - "aind-behavior-services", + "aind-behavior-services==0.9", "pynwb >=2, <3", "requests >=2, <3", "harp-python >=0.3, <2", diff --git a/src/foraging_gui/Foraging.py b/src/foraging_gui/Foraging.py index 9e0028d98..33ce0f8ab 100644 --- a/src/foraging_gui/Foraging.py +++ b/src/foraging_gui/Foraging.py @@ -2097,7 +2097,6 @@ def keyPressEvent(self, event=None,allow_reset=False): new = float(child.text()) if new != old: logging.info('Changing parameter: {}, {} -> {}'.format(child.objectName(), old,new)) - # update the current training parameters self._GetTrainingParameters()