From 0c5e8eed12544fc7628f0f89fdab7fb160fe5bb7 Mon Sep 17 00:00:00 2001 From: pauladkisson Date: Mon, 26 Jan 2026 12:44:42 -0800 Subject: [PATCH 01/53] deleted temp markdowns --- step4_data_flow_analysis.md | 348 --------------- timestamp_correction_analysis.md | 723 ------------------------------- 2 files changed, 1071 deletions(-) delete mode 100644 step4_data_flow_analysis.md delete mode 100644 timestamp_correction_analysis.md diff --git a/step4_data_flow_analysis.md b/step4_data_flow_analysis.md deleted file mode 100644 index d86e938..0000000 --- a/step4_data_flow_analysis.md +++ /dev/null @@ -1,348 +0,0 @@ -# Step 4 (preprocess.py) Data Flow Analysis - -## Overview - -Step 4 processes timestamp-corrected photometry data and computes normalized signals (ΔF/F and z-scores). It handles artifact removal, data combination from multiple sessions, and generates quality control visualizations. - -## High-Level Data Flow - -```mermaid -flowchart TD - A[Entry: extractTsAndSignal] --> B{combine_data?} - - B -->|False| C[execute_timestamp_correction] - B -->|True| D[execute_timestamp_correction] - - C --> E[execute_zscore] - - D --> F[check_storeslistfile] - F --> G[combineData] - G --> H[execute_zscore] - - E --> I[Output: z_score, dff, cntrl_sig_fit HDF5 files] - H --> I - - style A fill:#e1f5ff - style I fill:#d4edda -``` - -## Main Processing Paths - -### Entry Point -**`extractTsAndSignal(inputParameters)`** (line 1178) is the main entry point called by the GUI or API. - -### Path 1: Normal Processing (combine_data = False) -1. `execute_timestamp_correction()` → Correct timestamps and align data -2. `execute_zscore()` → Compute z-scores and ΔF/F - -### Path 2: Combined Data Processing (combine_data = True) -1. `execute_timestamp_correction()` → Correct timestamps for each file -2. `check_storeslistfile()` → Merge store lists from multiple files -3. `combineData()` → Combine data from multiple recording sessions -4. `execute_zscore()` → Compute z-scores and ΔF/F on combined data - -## Detailed Processing Stages - -### Stage 1: Timestamp Correction - -```mermaid -flowchart LR - A[Raw HDF5 files] --> B[Read storesList.csv] - B --> C{isosbestic_control?} - C -->|No| D[add_control_channel] - C -->|Yes| E[timestampCorrection_tdt/csv] - D --> E - E --> F[Eliminate first N seconds] - F --> G[decide_naming_convention_and_applyCorrection] - G --> H[applyCorrection for each store] - H --> I{isosbestic_control?} - I -->|No| J[create_control_channel via curve fitting] - I -->|Yes| K[timeCorrection_*.hdf5 files] - J --> K - - style A fill:#e1f5ff - style K fill:#d4edda -``` - -#### Function: `execute_timestamp_correction(folderNames, inputParameters)` - -**Input:** -- Raw HDF5 files from extractors: `control_*.hdf5`, `signal_*.hdf5`, `event_*.hdf5` - -**Process:** -1. 
For each session folder: - - Read `storesList.csv` (mapping of raw names to semantic names) - - If no isosbestic control: `add_control_channel()` creates placeholder control files - - **`timestampCorrection_tdt()`** or **`timestampCorrection_csv()`**: - - Eliminates first N seconds (`timeForLightsTurnOn`) - - For TDT: expands timestamps from block timestamps + sampling rate - - For CSV: uses timestamps as-is - - Writes `timeCorrection_*.hdf5` with keys: `timestampNew`, `correctionIndex`, `sampling_rate` - - **`decide_naming_convention_and_applyCorrection()`**: - - For each store, calls `applyCorrection()` to crop data using `correctionIndex` - - For control/signal channels: crops data arrays - - For event channels: subtracts time offset from timestamps - - If no isosbestic control: **`create_control_channel()`** generates synthetic control via curve fitting - -**Output:** -- Timestamp-corrected HDF5 files with trimmed data -- `timeCorrection_*.hdf5` files containing corrected timestamps - -### Stage 2: Z-Score Computation - -```mermaid -flowchart TD - A[Timestamp-corrected HDF5] --> B[compute_z_score] - B --> C{removeArtifacts?} - - C -->|No| D[helper_z_score: full data] - C -->|Yes| E[helper_z_score: chunk-by-chunk] - - D --> F[filterSignal] - E --> F - - F --> G[controlFit: linear regression] - G --> H[deltaFF: compute ΔF/F] - H --> I[z_score_computation] - - I --> J{removeArtifacts?} - - J -->|No| K[Write z_score, dff, cntrl_sig_fit] - J -->|Yes| L{artifactsRemovalMethod?} - - L -->|concatenate| M[processTimestampsForArtifacts] - L -->|NaN| N[addingNaNtoChunksWithArtifacts] - - M --> K - N --> K - - K --> O[visualizeControlAndSignal] - - style A fill:#e1f5ff - style K fill:#d4edda - style O fill:#fff3cd -``` - -#### Function: `execute_zscore(folderNames, inputParameters)` - -**Input:** -- Timestamp-corrected HDF5 files - -**Process:** -1. 
For each output folder: - - **`compute_z_score(filepath, inputParameters)`**: - - For each control/signal pair: - - **`helper_z_score(control, signal, filepath, name, inputParameters)`**: - - **Without artifacts removal:** - - `execute_controlFit_dff()`: Filter signals → fit control to signal → compute ΔF/F - - `z_score_computation()`: Compute z-score from ΔF/F - - **With artifacts removal:** - - For each user-selected chunk (from `coordsForPreProcessing_*.npy`): - - If no isosbestic: `helper_create_control_channel()` creates synthetic control - - `execute_controlFit_dff()` on chunk - - Concatenate or NaN-fill between chunks - - `z_score_computation()` on processed data - - - Writes: `z_score_*.hdf5`, `dff_*.hdf5`, `cntrl_sig_fit_*.hdf5` - - **If artifacts removal with concatenate method:** - - **`processTimestampsForArtifacts()`**: - - `eliminateData()`: Concatenates good chunks, adjusts timestamps to be continuous - - `eliminateTs()`: Aligns event timestamps with new timeline - - Overwrites data files with concatenated versions - - **If artifacts removal with NaN method:** - - **`addingNaNtoChunksWithArtifacts()`**: - - `addingNaNValues()`: Replaces bad chunks with NaN - - `removeTTLs()`: Filters event timestamps to keep only valid times - - - **`visualizeControlAndSignal()`**: Plots control, signal, cntrl_sig_fit for QC - -**Output:** -- `z_score_*.hdf5` (z-scored signal) -- `dff_*.hdf5` (ΔF/F) -- `cntrl_sig_fit_*.hdf5` (fitted control channel) - -## Key Data Transformations - -### Signal Processing Pipeline - -```mermaid -flowchart LR - A[Raw Signal] --> B[filterSignal: Moving Average] - C[Raw Control] --> D[filterSignal: Moving Average] - - B --> E[controlFit: Linear Regression] - D --> E - - E --> F[control_fit = p0*control + p1] - F --> G[deltaFF] - - B --> G - - G --> H[ΔF/F = signal - control_fit / control_fit * 100] - H --> I[z_score_computation] - - I --> J{zscore_method?} - J -->|standard| K[z = ΔF/F - mean / std] - J -->|baseline| L[z = ΔF/F - baseline_mean / baseline_std] - J -->|robust| M[z = 0.6745 * ΔF/F - median / MAD] - - K --> N[Z-Score Output] - L --> N - M --> N - - style A fill:#e1f5ff - style C fill:#e1f5ff - style N fill:#d4edda -``` - -### Transformation Functions - -1. **`filterSignal(filter_window, signal)`** (line 822) - - Applies moving average filter with configurable window - - Uses `scipy.signal.filtfilt` for zero-phase filtering - -2. **`controlFit(control, signal)`** (line 815) - - Linear regression: fits control to signal - - Returns: `fitted_control = p[0] * control + p[1]` - -3. **`deltaFF(signal, control)`** (line 804) - - Formula: `((signal - control) / control) * 100` - - Computes normalized fluorescence change - -4. 
**`z_score_computation(dff, timestamps, inputParameters)`** (line 853) - - **Standard z-score:** `(ΔF/F - mean(ΔF/F)) / std(ΔF/F)` - - **Baseline z-score:** `(ΔF/F - mean(baseline)) / std(baseline)` - - **Robust z-score:** `0.6745 * (ΔF/F - median) / MAD` - -## Artifact Removal Workflow - -### Interactive Artifact Selection - -The `visualize()` function (line 469) provides an interactive matplotlib plot: -- **Space key:** Mark artifact boundary (vertical line drawn) -- **'d' key:** Delete last marked boundary -- **Close plot:** Save coordinates to `coordsForPreProcessing_*.npy` - -### Two Removal Methods - -**Concatenate Method:** -- Removes artifact chunks completely -- Concatenates good chunks end-to-end -- Adjusts timestamps to be continuous -- Event timestamps realigned to new timeline - -**NaN Method:** -- Replaces artifact chunks with NaN values -- Preserves original timeline -- Filters out event timestamps in artifact regions - -## Supporting Functions - -### Control Channel Creation - -**`helper_create_control_channel(signal, timestamps, window)`** (line 69) -- Used when no isosbestic control is available -- Applies Savitzky-Golay filter to signal -- Fits to exponential function: `f(x) = a + b * exp(-(1/c) * x)` -- Returns synthetic control channel - -### Data Combination - -**`combineData(folderNames, inputParameters, storesList)`** (line 1084) -- Merges data from multiple recording sessions -- Validates that sampling rates match across sessions -- Calls `processTimestampsForCombiningData()` to align timelines -- Saves combined data to first output folder - -### Coordinate Fetching - -**`fetchCoords(filepath, naming, data)`** (line 610) -- Reads `coordsForPreProcessing_*.npy` (artifact boundary coordinates) -- If file doesn't exist: uses `[0, data[-1]]` (entire recording) -- Validates even number of coordinates (pairs of boundaries) -- Returns reshaped array of coordinate pairs - -## File I/O Summary - -### Files Read - -| File Pattern | Content | Source | -|-------------|---------|--------| -| `control_*.hdf5` | Control channel data | Extractors (Step 3) | -| `signal_*.hdf5` | Signal channel data | Extractors (Step 3) | -| `event_*.hdf5` | Event timestamps | Extractors (Step 3) | -| `storesList.csv` | Channel name mapping | Step 2 | -| `coordsForPreProcessing_*.npy` | Artifact boundaries | User selection (optional) | - -### Files Written - -| File Pattern | Content | Keys | -|-------------|---------|------| -| `timeCorrection_*.hdf5` | Corrected timestamps | `timestampNew`, `correctionIndex`, `sampling_rate`, `timeRecStart` (TDT only) | -| `z_score_*.hdf5` | Z-scored signal | `data` | -| `dff_*.hdf5` | ΔF/F signal | `data` | -| `cntrl_sig_fit_*.hdf5` | Fitted control | `data` | -| `event_*_*.hdf5` | Corrected event timestamps | `ts` | - -## Key Parameters from inputParameters - -| Parameter | Purpose | Default/Options | -|-----------|---------|-----------------| -| `timeForLightsTurnOn` | Seconds to eliminate from start | 1 | -| `filter_window` | Moving average window size | 100 | -| `isosbestic_control` | Use isosbestic control channel? | True/False | -| `removeArtifacts` | Enable artifact removal? | True/False | -| `artifactsRemovalMethod` | How to handle artifacts | "concatenate" / "NaN" | -| `zscore_method` | Z-score computation method | "standard z-score" / "baseline z-score" / "robust z-score" | -| `baselineWindowStart` | Baseline window start (seconds) | 0 | -| `baselineWindowEnd` | Baseline window end (seconds) | 0 | -| `combine_data` | Combine multiple recordings? 
| True/False | - -## Architecture Notes for Refactoring - -### Current Coupling Issues - -1. **GUI Progress Tracking:** `writeToFile()` writes to `~/pbSteps.txt` for progress bar updates (lines 36-38, 1042, 1171, 1203, 1208, 1220) -2. **Interactive Plotting:** `visualize()` requires user interaction (matplotlib event handlers) -3. **File Path Assumptions:** Hard-coded path patterns (`*_output_*`, naming conventions) -4. **Mixed Responsibilities:** Single functions handle both computation and I/O - -### Recommended Separation Points - -**Backend Analysis Layer Should Include:** -- `filterSignal()` - pure signal processing -- `controlFit()` - pure regression -- `deltaFF()` - pure computation -- `z_score_computation()` - pure statistical computation -- `helper_create_control_channel()` - algorithmic control generation -- Core timestamp correction logic (separated from I/O) -- Core artifact removal logic (separated from I/O) - -**Data I/O Layer Should Include:** -- `read_hdf5()`, `write_hdf5()` - file operations -- Store list reading/writing -- Coordinate file handling -- HDF5 file discovery and path management - -**Frontend Visualization Layer Should Include:** -- `visualize()` - interactive artifact selection -- `visualizeControlAndSignal()` - QC plots -- `visualize_z_score()`, `visualize_dff()` - result visualization -- Progress tracking callbacks (replace `writeToFile()`) - -### Potential Refactoring Strategy - -1. **Extract pure computation functions** into a `signal_processing` module -2. **Create data models** (dataclasses) for: - - TimeCorrectionResult - - ProcessedSignal (with z_score, dff, control_fit) - - ArtifactRegions -3. **Separate I/O operations** into `io_utils` module with consistent interfaces -4. **Create processing pipelines** that accept data objects, return data objects -5. **Move visualization to separate module** with callbacks for progress/interaction -6. **Use dependency injection** for progress callbacks instead of hard-coded file writes diff --git a/timestamp_correction_analysis.md b/timestamp_correction_analysis.md deleted file mode 100644 index 121aa3f..0000000 --- a/timestamp_correction_analysis.md +++ /dev/null @@ -1,723 +0,0 @@ -# Timestamp Correction Module Analysis - -## Overview - -The `timestamp_correction.py` module handles the correction of timestamps for photometry data, including: -- Eliminating the first N seconds of recording (light stabilization period) -- Expanding TDT block timestamps into continuous timestamps -- Creating synthetic control channels when no isosbestic control is present -- Applying corrections to both data channels and event markers - -## Module Structure - -### Entry Point from preprocess.py - -```python -execute_timestamp_correction(folderNames, inputParameters) # preprocess.py:212 -``` - -This orchestrator loops through all session folders and calls functions in this module. - -## Two-Phase Control Channel Creation Pattern - -### Understanding add_control_channel vs create_control_channel - -These two functions work together in a **two-phase process** to handle synthetic control channel generation. They are **not redundant** but serve distinct purposes: - -#### Phase 1: `add_control_channel` (Called BEFORE timestamp correction) - -**Execution:** Line 229 in `execute_timestamp_correction` - -**Purpose:** Create **PLACEHOLDER** control files to satisfy workflow requirements - -**What it does:** -1. Validates that if `isosbestic_control=False`, no real control channels exist -2. 
For each signal channel without a matching control: - - Copies the raw signal HDF5 file to `cntrl{i}.hdf5` (placeholder) - - Adds entry to storesList: `[["cntrl{i}"], ["control_{region}"]]` -3. Saves updated `storesList.csv` - -**Files created:** -- `cntrl0.hdf5`, `cntrl1.hdf5`, etc. (copies of **RAW** signal data) -- Updated `storesList.csv` with placeholder entries - -**Why it's needed:** -- Timestamp correction workflow expects **paired** control/signal channels in storesList -- Without placeholders, the pairing logic in `timestampCorrection_xxx` and `check_cntrl_sig_length` would fail -- The placeholder **data is never actually used** - it just satisfies structural requirements - -#### Phase 2: `create_control_channel` (Called AFTER timestamp correction) - -**Execution:** Line 243 in `execute_timestamp_correction` - -**Purpose:** Generate **ACTUAL** synthetic control via curve fitting and overwrite placeholders - -**What it does:** -1. Looks for placeholder files (checks: `"control" in event_name.lower() and "cntrl" in event.lower()`) -2. Reads the **CORRECTED** signal data: `signal_{region}.hdf5` (after timestamp correction) -3. Calls `helper_create_control_channel()` to: - - Apply Savitzky-Golay filter to cleaned signal - - Fit to exponential function: `f(x) = a + b * exp(-(1/c) * x)` -4. **OVERWRITES** the placeholder `control_{region}.hdf5` with real synthetic control -5. Also exports to CSV format (legacy) - -**Files written:** -- `control_{region}.hdf5` → `data` (replaces placeholder with curve-fitted control) -- `{raw_name}.csv` (timestamps, data, sampling_rate columns) - -**Why it's separate:** -- Requires **timestamp-corrected** signal data (doesn't exist until after lines 232-239) -- Curve fitting algorithm needs clean timestamps (first N seconds eliminated) -- Cannot be done before timestamp correction without re-correcting the synthetic control - -#### Execution Timeline - -```python -# When isosbestic_control == False: - -# ========== PHASE 1: BEFORE TIMESTAMP CORRECTION ========== -# Line 229: Create placeholders (just file copies) -storesList = add_control_channel(filepath, storesList) -# Result: storesList now has paired structure -# [["Dv1A", "cntrl0"], ["signal_dms", "control_dms"]] -# Files: cntrl0.hdf5 (copy of raw signal, never used) - -# ========== TIMESTAMP CORRECTION PHASE ========== -# Lines 232-234: Process both signal AND placeholder control -timestampCorrection_tdt(filepath, timeForLightsTurnOn, storesList) -# Result: Creates timeCorrection_dms.hdf5 with correctionIndex - -# Lines 236-239: Apply corrections to all channels -decide_naming_convention_and_applyCorrection(...) -# Result: signal_dms.hdf5 now contains corrected signal data -# control_dms.hdf5 still contains uncorrected placeholder copy - -# ========== PHASE 2: AFTER TIMESTAMP CORRECTION ========== -# Line 243: Generate REAL synthetic controls -create_control_channel(filepath, storesList, window=101) -# Result: control_dms.hdf5 OVERWRITTEN with curve-fitted synthetic control -# Now contains valid control data derived from corrected signal -``` - -#### Why This Design Exists - -This is a **chicken-and-egg problem solved with placeholders:** - -1. **Requirement:** Timestamp correction expects paired control/signal channels -2. **Constraint:** Synthetic control generation requires timestamp-corrected signal data -3. 
**Solution:** Create dummy placeholders → correct everything → replace placeholders with real data - -#### Visual Flow - -```mermaid -flowchart TD - A[isosbestic_control = False] --> B[add_control_channel] - B --> C[Copy signal.hdf5 to cntrl0.hdf5] - C --> D[Update storesList.csv] - - D --> E[timestampCorrection_xxx] - E --> F[Creates timeCorrection_dms.hdf5] - - F --> G[decide_naming_convention_and_applyCorrection] - G --> H[Corrects signal_dms.hdf5] - G --> I[Corrects control_dms.hdf5
still contains placeholder] - - I --> J[create_control_channel] - J --> K[Read corrected signal_dms.hdf5] - K --> L[helper_create_control_channel
curve fit] - L --> M[OVERWRITE control_dms.hdf5
with synthetic control] - - style C fill:#fff3cd - style I fill:#fff3cd - style M fill:#d4edda -``` - -#### Refactoring Opportunity - -This placeholder pattern is a **code smell** indicating potential design improvements: - -**Issues:** -1. **Unnecessary I/O:** Placeholder files are written and then overwritten -2. **Confusing flow:** Hard to understand that placeholders are temporary -3. **Tight coupling:** Timestamp correction assumes paired files exist -4. **Wasted computation:** Placeholder controls get timestamp-corrected unnecessarily - -**Potential Improvements:** - -**Option 1: Lazy Control Creation** -- Modify timestamp correction to handle missing controls gracefully -- Only create synthetic controls after all corrections complete -- Remove placeholder file creation entirely - -**Option 2: Data Structure Refactoring** -- Use a data structure that doesn't require physical paired files upfront -- Track "needs synthetic control" as metadata rather than file presence -- Generate and write controls only once at the end - -**Option 3: Two-Pass Workflow** -- First pass: Correct only signal channels -- Second pass: Generate synthetic controls from corrected signals -- Would require refactoring `check_cntrl_sig_length` and pairing logic - -## Function Catalog - -### 1. add_control_channel -**Location:** `timestamp_correction.py:20` -**Purpose:** Create placeholder control channel files when no isosbestic control exists - -```python -def add_control_channel(filepath, arr) -> arr -``` - -**Input:** -- `filepath`: Path to session output folder -- `arr`: 2D array `[[storenames], [storesList]]` from storesList.csv - -**Process:** -1. Validates that control/signal pairs match (raises error if mismatched) -2. For each signal channel without a matching control: - - Copies signal HDF5 file to `cntrl{i}.hdf5` (placeholder) - - Adds entry to storesList array: `[["cntrl{i}"], ["control_{region}"]]` -3. Writes updated storesList.csv - -**Output:** -- Updated `arr` with new control channel entries -- **Files Written:** Updated `storesList.csv`, copied `cntrl*.hdf5` files - -**I/O Summary:** -- **Reads:** Signal HDF5 files (via shutil.copyfile) -- **Writes:** `storesList.csv`, placeholder `cntrl*.hdf5` files - ---- - -### 2. timestampCorrection_csv -**Location:** `timestamp_correction.py:65` -**Purpose:** Correct timestamps for CSV-format data (Doric, NPM, custom CSV) - -```python -def timestampCorrection_csv(filepath, timeForLightsTurnOn, storesList) -``` - -**Input:** -- `filepath`: Path to session output folder -- `timeForLightsTurnOn`: Seconds to eliminate from start (default: 1) -- `storesList`: 2D array `[[storenames], [storesList]]` - -**Process:** -1. Filters storesList to control/signal channels only -2. Pairs control/signal channels, validates naming matches -3. Calls `check_cntrl_sig_length()` to determine which channel to use (shorter one) -4. For each control/signal pair: - - **Reads:** `timestamps` and `sampling_rate` from raw HDF5 - - **Computes:** `correctionIndex = np.where(timestamp >= timeForLightsTurnOn)` - - **Writes:** `timeCorrection_{region}.hdf5` with keys: - - `timestampNew`: Corrected timestamps - - `correctionIndex`: Indices to keep - - `sampling_rate`: Sampling rate - -**Output:** -- **Files Written:** `timeCorrection_{region}.hdf5` for each control/signal pair - -**I/O Summary:** -- **Reads:** `{storename}.hdf5` → `timestamps`, `sampling_rate` -- **Writes:** `timeCorrection_{region}.hdf5` → `timestampNew`, `correctionIndex`, `sampling_rate` - ---- - -### 3. 
timestampCorrection_tdt
-**Location:** `timestamp_correction.py:115`
-**Purpose:** Correct timestamps for TDT-format data (expands block timestamps)
-
-```python
-def timestampCorrection_tdt(filepath, timeForLightsTurnOn, storesList)
-```
-
-**Input:** Same as `timestampCorrection_csv`
-
-**Process:**
-1. Filters storesList to control/signal channels only
-2. Pairs control/signal channels, validates naming matches
-3. Calls `check_cntrl_sig_length()` to determine which channel to use
-4. For each control/signal pair:
-   - **Reads:** `timestamps`, `npoints`, `sampling_rate` from raw HDF5
-   - **TDT-specific expansion algorithm:**
-     ```python
-     timeRecStart = timestamp[0]
-     timestamps = np.subtract(timestamp, timeRecStart)  # Zero-base
-     adder = np.arange(npoints) / sampling_rate  # Within-block offsets
-     lengthAdder = len(adder)  # = npoints
-     # Expand: for each block timestamp, add within-block offsets
-     timestampNew = np.zeros((len(timestamps), lengthAdder))
-     for i in range(lengthAdder):
-         timestampNew[:, i] = np.add(timestamps, adder[i])
-     timestampNew = (timestampNew.T).reshape(-1, order="F")  # Flatten
-     correctionIndex = np.where(timestampNew >= timeForLightsTurnOn)
-     timestampNew = timestampNew[correctionIndex]
-     ```
-   - **Writes:** `timeCorrection_{region}.hdf5` with keys:
-     - `timeRecStart`: Recording start time (TDT-specific)
-     - `timestampNew`: Expanded, corrected timestamps
-     - `correctionIndex`: Indices to keep
-     - `sampling_rate`: Sampling rate
-
-**Output:**
-- **Files Written:** `timeCorrection_{region}.hdf5` with TDT-specific `timeRecStart` key
-
-**I/O Summary:**
-- **Reads:** `{storename}.hdf5` → `timestamps`, `npoints`, `sampling_rate`
-- **Writes:** `timeCorrection_{region}.hdf5` → `timeRecStart`, `timestampNew`, `correctionIndex`, `sampling_rate`
-
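-To make the expansion concrete, here is a small, self-contained sketch of the same
-algorithm (vectorized with broadcasting instead of the explicit loop). The toy
-numbers below are illustrative, not values from a real TDT block:
-
-```python
-import numpy as np
-
-# Toy input: 3 block timestamps, 4 samples per block at 10 Hz
-block_ts = np.array([100.0, 100.4, 100.8])
-npoints, sampling_rate = 4, 10.0
-timeForLightsTurnOn = 0.5
-
-timeRecStart = block_ts[0]
-zeroed = block_ts - timeRecStart                       # zero-base block starts
-adder = np.arange(npoints) / sampling_rate             # within-block offsets
-expanded = (zeroed[:, None] + adder[None, :]).ravel()  # equals the loop + F-order flatten
-correctionIndex = np.where(expanded >= timeForLightsTurnOn)[0]
-timestampNew = expanded[correctionIndex]
-
-print(timestampNew)  # [0.5 0.6 0.7 0.8 0.9 1.  1.1]
-```
-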
----
-
-### 4. check_cntrl_sig_length
-**Location:** `timestamp_correction.py:273`
-**Purpose:** Determine which channel (control or signal) to use as reference based on length
-
-```python
-def check_cntrl_sig_length(filepath, channels_arr, storenames, storesList) -> indices
-```
-
-**Input:**
-- `filepath`: Path to session output folder
-- `channels_arr`: Paired control/signal array `[["control_A", "control_B"], ["signal_A", "signal_B"]]`
-- `storenames`: Raw HDF5 filenames
-- `storesList`: Semantic channel names
-
-**Process:**
-1. For each control/signal pair:
-   - **Reads:** `data` from both control and signal HDF5
-   - Compares lengths: `control.shape[0]` vs `signal.shape[0]`
-   - Returns the shorter one's storename (or signal if equal)
-
-**Output:**
-- List of storenames to use for timestamp correction (one per pair)
-
-**I/O Summary:**
-- **Reads:** `{control_storename}.hdf5` → `data`, `{signal_storename}.hdf5` → `data`
-
-**Note:** Conceptually a pure comparison, but it reads the full data arrays just to determine which one is shorter.
-
----
-
-### 5. decide_naming_convention_and_applyCorrection
-**Location:** `timestamp_correction.py:178`
-**Purpose:** Loop through all channels and apply timestamp corrections
-
-```python
-def decide_naming_convention_and_applyCorrection(filepath, timeForLightsTurnOn, event, displayName, storesList)
-```
-
-**Input:**
-- `filepath`: Path to session output folder
-- `timeForLightsTurnOn`: Seconds eliminated from start
-- `event`: Raw storename (e.g., "Dv1A")
-- `displayName`: Semantic name (e.g., "control_DMS")
-- `storesList`: Full storesList array
-
-**Process:**
-1. Filters storesList to control/signal channels
-2. Pairs channels and validates naming conventions
-3. For each pair, calls `applyCorrection(filepath, timeForLightsTurnOn, event, displayName, region)`
-
-**Output:**
-- Delegates to `applyCorrection()` (no direct I/O)
-
----
-
-### 6. applyCorrection
-**Location:** `timestamp_correction.py:205`
-**Purpose:** Apply timestamp corrections to data channels or event markers
-
-```python
-def applyCorrection(filepath, timeForLightsTurnOn, event, displayName, naming)
-```
-
-**Input:**
-- `filepath`: Path to session output folder
-- `timeForLightsTurnOn`: Seconds eliminated from start
-- `event`: Raw storename
-- `displayName`: Semantic display name
-- `naming`: Region identifier (e.g., "dms")
-
-**Process:**
-
-**For Control/Signal Channels:**
-1. **Reads:** `timeCorrection_{naming}.hdf5` → `correctionIndex`
-2. **Reads:** `{event}.hdf5` → `data`
-3. **Applies:** `arr = arr[correctionIndex]` (crops data)
-4. **Writes:** `{displayName}.hdf5` → `data` (overwrites with corrected data)
-
-**For Event Channels:**
-1. Detects TDT format: `check_TDT(os.path.dirname(filepath))`
-2. **Reads:** `timeCorrection_{naming}.hdf5` → `timeRecStart` (if TDT)
-3. **Reads:** `{event}.hdf5` → `timestamps`
-4. **Applies corrections:**
-   - If TDT and timestamps >= timeRecStart: subtract both `timeRecStart` and `timeForLightsTurnOn`
-   - Otherwise: subtract only `timeForLightsTurnOn`
-5. **Writes:** `{event}_{naming}.hdf5` → `ts` (corrected event timestamps)
-
-**Output:**
-- **Files Written:**
-  - `{displayName}.hdf5` → `data` (for control/signal)
-  - `{event}_{naming}.hdf5` → `ts` (for events)
-
-**I/O Summary:**
-- **Reads:** `timeCorrection_{naming}.hdf5`, `{event}.hdf5`
-- **Writes:** `{displayName}.hdf5` or `{event}_{naming}.hdf5`
-
----
-
-### 7. create_control_channel
-**Location:** `timestamp_correction.py:247`
-**Purpose:** Generate synthetic control channel using curve fitting (when no isosbestic control exists)
-
-```python
-def create_control_channel(filepath, arr, window=5001)
-```
-
-**Input:**
-- `filepath`: Path to session output folder
-- `arr`: storesList array `[[storenames], [storesList]]`
-- `window`: Savitzky-Golay filter window (default: 5001)
-
-**Process:**
-1. Loops through storesList to find placeholder control channels (`cntrl` in storename)
-2. 
For each placeholder: - - **Reads:** `signal_{region}.hdf5` → `data` (corrected signal) - - **Reads:** `timeCorrection_{region}.hdf5` → `timestampNew`, `sampling_rate` - - **Calls:** `helper_create_control_channel(signal, timestampNew, window)` from `control_channel.py` - - Applies Savitzky-Golay filter - - Fits to exponential: `f(x) = a + b * exp(-(1/c) * x)` - - **Writes:** `{control_name}.hdf5` → `data` (synthetic control) - - **Writes:** `{event_name}.csv` with columns: `timestamps`, `data`, `sampling_rate` - -**Output:** -- **Files Written:** - - `control_{region}.hdf5` → `data` (replaces placeholder) - - `{raw_name}.csv` (legacy format export) - -**I/O Summary:** -- **Reads:** `signal_{region}.hdf5` → `data`, `timeCorrection_{region}.hdf5` → `timestampNew`, `sampling_rate` -- **Writes:** `control_{region}.hdf5` → `data`, `{raw_name}.csv` - ---- - -## Data Flow Diagram - -### High-Level Flow (called from execute_timestamp_correction) - -```mermaid -flowchart TD - A[execute_timestamp_correction] --> B[Read storesList.csv] - B --> C{isosbestic_control?} - - C -->|False| D[add_control_channel] - C -->|True| E{Check format} - D --> E - - E -->|TDT| F[timestampCorrection_tdt] - E -->|CSV/Doric/NPM| G[timestampCorrection_csv] - - F --> H[Loop: decide_naming_convention_and_applyCorrection] - G --> H - - H --> I[For each store: applyCorrection] - - I --> J{isosbestic_control?} - J -->|False| K[create_control_channel] - J -->|True| L[Done] - K --> L - - style A fill:#e1f5ff - style L fill:#d4edda -``` - -### Detailed Flow: timestampCorrection Functions - -```mermaid -flowchart LR - A[Raw HDF5 files] --> B[check_cntrl_sig_length] - B --> C[Read control & signal data] - C --> D[Return shorter channel name] - - D --> E{Format?} - E -->|CSV| F[timestampCorrection_csv] - E -->|TDT| G[timestampCorrection_tdt] - - F --> H[Read timestamps from selected channel] - G --> I[Read timestamps, npoints, sampling_rate] - - H --> J[correctionIndex = where >= timeForLightsTurnOn] - I --> K[Expand block timestamps] - K --> J - - J --> L[Write timeCorrection_{region}.hdf5] - - style A fill:#e1f5ff - style L fill:#d4edda -``` - -### Detailed Flow: applyCorrection - -```mermaid -flowchart TD - A[applyCorrection called] --> B{Channel type?} - - B -->|control/signal| C[Read correctionIndex] - B -->|event| D[Read event timestamps] - - C --> E[Read raw data] - E --> F[data = data correctionIndex] - F --> G[Write displayName.hdf5] - - D --> H{TDT format?} - H -->|Yes| I[Read timeRecStart] - H -->|No| J[ts -= timeForLightsTurnOn] - - I --> K[ts -= timeRecStart] - K --> J - J --> L[Write event_region.hdf5] - - style A fill:#e1f5ff - style G fill:#d4edda - style L fill:#d4edda -``` - -### Detailed Flow: Control Channel Creation - -```mermaid -flowchart LR - A[add_control_channel] --> B[For each signal without control] - B --> C[Copy signal.hdf5 to cntrl_i.hdf5] - C --> D[Update storesList.csv] - - D --> E[... timestamp correction ...] 
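-    %% E runs decide_naming_convention_and_applyCorrection, which also crops the
-    %% placeholder control (wasted work, see Refactoring Opportunity above)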
- - E --> F[create_control_channel] - F --> G[For each cntrl_i placeholder] - G --> H[Read signal_{region}.hdf5] - H --> I[helper_create_control_channel] - I --> J[Savitzky-Golay filter] - J --> K[Curve fit to exponential] - K --> L[Write control_{region}.hdf5] - L --> M[Export to CSV] - - style A fill:#fff3cd - style M fill:#d4edda -``` - -## Execution Order in execute_timestamp_correction - -```python -# preprocess.py:212-247 -for each session in folderNames: - for each output_folder in session: - # Step 1: Read metadata - storesList = np.genfromtxt("storesList.csv") - - # Step 2: Add placeholder controls if needed - if isosbestic_control == False: - storesList = add_control_channel(filepath, storesList) - - # Step 3: Compute correctionIndex and timestampNew - if check_TDT(folderName): - timestampCorrection_tdt(filepath, timeForLightsTurnOn, storesList) - else: - timestampCorrection_csv(filepath, timeForLightsTurnOn, storesList) - - # Step 4: Apply corrections to all channels/events - for each store in storesList: - decide_naming_convention_and_applyCorrection( - filepath, timeForLightsTurnOn, storename, displayName, storesList - ) - # ^ This calls applyCorrection for each channel - - # Step 5: Generate synthetic controls via curve fitting - if isosbestic_control == False: - create_control_channel(filepath, storesList, window=101) -``` - -## File I/O Summary - -### Files Read - -| Function | Files Read | Keys | -|----------|-----------|------| -| `add_control_channel` | `signal_*.hdf5` (for copying) | - | -| `timestampCorrection_csv` | `{storename}.hdf5` | `timestamps`, `sampling_rate` | -| `timestampCorrection_tdt` | `{storename}.hdf5` | `timestamps`, `npoints`, `sampling_rate` | -| `check_cntrl_sig_length` | `control_*.hdf5`, `signal_*.hdf5` | `data` | -| `applyCorrection` | `timeCorrection_{region}.hdf5`
`{event}.hdf5` | `correctionIndex`, `timeRecStart` (TDT)
`data` or `timestamps` | -| `create_control_channel` | `signal_{region}.hdf5`
`timeCorrection_{region}.hdf5` | `data`
`timestampNew`, `sampling_rate` | - -### Files Written - -| Function | Files Written | Keys | Notes | -|----------|--------------|------|-------| -| `add_control_channel` | `storesList.csv`
`cntrl{i}.hdf5` | -
(copy of signal) | Placeholder files | -| `timestampCorrection_csv` | `timeCorrection_{region}.hdf5` | `timestampNew`, `correctionIndex`, `sampling_rate` | One per region | -| `timestampCorrection_tdt` | `timeCorrection_{region}.hdf5` | `timeRecStart`, `timestampNew`, `correctionIndex`, `sampling_rate` | TDT-specific | -| `applyCorrection` | `{displayName}.hdf5`
`{event}_{region}.hdf5` | `data`
`ts` | Overwrites with corrected data | -| `create_control_channel` | `control_{region}.hdf5`
`{raw_name}.csv` | `data`
timestamps, data, sampling_rate | Replaces placeholder |
-
-## Key Transformations
-
-### 1. Timestamp Expansion (TDT only)
-
-**Input:** Block timestamps (one per acquisition block)
-**Algorithm:**
-```python
-timeRecStart = timestamp[0]
-timestamps = timestamp - timeRecStart  # Zero-base
-adder = np.arange(npoints) / sampling_rate  # Within-block offsets [0, 1/fs, 2/fs, ...]
-# Broadcast block start times against within-block offsets to expand:
-timestampNew = zeros((n_blocks, npoints))
-for i in range(npoints):
-    timestampNew[:, i] = timestamps + adder[i]
-timestampNew = timestampNew.T.reshape(-1, order='F')  # Column-major flatten
-```
-**Output:** Continuous timestamps at full sampling rate
-
-### 2. Correction Index Computation
-
-**Input:** Timestamps array, `timeForLightsTurnOn`
-**Algorithm:**
-```python
-correctionIndex = np.where(timestamp >= timeForLightsTurnOn)[0]
-```
-**Output:** Indices of timestamps to keep (after eliminating first N seconds)
-
-### 3. Data Cropping
-
-**Applied to:** Control/signal data channels
-**Algorithm:**
-```python
-data_corrected = data[correctionIndex]
-```
-
-### 4. Event Timestamp Adjustment
-
-**Applied to:** Event markers (TTL pulses)
-**Algorithm:**
-```python
-# CSV format:
-ts_corrected = ts - timeForLightsTurnOn
-
-# TDT format (if ts >= timeRecStart):
-ts_corrected = ts - timeRecStart - timeForLightsTurnOn
-```
-
-### 5. Synthetic Control Generation
-
-**Input:** Signal channel (already corrected)
-**Algorithm:**
-1. Apply Savitzky-Golay filter: `filtered_signal = savgol_filter(signal, window, polyorder=3)`
-2. Curve fit to exponential: `control = a + b * exp(-(1/c) * t)`
-3. Return fitted curve as synthetic control
-
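-To ground the transformations above (and the refactoring options below), here is a
-minimal, self-contained sketch of transformations 2-4 as pure functions. The names
-follow Option 1 below (signatures simplified); the demo values are toy values, not
-real session data:
-
-```python
-import numpy as np
-
-def compute_correction_index(timestamps, time_for_lights_turn_on):
-    # Transformation 2: indices of samples after the light-stabilization period
-    return np.where(timestamps >= time_for_lights_turn_on)[0]
-
-def crop_data_by_index(data, correction_index):
-    # Transformation 3: keep only the retained samples
-    return data[correction_index]
-
-def adjust_event_timestamps(ts, time_for_lights_turn_on, time_rec_start=None):
-    # Transformation 4: shift event markers onto the corrected timeline
-    ts = np.asarray(ts, dtype=float)
-    if time_rec_start is not None:  # TDT: also remove the recording start offset
-        ts = ts - time_rec_start
-    return ts - time_for_lights_turn_on
-
-t = np.arange(10) / 2.0                 # 0.0, 0.5, ..., 4.5 s
-idx = compute_correction_index(t, 1.0)  # drop the first second
-sig = crop_data_by_index(np.arange(10.0), idx)
-events = adjust_event_timestamps([101.5, 103.0], 1.0, time_rec_start=100.0)
-print(idx[0], sig[0], events)           # 2 2.0 [0.5 2. ]
-```
-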
-## Analysis for I/O Separation
-
-### Pure Analysis Functions (Minimal I/O)
-These could be extracted with I/O injected:
-- ❌ None - all functions perform substantial I/O
-
-### Orchestration Functions (Heavy I/O, Light Analysis)
-These coordinate reading/writing and delegate computation:
-- `add_control_channel` - File copying and CSV writing
-- `decide_naming_convention_and_applyCorrection` - Loops and delegates
-- `create_control_channel` - Orchestrates read → process → write
-
-### Mixed Functions (I/O + Analysis)
-These perform both I/O and computation inline:
-- `timestampCorrection_csv` - Reads data, computes correctionIndex, writes results
-- `timestampCorrection_tdt` - Reads data, expands timestamps, computes correctionIndex, writes
-- `applyCorrection` - Reads multiple files, applies transformations, writes
-- `check_cntrl_sig_length` - Reads data just to compare lengths
-
-## Refactoring Recommendations for I/O Separation
-
-### Option 1: Extract Pure Computation Functions
-
-Create new pure functions:
-```python
-# Pure analysis (no I/O)
-def compute_correction_index(timestamps, timeForLightsTurnOn):
-    return np.where(timestamps >= timeForLightsTurnOn)[0]
-
-def expand_tdt_timestamps(block_timestamps, npoints, sampling_rate):
-    # TDT expansion algorithm
-    ...
-    return expanded_timestamps
-
-def crop_data_by_index(data, correctionIndex):
-    return data[correctionIndex]
-
-def adjust_event_timestamps(ts, timeRecStart, timeForLightsTurnOn, is_tdt):
-    # Event adjustment logic
-    ...
-    return adjusted_ts
-```
-
-Then modify existing functions to use these pure functions, keeping I/O separate.
-
-### Option 2: Reader/Writer Pattern
-
-Create dedicated I/O classes:
-```python
-class TimestampCorrectionReader:
-    def read_raw_timestamps(self, filepath, storename):
-        ...
-
-    def read_correction_data(self, filepath, region):
-        ...
-
-class TimestampCorrectionWriter:
-    def write_correction_file(self, filepath, region, data):
-        ...
-
-    def write_corrected_data(self, filepath, displayName, data):
-        ...
-```
-
-### Option 3: Data Class Pattern
-
-Return data objects instead of writing directly:
-```python
-@dataclass
-class TimestampCorrection:
-    timestampNew: np.ndarray
-    correctionIndex: np.ndarray
-    sampling_rate: float
-    timeRecStart: Optional[float] = None  # TDT only
-
-def timestampCorrection_tdt(...) -> TimestampCorrection:
-    # Compute all values
-    return TimestampCorrection(
-        timestampNew=...,
-        correctionIndex=...,
-        sampling_rate=...,
-        timeRecStart=...
-    )
-
-# Separate writer function
-def write_timestamp_correction(filepath, region, correction: TimestampCorrection):
-    write_hdf5(correction.timestampNew, f"timeCorrection_{region}", filepath, "timestampNew")
-    # ... etc
-```
-
-## Current I/O Patterns to Refactor
-
-1. **Inline writes in computation functions:**
-   - `timestampCorrection_csv` and `timestampCorrection_tdt` compute AND write
-   - Should separate: compute → return data → write in caller
-
-2. **Reading for validation only:**
-   - `check_cntrl_sig_length` reads full data arrays just to compare shapes
-   - Could be optimized to read only array metadata/shapes
-
-3. **Side-effect file creation:**
-   - `add_control_channel` creates files as side effect
-   - `create_control_channel` both generates data AND writes multiple formats (HDF5 + CSV)
-
-4. **Mixed responsibilities in applyCorrection:**
-   - Handles both control/signal cropping AND event timestamp adjustment
-   - Could be split into two separate functions

From de0d373c1abce90a5b77e7f2d1bbfdfb41b994f8 Mon Sep 17 00:00:00 2001
From: pauladkisson
Date: Mon, 26 Jan 2026 13:25:47 -0800
Subject: [PATCH 02/53] Move folder_selection Tkinter pop-up to a dedicated
 module.

--- src/guppy/frontend/__init__.py | 0 src/guppy/frontend/path_selection.py | 40 ++++++++++++++++++++++++++++ src/guppy/savingInputParameters.py | 33 +++-------------------- 3 files changed, 43 insertions(+), 30 deletions(-) create mode 100644 src/guppy/frontend/__init__.py create mode 100644 src/guppy/frontend/path_selection.py diff --git a/src/guppy/frontend/__init__.py b/src/guppy/frontend/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/src/guppy/frontend/path_selection.py b/src/guppy/frontend/path_selection.py new file mode 100644 index 0000000..3d69efb --- /dev/null +++ b/src/guppy/frontend/path_selection.py @@ -0,0 +1,40 @@ +import logging +import os +import tkinter as tk +from tkinter import filedialog, ttk + +logger = logging.getLogger(__name__) + + +def get_folder_path(): + # Determine base folder path (headless-friendly via env var) + base_dir_env = os.environ.get("GUPPY_BASE_DIR") + is_headless = base_dir_env and os.path.isdir(base_dir_env) + if is_headless: + folder_path = base_dir_env + logger.info(f"Folder path set to {folder_path} (from GUPPY_BASE_DIR)") + return folder_path + + # Create the main window + folder_selection = tk.Tk() + folder_selection.title("Select the folder path where your data is located") + folder_selection.geometry("700x200") + + selected_path = {"value": None} + + def select_folder(): + selected = filedialog.askdirectory(title="Select the folder path where your data is located") + if selected: + logger.info(f"Folder path set to {selected}") + selected_path["value"] = selected + else: + default_path = os.path.expanduser("~") + logger.info(f"Folder path set to {default_path}") + selected_path["value"] = default_path + folder_selection.destroy() + + select_button = ttk.Button(folder_selection, text="Select a Folder", command=select_folder) + select_button.pack(pady=5) + folder_selection.mainloop() + + return selected_path["value"] diff --git a/src/guppy/savingInputParameters.py b/src/guppy/savingInputParameters.py index a1bd35e..2dc25c1 100644 --- a/src/guppy/savingInputParameters.py +++ b/src/guppy/savingInputParameters.py @@ -4,14 +4,13 @@ import subprocess import sys import time -import tkinter as tk from threading import Thread -from tkinter import filedialog, ttk import numpy as np import pandas as pd import panel as pn +from .frontend.path_selection import get_folder_path from .saveStoresList import execute from .visualizePlot import visualizeResults @@ -20,34 +19,8 @@ def savingInputParameters(): pn.extension() - - # Determine base folder path (headless-friendly via env var) - base_dir_env = os.environ.get("GUPPY_BASE_DIR") - is_headless = base_dir_env and os.path.isdir(base_dir_env) - if is_headless: - global folder_path - folder_path = base_dir_env - logger.info(f"Folder path set to {folder_path} (from GUPPY_BASE_DIR)") - else: - # Create the main window - folder_selection = tk.Tk() - folder_selection.title("Select the folder path where your data is located") - folder_selection.geometry("700x200") - - def select_folder(): - global folder_path - folder_path = filedialog.askdirectory(title="Select the folder path where your data is located") - if folder_path: - logger.info(f"Folder path set to {folder_path}") - folder_selection.destroy() - else: - folder_path = os.path.expanduser("~") - logger.info(f"Folder path set to {folder_path}") - - select_button = ttk.Button(folder_selection, text="Select a Folder", command=select_folder) - select_button.pack(pady=5) - folder_selection.mainloop() - + global folder_path + folder_path = 
get_folder_path() current_dir = os.getcwd() def make_dir(filepath): From f030da5fc7d3aede2c76c4b0ddeb4a31baf84646 Mon Sep 17 00:00:00 2001 From: pauladkisson Date: Mon, 26 Jan 2026 15:22:17 -0800 Subject: [PATCH 03/53] Progress bar to a dedicated module. --- src/guppy/savingInputParameters.py | 73 ++++++------------------------ 1 file changed, 14 insertions(+), 59 deletions(-) diff --git a/src/guppy/savingInputParameters.py b/src/guppy/savingInputParameters.py index 2dc25c1..3bcff3c 100644 --- a/src/guppy/savingInputParameters.py +++ b/src/guppy/savingInputParameters.py @@ -3,7 +3,6 @@ import os import subprocess import sys -import time from threading import Thread import numpy as np @@ -11,6 +10,7 @@ import panel as pn from .frontend.path_selection import get_folder_path +from .frontend.progress import readPBIncrementValues from .saveStoresList import execute from .visualizePlot import visualizeResults @@ -23,64 +23,6 @@ def savingInputParameters(): folder_path = get_folder_path() current_dir = os.getcwd() - def make_dir(filepath): - op = os.path.join(filepath, "inputParameters") - if not os.path.exists(op): - os.mkdir(op) - return op - - def readRawData(): - inputParameters = getInputParameters() - subprocess.call([sys.executable, "-m", "guppy.readTevTsq", json.dumps(inputParameters)]) - - def extractTs(): - inputParameters = getInputParameters() - subprocess.call([sys.executable, "-m", "guppy.preprocess", json.dumps(inputParameters)]) - - def psthComputation(): - inputParameters = getInputParameters() - inputParameters["curr_dir"] = current_dir - subprocess.call([sys.executable, "-m", "guppy.computePsth", json.dumps(inputParameters)]) - - def readPBIncrementValues(progressBar): - logger.info("Read progress bar increment values function started...") - file_path = os.path.join(os.path.expanduser("~"), "pbSteps.txt") - if os.path.exists(file_path): - os.remove(file_path) - increment, maximum = 0, 100 - progressBar.value = increment - progressBar.bar_color = "success" - while True: - try: - with open(file_path, "r") as file: - content = file.readlines() - if len(content) == 0: - pass - else: - maximum = int(content[0]) - increment = int(content[-1]) - - if increment == -1: - progressBar.bar_color = "danger" - os.remove(file_path) - break - progressBar.max = maximum - progressBar.value = increment - time.sleep(0.001) - except FileNotFoundError: - time.sleep(0.001) - except PermissionError: - time.sleep(0.001) - except Exception as e: - # Handle other exceptions that may occur - logger.info(f"An error occurred while reading the file: {e}") - break - if increment == maximum: - os.remove(file_path) - break - - logger.info("Read progress bar increment values stopped.") - # progress bars = PB read_progress = pn.indicators.Progress(name="Progress", value=100, max=100, width=300) extract_progress = pn.indicators.Progress(name="Progress", value=100, max=100, width=300) @@ -380,6 +322,19 @@ def getInputParameters(): } return inputParameters + def readRawData(): + inputParameters = getInputParameters() + subprocess.call([sys.executable, "-m", "guppy.readTevTsq", json.dumps(inputParameters)]) + + def extractTs(): + inputParameters = getInputParameters() + subprocess.call([sys.executable, "-m", "guppy.preprocess", json.dumps(inputParameters)]) + + def psthComputation(): + inputParameters = getInputParameters() + inputParameters["curr_dir"] = current_dir + subprocess.call([sys.executable, "-m", "guppy.computePsth", json.dumps(inputParameters)]) + def checkSameLocation(arr, abspath): # abspath = [] for i 
in range(len(arr)): From d14abc2723eacfe0cae7af404f9cfb299a121b77 Mon Sep 17 00:00:00 2001 From: pauladkisson Date: Mon, 26 Jan 2026 15:22:34 -0800 Subject: [PATCH 04/53] Progress bar to a dedicated module. --- src/guppy/frontend/progress.py | 45 ++++++++++++++++++++++++++++++++++ 1 file changed, 45 insertions(+) create mode 100644 src/guppy/frontend/progress.py diff --git a/src/guppy/frontend/progress.py b/src/guppy/frontend/progress.py new file mode 100644 index 0000000..d7bc226 --- /dev/null +++ b/src/guppy/frontend/progress.py @@ -0,0 +1,45 @@ +import logging +import os +import time + +logger = logging.getLogger(__name__) + + +def readPBIncrementValues(progressBar): + logger.info("Read progress bar increment values function started...") + file_path = os.path.join(os.path.expanduser("~"), "pbSteps.txt") + if os.path.exists(file_path): + os.remove(file_path) + increment, maximum = 0, 100 + progressBar.value = increment + progressBar.bar_color = "success" + while True: + try: + with open(file_path, "r") as file: + content = file.readlines() + if len(content) == 0: + pass + else: + maximum = int(content[0]) + increment = int(content[-1]) + + if increment == -1: + progressBar.bar_color = "danger" + os.remove(file_path) + break + progressBar.max = maximum + progressBar.value = increment + time.sleep(0.001) + except FileNotFoundError: + time.sleep(0.001) + except PermissionError: + time.sleep(0.001) + except Exception as e: + # Handle other exceptions that may occur + logger.info(f"An error occurred while reading the file: {e}") + break + if increment == maximum: + os.remove(file_path) + break + + logger.info("Read progress bar increment values stopped.") From 2c2fc04f2c7226fe94321194bf4a41c7378a67df Mon Sep 17 00:00:00 2001 From: pauladkisson Date: Mon, 26 Jan 2026 15:47:53 -0800 Subject: [PATCH 05/53] Refactored inputParameters into a class pt 1 --- src/guppy/frontend/input_parameters.py | 320 +++++++++++++++++++++++++ src/guppy/savingInputParameters.py | 300 ----------------------- 2 files changed, 320 insertions(+), 300 deletions(-) create mode 100644 src/guppy/frontend/input_parameters.py diff --git a/src/guppy/frontend/input_parameters.py b/src/guppy/frontend/input_parameters.py new file mode 100644 index 0000000..28c74f9 --- /dev/null +++ b/src/guppy/frontend/input_parameters.py @@ -0,0 +1,320 @@ +import logging + +import numpy as np +import pandas as pd +import panel as pn + +logger = logging.getLogger(__name__) + + +class InputParametersGUI: + def __init__(self, folder_path): + read_progress = pn.indicators.Progress(name="Progress", value=100, max=100, width=300) + extract_progress = pn.indicators.Progress(name="Progress", value=100, max=100, width=300) + psth_progress = pn.indicators.Progress(name="Progress", value=100, max=100, width=300) + + template = pn.template.BootstrapTemplate(title="Input Parameters GUI") + + mark_down_1 = pn.pane.Markdown( + """**Select folders for the analysis from the file selector below**""", width=600 + ) + + files_1 = pn.widgets.FileSelector(folder_path, name="folderNames", width=950) + + explain_modality = pn.pane.Markdown( + """ + **Data Modality:** Select the type of data acquisition system used for your recordings: + - **tdt**: Tucker-Davis Technologies system + - **csv**: Generic CSV format + - **doric**: Doric Photometry system + - **npm**: Neurophotometrics system + """, + width=600, + ) + + modality_selector = pn.widgets.Select( + name="Data Modality", value="tdt", options=["tdt", "csv", "doric", "npm"], width=320 + ) + + explain_time_artifacts 
= pn.pane.Markdown(
+            """
+            - ***Number of cores :*** Number of cores used for analysis. Try to
+            keep it less than the number of cores in your machine.
+            - ***Combine Data? :*** Make this parameter ``` True ``` if the user wants to combine
+            the data, especially when there are two different
+            data files for the same recording session.<br>
- ***Isosbestic Control Channel? :*** Make this parameter ``` False ``` if the user
+            does not want to use the isosbestic control channel in the analysis.<br>
- ***Eliminate first few seconds :*** This parameter cuts out the first x seconds
+            from the data. Default is 1 second.<br>
- ***Window for Moving Average filter :*** The filtering of signals
+            is done using a moving average filter. The default window for the moving
+            average filter is 100 datapoints. Change it based on the requirement.<br>
+ - ***Moving Window (transients detection) :*** Transients in the z-score + and/or \u0394F/F are detected using this moving window. + Default is 15 seconds. Change it based on the requirement.
+ - ***High Amplitude filtering threshold (HAFT) (transients detection) :*** High amplitude + events greater than x times the MAD above the median are filtered out. Here, x is + high amplitude filtering threshold. Default is 2. + - ***Transients detection threshold (TD Thresh):*** Peaks with local maxima greater than x times + the MAD above the median of the trace (after filtering high amplitude events) are detected + as transients. Here, x is transients detection threshold. Default is 3. + - ***Number of channels (Neurophotometrics only) :*** Number of + channels used while recording, when data files has no column names mentioning "Flags" + or "LedState". + - ***removeArtifacts? :*** Make this parameter ``` True``` if there are + artifacts and user wants to remove the artifacts. + - ***removeArtifacts method :*** Selecting ```concatenate``` will remove bad + chunks and concatenate the selected good chunks together. + Selecting ```replace with NaN``` will replace bad chunks with NaN + values. + """, + width=350, + ) + + timeForLightsTurnOn = pn.widgets.LiteralInput( + name="Eliminate first few seconds (int)", value=1, type=int, width=320 + ) + + isosbestic_control = pn.widgets.Select( + name="Isosbestic Control Channel? (bool)", value=True, options=[True, False], width=320 + ) + + numberOfCores = pn.widgets.LiteralInput(name="# of cores (int)", value=2, type=int, width=150) + + combine_data = pn.widgets.Select(name="Combine Data? (bool)", value=False, options=[True, False], width=150) + + computePsth = pn.widgets.Select( + name="z_score and/or \u0394F/F? (psth)", options=["z_score", "dff", "Both"], width=320 + ) + + transients = pn.widgets.Select( + name="z_score and/or \u0394F/F? (transients)", options=["z_score", "dff", "Both"], width=320 + ) + + plot_zScore_dff = pn.widgets.Select( + name="z-score plot and/or \u0394F/F plot?", + options=["z_score", "dff", "Both", "None"], + value="None", + width=320, + ) + + moving_wd = pn.widgets.LiteralInput( + name="Moving Window for transients detection (s) (int)", value=15, type=int, width=320 + ) + + highAmpFilt = pn.widgets.LiteralInput(name="HAFT (int)", value=2, type=int, width=150) + + transientsThresh = pn.widgets.LiteralInput(name="TD Thresh (int)", value=3, type=int, width=150) + + moving_avg_filter = pn.widgets.LiteralInput( + name="Window for Moving Average filter (int)", value=100, type=int, width=320 + ) + + removeArtifacts = pn.widgets.Select( + name="removeArtifacts? (bool)", value=False, options=[True, False], width=150 + ) + + artifactsRemovalMethod = pn.widgets.Select( + name="removeArtifacts method", value="concatenate", options=["concatenate", "replace with NaN"], width=150 + ) + + no_channels_np = pn.widgets.LiteralInput( + name="Number of channels (Neurophotometrics only)", value=2, type=int, width=320 + ) + + z_score_computation = pn.widgets.Select( + name="z-score computation Method", + options=["standard z-score", "baseline z-score", "modified z-score"], + value="standard z-score", + width=200, + ) + + baseline_wd_strt = pn.widgets.LiteralInput( + name="Baseline Window Start Time (s) (int)", value=0, type=int, width=200 + ) + baseline_wd_end = pn.widgets.LiteralInput( + name="Baseline Window End Time (s) (int)", value=0, type=int, width=200 + ) + + explain_z_score = pn.pane.Markdown( + """ + ***Note :***
- Details about z-score computation methods are explained in the GitHub wiki.<br>
- The details will help the user decide which computation method to use for
+            their data.<br>
- Baseline Window Parameters should be kept 0 unless you are using the baseline<br>
+ z-score computation method. The parameters are in seconds. + """, + width=580, + ) + + explain_nsec = pn.pane.Markdown( + """ + - ***Time Interval :*** To omit bursts of event timestamps, user defined time interval + is set so that if the time difference between two timestamps is less than this defined time + interval, it will be deleted for the calculation of PSTH. + - ***Compute Cross-correlation :*** Make this parameter ```True```, when user wants + to compute cross-correlation between PSTHs of two different signals or signals + recorded from different brain regions. + """, + width=580, + ) + + nSecPrev = pn.widgets.LiteralInput(name="Seconds before 0 (int)", value=-10, type=int, width=120) + + nSecPost = pn.widgets.LiteralInput(name="Seconds after 0 (int)", value=20, type=int, width=120) + + computeCorr = pn.widgets.Select( + name="Compute Cross-correlation (bool)", options=[True, False], value=False, width=200 + ) + + timeInterval = pn.widgets.LiteralInput(name="Time Interval (s)", value=2, type=int, width=120) + + use_time_or_trials = pn.widgets.Select( + name="Bin PSTH trials (str)", options=["Time (min)", "# of trials"], value="Time (min)", width=120 + ) + + bin_psth_trials = pn.widgets.LiteralInput( + name="Time(min) / # of trials \n for binning? (int)", value=0, type=int, width=200 + ) + + explain_baseline = pn.pane.Markdown( + """ + ***Note :***
- If the user does not want baseline correction,
+            set both parameters to 0.<br>
- If the first event timestamp is less than the length of the baseline
+            window, it will be rejected in the PSTH computation step.<br>
+ - Baseline parameters must be within the PSTH parameters + set in the PSTH parameters section. + """, + width=580, + ) + + baselineCorrectionStart = pn.widgets.LiteralInput( + name="Baseline Correction Start time(int)", value=-5, type=int, width=200 + ) + + baselineCorrectionEnd = pn.widgets.LiteralInput( + name="Baseline Correction End time(int)", value=0, type=int, width=200 + ) + + zscore_param_wd = pn.WidgetBox( + "### Z-score Parameters", + explain_z_score, + z_score_computation, + pn.Row(baseline_wd_strt, baseline_wd_end), + width=600, + ) + + psth_param_wd = pn.WidgetBox( + "### PSTH Parameters", + explain_nsec, + pn.Row(nSecPrev, nSecPost, computeCorr), + pn.Row(timeInterval, use_time_or_trials, bin_psth_trials), + width=600, + ) + + baseline_param_wd = pn.WidgetBox( + "### Baseline Parameters", + explain_baseline, + pn.Row(baselineCorrectionStart, baselineCorrectionEnd), + width=600, + ) + peak_explain = pn.pane.Markdown( + """ + ***Note :***
- Peak and area are computed within the window set below.<br>
+ - Peak and AUC parameters must be within the PSTH parameters set in the PSTH parameters section.
+ - Please make sure when user changes the parameters in the table below, click on any other cell after + changing a value in a particular cell. + """, + width=580, + ) + + start_end_point_df = pd.DataFrame( + { + "Peak Start time": [-5, 0, 5, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan], + "Peak End time": [0, 3, 10, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan], + } + ) + + df_widget = pn.widgets.Tabulator(start_end_point_df, name="DataFrame", show_index=False, widths=280) + + peak_param_wd = pn.WidgetBox("### Peak and AUC Parameters", peak_explain, df_widget, width=600) + + mark_down_2 = pn.pane.Markdown( + """**Select folders for the average analysis from the file selector below**""", width=600 + ) + + files_2 = pn.widgets.FileSelector(folder_path, name="folderNamesForAvg", width=950) + + averageForGroup = pn.widgets.Select(name="Average Group? (bool)", value=False, options=[True, False], width=435) + + visualizeAverageResults = pn.widgets.Select( + name="Visualize Average Results? (bool)", value=False, options=[True, False], width=435 + ) + + visualize_zscore_or_dff = pn.widgets.Select( + name="z-score or \u0394F/F? (for visualization)", options=["z_score", "dff"], width=435 + ) + + individual_analysis_wd_2 = pn.Column( + explain_time_artifacts, + pn.Row(numberOfCores, combine_data), + isosbestic_control, + timeForLightsTurnOn, + moving_avg_filter, + computePsth, + transients, + plot_zScore_dff, + moving_wd, + pn.Row(highAmpFilt, transientsThresh), + no_channels_np, + pn.Row(removeArtifacts, artifactsRemovalMethod), + ) + + group_analysis_wd_1 = pn.Column(mark_down_2, files_2, averageForGroup, width=800) + + visualization_wd = pn.Row(visualize_zscore_or_dff, pn.Spacer(width=60), visualizeAverageResults) + + def getInputParameters(self): + abspath = getAbsPath() + inputParameters = { + "abspath": abspath[0], + "folderNames": files_1.value, + "modality": modality_selector.value, + "numberOfCores": numberOfCores.value, + "combine_data": combine_data.value, + "isosbestic_control": isosbestic_control.value, + "timeForLightsTurnOn": timeForLightsTurnOn.value, + "filter_window": moving_avg_filter.value, + "removeArtifacts": removeArtifacts.value, + "artifactsRemovalMethod": artifactsRemovalMethod.value, + "noChannels": no_channels_np.value, + "zscore_method": z_score_computation.value, + "baselineWindowStart": baseline_wd_strt.value, + "baselineWindowEnd": baseline_wd_end.value, + "nSecPrev": nSecPrev.value, + "nSecPost": nSecPost.value, + "computeCorr": computeCorr.value, + "timeInterval": timeInterval.value, + "bin_psth_trials": bin_psth_trials.value, + "use_time_or_trials": use_time_or_trials.value, + "baselineCorrectionStart": baselineCorrectionStart.value, + "baselineCorrectionEnd": baselineCorrectionEnd.value, + "peak_startPoint": list(df_widget.value["Peak Start time"]), # startPoint.value, + "peak_endPoint": list(df_widget.value["Peak End time"]), # endPoint.value, + "selectForComputePsth": computePsth.value, + "selectForTransientsComputation": transients.value, + "moving_window": moving_wd.value, + "highAmpFilt": highAmpFilt.value, + "transientsThresh": transientsThresh.value, + "plot_zScore_dff": plot_zScore_dff.value, + "visualize_zscore_or_dff": visualize_zscore_or_dff.value, + "folderNamesForAvg": files_2.value, + "averageForGroup": averageForGroup.value, + "visualizeAverageResults": visualizeAverageResults.value, + } + return inputParameters diff --git a/src/guppy/savingInputParameters.py b/src/guppy/savingInputParameters.py index 3bcff3c..07c400a 100644 
--- a/src/guppy/savingInputParameters.py +++ b/src/guppy/savingInputParameters.py @@ -6,7 +6,6 @@ from threading import Thread import numpy as np -import pandas as pd import panel as pn from .frontend.path_selection import get_folder_path @@ -23,305 +22,6 @@ def savingInputParameters(): folder_path = get_folder_path() current_dir = os.getcwd() - # progress bars = PB - read_progress = pn.indicators.Progress(name="Progress", value=100, max=100, width=300) - extract_progress = pn.indicators.Progress(name="Progress", value=100, max=100, width=300) - psth_progress = pn.indicators.Progress(name="Progress", value=100, max=100, width=300) - - template = pn.template.BootstrapTemplate(title="Input Parameters GUI") - - mark_down_1 = pn.pane.Markdown("""**Select folders for the analysis from the file selector below**""", width=600) - - files_1 = pn.widgets.FileSelector(folder_path, name="folderNames", width=950) - - explain_modality = pn.pane.Markdown( - """ - **Data Modality:** Select the type of data acquisition system used for your recordings: - - **tdt**: Tucker-Davis Technologies system - - **csv**: Generic CSV format - - **doric**: Doric Photometry system - - **npm**: Neurophotometrics system - """, - width=600, - ) - - modality_selector = pn.widgets.Select( - name="Data Modality", value="tdt", options=["tdt", "csv", "doric", "npm"], width=320 - ) - - explain_time_artifacts = pn.pane.Markdown( - """ - - ***Number of cores :*** Number of cores used for analysis. Try to - keep it less than the number of cores in your machine. - - ***Combine Data? :*** Make this parameter ``` True ``` if user wants to combine - the data, especially when there is two different - data files for the same recording session.
- - ***Isosbestic Control Channel? :*** Make this parameter ``` False ``` if user - does not want to use isosbestic control channel in the analysis.
- - ***Eliminate first few seconds :*** It is the parameter to cut out first x seconds - from the data. Default is 1 seconds.
- - ***Window for Moving Average filter :*** The filtering of signals - is done using moving average filter. Default window used for moving - average filter is 100 datapoints. Change it based on the requirement.
- - ***Moving Window (transients detection) :*** Transients in the z-score - and/or \u0394F/F are detected using this moving window. - Default is 15 seconds. Change it based on the requirement.
- - ***High Amplitude filtering threshold (HAFT) (transients detection) :*** High amplitude - events greater than x times the MAD above the median are filtered out. Here, x is - high amplitude filtering threshold. Default is 2. - - ***Transients detection threshold (TD Thresh):*** Peaks with local maxima greater than x times - the MAD above the median of the trace (after filtering high amplitude events) are detected - as transients. Here, x is transients detection threshold. Default is 3. - - ***Number of channels (Neurophotometrics only) :*** Number of - channels used while recording, when data files has no column names mentioning "Flags" - or "LedState". - - ***removeArtifacts? :*** Make this parameter ``` True``` if there are - artifacts and user wants to remove the artifacts. - - ***removeArtifacts method :*** Selecting ```concatenate``` will remove bad - chunks and concatenate the selected good chunks together. - Selecting ```replace with NaN``` will replace bad chunks with NaN - values. - """, - width=350, - ) - - timeForLightsTurnOn = pn.widgets.LiteralInput( - name="Eliminate first few seconds (int)", value=1, type=int, width=320 - ) - - isosbestic_control = pn.widgets.Select( - name="Isosbestic Control Channel? (bool)", value=True, options=[True, False], width=320 - ) - - numberOfCores = pn.widgets.LiteralInput(name="# of cores (int)", value=2, type=int, width=150) - - combine_data = pn.widgets.Select(name="Combine Data? (bool)", value=False, options=[True, False], width=150) - - computePsth = pn.widgets.Select( - name="z_score and/or \u0394F/F? (psth)", options=["z_score", "dff", "Both"], width=320 - ) - - transients = pn.widgets.Select( - name="z_score and/or \u0394F/F? (transients)", options=["z_score", "dff", "Both"], width=320 - ) - - plot_zScore_dff = pn.widgets.Select( - name="z-score plot and/or \u0394F/F plot?", options=["z_score", "dff", "Both", "None"], value="None", width=320 - ) - - moving_wd = pn.widgets.LiteralInput( - name="Moving Window for transients detection (s) (int)", value=15, type=int, width=320 - ) - - highAmpFilt = pn.widgets.LiteralInput(name="HAFT (int)", value=2, type=int, width=150) - - transientsThresh = pn.widgets.LiteralInput(name="TD Thresh (int)", value=3, type=int, width=150) - - moving_avg_filter = pn.widgets.LiteralInput( - name="Window for Moving Average filter (int)", value=100, type=int, width=320 - ) - - removeArtifacts = pn.widgets.Select(name="removeArtifacts? (bool)", value=False, options=[True, False], width=150) - - artifactsRemovalMethod = pn.widgets.Select( - name="removeArtifacts method", value="concatenate", options=["concatenate", "replace with NaN"], width=150 - ) - - no_channels_np = pn.widgets.LiteralInput( - name="Number of channels (Neurophotometrics only)", value=2, type=int, width=320 - ) - - z_score_computation = pn.widgets.Select( - name="z-score computation Method", - options=["standard z-score", "baseline z-score", "modified z-score"], - value="standard z-score", - width=200, - ) - - baseline_wd_strt = pn.widgets.LiteralInput( - name="Baseline Window Start Time (s) (int)", value=0, type=int, width=200 - ) - baseline_wd_end = pn.widgets.LiteralInput(name="Baseline Window End Time (s) (int)", value=0, type=int, width=200) - - explain_z_score = pn.pane.Markdown( - """ - ***Note :***
- - Details about z-score computation methods are explained in Github wiki.
- - The details will make user understand what computation method to use for - their data.
- - Baseline Window Parameters should be kept 0 unless you are using baseline
- z-score computation method. The parameters are in seconds. - """, - width=580, - ) - - explain_nsec = pn.pane.Markdown( - """ - - ***Time Interval :*** To omit bursts of event timestamps, user defined time interval - is set so that if the time difference between two timestamps is less than this defined time - interval, it will be deleted for the calculation of PSTH. - - ***Compute Cross-correlation :*** Make this parameter ```True```, when user wants - to compute cross-correlation between PSTHs of two different signals or signals - recorded from different brain regions. - """, - width=580, - ) - - nSecPrev = pn.widgets.LiteralInput(name="Seconds before 0 (int)", value=-10, type=int, width=120) - - nSecPost = pn.widgets.LiteralInput(name="Seconds after 0 (int)", value=20, type=int, width=120) - - computeCorr = pn.widgets.Select( - name="Compute Cross-correlation (bool)", options=[True, False], value=False, width=200 - ) - - timeInterval = pn.widgets.LiteralInput(name="Time Interval (s)", value=2, type=int, width=120) - - use_time_or_trials = pn.widgets.Select( - name="Bin PSTH trials (str)", options=["Time (min)", "# of trials"], value="Time (min)", width=120 - ) - - bin_psth_trials = pn.widgets.LiteralInput( - name="Time(min) / # of trials \n for binning? (int)", value=0, type=int, width=200 - ) - - explain_baseline = pn.pane.Markdown( - """ - ***Note :***
- - If user does not want to do baseline correction, - put both parameters 0.
- - If the first event timestamp is less than the length of baseline - window, it will be rejected in the PSTH computation step.
- - Baseline parameters must be within the PSTH parameters - set in the PSTH parameters section. - """, - width=580, - ) - - baselineCorrectionStart = pn.widgets.LiteralInput( - name="Baseline Correction Start time(int)", value=-5, type=int, width=200 - ) - - baselineCorrectionEnd = pn.widgets.LiteralInput( - name="Baseline Correction End time(int)", value=0, type=int, width=200 - ) - - zscore_param_wd = pn.WidgetBox( - "### Z-score Parameters", - explain_z_score, - z_score_computation, - pn.Row(baseline_wd_strt, baseline_wd_end), - width=600, - ) - - psth_param_wd = pn.WidgetBox( - "### PSTH Parameters", - explain_nsec, - pn.Row(nSecPrev, nSecPost, computeCorr), - pn.Row(timeInterval, use_time_or_trials, bin_psth_trials), - width=600, - ) - - baseline_param_wd = pn.WidgetBox( - "### Baseline Parameters", explain_baseline, pn.Row(baselineCorrectionStart, baselineCorrectionEnd), width=600 - ) - peak_explain = pn.pane.Markdown( - """ - ***Note :***
- - Peak and area are computed between the window set below.
- - Peak and AUC parameters must be within the PSTH parameters set in the PSTH parameters section.
- - Please make sure when user changes the parameters in the table below, click on any other cell after - changing a value in a particular cell. - """, - width=580, - ) - - start_end_point_df = pd.DataFrame( - { - "Peak Start time": [-5, 0, 5, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan], - "Peak End time": [0, 3, 10, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan], - } - ) - - df_widget = pn.widgets.Tabulator(start_end_point_df, name="DataFrame", show_index=False, widths=280) - - peak_param_wd = pn.WidgetBox("### Peak and AUC Parameters", peak_explain, df_widget, width=600) - - mark_down_2 = pn.pane.Markdown( - """**Select folders for the average analysis from the file selector below**""", width=600 - ) - - files_2 = pn.widgets.FileSelector(folder_path, name="folderNamesForAvg", width=950) - - averageForGroup = pn.widgets.Select(name="Average Group? (bool)", value=False, options=[True, False], width=435) - - visualizeAverageResults = pn.widgets.Select( - name="Visualize Average Results? (bool)", value=False, options=[True, False], width=435 - ) - - visualize_zscore_or_dff = pn.widgets.Select( - name="z-score or \u0394F/F? (for visualization)", options=["z_score", "dff"], width=435 - ) - - individual_analysis_wd_2 = pn.Column( - explain_time_artifacts, - pn.Row(numberOfCores, combine_data), - isosbestic_control, - timeForLightsTurnOn, - moving_avg_filter, - computePsth, - transients, - plot_zScore_dff, - moving_wd, - pn.Row(highAmpFilt, transientsThresh), - no_channels_np, - pn.Row(removeArtifacts, artifactsRemovalMethod), - ) - - group_analysis_wd_1 = pn.Column(mark_down_2, files_2, averageForGroup, width=800) - - visualization_wd = pn.Row(visualize_zscore_or_dff, pn.Spacer(width=60), visualizeAverageResults) - - def getInputParameters(): - abspath = getAbsPath() - inputParameters = { - "abspath": abspath[0], - "folderNames": files_1.value, - "modality": modality_selector.value, - "numberOfCores": numberOfCores.value, - "combine_data": combine_data.value, - "isosbestic_control": isosbestic_control.value, - "timeForLightsTurnOn": timeForLightsTurnOn.value, - "filter_window": moving_avg_filter.value, - "removeArtifacts": removeArtifacts.value, - "artifactsRemovalMethod": artifactsRemovalMethod.value, - "noChannels": no_channels_np.value, - "zscore_method": z_score_computation.value, - "baselineWindowStart": baseline_wd_strt.value, - "baselineWindowEnd": baseline_wd_end.value, - "nSecPrev": nSecPrev.value, - "nSecPost": nSecPost.value, - "computeCorr": computeCorr.value, - "timeInterval": timeInterval.value, - "bin_psth_trials": bin_psth_trials.value, - "use_time_or_trials": use_time_or_trials.value, - "baselineCorrectionStart": baselineCorrectionStart.value, - "baselineCorrectionEnd": baselineCorrectionEnd.value, - "peak_startPoint": list(df_widget.value["Peak Start time"]), # startPoint.value, - "peak_endPoint": list(df_widget.value["Peak End time"]), # endPoint.value, - "selectForComputePsth": computePsth.value, - "selectForTransientsComputation": transients.value, - "moving_window": moving_wd.value, - "highAmpFilt": highAmpFilt.value, - "transientsThresh": transientsThresh.value, - "plot_zScore_dff": plot_zScore_dff.value, - "visualize_zscore_or_dff": visualize_zscore_or_dff.value, - "folderNamesForAvg": files_2.value, - "averageForGroup": averageForGroup.value, - "visualizeAverageResults": visualizeAverageResults.value, - } - return inputParameters - def readRawData(): inputParameters = getInputParameters() subprocess.call([sys.executable, "-m", "guppy.readTevTsq", 
json.dumps(inputParameters)]) From ef2cf952d00a3e03f5d9d42e3c75860e8e5540f8 Mon Sep 17 00:00:00 2001 From: pauladkisson Date: Tue, 27 Jan 2026 10:20:59 -0800 Subject: [PATCH 06/53] added Sidebar class --- src/guppy/frontend/input_parameters.py | 271 +++++++++++++++---------- src/guppy/frontend/sidebar.py | 77 +++++++ src/guppy/savingInputParameters.py | 215 +++++++------------- 3 files changed, 310 insertions(+), 253 deletions(-) create mode 100644 src/guppy/frontend/sidebar.py diff --git a/src/guppy/frontend/input_parameters.py b/src/guppy/frontend/input_parameters.py index 28c74f9..b6d0a96 100644 --- a/src/guppy/frontend/input_parameters.py +++ b/src/guppy/frontend/input_parameters.py @@ -1,4 +1,5 @@ import logging +import os import numpy as np import pandas as pd @@ -7,21 +8,48 @@ logger = logging.getLogger(__name__) -class InputParametersGUI: - def __init__(self, folder_path): - read_progress = pn.indicators.Progress(name="Progress", value=100, max=100, width=300) - extract_progress = pn.indicators.Progress(name="Progress", value=100, max=100, width=300) - psth_progress = pn.indicators.Progress(name="Progress", value=100, max=100, width=300) +def checkSameLocation(arr, abspath): + # abspath = [] + for i in range(len(arr)): + abspath.append(os.path.dirname(arr[i])) + abspath = np.asarray(abspath) + abspath = np.unique(abspath) + if len(abspath) > 1: + logger.error("All the folders selected should be at the same location") + raise Exception("All the folders selected should be at the same location") + + return abspath + + +def getAbsPath(files_1, files_2): + arr_1, arr_2 = files_1.value, files_2.value + if len(arr_1) == 0 and len(arr_2) == 0: + logger.error("No folder is selected for analysis") + raise Exception("No folder is selected for analysis") - template = pn.template.BootstrapTemplate(title="Input Parameters GUI") + abspath = [] + if len(arr_1) > 0: + abspath = checkSameLocation(arr_1, abspath) + else: + abspath = checkSameLocation(arr_2, abspath) - mark_down_1 = pn.pane.Markdown( + abspath = np.unique(abspath) + if len(abspath) > 1: + logger.error("All the folders selected should be at the same location") + raise Exception("All the folders selected should be at the same location") + return abspath + + +class InputParametersGUI: + def __init__(self, *, template, folder_path): + self.template = template + self.mark_down_1 = pn.pane.Markdown( """**Select folders for the analysis from the file selector below**""", width=600 ) - files_1 = pn.widgets.FileSelector(folder_path, name="folderNames", width=950) + self.files_1 = pn.widgets.FileSelector(folder_path, name="folderNames", width=950) - explain_modality = pn.pane.Markdown( + self.explain_modality = pn.pane.Markdown( """ **Data Modality:** Select the type of data acquisition system used for your recordings: - **tdt**: Tucker-Davis Technologies system @@ -32,11 +60,11 @@ def __init__(self, folder_path): width=600, ) - modality_selector = pn.widgets.Select( + self.modality_selector = pn.widgets.Select( name="Data Modality", value="tdt", options=["tdt", "csv", "doric", "npm"], width=320 ) - explain_time_artifacts = pn.pane.Markdown( + self.explain_time_artifacts = pn.pane.Markdown( """ - ***Number of cores :*** Number of cores used for analysis. Try to keep it less than the number of cores in your machine. 
@@ -72,72 +100,74 @@ def __init__(self, folder_path): width=350, ) - timeForLightsTurnOn = pn.widgets.LiteralInput( + self.timeForLightsTurnOn = pn.widgets.LiteralInput( name="Eliminate first few seconds (int)", value=1, type=int, width=320 ) - isosbestic_control = pn.widgets.Select( + self.isosbestic_control = pn.widgets.Select( name="Isosbestic Control Channel? (bool)", value=True, options=[True, False], width=320 ) - numberOfCores = pn.widgets.LiteralInput(name="# of cores (int)", value=2, type=int, width=150) + self.numberOfCores = pn.widgets.LiteralInput(name="# of cores (int)", value=2, type=int, width=150) - combine_data = pn.widgets.Select(name="Combine Data? (bool)", value=False, options=[True, False], width=150) + self.combine_data = pn.widgets.Select( + name="Combine Data? (bool)", value=False, options=[True, False], width=150 + ) - computePsth = pn.widgets.Select( + self.computePsth = pn.widgets.Select( name="z_score and/or \u0394F/F? (psth)", options=["z_score", "dff", "Both"], width=320 ) - transients = pn.widgets.Select( + self.transients = pn.widgets.Select( name="z_score and/or \u0394F/F? (transients)", options=["z_score", "dff", "Both"], width=320 ) - plot_zScore_dff = pn.widgets.Select( + self.plot_zScore_dff = pn.widgets.Select( name="z-score plot and/or \u0394F/F plot?", options=["z_score", "dff", "Both", "None"], value="None", width=320, ) - moving_wd = pn.widgets.LiteralInput( + self.moving_wd = pn.widgets.LiteralInput( name="Moving Window for transients detection (s) (int)", value=15, type=int, width=320 ) - highAmpFilt = pn.widgets.LiteralInput(name="HAFT (int)", value=2, type=int, width=150) + self.highAmpFilt = pn.widgets.LiteralInput(name="HAFT (int)", value=2, type=int, width=150) - transientsThresh = pn.widgets.LiteralInput(name="TD Thresh (int)", value=3, type=int, width=150) + self.transientsThresh = pn.widgets.LiteralInput(name="TD Thresh (int)", value=3, type=int, width=150) - moving_avg_filter = pn.widgets.LiteralInput( + self.moving_avg_filter = pn.widgets.LiteralInput( name="Window for Moving Average filter (int)", value=100, type=int, width=320 ) - removeArtifacts = pn.widgets.Select( + self.removeArtifacts = pn.widgets.Select( name="removeArtifacts? (bool)", value=False, options=[True, False], width=150 ) - artifactsRemovalMethod = pn.widgets.Select( + self.artifactsRemovalMethod = pn.widgets.Select( name="removeArtifacts method", value="concatenate", options=["concatenate", "replace with NaN"], width=150 ) - no_channels_np = pn.widgets.LiteralInput( + self.no_channels_np = pn.widgets.LiteralInput( name="Number of channels (Neurophotometrics only)", value=2, type=int, width=320 ) - z_score_computation = pn.widgets.Select( + self.z_score_computation = pn.widgets.Select( name="z-score computation Method", options=["standard z-score", "baseline z-score", "modified z-score"], value="standard z-score", width=200, ) - baseline_wd_strt = pn.widgets.LiteralInput( + self.baseline_wd_strt = pn.widgets.LiteralInput( name="Baseline Window Start Time (s) (int)", value=0, type=int, width=200 ) - baseline_wd_end = pn.widgets.LiteralInput( + self.baseline_wd_end = pn.widgets.LiteralInput( name="Baseline Window End Time (s) (int)", value=0, type=int, width=200 ) - explain_z_score = pn.pane.Markdown( + self.explain_z_score = pn.pane.Markdown( """ ***Note :***
- Details about z-score computation methods are explained in Github wiki.
@@ -149,7 +179,7 @@ def __init__(self, folder_path): width=580, ) - explain_nsec = pn.pane.Markdown( + self.explain_nsec = pn.pane.Markdown( """ - ***Time Interval :*** To omit bursts of event timestamps, user defined time interval is set so that if the time difference between two timestamps is less than this defined time @@ -161,25 +191,25 @@ def __init__(self, folder_path): width=580, ) - nSecPrev = pn.widgets.LiteralInput(name="Seconds before 0 (int)", value=-10, type=int, width=120) + self.nSecPrev = pn.widgets.LiteralInput(name="Seconds before 0 (int)", value=-10, type=int, width=120) - nSecPost = pn.widgets.LiteralInput(name="Seconds after 0 (int)", value=20, type=int, width=120) + self.nSecPost = pn.widgets.LiteralInput(name="Seconds after 0 (int)", value=20, type=int, width=120) - computeCorr = pn.widgets.Select( + self.computeCorr = pn.widgets.Select( name="Compute Cross-correlation (bool)", options=[True, False], value=False, width=200 ) - timeInterval = pn.widgets.LiteralInput(name="Time Interval (s)", value=2, type=int, width=120) + self.timeInterval = pn.widgets.LiteralInput(name="Time Interval (s)", value=2, type=int, width=120) - use_time_or_trials = pn.widgets.Select( + self.use_time_or_trials = pn.widgets.Select( name="Bin PSTH trials (str)", options=["Time (min)", "# of trials"], value="Time (min)", width=120 ) - bin_psth_trials = pn.widgets.LiteralInput( + self.bin_psth_trials = pn.widgets.LiteralInput( name="Time(min) / # of trials \n for binning? (int)", value=0, type=int, width=200 ) - explain_baseline = pn.pane.Markdown( + self.explain_baseline = pn.pane.Markdown( """ ***Note :***
- If user does not want to do baseline correction, @@ -192,37 +222,37 @@ def __init__(self, folder_path): width=580, ) - baselineCorrectionStart = pn.widgets.LiteralInput( + self.baselineCorrectionStart = pn.widgets.LiteralInput( name="Baseline Correction Start time(int)", value=-5, type=int, width=200 ) - baselineCorrectionEnd = pn.widgets.LiteralInput( + self.baselineCorrectionEnd = pn.widgets.LiteralInput( name="Baseline Correction End time(int)", value=0, type=int, width=200 ) - zscore_param_wd = pn.WidgetBox( + self.zscore_param_wd = pn.WidgetBox( "### Z-score Parameters", - explain_z_score, - z_score_computation, - pn.Row(baseline_wd_strt, baseline_wd_end), + self.explain_z_score, + self.z_score_computation, + pn.Row(self.baseline_wd_strt, self.baseline_wd_end), width=600, ) - psth_param_wd = pn.WidgetBox( + self.psth_param_wd = pn.WidgetBox( "### PSTH Parameters", - explain_nsec, - pn.Row(nSecPrev, nSecPost, computeCorr), - pn.Row(timeInterval, use_time_or_trials, bin_psth_trials), + self.explain_nsec, + pn.Row(self.nSecPrev, self.nSecPost, self.computeCorr), + pn.Row(self.timeInterval, self.use_time_or_trials, self.bin_psth_trials), width=600, ) - baseline_param_wd = pn.WidgetBox( + self.baseline_param_wd = pn.WidgetBox( "### Baseline Parameters", - explain_baseline, - pn.Row(baselineCorrectionStart, baselineCorrectionEnd), + self.explain_baseline, + pn.Row(self.baselineCorrectionStart, self.baselineCorrectionEnd), width=600, ) - peak_explain = pn.pane.Markdown( + self.peak_explain = pn.pane.Markdown( """ ***Note :***
- Peak and area are computed between the window set below.
@@ -233,88 +263,113 @@ def __init__(self, folder_path): width=580, ) - start_end_point_df = pd.DataFrame( + self.start_end_point_df = pd.DataFrame( { "Peak Start time": [-5, 0, 5, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan], "Peak End time": [0, 3, 10, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan], } ) - df_widget = pn.widgets.Tabulator(start_end_point_df, name="DataFrame", show_index=False, widths=280) + self.df_widget = pn.widgets.Tabulator(self.start_end_point_df, name="DataFrame", show_index=False, widths=280) - peak_param_wd = pn.WidgetBox("### Peak and AUC Parameters", peak_explain, df_widget, width=600) + self.peak_param_wd = pn.WidgetBox("### Peak and AUC Parameters", self.peak_explain, self.df_widget, width=600) - mark_down_2 = pn.pane.Markdown( + self.mark_down_2 = pn.pane.Markdown( """**Select folders for the average analysis from the file selector below**""", width=600 ) - files_2 = pn.widgets.FileSelector(folder_path, name="folderNamesForAvg", width=950) + self.files_2 = pn.widgets.FileSelector(folder_path, name="folderNamesForAvg", width=950) - averageForGroup = pn.widgets.Select(name="Average Group? (bool)", value=False, options=[True, False], width=435) + self.averageForGroup = pn.widgets.Select( + name="Average Group? (bool)", value=False, options=[True, False], width=435 + ) - visualizeAverageResults = pn.widgets.Select( + self.visualizeAverageResults = pn.widgets.Select( name="Visualize Average Results? (bool)", value=False, options=[True, False], width=435 ) - visualize_zscore_or_dff = pn.widgets.Select( + self.visualize_zscore_or_dff = pn.widgets.Select( name="z-score or \u0394F/F? (for visualization)", options=["z_score", "dff"], width=435 ) - individual_analysis_wd_2 = pn.Column( - explain_time_artifacts, - pn.Row(numberOfCores, combine_data), - isosbestic_control, - timeForLightsTurnOn, - moving_avg_filter, - computePsth, - transients, - plot_zScore_dff, - moving_wd, - pn.Row(highAmpFilt, transientsThresh), - no_channels_np, - pn.Row(removeArtifacts, artifactsRemovalMethod), + self.individual_analysis_wd_2 = pn.Column( + self.explain_time_artifacts, + pn.Row(self.numberOfCores, self.combine_data), + self.isosbestic_control, + self.timeForLightsTurnOn, + self.moving_avg_filter, + self.computePsth, + self.transients, + self.plot_zScore_dff, + self.moving_wd, + pn.Row(self.highAmpFilt, self.transientsThresh), + self.no_channels_np, + pn.Row(self.removeArtifacts, self.artifactsRemovalMethod), + ) + + self.group_analysis_wd_1 = pn.Column(self.mark_down_2, self.files_2, self.averageForGroup, width=800) + + self.visualization_wd = pn.Row(self.visualize_zscore_or_dff, pn.Spacer(width=60), self.visualizeAverageResults) + + self.psth_baseline_param = pn.Column( + self.zscore_param_wd, self.psth_param_wd, self.baseline_param_wd, self.peak_param_wd + ) + + self.widget = pn.Column( + self.mark_down_1, + self.files_1, + self.explain_modality, + self.modality_selector, + pn.Row(self.individual_analysis_wd_2, self.psth_baseline_param), ) - group_analysis_wd_1 = pn.Column(mark_down_2, files_2, averageForGroup, width=800) + styles = dict(background="WhiteSmoke") + self.individual = pn.Card(self.widget, title="Individual Analysis", styles=styles, width=1000) + self.group = pn.Card(self.group_analysis_wd_1, title="Group Analysis", styles=styles, width=1000) + self.visualize = pn.Card(self.visualization_wd, title="Visualization Parameters", styles=styles, width=1000) + self.add_to_template() - visualization_wd = pn.Row(visualize_zscore_or_dff, pn.Spacer(width=60), 
visualizeAverageResults) + def add_to_template(self): + self.template.main.append(self.individual) + self.template.main.append(self.group) + self.template.main.append(self.visualize) def getInputParameters(self): - abspath = getAbsPath() + abspath = getAbsPath(self.files_1, self.files_2) inputParameters = { "abspath": abspath[0], - "folderNames": files_1.value, - "modality": modality_selector.value, - "numberOfCores": numberOfCores.value, - "combine_data": combine_data.value, - "isosbestic_control": isosbestic_control.value, - "timeForLightsTurnOn": timeForLightsTurnOn.value, - "filter_window": moving_avg_filter.value, - "removeArtifacts": removeArtifacts.value, - "artifactsRemovalMethod": artifactsRemovalMethod.value, - "noChannels": no_channels_np.value, - "zscore_method": z_score_computation.value, - "baselineWindowStart": baseline_wd_strt.value, - "baselineWindowEnd": baseline_wd_end.value, - "nSecPrev": nSecPrev.value, - "nSecPost": nSecPost.value, - "computeCorr": computeCorr.value, - "timeInterval": timeInterval.value, - "bin_psth_trials": bin_psth_trials.value, - "use_time_or_trials": use_time_or_trials.value, - "baselineCorrectionStart": baselineCorrectionStart.value, - "baselineCorrectionEnd": baselineCorrectionEnd.value, - "peak_startPoint": list(df_widget.value["Peak Start time"]), # startPoint.value, - "peak_endPoint": list(df_widget.value["Peak End time"]), # endPoint.value, - "selectForComputePsth": computePsth.value, - "selectForTransientsComputation": transients.value, - "moving_window": moving_wd.value, - "highAmpFilt": highAmpFilt.value, - "transientsThresh": transientsThresh.value, - "plot_zScore_dff": plot_zScore_dff.value, - "visualize_zscore_or_dff": visualize_zscore_or_dff.value, - "folderNamesForAvg": files_2.value, - "averageForGroup": averageForGroup.value, - "visualizeAverageResults": visualizeAverageResults.value, + "folderNames": self.files_1.value, + "modality": self.modality_selector.value, + "numberOfCores": self.numberOfCores.value, + "combine_data": self.combine_data.value, + "isosbestic_control": self.isosbestic_control.value, + "timeForLightsTurnOn": self.timeForLightsTurnOn.value, + "filter_window": self.moving_avg_filter.value, + "removeArtifacts": self.removeArtifacts.value, + "artifactsRemovalMethod": self.artifactsRemovalMethod.value, + "noChannels": self.no_channels_np.value, + "zscore_method": self.z_score_computation.value, + "baselineWindowStart": self.baseline_wd_strt.value, + "baselineWindowEnd": self.baseline_wd_end.value, + "nSecPrev": self.nSecPrev.value, + "nSecPost": self.nSecPost.value, + "computeCorr": self.computeCorr.value, + "timeInterval": self.timeInterval.value, + "bin_psth_trials": self.bin_psth_trials.value, + "use_time_or_trials": self.use_time_or_trials.value, + "baselineCorrectionStart": self.baselineCorrectionStart.value, + "baselineCorrectionEnd": self.baselineCorrectionEnd.value, + "peak_startPoint": list(self.df_widget.value["Peak Start time"]), # startPoint.value, + "peak_endPoint": list(self.df_widget.value["Peak End time"]), # endPoint.value, + "selectForComputePsth": self.computePsth.value, + "selectForTransientsComputation": self.transients.value, + "moving_window": self.moving_wd.value, + "highAmpFilt": self.highAmpFilt.value, + "transientsThresh": self.transientsThresh.value, + "plot_zScore_dff": self.plot_zScore_dff.value, + "visualize_zscore_or_dff": self.visualize_zscore_or_dff.value, + "folderNamesForAvg": self.files_2.value, + "averageForGroup": self.averageForGroup.value, + "visualizeAverageResults": 
self.visualizeAverageResults.value, } return inputParameters diff --git a/src/guppy/frontend/sidebar.py b/src/guppy/frontend/sidebar.py new file mode 100644 index 0000000..6095a56 --- /dev/null +++ b/src/guppy/frontend/sidebar.py @@ -0,0 +1,77 @@ +import logging + +import panel as pn + +logger = logging.getLogger(__name__) + + +class Sidebar: + def __init__(self, template): + self.template = template + self.setup_markdown() + self.setup_buttons() + self.setup_progress_bars() + + def setup_markdown(self): + self.mark_down_ip = pn.pane.Markdown("""**Step 1 : Save Input Parameters**""", width=300) + self.mark_down_ip_note = pn.pane.Markdown( + """***Note : ***
+ - Save Input Parameters saves the input parameters used for the analysis + to every folder you selected for the analysis (useful for future + reference). All analysis steps will still run without saving input parameters. + """, + width=300, + ) + self.mark_down_storenames = pn.pane.Markdown( + """**Step 2 : Open Storenames GUI
and save storenames**""", width=300 + ) + self.mark_down_read = pn.pane.Markdown("""**Step 3 : Read Raw Data**""", width=300) + self.mark_down_extract = pn.pane.Markdown( + """**Step 4 : Extract timestamps
and its correction**""", width=300 + ) + self.mark_down_psth = pn.pane.Markdown("""**Step 5 : PSTH Computation**""", width=300) + self.mark_down_visualization = pn.pane.Markdown("""**Step 6 : Visualization**""", width=300) + + def setup_buttons(self): + self.open_storesList = pn.widgets.Button( + name="Open Storenames GUI", button_type="primary", width=300, align="end" + ) + self.read_rawData = pn.widgets.Button(name="Read Raw Data", button_type="primary", width=300, align="end") + self.extract_ts = pn.widgets.Button( + name="Extract timestamps and it's correction", button_type="primary", width=300, align="end" + ) + self.psth_computation = pn.widgets.Button( + name="PSTH Computation", button_type="primary", width=300, align="end" + ) + self.open_visualization = pn.widgets.Button( + name="Open Visualization GUI", button_type="primary", width=300, align="end" + ) + self.save_button = pn.widgets.Button(name="Save to file...", button_type="primary", width=300, align="end") + + def attach_callbacks(self, button_name_to_onclick_fn: dict): + for button_name, onclick_fn in button_name_to_onclick_fn.items(): + button = getattr(self, button_name) + button.on_click(onclick_fn) + + def setup_progress_bars(self): + self.read_progress = pn.indicators.Progress(name="Progress", value=100, max=100, width=300) + self.extract_progress = pn.indicators.Progress(name="Progress", value=100, max=100, width=300) + self.psth_progress = pn.indicators.Progress(name="Progress", value=100, max=100, width=300) + + def add_to_template(self): + self.template.sidebar.append(self.mark_down_ip) + self.template.sidebar.append(self.mark_down_ip_note) + self.template.sidebar.append(self.save_button) + self.template.sidebar.append(self.mark_down_storenames) + self.template.sidebar.append(self.open_storesList) + self.template.sidebar.append(self.mark_down_read) + self.template.sidebar.append(self.read_rawData) + self.template.sidebar.append(self.read_progress) + self.template.sidebar.append(self.mark_down_extract) + self.template.sidebar.append(self.extract_ts) + self.template.sidebar.append(self.extract_progress) + self.template.sidebar.append(self.mark_down_psth) + self.template.sidebar.append(self.psth_computation) + self.template.sidebar.append(self.psth_progress) + self.template.sidebar.append(self.mark_down_visualization) + self.template.sidebar.append(self.open_visualization) diff --git a/src/guppy/savingInputParameters.py b/src/guppy/savingInputParameters.py index 07c400a..5498e08 100644 --- a/src/guppy/savingInputParameters.py +++ b/src/guppy/savingInputParameters.py @@ -5,96 +5,75 @@ import sys from threading import Thread -import numpy as np import panel as pn +from .frontend.input_parameters import InputParametersGUI from .frontend.path_selection import get_folder_path from .frontend.progress import readPBIncrementValues +from .frontend.sidebar import Sidebar from .saveStoresList import execute from .visualizePlot import visualizeResults logger = logging.getLogger(__name__) +def readRawData(input_parameters_gui): + inputParameters = input_parameters_gui.getInputParameters() + subprocess.call([sys.executable, "-m", "guppy.readTevTsq", json.dumps(inputParameters)]) + + +def extractTs(input_parameters_gui): + inputParameters = input_parameters_gui.getInputParameters() + subprocess.call([sys.executable, "-m", "guppy.preprocess", json.dumps(inputParameters)]) + + +def psthComputation(input_parameters_gui, current_dir): + inputParameters = input_parameters_gui.getInputParameters() + inputParameters["curr_dir"] = 
current_dir + subprocess.call([sys.executable, "-m", "guppy.computePsth", json.dumps(inputParameters)]) + + def savingInputParameters(): pn.extension() global folder_path folder_path = get_folder_path() current_dir = os.getcwd() - def readRawData(): - inputParameters = getInputParameters() - subprocess.call([sys.executable, "-m", "guppy.readTevTsq", json.dumps(inputParameters)]) - - def extractTs(): - inputParameters = getInputParameters() - subprocess.call([sys.executable, "-m", "guppy.preprocess", json.dumps(inputParameters)]) - - def psthComputation(): - inputParameters = getInputParameters() - inputParameters["curr_dir"] = current_dir - subprocess.call([sys.executable, "-m", "guppy.computePsth", json.dumps(inputParameters)]) - - def checkSameLocation(arr, abspath): - # abspath = [] - for i in range(len(arr)): - abspath.append(os.path.dirname(arr[i])) - abspath = np.asarray(abspath) - abspath = np.unique(abspath) - if len(abspath) > 1: - logger.error("All the folders selected should be at the same location") - raise Exception("All the folders selected should be at the same location") - - return abspath - - def getAbsPath(): - arr_1, arr_2 = files_1.value, files_2.value - if len(arr_1) == 0 and len(arr_2) == 0: - logger.error("No folder is selected for analysis") - raise Exception("No folder is selected for analysis") - - abspath = [] - if len(arr_1) > 0: - abspath = checkSameLocation(arr_1, abspath) - else: - abspath = checkSameLocation(arr_2, abspath) - - abspath = np.unique(abspath) - if len(abspath) > 1: - logger.error("All the folders selected should be at the same location") - raise Exception("All the folders selected should be at the same location") - return abspath + template = pn.template.BootstrapTemplate(title="Input Parameters GUI") + input_parameters_gui = InputParametersGUI(folder_path=folder_path, template=template) + sidebar = Sidebar(template=template) + # ------------------------------------------------------------------------------------------------------------------ + # onclick closure functions for sidebar buttons def onclickProcess(event=None): - + inputParameters = input_parameters_gui.getInputParameters() logger.debug("Saving Input Parameters file.") - abspath = getAbsPath() analysisParameters = { - "combine_data": combine_data.value, - "isosbestic_control": isosbestic_control.value, - "timeForLightsTurnOn": timeForLightsTurnOn.value, - "filter_window": moving_avg_filter.value, - "removeArtifacts": removeArtifacts.value, - "noChannels": no_channels_np.value, - "zscore_method": z_score_computation.value, - "baselineWindowStart": baseline_wd_strt.value, - "baselineWindowEnd": baseline_wd_end.value, - "nSecPrev": nSecPrev.value, - "nSecPost": nSecPost.value, - "timeInterval": timeInterval.value, - "bin_psth_trials": bin_psth_trials.value, - "use_time_or_trials": use_time_or_trials.value, - "baselineCorrectionStart": baselineCorrectionStart.value, - "baselineCorrectionEnd": baselineCorrectionEnd.value, - "peak_startPoint": list(df_widget.value["Peak Start time"]), # startPoint.value, - "peak_endPoint": list(df_widget.value["Peak End time"]), # endPoint.value, - "selectForComputePsth": computePsth.value, - "selectForTransientsComputation": transients.value, - "moving_window": moving_wd.value, - "highAmpFilt": highAmpFilt.value, - "transientsThresh": transientsThresh.value, + "combine_data": inputParameters["combine_data"], + "isosbestic_control": inputParameters["isosbestic_control"], + "timeForLightsTurnOn": inputParameters["timeForLightsTurnOn"], + "filter_window": 
inputParameters["filter_window"], + "removeArtifacts": inputParameters["removeArtifacts"], + "noChannels": inputParameters["noChannels"], + "zscore_method": inputParameters["zscore_method"], + "baselineWindowStart": inputParameters["baselineWindowStart"], + "baselineWindowEnd": inputParameters["baselineWindowEnd"], + "nSecPrev": inputParameters["nSecPrev"], + "nSecPost": inputParameters["nSecPost"], + "timeInterval": inputParameters["timeInterval"], + "bin_psth_trials": inputParameters["bin_psth_trials"], + "use_time_or_trials": inputParameters["use_time_or_trials"], + "baselineCorrectionStart": inputParameters["baselineCorrectionStart"], + "baselineCorrectionEnd": inputParameters["baselineCorrectionEnd"], + "peak_startPoint": inputParameters["peak_startPoint"], + "peak_endPoint": inputParameters["peak_endPoint"], + "selectForComputePsth": inputParameters["selectForComputePsth"], + "selectForTransientsComputation": inputParameters["selectForTransientsComputation"], + "moving_window": inputParameters["moving_window"], + "highAmpFilt": inputParameters["highAmpFilt"], + "transientsThresh": inputParameters["transientsThresh"], } - for folder in files_1.value: + for folder in inputParameters["folderNames"]: with open(os.path.join(folder, "GuPPyParamtersUsed.json"), "w") as f: json.dump(analysisParameters, f, indent=4) logger.info(f"Input Parameters file saved at {folder}") @@ -105,105 +84,51 @@ def onclickProcess(event=None): logger.info("Input Parameters File Saved.") def onclickStoresList(event=None): - inputParameters = getInputParameters() + inputParameters = input_parameters_gui.getInputParameters() execute(inputParameters) def onclickVisualization(event=None): - inputParameters = getInputParameters() + inputParameters = input_parameters_gui.getInputParameters() visualizeResults(inputParameters) def onclickreaddata(event=None): - thread = Thread(target=readRawData) + thread = Thread(target=readRawData, args=(input_parameters_gui,)) thread.start() - readPBIncrementValues(read_progress) + readPBIncrementValues(sidebar.read_progress) thread.join() def onclickextractts(event=None): - thread = Thread(target=extractTs) + thread = Thread(target=extractTs, args=(input_parameters_gui,)) thread.start() - readPBIncrementValues(extract_progress) + readPBIncrementValues(sidebar.extract_progress) thread.join() def onclickpsth(event=None): - thread = Thread(target=psthComputation) + thread = Thread(target=psthComputation, args=(input_parameters_gui, current_dir)) thread.start() - readPBIncrementValues(psth_progress) + readPBIncrementValues(sidebar.psth_progress) thread.join() - mark_down_ip = pn.pane.Markdown("""**Step 1 : Save Input Parameters**""", width=300) - mark_down_ip_note = pn.pane.Markdown( - """***Note : ***
- - Save Input Parameters will save input parameters used for the analysis - in all the folders you selected for the analysis (useful for future - reference). All analysis steps will run without saving input parameters. - """, - width=300, - ) - save_button = pn.widgets.Button(name="Save to file...", button_type="primary", width=300, align="end") - mark_down_storenames = pn.pane.Markdown("""**Step 2 : Open Storenames GUI
and save storenames**""", width=300) - open_storesList = pn.widgets.Button(name="Open Storenames GUI", button_type="primary", width=300, align="end") - mark_down_read = pn.pane.Markdown("""**Step 3 : Read Raw Data**""", width=300) - read_rawData = pn.widgets.Button(name="Read Raw Data", button_type="primary", width=300, align="end") - mark_down_extract = pn.pane.Markdown("""**Step 4 : Extract timestamps
and its correction**""", width=300) - extract_ts = pn.widgets.Button( - name="Extract timestamps and it's correction", button_type="primary", width=300, align="end" - ) - mark_down_psth = pn.pane.Markdown("""**Step 5 : PSTH Computation**""", width=300) - psth_computation = pn.widgets.Button(name="PSTH Computation", button_type="primary", width=300, align="end") - mark_down_visualization = pn.pane.Markdown("""**Step 6 : Visualization**""", width=300) - open_visualization = pn.widgets.Button(name="Open Visualization GUI", button_type="primary", width=300, align="end") - open_terminal = pn.widgets.Button(name="Open Terminal", button_type="primary", width=300, align="end") - - save_button.on_click(onclickProcess) - open_storesList.on_click(onclickStoresList) - read_rawData.on_click(onclickreaddata) - extract_ts.on_click(onclickextractts) - psth_computation.on_click(onclickpsth) - open_visualization.on_click(onclickVisualization) - - template.sidebar.append(mark_down_ip) - template.sidebar.append(mark_down_ip_note) - template.sidebar.append(save_button) - # template.sidebar.append(path) - template.sidebar.append(mark_down_storenames) - template.sidebar.append(open_storesList) - template.sidebar.append(mark_down_read) - template.sidebar.append(read_rawData) - template.sidebar.append(read_progress) - template.sidebar.append(mark_down_extract) - template.sidebar.append(extract_ts) - template.sidebar.append(extract_progress) - template.sidebar.append(mark_down_psth) - template.sidebar.append(psth_computation) - template.sidebar.append(psth_progress) - template.sidebar.append(mark_down_visualization) - template.sidebar.append(open_visualization) - # template.sidebar.append(open_terminal) - - psth_baseline_param = pn.Column(zscore_param_wd, psth_param_wd, baseline_param_wd, peak_param_wd) - - widget = pn.Column( - mark_down_1, files_1, explain_modality, modality_selector, pn.Row(individual_analysis_wd_2, psth_baseline_param) - ) - - # file_selector = pn.WidgetBox(files_1) - styles = dict(background="WhiteSmoke") - individual = pn.Card(widget, title="Individual Analysis", styles=styles, width=1000) - group = pn.Card(group_analysis_wd_1, title="Group Analysis", styles=styles, width=1000) - visualize = pn.Card(visualization_wd, title="Visualization Parameters", styles=styles, width=1000) - - # template.main.append(file_selector) - template.main.append(individual) - template.main.append(group) - template.main.append(visualize) + # ------------------------------------------------------------------------------------------------------------------ + + button_name_to_onclick_fn = { + "save_button": onclickProcess, + "open_storesList": onclickStoresList, + "read_rawData": onclickreaddata, + "extract_ts": onclickextractts, + "psth_computation": onclickpsth, + "open_visualization": onclickVisualization, + } + sidebar.attach_callbacks(button_name_to_onclick_fn=button_name_to_onclick_fn) + sidebar.add_to_template() # Expose minimal hooks and widgets to enable programmatic testing template._hooks = { "onclickProcess": onclickProcess, - "getInputParameters": getInputParameters, + "getInputParameters": input_parameters_gui.getInputParameters, } template._widgets = { - "files_1": files_1, + "files_1": input_parameters_gui.files_1, } return template From 0b204a25cae210e049157ba279be4ea5be957175 Mon Sep 17 00:00:00 2001 From: pauladkisson Date: Thu, 29 Jan 2026 16:04:09 -0800 Subject: [PATCH 07/53] Organized input_parameters GUI class into methods. 
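
The class now builds each GUI section in a dedicated setup_* method
(setup_individual_parameters, setup_group_parameters,
setup_visualization_parameters) and appends the finished cards to the shared
template in add_to_template(). A minimal sketch of the pattern, assuming only
the panel package; the MiniForm class and its single widget are illustrative,
not GuPPy code:

import panel as pn

class MiniForm:
    def __init__(self, *, template):
        self.template = template
        self.styles = dict(background="WhiteSmoke")
        self.setup_group_parameters()  # one builder method per GUI section
        self.add_to_template()         # composition happens in one place

    def setup_group_parameters(self):
        # Widgets live on self so click handlers can read .value later.
        self.averageForGroup = pn.widgets.Select(
            name="Average Group? (bool)", value=False, options=[True, False]
        )
        self.group = pn.Card(
            pn.Column(self.averageForGroup), title="Group Analysis", styles=self.styles
        )

    def add_to_template(self):
        self.template.main.append(self.group)

template = pn.template.BootstrapTemplate(title="Input Parameters GUI")
form = MiniForm(template=template)

With construction isolated per section, __init__ only fixes the build order,
so one section can be moved or removed without touching the widgets of the
others.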
--- src/guppy/frontend/input_parameters.py | 184 +++++++++++++------------ 1 file changed, 97 insertions(+), 87 deletions(-) diff --git a/src/guppy/frontend/input_parameters.py b/src/guppy/frontend/input_parameters.py index b6d0a96..d2c49ee 100644 --- a/src/guppy/frontend/input_parameters.py +++ b/src/guppy/frontend/input_parameters.py @@ -43,11 +43,20 @@ def getAbsPath(files_1, files_2): class InputParametersGUI: def __init__(self, *, template, folder_path): self.template = template + self.folder_path = folder_path + self.styles = dict(background="WhiteSmoke") + self.setup_individual_parameters() + self.setup_group_parameters() + self.setup_visualization_parameters() + self.add_to_template() + + def setup_individual_parameters(self): + # Individual analysis components self.mark_down_1 = pn.pane.Markdown( """**Select folders for the analysis from the file selector below**""", width=600 ) - self.files_1 = pn.widgets.FileSelector(folder_path, name="folderNames", width=950) + self.files_1 = pn.widgets.FileSelector(self.folder_path, name="folderNames", width=950) self.explain_modality = pn.pane.Markdown( """ @@ -66,37 +75,37 @@ def __init__(self, *, template, folder_path): self.explain_time_artifacts = pn.pane.Markdown( """ - - ***Number of cores :*** Number of cores used for analysis. Try to - keep it less than the number of cores in your machine. - - ***Combine Data? :*** Make this parameter ``` True ``` if user wants to combine - the data, especially when there is two different - data files for the same recording session.
- - ***Isosbestic Control Channel? :*** Make this parameter ``` False ``` if user - does not want to use isosbestic control channel in the analysis.
- - ***Eliminate first few seconds :*** It is the parameter to cut out first x seconds - from the data. Default is 1 seconds.
- - ***Window for Moving Average filter :*** The filtering of signals - is done using moving average filter. Default window used for moving - average filter is 100 datapoints. Change it based on the requirement.
- - ***Moving Window (transients detection) :*** Transients in the z-score - and/or \u0394F/F are detected using this moving window. - Default is 15 seconds. Change it based on the requirement.
- - ***High Amplitude filtering threshold (HAFT) (transients detection) :*** High amplitude - events greater than x times the MAD above the median are filtered out. Here, x is - high amplitude filtering threshold. Default is 2. - - ***Transients detection threshold (TD Thresh):*** Peaks with local maxima greater than x times - the MAD above the median of the trace (after filtering high amplitude events) are detected - as transients. Here, x is transients detection threshold. Default is 3. - - ***Number of channels (Neurophotometrics only) :*** Number of - channels used while recording, when data files has no column names mentioning "Flags" - or "LedState". - - ***removeArtifacts? :*** Make this parameter ``` True``` if there are - artifacts and user wants to remove the artifacts. - - ***removeArtifacts method :*** Selecting ```concatenate``` will remove bad - chunks and concatenate the selected good chunks together. - Selecting ```replace with NaN``` will replace bad chunks with NaN - values. - """, + - ***Number of cores :*** Number of cores used for analysis. Try to + keep it less than the number of cores in your machine. + - ***Combine Data? :*** Make this parameter ``` True ``` if user wants to combine + the data, especially when there is two different + data files for the same recording session.
+ - ***Isosbestic Control Channel? :*** Make this parameter ``` False ``` if the user + does not want to use an isosbestic control channel in the analysis.
+ - ***Eliminate first few seconds :*** This parameter cuts out the first x seconds + from the data. Default is 1 second.
+ - ***Window for Moving Average filter :*** Signals are filtered using a moving + average filter. The default window for the moving + average filter is 100 datapoints. Change it based on your requirements.
+ - ***Moving Window (transients detection) :*** Transients in the z-score + and/or \u0394F/F are detected using this moving window. + Default is 15 seconds. Change it based on your requirements.
+ - ***High Amplitude filtering threshold (HAFT) (transients detection) :*** High amplitude + events greater than x times the MAD above the median are filtered out. Here, x is + high amplitude filtering threshold. Default is 2. + - ***Transients detection threshold (TD Thresh):*** Peaks with local maxima greater than x times + the MAD above the median of the trace (after filtering high amplitude events) are detected + as transients. Here, x is transients detection threshold. Default is 3. + - ***Number of channels (Neurophotometrics only) :*** Number of + channels used while recording, when data files has no column names mentioning "Flags" + or "LedState". + - ***removeArtifacts? :*** Make this parameter ``` True``` if there are + artifacts and user wants to remove the artifacts. + - ***removeArtifacts method :*** Selecting ```concatenate``` will remove bad + chunks and concatenate the selected good chunks together. + Selecting ```replace with NaN``` will replace bad chunks with NaN + values. + """, width=350, ) @@ -169,25 +178,25 @@ def __init__(self, *, template, folder_path): self.explain_z_score = pn.pane.Markdown( """ - ***Note :***
- - Details about z-score computation methods are explained in Github wiki.
- - The details will make user understand what computation method to use for - their data.
- - Baseline Window Parameters should be kept 0 unless you are using baseline
- z-score computation method. The parameters are in seconds. - """, + ***Note :***
+ - Details about z-score computation methods are explained in the GitHub wiki.
+ - These details will help the user decide which computation method to use for + their data.
+ - Baseline Window Parameters should be kept at 0 unless you are using the baseline
+ z-score computation method. The parameters are in seconds. + """, width=580, ) self.explain_nsec = pn.pane.Markdown( """ - - ***Time Interval :*** To omit bursts of event timestamps, user defined time interval - is set so that if the time difference between two timestamps is less than this defined time - interval, it will be deleted for the calculation of PSTH. - - ***Compute Cross-correlation :*** Make this parameter ```True```, when user wants - to compute cross-correlation between PSTHs of two different signals or signals - recorded from different brain regions. - """, + - ***Time Interval :*** To omit bursts of event timestamps, user defined time interval + is set so that if the time difference between two timestamps is less than this defined time + interval, it will be deleted for the calculation of PSTH. + - ***Compute Cross-correlation :*** Make this parameter ```True```, when user wants + to compute cross-correlation between PSTHs of two different signals or signals + recorded from different brain regions. + """, width=580, ) @@ -211,14 +220,14 @@ def __init__(self, *, template, folder_path): self.explain_baseline = pn.pane.Markdown( """ - ***Note :***
- - If user does not want to do baseline correction, - put both parameters 0.
- - If the first event timestamp is less than the length of baseline - window, it will be rejected in the PSTH computation step.
- - Baseline parameters must be within the PSTH parameters - set in the PSTH parameters section. - """, + ***Note :***
+ - If the user does not want to do baseline correction, + set both parameters to 0.
+ - If the first event timestamp is less than the length of the baseline + window, it will be rejected in the PSTH computation step.
+ - Baseline parameters must be within the PSTH parameters + set in the PSTH parameters section. + """, width=580, ) @@ -254,12 +263,12 @@ def __init__(self, *, template, folder_path): ) self.peak_explain = pn.pane.Markdown( """ - ***Note :***
- - Peak and area are computed between the window set below.
- - Peak and AUC parameters must be within the PSTH parameters set in the PSTH parameters section.
- - Please make sure when user changes the parameters in the table below, click on any other cell after - changing a value in a particular cell. - """, + ***Note :***
+ - Peak and area are computed within the window set below.
+ - Peak and AUC parameters must fall within the PSTH window set in the PSTH Parameters section.
+ - Please make sure when user changes the parameters in the table below, click on any other cell after + changing a value in a particular cell. + """, width=580, ) @@ -274,24 +283,6 @@ def __init__(self, *, template, folder_path): self.peak_param_wd = pn.WidgetBox("### Peak and AUC Parameters", self.peak_explain, self.df_widget, width=600) - self.mark_down_2 = pn.pane.Markdown( - """**Select folders for the average analysis from the file selector below**""", width=600 - ) - - self.files_2 = pn.widgets.FileSelector(folder_path, name="folderNamesForAvg", width=950) - - self.averageForGroup = pn.widgets.Select( - name="Average Group? (bool)", value=False, options=[True, False], width=435 - ) - - self.visualizeAverageResults = pn.widgets.Select( - name="Visualize Average Results? (bool)", value=False, options=[True, False], width=435 - ) - - self.visualize_zscore_or_dff = pn.widgets.Select( - name="z-score or \u0394F/F? (for visualization)", options=["z_score", "dff"], width=435 - ) - self.individual_analysis_wd_2 = pn.Column( self.explain_time_artifacts, pn.Row(self.numberOfCores, self.combine_data), @@ -307,10 +298,6 @@ def __init__(self, *, template, folder_path): pn.Row(self.removeArtifacts, self.artifactsRemovalMethod), ) - self.group_analysis_wd_1 = pn.Column(self.mark_down_2, self.files_2, self.averageForGroup, width=800) - - self.visualization_wd = pn.Row(self.visualize_zscore_or_dff, pn.Spacer(width=60), self.visualizeAverageResults) - self.psth_baseline_param = pn.Column( self.zscore_param_wd, self.psth_param_wd, self.baseline_param_wd, self.peak_param_wd ) @@ -322,12 +309,35 @@ def __init__(self, *, template, folder_path): self.modality_selector, pn.Row(self.individual_analysis_wd_2, self.psth_baseline_param), ) + self.individual = pn.Card(self.widget, title="Individual Analysis", styles=self.styles, width=1000) - styles = dict(background="WhiteSmoke") - self.individual = pn.Card(self.widget, title="Individual Analysis", styles=styles, width=1000) - self.group = pn.Card(self.group_analysis_wd_1, title="Group Analysis", styles=styles, width=1000) - self.visualize = pn.Card(self.visualization_wd, title="Visualization Parameters", styles=styles, width=1000) - self.add_to_template() + def setup_group_parameters(self): + self.mark_down_2 = pn.pane.Markdown( + """**Select folders for the average analysis from the file selector below**""", width=600 + ) + + self.files_2 = pn.widgets.FileSelector(self.folder_path, name="folderNamesForAvg", width=950) + + self.averageForGroup = pn.widgets.Select( + name="Average Group? (bool)", value=False, options=[True, False], width=435 + ) + + self.group_analysis_wd_1 = pn.Column(self.mark_down_2, self.files_2, self.averageForGroup, width=800) + self.group = pn.Card(self.group_analysis_wd_1, title="Group Analysis", styles=self.styles, width=1000) + + def setup_visualization_parameters(self): + self.visualizeAverageResults = pn.widgets.Select( + name="Visualize Average Results? (bool)", value=False, options=[True, False], width=435 + ) + + self.visualize_zscore_or_dff = pn.widgets.Select( + name="z-score or \u0394F/F? 
(for visualization)", options=["z_score", "dff"], width=435 + ) + + self.visualization_wd = pn.Row(self.visualize_zscore_or_dff, pn.Spacer(width=60), self.visualizeAverageResults) + self.visualize = pn.Card( + self.visualization_wd, title="Visualization Parameters", styles=self.styles, width=1000 + ) def add_to_template(self): self.template.main.append(self.individual) From d59626e8ebbb944b716cd5a575ad8757146a2ab3 Mon Sep 17 00:00:00 2001 From: pauladkisson Date: Thu, 29 Jan 2026 16:12:27 -0800 Subject: [PATCH 08/53] Renamed InputParameters GUI to ParameterForm. --- src/guppy/frontend/input_parameters.py | 2 +- src/guppy/savingInputParameters.py | 32 +++++++++++++------------- 2 files changed, 17 insertions(+), 17 deletions(-) diff --git a/src/guppy/frontend/input_parameters.py b/src/guppy/frontend/input_parameters.py index d2c49ee..1ae8313 100644 --- a/src/guppy/frontend/input_parameters.py +++ b/src/guppy/frontend/input_parameters.py @@ -40,7 +40,7 @@ def getAbsPath(files_1, files_2): return abspath -class InputParametersGUI: +class ParameterForm: def __init__(self, *, template, folder_path): self.template = template self.folder_path = folder_path diff --git a/src/guppy/savingInputParameters.py b/src/guppy/savingInputParameters.py index 5498e08..11a3eed 100644 --- a/src/guppy/savingInputParameters.py +++ b/src/guppy/savingInputParameters.py @@ -7,7 +7,7 @@ import panel as pn -from .frontend.input_parameters import InputParametersGUI +from .frontend.input_parameters import ParameterForm from .frontend.path_selection import get_folder_path from .frontend.progress import readPBIncrementValues from .frontend.sidebar import Sidebar @@ -17,18 +17,18 @@ logger = logging.getLogger(__name__) -def readRawData(input_parameters_gui): - inputParameters = input_parameters_gui.getInputParameters() +def readRawData(parameter_form): + inputParameters = parameter_form.getInputParameters() subprocess.call([sys.executable, "-m", "guppy.readTevTsq", json.dumps(inputParameters)]) -def extractTs(input_parameters_gui): - inputParameters = input_parameters_gui.getInputParameters() +def extractTs(parameter_form): + inputParameters = parameter_form.getInputParameters() subprocess.call([sys.executable, "-m", "guppy.preprocess", json.dumps(inputParameters)]) -def psthComputation(input_parameters_gui, current_dir): - inputParameters = input_parameters_gui.getInputParameters() +def psthComputation(parameter_form, current_dir): + inputParameters = parameter_form.getInputParameters() inputParameters["curr_dir"] = current_dir subprocess.call([sys.executable, "-m", "guppy.computePsth", json.dumps(inputParameters)]) @@ -40,13 +40,13 @@ def savingInputParameters(): current_dir = os.getcwd() template = pn.template.BootstrapTemplate(title="Input Parameters GUI") - input_parameters_gui = InputParametersGUI(folder_path=folder_path, template=template) + parameter_form = ParameterForm(folder_path=folder_path, template=template) sidebar = Sidebar(template=template) # ------------------------------------------------------------------------------------------------------------------ # onclick closure functions for sidebar buttons def onclickProcess(event=None): - inputParameters = input_parameters_gui.getInputParameters() + inputParameters = parameter_form.getInputParameters() logger.debug("Saving Input Parameters file.") analysisParameters = { "combine_data": inputParameters["combine_data"], @@ -84,27 +84,27 @@ def onclickProcess(event=None): logger.info("Input Parameters File Saved.") def onclickStoresList(event=None): - 
inputParameters = input_parameters_gui.getInputParameters() + inputParameters = parameter_form.getInputParameters() execute(inputParameters) def onclickVisualization(event=None): - inputParameters = input_parameters_gui.getInputParameters() + inputParameters = parameter_form.getInputParameters() visualizeResults(inputParameters) def onclickreaddata(event=None): - thread = Thread(target=readRawData, args=(input_parameters_gui,)) + thread = Thread(target=readRawData, args=(parameter_form,)) thread.start() readPBIncrementValues(sidebar.read_progress) thread.join() def onclickextractts(event=None): - thread = Thread(target=extractTs, args=(input_parameters_gui,)) + thread = Thread(target=extractTs, args=(parameter_form,)) thread.start() readPBIncrementValues(sidebar.extract_progress) thread.join() def onclickpsth(event=None): - thread = Thread(target=psthComputation, args=(input_parameters_gui, current_dir)) + thread = Thread(target=psthComputation, args=(parameter_form, current_dir)) thread.start() readPBIncrementValues(sidebar.psth_progress) thread.join() @@ -125,10 +125,10 @@ def onclickpsth(event=None): # Expose minimal hooks and widgets to enable programmatic testing template._hooks = { "onclickProcess": onclickProcess, - "getInputParameters": input_parameters_gui.getInputParameters, + "getInputParameters": parameter_form.getInputParameters, } template._widgets = { - "files_1": input_parameters_gui.files_1, + "files_1": parameter_form.files_1, } return template From c048d27961d7e02ca6febc1a98f10b85cb441170 Mon Sep 17 00:00:00 2001 From: pauladkisson Date: Thu, 29 Jan 2026 16:44:46 -0800 Subject: [PATCH 09/53] Renamed SavingInputParameters --- src/guppy/main.py | 4 ++-- .../home.py} | 14 +++++++------- src/guppy/testing/api.py | 12 ++++++------ 3 files changed, 15 insertions(+), 15 deletions(-) rename src/guppy/{savingInputParameters.py => orchestration/home.py} (94%) diff --git a/src/guppy/main.py b/src/guppy/main.py index 478e991..cb5ebc8 100644 --- a/src/guppy/main.py +++ b/src/guppy/main.py @@ -11,12 +11,12 @@ import panel as pn -from .savingInputParameters import savingInputParameters +from .orchestration.home import build_homepage def serve_app(): """Serve the GuPPy application using Panel.""" - template = savingInputParameters() + template = build_homepage() pn.serve(template, show=True) diff --git a/src/guppy/savingInputParameters.py b/src/guppy/orchestration/home.py similarity index 94% rename from src/guppy/savingInputParameters.py rename to src/guppy/orchestration/home.py index 11a3eed..1d7dc63 100644 --- a/src/guppy/savingInputParameters.py +++ b/src/guppy/orchestration/home.py @@ -7,12 +7,12 @@ import panel as pn -from .frontend.input_parameters import ParameterForm -from .frontend.path_selection import get_folder_path -from .frontend.progress import readPBIncrementValues -from .frontend.sidebar import Sidebar -from .saveStoresList import execute -from .visualizePlot import visualizeResults +from ..frontend.input_parameters import ParameterForm +from ..frontend.path_selection import get_folder_path +from ..frontend.progress import readPBIncrementValues +from ..frontend.sidebar import Sidebar +from ..saveStoresList import execute +from ..visualizePlot import visualizeResults logger = logging.getLogger(__name__) @@ -33,7 +33,7 @@ def psthComputation(parameter_form, current_dir): subprocess.call([sys.executable, "-m", "guppy.computePsth", json.dumps(inputParameters)]) -def savingInputParameters(): +def build_homepage(): pn.extension() global folder_path folder_path = 
get_folder_path() diff --git a/src/guppy/testing/api.py b/src/guppy/testing/api.py index 98939cf..1cf4349 100644 --- a/src/guppy/testing/api.py +++ b/src/guppy/testing/api.py @@ -15,10 +15,10 @@ from guppy.computePsth import psthForEachStorename from guppy.findTransientsFreqAndAmp import executeFindFreqAndAmp +from guppy.orchestration.home import build_homepage from guppy.preprocess import extractTsAndSignal from guppy.readTevTsq import readRawData from guppy.saveStoresList import execute -from guppy.savingInputParameters import savingInputParameters def step1(*, base_dir: str, selected_folders: Iterable[str]) -> None: @@ -50,7 +50,7 @@ def step1(*, base_dir: str, selected_folders: Iterable[str]) -> None: os.environ["GUPPY_BASE_DIR"] = base_dir # Build the template headlessly - template = savingInputParameters() + template = build_homepage() # Sanity checks: ensure hooks/widgets exposed if not hasattr(template, "_hooks") or "onclickProcess" not in template._hooks: @@ -144,7 +144,7 @@ def step2( # Headless build: set base_dir and construct the template os.environ["GUPPY_BASE_DIR"] = base_dir - template = savingInputParameters() + template = build_homepage() # Ensure hooks/widgets exposed if not hasattr(template, "_hooks") or "getInputParameters" not in template._hooks: @@ -236,7 +236,7 @@ def step3( # Headless build: set base_dir and construct the template os.environ["GUPPY_BASE_DIR"] = base_dir - template = savingInputParameters() + template = build_homepage() # Ensure hooks/widgets exposed if not hasattr(template, "_hooks") or "getInputParameters" not in template._hooks: @@ -328,7 +328,7 @@ def step4( # Headless build: set base_dir and construct the template os.environ["GUPPY_BASE_DIR"] = base_dir - template = savingInputParameters() + template = build_homepage() # Ensure hooks/widgets exposed if not hasattr(template, "_hooks") or "getInputParameters" not in template._hooks: @@ -420,7 +420,7 @@ def step5( # Headless build: set base_dir and construct the template os.environ["GUPPY_BASE_DIR"] = base_dir - template = savingInputParameters() + template = build_homepage() # Ensure hooks/widgets exposed if not hasattr(template, "_hooks") or "getInputParameters" not in template._hooks: From 711b4121d5056d095769eb61c2b6974514d775b2 Mon Sep 17 00:00:00 2001 From: pauladkisson Date: Fri, 30 Jan 2026 14:46:18 -0800 Subject: [PATCH 10/53] Refactored step one code to a dedicated module. 
--- src/guppy/orchestration/home.py | 37 ++----------------- src/guppy/orchestration/save_parameters.py | 41 ++++++++++++++++++++++ 2 files changed, 43 insertions(+), 35 deletions(-) create mode 100644 src/guppy/orchestration/save_parameters.py diff --git a/src/guppy/orchestration/home.py b/src/guppy/orchestration/home.py index 1d7dc63..ace9782 100644 --- a/src/guppy/orchestration/home.py +++ b/src/guppy/orchestration/home.py @@ -7,6 +7,7 @@ import panel as pn +from .save_parameters import save_parameters from ..frontend.input_parameters import ParameterForm from ..frontend.path_selection import get_folder_path from ..frontend.progress import readPBIncrementValues @@ -47,41 +48,7 @@ def build_homepage(): # onclick closure functions for sidebar buttons def onclickProcess(event=None): inputParameters = parameter_form.getInputParameters() - logger.debug("Saving Input Parameters file.") - analysisParameters = { - "combine_data": inputParameters["combine_data"], - "isosbestic_control": inputParameters["isosbestic_control"], - "timeForLightsTurnOn": inputParameters["timeForLightsTurnOn"], - "filter_window": inputParameters["filter_window"], - "removeArtifacts": inputParameters["removeArtifacts"], - "noChannels": inputParameters["noChannels"], - "zscore_method": inputParameters["zscore_method"], - "baselineWindowStart": inputParameters["baselineWindowStart"], - "baselineWindowEnd": inputParameters["baselineWindowEnd"], - "nSecPrev": inputParameters["nSecPrev"], - "nSecPost": inputParameters["nSecPost"], - "timeInterval": inputParameters["timeInterval"], - "bin_psth_trials": inputParameters["bin_psth_trials"], - "use_time_or_trials": inputParameters["use_time_or_trials"], - "baselineCorrectionStart": inputParameters["baselineCorrectionStart"], - "baselineCorrectionEnd": inputParameters["baselineCorrectionEnd"], - "peak_startPoint": inputParameters["peak_startPoint"], - "peak_endPoint": inputParameters["peak_endPoint"], - "selectForComputePsth": inputParameters["selectForComputePsth"], - "selectForTransientsComputation": inputParameters["selectForTransientsComputation"], - "moving_window": inputParameters["moving_window"], - "highAmpFilt": inputParameters["highAmpFilt"], - "transientsThresh": inputParameters["transientsThresh"], - } - for folder in inputParameters["folderNames"]: - with open(os.path.join(folder, "GuPPyParamtersUsed.json"), "w") as f: - json.dump(analysisParameters, f, indent=4) - logger.info(f"Input Parameters file saved at {folder}") - - logger.info("#" * 400) - - # path.value = (os.path.join(op, 'inputParameters.json')).replace('\\', '/') - logger.info("Input Parameters File Saved.") + save_parameters(inputParameters=inputParameters) def onclickStoresList(event=None): inputParameters = parameter_form.getInputParameters() diff --git a/src/guppy/orchestration/save_parameters.py b/src/guppy/orchestration/save_parameters.py new file mode 100644 index 0000000..3af5e2c --- /dev/null +++ b/src/guppy/orchestration/save_parameters.py @@ -0,0 +1,41 @@ +import json +import logging +import os + +logger = logging.getLogger(__name__) + + +def save_parameters(inputParameters: dict): + logger.debug("Saving Input Parameters file.") + analysisParameters = { + "combine_data": inputParameters["combine_data"], + "isosbestic_control": inputParameters["isosbestic_control"], + "timeForLightsTurnOn": inputParameters["timeForLightsTurnOn"], + "filter_window": inputParameters["filter_window"], + "removeArtifacts": inputParameters["removeArtifacts"], + "noChannels": inputParameters["noChannels"], + 
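Note: after this extraction, the step-one parameter saving can be exercised without
the Panel UI. A minimal sketch of headless use, assuming the module path and key
set introduced in this patch (the placeholder values and folder path below are
hypothetical, not recommended settings):

    # Sketch only: drives save_parameters() as extracted in this patch.
    from guppy.orchestration.save_parameters import save_parameters

    # save_parameters() serializes exactly these analysis keys; the values
    # used here are placeholders for illustration.
    keys = [
        "combine_data", "isosbestic_control", "timeForLightsTurnOn",
        "filter_window", "removeArtifacts", "noChannels", "zscore_method",
        "baselineWindowStart", "baselineWindowEnd", "nSecPrev", "nSecPost",
        "timeInterval", "bin_psth_trials", "use_time_or_trials",
        "baselineCorrectionStart", "baselineCorrectionEnd", "peak_startPoint",
        "peak_endPoint", "selectForComputePsth",
        "selectForTransientsComputation", "moving_window", "highAmpFilt",
        "transientsThresh",
    ]
    inputParameters = {key: None for key in keys}
    inputParameters["folderNames"] = ["/path/to/session_folder"]  # hypothetical; folder must exist
    # Writes GuPPyParamtersUsed.json into each folder listed in folderNames.
    save_parameters(inputParameters=inputParameters)
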
"zscore_method": inputParameters["zscore_method"], + "baselineWindowStart": inputParameters["baselineWindowStart"], + "baselineWindowEnd": inputParameters["baselineWindowEnd"], + "nSecPrev": inputParameters["nSecPrev"], + "nSecPost": inputParameters["nSecPost"], + "timeInterval": inputParameters["timeInterval"], + "bin_psth_trials": inputParameters["bin_psth_trials"], + "use_time_or_trials": inputParameters["use_time_or_trials"], + "baselineCorrectionStart": inputParameters["baselineCorrectionStart"], + "baselineCorrectionEnd": inputParameters["baselineCorrectionEnd"], + "peak_startPoint": inputParameters["peak_startPoint"], + "peak_endPoint": inputParameters["peak_endPoint"], + "selectForComputePsth": inputParameters["selectForComputePsth"], + "selectForTransientsComputation": inputParameters["selectForTransientsComputation"], + "moving_window": inputParameters["moving_window"], + "highAmpFilt": inputParameters["highAmpFilt"], + "transientsThresh": inputParameters["transientsThresh"], + } + for folder in inputParameters["folderNames"]: + with open(os.path.join(folder, "GuPPyParamtersUsed.json"), "w") as f: + json.dump(analysisParameters, f, indent=4) + logger.info(f"Input Parameters file saved at {folder}") + + logger.info("#" * 400) + logger.info("Input Parameters File Saved.") From 9a20a25a88c4dd8399744d20e26e098c72bd9958 Mon Sep 17 00:00:00 2001 From: pauladkisson Date: Fri, 30 Jan 2026 14:55:06 -0800 Subject: [PATCH 11/53] Refactored npm_gui_prompts into a dedicated module. --- src/guppy/frontend/npm_gui_prompts.py | 104 +++++++++++++++++++++++++ src/guppy/saveStoresList.py | 105 +------------------------- 2 files changed, 108 insertions(+), 101 deletions(-) create mode 100644 src/guppy/frontend/npm_gui_prompts.py diff --git a/src/guppy/frontend/npm_gui_prompts.py b/src/guppy/frontend/npm_gui_prompts.py new file mode 100644 index 0000000..a6f2077 --- /dev/null +++ b/src/guppy/frontend/npm_gui_prompts.py @@ -0,0 +1,104 @@ +import logging +import tkinter as tk +from tkinter import StringVar, messagebox, ttk + +logger = logging.getLogger(__name__) + + +def get_multi_event_responses(multiple_event_ttls): + responses = [] + for has_multiple in multiple_event_ttls: + if not has_multiple: + responses.append(False) + continue + window = tk.Tk() + response = messagebox.askyesno( + "Multiple event TTLs", + ( + "Based on the TTL file, " + "it looks like TTLs " + "belong to multiple behavior types. " + "Do you want to create multiple files for each " + "behavior type?" 
+ ), + ) + window.destroy() + responses.append(response) + return responses + + +def get_timestamp_configuration(ts_unit_needs, col_names_ts): + ts_units, npm_timestamp_column_names = [], [] + for need in ts_unit_needs: + if not need: + ts_units.append("seconds") + npm_timestamp_column_names.append(None) + continue + window = tk.Tk() + window.title("Select appropriate options for timestamps") + window.geometry("500x200") + holdComboboxValues = dict() + + timestamps_label = ttk.Label(window, text="Select which timestamps to use : ").grid( + row=0, column=1, pady=25, padx=25 + ) + holdComboboxValues["timestamps"] = StringVar() + timestamps_combo = ttk.Combobox(window, values=col_names_ts, textvariable=holdComboboxValues["timestamps"]) + timestamps_combo.grid(row=0, column=2, pady=25, padx=25) + timestamps_combo.current(0) + # timestamps_combo.bind("<>", comboBoxSelected) + + time_unit_label = ttk.Label(window, text="Select timestamps unit : ").grid(row=1, column=1, pady=25, padx=25) + holdComboboxValues["time_unit"] = StringVar() + time_unit_combo = ttk.Combobox( + window, + values=["", "seconds", "milliseconds", "microseconds"], + textvariable=holdComboboxValues["time_unit"], + ) + time_unit_combo.grid(row=1, column=2, pady=25, padx=25) + time_unit_combo.current(0) + # time_unit_combo.bind("<>", comboBoxSelected) + window.lift() + window.after(500, lambda: window.lift()) + window.mainloop() + + if holdComboboxValues["timestamps"].get(): + npm_timestamp_column_name = holdComboboxValues["timestamps"].get() + else: + messagebox.showerror( + "All options not selected", + "All the options for timestamps \ + were not selected. Please select appropriate options", + ) + logger.error( + "All the options for timestamps \ + were not selected. Please select appropriate options" + ) + raise Exception( + "All the options for timestamps \ + were not selected. Please select appropriate options" + ) + if holdComboboxValues["time_unit"].get(): + if holdComboboxValues["time_unit"].get() == "seconds": + ts_unit = holdComboboxValues["time_unit"].get() + elif holdComboboxValues["time_unit"].get() == "milliseconds": + ts_unit = holdComboboxValues["time_unit"].get() + else: + ts_unit = holdComboboxValues["time_unit"].get() + else: + messagebox.showerror( + "All options not selected", + "All the options for timestamps \ + were not selected. Please select appropriate options", + ) + logger.error( + "All the options for timestamps \ + were not selected. Please select appropriate options" + ) + raise Exception( + "All the options for timestamps \ + were not selected. 
Please select appropriate options" + ) + ts_units.append(ts_unit) + npm_timestamp_column_names.append(npm_timestamp_column_name) + return ts_units, npm_timestamp_column_names diff --git a/src/guppy/saveStoresList.py b/src/guppy/saveStoresList.py index 20a5c94..e241ea8 100755 --- a/src/guppy/saveStoresList.py +++ b/src/guppy/saveStoresList.py @@ -9,10 +9,8 @@ import logging import os import socket -import tkinter as tk from pathlib import Path from random import randint -from tkinter import StringVar, messagebox, ttk import holoviews as hv import numpy as np @@ -25,6 +23,10 @@ NpmRecordingExtractor, TdtRecordingExtractor, ) +from guppy.frontend.npm_gui_prompts import ( + get_multi_event_responses, + get_timestamp_configuration, +) # hv.extension() pn.extension() @@ -611,102 +613,3 @@ def execute(inputParameters): except Exception as e: logger.error(str(e)) raise e - - -def get_multi_event_responses(multiple_event_ttls): - responses = [] - for has_multiple in multiple_event_ttls: - if not has_multiple: - responses.append(False) - continue - window = tk.Tk() - response = messagebox.askyesno( - "Multiple event TTLs", - ( - "Based on the TTL file, " - "it looks like TTLs " - "belong to multiple behavior types. " - "Do you want to create multiple files for each " - "behavior type?" - ), - ) - window.destroy() - responses.append(response) - return responses - - -def get_timestamp_configuration(ts_unit_needs, col_names_ts): - ts_units, npm_timestamp_column_names = [], [] - for need in ts_unit_needs: - if not need: - ts_units.append("seconds") - npm_timestamp_column_names.append(None) - continue - window = tk.Tk() - window.title("Select appropriate options for timestamps") - window.geometry("500x200") - holdComboboxValues = dict() - - timestamps_label = ttk.Label(window, text="Select which timestamps to use : ").grid( - row=0, column=1, pady=25, padx=25 - ) - holdComboboxValues["timestamps"] = StringVar() - timestamps_combo = ttk.Combobox(window, values=col_names_ts, textvariable=holdComboboxValues["timestamps"]) - timestamps_combo.grid(row=0, column=2, pady=25, padx=25) - timestamps_combo.current(0) - # timestamps_combo.bind("<>", comboBoxSelected) - - time_unit_label = ttk.Label(window, text="Select timestamps unit : ").grid(row=1, column=1, pady=25, padx=25) - holdComboboxValues["time_unit"] = StringVar() - time_unit_combo = ttk.Combobox( - window, - values=["", "seconds", "milliseconds", "microseconds"], - textvariable=holdComboboxValues["time_unit"], - ) - time_unit_combo.grid(row=1, column=2, pady=25, padx=25) - time_unit_combo.current(0) - # time_unit_combo.bind("<>", comboBoxSelected) - window.lift() - window.after(500, lambda: window.lift()) - window.mainloop() - - if holdComboboxValues["timestamps"].get(): - npm_timestamp_column_name = holdComboboxValues["timestamps"].get() - else: - messagebox.showerror( - "All options not selected", - "All the options for timestamps \ - were not selected. Please select appropriate options", - ) - logger.error( - "All the options for timestamps \ - were not selected. Please select appropriate options" - ) - raise Exception( - "All the options for timestamps \ - were not selected. 
Please select appropriate options" - ) - if holdComboboxValues["time_unit"].get(): - if holdComboboxValues["time_unit"].get() == "seconds": - ts_unit = holdComboboxValues["time_unit"].get() - elif holdComboboxValues["time_unit"].get() == "milliseconds": - ts_unit = holdComboboxValues["time_unit"].get() - else: - ts_unit = holdComboboxValues["time_unit"].get() - else: - messagebox.showerror( - "All options not selected", - "All the options for timestamps \ - were not selected. Please select appropriate options", - ) - logger.error( - "All the options for timestamps \ - were not selected. Please select appropriate options" - ) - raise Exception( - "All the options for timestamps \ - were not selected. Please select appropriate options" - ) - ts_units.append(ts_unit) - npm_timestamp_column_names.append(npm_timestamp_column_name) - return ts_units, npm_timestamp_column_names From c37d5ff34b539c72c1d2aa17337bc3f5c8b24591 Mon Sep 17 00:00:00 2001 From: pauladkisson Date: Fri, 30 Jan 2026 16:17:27 -0800 Subject: [PATCH 12/53] Added NpmChannelSelector --- src/guppy/frontend/npm_channel_selector.py | 57 +++ src/guppy/frontend/temp.py | 515 ++++++++++++++++++++ src/guppy/saveStoresList.py | 537 +-------------------- 3 files changed, 573 insertions(+), 536 deletions(-) create mode 100644 src/guppy/frontend/npm_channel_selector.py create mode 100644 src/guppy/frontend/temp.py diff --git a/src/guppy/frontend/npm_channel_selector.py b/src/guppy/frontend/npm_channel_selector.py new file mode 100644 index 0000000..4de61d6 --- /dev/null +++ b/src/guppy/frontend/npm_channel_selector.py @@ -0,0 +1,57 @@ +import glob +import logging +import os + +import holoviews as hv +import numpy as np +import pandas as pd +import panel as pn + +# hv.extension() +pn.extension() + +logger = logging.getLogger(__name__) + + +class NpmChannelSelector: + def __init__(self, folder_path): + path_chev = glob.glob(os.path.join(folder_path, "*chev*")) + path_chod = glob.glob(os.path.join(folder_path, "*chod*")) + path_chpr = glob.glob(os.path.join(folder_path, "*chpr*")) + combine_paths = path_chev + path_chod + path_chpr + self.d = dict() + for i in range(len(combine_paths)): + basename = (os.path.basename(combine_paths[i])).split(".")[0] + df = pd.read_csv(combine_paths[i]) + self.d[basename] = {"x": np.array(df["timestamps"]), "y": np.array(df["data"])} + keys = list(self.d.keys()) + self.mark_down_np = pn.pane.Markdown( + """ + ### Extra Instructions to follow when using Neurophotometrics data : + - Guppy will take the NPM data, which has interleaved frames + from the signal and control channels, and divide it out into + separate channels for each site you recordded. + However, since NPM does not automatically annotate which + frames belong to the signal channel and which belong to the + control channel, the user must specify this for GuPPy. + - Each of your recording sites will have a channel + named “chod” and a channel named “chev” + - View the plots below and, for each site, + determine whether the “chev” or “chod” channel is signal or control + - When you give your storenames, name the channels appropriately. + For example, “chev1” might be “signal_A” and + “chod1” might be “control_A” (or vice versa). 
+ + """ + ) + self.plot_select = pn.widgets.Select( + name="Select channel to see correspondings channels", options=keys, value=keys[0] + ) + self.plot_pane = pn.pane.HoloViews(self._make_plot(self.plot_select.value), width=550) + self.plot_select.param.watch(self._on_plot_select_change, "value") + + def _make_plot(self, plot_key): + return hv.Curve((self.d[plot_key]["x"], self.d[plot_key]["y"])).opts(width=550) + + def _on_plot_select_change(self, event): + self.plot_pane.object = self._make_plot(event.new) diff --git a/src/guppy/frontend/temp.py b/src/guppy/frontend/temp.py new file mode 100644 index 0000000..d2de0cf --- /dev/null +++ b/src/guppy/frontend/temp.py @@ -0,0 +1,515 @@ +import glob +import json +import logging +import os +import socket +from pathlib import Path +from random import randint + +import numpy as np +import panel as pn + +from .npm_channel_selector import NpmChannelSelector + +# hv.extension() +pn.extension() + +logger = logging.getLogger(__name__) + + +def scanPortsAndFind(start_port=5000, end_port=5200, host="127.0.0.1"): + while True: + port = randint(start_port, end_port) + sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) + sock.settimeout(0.001) # Set timeout to avoid long waiting on closed ports + result = sock.connect_ex((host, port)) + if result == 0: # If the connection is successful, the port is open + continue + else: + break + + return port + + +def takeOnlyDirs(paths): + removePaths = [] + for p in paths: + if os.path.isfile(p): + removePaths.append(p) + return list(set(paths) - set(removePaths)) + + +# function to show location for over-writing or creating a new stores list file. +def show_dir(filepath): + i = 1 + while True: + basename = os.path.basename(filepath) + op = os.path.join(filepath, basename + "_output_" + str(i)) + if not os.path.exists(op): + break + i += 1 + return op + + +def make_dir(filepath): + i = 1 + while True: + basename = os.path.basename(filepath) + op = os.path.join(filepath, basename + "_output_" + str(i)) + if not os.path.exists(op): + os.mkdir(op) + break + i += 1 + + return op + + +# function to show GUI and save +def saveStorenames(inputParameters, events, flags, folder_path): + + logger.debug("Saving stores list file.") + # getting input parameters + inputParameters = inputParameters + + # Headless path: if storenames_map provided, write storesList.csv without building the Panel UI + storenames_map = inputParameters.get("storenames_map") + if isinstance(storenames_map, dict) and len(storenames_map) > 0: + op = make_dir(folder_path) + arr = np.asarray([list(storenames_map.keys()), list(storenames_map.values())], dtype=str) + np.savetxt(os.path.join(op, "storesList.csv"), arr, delimiter=",", fmt="%s") + logger.info(f"Storeslist file saved at {op}") + logger.info("Storeslist : \n" + str(arr)) + return + + # Get storenames from extractor's events property + allnames = events + + if "data_np_v2" in flags or "data_np" in flags or "event_np" in flags: + npm_channel_selector = NpmChannelSelector(folder_path=folder_path) + else: + pass + + # instructions about how to save the storeslist file + mark_down = pn.pane.Markdown( + """ + + + ### Instructions to follow : + + - Check Storenames to repeat checkbox and see instructions in “Github Wiki” for duplicating storenames. + Otherwise do not check the Storenames to repeat checkbox.
+ - Select storenames from list and click “Select Storenames” to populate the area below.
+ - Enter names for storenames, in order, using the following naming convention:
+ Isosbestic = “control_region” (ex: Dv1A = control_DMS)
+ Signal = “signal_region” (ex: Dv2A = signal_DMS)
+ TTLs can be named using any convention (ex: PrtR = RewardedPortEntries) but should be kept consistent for later group analysis.
", + items=["over_write_file", "create_new_file"], + button_type="default", + split=True, + width=600, + ) + + literal_input_2 = pn.widgets.CodeEditor(value="""{}""", theme="tomorrow", language="json", height=250, width=600) + + alert = pn.pane.Alert("#### No alerts !!", alert_type="danger", height=80, width=600) + + take_widgets = pn.WidgetBox(multi_choice, literal_input_1) + + change_widgets = pn.WidgetBox(text) + + storenames = [] + storename_dropdowns = {} + storename_textboxes = {} + + if len(allnames) == 0: + alert.object = ( + "####Alert !! \n No storenames found. There are not any TDT files or csv files to look for storenames." + ) + + # on clicking overwrite_button, following function is executed + def overwrite_button_actions(event): + if event.new == "over_write_file": + select_location.options = takeOnlyDirs(glob.glob(os.path.join(folder_path, "*_output_*"))) + # select_location.value = select_location.options[0] + else: + select_location.options = [show_dir(folder_path)] + # select_location.value = select_location.options[0] + + def fetchValues(event): + global storenames + alert.object = "#### No alerts !!" + + if not storename_dropdowns or not len(storenames) > 0: + alert.object = "####Alert !! \n No storenames selected." + return + + storenames_cache = dict() + if os.path.exists(os.path.join(Path.home(), ".storesList.json")): + with open(os.path.join(Path.home(), ".storesList.json")) as f: + storenames_cache = json.load(f) + + comboBoxValues, textBoxValues = [], [] + dropdown_keys = list(storename_dropdowns.keys()) + textbox_keys = list(storename_textboxes.keys()) if storename_textboxes else [] + + # Get dropdown values + for key in dropdown_keys: + comboBoxValues.append(storename_dropdowns[key].value) + + # Get textbox values (matching with dropdown keys) + for key in dropdown_keys: + if key in storename_textboxes: + textbox_value = storename_textboxes[key].value or "" + textBoxValues.append(textbox_value) + + # Validation: Check for whitespace + if len(textbox_value.split()) > 1: + alert.object = "####Alert !! \n Whitespace is not allowed in the text box entry." + return + + # Validation: Check for empty required fields + dropdown_value = storename_dropdowns[key].value + if ( + not textbox_value + and dropdown_value not in storenames_cache + and dropdown_value in ["control", "signal", "event TTLs"] + ): + alert.object = "####Alert !! \n One of the text box entry is empty." + return + else: + # For cached values, use the dropdown value directly + textBoxValues.append(storename_dropdowns[key].value) + + if len(comboBoxValues) != len(textBoxValues): + alert.object = "####Alert !! \n Number of entries in combo box and text box should be same." + return + + names_for_storenames = [] + for i in range(len(comboBoxValues)): + if comboBoxValues[i] == "control" or comboBoxValues[i] == "signal": + if "_" in textBoxValues[i]: + alert.object = "####Alert !! \n Please do not use underscore in region name." 
+ return + names_for_storenames.append("{}_{}".format(comboBoxValues[i], textBoxValues[i])) + elif comboBoxValues[i] == "event TTLs": + names_for_storenames.append(textBoxValues[i]) + else: + names_for_storenames.append(comboBoxValues[i]) + + d = dict() + d["storenames"] = text.value + d["names_for_storenames"] = names_for_storenames + literal_input_2.value = str(json.dumps(d, indent=2)) + + # Panel-based storename configuration (replaces Tkinter dialog) + storename_config_widgets = pn.Column(visible=False) + show_config_button = pn.widgets.Button(name="Show Selected Configuration", width=600) + + # on clicking 'Select Storenames' button, following function is executed + def update_values(event): + global storenames, vars_list + arr = [] + for w in take_widgets: + arr.append(w.value) + + new_arr = [] + + for i in range(len(arr[1])): + for j in range(arr[1][i]): + new_arr.append(arr[0][i]) + + if len(new_arr) > 0: + storenames = cross_selector.value + new_arr + else: + storenames = cross_selector.value + + for w in change_widgets: + w.value = storenames + + storenames_cache = dict() + if os.path.exists(os.path.join(Path.home(), ".storesList.json")): + with open(os.path.join(Path.home(), ".storesList.json")) as f: + storenames_cache = json.load(f) + + # Create Panel widgets for storename configuration + config_widgets = [] + storename_dropdowns.clear() + storename_textboxes.clear() + + if len(storenames) > 0: + config_widgets.append( + pn.pane.Markdown( + "## Configure Storenames\nSelect appropriate options for each storename and provide names as needed:" + ) + ) + + for i, storename in enumerate(storenames): + # Create a row for each storename + row_widgets = [] + + # Label + label = pn.pane.Markdown(f"**{storename}:**") + row_widgets.append(label) + + # Dropdown options + if storename in storenames_cache: + options = storenames_cache[storename] + default_value = options[0] if options else "" + else: + options = ["", "control", "signal", "event TTLs"] + default_value = "" + + # Create unique key for widget + widget_key = ( + f"{storename}_{i}" + if f"{storename}_{i}" not in storename_dropdowns + else f"{storename}_{i}_{len(storename_dropdowns)}" + ) + + dropdown = pn.widgets.Select(name="Type", value=default_value, options=options, width=150) + storename_dropdowns[widget_key] = dropdown + row_widgets.append(dropdown) + + # Text input (only show if not cached or if control/signal/event TTLs selected) + if storename not in storenames_cache or default_value in ["control", "signal", "event TTLs"]: + textbox = pn.widgets.TextInput( + name="Name", value="", placeholder="Enter region/event name", width=200 + ) + storename_textboxes[widget_key] = textbox + row_widgets.append(textbox) + + # Add helper text based on selection + def create_help_function(dropdown_widget, help_pane_container): + @pn.depends(dropdown_widget.param.value, watch=True) + def update_help(dropdown_value): + if dropdown_value == "control": + help_pane_container[0] = pn.pane.Markdown( + "*Type appropriate region name*", styles={"color": "gray", "font-size": "12px"} + ) + elif dropdown_value == "signal": + help_pane_container[0] = pn.pane.Markdown( + "*Type appropriate region name*", styles={"color": "gray", "font-size": "12px"} + ) + elif dropdown_value == "event TTLs": + help_pane_container[0] = pn.pane.Markdown( + "*Type event name for the TTLs*", styles={"color": "gray", "font-size": "12px"} + ) + else: + help_pane_container[0] = pn.pane.Markdown( + "", styles={"color": "gray", "font-size": "12px"} + ) + + return update_help 
+ + help_container = [pn.pane.Markdown("")] + help_function = create_help_function(dropdown, help_container) + help_function(dropdown.value) # Initialize + row_widgets.append(help_container[0]) + + # Add the row to config widgets + config_widgets.append(pn.Row(*row_widgets, margin=(5, 0))) + + # Add show button + config_widgets.append(pn.Spacer(height=20)) + config_widgets.append(show_config_button) + config_widgets.append( + pn.pane.Markdown( + "*Click 'Show Selected Configuration' to apply your selections.*", + styles={"font-size": "12px", "color": "gray"}, + ) + ) + + # Update the configuration panel + storename_config_widgets.objects = config_widgets + storename_config_widgets.visible = len(storenames) > 0 + + # on clicking save button, following function is executed + def save_button(event=None): + global storenames + + d = json.loads(literal_input_2.value) + arr1, arr2 = np.asarray(d["storenames"]), np.asarray(d["names_for_storenames"]) + + if np.where(arr2 == "")[0].size > 0: + alert.object = "#### Alert !! \n Empty string in the list names_for_storenames." + logger.error("Empty string in the list names_for_storenames.") + raise Exception("Empty string in the list names_for_storenames.") + else: + alert.object = "#### No alerts !!" + + if arr1.shape[0] != arr2.shape[0]: + alert.object = "#### Alert !! \n Length of list storenames and names_for_storenames is not equal." + logger.error("Length of list storenames and names_for_storenames is not equal.") + raise Exception("Length of list storenames and names_for_storenames is not equal.") + else: + alert.object = "#### No alerts !!" + + if not os.path.exists(os.path.join(Path.home(), ".storesList.json")): + storenames_cache = dict() + + for i in range(arr1.shape[0]): + if arr1[i] in storenames_cache: + storenames_cache[arr1[i]].append(arr2[i]) + storenames_cache[arr1[i]] = list(set(storenames_cache[arr1[i]])) + else: + storenames_cache[arr1[i]] = [arr2[i]] + + with open(os.path.join(Path.home(), ".storesList.json"), "w") as f: + json.dump(storenames_cache, f, indent=4) + else: + with open(os.path.join(Path.home(), ".storesList.json")) as f: + storenames_cache = json.load(f) + + for i in range(arr1.shape[0]): + if arr1[i] in storenames_cache: + storenames_cache[arr1[i]].append(arr2[i]) + storenames_cache[arr1[i]] = list(set(storenames_cache[arr1[i]])) + else: + storenames_cache[arr1[i]] = [arr2[i]] + + with open(os.path.join(Path.home(), ".storesList.json"), "w") as f: + json.dump(storenames_cache, f, indent=4) + + arr = np.asarray([arr1, arr2]) + logger.info(arr) + if not os.path.exists(select_location.value): + os.mkdir(select_location.value) + + np.savetxt(os.path.join(select_location.value, "storesList.csv"), arr, delimiter=",", fmt="%s") + path.value = os.path.join(select_location.value, "storesList.csv") + logger.info(f"Storeslist file saved at {select_location.value}") + logger.info("Storeslist : \n" + str(arr)) + + # Connect button callbacks + update_options.on_click(update_values) + show_config_button.on_click(fetchValues) + save.on_click(save_button) + overwrite_button.on_click(overwrite_button_actions) + + # creating widgets, adding them to template and showing a GUI on a new browser window + number = scanPortsAndFind(start_port=5000, end_port=5200) + + if "data_np_v2" in flags or "data_np" in flags or "event_np" in flags: + widget_1 = pn.Column( + "# " + os.path.basename(folder_path), + mark_down, + npm_channel_selector.mark_down_np, + npm_channel_selector.plot_select, + npm_channel_selector.plot_pane, + ) + widget_2 = 
pn.Column( + repeat_storenames, + repeat_storename_wd, + pn.Spacer(height=20), + cross_selector, + update_options, + storename_config_widgets, + pn.Spacer(height=10), + text, + literal_input_2, + alert, + mark_down_for_overwrite, + overwrite_button, + select_location, + save, + path, + ) + template.main.append(pn.Row(widget_1, widget_2)) + + else: + widget_1 = pn.Column("# " + os.path.basename(folder_path), mark_down) + widget_2 = pn.Column( + repeat_storenames, + repeat_storename_wd, + pn.Spacer(height=20), + cross_selector, + update_options, + storename_config_widgets, + pn.Spacer(height=10), + text, + literal_input_2, + alert, + mark_down_for_overwrite, + overwrite_button, + select_location, + save, + path, + ) + template.main.append(pn.Row(widget_1, widget_2)) + + template.show(port=number) diff --git a/src/guppy/saveStoresList.py b/src/guppy/saveStoresList.py index e241ea8..bd00995 100755 --- a/src/guppy/saveStoresList.py +++ b/src/guppy/saveStoresList.py @@ -4,17 +4,9 @@ # In[1]: -import glob -import json import logging import os -import socket -from pathlib import Path -from random import randint -import holoviews as hv -import numpy as np -import pandas as pd import panel as pn from guppy.extractors import ( @@ -27,6 +19,7 @@ get_multi_event_responses, get_timestamp_configuration, ) +from guppy.frontend.temp import saveStorenames # hv.extension() pn.extension() @@ -34,534 +27,6 @@ logger = logging.getLogger(__name__) -def scanPortsAndFind(start_port=5000, end_port=5200, host="127.0.0.1"): - while True: - port = randint(start_port, end_port) - sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) - sock.settimeout(0.001) # Set timeout to avoid long waiting on closed ports - result = sock.connect_ex((host, port)) - if result == 0: # If the connection is successful, the port is open - continue - else: - break - - return port - - -def takeOnlyDirs(paths): - removePaths = [] - for p in paths: - if os.path.isfile(p): - removePaths.append(p) - return list(set(paths) - set(removePaths)) - - -# function to show location for over-writing or creating a new stores list file. 
-def show_dir(filepath): - i = 1 - while True: - basename = os.path.basename(filepath) - op = os.path.join(filepath, basename + "_output_" + str(i)) - if not os.path.exists(op): - break - i += 1 - return op - - -def make_dir(filepath): - i = 1 - while True: - basename = os.path.basename(filepath) - op = os.path.join(filepath, basename + "_output_" + str(i)) - if not os.path.exists(op): - os.mkdir(op) - break - i += 1 - - return op - - -# function to show GUI and save -def saveStorenames(inputParameters, events, flags, folder_path): - - logger.debug("Saving stores list file.") - # getting input parameters - inputParameters = inputParameters - - # Headless path: if storenames_map provided, write storesList.csv without building the Panel UI - storenames_map = inputParameters.get("storenames_map") - if isinstance(storenames_map, dict) and len(storenames_map) > 0: - op = make_dir(folder_path) - arr = np.asarray([list(storenames_map.keys()), list(storenames_map.values())], dtype=str) - np.savetxt(os.path.join(op, "storesList.csv"), arr, delimiter=",", fmt="%s") - logger.info(f"Storeslist file saved at {op}") - logger.info("Storeslist : \n" + str(arr)) - return - - # Get storenames from extractor's events property - allnames = events - - if "data_np_v2" in flags or "data_np" in flags or "event_np" in flags: - path_chev = glob.glob(os.path.join(folder_path, "*chev*")) - path_chod = glob.glob(os.path.join(folder_path, "*chod*")) - path_chpr = glob.glob(os.path.join(folder_path, "*chpr*")) - combine_paths = path_chev + path_chod + path_chpr - d = dict() - for i in range(len(combine_paths)): - basename = (os.path.basename(combine_paths[i])).split(".")[0] - df = pd.read_csv(combine_paths[i]) - d[basename] = {"x": np.array(df["timestamps"]), "y": np.array(df["data"])} - keys = list(d.keys()) - mark_down_np = pn.pane.Markdown( - """ - ### Extra Instructions to follow when using Neurophotometrics data : - - Guppy will take the NPM data, which has interleaved frames - from the signal and control channels, and divide it out into - separate channels for each site you recordded. - However, since NPM does not automatically annotate which - frames belong to the signal channel and which belong to the - control channel, the user must specify this for GuPPy. - - Each of your recording sites will have a channel - named “chod” and a channel named “chev” - - View the plots below and, for each site, - determine whether the “chev” or “chod” channel is signal or control - - When you give your storenames, name the channels appropriately. - For example, “chev1” might be “signal_A” and - “chod1” might be “control_A” (or vice versa). - - """ - ) - plot_select = pn.widgets.Select( - name="Select channel to see correspondings channels", options=keys, value=keys[0] - ) - - @pn.depends(plot_select=plot_select) - def plot(plot_select): - return hv.Curve((d[plot_select]["x"], d[plot_select]["y"])).opts(width=550) - - else: - pass - - # instructions about how to save the storeslist file - mark_down = pn.pane.Markdown( - """ - - - ### Instructions to follow : - - - Check Storenames to repeat checkbox and see instructions in “Github Wiki” for duplicating storenames. - Otherwise do not check the Storenames to repeat checkbox.
- - Select storenames from list and click “Select Storenames” to populate area below.
- - Enter names for storenames, in order, using the following naming convention:
- Isosbestic = “control_region” (ex: Dv1A= control_DMS)
- Signal= “signal_region” (ex: Dv2A= signal_DMS)
- TTLs can be named using any convention (ex: PrtR = RewardedPortEntries) but should be kept consistent for later group analysis - - ``` - {"storenames": ["Dv1A", "Dv2A", - "Dv3B", "Dv4B", - "LNRW", "LNnR", - "PrtN", "PrtR", - "RNPS"], - "names_for_storenames": ["control_DMS", "signal_DMS", - "control_DLS", "signal_DLS", - "RewardedNosepoke", "UnrewardedNosepoke", - "UnrewardedPort", "RewardedPort", - "InactiveNosepoke"]} - ``` - - If user has saved storenames before, clicking "Select Storenames" button will pop up a dialog box - showing previously used names for storenames. Select names for storenames by checking a checkbox and - click on "Show" to populate the text area in the Storenames GUI. Close the dialog box. - - - Select “create new” or “overwrite” to generate a new storenames list or replace a previous one - - Click Save - - """, - width=550, - ) - - # creating GUI template - template = pn.template.BootstrapTemplate( - title="Storenames GUI - {}".format(os.path.basename(folder_path), mark_down) - ) - - # creating different buttons and selectors for the GUI - cross_selector = pn.widgets.CrossSelector(name="Store Names Selection", value=[], options=allnames, width=600) - multi_choice = pn.widgets.MultiChoice( - name="Select Storenames which you want more than once (multi-choice: multiple options selection)", - value=[], - options=allnames, - ) - - literal_input_1 = pn.widgets.LiteralInput( - name="Number of times you want the above storename (list)", value=[], type=list - ) - # literal_input_2 = pn.widgets.LiteralInput(name='Names for Storenames (list)', type=list) - - repeat_storenames = pn.widgets.Checkbox(name="Storenames to repeat", value=False) - repeat_storename_wd = pn.WidgetBox("", width=600) - - def callback(target, event): - if event.new == True: - target.objects = [multi_choice, literal_input_1] - elif event.new == False: - target.clear() - - repeat_storenames.link(repeat_storename_wd, callbacks={"value": callback}) - # repeat_storename_wd = pn.WidgetBox('Storenames to repeat (leave blank if not needed)', multi_choice, literal_input_1, background="white", width=600) - - update_options = pn.widgets.Button(name="Select Storenames", width=600) - save = pn.widgets.Button(name="Save", width=600) - - text = pn.widgets.LiteralInput(value=[], name="Selected Store Names", type=list, width=600) - - path = pn.widgets.TextInput(name="Location to Stores List file", width=600) - - mark_down_for_overwrite = pn.pane.Markdown( - """ Select option from below if user wants to over-write a file or create a new file. - **Creating a new file will make a new output folder and will get saved at that location.** - If user selects to over-write a file **Select location of the file to over-write** will provide - the existing options of the output folders where user needs to over-write the file""", - width=600, - ) - - select_location = pn.widgets.Select( - name="Select location of the file to over-write", value="None", options=["None"], width=600 - ) - - overwrite_button = pn.widgets.MenuButton( - name="over-write storeslist file or create a new one? 
", - items=["over_write_file", "create_new_file"], - button_type="default", - split=True, - width=600, - ) - - literal_input_2 = pn.widgets.CodeEditor(value="""{}""", theme="tomorrow", language="json", height=250, width=600) - - alert = pn.pane.Alert("#### No alerts !!", alert_type="danger", height=80, width=600) - - take_widgets = pn.WidgetBox(multi_choice, literal_input_1) - - change_widgets = pn.WidgetBox(text) - - storenames = [] - storename_dropdowns = {} - storename_textboxes = {} - - if len(allnames) == 0: - alert.object = ( - "####Alert !! \n No storenames found. There are not any TDT files or csv files to look for storenames." - ) - - # on clicking overwrite_button, following function is executed - def overwrite_button_actions(event): - if event.new == "over_write_file": - select_location.options = takeOnlyDirs(glob.glob(os.path.join(folder_path, "*_output_*"))) - # select_location.value = select_location.options[0] - else: - select_location.options = [show_dir(folder_path)] - # select_location.value = select_location.options[0] - - def fetchValues(event): - global storenames - alert.object = "#### No alerts !!" - - if not storename_dropdowns or not len(storenames) > 0: - alert.object = "####Alert !! \n No storenames selected." - return - - storenames_cache = dict() - if os.path.exists(os.path.join(Path.home(), ".storesList.json")): - with open(os.path.join(Path.home(), ".storesList.json")) as f: - storenames_cache = json.load(f) - - comboBoxValues, textBoxValues = [], [] - dropdown_keys = list(storename_dropdowns.keys()) - textbox_keys = list(storename_textboxes.keys()) if storename_textboxes else [] - - # Get dropdown values - for key in dropdown_keys: - comboBoxValues.append(storename_dropdowns[key].value) - - # Get textbox values (matching with dropdown keys) - for key in dropdown_keys: - if key in storename_textboxes: - textbox_value = storename_textboxes[key].value or "" - textBoxValues.append(textbox_value) - - # Validation: Check for whitespace - if len(textbox_value.split()) > 1: - alert.object = "####Alert !! \n Whitespace is not allowed in the text box entry." - return - - # Validation: Check for empty required fields - dropdown_value = storename_dropdowns[key].value - if ( - not textbox_value - and dropdown_value not in storenames_cache - and dropdown_value in ["control", "signal", "event TTLs"] - ): - alert.object = "####Alert !! \n One of the text box entry is empty." - return - else: - # For cached values, use the dropdown value directly - textBoxValues.append(storename_dropdowns[key].value) - - if len(comboBoxValues) != len(textBoxValues): - alert.object = "####Alert !! \n Number of entries in combo box and text box should be same." - return - - names_for_storenames = [] - for i in range(len(comboBoxValues)): - if comboBoxValues[i] == "control" or comboBoxValues[i] == "signal": - if "_" in textBoxValues[i]: - alert.object = "####Alert !! \n Please do not use underscore in region name." 
- return - names_for_storenames.append("{}_{}".format(comboBoxValues[i], textBoxValues[i])) - elif comboBoxValues[i] == "event TTLs": - names_for_storenames.append(textBoxValues[i]) - else: - names_for_storenames.append(comboBoxValues[i]) - - d = dict() - d["storenames"] = text.value - d["names_for_storenames"] = names_for_storenames - literal_input_2.value = str(json.dumps(d, indent=2)) - - # Panel-based storename configuration (replaces Tkinter dialog) - storename_config_widgets = pn.Column(visible=False) - show_config_button = pn.widgets.Button(name="Show Selected Configuration", width=600) - - # on clicking 'Select Storenames' button, following function is executed - def update_values(event): - global storenames, vars_list - arr = [] - for w in take_widgets: - arr.append(w.value) - - new_arr = [] - - for i in range(len(arr[1])): - for j in range(arr[1][i]): - new_arr.append(arr[0][i]) - - if len(new_arr) > 0: - storenames = cross_selector.value + new_arr - else: - storenames = cross_selector.value - - for w in change_widgets: - w.value = storenames - - storenames_cache = dict() - if os.path.exists(os.path.join(Path.home(), ".storesList.json")): - with open(os.path.join(Path.home(), ".storesList.json")) as f: - storenames_cache = json.load(f) - - # Create Panel widgets for storename configuration - config_widgets = [] - storename_dropdowns.clear() - storename_textboxes.clear() - - if len(storenames) > 0: - config_widgets.append( - pn.pane.Markdown( - "## Configure Storenames\nSelect appropriate options for each storename and provide names as needed:" - ) - ) - - for i, storename in enumerate(storenames): - # Create a row for each storename - row_widgets = [] - - # Label - label = pn.pane.Markdown(f"**{storename}:**") - row_widgets.append(label) - - # Dropdown options - if storename in storenames_cache: - options = storenames_cache[storename] - default_value = options[0] if options else "" - else: - options = ["", "control", "signal", "event TTLs"] - default_value = "" - - # Create unique key for widget - widget_key = ( - f"{storename}_{i}" - if f"{storename}_{i}" not in storename_dropdowns - else f"{storename}_{i}_{len(storename_dropdowns)}" - ) - - dropdown = pn.widgets.Select(name="Type", value=default_value, options=options, width=150) - storename_dropdowns[widget_key] = dropdown - row_widgets.append(dropdown) - - # Text input (only show if not cached or if control/signal/event TTLs selected) - if storename not in storenames_cache or default_value in ["control", "signal", "event TTLs"]: - textbox = pn.widgets.TextInput( - name="Name", value="", placeholder="Enter region/event name", width=200 - ) - storename_textboxes[widget_key] = textbox - row_widgets.append(textbox) - - # Add helper text based on selection - def create_help_function(dropdown_widget, help_pane_container): - @pn.depends(dropdown_widget.param.value, watch=True) - def update_help(dropdown_value): - if dropdown_value == "control": - help_pane_container[0] = pn.pane.Markdown( - "*Type appropriate region name*", styles={"color": "gray", "font-size": "12px"} - ) - elif dropdown_value == "signal": - help_pane_container[0] = pn.pane.Markdown( - "*Type appropriate region name*", styles={"color": "gray", "font-size": "12px"} - ) - elif dropdown_value == "event TTLs": - help_pane_container[0] = pn.pane.Markdown( - "*Type event name for the TTLs*", styles={"color": "gray", "font-size": "12px"} - ) - else: - help_pane_container[0] = pn.pane.Markdown( - "", styles={"color": "gray", "font-size": "12px"} - ) - - return update_help 
- - help_container = [pn.pane.Markdown("")] - help_function = create_help_function(dropdown, help_container) - help_function(dropdown.value) # Initialize - row_widgets.append(help_container[0]) - - # Add the row to config widgets - config_widgets.append(pn.Row(*row_widgets, margin=(5, 0))) - - # Add show button - config_widgets.append(pn.Spacer(height=20)) - config_widgets.append(show_config_button) - config_widgets.append( - pn.pane.Markdown( - "*Click 'Show Selected Configuration' to apply your selections.*", - styles={"font-size": "12px", "color": "gray"}, - ) - ) - - # Update the configuration panel - storename_config_widgets.objects = config_widgets - storename_config_widgets.visible = len(storenames) > 0 - - # on clicking save button, following function is executed - def save_button(event=None): - global storenames - - d = json.loads(literal_input_2.value) - arr1, arr2 = np.asarray(d["storenames"]), np.asarray(d["names_for_storenames"]) - - if np.where(arr2 == "")[0].size > 0: - alert.object = "#### Alert !! \n Empty string in the list names_for_storenames." - logger.error("Empty string in the list names_for_storenames.") - raise Exception("Empty string in the list names_for_storenames.") - else: - alert.object = "#### No alerts !!" - - if arr1.shape[0] != arr2.shape[0]: - alert.object = "#### Alert !! \n Length of list storenames and names_for_storenames is not equal." - logger.error("Length of list storenames and names_for_storenames is not equal.") - raise Exception("Length of list storenames and names_for_storenames is not equal.") - else: - alert.object = "#### No alerts !!" - - if not os.path.exists(os.path.join(Path.home(), ".storesList.json")): - storenames_cache = dict() - - for i in range(arr1.shape[0]): - if arr1[i] in storenames_cache: - storenames_cache[arr1[i]].append(arr2[i]) - storenames_cache[arr1[i]] = list(set(storenames_cache[arr1[i]])) - else: - storenames_cache[arr1[i]] = [arr2[i]] - - with open(os.path.join(Path.home(), ".storesList.json"), "w") as f: - json.dump(storenames_cache, f, indent=4) - else: - with open(os.path.join(Path.home(), ".storesList.json")) as f: - storenames_cache = json.load(f) - - for i in range(arr1.shape[0]): - if arr1[i] in storenames_cache: - storenames_cache[arr1[i]].append(arr2[i]) - storenames_cache[arr1[i]] = list(set(storenames_cache[arr1[i]])) - else: - storenames_cache[arr1[i]] = [arr2[i]] - - with open(os.path.join(Path.home(), ".storesList.json"), "w") as f: - json.dump(storenames_cache, f, indent=4) - - arr = np.asarray([arr1, arr2]) - logger.info(arr) - if not os.path.exists(select_location.value): - os.mkdir(select_location.value) - - np.savetxt(os.path.join(select_location.value, "storesList.csv"), arr, delimiter=",", fmt="%s") - path.value = os.path.join(select_location.value, "storesList.csv") - logger.info(f"Storeslist file saved at {select_location.value}") - logger.info("Storeslist : \n" + str(arr)) - - # Connect button callbacks - update_options.on_click(update_values) - show_config_button.on_click(fetchValues) - save.on_click(save_button) - overwrite_button.on_click(overwrite_button_actions) - - # creating widgets, adding them to template and showing a GUI on a new browser window - number = scanPortsAndFind(start_port=5000, end_port=5200) - - if "data_np_v2" in flags or "data_np" in flags or "event_np" in flags: - widget_1 = pn.Column("# " + os.path.basename(folder_path), mark_down, mark_down_np, plot_select, plot) - widget_2 = pn.Column( - repeat_storenames, - repeat_storename_wd, - pn.Spacer(height=20), - 
cross_selector, - update_options, - storename_config_widgets, - pn.Spacer(height=10), - text, - literal_input_2, - alert, - mark_down_for_overwrite, - overwrite_button, - select_location, - save, - path, - ) - template.main.append(pn.Row(widget_1, widget_2)) - - else: - widget_1 = pn.Column("# " + os.path.basename(folder_path), mark_down) - widget_2 = pn.Column( - repeat_storenames, - repeat_storename_wd, - pn.Spacer(height=20), - cross_selector, - update_options, - storename_config_widgets, - pn.Spacer(height=10), - text, - literal_input_2, - alert, - mark_down_for_overwrite, - overwrite_button, - select_location, - save, - path, - ) - template.main.append(pn.Row(widget_1, widget_2)) - - template.show(port=number) - - # function to read input parameters and run the saveStorenames function def execute(inputParameters): From 89abe937b6aca75aff6f6f4e3f9f076f6e245ab4 Mon Sep 17 00:00:00 2001 From: pauladkisson Date: Fri, 30 Jan 2026 16:43:50 -0800 Subject: [PATCH 13/53] Added StorenamesSelector --- src/guppy/frontend/storenames_instructions.py | 49 +++++++ src/guppy/frontend/storenames_selector.py | 80 ++++++++++++ src/guppy/frontend/temp.py | 120 ++---------------- 3 files changed, 138 insertions(+), 111 deletions(-) create mode 100644 src/guppy/frontend/storenames_instructions.py create mode 100644 src/guppy/frontend/storenames_selector.py diff --git a/src/guppy/frontend/storenames_instructions.py b/src/guppy/frontend/storenames_instructions.py new file mode 100644 index 0000000..c8105bc --- /dev/null +++ b/src/guppy/frontend/storenames_instructions.py @@ -0,0 +1,49 @@ +import logging + +import panel as pn + +# hv.extension() +pn.extension() + +logger = logging.getLogger(__name__) + + +class StorenamesInstructions: + def __init__(self): + # instructions about how to save the storeslist file + self.mark_down = pn.pane.Markdown( + """ + + + ### Instructions to follow : + + - Check Storenames to repeat checkbox and see instructions in “Github Wiki” for duplicating storenames. + Otherwise do not check the Storenames to repeat checkbox.
+            - Select storenames from the list and click “Select Storenames” to populate the area below.
+ - Enter names for storenames, in order, using the following naming convention:
+            Isosbestic = “control_region” (ex: Dv1A = control_DMS)
+            Signal = “signal_region” (ex: Dv2A = signal_DMS)
+ TTLs can be named using any convention (ex: PrtR = RewardedPortEntries) but should be kept consistent for later group analysis + + ``` + {"storenames": ["Dv1A", "Dv2A", + "Dv3B", "Dv4B", + "LNRW", "LNnR", + "PrtN", "PrtR", + "RNPS"], + "names_for_storenames": ["control_DMS", "signal_DMS", + "control_DLS", "signal_DLS", + "RewardedNosepoke", "UnrewardedNosepoke", + "UnrewardedPort", "RewardedPort", + "InactiveNosepoke"]} + ``` + - If user has saved storenames before, clicking "Select Storenames" button will pop up a dialog box + showing previously used names for storenames. Select names for storenames by checking a checkbox and + click on "Show" to populate the text area in the Storenames GUI. Close the dialog box. + + - Select “create new” or “overwrite” to generate a new storenames list or replace a previous one + - Click Save + + """, + width=550, + ) diff --git a/src/guppy/frontend/storenames_selector.py b/src/guppy/frontend/storenames_selector.py new file mode 100644 index 0000000..9f7a562 --- /dev/null +++ b/src/guppy/frontend/storenames_selector.py @@ -0,0 +1,80 @@ +import logging + +import panel as pn + +# hv.extension() +pn.extension() + +logger = logging.getLogger(__name__) + + +class StorenamesSelector: + + def __init__(self, allnames): + self.alert = pn.pane.Alert("#### No alerts !!", alert_type="danger", height=80, width=600) + if len(allnames) == 0: + self.alert.object = ( + "####Alert !! \n No storenames found. There are not any TDT files or csv files to look for storenames." + ) + + # creating different buttons and selectors for the GUI + self.cross_selector = pn.widgets.CrossSelector( + name="Store Names Selection", value=[], options=allnames, width=600 + ) + self.multi_choice = pn.widgets.MultiChoice( + name="Select Storenames which you want more than once (multi-choice: multiple options selection)", + value=[], + options=allnames, + ) + + self.literal_input_1 = pn.widgets.LiteralInput( + name="Number of times you want the above storename (list)", value=[], type=list + ) + # self.literal_input_2 = pn.widgets.LiteralInput(name='Names for Storenames (list)', type=list) + + self.repeat_storenames = pn.widgets.Checkbox(name="Storenames to repeat", value=False) + self.repeat_storename_wd = pn.WidgetBox("", width=600) + + self.repeat_storenames.link(self.repeat_storename_wd, callbacks={"value": self.callback}) + # self.repeat_storename_wd = pn.WidgetBox('Storenames to repeat (leave blank if not needed)', multi_choice, literal_input_1, background="white", width=600) + + self.update_options = pn.widgets.Button(name="Select Storenames", width=600) + self.save = pn.widgets.Button(name="Save", width=600) + + self.text = pn.widgets.LiteralInput(value=[], name="Selected Store Names", type=list, width=600) + + self.path = pn.widgets.TextInput(name="Location to Stores List file", width=600) + + self.mark_down_for_overwrite = pn.pane.Markdown( + """ Select option from below if user wants to over-write a file or create a new file. + **Creating a new file will make a new output folder and will get saved at that location.** + If user selects to over-write a file **Select location of the file to over-write** will provide + the existing options of the output folders where user needs to over-write the file""", + width=600, + ) + + self.select_location = pn.widgets.Select( + name="Select location of the file to over-write", value="None", options=["None"], width=600 + ) + + self.overwrite_button = pn.widgets.MenuButton( + name="over-write storeslist file or create a new one? 
", + items=["over_write_file", "create_new_file"], + button_type="default", + split=True, + width=600, + ) + + self.literal_input_2 = pn.widgets.CodeEditor( + value="""{}""", theme="tomorrow", language="json", height=250, width=600 + ) + + self.take_widgets = pn.WidgetBox(self.multi_choice, self.literal_input_1) + + self.change_widgets = pn.WidgetBox(self.text) + + def callback(self, target, event): + if event.new == True: + target.objects = [self.multi_choice, self.literal_input_1] + elif event.new == False: + target.clear() diff --git a/src/guppy/frontend/temp.py b/src/guppy/frontend/temp.py index d2de0cf..10475e2 100644 --- a/src/guppy/frontend/temp.py +++ b/src/guppy/frontend/temp.py @@ -10,6 +10,8 @@ import panel as pn from .npm_channel_selector import NpmChannelSelector +from .storenames_instructions import StorenamesInstructions +from .storenames_selector import StorenamesSelector # hv.extension() pn.extension() @@ -84,123 +86,19 @@ def saveStorenames(inputParameters, events, flags, folder_path): # Get storenames from extractor's events property allnames = events - if "data_np_v2" in flags or "data_np" in flags or "event_np" in flags: - npm_channel_selector = NpmChannelSelector(folder_path=folder_path) - else: - pass - - # instructions about how to save the storeslist file - mark_down = pn.pane.Markdown( - """ - - - ### Instructions to follow : - - - Check Storenames to repeat checkbox and see instructions in “Github Wiki” for duplicating storenames. - Otherwise do not check the Storenames to repeat checkbox.
- - Select storenames from list and click “Select Storenames” to populate area below.
- - Enter names for storenames, in order, using the following naming convention:
- Isosbestic = “control_region” (ex: Dv1A= control_DMS)
- Signal= “signal_region” (ex: Dv2A= signal_DMS)
- TTLs can be named using any convention (ex: PrtR = RewardedPortEntries) but should be kept consistent for later group analysis - - ``` - {"storenames": ["Dv1A", "Dv2A", - "Dv3B", "Dv4B", - "LNRW", "LNnR", - "PrtN", "PrtR", - "RNPS"], - "names_for_storenames": ["control_DMS", "signal_DMS", - "control_DLS", "signal_DLS", - "RewardedNosepoke", "UnrewardedNosepoke", - "UnrewardedPort", "RewardedPort", - "InactiveNosepoke"]} - ``` - - If user has saved storenames before, clicking "Select Storenames" button will pop up a dialog box - showing previously used names for storenames. Select names for storenames by checking a checkbox and - click on "Show" to populate the text area in the Storenames GUI. Close the dialog box. - - - Select “create new” or “overwrite” to generate a new storenames list or replace a previous one - - Click Save - - """, - width=550, - ) - # creating GUI template - template = pn.template.BootstrapTemplate( - title="Storenames GUI - {}".format(os.path.basename(folder_path), mark_down) - ) - - # creating different buttons and selectors for the GUI - cross_selector = pn.widgets.CrossSelector(name="Store Names Selection", value=[], options=allnames, width=600) - multi_choice = pn.widgets.MultiChoice( - name="Select Storenames which you want more than once (multi-choice: multiple options selection)", - value=[], - options=allnames, - ) - - literal_input_1 = pn.widgets.LiteralInput( - name="Number of times you want the above storename (list)", value=[], type=list - ) - # literal_input_2 = pn.widgets.LiteralInput(name='Names for Storenames (list)', type=list) - - repeat_storenames = pn.widgets.Checkbox(name="Storenames to repeat", value=False) - repeat_storename_wd = pn.WidgetBox("", width=600) - - def callback(target, event): - if event.new == True: - target.objects = [multi_choice, literal_input_1] - elif event.new == False: - target.clear() - - repeat_storenames.link(repeat_storename_wd, callbacks={"value": callback}) - # repeat_storename_wd = pn.WidgetBox('Storenames to repeat (leave blank if not needed)', multi_choice, literal_input_1, background="white", width=600) - - update_options = pn.widgets.Button(name="Select Storenames", width=600) - save = pn.widgets.Button(name="Save", width=600) - - text = pn.widgets.LiteralInput(value=[], name="Selected Store Names", type=list, width=600) + template = pn.template.BootstrapTemplate(title="Storenames GUI - {}".format(os.path.basename(folder_path))) - path = pn.widgets.TextInput(name="Location to Stores List file", width=600) - - mark_down_for_overwrite = pn.pane.Markdown( - """ Select option from below if user wants to over-write a file or create a new file. - **Creating a new file will make a new output folder and will get saved at that location.** - If user selects to over-write a file **Select location of the file to over-write** will provide - the existing options of the output folders where user needs to over-write the file""", - width=600, - ) - - select_location = pn.widgets.Select( - name="Select location of the file to over-write", value="None", options=["None"], width=600 - ) - - overwrite_button = pn.widgets.MenuButton( - name="over-write storeslist file or create a new one? 
", - items=["over_write_file", "create_new_file"], - button_type="default", - split=True, - width=600, - ) - - literal_input_2 = pn.widgets.CodeEditor(value="""{}""", theme="tomorrow", language="json", height=250, width=600) - - alert = pn.pane.Alert("#### No alerts !!", alert_type="danger", height=80, width=600) - - take_widgets = pn.WidgetBox(multi_choice, literal_input_1) + if "data_np_v2" in flags or "data_np" in flags or "event_np" in flags: + npm_channel_selector = NpmChannelSelector(folder_path=folder_path) - change_widgets = pn.WidgetBox(text) + storenames_instructions = StorenamesInstructions() + storenames_selector = StorenamesSelector(allnames=allnames) storenames = [] storename_dropdowns = {} storename_textboxes = {} - if len(allnames) == 0: - alert.object = ( - "####Alert !! \n No storenames found. There are not any TDT files or csv files to look for storenames." - ) - # on clicking overwrite_button, following function is executed def overwrite_button_actions(event): if event.new == "over_write_file": @@ -467,7 +365,7 @@ def save_button(event=None): if "data_np_v2" in flags or "data_np" in flags or "event_np" in flags: widget_1 = pn.Column( "# " + os.path.basename(folder_path), - mark_down, + storenames_instructions.mark_down, npm_channel_selector.mark_down_np, npm_channel_selector.plot_select, npm_channel_selector.plot_pane, @@ -492,7 +390,7 @@ def save_button(event=None): template.main.append(pn.Row(widget_1, widget_2)) else: - widget_1 = pn.Column("# " + os.path.basename(folder_path), mark_down) + widget_1 = pn.Column("# " + os.path.basename(folder_path), storenames_instructions.mark_down) widget_2 = pn.Column( repeat_storenames, repeat_storename_wd, From e5b43a8bb9459ffb50f091fb00660052f8f8fd62 Mon Sep 17 00:00:00 2001 From: pauladkisson Date: Fri, 30 Jan 2026 17:14:47 -0800 Subject: [PATCH 14/53] Refactored fetchValues --- src/guppy/frontend/storenames_selector.py | 10 ++ src/guppy/frontend/temp.py | 152 ++++++++++++---------- 2 files changed, 91 insertions(+), 71 deletions(-) diff --git a/src/guppy/frontend/storenames_selector.py b/src/guppy/frontend/storenames_selector.py index 9f7a562..b01d999 100644 --- a/src/guppy/frontend/storenames_selector.py +++ b/src/guppy/frontend/storenames_selector.py @@ -1,3 +1,4 @@ +import json import logging import panel as pn @@ -78,3 +79,12 @@ def callback(self, target, event): target.objects = [self.multi_choice, self.literal_input_1] elif event.new == False: target.clear() + + def set_select_location_options(self, options): + self.select_location.options = options + + def set_alert_message(self, message): + self.alert.object = message + + def set_literal_input_2(self, d): # TODO: come up with a better name for this method. + self.literal_input_2.value = str(json.dumps(d, indent=2)) diff --git a/src/guppy/frontend/temp.py b/src/guppy/frontend/temp.py index 10475e2..4897d28 100644 --- a/src/guppy/frontend/temp.py +++ b/src/guppy/frontend/temp.py @@ -66,6 +66,64 @@ def make_dir(filepath): return op +def _fetchValues(text, storenames, storename_dropdowns, storename_textboxes, d): + if not storename_dropdowns or not len(storenames) > 0: + return "####Alert !! \n No storenames selected." 
+ + storenames_cache = dict() + if os.path.exists(os.path.join(Path.home(), ".storesList.json")): + with open(os.path.join(Path.home(), ".storesList.json")) as f: + storenames_cache = json.load(f) + + comboBoxValues, textBoxValues = [], [] + dropdown_keys = list(storename_dropdowns.keys()) + textbox_keys = list(storename_textboxes.keys()) if storename_textboxes else [] + + # Get dropdown values + for key in dropdown_keys: + comboBoxValues.append(storename_dropdowns[key].value) + + # Get textbox values (matching with dropdown keys) + for key in dropdown_keys: + if key in storename_textboxes: + textbox_value = storename_textboxes[key].value or "" + textBoxValues.append(textbox_value) + + # Validation: Check for whitespace + if len(textbox_value.split()) > 1: + return "####Alert !! \n Whitespace is not allowed in the text box entry." + + # Validation: Check for empty required fields + dropdown_value = storename_dropdowns[key].value + if ( + not textbox_value + and dropdown_value not in storenames_cache + and dropdown_value in ["control", "signal", "event TTLs"] + ): + return "####Alert !! \n One of the text box entry is empty." + else: + # For cached values, use the dropdown value directly + textBoxValues.append(storename_dropdowns[key].value) + + if len(comboBoxValues) != len(textBoxValues): + return "####Alert !! \n Number of entries in combo box and text box should be same." + + names_for_storenames = [] + for i in range(len(comboBoxValues)): + if comboBoxValues[i] == "control" or comboBoxValues[i] == "signal": + if "_" in textBoxValues[i]: + return "####Alert !! \n Please do not use underscore in region name." + names_for_storenames.append("{}_{}".format(comboBoxValues[i], textBoxValues[i])) + elif comboBoxValues[i] == "event TTLs": + names_for_storenames.append(textBoxValues[i]) + else: + names_for_storenames.append(comboBoxValues[i]) + + d["storenames"] = text.value + d["names_for_storenames"] = names_for_storenames + return "#### No alerts !!" + + # function to show GUI and save def saveStorenames(inputParameters, events, flags, folder_path): @@ -94,85 +152,35 @@ def saveStorenames(inputParameters, events, flags, folder_path): storenames_instructions = StorenamesInstructions() storenames_selector = StorenamesSelector(allnames=allnames) + alert = storenames_selector.alert storenames = [] storename_dropdowns = {} storename_textboxes = {} + # ------------------------------------------------------------------------------------------------------------------ + # onclick closure functions # on clicking overwrite_button, following function is executed def overwrite_button_actions(event): if event.new == "over_write_file": - select_location.options = takeOnlyDirs(glob.glob(os.path.join(folder_path, "*_output_*"))) - # select_location.value = select_location.options[0] + options = takeOnlyDirs(glob.glob(os.path.join(folder_path, "*_output_*"))) + storenames_selector.set_select_location_options(options=options) else: - select_location.options = [show_dir(folder_path)] - # select_location.value = select_location.options[0] + options = [show_dir(folder_path)] + storenames_selector.set_select_location_options(options=options) def fetchValues(event): - global storenames - alert.object = "#### No alerts !!" - - if not storename_dropdowns or not len(storenames) > 0: - alert.object = "####Alert !! \n No storenames selected." 
- return - - storenames_cache = dict() - if os.path.exists(os.path.join(Path.home(), ".storesList.json")): - with open(os.path.join(Path.home(), ".storesList.json")) as f: - storenames_cache = json.load(f) - - comboBoxValues, textBoxValues = [], [] - dropdown_keys = list(storename_dropdowns.keys()) - textbox_keys = list(storename_textboxes.keys()) if storename_textboxes else [] - - # Get dropdown values - for key in dropdown_keys: - comboBoxValues.append(storename_dropdowns[key].value) - - # Get textbox values (matching with dropdown keys) - for key in dropdown_keys: - if key in storename_textboxes: - textbox_value = storename_textboxes[key].value or "" - textBoxValues.append(textbox_value) - - # Validation: Check for whitespace - if len(textbox_value.split()) > 1: - alert.object = "####Alert !! \n Whitespace is not allowed in the text box entry." - return - - # Validation: Check for empty required fields - dropdown_value = storename_dropdowns[key].value - if ( - not textbox_value - and dropdown_value not in storenames_cache - and dropdown_value in ["control", "signal", "event TTLs"] - ): - alert.object = "####Alert !! \n One of the text box entry is empty." - return - else: - # For cached values, use the dropdown value directly - textBoxValues.append(storename_dropdowns[key].value) - - if len(comboBoxValues) != len(textBoxValues): - alert.object = "####Alert !! \n Number of entries in combo box and text box should be same." - return - - names_for_storenames = [] - for i in range(len(comboBoxValues)): - if comboBoxValues[i] == "control" or comboBoxValues[i] == "signal": - if "_" in textBoxValues[i]: - alert.object = "####Alert !! \n Please do not use underscore in region name." - return - names_for_storenames.append("{}_{}".format(comboBoxValues[i], textBoxValues[i])) - elif comboBoxValues[i] == "event TTLs": - names_for_storenames.append(textBoxValues[i]) - else: - names_for_storenames.append(comboBoxValues[i]) - d = dict() - d["storenames"] = text.value - d["names_for_storenames"] = names_for_storenames - literal_input_2.value = str(json.dumps(d, indent=2)) + alert_message = _fetchValues( + text=storenames_selector.text, + storenames=storenames, + storename_dropdowns=storename_dropdowns, + storename_textboxes=storename_textboxes, + d=d, + ) + storenames_selector.set_alert_message(alert_message) + storenames_selector.set_literal_input_2(d=d) + global storenames # Panel-based storename configuration (replaces Tkinter dialog) storename_config_widgets = pn.Column(visible=False) @@ -304,18 +312,20 @@ def save_button(event=None): arr1, arr2 = np.asarray(d["storenames"]), np.asarray(d["names_for_storenames"]) if np.where(arr2 == "")[0].size > 0: - alert.object = "#### Alert !! \n Empty string in the list names_for_storenames." + storenames_selector.set_alert_message("#### Alert !! \n Empty string in the list names_for_storenames.") logger.error("Empty string in the list names_for_storenames.") raise Exception("Empty string in the list names_for_storenames.") else: - alert.object = "#### No alerts !!" + storenames_selector.set_alert_message("#### No alerts !!") if arr1.shape[0] != arr2.shape[0]: - alert.object = "#### Alert !! \n Length of list storenames and names_for_storenames is not equal." + storenames_selector.set_alert_message( + "#### Alert !! \n Length of list storenames and names_for_storenames is not equal." 
+ ) logger.error("Length of list storenames and names_for_storenames is not equal.") raise Exception("Length of list storenames and names_for_storenames is not equal.") else: - alert.object = "#### No alerts !!" + storenames_selector.set_alert_message("#### No alerts !!") if not os.path.exists(os.path.join(Path.home(), ".storesList.json")): storenames_cache = dict() From 436c800c20cf377ee4c72aba98fb6201148a4806 Mon Sep 17 00:00:00 2001 From: pauladkisson Date: Mon, 2 Feb 2026 09:10:08 -0800 Subject: [PATCH 15/53] Refined StorenamesSelector --- src/guppy/frontend/storenames_selector.py | 25 ++++++ src/guppy/frontend/temp.py | 93 ++++++++++++----------- 2 files changed, 75 insertions(+), 43 deletions(-) diff --git a/src/guppy/frontend/storenames_selector.py b/src/guppy/frontend/storenames_selector.py index b01d999..73ac4c0 100644 --- a/src/guppy/frontend/storenames_selector.py +++ b/src/guppy/frontend/storenames_selector.py @@ -80,11 +80,36 @@ def callback(self, target, event): elif event.new == False: target.clear() + def get_select_location(self): + return self.select_location.value + def set_select_location_options(self, options): self.select_location.options = options def set_alert_message(self, message): self.alert.object = message + def get_literal_input_2(self): # TODO: come up with a better name for this method. + d = json.loads(self.literal_input_2.value) + return d + def set_literal_input_2(self, d): # TODO: come up with a better name for this method. self.literal_input_2.value = str(json.dumps(d, indent=2)) + + def get_take_widgets(self): + return [w.value for w in self.take_widgets] + + def set_change_widgets(self, value): + for w in self.change_widgets: + w.value = value + + def get_cross_selector(self): + return self.cross_selector.value + + def set_path(self, value): + self.path.value = value + + def attach_callbacks(self, button_name_to_onclick_fn: dict): + for button_name, onclick_fn in button_name_to_onclick_fn.items(): + button = getattr(self, button_name) + button.on_click(onclick_fn) diff --git a/src/guppy/frontend/temp.py b/src/guppy/frontend/temp.py index 4897d28..0a824c3 100644 --- a/src/guppy/frontend/temp.py +++ b/src/guppy/frontend/temp.py @@ -170,6 +170,7 @@ def overwrite_button_actions(event): storenames_selector.set_select_location_options(options=options) def fetchValues(event): + global storenames d = dict() alert_message = _fetchValues( text=storenames_selector.text, @@ -180,18 +181,19 @@ def fetchValues(event): ) storenames_selector.set_alert_message(alert_message) storenames_selector.set_literal_input_2(d=d) - global storenames # Panel-based storename configuration (replaces Tkinter dialog) storename_config_widgets = pn.Column(visible=False) show_config_button = pn.widgets.Button(name="Show Selected Configuration", width=600) + # TODO: Refactor frontend into dedicated class # on clicking 'Select Storenames' button, following function is executed def update_values(event): global storenames, vars_list - arr = [] - for w in take_widgets: - arr.append(w.value) + # arr = [] + # for w in take_widgets: + # arr.append(w.value) + arr = storenames_selector.get_take_widgets() new_arr = [] @@ -200,12 +202,11 @@ def update_values(event): new_arr.append(arr[0][i]) if len(new_arr) > 0: - storenames = cross_selector.value + new_arr + storenames = storenames_selector.get_cross_selector() + new_arr else: - storenames = cross_selector.value + storenames = storenames_selector.get_cross_selector() - for w in change_widgets: - w.value = storenames + 
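+        # push the final selection into the "Selected Store Names" display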
storenames_selector.set_change_widgets(storenames) storenames_cache = dict() if os.path.exists(os.path.join(Path.home(), ".storesList.json")): @@ -308,7 +309,7 @@ def update_help(dropdown_value): def save_button(event=None): global storenames - d = json.loads(literal_input_2.value) + d = storenames_selector.get_literal_input_2() arr1, arr2 = np.asarray(d["storenames"]), np.asarray(d["names_for_storenames"]) if np.where(arr2 == "")[0].size > 0: @@ -355,23 +356,27 @@ def save_button(event=None): arr = np.asarray([arr1, arr2]) logger.info(arr) - if not os.path.exists(select_location.value): - os.mkdir(select_location.value) + select_location = storenames_selector.get_select_location() + if not os.path.exists(select_location): + os.mkdir(select_location) - np.savetxt(os.path.join(select_location.value, "storesList.csv"), arr, delimiter=",", fmt="%s") - path.value = os.path.join(select_location.value, "storesList.csv") - logger.info(f"Storeslist file saved at {select_location.value}") + np.savetxt(os.path.join(select_location, "storesList.csv"), arr, delimiter=",", fmt="%s") + storenames_selector.set_path(os.path.join(select_location, "storesList.csv")) + logger.info(f"Storeslist file saved at {select_location}") logger.info("Storeslist : \n" + str(arr)) + # ------------------------------------------------------------------------------------------------------------------ + # Connect button callbacks - update_options.on_click(update_values) show_config_button.on_click(fetchValues) - save.on_click(save_button) - overwrite_button.on_click(overwrite_button_actions) - - # creating widgets, adding them to template and showing a GUI on a new browser window - number = scanPortsAndFind(start_port=5000, end_port=5200) - + button_name_to_onclick_fn = { + "update_options": update_values, + "save": save_button, + "overwrite_button": overwrite_button_actions, + } + storenames_selector.attach_callbacks(button_name_to_onclick_fn) + + # TODO: Refactor this into appropriate class methods if "data_np_v2" in flags or "data_np" in flags or "event_np" in flags: widget_1 = pn.Column( "# " + os.path.basename(folder_path), @@ -381,43 +386,45 @@ def save_button(event=None): npm_channel_selector.plot_pane, ) widget_2 = pn.Column( - repeat_storenames, - repeat_storename_wd, + storenames_selector.repeat_storenames, + storenames_selector.repeat_storename_wd, pn.Spacer(height=20), - cross_selector, - update_options, + storenames_selector.cross_selector, + storenames_selector.update_options, storename_config_widgets, pn.Spacer(height=10), - text, - literal_input_2, + storenames_selector.text, + storenames_selector.literal_input_2, alert, - mark_down_for_overwrite, - overwrite_button, - select_location, - save, - path, + storenames_selector.mark_down_for_overwrite, + storenames_selector.overwrite_button, + storenames_selector.select_location, + storenames_selector.save, + storenames_selector.path, ) template.main.append(pn.Row(widget_1, widget_2)) else: widget_1 = pn.Column("# " + os.path.basename(folder_path), storenames_instructions.mark_down) widget_2 = pn.Column( - repeat_storenames, - repeat_storename_wd, + storenames_selector.repeat_storenames, + storenames_selector.repeat_storename_wd, pn.Spacer(height=20), - cross_selector, - update_options, + storenames_selector.cross_selector, + storenames_selector.update_options, storename_config_widgets, pn.Spacer(height=10), - text, - literal_input_2, + storenames_selector.text, + storenames_selector.literal_input_2, alert, - mark_down_for_overwrite, - overwrite_button, - 
select_location, - save, - path, + storenames_selector.mark_down_for_overwrite, + storenames_selector.overwrite_button, + storenames_selector.select_location, + storenames_selector.save, + storenames_selector.path, ) template.main.append(pn.Row(widget_1, widget_2)) + # creating widgets, adding them to template and showing a GUI on a new browser window + number = scanPortsAndFind(start_port=5000, end_port=5200) template.show(port=number) From daddc9f5369cce73cdee4a33b68f31599c09746e Mon Sep 17 00:00:00 2001 From: pauladkisson Date: Mon, 2 Feb 2026 12:06:30 -0800 Subject: [PATCH 16/53] Refactored NpmChannelSelector into StorenamesInstructionsNPM by taking advantage of inheritance relationship. --- src/guppy/frontend/npm_channel_selector.py | 57 --------------- src/guppy/frontend/storenames_instructions.py | 62 ++++++++++++++++- src/guppy/frontend/storenames_selector.py | 22 ++++++ src/guppy/frontend/temp.py | 69 +++---------------- 4 files changed, 92 insertions(+), 118 deletions(-) delete mode 100644 src/guppy/frontend/npm_channel_selector.py diff --git a/src/guppy/frontend/npm_channel_selector.py b/src/guppy/frontend/npm_channel_selector.py deleted file mode 100644 index 4de61d6..0000000 --- a/src/guppy/frontend/npm_channel_selector.py +++ /dev/null @@ -1,57 +0,0 @@ -import glob -import logging -import os - -import holoviews as hv -import numpy as np -import pandas as pd -import panel as pn - -# hv.extension() -pn.extension() - -logger = logging.getLogger(__name__) - - -class NpmChannelSelector: - def __init__(self, folder_path): - path_chev = glob.glob(os.path.join(folder_path, "*chev*")) - path_chod = glob.glob(os.path.join(folder_path, "*chod*")) - path_chpr = glob.glob(os.path.join(folder_path, "*chpr*")) - combine_paths = path_chev + path_chod + path_chpr - self.d = dict() - for i in range(len(combine_paths)): - basename = (os.path.basename(combine_paths[i])).split(".")[0] - df = pd.read_csv(combine_paths[i]) - self.d[basename] = {"x": np.array(df["timestamps"]), "y": np.array(df["data"])} - keys = list(self.d.keys()) - self.mark_down_np = pn.pane.Markdown( - """ - ### Extra Instructions to follow when using Neurophotometrics data : - - Guppy will take the NPM data, which has interleaved frames - from the signal and control channels, and divide it out into - separate channels for each site you recordded. - However, since NPM does not automatically annotate which - frames belong to the signal channel and which belong to the - control channel, the user must specify this for GuPPy. - - Each of your recording sites will have a channel - named “chod” and a channel named “chev” - - View the plots below and, for each site, - determine whether the “chev” or “chod” channel is signal or control - - When you give your storenames, name the channels appropriately. - For example, “chev1” might be “signal_A” and - “chod1” might be “control_A” (or vice versa). 
- - """ - ) - self.plot_select = pn.widgets.Select( - name="Select channel to see correspondings channels", options=keys, value=keys[0] - ) - self.plot_pane = pn.pane.HoloViews(self._make_plot(self.plot_select.value), width=550) - self.plot_select.param.watch(self._on_plot_select_change, "value") - - def _make_plot(self, plot_key): - return hv.Curve((self.d[plot_key]["x"], self.d[plot_key]["y"])).opts(width=550) - - def _on_plot_select_change(self, event): - self.plot_pane.object = self._make_plot(event.new) diff --git a/src/guppy/frontend/storenames_instructions.py b/src/guppy/frontend/storenames_instructions.py index c8105bc..1e5fb8a 100644 --- a/src/guppy/frontend/storenames_instructions.py +++ b/src/guppy/frontend/storenames_instructions.py @@ -1,5 +1,10 @@ +import glob import logging +import os +import holoviews as hv +import numpy as np +import pandas as pd import panel as pn # hv.extension() @@ -9,7 +14,7 @@ class StorenamesInstructions: - def __init__(self): + def __init__(self, folder_path): # instructions about how to save the storeslist file self.mark_down = pn.pane.Markdown( """ @@ -47,3 +52,58 @@ def __init__(self): """, width=550, ) + + self.widget_1 = pn.Column("# " + os.path.basename(folder_path), self.mark_down) + + +class StorenamesInstructionsNPM(StorenamesInstructions): + def __init__(self, folder_path): + super().__init__(folder_path=folder_path) + path_chev = glob.glob(os.path.join(folder_path, "*chev*")) + path_chod = glob.glob(os.path.join(folder_path, "*chod*")) + path_chpr = glob.glob(os.path.join(folder_path, "*chpr*")) + combine_paths = path_chev + path_chod + path_chpr + self.d = dict() + for i in range(len(combine_paths)): + basename = (os.path.basename(combine_paths[i])).split(".")[0] + df = pd.read_csv(combine_paths[i]) + self.d[basename] = {"x": np.array(df["timestamps"]), "y": np.array(df["data"])} + keys = list(self.d.keys()) + self.mark_down_np = pn.pane.Markdown( + """ + ### Extra Instructions to follow when using Neurophotometrics data : + - Guppy will take the NPM data, which has interleaved frames + from the signal and control channels, and divide it out into + separate channels for each site you recordded. + However, since NPM does not automatically annotate which + frames belong to the signal channel and which belong to the + control channel, the user must specify this for GuPPy. + - Each of your recording sites will have a channel + named “chod” and a channel named “chev” + - View the plots below and, for each site, + determine whether the “chev” or “chod” channel is signal or control + - When you give your storenames, name the channels appropriately. + For example, “chev1” might be “signal_A” and + “chod1” might be “control_A” (or vice versa). 
+ + """ + ) + self.plot_select = pn.widgets.Select( + name="Select channel to see correspondings channels", options=keys, value=keys[0] + ) + self.plot_pane = pn.pane.HoloViews(self._make_plot(self.plot_select.value), width=550) + self.plot_select.param.watch(self._on_plot_select_change, "value") + + self.widget_1 = pn.Column( + "# " + os.path.basename(folder_path), + self.mark_down, + self.mark_down_np, + self.plot_select, + self.plot_pane, + ) + + def _make_plot(self, plot_key): + return hv.Curve((self.d[plot_key]["x"], self.d[plot_key]["y"])).opts(width=550) + + def _on_plot_select_change(self, event): + self.plot_pane.object = self._make_plot(event.new) diff --git a/src/guppy/frontend/storenames_selector.py b/src/guppy/frontend/storenames_selector.py index 73ac4c0..ccee9f0 100644 --- a/src/guppy/frontend/storenames_selector.py +++ b/src/guppy/frontend/storenames_selector.py @@ -74,6 +74,28 @@ def __init__(self, allnames): self.change_widgets = pn.WidgetBox(self.text) + # Panel-based storename configuration (replaces Tkinter dialog) + self.storename_config_widgets = pn.Column(visible=False) + self.show_config_button = pn.widgets.Button(name="Show Selected Configuration", width=600) + + self.widget_2 = pn.Column( + self.repeat_storenames, + self.repeat_storename_wd, + pn.Spacer(height=20), + self.cross_selector, + self.update_options, + self.storename_config_widgets, + pn.Spacer(height=10), + self.text, + self.literal_input_2, + self.alert, + self.mark_down_for_overwrite, + self.overwrite_button, + self.select_location, + self.save, + self.path, + ) + def callback(self, target, event): if event.new == True: target.objects = [self.multi_choice, self.literal_input_1] diff --git a/src/guppy/frontend/temp.py b/src/guppy/frontend/temp.py index 0a824c3..21c0fa5 100644 --- a/src/guppy/frontend/temp.py +++ b/src/guppy/frontend/temp.py @@ -9,8 +9,7 @@ import numpy as np import panel as pn -from .npm_channel_selector import NpmChannelSelector -from .storenames_instructions import StorenamesInstructions +from .storenames_instructions import StorenamesInstructions, StorenamesInstructionsNPM from .storenames_selector import StorenamesSelector # hv.extension() @@ -148,9 +147,9 @@ def saveStorenames(inputParameters, events, flags, folder_path): template = pn.template.BootstrapTemplate(title="Storenames GUI - {}".format(os.path.basename(folder_path))) if "data_np_v2" in flags or "data_np" in flags or "event_np" in flags: - npm_channel_selector = NpmChannelSelector(folder_path=folder_path) - - storenames_instructions = StorenamesInstructions() + storenames_instructions = StorenamesInstructionsNPM(folder_path=folder_path) + else: + storenames_instructions = StorenamesInstructions(folder_path=folder_path) storenames_selector = StorenamesSelector(allnames=allnames) alert = storenames_selector.alert @@ -182,10 +181,6 @@ def fetchValues(event): storenames_selector.set_alert_message(alert_message) storenames_selector.set_literal_input_2(d=d) - # Panel-based storename configuration (replaces Tkinter dialog) - storename_config_widgets = pn.Column(visible=False) - show_config_button = pn.widgets.Button(name="Show Selected Configuration", width=600) - # TODO: Refactor frontend into dedicated class # on clicking 'Select Storenames' button, following function is executed def update_values(event): @@ -293,7 +288,7 @@ def update_help(dropdown_value): # Add show button config_widgets.append(pn.Spacer(height=20)) - config_widgets.append(show_config_button) + config_widgets.append(storenames_selector.show_config_button) 
config_widgets.append( pn.pane.Markdown( "*Click 'Show Selected Configuration' to apply your selections.*", @@ -302,8 +297,8 @@ def update_help(dropdown_value): ) # Update the configuration panel - storename_config_widgets.objects = config_widgets - storename_config_widgets.visible = len(storenames) > 0 + storenames_selector.storename_config_widgets.objects = config_widgets + storenames_selector.storename_config_widgets.visible = len(storenames) > 0 # on clicking save button, following function is executed def save_button(event=None): @@ -368,62 +363,16 @@ def save_button(event=None): # ------------------------------------------------------------------------------------------------------------------ # Connect button callbacks - show_config_button.on_click(fetchValues) button_name_to_onclick_fn = { "update_options": update_values, "save": save_button, "overwrite_button": overwrite_button_actions, + "show_config_button": fetchValues, } storenames_selector.attach_callbacks(button_name_to_onclick_fn) # TODO: Refactor this into appropriate class methods - if "data_np_v2" in flags or "data_np" in flags or "event_np" in flags: - widget_1 = pn.Column( - "# " + os.path.basename(folder_path), - storenames_instructions.mark_down, - npm_channel_selector.mark_down_np, - npm_channel_selector.plot_select, - npm_channel_selector.plot_pane, - ) - widget_2 = pn.Column( - storenames_selector.repeat_storenames, - storenames_selector.repeat_storename_wd, - pn.Spacer(height=20), - storenames_selector.cross_selector, - storenames_selector.update_options, - storename_config_widgets, - pn.Spacer(height=10), - storenames_selector.text, - storenames_selector.literal_input_2, - alert, - storenames_selector.mark_down_for_overwrite, - storenames_selector.overwrite_button, - storenames_selector.select_location, - storenames_selector.save, - storenames_selector.path, - ) - template.main.append(pn.Row(widget_1, widget_2)) - - else: - widget_1 = pn.Column("# " + os.path.basename(folder_path), storenames_instructions.mark_down) - widget_2 = pn.Column( - storenames_selector.repeat_storenames, - storenames_selector.repeat_storename_wd, - pn.Spacer(height=20), - storenames_selector.cross_selector, - storenames_selector.update_options, - storename_config_widgets, - pn.Spacer(height=10), - storenames_selector.text, - storenames_selector.literal_input_2, - alert, - storenames_selector.mark_down_for_overwrite, - storenames_selector.overwrite_button, - storenames_selector.select_location, - storenames_selector.save, - storenames_selector.path, - ) - template.main.append(pn.Row(widget_1, widget_2)) + template.main.append(pn.Row(storenames_instructions.widget_1, storenames_selector.widget_2)) # creating widgets, adding them to template and showing a GUI on a new browser window number = scanPortsAndFind(start_port=5000, end_port=5200) From 1d8796ab210cf2339970c887c218ffbb51bb5302 Mon Sep 17 00:00:00 2001 From: pauladkisson Date: Mon, 2 Feb 2026 12:13:28 -0800 Subject: [PATCH 17/53] renamed widgets --- src/guppy/frontend/storenames_instructions.py | 4 ++-- src/guppy/frontend/storenames_selector.py | 2 +- src/guppy/frontend/temp.py | 3 +-- 3 files changed, 4 insertions(+), 5 deletions(-) diff --git a/src/guppy/frontend/storenames_instructions.py b/src/guppy/frontend/storenames_instructions.py index 1e5fb8a..ba5fe5d 100644 --- a/src/guppy/frontend/storenames_instructions.py +++ b/src/guppy/frontend/storenames_instructions.py @@ -53,7 +53,7 @@ def __init__(self, folder_path): width=550, ) - self.widget_1 = pn.Column("# " + 
os.path.basename(folder_path), self.mark_down) + self.widget = pn.Column("# " + os.path.basename(folder_path), self.mark_down) class StorenamesInstructionsNPM(StorenamesInstructions): @@ -94,7 +94,7 @@ def __init__(self, folder_path): self.plot_pane = pn.pane.HoloViews(self._make_plot(self.plot_select.value), width=550) self.plot_select.param.watch(self._on_plot_select_change, "value") - self.widget_1 = pn.Column( + self.widget = pn.Column( "# " + os.path.basename(folder_path), self.mark_down, self.mark_down_np, diff --git a/src/guppy/frontend/storenames_selector.py b/src/guppy/frontend/storenames_selector.py index ccee9f0..bcb5c83 100644 --- a/src/guppy/frontend/storenames_selector.py +++ b/src/guppy/frontend/storenames_selector.py @@ -78,7 +78,7 @@ def __init__(self, allnames): self.storename_config_widgets = pn.Column(visible=False) self.show_config_button = pn.widgets.Button(name="Show Selected Configuration", width=600) - self.widget_2 = pn.Column( + self.widget = pn.Column( self.repeat_storenames, self.repeat_storename_wd, pn.Spacer(height=20), diff --git a/src/guppy/frontend/temp.py b/src/guppy/frontend/temp.py index 21c0fa5..f6c3e6e 100644 --- a/src/guppy/frontend/temp.py +++ b/src/guppy/frontend/temp.py @@ -371,8 +371,7 @@ def save_button(event=None): } storenames_selector.attach_callbacks(button_name_to_onclick_fn) - # TODO: Refactor this into appropriate class methods - template.main.append(pn.Row(storenames_instructions.widget_1, storenames_selector.widget_2)) + template.main.append(pn.Row(storenames_instructions.widget, storenames_selector.widget)) # creating widgets, adding them to template and showing a GUI on a new browser window number = scanPortsAndFind(start_port=5000, end_port=5200) From 7d339449f6111e4583c3171b11e38458c69e6b98 Mon Sep 17 00:00:00 2001 From: pauladkisson Date: Mon, 2 Feb 2026 12:34:23 -0800 Subject: [PATCH 18/53] Refactored configuration into a dedicated class. 
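
For orientation, a minimal sketch of how the extracted class is driven (the
module path is inferred from this series' layout; the stub selector and the
sample storenames are illustrative, not part of the change):

```python
import panel as pn

from guppy.frontend.storenames_config import StorenamesConfig

pn.extension()


class SelectorStub:
    """Stand-in for StorenamesSelector; only show_config_button is read here."""

    def __init__(self):
        self.show_config_button = pn.widgets.Button(name="Show Selected Configuration", width=600)


dropdowns, textboxes = {}, {}  # filled in place, one entry per storename row
config = StorenamesConfig(
    storenames_selector=SelectorStub(),
    storename_dropdowns=dropdowns,
    storename_textboxes=textboxes,
    storenames=["Dv1A", "Dv2A"],                 # example raw store names
    storenames_cache={"Dv1A": ["control_DMS"]},  # labels saved in earlier sessions
)
# update_values() in temp.py then assigns config.config_widgets to the
# selector's storename_config_widgets column and toggles its visibility.
```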
--- src/guppy/frontend/storenames_config.py | 106 ++++++++++++++++++++++++ src/guppy/frontend/temp.py | 97 ++-------------------- 2 files changed, 115 insertions(+), 88 deletions(-) create mode 100644 src/guppy/frontend/storenames_config.py diff --git a/src/guppy/frontend/storenames_config.py b/src/guppy/frontend/storenames_config.py new file mode 100644 index 0000000..618479d --- /dev/null +++ b/src/guppy/frontend/storenames_config.py @@ -0,0 +1,106 @@ +import logging + +import panel as pn + +from .storenames_selector import StorenamesSelector + +pn.extension() + +logger = logging.getLogger(__name__) + + +class StorenamesConfig: + def __init__( + self, + storenames_selector: StorenamesSelector, + storename_dropdowns, + storename_textboxes, + storenames, + storenames_cache, + ): + self.config_widgets = [] + storename_dropdowns.clear() + storename_textboxes.clear() + + if len(storenames) > 0: + self.config_widgets.append( + pn.pane.Markdown( + "## Configure Storenames\nSelect appropriate options for each storename and provide names as needed:" + ) + ) + + for i, storename in enumerate(storenames): + # Create a row for each storename + row_widgets = [] + + # Label + label = pn.pane.Markdown(f"**{storename}:**") + row_widgets.append(label) + + # Dropdown options + if storename in storenames_cache: + options = storenames_cache[storename] + default_value = options[0] if options else "" + else: + options = ["", "control", "signal", "event TTLs"] + default_value = "" + + # Create unique key for widget + widget_key = ( + f"{storename}_{i}" + if f"{storename}_{i}" not in storename_dropdowns + else f"{storename}_{i}_{len(storename_dropdowns)}" + ) + + dropdown = pn.widgets.Select(name="Type", value=default_value, options=options, width=150) + storename_dropdowns[widget_key] = dropdown + row_widgets.append(dropdown) + + # Text input (only show if not cached or if control/signal/event TTLs selected) + if storename not in storenames_cache or default_value in ["control", "signal", "event TTLs"]: + textbox = pn.widgets.TextInput( + name="Name", value="", placeholder="Enter region/event name", width=200 + ) + storename_textboxes[widget_key] = textbox + row_widgets.append(textbox) + + # Add helper text based on selection + def create_help_function(dropdown_widget, help_pane_container): + @pn.depends(dropdown_widget.param.value, watch=True) + def update_help(dropdown_value): + if dropdown_value == "control": + help_pane_container[0] = pn.pane.Markdown( + "*Type appropriate region name*", styles={"color": "gray", "font-size": "12px"} + ) + elif dropdown_value == "signal": + help_pane_container[0] = pn.pane.Markdown( + "*Type appropriate region name*", styles={"color": "gray", "font-size": "12px"} + ) + elif dropdown_value == "event TTLs": + help_pane_container[0] = pn.pane.Markdown( + "*Type event name for the TTLs*", styles={"color": "gray", "font-size": "12px"} + ) + else: + help_pane_container[0] = pn.pane.Markdown( + "", styles={"color": "gray", "font-size": "12px"} + ) + + return update_help + + help_container = [pn.pane.Markdown("")] + help_function = create_help_function(dropdown, help_container) + help_function(dropdown.value) # Initialize + row_widgets.append(help_container[0]) + + # Add the row to config widgets + self.config_widgets.append(pn.Row(*row_widgets, margin=(5, 0))) + + # Add show button + self.config_widgets.append(pn.Spacer(height=20)) + self.config_widgets.append(storenames_selector.show_config_button) + self.config_widgets.append( + pn.pane.Markdown( + "*Click 'Show Selected 
Configuration' to apply your selections.*", + styles={"font-size": "12px", "color": "gray"}, + ) + ) diff --git a/src/guppy/frontend/temp.py b/src/guppy/frontend/temp.py index f6c3e6e..b5df0fa 100644 --- a/src/guppy/frontend/temp.py +++ b/src/guppy/frontend/temp.py @@ -9,6 +9,7 @@ import numpy as np import panel as pn +from .storenames_config import StorenamesConfig from .storenames_instructions import StorenamesInstructions, StorenamesInstructionsNPM from .storenames_selector import StorenamesSelector @@ -151,7 +152,6 @@ def saveStorenames(inputParameters, events, flags, folder_path): else: storenames_instructions = StorenamesInstructions(folder_path=folder_path) storenames_selector = StorenamesSelector(allnames=allnames) - alert = storenames_selector.alert storenames = [] storename_dropdowns = {} @@ -209,95 +209,16 @@ def update_values(event): storenames_cache = json.load(f) # Create Panel widgets for storename configuration - config_widgets = [] - storename_dropdowns.clear() - storename_textboxes.clear() - - if len(storenames) > 0: - config_widgets.append( - pn.pane.Markdown( - "## Configure Storenames\nSelect appropriate options for each storename and provide names as needed:" - ) - ) - - for i, storename in enumerate(storenames): - # Create a row for each storename - row_widgets = [] - - # Label - label = pn.pane.Markdown(f"**{storename}:**") - row_widgets.append(label) - - # Dropdown options - if storename in storenames_cache: - options = storenames_cache[storename] - default_value = options[0] if options else "" - else: - options = ["", "control", "signal", "event TTLs"] - default_value = "" - - # Create unique key for widget - widget_key = ( - f"{storename}_{i}" - if f"{storename}_{i}" not in storename_dropdowns - else f"{storename}_{i}_{len(storename_dropdowns)}" - ) - - dropdown = pn.widgets.Select(name="Type", value=default_value, options=options, width=150) - storename_dropdowns[widget_key] = dropdown - row_widgets.append(dropdown) - - # Text input (only show if not cached or if control/signal/event TTLs selected) - if storename not in storenames_cache or default_value in ["control", "signal", "event TTLs"]: - textbox = pn.widgets.TextInput( - name="Name", value="", placeholder="Enter region/event name", width=200 - ) - storename_textboxes[widget_key] = textbox - row_widgets.append(textbox) - - # Add helper text based on selection - def create_help_function(dropdown_widget, help_pane_container): - @pn.depends(dropdown_widget.param.value, watch=True) - def update_help(dropdown_value): - if dropdown_value == "control": - help_pane_container[0] = pn.pane.Markdown( - "*Type appropriate region name*", styles={"color": "gray", "font-size": "12px"} - ) - elif dropdown_value == "signal": - help_pane_container[0] = pn.pane.Markdown( - "*Type appropriate region name*", styles={"color": "gray", "font-size": "12px"} - ) - elif dropdown_value == "event TTLs": - help_pane_container[0] = pn.pane.Markdown( - "*Type event name for the TTLs*", styles={"color": "gray", "font-size": "12px"} - ) - else: - help_pane_container[0] = pn.pane.Markdown( - "", styles={"color": "gray", "font-size": "12px"} - ) - - return update_help - - help_container = [pn.pane.Markdown("")] - help_function = create_help_function(dropdown, help_container) - help_function(dropdown.value) # Initialize - row_widgets.append(help_container[0]) - - # Add the row to config widgets - config_widgets.append(pn.Row(*row_widgets, margin=(5, 0))) - - # Add show button - config_widgets.append(pn.Spacer(height=20)) - 
config_widgets.append(storenames_selector.show_config_button) - config_widgets.append( - pn.pane.Markdown( - "*Click 'Show Selected Configuration' to apply your selections.*", - styles={"font-size": "12px", "color": "gray"}, - ) - ) + storenames_config = StorenamesConfig( + storenames_selector=storenames_selector, + storename_dropdowns=storename_dropdowns, + storename_textboxes=storename_textboxes, + storenames=storenames, + storenames_cache=storenames_cache, + ) # Update the configuration panel - storenames_selector.storename_config_widgets.objects = config_widgets + storenames_selector.storename_config_widgets.objects = storenames_config.config_widgets storenames_selector.storename_config_widgets.visible = len(storenames) > 0 # on clicking save button, following function is executed From 39f84072e06ddc6435fade1db1399485b557781d Mon Sep 17 00:00:00 2001 From: pauladkisson Date: Mon, 2 Feb 2026 12:42:53 -0800 Subject: [PATCH 19/53] Refactored configuration init --- src/guppy/frontend/storenames_config.py | 161 ++++++++++++------------ 1 file changed, 81 insertions(+), 80 deletions(-) diff --git a/src/guppy/frontend/storenames_config.py b/src/guppy/frontend/storenames_config.py index 618479d..63eedb6 100644 --- a/src/guppy/frontend/storenames_config.py +++ b/src/guppy/frontend/storenames_config.py @@ -22,85 +22,86 @@ def __init__( storename_dropdowns.clear() storename_textboxes.clear() - if len(storenames) > 0: - self.config_widgets.append( - pn.pane.Markdown( - "## Configure Storenames\nSelect appropriate options for each storename and provide names as needed:" - ) - ) + if len(storenames) == 0: + return - for i, storename in enumerate(storenames): - # Create a row for each storename - row_widgets = [] - - # Label - label = pn.pane.Markdown(f"**{storename}:**") - row_widgets.append(label) - - # Dropdown options - if storename in storenames_cache: - options = storenames_cache[storename] - default_value = options[0] if options else "" - else: - options = ["", "control", "signal", "event TTLs"] - default_value = "" - - # Create unique key for widget - widget_key = ( - f"{storename}_{i}" - if f"{storename}_{i}" not in storename_dropdowns - else f"{storename}_{i}_{len(storename_dropdowns)}" - ) - - dropdown = pn.widgets.Select(name="Type", value=default_value, options=options, width=150) - storename_dropdowns[widget_key] = dropdown - row_widgets.append(dropdown) - - # Text input (only show if not cached or if control/signal/event TTLs selected) - if storename not in storenames_cache or default_value in ["control", "signal", "event TTLs"]: - textbox = pn.widgets.TextInput( - name="Name", value="", placeholder="Enter region/event name", width=200 - ) - storename_textboxes[widget_key] = textbox - row_widgets.append(textbox) - - # Add helper text based on selection - def create_help_function(dropdown_widget, help_pane_container): - @pn.depends(dropdown_widget.param.value, watch=True) - def update_help(dropdown_value): - if dropdown_value == "control": - help_pane_container[0] = pn.pane.Markdown( - "*Type appropriate region name*", styles={"color": "gray", "font-size": "12px"} - ) - elif dropdown_value == "signal": - help_pane_container[0] = pn.pane.Markdown( - "*Type appropriate region name*", styles={"color": "gray", "font-size": "12px"} - ) - elif dropdown_value == "event TTLs": - help_pane_container[0] = pn.pane.Markdown( - "*Type event name for the TTLs*", styles={"color": "gray", "font-size": "12px"} - ) - else: - help_pane_container[0] = pn.pane.Markdown( - "", styles={"color": "gray", 
"font-size": "12px"} - ) - - return update_help - - help_container = [pn.pane.Markdown("")] - help_function = create_help_function(dropdown, help_container) - help_function(dropdown.value) # Initialize - row_widgets.append(help_container[0]) - - # Add the row to config widgets - self.config_widgets.append(pn.Row(*row_widgets, margin=(5, 0))) - - # Add show button - self.config_widgets.append(pn.Spacer(height=20)) - self.config_widgets.append(storenames_selector.show_config_button) - self.config_widgets.append( - pn.pane.Markdown( - "*Click 'Show Selected Configuration' to apply your selections.*", - styles={"font-size": "12px", "color": "gray"}, - ) + self.config_widgets.append( + pn.pane.Markdown( + "## Configure Storenames\nSelect appropriate options for each storename and provide names as needed:" + ) + ) + + for i, storename in enumerate(storenames): + self.setup_storename(i, storename, storename_dropdowns, storename_textboxes, storenames_cache) + + # Add show button + self.config_widgets.append(pn.Spacer(height=20)) + self.config_widgets.append(storenames_selector.show_config_button) + self.config_widgets.append( + pn.pane.Markdown( + "*Click 'Show Selected Configuration' to apply your selections.*", + styles={"font-size": "12px", "color": "gray"}, ) + ) + + def setup_storename(self, i, storename, storename_dropdowns, storename_textboxes, storenames_cache): + # Create a row for each storename + row_widgets = [] + + # Label + label = pn.pane.Markdown(f"**{storename}:**") + row_widgets.append(label) + + # Dropdown options + if storename in storenames_cache: + options = storenames_cache[storename] + default_value = options[0] if options else "" + else: + options = ["", "control", "signal", "event TTLs"] + default_value = "" + + # Create unique key for widget + widget_key = ( + f"{storename}_{i}" + if f"{storename}_{i}" not in storename_dropdowns + else f"{storename}_{i}_{len(storename_dropdowns)}" + ) + + dropdown = pn.widgets.Select(name="Type", value=default_value, options=options, width=150) + storename_dropdowns[widget_key] = dropdown + row_widgets.append(dropdown) + + # Text input (only show if not cached or if control/signal/event TTLs selected) + if storename not in storenames_cache or default_value in ["control", "signal", "event TTLs"]: + textbox = pn.widgets.TextInput(name="Name", value="", placeholder="Enter region/event name", width=200) + storename_textboxes[widget_key] = textbox + row_widgets.append(textbox) + + # Add helper text based on selection + def create_help_function(dropdown_widget, help_pane_container): + @pn.depends(dropdown_widget.param.value, watch=True) + def update_help(dropdown_value): + if dropdown_value == "control": + help_pane_container[0] = pn.pane.Markdown( + "*Type appropriate region name*", styles={"color": "gray", "font-size": "12px"} + ) + elif dropdown_value == "signal": + help_pane_container[0] = pn.pane.Markdown( + "*Type appropriate region name*", styles={"color": "gray", "font-size": "12px"} + ) + elif dropdown_value == "event TTLs": + help_pane_container[0] = pn.pane.Markdown( + "*Type event name for the TTLs*", styles={"color": "gray", "font-size": "12px"} + ) + else: + help_pane_container[0] = pn.pane.Markdown("", styles={"color": "gray", "font-size": "12px"}) + + return update_help + + help_container = [pn.pane.Markdown("")] + help_function = create_help_function(dropdown, help_container) + help_function(dropdown.value) # Initialize + row_widgets.append(help_container[0]) + + # Add the row to config widgets + 
self.config_widgets.append(pn.Row(*row_widgets, margin=(5, 0))) From a3db0c7fcc19e1a86e37aae4785448f90308cafe Mon Sep 17 00:00:00 2001 From: pauladkisson Date: Mon, 2 Feb 2026 12:56:45 -0800 Subject: [PATCH 20/53] Refactored configuration dropdown helper --- src/guppy/frontend/storenames_config.py | 47 ++++++++++++------------- 1 file changed, 23 insertions(+), 24 deletions(-) diff --git a/src/guppy/frontend/storenames_config.py b/src/guppy/frontend/storenames_config.py index 63eedb6..b52a40e 100644 --- a/src/guppy/frontend/storenames_config.py +++ b/src/guppy/frontend/storenames_config.py @@ -19,6 +19,7 @@ def __init__( storenames_cache, ): self.config_widgets = [] + self._dropdown_help_map = {} storename_dropdowns.clear() storename_textboxes.clear() @@ -44,6 +45,23 @@ def __init__( ) ) + def _on_dropdown_value_change(self, event): + help_pane = self._dropdown_help_map.get(event.obj) + if help_pane is None: + return + dropdown_value = event.new + help_pane.object = self._get_help_text(dropdown_value=dropdown_value) + + def _get_help_text(self, dropdown_value): + if dropdown_value == "control": + return "*Type appropriate region name*" + elif dropdown_value == "signal": + return "*Type appropriate region name*" + elif dropdown_value == "event TTLs": + return "*Type event name for the TTLs*" + else: + return "" + def setup_storename(self, i, storename, storename_dropdowns, storename_textboxes, storenames_cache): # Create a row for each storename row_widgets = [] @@ -78,30 +96,11 @@ def setup_storename(self, i, storename, storename_dropdowns, storename_textboxes row_widgets.append(textbox) # Add helper text based on selection - def create_help_function(dropdown_widget, help_pane_container): - @pn.depends(dropdown_widget.param.value, watch=True) - def update_help(dropdown_value): - if dropdown_value == "control": - help_pane_container[0] = pn.pane.Markdown( - "*Type appropriate region name*", styles={"color": "gray", "font-size": "12px"} - ) - elif dropdown_value == "signal": - help_pane_container[0] = pn.pane.Markdown( - "*Type appropriate region name*", styles={"color": "gray", "font-size": "12px"} - ) - elif dropdown_value == "event TTLs": - help_pane_container[0] = pn.pane.Markdown( - "*Type event name for the TTLs*", styles={"color": "gray", "font-size": "12px"} - ) - else: - help_pane_container[0] = pn.pane.Markdown("", styles={"color": "gray", "font-size": "12px"}) - - return update_help - - help_container = [pn.pane.Markdown("")] - help_function = create_help_function(dropdown, help_container) - help_function(dropdown.value) # Initialize - row_widgets.append(help_container[0]) + initial_help_text = self._get_help_text(default_value) + help_pane = pn.pane.Markdown(initial_help_text, styles={"color": "gray", "font-size": "12px"}) + self._dropdown_help_map[dropdown] = help_pane + dropdown.param.watch(self._on_dropdown_value_change, "value") + row_widgets.append(help_pane) # Add the row to config widgets self.config_widgets.append(pn.Row(*row_widgets, margin=(5, 0))) From 05399f7cb4594c30bcd4ded9d42a67ed2bb3e835 Mon Sep 17 00:00:00 2001 From: pauladkisson Date: Mon, 2 Feb 2026 13:07:12 -0800 Subject: [PATCH 21/53] Composed config with the selector. 
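
A minimal sketch of the new entry point (import path inferred from the series'
layout; the storename list and the empty cache are illustrative):

```python
from guppy.frontend.storenames_selector import StorenamesSelector

selector = StorenamesSelector(allnames=["Dv1A", "Dv2A"])
selector.configure_storenames(
    storename_dropdowns={},  # populated in place by StorenamesConfig
    storename_textboxes={},  # populated in place for uncached storenames
    storenames=["Dv1A", "Dv2A"],
    storenames_cache={},     # no previously saved labels in this sketch
)
# The selector now owns the StorenamesConfig instance and shows its
# storename_config_widgets column whenever at least one storename is passed.
```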
--- src/guppy/frontend/storenames_config.py | 6 ++---- src/guppy/frontend/storenames_selector.py | 17 ++++++++++++++++- src/guppy/frontend/temp.py | 18 ++---------------- 3 files changed, 20 insertions(+), 21 deletions(-) diff --git a/src/guppy/frontend/storenames_config.py b/src/guppy/frontend/storenames_config.py index b52a40e..c7aa419 100644 --- a/src/guppy/frontend/storenames_config.py +++ b/src/guppy/frontend/storenames_config.py @@ -2,8 +2,6 @@ import panel as pn -from .storenames_selector import StorenamesSelector - pn.extension() logger = logging.getLogger(__name__) @@ -12,7 +10,7 @@ class StorenamesConfig: def __init__( self, - storenames_selector: StorenamesSelector, + show_config_button, storename_dropdowns, storename_textboxes, storenames, @@ -37,7 +35,7 @@ def __init__( # Add show button self.config_widgets.append(pn.Spacer(height=20)) - self.config_widgets.append(storenames_selector.show_config_button) + self.config_widgets.append(show_config_button) self.config_widgets.append( pn.pane.Markdown( "*Click 'Show Selected Configuration' to apply your selections.*", diff --git a/src/guppy/frontend/storenames_selector.py b/src/guppy/frontend/storenames_selector.py index bcb5c83..97919d6 100644 --- a/src/guppy/frontend/storenames_selector.py +++ b/src/guppy/frontend/storenames_selector.py @@ -3,7 +3,8 @@ import panel as pn -# hv.extension() +from .storenames_config import StorenamesConfig + pn.extension() logger = logging.getLogger(__name__) @@ -135,3 +136,17 @@ def attach_callbacks(self, button_name_to_onclick_fn: dict): for button_name, onclick_fn in button_name_to_onclick_fn.items(): button = getattr(self, button_name) button.on_click(onclick_fn) + + def configure_storenames(self, storename_dropdowns, storename_textboxes, storenames, storenames_cache): + # Create Panel widgets for storename configuration + self.storenames_config = StorenamesConfig( + show_config_button=self.show_config_button, + storename_dropdowns=storename_dropdowns, + storename_textboxes=storename_textboxes, + storenames=storenames, + storenames_cache=storenames_cache, + ) + + # Update the configuration panel + self.storename_config_widgets.objects = self.storenames_config.config_widgets + self.storename_config_widgets.visible = len(storenames) > 0 diff --git a/src/guppy/frontend/temp.py b/src/guppy/frontend/temp.py index b5df0fa..3e98ea9 100644 --- a/src/guppy/frontend/temp.py +++ b/src/guppy/frontend/temp.py @@ -9,7 +9,6 @@ import numpy as np import panel as pn -from .storenames_config import StorenamesConfig from .storenames_instructions import StorenamesInstructions, StorenamesInstructionsNPM from .storenames_selector import StorenamesSelector @@ -181,26 +180,19 @@ def fetchValues(event): storenames_selector.set_alert_message(alert_message) storenames_selector.set_literal_input_2(d=d) - # TODO: Refactor frontend into dedicated class # on clicking 'Select Storenames' button, following function is executed def update_values(event): global storenames, vars_list - # arr = [] - # for w in take_widgets: - # arr.append(w.value) - arr = storenames_selector.get_take_widgets() + arr = storenames_selector.get_take_widgets() new_arr = [] - for i in range(len(arr[1])): for j in range(arr[1][i]): new_arr.append(arr[0][i]) - if len(new_arr) > 0: storenames = storenames_selector.get_cross_selector() + new_arr else: storenames = storenames_selector.get_cross_selector() - storenames_selector.set_change_widgets(storenames) storenames_cache = dict() @@ -208,19 +200,13 @@ def update_values(event): with 
open(os.path.join(Path.home(), ".storesList.json")) as f: storenames_cache = json.load(f) - # Create Panel widgets for storename configuration - storenames_config = StorenamesConfig( - storenames_selector=storenames_selector, + storenames_selector.configure_storenames( storename_dropdowns=storename_dropdowns, storename_textboxes=storename_textboxes, storenames=storenames, storenames_cache=storenames_cache, ) - # Update the configuration panel - storenames_selector.storename_config_widgets.objects = storenames_config.config_widgets - storenames_selector.storename_config_widgets.visible = len(storenames) > 0 - # on clicking save button, following function is executed def save_button(event=None): global storenames From d7daeacf91485130e16daf698396a0547b1afd71 Mon Sep 17 00:00:00 2001 From: pauladkisson Date: Mon, 2 Feb 2026 13:14:51 -0800 Subject: [PATCH 22/53] refactored pure function _save out of save_button --- src/guppy/frontend/temp.py | 109 +++++++++++++++++++------------------ 1 file changed, 56 insertions(+), 53 deletions(-) diff --git a/src/guppy/frontend/temp.py b/src/guppy/frontend/temp.py index 3e98ea9..567cd5c 100644 --- a/src/guppy/frontend/temp.py +++ b/src/guppy/frontend/temp.py @@ -123,6 +123,61 @@ def _fetchValues(text, storenames, storename_dropdowns, storename_textboxes, d): return "#### No alerts !!" +def _save(d, select_location, storenames_selector): + arr1, arr2 = np.asarray(d["storenames"]), np.asarray(d["names_for_storenames"]) + + if np.where(arr2 == "")[0].size > 0: + storenames_selector.set_alert_message("#### Alert !! \n Empty string in the list names_for_storenames.") + logger.error("Empty string in the list names_for_storenames.") + raise Exception("Empty string in the list names_for_storenames.") + else: + storenames_selector.set_alert_message("#### No alerts !!") + + if arr1.shape[0] != arr2.shape[0]: + storenames_selector.set_alert_message( + "#### Alert !! \n Length of list storenames and names_for_storenames is not equal." 
+ ) + logger.error("Length of list storenames and names_for_storenames is not equal.") + raise Exception("Length of list storenames and names_for_storenames is not equal.") + else: + storenames_selector.set_alert_message("#### No alerts !!") + + if not os.path.exists(os.path.join(Path.home(), ".storesList.json")): + storenames_cache = dict() + + for i in range(arr1.shape[0]): + if arr1[i] in storenames_cache: + storenames_cache[arr1[i]].append(arr2[i]) + storenames_cache[arr1[i]] = list(set(storenames_cache[arr1[i]])) + else: + storenames_cache[arr1[i]] = [arr2[i]] + + with open(os.path.join(Path.home(), ".storesList.json"), "w") as f: + json.dump(storenames_cache, f, indent=4) + else: + with open(os.path.join(Path.home(), ".storesList.json")) as f: + storenames_cache = json.load(f) + + for i in range(arr1.shape[0]): + if arr1[i] in storenames_cache: + storenames_cache[arr1[i]].append(arr2[i]) + storenames_cache[arr1[i]] = list(set(storenames_cache[arr1[i]])) + else: + storenames_cache[arr1[i]] = [arr2[i]] + + with open(os.path.join(Path.home(), ".storesList.json"), "w") as f: + json.dump(storenames_cache, f, indent=4) + + arr = np.asarray([arr1, arr2]) + logger.info(arr) + if not os.path.exists(select_location): + os.mkdir(select_location) + + np.savetxt(os.path.join(select_location, "storesList.csv"), arr, delimiter=",", fmt="%s") + logger.info(f"Storeslist file saved at {select_location}") + logger.info("Storeslist : \n" + str(arr)) + + # function to show GUI and save def saveStorenames(inputParameters, events, flags, folder_path): @@ -210,62 +265,10 @@ def update_values(event): # on clicking save button, following function is executed def save_button(event=None): global storenames - d = storenames_selector.get_literal_input_2() - arr1, arr2 = np.asarray(d["storenames"]), np.asarray(d["names_for_storenames"]) - - if np.where(arr2 == "")[0].size > 0: - storenames_selector.set_alert_message("#### Alert !! \n Empty string in the list names_for_storenames.") - logger.error("Empty string in the list names_for_storenames.") - raise Exception("Empty string in the list names_for_storenames.") - else: - storenames_selector.set_alert_message("#### No alerts !!") - - if arr1.shape[0] != arr2.shape[0]: - storenames_selector.set_alert_message( - "#### Alert !! \n Length of list storenames and names_for_storenames is not equal." 
- ) - logger.error("Length of list storenames and names_for_storenames is not equal.") - raise Exception("Length of list storenames and names_for_storenames is not equal.") - else: - storenames_selector.set_alert_message("#### No alerts !!") - - if not os.path.exists(os.path.join(Path.home(), ".storesList.json")): - storenames_cache = dict() - - for i in range(arr1.shape[0]): - if arr1[i] in storenames_cache: - storenames_cache[arr1[i]].append(arr2[i]) - storenames_cache[arr1[i]] = list(set(storenames_cache[arr1[i]])) - else: - storenames_cache[arr1[i]] = [arr2[i]] - - with open(os.path.join(Path.home(), ".storesList.json"), "w") as f: - json.dump(storenames_cache, f, indent=4) - else: - with open(os.path.join(Path.home(), ".storesList.json")) as f: - storenames_cache = json.load(f) - - for i in range(arr1.shape[0]): - if arr1[i] in storenames_cache: - storenames_cache[arr1[i]].append(arr2[i]) - storenames_cache[arr1[i]] = list(set(storenames_cache[arr1[i]])) - else: - storenames_cache[arr1[i]] = [arr2[i]] - - with open(os.path.join(Path.home(), ".storesList.json"), "w") as f: - json.dump(storenames_cache, f, indent=4) - - arr = np.asarray([arr1, arr2]) - logger.info(arr) select_location = storenames_selector.get_select_location() - if not os.path.exists(select_location): - os.mkdir(select_location) - - np.savetxt(os.path.join(select_location, "storesList.csv"), arr, delimiter=",", fmt="%s") + _save(d=d, select_location=select_location, storenames_selector=storenames_selector) storenames_selector.set_path(os.path.join(select_location, "storesList.csv")) - logger.info(f"Storeslist file saved at {select_location}") - logger.info("Storeslist : \n" + str(arr)) # ------------------------------------------------------------------------------------------------------------------ From 9e952d4e2a374c87d515ec5578c0928b4ef7ca56 Mon Sep 17 00:00:00 2001 From: pauladkisson Date: Mon, 2 Feb 2026 13:25:51 -0800 Subject: [PATCH 23/53] refactored channel_selector out of pure _save fn --- src/guppy/frontend/temp.py | 20 ++++++++------------ 1 file changed, 8 insertions(+), 12 deletions(-) diff --git a/src/guppy/frontend/temp.py b/src/guppy/frontend/temp.py index 567cd5c..6979bbd 100644 --- a/src/guppy/frontend/temp.py +++ b/src/guppy/frontend/temp.py @@ -123,24 +123,18 @@ def _fetchValues(text, storenames, storename_dropdowns, storename_textboxes, d): return "#### No alerts !!" -def _save(d, select_location, storenames_selector): +def _save(d, select_location): arr1, arr2 = np.asarray(d["storenames"]), np.asarray(d["names_for_storenames"]) if np.where(arr2 == "")[0].size > 0: - storenames_selector.set_alert_message("#### Alert !! \n Empty string in the list names_for_storenames.") + alert_message = "#### Alert !! \n Empty string in the list names_for_storenames." logger.error("Empty string in the list names_for_storenames.") - raise Exception("Empty string in the list names_for_storenames.") - else: - storenames_selector.set_alert_message("#### No alerts !!") + return alert_message if arr1.shape[0] != arr2.shape[0]: - storenames_selector.set_alert_message( - "#### Alert !! \n Length of list storenames and names_for_storenames is not equal." - ) + alert_message = "#### Alert !! \n Length of list storenames and names_for_storenames is not equal." 
logger.error("Length of list storenames and names_for_storenames is not equal.") - raise Exception("Length of list storenames and names_for_storenames is not equal.") - else: - storenames_selector.set_alert_message("#### No alerts !!") + return alert_message if not os.path.exists(os.path.join(Path.home(), ".storesList.json")): storenames_cache = dict() @@ -176,6 +170,7 @@ def _save(d, select_location, storenames_selector): np.savetxt(os.path.join(select_location, "storesList.csv"), arr, delimiter=",", fmt="%s") logger.info(f"Storeslist file saved at {select_location}") logger.info("Storeslist : \n" + str(arr)) + return "#### No alerts !!" # function to show GUI and save @@ -267,7 +262,8 @@ def save_button(event=None): global storenames d = storenames_selector.get_literal_input_2() select_location = storenames_selector.get_select_location() - _save(d=d, select_location=select_location, storenames_selector=storenames_selector) + alert_message = _save(d=d, select_location=select_location) + storenames_selector.set_alert_message(alert_message) storenames_selector.set_path(os.path.join(select_location, "storesList.csv")) # ------------------------------------------------------------------------------------------------------------------ From ee22ba0380688c5fc61d96affc16cebf47bd1ef0 Mon Sep 17 00:00:00 2001 From: pauladkisson Date: Mon, 2 Feb 2026 15:38:25 -0800 Subject: [PATCH 24/53] refactored storenames --- src/guppy/frontend/sidebar.py | 4 +- src/guppy/frontend/temp.py | 276 -------------------- src/guppy/orchestration/home.py | 8 +- src/guppy/orchestration/storenames.py | 351 ++++++++++++++++++++++++++ src/guppy/saveStoresList.py | 80 ------ src/guppy/testing/api.py | 4 +- 6 files changed, 359 insertions(+), 364 deletions(-) create mode 100755 src/guppy/orchestration/storenames.py delete mode 100755 src/guppy/saveStoresList.py diff --git a/src/guppy/frontend/sidebar.py b/src/guppy/frontend/sidebar.py index 6095a56..d73f295 100644 --- a/src/guppy/frontend/sidebar.py +++ b/src/guppy/frontend/sidebar.py @@ -33,7 +33,7 @@ def setup_markdown(self): self.mark_down_visualization = pn.pane.Markdown("""**Step 6 : Visualization**""", width=300) def setup_buttons(self): - self.open_storesList = pn.widgets.Button( + self.open_storenames = pn.widgets.Button( name="Open Storenames GUI", button_type="primary", width=300, align="end" ) self.read_rawData = pn.widgets.Button(name="Read Raw Data", button_type="primary", width=300, align="end") @@ -63,7 +63,7 @@ def add_to_template(self): self.template.sidebar.append(self.mark_down_ip_note) self.template.sidebar.append(self.save_button) self.template.sidebar.append(self.mark_down_storenames) - self.template.sidebar.append(self.open_storesList) + self.template.sidebar.append(self.open_storenames) self.template.sidebar.append(self.mark_down_read) self.template.sidebar.append(self.read_rawData) self.template.sidebar.append(self.read_progress) diff --git a/src/guppy/frontend/temp.py b/src/guppy/frontend/temp.py index 6979bbd..0bcf556 100644 --- a/src/guppy/frontend/temp.py +++ b/src/guppy/frontend/temp.py @@ -1,284 +1,8 @@ -import glob -import json import logging -import os -import socket -from pathlib import Path -from random import randint -import numpy as np import panel as pn -from .storenames_instructions import StorenamesInstructions, StorenamesInstructionsNPM -from .storenames_selector import StorenamesSelector - # hv.extension() pn.extension() logger = logging.getLogger(__name__) - - -def scanPortsAndFind(start_port=5000, end_port=5200, host="127.0.0.1"): - 
while True: - port = randint(start_port, end_port) - sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) - sock.settimeout(0.001) # Set timeout to avoid long waiting on closed ports - result = sock.connect_ex((host, port)) - if result == 0: # If the connection is successful, the port is open - continue - else: - break - - return port - - -def takeOnlyDirs(paths): - removePaths = [] - for p in paths: - if os.path.isfile(p): - removePaths.append(p) - return list(set(paths) - set(removePaths)) - - -# function to show location for over-writing or creating a new stores list file. -def show_dir(filepath): - i = 1 - while True: - basename = os.path.basename(filepath) - op = os.path.join(filepath, basename + "_output_" + str(i)) - if not os.path.exists(op): - break - i += 1 - return op - - -def make_dir(filepath): - i = 1 - while True: - basename = os.path.basename(filepath) - op = os.path.join(filepath, basename + "_output_" + str(i)) - if not os.path.exists(op): - os.mkdir(op) - break - i += 1 - - return op - - -def _fetchValues(text, storenames, storename_dropdowns, storename_textboxes, d): - if not storename_dropdowns or not len(storenames) > 0: - return "####Alert !! \n No storenames selected." - - storenames_cache = dict() - if os.path.exists(os.path.join(Path.home(), ".storesList.json")): - with open(os.path.join(Path.home(), ".storesList.json")) as f: - storenames_cache = json.load(f) - - comboBoxValues, textBoxValues = [], [] - dropdown_keys = list(storename_dropdowns.keys()) - textbox_keys = list(storename_textboxes.keys()) if storename_textboxes else [] - - # Get dropdown values - for key in dropdown_keys: - comboBoxValues.append(storename_dropdowns[key].value) - - # Get textbox values (matching with dropdown keys) - for key in dropdown_keys: - if key in storename_textboxes: - textbox_value = storename_textboxes[key].value or "" - textBoxValues.append(textbox_value) - - # Validation: Check for whitespace - if len(textbox_value.split()) > 1: - return "####Alert !! \n Whitespace is not allowed in the text box entry." - - # Validation: Check for empty required fields - dropdown_value = storename_dropdowns[key].value - if ( - not textbox_value - and dropdown_value not in storenames_cache - and dropdown_value in ["control", "signal", "event TTLs"] - ): - return "####Alert !! \n One of the text box entry is empty." - else: - # For cached values, use the dropdown value directly - textBoxValues.append(storename_dropdowns[key].value) - - if len(comboBoxValues) != len(textBoxValues): - return "####Alert !! \n Number of entries in combo box and text box should be same." - - names_for_storenames = [] - for i in range(len(comboBoxValues)): - if comboBoxValues[i] == "control" or comboBoxValues[i] == "signal": - if "_" in textBoxValues[i]: - return "####Alert !! \n Please do not use underscore in region name." - names_for_storenames.append("{}_{}".format(comboBoxValues[i], textBoxValues[i])) - elif comboBoxValues[i] == "event TTLs": - names_for_storenames.append(textBoxValues[i]) - else: - names_for_storenames.append(comboBoxValues[i]) - - d["storenames"] = text.value - d["names_for_storenames"] = names_for_storenames - return "#### No alerts !!" - - -def _save(d, select_location): - arr1, arr2 = np.asarray(d["storenames"]), np.asarray(d["names_for_storenames"]) - - if np.where(arr2 == "")[0].size > 0: - alert_message = "#### Alert !! \n Empty string in the list names_for_storenames." 
- logger.error("Empty string in the list names_for_storenames.") - return alert_message - - if arr1.shape[0] != arr2.shape[0]: - alert_message = "#### Alert !! \n Length of list storenames and names_for_storenames is not equal." - logger.error("Length of list storenames and names_for_storenames is not equal.") - return alert_message - - if not os.path.exists(os.path.join(Path.home(), ".storesList.json")): - storenames_cache = dict() - - for i in range(arr1.shape[0]): - if arr1[i] in storenames_cache: - storenames_cache[arr1[i]].append(arr2[i]) - storenames_cache[arr1[i]] = list(set(storenames_cache[arr1[i]])) - else: - storenames_cache[arr1[i]] = [arr2[i]] - - with open(os.path.join(Path.home(), ".storesList.json"), "w") as f: - json.dump(storenames_cache, f, indent=4) - else: - with open(os.path.join(Path.home(), ".storesList.json")) as f: - storenames_cache = json.load(f) - - for i in range(arr1.shape[0]): - if arr1[i] in storenames_cache: - storenames_cache[arr1[i]].append(arr2[i]) - storenames_cache[arr1[i]] = list(set(storenames_cache[arr1[i]])) - else: - storenames_cache[arr1[i]] = [arr2[i]] - - with open(os.path.join(Path.home(), ".storesList.json"), "w") as f: - json.dump(storenames_cache, f, indent=4) - - arr = np.asarray([arr1, arr2]) - logger.info(arr) - if not os.path.exists(select_location): - os.mkdir(select_location) - - np.savetxt(os.path.join(select_location, "storesList.csv"), arr, delimiter=",", fmt="%s") - logger.info(f"Storeslist file saved at {select_location}") - logger.info("Storeslist : \n" + str(arr)) - return "#### No alerts !!" - - -# function to show GUI and save -def saveStorenames(inputParameters, events, flags, folder_path): - - logger.debug("Saving stores list file.") - # getting input parameters - inputParameters = inputParameters - - # Headless path: if storenames_map provided, write storesList.csv without building the Panel UI - storenames_map = inputParameters.get("storenames_map") - if isinstance(storenames_map, dict) and len(storenames_map) > 0: - op = make_dir(folder_path) - arr = np.asarray([list(storenames_map.keys()), list(storenames_map.values())], dtype=str) - np.savetxt(os.path.join(op, "storesList.csv"), arr, delimiter=",", fmt="%s") - logger.info(f"Storeslist file saved at {op}") - logger.info("Storeslist : \n" + str(arr)) - return - - # Get storenames from extractor's events property - allnames = events - - # creating GUI template - template = pn.template.BootstrapTemplate(title="Storenames GUI - {}".format(os.path.basename(folder_path))) - - if "data_np_v2" in flags or "data_np" in flags or "event_np" in flags: - storenames_instructions = StorenamesInstructionsNPM(folder_path=folder_path) - else: - storenames_instructions = StorenamesInstructions(folder_path=folder_path) - storenames_selector = StorenamesSelector(allnames=allnames) - - storenames = [] - storename_dropdowns = {} - storename_textboxes = {} - - # ------------------------------------------------------------------------------------------------------------------ - # onclick closure functions - # on clicking overwrite_button, following function is executed - def overwrite_button_actions(event): - if event.new == "over_write_file": - options = takeOnlyDirs(glob.glob(os.path.join(folder_path, "*_output_*"))) - storenames_selector.set_select_location_options(options=options) - else: - options = [show_dir(folder_path)] - storenames_selector.set_select_location_options(options=options) - - def fetchValues(event): - global storenames - d = dict() - alert_message = _fetchValues( - 
text=storenames_selector.text, - storenames=storenames, - storename_dropdowns=storename_dropdowns, - storename_textboxes=storename_textboxes, - d=d, - ) - storenames_selector.set_alert_message(alert_message) - storenames_selector.set_literal_input_2(d=d) - - # on clicking 'Select Storenames' button, following function is executed - def update_values(event): - global storenames, vars_list - - arr = storenames_selector.get_take_widgets() - new_arr = [] - for i in range(len(arr[1])): - for j in range(arr[1][i]): - new_arr.append(arr[0][i]) - if len(new_arr) > 0: - storenames = storenames_selector.get_cross_selector() + new_arr - else: - storenames = storenames_selector.get_cross_selector() - storenames_selector.set_change_widgets(storenames) - - storenames_cache = dict() - if os.path.exists(os.path.join(Path.home(), ".storesList.json")): - with open(os.path.join(Path.home(), ".storesList.json")) as f: - storenames_cache = json.load(f) - - storenames_selector.configure_storenames( - storename_dropdowns=storename_dropdowns, - storename_textboxes=storename_textboxes, - storenames=storenames, - storenames_cache=storenames_cache, - ) - - # on clicking save button, following function is executed - def save_button(event=None): - global storenames - d = storenames_selector.get_literal_input_2() - select_location = storenames_selector.get_select_location() - alert_message = _save(d=d, select_location=select_location) - storenames_selector.set_alert_message(alert_message) - storenames_selector.set_path(os.path.join(select_location, "storesList.csv")) - - # ------------------------------------------------------------------------------------------------------------------ - - # Connect button callbacks - button_name_to_onclick_fn = { - "update_options": update_values, - "save": save_button, - "overwrite_button": overwrite_button_actions, - "show_config_button": fetchValues, - } - storenames_selector.attach_callbacks(button_name_to_onclick_fn) - - template.main.append(pn.Row(storenames_instructions.widget, storenames_selector.widget)) - - # creating widgets, adding them to template and showing a GUI on a new browser window - number = scanPortsAndFind(start_port=5000, end_port=5200) - template.show(port=number) diff --git a/src/guppy/orchestration/home.py b/src/guppy/orchestration/home.py index ace9782..604a7d0 100644 --- a/src/guppy/orchestration/home.py +++ b/src/guppy/orchestration/home.py @@ -8,11 +8,11 @@ import panel as pn from .save_parameters import save_parameters +from .storenames import orchestrate_storenames_page from ..frontend.input_parameters import ParameterForm from ..frontend.path_selection import get_folder_path from ..frontend.progress import readPBIncrementValues from ..frontend.sidebar import Sidebar -from ..saveStoresList import execute from ..visualizePlot import visualizeResults logger = logging.getLogger(__name__) @@ -50,9 +50,9 @@ def onclickProcess(event=None): inputParameters = parameter_form.getInputParameters() save_parameters(inputParameters=inputParameters) - def onclickStoresList(event=None): + def onclickStorenames(event=None): inputParameters = parameter_form.getInputParameters() - execute(inputParameters) + orchestrate_storenames_page(inputParameters) def onclickVisualization(event=None): inputParameters = parameter_form.getInputParameters() @@ -80,7 +80,7 @@ def onclickpsth(event=None): button_name_to_onclick_fn = { "save_button": onclickProcess, - "open_storesList": onclickStoresList, + "open_storenames": onclickStorenames, "read_rawData": onclickreaddata, 
"extract_ts": onclickextractts, "psth_computation": onclickpsth, diff --git a/src/guppy/orchestration/storenames.py b/src/guppy/orchestration/storenames.py new file mode 100755 index 0000000..5c05b1e --- /dev/null +++ b/src/guppy/orchestration/storenames.py @@ -0,0 +1,351 @@ +import glob +import json +import logging +import os +import socket +from pathlib import Path +from random import randint + +import numpy as np +import panel as pn + +from guppy.extractors import ( + CsvRecordingExtractor, + DoricRecordingExtractor, + NpmRecordingExtractor, + TdtRecordingExtractor, +) +from guppy.frontend.npm_gui_prompts import ( + get_multi_event_responses, + get_timestamp_configuration, +) +from guppy.frontend.storenames_instructions import ( + StorenamesInstructions, + StorenamesInstructionsNPM, +) +from guppy.frontend.storenames_selector import StorenamesSelector + +pn.extension() + +logger = logging.getLogger(__name__) + + +def scanPortsAndFind(start_port=5000, end_port=5200, host="127.0.0.1"): + while True: + port = randint(start_port, end_port) + sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) + sock.settimeout(0.001) # Set timeout to avoid long waiting on closed ports + result = sock.connect_ex((host, port)) + if result == 0: # If the connection is successful, the port is open + continue + else: + break + + return port + + +def takeOnlyDirs(paths): + removePaths = [] + for p in paths: + if os.path.isfile(p): + removePaths.append(p) + return list(set(paths) - set(removePaths)) + + +# function to show location for over-writing or creating a new stores list file. +def show_dir(filepath): + i = 1 + while True: + basename = os.path.basename(filepath) + op = os.path.join(filepath, basename + "_output_" + str(i)) + if not os.path.exists(op): + break + i += 1 + return op + + +def make_dir(filepath): + i = 1 + while True: + basename = os.path.basename(filepath) + op = os.path.join(filepath, basename + "_output_" + str(i)) + if not os.path.exists(op): + os.mkdir(op) + break + i += 1 + + return op + + +def _fetchValues(text, storenames, storename_dropdowns, storename_textboxes, d): + if not storename_dropdowns or not len(storenames) > 0: + return "####Alert !! \n No storenames selected." + + storenames_cache = dict() + if os.path.exists(os.path.join(Path.home(), ".storesList.json")): + with open(os.path.join(Path.home(), ".storesList.json")) as f: + storenames_cache = json.load(f) + + comboBoxValues, textBoxValues = [], [] + dropdown_keys = list(storename_dropdowns.keys()) + textbox_keys = list(storename_textboxes.keys()) if storename_textboxes else [] + + # Get dropdown values + for key in dropdown_keys: + comboBoxValues.append(storename_dropdowns[key].value) + + # Get textbox values (matching with dropdown keys) + for key in dropdown_keys: + if key in storename_textboxes: + textbox_value = storename_textboxes[key].value or "" + textBoxValues.append(textbox_value) + + # Validation: Check for whitespace + if len(textbox_value.split()) > 1: + return "####Alert !! \n Whitespace is not allowed in the text box entry." + + # Validation: Check for empty required fields + dropdown_value = storename_dropdowns[key].value + if ( + not textbox_value + and dropdown_value not in storenames_cache + and dropdown_value in ["control", "signal", "event TTLs"] + ): + return "####Alert !! \n One of the text box entry is empty." + else: + # For cached values, use the dropdown value directly + textBoxValues.append(storename_dropdowns[key].value) + + if len(comboBoxValues) != len(textBoxValues): + return "####Alert !! 
\n Number of entries in combo box and text box should be same." + + names_for_storenames = [] + for i in range(len(comboBoxValues)): + if comboBoxValues[i] == "control" or comboBoxValues[i] == "signal": + if "_" in textBoxValues[i]: + return "####Alert !! \n Please do not use underscore in region name." + names_for_storenames.append("{}_{}".format(comboBoxValues[i], textBoxValues[i])) + elif comboBoxValues[i] == "event TTLs": + names_for_storenames.append(textBoxValues[i]) + else: + names_for_storenames.append(comboBoxValues[i]) + + d["storenames"] = text.value + d["names_for_storenames"] = names_for_storenames + return "#### No alerts !!" + + +def _save(d, select_location): + arr1, arr2 = np.asarray(d["storenames"]), np.asarray(d["names_for_storenames"]) + + if np.where(arr2 == "")[0].size > 0: + alert_message = "#### Alert !! \n Empty string in the list names_for_storenames." + logger.error("Empty string in the list names_for_storenames.") + return alert_message + + if arr1.shape[0] != arr2.shape[0]: + alert_message = "#### Alert !! \n Length of list storenames and names_for_storenames is not equal." + logger.error("Length of list storenames and names_for_storenames is not equal.") + return alert_message + + if not os.path.exists(os.path.join(Path.home(), ".storesList.json")): + storenames_cache = dict() + + for i in range(arr1.shape[0]): + if arr1[i] in storenames_cache: + storenames_cache[arr1[i]].append(arr2[i]) + storenames_cache[arr1[i]] = list(set(storenames_cache[arr1[i]])) + else: + storenames_cache[arr1[i]] = [arr2[i]] + + with open(os.path.join(Path.home(), ".storesList.json"), "w") as f: + json.dump(storenames_cache, f, indent=4) + else: + with open(os.path.join(Path.home(), ".storesList.json")) as f: + storenames_cache = json.load(f) + + for i in range(arr1.shape[0]): + if arr1[i] in storenames_cache: + storenames_cache[arr1[i]].append(arr2[i]) + storenames_cache[arr1[i]] = list(set(storenames_cache[arr1[i]])) + else: + storenames_cache[arr1[i]] = [arr2[i]] + + with open(os.path.join(Path.home(), ".storesList.json"), "w") as f: + json.dump(storenames_cache, f, indent=4) + + arr = np.asarray([arr1, arr2]) + logger.info(arr) + if not os.path.exists(select_location): + os.mkdir(select_location) + + np.savetxt(os.path.join(select_location, "storesList.csv"), arr, delimiter=",", fmt="%s") + logger.info(f"Storeslist file saved at {select_location}") + logger.info("Storeslist : \n" + str(arr)) + return "#### No alerts !!" 
+ + +# function to show GUI and save +def build_storenames_page(inputParameters, events, flags, folder_path): + + logger.debug("Saving stores list file.") + # getting input parameters + inputParameters = inputParameters + + # Headless path: if storenames_map provided, write storesList.csv without building the Panel UI + storenames_map = inputParameters.get("storenames_map") + if isinstance(storenames_map, dict) and len(storenames_map) > 0: + op = make_dir(folder_path) + arr = np.asarray([list(storenames_map.keys()), list(storenames_map.values())], dtype=str) + np.savetxt(os.path.join(op, "storesList.csv"), arr, delimiter=",", fmt="%s") + logger.info(f"Storeslist file saved at {op}") + logger.info("Storeslist : \n" + str(arr)) + return + + # Get storenames from extractor's events property + allnames = events + + # creating GUI template + template = pn.template.BootstrapTemplate(title="Storenames GUI - {}".format(os.path.basename(folder_path))) + + if "data_np_v2" in flags or "data_np" in flags or "event_np" in flags: + storenames_instructions = StorenamesInstructionsNPM(folder_path=folder_path) + else: + storenames_instructions = StorenamesInstructions(folder_path=folder_path) + storenames_selector = StorenamesSelector(allnames=allnames) + + storenames = [] + storename_dropdowns = {} + storename_textboxes = {} + + # ------------------------------------------------------------------------------------------------------------------ + # onclick closure functions + # on clicking overwrite_button, following function is executed + def overwrite_button_actions(event): + if event.new == "over_write_file": + options = takeOnlyDirs(glob.glob(os.path.join(folder_path, "*_output_*"))) + storenames_selector.set_select_location_options(options=options) + else: + options = [show_dir(folder_path)] + storenames_selector.set_select_location_options(options=options) + + def fetchValues(event): + global storenames + d = dict() + alert_message = _fetchValues( + text=storenames_selector.text, + storenames=storenames, + storename_dropdowns=storename_dropdowns, + storename_textboxes=storename_textboxes, + d=d, + ) + storenames_selector.set_alert_message(alert_message) + storenames_selector.set_literal_input_2(d=d) + + # on clicking 'Select Storenames' button, following function is executed + def update_values(event): + global storenames, vars_list + + arr = storenames_selector.get_take_widgets() + new_arr = [] + for i in range(len(arr[1])): + for j in range(arr[1][i]): + new_arr.append(arr[0][i]) + if len(new_arr) > 0: + storenames = storenames_selector.get_cross_selector() + new_arr + else: + storenames = storenames_selector.get_cross_selector() + storenames_selector.set_change_widgets(storenames) + + storenames_cache = dict() + if os.path.exists(os.path.join(Path.home(), ".storesList.json")): + with open(os.path.join(Path.home(), ".storesList.json")) as f: + storenames_cache = json.load(f) + + storenames_selector.configure_storenames( + storename_dropdowns=storename_dropdowns, + storename_textboxes=storename_textboxes, + storenames=storenames, + storenames_cache=storenames_cache, + ) + + # on clicking save button, following function is executed + def save_button(event=None): + global storenames + d = storenames_selector.get_literal_input_2() + select_location = storenames_selector.get_select_location() + alert_message = _save(d=d, select_location=select_location) + storenames_selector.set_alert_message(alert_message) + storenames_selector.set_path(os.path.join(select_location, "storesList.csv")) + + # 
------------------------------------------------------------------------------------------------------------------ + + # Connect button callbacks + button_name_to_onclick_fn = { + "update_options": update_values, + "save": save_button, + "overwrite_button": overwrite_button_actions, + "show_config_button": fetchValues, + } + storenames_selector.attach_callbacks(button_name_to_onclick_fn) + + template.main.append(pn.Row(storenames_instructions.widget, storenames_selector.widget)) + + # creating widgets, adding them to template and showing a GUI on a new browser window + number = scanPortsAndFind(start_port=5000, end_port=5200) + template.show(port=number) + + +def read_header(inputParameters, num_ch, modality, folder_path, headless): + if modality == "tdt": + events, flags = TdtRecordingExtractor.discover_events_and_flags(folder_path=folder_path) + elif modality == "csv": + events, flags = CsvRecordingExtractor.discover_events_and_flags(folder_path=folder_path) + + elif modality == "doric": + events, flags = DoricRecordingExtractor.discover_events_and_flags(folder_path=folder_path) + + elif modality == "npm": + if not headless: + # Resolve multiple event TTLs + multiple_event_ttls = NpmRecordingExtractor.has_multiple_event_ttls(folder_path=folder_path) + responses = get_multi_event_responses(multiple_event_ttls) + inputParameters["npm_split_events"] = responses + + # Resolve timestamp units and columns + ts_unit_needs, col_names_ts = NpmRecordingExtractor.needs_ts_unit(folder_path=folder_path, num_ch=num_ch) + ts_units, npm_timestamp_column_names = get_timestamp_configuration(ts_unit_needs, col_names_ts) + inputParameters["npm_time_units"] = ts_units if ts_units else None + inputParameters["npm_timestamp_column_names"] = ( + npm_timestamp_column_names if npm_timestamp_column_names else None + ) + + events, flags = NpmRecordingExtractor.discover_events_and_flags( + folder_path=folder_path, num_ch=num_ch, inputParameters=inputParameters + ) + else: + raise ValueError("Modality not recognized. 
Please use 'tdt', 'csv', 'doric', or 'npm'.") + return events, flags + + +# function to read input parameters and run the saveStorenames function +def orchestrate_storenames_page(inputParameters): + + inputParameters = inputParameters + folderNames = inputParameters["folderNames"] + isosbestic_control = inputParameters["isosbestic_control"] + num_ch = inputParameters["noChannels"] + modality = inputParameters.get("modality", "tdt") + headless = bool(os.environ.get("GUPPY_BASE_DIR")) + + logger.info(folderNames) + + try: + for i in folderNames: + folder_path = os.path.join(inputParameters["abspath"], i) + events, flags = read_header(inputParameters, num_ch, modality, folder_path, headless) + build_storenames_page(inputParameters, events, flags, folder_path) + logger.info("#" * 400) + except Exception as e: + logger.error(str(e)) + raise e diff --git a/src/guppy/saveStoresList.py b/src/guppy/saveStoresList.py deleted file mode 100755 index bd00995..0000000 --- a/src/guppy/saveStoresList.py +++ /dev/null @@ -1,80 +0,0 @@ -#!/usr/bin/env python -# coding: utf-8 - -# In[1]: - - -import logging -import os - -import panel as pn - -from guppy.extractors import ( - CsvRecordingExtractor, - DoricRecordingExtractor, - NpmRecordingExtractor, - TdtRecordingExtractor, -) -from guppy.frontend.npm_gui_prompts import ( - get_multi_event_responses, - get_timestamp_configuration, -) -from guppy.frontend.temp import saveStorenames - -# hv.extension() -pn.extension() - -logger = logging.getLogger(__name__) - - -# function to read input parameters and run the saveStorenames function -def execute(inputParameters): - - inputParameters = inputParameters - folderNames = inputParameters["folderNames"] - isosbestic_control = inputParameters["isosbestic_control"] - num_ch = inputParameters["noChannels"] - modality = inputParameters.get("modality", "tdt") - - logger.info(folderNames) - - try: - for i in folderNames: - folder_path = os.path.join(inputParameters["abspath"], i) - if modality == "tdt": - events, flags = TdtRecordingExtractor.discover_events_and_flags(folder_path=folder_path) - elif modality == "csv": - events, flags = CsvRecordingExtractor.discover_events_and_flags(folder_path=folder_path) - - elif modality == "doric": - events, flags = DoricRecordingExtractor.discover_events_and_flags(folder_path=folder_path) - - elif modality == "npm": - headless = bool(os.environ.get("GUPPY_BASE_DIR")) - if not headless: - # Resolve multiple event TTLs - multiple_event_ttls = NpmRecordingExtractor.has_multiple_event_ttls(folder_path=folder_path) - responses = get_multi_event_responses(multiple_event_ttls) - inputParameters["npm_split_events"] = responses - - # Resolve timestamp units and columns - ts_unit_needs, col_names_ts = NpmRecordingExtractor.needs_ts_unit( - folder_path=folder_path, num_ch=num_ch - ) - ts_units, npm_timestamp_column_names = get_timestamp_configuration(ts_unit_needs, col_names_ts) - inputParameters["npm_time_units"] = ts_units if ts_units else None - inputParameters["npm_timestamp_column_names"] = ( - npm_timestamp_column_names if npm_timestamp_column_names else None - ) - - events, flags = NpmRecordingExtractor.discover_events_and_flags( - folder_path=folder_path, num_ch=num_ch, inputParameters=inputParameters - ) - else: - raise ValueError("Modality not recognized. 
Please use 'tdt', 'csv', 'doric', or 'npm'.") - - saveStorenames(inputParameters, events, flags, folder_path) - logger.info("#" * 400) - except Exception as e: - logger.error(str(e)) - raise e diff --git a/src/guppy/testing/api.py b/src/guppy/testing/api.py index 1cf4349..4a4b7cd 100644 --- a/src/guppy/testing/api.py +++ b/src/guppy/testing/api.py @@ -16,9 +16,9 @@ from guppy.computePsth import psthForEachStorename from guppy.findTransientsFreqAndAmp import executeFindFreqAndAmp from guppy.orchestration.home import build_homepage +from guppy.orchestration.storenames import orchestrate_storenames_page from guppy.preprocess import extractTsAndSignal from guppy.readTevTsq import readRawData -from guppy.saveStoresList import execute def step1(*, base_dir: str, selected_folders: Iterable[str]) -> None: @@ -168,7 +168,7 @@ def step2( input_params["npm_split_events"] = npm_split_events # Call the underlying Step 2 executor (now headless-aware) - execute(input_params) + orchestrate_storenames_page(input_params) def step3( From c341c092201b4c5adb75f7399667edce63584941 Mon Sep 17 00:00:00 2001 From: pauladkisson Date: Tue, 3 Feb 2026 10:15:13 -0800 Subject: [PATCH 25/53] Deduplicated writeToFile --- src/guppy/computePsth.py | 6 +----- src/guppy/findTransientsFreqAndAmp.py | 6 +----- src/guppy/frontend/progress.py | 5 +++++ src/guppy/preprocess.py | 6 +----- src/guppy/readTevTsq.py | 6 +----- 5 files changed, 9 insertions(+), 20 deletions(-) diff --git a/src/guppy/computePsth.py b/src/guppy/computePsth.py index 32d9be1..65a1ad9 100755 --- a/src/guppy/computePsth.py +++ b/src/guppy/computePsth.py @@ -34,6 +34,7 @@ write_peak_and_area_to_csv, write_peak_and_area_to_hdf5, ) +from .frontend.progress import writeToFile logger = logging.getLogger(__name__) @@ -46,11 +47,6 @@ def takeOnlyDirs(paths): return list(set(paths) - set(removePaths)) -def writeToFile(value: str): - with open(os.path.join(os.path.expanduser("~"), "pbSteps.txt"), "a") as file: - file.write(value) - - # function to create PSTH for each event using function helper_psth and save the PSTH to h5 file def execute_compute_psth(filepath, event, inputParameters): diff --git a/src/guppy/findTransientsFreqAndAmp.py b/src/guppy/findTransientsFreqAndAmp.py index f6c3d6e..3a78871 100755 --- a/src/guppy/findTransientsFreqAndAmp.py +++ b/src/guppy/findTransientsFreqAndAmp.py @@ -19,15 +19,11 @@ ) from .analysis.transients import analyze_transients from .analysis.transients_average import averageForGroup +from .frontend.progress import writeToFile logger = logging.getLogger(__name__) -def writeToFile(value: str): - with open(os.path.join(os.path.expanduser("~"), "pbSteps.txt"), "a") as file: - file.write(value) - - def visuzlize_peaks(filepath, z_score, timestamps, peaksIndex): dirname = os.path.dirname(filepath) diff --git a/src/guppy/frontend/progress.py b/src/guppy/frontend/progress.py index d7bc226..fb5e7c2 100644 --- a/src/guppy/frontend/progress.py +++ b/src/guppy/frontend/progress.py @@ -43,3 +43,8 @@ def readPBIncrementValues(progressBar): break logger.info("Read progress bar increment values stopped.") + + +def writeToFile(value: str): + with open(os.path.join(os.path.expanduser("~"), "pbSteps.txt"), "a") as file: + file.write(value) diff --git a/src/guppy/preprocess.py b/src/guppy/preprocess.py index e4812a2..710bd17 100755 --- a/src/guppy/preprocess.py +++ b/src/guppy/preprocess.py @@ -39,6 +39,7 @@ ) from .analysis.timestamp_correction import correct_timestamps from .analysis.z_score import compute_z_score +from .frontend.progress import 
writeToFile logger = logging.getLogger(__name__) @@ -47,11 +48,6 @@ plt.switch_backend("TKAgg") -def writeToFile(value: str): - with open(os.path.join(os.path.expanduser("~"), "pbSteps.txt"), "a") as file: - file.write(value) - - # function to plot z_score def visualize_z_score(filepath): diff --git a/src/guppy/readTevTsq.py b/src/guppy/readTevTsq.py index 19a0a4a..8c1aa0b 100755 --- a/src/guppy/readTevTsq.py +++ b/src/guppy/readTevTsq.py @@ -14,6 +14,7 @@ TdtRecordingExtractor, read_and_save_all_events, ) +from guppy.frontend.progress import writeToFile logger = logging.getLogger(__name__) @@ -26,11 +27,6 @@ def takeOnlyDirs(paths): return list(set(paths) - set(removePaths)) -def writeToFile(value: str): - with open(os.path.join(os.path.expanduser("~"), "pbSteps.txt"), "a") as file: - file.write(value) - - # function to read data from 'tsq' and 'tev' files def readRawData(inputParameters): From 8a2bc59ea42580f307102c659ab0539cfffef832 Mon Sep 17 00:00:00 2001 From: pauladkisson Date: Tue, 3 Feb 2026 10:23:20 -0800 Subject: [PATCH 26/53] Deduplicated takeOnlyDirs --- src/guppy/analysis/io_utils.py | 10 ++-------- src/guppy/computePsth.py | 9 +-------- src/guppy/orchestration/storenames.py | 9 +-------- src/guppy/readTevTsq.py | 9 +-------- src/guppy/utils/__init__.py | 0 src/guppy/utils/utils.py | 12 ++++++++++++ src/guppy/visualizePlot.py | 9 +-------- 7 files changed, 18 insertions(+), 40 deletions(-) create mode 100644 src/guppy/utils/__init__.py create mode 100644 src/guppy/utils/utils.py diff --git a/src/guppy/analysis/io_utils.py b/src/guppy/analysis/io_utils.py index 742ab3b..892d8c5 100644 --- a/src/guppy/analysis/io_utils.py +++ b/src/guppy/analysis/io_utils.py @@ -8,15 +8,9 @@ import numpy as np import pandas as pd -logger = logging.getLogger(__name__) - +from ..utils.utils import takeOnlyDirs -def takeOnlyDirs(paths): - removePaths = [] - for p in paths: - if os.path.isfile(p): - removePaths.append(p) - return list(set(paths) - set(removePaths)) +logger = logging.getLogger(__name__) # find files by ignoring the case sensitivity diff --git a/src/guppy/computePsth.py b/src/guppy/computePsth.py index 65a1ad9..0b94220 100755 --- a/src/guppy/computePsth.py +++ b/src/guppy/computePsth.py @@ -35,18 +35,11 @@ write_peak_and_area_to_hdf5, ) from .frontend.progress import writeToFile +from .utils.utils import takeOnlyDirs logger = logging.getLogger(__name__) -def takeOnlyDirs(paths): - removePaths = [] - for p in paths: - if os.path.isfile(p): - removePaths.append(p) - return list(set(paths) - set(removePaths)) - - # function to create PSTH for each event using function helper_psth and save the PSTH to h5 file def execute_compute_psth(filepath, event, inputParameters): diff --git a/src/guppy/orchestration/storenames.py b/src/guppy/orchestration/storenames.py index 5c05b1e..bc770d6 100755 --- a/src/guppy/orchestration/storenames.py +++ b/src/guppy/orchestration/storenames.py @@ -24,6 +24,7 @@ StorenamesInstructionsNPM, ) from guppy.frontend.storenames_selector import StorenamesSelector +from guppy.utils.utils import takeOnlyDirs pn.extension() @@ -44,14 +45,6 @@ def scanPortsAndFind(start_port=5000, end_port=5200, host="127.0.0.1"): return port -def takeOnlyDirs(paths): - removePaths = [] - for p in paths: - if os.path.isfile(p): - removePaths.append(p) - return list(set(paths) - set(removePaths)) - - # function to show location for over-writing or creating a new stores list file. 
 def show_dir(filepath):
     i = 1
diff --git a/src/guppy/readTevTsq.py b/src/guppy/readTevTsq.py
index 8c1aa0b..a53dff5 100755
--- a/src/guppy/readTevTsq.py
+++ b/src/guppy/readTevTsq.py
@@ -15,18 +15,11 @@
     read_and_save_all_events,
 )
 from guppy.frontend.progress import writeToFile
+from guppy.utils.utils import takeOnlyDirs

 logger = logging.getLogger(__name__)


-def takeOnlyDirs(paths):
-    removePaths = []
-    for p in paths:
-        if os.path.isfile(p):
-            removePaths.append(p)
-    return list(set(paths) - set(removePaths))
-
-
 # function to read data from 'tsq' and 'tev' files
 def readRawData(inputParameters):

diff --git a/src/guppy/utils/__init__.py b/src/guppy/utils/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/src/guppy/utils/utils.py b/src/guppy/utils/utils.py
new file mode 100644
index 0000000..4c4759f
--- /dev/null
+++ b/src/guppy/utils/utils.py
@@ -0,0 +1,12 @@
+import logging
+import os
+
+logger = logging.getLogger(__name__)
+
+
+def takeOnlyDirs(paths):
+    removePaths = []
+    for p in paths:
+        if os.path.isfile(p):
+            removePaths.append(p)
+    return list(set(paths) - set(removePaths))
diff --git a/src/guppy/visualizePlot.py b/src/guppy/visualizePlot.py
index 929149e..e3fc1fb 100755
--- a/src/guppy/visualizePlot.py
+++ b/src/guppy/visualizePlot.py
@@ -19,6 +19,7 @@
 from holoviews.plotting.util import process_cmap

 from .preprocess import get_all_stores_for_combining_data
+from .utils.utils import takeOnlyDirs

 pn.extension()

@@ -39,14 +40,6 @@ def scanPortsAndFind(start_port=5000, end_port=5200, host="127.0.0.1"):
     return port


-def takeOnlyDirs(paths):
-    removePaths = []
-    for p in paths:
-        if os.path.isfile(p):
-            removePaths.append(p)
-    return list(set(paths) - set(removePaths))
-
-
 # read h5 file as a dataframe
 def read_Df(filepath, event, name):
     event = event.replace("\\", "_")

From 66b23b404405029fe6f9b95e1af4c792ec438470 Mon Sep 17 00:00:00 2001
From: pauladkisson
Date: Tue, 3 Feb 2026 10:31:31 -0800
Subject: [PATCH 27/53] Moved read_raw_data to the orchestration layer.

---
 src/guppy/orchestration/home.py                             | 2 +-
 src/guppy/{readTevTsq.py => orchestration/read_raw_data.py} | 4 ++--
 src/guppy/testing/api.py                                    | 4 ++--
 3 files changed, 5 insertions(+), 5 deletions(-)
 rename src/guppy/{readTevTsq.py => orchestration/read_raw_data.py} (97%)

diff --git a/src/guppy/orchestration/home.py b/src/guppy/orchestration/home.py
index 604a7d0..1b5eb29 100644
--- a/src/guppy/orchestration/home.py
+++ b/src/guppy/orchestration/home.py
@@ -20,7 +20,7 @@

 def readRawData(parameter_form):
     inputParameters = parameter_form.getInputParameters()
-    subprocess.call([sys.executable, "-m", "guppy.readTevTsq", json.dumps(inputParameters)])
+    subprocess.call([sys.executable, "-m", "guppy.orchestration.read_raw_data", json.dumps(inputParameters)])


 def extractTs(parameter_form):
diff --git a/src/guppy/readTevTsq.py b/src/guppy/orchestration/read_raw_data.py
similarity index 97%
rename from src/guppy/readTevTsq.py
rename to src/guppy/orchestration/read_raw_data.py
index a53dff5..cd82634 100755
--- a/src/guppy/readTevTsq.py
+++ b/src/guppy/orchestration/read_raw_data.py
@@ -21,7 +21,7 @@


 # function to read data from 'tsq' and 'tev' files
-def readRawData(inputParameters):
+def orchestrate_read_raw_data(inputParameters):

     logger.debug("### Reading raw data... 
###") # get input parameters @@ -88,7 +88,7 @@ def readRawData(inputParameters): def main(input_parameters): logger.info("run") try: - readRawData(input_parameters) + orchestrate_read_raw_data(input_parameters) logger.info("#" * 400) except Exception as e: with open(os.path.join(os.path.expanduser("~"), "pbSteps.txt"), "a") as file: diff --git a/src/guppy/testing/api.py b/src/guppy/testing/api.py index 4a4b7cd..0c8c027 100644 --- a/src/guppy/testing/api.py +++ b/src/guppy/testing/api.py @@ -16,9 +16,9 @@ from guppy.computePsth import psthForEachStorename from guppy.findTransientsFreqAndAmp import executeFindFreqAndAmp from guppy.orchestration.home import build_homepage +from guppy.orchestration.read_raw_data import orchestrate_read_raw_data from guppy.orchestration.storenames import orchestrate_storenames_page from guppy.preprocess import extractTsAndSignal -from guppy.readTevTsq import readRawData def step1(*, base_dir: str, selected_folders: Iterable[str]) -> None: @@ -257,7 +257,7 @@ def step3( input_params["modality"] = modality # Call the underlying Step 3 worker directly (no subprocess) - readRawData(input_params) + orchestrate_read_raw_data(input_params) def step4( From fa68e656a0407a8e67b3d31484af314b7134e78a Mon Sep 17 00:00:00 2001 From: pauladkisson Date: Tue, 3 Feb 2026 11:22:49 -0800 Subject: [PATCH 28/53] Fixed bug with step six: GUI. --- src/guppy/orchestration/storenames.py | 1 + 1 file changed, 1 insertion(+) diff --git a/src/guppy/orchestration/storenames.py b/src/guppy/orchestration/storenames.py index bc770d6..39edc44 100755 --- a/src/guppy/orchestration/storenames.py +++ b/src/guppy/orchestration/storenames.py @@ -6,6 +6,7 @@ from pathlib import Path from random import randint +import holoviews as hv # noqa: F401 import numpy as np import panel as pn From bdb45626757031c71bc0a14b9105340391b2a5c1 Mon Sep 17 00:00:00 2001 From: pauladkisson Date: Tue, 3 Feb 2026 12:32:31 -0800 Subject: [PATCH 29/53] Refactored DFF and z-score visualization to dedicated pre-visualize.preprocessing module. 
--- src/guppy/preprocess.py | 44 +++++------------------- src/guppy/visualization/__init__.py | 0 src/guppy/visualization/preprocessing.py | 24 +++++++++++++ 3 files changed, 33 insertions(+), 35 deletions(-) create mode 100644 src/guppy/visualization/__init__.py create mode 100644 src/guppy/visualization/preprocessing.py diff --git a/src/guppy/preprocess.py b/src/guppy/preprocess.py index 710bd17..1543205 100755 --- a/src/guppy/preprocess.py +++ b/src/guppy/preprocess.py @@ -3,6 +3,7 @@ import logging import os import sys +from typing import Literal import matplotlib.pyplot as plt import numpy as np @@ -40,6 +41,7 @@ from .analysis.timestamp_correction import correct_timestamps from .analysis.z_score import compute_z_score from .frontend.progress import writeToFile +from .visualization.preprocessing import visualize_preprocessing logger = logging.getLogger(__name__) @@ -48,12 +50,10 @@ plt.switch_backend("TKAgg") -# function to plot z_score -def visualize_z_score(filepath): - +def execute_preprocessing_visualization(filepath, visualization_type: Literal["z_score", "dff"]): name = os.path.basename(filepath) - path = glob.glob(os.path.join(filepath, "z_score_*")) + path = glob.glob(os.path.join(filepath, f"{visualization_type}_*")) path = sorted(path) @@ -62,33 +62,7 @@ def visualize_z_score(filepath): name_1 = basename.split("_")[-1] x = read_hdf5("timeCorrection_" + name_1, filepath, "timestampNew") y = read_hdf5("", path[i], "data") - fig = plt.figure() - ax = fig.add_subplot(111) - ax.plot(x, y) - ax.set_title(basename) - fig.suptitle(name) - # plt.show() - - -# function to plot deltaF/F -def visualize_dff(filepath): - name = os.path.basename(filepath) - - path = glob.glob(os.path.join(filepath, "dff_*")) - - path = sorted(path) - - for i in range(len(path)): - basename = (os.path.basename(path[i])).split(".")[0] - name_1 = basename.split("_")[-1] - x = read_hdf5("timeCorrection_" + name_1, filepath, "timestampNew") - y = read_hdf5("", path[i], "data") - fig = plt.figure() - ax = fig.add_subplot(111) - ax.plot(x, y) - ax.set_title(basename) - fig.suptitle(name) - # plt.show() + fig, ax = visualize_preprocessing(suptitle=name, title=basename, x=x, y=y) def visualize(filepath, x, y1, y2, y3, plot_name, removeArtifacts): @@ -337,12 +311,12 @@ def execute_zscore(folderNames, inputParameters): visualizeControlAndSignal(filepath, removeArtifacts=remove_artifacts) if plot_zScore_dff == "z_score": - visualize_z_score(filepath) + execute_preprocessing_visualization(filepath, visualization_type="z_score") if plot_zScore_dff == "dff": - visualize_dff(filepath) + execute_preprocessing_visualization(filepath, visualization_type="dff") if plot_zScore_dff == "Both": - visualize_z_score(filepath) - visualize_dff(filepath) + execute_preprocessing_visualization(filepath, visualization_type="z_score") + execute_preprocessing_visualization(filepath, visualization_type="dff") writeToFile(str(10 + ((inputParameters["step"] + 1) * 10)) + "\n") inputParameters["step"] += 1 diff --git a/src/guppy/visualization/__init__.py b/src/guppy/visualization/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/src/guppy/visualization/preprocessing.py b/src/guppy/visualization/preprocessing.py new file mode 100644 index 0000000..460adb9 --- /dev/null +++ b/src/guppy/visualization/preprocessing.py @@ -0,0 +1,24 @@ +import logging +import os + +import matplotlib.pyplot as plt + +from ..analysis.io_utils import ( + get_all_stores_for_combining_data, # noqa: F401 -- Necessary for other modules that depend on 
preprocess.py +) + +logger = logging.getLogger(__name__) + +# Only set matplotlib backend if not in CI environment +if not os.getenv("CI"): + plt.switch_backend("TKAgg") + + +def visualize_preprocessing(*, suptitle, title, x, y): + fig = plt.figure() + ax = fig.add_subplot(111) + ax.plot(x, y) + ax.set_title(title) + fig.suptitle(suptitle) + + return fig, ax From 9c312998e5f3dfa8911dbc996c9f05a288f6f037 Mon Sep 17 00:00:00 2001 From: pauladkisson Date: Tue, 3 Feb 2026 15:25:01 -0800 Subject: [PATCH 30/53] refactored visualize into preprocessing.py --- src/guppy/frontend/temp.py | 8 --- src/guppy/preprocess.py | 91 +----------------------- src/guppy/visualization/preprocessing.py | 85 ++++++++++++++++++++-- 3 files changed, 82 insertions(+), 102 deletions(-) delete mode 100644 src/guppy/frontend/temp.py diff --git a/src/guppy/frontend/temp.py b/src/guppy/frontend/temp.py deleted file mode 100644 index 0bcf556..0000000 --- a/src/guppy/frontend/temp.py +++ /dev/null @@ -1,8 +0,0 @@ -import logging - -import panel as pn - -# hv.extension() -pn.extension() - -logger = logging.getLogger(__name__) diff --git a/src/guppy/preprocess.py b/src/guppy/preprocess.py index 1543205..b5fd4e1 100755 --- a/src/guppy/preprocess.py +++ b/src/guppy/preprocess.py @@ -41,7 +41,7 @@ from .analysis.timestamp_correction import correct_timestamps from .analysis.z_score import compute_z_score from .frontend.progress import writeToFile -from .visualization.preprocessing import visualize_preprocessing +from .visualization.preprocessing import visualize, visualize_preprocessing logger = logging.getLogger(__name__) @@ -65,95 +65,6 @@ def execute_preprocessing_visualization(filepath, visualization_type: Literal["z fig, ax = visualize_preprocessing(suptitle=name, title=basename, x=x, y=y) -def visualize(filepath, x, y1, y2, y3, plot_name, removeArtifacts): - - # plotting control and signal data - - if (y1 == 0).all() == True: - y1 = np.zeros(x.shape[0]) - - coords_path = os.path.join(filepath, "coordsForPreProcessing_" + plot_name[0].split("_")[-1] + ".npy") - name = os.path.basename(filepath) - fig = plt.figure() - ax1 = fig.add_subplot(311) - (line1,) = ax1.plot(x, y1) - ax1.set_title(plot_name[0]) - ax2 = fig.add_subplot(312) - (line2,) = ax2.plot(x, y2) - ax2.set_title(plot_name[1]) - ax3 = fig.add_subplot(313) - (line3,) = ax3.plot(x, y2) - (line3,) = ax3.plot(x, y3) - ax3.set_title(plot_name[2]) - fig.suptitle(name) - - hfont = {"fontname": "DejaVu Sans"} - - if removeArtifacts == True and os.path.exists(coords_path): - ax3.set_xlabel("Time(s) \n Note : Artifacts have been removed, but are not reflected in this plot.", **hfont) - else: - ax3.set_xlabel("Time(s)", **hfont) - - global coords - coords = [] - - # clicking 'space' key on keyboard will draw a line on the plot so that user can see what chunks are selected - # and clicking 'd' key on keyboard will deselect the selected point - def onclick(event): - # global ix, iy - - if event.key == " ": - ix, iy = event.xdata, event.ydata - logger.info(f"x = {ix}, y = {iy}") - y1_max, y1_min = np.amax(y1), np.amin(y1) - y2_max, y2_min = np.amax(y2), np.amin(y2) - - # ax1.plot([ix,ix], [y1_max, y1_min], 'k--') - # ax2.plot([ix,ix], [y2_max, y2_min], 'k--') - - ax1.axvline(ix, c="black", ls="--") - ax2.axvline(ix, c="black", ls="--") - ax3.axvline(ix, c="black", ls="--") - - fig.canvas.draw() - - global coords - coords.append((ix, iy)) - - # if len(coords) == 2: - # fig.canvas.mpl_disconnect(cid) - - return coords - - elif event.key == "d": - if len(coords) > 0: - 
logger.info(f"x = {coords[-1][0]}, y = {coords[-1][1]}; deleted") - del coords[-1] - ax1.lines[-1].remove() - ax2.lines[-1].remove() - ax3.lines[-1].remove() - fig.canvas.draw() - - return coords - - # close the plot will save coordinates for all the selected chunks in the data - def plt_close_event(event): - global coords - if coords and len(coords) > 0: - name_1 = plot_name[0].split("_")[-1] - np.save(os.path.join(filepath, "coordsForPreProcessing_" + name_1 + ".npy"), coords) - logger.info(f"Coordinates file saved at {os.path.join(filepath, 'coordsForPreProcessing_'+name_1+'.npy')}") - fig.canvas.mpl_disconnect(cid) - coords = [] - - cid = fig.canvas.mpl_connect("key_press_event", onclick) - cid = fig.canvas.mpl_connect("close_event", plt_close_event) - # multi = MultiCursor(fig.canvas, (ax1, ax2), color='g', lw=1, horizOn=False, vertOn=True) - - # plt.show() - # return fig - - # function to plot control and signal, also provide a feature to select chunks for artifacts removal def visualizeControlAndSignal(filepath, removeArtifacts): path_1 = find_files(filepath, "control_*", ignore_case=True) # glob.glob(os.path.join(filepath, 'control*')) diff --git a/src/guppy/visualization/preprocessing.py b/src/guppy/visualization/preprocessing.py index 460adb9..59f7581 100644 --- a/src/guppy/visualization/preprocessing.py +++ b/src/guppy/visualization/preprocessing.py @@ -2,10 +2,7 @@ import os import matplotlib.pyplot as plt - -from ..analysis.io_utils import ( - get_all_stores_for_combining_data, # noqa: F401 -- Necessary for other modules that depend on preprocess.py -) +import numpy as np logger = logging.getLogger(__name__) @@ -22,3 +19,83 @@ def visualize_preprocessing(*, suptitle, title, x, y): fig.suptitle(suptitle) return fig, ax + + +def visualize(filepath, x, y1, y2, y3, plot_name, removeArtifacts): + + # plotting control and signal data + + if (y1 == 0).all() == True: + y1 = np.zeros(x.shape[0]) + + coords_path = os.path.join(filepath, "coordsForPreProcessing_" + plot_name[0].split("_")[-1] + ".npy") + artifacts_have_been_removed = removeArtifacts and os.path.exists(coords_path) + name = os.path.basename(filepath) + fig, ax1, ax2, ax3 = visualize_control_signal_fit(x, y1, y2, y3, plot_name, name, artifacts_have_been_removed) + + global coords + coords = [] + + # clicking 'space' key on keyboard will draw a line on the plot so that user can see what chunks are selected + # and clicking 'd' key on keyboard will deselect the selected point + def onclick(event): + if event.key == " ": + ix, iy = event.xdata, event.ydata + logger.info(f"x = {ix}, y = {iy}") + ax1.axvline(ix, c="black", ls="--") + ax2.axvline(ix, c="black", ls="--") + ax3.axvline(ix, c="black", ls="--") + + fig.canvas.draw() + + global coords + coords.append((ix, iy)) + + return coords + + elif event.key == "d": + if len(coords) > 0: + logger.info(f"x = {coords[-1][0]}, y = {coords[-1][1]}; deleted") + del coords[-1] + ax1.lines[-1].remove() + ax2.lines[-1].remove() + ax3.lines[-1].remove() + fig.canvas.draw() + + return coords + + # close the plot will save coordinates for all the selected chunks in the data + def plt_close_event(event): + global coords + if coords and len(coords) > 0: + name_1 = plot_name[0].split("_")[-1] + np.save(os.path.join(filepath, "coordsForPreProcessing_" + name_1 + ".npy"), coords) + logger.info(f"Coordinates file saved at {os.path.join(filepath, 'coordsForPreProcessing_'+name_1+'.npy')}") + fig.canvas.mpl_disconnect(cid) + coords = [] + + cid = fig.canvas.mpl_connect("key_press_event", onclick) 
+ cid = fig.canvas.mpl_connect("close_event", plt_close_event) + + +def visualize_control_signal_fit(x, y1, y2, y3, plot_name, name, artifacts_have_been_removed): + fig = plt.figure() + ax1 = fig.add_subplot(311) + (line1,) = ax1.plot(x, y1) + ax1.set_title(plot_name[0]) + ax2 = fig.add_subplot(312) + (line2,) = ax2.plot(x, y2) + ax2.set_title(plot_name[1]) + ax3 = fig.add_subplot(313) + (line3,) = ax3.plot(x, y2) + (line3,) = ax3.plot(x, y3) + ax3.set_title(plot_name[2]) + fig.suptitle(name) + + hfont = {"fontname": "DejaVu Sans"} + + if artifacts_have_been_removed: + ax3.set_xlabel("Time(s) \n Note : Artifacts have been removed, but are not reflected in this plot.", **hfont) + else: + ax3.set_xlabel("Time(s)", **hfont) + return fig, ax1, ax2, ax3 From 78ea92b4f52487567147eef1654e09fe945eae32 Mon Sep 17 00:00:00 2001 From: pauladkisson Date: Tue, 3 Feb 2026 16:06:06 -0800 Subject: [PATCH 31/53] Added ArtifactRemovalWidget. --- src/guppy/frontend/artifact_removal.py | 68 ++++++++++++++++++++++++ src/guppy/preprocess.py | 5 +- src/guppy/visualization/preprocessing.py | 58 -------------------- 3 files changed, 71 insertions(+), 60 deletions(-) create mode 100644 src/guppy/frontend/artifact_removal.py diff --git a/src/guppy/frontend/artifact_removal.py b/src/guppy/frontend/artifact_removal.py new file mode 100644 index 0000000..596934f --- /dev/null +++ b/src/guppy/frontend/artifact_removal.py @@ -0,0 +1,68 @@ +import logging +import os + +import matplotlib.pyplot as plt +import numpy as np + +from ..visualization.preprocessing import visualize_control_signal_fit + +logger = logging.getLogger(__name__) + +# Only set matplotlib backend if not in CI environment +if not os.getenv("CI"): + plt.switch_backend("TKAgg") + + +class ArtifactRemovalWidget: + + def __init__(self, filepath, x, y1, y2, y3, plot_name, removeArtifacts): + self.coords = [] # List to store selected coordinates + + if (y1 == 0).all() == True: + y1 = np.zeros(x.shape[0]) + + coords_path = os.path.join(filepath, "coordsForPreProcessing_" + plot_name[0].split("_")[-1] + ".npy") + artifacts_have_been_removed = removeArtifacts and os.path.exists(coords_path) + name = os.path.basename(filepath) + fig, ax1, ax2, ax3 = visualize_control_signal_fit(x, y1, y2, y3, plot_name, name, artifacts_have_been_removed) + + # clicking 'space' key on keyboard will draw a line on the plot so that user can see what chunks are selected + # and clicking 'd' key on keyboard will deselect the selected point + def onclick(event): + if event.key == " ": + ix, iy = event.xdata, event.ydata + logger.info(f"x = {ix}, y = {iy}") + ax1.axvline(ix, c="black", ls="--") + ax2.axvline(ix, c="black", ls="--") + ax3.axvline(ix, c="black", ls="--") + + fig.canvas.draw() + + self.coords.append((ix, iy)) + + return self.coords + + elif event.key == "d": + if len(self.coords) > 0: + logger.info(f"x = {self.coords[-1][0]}, y = {self.coords[-1][1]}; deleted") + del self.coords[-1] + ax1.lines[-1].remove() + ax2.lines[-1].remove() + ax3.lines[-1].remove() + fig.canvas.draw() + + return self.coords + + # close the plot will save coordinates for all the selected chunks in the data + def plt_close_event(event): + if self.coords and len(self.coords) > 0: + name_1 = plot_name[0].split("_")[-1] + np.save(os.path.join(filepath, "coordsForPreProcessing_" + name_1 + ".npy"), self.coords) + logger.info( + f"Coordinates file saved at {os.path.join(filepath, 'coordsForPreProcessing_'+name_1+'.npy')}" + ) + fig.canvas.mpl_disconnect(cid) + self.coords = [] + + cid = 
fig.canvas.mpl_connect("key_press_event", onclick) + cid = fig.canvas.mpl_connect("close_event", plt_close_event) diff --git a/src/guppy/preprocess.py b/src/guppy/preprocess.py index b5fd4e1..22b81b8 100755 --- a/src/guppy/preprocess.py +++ b/src/guppy/preprocess.py @@ -40,8 +40,9 @@ ) from .analysis.timestamp_correction import correct_timestamps from .analysis.z_score import compute_z_score +from .frontend.artifact_removal import ArtifactRemovalWidget from .frontend.progress import writeToFile -from .visualization.preprocessing import visualize, visualize_preprocessing +from .visualization.preprocessing import visualize_preprocessing logger = logging.getLogger(__name__) @@ -97,7 +98,7 @@ def visualizeControlAndSignal(filepath, removeArtifacts): (os.path.basename(path[1, i])).split(".")[0], (os.path.basename(cntrl_sig_fit_path)).split(".")[0], ] - visualize(filepath, ts, control, signal, cntrl_sig_fit, plot_name, removeArtifacts) + widget = ArtifactRemovalWidget(filepath, ts, control, signal, cntrl_sig_fit, plot_name, removeArtifacts) # function to execute timestamps corrections using functions timestampCorrection and decide_naming_convention_and_applyCorrection diff --git a/src/guppy/visualization/preprocessing.py b/src/guppy/visualization/preprocessing.py index 59f7581..cda8150 100644 --- a/src/guppy/visualization/preprocessing.py +++ b/src/guppy/visualization/preprocessing.py @@ -2,7 +2,6 @@ import os import matplotlib.pyplot as plt -import numpy as np logger = logging.getLogger(__name__) @@ -21,63 +20,6 @@ def visualize_preprocessing(*, suptitle, title, x, y): return fig, ax -def visualize(filepath, x, y1, y2, y3, plot_name, removeArtifacts): - - # plotting control and signal data - - if (y1 == 0).all() == True: - y1 = np.zeros(x.shape[0]) - - coords_path = os.path.join(filepath, "coordsForPreProcessing_" + plot_name[0].split("_")[-1] + ".npy") - artifacts_have_been_removed = removeArtifacts and os.path.exists(coords_path) - name = os.path.basename(filepath) - fig, ax1, ax2, ax3 = visualize_control_signal_fit(x, y1, y2, y3, plot_name, name, artifacts_have_been_removed) - - global coords - coords = [] - - # clicking 'space' key on keyboard will draw a line on the plot so that user can see what chunks are selected - # and clicking 'd' key on keyboard will deselect the selected point - def onclick(event): - if event.key == " ": - ix, iy = event.xdata, event.ydata - logger.info(f"x = {ix}, y = {iy}") - ax1.axvline(ix, c="black", ls="--") - ax2.axvline(ix, c="black", ls="--") - ax3.axvline(ix, c="black", ls="--") - - fig.canvas.draw() - - global coords - coords.append((ix, iy)) - - return coords - - elif event.key == "d": - if len(coords) > 0: - logger.info(f"x = {coords[-1][0]}, y = {coords[-1][1]}; deleted") - del coords[-1] - ax1.lines[-1].remove() - ax2.lines[-1].remove() - ax3.lines[-1].remove() - fig.canvas.draw() - - return coords - - # close the plot will save coordinates for all the selected chunks in the data - def plt_close_event(event): - global coords - if coords and len(coords) > 0: - name_1 = plot_name[0].split("_")[-1] - np.save(os.path.join(filepath, "coordsForPreProcessing_" + name_1 + ".npy"), coords) - logger.info(f"Coordinates file saved at {os.path.join(filepath, 'coordsForPreProcessing_'+name_1+'.npy')}") - fig.canvas.mpl_disconnect(cid) - coords = [] - - cid = fig.canvas.mpl_connect("key_press_event", onclick) - cid = fig.canvas.mpl_connect("close_event", plt_close_event) - - def visualize_control_signal_fit(x, y1, y2, y3, plot_name, name, 
artifacts_have_been_removed): fig = plt.figure() ax1 = fig.add_subplot(311) From ea0489e68dbb56c1e8aca073af7ec3c6aa6f5a8d Mon Sep 17 00:00:00 2001 From: pauladkisson Date: Tue, 3 Feb 2026 17:43:52 -0800 Subject: [PATCH 32/53] Refactored ArtifactRemovalWidget --- src/guppy/frontend/artifact_removal.py | 91 ++++++++++++++------------ src/guppy/preprocess.py | 5 +- 2 files changed, 53 insertions(+), 43 deletions(-) diff --git a/src/guppy/frontend/artifact_removal.py b/src/guppy/frontend/artifact_removal.py index 596934f..f626190 100644 --- a/src/guppy/frontend/artifact_removal.py +++ b/src/guppy/frontend/artifact_removal.py @@ -17,6 +17,8 @@ class ArtifactRemovalWidget: def __init__(self, filepath, x, y1, y2, y3, plot_name, removeArtifacts): self.coords = [] # List to store selected coordinates + self.filepath = filepath + self.plot_name = plot_name if (y1 == 0).all() == True: y1 = np.zeros(x.shape[0]) @@ -24,45 +26,50 @@ def __init__(self, filepath, x, y1, y2, y3, plot_name, removeArtifacts): coords_path = os.path.join(filepath, "coordsForPreProcessing_" + plot_name[0].split("_")[-1] + ".npy") artifacts_have_been_removed = removeArtifacts and os.path.exists(coords_path) name = os.path.basename(filepath) - fig, ax1, ax2, ax3 = visualize_control_signal_fit(x, y1, y2, y3, plot_name, name, artifacts_have_been_removed) - - # clicking 'space' key on keyboard will draw a line on the plot so that user can see what chunks are selected - # and clicking 'd' key on keyboard will deselect the selected point - def onclick(event): - if event.key == " ": - ix, iy = event.xdata, event.ydata - logger.info(f"x = {ix}, y = {iy}") - ax1.axvline(ix, c="black", ls="--") - ax2.axvline(ix, c="black", ls="--") - ax3.axvline(ix, c="black", ls="--") - - fig.canvas.draw() - - self.coords.append((ix, iy)) - - return self.coords - - elif event.key == "d": - if len(self.coords) > 0: - logger.info(f"x = {self.coords[-1][0]}, y = {self.coords[-1][1]}; deleted") - del self.coords[-1] - ax1.lines[-1].remove() - ax2.lines[-1].remove() - ax3.lines[-1].remove() - fig.canvas.draw() - - return self.coords - - # close the plot will save coordinates for all the selected chunks in the data - def plt_close_event(event): - if self.coords and len(self.coords) > 0: - name_1 = plot_name[0].split("_")[-1] - np.save(os.path.join(filepath, "coordsForPreProcessing_" + name_1 + ".npy"), self.coords) - logger.info( - f"Coordinates file saved at {os.path.join(filepath, 'coordsForPreProcessing_'+name_1+'.npy')}" - ) - fig.canvas.mpl_disconnect(cid) - self.coords = [] - - cid = fig.canvas.mpl_connect("key_press_event", onclick) - cid = fig.canvas.mpl_connect("close_event", plt_close_event) + self.fig, self.ax1, self.ax2, self.ax3 = visualize_control_signal_fit( + x, y1, y2, y3, plot_name, name, artifacts_have_been_removed + ) + + self.cid = self.fig.canvas.mpl_connect("key_press_event", self._on_key_press) + self.fig.canvas.mpl_connect("close_event", self._on_close) + + def _on_key_press(self, event): + """Handle key press events for artifact selection. + + Pressing 'space' draws a vertical line at the cursor position to mark artifact boundaries. + Pressing 'd' removes the most recently added line. 
+ """ + if event.key == " ": + ix, iy = event.xdata, event.ydata + logger.info(f"x = {ix}, y = {iy}") + self.ax1.axvline(ix, c="black", ls="--") + self.ax2.axvline(ix, c="black", ls="--") + self.ax3.axvline(ix, c="black", ls="--") + + self.fig.canvas.draw() + + self.coords.append((ix, iy)) + + return self.coords + + elif event.key == "d": + if len(self.coords) > 0: + logger.info(f"x = {self.coords[-1][0]}, y = {self.coords[-1][1]}; deleted") + del self.coords[-1] + self.ax1.lines[-1].remove() + self.ax2.lines[-1].remove() + self.ax3.lines[-1].remove() + self.fig.canvas.draw() + + return self.coords + + def _on_close(self, _event): + """Handle figure close event by saving coordinates and cleaning up.""" + if self.coords and len(self.coords) > 0: + name_1 = self.plot_name[0].split("_")[-1] + np.save(os.path.join(self.filepath, "coordsForPreProcessing_" + name_1 + ".npy"), self.coords) + logger.info( + f"Coordinates file saved at {os.path.join(self.filepath, 'coordsForPreProcessing_'+name_1+'.npy')}" + ) + self.fig.canvas.mpl_disconnect(self.cid) + self.coords = [] diff --git a/src/guppy/preprocess.py b/src/guppy/preprocess.py index 22b81b8..6b67df0 100755 --- a/src/guppy/preprocess.py +++ b/src/guppy/preprocess.py @@ -80,6 +80,7 @@ def visualizeControlAndSignal(filepath, removeArtifacts): path = np.asarray(path).reshape(2, -1) + widgets = [] for i in range(path.shape[1]): name_1 = ((os.path.basename(path[0, i])).split(".")[0]).split("_") @@ -99,6 +100,8 @@ def visualizeControlAndSignal(filepath, removeArtifacts): (os.path.basename(cntrl_sig_fit_path)).split(".")[0], ] widget = ArtifactRemovalWidget(filepath, ts, control, signal, cntrl_sig_fit, plot_name, removeArtifacts) + widgets.append(widget) + return widgets # function to execute timestamps corrections using functions timestampCorrection and decide_naming_convention_and_applyCorrection @@ -220,7 +223,7 @@ def execute_zscore(folderNames, inputParameters): logger.info(f"z-score for the data in {filepath} computed.") if not remove_artifacts: - visualizeControlAndSignal(filepath, removeArtifacts=remove_artifacts) + widgets = visualizeControlAndSignal(filepath, removeArtifacts=remove_artifacts) if plot_zScore_dff == "z_score": execute_preprocessing_visualization(filepath, visualization_type="z_score") From a289d1f7a1a23adf0cc69ca4a003c268b797130d Mon Sep 17 00:00:00 2001 From: pauladkisson Date: Tue, 3 Feb 2026 17:45:53 -0800 Subject: [PATCH 33/53] Refactored ArtifactRemovalWidget --- src/guppy/preprocess.py | 1 + 1 file changed, 1 insertion(+) diff --git a/src/guppy/preprocess.py b/src/guppy/preprocess.py index 6b67df0..d113fb2 100755 --- a/src/guppy/preprocess.py +++ b/src/guppy/preprocess.py @@ -223,6 +223,7 @@ def execute_zscore(folderNames, inputParameters): logger.info(f"z-score for the data in {filepath} computed.") if not remove_artifacts: + # a reference to widgets has to persist in the same scope as plt.show() is called widgets = visualizeControlAndSignal(filepath, removeArtifacts=remove_artifacts) if plot_zScore_dff == "z_score": From 2924a2c19cbfc81434b12412439d9792a2f62798 Mon Sep 17 00:00:00 2001 From: pauladkisson Date: Tue, 3 Feb 2026 18:06:49 -0800 Subject: [PATCH 34/53] Refactor to visualize z-score into its own function. 
--- src/guppy/preprocess.py | 33 ++++++++++++++++++++++++++++----- 1 file changed, 28 insertions(+), 5 deletions(-) diff --git a/src/guppy/preprocess.py b/src/guppy/preprocess.py index d113fb2..ca2e3d6 100755 --- a/src/guppy/preprocess.py +++ b/src/guppy/preprocess.py @@ -221,10 +221,34 @@ def execute_zscore(folderNames, inputParameters): write_zscore(filepath, name, z_score, dff, control_fit, temp_control_arr) logger.info(f"z-score for the data in {filepath} computed.") + writeToFile(str(10 + ((inputParameters["step"] + 1) * 10)) + "\n") + inputParameters["step"] += 1 + + plt.show() + logger.info("Z-score computation completed.") + + +def visualize_z_score(inputParameters, folderNames): + plot_zScore_dff = inputParameters["plot_zScore_dff"] + combine_data = inputParameters["combine_data"] + remove_artifacts = inputParameters["removeArtifacts"] + + storesListPath = [] + for i in range(len(folderNames)): + if combine_data == True: + storesListPath.append([folderNames[i][0]]) + else: + filepath = folderNames[i] + storesListPath.append(takeOnlyDirs(glob.glob(os.path.join(filepath, "*_output_*")))) + storesListPath = np.concatenate(storesListPath) + + widgets = [] + for j in range(len(storesListPath)): + filepath = storesListPath[j] if not remove_artifacts: # a reference to widgets has to persist in the same scope as plt.show() is called - widgets = visualizeControlAndSignal(filepath, removeArtifacts=remove_artifacts) + widgets.extend(visualizeControlAndSignal(filepath, removeArtifacts=remove_artifacts)) if plot_zScore_dff == "z_score": execute_preprocessing_visualization(filepath, visualization_type="z_score") @@ -234,11 +258,8 @@ def execute_zscore(folderNames, inputParameters): execute_preprocessing_visualization(filepath, visualization_type="z_score") execute_preprocessing_visualization(filepath, visualization_type="dff") - writeToFile(str(10 + ((inputParameters["step"] + 1) * 10)) + "\n") - inputParameters["step"] += 1 - plt.show() - logger.info("Z-score computation completed.") + logger.info("Visualization of z-score and dF/F completed.") # function to remove artifacts from z-score data @@ -376,6 +397,7 @@ def extractTsAndSignal(inputParameters): writeToFile(str((pbMaxValue + 1) * 10) + "\n" + str(10) + "\n") execute_timestamp_correction(folderNames, inputParameters) execute_zscore(folderNames, inputParameters) + visualize_z_score(inputParameters, folderNames) if remove_artifacts == True: execute_artifact_removal(folderNames, inputParameters) else: @@ -385,6 +407,7 @@ def extractTsAndSignal(inputParameters): storesList = check_storeslistfile(folderNames) op_folder = execute_combine_data(folderNames, inputParameters, storesList) execute_zscore(op_folder, inputParameters) + visualize_z_score(inputParameters, op_folder) if remove_artifacts == True: execute_artifact_removal(op_folder, inputParameters) From 86762ea489843478d01c41dbbb54ae03c0b26abf Mon Sep 17 00:00:00 2001 From: pauladkisson Date: Thu, 5 Feb 2026 11:44:28 -0800 Subject: [PATCH 35/53] Refactored artifact removal plots to a dedicated function --- src/guppy/preprocess.py | 23 +++++++++++++++++++++-- 1 file changed, 21 insertions(+), 2 deletions(-) diff --git a/src/guppy/preprocess.py b/src/guppy/preprocess.py index ca2e3d6..2ff435d 100755 --- a/src/guppy/preprocess.py +++ b/src/guppy/preprocess.py @@ -301,15 +301,34 @@ def execute_artifact_removal(folderNames, inputParameters): ) write_artifact_removal(filepath, name_to_data, pair_name_to_timestamps, compound_name_to_ttl_timestamps) - visualizeControlAndSignal(filepath, 
removeArtifacts=True) writeToFile(str(10 + ((inputParameters["step"] + 1) * 10)) + "\n") inputParameters["step"] += 1 - plt.show() + visualize_artifact_removal(folderNames, inputParameters) logger.info("Artifact removal completed.") +def visualize_artifact_removal(folderNames, inputParameters): + combine_data = inputParameters["combine_data"] + + storesListPath = [] + for i in range(len(folderNames)): + if combine_data == True: + storesListPath.append([folderNames[i][0]]) + else: + filepath = folderNames[i] + storesListPath.append(takeOnlyDirs(glob.glob(os.path.join(filepath, "*_output_*")))) + + storesListPath = np.concatenate(storesListPath) + + for j in range(len(storesListPath)): + filepath = storesListPath[j] + visualizeControlAndSignal(filepath, removeArtifacts=True) + plt.show() + logger.info("Visualization of artifact removal completed.") + + # function to combine data when there are two different data files for the same recording session # it will combine the data, do timestamps processing and save the combined data in the first output folder. def execute_combine_data(folderNames, inputParameters, storesList): From bb83a2f1fbe9f35cac212e7bc43883687a7d7337 Mon Sep 17 00:00:00 2001 From: pauladkisson Date: Thu, 5 Feb 2026 11:48:36 -0800 Subject: [PATCH 36/53] Moved preprocessing.py into the orchestration layer. --- src/guppy/{ => orchestration}/preprocess.py | 20 ++++++++++---------- src/guppy/testing/api.py | 2 +- src/guppy/visualizePlot.py | 2 +- 3 files changed, 12 insertions(+), 12 deletions(-) rename src/guppy/{ => orchestration}/preprocess.py (97%) diff --git a/src/guppy/preprocess.py b/src/guppy/orchestration/preprocess.py similarity index 97% rename from src/guppy/preprocess.py rename to src/guppy/orchestration/preprocess.py index 2ff435d..21f9ff9 100755 --- a/src/guppy/preprocess.py +++ b/src/guppy/orchestration/preprocess.py @@ -8,10 +8,10 @@ import matplotlib.pyplot as plt import numpy as np -from .analysis.artifact_removal import remove_artifacts -from .analysis.combine_data import combine_data -from .analysis.control_channel import add_control_channel, create_control_channel -from .analysis.io_utils import ( +from ..analysis.artifact_removal import remove_artifacts +from ..analysis.combine_data import combine_data +from ..analysis.control_channel import add_control_channel, create_control_channel +from ..analysis.io_utils import ( check_storeslistfile, check_TDT, find_files, @@ -20,7 +20,7 @@ read_hdf5, takeOnlyDirs, ) -from .analysis.standard_io import ( +from ..analysis.standard_io import ( read_control_and_signal, read_coords_pairwise, read_corrected_data, @@ -38,11 +38,11 @@ write_corrected_ttl_timestamps, write_zscore, ) -from .analysis.timestamp_correction import correct_timestamps -from .analysis.z_score import compute_z_score -from .frontend.artifact_removal import ArtifactRemovalWidget -from .frontend.progress import writeToFile -from .visualization.preprocessing import visualize_preprocessing +from ..analysis.timestamp_correction import correct_timestamps +from ..analysis.z_score import compute_z_score +from ..frontend.artifact_removal import ArtifactRemovalWidget +from ..frontend.progress import writeToFile +from ..visualization.preprocessing import visualize_preprocessing logger = logging.getLogger(__name__) diff --git a/src/guppy/testing/api.py b/src/guppy/testing/api.py index 0c8c027..6085457 100644 --- a/src/guppy/testing/api.py +++ b/src/guppy/testing/api.py @@ -16,9 +16,9 @@ from guppy.computePsth import psthForEachStorename from 
guppy.findTransientsFreqAndAmp import executeFindFreqAndAmp from guppy.orchestration.home import build_homepage +from guppy.orchestration.preprocess import extractTsAndSignal from guppy.orchestration.read_raw_data import orchestrate_read_raw_data from guppy.orchestration.storenames import orchestrate_storenames_page -from guppy.preprocess import extractTsAndSignal def step1(*, base_dir: str, selected_folders: Iterable[str]) -> None: diff --git a/src/guppy/visualizePlot.py b/src/guppy/visualizePlot.py index e3fc1fb..1383190 100755 --- a/src/guppy/visualizePlot.py +++ b/src/guppy/visualizePlot.py @@ -18,7 +18,7 @@ from holoviews.operation.datashader import datashade from holoviews.plotting.util import process_cmap -from .preprocess import get_all_stores_for_combining_data +from .orchestration.preprocess import get_all_stores_for_combining_data from .utils.utils import takeOnlyDirs pn.extension() From e41c810e11592311aaac6ed2234693e31ef90032 Mon Sep 17 00:00:00 2001 From: pauladkisson Date: Thu, 5 Feb 2026 11:53:52 -0800 Subject: [PATCH 37/53] Renamed the step 4 button to 'Preprocess and Remove Artifacts' to better reflect its function. --- src/guppy/frontend/sidebar.py | 12 +++++------- src/guppy/orchestration/home.py | 10 +++++----- 2 files changed, 10 insertions(+), 12 deletions(-) diff --git a/src/guppy/frontend/sidebar.py b/src/guppy/frontend/sidebar.py index d73f295..466a639 100644 --- a/src/guppy/frontend/sidebar.py +++ b/src/guppy/frontend/sidebar.py @@ -26,9 +26,7 @@ def setup_markdown(self): """**Step 2 : Open Storenames GUI
and save storenames**""", width=300 ) self.mark_down_read = pn.pane.Markdown("""**Step 3 : Read Raw Data**""", width=300) - self.mark_down_extract = pn.pane.Markdown( - """**Step 4 : Extract timestamps
and its correction**""", width=300 - ) + self.mark_down_preprocess = pn.pane.Markdown("""**Step 4 : Preprocess and Remove Artifacts**""", width=300) self.mark_down_psth = pn.pane.Markdown("""**Step 5 : PSTH Computation**""", width=300) self.mark_down_visualization = pn.pane.Markdown("""**Step 6 : Visualization**""", width=300) @@ -37,8 +35,8 @@ def setup_buttons(self): name="Open Storenames GUI", button_type="primary", width=300, align="end" ) self.read_rawData = pn.widgets.Button(name="Read Raw Data", button_type="primary", width=300, align="end") - self.extract_ts = pn.widgets.Button( - name="Extract timestamps and it's correction", button_type="primary", width=300, align="end" + self.preprocess = pn.widgets.Button( + name="Preprocess and Remove Artifacts", button_type="primary", width=300, align="end" ) self.psth_computation = pn.widgets.Button( name="PSTH Computation", button_type="primary", width=300, align="end" @@ -67,8 +65,8 @@ def add_to_template(self): self.template.sidebar.append(self.mark_down_read) self.template.sidebar.append(self.read_rawData) self.template.sidebar.append(self.read_progress) - self.template.sidebar.append(self.mark_down_extract) - self.template.sidebar.append(self.extract_ts) + self.template.sidebar.append(self.mark_down_preprocess) + self.template.sidebar.append(self.preprocess) self.template.sidebar.append(self.extract_progress) self.template.sidebar.append(self.mark_down_psth) self.template.sidebar.append(self.psth_computation) diff --git a/src/guppy/orchestration/home.py b/src/guppy/orchestration/home.py index 1b5eb29..541dc0d 100644 --- a/src/guppy/orchestration/home.py +++ b/src/guppy/orchestration/home.py @@ -23,9 +23,9 @@ def readRawData(parameter_form): subprocess.call([sys.executable, "-m", "guppy.orchestration.read_raw_data", json.dumps(inputParameters)]) -def extractTs(parameter_form): +def preprocess(parameter_form): inputParameters = parameter_form.getInputParameters() - subprocess.call([sys.executable, "-m", "guppy.preprocess", json.dumps(inputParameters)]) + subprocess.call([sys.executable, "-m", "guppy.orchestration.preprocess", json.dumps(inputParameters)]) def psthComputation(parameter_form, current_dir): @@ -64,8 +64,8 @@ def onclickreaddata(event=None): readPBIncrementValues(sidebar.read_progress) thread.join() - def onclickextractts(event=None): - thread = Thread(target=extractTs, args=(parameter_form,)) + def onclickpreprocess(event=None): + thread = Thread(target=preprocess, args=(parameter_form,)) thread.start() readPBIncrementValues(sidebar.extract_progress) thread.join() @@ -82,7 +82,7 @@ def onclickpsth(event=None): "save_button": onclickProcess, "open_storenames": onclickStorenames, "read_rawData": onclickreaddata, - "extract_ts": onclickextractts, + "preprocess": onclickpreprocess, "psth_computation": onclickpsth, "open_visualization": onclickVisualization, } From 08e13e8a73041fcdbf5952f20bc66e94ccce2fca Mon Sep 17 00:00:00 2001 From: pauladkisson Date: Thu, 5 Feb 2026 12:11:56 -0800 Subject: [PATCH 38/53] Refactored into a dedicated module. 
--- src/guppy/findTransientsFreqAndAmp.py | 18 ++++-------------- src/guppy/visualization/transients.py | 15 +++++++++++++++ 2 files changed, 19 insertions(+), 14 deletions(-) create mode 100644 src/guppy/visualization/transients.py diff --git a/src/guppy/findTransientsFreqAndAmp.py b/src/guppy/findTransientsFreqAndAmp.py index 3a78871..3697e1f 100755 --- a/src/guppy/findTransientsFreqAndAmp.py +++ b/src/guppy/findTransientsFreqAndAmp.py @@ -20,23 +20,11 @@ from .analysis.transients import analyze_transients from .analysis.transients_average import averageForGroup from .frontend.progress import writeToFile +from .visualization.transients import visualize_peaks logger = logging.getLogger(__name__) -def visuzlize_peaks(filepath, z_score, timestamps, peaksIndex): - - dirname = os.path.dirname(filepath) - - basename = (os.path.basename(filepath)).split(".")[0] - fig = plt.figure() - ax = fig.add_subplot(111) - ax.plot(timestamps, z_score, "-", timestamps[peaksIndex], z_score[peaksIndex], "o") - ax.set_title(basename) - fig.suptitle(os.path.basename(dirname)) - # plt.show() - - def findFreqAndAmp(filepath, inputParameters, window=15, numProcesses=mp.cpu_count()): logger.debug("Calculating frequency and amplitude of transients in z-score data....") @@ -72,7 +60,9 @@ def findFreqAndAmp(filepath, inputParameters, window=15, numProcesses=mp.cpu_cou index=np.arange(peaks_occurrences.shape[0]), columns=["timestamps", "amplitude"], ) - visuzlize_peaks(path[i], z_score, ts, peaksInd) + suptitle = os.path.basename(os.path.dirname(path[i])) + title = (os.path.basename(path[i])).split(".")[0] + visualize_peaks(title, suptitle, z_score, ts, peaksInd) logger.info("Frequency and amplitude of transients in z_score data are calculated.") diff --git a/src/guppy/visualization/transients.py b/src/guppy/visualization/transients.py new file mode 100644 index 0000000..030ac5e --- /dev/null +++ b/src/guppy/visualization/transients.py @@ -0,0 +1,15 @@ +import logging + +import matplotlib.pyplot as plt + +logger = logging.getLogger(__name__) + + +def visualize_peaks(title, suptitle, z_score, timestamps, peaksIndex): + fig = plt.figure() + ax = fig.add_subplot(111) + ax.plot(timestamps, z_score, "-", timestamps[peaksIndex], z_score[peaksIndex], "o") + ax.set_title(title) + fig.suptitle(suptitle) + + return fig, ax From 1f960ca10765959245ab631a8d8531ef116ad4b6 Mon Sep 17 00:00:00 2001 From: pauladkisson Date: Thu, 5 Feb 2026 12:41:29 -0800 Subject: [PATCH 39/53] Refactored visualization into dedicated execute fn --- src/guppy/analysis/standard_io.py | 15 +++++++++++++++ src/guppy/findTransientsFreqAndAmp.py | 24 ++++++++++++++++++++++++ 2 files changed, 39 insertions(+) diff --git a/src/guppy/analysis/standard_io.py b/src/guppy/analysis/standard_io.py index d6dd9af..d0ade6d 100644 --- a/src/guppy/analysis/standard_io.py +++ b/src/guppy/analysis/standard_io.py @@ -331,3 +331,18 @@ def read_freq_and_amp_from_hdf5(filepath, name): df = pd.read_hdf(op, key="df", mode="r") return df + + +def write_transients_to_hdf5(filepath, name, z_score, ts, peaksInd): + event = f"transient_outputs_{name}" + write_hdf5(z_score, event, filepath, "z_score") + write_hdf5(ts, event, filepath, "timestamps") + write_hdf5(peaksInd, event, filepath, "peaksInd") + + +def read_transients_from_hdf5(filepath, name): + event = f"transient_outputs_{name}" + z_score = read_hdf5(event, filepath, "z_score") + ts = read_hdf5(event, filepath, "timestamps") + peaksInd = read_hdf5(event, filepath, "peaksInd") + return z_score, ts, peaksInd diff --git 
a/src/guppy/findTransientsFreqAndAmp.py b/src/guppy/findTransientsFreqAndAmp.py index 3697e1f..5d505c2 100755 --- a/src/guppy/findTransientsFreqAndAmp.py +++ b/src/guppy/findTransientsFreqAndAmp.py @@ -14,8 +14,10 @@ takeOnlyDirs, ) from .analysis.standard_io import ( + read_transients_from_hdf5, write_freq_and_amp_to_csv, write_freq_and_amp_to_hdf5, + write_transients_to_hdf5, ) from .analysis.transients import analyze_transients from .analysis.transients_average import averageForGroup @@ -60,10 +62,31 @@ def findFreqAndAmp(filepath, inputParameters, window=15, numProcesses=mp.cpu_cou index=np.arange(peaks_occurrences.shape[0]), columns=["timestamps", "amplitude"], ) + write_transients_to_hdf5(filepath, basename, z_score, ts, peaksInd) + logger.info("Frequency and amplitude of transients in z_score data are calculated.") + + +def execute_visualize_peaks(filepath, inputParameters): + + selectForTransientsComputation = inputParameters["selectForTransientsComputation"] + + if selectForTransientsComputation == "z_score": + path = glob.glob(os.path.join(filepath, "z_score_*")) + elif selectForTransientsComputation == "dff": + path = glob.glob(os.path.join(filepath, "dff_*")) + else: + path = glob.glob(os.path.join(filepath, "z_score_*")) + glob.glob(os.path.join(filepath, "dff_*")) + + for i in range(len(path)): + basename = (os.path.basename(path[i])).split(".")[0] + z_score, ts, peaksInd = read_transients_from_hdf5(filepath, basename) + suptitle = os.path.basename(os.path.dirname(path[i])) title = (os.path.basename(path[i])).split(".")[0] visualize_peaks(title, suptitle, z_score, ts, peaksInd) + logger.info("Frequency and amplitude of transients in z_score data are calculated.") + # plt.show() def executeFindFreqAndAmp(inputParameters): @@ -109,6 +132,7 @@ def execute_find_freq_and_amp(inputParameters, folderNames, moving_window, numPr 2, -1 ) findFreqAndAmp(filepath, inputParameters, window=moving_window, numProcesses=numProcesses) + execute_visualize_peaks(filepath, inputParameters) writeToFile(str(10 + ((inputParameters["step"] + 1) * 10)) + "\n") inputParameters["step"] += 1 logger.info("Transients in z-score data found and frequency and amplitude are calculated.") From 4440f6a5f745919be44aca5f0ee32a9e85b41462 Mon Sep 17 00:00:00 2001 From: pauladkisson Date: Thu, 5 Feb 2026 13:03:54 -0800 Subject: [PATCH 40/53] Refactored visualization into dedicated execute fn --- src/guppy/findTransientsFreqAndAmp.py | 68 ++++++++++++++++++++------- 1 file changed, 51 insertions(+), 17 deletions(-) diff --git a/src/guppy/findTransientsFreqAndAmp.py b/src/guppy/findTransientsFreqAndAmp.py index 5d505c2..0167d28 100755 --- a/src/guppy/findTransientsFreqAndAmp.py +++ b/src/guppy/findTransientsFreqAndAmp.py @@ -66,27 +66,62 @@ def findFreqAndAmp(filepath, inputParameters, window=15, numProcesses=mp.cpu_cou logger.info("Frequency and amplitude of transients in z_score data are calculated.") -def execute_visualize_peaks(filepath, inputParameters): +def execute_visualize_peaks(folderNames, inputParameters): + selectForTransientsComputation = inputParameters["selectForTransientsComputation"] + for i in range(len(folderNames)): + logger.debug(f"Finding transients in z-score data of {folderNames[i]} and calculating frequency and amplitude.") + filepath = folderNames[i] + storesListPath = takeOnlyDirs(glob.glob(os.path.join(filepath, "*_output_*"))) + for j in range(len(storesListPath)): + filepath = storesListPath[j] + if selectForTransientsComputation == "z_score": + path = glob.glob(os.path.join(filepath, 
"z_score_*")) + elif selectForTransientsComputation == "dff": + path = glob.glob(os.path.join(filepath, "dff_*")) + else: + path = glob.glob(os.path.join(filepath, "z_score_*")) + glob.glob(os.path.join(filepath, "dff_*")) + + for i in range(len(path)): + basename = (os.path.basename(path[i])).split(".")[0] + z_score, ts, peaksInd = read_transients_from_hdf5(filepath, basename) + + suptitle = os.path.basename(os.path.dirname(path[i])) + title = (os.path.basename(path[i])).split(".")[0] + visualize_peaks(title, suptitle, z_score, ts, peaksInd) + + logger.info("Frequency and amplitude of transients in z_score data are visualized.") + plt.show() + +def execute_visualize_peaks_combined(folderNames, inputParameters): selectForTransientsComputation = inputParameters["selectForTransientsComputation"] - if selectForTransientsComputation == "z_score": - path = glob.glob(os.path.join(filepath, "z_score_*")) - elif selectForTransientsComputation == "dff": - path = glob.glob(os.path.join(filepath, "dff_*")) - else: - path = glob.glob(os.path.join(filepath, "z_score_*")) + glob.glob(os.path.join(filepath, "dff_*")) + storesListPath = [] + for i in range(len(folderNames)): + filepath = folderNames[i] + storesListPath.append(takeOnlyDirs(glob.glob(os.path.join(filepath, "*_output_*")))) + storesListPath = list(np.concatenate(storesListPath).flatten()) + op = get_all_stores_for_combining_data(storesListPath) + for i in range(len(op)): + filepath = op[i][0] - for i in range(len(path)): - basename = (os.path.basename(path[i])).split(".")[0] - z_score, ts, peaksInd = read_transients_from_hdf5(filepath, basename) + if selectForTransientsComputation == "z_score": + path = glob.glob(os.path.join(filepath, "z_score_*")) + elif selectForTransientsComputation == "dff": + path = glob.glob(os.path.join(filepath, "dff_*")) + else: + path = glob.glob(os.path.join(filepath, "z_score_*")) + glob.glob(os.path.join(filepath, "dff_*")) + + for i in range(len(path)): + basename = (os.path.basename(path[i])).split(".")[0] + z_score, ts, peaksInd = read_transients_from_hdf5(filepath, basename) - suptitle = os.path.basename(os.path.dirname(path[i])) - title = (os.path.basename(path[i])).split(".")[0] - visualize_peaks(title, suptitle, z_score, ts, peaksInd) + suptitle = os.path.basename(os.path.dirname(path[i])) + title = (os.path.basename(path[i])).split(".")[0] + visualize_peaks(title, suptitle, z_score, ts, peaksInd) logger.info("Frequency and amplitude of transients in z_score data are calculated.") - # plt.show() + plt.show() def executeFindFreqAndAmp(inputParameters): @@ -115,8 +150,10 @@ def executeFindFreqAndAmp(inputParameters): else: if combine_data == True: execute_find_freq_and_amp_combined(inputParameters, folderNames, moving_window, numProcesses) + execute_visualize_peaks_combined(folderNames, inputParameters) else: execute_find_freq_and_amp(inputParameters, folderNames, moving_window, numProcesses) + execute_visualize_peaks(folderNames, inputParameters) logger.info("Transients in z-score data found and frequency and amplitude are calculated.") @@ -132,11 +169,9 @@ def execute_find_freq_and_amp(inputParameters, folderNames, moving_window, numPr 2, -1 ) findFreqAndAmp(filepath, inputParameters, window=moving_window, numProcesses=numProcesses) - execute_visualize_peaks(filepath, inputParameters) writeToFile(str(10 + ((inputParameters["step"] + 1) * 10)) + "\n") inputParameters["step"] += 1 logger.info("Transients in z-score data found and frequency and amplitude are calculated.") - plt.show() def 
execute_find_freq_and_amp_combined(inputParameters, folderNames, moving_window, numProcesses): @@ -152,7 +187,6 @@ def execute_find_freq_and_amp_combined(inputParameters, folderNames, moving_wind findFreqAndAmp(filepath, inputParameters, window=moving_window, numProcesses=numProcesses) writeToFile(str(10 + ((inputParameters["step"] + 1) * 10)) + "\n") inputParameters["step"] += 1 - plt.show() def execute_average_for_group(inputParameters, folderNamesForAvg): From 83641a8188440e9d43e04c5cc899ac895d4c3ca8 Mon Sep 17 00:00:00 2001 From: pauladkisson Date: Thu, 5 Feb 2026 13:09:14 -0800 Subject: [PATCH 41/53] Moved step 5 modules to orchestration --- src/guppy/orchestration/home.py | 2 +- .../{computePsth.py => orchestration/psth.py} | 20 +++++++++---------- .../transients.py} | 12 +++++------ src/guppy/testing/api.py | 4 ++-- 4 files changed, 19 insertions(+), 19 deletions(-) rename src/guppy/{computePsth.py => orchestration/psth.py} (96%) rename src/guppy/{findTransientsFreqAndAmp.py => orchestration/transients.py} (97%) diff --git a/src/guppy/orchestration/home.py b/src/guppy/orchestration/home.py index 541dc0d..3a7f8ee 100644 --- a/src/guppy/orchestration/home.py +++ b/src/guppy/orchestration/home.py @@ -31,7 +31,7 @@ def preprocess(parameter_form): def psthComputation(parameter_form, current_dir): inputParameters = parameter_form.getInputParameters() inputParameters["curr_dir"] = current_dir - subprocess.call([sys.executable, "-m", "guppy.computePsth", json.dumps(inputParameters)]) + subprocess.call([sys.executable, "-m", "guppy.orchestration.psth", json.dumps(inputParameters)]) def build_homepage(): diff --git a/src/guppy/computePsth.py b/src/guppy/orchestration/psth.py similarity index 96% rename from src/guppy/computePsth.py rename to src/guppy/orchestration/psth.py index 0b94220..78acb1c 100755 --- a/src/guppy/computePsth.py +++ b/src/guppy/orchestration/psth.py @@ -13,9 +13,9 @@ import numpy as np from scipy import signal as ss -from .analysis.compute_psth import compute_psth -from .analysis.cross_correlation import compute_cross_correlation -from .analysis.io_utils import ( +from ..analysis.compute_psth import compute_psth +from ..analysis.cross_correlation import compute_cross_correlation +from ..analysis.io_utils import ( get_all_stores_for_combining_data, make_dir_for_cross_correlation, makeAverageDir, @@ -23,19 +23,19 @@ read_hdf5, write_hdf5, ) -from .analysis.psth_average import averageForGroup -from .analysis.psth_peak_and_area import compute_psth_peak_and_area -from .analysis.psth_utils import ( +from ..analysis.psth_average import averageForGroup +from ..analysis.psth_peak_and_area import compute_psth_peak_and_area +from ..analysis.psth_utils import ( create_Df_for_cross_correlation, create_Df_for_psth, getCorrCombinations, ) -from .analysis.standard_io import ( +from ..analysis.standard_io import ( write_peak_and_area_to_csv, write_peak_and_area_to_hdf5, ) -from .frontend.progress import writeToFile -from .utils.utils import takeOnlyDirs +from ..frontend.progress import writeToFile +from ..utils.utils import takeOnlyDirs logger = logging.getLogger(__name__) @@ -347,7 +347,7 @@ def psthForEachStorename(inputParameters): def main(input_parameters): try: inputParameters = psthForEachStorename(input_parameters) - subprocess.call([sys.executable, "-m", "guppy.findTransientsFreqAndAmp", json.dumps(inputParameters)]) + subprocess.call([sys.executable, "-m", "guppy.orchestration.transients", json.dumps(inputParameters)]) logger.info("#" * 400) except Exception as e: with 
open(os.path.join(os.path.expanduser("~"), "pbSteps.txt"), "a") as file: diff --git a/src/guppy/findTransientsFreqAndAmp.py b/src/guppy/orchestration/transients.py similarity index 97% rename from src/guppy/findTransientsFreqAndAmp.py rename to src/guppy/orchestration/transients.py index 0167d28..bac585b 100755 --- a/src/guppy/findTransientsFreqAndAmp.py +++ b/src/guppy/orchestration/transients.py @@ -8,21 +8,21 @@ import matplotlib.pyplot as plt import numpy as np -from .analysis.io_utils import ( +from ..analysis.io_utils import ( get_all_stores_for_combining_data, read_hdf5, takeOnlyDirs, ) -from .analysis.standard_io import ( +from ..analysis.standard_io import ( read_transients_from_hdf5, write_freq_and_amp_to_csv, write_freq_and_amp_to_hdf5, write_transients_to_hdf5, ) -from .analysis.transients import analyze_transients -from .analysis.transients_average import averageForGroup -from .frontend.progress import writeToFile -from .visualization.transients import visualize_peaks +from ..analysis.transients import analyze_transients +from ..analysis.transients_average import averageForGroup +from ..frontend.progress import writeToFile +from ..visualization.transients import visualize_peaks logger = logging.getLogger(__name__) diff --git a/src/guppy/testing/api.py b/src/guppy/testing/api.py index 6085457..834a9e1 100644 --- a/src/guppy/testing/api.py +++ b/src/guppy/testing/api.py @@ -13,12 +13,12 @@ import os from typing import Iterable -from guppy.computePsth import psthForEachStorename -from guppy.findTransientsFreqAndAmp import executeFindFreqAndAmp from guppy.orchestration.home import build_homepage from guppy.orchestration.preprocess import extractTsAndSignal +from guppy.orchestration.psth import psthForEachStorename from guppy.orchestration.read_raw_data import orchestrate_read_raw_data from guppy.orchestration.storenames import orchestrate_storenames_page +from guppy.orchestration.transients import executeFindFreqAndAmp def step1(*, base_dir: str, selected_folders: Iterable[str]) -> None: From 8163cbc77fc04f643d0834c9f45d932b931c3bb7 Mon Sep 17 00:00:00 2001 From: pauladkisson Date: Thu, 5 Feb 2026 14:52:41 -0800 Subject: [PATCH 42/53] Moved get_all_stores_for_combining_data to utils.utils --- src/guppy/analysis/io_utils.py | 14 -------------- src/guppy/orchestration/preprocess.py | 2 +- src/guppy/orchestration/psth.py | 3 +-- src/guppy/orchestration/transients.py | 3 +-- src/guppy/utils/utils.py | 17 +++++++++++++++++ src/guppy/visualizePlot.py | 3 +-- 6 files changed, 21 insertions(+), 21 deletions(-) diff --git a/src/guppy/analysis/io_utils.py b/src/guppy/analysis/io_utils.py index 892d8c5..3dacd1d 100644 --- a/src/guppy/analysis/io_utils.py +++ b/src/guppy/analysis/io_utils.py @@ -137,20 +137,6 @@ def get_coords(filepath, name, tsNew, removeArtifacts): # TODO: Make less redun return coords -def get_all_stores_for_combining_data(folderNames): - op = [] - for i in range(100): - temp = [] - match = r"[\s\S]*" + "_output_" + str(i) - for j in folderNames: - temp.append(re.findall(match, j)) - temp = sorted(list(np.concatenate(temp).flatten()), key=str.casefold) - if len(temp) > 0: - op.append(temp) - - return op - - # for combining data, reading storeslist file from both data and create a new storeslist array def check_storeslistfile(folderNames): storesList = np.array([[], []]) diff --git a/src/guppy/orchestration/preprocess.py b/src/guppy/orchestration/preprocess.py index 21f9ff9..b5b9d0d 100755 --- a/src/guppy/orchestration/preprocess.py +++ 
b/src/guppy/orchestration/preprocess.py @@ -15,7 +15,6 @@ check_storeslistfile, check_TDT, find_files, - get_all_stores_for_combining_data, # noqa: F401 -- Necessary for other modules that depend on preprocess.py get_coords, read_hdf5, takeOnlyDirs, @@ -42,6 +41,7 @@ from ..analysis.z_score import compute_z_score from ..frontend.artifact_removal import ArtifactRemovalWidget from ..frontend.progress import writeToFile +from ..utils.utils import get_all_stores_for_combining_data from ..visualization.preprocessing import visualize_preprocessing logger = logging.getLogger(__name__) diff --git a/src/guppy/orchestration/psth.py b/src/guppy/orchestration/psth.py index 78acb1c..c372cbd 100755 --- a/src/guppy/orchestration/psth.py +++ b/src/guppy/orchestration/psth.py @@ -16,7 +16,6 @@ from ..analysis.compute_psth import compute_psth from ..analysis.cross_correlation import compute_cross_correlation from ..analysis.io_utils import ( - get_all_stores_for_combining_data, make_dir_for_cross_correlation, makeAverageDir, read_Df, @@ -35,7 +34,7 @@ write_peak_and_area_to_hdf5, ) from ..frontend.progress import writeToFile -from ..utils.utils import takeOnlyDirs +from ..utils.utils import get_all_stores_for_combining_data, takeOnlyDirs logger = logging.getLogger(__name__) diff --git a/src/guppy/orchestration/transients.py b/src/guppy/orchestration/transients.py index bac585b..4d899da 100755 --- a/src/guppy/orchestration/transients.py +++ b/src/guppy/orchestration/transients.py @@ -9,9 +9,7 @@ import numpy as np from ..analysis.io_utils import ( - get_all_stores_for_combining_data, read_hdf5, - takeOnlyDirs, ) from ..analysis.standard_io import ( read_transients_from_hdf5, @@ -22,6 +20,7 @@ from ..analysis.transients import analyze_transients from ..analysis.transients_average import averageForGroup from ..frontend.progress import writeToFile +from ..utils.utils import get_all_stores_for_combining_data, takeOnlyDirs from ..visualization.transients import visualize_peaks logger = logging.getLogger(__name__) diff --git a/src/guppy/utils/utils.py b/src/guppy/utils/utils.py index 4c4759f..91a8f5c 100644 --- a/src/guppy/utils/utils.py +++ b/src/guppy/utils/utils.py @@ -1,5 +1,8 @@ import logging import os +import re + +import numpy as np logger = logging.getLogger(__name__) @@ -10,3 +13,17 @@ def takeOnlyDirs(paths): if os.path.isfile(p): removePaths.append(p) return list(set(paths) - set(removePaths)) + + +def get_all_stores_for_combining_data(folderNames): + op = [] + for i in range(100): + temp = [] + match = r"[\s\S]*" + "_output_" + str(i) + for j in folderNames: + temp.append(re.findall(match, j)) + temp = sorted(list(np.concatenate(temp).flatten()), key=str.casefold) + if len(temp) > 0: + op.append(temp) + + return op diff --git a/src/guppy/visualizePlot.py b/src/guppy/visualizePlot.py index 1383190..fdf89cd 100755 --- a/src/guppy/visualizePlot.py +++ b/src/guppy/visualizePlot.py @@ -18,8 +18,7 @@ from holoviews.operation.datashader import datashade from holoviews.plotting.util import process_cmap -from .orchestration.preprocess import get_all_stores_for_combining_data -from .utils.utils import takeOnlyDirs +from .utils.utils import get_all_stores_for_combining_data, takeOnlyDirs pn.extension() From 0e98e375b2b9fc3e0ebef91921a8632463c936a0 Mon Sep 17 00:00:00 2001 From: pauladkisson Date: Thu, 5 Feb 2026 14:59:12 -0800 Subject: [PATCH 43/53] Moved scanPortsAndFind to frontend_utils --- src/guppy/frontend/frontend_utils.py | 19 +++++++++++++++++++ src/guppy/orchestration/storenames.py | 17 
+---------------- src/guppy/visualizePlot.py | 17 +---------------- 3 files changed, 21 insertions(+), 32 deletions(-) create mode 100644 src/guppy/frontend/frontend_utils.py diff --git a/src/guppy/frontend/frontend_utils.py b/src/guppy/frontend/frontend_utils.py new file mode 100644 index 0000000..572e798 --- /dev/null +++ b/src/guppy/frontend/frontend_utils.py @@ -0,0 +1,19 @@ +import logging +import socket +from random import randint + +logger = logging.getLogger(__name__) + + +def scanPortsAndFind(start_port=5000, end_port=5200, host="127.0.0.1"): + while True: + port = randint(start_port, end_port) + sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) + sock.settimeout(0.001) # Set timeout to avoid long waiting on closed ports + result = sock.connect_ex((host, port)) + if result == 0: # If the connection is successful, the port is open + continue + else: + break + + return port diff --git a/src/guppy/orchestration/storenames.py b/src/guppy/orchestration/storenames.py index 39edc44..4a015d9 100755 --- a/src/guppy/orchestration/storenames.py +++ b/src/guppy/orchestration/storenames.py @@ -2,9 +2,7 @@ import json import logging import os -import socket from pathlib import Path -from random import randint import holoviews as hv # noqa: F401 import numpy as np @@ -16,6 +14,7 @@ NpmRecordingExtractor, TdtRecordingExtractor, ) +from guppy.frontend.frontend_utils import scanPortsAndFind from guppy.frontend.npm_gui_prompts import ( get_multi_event_responses, get_timestamp_configuration, @@ -32,20 +31,6 @@ logger = logging.getLogger(__name__) -def scanPortsAndFind(start_port=5000, end_port=5200, host="127.0.0.1"): - while True: - port = randint(start_port, end_port) - sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) - sock.settimeout(0.001) # Set timeout to avoid long waiting on closed ports - result = sock.connect_ex((host, port)) - if result == 0: # If the connection is successful, the port is open - continue - else: - break - - return port - - # function to show location for over-writing or creating a new stores list file. 
def show_dir(filepath): i = 1 diff --git a/src/guppy/visualizePlot.py b/src/guppy/visualizePlot.py index fdf89cd..9f4b1ab 100755 --- a/src/guppy/visualizePlot.py +++ b/src/guppy/visualizePlot.py @@ -3,8 +3,6 @@ import math import os import re -import socket -from random import randint import datashader as ds import holoviews as hv @@ -18,6 +16,7 @@ from holoviews.operation.datashader import datashade from holoviews.plotting.util import process_cmap +from .frontend.frontend_utils import scanPortsAndFind from .utils.utils import get_all_stores_for_combining_data, takeOnlyDirs pn.extension() @@ -25,20 +24,6 @@ logger = logging.getLogger(__name__) -def scanPortsAndFind(start_port=5000, end_port=5200, host="127.0.0.1"): - while True: - port = randint(start_port, end_port) - sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) - sock.settimeout(0.001) # Set timeout to avoid long waiting on closed ports - result = sock.connect_ex((host, port)) - if result == 0: # If the connection is successful, the port is open - continue - else: - break - - return port - - # read h5 file as a dataframe def read_Df(filepath, event, name): event = event.replace("\\", "_") From cb373952d1d10763c29e58e773bd2e9ee7b3bd99 Mon Sep 17 00:00:00 2001 From: pauladkisson Date: Thu, 5 Feb 2026 15:05:38 -0800 Subject: [PATCH 44/53] Moved read_Df to utils.utils --- src/guppy/analysis/io_utils.py | 14 -------------- src/guppy/analysis/psth_average.py | 2 +- src/guppy/orchestration/psth.py | 3 +-- src/guppy/utils/utils.py | 14 ++++++++++++++ src/guppy/visualizePlot.py | 15 +-------------- 5 files changed, 17 insertions(+), 31 deletions(-) diff --git a/src/guppy/analysis/io_utils.py b/src/guppy/analysis/io_utils.py index 3dacd1d..97f7ecf 100644 --- a/src/guppy/analysis/io_utils.py +++ b/src/guppy/analysis/io_utils.py @@ -6,7 +6,6 @@ import h5py import numpy as np -import pandas as pd from ..utils.utils import takeOnlyDirs @@ -177,19 +176,6 @@ def get_control_and_signal_channel_names(storesList): return channels_arr -# function to read h5 file and make a dataframe from it -def read_Df(filepath, event, name): - event = event.replace("\\", "_") - event = event.replace("/", "_") - if name: - op = os.path.join(filepath, event + "_{}.h5".format(name)) - else: - op = os.path.join(filepath, event + ".h5") - df = pd.read_hdf(op, key="df", mode="r") - - return df - - def make_dir_for_cross_correlation(filepath): op = os.path.join(filepath, "cross_correlation_output") if not os.path.exists(op): diff --git a/src/guppy/analysis/psth_average.py b/src/guppy/analysis/psth_average.py index 664cc3d..5df8c87 100644 --- a/src/guppy/analysis/psth_average.py +++ b/src/guppy/analysis/psth_average.py @@ -10,10 +10,10 @@ from .io_utils import ( make_dir_for_cross_correlation, makeAverageDir, - read_Df, write_hdf5, ) from .psth_utils import create_Df_for_psth, getCorrCombinations +from ..utils.utils import read_Df logger = logging.getLogger(__name__) diff --git a/src/guppy/orchestration/psth.py b/src/guppy/orchestration/psth.py index c372cbd..e1d78de 100755 --- a/src/guppy/orchestration/psth.py +++ b/src/guppy/orchestration/psth.py @@ -18,7 +18,6 @@ from ..analysis.io_utils import ( make_dir_for_cross_correlation, makeAverageDir, - read_Df, read_hdf5, write_hdf5, ) @@ -34,7 +33,7 @@ write_peak_and_area_to_hdf5, ) from ..frontend.progress import writeToFile -from ..utils.utils import get_all_stores_for_combining_data, takeOnlyDirs +from ..utils.utils import get_all_stores_for_combining_data, read_Df, takeOnlyDirs logger = logging.getLogger(__name__) 
diff --git a/src/guppy/utils/utils.py b/src/guppy/utils/utils.py index 91a8f5c..7f7bb17 100644 --- a/src/guppy/utils/utils.py +++ b/src/guppy/utils/utils.py @@ -3,6 +3,7 @@ import re import numpy as np +import pandas as pd logger = logging.getLogger(__name__) @@ -27,3 +28,16 @@ def get_all_stores_for_combining_data(folderNames): op.append(temp) return op + + +# function to read h5 file and make a dataframe from it +def read_Df(filepath, event, name): + event = event.replace("\\", "_") + event = event.replace("/", "_") + if name: + op = os.path.join(filepath, event + "_{}.h5".format(name)) + else: + op = os.path.join(filepath, event + ".h5") + df = pd.read_hdf(op, key="df", mode="r") + + return df diff --git a/src/guppy/visualizePlot.py b/src/guppy/visualizePlot.py index 9f4b1ab..a887d97 100755 --- a/src/guppy/visualizePlot.py +++ b/src/guppy/visualizePlot.py @@ -17,26 +17,13 @@ from holoviews.plotting.util import process_cmap from .frontend.frontend_utils import scanPortsAndFind -from .utils.utils import get_all_stores_for_combining_data, takeOnlyDirs +from .utils.utils import get_all_stores_for_combining_data, read_Df, takeOnlyDirs pn.extension() logger = logging.getLogger(__name__) -# read h5 file as a dataframe -def read_Df(filepath, event, name): - event = event.replace("\\", "_") - event = event.replace("/", "_") - if name: - op = os.path.join(filepath, event + "_{}.h5".format(name)) - else: - op = os.path.join(filepath, event + ".h5") - df = pd.read_hdf(op, key="df", mode="r") - - return df - - # make a new directory for saving plots def make_dir(filepath): op = os.path.join(filepath, "saved_plots") From 561e074d655bb9d170d1980b9fa9fad267936fed Mon Sep 17 00:00:00 2001 From: pauladkisson Date: Thu, 5 Feb 2026 15:07:08 -0800 Subject: [PATCH 45/53] removed comment --- src/guppy/visualizePlot.py | 3 --- 1 file changed, 3 deletions(-) diff --git a/src/guppy/visualizePlot.py b/src/guppy/visualizePlot.py index a887d97..d0608c6 100755 --- a/src/guppy/visualizePlot.py +++ b/src/guppy/visualizePlot.py @@ -43,9 +43,6 @@ def remove_cols(cols): return cols -# def look_psth_bins(event, name): - - # helper function to create plots def helper_plots(filepath, event, name, inputParameters): From 6e693f9b84016c92ac2c1cecbdd976c328ccb32f Mon Sep 17 00:00:00 2001 From: pauladkisson Date: Fri, 6 Feb 2026 09:23:41 -0800 Subject: [PATCH 46/53] Refactored Viewer class into a dedicated module. 
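The param.Parameterized Viewer class and its small helpers (remove_cols and
make_dir) move largely verbatim from visualizePlot.py into
guppy/frontend/visualization.py, leaving visualizePlot.py to handle orchestration
only. As a reminder of what the column filter does, a sketch of remove_cols on
made-up column names (example values only, not from this diff):

    cols = ["trial1", "trial2", "bin_err_1", "err", "timestamps", "mean"]
    remove_cols(cols)  # -> ["trial1", "trial2", "mean"]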
--- src/guppy/frontend/visualization.py | 559 +++++++++++++++++++++++++ src/guppy/visualizePlot.py | 621 ++-------------------------- 2 files changed, 604 insertions(+), 576 deletions(-) create mode 100644 src/guppy/frontend/visualization.py diff --git a/src/guppy/frontend/visualization.py b/src/guppy/frontend/visualization.py new file mode 100644 index 0000000..9f8f41b --- /dev/null +++ b/src/guppy/frontend/visualization.py @@ -0,0 +1,559 @@ +import logging +import math +import os +import re + +import datashader as ds +import holoviews as hv +import numpy as np +import pandas as pd +import panel as pn +import param +from bokeh.io import export_png, export_svgs +from holoviews import opts +from holoviews.operation.datashader import datashade +from holoviews.plotting.util import process_cmap + +pn.extension() + +logger = logging.getLogger(__name__) + + +# remove unnecessary column names +def remove_cols(cols): + regex = re.compile("bin_err_*") + remove_cols = [cols[i] for i in range(len(cols)) if regex.match(cols[i])] + remove_cols = remove_cols + ["err", "timestamps"] + cols = [i for i in cols if i not in remove_cols] + + return cols + + +# make a new directory for saving plots +def make_dir(filepath): + op = os.path.join(filepath, "saved_plots") + if not os.path.exists(op): + os.mkdir(op) + + return op + + +# create a class to make GUI and plot different graphs +class Viewer(param.Parameterized): + filepath = param.Path(default=None) + # create different options and selectors + event_selector = param.ObjectSelector(default=None) + event_selector_heatmap = param.ObjectSelector(default=None) + selector_for_multipe_events_plot = param.ListSelector(default=None) + columns_dict = param.Dict(default=None) + df_new = param.DataFrame(default=None) + x_min = param.Number(default=None) + x_max = param.Number(default=None) + select_trials_checkbox = param.ListSelector(default=["just trials"], objects=["mean", "just trials"]) + Y_Label = param.ObjectSelector(default="y", objects=["y", "z-score", "\u0394F/F"]) + save_options = param.ObjectSelector( + default="None", objects=["None", "save_png_format", "save_svg_format", "save_both_format"] + ) + save_options_heatmap = param.ObjectSelector( + default="None", objects=["None", "save_png_format", "save_svg_format", "save_both_format"] + ) + color_map = param.ObjectSelector(default="plasma") + height_heatmap = param.ObjectSelector(default=600, objects=list(np.arange(0, 5100, 100))[1:]) + width_heatmap = param.ObjectSelector(default=1000, objects=list(np.arange(0, 5100, 100))[1:]) + Height_Plot = param.ObjectSelector(default=300, objects=list(np.arange(0, 5100, 100))[1:]) + Width_Plot = param.ObjectSelector(default=1000, objects=list(np.arange(0, 5100, 100))[1:]) + save_hm = param.Action(lambda x: x.param.trigger("save_hm"), label="Save") + save_psth = param.Action(lambda x: x.param.trigger("save_psth"), label="Save") + X_Limit = param.Range(default=(-5, 10)) + Y_Limit = param.Range(bounds=(-50, 50.0)) + + x = param.ObjectSelector(default=None) + y = param.ObjectSelector(default=None) + heatmap_y = param.ListSelector(default=None) + psth_y = param.ListSelector(default=None) + results_hm = dict() + results_psth = dict() + + def __init__(self, **params): + super().__init__(**params) + self.param.X_Limit.bounds = (self.x_min, self.x_max) + + # function to save heatmaps when save button on heatmap tab is clicked + @param.depends("save_hm", watch=True) + def save_hm_plots(self): + plot = self.results_hm["plot"] + op = self.results_hm["op"] + save_opts = 
self.save_options_heatmap + logger.info(save_opts) + if save_opts == "save_svg_format": + p = hv.render(plot, backend="bokeh") + p.output_backend = "svg" + export_svgs(p, filename=op + ".svg") + elif save_opts == "save_png_format": + p = hv.render(plot, backend="bokeh") + export_png(p, filename=op + ".png") + elif save_opts == "save_both_format": + p = hv.render(plot, backend="bokeh") + p.output_backend = "svg" + export_svgs(p, filename=op + ".svg") + p_png = hv.render(plot, backend="bokeh") + export_png(p_png, filename=op + ".png") + else: + return 0 + + # function to save PSTH plots when save button on PSTH tab is clicked + @param.depends("save_psth", watch=True) + def save_psth_plot(self): + plot, op = [], [] + plot.append(self.results_psth["plot_combine"]) + op.append(self.results_psth["op_combine"]) + plot.append(self.results_psth["plot"]) + op.append(self.results_psth["op"]) + for i in range(len(plot)): + temp_plot, temp_op = plot[i], op[i] + save_opts = self.save_options + if save_opts == "save_svg_format": + p = hv.render(temp_plot, backend="bokeh") + p.output_backend = "svg" + export_svgs(p, filename=temp_op + ".svg") + elif save_opts == "save_png_format": + p = hv.render(temp_plot, backend="bokeh") + export_png(p, filename=temp_op + ".png") + elif save_opts == "save_both_format": + p = hv.render(temp_plot, backend="bokeh") + p.output_backend = "svg" + export_svgs(p, filename=temp_op + ".svg") + p_png = hv.render(temp_plot, backend="bokeh") + export_png(p_png, filename=temp_op + ".png") + else: + return 0 + + # function to change Y values based on event selection + @param.depends("event_selector", watch=True) + def _update_x_y(self): + x_value = self.columns[self.event_selector] + y_value = self.columns[self.event_selector] + self.param["x"].objects = [x_value[-4]] + self.param["y"].objects = remove_cols(y_value) + self.x = x_value[-4] + self.y = self.param["y"].objects[-2] + + @param.depends("event_selector_heatmap", watch=True) + def _update_df(self): + cols = self.columns[self.event_selector_heatmap] + trial_no = range(1, len(remove_cols(cols)[:-2]) + 1) + trial_ts = ["{} - {}".format(i, j) for i, j in zip(trial_no, remove_cols(cols)[:-2])] + ["All"] + self.param["heatmap_y"].objects = trial_ts + self.heatmap_y = [trial_ts[-1]] + + @param.depends("event_selector", watch=True) + def _update_psth_y(self): + cols = self.columns[self.event_selector] + trial_no = range(1, len(remove_cols(cols)[:-2]) + 1) + trial_ts = ["{} - {}".format(i, j) for i, j in zip(trial_no, remove_cols(cols)[:-2])] + self.param["psth_y"].objects = trial_ts + self.psth_y = [trial_ts[0]] + + # function to plot multiple PSTHs into one plot + + @param.depends( + "selector_for_multipe_events_plot", + "Y_Label", + "save_options", + "X_Limit", + "Y_Limit", + "Height_Plot", + "Width_Plot", + ) + def update_selector(self): + data_curve, cols_curve, data_spread, cols_spread = [], [], [], [] + arr = self.selector_for_multipe_events_plot + df1 = self.df_new + for i in range(len(arr)): + if "bin" in arr[i]: + split = arr[i].rsplit("_", 2) + df_name = split[0] #'{}_{}'.format(split[0], split[1]) + col_name_mean = "{}_{}".format(split[-2], split[-1]) + col_name_err = "{}_err_{}".format(split[-2], split[-1]) + data_curve.append(df1[df_name][col_name_mean]) + cols_curve.append(arr[i]) + data_spread.append(df1[df_name][col_name_err]) + cols_spread.append(arr[i]) + else: + data_curve.append(df1[arr[i]]["mean"]) + cols_curve.append(arr[i] + "_" + "mean") + data_spread.append(df1[arr[i]]["err"]) + cols_spread.append(arr[i] + 
"_" + "mean") + + if len(arr) > 0: + if self.Y_Limit == None: + self.Y_Limit = (np.nanmin(np.asarray(data_curve)) - 0.5, np.nanmax(np.asarray(data_curve)) + 0.5) + + if "bin" in arr[i]: + split = arr[i].rsplit("_", 2) + df_name = split[0] + data_curve.append(df1[df_name]["timestamps"]) + cols_curve.append("timestamps") + data_spread.append(df1[df_name]["timestamps"]) + cols_spread.append("timestamps") + else: + data_curve.append(df1[arr[i]]["timestamps"]) + cols_curve.append("timestamps") + data_spread.append(df1[arr[i]]["timestamps"]) + cols_spread.append("timestamps") + df_curve = pd.concat(data_curve, axis=1) + df_spread = pd.concat(data_spread, axis=1) + df_curve.columns = cols_curve + df_spread.columns = cols_spread + + ts = df_curve["timestamps"] + index = np.arange(0, ts.shape[0], 3) + df_curve = df_curve.loc[index, :] + df_spread = df_spread.loc[index, :] + overlay = hv.NdOverlay( + { + c: hv.Curve((df_curve["timestamps"], df_curve[c]), kdims=["Time (s)"]).opts( + width=int(self.Width_Plot), + height=int(self.Height_Plot), + xlim=self.X_Limit, + ylim=self.Y_Limit, + ) + for c in cols_curve[:-1] + } + ) + spread = hv.NdOverlay( + { + d: hv.Spread( + (df_spread["timestamps"], df_curve[d], df_spread[d], df_spread[d]), + vdims=["y", "yerrpos", "yerrneg"], + ).opts(line_width=0, fill_alpha=0.3) + for d in cols_spread[:-1] + } + ) + plot_combine = ((overlay * spread).opts(opts.NdOverlay(xlabel="Time (s)", ylabel=self.Y_Label))).opts( + shared_axes=False + ) + # plot_err = new_df.hvplot.area(x='timestamps', y=[], y2=[]) + save_opts = self.save_options + op = make_dir(self.filepath) + op_filename = os.path.join(op, str(arr) + "_mean") + + self.results_psth["plot_combine"] = plot_combine + self.results_psth["op_combine"] = op_filename + # self.save_plots(plot_combine, save_opts, op_filename) + return plot_combine + + # function to plot mean PSTH, single trial in PSTH and all the trials of PSTH with mean + @param.depends( + "event_selector", "x", "y", "Y_Label", "save_options", "Y_Limit", "X_Limit", "Height_Plot", "Width_Plot" + ) + def contPlot(self): + print(f"{self.event_selector = }") + print(f"{self.df_new = }") + df1 = self.df_new[self.event_selector] + print(f"{df1.columns = }") + # height = self.Heigth_Plot + # width = self.Width_Plot + # logger.info(height, width) + if self.y == "All": + if self.Y_Limit == None: + self.Y_Limit = (np.nanmin(np.asarray(df1)) - 0.5, np.nanmax(np.asarray(df1)) - 0.5) + + options = self.param["y"].objects + regex = re.compile("bin_[(]") + remove_bin_trials = [options[i] for i in range(len(options)) if not regex.match(options[i])] + + ndoverlay = hv.NdOverlay({c: hv.Curve((df1[self.x], df1[c])) for c in remove_bin_trials[:-2]}) + img1 = datashade(ndoverlay, normalization="linear", aggregator=ds.count()) + x_points = df1[self.x] + y_points = df1["mean"] + img2 = hv.Curve((x_points, y_points)) + img = (img1 * img2).opts( + opts.Curve( + width=int(self.Width_Plot), + height=int(self.Height_Plot), + line_width=4, + color="black", + xlim=self.X_Limit, + ylim=self.Y_Limit, + xlabel="Time (s)", + ylabel=self.Y_Label, + ) + ) + + save_opts = self.save_options + + op = make_dir(self.filepath) + op_filename = os.path.join(op, self.event_selector + "_" + self.y) + self.results_psth["plot"] = img + self.results_psth["op"] = op_filename + # self.save_plots(img, save_opts, op_filename) + + return img + + elif self.y == "mean" or "bin" in self.y: + + xpoints = df1[self.x] + ypoints = df1[self.y] + if self.y == "mean": + err = df1["err"] + else: + split = 
self.y.split("_") + err = df1["{}_err_{}".format(split[0], split[1])] + + index = np.arange(0, xpoints.shape[0], 3) + + if self.Y_Limit == None: + self.Y_Limit = (np.nanmin(ypoints) - 0.5, np.nanmax(ypoints) + 0.5) + + ropts_curve = dict( + width=int(self.Width_Plot), + height=int(self.Height_Plot), + xlim=self.X_Limit, + ylim=self.Y_Limit, + color="blue", + xlabel="Time (s)", + ylabel=self.Y_Label, + ) + ropts_spread = dict( + width=int(self.Width_Plot), + height=int(self.Height_Plot), + fill_alpha=0.3, + fill_color="blue", + line_width=0, + ) + + plot_curve = hv.Curve((xpoints[index], ypoints[index])) # .opts(**ropts_curve) + plot_spread = hv.Spread( + (xpoints[index], ypoints[index], err[index], err[index]) + ) # .opts(**ropts_spread) #vdims=['y', 'yerrpos', 'yerrneg'] + plot = (plot_curve * plot_spread).opts({"Curve": ropts_curve, "Spread": ropts_spread}) + + save_opts = self.save_options + op = make_dir(self.filepath) + op_filename = os.path.join(op, self.event_selector + "_" + self.y) + self.results_psth["plot"] = plot + self.results_psth["op"] = op_filename + # self.save_plots(plot, save_opts, op_filename) + + return plot + + else: + print(f"{self.x = }") + xpoints = df1[self.x] + ypoints = df1[self.y] + if self.Y_Limit == None: + self.Y_Limit = (np.nanmin(ypoints) - 0.5, np.nanmax(ypoints) + 0.5) + + ropts_curve = dict( + width=int(self.Width_Plot), + height=int(self.Height_Plot), + xlim=self.X_Limit, + ylim=self.Y_Limit, + color="blue", + xlabel="Time (s)", + ylabel=self.Y_Label, + ) + plot = hv.Curve((xpoints, ypoints)).opts({"Curve": ropts_curve}) + + save_opts = self.save_options + op = make_dir(self.filepath) + op_filename = os.path.join(op, self.event_selector + "_" + self.y) + self.results_psth["plot"] = plot + self.results_psth["op"] = op_filename + # self.save_plots(plot, save_opts, op_filename) + + return plot + + # function to plot specific PSTH trials + @param.depends( + "event_selector", + "x", + "psth_y", + "select_trials_checkbox", + "Y_Label", + "save_options", + "Y_Limit", + "X_Limit", + "Height_Plot", + "Width_Plot", + ) + def plot_specific_trials(self): + df_psth = self.df_new[self.event_selector] + # if self.Y_Limit==None: + # self.Y_Limit = (np.nanmin(ypoints)-0.5, np.nanmax(ypoints)+0.5) + + if self.psth_y == None: + return None + else: + selected_trials = [s.split(" - ")[1] for s in list(self.psth_y)] + + index = np.arange(0, df_psth["timestamps"].shape[0], 3) + + if self.select_trials_checkbox == ["just trials"]: + overlay = hv.NdOverlay( + { + c: hv.Curve((df_psth["timestamps"][index], df_psth[c][index]), kdims=["Time (s)"]) + for c in selected_trials + } + ) + ropts = dict( + width=int(self.Width_Plot), + height=int(self.Height_Plot), + xlim=self.X_Limit, + ylim=self.Y_Limit, + xlabel="Time (s)", + ylabel=self.Y_Label, + ) + return overlay.opts(**ropts) + elif self.select_trials_checkbox == ["mean"]: + arr = np.asarray(df_psth[selected_trials]) + mean = np.nanmean(arr, axis=1) + err = np.nanstd(arr, axis=1) / math.sqrt(arr.shape[1]) + ropts_curve = dict( + width=int(self.Width_Plot), + height=int(self.Height_Plot), + xlim=self.X_Limit, + ylim=self.Y_Limit, + color="blue", + xlabel="Time (s)", + ylabel=self.Y_Label, + ) + ropts_spread = dict( + width=int(self.Width_Plot), + height=int(self.Height_Plot), + fill_alpha=0.3, + fill_color="blue", + line_width=0, + ) + plot_curve = hv.Curve((df_psth["timestamps"][index], mean[index])) + plot_spread = hv.Spread((df_psth["timestamps"][index], mean[index], err[index], err[index])) + plot = (plot_curve * 
plot_spread).opts({"Curve": ropts_curve, "Spread": ropts_spread}) + return plot + elif self.select_trials_checkbox == ["mean", "just trials"]: + overlay = hv.NdOverlay( + { + c: hv.Curve((df_psth["timestamps"][index], df_psth[c][index]), kdims=["Time (s)"]) + for c in selected_trials + } + ) + ropts_overlay = dict( + width=int(self.Width_Plot), + height=int(self.Height_Plot), + xlim=self.X_Limit, + ylim=self.Y_Limit, + xlabel="Time (s)", + ylabel=self.Y_Label, + ) + + arr = np.asarray(df_psth[selected_trials]) + mean = np.nanmean(arr, axis=1) + err = np.nanstd(arr, axis=1) / math.sqrt(arr.shape[1]) + ropts_curve = dict( + width=int(self.Width_Plot), + height=int(self.Height_Plot), + xlim=self.X_Limit, + ylim=self.Y_Limit, + color="black", + xlabel="Time (s)", + ylabel=self.Y_Label, + ) + ropts_spread = dict( + width=int(self.Width_Plot), + height=int(self.Height_Plot), + fill_alpha=0.3, + fill_color="black", + line_width=0, + ) + plot_curve = hv.Curve((df_psth["timestamps"][index], mean[index])) + plot_spread = hv.Spread((df_psth["timestamps"][index], mean[index], err[index], err[index])) + + plot = (plot_curve * plot_spread).opts({"Curve": ropts_curve, "Spread": ropts_spread}) + return overlay.opts(**ropts_overlay) * plot + + # function to show heatmaps for each event + @param.depends("event_selector_heatmap", "color_map", "height_heatmap", "width_heatmap", "heatmap_y") + def heatmap(self): + height = self.height_heatmap + width = self.width_heatmap + df_hm = self.df_new[self.event_selector_heatmap] + cols = list(df_hm.columns) + regex = re.compile("bin_err_*") + drop_cols = [cols[i] for i in range(len(cols)) if regex.match(cols[i])] + drop_cols = ["err", "mean"] + drop_cols + df_hm = df_hm.drop(drop_cols, axis=1) + cols = list(df_hm.columns) + bin_cols = [cols[i] for i in range(len(cols)) if re.compile("bin_*").match(cols[i])] + time = np.asarray(df_hm["timestamps"]) + event_ts_for_each_event = np.arange(1, len(df_hm.columns[:-1]) + 1) + yticks = list(event_ts_for_each_event) + z_score = np.asarray(df_hm[df_hm.columns[:-1]]).T + + if self.heatmap_y[0] == "All": + indices = np.arange(z_score.shape[0] - len(bin_cols)) + z_score = z_score[indices, :] + event_ts_for_each_event = np.arange(1, z_score.shape[0] + 1) + yticks = list(event_ts_for_each_event) + else: + remove_all = list(set(self.heatmap_y) - set(["All"])) + indices = sorted([int(s.split("-")[0]) - 1 for s in remove_all]) + z_score = z_score[indices, :] + event_ts_for_each_event = np.arange(1, z_score.shape[0] + 1) + yticks = list(event_ts_for_each_event) + + clim = (np.nanmin(z_score), np.nanmax(z_score)) + font_size = {"labels": 16, "yticks": 6} + + if event_ts_for_each_event.shape[0] == 1: + dummy_image = hv.QuadMesh((time, event_ts_for_each_event, z_score)).opts(colorbar=True, clim=clim) + image = ( + (dummy_image).opts( + opts.QuadMesh( + width=int(width), + height=int(height), + cmap=process_cmap(self.color_map, provider="matplotlib"), + colorbar=True, + ylabel="Trials", + xlabel="Time (s)", + fontsize=font_size, + yticks=yticks, + ) + ) + ).opts(shared_axes=False) + + save_opts = self.save_options_heatmap + op = make_dir(self.filepath) + op_filename = os.path.join(op, self.event_selector_heatmap + "_" + "heatmap") + self.results_hm["plot"] = image + self.results_hm["op"] = op_filename + # self.save_plots(image, save_opts, op_filename) + return image + else: + ropts = dict( + width=int(width), + height=int(height), + ylabel="Trials", + xlabel="Time (s)", + fontsize=font_size, + yticks=yticks, + invert_yaxis=True, + ) + 
dummy_image = hv.QuadMesh((time[0:100], event_ts_for_each_event, z_score[:, 0:100])).opts( + colorbar=True, cmap=process_cmap(self.color_map, provider="matplotlib"), clim=clim + ) + actual_image = hv.QuadMesh((time, event_ts_for_each_event, z_score)) + + dynspread_img = datashade(actual_image, cmap=process_cmap(self.color_map, provider="matplotlib")).opts( + **ropts + ) # clims=self.C_Limit, cnorm='log' + image = ((dummy_image * dynspread_img).opts(opts.QuadMesh(width=int(width), height=int(height)))).opts( + shared_axes=False + ) + + save_opts = self.save_options_heatmap + op = make_dir(self.filepath) + op_filename = os.path.join(op, self.event_selector_heatmap + "_" + "heatmap") + self.results_hm["plot"] = image + self.results_hm["op"] = op_filename + + return image diff --git a/src/guppy/visualizePlot.py b/src/guppy/visualizePlot.py index d0608c6..ed37f64 100755 --- a/src/guppy/visualizePlot.py +++ b/src/guppy/visualizePlot.py @@ -1,22 +1,15 @@ import glob import logging -import math import os import re -import datashader as ds -import holoviews as hv import matplotlib.pyplot as plt import numpy as np import pandas as pd import panel as pn -import param -from bokeh.io import export_png, export_svgs -from holoviews import opts -from holoviews.operation.datashader import datashade -from holoviews.plotting.util import process_cmap from .frontend.frontend_utils import scanPortsAndFind +from .frontend.visualization import Viewer, remove_cols from .utils.utils import get_all_stores_for_combining_data, read_Df, takeOnlyDirs pn.extension() @@ -24,25 +17,6 @@ logger = logging.getLogger(__name__) -# make a new directory for saving plots -def make_dir(filepath): - op = os.path.join(filepath, "saved_plots") - if not os.path.exists(op): - os.mkdir(op) - - return op - - -# remove unnecessary column names -def remove_cols(cols): - regex = re.compile("bin_err_*") - remove_cols = [cols[i] for i in range(len(cols)) if regex.match(cols[i])] - remove_cols = remove_cols + ["err", "timestamps"] - cols = [i for i in cols if i not in remove_cols] - - return cols - - # helper function to create plots def helper_plots(filepath, event, name, inputParameters): @@ -114,556 +88,51 @@ def helper_plots(filepath, event, name, inputParameters): columns.append("All") columns_dict[new_event[i]] = columns - # create a class to make GUI and plot different graphs - class Viewer(param.Parameterized): - - # class_event = new_event - - # make options array for different selectors - multiple_plots_options = [] - heatmap_options = new_event - bins_keys = list(bins.keys()) - if len(bins_keys) > 0: - bins_new = bins - for i in range(len(bins_keys)): - arr = bins[bins_keys[i]] - if len(arr) > 0: - # heatmap_options.append('{}_bin'.format(bins_keys[i])) - for j in arr: - multiple_plots_options.append("{}_{}".format(bins_keys[i], j)) - - multiple_plots_options = new_event + multiple_plots_options - else: - multiple_plots_options = new_event - - # create different options and selectors - event_selector = param.ObjectSelector(default=new_event[0], objects=new_event) - event_selector_heatmap = param.ObjectSelector(default=heatmap_options[0], objects=heatmap_options) - columns = columns_dict - df_new = df - - colormaps = plt.colormaps() - new_colormaps = ["plasma", "plasma_r", "magma", "magma_r", "inferno", "inferno_r", "viridis", "viridis_r"] - set_a = set(colormaps) - set_b = set(new_colormaps) - colormaps = new_colormaps + list(set_a.difference(set_b)) - - x_min = float(inputParameters["nSecPrev"]) - 20 - x_max = 
float(inputParameters["nSecPost"]) + 20 - selector_for_multipe_events_plot = param.ListSelector( - default=[multiple_plots_options[0]], objects=multiple_plots_options - ) - x = param.ObjectSelector(default=columns[new_event[0]][-4], objects=[columns[new_event[0]][-4]]) - y = param.ObjectSelector( - default=remove_cols(columns[new_event[0]])[-2], objects=remove_cols(columns[new_event[0]]) - ) - - trial_no = range(1, len(remove_cols(columns[heatmap_options[0]])[:-2]) + 1) - trial_ts = ["{} - {}".format(i, j) for i, j in zip(trial_no, remove_cols(columns[heatmap_options[0]])[:-2])] + [ - "All" - ] - heatmap_y = param.ListSelector(default=[trial_ts[-1]], objects=trial_ts) - psth_y = param.ListSelector(objects=trial_ts[:-1]) - select_trials_checkbox = param.ListSelector(default=["just trials"], objects=["mean", "just trials"]) - Y_Label = param.ObjectSelector(default="y", objects=["y", "z-score", "\u0394F/F"]) - save_options = param.ObjectSelector( - default="None", objects=["None", "save_png_format", "save_svg_format", "save_both_format"] - ) - save_options_heatmap = param.ObjectSelector( - default="None", objects=["None", "save_png_format", "save_svg_format", "save_both_format"] - ) - color_map = param.ObjectSelector(default="plasma", objects=colormaps) - height_heatmap = param.ObjectSelector(default=600, objects=list(np.arange(0, 5100, 100))[1:]) - width_heatmap = param.ObjectSelector(default=1000, objects=list(np.arange(0, 5100, 100))[1:]) - Height_Plot = param.ObjectSelector(default=300, objects=list(np.arange(0, 5100, 100))[1:]) - Width_Plot = param.ObjectSelector(default=1000, objects=list(np.arange(0, 5100, 100))[1:]) - save_hm = param.Action(lambda x: x.param.trigger("save_hm"), label="Save") - save_psth = param.Action(lambda x: x.param.trigger("save_psth"), label="Save") - X_Limit = param.Range(default=(-5, 10), bounds=(x_min, x_max)) - Y_Limit = param.Range(bounds=(-50, 50.0)) - - # C_Limit = param.Range(bounds=(-20,20.0)) - - results_hm = dict() - results_psth = dict() - - # function to save heatmaps when save button on heatmap tab is clicked - @param.depends("save_hm", watch=True) - def save_hm_plots(self): - plot = self.results_hm["plot"] - op = self.results_hm["op"] - save_opts = self.save_options_heatmap - logger.info(save_opts) - if save_opts == "save_svg_format": - p = hv.render(plot, backend="bokeh") - p.output_backend = "svg" - export_svgs(p, filename=op + ".svg") - elif save_opts == "save_png_format": - p = hv.render(plot, backend="bokeh") - export_png(p, filename=op + ".png") - elif save_opts == "save_both_format": - p = hv.render(plot, backend="bokeh") - p.output_backend = "svg" - export_svgs(p, filename=op + ".svg") - p_png = hv.render(plot, backend="bokeh") - export_png(p_png, filename=op + ".png") - else: - return 0 - - # function to save PSTH plots when save button on PSTH tab is clicked - @param.depends("save_psth", watch=True) - def save_psth_plot(self): - plot, op = [], [] - plot.append(self.results_psth["plot_combine"]) - op.append(self.results_psth["op_combine"]) - plot.append(self.results_psth["plot"]) - op.append(self.results_psth["op"]) - for i in range(len(plot)): - temp_plot, temp_op = plot[i], op[i] - save_opts = self.save_options - if save_opts == "save_svg_format": - p = hv.render(temp_plot, backend="bokeh") - p.output_backend = "svg" - export_svgs(p, filename=temp_op + ".svg") - elif save_opts == "save_png_format": - p = hv.render(temp_plot, backend="bokeh") - export_png(p, filename=temp_op + ".png") - elif save_opts == "save_both_format": - p = 
hv.render(temp_plot, backend="bokeh") - p.output_backend = "svg" - export_svgs(p, filename=temp_op + ".svg") - p_png = hv.render(temp_plot, backend="bokeh") - export_png(p_png, filename=temp_op + ".png") - else: - return 0 - - # function to change Y values based on event selection - @param.depends("event_selector", watch=True) - def _update_x_y(self): - x_value = self.columns[self.event_selector] - y_value = self.columns[self.event_selector] - self.param["x"].objects = [x_value[-4]] - self.param["y"].objects = remove_cols(y_value) - self.x = x_value[-4] - self.y = self.param["y"].objects[-2] - - @param.depends("event_selector_heatmap", watch=True) - def _update_df(self): - cols = self.columns[self.event_selector_heatmap] - trial_no = range(1, len(remove_cols(cols)[:-2]) + 1) - trial_ts = ["{} - {}".format(i, j) for i, j in zip(trial_no, remove_cols(cols)[:-2])] + ["All"] - self.param["heatmap_y"].objects = trial_ts - self.heatmap_y = [trial_ts[-1]] - - @param.depends("event_selector", watch=True) - def _update_psth_y(self): - cols = self.columns[self.event_selector] - trial_no = range(1, len(remove_cols(cols)[:-2]) + 1) - trial_ts = ["{} - {}".format(i, j) for i, j in zip(trial_no, remove_cols(cols)[:-2])] - self.param["psth_y"].objects = trial_ts - self.psth_y = [trial_ts[0]] - - # function to plot multiple PSTHs into one plot - - @param.depends( - "selector_for_multipe_events_plot", - "Y_Label", - "save_options", - "X_Limit", - "Y_Limit", - "Height_Plot", - "Width_Plot", - ) - def update_selector(self): - data_curve, cols_curve, data_spread, cols_spread = [], [], [], [] - arr = self.selector_for_multipe_events_plot - df1 = self.df_new - for i in range(len(arr)): - if "bin" in arr[i]: - split = arr[i].rsplit("_", 2) - df_name = split[0] #'{}_{}'.format(split[0], split[1]) - col_name_mean = "{}_{}".format(split[-2], split[-1]) - col_name_err = "{}_err_{}".format(split[-2], split[-1]) - data_curve.append(df1[df_name][col_name_mean]) - cols_curve.append(arr[i]) - data_spread.append(df1[df_name][col_name_err]) - cols_spread.append(arr[i]) - else: - data_curve.append(df1[arr[i]]["mean"]) - cols_curve.append(arr[i] + "_" + "mean") - data_spread.append(df1[arr[i]]["err"]) - cols_spread.append(arr[i] + "_" + "mean") - + # make options array for different selectors + multiple_plots_options = [] + heatmap_options = new_event + bins_keys = list(bins.keys()) + if len(bins_keys) > 0: + bins_new = bins + for i in range(len(bins_keys)): + arr = bins[bins_keys[i]] if len(arr) > 0: - if self.Y_Limit == None: - self.Y_Limit = (np.nanmin(np.asarray(data_curve)) - 0.5, np.nanmax(np.asarray(data_curve)) + 0.5) - - if "bin" in arr[i]: - split = arr[i].rsplit("_", 2) - df_name = split[0] - data_curve.append(df1[df_name]["timestamps"]) - cols_curve.append("timestamps") - data_spread.append(df1[df_name]["timestamps"]) - cols_spread.append("timestamps") - else: - data_curve.append(df1[arr[i]]["timestamps"]) - cols_curve.append("timestamps") - data_spread.append(df1[arr[i]]["timestamps"]) - cols_spread.append("timestamps") - df_curve = pd.concat(data_curve, axis=1) - df_spread = pd.concat(data_spread, axis=1) - df_curve.columns = cols_curve - df_spread.columns = cols_spread - - ts = df_curve["timestamps"] - index = np.arange(0, ts.shape[0], 3) - df_curve = df_curve.loc[index, :] - df_spread = df_spread.loc[index, :] - overlay = hv.NdOverlay( - { - c: hv.Curve((df_curve["timestamps"], df_curve[c]), kdims=["Time (s)"]).opts( - width=int(self.Width_Plot), - height=int(self.Height_Plot), - xlim=self.X_Limit, - 
ylim=self.Y_Limit, - ) - for c in cols_curve[:-1] - } - ) - spread = hv.NdOverlay( - { - d: hv.Spread( - (df_spread["timestamps"], df_curve[d], df_spread[d], df_spread[d]), - vdims=["y", "yerrpos", "yerrneg"], - ).opts(line_width=0, fill_alpha=0.3) - for d in cols_spread[:-1] - } - ) - plot_combine = ((overlay * spread).opts(opts.NdOverlay(xlabel="Time (s)", ylabel=self.Y_Label))).opts( - shared_axes=False - ) - # plot_err = new_df.hvplot.area(x='timestamps', y=[], y2=[]) - save_opts = self.save_options - op = make_dir(filepath) - op_filename = os.path.join(op, str(arr) + "_mean") - - self.results_psth["plot_combine"] = plot_combine - self.results_psth["op_combine"] = op_filename - # self.save_plots(plot_combine, save_opts, op_filename) - return plot_combine - - # function to plot mean PSTH, single trial in PSTH and all the trials of PSTH with mean - @param.depends( - "event_selector", "x", "y", "Y_Label", "save_options", "Y_Limit", "X_Limit", "Height_Plot", "Width_Plot" - ) - def contPlot(self): - df1 = self.df_new[self.event_selector] - # height = self.Heigth_Plot - # width = self.Width_Plot - # logger.info(height, width) - if self.y == "All": - if self.Y_Limit == None: - self.Y_Limit = (np.nanmin(np.asarray(df1)) - 0.5, np.nanmax(np.asarray(df1)) - 0.5) - - options = self.param["y"].objects - regex = re.compile("bin_[(]") - remove_bin_trials = [options[i] for i in range(len(options)) if not regex.match(options[i])] - - ndoverlay = hv.NdOverlay({c: hv.Curve((df1[self.x], df1[c])) for c in remove_bin_trials[:-2]}) - img1 = datashade(ndoverlay, normalization="linear", aggregator=ds.count()) - x_points = df1[self.x] - y_points = df1["mean"] - img2 = hv.Curve((x_points, y_points)) - img = (img1 * img2).opts( - opts.Curve( - width=int(self.Width_Plot), - height=int(self.Height_Plot), - line_width=4, - color="black", - xlim=self.X_Limit, - ylim=self.Y_Limit, - xlabel="Time (s)", - ylabel=self.Y_Label, - ) - ) - - save_opts = self.save_options - - op = make_dir(filepath) - op_filename = os.path.join(op, self.event_selector + "_" + self.y) - self.results_psth["plot"] = img - self.results_psth["op"] = op_filename - # self.save_plots(img, save_opts, op_filename) - - return img - - elif self.y == "mean" or "bin" in self.y: - - xpoints = df1[self.x] - ypoints = df1[self.y] - if self.y == "mean": - err = df1["err"] - else: - split = self.y.split("_") - err = df1["{}_err_{}".format(split[0], split[1])] - - index = np.arange(0, xpoints.shape[0], 3) - - if self.Y_Limit == None: - self.Y_Limit = (np.nanmin(ypoints) - 0.5, np.nanmax(ypoints) + 0.5) - - ropts_curve = dict( - width=int(self.Width_Plot), - height=int(self.Height_Plot), - xlim=self.X_Limit, - ylim=self.Y_Limit, - color="blue", - xlabel="Time (s)", - ylabel=self.Y_Label, - ) - ropts_spread = dict( - width=int(self.Width_Plot), - height=int(self.Height_Plot), - fill_alpha=0.3, - fill_color="blue", - line_width=0, - ) - - plot_curve = hv.Curve((xpoints[index], ypoints[index])) # .opts(**ropts_curve) - plot_spread = hv.Spread( - (xpoints[index], ypoints[index], err[index], err[index]) - ) # .opts(**ropts_spread) #vdims=['y', 'yerrpos', 'yerrneg'] - plot = (plot_curve * plot_spread).opts({"Curve": ropts_curve, "Spread": ropts_spread}) - - save_opts = self.save_options - op = make_dir(filepath) - op_filename = os.path.join(op, self.event_selector + "_" + self.y) - self.results_psth["plot"] = plot - self.results_psth["op"] = op_filename - # self.save_plots(plot, save_opts, op_filename) - - return plot - - else: - xpoints = df1[self.x] - ypoints = 
df1[self.y] - if self.Y_Limit == None: - self.Y_Limit = (np.nanmin(ypoints) - 0.5, np.nanmax(ypoints) + 0.5) - - ropts_curve = dict( - width=int(self.Width_Plot), - height=int(self.Height_Plot), - xlim=self.X_Limit, - ylim=self.Y_Limit, - color="blue", - xlabel="Time (s)", - ylabel=self.Y_Label, - ) - plot = hv.Curve((xpoints, ypoints)).opts({"Curve": ropts_curve}) - - save_opts = self.save_options - op = make_dir(filepath) - op_filename = os.path.join(op, self.event_selector + "_" + self.y) - self.results_psth["plot"] = plot - self.results_psth["op"] = op_filename - # self.save_plots(plot, save_opts, op_filename) - - return plot - - # function to plot specific PSTH trials - @param.depends( - "event_selector", - "x", - "psth_y", - "select_trials_checkbox", - "Y_Label", - "save_options", - "Y_Limit", - "X_Limit", - "Height_Plot", - "Width_Plot", - ) - def plot_specific_trials(self): - df_psth = self.df_new[self.event_selector] - # if self.Y_Limit==None: - # self.Y_Limit = (np.nanmin(ypoints)-0.5, np.nanmax(ypoints)+0.5) - - if self.psth_y == None: - return None - else: - selected_trials = [s.split(" - ")[1] for s in list(self.psth_y)] - - index = np.arange(0, df_psth["timestamps"].shape[0], 3) - - if self.select_trials_checkbox == ["just trials"]: - overlay = hv.NdOverlay( - { - c: hv.Curve((df_psth["timestamps"][index], df_psth[c][index]), kdims=["Time (s)"]) - for c in selected_trials - } - ) - ropts = dict( - width=int(self.Width_Plot), - height=int(self.Height_Plot), - xlim=self.X_Limit, - ylim=self.Y_Limit, - xlabel="Time (s)", - ylabel=self.Y_Label, - ) - return overlay.opts(**ropts) - elif self.select_trials_checkbox == ["mean"]: - arr = np.asarray(df_psth[selected_trials]) - mean = np.nanmean(arr, axis=1) - err = np.nanstd(arr, axis=1) / math.sqrt(arr.shape[1]) - ropts_curve = dict( - width=int(self.Width_Plot), - height=int(self.Height_Plot), - xlim=self.X_Limit, - ylim=self.Y_Limit, - color="blue", - xlabel="Time (s)", - ylabel=self.Y_Label, - ) - ropts_spread = dict( - width=int(self.Width_Plot), - height=int(self.Height_Plot), - fill_alpha=0.3, - fill_color="blue", - line_width=0, - ) - plot_curve = hv.Curve((df_psth["timestamps"][index], mean[index])) - plot_spread = hv.Spread((df_psth["timestamps"][index], mean[index], err[index], err[index])) - plot = (plot_curve * plot_spread).opts({"Curve": ropts_curve, "Spread": ropts_spread}) - return plot - elif self.select_trials_checkbox == ["mean", "just trials"]: - overlay = hv.NdOverlay( - { - c: hv.Curve((df_psth["timestamps"][index], df_psth[c][index]), kdims=["Time (s)"]) - for c in selected_trials - } - ) - ropts_overlay = dict( - width=int(self.Width_Plot), - height=int(self.Height_Plot), - xlim=self.X_Limit, - ylim=self.Y_Limit, - xlabel="Time (s)", - ylabel=self.Y_Label, - ) - - arr = np.asarray(df_psth[selected_trials]) - mean = np.nanmean(arr, axis=1) - err = np.nanstd(arr, axis=1) / math.sqrt(arr.shape[1]) - ropts_curve = dict( - width=int(self.Width_Plot), - height=int(self.Height_Plot), - xlim=self.X_Limit, - ylim=self.Y_Limit, - color="black", - xlabel="Time (s)", - ylabel=self.Y_Label, - ) - ropts_spread = dict( - width=int(self.Width_Plot), - height=int(self.Height_Plot), - fill_alpha=0.3, - fill_color="black", - line_width=0, - ) - plot_curve = hv.Curve((df_psth["timestamps"][index], mean[index])) - plot_spread = hv.Spread((df_psth["timestamps"][index], mean[index], err[index], err[index])) - - plot = (plot_curve * plot_spread).opts({"Curve": ropts_curve, "Spread": ropts_spread}) - return 
overlay.opts(**ropts_overlay) * plot - - # function to show heatmaps for each event - @param.depends("event_selector_heatmap", "color_map", "height_heatmap", "width_heatmap", "heatmap_y") - def heatmap(self): - height = self.height_heatmap - width = self.width_heatmap - df_hm = self.df_new[self.event_selector_heatmap] - cols = list(df_hm.columns) - regex = re.compile("bin_err_*") - drop_cols = [cols[i] for i in range(len(cols)) if regex.match(cols[i])] - drop_cols = ["err", "mean"] + drop_cols - df_hm = df_hm.drop(drop_cols, axis=1) - cols = list(df_hm.columns) - bin_cols = [cols[i] for i in range(len(cols)) if re.compile("bin_*").match(cols[i])] - time = np.asarray(df_hm["timestamps"]) - event_ts_for_each_event = np.arange(1, len(df_hm.columns[:-1]) + 1) - yticks = list(event_ts_for_each_event) - z_score = np.asarray(df_hm[df_hm.columns[:-1]]).T - - if self.heatmap_y[0] == "All": - indices = np.arange(z_score.shape[0] - len(bin_cols)) - z_score = z_score[indices, :] - event_ts_for_each_event = np.arange(1, z_score.shape[0] + 1) - yticks = list(event_ts_for_each_event) - else: - remove_all = list(set(self.heatmap_y) - set(["All"])) - indices = sorted([int(s.split("-")[0]) - 1 for s in remove_all]) - z_score = z_score[indices, :] - event_ts_for_each_event = np.arange(1, z_score.shape[0] + 1) - yticks = list(event_ts_for_each_event) - - clim = (np.nanmin(z_score), np.nanmax(z_score)) - font_size = {"labels": 16, "yticks": 6} - - if event_ts_for_each_event.shape[0] == 1: - dummy_image = hv.QuadMesh((time, event_ts_for_each_event, z_score)).opts(colorbar=True, clim=clim) - image = ( - (dummy_image).opts( - opts.QuadMesh( - width=int(width), - height=int(height), - cmap=process_cmap(self.color_map, provider="matplotlib"), - colorbar=True, - ylabel="Trials", - xlabel="Time (s)", - fontsize=font_size, - yticks=yticks, - ) - ) - ).opts(shared_axes=False) - - save_opts = self.save_options_heatmap - op = make_dir(filepath) - op_filename = os.path.join(op, self.event_selector_heatmap + "_" + "heatmap") - self.results_hm["plot"] = image - self.results_hm["op"] = op_filename - # self.save_plots(image, save_opts, op_filename) - return image - else: - ropts = dict( - width=int(width), - height=int(height), - ylabel="Trials", - xlabel="Time (s)", - fontsize=font_size, - yticks=yticks, - invert_yaxis=True, - ) - dummy_image = hv.QuadMesh((time[0:100], event_ts_for_each_event, z_score[:, 0:100])).opts( - colorbar=True, cmap=process_cmap(self.color_map, provider="matplotlib"), clim=clim - ) - actual_image = hv.QuadMesh((time, event_ts_for_each_event, z_score)) - - dynspread_img = datashade(actual_image, cmap=process_cmap(self.color_map, provider="matplotlib")).opts( - **ropts - ) # clims=self.C_Limit, cnorm='log' - image = ((dummy_image * dynspread_img).opts(opts.QuadMesh(width=int(width), height=int(height)))).opts( - shared_axes=False - ) - - save_opts = self.save_options_heatmap - op = make_dir(filepath) - op_filename = os.path.join(op, self.event_selector_heatmap + "_" + "heatmap") - self.results_hm["plot"] = image - self.results_hm["op"] = op_filename - - return image - - view = Viewer() + # heatmap_options.append('{}_bin'.format(bins_keys[i])) + for j in arr: + multiple_plots_options.append("{}_{}".format(bins_keys[i], j)) + + multiple_plots_options = new_event + multiple_plots_options + else: + multiple_plots_options = new_event + x_min = float(inputParameters["nSecPrev"]) - 20 + x_max = float(inputParameters["nSecPost"]) + 20 + colormaps = plt.colormaps() + new_colormaps = ["plasma", "plasma_r", 
"magma", "magma_r", "inferno", "inferno_r", "viridis", "viridis_r"] + set_a = set(colormaps) + set_b = set(new_colormaps) + colormaps = new_colormaps + list(set_a.difference(set_b)) + x = [columns_dict[new_event[0]][-4]] + y = remove_cols(columns_dict[new_event[0]]) + trial_no = range(1, len(remove_cols(columns_dict[heatmap_options[0]])[:-2]) + 1) + trial_ts = [ + "{} - {}".format(i, j) for i, j in zip(trial_no, remove_cols(columns_dict[heatmap_options[0]])[:-2]) + ] + ["All"] + + view = Viewer( + event_selector=new_event, + event_selector_heatmap=heatmap_options, + selector_for_multipe_events_plot=multiple_plots_options, + columns_dict=columns_dict, + df_new=df, + x_min=x_min, + x_max=x_max, + color_map=colormaps, + filepath=filepath, + x=x, + y=y, + heatmap_y=trial_ts, + psth_y=trial_ts[:-1], + ) # PSTH plot options psth_checkbox = pn.Param( From 2b4ecca533f9e9e375d3eae9c5d47cadb7bc6a06 Mon Sep 17 00:00:00 2001 From: pauladkisson Date: Fri, 6 Feb 2026 09:28:40 -0800 Subject: [PATCH 47/53] Refactored Viewer class into a dedicated module. --- src/guppy/frontend/visualization.py | 4 ---- 1 file changed, 4 deletions(-) diff --git a/src/guppy/frontend/visualization.py b/src/guppy/frontend/visualization.py index 9f8f41b..7a8d85e 100644 --- a/src/guppy/frontend/visualization.py +++ b/src/guppy/frontend/visualization.py @@ -248,10 +248,7 @@ def update_selector(self): "event_selector", "x", "y", "Y_Label", "save_options", "Y_Limit", "X_Limit", "Height_Plot", "Width_Plot" ) def contPlot(self): - print(f"{self.event_selector = }") - print(f"{self.df_new = }") df1 = self.df_new[self.event_selector] - print(f"{df1.columns = }") # height = self.Heigth_Plot # width = self.Width_Plot # logger.info(height, width) @@ -339,7 +336,6 @@ def contPlot(self): return plot else: - print(f"{self.x = }") xpoints = df1[self.x] ypoints = df1[self.y] if self.Y_Limit == None: From 521dba261ccbd197f5fcb064ea78b17dbcbdedf3 Mon Sep 17 00:00:00 2001 From: pauladkisson Date: Fri, 6 Feb 2026 10:02:59 -0800 Subject: [PATCH 48/53] Fixed bug with objects initialization. 
--- src/guppy/frontend/visualization.py | 35 +++++++++++++++++++++++++---- src/guppy/visualizePlot.py | 16 ++++++------- 2 files changed, 39 insertions(+), 12 deletions(-) diff --git a/src/guppy/frontend/visualization.py b/src/guppy/frontend/visualization.py index 7a8d85e..3f6b174 100644 --- a/src/guppy/frontend/visualization.py +++ b/src/guppy/frontend/visualization.py @@ -40,6 +40,15 @@ def make_dir(filepath): # create a class to make GUI and plot different graphs class Viewer(param.Parameterized): + event_selector_objects = param.List(default=None) + event_selector_heatmap_objects = param.List(default=None) + selector_for_multipe_events_plot_objects = param.List(default=None) + color_map_objects = param.List(default=None) + x_objects = param.List(default=None) + y_objects = param.List(default=None) + heatmap_y_objects = param.List(default=None) + psth_y_objects = param.List(default=None) + filepath = param.Path(default=None) # create different options and selectors event_selector = param.ObjectSelector(default=None) @@ -76,6 +85,24 @@ class Viewer(param.Parameterized): def __init__(self, **params): super().__init__(**params) + # Bind selector objects from companion params + self.param.event_selector.objects = self.event_selector_objects + self.param.event_selector_heatmap.objects = self.event_selector_heatmap_objects + self.param.selector_for_multipe_events_plot.objects = self.selector_for_multipe_events_plot_objects + self.param.color_map.objects = self.color_map_objects + self.param.x.objects = self.x_objects + self.param.y.objects = self.y_objects + self.param.heatmap_y.objects = self.heatmap_y_objects + self.param.psth_y.objects = self.psth_y_objects + + # Set defaults + self.event_selector = self.event_selector_objects[0] + self.event_selector_heatmap = self.event_selector_heatmap_objects[0] + self.selector_for_multipe_events_plot = [self.selector_for_multipe_events_plot_objects[0]] + self.x = self.x_objects[0] + self.y = self.y_objects[-2] + self.heatmap_y = [self.heatmap_y_objects[-1]] + self.param.X_Limit.bounds = (self.x_min, self.x_max) # function to save heatmaps when save button on heatmap tab is clicked @@ -131,8 +158,8 @@ def save_psth_plot(self): # function to change Y values based on event selection @param.depends("event_selector", watch=True) def _update_x_y(self): - x_value = self.columns[self.event_selector] - y_value = self.columns[self.event_selector] + x_value = self.columns_dict[self.event_selector] + y_value = self.columns_dict[self.event_selector] self.param["x"].objects = [x_value[-4]] self.param["y"].objects = remove_cols(y_value) self.x = x_value[-4] @@ -140,7 +167,7 @@ def _update_x_y(self): @param.depends("event_selector_heatmap", watch=True) def _update_df(self): - cols = self.columns[self.event_selector_heatmap] + cols = self.columns_dict[self.event_selector_heatmap] trial_no = range(1, len(remove_cols(cols)[:-2]) + 1) trial_ts = ["{} - {}".format(i, j) for i, j in zip(trial_no, remove_cols(cols)[:-2])] + ["All"] self.param["heatmap_y"].objects = trial_ts @@ -148,7 +175,7 @@ def _update_df(self): @param.depends("event_selector", watch=True) def _update_psth_y(self): - cols = self.columns[self.event_selector] + cols = self.columns_dict[self.event_selector] trial_no = range(1, len(remove_cols(cols)[:-2]) + 1) trial_ts = ["{} - {}".format(i, j) for i, j in zip(trial_no, remove_cols(cols)[:-2])] self.param["psth_y"].objects = trial_ts diff --git a/src/guppy/visualizePlot.py b/src/guppy/visualizePlot.py index ed37f64..39fe095 100755 --- 
a/src/guppy/visualizePlot.py +++ b/src/guppy/visualizePlot.py @@ -119,19 +119,19 @@ def helper_plots(filepath, event, name, inputParameters): ] + ["All"] view = Viewer( - event_selector=new_event, - event_selector_heatmap=heatmap_options, - selector_for_multipe_events_plot=multiple_plots_options, + event_selector_objects=new_event, + event_selector_heatmap_objects=heatmap_options, + selector_for_multipe_events_plot_objects=multiple_plots_options, columns_dict=columns_dict, df_new=df, x_min=x_min, x_max=x_max, - color_map=colormaps, + color_map_objects=colormaps, filepath=filepath, - x=x, - y=y, - heatmap_y=trial_ts, - psth_y=trial_ts[:-1], + x_objects=x, + y_objects=y, + heatmap_y_objects=trial_ts, + psth_y_objects=trial_ts[:-1], ) # PSTH plot options From 4b07e1c48ca0e971514170f08e75faeef9ab2e49 Mon Sep 17 00:00:00 2001 From: pauladkisson Date: Fri, 6 Feb 2026 10:23:01 -0800 Subject: [PATCH 49/53] Refactored visualization front-end code into dedicated dashboard module. --- src/guppy/frontend/dashboard.py | 204 ++++++++++++++++++++++++++++++++ src/guppy/visualizePlot.py | 138 +++------------------ 2 files changed, 218 insertions(+), 124 deletions(-) create mode 100644 src/guppy/frontend/dashboard.py diff --git a/src/guppy/frontend/dashboard.py b/src/guppy/frontend/dashboard.py new file mode 100644 index 0000000..e19cda0 --- /dev/null +++ b/src/guppy/frontend/dashboard.py @@ -0,0 +1,204 @@ +import logging + +import panel as pn + +from .frontend_utils import scanPortsAndFind +from .visualization import Viewer + +pn.extension() + +logger = logging.getLogger(__name__) + + +class VisualizationDashboard: + """Dashboard for interactive PSTH and heatmap visualization. + + Composes a ``Viewer`` instance with Panel widgets and a tabbed layout. + Data loading and preparation are handled externally; this class + receives already-prepared data and is responsible for widget creation, + layout assembly, and serving the application. + + Parameters + ---------- + basename : str + Session name displayed as the tab title. + filepath : str + Output directory path (used by Viewer for saving plots). + df : pandas.DataFrame + Concatenated multi-indexed DataFrame of PSTH data. + columns_dict : dict + Mapping of event names to their available column names. + event_options : list + Event names for the PSTH event selector. + heatmap_options : list + Event names for the heatmap event selector. + multiple_plots_options : list + Options for the multi-event overlay selector. + colormaps : list + Ordered list of matplotlib colormap names. + x_options : list + X-axis column options. + y_options : list + Y-axis column options. + trial_options : list + Trial labels (including "All" as last element). + x_min : float + Lower bound for the X-axis range slider. + x_max : float + Upper bound for the X-axis range slider. 
+ """ + + def __init__( + self, + *, + basename, + filepath, + df, + columns_dict, + event_options, + heatmap_options, + multiple_plots_options, + colormaps, + x_options, + y_options, + trial_options, + x_min, + x_max, + ): + self.basename = basename + self.viewer = Viewer( + event_selector_objects=event_options, + event_selector_heatmap_objects=heatmap_options, + selector_for_multipe_events_plot_objects=multiple_plots_options, + columns_dict=columns_dict, + df_new=df, + x_min=x_min, + x_max=x_max, + color_map_objects=colormaps, + filepath=filepath, + x_objects=x_options, + y_objects=y_options, + heatmap_y_objects=trial_options, + psth_y_objects=trial_options[:-1], + ) + self._psth_tab = self._build_psth_tab() + self._heatmap_tab = self._build_heatmap_tab() + + def _build_psth_tab(self): + """Build the PSTH tab with controls and plot panels.""" + view = self.viewer + + psth_checkbox = pn.Param( + view.param.select_trials_checkbox, + widgets={ + "select_trials_checkbox": { + "type": pn.widgets.CheckBoxGroup, + "inline": True, + "name": "Select mean and/or just trials", + } + }, + ) + parameters = pn.Param( + view.param.selector_for_multipe_events_plot, + widgets={ + "selector_for_multipe_events_plot": {"type": pn.widgets.CrossSelector, "width": 550, "align": "start"} + }, + ) + psth_y_parameters = pn.Param( + view.param.psth_y, + widgets={ + "psth_y": { + "type": pn.widgets.MultiSelect, + "name": "Trial # - Timestamps", + "width": 200, + "size": 15, + "align": "start", + } + }, + ) + + event_selector = pn.Param( + view.param.event_selector, widgets={"event_selector": {"type": pn.widgets.Select, "width": 400}} + ) + x_selector = pn.Param(view.param.x, widgets={"x": {"type": pn.widgets.Select, "width": 180}}) + y_selector = pn.Param(view.param.y, widgets={"y": {"type": pn.widgets.Select, "width": 180}}) + + width_plot = pn.Param(view.param.Width_Plot, widgets={"Width_Plot": {"type": pn.widgets.Select, "width": 70}}) + height_plot = pn.Param( + view.param.Height_Plot, widgets={"Height_Plot": {"type": pn.widgets.Select, "width": 70}} + ) + ylabel = pn.Param(view.param.Y_Label, widgets={"Y_Label": {"type": pn.widgets.Select, "width": 70}}) + save_opts = pn.Param( + view.param.save_options, widgets={"save_options": {"type": pn.widgets.Select, "width": 70}} + ) + + xlimit_plot = pn.Param(view.param.X_Limit, widgets={"X_Limit": {"type": pn.widgets.RangeSlider, "width": 180}}) + ylimit_plot = pn.Param(view.param.Y_Limit, widgets={"Y_Limit": {"type": pn.widgets.RangeSlider, "width": 180}}) + save_psth = pn.Param(view.param.save_psth, widgets={"save_psth": {"type": pn.widgets.Button, "width": 400}}) + + options = pn.Column( + event_selector, + pn.Row(x_selector, y_selector), + pn.Row(xlimit_plot, ylimit_plot), + pn.Row(width_plot, height_plot, ylabel, save_opts), + save_psth, + ) + + options_selectors = pn.Row(options, parameters) + + return pn.Column( + "## " + self.basename, + pn.Row(options_selectors, pn.Column(psth_checkbox, psth_y_parameters), width=1200), + view.contPlot, + view.update_selector, + view.plot_specific_trials, + ) + + def _build_heatmap_tab(self): + """Build the heatmap tab with controls and plot panels.""" + view = self.viewer + + heatmap_y_parameters = pn.Param( + view.param.heatmap_y, + widgets={ + "heatmap_y": {"type": pn.widgets.MultiSelect, "name": "Trial # - Timestamps", "width": 200, "size": 30} + }, + ) + event_selector_heatmap = pn.Param( + view.param.event_selector_heatmap, + widgets={"event_selector_heatmap": {"type": pn.widgets.Select, "width": 150}}, + ) + color_map = 
pn.Param(view.param.color_map, widgets={"color_map": {"type": pn.widgets.Select, "width": 150}}) + width_heatmap = pn.Param( + view.param.width_heatmap, widgets={"width_heatmap": {"type": pn.widgets.Select, "width": 150}} + ) + height_heatmap = pn.Param( + view.param.height_heatmap, widgets={"height_heatmap": {"type": pn.widgets.Select, "width": 150}} + ) + save_hm = pn.Param(view.param.save_hm, widgets={"save_hm": {"type": pn.widgets.Button, "width": 150}}) + save_options_heatmap = pn.Param( + view.param.save_options_heatmap, + widgets={"save_options_heatmap": {"type": pn.widgets.Select, "width": 150}}, + ) + + return pn.Column( + "## " + self.basename, + pn.Row( + event_selector_heatmap, + color_map, + width_heatmap, + height_heatmap, + save_options_heatmap, + pn.Column(pn.Spacer(height=25), save_hm), + ), + pn.Row(view.heatmap, heatmap_y_parameters), + ) + + def show(self): + """Serve the dashboard in a browser on an available port.""" + logger.info("app") + template = pn.template.MaterialTemplate(title="Visualization GUI") + number = scanPortsAndFind(start_port=5000, end_port=5200) + app = pn.Tabs(("PSTH", self._psth_tab), ("Heat Map", self._heatmap_tab)) + template.main.append(app) + template.show(port=number) diff --git a/src/guppy/visualizePlot.py b/src/guppy/visualizePlot.py index 39fe095..52b36bb 100755 --- a/src/guppy/visualizePlot.py +++ b/src/guppy/visualizePlot.py @@ -6,14 +6,11 @@ import matplotlib.pyplot as plt import numpy as np import pandas as pd -import panel as pn -from .frontend.frontend_utils import scanPortsAndFind -from .frontend.visualization import Viewer, remove_cols +from .frontend.dashboard import VisualizationDashboard +from .frontend.visualization import remove_cols from .utils.utils import get_all_stores_for_combining_data, read_Df, takeOnlyDirs -pn.extension() - logger = logging.getLogger(__name__) @@ -118,129 +115,22 @@ def helper_plots(filepath, event, name, inputParameters): "{} - {}".format(i, j) for i, j in zip(trial_no, remove_cols(columns_dict[heatmap_options[0]])[:-2]) ] + ["All"] - view = Viewer( - event_selector_objects=new_event, - event_selector_heatmap_objects=heatmap_options, - selector_for_multipe_events_plot_objects=multiple_plots_options, + dashboard = VisualizationDashboard( + basename=basename, + filepath=filepath, + df=df, columns_dict=columns_dict, - df_new=df, + event_options=new_event, + heatmap_options=heatmap_options, + multiple_plots_options=multiple_plots_options, + colormaps=colormaps, + x_options=x, + y_options=y, + trial_options=trial_ts, x_min=x_min, x_max=x_max, - color_map_objects=colormaps, - filepath=filepath, - x_objects=x, - y_objects=y, - heatmap_y_objects=trial_ts, - psth_y_objects=trial_ts[:-1], - ) - - # PSTH plot options - psth_checkbox = pn.Param( - view.param.select_trials_checkbox, - widgets={ - "select_trials_checkbox": { - "type": pn.widgets.CheckBoxGroup, - "inline": True, - "name": "Select mean and/or just trials", - } - }, - ) - parameters = pn.Param( - view.param.selector_for_multipe_events_plot, - widgets={ - "selector_for_multipe_events_plot": {"type": pn.widgets.CrossSelector, "width": 550, "align": "start"} - }, - ) - heatmap_y_parameters = pn.Param( - view.param.heatmap_y, - widgets={ - "heatmap_y": {"type": pn.widgets.MultiSelect, "name": "Trial # - Timestamps", "width": 200, "size": 30} - }, - ) - psth_y_parameters = pn.Param( - view.param.psth_y, - widgets={ - "psth_y": { - "type": pn.widgets.MultiSelect, - "name": "Trial # - Timestamps", - "width": 200, - "size": 15, - "align": "start", - } - }, - 
) - - event_selector = pn.Param( - view.param.event_selector, widgets={"event_selector": {"type": pn.widgets.Select, "width": 400}} - ) - x_selector = pn.Param(view.param.x, widgets={"x": {"type": pn.widgets.Select, "width": 180}}) - y_selector = pn.Param(view.param.y, widgets={"y": {"type": pn.widgets.Select, "width": 180}}) - - width_plot = pn.Param(view.param.Width_Plot, widgets={"Width_Plot": {"type": pn.widgets.Select, "width": 70}}) - height_plot = pn.Param(view.param.Height_Plot, widgets={"Height_Plot": {"type": pn.widgets.Select, "width": 70}}) - ylabel = pn.Param(view.param.Y_Label, widgets={"Y_Label": {"type": pn.widgets.Select, "width": 70}}) - save_opts = pn.Param(view.param.save_options, widgets={"save_options": {"type": pn.widgets.Select, "width": 70}}) - - xlimit_plot = pn.Param(view.param.X_Limit, widgets={"X_Limit": {"type": pn.widgets.RangeSlider, "width": 180}}) - ylimit_plot = pn.Param(view.param.Y_Limit, widgets={"Y_Limit": {"type": pn.widgets.RangeSlider, "width": 180}}) - save_psth = pn.Param(view.param.save_psth, widgets={"save_psth": {"type": pn.widgets.Button, "width": 400}}) - - options = pn.Column( - event_selector, - pn.Row(x_selector, y_selector), - pn.Row(xlimit_plot, ylimit_plot), - pn.Row(width_plot, height_plot, ylabel, save_opts), - save_psth, - ) - - options_selectors = pn.Row(options, parameters) - - line_tab = pn.Column( - "## " + basename, - pn.Row(options_selectors, pn.Column(psth_checkbox, psth_y_parameters), width=1200), - view.contPlot, - view.update_selector, - view.plot_specific_trials, - ) - - # Heatmap plot options - event_selector_heatmap = pn.Param( - view.param.event_selector_heatmap, widgets={"event_selector_heatmap": {"type": pn.widgets.Select, "width": 150}} - ) - color_map = pn.Param(view.param.color_map, widgets={"color_map": {"type": pn.widgets.Select, "width": 150}}) - width_heatmap = pn.Param( - view.param.width_heatmap, widgets={"width_heatmap": {"type": pn.widgets.Select, "width": 150}} - ) - height_heatmap = pn.Param( - view.param.height_heatmap, widgets={"height_heatmap": {"type": pn.widgets.Select, "width": 150}} ) - save_hm = pn.Param(view.param.save_hm, widgets={"save_hm": {"type": pn.widgets.Button, "width": 150}}) - save_options_heatmap = pn.Param( - view.param.save_options_heatmap, widgets={"save_options_heatmap": {"type": pn.widgets.Select, "width": 150}} - ) - - hm_tab = pn.Column( - "## " + basename, - pn.Row( - event_selector_heatmap, - color_map, - width_heatmap, - height_heatmap, - save_options_heatmap, - pn.Column(pn.Spacer(height=25), save_hm), - ), - pn.Row(view.heatmap, heatmap_y_parameters), - ) # - logger.info("app") - - template = pn.template.MaterialTemplate(title="Visualization GUI") - - number = scanPortsAndFind(start_port=5000, end_port=5200) - app = pn.Tabs(("PSTH", line_tab), ("Heat Map", hm_tab)) - - template.main.append(app) - - template.show(port=number) + dashboard.show() # function to combine all the output folders together and preprocess them to use them in helper_plots function From 039b36e46dc417eb128778fed368a2ad0b33d945 Mon Sep 17 00:00:00 2001 From: pauladkisson Date: Fri, 6 Feb 2026 10:26:21 -0800 Subject: [PATCH 50/53] Refactored visualization front-end code into dedicated dashboard module. 
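The dashboard no longer constructs its own Viewer; it receives a fully configured one, keeping widget layout decoupled from data preparation (and letting the dashboard be exercised with a stub viewer). The call site now composes the two pieces, roughly as follows (kwargs condensed; helper_plots() passes the full set of `*_objects` lists as before):

```python
viewer = Viewer(
    event_selector_objects=new_event,
    columns_dict=columns_dict,
    df_new=df,
    filepath=filepath,
    # ... remaining *_objects lists and x_min/x_max as in helper_plots() ...
)
dashboard = VisualizationDashboard(viewer=viewer, basename=basename)
dashboard.show()  # serves the Panel app on a free port in 5000-5200
```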
--- src/guppy/frontend/dashboard.py | 70 +++++---------------------------- src/guppy/visualizePlot.py | 25 ++++++------ 2 files changed, 22 insertions(+), 73 deletions(-) diff --git a/src/guppy/frontend/dashboard.py b/src/guppy/frontend/dashboard.py index e19cda0..2fe17d6 100644 --- a/src/guppy/frontend/dashboard.py +++ b/src/guppy/frontend/dashboard.py @@ -3,7 +3,6 @@ import panel as pn from .frontend_utils import scanPortsAndFind -from .visualization import Viewer pn.extension() @@ -13,74 +12,23 @@ class VisualizationDashboard: """Dashboard for interactive PSTH and heatmap visualization. - Composes a ``Viewer`` instance with Panel widgets and a tabbed layout. - Data loading and preparation are handled externally; this class - receives already-prepared data and is responsible for widget creation, - layout assembly, and serving the application. + Wraps a ``Viewer`` instance with Panel widgets and a tabbed layout. + Data loading, preparation, and Viewer instantiation are handled + externally; this class is responsible for widget creation, layout + assembly, and serving the application. Parameters ---------- + viewer : Viewer + A fully configured Viewer instance that provides reactive plot + methods and param-based controls. basename : str Session name displayed as the tab title. - filepath : str - Output directory path (used by Viewer for saving plots). - df : pandas.DataFrame - Concatenated multi-indexed DataFrame of PSTH data. - columns_dict : dict - Mapping of event names to their available column names. - event_options : list - Event names for the PSTH event selector. - heatmap_options : list - Event names for the heatmap event selector. - multiple_plots_options : list - Options for the multi-event overlay selector. - colormaps : list - Ordered list of matplotlib colormap names. - x_options : list - X-axis column options. - y_options : list - Y-axis column options. - trial_options : list - Trial labels (including "All" as last element). - x_min : float - Lower bound for the X-axis range slider. - x_max : float - Upper bound for the X-axis range slider. 
""" - def __init__( - self, - *, - basename, - filepath, - df, - columns_dict, - event_options, - heatmap_options, - multiple_plots_options, - colormaps, - x_options, - y_options, - trial_options, - x_min, - x_max, - ): + def __init__(self, *, viewer, basename): + self.viewer = viewer self.basename = basename - self.viewer = Viewer( - event_selector_objects=event_options, - event_selector_heatmap_objects=heatmap_options, - selector_for_multipe_events_plot_objects=multiple_plots_options, - columns_dict=columns_dict, - df_new=df, - x_min=x_min, - x_max=x_max, - color_map_objects=colormaps, - filepath=filepath, - x_objects=x_options, - y_objects=y_options, - heatmap_y_objects=trial_options, - psth_y_objects=trial_options[:-1], - ) self._psth_tab = self._build_psth_tab() self._heatmap_tab = self._build_heatmap_tab() diff --git a/src/guppy/visualizePlot.py b/src/guppy/visualizePlot.py index 52b36bb..330b87b 100755 --- a/src/guppy/visualizePlot.py +++ b/src/guppy/visualizePlot.py @@ -8,7 +8,7 @@ import pandas as pd from .frontend.dashboard import VisualizationDashboard -from .frontend.visualization import remove_cols +from .frontend.visualization import Viewer, remove_cols from .utils.utils import get_all_stores_for_combining_data, read_Df, takeOnlyDirs logger = logging.getLogger(__name__) @@ -115,21 +115,22 @@ def helper_plots(filepath, event, name, inputParameters): "{} - {}".format(i, j) for i, j in zip(trial_no, remove_cols(columns_dict[heatmap_options[0]])[:-2]) ] + ["All"] - dashboard = VisualizationDashboard( - basename=basename, - filepath=filepath, - df=df, + viewer = Viewer( + event_selector_objects=new_event, + event_selector_heatmap_objects=heatmap_options, + selector_for_multipe_events_plot_objects=multiple_plots_options, columns_dict=columns_dict, - event_options=new_event, - heatmap_options=heatmap_options, - multiple_plots_options=multiple_plots_options, - colormaps=colormaps, - x_options=x, - y_options=y, - trial_options=trial_ts, + df_new=df, x_min=x_min, x_max=x_max, + color_map_objects=colormaps, + filepath=filepath, + x_objects=x, + y_objects=y, + heatmap_y_objects=trial_ts, + psth_y_objects=trial_ts[:-1], ) + dashboard = VisualizationDashboard(viewer=viewer, basename=basename) dashboard.show() From 46f148e4168bfb1c95032c5f57277674ad6faf45 Mon Sep 17 00:00:00 2001 From: pauladkisson Date: Fri, 6 Feb 2026 10:43:41 -0800 Subject: [PATCH 51/53] Renamed some classes --- src/guppy/frontend/dashboard.py | 152 -------- src/guppy/frontend/visualization.py | 582 ---------------------------- src/guppy/visualizePlot.py | 8 +- 3 files changed, 4 insertions(+), 738 deletions(-) delete mode 100644 src/guppy/frontend/dashboard.py delete mode 100644 src/guppy/frontend/visualization.py diff --git a/src/guppy/frontend/dashboard.py b/src/guppy/frontend/dashboard.py deleted file mode 100644 index 2fe17d6..0000000 --- a/src/guppy/frontend/dashboard.py +++ /dev/null @@ -1,152 +0,0 @@ -import logging - -import panel as pn - -from .frontend_utils import scanPortsAndFind - -pn.extension() - -logger = logging.getLogger(__name__) - - -class VisualizationDashboard: - """Dashboard for interactive PSTH and heatmap visualization. - - Wraps a ``Viewer`` instance with Panel widgets and a tabbed layout. - Data loading, preparation, and Viewer instantiation are handled - externally; this class is responsible for widget creation, layout - assembly, and serving the application. 
- - Parameters - ---------- - viewer : Viewer - A fully configured Viewer instance that provides reactive plot - methods and param-based controls. - basename : str - Session name displayed as the tab title. - """ - - def __init__(self, *, viewer, basename): - self.viewer = viewer - self.basename = basename - self._psth_tab = self._build_psth_tab() - self._heatmap_tab = self._build_heatmap_tab() - - def _build_psth_tab(self): - """Build the PSTH tab with controls and plot panels.""" - view = self.viewer - - psth_checkbox = pn.Param( - view.param.select_trials_checkbox, - widgets={ - "select_trials_checkbox": { - "type": pn.widgets.CheckBoxGroup, - "inline": True, - "name": "Select mean and/or just trials", - } - }, - ) - parameters = pn.Param( - view.param.selector_for_multipe_events_plot, - widgets={ - "selector_for_multipe_events_plot": {"type": pn.widgets.CrossSelector, "width": 550, "align": "start"} - }, - ) - psth_y_parameters = pn.Param( - view.param.psth_y, - widgets={ - "psth_y": { - "type": pn.widgets.MultiSelect, - "name": "Trial # - Timestamps", - "width": 200, - "size": 15, - "align": "start", - } - }, - ) - - event_selector = pn.Param( - view.param.event_selector, widgets={"event_selector": {"type": pn.widgets.Select, "width": 400}} - ) - x_selector = pn.Param(view.param.x, widgets={"x": {"type": pn.widgets.Select, "width": 180}}) - y_selector = pn.Param(view.param.y, widgets={"y": {"type": pn.widgets.Select, "width": 180}}) - - width_plot = pn.Param(view.param.Width_Plot, widgets={"Width_Plot": {"type": pn.widgets.Select, "width": 70}}) - height_plot = pn.Param( - view.param.Height_Plot, widgets={"Height_Plot": {"type": pn.widgets.Select, "width": 70}} - ) - ylabel = pn.Param(view.param.Y_Label, widgets={"Y_Label": {"type": pn.widgets.Select, "width": 70}}) - save_opts = pn.Param( - view.param.save_options, widgets={"save_options": {"type": pn.widgets.Select, "width": 70}} - ) - - xlimit_plot = pn.Param(view.param.X_Limit, widgets={"X_Limit": {"type": pn.widgets.RangeSlider, "width": 180}}) - ylimit_plot = pn.Param(view.param.Y_Limit, widgets={"Y_Limit": {"type": pn.widgets.RangeSlider, "width": 180}}) - save_psth = pn.Param(view.param.save_psth, widgets={"save_psth": {"type": pn.widgets.Button, "width": 400}}) - - options = pn.Column( - event_selector, - pn.Row(x_selector, y_selector), - pn.Row(xlimit_plot, ylimit_plot), - pn.Row(width_plot, height_plot, ylabel, save_opts), - save_psth, - ) - - options_selectors = pn.Row(options, parameters) - - return pn.Column( - "## " + self.basename, - pn.Row(options_selectors, pn.Column(psth_checkbox, psth_y_parameters), width=1200), - view.contPlot, - view.update_selector, - view.plot_specific_trials, - ) - - def _build_heatmap_tab(self): - """Build the heatmap tab with controls and plot panels.""" - view = self.viewer - - heatmap_y_parameters = pn.Param( - view.param.heatmap_y, - widgets={ - "heatmap_y": {"type": pn.widgets.MultiSelect, "name": "Trial # - Timestamps", "width": 200, "size": 30} - }, - ) - event_selector_heatmap = pn.Param( - view.param.event_selector_heatmap, - widgets={"event_selector_heatmap": {"type": pn.widgets.Select, "width": 150}}, - ) - color_map = pn.Param(view.param.color_map, widgets={"color_map": {"type": pn.widgets.Select, "width": 150}}) - width_heatmap = pn.Param( - view.param.width_heatmap, widgets={"width_heatmap": {"type": pn.widgets.Select, "width": 150}} - ) - height_heatmap = pn.Param( - view.param.height_heatmap, widgets={"height_heatmap": {"type": pn.widgets.Select, "width": 150}} - ) - save_hm = 
pn.Param(view.param.save_hm, widgets={"save_hm": {"type": pn.widgets.Button, "width": 150}}) - save_options_heatmap = pn.Param( - view.param.save_options_heatmap, - widgets={"save_options_heatmap": {"type": pn.widgets.Select, "width": 150}}, - ) - - return pn.Column( - "## " + self.basename, - pn.Row( - event_selector_heatmap, - color_map, - width_heatmap, - height_heatmap, - save_options_heatmap, - pn.Column(pn.Spacer(height=25), save_hm), - ), - pn.Row(view.heatmap, heatmap_y_parameters), - ) - - def show(self): - """Serve the dashboard in a browser on an available port.""" - logger.info("app") - template = pn.template.MaterialTemplate(title="Visualization GUI") - number = scanPortsAndFind(start_port=5000, end_port=5200) - app = pn.Tabs(("PSTH", self._psth_tab), ("Heat Map", self._heatmap_tab)) - template.main.append(app) - template.show(port=number) diff --git a/src/guppy/frontend/visualization.py b/src/guppy/frontend/visualization.py deleted file mode 100644 index 3f6b174..0000000 --- a/src/guppy/frontend/visualization.py +++ /dev/null @@ -1,582 +0,0 @@ -import logging -import math -import os -import re - -import datashader as ds -import holoviews as hv -import numpy as np -import pandas as pd -import panel as pn -import param -from bokeh.io import export_png, export_svgs -from holoviews import opts -from holoviews.operation.datashader import datashade -from holoviews.plotting.util import process_cmap - -pn.extension() - -logger = logging.getLogger(__name__) - - -# remove unnecessary column names -def remove_cols(cols): - regex = re.compile("bin_err_*") - remove_cols = [cols[i] for i in range(len(cols)) if regex.match(cols[i])] - remove_cols = remove_cols + ["err", "timestamps"] - cols = [i for i in cols if i not in remove_cols] - - return cols - - -# make a new directory for saving plots -def make_dir(filepath): - op = os.path.join(filepath, "saved_plots") - if not os.path.exists(op): - os.mkdir(op) - - return op - - -# create a class to make GUI and plot different graphs -class Viewer(param.Parameterized): - event_selector_objects = param.List(default=None) - event_selector_heatmap_objects = param.List(default=None) - selector_for_multipe_events_plot_objects = param.List(default=None) - color_map_objects = param.List(default=None) - x_objects = param.List(default=None) - y_objects = param.List(default=None) - heatmap_y_objects = param.List(default=None) - psth_y_objects = param.List(default=None) - - filepath = param.Path(default=None) - # create different options and selectors - event_selector = param.ObjectSelector(default=None) - event_selector_heatmap = param.ObjectSelector(default=None) - selector_for_multipe_events_plot = param.ListSelector(default=None) - columns_dict = param.Dict(default=None) - df_new = param.DataFrame(default=None) - x_min = param.Number(default=None) - x_max = param.Number(default=None) - select_trials_checkbox = param.ListSelector(default=["just trials"], objects=["mean", "just trials"]) - Y_Label = param.ObjectSelector(default="y", objects=["y", "z-score", "\u0394F/F"]) - save_options = param.ObjectSelector( - default="None", objects=["None", "save_png_format", "save_svg_format", "save_both_format"] - ) - save_options_heatmap = param.ObjectSelector( - default="None", objects=["None", "save_png_format", "save_svg_format", "save_both_format"] - ) - color_map = param.ObjectSelector(default="plasma") - height_heatmap = param.ObjectSelector(default=600, objects=list(np.arange(0, 5100, 100))[1:]) - width_heatmap = param.ObjectSelector(default=1000, 
objects=list(np.arange(0, 5100, 100))[1:]) - Height_Plot = param.ObjectSelector(default=300, objects=list(np.arange(0, 5100, 100))[1:]) - Width_Plot = param.ObjectSelector(default=1000, objects=list(np.arange(0, 5100, 100))[1:]) - save_hm = param.Action(lambda x: x.param.trigger("save_hm"), label="Save") - save_psth = param.Action(lambda x: x.param.trigger("save_psth"), label="Save") - X_Limit = param.Range(default=(-5, 10)) - Y_Limit = param.Range(bounds=(-50, 50.0)) - - x = param.ObjectSelector(default=None) - y = param.ObjectSelector(default=None) - heatmap_y = param.ListSelector(default=None) - psth_y = param.ListSelector(default=None) - results_hm = dict() - results_psth = dict() - - def __init__(self, **params): - super().__init__(**params) - # Bind selector objects from companion params - self.param.event_selector.objects = self.event_selector_objects - self.param.event_selector_heatmap.objects = self.event_selector_heatmap_objects - self.param.selector_for_multipe_events_plot.objects = self.selector_for_multipe_events_plot_objects - self.param.color_map.objects = self.color_map_objects - self.param.x.objects = self.x_objects - self.param.y.objects = self.y_objects - self.param.heatmap_y.objects = self.heatmap_y_objects - self.param.psth_y.objects = self.psth_y_objects - - # Set defaults - self.event_selector = self.event_selector_objects[0] - self.event_selector_heatmap = self.event_selector_heatmap_objects[0] - self.selector_for_multipe_events_plot = [self.selector_for_multipe_events_plot_objects[0]] - self.x = self.x_objects[0] - self.y = self.y_objects[-2] - self.heatmap_y = [self.heatmap_y_objects[-1]] - - self.param.X_Limit.bounds = (self.x_min, self.x_max) - - # function to save heatmaps when save button on heatmap tab is clicked - @param.depends("save_hm", watch=True) - def save_hm_plots(self): - plot = self.results_hm["plot"] - op = self.results_hm["op"] - save_opts = self.save_options_heatmap - logger.info(save_opts) - if save_opts == "save_svg_format": - p = hv.render(plot, backend="bokeh") - p.output_backend = "svg" - export_svgs(p, filename=op + ".svg") - elif save_opts == "save_png_format": - p = hv.render(plot, backend="bokeh") - export_png(p, filename=op + ".png") - elif save_opts == "save_both_format": - p = hv.render(plot, backend="bokeh") - p.output_backend = "svg" - export_svgs(p, filename=op + ".svg") - p_png = hv.render(plot, backend="bokeh") - export_png(p_png, filename=op + ".png") - else: - return 0 - - # function to save PSTH plots when save button on PSTH tab is clicked - @param.depends("save_psth", watch=True) - def save_psth_plot(self): - plot, op = [], [] - plot.append(self.results_psth["plot_combine"]) - op.append(self.results_psth["op_combine"]) - plot.append(self.results_psth["plot"]) - op.append(self.results_psth["op"]) - for i in range(len(plot)): - temp_plot, temp_op = plot[i], op[i] - save_opts = self.save_options - if save_opts == "save_svg_format": - p = hv.render(temp_plot, backend="bokeh") - p.output_backend = "svg" - export_svgs(p, filename=temp_op + ".svg") - elif save_opts == "save_png_format": - p = hv.render(temp_plot, backend="bokeh") - export_png(p, filename=temp_op + ".png") - elif save_opts == "save_both_format": - p = hv.render(temp_plot, backend="bokeh") - p.output_backend = "svg" - export_svgs(p, filename=temp_op + ".svg") - p_png = hv.render(temp_plot, backend="bokeh") - export_png(p_png, filename=temp_op + ".png") - else: - return 0 - - # function to change Y values based on event selection - @param.depends("event_selector", 
watch=True) - def _update_x_y(self): - x_value = self.columns_dict[self.event_selector] - y_value = self.columns_dict[self.event_selector] - self.param["x"].objects = [x_value[-4]] - self.param["y"].objects = remove_cols(y_value) - self.x = x_value[-4] - self.y = self.param["y"].objects[-2] - - @param.depends("event_selector_heatmap", watch=True) - def _update_df(self): - cols = self.columns_dict[self.event_selector_heatmap] - trial_no = range(1, len(remove_cols(cols)[:-2]) + 1) - trial_ts = ["{} - {}".format(i, j) for i, j in zip(trial_no, remove_cols(cols)[:-2])] + ["All"] - self.param["heatmap_y"].objects = trial_ts - self.heatmap_y = [trial_ts[-1]] - - @param.depends("event_selector", watch=True) - def _update_psth_y(self): - cols = self.columns_dict[self.event_selector] - trial_no = range(1, len(remove_cols(cols)[:-2]) + 1) - trial_ts = ["{} - {}".format(i, j) for i, j in zip(trial_no, remove_cols(cols)[:-2])] - self.param["psth_y"].objects = trial_ts - self.psth_y = [trial_ts[0]] - - # function to plot multiple PSTHs into one plot - - @param.depends( - "selector_for_multipe_events_plot", - "Y_Label", - "save_options", - "X_Limit", - "Y_Limit", - "Height_Plot", - "Width_Plot", - ) - def update_selector(self): - data_curve, cols_curve, data_spread, cols_spread = [], [], [], [] - arr = self.selector_for_multipe_events_plot - df1 = self.df_new - for i in range(len(arr)): - if "bin" in arr[i]: - split = arr[i].rsplit("_", 2) - df_name = split[0] #'{}_{}'.format(split[0], split[1]) - col_name_mean = "{}_{}".format(split[-2], split[-1]) - col_name_err = "{}_err_{}".format(split[-2], split[-1]) - data_curve.append(df1[df_name][col_name_mean]) - cols_curve.append(arr[i]) - data_spread.append(df1[df_name][col_name_err]) - cols_spread.append(arr[i]) - else: - data_curve.append(df1[arr[i]]["mean"]) - cols_curve.append(arr[i] + "_" + "mean") - data_spread.append(df1[arr[i]]["err"]) - cols_spread.append(arr[i] + "_" + "mean") - - if len(arr) > 0: - if self.Y_Limit == None: - self.Y_Limit = (np.nanmin(np.asarray(data_curve)) - 0.5, np.nanmax(np.asarray(data_curve)) + 0.5) - - if "bin" in arr[i]: - split = arr[i].rsplit("_", 2) - df_name = split[0] - data_curve.append(df1[df_name]["timestamps"]) - cols_curve.append("timestamps") - data_spread.append(df1[df_name]["timestamps"]) - cols_spread.append("timestamps") - else: - data_curve.append(df1[arr[i]]["timestamps"]) - cols_curve.append("timestamps") - data_spread.append(df1[arr[i]]["timestamps"]) - cols_spread.append("timestamps") - df_curve = pd.concat(data_curve, axis=1) - df_spread = pd.concat(data_spread, axis=1) - df_curve.columns = cols_curve - df_spread.columns = cols_spread - - ts = df_curve["timestamps"] - index = np.arange(0, ts.shape[0], 3) - df_curve = df_curve.loc[index, :] - df_spread = df_spread.loc[index, :] - overlay = hv.NdOverlay( - { - c: hv.Curve((df_curve["timestamps"], df_curve[c]), kdims=["Time (s)"]).opts( - width=int(self.Width_Plot), - height=int(self.Height_Plot), - xlim=self.X_Limit, - ylim=self.Y_Limit, - ) - for c in cols_curve[:-1] - } - ) - spread = hv.NdOverlay( - { - d: hv.Spread( - (df_spread["timestamps"], df_curve[d], df_spread[d], df_spread[d]), - vdims=["y", "yerrpos", "yerrneg"], - ).opts(line_width=0, fill_alpha=0.3) - for d in cols_spread[:-1] - } - ) - plot_combine = ((overlay * spread).opts(opts.NdOverlay(xlabel="Time (s)", ylabel=self.Y_Label))).opts( - shared_axes=False - ) - # plot_err = new_df.hvplot.area(x='timestamps', y=[], y2=[]) - save_opts = self.save_options - op = make_dir(self.filepath) - 
op_filename = os.path.join(op, str(arr) + "_mean") - - self.results_psth["plot_combine"] = plot_combine - self.results_psth["op_combine"] = op_filename - # self.save_plots(plot_combine, save_opts, op_filename) - return plot_combine - - # function to plot mean PSTH, single trial in PSTH and all the trials of PSTH with mean - @param.depends( - "event_selector", "x", "y", "Y_Label", "save_options", "Y_Limit", "X_Limit", "Height_Plot", "Width_Plot" - ) - def contPlot(self): - df1 = self.df_new[self.event_selector] - # height = self.Heigth_Plot - # width = self.Width_Plot - # logger.info(height, width) - if self.y == "All": - if self.Y_Limit == None: - self.Y_Limit = (np.nanmin(np.asarray(df1)) - 0.5, np.nanmax(np.asarray(df1)) - 0.5) - - options = self.param["y"].objects - regex = re.compile("bin_[(]") - remove_bin_trials = [options[i] for i in range(len(options)) if not regex.match(options[i])] - - ndoverlay = hv.NdOverlay({c: hv.Curve((df1[self.x], df1[c])) for c in remove_bin_trials[:-2]}) - img1 = datashade(ndoverlay, normalization="linear", aggregator=ds.count()) - x_points = df1[self.x] - y_points = df1["mean"] - img2 = hv.Curve((x_points, y_points)) - img = (img1 * img2).opts( - opts.Curve( - width=int(self.Width_Plot), - height=int(self.Height_Plot), - line_width=4, - color="black", - xlim=self.X_Limit, - ylim=self.Y_Limit, - xlabel="Time (s)", - ylabel=self.Y_Label, - ) - ) - - save_opts = self.save_options - - op = make_dir(self.filepath) - op_filename = os.path.join(op, self.event_selector + "_" + self.y) - self.results_psth["plot"] = img - self.results_psth["op"] = op_filename - # self.save_plots(img, save_opts, op_filename) - - return img - - elif self.y == "mean" or "bin" in self.y: - - xpoints = df1[self.x] - ypoints = df1[self.y] - if self.y == "mean": - err = df1["err"] - else: - split = self.y.split("_") - err = df1["{}_err_{}".format(split[0], split[1])] - - index = np.arange(0, xpoints.shape[0], 3) - - if self.Y_Limit == None: - self.Y_Limit = (np.nanmin(ypoints) - 0.5, np.nanmax(ypoints) + 0.5) - - ropts_curve = dict( - width=int(self.Width_Plot), - height=int(self.Height_Plot), - xlim=self.X_Limit, - ylim=self.Y_Limit, - color="blue", - xlabel="Time (s)", - ylabel=self.Y_Label, - ) - ropts_spread = dict( - width=int(self.Width_Plot), - height=int(self.Height_Plot), - fill_alpha=0.3, - fill_color="blue", - line_width=0, - ) - - plot_curve = hv.Curve((xpoints[index], ypoints[index])) # .opts(**ropts_curve) - plot_spread = hv.Spread( - (xpoints[index], ypoints[index], err[index], err[index]) - ) # .opts(**ropts_spread) #vdims=['y', 'yerrpos', 'yerrneg'] - plot = (plot_curve * plot_spread).opts({"Curve": ropts_curve, "Spread": ropts_spread}) - - save_opts = self.save_options - op = make_dir(self.filepath) - op_filename = os.path.join(op, self.event_selector + "_" + self.y) - self.results_psth["plot"] = plot - self.results_psth["op"] = op_filename - # self.save_plots(plot, save_opts, op_filename) - - return plot - - else: - xpoints = df1[self.x] - ypoints = df1[self.y] - if self.Y_Limit == None: - self.Y_Limit = (np.nanmin(ypoints) - 0.5, np.nanmax(ypoints) + 0.5) - - ropts_curve = dict( - width=int(self.Width_Plot), - height=int(self.Height_Plot), - xlim=self.X_Limit, - ylim=self.Y_Limit, - color="blue", - xlabel="Time (s)", - ylabel=self.Y_Label, - ) - plot = hv.Curve((xpoints, ypoints)).opts({"Curve": ropts_curve}) - - save_opts = self.save_options - op = make_dir(self.filepath) - op_filename = os.path.join(op, self.event_selector + "_" + self.y) - self.results_psth["plot"] 
= plot - self.results_psth["op"] = op_filename - # self.save_plots(plot, save_opts, op_filename) - - return plot - - # function to plot specific PSTH trials - @param.depends( - "event_selector", - "x", - "psth_y", - "select_trials_checkbox", - "Y_Label", - "save_options", - "Y_Limit", - "X_Limit", - "Height_Plot", - "Width_Plot", - ) - def plot_specific_trials(self): - df_psth = self.df_new[self.event_selector] - # if self.Y_Limit==None: - # self.Y_Limit = (np.nanmin(ypoints)-0.5, np.nanmax(ypoints)+0.5) - - if self.psth_y == None: - return None - else: - selected_trials = [s.split(" - ")[1] for s in list(self.psth_y)] - - index = np.arange(0, df_psth["timestamps"].shape[0], 3) - - if self.select_trials_checkbox == ["just trials"]: - overlay = hv.NdOverlay( - { - c: hv.Curve((df_psth["timestamps"][index], df_psth[c][index]), kdims=["Time (s)"]) - for c in selected_trials - } - ) - ropts = dict( - width=int(self.Width_Plot), - height=int(self.Height_Plot), - xlim=self.X_Limit, - ylim=self.Y_Limit, - xlabel="Time (s)", - ylabel=self.Y_Label, - ) - return overlay.opts(**ropts) - elif self.select_trials_checkbox == ["mean"]: - arr = np.asarray(df_psth[selected_trials]) - mean = np.nanmean(arr, axis=1) - err = np.nanstd(arr, axis=1) / math.sqrt(arr.shape[1]) - ropts_curve = dict( - width=int(self.Width_Plot), - height=int(self.Height_Plot), - xlim=self.X_Limit, - ylim=self.Y_Limit, - color="blue", - xlabel="Time (s)", - ylabel=self.Y_Label, - ) - ropts_spread = dict( - width=int(self.Width_Plot), - height=int(self.Height_Plot), - fill_alpha=0.3, - fill_color="blue", - line_width=0, - ) - plot_curve = hv.Curve((df_psth["timestamps"][index], mean[index])) - plot_spread = hv.Spread((df_psth["timestamps"][index], mean[index], err[index], err[index])) - plot = (plot_curve * plot_spread).opts({"Curve": ropts_curve, "Spread": ropts_spread}) - return plot - elif self.select_trials_checkbox == ["mean", "just trials"]: - overlay = hv.NdOverlay( - { - c: hv.Curve((df_psth["timestamps"][index], df_psth[c][index]), kdims=["Time (s)"]) - for c in selected_trials - } - ) - ropts_overlay = dict( - width=int(self.Width_Plot), - height=int(self.Height_Plot), - xlim=self.X_Limit, - ylim=self.Y_Limit, - xlabel="Time (s)", - ylabel=self.Y_Label, - ) - - arr = np.asarray(df_psth[selected_trials]) - mean = np.nanmean(arr, axis=1) - err = np.nanstd(arr, axis=1) / math.sqrt(arr.shape[1]) - ropts_curve = dict( - width=int(self.Width_Plot), - height=int(self.Height_Plot), - xlim=self.X_Limit, - ylim=self.Y_Limit, - color="black", - xlabel="Time (s)", - ylabel=self.Y_Label, - ) - ropts_spread = dict( - width=int(self.Width_Plot), - height=int(self.Height_Plot), - fill_alpha=0.3, - fill_color="black", - line_width=0, - ) - plot_curve = hv.Curve((df_psth["timestamps"][index], mean[index])) - plot_spread = hv.Spread((df_psth["timestamps"][index], mean[index], err[index], err[index])) - - plot = (plot_curve * plot_spread).opts({"Curve": ropts_curve, "Spread": ropts_spread}) - return overlay.opts(**ropts_overlay) * plot - - # function to show heatmaps for each event - @param.depends("event_selector_heatmap", "color_map", "height_heatmap", "width_heatmap", "heatmap_y") - def heatmap(self): - height = self.height_heatmap - width = self.width_heatmap - df_hm = self.df_new[self.event_selector_heatmap] - cols = list(df_hm.columns) - regex = re.compile("bin_err_*") - drop_cols = [cols[i] for i in range(len(cols)) if regex.match(cols[i])] - drop_cols = ["err", "mean"] + drop_cols - df_hm = df_hm.drop(drop_cols, axis=1) - cols = 
list(df_hm.columns) - bin_cols = [cols[i] for i in range(len(cols)) if re.compile("bin_*").match(cols[i])] - time = np.asarray(df_hm["timestamps"]) - event_ts_for_each_event = np.arange(1, len(df_hm.columns[:-1]) + 1) - yticks = list(event_ts_for_each_event) - z_score = np.asarray(df_hm[df_hm.columns[:-1]]).T - - if self.heatmap_y[0] == "All": - indices = np.arange(z_score.shape[0] - len(bin_cols)) - z_score = z_score[indices, :] - event_ts_for_each_event = np.arange(1, z_score.shape[0] + 1) - yticks = list(event_ts_for_each_event) - else: - remove_all = list(set(self.heatmap_y) - set(["All"])) - indices = sorted([int(s.split("-")[0]) - 1 for s in remove_all]) - z_score = z_score[indices, :] - event_ts_for_each_event = np.arange(1, z_score.shape[0] + 1) - yticks = list(event_ts_for_each_event) - - clim = (np.nanmin(z_score), np.nanmax(z_score)) - font_size = {"labels": 16, "yticks": 6} - - if event_ts_for_each_event.shape[0] == 1: - dummy_image = hv.QuadMesh((time, event_ts_for_each_event, z_score)).opts(colorbar=True, clim=clim) - image = ( - (dummy_image).opts( - opts.QuadMesh( - width=int(width), - height=int(height), - cmap=process_cmap(self.color_map, provider="matplotlib"), - colorbar=True, - ylabel="Trials", - xlabel="Time (s)", - fontsize=font_size, - yticks=yticks, - ) - ) - ).opts(shared_axes=False) - - save_opts = self.save_options_heatmap - op = make_dir(self.filepath) - op_filename = os.path.join(op, self.event_selector_heatmap + "_" + "heatmap") - self.results_hm["plot"] = image - self.results_hm["op"] = op_filename - # self.save_plots(image, save_opts, op_filename) - return image - else: - ropts = dict( - width=int(width), - height=int(height), - ylabel="Trials", - xlabel="Time (s)", - fontsize=font_size, - yticks=yticks, - invert_yaxis=True, - ) - dummy_image = hv.QuadMesh((time[0:100], event_ts_for_each_event, z_score[:, 0:100])).opts( - colorbar=True, cmap=process_cmap(self.color_map, provider="matplotlib"), clim=clim - ) - actual_image = hv.QuadMesh((time, event_ts_for_each_event, z_score)) - - dynspread_img = datashade(actual_image, cmap=process_cmap(self.color_map, provider="matplotlib")).opts( - **ropts - ) # clims=self.C_Limit, cnorm='log' - image = ((dummy_image * dynspread_img).opts(opts.QuadMesh(width=int(width), height=int(height)))).opts( - shared_axes=False - ) - - save_opts = self.save_options_heatmap - op = make_dir(self.filepath) - op_filename = os.path.join(op, self.event_selector_heatmap + "_" + "heatmap") - self.results_hm["plot"] = image - self.results_hm["op"] = op_filename - - return image diff --git a/src/guppy/visualizePlot.py b/src/guppy/visualizePlot.py index 330b87b..33a712a 100755 --- a/src/guppy/visualizePlot.py +++ b/src/guppy/visualizePlot.py @@ -7,8 +7,8 @@ import numpy as np import pandas as pd -from .frontend.dashboard import VisualizationDashboard -from .frontend.visualization import Viewer, remove_cols +from .frontend.parameterized_plotter import ParameterizedPlotter, remove_cols +from .frontend.visualization_dashboard import VisualizationDashboard from .utils.utils import get_all_stores_for_combining_data, read_Df, takeOnlyDirs logger = logging.getLogger(__name__) @@ -115,7 +115,7 @@ def helper_plots(filepath, event, name, inputParameters): "{} - {}".format(i, j) for i, j in zip(trial_no, remove_cols(columns_dict[heatmap_options[0]])[:-2]) ] + ["All"] - viewer = Viewer( + plotter = ParameterizedPlotter( event_selector_objects=new_event, event_selector_heatmap_objects=heatmap_options, 
selector_for_multipe_events_plot_objects=multiple_plots_options, @@ -130,7 +130,7 @@ def helper_plots(filepath, event, name, inputParameters): heatmap_y_objects=trial_ts, psth_y_objects=trial_ts[:-1], ) - dashboard = VisualizationDashboard(viewer=viewer, basename=basename) + dashboard = VisualizationDashboard(plotter=plotter, basename=basename) dashboard.show() From 7d6db84ef06dfe53085687fed54f20f250c4df63 Mon Sep 17 00:00:00 2001 From: pauladkisson Date: Fri, 6 Feb 2026 10:43:48 -0800 Subject: [PATCH 52/53] Renamed some classes --- src/guppy/frontend/parameterized_plotter.py | 582 ++++++++++++++++++ src/guppy/frontend/visualization_dashboard.py | 158 +++++ 2 files changed, 740 insertions(+) create mode 100644 src/guppy/frontend/parameterized_plotter.py create mode 100644 src/guppy/frontend/visualization_dashboard.py diff --git a/src/guppy/frontend/parameterized_plotter.py b/src/guppy/frontend/parameterized_plotter.py new file mode 100644 index 0000000..2d7d2f1 --- /dev/null +++ b/src/guppy/frontend/parameterized_plotter.py @@ -0,0 +1,582 @@ +import logging +import math +import os +import re + +import datashader as ds +import holoviews as hv +import numpy as np +import pandas as pd +import panel as pn +import param +from bokeh.io import export_png, export_svgs +from holoviews import opts +from holoviews.operation.datashader import datashade +from holoviews.plotting.util import process_cmap + +pn.extension() + +logger = logging.getLogger(__name__) + + +# remove unnecessary column names +def remove_cols(cols): + regex = re.compile("bin_err_*") + remove_cols = [cols[i] for i in range(len(cols)) if regex.match(cols[i])] + remove_cols = remove_cols + ["err", "timestamps"] + cols = [i for i in cols if i not in remove_cols] + + return cols + + +# make a new directory for saving plots +def make_dir(filepath): + op = os.path.join(filepath, "saved_plots") + if not os.path.exists(op): + os.mkdir(op) + + return op + + +# create a class to make GUI and plot different graphs +class ParameterizedPlotter(param.Parameterized): + event_selector_objects = param.List(default=None) + event_selector_heatmap_objects = param.List(default=None) + selector_for_multipe_events_plot_objects = param.List(default=None) + color_map_objects = param.List(default=None) + x_objects = param.List(default=None) + y_objects = param.List(default=None) + heatmap_y_objects = param.List(default=None) + psth_y_objects = param.List(default=None) + + filepath = param.Path(default=None) + # create different options and selectors + event_selector = param.ObjectSelector(default=None) + event_selector_heatmap = param.ObjectSelector(default=None) + selector_for_multipe_events_plot = param.ListSelector(default=None) + columns_dict = param.Dict(default=None) + df_new = param.DataFrame(default=None) + x_min = param.Number(default=None) + x_max = param.Number(default=None) + select_trials_checkbox = param.ListSelector(default=["just trials"], objects=["mean", "just trials"]) + Y_Label = param.ObjectSelector(default="y", objects=["y", "z-score", "\u0394F/F"]) + save_options = param.ObjectSelector( + default="None", objects=["None", "save_png_format", "save_svg_format", "save_both_format"] + ) + save_options_heatmap = param.ObjectSelector( + default="None", objects=["None", "save_png_format", "save_svg_format", "save_both_format"] + ) + color_map = param.ObjectSelector(default="plasma") + height_heatmap = param.ObjectSelector(default=600, objects=list(np.arange(0, 5100, 100))[1:]) + width_heatmap = param.ObjectSelector(default=1000, 
objects=list(np.arange(0, 5100, 100))[1:]) + Height_Plot = param.ObjectSelector(default=300, objects=list(np.arange(0, 5100, 100))[1:]) + Width_Plot = param.ObjectSelector(default=1000, objects=list(np.arange(0, 5100, 100))[1:]) + save_hm = param.Action(lambda x: x.param.trigger("save_hm"), label="Save") + save_psth = param.Action(lambda x: x.param.trigger("save_psth"), label="Save") + X_Limit = param.Range(default=(-5, 10)) + Y_Limit = param.Range(bounds=(-50, 50.0)) + + x = param.ObjectSelector(default=None) + y = param.ObjectSelector(default=None) + heatmap_y = param.ListSelector(default=None) + psth_y = param.ListSelector(default=None) + results_hm = dict() + results_psth = dict() + + def __init__(self, **params): + super().__init__(**params) + # Bind selector objects from companion params + self.param.event_selector.objects = self.event_selector_objects + self.param.event_selector_heatmap.objects = self.event_selector_heatmap_objects + self.param.selector_for_multipe_events_plot.objects = self.selector_for_multipe_events_plot_objects + self.param.color_map.objects = self.color_map_objects + self.param.x.objects = self.x_objects + self.param.y.objects = self.y_objects + self.param.heatmap_y.objects = self.heatmap_y_objects + self.param.psth_y.objects = self.psth_y_objects + + # Set defaults + self.event_selector = self.event_selector_objects[0] + self.event_selector_heatmap = self.event_selector_heatmap_objects[0] + self.selector_for_multipe_events_plot = [self.selector_for_multipe_events_plot_objects[0]] + self.x = self.x_objects[0] + self.y = self.y_objects[-2] + self.heatmap_y = [self.heatmap_y_objects[-1]] + + self.param.X_Limit.bounds = (self.x_min, self.x_max) + + # function to save heatmaps when save button on heatmap tab is clicked + @param.depends("save_hm", watch=True) + def save_hm_plots(self): + plot = self.results_hm["plot"] + op = self.results_hm["op"] + save_opts = self.save_options_heatmap + logger.info(save_opts) + if save_opts == "save_svg_format": + p = hv.render(plot, backend="bokeh") + p.output_backend = "svg" + export_svgs(p, filename=op + ".svg") + elif save_opts == "save_png_format": + p = hv.render(plot, backend="bokeh") + export_png(p, filename=op + ".png") + elif save_opts == "save_both_format": + p = hv.render(plot, backend="bokeh") + p.output_backend = "svg" + export_svgs(p, filename=op + ".svg") + p_png = hv.render(plot, backend="bokeh") + export_png(p_png, filename=op + ".png") + else: + return 0 + + # function to save PSTH plots when save button on PSTH tab is clicked + @param.depends("save_psth", watch=True) + def save_psth_plot(self): + plot, op = [], [] + plot.append(self.results_psth["plot_combine"]) + op.append(self.results_psth["op_combine"]) + plot.append(self.results_psth["plot"]) + op.append(self.results_psth["op"]) + for i in range(len(plot)): + temp_plot, temp_op = plot[i], op[i] + save_opts = self.save_options + if save_opts == "save_svg_format": + p = hv.render(temp_plot, backend="bokeh") + p.output_backend = "svg" + export_svgs(p, filename=temp_op + ".svg") + elif save_opts == "save_png_format": + p = hv.render(temp_plot, backend="bokeh") + export_png(p, filename=temp_op + ".png") + elif save_opts == "save_both_format": + p = hv.render(temp_plot, backend="bokeh") + p.output_backend = "svg" + export_svgs(p, filename=temp_op + ".svg") + p_png = hv.render(temp_plot, backend="bokeh") + export_png(p_png, filename=temp_op + ".png") + else: + return 0 + + # function to change Y values based on event selection + @param.depends("event_selector", 
watch=True) + def _update_x_y(self): + x_value = self.columns_dict[self.event_selector] + y_value = self.columns_dict[self.event_selector] + self.param["x"].objects = [x_value[-4]] + self.param["y"].objects = remove_cols(y_value) + self.x = x_value[-4] + self.y = self.param["y"].objects[-2] + + @param.depends("event_selector_heatmap", watch=True) + def _update_df(self): + cols = self.columns_dict[self.event_selector_heatmap] + trial_no = range(1, len(remove_cols(cols)[:-2]) + 1) + trial_ts = ["{} - {}".format(i, j) for i, j in zip(trial_no, remove_cols(cols)[:-2])] + ["All"] + self.param["heatmap_y"].objects = trial_ts + self.heatmap_y = [trial_ts[-1]] + + @param.depends("event_selector", watch=True) + def _update_psth_y(self): + cols = self.columns_dict[self.event_selector] + trial_no = range(1, len(remove_cols(cols)[:-2]) + 1) + trial_ts = ["{} - {}".format(i, j) for i, j in zip(trial_no, remove_cols(cols)[:-2])] + self.param["psth_y"].objects = trial_ts + self.psth_y = [trial_ts[0]] + + # function to plot multiple PSTHs into one plot + + @param.depends( + "selector_for_multipe_events_plot", + "Y_Label", + "save_options", + "X_Limit", + "Y_Limit", + "Height_Plot", + "Width_Plot", + ) + def update_selector(self): + data_curve, cols_curve, data_spread, cols_spread = [], [], [], [] + arr = self.selector_for_multipe_events_plot + df1 = self.df_new + for i in range(len(arr)): + if "bin" in arr[i]: + split = arr[i].rsplit("_", 2) + df_name = split[0] #'{}_{}'.format(split[0], split[1]) + col_name_mean = "{}_{}".format(split[-2], split[-1]) + col_name_err = "{}_err_{}".format(split[-2], split[-1]) + data_curve.append(df1[df_name][col_name_mean]) + cols_curve.append(arr[i]) + data_spread.append(df1[df_name][col_name_err]) + cols_spread.append(arr[i]) + else: + data_curve.append(df1[arr[i]]["mean"]) + cols_curve.append(arr[i] + "_" + "mean") + data_spread.append(df1[arr[i]]["err"]) + cols_spread.append(arr[i] + "_" + "mean") + + if len(arr) > 0: + if self.Y_Limit == None: + self.Y_Limit = (np.nanmin(np.asarray(data_curve)) - 0.5, np.nanmax(np.asarray(data_curve)) + 0.5) + + if "bin" in arr[i]: + split = arr[i].rsplit("_", 2) + df_name = split[0] + data_curve.append(df1[df_name]["timestamps"]) + cols_curve.append("timestamps") + data_spread.append(df1[df_name]["timestamps"]) + cols_spread.append("timestamps") + else: + data_curve.append(df1[arr[i]]["timestamps"]) + cols_curve.append("timestamps") + data_spread.append(df1[arr[i]]["timestamps"]) + cols_spread.append("timestamps") + df_curve = pd.concat(data_curve, axis=1) + df_spread = pd.concat(data_spread, axis=1) + df_curve.columns = cols_curve + df_spread.columns = cols_spread + + ts = df_curve["timestamps"] + index = np.arange(0, ts.shape[0], 3) + df_curve = df_curve.loc[index, :] + df_spread = df_spread.loc[index, :] + overlay = hv.NdOverlay( + { + c: hv.Curve((df_curve["timestamps"], df_curve[c]), kdims=["Time (s)"]).opts( + width=int(self.Width_Plot), + height=int(self.Height_Plot), + xlim=self.X_Limit, + ylim=self.Y_Limit, + ) + for c in cols_curve[:-1] + } + ) + spread = hv.NdOverlay( + { + d: hv.Spread( + (df_spread["timestamps"], df_curve[d], df_spread[d], df_spread[d]), + vdims=["y", "yerrpos", "yerrneg"], + ).opts(line_width=0, fill_alpha=0.3) + for d in cols_spread[:-1] + } + ) + plot_combine = ((overlay * spread).opts(opts.NdOverlay(xlabel="Time (s)", ylabel=self.Y_Label))).opts( + shared_axes=False + ) + # plot_err = new_df.hvplot.area(x='timestamps', y=[], y2=[]) + save_opts = self.save_options + op = make_dir(self.filepath) + 
op_filename = os.path.join(op, str(arr) + "_mean") + + self.results_psth["plot_combine"] = plot_combine + self.results_psth["op_combine"] = op_filename + # self.save_plots(plot_combine, save_opts, op_filename) + return plot_combine + + # function to plot mean PSTH, single trial in PSTH and all the trials of PSTH with mean + @param.depends( + "event_selector", "x", "y", "Y_Label", "save_options", "Y_Limit", "X_Limit", "Height_Plot", "Width_Plot" + ) + def contPlot(self): + df1 = self.df_new[self.event_selector] + # height = self.Heigth_Plot + # width = self.Width_Plot + # logger.info(height, width) + if self.y == "All": + if self.Y_Limit == None: + self.Y_Limit = (np.nanmin(np.asarray(df1)) - 0.5, np.nanmax(np.asarray(df1)) - 0.5) + + options = self.param["y"].objects + regex = re.compile("bin_[(]") + remove_bin_trials = [options[i] for i in range(len(options)) if not regex.match(options[i])] + + ndoverlay = hv.NdOverlay({c: hv.Curve((df1[self.x], df1[c])) for c in remove_bin_trials[:-2]}) + img1 = datashade(ndoverlay, normalization="linear", aggregator=ds.count()) + x_points = df1[self.x] + y_points = df1["mean"] + img2 = hv.Curve((x_points, y_points)) + img = (img1 * img2).opts( + opts.Curve( + width=int(self.Width_Plot), + height=int(self.Height_Plot), + line_width=4, + color="black", + xlim=self.X_Limit, + ylim=self.Y_Limit, + xlabel="Time (s)", + ylabel=self.Y_Label, + ) + ) + + save_opts = self.save_options + + op = make_dir(self.filepath) + op_filename = os.path.join(op, self.event_selector + "_" + self.y) + self.results_psth["plot"] = img + self.results_psth["op"] = op_filename + # self.save_plots(img, save_opts, op_filename) + + return img + + elif self.y == "mean" or "bin" in self.y: + + xpoints = df1[self.x] + ypoints = df1[self.y] + if self.y == "mean": + err = df1["err"] + else: + split = self.y.split("_") + err = df1["{}_err_{}".format(split[0], split[1])] + + index = np.arange(0, xpoints.shape[0], 3) + + if self.Y_Limit == None: + self.Y_Limit = (np.nanmin(ypoints) - 0.5, np.nanmax(ypoints) + 0.5) + + ropts_curve = dict( + width=int(self.Width_Plot), + height=int(self.Height_Plot), + xlim=self.X_Limit, + ylim=self.Y_Limit, + color="blue", + xlabel="Time (s)", + ylabel=self.Y_Label, + ) + ropts_spread = dict( + width=int(self.Width_Plot), + height=int(self.Height_Plot), + fill_alpha=0.3, + fill_color="blue", + line_width=0, + ) + + plot_curve = hv.Curve((xpoints[index], ypoints[index])) # .opts(**ropts_curve) + plot_spread = hv.Spread( + (xpoints[index], ypoints[index], err[index], err[index]) + ) # .opts(**ropts_spread) #vdims=['y', 'yerrpos', 'yerrneg'] + plot = (plot_curve * plot_spread).opts({"Curve": ropts_curve, "Spread": ropts_spread}) + + save_opts = self.save_options + op = make_dir(self.filepath) + op_filename = os.path.join(op, self.event_selector + "_" + self.y) + self.results_psth["plot"] = plot + self.results_psth["op"] = op_filename + # self.save_plots(plot, save_opts, op_filename) + + return plot + + else: + xpoints = df1[self.x] + ypoints = df1[self.y] + if self.Y_Limit == None: + self.Y_Limit = (np.nanmin(ypoints) - 0.5, np.nanmax(ypoints) + 0.5) + + ropts_curve = dict( + width=int(self.Width_Plot), + height=int(self.Height_Plot), + xlim=self.X_Limit, + ylim=self.Y_Limit, + color="blue", + xlabel="Time (s)", + ylabel=self.Y_Label, + ) + plot = hv.Curve((xpoints, ypoints)).opts({"Curve": ropts_curve}) + + save_opts = self.save_options + op = make_dir(self.filepath) + op_filename = os.path.join(op, self.event_selector + "_" + self.y) + self.results_psth["plot"] 
= plot + self.results_psth["op"] = op_filename + # self.save_plots(plot, save_opts, op_filename) + + return plot + + # function to plot specific PSTH trials + @param.depends( + "event_selector", + "x", + "psth_y", + "select_trials_checkbox", + "Y_Label", + "save_options", + "Y_Limit", + "X_Limit", + "Height_Plot", + "Width_Plot", + ) + def plot_specific_trials(self): + df_psth = self.df_new[self.event_selector] + # if self.Y_Limit==None: + # self.Y_Limit = (np.nanmin(ypoints)-0.5, np.nanmax(ypoints)+0.5) + + if self.psth_y == None: + return None + else: + selected_trials = [s.split(" - ")[1] for s in list(self.psth_y)] + + index = np.arange(0, df_psth["timestamps"].shape[0], 3) + + if self.select_trials_checkbox == ["just trials"]: + overlay = hv.NdOverlay( + { + c: hv.Curve((df_psth["timestamps"][index], df_psth[c][index]), kdims=["Time (s)"]) + for c in selected_trials + } + ) + ropts = dict( + width=int(self.Width_Plot), + height=int(self.Height_Plot), + xlim=self.X_Limit, + ylim=self.Y_Limit, + xlabel="Time (s)", + ylabel=self.Y_Label, + ) + return overlay.opts(**ropts) + elif self.select_trials_checkbox == ["mean"]: + arr = np.asarray(df_psth[selected_trials]) + mean = np.nanmean(arr, axis=1) + err = np.nanstd(arr, axis=1) / math.sqrt(arr.shape[1]) + ropts_curve = dict( + width=int(self.Width_Plot), + height=int(self.Height_Plot), + xlim=self.X_Limit, + ylim=self.Y_Limit, + color="blue", + xlabel="Time (s)", + ylabel=self.Y_Label, + ) + ropts_spread = dict( + width=int(self.Width_Plot), + height=int(self.Height_Plot), + fill_alpha=0.3, + fill_color="blue", + line_width=0, + ) + plot_curve = hv.Curve((df_psth["timestamps"][index], mean[index])) + plot_spread = hv.Spread((df_psth["timestamps"][index], mean[index], err[index], err[index])) + plot = (plot_curve * plot_spread).opts({"Curve": ropts_curve, "Spread": ropts_spread}) + return plot + elif self.select_trials_checkbox == ["mean", "just trials"]: + overlay = hv.NdOverlay( + { + c: hv.Curve((df_psth["timestamps"][index], df_psth[c][index]), kdims=["Time (s)"]) + for c in selected_trials + } + ) + ropts_overlay = dict( + width=int(self.Width_Plot), + height=int(self.Height_Plot), + xlim=self.X_Limit, + ylim=self.Y_Limit, + xlabel="Time (s)", + ylabel=self.Y_Label, + ) + + arr = np.asarray(df_psth[selected_trials]) + mean = np.nanmean(arr, axis=1) + err = np.nanstd(arr, axis=1) / math.sqrt(arr.shape[1]) + ropts_curve = dict( + width=int(self.Width_Plot), + height=int(self.Height_Plot), + xlim=self.X_Limit, + ylim=self.Y_Limit, + color="black", + xlabel="Time (s)", + ylabel=self.Y_Label, + ) + ropts_spread = dict( + width=int(self.Width_Plot), + height=int(self.Height_Plot), + fill_alpha=0.3, + fill_color="black", + line_width=0, + ) + plot_curve = hv.Curve((df_psth["timestamps"][index], mean[index])) + plot_spread = hv.Spread((df_psth["timestamps"][index], mean[index], err[index], err[index])) + + plot = (plot_curve * plot_spread).opts({"Curve": ropts_curve, "Spread": ropts_spread}) + return overlay.opts(**ropts_overlay) * plot + + # function to show heatmaps for each event + @param.depends("event_selector_heatmap", "color_map", "height_heatmap", "width_heatmap", "heatmap_y") + def heatmap(self): + height = self.height_heatmap + width = self.width_heatmap + df_hm = self.df_new[self.event_selector_heatmap] + cols = list(df_hm.columns) + regex = re.compile("bin_err_*") + drop_cols = [cols[i] for i in range(len(cols)) if regex.match(cols[i])] + drop_cols = ["err", "mean"] + drop_cols + df_hm = df_hm.drop(drop_cols, axis=1) + cols = 
list(df_hm.columns)
+        bin_cols = [cols[i] for i in range(len(cols)) if re.compile("bin_*").match(cols[i])]
+        time = np.asarray(df_hm["timestamps"])
+        event_ts_for_each_event = np.arange(1, len(df_hm.columns[:-1]) + 1)
+        yticks = list(event_ts_for_each_event)
+        z_score = np.asarray(df_hm[df_hm.columns[:-1]]).T
+
+        if self.heatmap_y[0] == "All":
+            indices = np.arange(z_score.shape[0] - len(bin_cols))
+            z_score = z_score[indices, :]
+            event_ts_for_each_event = np.arange(1, z_score.shape[0] + 1)
+            yticks = list(event_ts_for_each_event)
+        else:
+            remove_all = list(set(self.heatmap_y) - set(["All"]))
+            indices = sorted([int(s.split("-")[0]) - 1 for s in remove_all])
+            z_score = z_score[indices, :]
+            event_ts_for_each_event = np.arange(1, z_score.shape[0] + 1)
+            yticks = list(event_ts_for_each_event)
+
+        clim = (np.nanmin(z_score), np.nanmax(z_score))
+        font_size = {"labels": 16, "yticks": 6}
+
+        if event_ts_for_each_event.shape[0] == 1:
+            dummy_image = hv.QuadMesh((time, event_ts_for_each_event, z_score)).opts(colorbar=True, clim=clim)
+            image = (
+                (dummy_image).opts(
+                    opts.QuadMesh(
+                        width=int(width),
+                        height=int(height),
+                        cmap=process_cmap(self.color_map, provider="matplotlib"),
+                        colorbar=True,
+                        ylabel="Trials",
+                        xlabel="Time (s)",
+                        fontsize=font_size,
+                        yticks=yticks,
+                    )
+                )
+            ).opts(shared_axes=False)
+
+            save_opts = self.save_options_heatmap
+            op = make_dir(self.filepath)
+            op_filename = os.path.join(op, self.event_selector_heatmap + "_" + "heatmap")
+            self.results_hm["plot"] = image
+            self.results_hm["op"] = op_filename
+            # self.save_plots(image, save_opts, op_filename)
+            return image
+        else:
+            ropts = dict(
+                width=int(width),
+                height=int(height),
+                ylabel="Trials",
+                xlabel="Time (s)",
+                fontsize=font_size,
+                yticks=yticks,
+                invert_yaxis=True,
+            )
+            dummy_image = hv.QuadMesh((time[0:100], event_ts_for_each_event, z_score[:, 0:100])).opts(
+                colorbar=True, cmap=process_cmap(self.color_map, provider="matplotlib"), clim=clim
+            )
+            actual_image = hv.QuadMesh((time, event_ts_for_each_event, z_score))
+
+            dynspread_img = datashade(actual_image, cmap=process_cmap(self.color_map, provider="matplotlib")).opts(
+                **ropts
+            )  # clims=self.C_Limit, cnorm='log'
+            image = ((dummy_image * dynspread_img).opts(opts.QuadMesh(width=int(width), height=int(height)))).opts(
+                shared_axes=False
+            )
+
+            save_opts = self.save_options_heatmap
+            op = make_dir(self.filepath)
+            op_filename = os.path.join(op, self.event_selector_heatmap + "_" + "heatmap")
+            self.results_hm["plot"] = image
+            self.results_hm["op"] = op_filename
+
+            return image
diff --git a/src/guppy/frontend/visualization_dashboard.py b/src/guppy/frontend/visualization_dashboard.py
new file mode 100644
index 0000000..1444b86
--- /dev/null
+++ b/src/guppy/frontend/visualization_dashboard.py
@@ -0,0 +1,158 @@
+import logging
+
+import panel as pn
+
+from .frontend_utils import scanPortsAndFind
+
+pn.extension()
+
+logger = logging.getLogger(__name__)
+
+
+class VisualizationDashboard:
+    """Dashboard for interactive PSTH and heatmap visualization.
+
+    Wraps a ``ParameterizedPlotter`` instance with Panel widgets and a tabbed layout.
+    Data loading, preparation, and plotter instantiation are handled
+    externally; this class is responsible for widget creation, layout
+    assembly, and serving the application.
+
+    Parameters
+    ----------
+    plotter : ParameterizedPlotter
+        A fully configured ParameterizedPlotter instance that provides reactive plot
+        methods and param-based controls.
+    basename : str
+        Session name displayed as the tab title.
+ """ + + def __init__(self, *, plotter, basename): + self.plotter = plotter + self.basename = basename + self._psth_tab = self._build_psth_tab() + self._heatmap_tab = self._build_heatmap_tab() + + def _build_psth_tab(self): + """Build the PSTH tab with controls and plot panels.""" + psth_checkbox = pn.Param( + self.plotter.param.select_trials_checkbox, + widgets={ + "select_trials_checkbox": { + "type": pn.widgets.CheckBoxGroup, + "inline": True, + "name": "Select mean and/or just trials", + } + }, + ) + parameters = pn.Param( + self.plotter.param.selector_for_multipe_events_plot, + widgets={ + "selector_for_multipe_events_plot": {"type": pn.widgets.CrossSelector, "width": 550, "align": "start"} + }, + ) + psth_y_parameters = pn.Param( + self.plotter.param.psth_y, + widgets={ + "psth_y": { + "type": pn.widgets.MultiSelect, + "name": "Trial # - Timestamps", + "width": 200, + "size": 15, + "align": "start", + } + }, + ) + + event_selector = pn.Param( + self.plotter.param.event_selector, widgets={"event_selector": {"type": pn.widgets.Select, "width": 400}} + ) + x_selector = pn.Param(self.plotter.param.x, widgets={"x": {"type": pn.widgets.Select, "width": 180}}) + y_selector = pn.Param(self.plotter.param.y, widgets={"y": {"type": pn.widgets.Select, "width": 180}}) + + width_plot = pn.Param( + self.plotter.param.Width_Plot, widgets={"Width_Plot": {"type": pn.widgets.Select, "width": 70}} + ) + height_plot = pn.Param( + self.plotter.param.Height_Plot, widgets={"Height_Plot": {"type": pn.widgets.Select, "width": 70}} + ) + ylabel = pn.Param(self.plotter.param.Y_Label, widgets={"Y_Label": {"type": pn.widgets.Select, "width": 70}}) + save_opts = pn.Param( + self.plotter.param.save_options, widgets={"save_options": {"type": pn.widgets.Select, "width": 70}} + ) + + xlimit_plot = pn.Param( + self.plotter.param.X_Limit, widgets={"X_Limit": {"type": pn.widgets.RangeSlider, "width": 180}} + ) + ylimit_plot = pn.Param( + self.plotter.param.Y_Limit, widgets={"Y_Limit": {"type": pn.widgets.RangeSlider, "width": 180}} + ) + save_psth = pn.Param( + self.plotter.param.save_psth, widgets={"save_psth": {"type": pn.widgets.Button, "width": 400}} + ) + + options = pn.Column( + event_selector, + pn.Row(x_selector, y_selector), + pn.Row(xlimit_plot, ylimit_plot), + pn.Row(width_plot, height_plot, ylabel, save_opts), + save_psth, + ) + + options_selectors = pn.Row(options, parameters) + + return pn.Column( + "## " + self.basename, + pn.Row(options_selectors, pn.Column(psth_checkbox, psth_y_parameters), width=1200), + self.plotter.contPlot, + self.plotter.update_selector, + self.plotter.plot_specific_trials, + ) + + def _build_heatmap_tab(self): + """Build the heatmap tab with controls and plot panels.""" + heatmap_y_parameters = pn.Param( + self.plotter.param.heatmap_y, + widgets={ + "heatmap_y": {"type": pn.widgets.MultiSelect, "name": "Trial # - Timestamps", "width": 200, "size": 30} + }, + ) + event_selector_heatmap = pn.Param( + self.plotter.param.event_selector_heatmap, + widgets={"event_selector_heatmap": {"type": pn.widgets.Select, "width": 150}}, + ) + color_map = pn.Param( + self.plotter.param.color_map, widgets={"color_map": {"type": pn.widgets.Select, "width": 150}} + ) + width_heatmap = pn.Param( + self.plotter.param.width_heatmap, widgets={"width_heatmap": {"type": pn.widgets.Select, "width": 150}} + ) + height_heatmap = pn.Param( + self.plotter.param.height_heatmap, widgets={"height_heatmap": {"type": pn.widgets.Select, "width": 150}} + ) + save_hm = pn.Param(self.plotter.param.save_hm, 
widgets={"save_hm": {"type": pn.widgets.Button, "width": 150}}) + save_options_heatmap = pn.Param( + self.plotter.param.save_options_heatmap, + widgets={"save_options_heatmap": {"type": pn.widgets.Select, "width": 150}}, + ) + + return pn.Column( + "## " + self.basename, + pn.Row( + event_selector_heatmap, + color_map, + width_heatmap, + height_heatmap, + save_options_heatmap, + pn.Column(pn.Spacer(height=25), save_hm), + ), + pn.Row(self.plotter.heatmap, heatmap_y_parameters), + ) + + def show(self): + """Serve the dashboard in a browser on an available port.""" + logger.info("app") + template = pn.template.MaterialTemplate(title="Visualization GUI") + number = scanPortsAndFind(start_port=5000, end_port=5200) + app = pn.Tabs(("PSTH", self._psth_tab), ("Heat Map", self._heatmap_tab)) + template.main.append(app) + template.show(port=number) From 2733c54a2458771dda82929dd2835d3c813d11e5 Mon Sep 17 00:00:00 2001 From: pauladkisson Date: Fri, 6 Feb 2026 10:48:32 -0800 Subject: [PATCH 53/53] Move to visualization, step 6, to orchestration layer. --- src/guppy/orchestration/home.py | 2 +- src/guppy/{visualizePlot.py => orchestration/visualize.py} | 6 +++--- 2 files changed, 4 insertions(+), 4 deletions(-) rename src/guppy/{visualizePlot.py => orchestration/visualize.py} (97%) diff --git a/src/guppy/orchestration/home.py b/src/guppy/orchestration/home.py index 3a7f8ee..9b7e6c5 100644 --- a/src/guppy/orchestration/home.py +++ b/src/guppy/orchestration/home.py @@ -9,11 +9,11 @@ from .save_parameters import save_parameters from .storenames import orchestrate_storenames_page +from .visualize import visualizeResults from ..frontend.input_parameters import ParameterForm from ..frontend.path_selection import get_folder_path from ..frontend.progress import readPBIncrementValues from ..frontend.sidebar import Sidebar -from ..visualizePlot import visualizeResults logger = logging.getLogger(__name__) diff --git a/src/guppy/visualizePlot.py b/src/guppy/orchestration/visualize.py similarity index 97% rename from src/guppy/visualizePlot.py rename to src/guppy/orchestration/visualize.py index 33a712a..a4149f9 100755 --- a/src/guppy/visualizePlot.py +++ b/src/guppy/orchestration/visualize.py @@ -7,9 +7,9 @@ import numpy as np import pandas as pd -from .frontend.parameterized_plotter import ParameterizedPlotter, remove_cols -from .frontend.visualization_dashboard import VisualizationDashboard -from .utils.utils import get_all_stores_for_combining_data, read_Df, takeOnlyDirs +from ..frontend.parameterized_plotter import ParameterizedPlotter, remove_cols +from ..frontend.visualization_dashboard import VisualizationDashboard +from ..utils.utils import get_all_stores_for_combining_data, read_Df, takeOnlyDirs logger = logging.getLogger(__name__)