From 305275dc3d6c77840791e24f282f55dbca430149 Mon Sep 17 00:00:00 2001 From: Paul Lizer Date: Sat, 17 Jan 2026 17:44:32 -0500 Subject: [PATCH 1/2] Updated branch flow --- .github/workflows/enforce-branch-flow.yml | 31 +++++++++++++++++++++++ .github/workflows/enforce-dev-to-main.yml | 20 --------------- 2 files changed, 31 insertions(+), 20 deletions(-) create mode 100644 .github/workflows/enforce-branch-flow.yml delete mode 100644 .github/workflows/enforce-dev-to-main.yml diff --git a/.github/workflows/enforce-branch-flow.yml b/.github/workflows/enforce-branch-flow.yml new file mode 100644 index 00000000..1f81da24 --- /dev/null +++ b/.github/workflows/enforce-branch-flow.yml @@ -0,0 +1,31 @@ +name: Enforce Branch Protection Flow (Development → Staging → Main) + +on: + pull_request: + types: + - opened + - reopened + - synchronize + +jobs: + enforce-branch-flow: + runs-on: ubuntu-latest + steps: + - name: Fail if PR→staging doesn't come from development + if: > + github.event.pull_request.base.ref == 'staging' && + github.event.pull_request.head.ref != 'development' + run: | + echo "::error ::Pull requests into 'staging' must originate from branch 'development'." + exit 1 + + - name: Fail if PR→main doesn't come from staging + if: > + github.event.pull_request.base.ref == 'main' && + github.event.pull_request.head.ref != 'staging' + run: | + echo "::error ::Pull requests into 'main' must originate from branch 'staging'." + exit 1 + + - name: Branch flow validated + run: echo "✅ Branch flow validation passed." 
diff --git a/.github/workflows/enforce-dev-to-main.yml b/.github/workflows/enforce-dev-to-main.yml deleted file mode 100644 index dda5da8d..00000000 --- a/.github/workflows/enforce-dev-to-main.yml +++ /dev/null @@ -1,20 +0,0 @@ -name: Enforce PRs to main only from development - -on: - pull_request: - types: - - opened - - reopened - - synchronize - -jobs: - require-dev-base: - runs-on: ubuntu-latest - steps: - - name: Fail if PR→main doesn’t come from development - if: > - github.event.pull_request.base.ref == 'main' && - github.event.pull_request.head.ref != 'development' - run: | - echo "::error ::Pull requests into 'main' must originate from branch 'development'." - exit 1 From 875c338b4de9c71073ab67ece6177a59f46b8024 Mon Sep 17 00:00:00 2001 From: Paul Lizer Date: Tue, 20 Jan 2026 13:23:25 -0500 Subject: [PATCH 2/2] v0.235.025 (#609) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * creating workflows * fraud analyssis * support agents * update * fix * updated demo * Swagger lite (#469) * Development (#467) * upgrade to v0.229.060 (#459) * Update release notes to show support for GPT-5 * Documented support for gpt-image-1 * Update config.py * remove documentation folder * Documentation and message table support (#444) * Develop demo docs and import markdown table support * fixed enhanced citations for groups and public workspaces * Updated to support showing public workspaces in scope * Update config.py * fix docs * Updated RELEASE_NOTES * docs demos for public workspaces * V0.229 bug fixes (v0.229.019) (#448) * Development (#445) * Update release notes to show support for GPT-5 * Documented support for gpt-image-1 * Update config.py * remove documentation folder * Documentation and message table support (#444) * Develop demo docs and import markdown table support * fixed enhanced citations for groups and public workspaces * Updated to support showing public workspaces in scope * Update config.py * fix docs * Updated 
RELEASE_NOTES * video indexer config details, doc intel test button fix, move multimedia configs to search and extract * improved header security * updated versions * moved * Update EXTERNAL_HEALTH_CHECK_DUPLICATION_FIX.md * added pdfs * v0.229.019 bug fixes upgrade to v0.229.058 (#452) * all urls in chat open in new tabs * consolidated admin settings for improved navigation * added left hand nav admin settings menus * added left hand menu options for workspaces * Added debug logging to video indexer processes * readme and functional test * Workspace Scope in Chat affects Prompts * Create WORKSPACE_SCOPE_PROMPTS_FIX.md * time based turn off for debug and file process logging * improve saving in admin settings * update to v0.229.058 * Update RELEASE_NOTES.md * Update RELEASE_NOTES.md * Popup modal for Health Check config * Added Health Check config guide * Chat page top nav bug (#458) * initial fix * fixed top nav chat up bug * notes for v0.229.060 * file location fix * Update config.py * Update RELEASE_NOTES.md * moved to correct location * Fixed enhanced citations CSP bug Simple Chat implemented improved security which negatively impacted enhanced citations. * Updated release notes * updated version and tests * swagger support for all endpoints and added swagger search * added wide screen support for chats when collapsing side bar * v0.230.001 features * adding support for xlsm, Macro Excel files. 
* moved into features * initial * added readme * removed html code * Update config.py (#477) Updated else if for AUTHORITY * Initial Setup for Pages documentation (#479) * setup folders and base files * setting up files * architecture diagrams * updated to libdoc * libdoc updates * updating side bar * removed loops * editing side bar * Created Simple Chat Jekyll theme * Update config.py (#477) (#478) Updated else if for AUTHORITY Co-authored-by: Patrick C Davis <82388365+Patrick-Davis-MSFT@users.noreply.github.com> * Updating architectures * Update README.md --------- Co-authored-by: Patrick C Davis <82388365+Patrick-Davis-MSFT@users.noreply.github.com> * initial * added to base * adding real data endpoints * Update route_backend_control_center.py * added individual charts * fix for bug 485 * added document metrics * added links to control center * debug * added date * fixed bugs due to branch discrepancies * added Azure SQL Driver Docker File * added documentation for docker_fileSession updates * Redis Managed Identity Azure Government Support Changes * Stop tracking ignored folders * updated gitignore * added sort by to table for user management * storage account size processing * Front end now shows storage account sizing * export user management list to csv * adding group management * fixing swagger generation * fix * Added inline dynamic property generation * added YAML support * Improved multipart form vs app/json detection * added Control Center Admin role ControlCenterAdmin * ai search sizing is working for groups * group refresh fixed * added group data fix * group table refresh * updated export to include group docs * adding public workspace management * removed sample data and consolidated row generators * Changed both caching helper functions to use the existing update_document() function from functions_documents.py instead of direct upsert. 
* removed workflow, will work on that in different branch * Document Set Fingerprinting, Scope-Aware Cache Key Generation, Event-Based Invalidation I've successfully implemented Document Set Fingerprint + Event-Based Cache Invalidation with deterministic sorting and Score Normalization. * added debug logging * setup cache feature and ttl time to admin app settings * removed cosmos level ttl * Keyvault for secrets (#492) * add crude keyvault base impl * upd actions for MAG * add settings to fix * upd secret naming convention * upd auth types to include conn string/basic(un/pw) * fix method name * add get agent helper * add ui trigger word and get agent helper * upd function imports * upd agents call * add desc of plugins * fix for admin modal loading * upd default agent handling * rmv unneeded file * rmv extra imp statements * add new cosmos container script * upd instructions for consistency of code * adds safe calls for akv functions * adds akv to personal agents * fix for user agents boot issue * fix global set * upd azure function plugin to super init * upd to clean imports * add keyvault to global actions loading * add plugin loading docs * rmv secret leak via logging * rmv displaying of token in logs * fix not loading global actions for personal agents * rmv unsupported characters from logging * fix chat links in dark mode * chg order of css for links in dark mode * fix chat color * add default plugin print logging * rmv default check for nonsql plugins * upd requirements * add keyvault and dynamic addsetting ui * fix for agents/plugins with invalid akv chars * add imp to appins logging * add security tab UI + key vault UI * add keyvault settings * fix for copilot findings. 
* fix for resaving plugin without changing secret --------- Co-authored-by: Bionic711 * Feature/remove abp for pr (#510) * add crude keyvault base impl * upd secret naming convention * upd auth types to include conn string/basic(un/pw) * add ui trigger word and get agent helper * adds safe calls for akv functions * add keyvault to global actions loading * rmv secret leak via logging * fix chat links in dark mode * chg order of css for links in dark mode * fix chat color * add keyvault and dynamic addsetting ui * fix for agents/plugins with invalid akv chars * add security tab UI + key vault UI * fix for resaving plugin without changing secret * init azure billing plugin * add app settings cache * upd to azure billing plugin * upd to msgraph plugin * init community customizations * add module * add key vault config modal * add logging and functions to math * rmv extra telemetry, add appcache * upd billing plugin * add/upd key vault, admin settings, agents, max tokens * Remove abp for pr * disable static logging for development * rmv dup import * add note on pass * added notes * rmv dup decl * add semicolon * rmv unused variable add agent name to log * add actions migration back in * add notes and copilot fixes --------- Co-authored-by: Bionic711 * Feature/group agents actions (#521) * add crude keyvault base impl * upd actions for MAG * add settings to fix * upd secret naming convention * upd auth types to include conn string/basic(un/pw) * fix method name * add get agent helper * add ui trigger word and get agent helper * upd function imports * upd agents call * add desc of plugins * fix for admin modal loading * upd default agent handling * rmv unneeded file * rmv extra imp statements * add new cosmos container script * upd instructions for consistency of code * adds safe calls for akv functions * adds akv to personal agents * fix for user agents boot issue * fix global set * upd azure function plugin to super init * upd to clean imports * add keyvault to global 
actions loading * add plugin loading docs * rmv secret leak via logging * rmv displaying of token in logs * fix not loading global actions for personal agents * rmv unsupported characters from logging * fix chat links in dark mode * chg order of css for links in dark mode * fix chat color * add default plugin print logging * rmv default check for nonsql plugins * upd requirements * add keyvault and dynamic addsetting ui * fix for agents/plugins with invalid akv chars * add imp to appins logging * add security tab UI + key vault UI * add keyvault settings * fix for copilot findings. * fix for resaving plugin without changing secret * init azure billing plugin * add app settings cache * upd to azure billing plugin * upd to msgraph plugin * init community customizations * add module * add key vault config modal * add logging and functions to math * rmv extra telemetry, add appcache * upd billing plugin * add/upd key vault, admin settings, agents, max tokens * Remove abp for pr * disable static logging for development * rmv dup import * add note on pass * added notes * rmv dup decl * add semicolon * rmv unused variable add agent name to log * add actions migration back in * add notes and copilot fixes * add group agents/actions * add branch for testing/rmv old branch * bug fixes, group agent modifications, rmv client validation * rmv ajv * upd from copilot --------- Co-authored-by: Bionic711 * Add cosmos activity logs container configuration * incorporate branch updates Add 372 fix 489 * Support deployment via AZD UP (#530) * Update devcontainer configuration for support of AZD * Move to module based bicep files * Add Azure deployment configuration and update Bicep modules for service outputs * Enhance Azure deployment process by adding predeploy hooks for Docker image management and updating Bicep modules to include managed identity client ID and container registry outputs. 
* Add deployment script for creating and storing Azure AD client secret in Key Vault * Update Azure Dev CLI feature version to latest in devcontainer configuration * Remove deprecated Bicep files and parameter configurations for cleaner deployment structure * Refactor Bicep modules for improved diagnostics and role assignments - Updated appService.bicep to conditionally import diagnostic settings based on enableDiagLogging parameter. - Changed Azure Cosmos DB authentication type to managed identity and removed key-based authentication settings. - Enhanced appServiceAuthentication.bicep by removing unnecessary parameters and configuring Key Vault reference for client secret. - Modified appServicePlan.bicep to conditionally import diagnostic settings. - Refactored azureContainerRegistry-existing.bicep to deploy role assignment to the ACR's resource group. - Updated azureContainerRegistry.bicep to conditionally import diagnostic settings. - Enhanced contentSafety.bicep with conditional diagnostic settings import. - Updated cosmosDb.bicep to include a new database and container, and added role assignments for managed identity. - Refactored documentIntelligence.bicep to conditionally import diagnostic settings. - Enhanced enterpriseApplication.bicep by adding additional required resource access scopes. - Updated keyVault.bicep to conditionally import diagnostic settings and adjusted enterprise app parameters. - Refactored openAI.bicep to conditionally import diagnostic settings. - Enhanced redisCache.bicep with conditional diagnostic settings import. - Updated search.bicep to conditionally import diagnostic settings. - Refactored speechService.bicep to conditionally import diagnostic settings. - Enhanced storageAccount.bicep with conditional diagnostic settings import. - Added main.parameters.json for parameter management. - Introduced azureContainerRegistry-roleAssignment.bicep for managing ACR role assignments. 
* Add custom subdomain names for document intelligence, OpenAI, and speech services * Fix casing for hostingMode property in search service configuration * Enhance storage account configuration by enabling hierarchical namespace and setting public access to 'None' for document containers * Add enterprise app permissions module for resource access management * Fixed ExternalApi configuration to valid guid and set value to a unique name * Add Init Script to Configure Entra Application * Fix spelling error * fix failure in hostingMode value * configure managed identity for contentSafety * update readme to support new AZD deployment solution * Video Indexer, Multi-Modal Enhancements, Scope Bug ## PR Summary: Video Indexer Multi-Modal Enhancements ### Overview This PR introduces significant enhancements to video processing and image analysis capabilities, focusing on multi-modal AI features and improved metadata handling. **Version updated from 0.233.167 to 0.233.172**. ### 🎯 Key Features #### 1. **Multi-Modal Vision Analysis for Images** - Added AI-powered vision analysis for uploaded images using GPT-4 Vision or similar models - Extracts comprehensive image insights including: - AI-generated descriptions - Object detection - Text extraction from images (OCR) - Detailed visual analysis - New admin setting: `enable_multimodal_vision` to control feature availability - Vision analysis results stored in document metadata and included in AI Search indexing - Connection testing endpoint added for vision model validation #### 2. 
**Enhanced Document Metadata Citations** - Implemented metadata-based citations that surface document keywords, abstracts, and vision analysis - New citation types displayed with distinct visual indicators: - **Keywords**: Tagged with `bi-tags` icon, labeled as "Metadata" - **Abstract**: Document summaries included as contextual citations - **Vision Analysis**: AI-generated image insights labeled as "AI Vision" - Metadata content passed to AI models as additional context for more informed responses - Special modal view for metadata citations (separate from standard document citations) #### 3. **Image Message UI Improvements** - Enhanced display for user-uploaded images vs AI-generated images - Added "View Text" button for uploaded images with extracted content or vision analysis - Collapsible info sections showing: - Extracted OCR text from Document Intelligence - AI Vision Analysis results - Proper avatar distinction between uploaded and generated images - Improved metadata tracking with `is_user_upload` flag #### 4. **Video Indexer Configuration Updates** - **BREAKING CHANGE**: Removed API key authentication support - Now exclusively uses **Managed Identity authentication** for Video Indexer - Updated admin UI documentation to guide managed identity setup: - Enable system-assigned managed identity on App Service - Assign "Video Indexer Restricted Viewer" role - Configure required ARM settings (subscription ID, resource group, account name) - Improved validation for required Video Indexer settings - Enhanced error messaging for missing configuration #### 5. **Search Scope Improvements** - Fixed search behavior when `document_scope='all'` to properly include group documents - Added `active_group_id` to search context when document scope is 'all' and groups are enabled - Conditional group index searching - only queries group index when `active_group_id` is present - Prevents unnecessary searches and potential errors when groups aren't in use #### 6. 
**Image Context in Conversation History** - Enhanced conversation history to include rich image context for AI models - Extracts and includes: - OCR text from Document Intelligence (up to max content length) - AI Vision analysis (description, objects, text) - Structured prompt formatting for multimodal understanding - **Important**: Base64 image data excluded from conversation history to prevent token overflow - Only metadata and extracted insights passed to models for efficient token usage ### 🔧 Technical Improvements #### Backend Changes - **route_backend_chats.py**: - Added metadata citation extraction logic (~150 lines) - Enhanced conversation history building for image uploads - Improved search argument handling for group contexts - **functions_documents.py**: - New `analyze_image_with_vision_model()` function for AI vision analysis - Enhanced `get_document_metadata_for_citations()` integration - Vision analysis now runs BEFORE chunk saving to include insights in AI Search indexing - Removed redundant blob storage for vision JSON (stored in document metadata) - **route_backend_settings.py**: - New `_test_multimodal_vision_connection()` endpoint for testing vision models - Supports both APIM and direct Azure OpenAI endpoints - Test uses 1x1 pixel sample image for validation - **functions_search.py**: - Added conditional logic for group search execution - Prevents empty `active_group_id` from causing search errors #### Frontend Changes - **chat-messages.js** (~275 lines changed): - Enhanced `appendMessage()` to handle uploaded image metadata - New `toggleImageInfo()` functionality for expandable image details - Improved citation rendering with metadata type indicators - Debug logging for image message processing - **chat-citations.js** (~70 lines added): - New `showMetadataModal()` function for displaying keywords/abstracts/vision analysis - Enhanced citation click handling to detect metadata citations - Separate modal styling and behavior for metadata vs 
document citations - **admin_settings.html**: - Complete redesign of Video Indexer configuration section - Removed all API key references - Added managed identity setup instructions with step-by-step guidance - Updated configuration display to show resource group and subscription ID - **_video_indexer_info.html**: - Updated modal content to clarify managed identity requirement - Added warning banner about authentication type - Enhanced configuration display with ARM resource details ### 📊 Files Changed - **16 files** modified - **+1,063 insertions**, **-412 deletions** - Net change: **+651 lines** ### 🧪 Testing Considerations - Test multi-modal vision analysis with various image types - Validate metadata citations appear correctly in chat responses - Verify Video Indexer works with managed identity authentication - Test search scope behavior with and without groups enabled - Validate image upload UI shows extracted text and vision analysis - Confirm conversation history properly handles image context without token overflow ### 🔐 Security & Performance - Managed identity authentication improves security posture (no stored API keys) - Image base64 data excluded from conversation history prevents token exhaustion - Metadata citations add minimal overhead while providing rich context - Vision analysis runs efficiently during document processing pipeline ### 📝 Configuration Required Admins must configure: 1. Enable `enable_multimodal_vision` in admin settings 2. Select vision-capable model (e.g., `gpt-4o`, `gpt-4-vision-preview`) 3. For Video Indexer: Configure managed identity and ARM resource details 4. Enable `enable_extract_meta_data` to surface metadata citations --- This PR significantly enhances the application's multi-modal capabilities, providing users with richer context from images and documents while maintaining efficient token usage and robust security practices. * Conversation Management Features (#532) New Features 1. 
Pin Conversations Users can pin important conversations to keep them at the top of the list Pinned conversations display a pin icon (📌) in the conversation header and details modal Pin icon appears before the conversation title Bulk pin/unpin operations available in multi-select mode Pinned conversations always appear first, sorted by most recent activity 2. Hide Conversations Users can hide conversations to declutter their workspace without deleting them Hidden conversations display an eye-slash icon (👁️‍🗨️) in the conversation header and details modal Eye-slash icon appears next to the pin icon (if pinned) Bulk hide/unhide operations available in multi-select mode Toggle visibility of hidden conversations using the eye icon in the sidebar 3. Two-Tier Conversation Search Quick Search (Sidebar) Instant title-based filtering of conversations Search icon in sidebar activates inline search input Real-time filtering as you type Clear button to reset search Expand button to open advanced search modal Advanced Search (Modal) Full-text search across all message content Multiple filter options: Date range (from/to) Chat type (personal/group/public) Classifications (multi-select) Has uploaded files Has generated images Pagination (20 results per page) Message snippets with highlighted search terms (50 chars before/after match) Click to navigate directly to specific messages Search history tracking (last 20 searches) Clickable search history to repeat searches 4. Message Highlighting & Navigation Search results highlight matched text in yellow (amber in dark mode) Smooth scroll animation to navigate to specific messages Pulse animation draws attention to the target message Highlights persist for 30 seconds before auto-clearing Works across conversation switches 5. 
Multi-Select Mode Select multiple conversations for bulk operations Visual checkboxes appear when entering selection mode Bulk actions available: Pin/unpin selected conversations Hide/unhide selected conversations Delete selected conversations Selection mode accessible from conversation dropdown menu Auto-exit after 30 seconds of inactivity 6. Enhanced Conversation Details Modal Displays pin icon if conversation is pinned Displays eye-slash icon if conversation is hidden Shows both icons at the top of the modal (next to title) Status section shows visual badges for pinned/hidden state Comprehensive metadata display Technical Implementation Frontend Changes chat-conversations.js: Core conversation management, quick search, pin/hide functionality chat-search-modal.js (NEW): Advanced search modal implementation chat-sidebar-conversations.js: Sidebar search synchronization, hidden conversation handling chat-messages.js: Message highlighting, smooth scroll, search highlight persistence chat-conversation-details.js: Updated to show pin/hidden icons in modal chats.css: Styles for search highlights and message pulse animations HTML Templates: Added search modal, updated navigation icons Backend Changes route_backend_conversations.py: /api/search_conversations - Full-text search with filters and pagination /api/conversations/classifications - Get unique classification values /api/user-settings/search-history - GET/POST/DELETE endpoints for search history /api/conversations/{id}/pin - Toggle pin status /api/conversations/{id}/hide - Toggle hide status Bulk operations for pin/hide/delete functions_settings.py: Search history management functions * Message management (#553) * added message masking mask selected content of message or an entire message * fixed citation border * enabled streaming * image gen with streaming * added reasoning support * added reasoning to agents * agent support * fixed key bug * disable group create and fixed model fetch * updated config * fixed 
support for workspace search for streaming * fix bug with sidebar update * fixed gpt-5 vision processing bug * metadata works with all messages now * fixed debug_print bug * added reasoning effort to agents and fixed agent validation * fixed file metadata loading bug * fixed llm streaming when working with group workspace data * fixed cosmos container config error * added delete message and fixed message threading * retry bug fixes * fixed message threading order * moved message buttons to menu * fixed bug for conversation history that included inactive threads * added css styling for urls for dark mode * fixed bug with newly created messages not showing metadata or deleting * improved search times by 100x * added token collect to messages supports models and agents * added streaming for agents along with token collection * added embedding token tracking * added document creation/deletion and token tracking to activity log * adding conversations to activity logs * added activity log viewer with filters, search, and export * added support for agents in edit and retry messages * Configure Application from AZD Up command (#548) * Add Cosmos DB post-configuration script and update requirements - Initial POC * post deploy configure services in cosmosdb * refactor to prevent post deploy configuration + begin support of key based auth. * Add additional parameter validation for creating entra app * Refactor Bicep modules for improved authentication and key management - Added keyVault-Secrets.bicep module for storing secrets in Key Vault. - Modified keyVault.bicep to remove enterprise app client secret handling and commented out managed identity role assignments. - Removed openAI-existing.bicep and refactored openAI.bicep to handle model deployments dynamically. - Added setPermissions.bicep for managing role assignments for various resources. - Updated postconfig.py to reflect changes in environment variable handling for authentication type. 
* Refactor Bicep modules to conditionally add settings based on authentication type and enable resource declarations for services * initial support for VideoIndexer service * Refactor Bicep modules to enhance VideoIndexer service integration and update diagnostic settings configurations * move from using chainguard-dev builder image to python slim image. * Updates to support post deployment app config * Add post-deployment permissions script for CosmosDB and update authentication type handling * fix typo in enhanced citation deployment config * Refactor Dockerfile to use Python 3.13-slim and streamline build process * restart web application after deployment settings applied * remove setting for disableLocalAuth * update to latest version of bicep deployment * remove dead code * code cleanup / formatting * removed unnecessary content from readme.md * fix token scope for commericial search service * set permission correctly for lookup of openAI models * fixes required to configure search with managed identity * Adds Azure Billing Plugin in Community Customizations (#546) * add crude keyvault base impl * upd actions for MAG * add settings to fix * upd secret naming convention * upd auth types to include conn string/basic(un/pw) * fix method name * add get agent helper * add ui trigger word and get agent helper * upd function imports * upd agents call * add desc of plugins * fix for admin modal loading * upd default agent handling * rmv unneeded file * rmv extra imp statements * add new cosmos container script * upd instructions for consistency of code * adds safe calls for akv functions * adds akv to personal agents * fix for user agents boot issue * fix global set * upd azure function plugin to super init * upd to clean imports * add keyvault to global actions loading * add plugin loading docs * rmv secret leak via logging * rmv displaying of token in logs * fix not loading global actions for personal agents * rmv unsupported characters from logging * fix chat links 
in dark mode * chg order of css for links in dark mode * fix chat color * add default plugin print logging * rmv default check for nonsql plugins * upd requirements * add keyvault and dynamic addsetting ui * fix for agents/plugins with invalid akv chars * add imp to appins logging * add security tab UI + key vault UI * add keyvault settings * fix for copilot findings. * fix for resaving plugin without changing secret * init azure billing plugin * add app settings cache * upd to azure billing plugin * upd to msgraph plugin * init community customizations * add module * add key vault config modal * add logging and functions to math * rmv extra telemetry, add appcache * upd billing plugin * add/upd key vault, admin settings, agents, max tokens * Remove abp for pr * disable static logging for development * rmv dup import * add note on pass * added notes * rmv dup decl * add semicolon * rmv unused variable add agent name to log * add actions migration back in * add notes and copilot fixes * add abp back in * upd abp/seperate graph from query * rmv missed merge lines * fix for AL * upd for consistency testing * upd abp to community * fix copilot findings #1 * fix plotting conflict * fix exception handling * fix static max function invokes * rmv unneeded decl * rmv unneeded imports * fix grouping dimensions * fix abp copilot suggestions #2 * simplify methods for message reload * upd dockerfile to google distroless * add pipelines * add modifications to container * upd to build * add missing arg * add arg for major/minor/patch python version * upd python paths and pip install * add perms to /app for user * chg back to root * rmv python3 * rmv not built python * add shared * add path and home * upd for stdlib paths * fix user input filesystem path vulns * fix to consecutive dots * upd pipeline to include branch name in image * add abp to deploy * upd instructions name/rmv abp from deploy * fix pipeline * mov back to Comm Cust for main inclusion --------- Co-authored-by: 
Bionic711 * Security/container build (#549) * upd dockerfile to google distroless * add pipelines * add modifications to container * upd to build * add missing arg * add arg for major/minor/patch python version * upd python paths and pip install * add perms to /app for user * chg back to root * rmv python3 * rmv not built python * add shared * add path and home * upd for stdlib paths * fix user input filesystem path vulns * fix to consecutive dots --------- Co-authored-by: Bionic711 * Feature/speech managed identity (#543) * Bugfix - deleted duplicate enable_external_healthcheck entry * Feature - updated Speech Service to use Managed Identity in addition to the key, added MAG functionality via Azure Speech SDK since the Fast Transcription API is not available in MAG, updated Admin Setup Walkthrough so it goes to the right place in the settings when Next is clicked, updated Speech requirements in Walkthrough, rewrote Admin Configuration docs, updated/corrected Managed Identity roles in Setup Instructions Special docs. 
* Update application/single_app/templates/admin_settings.html Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com> * Update application/single_app/functions_settings.py Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com> * Update application/single_app/functions_documents.py Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com> * Update application/single_app/functions_documents.py Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com> --------- Co-authored-by: Paul Lizer Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com> * Banner text color picker from Vivien (#555) * Classification text color picker * Line endings * Remove opencensus * Add flask instrumentation * Add troubleshooting doc * Add troubleshooting doc * Control center (#567) * added group status (active, locked, upload disabled, and inactive) Adds Azure Billing Plugin in Community Customizations * added bulk member upload via csv for groups * add document metadata modified activity log tracking * activity logging for members deleted from groups * added group activity timeline * added notification system * added notifications for document upload to workspaces * fixed badge sizing * fixed url link * fixed badge to not show with zero notifications * Updated notification system * Updated approval system * updated approval workflow * updated notification workflow * Fixed set active bug on my public workspace page * Added user retention policy, updated user profile page with dashboards, retention config, and more. 
* adding speed to text for chat UI * updated the speech wave form and input field * updated to transcribe entire recording * fixed bug creating new conversation with auto-send * add mic permissions * added stream token tracking * Added public workspace reporting * Updated AI search sizing analysis * added management for public workspaces * improved public workspace management includes stats and bulk actions * updated groups dashboard for owners and admins with stats and bulk actions * added voice for ai to talk with users in chats * Auto Voice Response * for speech service, added 429 randomized response pattern to prevent thunder herding * updated admin settings for speech services and fixed dark mode for raw log viewing * updated video extraction card * Added Control Center Admin and Dashboard Reader roles * updated feedback and safety decorators so admins work unless required then those roles must be used * Updated and Validated logic for admin roles; control center, safety, and feedback * added support for control center admin and dashboard reader * Development (#566) * Banner text color picker from Vivien (#555) * Classification text color picker * Line endings * Remove opencensus * Add flask instrumentation * Add troubleshooting doc * Add troubleshooting doc --------- Co-authored-by: Ed Clark <107473135+clarked-msft@users.noreply.github.com> Co-authored-by: Ed Clark Co-authored-by: Bionic711 <13358952+Bionic711@users.noreply.github.com> * updated tool tip to better inform user on status of ai response * improve query parameters detection for swagger * updated visual cue showing the ai is talking to the user * moved duplicates to shared js * replaced alert with toast. 
* fixed and added log_event to exceptions * added @user_required and improved swagger generation * Update route_frontend_profile.py * fixed swagger generation bug on affecting two apis * returned keyvault to admin settings ui * Fixed bug when running local js --------- Co-authored-by: Ed Clark <107473135+clarked-msft@users.noreply.github.com> Co-authored-by: Ed Clark Co-authored-by: Bionic711 <13358952+Bionic711@users.noreply.github.com> * Adding release notes * fixed debug_debug_print * Updated README * Update README.md * accepted changes * removed files * GitHub Actions workflow that runs Python compilation checks on all Python files in the single_app * Upated to v0.235.002 * removed debug test file * Updated to v0.235.003 * Update python-syntax-check.yml * fixed disable group creation bug * fixed bug not showing raw activity log for groups * fixed control center access to not require admin role when enabling controlcenteradmin role * fix documentation * Update release_notes.md * Update README.md * added click restrictions to top items in control center * bug fix - fixed group member select after search, group member removal, group member role update, and approve/reject request * Retention execution activity log (#600) * retention execution logging bug fix * debug timer reset with admin save bug fix * Create test_debug_logging_timer_preservation.py * fixed file processing logic prevent runtime execution * fixed bug processing execution against personal documents * removed test logging * Retention Policy Document Deletion Fix * Improve execution logic for rentention policy Now uses retention_policy_next_run timestamp - Compares current time against the stored next scheduled time. If current time >= next scheduled time, it runs. Reduced check interval from 1 hour to 5 minutes - More responsive scheduling, ensures it catches the scheduled time promptly. Better fallback logic - If next_run can't be parsed, falls back to checking last_run (23-hour threshold). 
Runs immediately if never run before - If there's no last_run or next_run, it will execute on the first check. * added log_event to exceptions * enforce-branch-flow --------- Co-authored-by: Patrick C Davis <82388365+Patrick-Davis-MSFT@users.noreply.github.com> Co-authored-by: Bionic711 Co-authored-by: cjackson202 <134412115+cjackson202@users.noreply.github.com> Co-authored-by: Bionic711 Co-authored-by: Bionic711 <13358952+Bionic711@users.noreply.github.com> Co-authored-by: Steve Carroll <37545884+SteveCInVA@users.noreply.github.com> Co-authored-by: Xeelee33 Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com> Co-authored-by: Ed Clark <107473135+clarked-msft@users.noreply.github.com> Co-authored-by: Ed Clark Co-authored-by: Joshua Wilshere --- application/single_app/app.py | 74 ++-- application/single_app/config.py | 2 +- .../single_app/functions_retention_policy.py | 94 +++-- .../route_frontend_admin_settings.py | 108 ++++-- .../static/js/group/manage_group.js | 110 +++--- .../RETENTION_POLICY_DOCUMENT_DELETION_FIX.md | 149 ++++++++ .../RETENTION_POLICY_SCHEDULER_FIX.md | 87 +++++ docs/explanation/release_notes.md | 22 ++ .../test_debug_logging_timer_preservation.py | 333 ++++++++++++++++++ 9 files changed, 849 insertions(+), 130 deletions(-) create mode 100644 docs/explanation/fixes/v0.235.025/RETENTION_POLICY_DOCUMENT_DELETION_FIX.md create mode 100644 docs/explanation/fixes/v0.235.025/RETENTION_POLICY_SCHEDULER_FIX.md create mode 100644 functional_tests/test_debug_logging_timer_preservation.py diff --git a/application/single_app/app.py b/application/single_app/app.py index c1dc124f..54336100 100644 --- a/application/single_app/app.py +++ b/application/single_app/app.py @@ -90,6 +90,7 @@ os.makedirs(app.config['SESSION_FILE_DIR'], exist_ok=True) except Exception as e: print(f"WARNING: Unable to create session directory {app.config.get('SESSION_FILE_DIR')}: {e}") + log_event(f"Unable to create session directory 
{app.config.get('SESSION_FILE_DIR')}: {e}", level=logging.ERROR) Session(app) @@ -186,6 +187,7 @@ def configure_sessions(settings): app.config['SESSION_TYPE'] = 'filesystem' except Exception as e: print(f"⚠️ WARNING: Session configuration error; falling back to filesystem: {e}") + log_event(f"Session configuration error; falling back to filesystem: {e}", level=logging.ERROR) app.config['SESSION_TYPE'] = 'filesystem' # Initialize session interface @@ -266,6 +268,7 @@ def check_logging_timers(): except Exception as e: print(f"Error in logging timer check: {e}") + log_event(f"Error in logging timer check: {e}", level=logging.ERROR) # Check every 60 seconds time.sleep(60) @@ -286,6 +289,7 @@ def check_expired_approvals(): print(f"Auto-denied {denied_count} expired approval request(s).") except Exception as e: print(f"Error in approval expiration check: {e}") + log_event(f"Error in approval expiration check: {e}", level=logging.ERROR) # Check every 6 hours (21600 seconds) time.sleep(21600) @@ -309,14 +313,34 @@ def check_retention_policy(): if personal_enabled or group_enabled or public_enabled: current_time = datetime.now(timezone.utc) - execution_hour = settings.get('retention_policy_execution_hour', 2) - # Check if we're in the execution hour - if current_time.hour == execution_hour: - # Check if we haven't run today yet + # Check if next scheduled run time has passed + next_run = settings.get('retention_policy_next_run') + should_run = False + + if next_run: + try: + next_run_dt = datetime.fromisoformat(next_run) + # Run if we've passed the scheduled time + if current_time >= next_run_dt: + should_run = True + except Exception as parse_error: + print(f"Error parsing next_run timestamp: {parse_error}") + # If we can't parse, fall back to checking last_run + last_run = settings.get('retention_policy_last_run') + if last_run: + try: + last_run_dt = datetime.fromisoformat(last_run) + # Run if last run was more than 23 hours ago + if (current_time - 
last_run_dt).total_seconds() > (23 * 3600): + should_run = True + except: + should_run = True + else: + should_run = True + else: + # No next_run set, check last_run instead last_run = settings.get('retention_policy_last_run') - should_run = False - if last_run: try: last_run_dt = datetime.fromisoformat(last_run) @@ -326,29 +350,31 @@ def check_retention_policy(): except: should_run = True else: + # Never run before, execute now should_run = True + + if should_run: + print(f"Executing scheduled retention policy at {current_time.isoformat()}") + from functions_retention_policy import execute_retention_policy + results = execute_retention_policy(manual_execution=False) - if should_run: - print(f"Executing scheduled retention policy at {current_time.isoformat()}") - from functions_retention_policy import execute_retention_policy - results = execute_retention_policy(manual_execution=False) - - if results.get('success'): - print(f"Retention policy execution completed: " - f"{results['personal']['conversations']} personal conversations, " - f"{results['personal']['documents']} personal documents, " - f"{results['group']['conversations']} group conversations, " - f"{results['group']['documents']} group documents, " - f"{results['public']['conversations']} public conversations, " - f"{results['public']['documents']} public documents deleted.") - else: - print(f"Retention policy execution failed: {results.get('errors')}") + if results.get('success'): + print(f"Retention policy execution completed: " + f"{results['personal']['conversations']} personal conversations, " + f"{results['personal']['documents']} personal documents, " + f"{results['group']['conversations']} group conversations, " + f"{results['group']['documents']} group documents, " + f"{results['public']['conversations']} public conversations, " + f"{results['public']['documents']} public documents deleted.") + else: + print(f"Retention policy execution failed: {results.get('errors')}") except Exception as e: 
print(f"Error in retention policy check: {e}") + log_event(f"Error in retention policy check: {e}", level=logging.ERROR) - # Check every hour - time.sleep(3600) + # Check every 5 minutes for more responsive scheduling + time.sleep(300) # Start the retention policy check thread retention_thread = threading.Thread(target=check_retention_policy, daemon=True) @@ -377,6 +403,8 @@ def inject_settings(): from functions_settings import get_user_settings user_settings = get_user_settings(user_id) or {} except Exception as e: + print(f"Error injecting user settings: {e}") + log_event(f"Error injecting user settings: {e}", level=logging.ERROR) user_settings = {} return dict(app_settings=public_settings, user_settings=user_settings) diff --git a/application/single_app/config.py b/application/single_app/config.py index 650393e6..2224a49e 100644 --- a/application/single_app/config.py +++ b/application/single_app/config.py @@ -88,7 +88,7 @@ EXECUTOR_TYPE = 'thread' EXECUTOR_MAX_WORKERS = 30 SESSION_TYPE = 'filesystem' -VERSION = "0.235.012" +VERSION = "0.235.025" SECRET_KEY = os.getenv('SECRET_KEY', 'dev-secret-key-change-in-production') diff --git a/application/single_app/functions_retention_policy.py b/application/single_app/functions_retention_policy.py index 02ba2eea..6c59ef64 100644 --- a/application/single_app/functions_retention_policy.py +++ b/application/single_app/functions_retention_policy.py @@ -18,6 +18,7 @@ from functions_activity_logging import log_conversation_deletion, log_conversation_archival from functions_notifications import create_notification, create_group_notification, create_public_workspace_notification from functions_debug import debug_print +from functions_appinsights import log_event from datetime import datetime, timezone, timedelta @@ -36,6 +37,7 @@ def get_all_user_settings(): )) return users except Exception as e: + log_event("get_all_user_settings_error", {"error": str(e)}) debug_print(f"Error fetching all user settings: {e}") return [] @@ -55,6 
+57,7 @@ def get_all_groups(): )) return groups except Exception as e: + log_event("get_all_groups_error", {"error": str(e)}) debug_print(f"Error fetching all groups: {e}") return [] @@ -74,6 +77,7 @@ def get_all_public_workspaces(): )) return workspaces except Exception as e: + log_event("get_all_public_workspaces_error", {"error": str(e)}) debug_print(f"Error fetching all public workspaces: {e}") return [] @@ -156,6 +160,7 @@ def execute_retention_policy(workspace_scopes=None, manual_execution=False): return results except Exception as e: + log_event("execute_retention_policy_error", {"error": str(e), "workspace_scopes": workspace_scopes, "manual_execution": manual_execution}) debug_print(f"Error executing retention policy: {e}") results['success'] = False results['errors'].append(str(e)) @@ -196,6 +201,8 @@ def process_personal_retention(): if conversation_retention_days == 'none' and document_retention_days == 'none': continue + debug_print(f"Processing retention for user {user_id}: conversations={conversation_retention_days} days, documents={document_retention_days} days") + user_deletion_summary = { 'user_id': user_id, 'conversations_deleted': 0, @@ -216,6 +223,7 @@ def process_personal_retention(): user_deletion_summary['conversation_details'] = conv_results['details'] results['conversations'] += conv_results['count'] except Exception as e: + log_event("process_personal_retention_conversations_error", {"error": str(e), "user_id": user_id}) debug_print(f"Error processing conversations for user {user_id}: {e}") # Process documents @@ -230,6 +238,7 @@ def process_personal_retention(): user_deletion_summary['document_details'] = doc_results['details'] results['documents'] += doc_results['count'] except Exception as e: + log_event("process_personal_retention_documents_error", {"error": str(e), "user_id": user_id}) debug_print(f"Error processing documents for user {user_id}: {e}") # Send notification if anything was deleted @@ -241,6 +250,7 @@ def 
process_personal_retention(): return results except Exception as e: + log_event("process_personal_retention_error", {"error": str(e)}) debug_print(f"Error in process_personal_retention: {e}") return results @@ -299,6 +309,7 @@ def process_group_retention(): group_deletion_summary['conversation_details'] = conv_results['details'] results['conversations'] += conv_results['count'] except Exception as e: + log_event("process_group_retention_conversations_error", {"error": str(e), "group_id": group_id}) debug_print(f"Error processing conversations for group {group_id}: {e}") # Process documents @@ -313,6 +324,7 @@ def process_group_retention(): group_deletion_summary['document_details'] = doc_results['details'] results['documents'] += doc_results['count'] except Exception as e: + log_event("process_group_retention_documents_error", {"error": str(e), "group_id": group_id}) debug_print(f"Error processing documents for group {group_id}: {e}") # Send notification if anything was deleted @@ -324,6 +336,7 @@ def process_group_retention(): return results except Exception as e: + log_event("process_group_retention_error", {"error": str(e)}) debug_print(f"Error in process_group_retention: {e}") return results @@ -382,6 +395,7 @@ def process_public_retention(): workspace_deletion_summary['conversation_details'] = conv_results['details'] results['conversations'] += conv_results['count'] except Exception as e: + log_event("process_public_retention_conversations_error", {"error": str(e), "public_workspace_id": workspace_id}) debug_print(f"Error processing conversations for public workspace {workspace_id}: {e}") # Process documents @@ -396,6 +410,7 @@ def process_public_retention(): workspace_deletion_summary['document_details'] = doc_results['details'] results['documents'] += doc_results['count'] except Exception as e: + log_event("process_public_retention_documents_error", {"error": str(e), "public_workspace_id": workspace_id}) debug_print(f"Error processing documents for public 
workspace {workspace_id}: {e}") # Send notification if anything was deleted @@ -407,6 +422,7 @@ def process_public_retention(): return results except Exception as e: + log_event("process_public_retention_error", {"error": str(e)}) debug_print(f"Error in process_public_retention: {e}") return results @@ -447,11 +463,14 @@ def delete_aged_conversations(retention_days, workspace_type='personal', user_id cutoff_iso = cutoff_date.isoformat() # Query for aged conversations + # Check for null/undefined FIRST to avoid comparing null values with dates query = f""" SELECT c.id, c.title, c.last_activity_at, c.{partition_field} FROM c WHERE c.{partition_field} = @partition_value - AND (c.last_activity_at < @cutoff_date OR IS_NULL(c.last_activity_at)) + AND (NOT IS_DEFINED(c.last_activity_at) + OR IS_NULL(c.last_activity_at) + OR (IS_DEFINED(c.last_activity_at) AND NOT IS_NULL(c.last_activity_at) AND c.last_activity_at < @cutoff_date)) """ parameters = [ @@ -459,19 +478,27 @@ def delete_aged_conversations(retention_days, workspace_type='personal', user_id {"name": "@cutoff_date", "value": cutoff_iso} ] - aged_conversations = list(container.query_items( - query=query, - parameters=parameters, - enable_cross_partition_query=True - )) + debug_print(f"Querying aged conversations: workspace_type={workspace_type}, partition_field={partition_field}, partition_value={partition_value}, cutoff_date={cutoff_iso}, retention_days={retention_days}") + + try: + aged_conversations = list(container.query_items( + query=query, + parameters=parameters, + enable_cross_partition_query=True + )) + debug_print(f"Found {len(aged_conversations)} aged conversations for {workspace_type} workspace") + except Exception as query_error: + log_event("delete_aged_conversations_query_error", {"error": str(query_error), "workspace_type": workspace_type, "partition_value": partition_value}) + debug_print(f"Error querying aged conversations for {workspace_type} (partition_value={partition_value}): {query_error}") 
+ return {'count': 0, 'details': []} deleted_details = [] for conv in aged_conversations: - conversation_id = conv.get('id') - conversation_title = conv.get('title', 'Untitled') - try: + conversation_id = conv.get('id') + conversation_title = conv.get('title', 'Untitled') + # Read full conversation for archiving/logging conversation_item = container.read_item( item=conversation_id, @@ -535,7 +562,7 @@ def delete_aged_conversations(retention_days, workspace_type='personal', user_id is_bulk_operation=True, group_id=conversation_item.get('group_id'), public_workspace_id=conversation_item.get('public_workspace_id'), - deletion_reason='retention_policy' + additional_context={'deletion_reason': 'retention_policy'} ) # Delete conversation @@ -553,7 +580,9 @@ def delete_aged_conversations(retention_days, workspace_type='personal', user_id debug_print(f"Deleted conversation {conversation_id} ({conversation_title}) due to retention policy") except Exception as e: - debug_print(f"Error deleting conversation {conversation_id}: {e}") + conv_id = conv.get('id', 'unknown') if conv else 'unknown' + log_event("delete_aged_conversations_deletion_error", {"error": str(e), "conversation_id": conv_id, "workspace_type": workspace_type}) + debug_print(f"Error deleting conversation {conv_id}: {e}") return { 'count': len(deleted_details), @@ -593,15 +622,18 @@ def delete_aged_documents(retention_days, workspace_type='personal', user_id=Non deletion_user_id = user_id # Calculate cutoff date + # Documents use format like '2026-01-08T21:49:15Z' so we match that format cutoff_date = datetime.now(timezone.utc) - timedelta(days=retention_days) - cutoff_iso = cutoff_date.isoformat() + cutoff_iso = cutoff_date.strftime('%Y-%m-%dT%H:%M:%SZ') # Query for aged documents + # Documents use 'last_updated' field (not 'last_activity_at' like conversations) + # Use simple date comparison - documents always have last_updated field query = f""" - SELECT c.id, c.file_name, c.title, c.last_activity_at, 
c.{partition_field}, c.user_id + SELECT c.id, c.file_name, c.title, c.last_updated, c.user_id FROM c WHERE c.{partition_field} = @partition_value - AND (c.last_activity_at < @cutoff_date OR IS_NULL(c.last_activity_at)) + AND c.last_updated < @cutoff_date """ parameters = [ @@ -609,21 +641,29 @@ def delete_aged_documents(retention_days, workspace_type='personal', user_id=Non {"name": "@cutoff_date", "value": cutoff_iso} ] - aged_documents = list(container.query_items( - query=query, - parameters=parameters, - enable_cross_partition_query=True - )) + debug_print(f"Querying aged documents: workspace_type={workspace_type}, partition_field={partition_field}, partition_value={partition_value}, cutoff_date={cutoff_iso}, retention_days={retention_days}") + + try: + aged_documents = list(container.query_items( + query=query, + parameters=parameters, + enable_cross_partition_query=True + )) + debug_print(f"Found {len(aged_documents)} aged documents for {workspace_type} workspace") + except Exception as query_error: + log_event("delete_aged_documents_query_error", {"error": str(query_error), "workspace_type": workspace_type, "partition_value": partition_value}) + debug_print(f"Error querying aged documents for {workspace_type} (partition_value={partition_value}): {query_error}") + return {'count': 0, 'details': []} deleted_details = [] for doc in aged_documents: - document_id = doc.get('id') - file_name = doc.get('file_name', 'Unknown') - title = doc.get('title', file_name) - doc_user_id = doc.get('user_id') or deletion_user_id - try: + document_id = doc.get('id') + file_name = doc.get('file_name', 'Unknown') + title = doc.get('title', file_name) + doc_user_id = doc.get('user_id') or deletion_user_id + # Delete document chunks from search index delete_document_chunks(document_id, group_id, public_workspace_id) @@ -634,13 +674,15 @@ def delete_aged_documents(retention_days, workspace_type='personal', user_id=Non 'id': document_id, 'file_name': file_name, 'title': title, - 
'last_activity_at': doc.get('last_activity_at') + 'last_updated': doc.get('last_updated') }) debug_print(f"Deleted document {document_id} ({file_name}) due to retention policy") except Exception as e: - debug_print(f"Error deleting document {document_id}: {e}") + doc_id = doc.get('id', 'unknown') if doc else 'unknown' + log_event("delete_aged_documents_deletion_error", {"error": str(e), "document_id": doc_id, "workspace_type": workspace_type}) + debug_print(f"Error deleting document {doc_id}: {e}") return { 'count': len(deleted_details), diff --git a/application/single_app/route_frontend_admin_settings.py b/application/single_app/route_frontend_admin_settings.py index 838a565c..da45c965 100644 --- a/application/single_app/route_frontend_admin_settings.py +++ b/application/single_app/route_frontend_admin_settings.py @@ -413,24 +413,47 @@ def admin_settings(): if debug_timer_value < min_val or debug_timer_value > max_val: debug_timer_value = min(max(debug_timer_value, min_val), max_val) + # Get existing timer settings to check if they've changed + existing_debug_timer_enabled = settings.get('debug_logging_timer_enabled', False) + existing_debug_timer_value = settings.get('debug_timer_value', 1) + existing_debug_timer_unit = settings.get('debug_timer_unit', 'hours') + existing_debug_logging_enabled = settings.get('enable_debug_logging', False) + existing_debug_turnoff_time = settings.get('debug_logging_turnoff_time') + + # Determine if timer settings have changed + timer_settings_changed = ( + debug_logging_timer_enabled != existing_debug_timer_enabled or + debug_timer_value != existing_debug_timer_value or + debug_timer_unit != existing_debug_timer_unit + ) + debug_logging_newly_enabled = enable_debug_logging and not existing_debug_logging_enabled + # Calculate debug logging turnoff time if timer is enabled and debug logging is on if enable_debug_logging and debug_logging_timer_enabled: - now = datetime.now() - - if debug_timer_unit == 'minutes': - delta = 
timedelta(minutes=debug_timer_value) - elif debug_timer_unit == 'hours': - delta = timedelta(hours=debug_timer_value) - elif debug_timer_unit == 'days': - delta = timedelta(days=debug_timer_value) - elif debug_timer_unit == 'weeks': - delta = timedelta(weeks=debug_timer_value) + # Only recalculate turnoff time if: + # 1. Timer settings have changed (value, unit, or enabled state), OR + # 2. Debug logging was just enabled, OR + # 3. No existing turnoff time exists + if timer_settings_changed or debug_logging_newly_enabled or not existing_debug_turnoff_time: + now = datetime.now() + + if debug_timer_unit == 'minutes': + delta = timedelta(minutes=debug_timer_value) + elif debug_timer_unit == 'hours': + delta = timedelta(hours=debug_timer_value) + elif debug_timer_unit == 'days': + delta = timedelta(days=debug_timer_value) + elif debug_timer_unit == 'weeks': + delta = timedelta(weeks=debug_timer_value) + else: + delta = timedelta(hours=1) # default fallback + + debug_logging_turnoff_time = now + delta + # Convert to ISO string for JSON serialization + debug_logging_turnoff_time_str = debug_logging_turnoff_time.isoformat() else: - delta = timedelta(hours=1) # default fallback - - debug_logging_turnoff_time = now + delta - # Convert to ISO string for JSON serialization - debug_logging_turnoff_time_str = debug_logging_turnoff_time.isoformat() + # Preserve existing turnoff time + debug_logging_turnoff_time_str = existing_debug_turnoff_time else: debug_logging_turnoff_time_str = None @@ -439,6 +462,7 @@ def admin_settings(): file_timer_value = int(form_data.get('file_timer_value', 1)) file_timer_unit = form_data.get('file_timer_unit', 'hours') file_processing_logs_turnoff_time = None + enable_file_processing_logs = form_data.get('enable_file_processing_logs') == 'on' # Validate file timer values if file_timer_unit in timer_limits: @@ -446,25 +470,47 @@ def admin_settings(): if file_timer_value < min_val or file_timer_value > max_val: file_timer_value = 
min(max(file_timer_value, min_val), max_val) + # Get existing file timer settings to check if they've changed + existing_file_timer_enabled = settings.get('file_processing_logs_timer_enabled', False) + existing_file_timer_value = settings.get('file_timer_value', 1) + existing_file_timer_unit = settings.get('file_timer_unit', 'hours') + existing_file_processing_logs_enabled = settings.get('enable_file_processing_logs', False) + existing_file_turnoff_time = settings.get('file_processing_logs_turnoff_time') + + # Determine if timer settings have changed + file_timer_settings_changed = ( + file_processing_logs_timer_enabled != existing_file_timer_enabled or + file_timer_value != existing_file_timer_value or + file_timer_unit != existing_file_timer_unit + ) + file_processing_logs_newly_enabled = enable_file_processing_logs and not existing_file_processing_logs_enabled + # Calculate file processing logs turnoff time if timer is enabled and file processing logs are on - enable_file_processing_logs = form_data.get('enable_file_processing_logs') == 'on' if enable_file_processing_logs and file_processing_logs_timer_enabled: - now = datetime.now() - - if file_timer_unit == 'minutes': - delta = timedelta(minutes=file_timer_value) - elif file_timer_unit == 'hours': - delta = timedelta(hours=file_timer_value) - elif file_timer_unit == 'days': - delta = timedelta(days=file_timer_value) - elif file_timer_unit == 'weeks': - delta = timedelta(weeks=file_timer_value) + # Only recalculate turnoff time if: + # 1. Timer settings have changed (value, unit, or enabled state), OR + # 2. File processing logs was just enabled, OR + # 3. 
No existing turnoff time exists + if file_timer_settings_changed or file_processing_logs_newly_enabled or not existing_file_turnoff_time: + now = datetime.now() + + if file_timer_unit == 'minutes': + delta = timedelta(minutes=file_timer_value) + elif file_timer_unit == 'hours': + delta = timedelta(hours=file_timer_value) + elif file_timer_unit == 'days': + delta = timedelta(days=file_timer_value) + elif file_timer_unit == 'weeks': + delta = timedelta(weeks=file_timer_value) + else: + delta = timedelta(hours=1) # default fallback + + file_processing_logs_turnoff_time = now + delta + # Convert to ISO string for JSON serialization + file_processing_logs_turnoff_time_str = file_processing_logs_turnoff_time.isoformat() else: - delta = timedelta(hours=1) # default fallback - - file_processing_logs_turnoff_time = now + delta - # Convert to ISO string for JSON serialization - file_processing_logs_turnoff_time_str = file_processing_logs_turnoff_time.isoformat() + # Preserve existing turnoff time + file_processing_logs_turnoff_time_str = existing_file_turnoff_time else: file_processing_logs_turnoff_time_str = None diff --git a/application/single_app/static/js/group/manage_group.js b/application/single_app/static/js/group/manage_group.js index 371689db..87a12b58 100644 --- a/application/single_app/static/js/group/manage_group.js +++ b/application/single_app/static/js/group/manage_group.js @@ -59,6 +59,39 @@ $(document).ready(function () { } }); + // Add event delegation for select user button in search results + $(document).on("click", ".select-user-btn", function () { + const id = $(this).data("user-id"); + const name = $(this).data("user-name"); + const email = $(this).data("user-email"); + selectUserForAdd(id, name, email); + }); + + // Add event delegation for remove member button + $(document).on("click", ".remove-member-btn", function () { + const userId = $(this).data("user-id"); + removeMember(userId); + }); + + // Add event delegation for change role button + 
$(document).on("click", ".change-role-btn", function () { + const userId = $(this).data("user-id"); + const currentRole = $(this).data("user-role"); + openChangeRoleModal(userId, currentRole); + $("#changeRoleModal").modal("show"); + }); + + // Add event delegation for approve/reject request buttons + $(document).on("click", ".approve-request-btn", function () { + const requestId = $(this).data("request-id"); + approveRequest(requestId); + }); + + $(document).on("click", ".reject-request-btn", function () { + const requestId = $(this).data("request-id"); + rejectRequest(requestId); + }); + // CSV Bulk Upload Events $("#addBulkMemberBtn").on("click", function () { $("#csvBulkUploadModal").modal("show"); @@ -407,17 +440,15 @@ function renderMemberActions(member) { } else { return ` `; @@ -473,8 +504,10 @@ function loadPendingRequests() { ${u.displayName} ${u.email} - - + + `; @@ -522,66 +555,44 @@ function rejectRequest(requestId) { }); } +// Search users for manual add function searchUsers() { const term = $("#userSearchTerm").val().trim(); if (!term) { - alert("Please enter a search term."); + alert("Enter a name or email to search."); return; } - - // UI state $("#searchStatus").text("Searching..."); $("#searchUsersBtn").prop("disabled", true); - $.ajax({ - url: "/api/userSearch", - method: "GET", - data: { query: term }, - dataType: "json", - }) - .done(function (results) { - renderUserSearchResults(results); - }) - .fail(function (jqXHR, textStatus, errorThrown) { - console.error("User search error:", textStatus, errorThrown); - - if (jqXHR.status === 401) { - // Session expired or no token → force re-login - window.location.href = "/login"; - } else { - const msg = jqXHR.responseJSON?.error - ? 
jqXHR.responseJSON.error - : "User search failed."; - alert(msg); - } + $.get("/api/userSearch", { query: term }) + .done(renderUserSearchResults) + .fail(function (jq) { + const err = jq.responseJSON?.error || jq.statusText; + alert("User search failed: " + err); }) .always(function () { - // Restore UI state $("#searchStatus").text(""); $("#searchUsersBtn").prop("disabled", false); }); } +// Render user-search results in add-member modal function renderUserSearchResults(users) { let html = ""; - if (!users || users.length === 0) { - html = ` - - No results found - - `; + if (!users || !users.length) { + html = `No results.`; } else { - users.forEach((u) => { + users.forEach(u => { html += ` ${u.displayName || "(no name)"} ${u.email || ""} - @@ -592,9 +603,10 @@ function renderUserSearchResults(users) { $("#userSearchResultsTable tbody").html(html); } -function selectUserForAdd(uid, displayName, email) { - $("#newUserId").val(uid); - $("#newUserDisplayName").val(displayName); +// Populate manual-add fields from search result +function selectUserForAdd(id, name, email) { + $("#newUserId").val(id); + $("#newUserDisplayName").val(name); $("#newUserEmail").val(email); } diff --git a/docs/explanation/fixes/v0.235.025/RETENTION_POLICY_DOCUMENT_DELETION_FIX.md b/docs/explanation/fixes/v0.235.025/RETENTION_POLICY_DOCUMENT_DELETION_FIX.md new file mode 100644 index 00000000..cacb8b68 --- /dev/null +++ b/docs/explanation/fixes/v0.235.025/RETENTION_POLICY_DOCUMENT_DELETION_FIX.md @@ -0,0 +1,149 @@ +# Retention Policy Document Deletion Fix + +**Version Implemented:** 0.235.025 + +## Problem Statement + +The retention policy execution was failing when attempting to delete aged documents, while conversation deletion worked correctly. The error manifested as: + +``` +[DEBUG] [INFO]: Error querying aged documents for personal (partition_value=1d6312bd-3eaa-4586-8b74-e90eee126f78): (BadRequest) One of the input values is invalid. 
+``` + +This prevented the automated cleanup of old documents based on user-configured retention policies. + +## Root Cause Analysis + +Investigation revealed **four distinct issues** causing the document deletion to fail: + +### Issue 1: Wrong Field Name +Documents use `last_updated` as the timestamp field, but the retention policy was querying for `last_activity_at` (which is used by conversations). + +**Document schema:** +```json +{ + "upload_date": "2025-11-20T15:17:57Z", + "last_updated": "2025-11-20T15:54:22Z" +} +``` + +**Incorrect query:** +```sql +WHERE c.last_activity_at < @cutoff_date +``` + +### Issue 2: Date Format Mismatch +Documents store timestamps in `YYYY-MM-DDTHH:MM:SSZ` format, but the query was using Python's `.isoformat()` which produces `+00:00` suffix with microseconds. + +- **Document format:** `2026-01-08T21:49:15Z` +- **Query format:** `2026-01-15T15:49:09.828460+00:00` + +Cosmos DB string comparison failed due to format differences. + +### Issue 3: Duplicate Column in SELECT +The query included both `c.{partition_field}` and `c.user_id` in the SELECT clause. When `partition_field='user_id'`, this created a duplicate column causing query errors. + +**Problematic query:** +```sql +SELECT c.id, c.file_name, c.title, c.last_updated, c.user_id, c.user_id +``` + +### Issue 4: Incorrect Activity Logging Parameter +The `log_conversation_deletion()` function was called with `deletion_reason='retention_policy'`, but this parameter doesn't exist in the function signature. It should use `additional_context` instead. 
+ +## Solution Implementation + +### File Modified: `functions_retention_policy.py` + +#### Fix 1: Correct Field Name +Changed document queries to use `last_updated` instead of `last_activity_at`: + +```python +# Query for aged documents +# Documents use 'last_updated' field (not 'last_activity_at' like conversations) +query = f""" + SELECT c.id, c.file_name, c.title, c.last_updated, c.user_id + FROM c + WHERE c.{partition_field} = @partition_value + AND c.last_updated < @cutoff_date +""" +``` + +#### Fix 2: Correct Date Format +Changed from `.isoformat()` to `.strftime()` to match document timestamp format: + +```python +# Documents use format like '2026-01-08T21:49:15Z' so we match that format +cutoff_date = datetime.now(timezone.utc) - timedelta(days=retention_days) +cutoff_iso = cutoff_date.strftime('%Y-%m-%dT%H:%M:%SZ') +``` + +#### Fix 3: Remove Duplicate Column +Simplified SELECT to avoid duplicate columns: + +```python +SELECT c.id, c.file_name, c.title, c.last_updated, c.user_id +``` + +#### Fix 4: Correct Activity Logging Parameter +Changed from invalid parameter to proper `additional_context`: + +```python +# Before (incorrect) +log_conversation_deletion( + ... + deletion_reason='retention_policy' +) + +# After (correct) +log_conversation_deletion( + ... 
+ additional_context={'deletion_reason': 'retention_policy'} +) +``` + +### Additional Improvements + +#### Enhanced Debug Logging +Added comprehensive debug logging to aid future troubleshooting: + +```python +debug_print(f"Processing retention for user {user_id}: conversations={conversation_retention_days} days, documents={document_retention_days} days") +debug_print(f"Querying aged documents: workspace_type={workspace_type}, partition_field={partition_field}, partition_value={partition_value}, cutoff_date={cutoff_iso}, retention_days={retention_days}") +debug_print(f"Found {len(aged_documents)} aged documents for {workspace_type} workspace") +``` + +## Testing & Validation + +After the fix, retention policy execution completed successfully: + +``` +[DEBUG] [INFO]: Querying aged documents: workspace_type=personal, partition_field=user_id, partition_value=1d6312bd-3eaa-4586-8b74-e90eee126f78, cutoff_date=2026-01-15T15:58:09Z, retention_days=1 +[DEBUG] [INFO]: Found 1 aged documents for personal workspace +[DEBUG] [INFO]: [DELETE DOCUMENT] Starting deletion for document: 36a030b2-57b2-426b-8aa9-6f49eed5f8a6 +[DEBUG] [INFO]: Logged document deletion transaction: 36a030b2-57b2-426b-8aa9-6f49eed5f8a6 +Successfully deleted blob at 1d6312bd-3eaa-4586-8b74-e90eee126f78/test.pdf +[DEBUG] [INFO]: Deleted document 36a030b2-57b2-426b-8aa9-6f49eed5f8a6 (test.pdf) due to retention policy +[DEBUG] [INFO]: Notification created: 5a92235d-d408-4449-9c72-0951ed198688 [personal] [system_announcement] +[DEBUG] [INFO]: Retention policy execution completed: {'success': True, ... 
'documents': 1, 'users_affected': 1 ...}
+```
+
+## Files Changed
+
+| File | Changes |
+|------|---------|
+| `functions_retention_policy.py` | Fixed field name, date format, duplicate columns, activity logging |
+| `config.py` | Version bump to 0.235.025 |
+
+## Impact
+
+- **Retention Policy:** Now correctly deletes aged documents based on user settings
+- **Activity Logging:** Document deletions are properly logged with deletion reason
+- **User Notifications:** Users receive notifications when documents are deleted by retention policy
+- **Blob Storage:** Associated blob files are correctly removed
+
+## Related Components
+
+- Conversation retention (uses `last_activity_at` - unchanged)
+- Group workspace retention (shares same document deletion logic)
+- Public workspace retention (shares same document deletion logic)
diff --git a/docs/explanation/fixes/v0.235.025/RETENTION_POLICY_SCHEDULER_FIX.md b/docs/explanation/fixes/v0.235.025/RETENTION_POLICY_SCHEDULER_FIX.md
new file mode 100644
index 00000000..36ae844d
--- /dev/null
+++ b/docs/explanation/fixes/v0.235.025/RETENTION_POLICY_SCHEDULER_FIX.md
@@ -0,0 +1,87 @@
+# Retention Policy Scheduler Fix
+
+## Issue Description
+
+The automated retention policy scheduler was not executing at the scheduled time. Users would see the "Next Scheduled Execution" time pass without the policy running.
+
+## Root Cause Analysis
+
+The background scheduler in `app.py` had multiple issues preventing reliable execution:
+
+1. **Hour-matching approach was unreliable**: The scheduler only ran if the check happened exactly during the execution hour (e.g., 2 AM). With 1-hour sleep intervals, it could easily miss the entire window if the thread's check cycle didn't align with the execution hour.
+
+2. **Check interval too long**: Checking every hour (3600 seconds) meant poor responsiveness and high probability of missing the scheduled time.
+
+3. 
**No use of stored next_run timestamp**: The code ignored the `retention_policy_next_run` setting that was being saved, instead relying solely on hour matching. + +4. **No catch-up logic**: If the scheduled time passed while the app was down or during a sleep cycle, there was no mechanism to run the missed execution. + +## Files Modified + +| File | Changes | +|------|---------| +| [app.py](../../../application/single_app/app.py) | Rewrote `check_retention_policy()` background task | +| [config.py](../../../application/single_app/config.py) | Version bump to 0.235.025 | + +## Technical Details + +### Before (Problematic Code) +```python +# Check if we're in the execution hour +if current_time.hour == execution_hour: + # Check if we haven't run today yet + last_run = settings.get('retention_policy_last_run') + # ... run if last_run > 23 hours ago + +# Check every hour +time.sleep(3600) +``` + +### After (Fixed Code) +```python +# Check if next scheduled run time has passed +next_run = settings.get('retention_policy_next_run') +if next_run: + next_run_dt = datetime.fromisoformat(next_run) + # Run if we've passed the scheduled time + if current_time >= next_run_dt: + should_run = True + +# Check every 5 minutes for more responsive scheduling +time.sleep(300) +``` + +### Key Improvements + +1. **Uses `retention_policy_next_run` timestamp**: Compares current time against the stored next scheduled execution time. If current time >= scheduled time, it runs. + +2. **Reduced check interval**: Changed from 1 hour to 5 minutes (300 seconds) for more responsive scheduling. + +3. **Better fallback logic**: If `next_run` can't be parsed, falls back to checking `last_run` with a 23-hour threshold. + +4. **Immediate execution for missed schedules**: If the scheduled time has already passed, the policy runs on the next check cycle. + +5. **Runs immediately if never run before**: If there's no `last_run` or `next_run`, it will execute on the first check. + +## Testing Approach + +1. 
Enable retention policy for personal workspaces +2. Set execution hour to current hour or a past hour +3. Restart the application +4. Verify the retention policy executes within 5 minutes +5. Confirm `Last Execution` and `Next Scheduled Execution` timestamps update correctly + +## Impact Analysis + +- **Positive**: Retention policies now execute reliably at scheduled times +- **Positive**: Missed executions are caught up on next app start or check cycle +- **Consideration**: Slightly higher CPU usage due to 5-minute checks vs 1-hour checks (negligible impact) + +## Version Information + +- **Fixed in version**: 0.235.025 +- **Issue introduced**: Original implementation + +## Related Changes + +- [RETENTION_POLICY_DOCUMENT_DELETION_FIX.md](RETENTION_POLICY_DOCUMENT_DELETION_FIX.md) - Related retention policy fixes in same version diff --git a/docs/explanation/release_notes.md b/docs/explanation/release_notes.md index ac99efa6..02d7139a 100644 --- a/docs/explanation/release_notes.md +++ b/docs/explanation/release_notes.md @@ -1,6 +1,28 @@ # Feature Release +### **(v0.235.025)** + +#### Bug Fixes + +* **Retention Policy Document Deletion Fix** + * Fixed critical bug where retention policy execution failed when attempting to delete aged documents, while conversation deletion worked correctly. + * **Root Cause 1**: Documents use `last_updated` field, but query was looking for `last_activity_at` (used by conversations). + * **Root Cause 2**: Date format mismatch - documents store `YYYY-MM-DDTHH:MM:SSZ` but query used Python's `.isoformat()` with `+00:00` suffix. + * **Root Cause 3**: Duplicate column in SELECT clause when `partition_field='user_id'` caused query errors. + * **Root Cause 4**: Activity logging called with incorrect `deletion_reason` parameter instead of `additional_context`. + * **Files Modified**: `functions_retention_policy.py` (query field names, date format, SELECT clause, activity logging). 
+ * (Ref: `delete_aged_documents()`, retention policy execution, Cosmos DB queries) + +* **Retention Policy Scheduler Fix** + * Fixed automated retention policy scheduler not executing at the scheduled time. + * **Root Cause 1**: Hour-matching approach was unreliable - only ran if check happened exactly during the execution hour (e.g., 2 AM), but 1-hour sleep intervals could miss the entire window. + * **Root Cause 2**: Check interval too long (1 hour) meant poor responsiveness and high probability of missing scheduled time. + * **Root Cause 3**: Code ignored the stored `retention_policy_next_run` timestamp, instead relying solely on hour matching. + * **Solution**: Now uses `retention_policy_next_run` timestamp for comparison, reduced check interval from 1 hour to 5 minutes, added fallback logic for missed executions. + * **Files Modified**: `app.py` (`check_retention_policy()` background task). + * (Ref: retention policy scheduler, background task, scheduled execution) + ### **(v0.235.012)** #### Bug Fixes diff --git a/functional_tests/test_debug_logging_timer_preservation.py b/functional_tests/test_debug_logging_timer_preservation.py new file mode 100644 index 00000000..b05a31b2 --- /dev/null +++ b/functional_tests/test_debug_logging_timer_preservation.py @@ -0,0 +1,333 @@ +#!/usr/bin/env python3 +""" +Functional test for Debug Logging Timer Preservation Fix. +Version: 0.235.014 +Implemented in: 0.235.014 + +This test ensures that the debug logging and file processing logs turnoff times +are preserved when saving admin settings, rather than being recalculated on every save. + +The bug was that every time admin settings were saved, the turnoff time was recalculated +from "now + delta" instead of preserving the existing turnoff time if the timer settings +(value, unit, enabled state) hadn't changed. + +Root cause: route_frontend_admin_settings.py always recalculated turnoff times instead +of checking if timer settings changed. 
+ +Fix: Only recalculate turnoff time if: +1. Timer settings have changed (value, unit, or enabled state), OR +2. The logging was just enabled, OR +3. No existing turnoff time exists +""" + +import sys +import os +from datetime import datetime, timedelta + +# Add parent directory to path for imports +sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..', 'application', 'single_app')) + + +def test_timer_recalculation_logic(): + """ + Test the logic that determines when turnoff time should be recalculated. + This simulates the conditions checked in route_frontend_admin_settings.py. + """ + print("🧪 Testing timer recalculation logic...") + + # Scenario 1: Settings unchanged - should preserve existing time + print("\n📋 Scenario 1: No timer settings changed - should preserve existing time") + existing_settings = { + 'enable_debug_logging': True, + 'debug_logging_timer_enabled': True, + 'debug_timer_value': 1, + 'debug_timer_unit': 'weeks', + 'debug_logging_turnoff_time': '2026-01-22T11:05:44.417753' + } + new_form_data = { + 'enable_debug_logging': True, + 'debug_logging_timer_enabled': True, + 'debug_timer_value': 1, + 'debug_timer_unit': 'weeks' + } + + timer_settings_changed = ( + new_form_data['debug_logging_timer_enabled'] != existing_settings.get('debug_logging_timer_enabled', False) or + new_form_data['debug_timer_value'] != existing_settings.get('debug_timer_value', 1) or + new_form_data['debug_timer_unit'] != existing_settings.get('debug_timer_unit', 'hours') + ) + debug_logging_newly_enabled = new_form_data['enable_debug_logging'] and not existing_settings.get('enable_debug_logging', False) + existing_turnoff_time = existing_settings.get('debug_logging_turnoff_time') + + should_recalculate = timer_settings_changed or debug_logging_newly_enabled or not existing_turnoff_time + + if should_recalculate: + print("❌ FAILED: Should NOT recalculate - settings unchanged") + return False + else: + print("✅ PASSED: Correctly determined to preserve existing 
turnoff time") + + # Scenario 2: Timer value changed - should recalculate + print("\n📋 Scenario 2: Timer value changed - should recalculate") + new_form_data_changed_value = { + 'enable_debug_logging': True, + 'debug_logging_timer_enabled': True, + 'debug_timer_value': 2, # Changed from 1 to 2 + 'debug_timer_unit': 'weeks' + } + + timer_settings_changed = ( + new_form_data_changed_value['debug_logging_timer_enabled'] != existing_settings.get('debug_logging_timer_enabled', False) or + new_form_data_changed_value['debug_timer_value'] != existing_settings.get('debug_timer_value', 1) or + new_form_data_changed_value['debug_timer_unit'] != existing_settings.get('debug_timer_unit', 'hours') + ) + debug_logging_newly_enabled = new_form_data_changed_value['enable_debug_logging'] and not existing_settings.get('enable_debug_logging', False) + + should_recalculate = timer_settings_changed or debug_logging_newly_enabled or not existing_turnoff_time + + if should_recalculate: + print("✅ PASSED: Correctly determined to recalculate (timer value changed)") + else: + print("❌ FAILED: Should recalculate when timer value changed") + return False + + # Scenario 3: Timer unit changed - should recalculate + print("\n📋 Scenario 3: Timer unit changed - should recalculate") + new_form_data_changed_unit = { + 'enable_debug_logging': True, + 'debug_logging_timer_enabled': True, + 'debug_timer_value': 1, + 'debug_timer_unit': 'days' # Changed from 'weeks' to 'days' + } + + timer_settings_changed = ( + new_form_data_changed_unit['debug_logging_timer_enabled'] != existing_settings.get('debug_logging_timer_enabled', False) or + new_form_data_changed_unit['debug_timer_value'] != existing_settings.get('debug_timer_value', 1) or + new_form_data_changed_unit['debug_timer_unit'] != existing_settings.get('debug_timer_unit', 'hours') + ) + debug_logging_newly_enabled = new_form_data_changed_unit['enable_debug_logging'] and not existing_settings.get('enable_debug_logging', False) + + should_recalculate 
= timer_settings_changed or debug_logging_newly_enabled or not existing_turnoff_time + + if should_recalculate: + print("✅ PASSED: Correctly determined to recalculate (timer unit changed)") + else: + print("❌ FAILED: Should recalculate when timer unit changed") + return False + + # Scenario 4: Debug logging newly enabled - should recalculate + print("\n📋 Scenario 4: Debug logging newly enabled - should recalculate") + existing_settings_logging_off = { + 'enable_debug_logging': False, # Was off + 'debug_logging_timer_enabled': True, + 'debug_timer_value': 1, + 'debug_timer_unit': 'weeks', + 'debug_logging_turnoff_time': None # No turnoff time when disabled + } + new_form_data_enable = { + 'enable_debug_logging': True, # Now on + 'debug_logging_timer_enabled': True, + 'debug_timer_value': 1, + 'debug_timer_unit': 'weeks' + } + + timer_settings_changed = ( + new_form_data_enable['debug_logging_timer_enabled'] != existing_settings_logging_off.get('debug_logging_timer_enabled', False) or + new_form_data_enable['debug_timer_value'] != existing_settings_logging_off.get('debug_timer_value', 1) or + new_form_data_enable['debug_timer_unit'] != existing_settings_logging_off.get('debug_timer_unit', 'hours') + ) + debug_logging_newly_enabled = new_form_data_enable['enable_debug_logging'] and not existing_settings_logging_off.get('enable_debug_logging', False) + existing_turnoff_time_off = existing_settings_logging_off.get('debug_logging_turnoff_time') + + should_recalculate = timer_settings_changed or debug_logging_newly_enabled or not existing_turnoff_time_off + + if should_recalculate: + print("✅ PASSED: Correctly determined to recalculate (debug logging newly enabled)") + else: + print("❌ FAILED: Should recalculate when debug logging is newly enabled") + return False + + # Scenario 5: Timer enabled changed - should recalculate + print("\n📋 Scenario 5: Timer enabled changed (was off, now on) - should recalculate") + existing_settings_timer_off = { + 'enable_debug_logging': 
True, + 'debug_logging_timer_enabled': False, # Was off + 'debug_timer_value': 1, + 'debug_timer_unit': 'weeks', + 'debug_logging_turnoff_time': None # No turnoff time when timer disabled + } + new_form_data_timer_on = { + 'enable_debug_logging': True, + 'debug_logging_timer_enabled': True, # Now on + 'debug_timer_value': 1, + 'debug_timer_unit': 'weeks' + } + + timer_settings_changed = ( + new_form_data_timer_on['debug_logging_timer_enabled'] != existing_settings_timer_off.get('debug_logging_timer_enabled', False) or + new_form_data_timer_on['debug_timer_value'] != existing_settings_timer_off.get('debug_timer_value', 1) or + new_form_data_timer_on['debug_timer_unit'] != existing_settings_timer_off.get('debug_timer_unit', 'hours') + ) + debug_logging_newly_enabled = new_form_data_timer_on['enable_debug_logging'] and not existing_settings_timer_off.get('enable_debug_logging', False) + existing_turnoff_time_timer_off = existing_settings_timer_off.get('debug_logging_turnoff_time') + + should_recalculate = timer_settings_changed or debug_logging_newly_enabled or not existing_turnoff_time_timer_off + + if should_recalculate: + print("✅ PASSED: Correctly determined to recalculate (timer enabled state changed)") + else: + print("❌ FAILED: Should recalculate when timer enabled state changed") + return False + + print("\n✅ All timer recalculation logic tests passed!") + return True + + +def test_file_processing_logs_timer_preservation(): + """ + Test the same preservation logic for file processing logs timer. 
+ """ + print("\n🧪 Testing file processing logs timer preservation logic...") + + # Scenario: Settings unchanged - should preserve existing time + print("\n📋 Scenario: No file timer settings changed - should preserve existing time") + existing_settings = { + 'enable_file_processing_logs': True, + 'file_processing_logs_timer_enabled': True, + 'file_timer_value': 24, + 'file_timer_unit': 'hours', + 'file_processing_logs_turnoff_time': '2026-01-17T10:30:00.000000' + } + new_form_data = { + 'enable_file_processing_logs': True, + 'file_processing_logs_timer_enabled': True, + 'file_timer_value': 24, + 'file_timer_unit': 'hours' + } + + file_timer_settings_changed = ( + new_form_data['file_processing_logs_timer_enabled'] != existing_settings.get('file_processing_logs_timer_enabled', False) or + new_form_data['file_timer_value'] != existing_settings.get('file_timer_value', 1) or + new_form_data['file_timer_unit'] != existing_settings.get('file_timer_unit', 'hours') + ) + file_processing_logs_newly_enabled = new_form_data['enable_file_processing_logs'] and not existing_settings.get('enable_file_processing_logs', False) + existing_file_turnoff_time = existing_settings.get('file_processing_logs_turnoff_time') + + should_recalculate = file_timer_settings_changed or file_processing_logs_newly_enabled or not existing_file_turnoff_time + + if should_recalculate: + print("❌ FAILED: Should NOT recalculate - file timer settings unchanged") + return False + else: + print("✅ PASSED: Correctly determined to preserve existing file turnoff time") + + print("\n✅ File processing logs timer preservation test passed!") + return True + + +def test_edge_cases(): + """ + Test edge cases for timer preservation. 
+ """ + print("\n🧪 Testing edge cases...") + + # Edge case 1: No existing turnoff time (should always recalculate) + print("\n📋 Edge case 1: No existing turnoff time - should recalculate") + existing_settings = { + 'enable_debug_logging': True, + 'debug_logging_timer_enabled': True, + 'debug_timer_value': 1, + 'debug_timer_unit': 'weeks', + 'debug_logging_turnoff_time': None # No existing time + } + new_form_data = { + 'enable_debug_logging': True, + 'debug_logging_timer_enabled': True, + 'debug_timer_value': 1, + 'debug_timer_unit': 'weeks' + } + + timer_settings_changed = ( + new_form_data['debug_logging_timer_enabled'] != existing_settings.get('debug_logging_timer_enabled', False) or + new_form_data['debug_timer_value'] != existing_settings.get('debug_timer_value', 1) or + new_form_data['debug_timer_unit'] != existing_settings.get('debug_timer_unit', 'hours') + ) + debug_logging_newly_enabled = new_form_data['enable_debug_logging'] and not existing_settings.get('enable_debug_logging', False) + existing_turnoff_time = existing_settings.get('debug_logging_turnoff_time') + + should_recalculate = timer_settings_changed or debug_logging_newly_enabled or not existing_turnoff_time + + if should_recalculate: + print("✅ PASSED: Correctly determined to recalculate (no existing turnoff time)") + else: + print("❌ FAILED: Should recalculate when no existing turnoff time") + return False + + # Edge case 2: Empty string turnoff time (should recalculate) + print("\n📋 Edge case 2: Empty string turnoff time - should recalculate") + existing_settings_empty = { + 'enable_debug_logging': True, + 'debug_logging_timer_enabled': True, + 'debug_timer_value': 1, + 'debug_timer_unit': 'weeks', + 'debug_logging_turnoff_time': '' # Empty string + } + + existing_turnoff_time_empty = existing_settings_empty.get('debug_logging_turnoff_time') + should_recalculate = timer_settings_changed or debug_logging_newly_enabled or not existing_turnoff_time_empty + + if should_recalculate: + print("✅ 
PASSED: Correctly determined to recalculate (empty string turnoff time)") + else: + print("❌ FAILED: Should recalculate when turnoff time is empty string") + return False + + print("\n✅ All edge case tests passed!") + return True + + +if __name__ == "__main__": + print("=" * 70) + print("Debug Logging Timer Preservation Fix - Functional Test") + print("Version: 0.235.014") + print("=" * 70) + + tests = [ + ("Timer Recalculation Logic", test_timer_recalculation_logic), + ("File Processing Logs Timer Preservation", test_file_processing_logs_timer_preservation), + ("Edge Cases", test_edge_cases), + ] + + results = [] + for test_name, test_func in tests: + print(f"\n{'=' * 70}") + print(f"Running: {test_name}") + print("=" * 70) + try: + result = test_func() + results.append((test_name, result)) + except Exception as e: + print(f"❌ EXCEPTION in {test_name}: {e}") + import traceback + traceback.print_exc() + results.append((test_name, False)) + + print("\n" + "=" * 70) + print("TEST SUMMARY") + print("=" * 70) + + passed = sum(1 for _, r in results if r) + total = len(results) + + for test_name, result in results: + status = "✅ PASSED" if result else "❌ FAILED" + print(f" {status}: {test_name}") + + print(f"\n📊 Results: {passed}/{total} tests passed") + + if passed == total: + print("\n🎉 All tests passed! The fix is working correctly.") + sys.exit(0) + else: + print("\n⚠️ Some tests failed. Please review the fix.") + sys.exit(1)