From a139d1d64a51f8b20b7fa828ca337a41de1f5c79 Mon Sep 17 00:00:00 2001 From: Simon Jakesch Date: Thu, 30 Oct 2025 08:13:00 -0500 Subject: [PATCH 01/10] feat(containerapp): Add Docker Compose models and MCP gateway support - Parse 'models:' section from compose files with runtime_flags support - Deploy models container app with GPU workload profile support - Inject MODEL_RUNNER_URL and MODEL_RUNNER_MODEL into consuming services - Add MCP gateway detection and automatic configuration - Support x-azure-deployment overrides for ingress and resources - Add --dry-run flag for deployment preview without creating resources - Add --replace-all flag to replace all existing apps in environment --- .../azext_containerapp/_compose_utils.py | 1668 +++++++++++++++++ .../azext_containerapp/_params.py | 17 + src/containerapp/azext_containerapp/custom.py | 567 +++++- .../test_containerapp_compose_mcp_gateway.py | 127 ++ .../test_containerapp_compose_models.py | 300 +++ 5 files changed, 2653 insertions(+), 26 deletions(-) create mode 100644 src/containerapp/azext_containerapp/tests/latest/test_containerapp_compose_mcp_gateway.py create mode 100644 src/containerapp/azext_containerapp/tests/latest/test_containerapp_compose_models.py diff --git a/src/containerapp/azext_containerapp/_compose_utils.py b/src/containerapp/azext_containerapp/_compose_utils.py index 9b6e4455dee..3521159e8ec 100644 --- a/src/containerapp/azext_containerapp/_compose_utils.py +++ b/src/containerapp/azext_containerapp/_compose_utils.py @@ -47,3 +47,1671 @@ def validate_memory_and_cpu_setting(cpu, memory, managed_environment): logger.warning( # pylint: disable=W1203 f"Invalid CPU reservation request of {cpu}. The default resource values will be used.") return (None, None) + + +def parse_models_section(compose_yaml): + """ + Extract models section from raw YAML compose file. + + Args: + compose_yaml: Dictionary representation of compose file + + Returns: + Dictionary of models configuration or empty dict if not present + """ + from knack.log import get_logger + logger = get_logger(__name__) + + models = compose_yaml.get('models', {}) + if models: + logger.info(f"Found {len(models)} model(s) in compose file: {list(models.keys())}") + return models + + +def detect_mcp_gateway_service(service_name, service): + """ + Check if a service is an MCP gateway by examining the image name. + + Args: + service_name: Name of the service + service: Service configuration object + + Returns: + Boolean indicating if this is an MCP gateway service + """ + if not hasattr(service, 'image') or not service.image: + return False + + # Check if image contains 'mcp-gateway' substring + is_mcp_gateway = 'mcp-gateway' in service.image.lower() + + if is_mcp_gateway: + from knack.log import get_logger + logger = get_logger(__name__) + logger.info(f"Detected MCP gateway service: {service_name} (image: {service.image})") + + return is_mcp_gateway + + +def resolve_dependency_graph(services): + """ + Build service dependency order from depends_on relationships. 
+ + Args: + services: Dictionary of service configurations + + Returns: + List of service names in dependency order (dependencies first) + """ + from knack.log import get_logger + logger = get_logger(__name__) + + # Build adjacency list for dependencies + dependencies = {} + for service_name, service in services.items(): + deps = [] + if hasattr(service, 'depends_on') and service.depends_on: + if isinstance(service.depends_on, list): + deps = service.depends_on + elif isinstance(service.depends_on, dict): + deps = list(service.depends_on.keys()) + dependencies[service_name] = deps + + # Topological sort using DFS + visited = set() + result = [] + + def visit(name): + if name in visited: + return + visited.add(name) + for dep in dependencies.get(name, []): + if dep in services: # Only visit dependencies that exist + visit(dep) + result.append(name) + + for service_name in services.keys(): + visit(service_name) + + logger.info(f"Resolved service deployment order: {result}") + return result + + +# ============================================================================ +# Phase 3: Models Deployment Implementation +# ============================================================================ + +def validate_models_configuration(models): + """ + Validate models configuration from compose file. + + Args: + models: Dictionary of model configurations from compose file + + Returns: + Boolean indicating if models configuration is valid + + Raises: + InvalidArgumentValueError: If model names are invalid DNS labels + """ + from knack.log import get_logger + from azure.cli.core.azclierror import InvalidArgumentValueError + import re + + logger = get_logger(__name__) + + if not models: + return True + + # DNS label regex: lowercase alphanumeric and hyphens, start/end with alphanumeric + dns_label_pattern = re.compile(r'^[a-z0-9]([-a-z0-9]*[a-z0-9])?$') + + for model_name, model_config in models.items(): + # Skip x-azure-deployment metadata + if model_name == 'x-azure-deployment': + continue + + if not dns_label_pattern.match(model_name): + raise InvalidArgumentValueError( + f"Invalid model name '{model_name}'. Model names must be valid DNS labels: " + f"lowercase alphanumeric characters or hyphens, starting and ending with alphanumeric." + ) + + # Validate source format + if isinstance(model_config, dict) and 'source' in model_config: + validate_model_source(model_name, model_config['source']) + + logger.info(f"Validated {len(models)} model configuration(s)") + return True + + +def validate_model_source(model_name, source): + """ + Validate model source format. + + Args: + model_name: Name of the model + source: Model source string (e.g., ollama://phi, hf://model-name) + + Raises: + InvalidArgumentValueError: If source format is invalid + """ + from knack.log import get_logger + from azure.cli.core.azclierror import InvalidArgumentValueError + + logger = get_logger(__name__) + + if not source: + raise InvalidArgumentValueError( + f"Model '{model_name}' missing required 'source' field" + ) + + valid_prefixes = ['ollama://', 'hf://', 'https://', 'azureml://'] + + if not any(source.startswith(prefix) for prefix in valid_prefixes): + raise InvalidArgumentValueError( + f"Invalid source '{source}' for model '{model_name}'. 
" + f"Source must start with one of: {', '.join(valid_prefixes)}" + ) + + logger.info(f"Validated model source for '{model_name}': {source}") + + +def check_gpu_profile_availability(cmd, resource_group_name, env_name, location): + """ + Check if GPU workload profiles are available in the region. + + Args: + cmd: Azure CLI command context + resource_group_name: Resource group name + env_name: Container Apps environment name + location: Azure region + + Returns: + List of available GPU profile types + """ + from knack.log import get_logger + from azure.cli.core.util import send_raw_request + from azure.cli.core.commands.client_factory import get_subscription_id + import json + + logger = get_logger(__name__) + + try: + # Use Azure REST API to list supported workload profiles + management_hostname = cmd.cli_ctx.cloud.endpoints.resource_manager + sub_id = get_subscription_id(cmd.cli_ctx) + api_version = "2024-03-01" + + url_fmt = "{}/subscriptions/{}/providers/Microsoft.App/locations/{}/availableManagedEnvironmentsWorkloadProfileTypes?api-version={}" + request_url = url_fmt.format( + management_hostname.strip('/'), + sub_id, + location, + api_version + ) + + r = send_raw_request(cmd.cli_ctx, "GET", request_url) + response = r.json() + + supported_profiles = response.get('value', []) + + # Filter for GPU profiles + gpu_profiles = [ + profile for profile in supported_profiles + if 'GPU' in profile.get('name', '').upper() or + 'GPU' in profile.get('properties', {}).get('category', '').upper() + ] + + if gpu_profiles: + logger.info(f"Found {len(gpu_profiles)} GPU profile(s) in {location}") + else: + logger.warning(f"No GPU profiles available in {location}") + + return gpu_profiles + + except Exception as e: + logger.warning(f"Failed to check GPU profile availability: {str(e)}") + return [] + +def wait_for_environment_provisioning(cmd, resource_group_name, env_name, timeout_seconds=600): + """ + Wait for Container Apps environment to reach 'Succeeded' provisioning state. + + Args: + cmd: Azure CLI command context + resource_group_name: Resource group name + env_name: Container Apps environment name + timeout_seconds: Maximum time to wait (default 600 seconds = 10 minutes) + + Raises: + Exception: If environment doesn't reach Succeeded state within timeout + """ + from knack.log import get_logger + from ._clients import ManagedEnvironmentClient + import time + + logger = get_logger(__name__) + logger.info(f"Waiting for environment '{env_name}' to be provisioned...") + + start_time = time.time() + while True: + env = ManagedEnvironmentClient.show(cmd, resource_group_name, env_name) + provisioning_state = env.get('properties', {}).get('provisioningState', 'Unknown') + + logger.info(f"Environment provisioning state: {provisioning_state}") + + if provisioning_state == 'Succeeded': + logger.info(f"Environment '{env_name}' is ready") + return + + if provisioning_state in ['Failed', 'Canceled']: + raise Exception(f"Environment provisioning failed with state: {provisioning_state}") + + elapsed = time.time() - start_time + if elapsed > timeout_seconds: + raise Exception(f"Timeout waiting for environment provisioning. Current state: {provisioning_state}") + + logger.info(f"Waiting... (elapsed: {int(elapsed)}s, timeout: {timeout_seconds}s)") + time.sleep(5) # Check every 5 seconds + + +def create_gpu_workload_profile_if_needed(cmd, resource_group_name, env_name, location, requested_gpu_profile_type=None): + """ + Check for existing GPU profile and create one if needed. 
+ + Args: + cmd: Azure CLI command context + resource_group_name: Resource group name + env_name: Container Apps environment name + location: Azure region + requested_gpu_profile_type: Specific GPU profile type requested (e.g., Consumption-GPU-NC8as-T4) + + Returns: + Name of the GPU workload profile to use + + Raises: + ResourceNotFoundError: If no GPU profiles available in region + """ + from knack.log import get_logger + from azure.cli.core.azclierror import ResourceNotFoundError + from ._client_factory import handle_raw_exception + from ._clients import ManagedEnvironmentClient + + logger = get_logger(__name__) + + # Check if environment already has a GPU profile that matches the request + try: + # Use class methods directly - no instantiation needed + env = ManagedEnvironmentClient.show(cmd, resource_group_name, env_name) + + workload_profiles = env.get('properties', {}).get('workloadProfiles', []) + + # If specific GPU type requested, check if it exists + if requested_gpu_profile_type: + for profile in workload_profiles: + if profile.get('workloadProfileType') == requested_gpu_profile_type: + logger.info(f"Found requested GPU profile in environment: {requested_gpu_profile_type}") + return requested_gpu_profile_type + else: + # No specific request, return any GPU profile found + for profile in workload_profiles: + profile_type = profile.get('workloadProfileType', '') + if 'GPU' in profile_type.upper(): + logger.info(f"Found existing GPU profile: {profile.get('name')}") + return profile.get('name') + except Exception as e: + logger.warning(f"Failed to check existing profiles: {str(e)}") + + # Get available GPU profiles in region + available_gpu = check_gpu_profile_availability(cmd, resource_group_name, env_name, location) + + if not available_gpu: + alternatives_msg = "GPU workload profiles are not available in this region." + # Try to suggest nearby regions (simplified - would need proper region mapping) + alternatives_msg += "\n\nTry deploying to regions with GPU support: westus3, eastus, northeurope" + raise ResourceNotFoundError(alternatives_msg) + + # Use requested GPU profile type if provided, otherwise use first available + if requested_gpu_profile_type: + # Validate that requested profile is available + # The API returns profiles with 'name' field containing the workload profile type + available_types = [p.get('name') for p in available_gpu] + + # Pass through the requested type as-is - it's already the API value + # (e.g., Consumption-GPU-NC8as-T4, Consumption-GPU-NC24-A100, Consumption, Flex) + if requested_gpu_profile_type in available_types: + gpu_profile_type = requested_gpu_profile_type + logger.info(f"Using requested GPU profile type: {gpu_profile_type}") + else: + # Don't fall back - user explicitly requested this type + logger.error(f"Requested GPU profile '{requested_gpu_profile_type}' not available in {location}") + logger.error(f"Available GPU profiles: {available_types}") + raise ResourceNotFoundError( + f"Requested workload profile type '{requested_gpu_profile_type}' is not available in region '{location}'. 
" + f"Available types: {', '.join(available_types)}" + ) + else: + gpu_profile_type = available_gpu[0].get('name') + logger.info(f"No specific GPU profile requested, using: {gpu_profile_type}") + + # Check if it's a consumption-based GPU profile + if gpu_profile_type.startswith('Consumption-'): + # Consumption profiles need to be added to the environment's workloadProfiles array + logger.info(f"Adding consumption-based GPU profile to environment: {gpu_profile_type}") + + from azure.cli.core.util import send_raw_request + from azure.cli.core.commands.client_factory import get_subscription_id + import json + + # Get current environment configuration + env = ManagedEnvironmentClient.show(cmd, resource_group_name, env_name) + + # Check if profile already exists + existing_profiles = env.get('properties', {}).get('workloadProfiles', []) + for profile in existing_profiles: + if profile.get('workloadProfileType') == gpu_profile_type: + logger.info(f"GPU profile already exists: {gpu_profile_type}") + # Even if profile exists, environment might still be provisioning from a previous operation + wait_for_environment_provisioning(cmd, resource_group_name, env_name) + return gpu_profile_type + + # Add the consumption GPU profile to the environment + new_profile = { + "name": gpu_profile_type, + "workloadProfileType": gpu_profile_type + } + + # Clean existing profiles - remove unsupported properties like enableFips + cleaned_profiles = [] + for profile in existing_profiles: + cleaned = { + "name": profile.get("name"), + "workloadProfileType": profile.get("workloadProfileType") + } + # Only add minimumCount/maximumCount if they exist and aren't for Consumption profile + if not profile.get("workloadProfileType", "").startswith("Consumption"): + if "minimumCount" in profile: + cleaned["minimumCount"] = profile["minimumCount"] + if "maximumCount" in profile: + cleaned["maximumCount"] = profile["maximumCount"] + cleaned_profiles.append(cleaned) + + cleaned_profiles.append(new_profile) + + # Update the environment + management_hostname = cmd.cli_ctx.cloud.endpoints.resource_manager + sub_id = get_subscription_id(cmd.cli_ctx) + api_version = "2024-03-01" + + url_fmt = "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.App/managedEnvironments/{}?api-version={}" + request_url = url_fmt.format( + management_hostname.strip('/'), + sub_id, + resource_group_name, + env_name, + api_version + ) + + # Update environment with new workload profile + env_update = { + "properties": { + "workloadProfiles": cleaned_profiles + }, + "location": env.get('location') + } + + r = send_raw_request(cmd.cli_ctx, "PATCH", request_url, body=json.dumps(env_update)) + logger.info(f"Added GPU profile to environment: {gpu_profile_type}") + + # Wait for environment to be provisioned before continuing + wait_for_environment_provisioning(cmd, resource_group_name, env_name) + + return gpu_profile_type + + profile_name = 'gpu-profile' + + logger.info(f"Creating GPU workload profile '{profile_name}' of type '{gpu_profile_type}'") + + # Create the workload profile using existing add_workload_profile function + # Create the workload profile using Azure REST API + try: + from azure.cli.core.util import send_raw_request + from azure.cli.core.commands.client_factory import get_subscription_id + import json + + management_hostname = cmd.cli_ctx.cloud.endpoints.resource_manager + sub_id = get_subscription_id(cmd.cli_ctx) + api_version = "2024-03-01" + + # Create workload profile + url_fmt = 
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.App/managedEnvironments/{}/workloadProfiles/{}?api-version={}" + request_url = url_fmt.format( + management_hostname.strip('/'), + sub_id, + resource_group_name, + env_name, + profile_name, + api_version + ) + + profile_body = { + "properties": { + "workloadProfileType": gpu_profile_type, + "minimumCount": 1, + "maximumCount": 3 + } + } + + logger.info(f"Creating workload profile at: {request_url}") + logger.info(f"Profile body: {json.dumps(profile_body)}") + + r = send_raw_request(cmd.cli_ctx, "PUT", request_url, body=json.dumps(profile_body)) + + if r.status_code >= 400: + logger.error(f"Failed to create workload profile. Status: {r.status_code}, Response: {r.text}") + raise Exception(f"Failed to create workload profile: {r.status_code} - {r.text}") + + logger.info(f"Successfully created GPU profile: {profile_name}") + return profile_name + except Exception as e: + logger.error(f"Exception creating workload profile: {str(e)}") + handle_raw_exception(e) + raise + + + + +def create_models_container_app(cmd, resource_group_name, env_name, env_id, models, + gpu_profile_name, location): + """ + Create the models container app with model-runner and model-runner-config containers. + + Args: + cmd: Azure CLI command context + resource_group_name: Resource group name + env_name: Container Apps environment name + env_id: Full resource ID of the environment + models: Dictionary of model configurations + gpu_profile_name: Name of the GPU workload profile + location: Azure region + + Returns: + Created container app resource + """ + from knack.log import get_logger + from ._clients import ManagedEnvironmentClient + import json + + logger = get_logger(__name__) + + app_name = 'models' + logger.info(f"Creating models container app '{app_name}' with GPU profile '{gpu_profile_name}'") + + # MODEL_RUNNER_URL uses localhost since model-runner and model-runner-config + # are in the same container app (in-app communication) + model_runner_url = "http://localhost:12434" + logger.info(f"Model runner URL (localhost): {model_runner_url}") + + # Determine GPU-appropriate resources based on profile type + # T4 GPU: 8 vCPUs, 56GB memory (NC8as_T4_v3) + # A100 GPU: 24 vCPUs, 220GB memory (NC24ads_A100_v4) + if 'A100' in gpu_profile_name.upper(): + gpu_cpu = 24.0 + gpu_memory = '220Gi' + logger.info(f"Detected A100 GPU profile - setting resources to {gpu_cpu} CPU / {gpu_memory}") + else: + # Default to T4 resources + gpu_cpu = 8.0 + gpu_memory = '56Gi' + logger.info(f"Detected T4 GPU profile - setting resources to {gpu_cpu} CPU / {gpu_memory}") + + # Build model configuration for model-runner-config + model_config = { + 'models': {} + } + for model_name, model_spec in models.items(): + # Skip x-azure-deployment metadata + if model_name == 'x-azure-deployment': + continue + if isinstance(model_spec, dict): + # Exclude x-azure-deployment from model configuration + clean_model_spec = {k: v for k, v in model_spec.items() if k != 'x-azure-deployment'} + model_config['models'][model_name] = clean_model_spec + else: + # Simple string format (just model name) + model_config['models'][model_name] = {'source': f'ollama://{model_spec}'} + + # Container configuration matching successful deployment pattern + containers = [ + { + 'name': 'model-runner', + 'image': 'docker/model-runner:latest', + 'resources': { + 'cpu': gpu_cpu, + 'memory': gpu_memory + }, + 'env': [ + { + 'name': 'MODEL_RUNNER_PORT', + 'value': '12434' + }, + { + 'name': 'MODEL_RUNNER_HOST', + 'value': 
'0.0.0.0' + }, + { + 'name': 'MODEL_RUNNER_ENVIRONMENT', + 'value': 'moby' + }, + { + 'name': 'MODEL_RUNNER_GPU', + 'value': 'cuda' + } + ] + }, + { + 'name': 'model-runner-config', + 'image': 'simon.azurecr.io/model-runner-config:09112025-1504', + 'resources': { + 'cpu': 0.5, + 'memory': '1Gi' + }, + 'env': [ + { + 'name': 'MODEL_RUNNER_URL', + 'value': model_runner_url # Use localhost for in-app communication + }, + { + 'name': 'MODELS_CONFIG', + 'value': json.dumps(model_config) + }, + { + 'name': 'CONFIGURE_ON_STARTUP', # whether to configure models on startup + 'value': 'true' + }, + { + 'name': 'STARTUP_DELAY', # how long do we want to wait after startup before configuring models + 'value': '30' # seconds + } + ] + } + ] + + # Check for ingress configuration in x-azure-deployment + ingress_config = { + 'external': False, + 'targetPort': 12434, # MODEL_RUNNER_PORT + 'transport': 'http', + 'allowInsecure': False + } + + # Override with x-azure-deployment ingress settings if present + # Check inside each model (models.gemma.x-azure-deployment) + for model_name, model_spec in models.items(): + if isinstance(model_spec, dict) and 'x-azure-deployment' in model_spec: + azure_deployment = model_spec.get('x-azure-deployment', {}) + if 'ingress' in azure_deployment: + ingress_override = azure_deployment['ingress'] + if 'internal' in ingress_override: + ingress_config['external'] = not ingress_override['internal'] + if 'external' in ingress_override: + ingress_config['external'] = ingress_override['external'] + if 'allowInsecure' in ingress_override: + ingress_config['allowInsecure'] = ingress_override['allowInsecure'] + logger.info(f"Applied ingress overrides from model '{model_name}': {ingress_override}") + break # Use first model's ingress settings + + # Build container app definition + container_app_def = { + 'location': location, + 'properties': { + 'environmentId': env_id, + 'workloadProfileName': gpu_profile_name, + 'configuration': { + 'ingress': ingress_config + }, + 'template': { + 'containers': containers, + 'scale': { + 'minReplicas': 1, + 'maxReplicas': 1 + } + } + } + } + + # Create the container app using REST API + try: + from azure.cli.core.util import send_raw_request + from azure.cli.core.commands.client_factory import get_subscription_id + + management_hostname = cmd.cli_ctx.cloud.endpoints.resource_manager + sub_id = get_subscription_id(cmd.cli_ctx) + api_version = "2024-03-01" + + url_fmt = "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.App/containerApps/{}?api-version={}" + request_url = url_fmt.format( + management_hostname.strip('/'), + sub_id, + resource_group_name, + app_name, + api_version + ) + + r = send_raw_request(cmd.cli_ctx, "PUT", request_url, body=json.dumps(container_app_def)) + models_app = r.json() + logger.info(f"Successfully created models container app: {app_name}") + return models_app + except Exception as e: + logger.error(f"Failed to create models container app: {str(e)}") + raise + + +def get_containerapp_fqdn(cmd, resource_group_name, env_name, app_name, is_external=False): + """ + Generate Container Apps FQDN for a given app. 
+ + Args: + cmd: Azure CLI command context + resource_group_name: Resource group name + env_name: Container Apps environment name + app_name: Container app name + is_external: Whether the app has external ingress (True) or internal (False) + + Returns: + FQDN string (e.g., https://myapp.internal.example.azurecontainerapps.io) + """ + from knack.log import get_logger + from ._clients import ManagedEnvironmentClient + + logger = get_logger(__name__) + + # Get environment to retrieve the default domain + env = ManagedEnvironmentClient.show(cmd, resource_group_name, env_name) + env_default_domain = env.get('properties', {}).get('defaultDomain', '') + + if is_external: + # External ingress: https://. + fqdn = f"https://{app_name}.{env_default_domain}" + else: + # Internal ingress: https://.internal. + fqdn = f"https://{app_name}.internal.{env_default_domain}" + + logger.info(f"Generated FQDN for '{app_name}': {fqdn}") + return fqdn + + +def inject_models_environment_variables(app_def, models_endpoint, models_list): + """ + Inject MODELS_ENDPOINT and MODELS_AVAILABLE environment variables into a container app. + + Args: + app_def: Container app definition dictionary + models_endpoint: URL of the models service endpoint + models_list: List of available model names + + Returns: + Modified app_def with injected environment variables + """ + from knack.log import get_logger + + logger = get_logger(__name__) + + # Get existing environment variables from first container + containers = app_def.get('properties', {}).get('template', {}).get('containers', []) + if not containers: + logger.warning("No containers found in app definition") + return app_def + + env_vars = containers[0].get('env', []) + + # Add MODELS_ENDPOINT + models_endpoint_var = { + 'name': 'MODELS_ENDPOINT', + 'value': models_endpoint + } + env_vars.append(models_endpoint_var) + + # Add MODELS_AVAILABLE (comma-separated list) + models_available_var = { + 'name': 'MODELS_AVAILABLE', + 'value': ','.join(models_list) + } + env_vars.append(models_available_var) + + containers[0]['env'] = env_vars + logger.info(f"Injected models environment variables: {models_endpoint}, {len(models_list)} model(s)") + + return app_def + + +# ============================================================================ +# Phase 4: MCP Gateway Implementation +# ============================================================================ + +def get_mcp_gateway_configuration(service): + """ + Extract MCP gateway configuration from compose service. 
+ + Args: + service: Compose service object + + Returns: + Dictionary with gateway configuration settings + """ + from knack.log import get_logger + + logger = get_logger(__name__) + + config = { + 'port': 8811, # Standard MCP gateway port + 'ingress_type': 'internal', # MCP gateway should be internal-only + 'image': service.image if hasattr(service, 'image') else 'mcr.microsoft.com/mcp-gateway:latest' + } + + # Check for custom port in service ports + if hasattr(service, 'ports') and service.ports: + if isinstance(service.ports, list) and len(service.ports) > 0: + # Port object has 'target' (internal) and 'published' (external) attributes + port_obj = service.ports[0] + if hasattr(port_obj, 'target'): + # Use target (internal container port) for ingress + config['port'] = int(port_obj.target) + elif hasattr(port_obj, 'published'): + config['port'] = int(port_obj.published) + else: + # Fallback: parse as string, removing /tcp suffix + port_str = str(port_obj).split('/')[0] # Remove /tcp suffix + if ':' in port_str: + config['port'] = int(port_str.split(':')[-1]) + else: + config['port'] = int(port_str) + + logger.info(f"MCP Gateway configuration: port={config['port']}, ingress={config['ingress_type']}") + return config + + +def enable_managed_identity(cmd, resource_group_name, app_name): + """ + Enable system-assigned managed identity for a container app. + + Args: + cmd: Azure CLI command context + resource_group_name: Resource group name + app_name: Container app name + + Returns: + Dictionary with identity information including principal_id + """ + from knack.log import get_logger + from ._clients import ContainerAppClient + + logger = get_logger(__name__) + + logger.info(f"Enabling system-assigned managed identity for '{app_name}'") + + try: + # Get current app using show classmethod + app = ContainerAppClient.show(cmd, resource_group_name, app_name) + + # Set identity type to SystemAssigned + if 'identity' not in app: + app['identity'] = {} + + app['identity']['type'] = 'SystemAssigned' + + # Update the app using create_or_update classmethod + updated_app = ContainerAppClient.create_or_update(cmd, resource_group_name, app_name, app) + + identity = updated_app.get('identity', {}) + principal_id = identity.get('principalId') + + if principal_id: + logger.info(f"Successfully enabled managed identity. Principal ID: {principal_id}") + else: + logger.warning("Managed identity enabled but principal ID not yet available") + + return identity + + except Exception as e: + logger.error(f"Failed to enable managed identity: {str(e)}") + raise + + +def attempt_role_assignment(cmd, principal_id, resource_group_name, app_name): + """ + Attempt to assign Container Apps Contributor role to managed identity at resource group scope. + This allows the MCP gateway to modify container apps to add MCP server containers. 
+ + Args: + cmd: Azure CLI command context + principal_id: Principal ID of the managed identity + resource_group_name: Resource group name + app_name: Container app name + + Returns: + Boolean indicating if assignment succeeded + """ + from knack.log import get_logger + from azure.cli.core.commands.client_factory import get_subscription_id + + logger = get_logger(__name__) + + # Use the specific role definition ID for container app management + # This role allows the MCP gateway to modify container apps + role_definition_id = "/subscriptions/30501c6c-81f6-41ac-a388-d29cf43a020d/providers/Microsoft.Authorization/roleDefinitions/358470bc-b998-42bd-ab17-a7e34c199c0f" + + # Build scope - resource group level + subscription_id = get_subscription_id(cmd.cli_ctx) + scope = f"/subscriptions/{subscription_id}/resourceGroups/{resource_group_name}" + + logger.info(f"Attempting to assign role to principal {principal_id}") + logger.info(f"Scope: Resource Group '{resource_group_name}'") + + try: + # Import role assignment function + from azure.cli.command_modules.role.custom import create_role_assignment + + # Attempt role assignment using the full role definition ID + create_role_assignment( + cmd, + role=role_definition_id, + assignee_object_id=principal_id, + scope=scope, + assignee_principal_type='ServicePrincipal' + ) + + logger.info(f"✅ Successfully assigned role to '{app_name}' managed identity") + return True + + except Exception as e: + # Log warning with manual assignment instructions + logger.warning("━" * 70) + logger.warning(f"⚠️ Could not automatically assign role: {str(e)}") + logger.warning("") + logger.warning("To manually assign the role, run:") + logger.warning(f" az role assignment create \\") + logger.warning(f" --role '{role_definition_id}' \\") + logger.warning(f" --assignee-object-id {principal_id} \\") + logger.warning(f" --assignee-principal-type ServicePrincipal \\") + logger.warning(f" --scope {scope}") + logger.warning("━" * 70) + + return False + + +def inject_mcp_gateway_environment_variables(app_def, subscription_id, resource_group_name, app_name): + """ + Inject MCP gateway-specific environment variables into the container app. + + Args: + app_def: Container app definition dictionary + subscription_id: Azure subscription ID + resource_group_name: Resource group name + app_name: Container app name + + Returns: + Modified app_def with injected environment variables + """ + from knack.log import get_logger + + logger = get_logger(__name__) + + # Get existing environment variables from first container + containers = app_def.get('properties', {}).get('template', {}).get('containers', []) + if not containers: + logger.warning("No containers found in MCP gateway app definition") + return app_def + + env_vars = containers[0].get('env', []) + + # Add MCP runtime identifier + env_vars.append({ + 'name': 'MCP_RUNTIME', + 'value': 'ACA' + }) + + # Add Azure context variables + env_vars.append({ + 'name': 'AZURE_SUBSCRIPTION_ID', + 'value': subscription_id + }) + + env_vars.append({ + 'name': 'AZURE_RESOURCE_GROUP', + 'value': resource_group_name + }) + + env_vars.append({ + 'name': 'AZURE_APP_NAME', + 'value': app_name + }) + + containers[0]['env'] = env_vars + logger.info(f"Injected MCP gateway environment variables for '{app_name}'") + + return app_def + + +def inject_gateway_url_to_dependents(app_def, gateway_url): + """ + Inject MCP_GATEWAY_URL environment variable to services that depend on MCP gateway. 
+ + Args: + app_def: Container app definition dictionary + gateway_url: URL of the MCP gateway service + + Returns: + Modified app_def with injected MCP_GATEWAY_URL + """ + from knack.log import get_logger + + logger = get_logger(__name__) + + # Get existing environment variables from first container + containers = app_def.get('properties', {}).get('template', {}).get('containers', []) + if not containers: + logger.warning("No containers found in app definition") + return app_def + + env_vars = containers[0].get('env', []) + + # Add MCP_GATEWAY_URL + env_vars.append({ + 'name': 'MCP_GATEWAY_URL', + 'value': gateway_url + }) + + containers[0]['env'] = env_vars + logger.info(f"Injected MCP_GATEWAY_URL: {gateway_url}") + + return app_def + + +# ============================================================================ +# Phase 5: Dry-Run Mode Functions +# ============================================================================ + +def collect_dry_run_service_config(service_name, service, image=None, ingress_type=None, target_port=None, + cpu=None, memory=None, environment=None, replicas=None, is_models_service=False, is_mcp_gateway=False, gpu_type=None, raw_service=None, models_config=None): + """Collect service configuration for dry-run preview""" + # Extract x-azure-deployment overrides + # Use raw_service (YAML dict) if provided, otherwise use service object + service_for_parse = raw_service if raw_service is not None else service + overrides = parse_x_azure_deployment(service_for_parse) + + # Use x-azure-deployment overrides first, then parameters, then defaults + # Service is a pycomposefile Service object, not a dict + service_image = getattr(service, 'image', None) + service_ports = getattr(service, 'ports', None) + service_env = getattr(service, 'environment', None) + service_depends_on = getattr(service, 'depends_on', None) + service_command = getattr(service, 'command', None) + + # Get image with override priority (x-azure-deployment takes highest precedence) + final_image = overrides.get('image') or image or service_image or 'Not specified' + + # Determine ingress enabled and type (x-azure-deployment takes highest precedence) + ingress_enabled = service_ports is not None or overrides.get('ingress_type') is not None + + # Priority: x-azure-deployment override > parameter > default based on ports + if overrides.get('ingress_type') is not None: + final_ingress_type = overrides['ingress_type'] + elif ingress_type is not None: + final_ingress_type = ingress_type + elif service_ports: + final_ingress_type = 'internal' # Default to internal if ports exist + else: + final_ingress_type = None + + # Get target port from override or parameter + final_target_port = overrides.get('target_port') or target_port + + # Get CPU and memory from overrides or parameters + final_cpu = overrides.get('cpu') or cpu + final_memory = overrides.get('memory') or memory + + # Get replica counts from overrides + min_replicas = overrides.get('min_replicas') + max_replicas = overrides.get('max_replicas') + + # Collect environment variables (handle both dict and string formats, avoid duplicates) + env_vars = [] + env_var_names = set() # Track names to avoid duplicates + + # Helper to add env var and track names + def add_env_var(var): + if isinstance(var, dict) and 'name' in var and 'value' in var: + var_str = f"{var['name']}={var['value']}" + var_name = var['name'] + elif isinstance(var, str): + var_str = var + var_name = var.split('=')[0] if '=' in var else var + else: + return # Skip invalid format + + if 
var_name not in env_var_names: + env_vars.append(var_str) + env_var_names.add(var_name) + + # First add from service definition (compose file) + if service_env: + if isinstance(service_env, dict): + for k, v in service_env.items(): + add_env_var(f"{k}={v}") + elif isinstance(service_env, list): + for var in service_env: + add_env_var(var) + + # Then add from environment parameter (injected vars, these can override) + if environment: + for var in environment: + if isinstance(var, dict) and 'name' in var: + # For dict format, allow override by removing old value first + var_name = var['name'] + if var_name in env_var_names: + # Remove old value + env_vars = [v for v in env_vars if not v.startswith(f"{var_name}=")] + env_var_names.discard(var_name) + add_env_var(var) + + # Collect depends_on + depends_on = [] + if service_depends_on: + if isinstance(service_depends_on, list): + depends_on = service_depends_on + elif isinstance(service_depends_on, dict): + depends_on = list(service_depends_on.keys()) + + # Collect command + command = [] + if service_command: + if isinstance(service_command, list): + command = service_command + elif isinstance(service_command, str): + command = [service_command] + + # Check for models reference in raw_service and resolve paths + models_ref = [] + if raw_service and isinstance(raw_service, dict): + service_models = raw_service.get('models') + if service_models and isinstance(service_models, dict): + # Resolve model names to paths using models_config + for model_name in service_models.keys(): + if models_config and model_name in models_config: + model_info = models_config[model_name] + if isinstance(model_info, dict) and 'model' in model_info: + model_path = model_info['model'] + models_ref.append(f"{model_name} ({model_path})") + + # Add environment variables for this model + model_env = service_models[model_name] + if isinstance(model_env, dict): + endpoint_var = model_env.get('endpoint_var') + model_var = model_env.get('model_var') + if endpoint_var: + env_vars.append(f"{endpoint_var}=https:///") + if model_var: + env_vars.append(f"{model_var}={model_path}") + elif isinstance(model_info, str): + models_ref.append(f"{model_name} ({model_info})") + + config = { + 'service_name': service_name, + 'image': final_image, + 'ingress_enabled': ingress_enabled, + 'ingress_type': final_ingress_type, + 'target_port': final_target_port, + 'cpu': final_cpu, + 'memory': final_memory, + 'min_replicas': min_replicas, + 'max_replicas': max_replicas, + 'environment': env_vars, + 'depends_on': depends_on, + 'command': command, + 'models': models_ref, + 'is_models_service': is_models_service, + 'is_mcp_gateway': is_mcp_gateway + } + + return config + +def parse_x_azure_deployment(service): + """ + Parse x-azure-deployment custom extension from compose service. 
+ + Supports overrides for: + - cpu: Custom CPU allocation + - memory: Custom memory allocation + - min_replicas: Minimum replica count + - max_replicas: Maximum replica count + - ingress: Ingress type override + - managed_identity: Managed identity configuration + + Args: + service: Service object from parsed compose file OR raw dict from YAML + + Returns: + Dictionary containing override values, or empty dict if no overrides + """ + overrides = {} + + # Handle both Service objects and raw dicts + if isinstance(service, dict): + # Raw dict from YAML + x_azure = service.get('x-azure-deployment') + elif hasattr(service, 'x_azure_deployment'): + # Service object + x_azure = service.x_azure_deployment + else: + return overrides + + if not x_azure: + return overrides + + # Parse image override + if 'image' in x_azure: + overrides['image'] = str(x_azure['image']) + + # Parse resources (cpu and memory can be at top level OR nested under 'resources') + resources = x_azure.get('resources', {}) + if 'cpu' in resources: + overrides['cpu'] = resources['cpu'] # Don't convert to string, keep as float/int + elif 'cpu' in x_azure: + overrides['cpu'] = x_azure['cpu'] + + if 'memory' in resources: + overrides['memory'] = str(resources['memory']) + elif 'memory' in x_azure: + overrides['memory'] = str(x_azure['memory']) + + # Parse replica overrides + if 'min_replicas' in x_azure: + overrides['min_replicas'] = int(x_azure['min_replicas']) + + if 'max_replicas' in x_azure: + overrides['max_replicas'] = int(x_azure['max_replicas']) + + # Parse ingress override + if 'ingress' in x_azure: + ingress_config = x_azure['ingress'] + if isinstance(ingress_config, dict): + # Handle 'external: true/false' format + if 'external' in ingress_config: + overrides['ingress_type'] = 'external' if ingress_config['external'] else 'internal' + # Handle 'internal: true/false' format + elif 'internal' in ingress_config: + overrides['ingress_type'] = 'internal' if ingress_config['internal'] else 'external' + # Handle 'type' field + elif 'type' in ingress_config: + overrides['ingress_type'] = ingress_config['type'] + # Handle port + if 'port' in ingress_config: + overrides['target_port'] = int(ingress_config['port']) + elif isinstance(ingress_config, str): + overrides['ingress_type'] = ingress_config + + # Parse managed identity override + if 'managed_identity' in x_azure: + overrides['managed_identity'] = x_azure['managed_identity'] + + return overrides +def apply_resource_overrides(cpu, memory, min_replicas, max_replicas, overrides): + """ + Apply x-azure-deployment overrides to resource configuration. + + Args: + cpu: Default CPU value + memory: Default memory value + min_replicas: Default min replicas + max_replicas: Default max replicas + overrides: Dictionary from parse_x_azure_deployment + + Returns: + Tuple of (cpu, memory, min_replicas, max_replicas) with overrides applied + """ + final_cpu = overrides.get('cpu', cpu) + final_memory = overrides.get('memory', memory) + final_min_replicas = overrides.get('min_replicas', min_replicas) + final_max_replicas = overrides.get('max_replicas', max_replicas) + + return final_cpu, final_memory, final_min_replicas, final_max_replicas + + +def apply_ingress_overrides(ingress_type, target_port, overrides): + """ + Apply x-azure-deployment ingress overrides. 
+ + Args: + ingress_type: Default ingress type + target_port: Default target port + overrides: Dictionary from parse_x_azure_deployment + + Returns: + Tuple of (ingress_type, target_port) with overrides applied + """ + final_ingress_type = overrides.get('ingress_type', ingress_type) + final_target_port = overrides.get('target_port', target_port) + + return final_ingress_type, final_target_port + + +def validate_x_azure_overrides(overrides, service_name, logger): + """ + Validate x-azure-deployment overrides and log warnings for invalid values. + + Args: + overrides: Dictionary from parse_x_azure_deployment + service_name: Name of the service + logger: Logger instance + + Returns: + True if all overrides are valid, False otherwise + """ + valid = True + + # Validate CPU + if 'cpu' in overrides: + cpu = overrides['cpu'] + try: + cpu_float = float(cpu) + if cpu_float <= 0 or cpu_float > 4: + logger.warning(f"Service '{service_name}': Invalid CPU '{cpu}' (must be 0.25-4.0)") + valid = False + except ValueError: + logger.warning(f"Service '{service_name}': Invalid CPU format '{cpu}'") + valid = False + + # Validate memory + if 'memory' in overrides: + memory = overrides['memory'] + if not (memory.endswith('Gi') or memory.endswith('G')): + logger.warning(f"Service '{service_name}': Memory '{memory}' should end with 'Gi' or 'G'") + valid = False + + # Validate replicas + if 'min_replicas' in overrides: + min_rep = overrides['min_replicas'] + if min_rep < 0 or min_rep > 30: + logger.warning(f"Service '{service_name}': min_replicas '{min_rep}' out of range (0-30)") + valid = False + + if 'max_replicas' in overrides: + max_rep = overrides['max_replicas'] + if max_rep < 1 or max_rep > 30: + logger.warning(f"Service '{service_name}': max_replicas '{max_rep}' out of range (1-30)") + valid = False + + # Validate min <= max + if 'min_replicas' in overrides and 'max_replicas' in overrides: + if overrides['min_replicas'] > overrides['max_replicas']: + logger.warning(f"Service '{service_name}': min_replicas cannot exceed max_replicas") + valid = False + + # Validate ingress type + if 'ingress_type' in overrides: + ingress = overrides['ingress_type'].lower() + if ingress not in ['external', 'internal']: + logger.warning(f"Service '{service_name}': Invalid ingress type '{ingress}' (use 'external' or 'internal')") + valid = False + + return valid + + +def log_applied_overrides(service_name, overrides, logger): + """ + Log which x-azure-deployment overrides were applied. 
+ + Args: + service_name: Name of the service + overrides: Dictionary from parse_x_azure_deployment + logger: Logger instance + """ + if not overrides: + return + + logger.info(f"Service '{service_name}': Applying x-azure-deployment overrides:") + + if 'cpu' in overrides: + logger.info(f" - CPU: {overrides['cpu']}") + + if 'memory' in overrides: + logger.info(f" - Memory: {overrides['memory']}") + + if 'min_replicas' in overrides: + logger.info(f" - Min Replicas: {overrides['min_replicas']}") + + if 'max_replicas' in overrides: + logger.info(f" - Max Replicas: {overrides['max_replicas']}") + + if 'ingress_type' in overrides: + logger.info(f" - Ingress Type: {overrides['ingress_type']}") + + if 'target_port' in overrides: + logger.info(f" - Target Port: {overrides['target_port']}") + + if 'managed_identity' in overrides: + logger.info(f" - Managed Identity: {overrides['managed_identity']}") + + + +# ============================================================================ +# Phase 7: Declarative Update Strategy Functions +# ============================================================================ + +def check_containerapp_exists(cmd, resource_group_name, app_name): + """ + Check if a container app already exists. + + Args: + cmd: Command context + resource_group_name: Resource group name + app_name: Container app name + + Returns: + Existing containerapp object if it exists, None otherwise + """ + from azure.core.exceptions import ResourceNotFoundError + + try: + existing_app = ContainerAppClient.show(cmd, resource_group_name=resource_group_name, name=app_name) + return existing_app + except ResourceNotFoundError: + return None + except Exception: + return None + + +def detect_configuration_changes(existing_app, new_config): + """ + Detect changes between existing and new container app configuration. 
+ + Args: + existing_app: Existing container app object + new_config: Dictionary with new configuration + + Returns: + Dictionary describing detected changes + """ + changes = { + 'has_changes': False, + 'image_changed': False, + 'env_vars_changed': False, + 'replicas_changed': False, + 'resources_changed': False, + 'ingress_changed': False + } + + if not existing_app: + changes['has_changes'] = True + return changes + + # Check image change + if hasattr(existing_app, 'properties'): + props = existing_app.properties + + # Image change + if hasattr(props, 'template') and hasattr(props.template, 'containers'): + if props.template.containers and len(props.template.containers) > 0: + existing_image = props.template.containers[0].image + new_image = new_config.get('image', '') + if existing_image != new_image: + changes['image_changed'] = True + changes['has_changes'] = True + + # Replica change + if hasattr(props, 'template') and hasattr(props.template, 'scale'): + existing_min = getattr(props.template.scale, 'minReplicas', 0) + existing_max = getattr(props.template.scale, 'maxReplicas', 1) + new_min = new_config.get('min_replicas', 1) + new_max = new_config.get('max_replicas', 1) + + if existing_min != new_min or existing_max != new_max: + changes['replicas_changed'] = True + changes['has_changes'] = True + + # Resource change (CPU/Memory) + if hasattr(props, 'template') and hasattr(props.template, 'containers'): + if props.template.containers and len(props.template.containers) > 0: + container = props.template.containers[0] + if hasattr(container, 'resources'): + existing_cpu = getattr(container.resources, 'cpu', '0.5') + existing_memory = getattr(container.resources, 'memory', '1.0Gi') + new_cpu = new_config.get('cpu', '0.5') + new_memory = new_config.get('memory', '1.0Gi') + + if str(existing_cpu) != str(new_cpu) or str(existing_memory) != str(new_memory): + changes['resources_changed'] = True + changes['has_changes'] = True + + return changes + + +def update_containerapp_from_compose(cmd, resource_group_name, app_name, + image=None, env_vars=None, cpu=None, + memory=None, min_replicas=None, max_replicas=None, + logger=None): + """ + Update an existing container app with new configuration. 
+ + Args: + cmd: Command context + resource_group_name: Resource group name + app_name: Container app name + image: New container image (optional) + env_vars: New environment variables (optional) + cpu: New CPU allocation (optional) + memory: New memory allocation (optional) + min_replicas: New min replicas (optional) + max_replicas: New max replicas (optional) + logger: Logger instance + + Returns: + Updated containerapp object + """ + from azure.cli.command_modules.containerapp.custom import update_containerapp + + if logger: + logger.info(f"Updating existing container app: {app_name}") + + # Build update arguments + update_args = { + 'cmd': cmd, + 'name': app_name, + 'resource_group_name': resource_group_name + } + + if image: + update_args['image'] = image + + if env_vars: + update_args['set_env_vars'] = env_vars + + if cpu: + update_args['cpu'] = cpu + + if memory: + update_args['memory'] = memory + + if min_replicas is not None: + update_args['min_replicas'] = min_replicas + + if max_replicas is not None: + update_args['max_replicas'] = max_replicas + + # Perform update + try: + updated_app = update_containerapp(**update_args) + if logger: + logger.info(f"Successfully updated container app: {app_name}") + return updated_app + except Exception as e: + if logger: + logger.error(f"Failed to update container app '{app_name}': {str(e)}") + raise + + +def log_update_detection(service_name, changes, logger): + """ + Log detected changes for a container app update. + + Args: + service_name: Name of the service + changes: Dictionary from detect_configuration_changes + logger: Logger instance + """ + if not changes['has_changes']: + logger.info(f"Service '{service_name}': No changes detected - skipping update") + return + + logger.info(f"Service '{service_name}': Detected changes:") + + if changes['image_changed']: + logger.info(f" - Container image changed") + + if changes['env_vars_changed']: + logger.info(f" - Environment variables changed") + + if changes['replicas_changed']: + logger.info(f" - Replica configuration changed") + + if changes['resources_changed']: + logger.info(f" - Resource allocation (CPU/Memory) changed") + + if changes['ingress_changed']: + logger.info(f" - Ingress configuration changed") + + +# Dry-run print functions (stubs for now - can be enhanced later) +def print_dry_run_header(compose_file_path, resource_group_name, managed_env_name): + """Print dry-run header""" + print("\n" + "="*80) + print("DRY-RUN MODE: Deployment Preview") + print("="*80 + "\n") + print(f"Compose File: {compose_file_path}") + print(f"Resource Group: {resource_group_name}") + print(f"Managed Environment: {managed_env_name}") + print("\nNote: No resources will be created. 
This is a preview only.\n") + +def print_dry_run_service_plan(service_name, service_config): + """Print dry-run service plan""" + print(f"\nService: {service_name}") + print("-" * 60) + + # Always show image + if 'image' in service_config and service_config['image']: + print(f" Image: {service_config['image']}") + + # Show ingress if enabled or if type is specified + ingress_type = service_config.get('ingress_type') + if service_config.get('ingress_enabled') or ingress_type: + if ingress_type: + print(f" Ingress: {ingress_type}") + + # Show target port if specified + target_port = service_config.get('target_port') + if target_port: + print(f" Target Port: {target_port}") + # Show allow insecure if target port is set + print(f" Allow Insecure: true") + + # Show environment variables + env_vars = service_config.get('environment', []) + if env_vars: + print(f" Environment Variables:") + for env_var in env_vars: + print(f" - {env_var}") + + # Show depends_on + depends_on = service_config.get('depends_on', []) + if depends_on: + # Format as comma-separated string or single value + if len(depends_on) == 1: + print(f" Depends On: {depends_on[0]}") + else: + print(f" Depends On: {', '.join(depends_on)}") + + # Show models reference + models = service_config.get('models', []) + if models: + print(f" Models:") + for model in models: + print(f" - {model}") + + # Show command + command = service_config.get('command', []) + if command: + print(f" Command:") + for cmd in command: + print(f" - {cmd}") + + # Show CPU + cpu = service_config.get('cpu') + if cpu: + print(f" CPU: {cpu}") + + # Show memory with proper formatting + memory = service_config.get('memory') + if memory: + # Add Gi suffix if it's a number + if isinstance(memory, (int, float)): + print(f" Memory: {memory}Gi") + elif isinstance(memory, str) and not memory.endswith('Gi'): + print(f" Memory: {memory}Gi") + else: + print(f" Memory: {memory}") + + # Show replica counts + min_replicas = service_config.get('min_replicas') + max_replicas = service_config.get('max_replicas') + if min_replicas is not None: + print(f" Min Replicas: {min_replicas}") + if max_replicas is not None: + print(f" Max Replicas: {max_replicas}") + +def print_dry_run_models_deployment(models_config, gpu_profile_info): + """Print dry-run models deployment""" + print("\nModels Deployment Preview") + print("="*80) + + # Container App name + print(f" Container App: models") + + # Image + print(f" Image: docker/model-runner:latest") + + # Show model names and paths (filter out x-azure-* keys) + model_names = [k for k in models_config.keys() if not k.startswith('x-')] + if model_names: + print(f" Models:") + for model_name in model_names: + model_info = models_config[model_name] + if isinstance(model_info, dict) and 'model' in model_info: + print(f" - {model_name}: {model_info['model']}") + elif isinstance(model_info, str): + print(f" - {model_name}: {model_info}") + + # Show GPU workload profile + if gpu_profile_info and 'type' in gpu_profile_info: + print(f" Workload Profile: {gpu_profile_info['type']}") + + # Ingress is always internal for models + print(f" Ingress: internal") + +def print_dry_run_mcp_gateway(gateway_config): + """Print dry-run MCP gateway""" + print("\nMCP Gateway Preview") + print("="*80) + if 'service_name' in gateway_config: + print(f" Gateway: {gateway_config['service_name']}") + +def print_dry_run_summary(total_services=0, has_models=False, has_gateway=False): + """Print dry-run summary""" + print("\n" + "="*80) + print("Deployment Summary") + 
print("="*80) + print(f"Total Services: {total_services}") + print(f"Has Models: {has_models}") + print(f"Has MCP Gateway: {has_gateway}") + print("="*80) diff --git a/src/containerapp/azext_containerapp/_params.py b/src/containerapp/azext_containerapp/_params.py index a1715bfc255..b62bd64817f 100644 --- a/src/containerapp/azext_containerapp/_params.py +++ b/src/containerapp/azext_containerapp/_params.py @@ -520,3 +520,20 @@ def load_arguments(self, _): with self.argument_context('containerapp revision set-mode') as c: c.argument('mode', arg_type=get_enum_type(['single', 'multiple', 'labels']), help="The active revisions mode for the container app.") c.argument('target_label', help="The label to apply to new revisions. Required for revision mode 'labels'.", is_preview=True) + + with self.argument_context('containerapp env premium-ingress') as c: + c.argument('resource_group_name', arg_type=resource_group_name_type, id_part=None) + c.argument('name', options_list=['--name', '-n'], help="The name of the managed environment.") + c.argument('workload_profile_name', options_list=['--workload-profile-name', '-w'], help="The workload profile to run ingress replicas on. This profile must not be shared with any container app or job.") + c.argument('min_replicas', options_list=['--min-replicas'], type=int, deprecate_info=c.deprecate(hide=True, expiration='2.79.0'), help="The workload profile minimum instances is used instead.") + c.argument('max_replicas', options_list=['--max-replicas'], type=int, deprecate_info=c.deprecate(hide=True, expiration='2.79.0'), help="The workload profile maximum instances is used instead.") + c.argument('termination_grace_period', options_list=['--termination-grace-period', '-t'], type=int, help="Time in seconds to drain requests during ingress shutdown. Default 500, minimum 0, maximum 3600.") + c.argument('request_idle_timeout', options_list=['--request-idle-timeout'], type=int, help="Timeout in minutes for idle requests. Default 4, minimum 4, maximum 30.") + c.argument('header_count_limit', options_list=['--header-count-limit'], type=int, help="Limit of http headers per request. Default 100, minimum 1.") + + # Compose + with self.argument_context('containerapp compose create') as c: + c.argument('dry_run', options_list=['--dry-run'], arg_type=get_three_state_flag(), + help='Preview deployment without creating actual Azure resources. Generates a detailed report of container apps, workload profiles, role assignments, and environment variable injections.') + c.argument('replace_all', options_list=['--replace-all'], arg_type=get_three_state_flag(), + help='Replace all existing container apps in the environment. 
By default, only matching apps are updated and new ones are created.') diff --git a/src/containerapp/azext_containerapp/custom.py b/src/containerapp/azext_containerapp/custom.py index 37a72d28592..0d7f1473782 100644 --- a/src/containerapp/azext_containerapp/custom.py +++ b/src/containerapp/azext_containerapp/custom.py @@ -1540,7 +1540,9 @@ def create_containerapps_from_compose(cmd, # pylint: disable=R0914 registry_pass=None, transport_mapping=None, location=None, - tags=None): + tags=None, + dry_run=None, + replace_all=None): from pycomposefile import ComposeFile from azure.cli.command_modules.containerapp._compose_utils import (build_containerapp_from_compose_service, @@ -1555,7 +1557,7 @@ def create_containerapps_from_compose(cmd, # pylint: disable=R0914 resolve_replicas_from_service, resolve_environment_from_service, resolve_secret_from_service) - from ._compose_utils import validate_memory_and_cpu_setting + from ._compose_utils import validate_memory_and_cpu_setting, parse_x_azure_deployment # Validate managed environment parsed_managed_env = parse_resource_id(managed_env) @@ -1576,11 +1578,110 @@ def create_containerapps_from_compose(cmd, # pylint: disable=R0914 location=location) compose_yaml = load_yaml_file(compose_file_path) + # Make a deep copy to preserve original YAML for x-azure-deployment extraction + import copy + compose_yaml_original = copy.deepcopy(compose_yaml) parsed_compose_file = ComposeFile(compose_yaml) logger.info(parsed_compose_file) containerapps_from_compose = [] + + # ======================================================================== + # Phase 3: Models Deployment Implementation + # ======================================================================== + + # Parse models section from compose file + from ._compose_utils import ( + parse_models_section, + validate_models_configuration, + create_gpu_workload_profile_if_needed, + create_models_container_app, + inject_models_environment_variables, + detect_mcp_gateway_service, + resolve_dependency_graph, + print_dry_run_header, + print_dry_run_service_plan, + print_dry_run_models_deployment, + print_dry_run_mcp_gateway, + print_dry_run_summary, + check_containerapp_exists, + collect_dry_run_service_config, + get_mcp_gateway_configuration, + enable_managed_identity, + attempt_role_assignment, + detect_configuration_changes, + log_update_detection, + ) + + models = parse_models_section(compose_yaml_original) + models_app = None + models_endpoint = None + models_list = [] + + # Phase 3: Models Deployment (before services since they depend on it) + # If models section exists, validate and create models container app + if models and not dry_run: + logger.info(f"Models section found with {len(models)} model(s)") + + # Validate models configuration + validate_models_configuration(models) + + # Get or create GPU workload profile + # Extract requested GPU profile type from models x-azure-deployment section + # x-azure-deployment is nested inside each model (models.gemma.x-azure-deployment) + requested_gpu_type = None + + for model_name, model_config in models.items(): + if isinstance(model_config, dict) and 'x-azure-deployment' in model_config: + azure_deployment = model_config.get('x-azure-deployment', {}) + workload_profiles = azure_deployment.get('workloadProfiles', {}) + requested_gpu_type = workload_profiles.get('workloadProfileType') + if requested_gpu_type: + logger.info(f"Found GPU profile in model '{model_name}': {requested_gpu_type}") + break + + try: + gpu_profile_name = 
create_gpu_workload_profile_if_needed( + cmd, + resource_group_name, + managed_env_name, + managed_environment['location'], + requested_gpu_type + ) + + # Create models container app + models_app = create_models_container_app( + cmd, + resource_group_name, + managed_env_name, + managed_environment['id'], + models, + gpu_profile_name, + managed_environment['location'] + ) + + # Set models endpoint for environment variable injection + models_endpoint = f"http://models" + models_list = list(models.keys()) + + logger.info(f"Models container app created successfully: {models_endpoint}") + + except Exception as e: + logger.error(f"Failed to create models deployment: {str(e)}") + raise + + # Track services that depend on models for environment injection + services_needing_models_env = [] + # Using the key to iterate to get the service name # pylint: disable=C0201,C0206 + + # Phase 5: Dry-run mode initialization + if dry_run: + print_dry_run_header(compose_file_path, resource_group_name, managed_env_name) + dry_run_services = [] + dry_run_has_models = bool(models) # True if models section exists + dry_run_has_gateway = False + for service_name in parsed_compose_file.ordered_services.keys(): service = parsed_compose_file.services[service_name] if not check_supported_platform(service.platform): @@ -1592,12 +1693,43 @@ def create_containerapps_from_compose(cmd, # pylint: disable=R0914 logger.info( # pylint: disable=W1203 f"Creating the Container Apps instance for {service_name} under {resource_group_name} in {location}.") ingress_type, target_port = resolve_ingress_and_target_port(service) + + # Enhanced ACA Compose Support: Override ingress for MCP gateway in ACA mode + + # Phase 4: MCP Gateway Detection and Configuration + is_mcp_gateway = detect_mcp_gateway_service(service_name, service) + mcp_gateway_config = None + + if is_mcp_gateway: + mcp_gateway_config = get_mcp_gateway_configuration(service) + ingress_type = mcp_gateway_config['ingress_type'] + target_port = mcp_gateway_config['port'] + logger.info(f"MCP gateway detected: {service_name} - setting internal ingress on port {target_port}") + registry, registry_username, registry_password = resolve_registry_from_cli_args(registry_server, registry_user, registry_pass) # pylint: disable=C0301 transport_setting = resolve_transport_from_cli_args(service_name, transport_mapping) startup_command, startup_args = resolve_service_startup_command(service) + + # For services with command array (like mcp-gateway), treat as args not command + if service.command and isinstance(service.command, list) and len(service.command) > 0: + # All elements become args, no startup_command + startup_command = None + startup_args = service.command + logger.info(f"Service '{service_name}' has command array - using as args: {startup_args}") + + # Get x-azure-deployment overrides for resources + raw_service_yaml = compose_yaml_original.get('services', {}).get(service_name, {}) + overrides = parse_x_azure_deployment(raw_service_yaml) + override_cpu = overrides.get('cpu') + override_memory = overrides.get('memory') + + # Apply resource overrides if specified + base_cpu = override_cpu if override_cpu is not None else resolve_cpu_configuration_from_service(service) + base_memory = override_memory if override_memory is not None else resolve_memory_configuration_from_service(service) + cpu, memory = validate_memory_and_cpu_setting( - resolve_cpu_configuration_from_service(service), - resolve_memory_configuration_from_service(service), + base_cpu, + base_memory, managed_environment 
) replicas = resolve_replicas_from_service(service) @@ -1607,6 +1739,204 @@ def create_containerapps_from_compose(cmd, # pylint: disable=R0914 environment.extend(secret_env_ref) elif secret_env_ref is not None: environment = secret_env_ref + + # Strip ports from URLs in environment variables for ACA (Envoy handles port mapping) + # Also inject proper FQDNs for MCP_GATEWAY_URL references + if environment is not None: + import re + + # Check if service depends on mcp-gateway and needs URL injection + needs_gateway_url = False + gateway_allow_insecure = False + + # Check for MCP_GATEWAY_URL in environment + for env_var in environment: + var_name = None + if isinstance(env_var, dict): + var_name = env_var.get('name') + elif isinstance(env_var, str) and '=' in env_var: + var_name = env_var.split('=', 1)[0] + + if var_name == 'MCP_GATEWAY_URL': + needs_gateway_url = True + break + + # Determine mcp-gateway allowInsecure setting + if needs_gateway_url: + for svc_name, svc_config in compose_yaml_original.get('services', {}).items(): + if detect_mcp_gateway_service(svc_name, type('Service', (), svc_config)()): + azure_deployment = svc_config.get('x-azure-deployment', {}) + ingress_config = azure_deployment.get('ingress', {}) + gateway_allow_insecure = ingress_config.get('allowInsecure', False) + break + + for i, env_var in enumerate(environment): + if isinstance(env_var, dict) and 'value' in env_var: + # Check if this is MCP_GATEWAY_URL that needs FQDN injection + if env_var.get('name') == 'MCP_GATEWAY_URL' and needs_gateway_url: + from ._compose_utils import get_containerapp_fqdn + # Determine if mcp-gateway has external or internal ingress + gateway_external = False + for svc_name in parsed_compose_file.services.keys(): + if detect_mcp_gateway_service(svc_name, parsed_compose_file.services[svc_name]): + raw_gateway_yaml = compose_yaml_original.get('services', {}).get(svc_name, {}) + gateway_overrides = parse_x_azure_deployment(raw_gateway_yaml) + if gateway_overrides.get('ingress_type') == 'external': + gateway_external = True + break + + gateway_fqdn = get_containerapp_fqdn(cmd, resource_group_name, managed_env_name, 'mcp-gateway', is_external=gateway_external) + gateway_protocol = 'http' if gateway_allow_insecure else 'https' + gateway_fqdn = gateway_fqdn.replace('https://', f'{gateway_protocol}://') + env_var['value'] = gateway_fqdn + logger.info(f"Injected MCP_GATEWAY_URL with FQDN: {gateway_fqdn}") + else: + # Match http://hostname:port or https://hostname:port patterns + env_var['value'] = re.sub(r'(https?://[^:/]+):\d+', r'\1', env_var['value']) + elif isinstance(env_var, str) and '=' in env_var: + # Handle string format "KEY=VALUE" + key, value = env_var.split('=', 1) + + if key == 'MCP_GATEWAY_URL' and needs_gateway_url: + from ._compose_utils import get_containerapp_fqdn + gateway_external = False + for svc_name in parsed_compose_file.services.keys(): + if detect_mcp_gateway_service(svc_name, parsed_compose_file.services[svc_name]): + raw_gateway_yaml = compose_yaml_original.get('services', {}).get(svc_name, {}) + gateway_overrides = parse_x_azure_deployment(raw_gateway_yaml) + if gateway_overrides.get('ingress_type') == 'external': + gateway_external = True + break + + gateway_fqdn = get_containerapp_fqdn(cmd, resource_group_name, managed_env_name, 'mcp-gateway', is_external=gateway_external) + gateway_protocol = 'http' if gateway_allow_insecure else 'https' + gateway_fqdn = gateway_fqdn.replace('https://', f'{gateway_protocol}://') + environment[i] = f"{key}={gateway_fqdn}" + 
logger.info(f"Injected MCP_GATEWAY_URL with FQDN: {gateway_fqdn}") + else: + value = re.sub(r'(https?://[^:/]+):\d+', r'\1', value) + environment[i] = f"{key}={value}" + + + # Phase 4.5: Inject MCP Gateway Environment Variables + if is_mcp_gateway: + logger.info(f"Injecting required MCP gateway environment variables for {service_name}") + + # Ensure environment list exists + if environment is None: + environment = [] + + # Get Azure subscription ID and tenant ID from CLI context + from azure.cli.core._profile import Profile + profile = Profile(cli_ctx=cmd.cli_ctx) + subscription_id = profile.get_subscription_id() + + # Get tenant ID from subscription + subscription_info = profile.get_subscription(subscription_id) + tenant_id = subscription_info.get('tenantId', '') + + # Required environment variables for MCP gateway in ACA mode + mcp_env_vars = { + 'MCP_RUNTIME': 'ACA', + 'AZURE_SUBSCRIPTION_ID': subscription_id, + 'AZURE_RESOURCE_GROUP': resource_group_name, + 'AZURE_APP_NAME': service_name, + 'AZURE_TENANT_ID': tenant_id + } + + # Add environment variables (check for duplicates) + for var_name, var_value in mcp_env_vars.items(): + # Check if variable already exists + var_exists = any( + env.get('name') == var_name for env in environment if isinstance(env, dict) + ) + + if not var_exists: + environment.append({ + 'name': var_name, + 'value': var_value + }) + logger.info(f" Set {var_name}={var_value if var_name not in ['AZURE_SUBSCRIPTION_ID', 'AZURE_TENANT_ID'] else var_value[:8] + '...'}") + else: + logger.info(f" {var_name} already exists in environment, skipping") + + # Override CPU and memory for MCP gateway + logger.info(f"Setting MCP gateway resources to 2.0 CPU / 4Gi memory") + cpu = 2.0 + memory = "4Gi" + + + # Phase 3: Inject environment variables for services with models declarations + # Services with "models:" section and endpoint_var/model_var get those specific variables injected + if hasattr(service, 'models') and service.models and models: + logger.info(f"Service '{service_name}' has models section - processing endpoint_var and model_var") + + # Ensure environment list exists + if environment is None: + environment = [] + + # Generate proper MODEL_RUNNER_URL with FQDN and /engines/v1/ suffix + # Check if models app has allowInsecure set + models_allow_insecure = False + for model_name, model_spec in models.items(): + if isinstance(model_spec, dict) and 'x-azure-deployment' in model_spec: + ingress_config = model_spec['x-azure-deployment'].get('ingress', {}) + models_allow_insecure = ingress_config.get('allowInsecure', False) + break + + from ._compose_utils import get_containerapp_fqdn + models_fqdn = get_containerapp_fqdn(cmd, resource_group_name, managed_env_name, 'models', is_external=False) + + # Use http if allowInsecure, otherwise https + models_protocol = 'http' if models_allow_insecure else 'https' + models_fqdn = models_fqdn.replace('https://', f'{models_protocol}://') + models_endpoint_with_path = f"{models_fqdn}/engines/v1/" + + logger.info(f"Generated MODEL_RUNNER_URL: {models_endpoint_with_path}") + + # Process each model reference + for model_name, model_config in service.models.items(): + if isinstance(model_config, dict): + endpoint_var = model_config.get('endpoint_var') + model_var = model_config.get('model_var') + + # Inject endpoint_var if specified + if endpoint_var: + # Check if variable already exists (handle both string and dict formats) + var_exists = any( + (isinstance(env, dict) and env.get('name') == endpoint_var) or + (isinstance(env, str) and 
env.startswith(f"{endpoint_var}=")) + for env in environment + ) + + if not var_exists: + environment.append({ + 'name': endpoint_var, + 'value': models_endpoint_with_path + }) + logger.info(f" Injected {endpoint_var}={models_endpoint_with_path}") + + # Inject model_var if specified + if model_var and model_name in models: + model_path = models[model_name].get('model', '') + + # Check if variable already exists + var_exists = any( + (isinstance(env, dict) and env.get('name') == model_var) or + (isinstance(env, str) and env.startswith(f"{model_var}=")) + for env in environment + ) + + if not var_exists: + environment.append({ + 'name': model_var, + 'value': model_path + }) + logger.info(f" Injected {model_var}={model_path}") + + + # Phase 4: No automatic environment variable injection for mcp-gateway dependencies + # Services should explicitly define any needed environment variables in their compose file if service.build is not None: logger.warning("Build configuration defined for this service.") logger.warning("The build will be performed by Azure Container Registry.") @@ -1629,29 +1959,214 @@ def create_containerapps_from_compose(cmd, # pylint: disable=R0914 registry_username, registry_password, environment) - containerapps_from_compose.append( - create_containerapp(cmd, - service_name, - resource_group_name, - image=image, - container_name=service.container_name, - managed_env=managed_environment["id"], - ingress=ingress_type, - target_port=target_port, - registry_server=registry, - registry_user=registry_username, - registry_pass=registry_password, - transport=transport_setting, - startup_command=startup_command, - args=startup_args, - cpu=cpu, - memory=memory, - env_vars=environment, - secrets=secret_vars, - min_replicas=replicas, - max_replicas=replicas, ) + + # Phase 7: Check if container app exists and detect changes (skip in dry-run) + if not dry_run: + # Determine final ingress type using x-azure-deployment overrides + # Get raw service YAML for override parsing + raw_service_yaml = compose_yaml_original.get('services', {}).get(service_name, {}) + overrides = parse_x_azure_deployment(raw_service_yaml) + + # Priority: x-azure-deployment override > default based on ports + # NOTE: We ignore ingress_type from resolve_ingress_and_target_port because it always returns 'external' + if overrides.get('ingress_type') is not None: + final_ingress_type = overrides['ingress_type'] + elif service.ports: + final_ingress_type = 'internal' # Default to internal if ports exist + else: + final_ingress_type = None + + # Apply image override if specified in x-azure-deployment + final_image = overrides.get('image') or image + + # Check if container app already exists + try: + existing_app = show_containerapp(cmd=cmd, resource_group_name=resource_group_name, name=service_name) + except Exception: + existing_app = None + + if existing_app: + # Build new configuration for comparison + new_config = { + 'image': final_image, + 'cpu': cpu, + 'memory': memory, + 'min_replicas': replicas, + 'max_replicas': replicas, + 'env_vars': environment + } + + # Detect changes + changes = detect_configuration_changes(existing_app, new_config) + log_update_detection(service_name, changes, logger) + + # If changes detected, update instead of create + if changes['has_changes']: + logger.info(f"Updating existing container app: {service_name}") + updated_app = update_containerapp_from_compose( + cmd=cmd, + resource_group_name=resource_group_name, + app_name=service_name, + image=final_image if changes['image_changed'] else 
None, + env_vars=environment if changes['env_vars_changed'] else None, + cpu=cpu if changes['resources_changed'] else None, + memory=memory if changes['resources_changed'] else None, + min_replicas=replicas if changes['replicas_changed'] else None, + max_replicas=replicas if changes['replicas_changed'] else None, + logger=logger + ) + containerapps_from_compose.append(updated_app) + continue # Skip creation, move to next service + + # Convert environment variables from dict format to CLI string format (KEY=VALUE) + env_vars_cli_format = None + if environment: + env_vars_cli_format = [] + for env in environment: + if isinstance(env, dict) and "name" in env and "value" in env: + env_vars_cli_format.append(f"{env['name']}={env['value']}") + elif isinstance(env, str): + env_vars_cli_format.append(env) + + # Determine final replica count with appropriate defaults + # - MCP gateway and models: min 1 replica (always) + # - Other services: default to 1 if not specified + final_min_replicas = replicas if replicas is not None else 1 + final_max_replicas = replicas if replicas is not None else 1 + + # Ensure MCP gateway always has at least 1 replica + if is_mcp_gateway and final_min_replicas < 1: + logger.info(f"MCP gateway {service_name}: forcing min replicas to 1") + final_min_replicas = 1 + final_max_replicas = max(1, final_max_replicas) + + containerapps_from_compose.append( + create_containerapp(cmd, + service_name, + resource_group_name, + image=final_image, + container_name=service.container_name, + managed_env=managed_environment["id"], + ingress=final_ingress_type, + target_port=target_port, + registry_server=registry, + registry_user=registry_username, + registry_pass=registry_password, + transport=transport_setting, + startup_command=startup_command, + args=startup_args, + cpu=cpu, + memory=memory, + env_vars=env_vars_cli_format, + secrets=secret_vars, + min_replicas=final_min_replicas, + max_replicas=final_max_replicas, ) + ) + + + # Phase 5: Dry-run mode - collect service config instead of deploying + if dry_run: + # Get raw service YAML for x-azure-deployment parsing (use original, unmodified YAML) + raw_service_yaml = compose_yaml_original.get('services', {}).get(service_name, {}) + + # In dry-run mode, don't pass ingress_type from resolve_ingress_and_target_port + # because it always returns 'external', which interferes with x-azure-deployment overrides + # Instead, pass None and let collect_dry_run_service_config determine ingress from overrides or ports + service_config = collect_dry_run_service_config( + service_name=service_name, + service=service, + image=image, + ingress_type=None, # Don't pass - let function use override logic + target_port=target_port, + cpu=cpu, + memory=memory, + environment=environment, + replicas=replicas, + is_models_service=(service_name == 'models'), + is_mcp_gateway=is_mcp_gateway, + gpu_type=gpu_profile.get('type') if (service_name == 'models' and 'gpu_profile' in locals()) else None, + raw_service=raw_service_yaml, + models_config=models + ) + dry_run_services.append(service_config) + + # if service_name == 'models': + # dry_run_has_models = True + if is_mcp_gateway: + dry_run_has_gateway = True + + + # Phase 4: MCP Gateway Post-Creation Setup + if is_mcp_gateway and not dry_run: + logger.info(f"Configuring MCP gateway: {service_name}") + + # Enable system-assigned managed identity + try: + identity = enable_managed_identity(cmd, resource_group_name, service_name) + principal_id = identity.get('principalId') + + if principal_id: + # Attempt role 
assignment (graceful fallback if it fails) + attempt_role_assignment(cmd, principal_id, resource_group_name, service_name) + else: + logger.warning(f"Principal ID not available yet for '{service_name}' - skipping role assignment") + + # Inject MCP gateway environment variables + from azure.cli.core.commands.client_factory import get_subscription_id + subscription_id = get_subscription_id(cmd.cli_ctx) + + # Note: Environment variables are injected during initial creation + # For updates, would need to retrieve and update the app + logger.info(f"MCP gateway '{service_name}' configured successfully") + + except Exception as e: + logger.error(f"Failed to configure MCP gateway '{service_name}': {str(e)}") + # Don't fail deployment - gateway is created, just not fully configured + logger.warning("MCP gateway created but requires manual configuration") + + # Check if this service depends on MCP gateway + service_depends_on_gateway = False + if hasattr(service, 'depends_on') and service.depends_on: + if isinstance(service.depends_on, list): + service_depends_on_gateway = any('mcp-gateway' in str(dep).lower() for dep in service.depends_on) + elif isinstance(service.depends_on, dict): + service_depends_on_gateway = any('mcp-gateway' in str(dep).lower() for dep in service.depends_on.keys()) + + # Inject MCP_GATEWAY_URL if service depends on gateway + if service_depends_on_gateway: + # MCP gateway URL format: http://mcp-gateway:8811 + gateway_url = "http://mcp-gateway:8811" + logger.info(f"Service '{service_name}' depends on MCP gateway - will inject MCP_GATEWAY_URL") + # Note: Environment was already set during creation above + # If needed to update, would retrieve the created app and update it + + # Phase 5: Dry-run mode - print preview and return early + if dry_run: + # Print each service plan + for service_config in dry_run_services: + print_dry_run_service_plan(service_config['service_name'], service_config) + + # Print models deployment if present + if dry_run_has_models and models: + # Extract GPU profile from x-azure-deployment + x_azure = models.get("x-azure-deployment", {}) + workload_profiles = x_azure.get("workloadProfiles", {}) + profile_type = workload_profiles.get("workloadProfileType", "Consumption") + gpu_profile_info = { + "type": profile_type, + "name": profile_type + } + print_dry_run_models_deployment(models, gpu_profile_info) + + # Print summary + print_dry_run_summary( + total_services=len(dry_run_services), + has_models=dry_run_has_models, + has_gateway=dry_run_has_gateway ) - return containerapps_from_compose + + # Return empty list (no actual deployment) + return None # Dry-run complete, no resources created return containerapps_from_compose def set_workload_profile(cmd, resource_group_name, env_name, workload_profile_name, workload_profile_type=None, min_nodes=None, max_nodes=None): diff --git a/src/containerapp/azext_containerapp/tests/latest/test_containerapp_compose_mcp_gateway.py b/src/containerapp/azext_containerapp/tests/latest/test_containerapp_compose_mcp_gateway.py new file mode 100644 index 00000000000..917327911e0 --- /dev/null +++ b/src/containerapp/azext_containerapp/tests/latest/test_containerapp_compose_mcp_gateway.py @@ -0,0 +1,127 @@ +# -------------------------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. 
+# -------------------------------------------------------------------------------------------- + +import os +import unittest +from azure.cli.testsdk import (ScenarioTest, ResourceGroupPreparer, live_only) +from azure.cli.core.azclierror import ( + ResourceNotFoundError, + InvalidArgumentValueError, + UnauthorizedError +) + + +class ContainerAppComposeMCPGatewayScenarioTest(ScenarioTest): + """ + Test scenarios for MCP Gateway detection and configuration. + Per Constitution Principle III: These tests are written BEFORE implementation. + They MUST FAIL until the implementation is complete. + """ + + @ResourceGroupPreparer(location='eastus') + @live_only() + def test_mcp_gateway_deployment(self, resource_group): + """ + Test US2 Acceptance Scenario 1: Deploy compose file with MCP gateway. + + Expected: MCP gateway service is created without being assigned to GPU profile. + + This test MUST FAIL until T032-T043 are implemented. + """ + self.kwargs.update({ + 'env_name': self.create_random_name(prefix='env', length=24), + 'compose_file': os.path.join(os.path.dirname(__file__), '../../../../../../test-resources/compose-files/mcp-gateway.yml') + }) + + self.cmd('containerapp env create -g {rg} -n {env_name} --location eastus') + self.cmd('containerapp compose create -g {rg} --environment {env_name} ' + '--compose-file-path {compose_file}') + + # Verify MCP gateway app was created + mcp_gateway = self.cmd('containerapp show -g {rg} -n mcp-gateway').get_output_in_json() + self.assertIsNotNone(mcp_gateway) + + # Verify MCP gateway is NOT on GPU profile + workload_profile = mcp_gateway['properties'].get('workloadProfileName') + if workload_profile: + self.assertNotIn('GPU', workload_profile.upper()) + + @ResourceGroupPreparer(location='eastus') + @live_only() + def test_managed_identity_enablement(self, resource_group): + """ + Test US2 Acceptance Scenario 2: System-assigned managed identity enabled for MCP gateway. + + Expected: MCP gateway container app has system-assigned managed identity enabled. + + This test MUST FAIL until T034-T037 are implemented. + """ + self.kwargs.update({ + 'env_name': self.create_random_name(prefix='env', length=24), + 'compose_file': os.path.join(os.path.dirname(__file__), '../../../../../../test-resources/compose-files/mcp-gateway.yml') + }) + + self.cmd('containerapp env create -g {rg} -n {env_name} --location eastus') + self.cmd('containerapp compose create -g {rg} --environment {env_name} ' + '--compose-file-path {compose_file}') + + # Verify system-assigned managed identity is enabled + mcp_gateway = self.cmd('containerapp show -g {rg} -n mcp-gateway').get_output_in_json() + identity = mcp_gateway['identity'] + self.assertEqual(identity['type'], 'SystemAssigned') + self.assertIsNotNone(identity.get('principalId')) + + @ResourceGroupPreparer(location='eastus') + @live_only() + def test_role_assignment(self, resource_group): + """ + Test US2 Acceptance Scenario 3: Azure AI Developer role assigned to MCP gateway identity. + + Expected: MCP gateway managed identity has "Azure AI Developer" role on resource group. + + This test MUST FAIL until T038-T043 are implemented. 
+ """ + self.kwargs.update({ + 'env_name': self.create_random_name(prefix='env', length=24), + 'compose_file': os.path.join(os.path.dirname(__file__), '../../../../../../test-resources/compose-files/mcp-gateway.yml') + }) + + self.cmd('containerapp env create -g {rg} -n {env_name} --location eastus') + self.cmd('containerapp compose create -g {rg} --environment {env_name} ' + '--compose-file-path {compose_file}') + + # Get MCP gateway identity + mcp_gateway = self.cmd('containerapp show -g {rg} -n mcp-gateway').get_output_in_json() + principal_id = mcp_gateway['identity']['principalId'] + + # Verify role assignment exists (would need az role assignment list) + # This is a placeholder - actual implementation would check Azure RBAC + self.assertIsNotNone(principal_id) + + @ResourceGroupPreparer(location='eastus') + @live_only() + def test_mcp_environment_injection(self, resource_group): + """ + Test US2 Acceptance Scenario 4: Environment variable injection for MCP gateway dependencies. + + Expected: Services that depend on MCP gateway receive MCP_ENDPOINT environment variable. + + This test MUST FAIL until T040-T043 are implemented. + """ + self.kwargs.update({ + 'env_name': self.create_random_name(prefix='env', length=24), + 'compose_file': os.path.join(os.path.dirname(__file__), '../../../../../../test-resources/compose-files/models-and-mcp.yml') + }) + + self.cmd('containerapp env create -g {rg} -n {env_name} --location eastus --enable-workload-profiles') + self.cmd('containerapp compose create -g {rg} --environment {env_name} ' + '--compose-file-path {compose_file}') + + # Check web service has MCP_ENDPOINT + web_app = self.cmd('containerapp show -g {rg} -n web').get_output_in_json() + env_vars = {e['name']: e.get('value', '') for e in web_app['properties']['template']['containers'][0].get('env', [])} + + self.assertIn('MCP_ENDPOINT', env_vars) + self.assertIn('mcp-gateway', env_vars['MCP_ENDPOINT'].lower()) diff --git a/src/containerapp/azext_containerapp/tests/latest/test_containerapp_compose_models.py b/src/containerapp/azext_containerapp/tests/latest/test_containerapp_compose_models.py new file mode 100644 index 00000000000..38364f5c288 --- /dev/null +++ b/src/containerapp/azext_containerapp/tests/latest/test_containerapp_compose_models.py @@ -0,0 +1,300 @@ +# -------------------------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# -------------------------------------------------------------------------------------------- + +import os +import unittest +from azure.cli.testsdk import (ScenarioTest, ResourceGroupPreparer, live_only) +from azure.cli.core.azclierror import ( + ResourceNotFoundError, + InvalidArgumentValueError, + CLIInternalError, + FileOperationError, + UnauthorizedError +) + + +class ContainerAppComposeModelsScenarioTest(ScenarioTest): + """ + Test scenarios for Docker Compose files with models section. + Per Constitution Principle III: These tests are written BEFORE implementation. + They MUST FAIL until the implementation is complete. + """ + + @ResourceGroupPreparer(location='westus3') # GPU region + @live_only() + def test_basic_models_deployment(self, resource_group): + """ + Test US1 Acceptance Scenario 1: Deploy compose file with models section. + + Expected: Creates models container app with model-runner and model-runner-config containers. 
+ Services receive MODELS_ENDPOINT and MODELS_AVAILABLE environment variables. + + This test MUST FAIL until T013-T027 are implemented. + """ + self.kwargs.update({ + 'env_name': self.create_random_name(prefix='env', length=24), + 'compose_file': os.path.join(os.path.dirname(__file__), '../../../../../../test-resources/compose-files/models-basic.yml') + }) + + # Create Container Apps environment + self.cmd('containerapp env create -g {rg} -n {env_name} --location westus3') + + # Deploy compose file with models section + result = self.cmd('containerapp compose create -g {rg} --environment {env_name} ' + '--compose-file-path {compose_file}').get_output_in_json() + + # Verify models container app was created + models_app = self.cmd('containerapp show -g {rg} -n models').get_output_in_json() + self.assertIsNotNone(models_app) + + # Verify models app has 2 containers + containers = models_app['properties']['template']['containers'] + self.assertEqual(len(containers), 2) + container_names = [c['name'] for c in containers] + self.assertIn('model-runner', container_names) + self.assertIn('model-runner-config', container_names) + + # Verify GPU workload profile assignment + workload_profile = models_app['properties'].get('workloadProfileName') + self.assertIsNotNone(workload_profile) + self.assertTrue('GPU' in workload_profile.upper()) + + # Verify dependent service has environment variables + web_app = self.cmd('containerapp show -g {rg} -n web').get_output_in_json() + env_vars = {e['name']: e.get('value') for e in web_app['properties']['template']['containers'][0].get('env', [])} + self.assertIn('MODELS_ENDPOINT', env_vars) + self.assertIn('MODELS_AVAILABLE', env_vars) + self.assertIn('http://models', env_vars['MODELS_ENDPOINT']) + + @ResourceGroupPreparer(location='westus3') + @live_only() + def test_gpu_profile_creation(self, resource_group): + """ + Test US1 Acceptance Scenario 2: GPU workload profile creation. + + Expected: System checks for GPU profile, creates if doesn't exist, + deploys models app to that profile. + + This test MUST FAIL until T016-T019 are implemented. + """ + self.kwargs.update({ + 'env_name': self.create_random_name(prefix='env', length=24), + 'compose_file': os.path.join(os.path.dirname(__file__), '../../../../../../test-resources/compose-files/models-basic.yml') + }) + + # Create environment WITHOUT GPU workload profile + self.cmd('containerapp env create -g {rg} -n {env_name} --location westus3') + + # Deploy - should create GPU profile automatically + self.cmd('containerapp compose create -g {rg} --environment {env_name} ' + '--compose-file-path {compose_file}') + + # Verify GPU workload profile exists + env = self.cmd('containerapp env show -g {rg} -n {env_name}').get_output_in_json() + workload_profiles = env['properties'].get('workloadProfiles', []) + gpu_profiles = [p for p in workload_profiles if 'GPU' in p.get('workloadProfileType', '').upper()] + self.assertGreater(len(gpu_profiles), 0) + + @ResourceGroupPreparer(location='westus3') + @live_only() + def test_models_environment_injection(self, resource_group): + """ + Test US1 Acceptance Scenario 3: Environment variable injection. + + Expected: Services that depend on models receive MODELS_ENDPOINT and MODELS_AVAILABLE. + + This test MUST FAIL until T023-T027 are implemented. 
+ """ + self.kwargs.update({ + 'env_name': self.create_random_name(prefix='env', length=24), + 'compose_file': os.path.join(os.path.dirname(__file__), '../../../../../../test-resources/compose-files/models-basic.yml') + }) + + self.cmd('containerapp env create -g {rg} -n {env_name} --location westus3 --enable-workload-profiles') + self.cmd('containerapp compose create -g {rg} --environment {env_name} ' + '--compose-file-path {compose_file}') + + # Check web service has environment variables + web_app = self.cmd('containerapp show -g {rg} -n web').get_output_in_json() + env_vars = {e['name']: e.get('value', '') for e in web_app['properties']['template']['containers'][0].get('env', [])} + + self.assertIn('MODELS_ENDPOINT', env_vars) + self.assertIn('MODELS_AVAILABLE', env_vars) + + # Verify MODELS_ENDPOINT format + self.assertIn('models', env_vars['MODELS_ENDPOINT'].lower()) + + # Verify MODELS_AVAILABLE contains expected model names + self.assertIn('phi', env_vars['MODELS_AVAILABLE'].lower()) + + @ResourceGroupPreparer(location='westus3') + @live_only() + def test_gpu_unavailable_error(self, resource_group): + """ + Test FR-065: GPU unavailable error handling. + + Expected: Raises ResourceNotFoundError with helpful message. + + This test MUST FAIL until proper error handling is implemented. + """ + # This test would attempt deployment in a region without GPU support + # For now, placeholder - would need non-GPU region + pass + + @ResourceGroupPreparer(location='westus3') + @live_only() + def test_dry_run_no_resources(self, resource_group): + """ + Test US3 Acceptance Scenario 1-2: Dry-run generates preview without creating resources. + + Expected: No actual Azure resources created, detailed report generated. + + This test MUST FAIL until T048-T056 are implemented. + """ + self.kwargs.update({ + 'env_name': self.create_random_name(prefix='env', length=24), + 'compose_file': os.path.join(os.path.dirname(__file__), '../../../../../../test-resources/compose-files/models-basic.yml') + }) + + self.cmd('containerapp env create -g {rg} -n {env_name} --location westus3 --enable-workload-profiles') + + # Run with --dry-run flag + result = self.cmd('containerapp compose create -g {rg} --environment {env_name} ' + '--compose-file-path {compose_file} --dry-run', + checks=[]).get_output_in_json() + + # Verify report contains expected elements + self.assertIn('container_apps', result) + self.assertIn('workload_profiles', result) + self.assertIn('role_assignments', result) + self.assertIn('environment_injections', result) + + # Verify no actual container apps were created + apps_list = self.cmd('containerapp list -g {rg}').get_output_in_json() + app_names = [app['name'] for app in apps_list] + self.assertNotIn('models', app_names) + self.assertNotIn('web', app_names) + + @ResourceGroupPreparer(location='westus3') + @live_only() + def test_custom_image_override(self, resource_group): + """ + Test US4 Acceptance Scenario 1: Custom image via x-azure-deployment. + + Expected: Container app uses custom image from x-azure-deployment.image. + + This test MUST FAIL until T059-T064 are implemented. + """ + # This would need a compose file with x-azure-deployment.image override + # Placeholder for when that compose file is created + pass + + @ResourceGroupPreparer(location='westus3') + @live_only() + def test_custom_workload_profile_override(self, resource_group): + """ + Test US4 Acceptance Scenario 3: Custom workload profile via x-azure-deployment. + + Expected: Models app uses specified workload profile type. 
+ + This test MUST FAIL until T063 is implemented. + """ + self.kwargs.update({ + 'env_name': self.create_random_name(prefix='env', length=24), + 'compose_file': os.path.join(os.path.dirname(__file__), '../../../../../../test-resources/compose-files/models-basic.yml') + }) + + self.cmd('containerapp env create -g {rg} -n {env_name} --location westus3 --enable-workload-profiles') + self.cmd('containerapp compose create -g {rg} --environment {env_name} ' + '--compose-file-path {compose_file}') + + # Verify models app uses GPU profile specified in x-azure-deployment + models_app = self.cmd('containerapp show -g {rg} -n models').get_output_in_json() + workload_profile = models_app['properties'].get('workloadProfileName') + # Should match the type in compose file: Consumption-GPU-NC8as-T4 + self.assertIn('NC8as-T4', workload_profile) + + @ResourceGroupPreparer(location='westus3') + @live_only() + def test_declarative_update(self, resource_group): + """ + Test Declarative Update: Deploy, modify compose, redeploy. + + Expected: Existing apps updated, new apps created, unmanaged apps preserved. + + This test MUST FAIL until T067-T071 are implemented. + """ + # Placeholder for declarative update test + pass + + @ResourceGroupPreparer(location='westus3') + @live_only() + def test_replace_all_flag(self, resource_group): + """ + Test --replace-all flag. + + Expected: All matching container apps are deleted and recreated. + + This test MUST FAIL until T070 is implemented. + """ + # Placeholder for replace-all test + pass + + @ResourceGroupPreparer(location='westus3') + @live_only() + def test_error_types_compliance(self, resource_group): + """ + Test FR-064 through FR-071: Error handling with proper error types. + + Expected: Specific error types from azure.cli.core.azclierror used: + - ResourceNotFoundError for GPU unavailable (FR-065) + - InvalidArgumentValueError for schema validation (FR-066) + - UnauthorizedError for role assignment failures (FR-067) + - CLIInternalError for model-runner failures (FR-068) + - FileOperationError for compose file issues (FR-070) + + This test MUST FAIL until proper error handling is implemented. + """ + self.kwargs.update({ + 'env_name': self.create_random_name(prefix='env', length=24), + }) + + # Test invalid compose file (FileOperationError expected) + with self.assertRaises(FileOperationError): + self.cmd('containerapp compose create -g {rg} --environment {env_name} ' + '--compose-file-path /nonexistent/file.yml') + + @ResourceGroupPreparer(location='westus3') + @live_only() + def test_posix_compliance(self, resource_group): + """ + Test FR-072, FR-073, FR-075-078: POSIX compliance. + + Expected: + - Command output to stdout (FR-072) + - Status/errors to stderr (FR-073) + - Exit code 0 for success (FR-075) + - Exit code 1 for errors (FR-076) + - Exit code 2 for parser errors (FR-077) + - Exit code 3 for missing resources (FR-078) + + This test MUST FAIL until POSIX compliance is implemented. + """ + # This test validates CLI behavior at the shell level + # Would need to capture stdout/stderr separately + pass + + @ResourceGroupPreparer(location='westus3') + @live_only() + def test_logger_usage(self, resource_group): + """ + Test FR-074: logger.error() and logger.warning() used instead of print(). + + Expected: All user messages use logger, not print(). + + This test validates implementation follows logging standards. 
+ """ + # This is a code inspection test - validates no print() calls in implementation + pass From 96cec3bbd8b67a56101beed95e0ee331eeff441f Mon Sep 17 00:00:00 2001 From: Simon Jakesch Date: Sun, 9 Nov 2025 21:21:32 -0600 Subject: [PATCH 02/10] feat(containerapp): Enhance compose deployment with GPU and ingress improvements - Auto-detect GPU requirements and select appropriate workload profiles - Prefer T4 GPU over A100 for cost optimization - Default to internal ingress for models and mcp-gateway services - Support image override via x-azure-deployment.image - Add allowInsecure ingress option for development scenarios - Fix model endpoint variable injection (use getattr for safe access) - Fix MODELS_CONFIG schema to properly include runtime_flags - Fix dynamic subscription ID in role assignments - Remove azure-cli core dependency requirements (extension is self-contained) --- .../azext_containerapp/_compose_ported.py | 235 ++++++++ .../azext_containerapp/_compose_utils.py | 534 ++++++++++++++++-- .../azext_containerapp/_constants.py | 10 + src/containerapp/azext_containerapp/custom.py | 125 ++-- 4 files changed, 834 insertions(+), 70 deletions(-) create mode 100644 src/containerapp/azext_containerapp/_compose_ported.py diff --git a/src/containerapp/azext_containerapp/_compose_ported.py b/src/containerapp/azext_containerapp/_compose_ported.py new file mode 100644 index 00000000000..fc709a754ef --- /dev/null +++ b/src/containerapp/azext_containerapp/_compose_ported.py @@ -0,0 +1,235 @@ +# -------------------------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# -------------------------------------------------------------------------------------------- +"""Helpers ported from azure-cli core compose utilities. + +This module contains logic copied from azure-cli commits: +- 092e028c556c5d98c06ea1a337c26b97fe00ce59 +- 2f7ef21a0d6c4afb9f066c0d65affcc84a8b36a4 + +The implementations are kept in the extension to avoid depending on +those specific core revisions. Keep in sync with CLI >= 2.78.0. +""" + +from __future__ import annotations + +from typing import Dict, Iterable, List + +from knack.log import get_logger + +LOGGER = get_logger(__name__) + + +def parse_models_section(compose_yaml: Dict) -> Dict[str, Dict]: + """Ported from 092e028c556c5d98c06ea1a337c26b97fe00ce59. + + Extract the ``models`` block from the docker-compose YAML and normalise the + structure so downstream helpers can reason about model metadata. + """ + models: Dict[str, Dict] = {} + if "models" not in compose_yaml or compose_yaml["models"] is None: + return models + + models_section = compose_yaml["models"] + for model_name, model_config in models_section.items(): + if isinstance(model_config, dict): + # Pass through all keys except x-azure-deployment (which is handled separately) + # This preserves keys like runtime_flags, model, etc. + models[model_name] = {k: v for k, v in model_config.items() if k != 'x-azure-deployment'} + elif isinstance(model_config, str): + models[model_name] = { + "model": model_config, + } + + if models: + LOGGER.info("Ported models section parser found %s model(s)", len(models)) + return models + + +def parse_service_models_config(service) -> Dict[str, Dict[str, str]]: + """Ported from 092e028c556c5d98c06ea1a337c26b97fe00ce59. 
+ + The original helper returns everything under ``service.models`` unchanged + when it is a mapping. This keeps per-service overrides intact. + """ + if not hasattr(service, "models") or service.models is None: + return {} + if not isinstance(service.models, dict): + return {} + return service.models + + +def detect_service_type(service) -> str: + """Ported from 092e028c556c5d98c06ea1a337c26b97fe00ce59. + + Classify a compose service so that compose processing can customise + behaviour for MCP gateway, model-runner, agent, or generic services. + """ + service_name = service.name.lower() if hasattr(service, "name") else "" + image_name = service.image.lower() if getattr(service, "image", None) else "" + command_str = "" + if getattr(service, "command", None) is not None: + command = service.command + command_str = command.command_string().lower() if hasattr(command, "command_string") else str(command).lower() + + if "mcp-gateway" in service_name or "mcp-gateway" in image_name or "--servers" in command_str: + return "mcp-gateway" + if "model-runner" in service_name or "model-runner" in image_name: + return "model-runner" + if hasattr(service, "models") and service.models: + return "agent" + if hasattr(service, "depends_on") and service.depends_on: + depends_on_iter = service.depends_on + if isinstance(depends_on_iter, dict): + depends_on_iter = depends_on_iter.keys() + for dependency in depends_on_iter: + dep_str = str(dependency).lower() + if "mcp-gateway" in dep_str or "model-runner" in dep_str: + return "agent" + return "generic" + + +def parse_mcp_servers_from_command(service) -> List[Dict[str, object]]: + """Ported from 092e028c556c5d98c06ea1a337c26b97fe00ce59. + + Inspect the MCP gateway command line for ``--servers``/``--tools`` flags + and return a normalised list of server definitions. 
+ """ + if getattr(service, "command", None) is None: + return [] + + command = service.command + command_str = command.command_string() if hasattr(command, "command_string") else str(command) + command_parts = command_str.split() + + servers: List[str] = [] + tools: List[str] = [] + for idx, part in enumerate(command_parts): + if part == "--servers" and idx + 1 < len(command_parts): + servers = [item.strip() for item in command_parts[idx + 1].split(",") if item.strip()] + if part == "--tools" and idx + 1 < len(command_parts): + tools = [item.strip() for item in command_parts[idx + 1].split(",") if item.strip()] + + return [ + { + "name": server_name, + "server_type": server_name, + "tools": tools if tools else ["*"], + "image": f"docker/mcp-server-{server_name}", + "resources": {"cpu": "0.5", "memory": "1.0"}, + } + for server_name in servers + ] + + +def should_deploy_model_runner(compose_yaml: Dict, parsed_compose_file) -> bool: + """Ported from 092e028c556c5d98c06ea1a337c26b97fe00ce59.""" + if compose_yaml.get("models"): + return True + for service in getattr(parsed_compose_file, "services", {}).values(): + if hasattr(service, "models") and service.models: + return True + return False + + +def get_model_runner_environment_vars(models_config: Dict, aca_environment_name: str) -> List[str]: + """Ported from 092e028c556c5d98c06ea1a337c26b97fe00ce59.""" + if not aca_environment_name: + return [] + base = f"http://model-runner.internal.{aca_environment_name}.azurecontainerapps.io:8080" + return ["MODEL_RUNNER_URL=" + base] + + +def get_mcp_gateway_environment_vars(aca_environment_name: str) -> List[str]: + """Ported from 092e028c556c5d98c06ea1a337c26b97fe00ce59.""" + if not aca_environment_name: + return [] + base = f"http://mcp-gateway.internal.{aca_environment_name}.azurecontainerapps.io:8811" + return [ + "MCP_GATEWAY_URL=" + base, + "MCPGATEWAY_ENDPOINT=" + base + "/sse", + ] + + +def extract_model_definitions(compose_yaml: Dict, parsed_compose_file) -> List[Dict[str, object]]: + """Ported from 092e028c556c5d98c06ea1a337c26b97fe00ce59.""" + definitions: List[Dict[str, object]] = [] + models = parse_models_section(compose_yaml) + + endpoint_var_mapping: Dict[str, List[str]] = {} + for service in getattr(parsed_compose_file, "services", {}).values(): + service_models = parse_service_models_config(service) + for model_ref, model_config in service_models.items(): + if not isinstance(model_config, dict): + continue + endpoint_var = model_config.get("endpoint_var") + model_var = model_config.get("model_var") + endpoint_var_mapping.setdefault(model_ref, []) + if endpoint_var: + endpoint_var_mapping[model_ref].append(endpoint_var) + if model_var: + endpoint_var_mapping[model_ref].append(model_var) + + for model_name, model_config in models.items(): + definition = { + "name": model_name, + "model": model_config.get("model"), + "volume": model_config.get("volume"), + "context_size": model_config.get("context_size"), + "gpu": model_config.get("gpu", False), + "endpoint_vars": endpoint_var_mapping.get(model_name, []), + } + definitions.append(definition) + + return definitions + + +def get_model_endpoint_environment_vars( + service_models: Dict[str, Dict[str, str]], + models_config: Dict[str, Dict[str, object]], + aca_environment_name: str, +) -> List[str]: + """Ported from 092e028c556c5d98c06ea1a337c26b97fe00ce59.""" + env_vars: List[str] = [] + if not service_models or not isinstance(service_models, dict): + return env_vars + + base_url = 
f"http://model-runner.internal.{aca_environment_name}.azurecontainerapps.io:8080" + for model_ref, model_config in service_models.items(): + if not isinstance(model_config, dict): + continue + endpoint_var = model_config.get("endpoint_var") + model_var = model_config.get("model_var") + model_name = None + if models_config and model_ref in models_config: + model_name = models_config[model_ref].get("model") + if endpoint_var: + env_vars.append(f"{endpoint_var}={base_url}/v1/chat/completions") + if model_var and model_name: + env_vars.append(f"{model_var}={model_name}") + return env_vars + + +def calculate_model_runner_resources(model_definitions: Iterable[Dict[str, object]]) -> tuple[str, str]: + """Ported from 092e028c556c5d98c06ea1a337c26b97fe00ce59. + + Mirrors the upstream helper by returning a ``(cpu, memory)`` tuple as + strings. + """ + definitions = list(model_definitions) + if not definitions: + return "1.0", "4.0" + + base_cpu = 1.0 + base_memory = 4.0 + if any(definition.get("gpu", False) for definition in definitions): + base_cpu = 2.0 + base_memory = 8.0 + + extra_models = max(0, len(definitions) - 1) + if extra_models: + base_cpu = min(4.0, base_cpu + extra_models * 0.5) + base_memory = min(16.0, base_memory + extra_models * 2.0) + + return str(base_cpu), str(base_memory) diff --git a/src/containerapp/azext_containerapp/_compose_utils.py b/src/containerapp/azext_containerapp/_compose_utils.py index 3521159e8ec..da284f46c8c 100644 --- a/src/containerapp/azext_containerapp/_compose_utils.py +++ b/src/containerapp/azext_containerapp/_compose_utils.py @@ -5,10 +5,386 @@ # pylint: disable=line-too-long, consider-using-f-string, no-else-return, duplicate-string-formatting-argument, expression-not-assigned, too-many-locals, logging-fstring-interpolation, arguments-differ, abstract-method, logging-format-interpolation, broad-except from knack.log import get_logger +from knack.prompting import prompt, prompt_choice_list + +from ._compose_ported import ( + calculate_model_runner_resources as _ported_calculate_model_runner_resources, + detect_service_type as _ported_detect_service_type, + extract_model_definitions as _ported_extract_model_definitions, + get_mcp_gateway_environment_vars as _ported_get_mcp_gateway_environment_vars, + get_model_endpoint_environment_vars as _ported_get_model_endpoint_environment_vars, + get_model_runner_environment_vars as _ported_get_model_runner_environment_vars, + parse_mcp_servers_from_command as _ported_parse_mcp_servers_from_command, + parse_models_section as _ported_parse_models_section, + parse_service_models_config as _ported_parse_service_models_config, + should_deploy_model_runner as _ported_should_deploy_model_runner, +) logger = get_logger(__name__) +# ============================================================================ +# Functions copied from azure-cli core to make extension self-contained +# These eliminate the dependency on azure-cli core's containerapp module +# ============================================================================ + +def build_containerapp_from_compose_service(cmd, + name, + source, + dockerfile, + resource_group_name, + managed_env, + location, + image, + target_port, + ingress, + registry_server, + registry_user, + registry_pass, + env_vars, + logs_key=None, + logs_customer_id=None): + from .custom import create_managed_environment + from ._up_utils import (ContainerApp, + ContainerAppEnvironment, + ResourceGroup, + _get_registry_from_app, + _get_registry_details, + _get_acr_from_image) + + resource_group = 
ResourceGroup(cmd, name=resource_group_name, location=location) + env = ContainerAppEnvironment(cmd, + managed_env, + resource_group, + location=location, + logs_key=logs_key, + logs_customer_id=logs_customer_id) + app = ContainerApp(cmd, + name, + resource_group, + None, + image, + env, + target_port, + registry_server, + registry_user, + registry_pass, + env_vars, + ingress) + + if not registry_server: + _get_registry_from_app(app, True) + + if app.registry_server is None and app.image is not None: + _get_acr_from_image(cmd, app) + _get_registry_details(cmd, app, True) + + app.create_acr_if_needed() + app.run_acr_build(dockerfile, source, False) + return app.image, app.registry_server, app.registry_user, app.registry_pass + + +def resolve_configuration_element_list(compose_service, unsupported_configuration, area=None): + if area is not None: + compose_service = getattr(compose_service, area) + config_list = [] + for configuration_element in unsupported_configuration: + try: + attribute = getattr(compose_service, configuration_element) + except AttributeError: + logger.critical("Failed to resolve %s", configuration_element) + if attribute is not None: + config_list.append(f"{compose_service.compose_path}/{configuration_element}") + return config_list + + +def warn_about_unsupported_build_configuration(compose_service): + unsupported_configuration = ["args", "ssh", "cache_from", "cache_to", "extra_hosts", + "isolation", "labels", "no_cache", "pull", "shm_size", + "target", "secrets", "tags"] + if compose_service.build is not None: + config_list = resolve_configuration_element_list(compose_service, unsupported_configuration, 'build') + message = "These build configuration settings from the docker-compose file are yet supported." + message += " Currently, we support supplying a build context and optionally target Dockerfile for a service." + message += " See https://aka.ms/containerapp/compose/build_support for more information or to add feedback." + if len(config_list) >= 1: + logger.warning(message) + for item in config_list: + logger.warning(" %s", item) + + +def warn_about_unsupported_runtime_host_configuration(compose_service): + unsupported_configuration = ["blkio_config", "cpu_count", "cpu_percent", "cpu_shares", "cpu_period", + "cpu_quota", "cpu_rt_runtime", "cpu_rt_period", "cpuset", "cap_add", + "cap_drop", "cgroup_parent", "configs", "credential_spec", + "device_cgroup_rules", "devices", "dns", "dns_opt", "dns_search", + "domainname", "external_links", "extra_hosts", "group_add", "healthcheck", + "hostname", "init", "ipc", "isolation", "links", "logging", "mem_limit", + "mem_swappiness", "memswap_limit", "oom_kill_disable", "oom_score_adj", + "pid", "pids_limit", "privileged", "profiles", "pull_policy", "read_only", + "restart", "runtime", "security_opt", "shm_size", "stdin_open", + "stop_grace_period", "stop_signal", "storage_opt", "sysctls", "tmpfs", + "tty", "ulimits", "user", "working_dir"] + config_list = resolve_configuration_element_list(compose_service, unsupported_configuration) + message = "These container and host configuration elements from the docker-compose file are not supported" + message += " in Azure Container Apps. 
For more information about supported configuration," + message += " please see https://aka.ms/containerapp/compose/configuration" + if len(config_list) >= 1: + logger.warning(message) + for item in config_list: + logger.warning(" %s", item) + + +def warn_about_unsupported_volumes(compose_service): + unsupported_configuration = ["volumes", "volumes_from"] + config_list = resolve_configuration_element_list(compose_service, unsupported_configuration) + message = "These volume mount elements from the docker-compose file are not supported" + message += " in Azure Container Apps. For more information about supported storage configuration," + message += " please see https://aka.ms/containerapp/compose/volumes" + if len(config_list) >= 1: + logger.warning(message) + for item in config_list: + logger.warning(" %s", item) + + +def warn_about_unsupported_network(compose_service): + unsupported_configuration = ["networks", "network_mode", "mac_address"] + config_list = resolve_configuration_element_list(compose_service, unsupported_configuration) + message = "These network configuration settings from the docker-compose file are not supported" + message += " in Azure Container Apps. For more information about supported networking configuration," + message += " please see https://aka.ms/containerapp/compose/networking" + if len(config_list) >= 1: + logger.warning(message) + for item in config_list: + logger.warning(" %s", item) + + +def warn_about_unsupported_elements(compose_service): + warn_about_unsupported_build_configuration(compose_service) + warn_about_unsupported_runtime_host_configuration(compose_service) + warn_about_unsupported_volumes(compose_service) + warn_about_unsupported_network(compose_service) + + +def check_supported_platform(platform): + if platform is not None: + platform = platform.split('/') + if len(platform) >= 2: + return platform[0] == 'linux' and platform[1] == 'amd64' + return platform[0] == 'linux' + return True + + +def service_deploy_exists(service): + return service.deploy is not None + + +def service_deploy_resources_exists(service): + return service_deploy_exists(service) and service.deploy.resources is not None + + +def flatten_list(source_value): + flat_list = [] + for sub_list in source_value: + flat_list += sub_list + return flat_list + + +def resolve_transport_from_cli_args(service_name, transport): + if transport is not None: + transport = flatten_list(transport) + for setting in transport: + key, value = setting.split('=') + if key.lower() == service_name.lower(): + return value + return 'auto' + + +def resolve_registry_from_cli_args(registry_server, registry_user, registry_pass): + if registry_server is not None: + if registry_user is None and registry_pass is None: + registry_user = prompt("Please enter the registry's username: ") + registry_pass = prompt("Please enter the registry's password: ") + elif registry_user is not None and registry_pass is None: + registry_pass = prompt("Please enter the registry's password: ") + return (registry_server, registry_user, registry_pass) + + +def resolve_environment_from_service(service): + env_array = [] + + env_vars = service.resolve_environment_hierarchy() + + if env_vars is None: + return None + + for k, v in env_vars.items(): + if v is None: + v = prompt(f"{k} is empty. What would you like the value to be? 
") + env_array.append(f"{k}={v}") + + return env_array + + +def resolve_secret_from_service(service, secrets_map): + secret_array = [] + secret_env_ref = [] + + if service.secrets is None: + return (None, None) + + for secret in service.secrets: + + secret_config = secrets_map[secret.source] + if secret_config is not None and secret_config.file is not None: + value = secret_config.file.readFile() + if secret.target is None: + secret_name = secret.source.replace('_', '-') + else: + secret_name = secret.target.replace('_', '-') + secret_array.append(f"{secret_name}={value}") + secret_env_ref.append(f"{secret_name}=secretref:{secret_name}") + + if len(secret_array) == 0: + return (None, None) + + logger.warning("Note: Secrets will be mapped as secure environment variables in Azure Container Apps.") + + return (secret_array, secret_env_ref) + + +def resolve_replicas_from_service(service): + replicas = None + + if service.scale: + replicas = service.scale + if service_deploy_exists(service): + if service.deploy.replicas is not None: + replicas = service.deploy.replicas + if service.deploy.mode == "global": + replicas = 1 + + return replicas + + +# Ported from azure-cli commit 2f7ef21a0d6c4afb9f066c0d65affcc84a8b36a4. +def resolve_cpu_configuration_from_service(service): + cpu = None + # Check x-azure-deployment.resources.cpu first + if hasattr(service, 'x_azure_deployment') and service.x_azure_deployment: + resources = service.x_azure_deployment.get('resources', {}) + if 'cpu' in resources: + cpu = str(resources['cpu']) + return cpu + + if service_deploy_resources_exists(service): + resources = service.deploy.resources + if resources.reservations is not None and resources.reservations.cpus is not None: + cpu = str(resources.reservations.cpus) + elif service.cpus is not None: + cpu = str(service.cpus) + + # Default to 1.0 CPU if not specified + if cpu is None: + cpu = '1.0' + return cpu + + +# Ported from azure-cli commit 2f7ef21a0d6c4afb9f066c0d65affcc84a8b36a4. +def resolve_memory_configuration_from_service(service): + memory = None + # Check x-azure-deployment.resources.memory first + if hasattr(service, 'x_azure_deployment') and service.x_azure_deployment: + resources = service.x_azure_deployment.get('resources', {}) + if 'memory' in resources: + memory = str(resources['memory']) + return memory + + if service_deploy_resources_exists(service): + resources = service.deploy.resources + if resources.reservations is not None and resources.reservations.memory is not None: + memory = str(resources.reservations.memory.as_gigabytes()) + elif service.mem_reservation is not None: + memory = str(service.mem_reservation.as_gigabytes()) + + # Default to 2Gi if not specified + if memory is None: + memory = '2' + return memory + + +def resolve_port_or_expose_list(ports, name): + if len(ports) > 1: + message = f"You have more than one {name} mapping defined in your docker-compose file." + message += " Which port would you like to use? 
" + choice_index = prompt_choice_list(message, ports) + + return ports[choice_index] + + +def resolve_ingress_and_target_port(service): + # External Ingress Check + if service.ports is not None: + ingress_type = "external" + + if len(service.ports) == 1: + target_port = service.ports[0].target + else: + ports_list = [] + + for p in service.ports: + ports_list.append(p.target) + target_port = resolve_port_or_expose_list(ports_list, "port") + + # Internal Ingress Check + elif service.expose is not None: + ingress_type = "internal" + + if len(service.expose) == 1: + target_port = service.expose[0] + else: + target_port = resolve_port_or_expose_list(service.expose, "expose") + else: + ingress_type = None + target_port = None + return (ingress_type, target_port) + + +# Ported from azure-cli commit 2f7ef21a0d6c4afb9f066c0d65affcc84a8b36a4. +def resolve_service_startup_command(service): + startup_command_array = [] + startup_args_array = [] + if service.entrypoint is not None: + startup_command = service.entrypoint.command_string() + startup_command_array.append(startup_command) + if service.command is not None: + # Preserve individual command items as separate args + if hasattr(service.command, '__iter__') and not isinstance(service.command, str): + startup_args_array = list(service.command) + else: + startup_args = service.command.command_string() + startup_args_array.append(startup_args) + elif service.command is not None: + # When there's no entrypoint, command should be args + # Preserve individual command items as separate args + if hasattr(service.command, '__iter__') and not isinstance(service.command, str): + startup_command_array = None + startup_args_array = list(service.command) + else: + startup_args = service.command.command_string() + startup_command_array = None + startup_args_array.append(startup_args) + else: + startup_command_array = None + startup_args_array = None + return (startup_command_array, startup_args_array) + + +# ============================================================================ +# End of functions copied from azure-cli core +# ============================================================================ + + def valid_resource_settings(): # vCPU and Memory reservations # https://docs.microsoft.com/azure/container-apps/containers#configuration @@ -50,24 +426,36 @@ def validate_memory_and_cpu_setting(cpu, memory, managed_environment): def parse_models_section(compose_yaml): - """ - Extract models section from raw YAML compose file. 
- - Args: - compose_yaml: Dictionary representation of compose file - - Returns: - Dictionary of models configuration or empty dict if not present - """ - from knack.log import get_logger - logger = get_logger(__name__) - - models = compose_yaml.get('models', {}) + """Wrapper around the ported compose helper from commit 092e028c556c5d98c06ea1a337c26b97fe00ce59.""" + models = _ported_parse_models_section(compose_yaml) if models: - logger.info(f"Found {len(models)} model(s) in compose file: {list(models.keys())}") + logger.info( + "Models section detected with entries: %s", + ", ".join(sorted(models.keys())), + ) return models +def parse_service_models_config(service): + """Ported helper from azure-cli commit 092e028c556c5d98c06ea1a337c26b97fe00ce59.""" + return _ported_parse_service_models_config(service) + + +def detect_service_type(service): + """Ported helper from azure-cli commit 092e028c556c5d98c06ea1a337c26b97fe00ce59.""" + return _ported_detect_service_type(service) + + +def parse_mcp_servers_from_command(service): + """Ported helper from azure-cli commit 092e028c556c5d98c06ea1a337c26b97fe00ce59.""" + return _ported_parse_mcp_servers_from_command(service) + + +def should_deploy_model_runner(compose_yaml, parsed_compose_file): + """Ported helper from azure-cli commit 092e028c556c5d98c06ea1a337c26b97fe00ce59.""" + return _ported_should_deploy_model_runner(compose_yaml, parsed_compose_file) + + def detect_mcp_gateway_service(service_name, service): """ Check if a service is an MCP gateway by examining the image name. @@ -217,6 +605,16 @@ def validate_model_source(model_name, source): logger.info(f"Validated model source for '{model_name}': {source}") +def get_model_runner_environment_vars(models_config, aca_environment_name): + """Ported helper from azure-cli commit 092e028c556c5d98c06ea1a337c26b97fe00ce59.""" + return _ported_get_model_runner_environment_vars(models_config, aca_environment_name) + + +def get_mcp_gateway_environment_vars(aca_environment_name): + """Ported helper from azure-cli commit 092e028c556c5d98c06ea1a337c26b97fe00ce59.""" + return _ported_get_mcp_gateway_environment_vars(aca_environment_name) + + def check_gpu_profile_availability(cmd, resource_group_name, env_name, location): """ Check if GPU workload profiles are available in the region. 
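The wrappers above delegate to _compose_ported; the sizing rule they expose for the model-runner app reduces to a small heuristic: 1 vCPU / 4 GB by default, 2 vCPU / 8 GB when any model asks for a GPU, plus 0.5 vCPU and 2 GB per additional model, capped at 4 vCPU / 16 GB. A condensed, runnable sketch of that rule (the function name is illustrative, not part of the extension):

def sketch_model_runner_resources(model_definitions):
    definitions = list(model_definitions)
    if not definitions:
        return "1.0", "4.0"

    # GPU-backed models start from a larger baseline.
    cpu, memory = (2.0, 8.0) if any(d.get("gpu", False) for d in definitions) else (1.0, 4.0)

    # Each additional model adds 0.5 vCPU / 2 GB, capped at 4 vCPU / 16 GB.
    extra = max(0, len(definitions) - 1)
    if extra:
        cpu = min(4.0, cpu + extra * 0.5)
        memory = min(16.0, memory + extra * 2.0)
    return str(cpu), str(memory)


print(sketch_model_runner_resources([{"gpu": False}, {"gpu": False}]))  # ('1.5', '6.0')
print(sketch_model_runner_resources([{"gpu": True}]))                   # ('2.0', '8.0')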
@@ -337,10 +735,9 @@ def create_gpu_workload_profile_if_needed(cmd, resource_group_name, env_name, lo from azure.cli.core.azclierror import ResourceNotFoundError from ._client_factory import handle_raw_exception from ._clients import ManagedEnvironmentClient - + logger = get_logger(__name__) - - # Check if environment already has a GPU profile that matches the request + logger.warning(f"[GPU DEBUG] create_gpu_workload_profile_if_needed called with requested_gpu_profile_type: {requested_gpu_profile_type}") # Check if environment already has a GPU profile that matches the request try: # Use class methods directly - no instantiation needed env = ManagedEnvironmentClient.show(cmd, resource_group_name, env_name) @@ -377,27 +774,40 @@ def create_gpu_workload_profile_if_needed(cmd, resource_group_name, env_name, lo # Validate that requested profile is available # The API returns profiles with 'name' field containing the workload profile type available_types = [p.get('name') for p in available_gpu] - + logger.warning(f"[GPU DEBUG] Requested GPU profile: {requested_gpu_profile_type}") + logger.warning(f"[GPU DEBUG] Available GPU profiles in {location}: {available_types}") + # Pass through the requested type as-is - it's already the API value # (e.g., Consumption-GPU-NC8as-T4, Consumption-GPU-NC24-A100, Consumption, Flex) if requested_gpu_profile_type in available_types: gpu_profile_type = requested_gpu_profile_type + logger.warning(f"[GPU DEBUG] ✅ Requested profile IS available, using: {gpu_profile_type}") logger.info(f"Using requested GPU profile type: {gpu_profile_type}") else: # Don't fall back - user explicitly requested this type logger.error(f"Requested GPU profile '{requested_gpu_profile_type}' not available in {location}") logger.error(f"Available GPU profiles: {available_types}") + logger.warning(f"[GPU DEBUG] ❌ Requested profile NOT available - will raise error") raise ResourceNotFoundError( f"Requested workload profile type '{requested_gpu_profile_type}' is not available in region '{location}'. 
" f"Available types: {', '.join(available_types)}" ) else: - gpu_profile_type = available_gpu[0].get('name') + # Default to T4 if available, otherwise first available + available_types = [p.get('name') for p in available_gpu] + if 'Consumption-GPU-NC8as-T4' in available_types: + gpu_profile_type = 'Consumption-GPU-NC8as-T4' + logger.warning(f"[GPU DEBUG] ⚠️ No specific GPU profile requested, defaulting to T4: {gpu_profile_type}") + else: + gpu_profile_type = available_gpu[0].get('name') + logger.warning(f"[GPU DEBUG] ⚠️ No specific GPU profile requested, T4 not available, defaulting to first: {gpu_profile_type}") logger.info(f"No specific GPU profile requested, using: {gpu_profile_type}") # Check if it's a consumption-based GPU profile + logger.warning(f"[GPU DEBUG] Selected gpu_profile_type: {gpu_profile_type}") if gpu_profile_type.startswith('Consumption-'): # Consumption profiles need to be added to the environment's workloadProfiles array + logger.warning(f"[GPU DEBUG] Profile is consumption-based, will add to environment") logger.info(f"Adding consumption-based GPU profile to environment: {gpu_profile_type}") from azure.cli.core.util import send_raw_request @@ -409,14 +819,23 @@ def create_gpu_workload_profile_if_needed(cmd, resource_group_name, env_name, lo # Check if profile already exists existing_profiles = env.get('properties', {}).get('workloadProfiles', []) + profile_exists = False for profile in existing_profiles: if profile.get('workloadProfileType') == gpu_profile_type: + profile_exists = True logger.info(f"GPU profile already exists: {gpu_profile_type}") - # Even if profile exists, environment might still be provisioning from a previous operation - wait_for_environment_provisioning(cmd, resource_group_name, env_name) - return gpu_profile_type + break + + # If profile already exists, skip PATCH and return early + if profile_exists: + logger.info(f"Skipping environment update - profile '{gpu_profile_type}' already configured") + # Wait for environment to be ready in case it's still provisioning from a previous operation + wait_for_environment_provisioning(cmd, resource_group_name, env_name) + return gpu_profile_type + + # Profile doesn't exist - add it to the environment + logger.info(f"Adding new GPU profile '{gpu_profile_type}' to environment") - # Add the consumption GPU profile to the environment new_profile = { "name": gpu_profile_type, "workloadProfileType": gpu_profile_type @@ -541,6 +960,7 @@ def create_models_container_app(cmd, resource_group_name, env_name, env_id, mode """ from knack.log import get_logger from ._clients import ManagedEnvironmentClient + from ._constants import MODEL_RUNNER_CONFIG_IMAGE import json logger = get_logger(__name__) @@ -553,6 +973,23 @@ def create_models_container_app(cmd, resource_group_name, env_name, env_id, mode model_runner_url = "http://localhost:12434" logger.info(f"Model runner URL (localhost): {model_runner_url}") + # Determine model-runner image based on x-azure-deployment.image or GPU profile + model_runner_image = None + for model_name, model_config in models.items(): + if isinstance(model_config, dict) and 'x-azure-deployment' in model_config: + azure_deployment = model_config.get('x-azure-deployment', {}) + if 'image' in azure_deployment: + model_runner_image = azure_deployment['image'] + logger.info(f"Using custom model-runner image from x-azure-deployment: {model_runner_image}") + break + + # Default to GPU-aware image selection if not specified + if not model_runner_image: + from ._constants import MODEL_RUNNER_IMAGE, 
MODEL_RUNNER_IMAGE_CUDA + is_gpu_profile = 'GPU' in gpu_profile_name.upper() + model_runner_image = MODEL_RUNNER_IMAGE_CUDA if is_gpu_profile else MODEL_RUNNER_IMAGE + logger.info(f"Using default model-runner image: {model_runner_image} (GPU profile: {is_gpu_profile})") + # Determine GPU-appropriate resources based on profile type # T4 GPU: 8 vCPUs, 56GB memory (NC8as_T4_v3) # A100 GPU: 24 vCPUs, 220GB memory (NC24ads_A100_v4) @@ -586,7 +1023,7 @@ def create_models_container_app(cmd, resource_group_name, env_name, env_id, mode containers = [ { 'name': 'model-runner', - 'image': 'docker/model-runner:latest', + 'image': model_runner_image, 'resources': { 'cpu': gpu_cpu, 'memory': gpu_memory @@ -612,7 +1049,7 @@ def create_models_container_app(cmd, resource_group_name, env_name, env_id, mode }, { 'name': 'model-runner-config', - 'image': 'simon.azurecr.io/model-runner-config:09112025-1504', + 'image': MODEL_RUNNER_CONFIG_IMAGE, 'resources': { 'cpu': 0.5, 'memory': '1Gi' @@ -708,6 +1145,21 @@ def create_models_container_app(cmd, resource_group_name, env_name, env_id, mode raise +def extract_model_definitions(compose_yaml, parsed_compose_file): + """Ported helper from azure-cli commit 092e028c556c5d98c06ea1a337c26b97fe00ce59.""" + return _ported_extract_model_definitions(compose_yaml, parsed_compose_file) + + +def get_model_endpoint_environment_vars(service_models, models_config, aca_environment_name): + """Ported helper from azure-cli commit 092e028c556c5d98c06ea1a337c26b97fe00ce59.""" + return _ported_get_model_endpoint_environment_vars(service_models, models_config, aca_environment_name) + + +def calculate_model_runner_resources(model_definitions): + """Ported helper from azure-cli commit 092e028c556c5d98c06ea1a337c26b97fe00ce59.""" + return _ported_calculate_model_runner_resources(model_definitions) + + def get_containerapp_fqdn(cmd, resource_group_name, env_name, app_name, is_external=False): """ Generate Container Apps FQDN for a given app. 
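The image choice made in create_models_container_app above follows a simple precedence: an explicit x-azure-deployment image on any model wins, otherwise the CUDA variant of the model-runner image is used whenever the selected workload profile name contains "GPU". A minimal sketch of that precedence, using the constants this patch adds to _constants.py (the 'gemma' entry is illustrative):

MODEL_RUNNER_IMAGE = "docker/model-runner:latest"
MODEL_RUNNER_IMAGE_CUDA = "docker/model-runner:latest-cuda"


def sketch_pick_model_runner_image(models, gpu_profile_name):
    # Per-model x-azure-deployment.image override takes precedence.
    for model_config in models.values():
        if isinstance(model_config, dict):
            override = model_config.get("x-azure-deployment", {}).get("image")
            if override:
                return override
    # Otherwise pick the CUDA image for GPU workload profiles.
    if "GPU" in gpu_profile_name.upper():
        return MODEL_RUNNER_IMAGE_CUDA
    return MODEL_RUNNER_IMAGE


print(sketch_pick_model_runner_image({"gemma": {}}, "Consumption-GPU-NC8as-T4"))
# -> docker/model-runner:latest-cuda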
@@ -804,10 +1256,12 @@ def get_mcp_gateway_configuration(service): logger = get_logger(__name__) + from ._constants import MCP_GATEWAY_IMAGE + config = { 'port': 8811, # Standard MCP gateway port 'ingress_type': 'internal', # MCP gateway should be internal-only - 'image': service.image if hasattr(service, 'image') else 'mcr.microsoft.com/mcp-gateway:latest' + 'image': service.image if hasattr(service, 'image') else MCP_GATEWAY_IMAGE } # Check for custom port in service ports @@ -897,13 +1351,13 @@ def attempt_role_assignment(cmd, principal_id, resource_group_name, app_name): from azure.cli.core.commands.client_factory import get_subscription_id logger = get_logger(__name__) - - # Use the specific role definition ID for container app management - # This role allows the MCP gateway to modify container apps - role_definition_id = "/subscriptions/30501c6c-81f6-41ac-a388-d29cf43a020d/providers/Microsoft.Authorization/roleDefinitions/358470bc-b998-42bd-ab17-a7e34c199c0f" - + # Build scope - resource group level subscription_id = get_subscription_id(cmd.cli_ctx) + + # Use the specific role definition ID for container app management + # This role allows the MCP gateway to modify container apps + role_definition_id = f"/subscriptions/{subscription_id}/providers/Microsoft.Authorization/roleDefinitions/358470bc-b998-42bd-ab17-a7e34c199c0f" scope = f"/subscriptions/{subscription_id}/resourceGroups/{resource_group_name}" logger.info(f"Attempting to assign role to principal {principal_id}") @@ -1249,6 +1703,9 @@ def parse_x_azure_deployment(service): # Handle port if 'port' in ingress_config: overrides['target_port'] = int(ingress_config['port']) + # Handle allowInsecure + if 'allowInsecure' in ingress_config: + overrides['allow_insecure'] = bool(ingress_config['allowInsecure']) elif isinstance(ingress_config, str): overrides['ingress_type'] = ingress_config @@ -1678,8 +2135,21 @@ def print_dry_run_models_deployment(models_config, gpu_profile_info): # Container App name print(f" Container App: models") + # Determine model-runner image from x-azure-deployment.image or GPU profile + model_runner_image = None + for model_name, model_spec in models_config.items(): + if isinstance(model_spec, dict) and 'x-azure-deployment' in model_spec: + azure_deployment = model_spec.get('x-azure-deployment', {}) + if 'image' in azure_deployment: + model_runner_image = azure_deployment['image'] + break + + if not model_runner_image: + is_gpu = gpu_profile_info and 'GPU' in str(gpu_profile_info.get('type', '')).upper() + model_runner_image = 'docker/model-runner:latest-cuda' if is_gpu else 'docker/model-runner:latest' + # Image - print(f" Image: docker/model-runner:latest") + print(f" Image: {model_runner_image}") # Show model names and paths (filter out x-azure-* keys) model_names = [k for k in models_config.keys() if not k.startswith('x-')] diff --git a/src/containerapp/azext_containerapp/_constants.py b/src/containerapp/azext_containerapp/_constants.py index 55e31b6899e..d72831c9264 100644 --- a/src/containerapp/azext_containerapp/_constants.py +++ b/src/containerapp/azext_containerapp/_constants.py @@ -124,6 +124,16 @@ HELLO_WORLD_IMAGE = "mcr.microsoft.com/k8se/quickstart:latest" +# Model runner images +MODEL_RUNNER_IMAGE = "docker/model-runner:latest" +MODEL_RUNNER_IMAGE_CUDA = "docker/model-runner:latest-cuda" +# replace with mcr image when available +MODEL_RUNNER_CONFIG_IMAGE = "simon.azurecr.io/model-runner-config:latest" + +# MCP gateway image +# default image, currently does not exist publicly +MCP_GATEWAY_IMAGE 
= "mcr.microsoft.com/mcp-gateway:latest" + LOGS_STRING = '[{"category":"ContainerAppConsoleLogs","categoryGroup":null,"enabled":true,"retentionPolicy":{"days":0,"enabled":false}},{"category":"ContainerAppSystemLogs","categoryGroup":null,"enabled":true,"retentionPolicy":{"days":0,"enabled":false}}]' # pylint: disable=line-too-long DEFAULT_CONNECTED_CLUSTER_EXTENSION_NAME = "containerapp-ext" diff --git a/src/containerapp/azext_containerapp/custom.py b/src/containerapp/azext_containerapp/custom.py index 0d7f1473782..e135a9794cd 100644 --- a/src/containerapp/azext_containerapp/custom.py +++ b/src/containerapp/azext_containerapp/custom.py @@ -1545,19 +1545,20 @@ def create_containerapps_from_compose(cmd, # pylint: disable=R0914 replace_all=None): from pycomposefile import ComposeFile - from azure.cli.command_modules.containerapp._compose_utils import (build_containerapp_from_compose_service, - check_supported_platform, - warn_about_unsupported_elements, - resolve_ingress_and_target_port, - resolve_registry_from_cli_args, - resolve_transport_from_cli_args, - resolve_service_startup_command, - resolve_cpu_configuration_from_service, - resolve_memory_configuration_from_service, - resolve_replicas_from_service, - resolve_environment_from_service, - resolve_secret_from_service) - from ._compose_utils import validate_memory_and_cpu_setting, parse_x_azure_deployment + from ._compose_utils import (build_containerapp_from_compose_service, + check_supported_platform, + warn_about_unsupported_elements, + resolve_ingress_and_target_port, + resolve_registry_from_cli_args, + resolve_transport_from_cli_args, + resolve_service_startup_command, + resolve_cpu_configuration_from_service, + resolve_memory_configuration_from_service, + resolve_replicas_from_service, + resolve_environment_from_service, + resolve_secret_from_service, + validate_memory_and_cpu_setting, + parse_x_azure_deployment) # Validate managed environment parsed_managed_env = parse_resource_id(managed_env) @@ -1628,16 +1629,30 @@ def create_containerapps_from_compose(cmd, # pylint: disable=R0914 # Get or create GPU workload profile # Extract requested GPU profile type from models x-azure-deployment section # x-azure-deployment is nested inside each model (models.gemma.x-azure-deployment) + # IMPORTANT: We need to read from compose_yaml_original because parse_models_section() + # strips out x-azure-deployment from the returned models dict requested_gpu_type = None + + logger.warning(f"[GPU DEBUG] Inspecting RAW models YAML for GPU profile configuration") + raw_models = compose_yaml_original.get('models', {}) + logger.warning(f"[GPU DEBUG] Raw models keys: {list(raw_models.keys())}") - for model_name, model_config in models.items(): - if isinstance(model_config, dict) and 'x-azure-deployment' in model_config: - azure_deployment = model_config.get('x-azure-deployment', {}) - workload_profiles = azure_deployment.get('workloadProfiles', {}) - requested_gpu_type = workload_profiles.get('workloadProfileType') - if requested_gpu_type: - logger.info(f"Found GPU profile in model '{model_name}': {requested_gpu_type}") - break + for model_name, model_config in raw_models.items(): + logger.warning(f"[GPU DEBUG] Raw model '{model_name}' config type: {type(model_config)}") + if isinstance(model_config, dict): + logger.warning(f"[GPU DEBUG] Raw model '{model_name}' has x-azure-deployment: {'x-azure-deployment' in model_config}") + if 'x-azure-deployment' in model_config: + azure_deployment = model_config.get('x-azure-deployment', {}) + logger.warning(f"[GPU 
DEBUG] Model '{model_name}' azure_deployment: {azure_deployment}") + workload_profiles = azure_deployment.get('workloadProfiles', {}) + logger.warning(f"[GPU DEBUG] Model '{model_name}' workloadProfiles: {workload_profiles}") + requested_gpu_type = workload_profiles.get('workloadProfileType') + logger.warning(f"[GPU DEBUG] Model '{model_name}' requested GPU type: {requested_gpu_type}") + if requested_gpu_type: + logger.info(f"Found GPU profile in model '{model_name}': {requested_gpu_type}") + break + + logger.warning(f"[GPU DEBUG] Final requested_gpu_type to be passed: {requested_gpu_type}") try: gpu_profile_name = create_gpu_workload_profile_if_needed( @@ -1748,18 +1763,24 @@ def create_containerapps_from_compose(cmd, # pylint: disable=R0914 # Check if service depends on mcp-gateway and needs URL injection needs_gateway_url = False gateway_allow_insecure = False + mcp_gateway_vars = [] # Track all variables that reference mcp-gateway - # Check for MCP_GATEWAY_URL in environment + # Check for any environment variable referencing mcp-gateway by value for env_var in environment: var_name = None + var_value = None + if isinstance(env_var, dict): var_name = env_var.get('name') + var_value = env_var.get('value', '') elif isinstance(env_var, str) and '=' in env_var: - var_name = env_var.split('=', 1)[0] + var_name, var_value = env_var.split('=', 1) - if var_name == 'MCP_GATEWAY_URL': + # Check if value contains reference to mcp-gateway service + if var_value and '://mcp-gateway' in var_value: needs_gateway_url = True - break + mcp_gateway_vars.append(var_name) + logger.info(f"Detected MCP gateway reference in {var_name}: {var_value}") # Determine mcp-gateway allowInsecure setting if needs_gateway_url: @@ -1772,8 +1793,11 @@ def create_containerapps_from_compose(cmd, # pylint: disable=R0914 for i, env_var in enumerate(environment): if isinstance(env_var, dict) and 'value' in env_var: - # Check if this is MCP_GATEWAY_URL that needs FQDN injection - if env_var.get('name') == 'MCP_GATEWAY_URL' and needs_gateway_url: + var_name = env_var.get('name') + var_value = env_var.get('value', '') + + # Check if this variable references mcp-gateway and needs FQDN injection + if var_name in mcp_gateway_vars and '://mcp-gateway' in var_value: from ._compose_utils import get_containerapp_fqdn # Determine if mcp-gateway has external or internal ingress gateway_external = False @@ -1788,8 +1812,14 @@ def create_containerapps_from_compose(cmd, # pylint: disable=R0914 gateway_fqdn = get_containerapp_fqdn(cmd, resource_group_name, managed_env_name, 'mcp-gateway', is_external=gateway_external) gateway_protocol = 'http' if gateway_allow_insecure else 'https' gateway_fqdn = gateway_fqdn.replace('https://', f'{gateway_protocol}://') - env_var['value'] = gateway_fqdn - logger.info(f"Injected MCP_GATEWAY_URL with FQDN: {gateway_fqdn}") + + # Extract and preserve the path from the original URL (e.g., /sse from http://mcp-gateway:8811/sse) + import re + path_match = re.search(r'://mcp-gateway(?::\d+)?(/[^\s]*)', var_value) + preserved_path = path_match.group(1) if path_match else '' + + env_var['value'] = gateway_fqdn + preserved_path + logger.info(f"Transformed {var_name} to FQDN with path: {env_var['value']}") else: # Match http://hostname:port or https://hostname:port patterns env_var['value'] = re.sub(r'(https?://[^:/]+):\d+', r'\1', env_var['value']) @@ -1797,7 +1827,7 @@ def create_containerapps_from_compose(cmd, # pylint: disable=R0914 # Handle string format "KEY=VALUE" key, value = env_var.split('=', 1) - if key == 
'MCP_GATEWAY_URL' and needs_gateway_url: + if key in mcp_gateway_vars and '://mcp-gateway' in value: from ._compose_utils import get_containerapp_fqdn gateway_external = False for svc_name in parsed_compose_file.services.keys(): @@ -1811,8 +1841,14 @@ def create_containerapps_from_compose(cmd, # pylint: disable=R0914 gateway_fqdn = get_containerapp_fqdn(cmd, resource_group_name, managed_env_name, 'mcp-gateway', is_external=gateway_external) gateway_protocol = 'http' if gateway_allow_insecure else 'https' gateway_fqdn = gateway_fqdn.replace('https://', f'{gateway_protocol}://') - environment[i] = f"{key}={gateway_fqdn}" - logger.info(f"Injected MCP_GATEWAY_URL with FQDN: {gateway_fqdn}") + + # Extract and preserve the path from the original URL + import re + path_match = re.search(r'://mcp-gateway(?::\d+)?(/[^\s]*)', value) + preserved_path = path_match.group(1) if path_match else '' + + environment[i] = f"{key}={gateway_fqdn}{preserved_path}" + logger.info(f"Transformed {key} to FQDN with path: {gateway_fqdn}{preserved_path}") else: value = re.sub(r'(https?://[^:/]+):\d+', r'\1', value) environment[i] = f"{key}={value}" @@ -1868,7 +1904,9 @@ def create_containerapps_from_compose(cmd, # pylint: disable=R0914 # Phase 3: Inject environment variables for services with models declarations # Services with "models:" section and endpoint_var/model_var get those specific variables injected - if hasattr(service, 'models') and service.models and models: + # Note: pycomposefile sets models=None if not present, so we just check if service.models is truthy + service_models = getattr(service, 'models', None) + if service_models: logger.info(f"Service '{service_name}' has models section - processing endpoint_var and model_var") # Ensure environment list exists @@ -1895,7 +1933,7 @@ def create_containerapps_from_compose(cmd, # pylint: disable=R0914 logger.info(f"Generated MODEL_RUNNER_URL: {models_endpoint_with_path}") # Process each model reference - for model_name, model_config in service.models.items(): + for model_name, model_config in service_models.items(): if isinstance(model_config, dict): endpoint_var = model_config.get('endpoint_var') model_var = model_config.get('model_var') @@ -1917,8 +1955,9 @@ def create_containerapps_from_compose(cmd, # pylint: disable=R0914 logger.info(f" Injected {endpoint_var}={models_endpoint_with_path}") # Inject model_var if specified - if model_var and model_name in models: - model_path = models[model_name].get('model', '') + if model_var: + # Try to get model path from top-level models section, fallback to model name + model_path = models.get(model_name, {}).get('model', model_name) if models else model_name # Check if variable already exists var_exists = any( @@ -1966,11 +2005,17 @@ def create_containerapps_from_compose(cmd, # pylint: disable=R0914 # Get raw service YAML for override parsing raw_service_yaml = compose_yaml_original.get('services', {}).get(service_name, {}) overrides = parse_x_azure_deployment(raw_service_yaml) - - # Priority: x-azure-deployment override > default based on ports + + # Check if this service has models section + has_models = service_name == 'models' + + # Priority: x-azure-deployment override > default based on service type > default based on ports # NOTE: We ignore ingress_type from resolve_ingress_and_target_port because it always returns 'external' if overrides.get('ingress_type') is not None: final_ingress_type = overrides['ingress_type'] + elif is_mcp_gateway or has_models: + # MCP gateway and models apps default to internal 
ingress + final_ingress_type = 'internal' elif service.ports: final_ingress_type = 'internal' # Default to internal if ports exist else: @@ -1979,6 +2024,9 @@ def create_containerapps_from_compose(cmd, # pylint: disable=R0914 # Apply image override if specified in x-azure-deployment final_image = overrides.get('image') or image + # Extract allowInsecure from x-azure-deployment.ingress + allow_insecure = overrides.get('allow_insecure', False) + # Check if container app already exists try: existing_app = show_containerapp(cmd=cmd, resource_group_name=resource_group_name, name=service_name) @@ -2053,6 +2101,7 @@ def create_containerapps_from_compose(cmd, # pylint: disable=R0914 registry_user=registry_username, registry_pass=registry_password, transport=transport_setting, + allow_insecure=allow_insecure, startup_command=startup_command, args=startup_args, cpu=cpu, From 004c93f871b626ab81fd28982a5933acba7146b0 Mon Sep 17 00:00:00 2001 From: Simon Jakesch Date: Thu, 13 Nov 2025 08:24:13 -0600 Subject: [PATCH 03/10] fix: Address PR review feedback - Fix syntax error: Remove duplicate return statement (line 2218) - Fix grammar: 'are yet supported' -> 'are not yet supported' - Remove commented-out code - Fix dry-run allow_insecure to show actual config value - Fix choice_index undefined variable with proper error handling - Remove duplicate 'import re' statements - Fix gpu_profile undefined variable by reading from raw YAML - Revert version from 1.2.0b6 to 1.2.0b5 (maintainers handle releases) - Remove premium-ingress params that came from upstream rebase Addresses feedback from @Greedygre and Copilot AI review --- src/containerapp/azext_containerapp/_compose_utils.py | 11 +++++++---- src/containerapp/azext_containerapp/_params.py | 11 ----------- src/containerapp/azext_containerapp/custom.py | 8 ++------ 3 files changed, 9 insertions(+), 21 deletions(-) diff --git a/src/containerapp/azext_containerapp/_compose_utils.py b/src/containerapp/azext_containerapp/_compose_utils.py index da284f46c8c..3f0fabbc5bb 100644 --- a/src/containerapp/azext_containerapp/_compose_utils.py +++ b/src/containerapp/azext_containerapp/_compose_utils.py @@ -104,7 +104,7 @@ def warn_about_unsupported_build_configuration(compose_service): "target", "secrets", "tags"] if compose_service.build is not None: config_list = resolve_configuration_element_list(compose_service, unsupported_configuration, 'build') - message = "These build configuration settings from the docker-compose file are yet supported." + message = "These build configuration settings from the docker-compose file are not yet supported." message += " Currently, we support supplying a build context and optionally target Dockerfile for a service." message += " See https://aka.ms/containerapp/compose/build_support for more information or to add feedback." if len(config_list) >= 1: @@ -314,12 +314,15 @@ def resolve_memory_configuration_from_service(service): def resolve_port_or_expose_list(ports, name): - if len(ports) > 1: + if len(ports) == 1: + return ports[0] + elif len(ports) > 1: message = f"You have more than one {name} mapping defined in your docker-compose file." message += " Which port would you like to use? 
" choice_index = prompt_choice_list(message, ports) - - return ports[choice_index] + return ports[choice_index] + else: + raise ValueError(f"No {name} mappings defined.") def resolve_ingress_and_target_port(service): diff --git a/src/containerapp/azext_containerapp/_params.py b/src/containerapp/azext_containerapp/_params.py index b62bd64817f..0ed5f52cbce 100644 --- a/src/containerapp/azext_containerapp/_params.py +++ b/src/containerapp/azext_containerapp/_params.py @@ -521,17 +521,6 @@ def load_arguments(self, _): c.argument('mode', arg_type=get_enum_type(['single', 'multiple', 'labels']), help="The active revisions mode for the container app.") c.argument('target_label', help="The label to apply to new revisions. Required for revision mode 'labels'.", is_preview=True) - with self.argument_context('containerapp env premium-ingress') as c: - c.argument('resource_group_name', arg_type=resource_group_name_type, id_part=None) - c.argument('name', options_list=['--name', '-n'], help="The name of the managed environment.") - c.argument('workload_profile_name', options_list=['--workload-profile-name', '-w'], help="The workload profile to run ingress replicas on. This profile must not be shared with any container app or job.") - c.argument('min_replicas', options_list=['--min-replicas'], type=int, deprecate_info=c.deprecate(hide=True, expiration='2.79.0'), help="The workload profile minimum instances is used instead.") - c.argument('max_replicas', options_list=['--max-replicas'], type=int, deprecate_info=c.deprecate(hide=True, expiration='2.79.0'), help="The workload profile maximum instances is used instead.") - c.argument('termination_grace_period', options_list=['--termination-grace-period', '-t'], type=int, help="Time in seconds to drain requests during ingress shutdown. Default 500, minimum 0, maximum 3600.") - c.argument('request_idle_timeout', options_list=['--request-idle-timeout'], type=int, help="Timeout in minutes for idle requests. Default 4, minimum 4, maximum 30.") - c.argument('header_count_limit', options_list=['--header-count-limit'], type=int, help="Limit of http headers per request. Default 100, minimum 1.") - - # Compose with self.argument_context('containerapp compose create') as c: c.argument('dry_run', options_list=['--dry-run'], arg_type=get_three_state_flag(), help='Preview deployment without creating actual Azure resources. 
Generates a detailed report of container apps, workload profiles, role assignments, and environment variable injections.') diff --git a/src/containerapp/azext_containerapp/custom.py b/src/containerapp/azext_containerapp/custom.py index e135a9794cd..646bb6008d8 100644 --- a/src/containerapp/azext_containerapp/custom.py +++ b/src/containerapp/azext_containerapp/custom.py @@ -1814,7 +1814,6 @@ def create_containerapps_from_compose(cmd, # pylint: disable=R0914 gateway_fqdn = gateway_fqdn.replace('https://', f'{gateway_protocol}://') # Extract and preserve the path from the original URL (e.g., /sse from http://mcp-gateway:8811/sse) - import re path_match = re.search(r'://mcp-gateway(?::\d+)?(/[^\s]*)', var_value) preserved_path = path_match.group(1) if path_match else '' @@ -1843,7 +1842,6 @@ def create_containerapps_from_compose(cmd, # pylint: disable=R0914 gateway_fqdn = gateway_fqdn.replace('https://', f'{gateway_protocol}://') # Extract and preserve the path from the original URL - import re path_match = re.search(r'://mcp-gateway(?::\d+)?(/[^\s]*)', value) preserved_path = path_match.group(1) if path_match else '' @@ -2133,14 +2131,12 @@ def create_containerapps_from_compose(cmd, # pylint: disable=R0914 replicas=replicas, is_models_service=(service_name == 'models'), is_mcp_gateway=is_mcp_gateway, - gpu_type=gpu_profile.get('type') if (service_name == 'models' and 'gpu_profile' in locals()) else None, + gpu_type=raw_service_yaml.get('x-azure-deployment', {}).get('workloadProfiles', {}).get('workloadProfileType') if service_name == 'models' else None, raw_service=raw_service_yaml, models_config=models ) dry_run_services.append(service_config) - # if service_name == 'models': - # dry_run_has_models = True if is_mcp_gateway: dry_run_has_gateway = True @@ -2215,7 +2211,7 @@ def create_containerapps_from_compose(cmd, # pylint: disable=R0914 ) # Return empty list (no actual deployment) - return None # Dry-run complete, no resources created return containerapps_from_compose + return None # Dry-run complete, no resources created def set_workload_profile(cmd, resource_group_name, env_name, workload_profile_name, workload_profile_type=None, min_nodes=None, max_nodes=None): From 815954b09bfe675f1b594e4ab39785093e74b93f Mon Sep 17 00:00:00 2001 From: Simon Jakesch Date: Thu, 13 Nov 2025 11:37:56 -0600 Subject: [PATCH 04/10] fix: Resolve CI linting failures - Fix undefined variables (update_containerapp_from_compose, ContainerAppClient) - Remove 322 trailing whitespace errors across all Python files - Fix 4 unused variable warnings (models_app, models_list, gateway_url) - Convert f-strings without placeholders to regular strings - Fix PEP8 formatting (spacing, indentation, blank lines) Addresses CI test failures from PR #9422 review feedback. 
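The gateway URL handling adjusted in the custom.py hunks above reduces to: swap the compose-network host (mcp-gateway, with an optional port) for the Container Apps FQDN, keep the request path, and downgrade to http only when allowInsecure is set. A standalone sketch (the FQDN in the example is hypothetical):

import re


def sketch_rewrite_gateway_url(value, gateway_fqdn, allow_insecure=False):
    # Preserve any path from the compose-style URL, e.g. /sse in
    # http://mcp-gateway:8811/sse.
    match = re.search(r'://mcp-gateway(?::\d+)?(/[^\s]*)', value)
    preserved_path = match.group(1) if match else ''
    # Gateway is reached over https unless allowInsecure was requested.
    protocol = 'http' if allow_insecure else 'https'
    gateway_fqdn = gateway_fqdn.replace('https://', f'{protocol}://')
    return gateway_fqdn + preserved_path


print(sketch_rewrite_gateway_url(
    "http://mcp-gateway:8811/sse",
    "https://mcp-gateway.internal.example-env.azurecontainerapps.io"))
# -> https://mcp-gateway.internal.example-env.azurecontainerapps.io/sse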
--- .../azext_containerapp/_compose_utils.py | 1140 ++++-- .../azext_containerapp/_validators.py | 556 +-- src/containerapp/azext_containerapp/custom.py | 3588 ++++++++++++----- .../app.py | 38 +- .../latest/test_containerapp_auth_commands.py | 6 +- .../latest/test_containerapp_commands.py | 6 +- .../test_containerapp_compose_command.py | 2 +- .../test_containerapp_compose_environment.py | 2 +- .../test_containerapp_compose_ingress.py | 2 +- .../test_containerapp_compose_mcp_gateway.py | 18 +- .../test_containerapp_compose_models.py | 56 +- .../test_containerapp_compose_registries.py | 2 +- .../test_containerapp_compose_resources.py | 2 +- .../latest/test_containerapp_compose_scale.py | 2 +- .../test_containerapp_compose_secrets.py | 4 +- ...ontainerapp_compose_transport_overrides.py | 4 +- ..._containerapp_create_update_with_source.py | 202 +- .../test_containerapp_dapr_resiliency.py | 4 +- .../latest/test_containerapp_env_commands.py | 28 +- .../latest/test_containerapp_env_telemetry.py | 14 +- ...est_containerapp_ingress_sticky_session.py | 4 +- .../test_containerapp_java_component.py | 10 +- .../test_containerapp_maintenance_config.py | 8 +- .../test_containerapp_mount_secret_volume.py | 30 +- ...t_containerapp_session_code_interpreter.py | 2 +- ...ainerapp_session_code_interpreter_files.py | 2 +- ...nerapp_session_code_interpreter_nodelts.py | 2 +- .../latest/test_containerapp_sessionpool.py | 12 +- .../tests/latest/test_containerappjob_crud.py | 10 +- ...st_containerappjob_event_triggered_crud.py | 2 +- .../latest/test_containerappjob_executions.py | 12 +- .../test_containerappjob_with_identity.py | 2 +- .../test_containerappjob_with_secrets.py | 1 - .../aio/operations/_extensions_operations.py | 8 +- ...flux_config_operation_status_operations.py | 2 +- .../_flux_configurations_operations.py | 8 +- .../_operation_status_operations.py | 6 +- .../v2022_03_01/aio/operations/_operations.py | 4 +- ...ource_control_configurations_operations.py | 8 +- .../operations/_extensions_operations.py | 8 +- ...flux_config_operation_status_operations.py | 2 +- .../_flux_configurations_operations.py | 8 +- .../_operation_status_operations.py | 6 +- .../v2022_03_01/operations/_operations.py | 4 +- ...ource_control_configurations_operations.py | 8 +- 45 files changed, 3882 insertions(+), 1963 deletions(-) diff --git a/src/containerapp/azext_containerapp/_compose_utils.py b/src/containerapp/azext_containerapp/_compose_utils.py index 3f0fabbc5bb..a424daebeb6 100644 --- a/src/containerapp/azext_containerapp/_compose_utils.py +++ b/src/containerapp/azext_containerapp/_compose_utils.py @@ -2,7 +2,10 @@ # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License.txt in the project root for license information. 
# -------------------------------------------------------------------------------------------- -# pylint: disable=line-too-long, consider-using-f-string, no-else-return, duplicate-string-formatting-argument, expression-not-assigned, too-many-locals, logging-fstring-interpolation, arguments-differ, abstract-method, logging-format-interpolation, broad-except +# pylint: disable=line-too-long, consider-using-f-string, no-else-return, +# duplicate-string-formatting-argument, expression-not-assigned, +# too-many-locals, logging-fstring-interpolation, arguments-differ, +# abstract-method, logging-format-interpolation, broad-except from knack.log import get_logger from knack.prompting import prompt, prompt_choice_list @@ -22,12 +25,12 @@ logger = get_logger(__name__) - # ============================================================================ # Functions copied from azure-cli core to make extension self-contained # These eliminate the dependency on azure-cli core's containerapp module # ============================================================================ + def build_containerapp_from_compose_service(cmd, name, source, @@ -52,7 +55,8 @@ def build_containerapp_from_compose_service(cmd, _get_registry_details, _get_acr_from_image) - resource_group = ResourceGroup(cmd, name=resource_group_name, location=location) + resource_group = ResourceGroup( + cmd, name=resource_group_name, location=location) env = ContainerAppEnvironment(cmd, managed_env, resource_group, @@ -84,7 +88,10 @@ def build_containerapp_from_compose_service(cmd, return app.image, app.registry_server, app.registry_user, app.registry_pass -def resolve_configuration_element_list(compose_service, unsupported_configuration, area=None): +def resolve_configuration_element_list( + compose_service, + unsupported_configuration, + area=None): if area is not None: compose_service = getattr(compose_service, area) config_list = [] @@ -94,16 +101,29 @@ def resolve_configuration_element_list(compose_service, unsupported_configuratio except AttributeError: logger.critical("Failed to resolve %s", configuration_element) if attribute is not None: - config_list.append(f"{compose_service.compose_path}/{configuration_element}") + config_list.append( + f"{compose_service.compose_path}/{configuration_element}") return config_list def warn_about_unsupported_build_configuration(compose_service): - unsupported_configuration = ["args", "ssh", "cache_from", "cache_to", "extra_hosts", - "isolation", "labels", "no_cache", "pull", "shm_size", - "target", "secrets", "tags"] + unsupported_configuration = [ + "args", + "ssh", + "cache_from", + "cache_to", + "extra_hosts", + "isolation", + "labels", + "no_cache", + "pull", + "shm_size", + "target", + "secrets", + "tags"] if compose_service.build is not None: - config_list = resolve_configuration_element_list(compose_service, unsupported_configuration, 'build') + config_list = resolve_configuration_element_list( + compose_service, unsupported_configuration, 'build') message = "These build configuration settings from the docker-compose file are not yet supported." message += " Currently, we support supplying a build context and optionally target Dockerfile for a service." message += " See https://aka.ms/containerapp/compose/build_support for more information or to add feedback." 
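The reformatted warning helpers above all funnel through resolve_configuration_element_list, which collects every unsupported compose key that is actually set and reports it by its compose path. A minimal sketch of that collection step (DummyBuild stands in for pycomposefile's build section and is not its real object model):

class DummyBuild:
    compose_path = "services/web/build"
    args = ["FOO=bar"]   # set -> reported
    ssh = None           # unset -> skipped
    target = "runtime"   # set -> reported


def sketch_collect_unsupported(section, unsupported_keys):
    reported = []
    for key in unsupported_keys:
        if getattr(section, key, None) is not None:
            reported.append(f"{section.compose_path}/{key}")
    return reported


print(sketch_collect_unsupported(DummyBuild(), ["args", "ssh", "target"]))
# -> ['services/web/build/args', 'services/web/build/target']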
@@ -114,18 +134,64 @@ def warn_about_unsupported_build_configuration(compose_service): def warn_about_unsupported_runtime_host_configuration(compose_service): - unsupported_configuration = ["blkio_config", "cpu_count", "cpu_percent", "cpu_shares", "cpu_period", - "cpu_quota", "cpu_rt_runtime", "cpu_rt_period", "cpuset", "cap_add", - "cap_drop", "cgroup_parent", "configs", "credential_spec", - "device_cgroup_rules", "devices", "dns", "dns_opt", "dns_search", - "domainname", "external_links", "extra_hosts", "group_add", "healthcheck", - "hostname", "init", "ipc", "isolation", "links", "logging", "mem_limit", - "mem_swappiness", "memswap_limit", "oom_kill_disable", "oom_score_adj", - "pid", "pids_limit", "privileged", "profiles", "pull_policy", "read_only", - "restart", "runtime", "security_opt", "shm_size", "stdin_open", - "stop_grace_period", "stop_signal", "storage_opt", "sysctls", "tmpfs", - "tty", "ulimits", "user", "working_dir"] - config_list = resolve_configuration_element_list(compose_service, unsupported_configuration) + unsupported_configuration = [ + "blkio_config", + "cpu_count", + "cpu_percent", + "cpu_shares", + "cpu_period", + "cpu_quota", + "cpu_rt_runtime", + "cpu_rt_period", + "cpuset", + "cap_add", + "cap_drop", + "cgroup_parent", + "configs", + "credential_spec", + "device_cgroup_rules", + "devices", + "dns", + "dns_opt", + "dns_search", + "domainname", + "external_links", + "extra_hosts", + "group_add", + "healthcheck", + "hostname", + "init", + "ipc", + "isolation", + "links", + "logging", + "mem_limit", + "mem_swappiness", + "memswap_limit", + "oom_kill_disable", + "oom_score_adj", + "pid", + "pids_limit", + "privileged", + "profiles", + "pull_policy", + "read_only", + "restart", + "runtime", + "security_opt", + "shm_size", + "stdin_open", + "stop_grace_period", + "stop_signal", + "storage_opt", + "sysctls", + "tmpfs", + "tty", + "ulimits", + "user", + "working_dir"] + config_list = resolve_configuration_element_list( + compose_service, unsupported_configuration) message = "These container and host configuration elements from the docker-compose file are not supported" message += " in Azure Container Apps. For more information about supported configuration," message += " please see https://aka.ms/containerapp/compose/configuration" @@ -137,7 +203,8 @@ def warn_about_unsupported_runtime_host_configuration(compose_service): def warn_about_unsupported_volumes(compose_service): unsupported_configuration = ["volumes", "volumes_from"] - config_list = resolve_configuration_element_list(compose_service, unsupported_configuration) + config_list = resolve_configuration_element_list( + compose_service, unsupported_configuration) message = "These volume mount elements from the docker-compose file are not supported" message += " in Azure Container Apps. For more information about supported storage configuration," message += " please see https://aka.ms/containerapp/compose/volumes" @@ -149,7 +216,8 @@ def warn_about_unsupported_volumes(compose_service): def warn_about_unsupported_network(compose_service): unsupported_configuration = ["networks", "network_mode", "mac_address"] - config_list = resolve_configuration_element_list(compose_service, unsupported_configuration) + config_list = resolve_configuration_element_list( + compose_service, unsupported_configuration) message = "These network configuration settings from the docker-compose file are not supported" message += " in Azure Container Apps. 
For more information about supported networking configuration," message += " please see https://aka.ms/containerapp/compose/networking" @@ -180,7 +248,8 @@ def service_deploy_exists(service): def service_deploy_resources_exists(service): - return service_deploy_exists(service) and service.deploy.resources is not None + return service_deploy_exists( + service) and service.deploy.resources is not None def flatten_list(source_value): @@ -200,7 +269,10 @@ def resolve_transport_from_cli_args(service_name, transport): return 'auto' -def resolve_registry_from_cli_args(registry_server, registry_user, registry_pass): +def resolve_registry_from_cli_args( + registry_server, + registry_user, + registry_pass): if registry_server is not None: if registry_user is None and registry_pass is None: registry_user = prompt("Please enter the registry's username: ") @@ -248,7 +320,8 @@ def resolve_secret_from_service(service, secrets_map): if len(secret_array) == 0: return (None, None) - logger.warning("Note: Secrets will be mapped as secure environment variables in Azure Container Apps.") + logger.warning( + "Note: Secrets will be mapped as secure environment variables in Azure Container Apps.") return (secret_array, secret_env_ref) @@ -266,8 +339,9 @@ def resolve_replicas_from_service(service): return replicas - # Ported from azure-cli commit 2f7ef21a0d6c4afb9f066c0d65affcc84a8b36a4. + + def resolve_cpu_configuration_from_service(service): cpu = None # Check x-azure-deployment.resources.cpu first @@ -276,21 +350,22 @@ def resolve_cpu_configuration_from_service(service): if 'cpu' in resources: cpu = str(resources['cpu']) return cpu - + if service_deploy_resources_exists(service): resources = service.deploy.resources if resources.reservations is not None and resources.reservations.cpus is not None: cpu = str(resources.reservations.cpus) elif service.cpus is not None: cpu = str(service.cpus) - + # Default to 1.0 CPU if not specified if cpu is None: cpu = '1.0' return cpu - # Ported from azure-cli commit 2f7ef21a0d6c4afb9f066c0d65affcc84a8b36a4. + + def resolve_memory_configuration_from_service(service): memory = None # Check x-azure-deployment.resources.memory first @@ -299,14 +374,14 @@ def resolve_memory_configuration_from_service(service): if 'memory' in resources: memory = str(resources['memory']) return memory - + if service_deploy_resources_exists(service): resources = service.deploy.resources if resources.reservations is not None and resources.reservations.memory is not None: memory = str(resources.reservations.memory.as_gigabytes()) elif service.mem_reservation is not None: memory = str(service.mem_reservation.as_gigabytes()) - + # Default to 2Gi if not specified if memory is None: memory = '2' @@ -352,8 +427,9 @@ def resolve_ingress_and_target_port(service): target_port = None return (ingress_type, target_port) - # Ported from azure-cli commit 2f7ef21a0d6c4afb9f066c0d65affcc84a8b36a4. 
+ + def resolve_service_startup_command(service): startup_command_array = [] startup_args_array = [] @@ -362,7 +438,11 @@ def resolve_service_startup_command(service): startup_command_array.append(startup_command) if service.command is not None: # Preserve individual command items as separate args - if hasattr(service.command, '__iter__') and not isinstance(service.command, str): + if hasattr( + service.command, + '__iter__') and not isinstance( + service.command, + str): startup_args_array = list(service.command) else: startup_args = service.command.command_string() @@ -370,7 +450,11 @@ def resolve_service_startup_command(service): elif service.command is not None: # When there's no entrypoint, command should be args # Preserve individual command items as separate args - if hasattr(service.command, '__iter__') and not isinstance(service.command, str): + if hasattr( + service.command, + '__iter__') and not isinstance( + service.command, + str): startup_command_array = None startup_args_array = list(service.command) else: @@ -382,7 +466,6 @@ def resolve_service_startup_command(service): startup_args_array = None return (startup_command_array, startup_args_array) - # ============================================================================ # End of functions copied from azure-cli core # ============================================================================ @@ -456,47 +539,49 @@ def parse_mcp_servers_from_command(service): def should_deploy_model_runner(compose_yaml, parsed_compose_file): """Ported helper from azure-cli commit 092e028c556c5d98c06ea1a337c26b97fe00ce59.""" - return _ported_should_deploy_model_runner(compose_yaml, parsed_compose_file) + return _ported_should_deploy_model_runner( + compose_yaml, parsed_compose_file) def detect_mcp_gateway_service(service_name, service): """ Check if a service is an MCP gateway by examining the image name. - + Args: service_name: Name of the service service: Service configuration object - + Returns: Boolean indicating if this is an MCP gateway service """ if not hasattr(service, 'image') or not service.image: return False - + # Check if image contains 'mcp-gateway' substring is_mcp_gateway = 'mcp-gateway' in service.image.lower() - + if is_mcp_gateway: from knack.log import get_logger logger = get_logger(__name__) - logger.info(f"Detected MCP gateway service: {service_name} (image: {service.image})") - + logger.info( + f"Detected MCP gateway service: {service_name} (image: {service.image})") + return is_mcp_gateway def resolve_dependency_graph(services): """ Build service dependency order from depends_on relationships. 
- + Args: services: Dictionary of service configurations - + Returns: List of service names in dependency order (dependencies first) """ from knack.log import get_logger logger = get_logger(__name__) - + # Build adjacency list for dependencies dependencies = {} for service_name, service in services.items(): @@ -507,11 +592,11 @@ def resolve_dependency_graph(services): elif isinstance(service.depends_on, dict): deps = list(service.depends_on.keys()) dependencies[service_name] = deps - + # Topological sort using DFS visited = set() result = [] - + def visit(name): if name in visited: return @@ -520,58 +605,58 @@ def visit(name): if dep in services: # Only visit dependencies that exist visit(dep) result.append(name) - + for service_name in services.keys(): visit(service_name) - + logger.info(f"Resolved service deployment order: {result}") return result - # ============================================================================ # Phase 3: Models Deployment Implementation # ============================================================================ + def validate_models_configuration(models): """ Validate models configuration from compose file. - + Args: models: Dictionary of model configurations from compose file - + Returns: Boolean indicating if models configuration is valid - + Raises: InvalidArgumentValueError: If model names are invalid DNS labels """ from knack.log import get_logger from azure.cli.core.azclierror import InvalidArgumentValueError import re - + logger = get_logger(__name__) - + if not models: return True - - # DNS label regex: lowercase alphanumeric and hyphens, start/end with alphanumeric + + # DNS label regex: lowercase alphanumeric and hyphens, start/end with + # alphanumeric dns_label_pattern = re.compile(r'^[a-z0-9]([-a-z0-9]*[a-z0-9])?$') - + for model_name, model_config in models.items(): # Skip x-azure-deployment metadata if model_name == 'x-azure-deployment': continue - + if not dns_label_pattern.match(model_name): raise InvalidArgumentValueError( f"Invalid model name '{model_name}'. Model names must be valid DNS labels: " - f"lowercase alphanumeric characters or hyphens, starting and ending with alphanumeric." - ) - + f"lowercase alphanumeric characters or hyphens, starting and ending with alphanumeric.") + # Validate source format if isinstance(model_config, dict) and 'source' in model_config: validate_model_source(model_name, model_config['source']) - + logger.info(f"Validated {len(models)} model configuration(s)") return True @@ -579,38 +664,39 @@ def validate_models_configuration(models): def validate_model_source(model_name, source): """ Validate model source format. - + Args: model_name: Name of the model source: Model source string (e.g., ollama://phi, hf://model-name) - + Raises: InvalidArgumentValueError: If source format is invalid """ from knack.log import get_logger from azure.cli.core.azclierror import InvalidArgumentValueError - + logger = get_logger(__name__) - + if not source: raise InvalidArgumentValueError( f"Model '{model_name}' missing required 'source' field" ) - + valid_prefixes = ['ollama://', 'hf://', 'https://', 'azureml://'] - + if not any(source.startswith(prefix) for prefix in valid_prefixes): raise InvalidArgumentValueError( f"Invalid source '{source}' for model '{model_name}'. 
" f"Source must start with one of: {', '.join(valid_prefixes)}" ) - + logger.info(f"Validated model source for '{model_name}': {source}") def get_model_runner_environment_vars(models_config, aca_environment_name): """Ported helper from azure-cli commit 092e028c556c5d98c06ea1a337c26b97fe00ce59.""" - return _ported_get_model_runner_environment_vars(models_config, aca_environment_name) + return _ported_get_model_runner_environment_vars( + models_config, aca_environment_name) def get_mcp_gateway_environment_vars(aca_environment_name): @@ -618,16 +704,20 @@ def get_mcp_gateway_environment_vars(aca_environment_name): return _ported_get_mcp_gateway_environment_vars(aca_environment_name) -def check_gpu_profile_availability(cmd, resource_group_name, env_name, location): +def check_gpu_profile_availability( + cmd, + resource_group_name, + env_name, + location): """ Check if GPU workload profiles are available in the region. - + Args: cmd: Azure CLI command context resource_group_name: Resource group name env_name: Container Apps environment name location: Azure region - + Returns: List of available GPU profile types """ @@ -635,15 +725,15 @@ def check_gpu_profile_availability(cmd, resource_group_name, env_name, location) from azure.cli.core.util import send_raw_request from azure.cli.core.commands.client_factory import get_subscription_id import json - + logger = get_logger(__name__) - + try: # Use Azure REST API to list supported workload profiles management_hostname = cmd.cli_ctx.cloud.endpoints.resource_manager sub_id = get_subscription_id(cmd.cli_ctx) api_version = "2024-03-01" - + url_fmt = "{}/subscriptions/{}/providers/Microsoft.App/locations/{}/availableManagedEnvironmentsWorkloadProfileTypes?api-version={}" request_url = url_fmt.format( management_hostname.strip('/'), @@ -651,86 +741,102 @@ def check_gpu_profile_availability(cmd, resource_group_name, env_name, location) location, api_version ) - + r = send_raw_request(cmd.cli_ctx, "GET", request_url) response = r.json() - + supported_profiles = response.get('value', []) - + # Filter for GPU profiles gpu_profiles = [ - profile for profile in supported_profiles - if 'GPU' in profile.get('name', '').upper() or - 'GPU' in profile.get('properties', {}).get('category', '').upper() - ] - + profile for profile in supported_profiles if 'GPU' in profile.get( + 'name', '').upper() or 'GPU' in profile.get( + 'properties', {}).get( + 'category', '').upper()] + if gpu_profiles: - logger.info(f"Found {len(gpu_profiles)} GPU profile(s) in {location}") + logger.info( + f"Found {len(gpu_profiles)} GPU profile(s) in {location}") else: logger.warning(f"No GPU profiles available in {location}") - + return gpu_profiles - + except Exception as e: logger.warning(f"Failed to check GPU profile availability: {str(e)}") return [] -def wait_for_environment_provisioning(cmd, resource_group_name, env_name, timeout_seconds=600): + +def wait_for_environment_provisioning( + cmd, + resource_group_name, + env_name, + timeout_seconds=600): """ Wait for Container Apps environment to reach 'Succeeded' provisioning state. 
- + Args: cmd: Azure CLI command context resource_group_name: Resource group name env_name: Container Apps environment name timeout_seconds: Maximum time to wait (default 600 seconds = 10 minutes) - + Raises: Exception: If environment doesn't reach Succeeded state within timeout """ from knack.log import get_logger from ._clients import ManagedEnvironmentClient import time - + logger = get_logger(__name__) logger.info(f"Waiting for environment '{env_name}' to be provisioned...") - + start_time = time.time() while True: env = ManagedEnvironmentClient.show(cmd, resource_group_name, env_name) - provisioning_state = env.get('properties', {}).get('provisioningState', 'Unknown') - + provisioning_state = env.get( + 'properties', {}).get( + 'provisioningState', 'Unknown') + logger.info(f"Environment provisioning state: {provisioning_state}") - + if provisioning_state == 'Succeeded': logger.info(f"Environment '{env_name}' is ready") return - + if provisioning_state in ['Failed', 'Canceled']: - raise Exception(f"Environment provisioning failed with state: {provisioning_state}") - + raise Exception( + f"Environment provisioning failed with state: {provisioning_state}") + elapsed = time.time() - start_time if elapsed > timeout_seconds: - raise Exception(f"Timeout waiting for environment provisioning. Current state: {provisioning_state}") - - logger.info(f"Waiting... (elapsed: {int(elapsed)}s, timeout: {timeout_seconds}s)") + raise Exception( + f"Timeout waiting for environment provisioning. Current state: {provisioning_state}") + + logger.info( + f"Waiting... (elapsed: {int(elapsed)}s, timeout: {timeout_seconds}s)") time.sleep(5) # Check every 5 seconds -def create_gpu_workload_profile_if_needed(cmd, resource_group_name, env_name, location, requested_gpu_profile_type=None): +def create_gpu_workload_profile_if_needed( + cmd, + resource_group_name, + env_name, + location, + requested_gpu_profile_type=None): """ Check for existing GPU profile and create one if needed. 
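    Typical values for requested_gpu_profile_type, based on the selection logic
    further down, are 'Consumption-GPU-NC8as-T4' and 'Consumption-GPU-NC24-A100';
    Consumption-* types are added to the environment's workloadProfiles via a
    PATCH, while other types fall through to a dedicated workload-profile PUT.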
- + Args: cmd: Azure CLI command context resource_group_name: Resource group name env_name: Container Apps environment name location: Azure region requested_gpu_profile_type: Specific GPU profile type requested (e.g., Consumption-GPU-NC8as-T4) - + Returns: Name of the GPU workload profile to use - + Raises: ResourceNotFoundError: If no GPU profiles available in region """ @@ -740,132 +846,164 @@ def create_gpu_workload_profile_if_needed(cmd, resource_group_name, env_name, lo from ._clients import ManagedEnvironmentClient logger = get_logger(__name__) - logger.warning(f"[GPU DEBUG] create_gpu_workload_profile_if_needed called with requested_gpu_profile_type: {requested_gpu_profile_type}") # Check if environment already has a GPU profile that matches the request + # Check if environment already has a GPU profile that matches the request + logger.warning( + f"[GPU DEBUG] create_gpu_workload_profile_if_needed called with requested_gpu_profile_type: {requested_gpu_profile_type}") try: # Use class methods directly - no instantiation needed env = ManagedEnvironmentClient.show(cmd, resource_group_name, env_name) - - workload_profiles = env.get('properties', {}).get('workloadProfiles', []) - + + workload_profiles = env.get( + 'properties', {}).get( + 'workloadProfiles', []) + # If specific GPU type requested, check if it exists if requested_gpu_profile_type: for profile in workload_profiles: - if profile.get('workloadProfileType') == requested_gpu_profile_type: - logger.info(f"Found requested GPU profile in environment: {requested_gpu_profile_type}") + if profile.get( + 'workloadProfileType') == requested_gpu_profile_type: + logger.info( + f"Found requested GPU profile in environment: {requested_gpu_profile_type}") return requested_gpu_profile_type else: # No specific request, return any GPU profile found for profile in workload_profiles: profile_type = profile.get('workloadProfileType', '') if 'GPU' in profile_type.upper(): - logger.info(f"Found existing GPU profile: {profile.get('name')}") + logger.info( + f"Found existing GPU profile: {profile.get('name')}") return profile.get('name') except Exception as e: logger.warning(f"Failed to check existing profiles: {str(e)}") - + # Get available GPU profiles in region - available_gpu = check_gpu_profile_availability(cmd, resource_group_name, env_name, location) - + available_gpu = check_gpu_profile_availability( + cmd, resource_group_name, env_name, location) + if not available_gpu: alternatives_msg = "GPU workload profiles are not available in this region." 
- # Try to suggest nearby regions (simplified - would need proper region mapping) + # Try to suggest nearby regions (simplified - would need proper region + # mapping) alternatives_msg += "\n\nTry deploying to regions with GPU support: westus3, eastus, northeurope" raise ResourceNotFoundError(alternatives_msg) - + # Use requested GPU profile type if provided, otherwise use first available if requested_gpu_profile_type: # Validate that requested profile is available - # The API returns profiles with 'name' field containing the workload profile type + # The API returns profiles with 'name' field containing the workload + # profile type available_types = [p.get('name') for p in available_gpu] - logger.warning(f"[GPU DEBUG] Requested GPU profile: {requested_gpu_profile_type}") - logger.warning(f"[GPU DEBUG] Available GPU profiles in {location}: {available_types}") + logger.warning( + f"[GPU DEBUG] Requested GPU profile: {requested_gpu_profile_type}") + logger.warning( + f"[GPU DEBUG] Available GPU profiles in {location}: {available_types}") # Pass through the requested type as-is - it's already the API value # (e.g., Consumption-GPU-NC8as-T4, Consumption-GPU-NC24-A100, Consumption, Flex) if requested_gpu_profile_type in available_types: gpu_profile_type = requested_gpu_profile_type - logger.warning(f"[GPU DEBUG] ✅ Requested profile IS available, using: {gpu_profile_type}") - logger.info(f"Using requested GPU profile type: {gpu_profile_type}") + logger.warning( + f"[GPU DEBUG] ✅ Requested profile IS available, using: {gpu_profile_type}") + logger.info( + f"Using requested GPU profile type: {gpu_profile_type}") else: # Don't fall back - user explicitly requested this type - logger.error(f"Requested GPU profile '{requested_gpu_profile_type}' not available in {location}") + logger.error( + f"Requested GPU profile '{requested_gpu_profile_type}' not available in {location}") logger.error(f"Available GPU profiles: {available_types}") - logger.warning(f"[GPU DEBUG] ❌ Requested profile NOT available - will raise error") + logger.warning( + "[GPU DEBUG] ❌ Requested profile NOT available - will raise error") raise ResourceNotFoundError( f"Requested workload profile type '{requested_gpu_profile_type}' is not available in region '{location}'. 
" - f"Available types: {', '.join(available_types)}" - ) + f"Available types: {', '.join(available_types)}") else: # Default to T4 if available, otherwise first available available_types = [p.get('name') for p in available_gpu] if 'Consumption-GPU-NC8as-T4' in available_types: gpu_profile_type = 'Consumption-GPU-NC8as-T4' - logger.warning(f"[GPU DEBUG] ⚠️ No specific GPU profile requested, defaulting to T4: {gpu_profile_type}") + logger.warning( + f"[GPU DEBUG] ⚠️ No specific GPU profile requested, defaulting to T4: {gpu_profile_type}") else: gpu_profile_type = available_gpu[0].get('name') - logger.warning(f"[GPU DEBUG] ⚠️ No specific GPU profile requested, T4 not available, defaulting to first: {gpu_profile_type}") - logger.info(f"No specific GPU profile requested, using: {gpu_profile_type}") - + logger.warning( + f"[GPU DEBUG] ⚠️ No specific GPU profile requested, T4 not available, defaulting to first: {gpu_profile_type}") + logger.info( + f"No specific GPU profile requested, using: {gpu_profile_type}") + # Check if it's a consumption-based GPU profile - logger.warning(f"[GPU DEBUG] Selected gpu_profile_type: {gpu_profile_type}") + logger.warning( + f"[GPU DEBUG] Selected gpu_profile_type: {gpu_profile_type}") if gpu_profile_type.startswith('Consumption-'): - # Consumption profiles need to be added to the environment's workloadProfiles array - logger.warning(f"[GPU DEBUG] Profile is consumption-based, will add to environment") - logger.info(f"Adding consumption-based GPU profile to environment: {gpu_profile_type}") - + # Consumption profiles need to be added to the environment's + # workloadProfiles array + logger.warning( + "[GPU DEBUG] Profile is consumption-based, will add to environment") + logger.info( + f"Adding consumption-based GPU profile to environment: {gpu_profile_type}") + from azure.cli.core.util import send_raw_request from azure.cli.core.commands.client_factory import get_subscription_id import json - + # Get current environment configuration env = ManagedEnvironmentClient.show(cmd, resource_group_name, env_name) - + # Check if profile already exists - existing_profiles = env.get('properties', {}).get('workloadProfiles', []) + existing_profiles = env.get( + 'properties', {}).get( + 'workloadProfiles', []) profile_exists = False for profile in existing_profiles: if profile.get('workloadProfileType') == gpu_profile_type: profile_exists = True logger.info(f"GPU profile already exists: {gpu_profile_type}") break - + # If profile already exists, skip PATCH and return early if profile_exists: - logger.info(f"Skipping environment update - profile '{gpu_profile_type}' already configured") - # Wait for environment to be ready in case it's still provisioning from a previous operation - wait_for_environment_provisioning(cmd, resource_group_name, env_name) + logger.info( + f"Skipping environment update - profile '{gpu_profile_type}' already configured") + # Wait for environment to be ready in case it's still provisioning + # from a previous operation + wait_for_environment_provisioning( + cmd, resource_group_name, env_name) return gpu_profile_type - + # Profile doesn't exist - add it to the environment - logger.info(f"Adding new GPU profile '{gpu_profile_type}' to environment") - + logger.info( + f"Adding new GPU profile '{gpu_profile_type}' to environment") + new_profile = { "name": gpu_profile_type, "workloadProfileType": gpu_profile_type } - - # Clean existing profiles - remove unsupported properties like enableFips + + # Clean existing profiles - remove unsupported properties 
like + # enableFips cleaned_profiles = [] for profile in existing_profiles: cleaned = { "name": profile.get("name"), "workloadProfileType": profile.get("workloadProfileType") } - # Only add minimumCount/maximumCount if they exist and aren't for Consumption profile - if not profile.get("workloadProfileType", "").startswith("Consumption"): + # Only add minimumCount/maximumCount if they exist and aren't for + # Consumption profile + if not profile.get( + "workloadProfileType", + "").startswith("Consumption"): if "minimumCount" in profile: cleaned["minimumCount"] = profile["minimumCount"] if "maximumCount" in profile: cleaned["maximumCount"] = profile["maximumCount"] cleaned_profiles.append(cleaned) - + cleaned_profiles.append(new_profile) - + # Update the environment management_hostname = cmd.cli_ctx.cloud.endpoints.resource_manager sub_id = get_subscription_id(cmd.cli_ctx) api_version = "2024-03-01" - + url_fmt = "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.App/managedEnvironments/{}?api-version={}" request_url = url_fmt.format( management_hostname.strip('/'), @@ -874,7 +1012,7 @@ def create_gpu_workload_profile_if_needed(cmd, resource_group_name, env_name, lo env_name, api_version ) - + # Update environment with new workload profile env_update = { "properties": { @@ -882,30 +1020,35 @@ def create_gpu_workload_profile_if_needed(cmd, resource_group_name, env_name, lo }, "location": env.get('location') } - - r = send_raw_request(cmd.cli_ctx, "PATCH", request_url, body=json.dumps(env_update)) + + r = send_raw_request( + cmd.cli_ctx, + "PATCH", + request_url, + body=json.dumps(env_update)) logger.info(f"Added GPU profile to environment: {gpu_profile_type}") - + # Wait for environment to be provisioned before continuing wait_for_environment_provisioning(cmd, resource_group_name, env_name) - + return gpu_profile_type - + profile_name = 'gpu-profile' - - logger.info(f"Creating GPU workload profile '{profile_name}' of type '{gpu_profile_type}'") - + + logger.info( + f"Creating GPU workload profile '{profile_name}' of type '{gpu_profile_type}'") + # Create the workload profile using existing add_workload_profile function # Create the workload profile using Azure REST API try: from azure.cli.core.util import send_raw_request from azure.cli.core.commands.client_factory import get_subscription_id import json - + management_hostname = cmd.cli_ctx.cloud.endpoints.resource_manager sub_id = get_subscription_id(cmd.cli_ctx) api_version = "2024-03-01" - + # Create workload profile url_fmt = "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.App/managedEnvironments/{}/workloadProfiles/{}?api-version={}" request_url = url_fmt.format( @@ -916,7 +1059,7 @@ def create_gpu_workload_profile_if_needed(cmd, resource_group_name, env_name, lo profile_name, api_version ) - + profile_body = { "properties": { "workloadProfileType": gpu_profile_type, @@ -924,16 +1067,22 @@ def create_gpu_workload_profile_if_needed(cmd, resource_group_name, env_name, lo "maximumCount": 3 } } - + logger.info(f"Creating workload profile at: {request_url}") logger.info(f"Profile body: {json.dumps(profile_body)}") - - r = send_raw_request(cmd.cli_ctx, "PUT", request_url, body=json.dumps(profile_body)) - + + r = send_raw_request( + cmd.cli_ctx, + "PUT", + request_url, + body=json.dumps(profile_body)) + if r.status_code >= 400: - logger.error(f"Failed to create workload profile. 
Status: {r.status_code}, Response: {r.text}") - raise Exception(f"Failed to create workload profile: {r.status_code} - {r.text}") - + logger.error( + f"Failed to create workload profile. Status: {r.status_code}, Response: {r.text}") + raise Exception( + f"Failed to create workload profile: {r.status_code} - {r.text}") + logger.info(f"Successfully created GPU profile: {profile_name}") return profile_name except Exception as e: @@ -942,13 +1091,17 @@ def create_gpu_workload_profile_if_needed(cmd, resource_group_name, env_name, lo raise - - -def create_models_container_app(cmd, resource_group_name, env_name, env_id, models, - gpu_profile_name, location): +def create_models_container_app( + cmd, + resource_group_name, + env_name, + env_id, + models, + gpu_profile_name, + location): """ Create the models container app with model-runner and model-runner-config containers. - + Args: cmd: Azure CLI command context resource_group_name: Resource group name @@ -957,7 +1110,7 @@ def create_models_container_app(cmd, resource_group_name, env_name, env_id, mode models: Dictionary of model configurations gpu_profile_name: Name of the GPU workload profile location: Azure region - + Returns: Created container app resource """ @@ -965,47 +1118,55 @@ def create_models_container_app(cmd, resource_group_name, env_name, env_id, mode from ._clients import ManagedEnvironmentClient from ._constants import MODEL_RUNNER_CONFIG_IMAGE import json - + logger = get_logger(__name__) - + app_name = 'models' - logger.info(f"Creating models container app '{app_name}' with GPU profile '{gpu_profile_name}'") - + logger.info( + f"Creating models container app '{app_name}' with GPU profile '{gpu_profile_name}'") + # MODEL_RUNNER_URL uses localhost since model-runner and model-runner-config # are in the same container app (in-app communication) model_runner_url = "http://localhost:12434" logger.info(f"Model runner URL (localhost): {model_runner_url}") - - # Determine model-runner image based on x-azure-deployment.image or GPU profile + + # Determine model-runner image based on x-azure-deployment.image or GPU + # profile model_runner_image = None for model_name, model_config in models.items(): - if isinstance(model_config, dict) and 'x-azure-deployment' in model_config: + if isinstance( + model_config, + dict) and 'x-azure-deployment' in model_config: azure_deployment = model_config.get('x-azure-deployment', {}) if 'image' in azure_deployment: model_runner_image = azure_deployment['image'] - logger.info(f"Using custom model-runner image from x-azure-deployment: {model_runner_image}") + logger.info( + f"Using custom model-runner image from x-azure-deployment: {model_runner_image}") break - + # Default to GPU-aware image selection if not specified if not model_runner_image: from ._constants import MODEL_RUNNER_IMAGE, MODEL_RUNNER_IMAGE_CUDA is_gpu_profile = 'GPU' in gpu_profile_name.upper() model_runner_image = MODEL_RUNNER_IMAGE_CUDA if is_gpu_profile else MODEL_RUNNER_IMAGE - logger.info(f"Using default model-runner image: {model_runner_image} (GPU profile: {is_gpu_profile})") - + logger.info( + f"Using default model-runner image: {model_runner_image} (GPU profile: {is_gpu_profile})") + # Determine GPU-appropriate resources based on profile type # T4 GPU: 8 vCPUs, 56GB memory (NC8as_T4_v3) # A100 GPU: 24 vCPUs, 220GB memory (NC24ads_A100_v4) if 'A100' in gpu_profile_name.upper(): gpu_cpu = 24.0 gpu_memory = '220Gi' - logger.info(f"Detected A100 GPU profile - setting resources to {gpu_cpu} CPU / {gpu_memory}") + logger.info( + 
f"Detected A100 GPU profile - setting resources to {gpu_cpu} CPU / {gpu_memory}") else: # Default to T4 resources gpu_cpu = 8.0 gpu_memory = '56Gi' - logger.info(f"Detected T4 GPU profile - setting resources to {gpu_cpu} CPU / {gpu_memory}") - + logger.info( + f"Detected T4 GPU profile - setting resources to {gpu_cpu} CPU / {gpu_memory}") + # Build model configuration for model-runner-config model_config = { 'models': {} @@ -1016,12 +1177,15 @@ def create_models_container_app(cmd, resource_group_name, env_name, env_id, mode continue if isinstance(model_spec, dict): # Exclude x-azure-deployment from model configuration - clean_model_spec = {k: v for k, v in model_spec.items() if k != 'x-azure-deployment'} + clean_model_spec = { + k: v for k, + v in model_spec.items() if k != 'x-azure-deployment'} model_config['models'][model_name] = clean_model_spec else: # Simple string format (just model name) - model_config['models'][model_name] = {'source': f'ollama://{model_spec}'} - + model_config['models'][model_name] = { + 'source': f'ollama://{model_spec}'} + # Container configuration matching successful deployment pattern containers = [ { @@ -1067,17 +1231,19 @@ def create_models_container_app(cmd, resource_group_name, env_name, env_id, mode 'value': json.dumps(model_config) }, { - 'name': 'CONFIGURE_ON_STARTUP', # whether to configure models on startup + 'name': 'CONFIGURE_ON_STARTUP', # whether to configure models on startup 'value': 'true' }, { - 'name': 'STARTUP_DELAY', # how long do we want to wait after startup before configuring models - 'value': '30' # seconds + # how long do we want to wait after startup before + # configuring models + 'name': 'STARTUP_DELAY', + 'value': '30' # seconds } ] } ] - + # Check for ingress configuration in x-azure-deployment ingress_config = { 'external': False, @@ -1085,7 +1251,7 @@ def create_models_container_app(cmd, resource_group_name, env_name, env_id, mode 'transport': 'http', 'allowInsecure': False } - + # Override with x-azure-deployment ingress settings if present # Check inside each model (models.gemma.x-azure-deployment) for model_name, model_spec in models.items(): @@ -1099,9 +1265,10 @@ def create_models_container_app(cmd, resource_group_name, env_name, env_id, mode ingress_config['external'] = ingress_override['external'] if 'allowInsecure' in ingress_override: ingress_config['allowInsecure'] = ingress_override['allowInsecure'] - logger.info(f"Applied ingress overrides from model '{model_name}': {ingress_override}") + logger.info( + f"Applied ingress overrides from model '{model_name}': {ingress_override}") break # Use first model's ingress settings - + # Build container app definition container_app_def = { 'location': location, @@ -1120,16 +1287,16 @@ def create_models_container_app(cmd, resource_group_name, env_name, env_id, mode } } } - + # Create the container app using REST API try: from azure.cli.core.util import send_raw_request from azure.cli.core.commands.client_factory import get_subscription_id - + management_hostname = cmd.cli_ctx.cloud.endpoints.resource_manager sub_id = get_subscription_id(cmd.cli_ctx) api_version = "2024-03-01" - + url_fmt = "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.App/containerApps/{}?api-version={}" request_url = url_fmt.format( management_hostname.strip('/'), @@ -1138,8 +1305,12 @@ def create_models_container_app(cmd, resource_group_name, env_name, env_id, mode app_name, api_version ) - - r = send_raw_request(cmd.cli_ctx, "PUT", request_url, body=json.dumps(container_app_def)) + + r = 
send_raw_request( + cmd.cli_ctx, + "PUT", + request_url, + body=json.dumps(container_app_def)) models_app = r.json() logger.info(f"Successfully created models container app: {app_name}") return models_app @@ -1153,9 +1324,13 @@ def extract_model_definitions(compose_yaml, parsed_compose_file): return _ported_extract_model_definitions(compose_yaml, parsed_compose_file) -def get_model_endpoint_environment_vars(service_models, models_config, aca_environment_name): +def get_model_endpoint_environment_vars( + service_models, + models_config, + aca_environment_name): """Ported helper from azure-cli commit 092e028c556c5d98c06ea1a337c26b97fe00ce59.""" - return _ported_get_model_endpoint_environment_vars(service_models, models_config, aca_environment_name) + return _ported_get_model_endpoint_environment_vars( + service_models, models_config, aca_environment_name) def calculate_model_runner_resources(model_definitions): @@ -1163,36 +1338,41 @@ def calculate_model_runner_resources(model_definitions): return _ported_calculate_model_runner_resources(model_definitions) -def get_containerapp_fqdn(cmd, resource_group_name, env_name, app_name, is_external=False): +def get_containerapp_fqdn( + cmd, + resource_group_name, + env_name, + app_name, + is_external=False): """ Generate Container Apps FQDN for a given app. - + Args: cmd: Azure CLI command context resource_group_name: Resource group name env_name: Container Apps environment name app_name: Container app name is_external: Whether the app has external ingress (True) or internal (False) - + Returns: FQDN string (e.g., https://myapp.internal.example.azurecontainerapps.io) """ from knack.log import get_logger from ._clients import ManagedEnvironmentClient - + logger = get_logger(__name__) - + # Get environment to retrieve the default domain env = ManagedEnvironmentClient.show(cmd, resource_group_name, env_name) env_default_domain = env.get('properties', {}).get('defaultDomain', '') - + if is_external: # External ingress: https://. fqdn = f"https://{app_name}.{env_default_domain}" else: # Internal ingress: https://.internal. fqdn = f"https://{app_name}.internal.{env_default_domain}" - + logger.info(f"Generated FQDN for '{app_name}': {fqdn}") return fqdn @@ -1200,77 +1380,85 @@ def get_containerapp_fqdn(cmd, resource_group_name, env_name, app_name, is_exter def inject_models_environment_variables(app_def, models_endpoint, models_list): """ Inject MODELS_ENDPOINT and MODELS_AVAILABLE environment variables into a container app. 
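    Illustrative result (values assumed): with models_endpoint
    'https://models.internal.<env-default-domain>' and models_list
    ['model-a', 'model-b'], the first container gains
    MODELS_ENDPOINT=https://models.internal.<env-default-domain> and
    MODELS_AVAILABLE=model-a,model-b.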
- + Args: app_def: Container app definition dictionary models_endpoint: URL of the models service endpoint models_list: List of available model names - + Returns: Modified app_def with injected environment variables """ from knack.log import get_logger - + logger = get_logger(__name__) - + # Get existing environment variables from first container - containers = app_def.get('properties', {}).get('template', {}).get('containers', []) + containers = app_def.get( + 'properties', + {}).get( + 'template', + {}).get( + 'containers', + []) if not containers: logger.warning("No containers found in app definition") return app_def - + env_vars = containers[0].get('env', []) - + # Add MODELS_ENDPOINT models_endpoint_var = { 'name': 'MODELS_ENDPOINT', 'value': models_endpoint } env_vars.append(models_endpoint_var) - + # Add MODELS_AVAILABLE (comma-separated list) models_available_var = { 'name': 'MODELS_AVAILABLE', 'value': ','.join(models_list) } env_vars.append(models_available_var) - + containers[0]['env'] = env_vars - logger.info(f"Injected models environment variables: {models_endpoint}, {len(models_list)} model(s)") - - return app_def + logger.info( + f"Injected models environment variables: {models_endpoint}, {len(models_list)} model(s)") + return app_def # ============================================================================ # Phase 4: MCP Gateway Implementation # ============================================================================ + def get_mcp_gateway_configuration(service): """ Extract MCP gateway configuration from compose service. - + Args: service: Compose service object - + Returns: Dictionary with gateway configuration settings """ from knack.log import get_logger - + logger = get_logger(__name__) - + from ._constants import MCP_GATEWAY_IMAGE - + config = { 'port': 8811, # Standard MCP gateway port 'ingress_type': 'internal', # MCP gateway should be internal-only 'image': service.image if hasattr(service, 'image') else MCP_GATEWAY_IMAGE } - + # Check for custom port in service ports if hasattr(service, 'ports') and service.ports: if isinstance(service.ports, list) and len(service.ports) > 0: - # Port object has 'target' (internal) and 'published' (external) attributes + # Port object has 'target' (internal) and 'published' (external) + # attributes port_obj = service.ports[0] if hasattr(port_obj, 'target'): # Use target (internal container port) for ingress @@ -1284,53 +1472,57 @@ def get_mcp_gateway_configuration(service): config['port'] = int(port_str.split(':')[-1]) else: config['port'] = int(port_str) - - logger.info(f"MCP Gateway configuration: port={config['port']}, ingress={config['ingress_type']}") + + logger.info( + f"MCP Gateway configuration: port={config['port']}, ingress={config['ingress_type']}") return config def enable_managed_identity(cmd, resource_group_name, app_name): """ Enable system-assigned managed identity for a container app. 
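    Illustrative return value (shape assumed from how it is consumed below):
    {'type': 'SystemAssigned', 'principalId': '<guid>'}; the principalId is
    what attempt_role_assignment needs to create the role assignment.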
- + Args: cmd: Azure CLI command context resource_group_name: Resource group name app_name: Container app name - + Returns: Dictionary with identity information including principal_id """ from knack.log import get_logger from ._clients import ContainerAppClient - + logger = get_logger(__name__) - + logger.info(f"Enabling system-assigned managed identity for '{app_name}'") - + try: # Get current app using show classmethod app = ContainerAppClient.show(cmd, resource_group_name, app_name) - + # Set identity type to SystemAssigned if 'identity' not in app: app['identity'] = {} - + app['identity']['type'] = 'SystemAssigned' - + # Update the app using create_or_update classmethod - updated_app = ContainerAppClient.create_or_update(cmd, resource_group_name, app_name, app) - + updated_app = ContainerAppClient.create_or_update( + cmd, resource_group_name, app_name, app) + identity = updated_app.get('identity', {}) principal_id = identity.get('principalId') - + if principal_id: - logger.info(f"Successfully enabled managed identity. Principal ID: {principal_id}") + logger.info( + f"Successfully enabled managed identity. Principal ID: {principal_id}") else: - logger.warning("Managed identity enabled but principal ID not yet available") - + logger.warning( + "Managed identity enabled but principal ID not yet available") + return identity - + except Exception as e: logger.error(f"Failed to enable managed identity: {str(e)}") raise @@ -1340,19 +1532,19 @@ def attempt_role_assignment(cmd, principal_id, resource_group_name, app_name): """ Attempt to assign Container Apps Contributor role to managed identity at resource group scope. This allows the MCP gateway to modify container apps to add MCP server containers. - + Args: cmd: Azure CLI command context principal_id: Principal ID of the managed identity resource_group_name: Resource group name app_name: Container app name - + Returns: Boolean indicating if assignment succeeded """ from knack.log import get_logger from azure.cli.core.commands.client_factory import get_subscription_id - + logger = get_logger(__name__) # Build scope - resource group level @@ -1362,14 +1554,14 @@ def attempt_role_assignment(cmd, principal_id, resource_group_name, app_name): # This role allows the MCP gateway to modify container apps role_definition_id = f"/subscriptions/{subscription_id}/providers/Microsoft.Authorization/roleDefinitions/358470bc-b998-42bd-ab17-a7e34c199c0f" scope = f"/subscriptions/{subscription_id}/resourceGroups/{resource_group_name}" - + logger.info(f"Attempting to assign role to principal {principal_id}") logger.info(f"Scope: Resource Group '{resource_group_name}'") - + try: # Import role assignment function from azure.cli.command_modules.role.custom import create_role_assignment - + # Attempt role assignment using the full role definition ID create_role_assignment( cmd, @@ -1378,120 +1570,147 @@ def attempt_role_assignment(cmd, principal_id, resource_group_name, app_name): scope=scope, assignee_principal_type='ServicePrincipal' ) - - logger.info(f"✅ Successfully assigned role to '{app_name}' managed identity") + + logger.info( + f"✅ Successfully assigned role to '{app_name}' managed identity") return True - + except Exception as e: # Log warning with manual assignment instructions logger.warning("━" * 70) logger.warning(f"⚠️ Could not automatically assign role: {str(e)}") logger.warning("") logger.warning("To manually assign the role, run:") - logger.warning(f" az role assignment create \\") + logger.warning(" az role assignment create \\") 
logger.warning(f" --role '{role_definition_id}' \\") logger.warning(f" --assignee-object-id {principal_id} \\") - logger.warning(f" --assignee-principal-type ServicePrincipal \\") + logger.warning(" --assignee-principal-type ServicePrincipal \\") logger.warning(f" --scope {scope}") logger.warning("━" * 70) - + return False -def inject_mcp_gateway_environment_variables(app_def, subscription_id, resource_group_name, app_name): +def inject_mcp_gateway_environment_variables( + app_def, subscription_id, resource_group_name, app_name): """ Inject MCP gateway-specific environment variables into the container app. - + Args: app_def: Container app definition dictionary subscription_id: Azure subscription ID resource_group_name: Resource group name app_name: Container app name - + Returns: Modified app_def with injected environment variables """ from knack.log import get_logger - + logger = get_logger(__name__) - + # Get existing environment variables from first container - containers = app_def.get('properties', {}).get('template', {}).get('containers', []) + containers = app_def.get( + 'properties', + {}).get( + 'template', + {}).get( + 'containers', + []) if not containers: logger.warning("No containers found in MCP gateway app definition") return app_def - + env_vars = containers[0].get('env', []) - + # Add MCP runtime identifier env_vars.append({ 'name': 'MCP_RUNTIME', 'value': 'ACA' }) - + # Add Azure context variables env_vars.append({ 'name': 'AZURE_SUBSCRIPTION_ID', 'value': subscription_id }) - + env_vars.append({ 'name': 'AZURE_RESOURCE_GROUP', 'value': resource_group_name }) - + env_vars.append({ 'name': 'AZURE_APP_NAME', 'value': app_name }) - + containers[0]['env'] = env_vars logger.info(f"Injected MCP gateway environment variables for '{app_name}'") - + return app_def def inject_gateway_url_to_dependents(app_def, gateway_url): """ Inject MCP_GATEWAY_URL environment variable to services that depend on MCP gateway. 
- + Args: app_def: Container app definition dictionary gateway_url: URL of the MCP gateway service - + Returns: Modified app_def with injected MCP_GATEWAY_URL """ from knack.log import get_logger - + logger = get_logger(__name__) - + # Get existing environment variables from first container - containers = app_def.get('properties', {}).get('template', {}).get('containers', []) + containers = app_def.get( + 'properties', + {}).get( + 'template', + {}).get( + 'containers', + []) if not containers: logger.warning("No containers found in app definition") return app_def - + env_vars = containers[0].get('env', []) - + # Add MCP_GATEWAY_URL env_vars.append({ 'name': 'MCP_GATEWAY_URL', 'value': gateway_url }) - + containers[0]['env'] = env_vars logger.info(f"Injected MCP_GATEWAY_URL: {gateway_url}") - - return app_def + return app_def # ============================================================================ # Phase 5: Dry-Run Mode Functions # ============================================================================ -def collect_dry_run_service_config(service_name, service, image=None, ingress_type=None, target_port=None, - cpu=None, memory=None, environment=None, replicas=None, is_models_service=False, is_mcp_gateway=False, gpu_type=None, raw_service=None, models_config=None): + +def collect_dry_run_service_config( + service_name, + service, + image=None, + ingress_type=None, + target_port=None, + cpu=None, + memory=None, + environment=None, + replicas=None, + is_models_service=False, + is_mcp_gateway=False, + gpu_type=None, + raw_service=None, + models_config=None): """Collect service configuration for dry-run preview""" # Extract x-azure-deployment overrides # Use raw_service (YAML dict) if provided, otherwise use service object @@ -1506,13 +1725,18 @@ def collect_dry_run_service_config(service_name, service, image=None, ingress_ty service_depends_on = getattr(service, 'depends_on', None) service_command = getattr(service, 'command', None) - # Get image with override priority (x-azure-deployment takes highest precedence) - final_image = overrides.get('image') or image or service_image or 'Not specified' - - # Determine ingress enabled and type (x-azure-deployment takes highest precedence) - ingress_enabled = service_ports is not None or overrides.get('ingress_type') is not None - - # Priority: x-azure-deployment override > parameter > default based on ports + # Get image with override priority (x-azure-deployment takes highest + # precedence) + final_image = overrides.get( + 'image') or image or service_image or 'Not specified' + + # Determine ingress enabled and type (x-azure-deployment takes highest + # precedence) + ingress_enabled = service_ports is not None or overrides.get( + 'ingress_type') is not None + + # Priority: x-azure-deployment override > parameter > default based on + # ports if overrides.get('ingress_type') is not None: final_ingress_type = overrides['ingress_type'] elif ingress_type is not None: @@ -1521,22 +1745,23 @@ def collect_dry_run_service_config(service_name, service, image=None, ingress_ty final_ingress_type = 'internal' # Default to internal if ports exist else: final_ingress_type = None - + # Get target port from override or parameter final_target_port = overrides.get('target_port') or target_port - + # Get CPU and memory from overrides or parameters final_cpu = overrides.get('cpu') or cpu final_memory = overrides.get('memory') or memory - + # Get replica counts from overrides min_replicas = overrides.get('min_replicas') max_replicas = 
overrides.get('max_replicas') - # Collect environment variables (handle both dict and string formats, avoid duplicates) + # Collect environment variables (handle both dict and string formats, + # avoid duplicates) env_vars = [] env_var_names = set() # Track names to avoid duplicates - + # Helper to add env var and track names def add_env_var(var): if isinstance(var, dict) and 'name' in var and 'value' in var: @@ -1547,11 +1772,11 @@ def add_env_var(var): var_name = var.split('=')[0] if '=' in var else var else: return # Skip invalid format - + if var_name not in env_var_names: env_vars.append(var_str) env_var_names.add(var_name) - + # First add from service definition (compose file) if service_env: if isinstance(service_env, dict): @@ -1560,7 +1785,7 @@ def add_env_var(var): elif isinstance(service_env, list): for var in service_env: add_env_var(var) - + # Then add from environment parameter (injected vars, these can override) if environment: for var in environment: @@ -1569,7 +1794,8 @@ def add_env_var(var): var_name = var['name'] if var_name in env_var_names: # Remove old value - env_vars = [v for v in env_vars if not v.startswith(f"{var_name}=")] + env_vars = [ + v for v in env_vars if not v.startswith(f"{var_name}=")] env_var_names.discard(var_name) add_env_var(var) @@ -1601,14 +1827,15 @@ def add_env_var(var): if isinstance(model_info, dict) and 'model' in model_info: model_path = model_info['model'] models_ref.append(f"{model_name} ({model_path})") - + # Add environment variables for this model model_env = service_models[model_name] if isinstance(model_env, dict): endpoint_var = model_env.get('endpoint_var') model_var = model_env.get('model_var') if endpoint_var: - env_vars.append(f"{endpoint_var}=https:///") + env_vars.append( + f"{endpoint_var}=https:///") if model_var: env_vars.append(f"{model_var}={model_path}") elif isinstance(model_info, str): @@ -1634,6 +1861,7 @@ def add_env_var(var): return config + def parse_x_azure_deployment(service): """ Parse x-azure-deployment custom extension from compose service. 
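    Illustrative input (shape assumed, limited to the keys handled in the
    hunks below; image and values are hypothetical):

        {'image': 'myregistry.azurecr.io/web:1.0',
         'resources': {'cpu': 1.0, 'memory': '2Gi'},
         'ingress': {'type': 'external', 'port': 8080, 'allowInsecure': False},
         'managed_identity': True}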
@@ -1663,7 +1891,7 @@ def parse_x_azure_deployment(service): x_azure = service.x_azure_deployment else: return overrides - + if not x_azure: return overrides @@ -1671,13 +1899,15 @@ def parse_x_azure_deployment(service): if 'image' in x_azure: overrides['image'] = str(x_azure['image']) - # Parse resources (cpu and memory can be at top level OR nested under 'resources') + # Parse resources (cpu and memory can be at top level OR nested under + # 'resources') resources = x_azure.get('resources', {}) if 'cpu' in resources: - overrides['cpu'] = resources['cpu'] # Don't convert to string, keep as float/int + # Don't convert to string, keep as float/int + overrides['cpu'] = resources['cpu'] elif 'cpu' in x_azure: overrides['cpu'] = x_azure['cpu'] - + if 'memory' in resources: overrides['memory'] = str(resources['memory']) elif 'memory' in x_azure: @@ -1708,7 +1938,8 @@ def parse_x_azure_deployment(service): overrides['target_port'] = int(ingress_config['port']) # Handle allowInsecure if 'allowInsecure' in ingress_config: - overrides['allow_insecure'] = bool(ingress_config['allowInsecure']) + overrides['allow_insecure'] = bool( + ingress_config['allowInsecure']) elif isinstance(ingress_config, str): overrides['ingress_type'] = ingress_config @@ -1717,17 +1948,24 @@ def parse_x_azure_deployment(service): overrides['managed_identity'] = x_azure['managed_identity'] return overrides -def apply_resource_overrides(cpu, memory, min_replicas, max_replicas, overrides): + + +def apply_resource_overrides( + cpu, + memory, + min_replicas, + max_replicas, + overrides): """ Apply x-azure-deployment overrides to resource configuration. - + Args: cpu: Default CPU value memory: Default memory value min_replicas: Default min replicas max_replicas: Default max replicas overrides: Dictionary from parse_x_azure_deployment - + Returns: Tuple of (cpu, memory, min_replicas, max_replicas) with overrides applied """ @@ -1735,94 +1973,101 @@ def apply_resource_overrides(cpu, memory, min_replicas, max_replicas, overrides) final_memory = overrides.get('memory', memory) final_min_replicas = overrides.get('min_replicas', min_replicas) final_max_replicas = overrides.get('max_replicas', max_replicas) - + return final_cpu, final_memory, final_min_replicas, final_max_replicas def apply_ingress_overrides(ingress_type, target_port, overrides): """ Apply x-azure-deployment ingress overrides. - + Args: ingress_type: Default ingress type target_port: Default target port overrides: Dictionary from parse_x_azure_deployment - + Returns: Tuple of (ingress_type, target_port) with overrides applied """ final_ingress_type = overrides.get('ingress_type', ingress_type) final_target_port = overrides.get('target_port', target_port) - + return final_ingress_type, final_target_port def validate_x_azure_overrides(overrides, service_name, logger): """ Validate x-azure-deployment overrides and log warnings for invalid values. 
- + Args: overrides: Dictionary from parse_x_azure_deployment service_name: Name of the service logger: Logger instance - + Returns: True if all overrides are valid, False otherwise """ valid = True - + # Validate CPU if 'cpu' in overrides: cpu = overrides['cpu'] try: cpu_float = float(cpu) if cpu_float <= 0 or cpu_float > 4: - logger.warning(f"Service '{service_name}': Invalid CPU '{cpu}' (must be 0.25-4.0)") + logger.warning( + f"Service '{service_name}': Invalid CPU '{cpu}' (must be 0.25-4.0)") valid = False except ValueError: - logger.warning(f"Service '{service_name}': Invalid CPU format '{cpu}'") + logger.warning( + f"Service '{service_name}': Invalid CPU format '{cpu}'") valid = False - + # Validate memory if 'memory' in overrides: memory = overrides['memory'] if not (memory.endswith('Gi') or memory.endswith('G')): - logger.warning(f"Service '{service_name}': Memory '{memory}' should end with 'Gi' or 'G'") + logger.warning( + f"Service '{service_name}': Memory '{memory}' should end with 'Gi' or 'G'") valid = False - + # Validate replicas if 'min_replicas' in overrides: min_rep = overrides['min_replicas'] if min_rep < 0 or min_rep > 30: - logger.warning(f"Service '{service_name}': min_replicas '{min_rep}' out of range (0-30)") + logger.warning( + f"Service '{service_name}': min_replicas '{min_rep}' out of range (0-30)") valid = False - + if 'max_replicas' in overrides: max_rep = overrides['max_replicas'] if max_rep < 1 or max_rep > 30: - logger.warning(f"Service '{service_name}': max_replicas '{max_rep}' out of range (1-30)") + logger.warning( + f"Service '{service_name}': max_replicas '{max_rep}' out of range (1-30)") valid = False - + # Validate min <= max if 'min_replicas' in overrides and 'max_replicas' in overrides: if overrides['min_replicas'] > overrides['max_replicas']: - logger.warning(f"Service '{service_name}': min_replicas cannot exceed max_replicas") + logger.warning( + f"Service '{service_name}': min_replicas cannot exceed max_replicas") valid = False - + # Validate ingress type if 'ingress_type' in overrides: ingress = overrides['ingress_type'].lower() if ingress not in ['external', 'internal']: - logger.warning(f"Service '{service_name}': Invalid ingress type '{ingress}' (use 'external' or 'internal')") + logger.warning( + f"Service '{service_name}': Invalid ingress type '{ingress}' (use 'external' or 'internal')") valid = False - + return valid def log_applied_overrides(service_name, overrides, logger): """ Log which x-azure-deployment overrides were applied. 
- + Args: service_name: Name of the service overrides: Dictionary from parse_x_azure_deployment @@ -1830,52 +2075,54 @@ def log_applied_overrides(service_name, overrides, logger): """ if not overrides: return - - logger.info(f"Service '{service_name}': Applying x-azure-deployment overrides:") - + + logger.info( + f"Service '{service_name}': Applying x-azure-deployment overrides:") + if 'cpu' in overrides: logger.info(f" - CPU: {overrides['cpu']}") - + if 'memory' in overrides: logger.info(f" - Memory: {overrides['memory']}") - + if 'min_replicas' in overrides: logger.info(f" - Min Replicas: {overrides['min_replicas']}") - + if 'max_replicas' in overrides: logger.info(f" - Max Replicas: {overrides['max_replicas']}") - + if 'ingress_type' in overrides: logger.info(f" - Ingress Type: {overrides['ingress_type']}") - + if 'target_port' in overrides: logger.info(f" - Target Port: {overrides['target_port']}") - + if 'managed_identity' in overrides: logger.info(f" - Managed Identity: {overrides['managed_identity']}") - - # ============================================================================ # Phase 7: Declarative Update Strategy Functions # ============================================================================ + def check_containerapp_exists(cmd, resource_group_name, app_name): """ Check if a container app already exists. - + Args: cmd: Command context resource_group_name: Resource group name app_name: Container app name - + Returns: Existing containerapp object if it exists, None otherwise """ + from ._clients import ContainerAppClient from azure.core.exceptions import ResourceNotFoundError - + try: - existing_app = ContainerAppClient.show(cmd, resource_group_name=resource_group_name, name=app_name) + existing_app = ContainerAppClient.show( + cmd, resource_group_name=resource_group_name, name=app_name) return existing_app except ResourceNotFoundError: return None @@ -1886,11 +2133,11 @@ def check_containerapp_exists(cmd, resource_group_name, app_name): def detect_configuration_changes(existing_app, new_config): """ Detect changes between existing and new container app configuration. 
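    Illustrative return value when only the image differs (keys taken from the
    changes dict initialized below):

        {'has_changes': True, 'image_changed': True, 'env_vars_changed': False,
         'replicas_changed': False, 'resources_changed': False,
         'ingress_changed': False}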
- + Args: existing_app: Existing container app object new_config: Dictionary with new configuration - + Returns: Dictionary describing detected changes """ @@ -1902,59 +2149,78 @@ def detect_configuration_changes(existing_app, new_config): 'resources_changed': False, 'ingress_changed': False } - + if not existing_app: changes['has_changes'] = True return changes - + # Check image change if hasattr(existing_app, 'properties'): props = existing_app.properties - + # Image change - if hasattr(props, 'template') and hasattr(props.template, 'containers'): - if props.template.containers and len(props.template.containers) > 0: + if hasattr( + props, + 'template') and hasattr( + props.template, + 'containers'): + if props.template.containers and len( + props.template.containers) > 0: existing_image = props.template.containers[0].image new_image = new_config.get('image', '') if existing_image != new_image: changes['image_changed'] = True changes['has_changes'] = True - + # Replica change if hasattr(props, 'template') and hasattr(props.template, 'scale'): existing_min = getattr(props.template.scale, 'minReplicas', 0) existing_max = getattr(props.template.scale, 'maxReplicas', 1) new_min = new_config.get('min_replicas', 1) new_max = new_config.get('max_replicas', 1) - + if existing_min != new_min or existing_max != new_max: changes['replicas_changed'] = True changes['has_changes'] = True - + # Resource change (CPU/Memory) - if hasattr(props, 'template') and hasattr(props.template, 'containers'): - if props.template.containers and len(props.template.containers) > 0: + if hasattr( + props, + 'template') and hasattr( + props.template, + 'containers'): + if props.template.containers and len( + props.template.containers) > 0: container = props.template.containers[0] if hasattr(container, 'resources'): existing_cpu = getattr(container.resources, 'cpu', '0.5') - existing_memory = getattr(container.resources, 'memory', '1.0Gi') + existing_memory = getattr( + container.resources, 'memory', '1.0Gi') new_cpu = new_config.get('cpu', '0.5') new_memory = new_config.get('memory', '1.0Gi') - - if str(existing_cpu) != str(new_cpu) or str(existing_memory) != str(new_memory): + + if str(existing_cpu) != str(new_cpu) or str( + existing_memory) != str(new_memory): changes['resources_changed'] = True changes['has_changes'] = True - + return changes -def update_containerapp_from_compose(cmd, resource_group_name, app_name, - image=None, env_vars=None, cpu=None, - memory=None, min_replicas=None, max_replicas=None, - logger=None): +def update_containerapp_from_compose( + cmd, + resource_group_name, + app_name, + image=None, + env_vars=None, + cpu=None, + memory=None, + min_replicas=None, + max_replicas=None, + logger=None): """ Update an existing container app with new configuration. 
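    Illustrative call (argument values are hypothetical):

        update_containerapp_from_compose(cmd, 'my-rg', 'web',
                                         image='myregistry.azurecr.io/web:2.0',
                                         cpu='0.5', memory='1.0Gi',
                                         min_replicas=1, max_replicas=3,
                                         logger=logger)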
- + Args: cmd: Command context resource_group_name: Resource group name @@ -1966,40 +2232,40 @@ def update_containerapp_from_compose(cmd, resource_group_name, app_name, min_replicas: New min replicas (optional) max_replicas: New max replicas (optional) logger: Logger instance - + Returns: Updated containerapp object """ from azure.cli.command_modules.containerapp.custom import update_containerapp - + if logger: logger.info(f"Updating existing container app: {app_name}") - + # Build update arguments update_args = { 'cmd': cmd, 'name': app_name, 'resource_group_name': resource_group_name } - + if image: update_args['image'] = image - + if env_vars: update_args['set_env_vars'] = env_vars - + if cpu: update_args['cpu'] = cpu - + if memory: update_args['memory'] = memory - + if min_replicas is not None: update_args['min_replicas'] = min_replicas - + if max_replicas is not None: update_args['max_replicas'] = max_replicas - + # Perform update try: updated_app = update_containerapp(**update_args) @@ -2008,81 +2274,88 @@ def update_containerapp_from_compose(cmd, resource_group_name, app_name, return updated_app except Exception as e: if logger: - logger.error(f"Failed to update container app '{app_name}': {str(e)}") + logger.error( + f"Failed to update container app '{app_name}': {str(e)}") raise def log_update_detection(service_name, changes, logger): """ Log detected changes for a container app update. - + Args: service_name: Name of the service changes: Dictionary from detect_configuration_changes logger: Logger instance """ if not changes['has_changes']: - logger.info(f"Service '{service_name}': No changes detected - skipping update") + logger.info( + f"Service '{service_name}': No changes detected - skipping update") return - + logger.info(f"Service '{service_name}': Detected changes:") - + if changes['image_changed']: - logger.info(f" - Container image changed") - + logger.info(" - Container image changed") + if changes['env_vars_changed']: - logger.info(f" - Environment variables changed") - + logger.info(" - Environment variables changed") + if changes['replicas_changed']: - logger.info(f" - Replica configuration changed") - + logger.info(" - Replica configuration changed") + if changes['resources_changed']: - logger.info(f" - Resource allocation (CPU/Memory) changed") - - if changes['ingress_changed']: - logger.info(f" - Ingress configuration changed") + logger.info(" - Resource allocation (CPU/Memory) changed") + if changes['ingress_changed']: + logger.info(" - Ingress configuration changed") # Dry-run print functions (stubs for now - can be enhanced later) -def print_dry_run_header(compose_file_path, resource_group_name, managed_env_name): + + +def print_dry_run_header( + compose_file_path, + resource_group_name, + managed_env_name): """Print dry-run header""" - print("\n" + "="*80) + print("\n" + "=" * 80) print("DRY-RUN MODE: Deployment Preview") - print("="*80 + "\n") + print("=" * 80 + "\n") print(f"Compose File: {compose_file_path}") print(f"Resource Group: {resource_group_name}") print(f"Managed Environment: {managed_env_name}") print("\nNote: No resources will be created. 
This is a preview only.\n") + def print_dry_run_service_plan(service_name, service_config): """Print dry-run service plan""" print(f"\nService: {service_name}") print("-" * 60) - + # Always show image if 'image' in service_config and service_config['image']: print(f" Image: {service_config['image']}") - + # Show ingress if enabled or if type is specified ingress_type = service_config.get('ingress_type') if service_config.get('ingress_enabled') or ingress_type: if ingress_type: print(f" Ingress: {ingress_type}") - + # Show target port if specified target_port = service_config.get('target_port') if target_port: print(f" Target Port: {target_port}") # Show allow insecure if target port is set - print(f" Allow Insecure: true") - + print(" Allow Insecure: true") + # Show environment variables env_vars = service_config.get('environment', []) if env_vars: - print(f" Environment Variables:") + print(" Environment Variables:") for env_var in env_vars: print(f" - {env_var}") - + # Show depends_on depends_on = service_config.get('depends_on', []) if depends_on: @@ -2091,26 +2364,26 @@ def print_dry_run_service_plan(service_name, service_config): print(f" Depends On: {depends_on[0]}") else: print(f" Depends On: {', '.join(depends_on)}") - + # Show models reference models = service_config.get('models', []) if models: - print(f" Models:") + print(" Models:") for model in models: print(f" - {model}") - + # Show command command = service_config.get('command', []) if command: - print(f" Command:") + print(" Command:") for cmd in command: print(f" - {cmd}") - + # Show CPU cpu = service_config.get('cpu') if cpu: print(f" CPU: {cpu}") - + # Show memory with proper formatting memory = service_config.get('memory') if memory: @@ -2121,7 +2394,7 @@ def print_dry_run_service_plan(service_name, service_config): print(f" Memory: {memory}Gi") else: print(f" Memory: {memory}") - + # Show replica counts min_replicas = service_config.get('min_replicas') max_replicas = service_config.get('max_replicas') @@ -2130,14 +2403,15 @@ def print_dry_run_service_plan(service_name, service_config): if max_replicas is not None: print(f" Max Replicas: {max_replicas}") + def print_dry_run_models_deployment(models_config, gpu_profile_info): """Print dry-run models deployment""" print("\nModels Deployment Preview") - print("="*80) + print("=" * 80) # Container App name - print(f" Container App: models") - + print(" Container App: models") + # Determine model-runner image from x-azure-deployment.image or GPU profile model_runner_image = None for model_name, model_spec in models_config.items(): @@ -2146,18 +2420,19 @@ def print_dry_run_models_deployment(models_config, gpu_profile_info): if 'image' in azure_deployment: model_runner_image = azure_deployment['image'] break - + if not model_runner_image: - is_gpu = gpu_profile_info and 'GPU' in str(gpu_profile_info.get('type', '')).upper() + is_gpu = gpu_profile_info and 'GPU' in str( + gpu_profile_info.get('type', '')).upper() model_runner_image = 'docker/model-runner:latest-cuda' if is_gpu else 'docker/model-runner:latest' - + # Image print(f" Image: {model_runner_image}") - + # Show model names and paths (filter out x-azure-* keys) model_names = [k for k in models_config.keys() if not k.startswith('x-')] if model_names: - print(f" Models:") + print(" Models:") for model_name in model_names: model_info = models_config[model_name] if isinstance(model_info, dict) and 'model' in model_info: @@ -2168,23 +2443,28 @@ def print_dry_run_models_deployment(models_config, gpu_profile_info): # Show GPU 
workload profile if gpu_profile_info and 'type' in gpu_profile_info: print(f" Workload Profile: {gpu_profile_info['type']}") - + # Ingress is always internal for models - print(f" Ingress: internal") + print(" Ingress: internal") + def print_dry_run_mcp_gateway(gateway_config): """Print dry-run MCP gateway""" print("\nMCP Gateway Preview") - print("="*80) + print("=" * 80) if 'service_name' in gateway_config: print(f" Gateway: {gateway_config['service_name']}") -def print_dry_run_summary(total_services=0, has_models=False, has_gateway=False): + +def print_dry_run_summary( + total_services=0, + has_models=False, + has_gateway=False): """Print dry-run summary""" - print("\n" + "="*80) + print("\n" + "=" * 80) print("Deployment Summary") - print("="*80) + print("=" * 80) print(f"Total Services: {total_services}") print(f"Has Models: {has_models}") print(f"Has MCP Gateway: {has_gateway}") - print("="*80) + print("=" * 80) diff --git a/src/containerapp/azext_containerapp/_validators.py b/src/containerapp/azext_containerapp/_validators.py index 66944ce79c0..60bb7a1de86 100644 --- a/src/containerapp/azext_containerapp/_validators.py +++ b/src/containerapp/azext_containerapp/_validators.py @@ -1,278 +1,278 @@ -# -------------------------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. -# -------------------------------------------------------------------------------------------- -# pylint: disable=line-too-long, unused-argument - -from knack.log import get_logger -from urllib.parse import urlparse - -from azure.cli.core.azclierror import (InvalidArgumentValueError, - MutuallyExclusiveArgumentError, RequiredArgumentMissingError, - ResourceNotFoundError, ValidationError) -from azure.cli.command_modules.containerapp._utils import is_registry_msi_system, safe_get -from azure.cli.command_modules.containerapp._validators import _validate_revision_exists, _validate_replica_exists, \ - _validate_container_exists -from azure.mgmt.core.tools import is_valid_resource_id - -from ._clients import ContainerAppPreviewClient -from ._utils import is_registry_msi_system_environment - -from ._constants import ACR_IMAGE_SUFFIX, \ - CONNECTED_ENVIRONMENT_TYPE, \ - EXTENDED_LOCATION_RP, CUSTOM_LOCATION_RESOURCE_TYPE, MAXIMUM_SECRET_LENGTH, CONTAINER_APPS_RP, \ - CONNECTED_ENVIRONMENT_RESOURCE_TYPE, MANAGED_ENVIRONMENT_RESOURCE_TYPE, MANAGED_ENVIRONMENT_TYPE, \ - RUNTIME_GENERIC - -logger = get_logger(__name__) - - -# called directly from custom method bc otherwise it disrupts the --environment auto RID functionality -def validate_create(registry_identity, registry_pass, registry_user, registry_server, no_wait, revisions_mode=None, target_label=None, source=None, artifact=None, repo=None, yaml=None, environment_type=None): - if source and repo: - raise MutuallyExclusiveArgumentError("Usage error: --source and --repo cannot be used together. Can either deploy from a local directory or a GitHub repository") - if (source or repo) and yaml: - raise MutuallyExclusiveArgumentError("Usage error: --source or --repo cannot be used with --yaml together. Can either deploy from a local directory or provide a yaml file") - if (source or repo) and environment_type == CONNECTED_ENVIRONMENT_TYPE: - raise MutuallyExclusiveArgumentError("Usage error: --source or --repo cannot be used with --environment-type connectedEnvironment together. 
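Illustrative only, not part of this patch: a minimal sketch of how the dry-run preview helpers above could be driven for an already-parsed compose file. The import path and every sample value (service name, image, model path, workload profile) are assumptions for illustration; adjust the import to wherever the patch actually defines these helpers.

from azext_containerapp._compose_utils import (  # assumed location of the helpers above
    print_dry_run_service_plan,
    print_dry_run_models_deployment,
    print_dry_run_mcp_gateway,
    print_dry_run_summary,
)

# Hypothetical data shaped like the dictionaries the helpers read.
services = {
    "web": {
        "image": "myacr.azurecr.io/web:latest",
        "ingress_type": "external",
        "target_port": 8080,
        "environment": ["MODEL_RUNNER_URL=http://models", "MODEL_RUNNER_MODEL=phi"],
        "depends_on": ["models"],
        "models": ["phi"],
        "cpu": 0.5,
        "memory": 1.0,          # a non-string value is printed as "1.0Gi" by the helper
        "min_replicas": 1,
        "max_replicas": 3,
    },
}
models_config = {"phi": {"model": "ai/example-model"}}
gpu_profile_info = {"type": "Consumption-GPU-NC24-A100"}  # any profile name containing "GPU" selects the CUDA model-runner image

for name, config in services.items():
    print_dry_run_service_plan(name, config)
print_dry_run_models_deployment(models_config, gpu_profile_info)
print_dry_run_mcp_gateway({"service_name": "mcp-gateway"})
print_dry_run_summary(total_services=len(services), has_models=True, has_gateway=True)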
Please use --environment-type managedEnvironment") - if repo: - if not registry_server: - raise RequiredArgumentMissingError('Usage error: --registry-server is required while using --repo') - if ACR_IMAGE_SUFFIX not in registry_server: - raise InvalidArgumentValueError("Usage error: --registry-server: expected an ACR registry (*.azurecr.io) for --repo") - if repo and registry_server and "azurecr.io" in registry_server: - parsed = urlparse(registry_server) - registry_name = (parsed.netloc if parsed.scheme else parsed.path).split(".")[0] - if registry_name and len(registry_name) > MAXIMUM_SECRET_LENGTH: - raise ValidationError(f"--registry-server ACR name must be less than {MAXIMUM_SECRET_LENGTH} " - "characters when using --repo") - if registry_identity and (registry_pass or registry_user): - raise MutuallyExclusiveArgumentError("Cannot provide both registry identity and username/password") - if is_registry_msi_system(registry_identity) and no_wait: - raise MutuallyExclusiveArgumentError("--no-wait is not supported with system registry identity") - if registry_identity and not is_valid_resource_id(registry_identity) and not is_registry_msi_system(registry_identity) and not is_registry_msi_system_environment(registry_identity): - raise InvalidArgumentValueError("--registry-identity must be an identity resource ID or 'system' or 'system-environment'") - if registry_identity and ACR_IMAGE_SUFFIX not in (registry_server or ""): - raise InvalidArgumentValueError("--registry-identity: expected an ACR registry (*.azurecr.io) for --registry-server") - if target_label and (not revisions_mode or revisions_mode.lower() != 'labels'): - raise InvalidArgumentValueError("--target-label must only be specified with --revisions-mode labels.") - - -def validate_runtime(runtime, enable_java_metrics, enable_java_agent): - def is_java_enhancement_enabled(): - return enable_java_agent is not None or enable_java_metrics is not None - - if runtime is None: - return - if runtime.lower() == RUNTIME_GENERIC and is_java_enhancement_enabled(): - raise ValidationError("Usage error: --runtime java is required when using --enable-java-metrics or --enable-java-agent") - - -def validate_env_name_or_id(cmd, namespace): - from azure.cli.core.commands.client_factory import get_subscription_id - from azure.mgmt.core.tools import resource_id, parse_resource_id - - if not namespace.managed_env: - return - - # Set environment type - environment_type = None - - if namespace.__dict__.get("environment_type"): - environment_type = namespace.environment_type - - if is_valid_resource_id(namespace.managed_env): - env_dict = parse_resource_id(namespace.managed_env) - resource_type = env_dict.get("resource_type") - if resource_type: - if CONNECTED_ENVIRONMENT_RESOURCE_TYPE.lower() == resource_type.lower(): - environment_type = CONNECTED_ENVIRONMENT_TYPE - if MANAGED_ENVIRONMENT_RESOURCE_TYPE.lower() == resource_type.lower(): - environment_type = MANAGED_ENVIRONMENT_TYPE - - # Validate resource id / format resource id - if environment_type == CONNECTED_ENVIRONMENT_TYPE: - if not is_valid_resource_id(namespace.managed_env): - namespace.managed_env = resource_id( - subscription=get_subscription_id(cmd.cli_ctx), - resource_group=namespace.resource_group_name, - namespace=CONTAINER_APPS_RP, - type=CONNECTED_ENVIRONMENT_RESOURCE_TYPE, - name=namespace.managed_env - ) - else: - if not is_valid_resource_id(namespace.managed_env): - namespace.managed_env = resource_id( - subscription=get_subscription_id(cmd.cli_ctx), - 
resource_group=namespace.resource_group_name, - namespace=CONTAINER_APPS_RP, - type=MANAGED_ENVIRONMENT_RESOURCE_TYPE, - name=namespace.managed_env - ) - - -def validate_env_name_or_id_for_up(cmd, namespace): - from azure.cli.core.commands.client_factory import get_subscription_id - from azure.mgmt.core.tools import resource_id, parse_resource_id - - if not namespace.environment: - return - - # Set environment type - environment_type = None - - if is_valid_resource_id(namespace.environment): - env_dict = parse_resource_id(namespace.environment) - resource_type = env_dict.get("resource_type") - if resource_type: - if CONNECTED_ENVIRONMENT_RESOURCE_TYPE.lower() == resource_type.lower(): - environment_type = CONNECTED_ENVIRONMENT_TYPE - if MANAGED_ENVIRONMENT_RESOURCE_TYPE.lower() == resource_type.lower(): - environment_type = MANAGED_ENVIRONMENT_TYPE - - if namespace.__dict__.get("custom_location") or namespace.__dict__.get("connected_cluster_id"): - environment_type = CONNECTED_ENVIRONMENT_TYPE - - # Validate resource id / format resource id - if environment_type == CONNECTED_ENVIRONMENT_TYPE: - if not is_valid_resource_id(namespace.environment): - namespace.environment = resource_id( - subscription=get_subscription_id(cmd.cli_ctx), - resource_group=namespace.resource_group_name, - namespace=CONTAINER_APPS_RP, - type=CONNECTED_ENVIRONMENT_RESOURCE_TYPE, - name=namespace.environment - ) - elif environment_type == MANAGED_ENVIRONMENT_TYPE: - if not is_valid_resource_id(namespace.environment): - namespace.environment = resource_id( - subscription=get_subscription_id(cmd.cli_ctx), - resource_group=namespace.resource_group_name, - namespace=CONTAINER_APPS_RP, - type=MANAGED_ENVIRONMENT_RESOURCE_TYPE, - name=namespace.environment - ) - - -def validate_custom_location_name_or_id(cmd, namespace): - from azure.cli.core.commands.client_factory import get_subscription_id - from azure.mgmt.core.tools import resource_id - - if not namespace.custom_location or not namespace.resource_group_name: - return - - if not is_valid_resource_id(namespace.custom_location): - namespace.custom_location = resource_id( - subscription=get_subscription_id(cmd.cli_ctx), - resource_group=namespace.resource_group_name, - namespace=EXTENDED_LOCATION_RP, - type=CUSTOM_LOCATION_RESOURCE_TYPE, - name=namespace.custom_location - ) - - -def validate_build_env_vars(cmd, namespace): - env_list = namespace.build_env_vars - - if not env_list: - return - - env_pairs = {} - - for pair in env_list: - key_val = pair.split('=', 1) - if len(key_val) <= 1: - raise ValidationError("Build environment variables must be in the format \"=\".") - if key_val[0] in env_pairs: - raise ValidationError( - "Duplicate build environment variable {env} found, environment variable names must be unique.".format( - env=key_val[0])) - env_pairs[key_val[0]] = key_val[1] - - -def validate_otlp_headers(cmd, namespace): - headers = namespace.headers - - if not headers: - return - - header_pairs = {} - - for pair in headers: - key_val = pair.split('=', 1) - if len(key_val) != 2: - raise ValidationError("Otlp headers must be in the format \"=\".") - if key_val[0] in header_pairs: - raise ValidationError( - "Duplicate headers {header} found, header names must be unique.".format( - header=key_val[0])) - header_pairs[key_val[0]] = key_val[1] - - -def validate_target_port_range(cmd, namespace): - target_port = namespace.target_port - if target_port is not None: - if target_port < 1 or target_port > 65535: - raise ValidationError("Port must be in range [1, 65535].") - - 
-def validate_session_timeout_in_seconds(cmd, namespace): - timeout_in_seconds = namespace.timeout_in_seconds - if timeout_in_seconds is not None: - if timeout_in_seconds <= 0 or timeout_in_seconds > 220: - raise ValidationError("Timeout in seconds must be in range [1, 220].") - - -def validate_debug(cmd, namespace): - logger.warning("Validating...") - revision_already_set = bool(namespace.revision) - replica_already_set = bool(namespace.replica) - container_already_set = bool(namespace.container) - _set_debug_defaults(cmd, namespace) - if revision_already_set: - _validate_revision_exists(cmd, namespace) - if replica_already_set: - _validate_replica_exists(cmd, namespace) - if container_already_set: - _validate_container_exists(cmd, namespace) - - -def _set_debug_defaults(cmd, namespace): - app = ContainerAppPreviewClient.show(cmd, namespace.resource_group_name, namespace.name) - if not app: - raise ResourceNotFoundError("Could not find a container app") - - from azure.mgmt.core.tools import parse_resource_id - parsed_env = parse_resource_id(safe_get(app, "properties", "environmentId")) - resource_type = parsed_env.get("resource_type") - if resource_type: - if CONNECTED_ENVIRONMENT_RESOURCE_TYPE.lower() == resource_type.lower(): - raise ValidationError( - "The app belongs to ConnectedEnvironment, which is not support debug console. Please use the apps belong to ManagedEnvironment.") - - if not namespace.revision: - namespace.revision = app.get("properties", {}).get("latestRevisionName") - if not namespace.revision: - raise ResourceNotFoundError("Could not find a revision") - if not namespace.replica: - replicas = ContainerAppPreviewClient.list_replicas( - cmd=cmd, - resource_group_name=namespace.resource_group_name, - container_app_name=namespace.name, - revision_name=namespace.revision - ) - if not replicas: - raise ResourceNotFoundError("Could not find an active replica") - namespace.replica = replicas[0]["name"] - if not namespace.container and replicas[0]["properties"]["containers"]: - namespace.container = replicas[0]["properties"]["containers"][0]["name"] - if not namespace.container: - revision = ContainerAppPreviewClient.show_revision( - cmd, - resource_group_name=namespace.resource_group_name, - container_app_name=namespace.name, - name=namespace.revision - ) - revision_containers = safe_get(revision, "properties", "template", "containers") - if revision_containers: - namespace.container = revision_containers[0]["name"] +# -------------------------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. 
+# -------------------------------------------------------------------------------------------- +# pylint: disable=line-too-long, unused-argument + +from knack.log import get_logger +from urllib.parse import urlparse + +from azure.cli.core.azclierror import (InvalidArgumentValueError, + MutuallyExclusiveArgumentError, RequiredArgumentMissingError, + ResourceNotFoundError, ValidationError) +from azure.cli.command_modules.containerapp._utils import is_registry_msi_system, safe_get +from azure.cli.command_modules.containerapp._validators import _validate_revision_exists, _validate_replica_exists, \ + _validate_container_exists +from azure.mgmt.core.tools import is_valid_resource_id + +from ._clients import ContainerAppPreviewClient +from ._utils import is_registry_msi_system_environment + +from ._constants import ACR_IMAGE_SUFFIX, \ + CONNECTED_ENVIRONMENT_TYPE, \ + EXTENDED_LOCATION_RP, CUSTOM_LOCATION_RESOURCE_TYPE, MAXIMUM_SECRET_LENGTH, CONTAINER_APPS_RP, \ + CONNECTED_ENVIRONMENT_RESOURCE_TYPE, MANAGED_ENVIRONMENT_RESOURCE_TYPE, MANAGED_ENVIRONMENT_TYPE, \ + RUNTIME_GENERIC + +logger = get_logger(__name__) + + +# called directly from custom method bc otherwise it disrupts the --environment auto RID functionality +def validate_create(registry_identity, registry_pass, registry_user, registry_server, no_wait, revisions_mode=None, target_label=None, source=None, artifact=None, repo=None, yaml=None, environment_type=None): + if source and repo: + raise MutuallyExclusiveArgumentError("Usage error: --source and --repo cannot be used together. Can either deploy from a local directory or a GitHub repository") + if (source or repo) and yaml: + raise MutuallyExclusiveArgumentError("Usage error: --source or --repo cannot be used with --yaml together. Can either deploy from a local directory or provide a yaml file") + if (source or repo) and environment_type == CONNECTED_ENVIRONMENT_TYPE: + raise MutuallyExclusiveArgumentError("Usage error: --source or --repo cannot be used with --environment-type connectedEnvironment together. 
Please use --environment-type managedEnvironment") + if repo: + if not registry_server: + raise RequiredArgumentMissingError('Usage error: --registry-server is required while using --repo') + if ACR_IMAGE_SUFFIX not in registry_server: + raise InvalidArgumentValueError("Usage error: --registry-server: expected an ACR registry (*.azurecr.io) for --repo") + if repo and registry_server and "azurecr.io" in registry_server: + parsed = urlparse(registry_server) + registry_name = (parsed.netloc if parsed.scheme else parsed.path).split(".")[0] + if registry_name and len(registry_name) > MAXIMUM_SECRET_LENGTH: + raise ValidationError(f"--registry-server ACR name must be less than {MAXIMUM_SECRET_LENGTH} " + "characters when using --repo") + if registry_identity and (registry_pass or registry_user): + raise MutuallyExclusiveArgumentError("Cannot provide both registry identity and username/password") + if is_registry_msi_system(registry_identity) and no_wait: + raise MutuallyExclusiveArgumentError("--no-wait is not supported with system registry identity") + if registry_identity and not is_valid_resource_id(registry_identity) and not is_registry_msi_system(registry_identity) and not is_registry_msi_system_environment(registry_identity): + raise InvalidArgumentValueError("--registry-identity must be an identity resource ID or 'system' or 'system-environment'") + if registry_identity and ACR_IMAGE_SUFFIX not in (registry_server or ""): + raise InvalidArgumentValueError("--registry-identity: expected an ACR registry (*.azurecr.io) for --registry-server") + if target_label and (not revisions_mode or revisions_mode.lower() != 'labels'): + raise InvalidArgumentValueError("--target-label must only be specified with --revisions-mode labels.") + + +def validate_runtime(runtime, enable_java_metrics, enable_java_agent): + def is_java_enhancement_enabled(): + return enable_java_agent is not None or enable_java_metrics is not None + + if runtime is None: + return + if runtime.lower() == RUNTIME_GENERIC and is_java_enhancement_enabled(): + raise ValidationError("Usage error: --runtime java is required when using --enable-java-metrics or --enable-java-agent") + + +def validate_env_name_or_id(cmd, namespace): + from azure.cli.core.commands.client_factory import get_subscription_id + from azure.mgmt.core.tools import resource_id, parse_resource_id + + if not namespace.managed_env: + return + + # Set environment type + environment_type = None + + if namespace.__dict__.get("environment_type"): + environment_type = namespace.environment_type + + if is_valid_resource_id(namespace.managed_env): + env_dict = parse_resource_id(namespace.managed_env) + resource_type = env_dict.get("resource_type") + if resource_type: + if CONNECTED_ENVIRONMENT_RESOURCE_TYPE.lower() == resource_type.lower(): + environment_type = CONNECTED_ENVIRONMENT_TYPE + if MANAGED_ENVIRONMENT_RESOURCE_TYPE.lower() == resource_type.lower(): + environment_type = MANAGED_ENVIRONMENT_TYPE + + # Validate resource id / format resource id + if environment_type == CONNECTED_ENVIRONMENT_TYPE: + if not is_valid_resource_id(namespace.managed_env): + namespace.managed_env = resource_id( + subscription=get_subscription_id(cmd.cli_ctx), + resource_group=namespace.resource_group_name, + namespace=CONTAINER_APPS_RP, + type=CONNECTED_ENVIRONMENT_RESOURCE_TYPE, + name=namespace.managed_env + ) + else: + if not is_valid_resource_id(namespace.managed_env): + namespace.managed_env = resource_id( + subscription=get_subscription_id(cmd.cli_ctx), + 
resource_group=namespace.resource_group_name, + namespace=CONTAINER_APPS_RP, + type=MANAGED_ENVIRONMENT_RESOURCE_TYPE, + name=namespace.managed_env + ) + + +def validate_env_name_or_id_for_up(cmd, namespace): + from azure.cli.core.commands.client_factory import get_subscription_id + from azure.mgmt.core.tools import resource_id, parse_resource_id + + if not namespace.environment: + return + + # Set environment type + environment_type = None + + if is_valid_resource_id(namespace.environment): + env_dict = parse_resource_id(namespace.environment) + resource_type = env_dict.get("resource_type") + if resource_type: + if CONNECTED_ENVIRONMENT_RESOURCE_TYPE.lower() == resource_type.lower(): + environment_type = CONNECTED_ENVIRONMENT_TYPE + if MANAGED_ENVIRONMENT_RESOURCE_TYPE.lower() == resource_type.lower(): + environment_type = MANAGED_ENVIRONMENT_TYPE + + if namespace.__dict__.get("custom_location") or namespace.__dict__.get("connected_cluster_id"): + environment_type = CONNECTED_ENVIRONMENT_TYPE + + # Validate resource id / format resource id + if environment_type == CONNECTED_ENVIRONMENT_TYPE: + if not is_valid_resource_id(namespace.environment): + namespace.environment = resource_id( + subscription=get_subscription_id(cmd.cli_ctx), + resource_group=namespace.resource_group_name, + namespace=CONTAINER_APPS_RP, + type=CONNECTED_ENVIRONMENT_RESOURCE_TYPE, + name=namespace.environment + ) + elif environment_type == MANAGED_ENVIRONMENT_TYPE: + if not is_valid_resource_id(namespace.environment): + namespace.environment = resource_id( + subscription=get_subscription_id(cmd.cli_ctx), + resource_group=namespace.resource_group_name, + namespace=CONTAINER_APPS_RP, + type=MANAGED_ENVIRONMENT_RESOURCE_TYPE, + name=namespace.environment + ) + + +def validate_custom_location_name_or_id(cmd, namespace): + from azure.cli.core.commands.client_factory import get_subscription_id + from azure.mgmt.core.tools import resource_id + + if not namespace.custom_location or not namespace.resource_group_name: + return + + if not is_valid_resource_id(namespace.custom_location): + namespace.custom_location = resource_id( + subscription=get_subscription_id(cmd.cli_ctx), + resource_group=namespace.resource_group_name, + namespace=EXTENDED_LOCATION_RP, + type=CUSTOM_LOCATION_RESOURCE_TYPE, + name=namespace.custom_location + ) + + +def validate_build_env_vars(cmd, namespace): + env_list = namespace.build_env_vars + + if not env_list: + return + + env_pairs = {} + + for pair in env_list: + key_val = pair.split('=', 1) + if len(key_val) <= 1: + raise ValidationError("Build environment variables must be in the format \"=\".") + if key_val[0] in env_pairs: + raise ValidationError( + "Duplicate build environment variable {env} found, environment variable names must be unique.".format( + env=key_val[0])) + env_pairs[key_val[0]] = key_val[1] + + +def validate_otlp_headers(cmd, namespace): + headers = namespace.headers + + if not headers: + return + + header_pairs = {} + + for pair in headers: + key_val = pair.split('=', 1) + if len(key_val) != 2: + raise ValidationError("Otlp headers must be in the format \"=\".") + if key_val[0] in header_pairs: + raise ValidationError( + "Duplicate headers {header} found, header names must be unique.".format( + header=key_val[0])) + header_pairs[key_val[0]] = key_val[1] + + +def validate_target_port_range(cmd, namespace): + target_port = namespace.target_port + if target_port is not None: + if target_port < 1 or target_port > 65535: + raise ValidationError("Port must be in range [1, 65535].") + + 
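Illustrative only, not part of this patch: a quick sketch of how the KEY=VALUE and port-range validators above behave. The import path is an assumption (wherever azext_containerapp._validators resolves in your environment); SimpleNamespace stands in for the parsed argument namespace, and cmd is unused by these particular validators.

from types import SimpleNamespace

from azure.cli.core.azclierror import ValidationError
from azext_containerapp._validators import (  # assumed import location
    validate_build_env_vars,
    validate_target_port_range,
)

# Duplicate build-env keys are rejected: names must be unique.
ns = SimpleNamespace(build_env_vars=["FOO=1", "FOO=2"])
try:
    validate_build_env_vars(None, ns)
except ValidationError as err:
    print(err)  # "Duplicate build environment variable FOO found, ..."

# Ports outside [1, 65535] are rejected; None means the flag was not set and is a no-op.
try:
    validate_target_port_range(None, SimpleNamespace(target_port=70000))
except ValidationError as err:
    print(err)  # "Port must be in range [1, 65535]."

validate_target_port_range(None, SimpleNamespace(target_port=None))  # passes silently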
+def validate_session_timeout_in_seconds(cmd, namespace): + timeout_in_seconds = namespace.timeout_in_seconds + if timeout_in_seconds is not None: + if timeout_in_seconds <= 0 or timeout_in_seconds > 220: + raise ValidationError("Timeout in seconds must be in range [1, 220].") + + +def validate_debug(cmd, namespace): + logger.warning("Validating...") + revision_already_set = bool(namespace.revision) + replica_already_set = bool(namespace.replica) + container_already_set = bool(namespace.container) + _set_debug_defaults(cmd, namespace) + if revision_already_set: + _validate_revision_exists(cmd, namespace) + if replica_already_set: + _validate_replica_exists(cmd, namespace) + if container_already_set: + _validate_container_exists(cmd, namespace) + + +def _set_debug_defaults(cmd, namespace): + app = ContainerAppPreviewClient.show(cmd, namespace.resource_group_name, namespace.name) + if not app: + raise ResourceNotFoundError("Could not find a container app") + + from azure.mgmt.core.tools import parse_resource_id + parsed_env = parse_resource_id(safe_get(app, "properties", "environmentId")) + resource_type = parsed_env.get("resource_type") + if resource_type: + if CONNECTED_ENVIRONMENT_RESOURCE_TYPE.lower() == resource_type.lower(): + raise ValidationError( + "The app belongs to ConnectedEnvironment, which is not support debug console. Please use the apps belong to ManagedEnvironment.") + + if not namespace.revision: + namespace.revision = app.get("properties", {}).get("latestRevisionName") + if not namespace.revision: + raise ResourceNotFoundError("Could not find a revision") + if not namespace.replica: + replicas = ContainerAppPreviewClient.list_replicas( + cmd=cmd, + resource_group_name=namespace.resource_group_name, + container_app_name=namespace.name, + revision_name=namespace.revision + ) + if not replicas: + raise ResourceNotFoundError("Could not find an active replica") + namespace.replica = replicas[0]["name"] + if not namespace.container and replicas[0]["properties"]["containers"]: + namespace.container = replicas[0]["properties"]["containers"][0]["name"] + if not namespace.container: + revision = ContainerAppPreviewClient.show_revision( + cmd, + resource_group_name=namespace.resource_group_name, + container_app_name=namespace.name, + name=namespace.revision + ) + revision_containers = safe_get(revision, "properties", "template", "containers") + if revision_containers: + namespace.container = revision_containers[0]["name"] diff --git a/src/containerapp/azext_containerapp/custom.py b/src/containerapp/azext_containerapp/custom.py index 646bb6008d8..209dae73fa3 100644 --- a/src/containerapp/azext_containerapp/custom.py +++ b/src/containerapp/azext_containerapp/custom.py @@ -2,7 +2,14 @@ # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License.txt in the project root for license information. 
# -------------------------------------------------------------------------------------------- -# pylint: disable=line-too-long, unused-argument, logging-fstring-interpolation, logging-not-lazy, consider-using-f-string, logging-format-interpolation, inconsistent-return-statements, broad-except, bare-except, too-many-statements, too-many-locals, too-many-boolean-expressions, too-many-branches, too-many-nested-blocks, pointless-statement, expression-not-assigned, unbalanced-tuple-unpacking, unsupported-assignment-operation +# pylint: disable=line-too-long, unused-argument, +# logging-fstring-interpolation, logging-not-lazy, +# consider-using-f-string, logging-format-interpolation, +# inconsistent-return-statements, broad-except, bare-except, +# too-many-statements, too-many-locals, too-many-boolean-expressions, +# too-many-branches, too-many-nested-blocks, pointless-statement, +# expression-not-assigned, unbalanced-tuple-unpacking, +# unsupported-assignment-operation import threading import time @@ -32,19 +39,31 @@ from azure.cli.command_modules.containerapp.containerapp_env_decorator import ContainerAppEnvDecorator from azure.cli.command_modules.containerapp._decorator_utils import load_yaml_file from azure.cli.command_modules.containerapp._github_oauth import get_github_access_token -from azure.cli.command_modules.containerapp._utils import (safe_set, - _validate_subscription_registered, - _convert_object_from_snake_to_camel_case, - _object_to_dict, _remove_additional_attributes, - raise_missing_token_suggestion, - _remove_dapr_readonly_attributes, - _get_acr_cred, safe_get, await_github_action, repo_url_to_name, - validate_container_app_name, register_provider_if_needed, - generate_randomized_cert_name, load_cert_file, - trigger_workflow, _ensure_identity_resource_id, - AppType, _get_existing_secrets, store_as_secret_and_return_secret_ref, - set_managed_identity, is_registry_msi_system, _get_app_from_revision, - _validate_revision_name) +from azure.cli.command_modules.containerapp._utils import ( + safe_set, + _validate_subscription_registered, + _convert_object_from_snake_to_camel_case, + _object_to_dict, + _remove_additional_attributes, + raise_missing_token_suggestion, + _remove_dapr_readonly_attributes, + _get_acr_cred, + safe_get, + await_github_action, + repo_url_to_name, + validate_container_app_name, + register_provider_if_needed, + generate_randomized_cert_name, + load_cert_file, + trigger_workflow, + _ensure_identity_resource_id, + AppType, + _get_existing_secrets, + store_as_secret_and_return_secret_ref, + set_managed_identity, + is_registry_msi_system, + _get_app_from_revision, + _validate_revision_name) from azure.cli.command_modules.containerapp._models import ( RegistryCredentials as RegistryCredentialsModel, ) @@ -127,316 +146,561 @@ AzureFileProperties as AzureFilePropertiesModel ) -from ._ssh_utils import (SSH_DEFAULT_ENCODING, DebugWebSocketConnection, read_debug_ssh) - -from ._utils import (connected_env_check_cert_name_availability, get_oryx_run_image_tags, patchable_check, - get_pack_exec_path, is_docker_running, parse_build_env_vars, env_has_managed_identity) - -from ._arc_utils import (extract_domain_from_configmap, get_core_dns_deployment, get_core_dns_configmap, backup_custom_core_dns_configmap, - replace_configmap, replace_deployment, delete_configmap, patch_coredns, - create_folder, create_sub_folder, - check_kube_connection, create_kube_client, restart_openshift_dns_daemonset) - -from ._constants import (AKS_AZURE_LOCAL_DISTRO, CONTAINER_APPS_RP, - 
NAME_INVALID, NAME_ALREADY_EXISTS, ACR_IMAGE_SUFFIX, DEV_POSTGRES_IMAGE, DEV_POSTGRES_SERVICE_TYPE, - DEV_POSTGRES_CONTAINER_NAME, DEV_REDIS_IMAGE, DEV_REDIS_SERVICE_TYPE, DEV_REDIS_CONTAINER_NAME, DEV_KAFKA_CONTAINER_NAME, - DEV_KAFKA_IMAGE, DEV_KAFKA_SERVICE_TYPE, DEV_MARIADB_CONTAINER_NAME, DEV_MARIADB_IMAGE, DEV_MARIADB_SERVICE_TYPE, DEV_QDRANT_IMAGE, - DEV_QDRANT_CONTAINER_NAME, DEV_QDRANT_SERVICE_TYPE, DEV_WEAVIATE_IMAGE, DEV_WEAVIATE_CONTAINER_NAME, DEV_WEAVIATE_SERVICE_TYPE, - DEV_MILVUS_IMAGE, DEV_MILVUS_CONTAINER_NAME, DEV_MILVUS_SERVICE_TYPE, DEV_SERVICE_LIST, CONTAINER_APPS_SDK_MODELS, BLOB_STORAGE_TOKEN_STORE_SECRET_SETTING_NAME, - DAPR_SUPPORTED_STATESTORE_DEV_SERVICE_LIST, DAPR_SUPPORTED_PUBSUB_DEV_SERVICE_LIST, - JAVA_COMPONENT_CONFIG, JAVA_COMPONENT_EUREKA, JAVA_COMPONENT_ADMIN, JAVA_COMPONENT_NACOS, JAVA_COMPONENT_GATEWAY, DOTNET_COMPONENT_RESOURCE_TYPE, - CUSTOM_CORE_DNS, CORE_DNS, KUBE_SYSTEM, OPENSHIFT_DISTRO, OPENSHIFT_DNS) - +from ._ssh_utils import ( + SSH_DEFAULT_ENCODING, + DebugWebSocketConnection, + read_debug_ssh) + +from ._utils import ( + connected_env_check_cert_name_availability, + get_oryx_run_image_tags, + patchable_check, + get_pack_exec_path, + is_docker_running, + parse_build_env_vars, + env_has_managed_identity) + +from ._arc_utils import ( + extract_domain_from_configmap, + get_core_dns_deployment, + get_core_dns_configmap, + backup_custom_core_dns_configmap, + replace_configmap, + replace_deployment, + delete_configmap, + patch_coredns, + create_folder, + create_sub_folder, + check_kube_connection, + create_kube_client, + restart_openshift_dns_daemonset) + +from ._constants import ( + AKS_AZURE_LOCAL_DISTRO, + CONTAINER_APPS_RP, + NAME_INVALID, + NAME_ALREADY_EXISTS, + ACR_IMAGE_SUFFIX, + DEV_POSTGRES_IMAGE, + DEV_POSTGRES_SERVICE_TYPE, + DEV_POSTGRES_CONTAINER_NAME, + DEV_REDIS_IMAGE, + DEV_REDIS_SERVICE_TYPE, + DEV_REDIS_CONTAINER_NAME, + DEV_KAFKA_CONTAINER_NAME, + DEV_KAFKA_IMAGE, + DEV_KAFKA_SERVICE_TYPE, + DEV_MARIADB_CONTAINER_NAME, + DEV_MARIADB_IMAGE, + DEV_MARIADB_SERVICE_TYPE, + DEV_QDRANT_IMAGE, + DEV_QDRANT_CONTAINER_NAME, + DEV_QDRANT_SERVICE_TYPE, + DEV_WEAVIATE_IMAGE, + DEV_WEAVIATE_CONTAINER_NAME, + DEV_WEAVIATE_SERVICE_TYPE, + DEV_MILVUS_IMAGE, + DEV_MILVUS_CONTAINER_NAME, + DEV_MILVUS_SERVICE_TYPE, + DEV_SERVICE_LIST, + CONTAINER_APPS_SDK_MODELS, + BLOB_STORAGE_TOKEN_STORE_SECRET_SETTING_NAME, + DAPR_SUPPORTED_STATESTORE_DEV_SERVICE_LIST, + DAPR_SUPPORTED_PUBSUB_DEV_SERVICE_LIST, + JAVA_COMPONENT_CONFIG, + JAVA_COMPONENT_EUREKA, + JAVA_COMPONENT_ADMIN, + JAVA_COMPONENT_NACOS, + JAVA_COMPONENT_GATEWAY, + DOTNET_COMPONENT_RESOURCE_TYPE, + CUSTOM_CORE_DNS, + CORE_DNS, + KUBE_SYSTEM, + OPENSHIFT_DISTRO, + OPENSHIFT_DNS) logger = get_logger(__name__) def list_all_services(cmd, environment_name, resource_group_name): - services = list_containerapp(cmd, resource_group_name=resource_group_name, managed_env=environment_name) + services = list_containerapp( + cmd, + resource_group_name=resource_group_name, + managed_env=environment_name) dev_service_list = [] for service in services: - service_type = safe_get(service, "properties", "configuration", "service", "type", default="") + service_type = safe_get( + service, + "properties", + "configuration", + "service", + "type", + default="") if service_type in DEV_SERVICE_LIST: dev_service_list.append(service) return dev_service_list -def create_redis_service(cmd, service_name, environment_name, resource_group_name, no_wait=False, - disable_warnings=True): - return 
DevServiceUtils.create_service(cmd, service_name, environment_name, resource_group_name, no_wait, - disable_warnings, DEV_REDIS_IMAGE, DEV_REDIS_SERVICE_TYPE, - DEV_REDIS_CONTAINER_NAME) - - -def delete_redis_service(cmd, service_name, resource_group_name, no_wait=False): - return DevServiceUtils.delete_service(cmd, service_name, resource_group_name, no_wait, DEV_REDIS_SERVICE_TYPE) - - -def create_postgres_service(cmd, service_name, environment_name, resource_group_name, no_wait=False, - disable_warnings=True): - return DevServiceUtils.create_service(cmd, service_name, environment_name, resource_group_name, no_wait, - disable_warnings, DEV_POSTGRES_IMAGE, DEV_POSTGRES_SERVICE_TYPE, - DEV_POSTGRES_CONTAINER_NAME) - - -def delete_postgres_service(cmd, service_name, resource_group_name, no_wait=False): - return DevServiceUtils.delete_service(cmd, service_name, resource_group_name, no_wait, DEV_POSTGRES_SERVICE_TYPE) - - -def create_kafka_service(cmd, service_name, environment_name, resource_group_name, no_wait=False, - disable_warnings=True): - return DevServiceUtils.create_service(cmd, service_name, environment_name, resource_group_name, no_wait, - disable_warnings, DEV_KAFKA_IMAGE, DEV_KAFKA_SERVICE_TYPE, - DEV_KAFKA_CONTAINER_NAME) - - -def delete_kafka_service(cmd, service_name, resource_group_name, no_wait=False): - return DevServiceUtils.delete_service(cmd, service_name, resource_group_name, no_wait, DEV_KAFKA_SERVICE_TYPE) - - -def create_mariadb_service(cmd, service_name, environment_name, resource_group_name, no_wait=False, - disable_warnings=True): - return DevServiceUtils.create_service(cmd, service_name, environment_name, resource_group_name, no_wait, - disable_warnings, DEV_MARIADB_IMAGE, DEV_MARIADB_SERVICE_TYPE, - DEV_MARIADB_CONTAINER_NAME) - - -def delete_mariadb_service(cmd, service_name, resource_group_name, no_wait=False): - return DevServiceUtils.delete_service(cmd, service_name, resource_group_name, no_wait, DEV_MARIADB_SERVICE_TYPE) - - -def create_qdrant_service(cmd, service_name, environment_name, resource_group_name, no_wait=False, - disable_warnings=True): - return DevServiceUtils.create_service(cmd, service_name, environment_name, resource_group_name, no_wait, - disable_warnings, DEV_QDRANT_IMAGE, DEV_QDRANT_SERVICE_TYPE, - DEV_QDRANT_CONTAINER_NAME) - - -def delete_qdrant_service(cmd, service_name, resource_group_name, no_wait=False): - return DevServiceUtils.delete_service(cmd, service_name, resource_group_name, no_wait, DEV_QDRANT_SERVICE_TYPE) - - -def create_weaviate_service(cmd, service_name, environment_name, resource_group_name, no_wait=False, - disable_warnings=True): - return DevServiceUtils.create_service(cmd, service_name, environment_name, resource_group_name, no_wait, - disable_warnings, DEV_WEAVIATE_IMAGE, DEV_WEAVIATE_SERVICE_TYPE, - DEV_WEAVIATE_CONTAINER_NAME) - - -def delete_weaviate_service(cmd, service_name, resource_group_name, no_wait=False): - return DevServiceUtils.delete_service(cmd, service_name, resource_group_name, no_wait, DEV_WEAVIATE_SERVICE_TYPE) - - -def create_milvus_service(cmd, service_name, environment_name, resource_group_name, no_wait=False, - disable_warnings=True): - return DevServiceUtils.create_service(cmd, service_name, environment_name, resource_group_name, no_wait, - disable_warnings, DEV_MILVUS_IMAGE, DEV_MILVUS_SERVICE_TYPE, - DEV_MILVUS_CONTAINER_NAME) - - -def delete_milvus_service(cmd, service_name, resource_group_name, no_wait=False): - return DevServiceUtils.delete_service(cmd, service_name, 
resource_group_name, no_wait, DEV_MILVUS_SERVICE_TYPE) - - -def create_container_app_resiliency(cmd, name, resource_group_name, container_app_name, - yaml=None, - no_wait=False, - disable_warnings=False, - tcp_retry_max_connect_attempts=None, - circuit_breaker_consecutive_errors=None, - circuit_breaker_interval=None, - circuit_breaker_max_ejection=None, - tcp_connection_pool_max_connections=None, - http_connection_pool_http1_max_pending_req=None, - http_connection_pool_http2_max_req=None, - timeout_response_in_seconds=None, - timeout_connection_in_seconds=None, - http_retry_max=None, - http_retry_delay_in_milliseconds=None, - http_retry_interval_in_milliseconds=None, - http_retry_status_codes=None, - http_retry_errors=None, - default=False): +def create_redis_service( + cmd, + service_name, + environment_name, + resource_group_name, + no_wait=False, + disable_warnings=True): + return DevServiceUtils.create_service( + cmd, + service_name, + environment_name, + resource_group_name, + no_wait, + disable_warnings, + DEV_REDIS_IMAGE, + DEV_REDIS_SERVICE_TYPE, + DEV_REDIS_CONTAINER_NAME) + + +def delete_redis_service( + cmd, + service_name, + resource_group_name, + no_wait=False): + return DevServiceUtils.delete_service( + cmd, + service_name, + resource_group_name, + no_wait, + DEV_REDIS_SERVICE_TYPE) + + +def create_postgres_service( + cmd, + service_name, + environment_name, + resource_group_name, + no_wait=False, + disable_warnings=True): + return DevServiceUtils.create_service( + cmd, + service_name, + environment_name, + resource_group_name, + no_wait, + disable_warnings, + DEV_POSTGRES_IMAGE, + DEV_POSTGRES_SERVICE_TYPE, + DEV_POSTGRES_CONTAINER_NAME) + + +def delete_postgres_service( + cmd, + service_name, + resource_group_name, + no_wait=False): + return DevServiceUtils.delete_service( + cmd, + service_name, + resource_group_name, + no_wait, + DEV_POSTGRES_SERVICE_TYPE) + + +def create_kafka_service( + cmd, + service_name, + environment_name, + resource_group_name, + no_wait=False, + disable_warnings=True): + return DevServiceUtils.create_service( + cmd, + service_name, + environment_name, + resource_group_name, + no_wait, + disable_warnings, + DEV_KAFKA_IMAGE, + DEV_KAFKA_SERVICE_TYPE, + DEV_KAFKA_CONTAINER_NAME) + + +def delete_kafka_service( + cmd, + service_name, + resource_group_name, + no_wait=False): + return DevServiceUtils.delete_service( + cmd, + service_name, + resource_group_name, + no_wait, + DEV_KAFKA_SERVICE_TYPE) + + +def create_mariadb_service( + cmd, + service_name, + environment_name, + resource_group_name, + no_wait=False, + disable_warnings=True): + return DevServiceUtils.create_service( + cmd, + service_name, + environment_name, + resource_group_name, + no_wait, + disable_warnings, + DEV_MARIADB_IMAGE, + DEV_MARIADB_SERVICE_TYPE, + DEV_MARIADB_CONTAINER_NAME) + + +def delete_mariadb_service( + cmd, + service_name, + resource_group_name, + no_wait=False): + return DevServiceUtils.delete_service( + cmd, + service_name, + resource_group_name, + no_wait, + DEV_MARIADB_SERVICE_TYPE) + + +def create_qdrant_service( + cmd, + service_name, + environment_name, + resource_group_name, + no_wait=False, + disable_warnings=True): + return DevServiceUtils.create_service( + cmd, + service_name, + environment_name, + resource_group_name, + no_wait, + disable_warnings, + DEV_QDRANT_IMAGE, + DEV_QDRANT_SERVICE_TYPE, + DEV_QDRANT_CONTAINER_NAME) + + +def delete_qdrant_service( + cmd, + service_name, + resource_group_name, + no_wait=False): + return DevServiceUtils.delete_service( + 
cmd, + service_name, + resource_group_name, + no_wait, + DEV_QDRANT_SERVICE_TYPE) + + +def create_weaviate_service( + cmd, + service_name, + environment_name, + resource_group_name, + no_wait=False, + disable_warnings=True): + return DevServiceUtils.create_service( + cmd, + service_name, + environment_name, + resource_group_name, + no_wait, + disable_warnings, + DEV_WEAVIATE_IMAGE, + DEV_WEAVIATE_SERVICE_TYPE, + DEV_WEAVIATE_CONTAINER_NAME) + + +def delete_weaviate_service( + cmd, + service_name, + resource_group_name, + no_wait=False): + return DevServiceUtils.delete_service( + cmd, + service_name, + resource_group_name, + no_wait, + DEV_WEAVIATE_SERVICE_TYPE) + + +def create_milvus_service( + cmd, + service_name, + environment_name, + resource_group_name, + no_wait=False, + disable_warnings=True): + return DevServiceUtils.create_service( + cmd, + service_name, + environment_name, + resource_group_name, + no_wait, + disable_warnings, + DEV_MILVUS_IMAGE, + DEV_MILVUS_SERVICE_TYPE, + DEV_MILVUS_CONTAINER_NAME) + + +def delete_milvus_service( + cmd, + service_name, + resource_group_name, + no_wait=False): + return DevServiceUtils.delete_service( + cmd, + service_name, + resource_group_name, + no_wait, + DEV_MILVUS_SERVICE_TYPE) + + +def create_container_app_resiliency( + cmd, + name, + resource_group_name, + container_app_name, + yaml=None, + no_wait=False, + disable_warnings=False, + tcp_retry_max_connect_attempts=None, + circuit_breaker_consecutive_errors=None, + circuit_breaker_interval=None, + circuit_breaker_max_ejection=None, + tcp_connection_pool_max_connections=None, + http_connection_pool_http1_max_pending_req=None, + http_connection_pool_http2_max_req=None, + timeout_response_in_seconds=None, + timeout_connection_in_seconds=None, + http_retry_max=None, + http_retry_delay_in_milliseconds=None, + http_retry_interval_in_milliseconds=None, + http_retry_status_codes=None, + http_retry_errors=None, + default=False): raw_parameters = locals() containerapp_resiliency_create_decorator = ContainerAppResiliencyPreviewCreateDecorator( cmd=cmd, client=ContainerAppsResiliencyPreviewClient, raw_parameters=raw_parameters, - models=CONTAINER_APPS_SDK_MODELS - ) + models=CONTAINER_APPS_SDK_MODELS) containerapp_resiliency_create_decorator.validate_arguments() containerapp_resiliency_create_decorator.construct_payload() return containerapp_resiliency_create_decorator.create() -def update_container_app_resiliency(cmd, name, resource_group_name, container_app_name, - yaml=None, - no_wait=False, - disable_warnings=False, - tcp_retry_max_connect_attempts=None, - circuit_breaker_consecutive_errors=None, - circuit_breaker_interval=None, - circuit_breaker_max_ejection=None, - tcp_connection_pool_max_connections=None, - http_connection_pool_http1_max_pending_req=None, - http_connection_pool_http2_max_req=None, - timeout_response_in_seconds=None, - timeout_connection_in_seconds=None, - http_retry_max=None, - http_retry_delay_in_milliseconds=None, - http_retry_interval_in_milliseconds=None, - http_retry_status_codes=None, - http_retry_errors=None): +def update_container_app_resiliency( + cmd, + name, + resource_group_name, + container_app_name, + yaml=None, + no_wait=False, + disable_warnings=False, + tcp_retry_max_connect_attempts=None, + circuit_breaker_consecutive_errors=None, + circuit_breaker_interval=None, + circuit_breaker_max_ejection=None, + tcp_connection_pool_max_connections=None, + http_connection_pool_http1_max_pending_req=None, + http_connection_pool_http2_max_req=None, + 
timeout_response_in_seconds=None, + timeout_connection_in_seconds=None, + http_retry_max=None, + http_retry_delay_in_milliseconds=None, + http_retry_interval_in_milliseconds=None, + http_retry_status_codes=None, + http_retry_errors=None): raw_parameters = locals() containerapp_resiliency_update_decorator = ContainerAppResiliencyPreviewUpdateDecorator( cmd=cmd, client=ContainerAppsResiliencyPreviewClient, raw_parameters=raw_parameters, - models=CONTAINER_APPS_SDK_MODELS - ) + models=CONTAINER_APPS_SDK_MODELS) containerapp_resiliency_update_decorator.validate_arguments() containerapp_resiliency_update_decorator.construct_payload() return containerapp_resiliency_update_decorator.update() -def delete_container_app_resiliency(cmd, name, resource_group_name, container_app_name, no_wait=False): +def delete_container_app_resiliency( + cmd, + name, + resource_group_name, + container_app_name, + no_wait=False): raw_parameters = locals() containerapp_resiliency_delete_decorator = ContainerAppResiliencyPreviewDeleteDecorator( cmd=cmd, client=ContainerAppsResiliencyPreviewClient, raw_parameters=raw_parameters, - models=CONTAINER_APPS_SDK_MODELS - ) + models=CONTAINER_APPS_SDK_MODELS) return containerapp_resiliency_delete_decorator.delete() -def show_container_app_resiliency(cmd, name, resource_group_name, container_app_name): +def show_container_app_resiliency( + cmd, + name, + resource_group_name, + container_app_name): raw_parameters = locals() containerapp_resiliency_show_decorator = ContainerAppResiliencyPreviewShowDecorator( cmd=cmd, client=ContainerAppsResiliencyPreviewClient, raw_parameters=raw_parameters, - models=CONTAINER_APPS_SDK_MODELS - ) + models=CONTAINER_APPS_SDK_MODELS) return containerapp_resiliency_show_decorator.show() -def list_container_app_resiliencies(cmd, resource_group_name, container_app_name): +def list_container_app_resiliencies( + cmd, + resource_group_name, + container_app_name): raw_parameters = locals() containerapp_resiliency_list_decorator = ContainerAppResiliencyPreviewListDecorator( cmd=cmd, client=ContainerAppsResiliencyPreviewClient, raw_parameters=raw_parameters, - models=CONTAINER_APPS_SDK_MODELS - ) + models=CONTAINER_APPS_SDK_MODELS) return containerapp_resiliency_list_decorator.list() -def create_dapr_component_resiliency(cmd, name, resource_group_name, dapr_component_name, environment, - yaml=None, - no_wait=False, - disable_warnings=False, - in_timeout_response_in_seconds=None, - out_timeout_response_in_seconds=None, - in_http_retry_max=None, - out_http_retry_max=None, - in_http_retry_delay_in_milliseconds=None, - out_http_retry_delay_in_milliseconds=None, - in_http_retry_interval_in_milliseconds=None, - out_http_retry_interval_in_milliseconds=None, - in_circuit_breaker_consecutive_errors=None, - out_circuit_breaker_consecutive_errors=None, - in_circuit_breaker_interval=None, - out_circuit_breaker_interval=None, - in_circuit_breaker_timeout=None, - out_circuit_breaker_timeout=None): +def create_dapr_component_resiliency( + cmd, + name, + resource_group_name, + dapr_component_name, + environment, + yaml=None, + no_wait=False, + disable_warnings=False, + in_timeout_response_in_seconds=None, + out_timeout_response_in_seconds=None, + in_http_retry_max=None, + out_http_retry_max=None, + in_http_retry_delay_in_milliseconds=None, + out_http_retry_delay_in_milliseconds=None, + in_http_retry_interval_in_milliseconds=None, + out_http_retry_interval_in_milliseconds=None, + in_circuit_breaker_consecutive_errors=None, + out_circuit_breaker_consecutive_errors=None, + 
in_circuit_breaker_interval=None, + out_circuit_breaker_interval=None, + in_circuit_breaker_timeout=None, + out_circuit_breaker_timeout=None): raw_parameters = locals() component_resiliency_create_decorator = DaprComponentResiliencyPreviewCreateDecorator( cmd=cmd, client=DaprComponentResiliencyPreviewClient, raw_parameters=raw_parameters, - models=CONTAINER_APPS_SDK_MODELS - ) + models=CONTAINER_APPS_SDK_MODELS) component_resiliency_create_decorator.validate_arguments() component_resiliency_create_decorator.construct_payload() return component_resiliency_create_decorator.create() -def update_dapr_component_resiliency(cmd, name, resource_group_name, dapr_component_name, environment, - yaml=None, - no_wait=False, - disable_warnings=False, - in_timeout_response_in_seconds=None, - out_timeout_response_in_seconds=None, - in_http_retry_max=None, - out_http_retry_max=None, - in_http_retry_delay_in_milliseconds=None, - out_http_retry_delay_in_milliseconds=None, - in_http_retry_interval_in_milliseconds=None, - out_http_retry_interval_in_milliseconds=None, - in_circuit_breaker_consecutive_errors=None, - out_circuit_breaker_consecutive_errors=None, - in_circuit_breaker_interval=None, - out_circuit_breaker_interval=None, - in_circuit_breaker_timeout=None, - out_circuit_breaker_timeout=None): +def update_dapr_component_resiliency( + cmd, + name, + resource_group_name, + dapr_component_name, + environment, + yaml=None, + no_wait=False, + disable_warnings=False, + in_timeout_response_in_seconds=None, + out_timeout_response_in_seconds=None, + in_http_retry_max=None, + out_http_retry_max=None, + in_http_retry_delay_in_milliseconds=None, + out_http_retry_delay_in_milliseconds=None, + in_http_retry_interval_in_milliseconds=None, + out_http_retry_interval_in_milliseconds=None, + in_circuit_breaker_consecutive_errors=None, + out_circuit_breaker_consecutive_errors=None, + in_circuit_breaker_interval=None, + out_circuit_breaker_interval=None, + in_circuit_breaker_timeout=None, + out_circuit_breaker_timeout=None): raw_parameters = locals() component_resiliency_update_decorator = DaprComponentResiliencyPreviewUpdateDecorator( cmd=cmd, client=DaprComponentResiliencyPreviewClient, raw_parameters=raw_parameters, - models=CONTAINER_APPS_SDK_MODELS - ) + models=CONTAINER_APPS_SDK_MODELS) component_resiliency_update_decorator.validate_arguments() component_resiliency_update_decorator.construct_payload() return component_resiliency_update_decorator.update() -def delete_dapr_component_resiliency(cmd, name, resource_group_name, environment, dapr_component_name, no_wait=False): +def delete_dapr_component_resiliency( + cmd, + name, + resource_group_name, + environment, + dapr_component_name, + no_wait=False): raw_parameters = locals() containerapp_resiliency_delete_decorator = DaprComponentResiliencyPreviewDeleteDecorator( cmd=cmd, client=DaprComponentResiliencyPreviewClient, raw_parameters=raw_parameters, - models=CONTAINER_APPS_SDK_MODELS - ) + models=CONTAINER_APPS_SDK_MODELS) return containerapp_resiliency_delete_decorator.delete() -def show_dapr_component_resiliency(cmd, name, resource_group_name, environment, dapr_component_name, no_wait=False): +def show_dapr_component_resiliency( + cmd, + name, + resource_group_name, + environment, + dapr_component_name, + no_wait=False): raw_parameters = locals() containerapp_resiliency_show_decorator = DaprComponentResiliencyPreviewShowDecorator( cmd=cmd, client=DaprComponentResiliencyPreviewClient, raw_parameters=raw_parameters, - models=CONTAINER_APPS_SDK_MODELS - ) + 
models=CONTAINER_APPS_SDK_MODELS) return containerapp_resiliency_show_decorator.show() -def list_dapr_component_resiliencies(cmd, resource_group_name, dapr_component_name, environment, no_wait=False): +def list_dapr_component_resiliencies( + cmd, + resource_group_name, + dapr_component_name, + environment, + no_wait=False): raw_parameters = locals() containerapp_resiliency_list_decorator = DaprComponentResiliencyPreviewListDecorator( cmd=cmd, client=DaprComponentResiliencyPreviewClient, raw_parameters=raw_parameters, - models=CONTAINER_APPS_SDK_MODELS - ) + models=CONTAINER_APPS_SDK_MODELS) return containerapp_resiliency_list_decorator.list() @@ -637,46 +901,47 @@ def update_containerapp(cmd, enable_java_agent=None): _validate_subscription_registered(cmd, CONTAINER_APPS_RP) - return update_containerapp_logic(cmd=cmd, - name=name, - resource_group_name=resource_group_name, - yaml=yaml, - image=image, - container_name=container_name, - min_replicas=min_replicas, - max_replicas=max_replicas, - scale_rule_name=scale_rule_name, - scale_rule_type=scale_rule_type, - scale_rule_http_concurrency=scale_rule_http_concurrency, - scale_rule_metadata=scale_rule_metadata, - scale_rule_auth=scale_rule_auth, - scale_rule_identity=scale_rule_identity, - service_bindings=service_bindings, - customized_keys=customized_keys, - unbind_service_bindings=unbind_service_bindings, - set_env_vars=set_env_vars, - remove_env_vars=remove_env_vars, - replace_env_vars=replace_env_vars, - remove_all_env_vars=remove_all_env_vars, - revisions_mode=revisions_mode, - target_label=target_label, - cpu=cpu, - memory=memory, - revision_suffix=revision_suffix, - startup_command=startup_command, - args=args, - tags=tags, - workload_profile_name=workload_profile_name, - termination_grace_period=termination_grace_period, - no_wait=no_wait, - secret_volume_mount=secret_volume_mount, - source=source, - artifact=artifact, - build_env_vars=build_env_vars, - max_inactive_revisions=max_inactive_revisions, - runtime=runtime, - enable_java_metrics=enable_java_metrics, - enable_java_agent=enable_java_agent) + return update_containerapp_logic( + cmd=cmd, + name=name, + resource_group_name=resource_group_name, + yaml=yaml, + image=image, + container_name=container_name, + min_replicas=min_replicas, + max_replicas=max_replicas, + scale_rule_name=scale_rule_name, + scale_rule_type=scale_rule_type, + scale_rule_http_concurrency=scale_rule_http_concurrency, + scale_rule_metadata=scale_rule_metadata, + scale_rule_auth=scale_rule_auth, + scale_rule_identity=scale_rule_identity, + service_bindings=service_bindings, + customized_keys=customized_keys, + unbind_service_bindings=unbind_service_bindings, + set_env_vars=set_env_vars, + remove_env_vars=remove_env_vars, + replace_env_vars=replace_env_vars, + remove_all_env_vars=remove_all_env_vars, + revisions_mode=revisions_mode, + target_label=target_label, + cpu=cpu, + memory=memory, + revision_suffix=revision_suffix, + startup_command=startup_command, + args=args, + tags=tags, + workload_profile_name=workload_profile_name, + termination_grace_period=termination_grace_period, + no_wait=no_wait, + secret_volume_mount=secret_volume_mount, + source=source, + artifact=artifact, + build_env_vars=build_env_vars, + max_inactive_revisions=max_inactive_revisions, + runtime=runtime, + enable_java_metrics=enable_java_metrics, + enable_java_agent=enable_java_agent) def show_containerapp(cmd, name, resource_group_name, show_secrets=False): @@ -687,12 +952,17 @@ def show_containerapp(cmd, name, resource_group_name, 
show_secrets=False): raw_parameters=raw_parameters, models=CONTAINER_APPS_SDK_MODELS ) - containerapp_base_decorator.validate_subscription_registered(CONTAINER_APPS_RP) + containerapp_base_decorator.validate_subscription_registered( + CONTAINER_APPS_RP) return containerapp_base_decorator.show() -def list_containerapp(cmd, resource_group_name=None, managed_env=None, environment_type="all"): +def list_containerapp( + cmd, + resource_group_name=None, + managed_env=None, + environment_type="all"): raw_parameters = locals() containerapp_list_decorator = ContainerAppPreviewListDecorator( cmd=cmd, @@ -700,7 +970,8 @@ def list_containerapp(cmd, resource_group_name=None, managed_env=None, environme raw_parameters=raw_parameters, models=CONTAINER_APPS_SDK_MODELS ) - containerapp_list_decorator.validate_subscription_registered(CONTAINER_APPS_RP) + containerapp_list_decorator.validate_subscription_registered( + CONTAINER_APPS_RP) return containerapp_list_decorator.list() @@ -713,7 +984,8 @@ def delete_containerapp(cmd, name, resource_group_name, no_wait=False): raw_parameters=raw_parameters, models=CONTAINER_APPS_SDK_MODELS ) - containerapp_base_decorator.validate_subscription_registered(CONTAINER_APPS_RP) + containerapp_base_decorator.validate_subscription_registered( + CONTAINER_APPS_RP) return containerapp_base_decorator.delete() @@ -886,7 +1158,8 @@ def show_managed_environment(cmd, name, resource_group_name): raw_parameters=raw_parameters, models=CONTAINER_APPS_SDK_MODELS ) - containerapp_env_decorator.validate_subscription_registered(CONTAINER_APPS_RP) + containerapp_env_decorator.validate_subscription_registered( + CONTAINER_APPS_RP) return containerapp_env_decorator.show() @@ -899,7 +1172,8 @@ def list_managed_environments(cmd, resource_group_name=None): raw_parameters=raw_parameters, models=CONTAINER_APPS_SDK_MODELS ) - containerapp_env_decorator.validate_subscription_registered(CONTAINER_APPS_RP) + containerapp_env_decorator.validate_subscription_registered( + CONTAINER_APPS_RP) return containerapp_env_decorator.list() @@ -912,7 +1186,8 @@ def delete_managed_environment(cmd, name, resource_group_name, no_wait=False): raw_parameters=raw_parameters, models=CONTAINER_APPS_SDK_MODELS ) - containerapp_env_decorator.validate_subscription_registered(CONTAINER_APPS_RP) + containerapp_env_decorator.validate_subscription_registered( + CONTAINER_APPS_RP) return containerapp_env_decorator.delete() @@ -1071,7 +1346,8 @@ def update_containerappsjob(cmd, raw_parameters=raw_parameters, models=CONTAINER_APPS_SDK_MODELS ) - containerapp_job_update_decorator.validate_subscription_registered(CONTAINER_APPS_RP) + containerapp_job_update_decorator.validate_subscription_registered( + CONTAINER_APPS_RP) containerapp_job_update_decorator.validate_arguments() containerapp_job_update_decorator.construct_payload() @@ -1087,7 +1363,8 @@ def show_containerappsjob(cmd, name, resource_group_name): raw_parameters=raw_parameters, models=CONTAINER_APPS_SDK_MODELS ) - containerapp_job_decorator.validate_subscription_registered(CONTAINER_APPS_RP) + containerapp_job_decorator.validate_subscription_registered( + CONTAINER_APPS_RP) return containerapp_job_decorator.show() @@ -1100,7 +1377,8 @@ def list_containerappsjob(cmd, resource_group_name=None): raw_parameters=raw_parameters, models=CONTAINER_APPS_SDK_MODELS ) - containerapp_job_decorator.validate_subscription_registered(CONTAINER_APPS_RP) + containerapp_job_decorator.validate_subscription_registered( + CONTAINER_APPS_RP) return containerapp_job_decorator.list() @@ -1113,7 
+1391,8 @@ def delete_containerappsjob(cmd, name, resource_group_name, no_wait=False): raw_parameters=raw_parameters, models=CONTAINER_APPS_SDK_MODELS ) - containerapp_job_decorator.validate_subscription_registered(CONTAINER_APPS_RP) + containerapp_job_decorator.validate_subscription_registered( + CONTAINER_APPS_RP) return containerapp_job_decorator.delete() @@ -1144,31 +1423,37 @@ def create_or_update_github_action(cmd, scopes = ["admin:repo_hook", "repo", "workflow"] token = get_github_access_token(cmd, scopes) elif token and login_with_github: - logger.warning("Both token and --login-with-github flag are provided. Will use provided token") + logger.warning( + "Both token and --login-with-github flag are provided. Will use provided token") repo = repo_url_to_name(repo_url) - repo_url = f"https://github.com/{repo}" # allow specifying repo as / without the full github url + # allow specifying repo as / without the full github url + repo_url = f"https://github.com/{repo}" branch = _validate_github(repo, branch, token) source_control_info = None try: - source_control_info = GitHubActionPreviewClient.show(cmd=cmd, resource_group_name=resource_group_name, name=name) + source_control_info = GitHubActionPreviewClient.show( + cmd=cmd, resource_group_name=resource_group_name, name=name) except Exception as ex: if not service_principal_client_id or not service_principal_client_secret or not service_principal_tenant_id: - raise RequiredArgumentMissingError('Service principal client ID, secret and tenant ID are required to add github actions for the first time. Please create one using the command \"az ad sp create-for-rbac --name {{name}} --role contributor --scopes /subscriptions/{{subscription}}/resourceGroups/{{resourceGroup}} --sdk-auth\"') from ex + raise RequiredArgumentMissingError( + 'Service principal client ID, secret and tenant ID are required to add github actions for the first time. Please create one using the command \"az ad sp create-for-rbac --name {{name}} --role contributor --scopes /subscriptions/{{subscription}}/resourceGroups/{{resourceGroup}} --sdk-auth\"') from ex source_control_info = SourceControlModel - # Need to trigger the workflow manually if it already exists (performing an update) + # Need to trigger the workflow manually if it already exists (performing + # an update) try: - workflow_name = GitHubActionPreviewClient.get_workflow_name(cmd=cmd, repo=repo, branch_name=branch, container_app_name=name, token=token) + workflow_name = GitHubActionPreviewClient.get_workflow_name( + cmd=cmd, repo=repo, branch_name=branch, container_app_name=name, token=token) if workflow_name is not None: if trigger_existing_workflow: trigger_workflow(token, repo, workflow_name, branch) return source_control_info - except: # pylint: disable=bare-except + except BaseException: # pylint: disable=bare-except pass source_control_info["properties"]["repoUrl"] = repo_url @@ -1185,17 +1470,23 @@ def create_or_update_github_action(cmd, # Registry if registry_username is None or registry_password is None: - # If registry is Azure Container Registry, we can try inferring credentials + # If registry is Azure Container Registry, we can try inferring + # credentials if not registry_url or ACR_IMAGE_SUFFIX not in registry_url: - raise RequiredArgumentMissingError('Registry url is required if using Azure Container Registry, otherwise Registry username and password are required if using Dockerhub') - logger.warning('No credential was provided to access Azure Container Registry. 
Trying to look up...') + raise RequiredArgumentMissingError( + 'Registry url is required if using Azure Container Registry, otherwise Registry username and password are required if using Dockerhub') + logger.warning( + 'No credential was provided to access Azure Container Registry. Trying to look up...') parsed = urlparse(registry_url) - registry_name = (parsed.netloc if parsed.scheme else parsed.path).split('.')[0] + registry_name = ( + parsed.netloc if parsed.scheme else parsed.path).split('.')[0] try: - registry_username, registry_password, _ = _get_acr_cred(cmd.cli_ctx, registry_name) + registry_username, registry_password, _ = _get_acr_cred( + cmd.cli_ctx, registry_name) except Exception as ex: - raise RequiredArgumentMissingError('Failed to retrieve credentials for container registry. Please provide the registry username and password') from ex + raise RequiredArgumentMissingError( + 'Failed to retrieve credentials for container registry. Please provide the registry username and password') from ex registry_info = RegistryInfoModel registry_info["registryUrl"] = registry_url @@ -1206,7 +1497,8 @@ def create_or_update_github_action(cmd, github_action_configuration["registryInfo"] = registry_info github_action_configuration["azureCredentials"] = azure_credentials github_action_configuration["contextPath"] = context_path - github_action_configuration["buildEnvironmentVariables"] = parse_build_env_vars(build_env_vars) + github_action_configuration["buildEnvironmentVariables"] = parse_build_env_vars( + build_env_vars) github_action_configuration["image"] = image source_control_info["properties"]["githubActionConfiguration"] = github_action_configuration @@ -1215,7 +1507,13 @@ def create_or_update_github_action(cmd, try: logger.warning("Creating Github action...") - r = GitHubActionPreviewClient.create_or_update(cmd=cmd, resource_group_name=resource_group_name, name=name, github_action_envelope=source_control_info, headers=headers, no_wait=no_wait) + r = GitHubActionPreviewClient.create_or_update( + cmd=cmd, + resource_group_name=resource_group_name, + name=name, + github_action_envelope=source_control_info, + headers=headers, + no_wait=no_wait) if not no_wait: WORKFLOW_POLL_RETRY = 6 WORKFLOW_POLL_SLEEP = 10 @@ -1223,7 +1521,8 @@ def create_or_update_github_action(cmd, # Poll for the workflow file just created (may take up to 30s) for _ in range(0, WORKFLOW_POLL_RETRY): time.sleep(WORKFLOW_POLL_SLEEP) - workflow_name = GitHubActionPreviewClient.get_workflow_name(cmd=cmd, repo=repo, branch_name=branch, container_app_name=name, token=token) + workflow_name = GitHubActionPreviewClient.get_workflow_name( + cmd=cmd, repo=repo, branch_name=branch, container_app_name=name, token=token) if workflow_name is not None: await_github_action(token, repo, workflow_name) return r @@ -1242,10 +1541,11 @@ def list_replicas(cmd, resource_group_name, name, revision=None): app = ContainerAppPreviewClient.show(cmd, resource_group_name, name) if not revision: revision = app["properties"]["latestRevisionName"] - return ContainerAppPreviewClient.list_replicas(cmd=cmd, - resource_group_name=resource_group_name, - container_app_name=name, - revision_name=revision) + return ContainerAppPreviewClient.list_replicas( + cmd=cmd, + resource_group_name=resource_group_name, + container_app_name=name, + revision_name=revision) except Exception as e: handle_raw_exception(e) @@ -1262,10 +1562,12 @@ def count_replicas(cmd, resource_group_name, name, revision=None): handle_raw_exception(e) try: - count = 
len(ContainerAppPreviewClient.list_replicas(cmd=cmd, - resource_group_name=resource_group_name, - container_app_name=name, - revision_name=revision)) + count = len( + ContainerAppPreviewClient.list_replicas( + cmd=cmd, + resource_group_name=resource_group_name, + container_app_name=name, + revision_name=revision)) return count except Exception as e: handle_raw_exception(e) @@ -1277,11 +1579,12 @@ def get_replica(cmd, resource_group_name, name, replica, revision=None): app = ContainerAppPreviewClient.show(cmd, resource_group_name, name) if not revision: revision = app["properties"]["latestRevisionName"] - return ContainerAppPreviewClient.get_replica(cmd=cmd, - resource_group_name=resource_group_name, - container_app_name=name, - revision_name=revision, - replica_name=replica) + return ContainerAppPreviewClient.get_replica( + cmd=cmd, + resource_group_name=resource_group_name, + container_app_name=name, + revision_name=revision, + replica_name=replica) except Exception as e: handle_raw_exception(e) @@ -1323,41 +1626,87 @@ def containerapp_up(cmd, model_name=None, model_version=None, kind=None): - from ._up_utils import (_validate_up_args, _validate_custom_location_connected_cluster_args, _reformat_image, _get_dockerfile_content, _get_ingress_and_target_port, - ResourceGroup, Extension, CustomLocation, ContainerAppEnvironment, ContainerApp, _get_registry_from_app, - _get_registry_details, _get_registry_details_without_get_creds, _create_github_action, _set_up_defaults, up_output, - check_env_name_on_rg, get_token, _has_dockerfile, _validate_azml_args, _is_azml_app, _validate_azml_env_and_create_if_needed, _set_azml_env_vars) + from ._up_utils import ( + _validate_up_args, + _validate_custom_location_connected_cluster_args, + _reformat_image, + _get_dockerfile_content, + _get_ingress_and_target_port, + ResourceGroup, + Extension, + CustomLocation, + ContainerAppEnvironment, + ContainerApp, + _get_registry_from_app, + _get_registry_details, + _get_registry_details_without_get_creds, + _create_github_action, + _set_up_defaults, + up_output, + check_env_name_on_rg, + get_token, + _has_dockerfile, + _validate_azml_args, + _is_azml_app, + _validate_azml_env_and_create_if_needed, + _set_azml_env_vars) from azure.cli.command_modules.containerapp._github_oauth import cache_github_token HELLOWORLD = "mcr.microsoft.com/k8se/quickstart" ACA_AZML_MCR = "mcr.microsoft.com/k8se/aca-foundry-model-host" - dockerfile = "Dockerfile" # for now the dockerfile name must be "Dockerfile" (until GH actions API is updated) + # for now the dockerfile name must be "Dockerfile" (until GH actions API + # is updated) + dockerfile = "Dockerfile" register_provider_if_needed(cmd, CONTAINER_APPS_RP) is_azureml_app = _is_azml_app(model_registry, model_name, model_version) model_asset_id = None model_reference_endpoint = None if is_azureml_app: - model_asset_id, model_reference_endpoint, model_type, model_load_class, image = _validate_azml_args(cmd, ACA_AZML_MCR, image, model_registry, model_name, model_version) - env_vars = _set_azml_env_vars(cmd, env_vars, model_asset_id, model_reference_endpoint, model_type, model_load_class, is_azml_mcr_app=image.lower() == ACA_AZML_MCR.lower()) - - _validate_up_args(cmd, source, artifact, build_env_vars, image, repo, registry_server) + model_asset_id, model_reference_endpoint, model_type, model_load_class, image = _validate_azml_args( + cmd, ACA_AZML_MCR, image, model_registry, model_name, model_version) + env_vars = _set_azml_env_vars( + cmd, + env_vars, + model_asset_id, + 
model_reference_endpoint, + model_type, + model_load_class, + is_azml_mcr_app=image.lower() == ACA_AZML_MCR.lower()) + + _validate_up_args( + cmd, + source, + artifact, + build_env_vars, + image, + repo, + registry_server) validate_create(registry_identity=registry_identity, registry_pass=registry_pass, registry_user=registry_user, registry_server=registry_server, no_wait=False) - _validate_custom_location_connected_cluster_args(cmd, - env=environment, - resource_group_name=resource_group_name, - location=location, - custom_location_id=custom_location_id, - connected_cluster_id=connected_cluster_id) + _validate_custom_location_connected_cluster_args( + cmd, + env=environment, + resource_group_name=resource_group_name, + location=location, + custom_location_id=custom_location_id, + connected_cluster_id=connected_cluster_id) if artifact: # Artifact is mostly a convenience argument provided to use --source specifically with a single artifact file. - # At this point we know for sure that source isn't set (else _validate_up_args would have failed), so we can build with this value. + # At this point we know for sure that source isn't set (else + # _validate_up_args would have failed), so we can build with this + # value. source = artifact validate_container_app_name(name, AppType.ContainerApp.name) - check_env_name_on_rg(cmd, environment, resource_group_name, location, custom_location_id, connected_cluster_id) + check_env_name_on_rg( + cmd, + environment, + resource_group_name, + location, + custom_location_id, + connected_cluster_id) image = _reformat_image(source, repo, image) token = get_token(cmd, repo, token) @@ -1373,30 +1722,90 @@ def containerapp_up(cmd, if image: if ingress and not target_port and target_port != 0: target_port = 0 - logger.warning("No target-port provided, defaulting to auto-detect. Try `az containerapp up --ingress %s --target-port ` to set a custom port.", ingress) + logger.warning( + "No target-port provided, defaulting to auto-detect. Try `az containerapp up --ingress %s --target-port ` to set a custom port.", + ingress) # Check if source contains a Dockerfile - # and ignore checking if Dockerfile exists in repo since GitHub action inherently checks for it. + # and ignore checking if Dockerfile exists in repo since GitHub action + # inherently checks for it. 
if _has_dockerfile(source, dockerfile): - dockerfile_content = _get_dockerfile_content(repo, branch, token, source, context_path, dockerfile) - ingress, target_port = _get_ingress_and_target_port(ingress, target_port, dockerfile_content) - - resource_group = ResourceGroup(cmd, name=resource_group_name, location=location) - custom_location = CustomLocation(cmd, name=custom_location_id, resource_group_name=resource_group_name, connected_cluster_id=connected_cluster_id) - extension = Extension(cmd, logs_rg=resource_group_name, logs_location=location, logs_share_key=logs_key, logs_customer_id=logs_customer_id, connected_cluster_id=connected_cluster_id) - env = ContainerAppEnvironment(cmd, environment, resource_group, location=location, logs_key=logs_key, logs_customer_id=logs_customer_id, custom_location_id=custom_location_id, connected_cluster_id=connected_cluster_id, is_env_for_azml_app=is_azureml_app) - app = ContainerApp(cmd, name, resource_group, None, image, env, target_port, registry_server, registry_user, registry_pass, env_vars, workload_profile_name, ingress, registry_identity=registry_identity, user_assigned=user_assigned, system_assigned=system_assigned, revisions_mode=revisions_mode, target_label=target_label, kind=kind) - - # Check and see if registry (username and passwords) or registry-identity are specified. If so, set is_registry_server_params_set to True to use those creds. - is_registry_server_params_set = bool(registry_server and ((registry_user and registry_pass) or registry_identity)) - _set_up_defaults(cmd, name, resource_group_name, logs_customer_id, location, resource_group, env, app, custom_location, extension, is_registry_server_params_set) + dockerfile_content = _get_dockerfile_content( + repo, branch, token, source, context_path, dockerfile) + ingress, target_port = _get_ingress_and_target_port( + ingress, target_port, dockerfile_content) + + resource_group = ResourceGroup( + cmd, name=resource_group_name, location=location) + custom_location = CustomLocation( + cmd, + name=custom_location_id, + resource_group_name=resource_group_name, + connected_cluster_id=connected_cluster_id) + extension = Extension( + cmd, + logs_rg=resource_group_name, + logs_location=location, + logs_share_key=logs_key, + logs_customer_id=logs_customer_id, + connected_cluster_id=connected_cluster_id) + env = ContainerAppEnvironment( + cmd, + environment, + resource_group, + location=location, + logs_key=logs_key, + logs_customer_id=logs_customer_id, + custom_location_id=custom_location_id, + connected_cluster_id=connected_cluster_id, + is_env_for_azml_app=is_azureml_app) + app = ContainerApp( + cmd, + name, + resource_group, + None, + image, + env, + target_port, + registry_server, + registry_user, + registry_pass, + env_vars, + workload_profile_name, + ingress, + registry_identity=registry_identity, + user_assigned=user_assigned, + system_assigned=system_assigned, + revisions_mode=revisions_mode, + target_label=target_label, + kind=kind) + + # Check and see if registry (username and passwords) or registry-identity + # are specified. If so, set is_registry_server_params_set to True to use + # those creds. 
+ is_registry_server_params_set = bool(registry_server and ( + (registry_user and registry_pass) or registry_identity)) + _set_up_defaults( + cmd, + name, + resource_group_name, + logs_customer_id, + location, + resource_group, + env, + app, + custom_location, + extension, + is_registry_server_params_set) if app.check_exists(): if app.get()["properties"]["provisioningState"] == "InProgress": - raise ValidationError("Containerapp has an existing provisioning in progress. Please wait until provisioning has completed and rerun the command.") + raise ValidationError( + "Containerapp has an existing provisioning in progress. Please wait until provisioning has completed and rerun the command.") if is_azureml_app: - env, workload_profile_name, app_cpu_limit, app_memory_limit = _validate_azml_env_and_create_if_needed(cmd, app, env, environment, resource_group, resource_group_name, workload_profile_name) + env, workload_profile_name, app_cpu_limit, app_memory_limit = _validate_azml_env_and_create_if_needed( + cmd, app, env, environment, resource_group, resource_group_name, workload_profile_name) app.workload_profile_name = workload_profile_name app.cpu = app_cpu_limit app.memory = app_memory_limit @@ -1407,98 +1816,239 @@ def containerapp_up(cmd, env.create_if_needed(name) if source or repo: if not registry_server: - _get_registry_from_app(app, source) # if the app exists, get the registry + # if the app exists, get the registry + _get_registry_from_app(app, source) if source: _get_registry_details_without_get_creds(cmd, app, source) if repo: - _get_registry_details(cmd, app, source) # fetch ACR creds from arguments registry arguments, --repo will create Github Actions, which requires ACR registry's creds + # fetch ACR creds from arguments registry arguments, --repo will + # create Github Actions, which requires ACR registry's creds + _get_registry_details(cmd, app, source) force_single_container_updates = False if source: app.get_acr_creds = False - force_single_container_updates = app.run_source_to_cloud_flow(source, dockerfile, build_env_vars, can_create_acr_if_needed=True, registry_server=registry_server) + force_single_container_updates = app.run_source_to_cloud_flow( + source, + dockerfile, + build_env_vars, + can_create_acr_if_needed=True, + registry_server=registry_server) app.set_force_single_container_updates(force_single_container_updates) else: app.create_acr_if_needed() app.create(no_registry=bool(repo or force_single_container_updates)) if repo: - _create_github_action(app, env, service_principal_client_id, service_principal_client_secret, - service_principal_tenant_id, branch, token, repo, context_path, build_env_vars) + _create_github_action( + app, + env, + service_principal_client_id, + service_principal_client_secret, + service_principal_tenant_id, + branch, + token, + repo, + context_path, + build_env_vars) cache_github_token(cmd, token, repo) if browse: open_containerapp_in_browser(cmd, app.name, app.resource_group.name) - up_output(app, no_dockerfile=(source and not _has_dockerfile(source, dockerfile))) - - -def containerapp_up_logic(cmd, resource_group_name, name, managed_env, image, env_vars, ingress, target_port, registry_server, registry_user, workload_profile_name, registry_pass, environment_type=None, force_single_container_updates=False, registry_identity=None, system_assigned=None, user_assigned=None, revisions_mode=None, target_label=None, cpu=None, memory=None, kind=None): + up_output( + app, no_dockerfile=( + source and not _has_dockerfile( + source, dockerfile))) + 
+ +def containerapp_up_logic( + cmd, + resource_group_name, + name, + managed_env, + image, + env_vars, + ingress, + target_port, + registry_server, + registry_user, + workload_profile_name, + registry_pass, + environment_type=None, + force_single_container_updates=False, + registry_identity=None, + system_assigned=None, + user_assigned=None, + revisions_mode=None, + target_label=None, + cpu=None, + memory=None, + kind=None): containerapp_def = None try: - containerapp_def = ContainerAppPreviewClient.show(cmd=cmd, resource_group_name=resource_group_name, name=name) - except: + containerapp_def = ContainerAppPreviewClient.show( + cmd=cmd, resource_group_name=resource_group_name, name=name) + except BaseException: pass if containerapp_def: - return update_containerapp_logic(cmd=cmd, name=name, resource_group_name=resource_group_name, image=image, replace_env_vars=env_vars, ingress=ingress, target_port=target_port, - registry_server=registry_server, registry_user=registry_user, registry_pass=registry_pass, workload_profile_name=workload_profile_name, container_name=name, force_single_container_updates=force_single_container_updates, - registry_identity=registry_identity, system_assigned=system_assigned, user_assigned=user_assigned, revisions_mode=revisions_mode, target_label=target_label) - return create_containerapp(cmd=cmd, name=name, resource_group_name=resource_group_name, managed_env=managed_env, image=image, env_vars=env_vars, ingress=ingress, target_port=target_port, registry_server=registry_server, registry_user=registry_user, registry_pass=registry_pass, workload_profile_name=workload_profile_name, environment_type=environment_type, - registry_identity=registry_identity, system_assigned=system_assigned, user_assigned=user_assigned, revisions_mode=revisions_mode, target_label=target_label, cpu=cpu, memory=memory, kind=kind) - - -def list_certificates(cmd, name, resource_group_name, location=None, certificate=None, thumbprint=None, managed_certificates_only=False, private_key_certificates_only=False): + return update_containerapp_logic( + cmd=cmd, + name=name, + resource_group_name=resource_group_name, + image=image, + replace_env_vars=env_vars, + ingress=ingress, + target_port=target_port, + registry_server=registry_server, + registry_user=registry_user, + registry_pass=registry_pass, + workload_profile_name=workload_profile_name, + container_name=name, + force_single_container_updates=force_single_container_updates, + registry_identity=registry_identity, + system_assigned=system_assigned, + user_assigned=user_assigned, + revisions_mode=revisions_mode, + target_label=target_label) + return create_containerapp( + cmd=cmd, + name=name, + resource_group_name=resource_group_name, + managed_env=managed_env, + image=image, + env_vars=env_vars, + ingress=ingress, + target_port=target_port, + registry_server=registry_server, + registry_user=registry_user, + registry_pass=registry_pass, + workload_profile_name=workload_profile_name, + environment_type=environment_type, + registry_identity=registry_identity, + system_assigned=system_assigned, + user_assigned=user_assigned, + revisions_mode=revisions_mode, + target_label=target_label, + cpu=cpu, + memory=memory, + kind=kind) + + +def list_certificates( + cmd, + name, + resource_group_name, + location=None, + certificate=None, + thumbprint=None, + managed_certificates_only=False, + private_key_certificates_only=False): raw_parameters = locals() containerapp_env_certificate_list_decorator = ContainerappPreviewEnvCertificateListDecorator( cmd=cmd, 
client=ManagedEnvironmentPreviewClient, raw_parameters=raw_parameters, - models=CONTAINER_APPS_SDK_MODELS - ) - containerapp_env_certificate_list_decorator.validate_subscription_registered(CONTAINER_APPS_RP) + models=CONTAINER_APPS_SDK_MODELS) + containerapp_env_certificate_list_decorator.validate_subscription_registered( + CONTAINER_APPS_RP) containerapp_env_certificate_list_decorator.validate_arguments() return containerapp_env_certificate_list_decorator.list() -def upload_certificate(cmd, name, resource_group_name, certificate_file=None, certificate_name=None, certificate_password=None, location=None, prompt=False, certificate_identity=None, certificate_key_vault_url=None): +def upload_certificate( + cmd, + name, + resource_group_name, + certificate_file=None, + certificate_name=None, + certificate_password=None, + location=None, + prompt=False, + certificate_identity=None, + certificate_key_vault_url=None): raw_parameters = locals() containerapp_env_certificate_upload_decorator = ContainerappEnvCertificatePreviweUploadDecorator( cmd=cmd, client=ManagedEnvironmentPreviewClient, raw_parameters=raw_parameters, - models=CONTAINER_APPS_SDK_MODELS - ) - containerapp_env_certificate_upload_decorator.validate_subscription_registered(CONTAINER_APPS_RP) + models=CONTAINER_APPS_SDK_MODELS) + containerapp_env_certificate_upload_decorator.validate_subscription_registered( + CONTAINER_APPS_RP) containerapp_env_certificate_upload_decorator.validate_arguments() containerapp_env_certificate_upload_decorator.construct_payload() return containerapp_env_certificate_upload_decorator.create_or_update() -def delete_certificate(cmd, resource_group_name, name, location=None, certificate=None, thumbprint=None): +def delete_certificate( + cmd, + resource_group_name, + name, + location=None, + certificate=None, + thumbprint=None): from azure.cli.command_modules.containerapp.custom import delete_certificate_logic - delete_certificate_logic(cmd=cmd, resource_group_name=resource_group_name, name=name, cert_name=certificate, location=location, certificate=certificate, thumbprint=thumbprint) - - -def bind_hostname(cmd, resource_group_name, name, hostname, thumbprint=None, certificate=None, location=None, environment=None, validation_method=None): + delete_certificate_logic( + cmd=cmd, + resource_group_name=resource_group_name, + name=name, + cert_name=certificate, + location=location, + certificate=certificate, + thumbprint=thumbprint) + + +def bind_hostname( + cmd, + resource_group_name, + name, + hostname, + thumbprint=None, + certificate=None, + location=None, + environment=None, + validation_method=None): from azure.cli.command_modules.containerapp.custom import bind_hostname_logic - return bind_hostname_logic(cmd=cmd, resource_group_name=resource_group_name, name=name, hostname=hostname, thumbprint=thumbprint, certificate=certificate, location=location, environment=environment, validation_method=validation_method) - - -def update_auth_config(cmd, resource_group_name, name, set_string=None, enabled=None, - runtime_version=None, config_file_path=None, unauthenticated_client_action=None, - redirect_provider=None, require_https=None, - proxy_convention=None, proxy_custom_host_header=None, - proxy_custom_proto_header=None, excluded_paths=None, - token_store=None, sas_url_secret=None, sas_url_secret_name=None, - blob_container_uri=None, blob_container_identity=None, - yes=False): + return bind_hostname_logic( + cmd=cmd, + resource_group_name=resource_group_name, + name=name, + hostname=hostname, + 
thumbprint=thumbprint, + certificate=certificate, + location=location, + environment=environment, + validation_method=validation_method) + + +def update_auth_config( + cmd, + resource_group_name, + name, + set_string=None, + enabled=None, + runtime_version=None, + config_file_path=None, + unauthenticated_client_action=None, + redirect_provider=None, + require_https=None, + proxy_convention=None, + proxy_custom_host_header=None, + proxy_custom_proto_header=None, + excluded_paths=None, + token_store=None, + sas_url_secret=None, + sas_url_secret_name=None, + blob_container_uri=None, + blob_container_identity=None, + yes=False): raw_parameters = locals() containerapp_auth_decorator = ContainerAppPreviewAuthDecorator( cmd=cmd, @@ -1508,13 +2058,20 @@ def update_auth_config(cmd, resource_group_name, name, set_string=None, enabled= ) containerapp_auth_decorator.construct_payload() - if containerapp_auth_decorator.get_argument_token_store() and containerapp_auth_decorator.get_argument_sas_url_secret() is not None: + if containerapp_auth_decorator.get_argument_token_store( + ) and containerapp_auth_decorator.get_argument_sas_url_secret() is not None: if not containerapp_auth_decorator.get_argument_yes(): msg = 'Configuring --sas-url-secret will add a secret to the containerapp. Are you sure you want to continue?' if not prompt_y_n(msg, default="n"): raise ArgumentUsageError( 'Usage Error: --sas-url-secret cannot be used without agreeing to add secret to the containerapp.') - set_secrets(cmd, name, resource_group_name, secrets=[f"{BLOB_STORAGE_TOKEN_STORE_SECRET_SETTING_NAME}={containerapp_auth_decorator.get_argument_sas_url_secret()}"], no_wait=False, disable_max_length=True) + set_secrets( + cmd, + name, + resource_group_name, + secrets=[f"{BLOB_STORAGE_TOKEN_STORE_SECRET_SETTING_NAME}={containerapp_auth_decorator.get_argument_sas_url_secret()}"], + no_wait=False, + disable_max_length=True) return containerapp_auth_decorator.create_or_update() @@ -1529,8 +2086,9 @@ def show_auth_config(cmd, resource_group_name, name): return containerapp_auth_decorator.show() - # Compose + + def create_containerapps_from_compose(cmd, # pylint: disable=R0914 resource_group_name, managed_env, @@ -1566,20 +2124,21 @@ def create_containerapps_from_compose(cmd, # pylint: disable=R0914 env_rg = parsed_managed_env.get('resource_group', resource_group_name) try: - managed_environment = show_managed_environment(cmd=cmd, - name=managed_env_name, - resource_group_name=env_rg) + managed_environment = show_managed_environment( + cmd=cmd, name=managed_env_name, resource_group_name=env_rg) except CLIInternalError: # pylint: disable=W0702 logger.info( # pylint: disable=W1203 f"Creating the Container Apps managed environment {managed_env_name} under {env_rg} in {location}.") - managed_environment = create_managed_environment(cmd, - name=managed_env_name, - resource_group_name=env_rg, - tags=tags, - location=location) + managed_environment = create_managed_environment( + cmd, + name=managed_env_name, + resource_group_name=env_rg, + tags=tags, + location=location) compose_yaml = load_yaml_file(compose_file_path) - # Make a deep copy to preserve original YAML for x-azure-deployment extraction + # Make a deep copy to preserve original YAML for x-azure-deployment + # extraction import copy compose_yaml_original = copy.deepcopy(compose_yaml) parsed_compose_file = ComposeFile(compose_yaml) @@ -1589,7 +2148,7 @@ def create_containerapps_from_compose(cmd, # pylint: disable=R0914 # 
======================================================================== # Phase 3: Models Deployment Implementation # ======================================================================== - + # Parse models section from compose file from ._compose_utils import ( parse_models_section, @@ -1612,48 +2171,60 @@ def create_containerapps_from_compose(cmd, # pylint: disable=R0914 detect_configuration_changes, log_update_detection, ) - + models = parse_models_section(compose_yaml_original) - models_app = None + _ = None # models_app models_endpoint = None - models_list = [] - + _ = [] # models_list + # Phase 3: Models Deployment (before services since they depend on it) # If models section exists, validate and create models container app if models and not dry_run: logger.info(f"Models section found with {len(models)} model(s)") - + # Validate models configuration validate_models_configuration(models) - + # Get or create GPU workload profile # Extract requested GPU profile type from models x-azure-deployment section # x-azure-deployment is nested inside each model (models.gemma.x-azure-deployment) - # IMPORTANT: We need to read from compose_yaml_original because parse_models_section() + # IMPORTANT: We need to read from compose_yaml_original because parse_models_section() # strips out x-azure-deployment from the returned models dict requested_gpu_type = None - logger.warning(f"[GPU DEBUG] Inspecting RAW models YAML for GPU profile configuration") + logger.warning( + "[GPU DEBUG] Inspecting RAW models YAML for GPU profile configuration") raw_models = compose_yaml_original.get('models', {}) - logger.warning(f"[GPU DEBUG] Raw models keys: {list(raw_models.keys())}") - + logger.warning( + f"[GPU DEBUG] Raw models keys: {list(raw_models.keys())}") + for model_name, model_config in raw_models.items(): - logger.warning(f"[GPU DEBUG] Raw model '{model_name}' config type: {type(model_config)}") + logger.warning( + f"[GPU DEBUG] Raw model '{model_name}' config type: {type(model_config)}") if isinstance(model_config, dict): - logger.warning(f"[GPU DEBUG] Raw model '{model_name}' has x-azure-deployment: {'x-azure-deployment' in model_config}") + logger.warning( + f"[GPU DEBUG] Raw model '{model_name}' has x-azure-deployment: {'x-azure-deployment' in model_config}") if 'x-azure-deployment' in model_config: - azure_deployment = model_config.get('x-azure-deployment', {}) - logger.warning(f"[GPU DEBUG] Model '{model_name}' azure_deployment: {azure_deployment}") - workload_profiles = azure_deployment.get('workloadProfiles', {}) - logger.warning(f"[GPU DEBUG] Model '{model_name}' workloadProfiles: {workload_profiles}") - requested_gpu_type = workload_profiles.get('workloadProfileType') - logger.warning(f"[GPU DEBUG] Model '{model_name}' requested GPU type: {requested_gpu_type}") + azure_deployment = model_config.get( + 'x-azure-deployment', {}) + logger.warning( + f"[GPU DEBUG] Model '{model_name}' azure_deployment: {azure_deployment}") + workload_profiles = azure_deployment.get( + 'workloadProfiles', {}) + logger.warning( + f"[GPU DEBUG] Model '{model_name}' workloadProfiles: {workload_profiles}") + requested_gpu_type = workload_profiles.get( + 'workloadProfileType') + logger.warning( + f"[GPU DEBUG] Model '{model_name}' requested GPU type: {requested_gpu_type}") if requested_gpu_type: - logger.info(f"Found GPU profile in model '{model_name}': {requested_gpu_type}") + logger.info( + f"Found GPU profile in model '{model_name}': {requested_gpu_type}") break - logger.warning(f"[GPU DEBUG] Final requested_gpu_type 
to be passed: {requested_gpu_type}") - + logger.warning( + f"[GPU DEBUG] Final requested_gpu_type to be passed: {requested_gpu_type}") + try: gpu_profile_name = create_gpu_workload_profile_if_needed( cmd, @@ -1662,9 +2233,9 @@ def create_containerapps_from_compose(cmd, # pylint: disable=R0914 managed_environment['location'], requested_gpu_type ) - + # Create models container app - models_app = create_models_container_app( + _ = create_models_container_app( cmd, resource_group_name, managed_env_name, @@ -1673,30 +2244,34 @@ def create_containerapps_from_compose(cmd, # pylint: disable=R0914 gpu_profile_name, managed_environment['location'] ) - + # Set models endpoint for environment variable injection - models_endpoint = f"http://models" - models_list = list(models.keys()) - - logger.info(f"Models container app created successfully: {models_endpoint}") - + models_endpoint = "http://models" + # models_list = list(models.keys()) # Unused + + logger.info( + f"Models container app created successfully: {models_endpoint}") + except Exception as e: logger.error(f"Failed to create models deployment: {str(e)}") raise - + # Track services that depend on models for environment injection - services_needing_models_env = [] - + # services_needing_models_env = [ # Unused] + # Using the key to iterate to get the service name # pylint: disable=C0201,C0206 # Phase 5: Dry-run mode initialization if dry_run: - print_dry_run_header(compose_file_path, resource_group_name, managed_env_name) + print_dry_run_header( + compose_file_path, + resource_group_name, + managed_env_name) dry_run_services = [] dry_run_has_models = bool(models) # True if models section exists dry_run_has_gateway = False - + for service_name in parsed_compose_file.ordered_services.keys(): service = parsed_compose_file.services[service_name] if not check_supported_platform(service.platform): @@ -1708,40 +2283,52 @@ def create_containerapps_from_compose(cmd, # pylint: disable=R0914 logger.info( # pylint: disable=W1203 f"Creating the Container Apps instance for {service_name} under {resource_group_name} in {location}.") ingress_type, target_port = resolve_ingress_and_target_port(service) - - # Enhanced ACA Compose Support: Override ingress for MCP gateway in ACA mode + + # Enhanced ACA Compose Support: Override ingress for MCP gateway in ACA + # mode # Phase 4: MCP Gateway Detection and Configuration is_mcp_gateway = detect_mcp_gateway_service(service_name, service) mcp_gateway_config = None - + if is_mcp_gateway: mcp_gateway_config = get_mcp_gateway_configuration(service) ingress_type = mcp_gateway_config['ingress_type'] target_port = mcp_gateway_config['port'] - logger.info(f"MCP gateway detected: {service_name} - setting internal ingress on port {target_port}") - - registry, registry_username, registry_password = resolve_registry_from_cli_args(registry_server, registry_user, registry_pass) # pylint: disable=C0301 - transport_setting = resolve_transport_from_cli_args(service_name, transport_mapping) - startup_command, startup_args = resolve_service_startup_command(service) - - # For services with command array (like mcp-gateway), treat as args not command - if service.command and isinstance(service.command, list) and len(service.command) > 0: + logger.info( + f"MCP gateway detected: {service_name} - setting internal ingress on port {target_port}") + + registry, registry_username, registry_password = resolve_registry_from_cli_args( + registry_server, registry_user, registry_pass) # pylint: disable=C0301 + transport_setting = 
resolve_transport_from_cli_args( + service_name, transport_mapping) + startup_command, startup_args = resolve_service_startup_command( + service) + + # For services with command array (like mcp-gateway), treat as args not + # command + if service.command and isinstance( + service.command, list) and len( + service.command) > 0: # All elements become args, no startup_command startup_command = None startup_args = service.command - logger.info(f"Service '{service_name}' has command array - using as args: {startup_args}") - + logger.info( + f"Service '{service_name}' has command array - using as args: {startup_args}") + # Get x-azure-deployment overrides for resources - raw_service_yaml = compose_yaml_original.get('services', {}).get(service_name, {}) + raw_service_yaml = compose_yaml_original.get( + 'services', {}).get(service_name, {}) overrides = parse_x_azure_deployment(raw_service_yaml) override_cpu = overrides.get('cpu') override_memory = overrides.get('memory') - + # Apply resource overrides if specified - base_cpu = override_cpu if override_cpu is not None else resolve_cpu_configuration_from_service(service) - base_memory = override_memory if override_memory is not None else resolve_memory_configuration_from_service(service) - + base_cpu = override_cpu if override_cpu is not None else resolve_cpu_configuration_from_service( + service) + base_memory = override_memory if override_memory is not None else resolve_memory_configuration_from_service( + service) + cpu, memory = validate_memory_and_cpu_setting( base_cpu, base_memory, @@ -1749,126 +2336,164 @@ def create_containerapps_from_compose(cmd, # pylint: disable=R0914 ) replicas = resolve_replicas_from_service(service) environment = resolve_environment_from_service(service) - secret_vars, secret_env_ref = resolve_secret_from_service(service, parsed_compose_file.secrets) + secret_vars, secret_env_ref = resolve_secret_from_service( + service, parsed_compose_file.secrets) if environment is not None and secret_env_ref is not None: environment.extend(secret_env_ref) elif secret_env_ref is not None: environment = secret_env_ref - + # Strip ports from URLs in environment variables for ACA (Envoy handles port mapping) # Also inject proper FQDNs for MCP_GATEWAY_URL references if environment is not None: import re - + # Check if service depends on mcp-gateway and needs URL injection needs_gateway_url = False gateway_allow_insecure = False mcp_gateway_vars = [] # Track all variables that reference mcp-gateway - - # Check for any environment variable referencing mcp-gateway by value + + # Check for any environment variable referencing mcp-gateway by + # value for env_var in environment: var_name = None var_value = None - + if isinstance(env_var, dict): var_name = env_var.get('name') var_value = env_var.get('value', '') elif isinstance(env_var, str) and '=' in env_var: var_name, var_value = env_var.split('=', 1) - + # Check if value contains reference to mcp-gateway service if var_value and '://mcp-gateway' in var_value: needs_gateway_url = True mcp_gateway_vars.append(var_name) - logger.info(f"Detected MCP gateway reference in {var_name}: {var_value}") - + logger.info( + f"Detected MCP gateway reference in {var_name}: {var_value}") + # Determine mcp-gateway allowInsecure setting if needs_gateway_url: - for svc_name, svc_config in compose_yaml_original.get('services', {}).items(): - if detect_mcp_gateway_service(svc_name, type('Service', (), svc_config)()): - azure_deployment = svc_config.get('x-azure-deployment', {}) + for svc_name, svc_config in 
compose_yaml_original.get( + 'services', {}).items(): + if detect_mcp_gateway_service( + svc_name, type('Service', (), svc_config)()): + azure_deployment = svc_config.get( + 'x-azure-deployment', {}) ingress_config = azure_deployment.get('ingress', {}) - gateway_allow_insecure = ingress_config.get('allowInsecure', False) + gateway_allow_insecure = ingress_config.get( + 'allowInsecure', False) break - + for i, env_var in enumerate(environment): if isinstance(env_var, dict) and 'value' in env_var: var_name = env_var.get('name') var_value = env_var.get('value', '') - - # Check if this variable references mcp-gateway and needs FQDN injection + + # Check if this variable references mcp-gateway and needs + # FQDN injection if var_name in mcp_gateway_vars and '://mcp-gateway' in var_value: from ._compose_utils import get_containerapp_fqdn - # Determine if mcp-gateway has external or internal ingress + # Determine if mcp-gateway has external or internal + # ingress gateway_external = False for svc_name in parsed_compose_file.services.keys(): - if detect_mcp_gateway_service(svc_name, parsed_compose_file.services[svc_name]): - raw_gateway_yaml = compose_yaml_original.get('services', {}).get(svc_name, {}) - gateway_overrides = parse_x_azure_deployment(raw_gateway_yaml) - if gateway_overrides.get('ingress_type') == 'external': + if detect_mcp_gateway_service( + svc_name, parsed_compose_file.services[svc_name]): + raw_gateway_yaml = compose_yaml_original.get( + 'services', {}).get(svc_name, {}) + gateway_overrides = parse_x_azure_deployment( + raw_gateway_yaml) + if gateway_overrides.get( + 'ingress_type') == 'external': gateway_external = True break - - gateway_fqdn = get_containerapp_fqdn(cmd, resource_group_name, managed_env_name, 'mcp-gateway', is_external=gateway_external) + + gateway_fqdn = get_containerapp_fqdn( + cmd, + resource_group_name, + managed_env_name, + 'mcp-gateway', + is_external=gateway_external) gateway_protocol = 'http' if gateway_allow_insecure else 'https' - gateway_fqdn = gateway_fqdn.replace('https://', f'{gateway_protocol}://') - - # Extract and preserve the path from the original URL (e.g., /sse from http://mcp-gateway:8811/sse) - path_match = re.search(r'://mcp-gateway(?::\d+)?(/[^\s]*)', var_value) - preserved_path = path_match.group(1) if path_match else '' - + gateway_fqdn = gateway_fqdn.replace( + 'https://', f'{gateway_protocol}://') + + # Extract and preserve the path from the original URL + # (e.g., /sse from http://mcp-gateway:8811/sse) + path_match = re.search( + r'://mcp-gateway(?::\d+)?(/[^\s]*)', var_value) + preserved_path = path_match.group( + 1) if path_match else '' + env_var['value'] = gateway_fqdn + preserved_path - logger.info(f"Transformed {var_name} to FQDN with path: {env_var['value']}") + logger.info( + f"Transformed {var_name} to FQDN with path: {env_var['value']}") else: - # Match http://hostname:port or https://hostname:port patterns - env_var['value'] = re.sub(r'(https?://[^:/]+):\d+', r'\1', env_var['value']) + # Match http://hostname:port or https://hostname:port + # patterns + env_var['value'] = re.sub( + r'(https?://[^:/]+):\d+', r'\1', env_var['value']) elif isinstance(env_var, str) and '=' in env_var: # Handle string format "KEY=VALUE" key, value = env_var.split('=', 1) - + if key in mcp_gateway_vars and '://mcp-gateway' in value: from ._compose_utils import get_containerapp_fqdn gateway_external = False for svc_name in parsed_compose_file.services.keys(): - if detect_mcp_gateway_service(svc_name, parsed_compose_file.services[svc_name]): - 
raw_gateway_yaml = compose_yaml_original.get('services', {}).get(svc_name, {}) - gateway_overrides = parse_x_azure_deployment(raw_gateway_yaml) - if gateway_overrides.get('ingress_type') == 'external': + if detect_mcp_gateway_service( + svc_name, parsed_compose_file.services[svc_name]): + raw_gateway_yaml = compose_yaml_original.get( + 'services', {}).get(svc_name, {}) + gateway_overrides = parse_x_azure_deployment( + raw_gateway_yaml) + if gateway_overrides.get( + 'ingress_type') == 'external': gateway_external = True break - - gateway_fqdn = get_containerapp_fqdn(cmd, resource_group_name, managed_env_name, 'mcp-gateway', is_external=gateway_external) + + gateway_fqdn = get_containerapp_fqdn( + cmd, + resource_group_name, + managed_env_name, + 'mcp-gateway', + is_external=gateway_external) gateway_protocol = 'http' if gateway_allow_insecure else 'https' - gateway_fqdn = gateway_fqdn.replace('https://', f'{gateway_protocol}://') - + gateway_fqdn = gateway_fqdn.replace( + 'https://', f'{gateway_protocol}://') + # Extract and preserve the path from the original URL - path_match = re.search(r'://mcp-gateway(?::\d+)?(/[^\s]*)', value) - preserved_path = path_match.group(1) if path_match else '' - + path_match = re.search( + r'://mcp-gateway(?::\d+)?(/[^\s]*)', value) + preserved_path = path_match.group( + 1) if path_match else '' + environment[i] = f"{key}={gateway_fqdn}{preserved_path}" - logger.info(f"Transformed {key} to FQDN with path: {gateway_fqdn}{preserved_path}") + logger.info( + f"Transformed {key} to FQDN with path: {gateway_fqdn}{preserved_path}") else: value = re.sub(r'(https?://[^:/]+):\d+', r'\1', value) environment[i] = f"{key}={value}" - # Phase 4.5: Inject MCP Gateway Environment Variables if is_mcp_gateway: - logger.info(f"Injecting required MCP gateway environment variables for {service_name}") - + logger.info( + f"Injecting required MCP gateway environment variables for {service_name}") + # Ensure environment list exists if environment is None: environment = [] - + # Get Azure subscription ID and tenant ID from CLI context from azure.cli.core._profile import Profile profile = Profile(cli_ctx=cmd.cli_ctx) subscription_id = profile.get_subscription_id() - + # Get tenant ID from subscription subscription_info = profile.get_subscription(subscription_id) tenant_id = subscription_info.get('tenantId', '') - + # Required environment variables for MCP gateway in ACA mode mcp_env_vars = { 'MCP_RUNTIME': 'ACA', @@ -1877,93 +2502,107 @@ def create_containerapps_from_compose(cmd, # pylint: disable=R0914 'AZURE_APP_NAME': service_name, 'AZURE_TENANT_ID': tenant_id } - + # Add environment variables (check for duplicates) for var_name, var_value in mcp_env_vars.items(): # Check if variable already exists var_exists = any( - env.get('name') == var_name for env in environment if isinstance(env, dict) - ) - + env.get('name') == var_name for env in environment if isinstance( + env, dict)) + if not var_exists: environment.append({ 'name': var_name, 'value': var_value }) - logger.info(f" Set {var_name}={var_value if var_name not in ['AZURE_SUBSCRIPTION_ID', 'AZURE_TENANT_ID'] else var_value[:8] + '...'}") + logger.info( + f" Set {var_name}={var_value if var_name not in ['AZURE_SUBSCRIPTION_ID', 'AZURE_TENANT_ID'] else var_value[:8] + '...'}") else: - logger.info(f" {var_name} already exists in environment, skipping") - + logger.info( + f" {var_name} already exists in environment, skipping") + # Override CPU and memory for MCP gateway - logger.info(f"Setting MCP gateway resources to 2.0 CPU / 
4Gi memory") + logger.info( + "Setting MCP gateway resources to 2.0 CPU / 4Gi memory") cpu = 2.0 memory = "4Gi" - # Phase 3: Inject environment variables for services with models declarations # Services with "models:" section and endpoint_var/model_var get those specific variables injected - # Note: pycomposefile sets models=None if not present, so we just check if service.models is truthy + # Note: pycomposefile sets models=None if not present, so we just check + # if service.models is truthy service_models = getattr(service, 'models', None) if service_models: - logger.info(f"Service '{service_name}' has models section - processing endpoint_var and model_var") - + logger.info( + f"Service '{service_name}' has models section - processing endpoint_var and model_var") + # Ensure environment list exists if environment is None: environment = [] - + # Generate proper MODEL_RUNNER_URL with FQDN and /engines/v1/ suffix # Check if models app has allowInsecure set models_allow_insecure = False for model_name, model_spec in models.items(): - if isinstance(model_spec, dict) and 'x-azure-deployment' in model_spec: - ingress_config = model_spec['x-azure-deployment'].get('ingress', {}) - models_allow_insecure = ingress_config.get('allowInsecure', False) + if isinstance(model_spec, + dict) and 'x-azure-deployment' in model_spec: + ingress_config = model_spec['x-azure-deployment'].get( + 'ingress', {}) + models_allow_insecure = ingress_config.get( + 'allowInsecure', False) break - + from ._compose_utils import get_containerapp_fqdn - models_fqdn = get_containerapp_fqdn(cmd, resource_group_name, managed_env_name, 'models', is_external=False) - + models_fqdn = get_containerapp_fqdn( + cmd, resource_group_name, managed_env_name, 'models', is_external=False) + # Use http if allowInsecure, otherwise https models_protocol = 'http' if models_allow_insecure else 'https' - models_fqdn = models_fqdn.replace('https://', f'{models_protocol}://') + models_fqdn = models_fqdn.replace( + 'https://', f'{models_protocol}://') models_endpoint_with_path = f"{models_fqdn}/engines/v1/" - - logger.info(f"Generated MODEL_RUNNER_URL: {models_endpoint_with_path}") - + + logger.info( + f"Generated MODEL_RUNNER_URL: {models_endpoint_with_path}") + # Process each model reference for model_name, model_config in service_models.items(): if isinstance(model_config, dict): endpoint_var = model_config.get('endpoint_var') model_var = model_config.get('model_var') - + # Inject endpoint_var if specified if endpoint_var: - # Check if variable already exists (handle both string and dict formats) + # Check if variable already exists (handle both string + # and dict formats) var_exists = any( (isinstance(env, dict) and env.get('name') == endpoint_var) or (isinstance(env, str) and env.startswith(f"{endpoint_var}=")) for env in environment ) - + if not var_exists: environment.append({ 'name': endpoint_var, 'value': models_endpoint_with_path }) - logger.info(f" Injected {endpoint_var}={models_endpoint_with_path}") - + logger.info( + f" Injected {endpoint_var}={models_endpoint_with_path}") + # Inject model_var if specified if model_var: - # Try to get model path from top-level models section, fallback to model name - model_path = models.get(model_name, {}).get('model', model_name) if models else model_name - + # Try to get model path from top-level models section, + # fallback to model name + model_path = models.get(model_name, {}).get( + 'model', model_name) if models else model_name + # Check if variable already exists var_exists = any( 
(isinstance(env, dict) and env.get('name') == model_var) or (isinstance(env, str) and env.startswith(f"{model_var}=")) for env in environment ) - + if not var_exists: environment.append({ 'name': model_var, @@ -1971,12 +2610,13 @@ def create_containerapps_from_compose(cmd, # pylint: disable=R0914 }) logger.info(f" Injected {model_var}={model_path}") - # Phase 4: No automatic environment variable injection for mcp-gateway dependencies - # Services should explicitly define any needed environment variables in their compose file + # Services should explicitly define any needed environment variables in + # their compose file if service.build is not None: logger.warning("Build configuration defined for this service.") - logger.warning("The build will be performed by Azure Container Registry.") + logger.warning( + "The build will be performed by Azure Container Registry.") context = service.build.context dockerfile = "Dockerfile" if service.build.dockerfile is not None: @@ -1997,18 +2637,21 @@ def create_containerapps_from_compose(cmd, # pylint: disable=R0914 registry_password, environment) - # Phase 7: Check if container app exists and detect changes (skip in dry-run) + # Phase 7: Check if container app exists and detect changes (skip in + # dry-run) if not dry_run: # Determine final ingress type using x-azure-deployment overrides # Get raw service YAML for override parsing - raw_service_yaml = compose_yaml_original.get('services', {}).get(service_name, {}) + raw_service_yaml = compose_yaml_original.get( + 'services', {}).get(service_name, {}) overrides = parse_x_azure_deployment(raw_service_yaml) # Check if this service has models section has_models = service_name == 'models' # Priority: x-azure-deployment override > default based on service type > default based on ports - # NOTE: We ignore ingress_type from resolve_ingress_and_target_port because it always returns 'external' + # NOTE: We ignore ingress_type from resolve_ingress_and_target_port + # because it always returns 'external' if overrides.get('ingress_type') is not None: final_ingress_type = overrides['ingress_type'] elif is_mcp_gateway or has_models: @@ -2018,7 +2661,7 @@ def create_containerapps_from_compose(cmd, # pylint: disable=R0914 final_ingress_type = 'internal' # Default to internal if ports exist else: final_ingress_type = None - + # Apply image override if specified in x-azure-deployment final_image = overrides.get('image') or image @@ -2027,10 +2670,11 @@ def create_containerapps_from_compose(cmd, # pylint: disable=R0914 # Check if container app already exists try: - existing_app = show_containerapp(cmd=cmd, resource_group_name=resource_group_name, name=service_name) + existing_app = show_containerapp( + cmd=cmd, resource_group_name=resource_group_name, name=service_name) except Exception: existing_app = None - + if existing_app: # Build new configuration for comparison new_config = { @@ -2041,15 +2685,17 @@ def create_containerapps_from_compose(cmd, # pylint: disable=R0914 'max_replicas': replicas, 'env_vars': environment } - + # Detect changes - changes = detect_configuration_changes(existing_app, new_config) + changes = detect_configuration_changes( + existing_app, new_config) log_update_detection(service_name, changes, logger) - + # If changes detected, update instead of create if changes['has_changes']: - logger.info(f"Updating existing container app: {service_name}") - updated_app = update_containerapp_from_compose( + logger.info( + f"Updating existing container app: {service_name}") + updated_app = 
build_containerapp_from_compose_service( cmd=cmd, resource_group_name=resource_group_name, app_name=service_name, @@ -2059,33 +2705,36 @@ def create_containerapps_from_compose(cmd, # pylint: disable=R0914 memory=memory if changes['resources_changed'] else None, min_replicas=replicas if changes['replicas_changed'] else None, max_replicas=replicas if changes['replicas_changed'] else None, - logger=logger - ) + logger=logger) containerapps_from_compose.append(updated_app) continue # Skip creation, move to next service - - # Convert environment variables from dict format to CLI string format (KEY=VALUE) + + # Convert environment variables from dict format to CLI string + # format (KEY=VALUE) env_vars_cli_format = None if environment: env_vars_cli_format = [] for env in environment: - if isinstance(env, dict) and "name" in env and "value" in env: - env_vars_cli_format.append(f"{env['name']}={env['value']}") + if isinstance( + env, dict) and "name" in env and "value" in env: + env_vars_cli_format.append( + f"{env['name']}={env['value']}") elif isinstance(env, str): env_vars_cli_format.append(env) - + # Determine final replica count with appropriate defaults # - MCP gateway and models: min 1 replica (always) # - Other services: default to 1 if not specified final_min_replicas = replicas if replicas is not None else 1 final_max_replicas = replicas if replicas is not None else 1 - + # Ensure MCP gateway always has at least 1 replica if is_mcp_gateway and final_min_replicas < 1: - logger.info(f"MCP gateway {service_name}: forcing min replicas to 1") + logger.info( + f"MCP gateway {service_name}: forcing min replicas to 1") final_min_replicas = 1 final_max_replicas = max(1, final_max_replicas) - + containerapps_from_compose.append( create_containerapp(cmd, service_name, @@ -2110,15 +2759,17 @@ def create_containerapps_from_compose(cmd, # pylint: disable=R0914 max_replicas=final_max_replicas, ) ) - # Phase 5: Dry-run mode - collect service config instead of deploying if dry_run: - # Get raw service YAML for x-azure-deployment parsing (use original, unmodified YAML) - raw_service_yaml = compose_yaml_original.get('services', {}).get(service_name, {}) - + # Get raw service YAML for x-azure-deployment parsing (use + # original, unmodified YAML) + raw_service_yaml = compose_yaml_original.get( + 'services', {}).get(service_name, {}) + # In dry-run mode, don't pass ingress_type from resolve_ingress_and_target_port # because it always returns 'external', which interferes with x-azure-deployment overrides - # Instead, pass None and let collect_dry_run_service_config determine ingress from overrides or ports + # Instead, pass None and let collect_dry_run_service_config + # determine ingress from overrides or ports service_config = collect_dry_run_service_config( service_name=service_name, service=service, @@ -2131,57 +2782,70 @@ def create_containerapps_from_compose(cmd, # pylint: disable=R0914 replicas=replicas, is_models_service=(service_name == 'models'), is_mcp_gateway=is_mcp_gateway, - gpu_type=raw_service_yaml.get('x-azure-deployment', {}).get('workloadProfiles', {}).get('workloadProfileType') if service_name == 'models' else None, + gpu_type=raw_service_yaml.get( + 'x-azure-deployment', + {}).get( + 'workloadProfiles', + {}).get('workloadProfileType') if service_name == 'models' else None, raw_service=raw_service_yaml, models_config=models ) dry_run_services.append(service_config) - + if is_mcp_gateway: dry_run_has_gateway = True - # Phase 4: MCP Gateway Post-Creation Setup if is_mcp_gateway and not 
            logger.info(f"Configuring MCP gateway: {service_name}")
-
+
            # Enable system-assigned managed identity
            try:
-                identity = enable_managed_identity(cmd, resource_group_name, service_name)
+                identity = enable_managed_identity(
+                    cmd, resource_group_name, service_name)
                principal_id = identity.get('principalId')
-
+
                if principal_id:
                    # Attempt role assignment (graceful fallback if it fails)
-                    attempt_role_assignment(cmd, principal_id, resource_group_name, service_name)
+                    attempt_role_assignment(
+                        cmd, principal_id, resource_group_name, service_name)
                else:
-                    logger.warning(f"Principal ID not available yet for '{service_name}' - skipping role assignment")
+                    logger.warning(
+                        f"Principal ID not available yet for '{service_name}' - skipping role assignment")
-
+
                # Inject MCP gateway environment variables
                from azure.cli.core.commands.client_factory import get_subscription_id
                subscription_id = get_subscription_id(cmd.cli_ctx)
-
+
                # Note: Environment variables are injected during initial creation
                # For updates, would need to retrieve and update the app
-                logger.info(f"MCP gateway '{service_name}' configured successfully")
-
+                logger.info(
+                    f"MCP gateway '{service_name}' configured successfully")
+
            except Exception as e:
-                logger.error(f"Failed to configure MCP gateway '{service_name}': {str(e)}")
-                # Don't fail deployment - gateway is created, just not fully configured
-                logger.warning("MCP gateway created but requires manual configuration")
-
+                logger.error(
+                    f"Failed to configure MCP gateway '{service_name}': {str(e)}")
+                # Don't fail deployment - gateway is created, just not fully
+                # configured
+                logger.warning(
+                    "MCP gateway created but requires manual configuration")
+
        # Check if this service depends on MCP gateway
        service_depends_on_gateway = False
        if hasattr(service, 'depends_on') and service.depends_on:
            if isinstance(service.depends_on, list):
-                service_depends_on_gateway = any('mcp-gateway' in str(dep).lower() for dep in service.depends_on)
+                service_depends_on_gateway = any(
+                    'mcp-gateway' in str(dep).lower() for dep in service.depends_on)
            elif isinstance(service.depends_on, dict):
-                service_depends_on_gateway = any('mcp-gateway' in str(dep).lower() for dep in service.depends_on.keys())
-
+                service_depends_on_gateway = any(
+                    'mcp-gateway' in str(dep).lower() for dep in service.depends_on.keys())
+
        # Inject MCP_GATEWAY_URL if service depends on gateway
        if service_depends_on_gateway:
            # MCP gateway URL format: http://mcp-gateway:8811
-            gateway_url = "http://mcp-gateway:8811"
-            logger.info(f"Service '{service_name}' depends on MCP gateway - will inject MCP_GATEWAY_URL")
+            _ = "http://mcp-gateway:8811"  # gateway_url
+            logger.info(
+                f"Service '{service_name}' depends on MCP gateway - will inject MCP_GATEWAY_URL")
 
            # Note: Environment was already set during creation above
            # If needed to update, would retrieve the created app and update it
@@ -2189,14 +2853,16 @@ def create_containerapps_from_compose(cmd,  # pylint: disable=R0914
     if dry_run:
         # Print each service plan
         for service_config in dry_run_services:
-            print_dry_run_service_plan(service_config['service_name'], service_config)
+            print_dry_run_service_plan(
+                service_config['service_name'], service_config)
 
         # Print models deployment if present
         if dry_run_has_models and models:
             # Extract GPU profile from x-azure-deployment
             x_azure = models.get("x-azure-deployment", {})
             workload_profiles = x_azure.get("workloadProfiles", {})
-            profile_type = workload_profiles.get("workloadProfileType", "Consumption")
+            profile_type = 
workload_profiles.get( + "workloadProfileType", "Consumption") gpu_profile_info = { "type": profile_type, "name": profile_type @@ -2214,12 +2880,31 @@ def create_containerapps_from_compose(cmd, # pylint: disable=R0914 return None # Dry-run complete, no resources created -def set_workload_profile(cmd, resource_group_name, env_name, workload_profile_name, workload_profile_type=None, min_nodes=None, max_nodes=None): - return update_managed_environment(cmd, env_name, resource_group_name, workload_profile_type=workload_profile_type, workload_profile_name=workload_profile_name, min_nodes=min_nodes, max_nodes=max_nodes) - - -def patch_list(cmd, resource_group_name=None, managed_env=None, show_all=False): - # Ensure that Docker is running locally before attempting to use the pack CLI +def set_workload_profile( + cmd, + resource_group_name, + env_name, + workload_profile_name, + workload_profile_type=None, + min_nodes=None, + max_nodes=None): + return update_managed_environment( + cmd, + env_name, + resource_group_name, + workload_profile_type=workload_profile_type, + workload_profile_name=workload_profile_name, + min_nodes=min_nodes, + max_nodes=max_nodes) + + +def patch_list( + cmd, + resource_group_name=None, + managed_env=None, + show_all=False): + # Ensure that Docker is running locally before attempting to use the pack + # CLI if is_docker_running() is False: logger.error("Please install or start Docker and try again.") return @@ -2229,18 +2914,21 @@ def patch_list(cmd, resource_group_name=None, managed_env=None, show_all=False): if pack_exec_path is None: return - # List all Container Apps in the given resource group and managed environment + # List all Container Apps in the given resource group and managed + # environment logger.warning("Listing container apps...") ca_list = list_containerapp(cmd, resource_group_name, managed_env) - # Fetch all images currently deployed to containers for the listed Container Apps + # Fetch all images currently deployed to containers for the listed + # Container Apps imgs = [] if ca_list: for ca in ca_list: id_parts = parse_resource_id(ca["id"]) resource_group_name = id_parts.get('resource_group') container_app_name = id_parts.get('name') - managed_env_id_parts = parse_resource_id(ca["properties"]["environmentId"]) + managed_env_id_parts = parse_resource_id( + ca["properties"]["environmentId"]) managed_env_name = managed_env_id_parts.get('name') containers = safe_get(ca, "properties", "template", "containers") for container in containers: @@ -2252,25 +2940,32 @@ def patch_list(cmd, resource_group_name=None, managed_env=None, show_all=False): targetResourceGroup=resource_group_name) imgs.append(result) - # Iterate over each image and execute the `pack inspect` command to fetch the run image used (if previously built via buildpacks) + # Iterate over each image and execute the `pack inspect` command to fetch + # the run image used (if previously built via buildpacks) results = [] inspect_results = [] logger.warning("Inspecting container apps images...") with ThreadPoolExecutor(max_workers=10) as executor: - [executor.submit(patch_get_image_inspection, pack_exec_path, img, inspect_results) for img in imgs] + [executor.submit(patch_get_image_inspection, + pack_exec_path, img, inspect_results) for img in imgs] - # Fetch the list of Oryx-based run images that could be used to patch previously built images + # Fetch the list of Oryx-based run images that could be used to patch + # previously built images oryx_run_images = get_oryx_run_image_tags() # Start checking 
if the images are based on an Oryx image results = [] logger.warning("Checking for patches...") for inspect_result in inspect_results: - results.append(_get_patchable_check_result(inspect_result, oryx_run_images)) + results.append( + _get_patchable_check_result( + inspect_result, + oryx_run_images)) if show_all is False: results = [result for result in results if result["id"] is not None] if not results: - logger.warning("No container apps available to patch at this time. Use --show-all to show the container apps that cannot be patched.") + logger.warning( + "No container apps available to patch at this time. Use --show-all to show the container apps that cannot be patched.") return results @@ -2310,13 +3005,20 @@ def _get_patchable_check_result(inspect_result, oryx_run_images): return result # Define the MCR repositories that are supported for patching - mcr_repos = ["oryx/dotnetcore", "oryx/node", "oryx/python", "azure-buildpacks/java"] + mcr_repos = [ + "oryx/dotnetcore", + "oryx/node", + "oryx/python", + "azure-buildpacks/java"] # Iterate over each base run image found to see if a patch can be applied for run_images_prop in run_images_props: base_run_image_name = run_images_prop["name"] if any(base_run_image_name.find(repo) != -1 for repo in mcr_repos): - return patchable_check(base_run_image_name, oryx_run_images, inspect_result=inspect_result) + return patchable_check( + base_run_image_name, + oryx_run_images, + inspect_result=inspect_result) # Not based on a supported MCR repository result.update( @@ -2326,10 +3028,12 @@ def _get_patchable_check_result(inspect_result, oryx_run_images): def patch_get_image_inspection(pack_exec_path, img, info_list): - # Execute the 'pack inspect' command on an image and return the result (with additional Container App metadata) + # Execute the 'pack inspect' command on an image and return the result + # (with additional Container App metadata) with subprocess.Popen(pack_exec_path + " inspect-image " + img["imageName"] + " --output json", shell=True, stderr=subprocess.PIPE, stdout=subprocess.PIPE) as img_info: img_info_out, img_info_err = img_info.communicate() - if img_info_err.find(b"status code 401 Unauthorized") != -1 or img_info_err.find(b"unable to find image") != -1: + if img_info_err.find(b"status code 401 Unauthorized") != - \ + 1 or img_info_err.find(b"unable to find image") != -1: inspect_result = dict(remote_info=401, image_name=img["imageName"]) else: inspect_result = json.loads(img_info_out) @@ -2342,42 +3046,63 @@ def patch_get_image_inspection(pack_exec_path, img, info_list): info_list.append(inspect_result) -def patch_interactive(cmd, resource_group_name=None, managed_env=None, show_all=False): +def patch_interactive( + cmd, + resource_group_name=None, + managed_env=None, + show_all=False): if is_docker_running() is False: logger.error("Please install or start Docker and try again.") return - patchable_check_results = patch_list(cmd, resource_group_name, managed_env, show_all=show_all) + patchable_check_results = patch_list( + cmd, resource_group_name, managed_env, show_all=show_all) pack_exec_path = get_pack_exec_path() if pack_exec_path is None: return if patchable_check_results is None: return - patchable_check_results_json = json.dumps(patchable_check_results, indent=2) + patchable_check_results_json = json.dumps( + patchable_check_results, indent=2) without_unpatchable_results = [] - without_unpatchable_results = [result for result in patchable_check_results if result["id"] is not None] - if without_unpatchable_results == [] and 
(patchable_check_results is None or show_all is False): + without_unpatchable_results = [ + result for result in patchable_check_results if result["id"] is not None] + if without_unpatchable_results == [] and ( + patchable_check_results is None or show_all is False): return logger.warning(patchable_check_results_json) if without_unpatchable_results == []: return - user_input = input("Do you want to apply all the patches or specify by id? (y/n/id)\n") - patch_apply_handle_input(cmd, patchable_check_results, user_input, pack_exec_path) - - -def patch_apply(cmd, resource_group_name=None, managed_env=None, show_all=False): + user_input = input( + "Do you want to apply all the patches or specify by id? (y/n/id)\n") + patch_apply_handle_input( + cmd, + patchable_check_results, + user_input, + pack_exec_path) + + +def patch_apply( + cmd, + resource_group_name=None, + managed_env=None, + show_all=False): if is_docker_running() is False: logger.error("Please install or start Docker and try again.") return - patchable_check_results = patch_list(cmd, resource_group_name, managed_env, show_all=show_all) + patchable_check_results = patch_list( + cmd, resource_group_name, managed_env, show_all=show_all) pack_exec_path = get_pack_exec_path() if pack_exec_path is None: return if patchable_check_results is None: return - patchable_check_results_json = json.dumps(patchable_check_results, indent=2) + patchable_check_results_json = json.dumps( + patchable_check_results, indent=2) without_unpatchable_results = [] - without_unpatchable_results = [result for result in patchable_check_results if result["id"] is not None] - if without_unpatchable_results == [] and (patchable_check_results is None or show_all is False): + without_unpatchable_results = [ + result for result in patchable_check_results if result["id"] is not None] + if without_unpatchable_results == [] and ( + patchable_check_results is None or show_all is False): return logger.warning(patchable_check_results_json) if without_unpatchable_results == []: @@ -2431,31 +3156,64 @@ def patch_apply_handle_input(cmd, patch_check_list, method, pack_exec_path): return -def patch_cli_call(cmd, resource_group, container_app_name, container_name, target_image_name, new_run_image, pack_exec_path): +def patch_cli_call( + cmd, + resource_group, + container_app_name, + container_name, + target_image_name, + new_run_image, + pack_exec_path): try: - logger.warning("Applying patch for container app: " + container_app_name + " container: " + container_name) - subprocess.run(f"{pack_exec_path} rebase -q {target_image_name} --run-image {new_run_image} --force", shell=True, check=True) - new_target_image_name = target_image_name.split(":")[0] + ":" + new_run_image.split(":")[1] - subprocess.run(f"docker tag {target_image_name} {new_target_image_name}", shell=True, check=True) + logger.warning( + "Applying patch for container app: " + + container_app_name + + " container: " + + container_name) + subprocess.run( + f"{pack_exec_path} rebase -q {target_image_name} --run-image {new_run_image} --force", + shell=True, + check=True) + new_target_image_name = target_image_name.split( + ":")[0] + ":" + new_run_image.split(":")[1] + subprocess.run( + f"docker tag {target_image_name} {new_target_image_name}", + shell=True, + check=True) logger.debug(f"Publishing {new_target_image_name} to registry...") - subprocess.run(f"docker push -q {new_target_image_name}", shell=True, check=True) - logger.warning("Patch applied and published successfully.\nNew image: " + new_target_image_name) + 
subprocess.run( + f"docker push -q {new_target_image_name}", + shell=True, + check=True) + logger.warning( + "Patch applied and published successfully.\nNew image: " + + new_target_image_name) except Exception: - logger.error("Error: Failed to apply patch and publish. Check if registry is logged in and has write access.") + logger.error( + "Error: Failed to apply patch and publish. Check if registry is logged in and has write access.") raise try: - logger.warning("Patching container app: " + container_app_name + " container: " + container_name) - logger.info("Creating new revision with image: " + new_target_image_name) - update_info_json = update_containerapp(cmd, - name=container_app_name, - resource_group_name=resource_group, - container_name=container_name, - image=new_target_image_name) + logger.warning( + "Patching container app: " + + container_app_name + + " container: " + + container_name) + logger.info( + "Creating new revision with image: " + + new_target_image_name) + update_info_json = update_containerapp( + cmd, + name=container_app_name, + resource_group_name=resource_group, + container_name=container_name, + image=new_target_image_name) logger.warning(json.dumps(update_info_json, indent=2)) - logger.warning("Container app revision created successfully from the patched image.") + logger.warning( + "Container app revision created successfully from the patched image.") return except Exception: - logger.error("Error: Failed to create new revision with the container app.") + logger.error( + "Error: Failed to create new revision with the container app.") raise @@ -2472,7 +3230,10 @@ def show_connected_environment(cmd, name, resource_group_name): return connected_env_decorator.show() -def list_connected_environments(cmd, resource_group_name=None, custom_location=None): +def list_connected_environments( + cmd, + resource_group_name=None, + custom_location=None): raw_parameters = locals() connected_env_decorator = ConnectedEnvironmentDecorator( cmd=cmd, @@ -2485,7 +3246,11 @@ def list_connected_environments(cmd, resource_group_name=None, custom_location=N return connected_env_decorator.list() -def delete_connected_environment(cmd, name, resource_group_name, no_wait=False): +def delete_connected_environment( + cmd, + name, + resource_group_name, + no_wait=False): raw_parameters = locals() connected_env_decorator = ConnectedEnvironmentDecorator( cmd=cmd, @@ -2523,46 +3288,78 @@ def create_connected_environment(cmd, return r -def connected_env_list_dapr_components(cmd, resource_group_name, environment_name): +def connected_env_list_dapr_components( + cmd, resource_group_name, environment_name): _validate_subscription_registered(cmd, CONTAINER_APPS_RP) - return ConnectedEnvDaprComponentClient.list(cmd, resource_group_name, environment_name) + return ConnectedEnvDaprComponentClient.list( + cmd, resource_group_name, environment_name) -def connected_env_show_dapr_component(cmd, resource_group_name, dapr_component_name, environment_name): +def connected_env_show_dapr_component( + cmd, + resource_group_name, + dapr_component_name, + environment_name): _validate_subscription_registered(cmd, CONTAINER_APPS_RP) - return ConnectedEnvDaprComponentClient.show(cmd, resource_group_name, environment_name, name=dapr_component_name) + return ConnectedEnvDaprComponentClient.show( + cmd, resource_group_name, environment_name, name=dapr_component_name) -def connected_env_remove_dapr_component(cmd, resource_group_name, dapr_component_name, environment_name, no_wait=False): +def 
connected_env_remove_dapr_component( + cmd, + resource_group_name, + dapr_component_name, + environment_name, + no_wait=False): _validate_subscription_registered(cmd, CONTAINER_APPS_RP) try: - r = ConnectedEnvDaprComponentClient.delete(cmd, resource_group_name, environment_name, name=dapr_component_name, no_wait=no_wait) + r = ConnectedEnvDaprComponentClient.delete( + cmd, + resource_group_name, + environment_name, + name=dapr_component_name, + no_wait=no_wait) logger.warning("Dapr componenet successfully deleted.") return r except Exception as e: handle_raw_exception(e) -def connected_env_create_or_update_dapr_component(cmd, resource_group_name, environment_name, dapr_component_name, yaml, no_wait=False): +def connected_env_create_or_update_dapr_component( + cmd, + resource_group_name, + environment_name, + dapr_component_name, + yaml, + no_wait=False): _validate_subscription_registered(cmd, CONTAINER_APPS_RP) yaml_dapr_component = load_yaml_file(yaml) - if not isinstance(yaml_dapr_component, dict): # pylint: disable=unidiomatic-typecheck - raise ValidationError('Invalid YAML provided. Please see https://learn.microsoft.com/en-us/azure/container-apps/dapr-overview?tabs=bicep1%2Cyaml#component-schema for a valid Dapr Component YAML spec.') + if not isinstance( + yaml_dapr_component, + dict): # pylint: disable=unidiomatic-typecheck + raise ValidationError( + 'Invalid YAML provided. Please see https://learn.microsoft.com/en-us/azure/container-apps/dapr-overview?tabs=bicep1%2Cyaml#component-schema for a valid Dapr Component YAML spec.') - # Deserialize the yaml into a DaprComponent object. Need this since we're not using SDK + # Deserialize the yaml into a DaprComponent object. Need this since we're + # not using SDK try: deserializer = create_deserializer(CONTAINER_APPS_SDK_MODELS) - daprcomponent_def = deserializer('ConnectedEnvironmentDaprComponent', yaml_dapr_component) + daprcomponent_def = deserializer( + 'ConnectedEnvironmentDaprComponent', + yaml_dapr_component) except DeserializationError as ex: - raise ValidationError('Invalid YAML provided. Please see https://learn.microsoft.com/en-us/azure/container-apps/dapr-overview?tabs=bicep1%2Cyaml#component-schema for a valid Dapr Component YAML spec.') from ex + raise ValidationError( + 'Invalid YAML provided. Please see https://learn.microsoft.com/en-us/azure/container-apps/dapr-overview?tabs=bicep1%2Cyaml#component-schema for a valid Dapr Component YAML spec.') from ex - daprcomponent_def = _convert_object_from_snake_to_camel_case(_object_to_dict(daprcomponent_def)) + daprcomponent_def = _convert_object_from_snake_to_camel_case( + _object_to_dict(daprcomponent_def)) - # Remove "additionalProperties" and read-only attributes that are introduced in the deserialization. Need this since we're not using SDK + # Remove "additionalProperties" and read-only attributes that are + # introduced in the deserialization. 
Need this since we're not using SDK _remove_additional_attributes(daprcomponent_def) _remove_dapr_readonly_attributes(daprcomponent_def) @@ -2572,17 +3369,25 @@ def connected_env_create_or_update_dapr_component(cmd, resource_group_name, envi dapr_component_envelope = {"properties": daprcomponent_def} try: - r = ConnectedEnvDaprComponentClient.create_or_update(cmd, resource_group_name=resource_group_name, - environment_name=environment_name, - dapr_component_envelope=dapr_component_envelope, - name=dapr_component_name, - no_wait=no_wait) + r = ConnectedEnvDaprComponentClient.create_or_update( + cmd, + resource_group_name=resource_group_name, + environment_name=environment_name, + dapr_component_envelope=dapr_component_envelope, + name=dapr_component_name, + no_wait=no_wait) return r except Exception as e: handle_raw_exception(e) -def connected_env_list_certificates(cmd, name, resource_group_name, location=None, certificate=None, thumbprint=None): +def connected_env_list_certificates( + cmd, + name, + resource_group_name, + location=None, + certificate=None, + thumbprint=None): _validate_subscription_registered(cmd, CONTAINER_APPS_RP) def location_match(c): @@ -2600,34 +3405,49 @@ def both_match(c): else: certificate_name = certificate try: - r = ConnectedEnvCertificateClient.show_certificate(cmd, resource_group_name, name, certificate_name) + r = ConnectedEnvCertificateClient.show_certificate( + cmd, resource_group_name, name, certificate_name) return [r] if both_match(r) else [] except Exception as e: handle_raw_exception(e) else: try: - r = ConnectedEnvCertificateClient.list_certificates(cmd, resource_group_name, name) + r = ConnectedEnvCertificateClient.list_certificates( + cmd, resource_group_name, name) return list(filter(both_match, r)) except Exception as e: handle_raw_exception(e) -def connected_env_upload_certificate(cmd, name, resource_group_name, certificate_file, certificate_name=None, certificate_password=None, location=None, prompt=False, no_wait=False): +def connected_env_upload_certificate( + cmd, + name, + resource_group_name, + certificate_file, + certificate_name=None, + certificate_password=None, + location=None, + prompt=False, + no_wait=False): _validate_subscription_registered(cmd, CONTAINER_APPS_RP) blob, thumbprint = load_cert_file(certificate_file, certificate_password) cert_name = None if certificate_name: - name_availability = connected_env_check_cert_name_availability(cmd, resource_group_name, name, certificate_name) + name_availability = connected_env_check_cert_name_availability( + cmd, resource_group_name, name, certificate_name) if not name_availability["nameAvailable"]: if name_availability["reason"] == NAME_ALREADY_EXISTS: msg = '{}. If continue with this name, it will be overwritten by the new certificate file.\nOverwrite?' overwrite = True if prompt: - overwrite = prompt_y_n(msg.format(name_availability["message"])) + overwrite = prompt_y_n(msg.format( + name_availability["message"])) else: - logger.warning('{}. It will be overwritten by the new certificate file.'.format(name_availability["message"])) + logger.warning( + '{}. 
It will be overwritten by the new certificate file.'.format( + name_availability["message"])) if overwrite: cert_name = certificate_name else: @@ -2636,8 +3456,10 @@ def connected_env_upload_certificate(cmd, name, resource_group_name, certificate cert_name = certificate_name while not cert_name: - random_name = generate_randomized_cert_name(thumbprint, name, resource_group_name) - check_result = connected_env_check_cert_name_availability(cmd, resource_group_name, name, random_name) + random_name = generate_randomized_cert_name( + thumbprint, name, resource_group_name) + check_result = connected_env_check_cert_name_availability( + cmd, resource_group_name, name, random_name) if check_result["nameAvailable"]: cert_name = random_name elif not check_result["nameAvailable"] and (check_result["reason"] == NAME_INVALID): @@ -2649,28 +3471,44 @@ def connected_env_upload_certificate(cmd, name, resource_group_name, certificate certificate["location"] = location if not certificate["location"]: try: - env = ConnectedEnvironmentClient.show(cmd, resource_group_name, name) + env = ConnectedEnvironmentClient.show( + cmd, resource_group_name, name) certificate["location"] = env["location"] except Exception as e: handle_raw_exception(e) try: - r = ConnectedEnvCertificateClient.create_or_update_certificate(cmd, resource_group_name, name, cert_name, certificate, no_wait) + r = ConnectedEnvCertificateClient.create_or_update_certificate( + cmd, resource_group_name, name, cert_name, certificate, no_wait) return r except Exception as e: handle_raw_exception(e) -def connected_env_delete_certificate(cmd, resource_group_name, name, certificate=None, thumbprint=None): +def connected_env_delete_certificate( + cmd, + resource_group_name, + name, + certificate=None, + thumbprint=None): _validate_subscription_registered(cmd, CONTAINER_APPS_RP) if not certificate and not thumbprint: - raise RequiredArgumentMissingError('Please specify at least one of parameters: --certificate and --thumbprint') - certs = connected_env_list_certificates(cmd, name, resource_group_name, certificate=certificate, thumbprint=thumbprint) + raise RequiredArgumentMissingError( + 'Please specify at least one of parameters: --certificate and --thumbprint') + certs = connected_env_list_certificates( + cmd, + name, + resource_group_name, + certificate=certificate, + thumbprint=thumbprint) for cert in certs: try: - ConnectedEnvCertificateClient.delete_certificate(cmd, resource_group_name, name, cert["name"]) - logger.warning('Successfully deleted certificate: {}'.format(cert["name"])) + ConnectedEnvCertificateClient.delete_certificate( + cmd, resource_group_name, name, cert["name"]) + logger.warning( + 'Successfully deleted certificate: {}'.format( + cert["name"])) except Exception as e: handle_raw_exception(e) @@ -2679,7 +3517,8 @@ def connected_env_show_storage(cmd, name, storage_name, resource_group_name): _validate_subscription_registered(cmd, CONTAINER_APPS_RP) try: - return ConnectedEnvStorageClient.show(cmd, resource_group_name, name, storage_name) + return ConnectedEnvStorageClient.show( + cmd, resource_group_name, name, storage_name) except CLIError as e: handle_raw_exception(e) @@ -2699,12 +3538,14 @@ def connected_env_create_or_update_storage(cmd, storage_name, resource_group_nam r = None try: - r = ConnectedEnvStorageClient.show(cmd, resource_group_name, name, storage_name) - except: + r = ConnectedEnvStorageClient.show( + cmd, resource_group_name, name, storage_name) + except BaseException: pass if r: - logger.warning("Only AzureFile 
account keys can be updated. In order to change the AzureFile share name or account name, please delete this storage and create a new one.") + logger.warning( + "Only AzureFile account keys can be updated. In order to change the AzureFile share name or account name, please delete this storage and create a new one.") storage_def = AzureFilePropertiesModel storage_def["accountKey"] = azure_file_account_key @@ -2716,56 +3557,77 @@ def connected_env_create_or_update_storage(cmd, storage_name, resource_group_nam storage_envelope["properties"]["azureFile"] = storage_def try: - return ConnectedEnvStorageClient.create_or_update(cmd, resource_group_name, name, storage_name, storage_envelope, no_wait) + return ConnectedEnvStorageClient.create_or_update( + cmd, resource_group_name, name, storage_name, storage_envelope, no_wait) except CLIError as e: handle_raw_exception(e) -def connected_env_remove_storage(cmd, storage_name, name, resource_group_name, no_wait=False): +def connected_env_remove_storage( + cmd, + storage_name, + name, + resource_group_name, + no_wait=False): _validate_subscription_registered(cmd, CONTAINER_APPS_RP) try: - return ConnectedEnvStorageClient.delete(cmd, resource_group_name, name, storage_name, no_wait) + return ConnectedEnvStorageClient.delete( + cmd, resource_group_name, name, storage_name, no_wait) except CLIError as e: handle_raw_exception(e) -def setup_core_dns(cmd, distro=None, kube_config=None, kube_context=None, skip_ssl_verification=False): +def setup_core_dns( + cmd, + distro=None, + kube_config=None, + kube_context=None, + skip_ssl_verification=False): # Checking the connection to kubernetes cluster. check_kube_connection(kube_config, kube_context, skip_ssl_verification) - # create a local path to store the original and the changed deployment and core dns configmap. + # create a local path to store the original and the changed deployment and + # core dns configmap. 
time_stamp = time.strftime("%Y-%m-%d-%H.%M.%S") - parent_folder, folder_status, error = create_folder("setup-core-dns", time_stamp) + parent_folder, folder_status, error = create_folder( + "setup-core-dns", time_stamp) if not folder_status: raise ValidationError(error) - original_folder, folder_status, error = create_sub_folder(parent_folder, "original") + original_folder, folder_status, error = create_sub_folder( + parent_folder, "original") if not folder_status: raise ValidationError(error) - new_filepath_with_timestamp, folder_status, error = create_sub_folder(parent_folder, "new") + new_filepath_with_timestamp, folder_status, error = create_sub_folder( + parent_folder, "new") if not folder_status: raise ValidationError(error) - kube_client = create_kube_client(kube_config, kube_context, skip_ssl_verification) + kube_client = create_kube_client( + kube_config, kube_context, skip_ssl_verification) if distro == AKS_AZURE_LOCAL_DISTRO: # backup original deployment and configmap logger.info("Backup existing coredns deployment and configmap") - original_coredns_deployment = get_core_dns_deployment(kube_client, original_folder) + original_coredns_deployment = get_core_dns_deployment( + kube_client, original_folder) coredns_deployment = copy.deepcopy(original_coredns_deployment) - original_coredns_configmap = get_core_dns_configmap(kube_client, original_folder) + original_coredns_configmap = get_core_dns_configmap( + kube_client, original_folder) coredns_configmap = copy.deepcopy(original_coredns_configmap) volumes = coredns_deployment.spec.template.spec.volumes if volumes is None: - raise ValidationError('Unexpected Volumes in coredns deployment, Volumes not found') + raise ValidationError( + 'Unexpected Volumes in coredns deployment, Volumes not found') volume_mounts = coredns_deployment.spec.template.spec.containers[0].volume_mounts if volume_mounts is None: - raise ValidationError('Unexpected Volume mounts in coredns deployment, VolumeMounts not found') + raise ValidationError( + 'Unexpected Volume mounts in coredns deployment, VolumeMounts not found') coredns_configmap_volume_set = False custom_coredns_configmap_volume_set = False @@ -2786,58 +3648,82 @@ def setup_core_dns(cmd, distro=None, kube_config=None, kube_context=None, skip_s break if not coredns_configmap_volume_set: - raise ValidationError("Cannot find volume and volume mounts for core dns config map") + raise ValidationError( + "Cannot find volume and volume mounts for core dns config map") - original_custom_core_dns_configmap = backup_custom_core_dns_configmap(kube_client, original_folder) + original_custom_core_dns_configmap = backup_custom_core_dns_configmap( + kube_client, original_folder) try: - patch_coredns(kube_client, coredns_configmap, coredns_deployment, new_filepath_with_timestamp, - original_custom_core_dns_configmap is not None, not custom_coredns_configmap_volume_set, not custom_coredns_configmap_volume_mounted) + patch_coredns( + kube_client, + coredns_configmap, + coredns_deployment, + new_filepath_with_timestamp, + original_custom_core_dns_configmap is not None, + not custom_coredns_configmap_volume_set, + not custom_coredns_configmap_volume_mounted) except Exception as e: logger.error(f"Failed to setup custom coredns. 
{e}") logger.info("Start to reverted coredns") replace_succeeded = False retry_count = 0 while not replace_succeeded and retry_count < 10: - logger.info(f"Retry the revert operation with retry count {retry_count}") + logger.info( + f"Retry the revert operation with retry count {retry_count}") try: logger.info("Start to reverted coredns configmap") - latest_core_dns_configmap = get_core_dns_configmap(kube_client) + latest_core_dns_configmap = get_core_dns_configmap( + kube_client) latest_core_dns_configmap.data = original_coredns_configmap.data - replace_configmap(CORE_DNS, KUBE_SYSTEM, kube_client, latest_core_dns_configmap) + replace_configmap( + CORE_DNS, + KUBE_SYSTEM, + kube_client, + latest_core_dns_configmap) logger.info("Reverted coredns configmap successfully") logger.info("Start to reverted coredns deployment") - latest_core_dns_deployment = get_core_dns_deployment(kube_client) + latest_core_dns_deployment = get_core_dns_deployment( + kube_client) latest_core_dns_deployment.spec.template.spec = original_coredns_deployment.spec.template.spec - replace_deployment(CORE_DNS, KUBE_SYSTEM, kube_client, latest_core_dns_deployment) + replace_deployment( + CORE_DNS, + KUBE_SYSTEM, + kube_client, + latest_core_dns_deployment) logger.info("Reverted coredns deployment successfully") if original_custom_core_dns_configmap is None: - delete_configmap(CUSTOM_CORE_DNS, KUBE_SYSTEM, kube_client) + delete_configmap( + CUSTOM_CORE_DNS, KUBE_SYSTEM, kube_client) replace_succeeded = True except Exception as revertEx: - logger.warning(f"Failed to revert coredns configmap or deployment {revertEx}") + logger.warning( + f"Failed to revert coredns configmap or deployment {revertEx}") retry_count = retry_count + 1 time.sleep(2) if not replace_succeeded: - logger.error(f"Failed to revert the deployment and configuration. " - f"You can get the original coredns config and deployment from {original_folder}") + logger.error( + f"Failed to revert the deployment and configuration. " + f"You can get the original coredns config and deployment from {original_folder}") elif distro == OPENSHIFT_DISTRO: logger.info("Setting up CoreDNS for OpenShift") try: from ._arc_utils import create_openshift_custom_coredns_resources, patch_openshift_dns_operator - create_openshift_custom_coredns_resources(kube_client, OPENSHIFT_DNS) + create_openshift_custom_coredns_resources( + kube_client, OPENSHIFT_DNS) domain = extract_domain_from_configmap(kube_client) - # Patch the OpenShift DNS operator to use the custom CoreDNS service + # Patch the OpenShift DNS operator to use the custom CoreDNS + # service patch_openshift_dns_operator(kube_client, domain, original_folder) restart_openshift_dns_daemonset(kube_client) @@ -2845,12 +3731,19 @@ def setup_core_dns(cmd, distro=None, kube_config=None, kube_context=None, skip_s logger.info("Successfully set up CoreDNS for OpenShift") except Exception as e: logger.error(f"Failed to setup CoreDNS for OpenShift. {e}") - raise ValidationError("Failed to setup CoreDNS for OpenShift distro") + raise ValidationError( + "Failed to setup CoreDNS for OpenShift distro") else: - raise ValidationError(f"Unsupported distro: {distro}. Supported distros are: {AKS_AZURE_LOCAL_DISTRO}, {OPENSHIFT_DISTRO}.") + raise ValidationError( + f"Unsupported distro: {distro}. 
Supported distros are: {AKS_AZURE_LOCAL_DISTRO}, {OPENSHIFT_DISTRO}.") -def init_dapr_components(cmd, resource_group_name, environment_name, statestore="redis", pubsub="redis"): +def init_dapr_components( + cmd, + resource_group_name, + environment_name, + statestore="redis", + pubsub="redis"): _validate_subscription_registered(cmd, CONTAINER_APPS_RP) if statestore not in DAPR_SUPPORTED_STATESTORE_DEV_SERVICE_LIST: @@ -2880,24 +3773,33 @@ def init_dapr_components(cmd, resource_group_name, environment_name, statestore= return { "message": "Operation successful.", "resources": { - # Remove duplicates for services like Redis, which can be used for both statestore and pubsub + # Remove duplicates for services like Redis, which can be used for + # both statestore and pubsub "devServices": list(set([statestore_service_id, pubsub_service_id])), "daprComponents": [statestore_component_id, pubsub_component_id] } } -def assign_env_managed_identity(cmd, name, resource_group_name, system_assigned=False, user_assigned=None, no_wait=False): +def assign_env_managed_identity( + cmd, + name, + resource_group_name, + system_assigned=False, + user_assigned=None, + no_wait=False): _validate_subscription_registered(cmd, CONTAINER_APPS_RP) managed_env_def = None try: - managed_env_def = ManagedEnvironmentPreviewClient.show(cmd=cmd, resource_group_name=resource_group_name, name=name) - except: + managed_env_def = ManagedEnvironmentPreviewClient.show( + cmd=cmd, resource_group_name=resource_group_name, name=name) + except BaseException: pass if not managed_env_def: - raise ResourceNotFoundError("The containerapp env '{}' does not exist".format(name)) + raise ResourceNotFoundError( + "The containerapp env '{}' does not exist".format(name)) assign_system_identity = system_assigned if not user_assigned: @@ -2918,15 +3820,17 @@ def assign_env_managed_identity(cmd, name, resource_group_name, system_assigned= try: managed_env_def["identity"] managed_env_def["identity"]["type"] - except: + except BaseException: managed_env_def["identity"] = {} managed_env_def["identity"]["type"] = "None" payload = {"identity": {"type": managed_env_def["identity"]["type"]}} # check system identity is already assigned - if assign_system_identity and managed_env_def["identity"]["type"].__contains__("SystemAssigned"): + if assign_system_identity and managed_env_def["identity"]["type"].__contains__( + "SystemAssigned"): is_system_identity_changed = False - logger.warning("System identity is already assigned to containerapp environment") + logger.warning( + "System identity is already assigned to containerapp environment") # check user identity is already assigned if assign_user_identities: @@ -2936,25 +3840,32 @@ def assign_env_managed_identity(cmd, name, resource_group_name, system_assigned= subscription_id = get_subscription_id(cmd.cli_ctx) for r in assign_user_identities: - r = _ensure_identity_resource_id(subscription_id, resource_group_name, r).replace("resourceGroup", "resourcegroup") + r = _ensure_identity_resource_id( + subscription_id, resource_group_name, r).replace( + "resourceGroup", "resourcegroup") isExisting = False for old_user_identity in managed_env_def["identity"]["userAssignedIdentities"]: if old_user_identity.lower() == r.lower(): isExisting = True - logger.warning("User identity %s is already assigned to containerapp environment", old_user_identity) + logger.warning( + "User identity %s is already assigned to containerapp environment", + old_user_identity) break if not isExisting: to_add_user_assigned_identity.append(r) 
# no changes to containerapp environment - if (not is_system_identity_changed) and (not to_add_user_assigned_identity): - logger.warning("No managed identities changes to containerapp environment") + if (not is_system_identity_changed) and ( + not to_add_user_assigned_identity): + logger.warning( + "No managed identities changes to containerapp environment") return managed_env_def if to_add_user_assigned_identity: - payload["identity"]["userAssignedIdentities"] = {k: {} for k in to_add_user_assigned_identity} + payload["identity"]["userAssignedIdentities"] = { + k: {} for k in to_add_user_assigned_identity} # Assign correct type try: @@ -2972,19 +3883,29 @@ def assign_env_managed_identity(cmd, name, resource_group_name, system_assigned= payload["identity"]["type"] = "SystemAssigned" elif assign_user_identities: payload["identity"]["type"] = "UserAssigned" - except: + except BaseException: # Always returns "type": "None" when CA has no previous identities pass try: r = ManagedEnvironmentPreviewClient.update( - cmd=cmd, resource_group_name=resource_group_name, name=name, managed_environment_envelope=payload, no_wait=no_wait) + cmd=cmd, + resource_group_name=resource_group_name, + name=name, + managed_environment_envelope=payload, + no_wait=no_wait) return r["identity"] except Exception as e: handle_raw_exception(e) -def remove_env_managed_identity(cmd, name, resource_group_name, system_assigned=False, user_assigned=None, no_wait=False): +def remove_env_managed_identity( + cmd, + name, + resource_group_name, + system_assigned=False, + user_assigned=None, + no_wait=False): _validate_subscription_registered(cmd, CONTAINER_APPS_RP) remove_system_identity = system_assigned @@ -3001,34 +3922,40 @@ def remove_env_managed_identity(cmd, name, resource_group_name, system_assigned= managed_env_def = None # Get containerapp env properties of CA we are updating try: - managed_env_def = ManagedEnvironmentPreviewClient.show(cmd=cmd, resource_group_name=resource_group_name, name=name) - except: + managed_env_def = ManagedEnvironmentPreviewClient.show( + cmd=cmd, resource_group_name=resource_group_name, name=name) + except BaseException: pass if not managed_env_def: - raise ResourceNotFoundError("The containerapp env '{}' does not exist".format(name)) + raise ResourceNotFoundError( + "The containerapp env '{}' does not exist".format(name)) # If identity not returned try: managed_env_def["identity"] managed_env_def["identity"]["type"] - except: + except BaseException: managed_env_def["identity"] = {} managed_env_def["identity"]["type"] = "None" payload = {"identity": {"type": managed_env_def["identity"]["type"]}} if managed_env_def["identity"]["type"] == "None": - raise InvalidArgumentValueError("The containerapp env {} has no system or user assigned identities.".format(name)) + raise InvalidArgumentValueError( + "The containerapp env {} has no system or user assigned identities.".format(name)) if remove_system_identity: if managed_env_def["identity"]["type"] == "UserAssigned": - raise InvalidArgumentValueError("The containerapp job {} has no system assigned identities.".format(name)) - payload["identity"]["type"] = ("None" if payload["identity"]["type"] == "SystemAssigned" else "UserAssigned") + raise InvalidArgumentValueError( + "The containerapp job {} has no system assigned identities.".format(name)) + payload["identity"]["type"] = ( + "None" if payload["identity"]["type"] == "SystemAssigned" else "UserAssigned") # Remove all user identities if isinstance(user_assigned, list) and not user_assigned: 
logger.warning("remvoe all user identities.") - payload["identity"]["type"] = ("None" if payload["identity"]["type"] == "UserAssigned" else "SystemAssigned") + payload["identity"]["type"] = ( + "None" if payload["identity"]["type"] == "UserAssigned" else "SystemAssigned") remove_user_identities = [] if remove_user_identities: @@ -3036,11 +3963,12 @@ def remove_env_managed_identity(cmd, name, resource_group_name, system_assigned= subscription_id = get_subscription_id(cmd.cli_ctx) try: managed_env_def["identity"]["userAssignedIdentities"] - except: + except BaseException: managed_env_def["identity"]["userAssignedIdentities"] = {} for remove_id in remove_user_identities: given_id = remove_id - remove_id = _ensure_identity_resource_id(subscription_id, resource_group_name, remove_id) + remove_id = _ensure_identity_resource_id( + subscription_id, resource_group_name, remove_id) wasRemoved = False for old_user_identity in managed_env_def["identity"]["userAssignedIdentities"]: @@ -3050,23 +3978,31 @@ def remove_env_managed_identity(cmd, name, resource_group_name, system_assigned= break if not wasRemoved: - raise InvalidArgumentValueError("The containerapp job does not have specified user identity '{}' assigned, so it cannot be removed.".format(given_id)) + raise InvalidArgumentValueError( + "The containerapp job does not have specified user identity '{}' assigned, so it cannot be removed.".format(given_id)) # all user identities are removed - if len(managed_env_def["identity"]["userAssignedIdentities"]) == len(payload["identity"]["userAssignedIdentities"]): + if len(managed_env_def["identity"]["userAssignedIdentities"]) == len( + payload["identity"]["userAssignedIdentities"]): payload["identity"].pop("userAssignedIdentities", None) - payload["identity"]["type"] = ("None" if payload["identity"]["type"] == "UserAssigned" else "SystemAssigned") + payload["identity"]["type"] = ( + "None" if payload["identity"]["type"] == "UserAssigned" else "SystemAssigned") else: - payload["identity"]["type"] = ("UserAssigned" if payload["identity"]["type"] == "UserAssigned" else "SystemAssigned,UserAssigned") + payload["identity"]["type"] = ( + "UserAssigned" if payload["identity"]["type"] == "UserAssigned" else "SystemAssigned,UserAssigned") try: r = ManagedEnvironmentPreviewClient.update( - cmd=cmd, resource_group_name=resource_group_name, name=name, managed_environment_envelope=payload, no_wait=no_wait) + cmd=cmd, + resource_group_name=resource_group_name, + name=name, + managed_environment_envelope=payload, + no_wait=no_wait) except Exception as e: handle_raw_exception(e) try: return r["identity"] - except: + except BaseException: r["identity"] = {} r["identity"]["type"] = "None" return r["identity"] @@ -3076,13 +4012,14 @@ def show_env_managed_identity(cmd, name, resource_group_name): _validate_subscription_registered(cmd, CONTAINER_APPS_RP) try: - r = ManagedEnvironmentPreviewClient.show(cmd=cmd, resource_group_name=resource_group_name, name=name) + r = ManagedEnvironmentPreviewClient.show( + cmd=cmd, resource_group_name=resource_group_name, name=name) except CLIError as e: handle_raw_exception(e) try: return r["identity"] - except: + except BaseException: r["identity"] = {} r["identity"]["type"] = "None" return r["identity"] @@ -3099,7 +4036,12 @@ def list_java_components(cmd, environment_name, resource_group_name): return java_component_decorator.list() -def show_java_component(cmd, java_component_name, environment_name, resource_group_name, target_java_component_type): +def show_java_component( + cmd, + 
java_component_name, + environment_name, + resource_group_name, + target_java_component_type): raw_parameters = locals() java_component_decorator = BaseJavaComponentDecorator( cmd=cmd, @@ -3111,12 +4053,19 @@ def show_java_component(cmd, java_component_name, environment_name, resource_gro current_type = safe_get(result, "properties", "componentType") if current_type and target_java_component_type.lower() != current_type.lower(): - raise ResourceNotFoundError(f"(JavaComponentNotFound) JavaComponent '{java_component_name}' was not found.") + raise ResourceNotFoundError( + f"(JavaComponentNotFound) JavaComponent '{java_component_name}' was not found.") return result -def delete_java_component(cmd, java_component_name, environment_name, resource_group_name, target_java_component_type, no_wait): +def delete_java_component( + cmd, + java_component_name, + environment_name, + resource_group_name, + target_java_component_type, + no_wait): raw_parameters = locals() java_component_decorator = BaseJavaComponentDecorator( cmd=cmd, @@ -3127,18 +4076,33 @@ def delete_java_component(cmd, java_component_name, environment_name, resource_g result = None try: - result = java_component_decorator.client.show(cmd, resource_group_name, environment_name, java_component_name) + result = java_component_decorator.client.show( + cmd, resource_group_name, environment_name, java_component_name) except Exception as e: handle_non_404_status_code_exception(e) current_type = safe_get(result, "properties", "componentType") if current_type and target_java_component_type.lower() != current_type.lower(): - raise ResourceNotFoundError(f"(JavaComponentNotFound) JavaComponent '{java_component_name}' was not found.") + raise ResourceNotFoundError( + f"(JavaComponentNotFound) JavaComponent '{java_component_name}' was not found.") return java_component_decorator.delete() -def create_java_component(cmd, java_component_name, environment_name, resource_group_name, target_java_component_type, configuration, set_configurations, service_bindings, unbind_service_bindings, min_replicas, max_replicas, no_wait, route_yaml=None): +def create_java_component( + cmd, + java_component_name, + environment_name, + resource_group_name, + target_java_component_type, + configuration, + set_configurations, + service_bindings, + unbind_service_bindings, + min_replicas, + max_replicas, + no_wait, + route_yaml=None): raw_parameters = locals() java_component_decorator = JavaComponentCreateDecorator( cmd=cmd, @@ -3150,7 +4114,23 @@ def create_java_component(cmd, java_component_name, environment_name, resource_g return java_component_decorator.create() -def update_java_component(cmd, java_component_name, environment_name, resource_group_name, target_java_component_type, configuration, set_configurations, replace_configurations, remove_configurations, remove_all_configurations, service_bindings, unbind_service_bindings, min_replicas, max_replicas, no_wait, route_yaml=None): +def update_java_component( + cmd, + java_component_name, + environment_name, + resource_group_name, + target_java_component_type, + configuration, + set_configurations, + replace_configurations, + remove_configurations, + remove_all_configurations, + service_bindings, + unbind_service_bindings, + min_replicas, + max_replicas, + no_wait, + route_yaml=None): raw_parameters = locals() java_component_decorator = JavaComponentUpdateDecorator( cmd=cmd, @@ -3162,84 +4142,448 @@ def update_java_component(cmd, java_component_name, environment_name, resource_g return 
java_component_decorator.update() -def create_config_server_for_spring(cmd, java_component_name, environment_name, resource_group_name, configuration=None, set_configurations=None, unbind_service_bindings=None, service_bindings=None, min_replicas=1, max_replicas=1, no_wait=False): - return create_java_component(cmd, java_component_name, environment_name, resource_group_name, JAVA_COMPONENT_CONFIG, configuration, set_configurations, service_bindings, unbind_service_bindings, min_replicas, max_replicas, no_wait) - - -def update_config_server_for_spring(cmd, java_component_name, environment_name, resource_group_name, configuration=None, set_configurations=None, replace_configurations=None, remove_configurations=None, remove_all_configurations=None, unbind_service_bindings=None, service_bindings=None, min_replicas=None, max_replicas=None, no_wait=False): - return update_java_component(cmd, java_component_name, environment_name, resource_group_name, JAVA_COMPONENT_CONFIG, configuration, set_configurations, replace_configurations, remove_configurations, remove_all_configurations, service_bindings, unbind_service_bindings, min_replicas, max_replicas, no_wait) - - -def show_config_server_for_spring(cmd, java_component_name, environment_name, resource_group_name): - return show_java_component(cmd, java_component_name, environment_name, resource_group_name, JAVA_COMPONENT_CONFIG) - - -def delete_config_server_for_spring(cmd, java_component_name, environment_name, resource_group_name, no_wait=False): - return delete_java_component(cmd, java_component_name, environment_name, resource_group_name, JAVA_COMPONENT_CONFIG, no_wait) - - -def create_eureka_server_for_spring(cmd, java_component_name, environment_name, resource_group_name, configuration=None, set_configurations=None, service_bindings=None, unbind_service_bindings=None, min_replicas=1, max_replicas=1, no_wait=False): - return create_java_component(cmd, java_component_name, environment_name, resource_group_name, JAVA_COMPONENT_EUREKA, configuration, set_configurations, service_bindings, unbind_service_bindings, min_replicas, max_replicas, no_wait) - - -def update_eureka_server_for_spring(cmd, java_component_name, environment_name, resource_group_name, configuration=None, set_configurations=None, replace_configurations=None, remove_configurations=None, remove_all_configurations=None, service_bindings=None, unbind_service_bindings=None, min_replicas=None, max_replicas=None, no_wait=False): - return update_java_component(cmd, java_component_name, environment_name, resource_group_name, JAVA_COMPONENT_EUREKA, configuration, set_configurations, replace_configurations, remove_configurations, remove_all_configurations, service_bindings, unbind_service_bindings, min_replicas, max_replicas, no_wait) - - -def show_eureka_server_for_spring(cmd, java_component_name, environment_name, resource_group_name): - return show_java_component(cmd, java_component_name, environment_name, resource_group_name, JAVA_COMPONENT_EUREKA) - - -def delete_eureka_server_for_spring(cmd, java_component_name, environment_name, resource_group_name, no_wait=False): - return delete_java_component(cmd, java_component_name, environment_name, resource_group_name, JAVA_COMPONENT_EUREKA, no_wait) - - -def create_nacos(cmd, java_component_name, environment_name, resource_group_name, configuration=None, set_configurations=None, service_bindings=None, unbind_service_bindings=None, min_replicas=1, max_replicas=1, no_wait=False): - return create_java_component(cmd, java_component_name, 
environment_name, resource_group_name, JAVA_COMPONENT_NACOS, configuration, set_configurations, service_bindings, unbind_service_bindings, min_replicas, max_replicas, no_wait) - - -def update_nacos(cmd, java_component_name, environment_name, resource_group_name, configuration=None, set_configurations=None, replace_configurations=None, remove_configurations=None, remove_all_configurations=None, service_bindings=None, unbind_service_bindings=None, min_replicas=None, max_replicas=None, no_wait=False): - return update_java_component(cmd, java_component_name, environment_name, resource_group_name, JAVA_COMPONENT_NACOS, configuration, set_configurations, replace_configurations, remove_configurations, remove_all_configurations, service_bindings, unbind_service_bindings, min_replicas, max_replicas, no_wait) - - -def show_nacos(cmd, java_component_name, environment_name, resource_group_name): - return show_java_component(cmd, java_component_name, environment_name, resource_group_name, JAVA_COMPONENT_NACOS) - - -def delete_nacos(cmd, java_component_name, environment_name, resource_group_name, no_wait=False): - return delete_java_component(cmd, java_component_name, environment_name, resource_group_name, JAVA_COMPONENT_NACOS, no_wait) - - -def create_admin_for_spring(cmd, java_component_name, environment_name, resource_group_name, configuration=None, set_configurations=None, service_bindings=None, unbind_service_bindings=None, min_replicas=1, max_replicas=1, no_wait=False): - return create_java_component(cmd, java_component_name, environment_name, resource_group_name, JAVA_COMPONENT_ADMIN, configuration, set_configurations, service_bindings, unbind_service_bindings, min_replicas, max_replicas, no_wait) - - -def update_admin_for_spring(cmd, java_component_name, environment_name, resource_group_name, configuration=None, set_configurations=None, replace_configurations=None, remove_configurations=None, remove_all_configurations=None, service_bindings=None, unbind_service_bindings=None, min_replicas=None, max_replicas=None, no_wait=False): - return update_java_component(cmd, java_component_name, environment_name, resource_group_name, JAVA_COMPONENT_ADMIN, configuration, set_configurations, replace_configurations, remove_configurations, remove_all_configurations, service_bindings, unbind_service_bindings, min_replicas, max_replicas, no_wait) - - -def show_admin_for_spring(cmd, java_component_name, environment_name, resource_group_name): - return show_java_component(cmd, java_component_name, environment_name, resource_group_name, JAVA_COMPONENT_ADMIN) - - -def delete_admin_for_spring(cmd, java_component_name, environment_name, resource_group_name, no_wait=False): - return delete_java_component(cmd, java_component_name, environment_name, resource_group_name, JAVA_COMPONENT_ADMIN, no_wait) - - -def create_gateway_for_spring(cmd, java_component_name, environment_name, resource_group_name, configuration=None, set_configurations=None, service_bindings=None, unbind_service_bindings=None, min_replicas=1, max_replicas=1, no_wait=False, route_yaml=None): - return create_java_component(cmd, java_component_name, environment_name, resource_group_name, JAVA_COMPONENT_GATEWAY, configuration, set_configurations, service_bindings, unbind_service_bindings, min_replicas, max_replicas, no_wait, route_yaml) - - -def update_gateway_for_spring(cmd, java_component_name, environment_name, resource_group_name, configuration=None, set_configurations=None, replace_configurations=None, remove_configurations=None, 
remove_all_configurations=None, service_bindings=None, unbind_service_bindings=None, min_replicas=None, max_replicas=None, no_wait=False, route_yaml=None): - return update_java_component(cmd, java_component_name, environment_name, resource_group_name, JAVA_COMPONENT_GATEWAY, configuration, set_configurations, replace_configurations, remove_configurations, remove_all_configurations, service_bindings, unbind_service_bindings, min_replicas, max_replicas, no_wait, route_yaml) - - -def show_gateway_for_spring(cmd, java_component_name, environment_name, resource_group_name): - return show_java_component(cmd, java_component_name, environment_name, resource_group_name, JAVA_COMPONENT_GATEWAY) - - -def delete_gateway_for_spring(cmd, java_component_name, environment_name, resource_group_name, no_wait=False): - return delete_java_component(cmd, java_component_name, environment_name, resource_group_name, JAVA_COMPONENT_GATEWAY, no_wait) +def create_config_server_for_spring( + cmd, + java_component_name, + environment_name, + resource_group_name, + configuration=None, + set_configurations=None, + unbind_service_bindings=None, + service_bindings=None, + min_replicas=1, + max_replicas=1, + no_wait=False): + return create_java_component( + cmd, + java_component_name, + environment_name, + resource_group_name, + JAVA_COMPONENT_CONFIG, + configuration, + set_configurations, + service_bindings, + unbind_service_bindings, + min_replicas, + max_replicas, + no_wait) + + +def update_config_server_for_spring( + cmd, + java_component_name, + environment_name, + resource_group_name, + configuration=None, + set_configurations=None, + replace_configurations=None, + remove_configurations=None, + remove_all_configurations=None, + unbind_service_bindings=None, + service_bindings=None, + min_replicas=None, + max_replicas=None, + no_wait=False): + return update_java_component( + cmd, + java_component_name, + environment_name, + resource_group_name, + JAVA_COMPONENT_CONFIG, + configuration, + set_configurations, + replace_configurations, + remove_configurations, + remove_all_configurations, + service_bindings, + unbind_service_bindings, + min_replicas, + max_replicas, + no_wait) + + +def show_config_server_for_spring( + cmd, + java_component_name, + environment_name, + resource_group_name): + return show_java_component( + cmd, + java_component_name, + environment_name, + resource_group_name, + JAVA_COMPONENT_CONFIG) + + +def delete_config_server_for_spring( + cmd, + java_component_name, + environment_name, + resource_group_name, + no_wait=False): + return delete_java_component( + cmd, + java_component_name, + environment_name, + resource_group_name, + JAVA_COMPONENT_CONFIG, + no_wait) + + +def create_eureka_server_for_spring( + cmd, + java_component_name, + environment_name, + resource_group_name, + configuration=None, + set_configurations=None, + service_bindings=None, + unbind_service_bindings=None, + min_replicas=1, + max_replicas=1, + no_wait=False): + return create_java_component( + cmd, + java_component_name, + environment_name, + resource_group_name, + JAVA_COMPONENT_EUREKA, + configuration, + set_configurations, + service_bindings, + unbind_service_bindings, + min_replicas, + max_replicas, + no_wait) + + +def update_eureka_server_for_spring( + cmd, + java_component_name, + environment_name, + resource_group_name, + configuration=None, + set_configurations=None, + replace_configurations=None, + remove_configurations=None, + remove_all_configurations=None, + service_bindings=None, + unbind_service_bindings=None, + 
min_replicas=None, + max_replicas=None, + no_wait=False): + return update_java_component( + cmd, + java_component_name, + environment_name, + resource_group_name, + JAVA_COMPONENT_EUREKA, + configuration, + set_configurations, + replace_configurations, + remove_configurations, + remove_all_configurations, + service_bindings, + unbind_service_bindings, + min_replicas, + max_replicas, + no_wait) + + +def show_eureka_server_for_spring( + cmd, + java_component_name, + environment_name, + resource_group_name): + return show_java_component( + cmd, + java_component_name, + environment_name, + resource_group_name, + JAVA_COMPONENT_EUREKA) + + +def delete_eureka_server_for_spring( + cmd, + java_component_name, + environment_name, + resource_group_name, + no_wait=False): + return delete_java_component( + cmd, + java_component_name, + environment_name, + resource_group_name, + JAVA_COMPONENT_EUREKA, + no_wait) + + +def create_nacos( + cmd, + java_component_name, + environment_name, + resource_group_name, + configuration=None, + set_configurations=None, + service_bindings=None, + unbind_service_bindings=None, + min_replicas=1, + max_replicas=1, + no_wait=False): + return create_java_component( + cmd, + java_component_name, + environment_name, + resource_group_name, + JAVA_COMPONENT_NACOS, + configuration, + set_configurations, + service_bindings, + unbind_service_bindings, + min_replicas, + max_replicas, + no_wait) + + +def update_nacos( + cmd, + java_component_name, + environment_name, + resource_group_name, + configuration=None, + set_configurations=None, + replace_configurations=None, + remove_configurations=None, + remove_all_configurations=None, + service_bindings=None, + unbind_service_bindings=None, + min_replicas=None, + max_replicas=None, + no_wait=False): + return update_java_component( + cmd, + java_component_name, + environment_name, + resource_group_name, + JAVA_COMPONENT_NACOS, + configuration, + set_configurations, + replace_configurations, + remove_configurations, + remove_all_configurations, + service_bindings, + unbind_service_bindings, + min_replicas, + max_replicas, + no_wait) + + +def show_nacos( + cmd, + java_component_name, + environment_name, + resource_group_name): + return show_java_component( + cmd, + java_component_name, + environment_name, + resource_group_name, + JAVA_COMPONENT_NACOS) + + +def delete_nacos( + cmd, + java_component_name, + environment_name, + resource_group_name, + no_wait=False): + return delete_java_component( + cmd, + java_component_name, + environment_name, + resource_group_name, + JAVA_COMPONENT_NACOS, + no_wait) + + +def create_admin_for_spring( + cmd, + java_component_name, + environment_name, + resource_group_name, + configuration=None, + set_configurations=None, + service_bindings=None, + unbind_service_bindings=None, + min_replicas=1, + max_replicas=1, + no_wait=False): + return create_java_component( + cmd, + java_component_name, + environment_name, + resource_group_name, + JAVA_COMPONENT_ADMIN, + configuration, + set_configurations, + service_bindings, + unbind_service_bindings, + min_replicas, + max_replicas, + no_wait) + + +def update_admin_for_spring( + cmd, + java_component_name, + environment_name, + resource_group_name, + configuration=None, + set_configurations=None, + replace_configurations=None, + remove_configurations=None, + remove_all_configurations=None, + service_bindings=None, + unbind_service_bindings=None, + min_replicas=None, + max_replicas=None, + no_wait=False): + return update_java_component( + cmd, + java_component_name, + 
environment_name, + resource_group_name, + JAVA_COMPONENT_ADMIN, + configuration, + set_configurations, + replace_configurations, + remove_configurations, + remove_all_configurations, + service_bindings, + unbind_service_bindings, + min_replicas, + max_replicas, + no_wait) + + +def show_admin_for_spring( + cmd, + java_component_name, + environment_name, + resource_group_name): + return show_java_component( + cmd, + java_component_name, + environment_name, + resource_group_name, + JAVA_COMPONENT_ADMIN) + + +def delete_admin_for_spring( + cmd, + java_component_name, + environment_name, + resource_group_name, + no_wait=False): + return delete_java_component( + cmd, + java_component_name, + environment_name, + resource_group_name, + JAVA_COMPONENT_ADMIN, + no_wait) + + +def create_gateway_for_spring( + cmd, + java_component_name, + environment_name, + resource_group_name, + configuration=None, + set_configurations=None, + service_bindings=None, + unbind_service_bindings=None, + min_replicas=1, + max_replicas=1, + no_wait=False, + route_yaml=None): + return create_java_component( + cmd, + java_component_name, + environment_name, + resource_group_name, + JAVA_COMPONENT_GATEWAY, + configuration, + set_configurations, + service_bindings, + unbind_service_bindings, + min_replicas, + max_replicas, + no_wait, + route_yaml) + + +def update_gateway_for_spring( + cmd, + java_component_name, + environment_name, + resource_group_name, + configuration=None, + set_configurations=None, + replace_configurations=None, + remove_configurations=None, + remove_all_configurations=None, + service_bindings=None, + unbind_service_bindings=None, + min_replicas=None, + max_replicas=None, + no_wait=False, + route_yaml=None): + return update_java_component( + cmd, + java_component_name, + environment_name, + resource_group_name, + JAVA_COMPONENT_GATEWAY, + configuration, + set_configurations, + replace_configurations, + remove_configurations, + remove_all_configurations, + service_bindings, + unbind_service_bindings, + min_replicas, + max_replicas, + no_wait, + route_yaml) + + +def show_gateway_for_spring( + cmd, + java_component_name, + environment_name, + resource_group_name): + return show_java_component( + cmd, + java_component_name, + environment_name, + resource_group_name, + JAVA_COMPONENT_GATEWAY) + + +def delete_gateway_for_spring( + cmd, + java_component_name, + environment_name, + resource_group_name, + no_wait=False): + return delete_java_component( + cmd, + java_component_name, + environment_name, + resource_group_name, + JAVA_COMPONENT_GATEWAY, + no_wait) def set_environment_telemetry_data_dog(cmd, @@ -3255,9 +4599,9 @@ def set_environment_telemetry_data_dog(cmd, cmd=cmd, client=ManagedEnvironmentPreviewClient, raw_parameters=raw_parameters, - models=CONTAINER_APPS_SDK_MODELS - ) - containerapp_env_telemetry_data_dog_decorator.register_provider(CONTAINER_APPS_RP) + models=CONTAINER_APPS_SDK_MODELS) + containerapp_env_telemetry_data_dog_decorator.register_provider( + CONTAINER_APPS_RP) containerapp_env_telemetry_data_dog_decorator.construct_payload() r = containerapp_env_telemetry_data_dog_decorator.update() @@ -3278,9 +4622,9 @@ def delete_environment_telemetry_data_dog(cmd, cmd=cmd, client=ManagedEnvironmentPreviewClient, raw_parameters=raw_parameters, - models=CONTAINER_APPS_SDK_MODELS - ) - containerapp_env_telemetry_data_dog_decorator.register_provider(CONTAINER_APPS_RP) + models=CONTAINER_APPS_SDK_MODELS) + containerapp_env_telemetry_data_dog_decorator.register_provider( + CONTAINER_APPS_RP) 
containerapp_env_telemetry_data_dog_decorator.construct_payload() r = containerapp_env_telemetry_data_dog_decorator.update() @@ -3297,8 +4641,7 @@ def show_environment_telemetry_data_dog(cmd, cmd=cmd, client=ManagedEnvironmentPreviewClient, raw_parameters=raw_parameters, - models=CONTAINER_APPS_SDK_MODELS - ) + models=CONTAINER_APPS_SDK_MODELS) containerapp_env_def = None try: @@ -3306,9 +4649,15 @@ def show_environment_telemetry_data_dog(cmd, except Exception as e: handle_non_404_status_code_exception(e) - r = safe_get(containerapp_env_def, "properties", "openTelemetryConfiguration", "destinationsConfiguration", "dataDogConfiguration") + r = safe_get( + containerapp_env_def, + "properties", + "openTelemetryConfiguration", + "destinationsConfiguration", + "dataDogConfiguration") if r is None: - raise ValidationError("The containerapp environment '{}' does not have data dog enabled.".format(name)) + raise ValidationError( + "The containerapp environment '{}' does not have data dog enabled.".format(name)) return containerapp_env_def @@ -3327,7 +4676,8 @@ def set_environment_telemetry_app_insights(cmd, raw_parameters=raw_parameters, models=CONTAINER_APPS_SDK_MODELS ) - containerapp_env_telemetry_app_insights_decorator.register_provider(CONTAINER_APPS_RP) + containerapp_env_telemetry_app_insights_decorator.register_provider( + CONTAINER_APPS_RP) containerapp_env_telemetry_app_insights_decorator.construct_payload() r = containerapp_env_telemetry_app_insights_decorator.update() @@ -3349,7 +4699,8 @@ def delete_environment_telemetry_app_insights(cmd, raw_parameters=raw_parameters, models=CONTAINER_APPS_SDK_MODELS ) - containerapp_env_telemetry_app_insights_decorator.register_provider(CONTAINER_APPS_RP) + containerapp_env_telemetry_app_insights_decorator.register_provider( + CONTAINER_APPS_RP) containerapp_env_telemetry_app_insights_decorator.construct_payload() r = containerapp_env_telemetry_app_insights_decorator.update() @@ -3375,13 +4726,18 @@ def show_environment_telemetry_app_insights(cmd, except Exception as e: handle_non_404_status_code_exception(e) - r = safe_get(containerapp_env_def, "properties", "appInsightsConfiguration") + r = safe_get( + containerapp_env_def, + "properties", + "appInsightsConfiguration") if r is None: - raise ValidationError("The containerapp environment '{}' does not have app insights enabled.".format(name)) + raise ValidationError( + "The containerapp environment '{}' does not have app insights enabled.".format(name)) if not containerapp_env_def: - raise ResourceNotFoundError("The containerapp environment '{}' does not exist".format(name)) + raise ResourceNotFoundError( + "The containerapp environment '{}' does not exist".format(name)) return containerapp_env_def @@ -3402,9 +4758,9 @@ def add_environment_telemetry_otlp(cmd, cmd=cmd, client=ManagedEnvironmentPreviewClient, raw_parameters=raw_parameters, - models=CONTAINER_APPS_SDK_MODELS - ) - containerapp_env_telemetry_otlp_decorator.register_provider(CONTAINER_APPS_RP) + models=CONTAINER_APPS_SDK_MODELS) + containerapp_env_telemetry_otlp_decorator.register_provider( + CONTAINER_APPS_RP) r = {} @@ -3413,12 +4769,19 @@ def add_environment_telemetry_otlp(cmd, except CLIError as e: handle_raw_exception(e) - existing_otlps = safe_get(r, "properties", "openTelemetryConfiguration", "destinationsConfiguration", "otlpConfigurations") + existing_otlps = safe_get( + r, + "properties", + "openTelemetryConfiguration", + "destinationsConfiguration", + "otlpConfigurations") if existing_otlps is not None: - otlp = [p for p in 
existing_otlps if p["name"].lower() == otlp_name.lower()] + otlp = [p for p in existing_otlps if p["name"].lower() == + otlp_name.lower()] if otlp: - raise ValidationError(f"Otlp entry with name --otlp-name {otlp_name} already exist, please retry with different name") + raise ValidationError( + f"Otlp entry with name --otlp-name {otlp_name} already exist, please retry with different name") containerapp_env_telemetry_otlp_decorator.construct_payload() r = containerapp_env_telemetry_otlp_decorator.update() @@ -3442,9 +4805,9 @@ def update_environment_telemetry_otlp(cmd, cmd=cmd, client=ManagedEnvironmentPreviewClient, raw_parameters=raw_parameters, - models=CONTAINER_APPS_SDK_MODELS - ) - containerapp_env_telemetry_otlp_decorator.register_provider(CONTAINER_APPS_RP) + models=CONTAINER_APPS_SDK_MODELS) + containerapp_env_telemetry_otlp_decorator.register_provider( + CONTAINER_APPS_RP) r = {} @@ -3453,12 +4816,19 @@ def update_environment_telemetry_otlp(cmd, except Exception as e: handle_non_404_status_code_exception(e) - existing_otlps = safe_get(r, "properties", "openTelemetryConfiguration", "destinationsConfiguration", "otlpConfigurations") + existing_otlps = safe_get( + r, + "properties", + "openTelemetryConfiguration", + "destinationsConfiguration", + "otlpConfigurations") if existing_otlps is not None: - otlp = [p for p in existing_otlps if p["name"].lower() == otlp_name.lower()] + otlp = [p for p in existing_otlps if p["name"].lower() == + otlp_name.lower()] if not otlp: - raise ValidationError(f"Otlp entry with name --otlp-name {otlp_name} does not exist, please make sure the {otlp_name} already added") + raise ValidationError( + f"Otlp entry with name --otlp-name {otlp_name} does not exist, please make sure the {otlp_name} already added") containerapp_env_telemetry_otlp_decorator.construct_payload() r = containerapp_env_telemetry_otlp_decorator.update() @@ -3479,9 +4849,9 @@ def remove_environment_telemetry_otlp(cmd, cmd=cmd, client=ManagedEnvironmentPreviewClient, raw_parameters=raw_parameters, - models=CONTAINER_APPS_SDK_MODELS - ) - containerapp_env_telemetry_otlp_decorator.register_provider(CONTAINER_APPS_RP) + models=CONTAINER_APPS_SDK_MODELS) + containerapp_env_telemetry_otlp_decorator.register_provider( + CONTAINER_APPS_RP) containerapp_env_telemetry_otlp_decorator.construct_remove_payload() r = containerapp_env_telemetry_otlp_decorator.update() @@ -3499,8 +4869,7 @@ def show_environment_telemetry_otlp(cmd, cmd=cmd, client=ManagedEnvironmentPreviewClient, raw_parameters=raw_parameters, - models=CONTAINER_APPS_SDK_MODELS - ) + models=CONTAINER_APPS_SDK_MODELS) containerapp_env_def = None try: @@ -3509,17 +4878,31 @@ def show_environment_telemetry_otlp(cmd, handle_non_404_status_code_exception(e) if not containerapp_env_def: - raise ResourceNotFoundError("The containerapp environment '{}' does not exist".format(name)) - - existing_otlps = safe_get(containerapp_env_def, "properties", "openTelemetryConfiguration", "destinationsConfiguration", "otlpConfigurations") + raise ResourceNotFoundError( + "The containerapp environment '{}' does not exist".format(name)) + + existing_otlps = safe_get( + containerapp_env_def, + "properties", + "openTelemetryConfiguration", + "destinationsConfiguration", + "otlpConfigurations") if existing_otlps is not None: - otlp = [p for p in existing_otlps if p["name"].lower() == otlp_name.lower()] + otlp = [p for p in existing_otlps if p["name"].lower() == + otlp_name.lower()] if not otlp: - raise ResourceNotFoundError(f"Otlp entry with name --otlp-name 
{otlp_name} does not exist, please retry with different name") + raise ResourceNotFoundError( + f"Otlp entry with name --otlp-name {otlp_name} does not exist, please retry with different name") existing_otlps = otlp - safe_set(containerapp_env_def, "properties", "openTelemetryConfiguration", "destinationsConfiguration", "otlpConfigurations", value=existing_otlps) + safe_set( + containerapp_env_def, + "properties", + "openTelemetryConfiguration", + "destinationsConfiguration", + "otlpConfigurations", + value=existing_otlps) return containerapp_env_def @@ -3533,8 +4916,7 @@ def list_environment_telemetry_otlp(cmd, cmd=cmd, client=ManagedEnvironmentPreviewClient, raw_parameters=raw_parameters, - models=CONTAINER_APPS_SDK_MODELS - ) + models=CONTAINER_APPS_SDK_MODELS) containerapp_env_def = None try: @@ -3545,25 +4927,43 @@ def list_environment_telemetry_otlp(cmd, return containerapp_env_def -def list_replica_containerappsjob(cmd, resource_group_name, name, execution=None): +def list_replica_containerappsjob( + cmd, + resource_group_name, + name, + execution=None): if execution is None: - executions = ContainerAppsJobPreviewClient.get_executions(cmd=cmd, resource_group_name=resource_group_name, name=name) + executions = ContainerAppsJobPreviewClient.get_executions( + cmd=cmd, resource_group_name=resource_group_name, name=name) execution = executions['value'][0]['name'] - logger.warning('No execution specified. Using the latest execution: %s', execution) + logger.warning( + 'No execution specified. Using the latest execution: %s', + execution) try: - replicas = ContainerAppsJobPreviewClient.get_replicas(cmd, resource_group_name, name, execution) + replicas = ContainerAppsJobPreviewClient.get_replicas( + cmd, resource_group_name, name, execution) return replicas['value'] except CLIError as e: handle_raw_exception(e) -def stream_job_logs(cmd, resource_group_name, name, container, execution=None, replica=None, follow=False, tail=None, output_format=None): +def stream_job_logs( + cmd, + resource_group_name, + name, + container, + execution=None, + replica=None, + follow=False, + tail=None, + output_format=None): if tail: if tail < 0 or tail > 300: raise ValidationError("--tail must be between 0 and 300.") sub = get_subscription_id(cmd.cli_ctx) - token_response = ContainerAppsJobPreviewClient.get_auth_token(cmd, resource_group_name, name) + token_response = ContainerAppsJobPreviewClient.get_auth_token( + cmd, resource_group_name, name) token = token_response["properties"]["token"] job = ContainerAppsJobPreviewClient.show(cmd, resource_group_name, name) @@ -3574,21 +4974,28 @@ def stream_job_logs(cmd, resource_group_name, name, container, execution=None, r raise ValidationError("Cannot specify a replica without an execution") if execution is None: - executions = ContainerAppsJobPreviewClient.get_executions(cmd, resource_group_name, name)['value'] + executions = ContainerAppsJobPreviewClient.get_executions( + cmd, resource_group_name, name)['value'] if not executions: raise ValidationError("No executions found for this job") execution = executions[0]["name"] - logger.warning("No execution provided, defaulting to latest execution: %s", execution) + logger.warning( + "No execution provided, defaulting to latest execution: %s", + execution) if replica is None: - replicas = ContainerAppsJobPreviewClient.get_replicas(cmd, resource_group_name, name, execution)['value'] + replicas = ContainerAppsJobPreviewClient.get_replicas( + cmd, resource_group_name, name, execution)['value'] if not replicas: raise 
ValidationError("No replicas found for execution") replica = replicas[0]["name"] - logger.warning("No replica provided, defaulting to latest replica: %s", replica) + logger.warning( + "No replica provided, defaulting to latest replica: %s", + replica) - url = (f"{base_url}/subscriptions/{sub}/resourceGroups/{resource_group_name}/jobs/{name}" - f"/executions/{execution}/replicas/{replica}/containers/{container}/logstream") + url = ( + f"{base_url}/subscriptions/{sub}/resourceGroups/{resource_group_name}/jobs/{name}" + f"/executions/{execution}/replicas/{replica}/containers/{container}/logstream") logger.info("connecting to : %s", url) request_params = {"follow": str(follow).lower(), @@ -3602,31 +5009,54 @@ def stream_job_logs(cmd, resource_group_name, name, container, execution=None, r headers=headers) if not resp.ok: - raise ValidationError(f"Got bad status from the logstream API: {resp.status_code}. Error: {str(resp.content)}") + raise ValidationError( + f"Got bad status from the logstream API: {resp.status_code}. Error: {str(resp.content)}") for line in resp.iter_lines(): if line: logger.info("received raw log line: %s", line) # these .replaces are needed to display color/quotations properly - # for some reason the API returns garbled unicode special characters (may need to add more in the future) - print(line.decode("utf-8").replace("\\u0022", "\u0022").replace("\\u001B", "\u001B").replace("\\u002B", "\u002B").replace("\\u0027", "\u0027")) - - -def create_or_update_java_logger(cmd, logger_name, logger_level, name, resource_group_name, no_wait=False): + # for some reason the API returns garbled unicode special + # characters (may need to add more in the future) + print( + line.decode("utf-8").replace( + "\\u0022", + "\u0022").replace( + "\\u001B", + "\u001B").replace( + "\\u002B", + "\u002B").replace( + "\\u0027", + "\u0027")) + + +def create_or_update_java_logger( + cmd, + logger_name, + logger_level, + name, + resource_group_name, + no_wait=False): raw_parameters = locals() containerapp_java_logger_set_decorator = ContainerappJavaLoggerSetDecorator( cmd=cmd, client=ContainerAppPreviewClient, raw_parameters=raw_parameters, - models=CONTAINER_APPS_SDK_MODELS - ) + models=CONTAINER_APPS_SDK_MODELS) containerapp_java_logger_set_decorator.validate_arguments() containerapp_java_logger_set_decorator.construct_payload() return containerapp_java_logger_set_decorator.create_or_update() - # pylint: disable=redefined-builtin -def delete_java_logger(cmd, name, resource_group_name, logger_name=None, all=None, no_wait=False): + + +def delete_java_logger( + cmd, + name, + resource_group_name, + logger_name=None, + all=None, + no_wait=False): raw_parameters = locals() containerapp_java_logger_decorator = ContainerappJavaLoggerDeleteDecorator( cmd=cmd, @@ -3638,9 +5068,15 @@ def delete_java_logger(cmd, name, resource_group_name, logger_name=None, all=Non containerapp_java_logger_decorator.construct_payload() return containerapp_java_logger_decorator.delete() - # pylint: disable=redefined-builtin -def show_java_logger(cmd, name, resource_group_name, logger_name=None, all=None): + + +def show_java_logger( + cmd, + name, + resource_group_name, + logger_name=None, + all=None): raw_parameters = locals() containerapp_java_logger_decorator = ContainerappJavaLoggerDecorator( cmd=cmd, @@ -3781,8 +5217,9 @@ def delete_session_pool(cmd, return r - # session code interpreter commands + + def execute_session_code_interpreter(cmd, name, resource_group_name, @@ -3795,8 +5232,7 @@ def 
execute_session_code_interpreter(cmd, cmd=cmd, client=SessionCodeInterpreterPreviewClient, raw_parameters=raw_parameters, - models=CONTAINER_APPS_SDK_MODELS - ) + models=CONTAINER_APPS_SDK_MODELS) session_code_interpreter_decorator.validate_arguments() session_code_interpreter_decorator.register_provider(CONTAINER_APPS_RP) @@ -3818,8 +5254,7 @@ def upload_session_code_interpreter(cmd, cmd=cmd, client=SessionCodeInterpreterPreviewClient, raw_parameters=raw_parameters, - models=CONTAINER_APPS_SDK_MODELS - ) + models=CONTAINER_APPS_SDK_MODELS) session_code_interpreter_decorator.register_provider(CONTAINER_APPS_RP) r = session_code_interpreter_decorator.upload() @@ -3839,8 +5274,7 @@ def show_file_content_session_code_interpreter(cmd, cmd=cmd, client=SessionCodeInterpreterPreviewClient, raw_parameters=raw_parameters, - models=CONTAINER_APPS_SDK_MODELS - ) + models=CONTAINER_APPS_SDK_MODELS) session_code_interpreter_decorator.register_provider(CONTAINER_APPS_RP) r = session_code_interpreter_decorator.show_file_content() @@ -3860,8 +5294,7 @@ def show_file_metadata_session_code_interpreter(cmd, cmd=cmd, client=SessionCodeInterpreterPreviewClient, raw_parameters=raw_parameters, - models=CONTAINER_APPS_SDK_MODELS - ) + models=CONTAINER_APPS_SDK_MODELS) session_code_interpreter_decorator.register_provider(CONTAINER_APPS_RP) r = session_code_interpreter_decorator.show_file_metadata() @@ -3880,8 +5313,7 @@ def list_files_session_code_interpreter(cmd, cmd=cmd, client=SessionCodeInterpreterPreviewClient, raw_parameters=raw_parameters, - models=CONTAINER_APPS_SDK_MODELS - ) + models=CONTAINER_APPS_SDK_MODELS) session_code_interpreter_decorator.register_provider(CONTAINER_APPS_RP) r = session_code_interpreter_decorator.list_files() @@ -3901,16 +5333,16 @@ def delete_file_session_code_interpreter(cmd, cmd=cmd, client=SessionCodeInterpreterPreviewClient, raw_parameters=raw_parameters, - models=CONTAINER_APPS_SDK_MODELS - ) + models=CONTAINER_APPS_SDK_MODELS) session_code_interpreter_decorator.register_provider(CONTAINER_APPS_RP) r = session_code_interpreter_decorator.delete_file() return r - # session custom container commands + + def stop_session_custom_container(cmd, name, resource_group_name, @@ -3920,8 +5352,7 @@ def stop_session_custom_container(cmd, cmd=cmd, client=SessionCustomContainerPreviewClient, raw_parameters=raw_parameters, - models=CONTAINER_APPS_SDK_MODELS - ) + models=CONTAINER_APPS_SDK_MODELS) session_custom_container_decorator.register_provider(CONTAINER_APPS_RP) r = session_custom_container_decorator.stop_session() @@ -3939,9 +5370,14 @@ def list_dotnet_components(cmd, environment_name, resource_group_name): ) return dotnet_component_decorator.list() - # pylint: disable=protected-access -def show_dotnet_component(cmd, dotnet_component_name, environment_name, resource_group_name): + + +def show_dotnet_component( + cmd, + dotnet_component_name, + environment_name, + resource_group_name): raw_parameters = locals() dotnet_component_decorator = DotNetComponentDecorator( cmd=cmd, @@ -3951,16 +5387,26 @@ def show_dotnet_component(cmd, dotnet_component_name, environment_name, resource ) result = dotnet_component_decorator.show() if result is not None: - logger.warning("Found DotNet Component '%s' in environment '%s' in resource group '%s'.", dotnet_component_name, environment_name, resource_group_name) + logger.warning( + "Found DotNet Component '%s' in environment '%s' in resource group '%s'.", + dotnet_component_name, + environment_name, + resource_group_name) component_type = 
safe_get(result, "properties", "componentType") if component_type == DOTNET_COMPONENT_RESOURCE_TYPE: - aspire_dashboard_url = dotnet_component_decorator._get_aspire_dashboard_url(environment_name, resource_group_name, dotnet_component_name) + aspire_dashboard_url = dotnet_component_decorator._get_aspire_dashboard_url( + environment_name, resource_group_name, dotnet_component_name) logger.warning("Aspire Dashboard URL: %s.", aspire_dashboard_url) return result -def delete_dotnet_component(cmd, dotnet_component_name, environment_name, resource_group_name, no_wait=False): +def delete_dotnet_component( + cmd, + dotnet_component_name, + environment_name, + resource_group_name, + no_wait=False): raw_parameters = locals() dotnet_component_decorator = DotNetComponentDecorator( cmd=cmd, @@ -3968,13 +5414,28 @@ def delete_dotnet_component(cmd, dotnet_component_name, environment_name, resour raw_parameters=raw_parameters, models=CONTAINER_APPS_SDK_MODELS ) - logger.warning("Deleting DotNet Component '%s' in environment '%s' in resource group '%s'.", dotnet_component_name, environment_name, resource_group_name) + logger.warning( + "Deleting DotNet Component '%s' in environment '%s' in resource group '%s'.", + dotnet_component_name, + environment_name, + resource_group_name) dotnet_component_decorator.delete() - logger.warning("Successfully deleted DotNet Component '%s' in environment '%s' in resource group '%s'.", dotnet_component_name, environment_name, resource_group_name) - + logger.warning( + "Successfully deleted DotNet Component '%s' in environment '%s' in resource group '%s'.", + dotnet_component_name, + environment_name, + resource_group_name) # pylint: disable=useless-return, protected-access -def create_dotnet_component(cmd, dotnet_component_name, environment_name, resource_group_name, dotnet_component_type=DOTNET_COMPONENT_RESOURCE_TYPE, no_wait=False): + + +def create_dotnet_component( + cmd, + dotnet_component_name, + environment_name, + resource_group_name, + dotnet_component_type=DOTNET_COMPONENT_RESOURCE_TYPE, + no_wait=False): raw_parameters = locals() dotnet_component_decorator = DotNetComponentDecorator( cmd=cmd, @@ -3983,57 +5444,91 @@ def create_dotnet_component(cmd, dotnet_component_name, environment_name, resour models=CONTAINER_APPS_SDK_MODELS ) dotnet_component_decorator.construct_payload() - logger.warning("Creating DotNet Component '%s' in environment '%s' in resource group '%s.", dotnet_component_name, environment_name, resource_group_name) + logger.warning( + "Creating DotNet Component '%s' in environment '%s' in resource group '%s.", + dotnet_component_name, + environment_name, + resource_group_name) r = dotnet_component_decorator.create() if r is not None: - logger.warning("Successfully created DotNet Component '%s' in environment '%s' in resource group '%s'.", dotnet_component_name, environment_name, resource_group_name) + logger.warning( + "Successfully created DotNet Component '%s' in environment '%s' in resource group '%s'.", + dotnet_component_name, + environment_name, + resource_group_name) component_type = safe_get(r, "properties", "componentType") if component_type == DOTNET_COMPONENT_RESOURCE_TYPE: - aspire_dashboard_url = dotnet_component_decorator._get_aspire_dashboard_url(environment_name, resource_group_name, dotnet_component_name) - logger.warning("Access your Aspire Dashboard at %s.", aspire_dashboard_url) + aspire_dashboard_url = dotnet_component_decorator._get_aspire_dashboard_url( + environment_name, resource_group_name, dotnet_component_name) + 
logger.warning( + "Access your Aspire Dashboard at %s.", + aspire_dashboard_url) return -def set_registry(cmd, name, resource_group_name, server, username=None, password=None, disable_warnings=False, identity=None, no_wait=False): +def set_registry( + cmd, + name, + resource_group_name, + server, + username=None, + password=None, + disable_warnings=False, + identity=None, + no_wait=False): # copy from parent _validate_subscription_registered(cmd, CONTAINER_APPS_RP) if (username or password) and identity: - raise MutuallyExclusiveArgumentError("Use either identity or username/password.") + raise MutuallyExclusiveArgumentError( + "Use either identity or username/password.") containerapp_def = None try: - containerapp_def = ContainerAppPreviewClient.show(cmd=cmd, resource_group_name=resource_group_name, name=name) - except: + containerapp_def = ContainerAppPreviewClient.show( + cmd=cmd, resource_group_name=resource_group_name, name=name) + except BaseException: pass if not containerapp_def: - raise ResourceNotFoundError("The containerapp '{}' does not exist".format(name)) + raise ResourceNotFoundError( + "The containerapp '{}' does not exist".format(name)) _get_existing_secrets(cmd, resource_group_name, name, containerapp_def) registry = None - registries_def = safe_get(containerapp_def, "properties", "configuration", "registries", default=[]) + registries_def = safe_get( + containerapp_def, + "properties", + "configuration", + "registries", + default=[]) containerapp_def["properties"]["configuration"]["registries"] = registries_def if (not username or not password) and not identity: - # If registry is Azure Container Registry, we can try inferring credentials + # If registry is Azure Container Registry, we can try inferring + # credentials if ACR_IMAGE_SUFFIX not in server: - raise RequiredArgumentMissingError('Registry username and password are required if you are not using Azure Container Registry.') - not disable_warnings and logger.warning('No credential was provided to access Azure Container Registry. Trying to look up...') + raise RequiredArgumentMissingError( + 'Registry username and password are required if you are not using Azure Container Registry.') + not disable_warnings and logger.warning( + 'No credential was provided to access Azure Container Registry. Trying to look up...') parsed = urlparse(server) - registry_name = (parsed.netloc if parsed.scheme else parsed.path).split('.')[0] + registry_name = ( + parsed.netloc if parsed.scheme else parsed.path).split('.')[0] try: username, password, _ = _get_acr_cred(cmd.cli_ctx, registry_name) except Exception as ex: - raise RequiredArgumentMissingError('Failed to retrieve credentials for container registry. Please provide the registry username and password') from ex + raise RequiredArgumentMissingError( + 'Failed to retrieve credentials for container registry. 
Please provide the registry username and password') from ex # Check if updating existing registry updating_existing_registry = False for r in registries_def: if r['server'].lower() == server.lower(): - not disable_warnings and logger.warning("Updating existing registry.") + not disable_warnings and logger.warning( + "Updating existing registry.") updating_existing_registry = True if username: r["username"] = username @@ -4071,64 +5566,101 @@ def set_registry(cmd, name, resource_group_name, server, username=None, password # preview logic if identity: if is_registry_msi_system(identity): - set_managed_identity(cmd, resource_group_name, containerapp_def, system_assigned=True) + set_managed_identity( + cmd, + resource_group_name, + containerapp_def, + system_assigned=True) if is_valid_resource_id(identity): - env_id = safe_get(containerapp_def, "properties", "environmentId", default="") + env_id = safe_get( + containerapp_def, + "properties", + "environmentId", + default="") parsed_managed_env = parse_resource_id(env_id) managed_env_name = parsed_managed_env['name'] managed_env_rg = parsed_managed_env['resource_group'] - if not env_has_managed_identity(cmd, managed_env_rg, managed_env_name, identity): - set_managed_identity(cmd, resource_group_name, containerapp_def, user_assigned=[identity]) + if not env_has_managed_identity( + cmd, managed_env_rg, managed_env_name, identity): + set_managed_identity( + cmd, + resource_group_name, + containerapp_def, + user_assigned=[identity]) try: r = ContainerAppPreviewClient.create_or_update( - cmd=cmd, resource_group_name=resource_group_name, name=name, container_app_envelope=containerapp_def, no_wait=no_wait) + cmd=cmd, + resource_group_name=resource_group_name, + name=name, + container_app_envelope=containerapp_def, + no_wait=no_wait) return r["properties"]["configuration"]["registries"] except Exception as e: handle_raw_exception(e) -def set_registry_job(cmd, name, resource_group_name, server, username=None, password=None, disable_warnings=False, identity=None, no_wait=False): +def set_registry_job( + cmd, + name, + resource_group_name, + server, + username=None, + password=None, + disable_warnings=False, + identity=None, + no_wait=False): raw_parameters = locals() containerapp_job_registry_set_decorator = ContainerAppJobRegistryPreviewSetDecorator( cmd=cmd, client=ContainerAppsJobPreviewClient, raw_parameters=raw_parameters, - models=CONTAINER_APPS_SDK_MODELS - ) - containerapp_job_registry_set_decorator.validate_subscription_registered(CONTAINER_APPS_RP) + models=CONTAINER_APPS_SDK_MODELS) + containerapp_job_registry_set_decorator.validate_subscription_registered( + CONTAINER_APPS_RP) containerapp_job_registry_set_decorator.validate_arguments() containerapp_job_registry_set_decorator.construct_payload() r = containerapp_job_registry_set_decorator.set() return r - # maintenance config -def add_maintenance_config(cmd, resource_group_name, env_name, duration, start_hour_utc, weekday): + + +def add_maintenance_config( + cmd, + resource_group_name, + env_name, + duration, + start_hour_utc, + weekday): raw_parameters = locals() maintenance_config_decorator = ContainerAppEnvMaintenanceConfigPreviewDecorator( cmd=cmd, client=MaintenanceConfigPreviewClient, raw_parameters=raw_parameters, - models=CONTAINER_APPS_SDK_MODELS - ) + models=CONTAINER_APPS_SDK_MODELS) maintenance_config_decorator.construct_payload() maintenance_config_decorator.validate_arguments() r = maintenance_config_decorator.create_or_update() return r -def update_maintenance_config(cmd, 
resource_group_name, env_name, duration=None, start_hour_utc=None, weekday=None): +def update_maintenance_config( + cmd, + resource_group_name, + env_name, + duration=None, + start_hour_utc=None, + weekday=None): raw_parameters = locals() maintenance_config_decorator = ContainerAppEnvMaintenanceConfigPreviewDecorator( cmd=cmd, client=MaintenanceConfigPreviewClient, raw_parameters=raw_parameters, - models=CONTAINER_APPS_SDK_MODELS - ) + models=CONTAINER_APPS_SDK_MODELS) forUpdate = True maintenance_config_decorator.construct_payload(forUpdate) maintenance_config_decorator.validate_arguments() @@ -4142,8 +5674,7 @@ def remove_maintenance_config(cmd, resource_group_name, env_name): cmd=cmd, client=MaintenanceConfigPreviewClient, raw_parameters=raw_parameters, - models=CONTAINER_APPS_SDK_MODELS - ) + models=CONTAINER_APPS_SDK_MODELS) r = maintenance_config_decorator.remove() return r @@ -4154,13 +5685,18 @@ def list_maintenance_config(cmd, resource_group_name, env_name): cmd=cmd, client=MaintenanceConfigPreviewClient, raw_parameters=raw_parameters, - models=CONTAINER_APPS_SDK_MODELS - ) + models=CONTAINER_APPS_SDK_MODELS) r = maintenance_config_decorator.list() return r -def containerapp_debug(cmd, resource_group_name, name, container=None, revision=None, replica=None): +def containerapp_debug( + cmd, + resource_group_name, + name, + container=None, + revision=None, + replica=None): logger.warning("Connecting...") conn = DebugWebSocketConnection( cmd=cmd, @@ -4182,14 +5718,16 @@ def containerapp_debug(cmd, resource_group_name, name, container=None, revision= while conn.is_connected: if not reader.is_alive() or not writer.is_alive(): - logger.warning("Reader or Writer for WebSocket is not alive. Closing the connection.") + logger.warning( + "Reader or Writer for WebSocket is not alive. Closing the connection.") conn.disconnect() try: time.sleep(0.1) except KeyboardInterrupt: if conn.is_connected: - logger.info("Caught KeyboardInterrupt. Sending ctrl+c to server") + logger.info( + "Caught KeyboardInterrupt. 
Sending ctrl+c to server") conn.send(SSH_CTRL_C_MSG) @@ -4204,52 +5742,96 @@ def list_label_history(cmd, resource_group_name, name): def show_label_history(cmd, resource_group_name, name, label): _validate_subscription_registered(cmd, CONTAINER_APPS_RP) try: - return LabelHistoryPreviewClient.show(cmd, resource_group_name, name, label) + return LabelHistoryPreviewClient.show( + cmd, resource_group_name, name, label) except Exception as e: handle_raw_exception(e) -def set_revision_mode(cmd, resource_group_name, name, mode, target_label=None, no_wait=False): +def set_revision_mode( + cmd, + resource_group_name, + name, + mode, + target_label=None, + no_wait=False): _validate_subscription_registered(cmd, CONTAINER_APPS_RP) containerapp_def = None try: - containerapp_def = ContainerAppPreviewClient.show(cmd=cmd, resource_group_name=resource_group_name, name=name) + containerapp_def = ContainerAppPreviewClient.show( + cmd=cmd, resource_group_name=resource_group_name, name=name) except Exception as e: handle_raw_exception(e) if not containerapp_def: - raise ResourceNotFoundError("The containerapp '{}' does not exist".format(name)) + raise ResourceNotFoundError( + "The containerapp '{}' does not exist".format(name)) # Mode unchanged, no-op - if containerapp_def["properties"]["configuration"]["activeRevisionsMode"].lower() == mode.lower(): + if containerapp_def["properties"]["configuration"]["activeRevisionsMode"].lower( + ) == mode.lower(): return containerapp_def["properties"]["configuration"]["activeRevisionsMode"] - if mode.lower() == "labels" and "ingress" not in containerapp_def['properties']['configuration']: + if mode.lower( + ) == "labels" and "ingress" not in containerapp_def['properties']['configuration']: raise ValidationError("Ingress is requried for labels mode.") containerapp_patch_def = {} - safe_set(containerapp_patch_def, "properties", "configuration", "activeRevisionsMode", value=mode) - safe_set(containerapp_patch_def, "properties", "configuration", "targetLabel", value=target_label) + safe_set( + containerapp_patch_def, + "properties", + "configuration", + "activeRevisionsMode", + value=mode) + safe_set( + containerapp_patch_def, + "properties", + "configuration", + "targetLabel", + value=target_label) # If we're going into labels mode, replace the default traffic config with the target label and latest revision. # Otherwise all revisions will be deactivated. 
- traffic = safe_get(containerapp_def, "properties", "configuration", "ingress", "traffic") - if mode.lower() == "labels" and len(traffic) == 1 and safe_get(traffic[0], "latestRevision") is True: - safe_set(containerapp_patch_def, "properties", "configuration", "ingress", "traffic", value=[{ - "revisionName": containerapp_def["properties"]["latestRevisionName"], - "weight": 100, - "label": target_label - }]) + traffic = safe_get( + containerapp_def, + "properties", + "configuration", + "ingress", + "traffic") + if mode.lower() == "labels" and len(traffic) == 1 and safe_get( + traffic[0], "latestRevision") is True: + safe_set( + containerapp_patch_def, + "properties", + "configuration", + "ingress", + "traffic", + value=[ + { + "revisionName": containerapp_def["properties"]["latestRevisionName"], + "weight": 100, + "label": target_label}]) try: r = ContainerAppPreviewClient.update( - cmd=cmd, resource_group_name=resource_group_name, name=name, container_app_envelope=containerapp_patch_def, no_wait=no_wait) + cmd=cmd, + resource_group_name=resource_group_name, + name=name, + container_app_envelope=containerapp_patch_def, + no_wait=no_wait) return r["properties"]["configuration"]["activeRevisionsMode"] except Exception as e: handle_raw_exception(e) -def add_revision_label(cmd, resource_group_name, revision, label, name=None, no_wait=False, yes=False): +def add_revision_label( + cmd, + resource_group_name, + revision, + label, + name=None, + no_wait=False, + yes=False): _validate_subscription_registered(cmd, CONTAINER_APPS_RP) if not name: @@ -4257,34 +5839,51 @@ def add_revision_label(cmd, resource_group_name, revision, label, name=None, no_ containerapp_def = None try: - containerapp_def = ContainerAppPreviewClient.show(cmd=cmd, resource_group_name=resource_group_name, name=name) + containerapp_def = ContainerAppPreviewClient.show( + cmd=cmd, resource_group_name=resource_group_name, name=name) except Exception as e: handle_raw_exception(e) if not containerapp_def: - raise ResourceNotFoundError(f"The containerapp '{name}' does not exist in group '{resource_group_name}'") + raise ResourceNotFoundError( + f"The containerapp '{name}' does not exist in group '{resource_group_name}'") - if "ingress" not in containerapp_def['properties']['configuration'] or "traffic" not in containerapp_def['properties']['configuration']['ingress']: - raise ValidationError("Ingress and traffic weights are required to add labels.") + if "ingress" not in containerapp_def['properties'][ + 'configuration'] or "traffic" not in containerapp_def['properties']['configuration']['ingress']: + raise ValidationError( + "Ingress and traffic weights are required to add labels.") - traffic_weight = safe_get(containerapp_def, 'properties', 'configuration', 'ingress', 'traffic') + traffic_weight = safe_get( + containerapp_def, + 'properties', + 'configuration', + 'ingress', + 'traffic') _validate_revision_name(cmd, revision, resource_group_name, name) - is_labels_mode = safe_get(containerapp_def, 'properties', 'configuration', 'activeRevisionsMode', default='single').lower() == 'labels' + is_labels_mode = safe_get( + containerapp_def, + 'properties', + 'configuration', + 'activeRevisionsMode', + default='single').lower() == 'labels' label_added = False for weight in traffic_weight: if "label" in weight and weight["label"].lower() == label.lower(): - r_name = "latest" if "latestRevision" in weight and weight["latestRevision"] else weight["revisionName"] + r_name = "latest" if "latestRevision" in weight and weight[ + 
"latestRevision"] else weight["revisionName"] if not yes and r_name.lower() != revision.lower(): msg = f"A weight with the label '{label}' already exists. Remove existing label '{label}' from '{r_name}' and add to '{revision}'?" if is_labels_mode: msg = f"Label '{label}' already exists with revision '{r_name}'. Replace '{r_name}' with '{revision}'?" if not prompt_y_n(msg, default="n"): - raise ArgumentUsageError("Usage Error: cannot specify existing label without agreeing to update values.") + raise ArgumentUsageError( + "Usage Error: cannot specify existing label without agreeing to update values.") - # in labels mode we update the revision assigned to the label, not the label assigned to the revision + # in labels mode we update the revision assigned to the label, not + # the label assigned to the revision if is_labels_mode: weight["revisionName"] = revision label_added = True @@ -4309,41 +5908,71 @@ def add_revision_label(cmd, resource_group_name, revision, label, name=None, no_ }) containerapp_patch_def = {} - safe_set(containerapp_patch_def, 'properties', 'configuration', 'ingress', 'traffic', value=traffic_weight) + safe_set( + containerapp_patch_def, + 'properties', + 'configuration', + 'ingress', + 'traffic', + value=traffic_weight) try: r = ContainerAppPreviewClient.update( - cmd=cmd, resource_group_name=resource_group_name, name=name, container_app_envelope=containerapp_patch_def, no_wait=no_wait) + cmd=cmd, + resource_group_name=resource_group_name, + name=name, + container_app_envelope=containerapp_patch_def, + no_wait=no_wait) return r['properties']['configuration']['ingress']['traffic'] except Exception as e: handle_raw_exception(e) -def remove_revision_label(cmd, resource_group_name, name, label, no_wait=False): +def remove_revision_label( + cmd, + resource_group_name, + name, + label, + no_wait=False): _validate_subscription_registered(cmd, CONTAINER_APPS_RP) containerapp_def = None try: - containerapp_def = ContainerAppPreviewClient.show(cmd=cmd, resource_group_name=resource_group_name, name=name) + containerapp_def = ContainerAppPreviewClient.show( + cmd=cmd, resource_group_name=resource_group_name, name=name) except Exception as e: handle_raw_exception(e) if not containerapp_def: - raise ResourceNotFoundError(f"The containerapp '{name}' does not exist in group '{resource_group_name}'") + raise ResourceNotFoundError( + f"The containerapp '{name}' does not exist in group '{resource_group_name}'") - if "ingress" not in containerapp_def['properties']['configuration'] or "traffic" not in containerapp_def['properties']['configuration']['ingress']: - raise ValidationError("Ingress and traffic weights are required to remove labels.") + if "ingress" not in containerapp_def['properties'][ + 'configuration'] or "traffic" not in containerapp_def['properties']['configuration']['ingress']: + raise ValidationError( + "Ingress and traffic weights are required to remove labels.") - traffic_list = safe_get(containerapp_def, 'properties', 'configuration', 'ingress', 'traffic') + traffic_list = safe_get( + containerapp_def, + 'properties', + 'configuration', + 'ingress', + 'traffic') label_removed = False - is_labels_mode = safe_get(containerapp_def, 'properties', 'configuration', 'activeRevisionsMode', default='single').lower() == 'labels' + is_labels_mode = safe_get( + containerapp_def, + 'properties', + 'configuration', + 'activeRevisionsMode', + default='single').lower() == 'labels' new_traffic_list = [] for traffic in traffic_list: if "label" in traffic and traffic["label"].lower() == 
label.lower(): label_removed = True - # in labels mode we remove the whole entry, otherwise just clear the label. + # in labels mode we remove the whole entry, otherwise just clear + # the label. if not is_labels_mode: traffic["label"] = None new_traffic_list.append(traffic) @@ -4351,14 +5980,25 @@ def remove_revision_label(cmd, resource_group_name, name, label, no_wait=False): new_traffic_list.append(traffic) if not label_removed: - raise ValidationError("Please specify a label name with an associated traffic weight.") + raise ValidationError( + "Please specify a label name with an associated traffic weight.") containerapp_patch_def = {} - safe_set(containerapp_patch_def, 'properties', 'configuration', 'ingress', 'traffic', value=new_traffic_list) + safe_set( + containerapp_patch_def, + 'properties', + 'configuration', + 'ingress', + 'traffic', + value=new_traffic_list) try: r = ContainerAppPreviewClient.update( - cmd=cmd, resource_group_name=resource_group_name, name=name, container_app_envelope=containerapp_patch_def, no_wait=no_wait) + cmd=cmd, + resource_group_name=resource_group_name, + name=name, + container_app_envelope=containerapp_patch_def, + no_wait=no_wait) return r['properties']['configuration']['ingress']['traffic'] except Exception as e: handle_raw_exception(e) diff --git a/src/containerapp/azext_containerapp/tests/latest/data/source_built_using_bullseye_buildpack_python310/app.py b/src/containerapp/azext_containerapp/tests/latest/data/source_built_using_bullseye_buildpack_python310/app.py index 5ae120bdc15..805ecdbc1eb 100644 --- a/src/containerapp/azext_containerapp/tests/latest/data/source_built_using_bullseye_buildpack_python310/app.py +++ b/src/containerapp/azext_containerapp/tests/latest/data/source_built_using_bullseye_buildpack_python310/app.py @@ -1,19 +1,19 @@ -# -------------------------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. -# -------------------------------------------------------------------------------------------- - -from flask import Flask -from datetime import datetime -from flask import current_app - -app = Flask(__name__) - -@app.route("/") -def hello(): - return "Hello World! " + str(datetime.now()) - -@app.route("/applicationpath") -def applicationPath(): - return current_app.root_path - +# -------------------------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# -------------------------------------------------------------------------------------------- + +from flask import Flask +from datetime import datetime +from flask import current_app + +app = Flask(__name__) + +@app.route("/") +def hello(): + return "Hello World! 
" + str(datetime.now()) + +@app.route("/applicationpath") +def applicationPath(): + return current_app.root_path + diff --git a/src/containerapp/azext_containerapp/tests/latest/test_containerapp_auth_commands.py b/src/containerapp/azext_containerapp/tests/latest/test_containerapp_auth_commands.py index 69a8f3c455d..0e19449e1a3 100644 --- a/src/containerapp/azext_containerapp/tests/latest/test_containerapp_auth_commands.py +++ b/src/containerapp/azext_containerapp/tests/latest/test_containerapp_auth_commands.py @@ -29,7 +29,7 @@ def test_containerapp_blob_storage_token_store_msi(self, resource_group): env = prepare_containerapp_env_for_app_e2e_tests(self, location) self.cmd('containerapp create -g {} -n {} --environment {} --image mcr.microsoft.com/k8se/quickstart:latest --ingress external --target-port 80 --system-assigned --user-assigned {}'.format(resource_group, app, env, user_identity_name)) - + client_id = 'c0d23eb5-ea9f-4a4d-9519-bfa0a422c491' client_secret = 'c0d23eb5-ea9f-4a4d-9519-bfa0a422c491' issuer = 'https://sts.windows.net/54826b22-38d6-4fb2-bad9-b7983a3e9c5a/' @@ -43,7 +43,7 @@ def test_containerapp_blob_storage_token_store_msi(self, resource_group): "microsoft-provider-authentication-secret"), JMESPathCheck('registration.openIdIssuer', issuer), ]) - + self.cmd( 'containerapp auth update -g {} -n {} --token-store true --blob-container-uri {} --yes' .format(resource_group, app, blobContainerUri), checks=[ @@ -51,7 +51,7 @@ def test_containerapp_blob_storage_token_store_msi(self, resource_group): JMESPathCheck('properties.login.tokenStore.azureBlobStorage.blobContainerUri', blobContainerUri), JMESPathCheckNotExists('properties.login.tokenStore.azureBlobStorage.managedIdentityResourceId'), ]) - + self.cmd( 'containerapp auth update -g {} -n {} --token-store true --blob-container-uri {} --blob-container-identity {}' .format(resource_group, app, blobContainerUri, user_identity_id), checks=[ diff --git a/src/containerapp/azext_containerapp/tests/latest/test_containerapp_commands.py b/src/containerapp/azext_containerapp/tests/latest/test_containerapp_commands.py index cf80df55ede..128db827782 100644 --- a/src/containerapp/azext_containerapp/tests/latest/test_containerapp_commands.py +++ b/src/containerapp/azext_containerapp/tests/latest/test_containerapp_commands.py @@ -325,13 +325,13 @@ def test_containerapp_ingress_traffic_labels_e2e(self, resource_group): self.cmd('containerapp update -g {} -n {} --cpu 1.0 --memory 2Gi --target-label label2'.format(resource_group, ca_name)) # it may take a minute for the new revision to be created and added to traffic. 
- traffic = self.cmd('containerapp ingress traffic show -g {} -n {}'.format(resource_group, ca_name)).get_output_in_json() + traffic = self.cmd('containerapp ingress traffic show -g {} -n {}'.format(resource_group, ca_name)).get_output_in_json() for _ in range(100): if len(traffic) >= 2: break time.sleep(5) traffic = self.cmd('containerapp ingress traffic show -g {} -n {}'.format(resource_group, ca_name)).get_output_in_json() - + self.assertEqual(len(traffic), 2) revisions_list = self.cmd('containerapp revision list -g {} -n {}'.format(resource_group, ca_name)).get_output_in_json() @@ -1636,7 +1636,7 @@ def test_containerapp_update_mode_e2e(self, resource_group): self.cmd(f"containerapp create -g {resource_group} -n {ca_name} --environment {env} --image mcr.microsoft.com/k8se/quickstart:latest --ingress external --target-port 80", checks=[ - JMESPathCheck('properties.configuration.activeRevisionsMode', "Single"), + JMESPathCheck('properties.configuration.activeRevisionsMode', "Single"), ]) try: diff --git a/src/containerapp/azext_containerapp/tests/latest/test_containerapp_compose_command.py b/src/containerapp/azext_containerapp/tests/latest/test_containerapp_compose_command.py index d221c56924e..8a17ab08817 100644 --- a/src/containerapp/azext_containerapp/tests/latest/test_containerapp_compose_command.py +++ b/src/containerapp/azext_containerapp/tests/latest/test_containerapp_compose_command.py @@ -70,7 +70,7 @@ def test_containerapp_compose_with_command_list(self, resource_group): 'environment': env_id, 'compose': compose_file_name, }) - + command_string = 'containerapp compose create' command_string += ' --compose-file-path {compose}' command_string += ' --resource-group {rg}' diff --git a/src/containerapp/azext_containerapp/tests/latest/test_containerapp_compose_environment.py b/src/containerapp/azext_containerapp/tests/latest/test_containerapp_compose_environment.py index 1f36f78172d..d11ed0f513c 100644 --- a/src/containerapp/azext_containerapp/tests/latest/test_containerapp_compose_environment.py +++ b/src/containerapp/azext_containerapp/tests/latest/test_containerapp_compose_environment.py @@ -76,7 +76,7 @@ def test_containerapp_compose_create_with_environment_prompt(self, resource_grou 'environment': env_id, 'compose': compose_file_name, }) - + command_string = 'containerapp compose create' command_string += ' --compose-file-path {compose}' command_string += ' --resource-group {rg}' diff --git a/src/containerapp/azext_containerapp/tests/latest/test_containerapp_compose_ingress.py b/src/containerapp/azext_containerapp/tests/latest/test_containerapp_compose_ingress.py index 0a303ff7738..e5210b673a9 100644 --- a/src/containerapp/azext_containerapp/tests/latest/test_containerapp_compose_ingress.py +++ b/src/containerapp/azext_containerapp/tests/latest/test_containerapp_compose_ingress.py @@ -139,7 +139,7 @@ def test_containerapp_compose_create_with_ingress_prompt(self, resource_group): 'environment': env_id, 'compose': compose_file_name, }) - + command_string = 'containerapp compose create' command_string += ' --compose-file-path {compose}' command_string += ' --resource-group {rg}' diff --git a/src/containerapp/azext_containerapp/tests/latest/test_containerapp_compose_mcp_gateway.py b/src/containerapp/azext_containerapp/tests/latest/test_containerapp_compose_mcp_gateway.py index 917327911e0..6f3fcf75e00 100644 --- a/src/containerapp/azext_containerapp/tests/latest/test_containerapp_compose_mcp_gateway.py +++ 
b/src/containerapp/azext_containerapp/tests/latest/test_containerapp_compose_mcp_gateway.py @@ -25,9 +25,9 @@ class ContainerAppComposeMCPGatewayScenarioTest(ScenarioTest): def test_mcp_gateway_deployment(self, resource_group): """ Test US2 Acceptance Scenario 1: Deploy compose file with MCP gateway. - + Expected: MCP gateway service is created without being assigned to GPU profile. - + This test MUST FAIL until T032-T043 are implemented. """ self.kwargs.update({ @@ -53,9 +53,9 @@ def test_mcp_gateway_deployment(self, resource_group): def test_managed_identity_enablement(self, resource_group): """ Test US2 Acceptance Scenario 2: System-assigned managed identity enabled for MCP gateway. - + Expected: MCP gateway container app has system-assigned managed identity enabled. - + This test MUST FAIL until T034-T037 are implemented. """ self.kwargs.update({ @@ -78,9 +78,9 @@ def test_managed_identity_enablement(self, resource_group): def test_role_assignment(self, resource_group): """ Test US2 Acceptance Scenario 3: Azure AI Developer role assigned to MCP gateway identity. - + Expected: MCP gateway managed identity has "Azure AI Developer" role on resource group. - + This test MUST FAIL until T038-T043 are implemented. """ self.kwargs.update({ @@ -105,9 +105,9 @@ def test_role_assignment(self, resource_group): def test_mcp_environment_injection(self, resource_group): """ Test US2 Acceptance Scenario 4: Environment variable injection for MCP gateway dependencies. - + Expected: Services that depend on MCP gateway receive MCP_ENDPOINT environment variable. - + This test MUST FAIL until T040-T043 are implemented. """ self.kwargs.update({ @@ -122,6 +122,6 @@ def test_mcp_environment_injection(self, resource_group): # Check web service has MCP_ENDPOINT web_app = self.cmd('containerapp show -g {rg} -n web').get_output_in_json() env_vars = {e['name']: e.get('value', '') for e in web_app['properties']['template']['containers'][0].get('env', [])} - + self.assertIn('MCP_ENDPOINT', env_vars) self.assertIn('mcp-gateway', env_vars['MCP_ENDPOINT'].lower()) diff --git a/src/containerapp/azext_containerapp/tests/latest/test_containerapp_compose_models.py b/src/containerapp/azext_containerapp/tests/latest/test_containerapp_compose_models.py index 38364f5c288..c5ae9d9bb35 100644 --- a/src/containerapp/azext_containerapp/tests/latest/test_containerapp_compose_models.py +++ b/src/containerapp/azext_containerapp/tests/latest/test_containerapp_compose_models.py @@ -27,10 +27,10 @@ class ContainerAppComposeModelsScenarioTest(ScenarioTest): def test_basic_models_deployment(self, resource_group): """ Test US1 Acceptance Scenario 1: Deploy compose file with models section. - + Expected: Creates models container app with model-runner and model-runner-config containers. Services receive MODELS_ENDPOINT and MODELS_AVAILABLE environment variables. - + This test MUST FAIL until T013-T027 are implemented. """ self.kwargs.update({ @@ -48,7 +48,7 @@ def test_basic_models_deployment(self, resource_group): # Verify models container app was created models_app = self.cmd('containerapp show -g {rg} -n models').get_output_in_json() self.assertIsNotNone(models_app) - + # Verify models app has 2 containers containers = models_app['properties']['template']['containers'] self.assertEqual(len(containers), 2) @@ -73,10 +73,10 @@ def test_basic_models_deployment(self, resource_group): def test_gpu_profile_creation(self, resource_group): """ Test US1 Acceptance Scenario 2: GPU workload profile creation. 
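        The expected behavior is summarized below; as one concrete check it implies, a sketch along the lines used elsewhere in this file (the app name 'models' matches the other scenarios here, while 'workloadProfileName' as the property to inspect is an assumption until T016-T019 land):

            models_app = self.cmd('containerapp show -g {rg} -n models').get_output_in_json()
            # the models app should report the workload profile it was deployed to
            self.assertTrue(models_app['properties'].get('workloadProfileName'))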
- + Expected: System checks for GPU profile, creates if doesn't exist, deploys models app to that profile. - + This test MUST FAIL until T016-T019 are implemented. """ self.kwargs.update({ @@ -102,9 +102,9 @@ def test_gpu_profile_creation(self, resource_group): def test_models_environment_injection(self, resource_group): """ Test US1 Acceptance Scenario 3: Environment variable injection. - + Expected: Services that depend on models receive MODELS_ENDPOINT and MODELS_AVAILABLE. - + This test MUST FAIL until T023-T027 are implemented. """ self.kwargs.update({ @@ -119,13 +119,13 @@ def test_models_environment_injection(self, resource_group): # Check web service has environment variables web_app = self.cmd('containerapp show -g {rg} -n web').get_output_in_json() env_vars = {e['name']: e.get('value', '') for e in web_app['properties']['template']['containers'][0].get('env', [])} - + self.assertIn('MODELS_ENDPOINT', env_vars) self.assertIn('MODELS_AVAILABLE', env_vars) - + # Verify MODELS_ENDPOINT format self.assertIn('models', env_vars['MODELS_ENDPOINT'].lower()) - + # Verify MODELS_AVAILABLE contains expected model names self.assertIn('phi', env_vars['MODELS_AVAILABLE'].lower()) @@ -134,9 +134,9 @@ def test_models_environment_injection(self, resource_group): def test_gpu_unavailable_error(self, resource_group): """ Test FR-065: GPU unavailable error handling. - + Expected: Raises ResourceNotFoundError with helpful message. - + This test MUST FAIL until proper error handling is implemented. """ # This test would attempt deployment in a region without GPU support @@ -148,9 +148,9 @@ def test_gpu_unavailable_error(self, resource_group): def test_dry_run_no_resources(self, resource_group): """ Test US3 Acceptance Scenario 1-2: Dry-run generates preview without creating resources. - + Expected: No actual Azure resources created, detailed report generated. - + This test MUST FAIL until T048-T056 are implemented. """ self.kwargs.update({ @@ -182,9 +182,9 @@ def test_dry_run_no_resources(self, resource_group): def test_custom_image_override(self, resource_group): """ Test US4 Acceptance Scenario 1: Custom image via x-azure-deployment. - + Expected: Container app uses custom image from x-azure-deployment.image. - + This test MUST FAIL until T059-T064 are implemented. """ # This would need a compose file with x-azure-deployment.image override @@ -196,9 +196,9 @@ def test_custom_image_override(self, resource_group): def test_custom_workload_profile_override(self, resource_group): """ Test US4 Acceptance Scenario 3: Custom workload profile via x-azure-deployment. - + Expected: Models app uses specified workload profile type. - + This test MUST FAIL until T063 is implemented. """ self.kwargs.update({ @@ -221,9 +221,9 @@ def test_custom_workload_profile_override(self, resource_group): def test_declarative_update(self, resource_group): """ Test Declarative Update: Deploy, modify compose, redeploy. - + Expected: Existing apps updated, new apps created, unmanaged apps preserved. - + This test MUST FAIL until T067-T071 are implemented. """ # Placeholder for declarative update test @@ -234,9 +234,9 @@ def test_declarative_update(self, resource_group): def test_replace_all_flag(self, resource_group): """ Test --replace-all flag. - + Expected: All matching container apps are deleted and recreated. - + This test MUST FAIL until T070 is implemented. 
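        A sketch of the invocation this test would exercise once T070 lands, built the same way as the other compose scenarios in this suite (the flag's exact semantics and any follow-up checks are assumptions here):

            command_string = 'containerapp compose create'
            command_string += ' --compose-file-path {compose}'
            command_string += ' --resource-group {rg}'
            command_string += ' --environment {environment}'
            command_string += ' --replace-all'
            # every container app matching the compose file should be deleted and recreated
            self.cmd(command_string)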
""" # Placeholder for replace-all test @@ -247,14 +247,14 @@ def test_replace_all_flag(self, resource_group): def test_error_types_compliance(self, resource_group): """ Test FR-064 through FR-071: Error handling with proper error types. - + Expected: Specific error types from azure.cli.core.azclierror used: - ResourceNotFoundError for GPU unavailable (FR-065) - InvalidArgumentValueError for schema validation (FR-066) - UnauthorizedError for role assignment failures (FR-067) - CLIInternalError for model-runner failures (FR-068) - FileOperationError for compose file issues (FR-070) - + This test MUST FAIL until proper error handling is implemented. """ self.kwargs.update({ @@ -271,7 +271,7 @@ def test_error_types_compliance(self, resource_group): def test_posix_compliance(self, resource_group): """ Test FR-072, FR-073, FR-075-078: POSIX compliance. - + Expected: - Command output to stdout (FR-072) - Status/errors to stderr (FR-073) @@ -279,7 +279,7 @@ def test_posix_compliance(self, resource_group): - Exit code 1 for errors (FR-076) - Exit code 2 for parser errors (FR-077) - Exit code 3 for missing resources (FR-078) - + This test MUST FAIL until POSIX compliance is implemented. """ # This test validates CLI behavior at the shell level @@ -291,9 +291,9 @@ def test_posix_compliance(self, resource_group): def test_logger_usage(self, resource_group): """ Test FR-074: logger.error() and logger.warning() used instead of print(). - + Expected: All user messages use logger, not print(). - + This test validates implementation follows logging standards. """ # This is a code inspection test - validates no print() calls in implementation diff --git a/src/containerapp/azext_containerapp/tests/latest/test_containerapp_compose_registries.py b/src/containerapp/azext_containerapp/tests/latest/test_containerapp_compose_registries.py index 0d83ec36207..acd8f148c8e 100644 --- a/src/containerapp/azext_containerapp/tests/latest/test_containerapp_compose_registries.py +++ b/src/containerapp/azext_containerapp/tests/latest/test_containerapp_compose_registries.py @@ -76,7 +76,7 @@ def test_containerapp_compose_create_with_registry_server_arg_only(self, resourc 'compose': compose_file_name, 'registry_server': "foobar.azurecr.io", }) - + command_string = 'containerapp compose create' command_string += ' --compose-file-path {compose}' command_string += ' --resource-group {rg}' diff --git a/src/containerapp/azext_containerapp/tests/latest/test_containerapp_compose_resources.py b/src/containerapp/azext_containerapp/tests/latest/test_containerapp_compose_resources.py index 200d1138452..4e911b6a40b 100644 --- a/src/containerapp/azext_containerapp/tests/latest/test_containerapp_compose_resources.py +++ b/src/containerapp/azext_containerapp/tests/latest/test_containerapp_compose_resources.py @@ -112,7 +112,7 @@ def test_containerapp_compose_create_with_resources_from_both_cpus_and_deploy_cp 'environment': env, 'compose': compose_file_name, }) - + command_string = 'containerapp compose create' command_string += ' --compose-file-path {compose}' command_string += ' --resource-group {rg}' diff --git a/src/containerapp/azext_containerapp/tests/latest/test_containerapp_compose_scale.py b/src/containerapp/azext_containerapp/tests/latest/test_containerapp_compose_scale.py index 10a4ec64b63..4f8d40f5b6e 100644 --- a/src/containerapp/azext_containerapp/tests/latest/test_containerapp_compose_scale.py +++ b/src/containerapp/azext_containerapp/tests/latest/test_containerapp_compose_scale.py @@ -72,7 +72,7 @@ def 
test_containerapp_compose_create_with_replicas_replicated_mode(self, resourc 'environment': env_id, 'compose': compose_file_name, }) - + command_string = 'containerapp compose create' command_string += ' --compose-file-path {compose}' command_string += ' --resource-group {rg}' diff --git a/src/containerapp/azext_containerapp/tests/latest/test_containerapp_compose_secrets.py b/src/containerapp/azext_containerapp/tests/latest/test_containerapp_compose_secrets.py index 7978277f357..efbb98df5d4 100644 --- a/src/containerapp/azext_containerapp/tests/latest/test_containerapp_compose_secrets.py +++ b/src/containerapp/azext_containerapp/tests/latest/test_containerapp_compose_secrets.py @@ -39,7 +39,7 @@ def test_containerapp_compose_create_with_secrets(self, resource_group): """ compose_file_name = f"{self._testMethodName}_compose.yml" write_test_file(compose_file_name, compose_text) - + secrets_file_name = "./my_secret.txt" secrets_text = "Lorem Ipsum\n" write_test_file(secrets_file_name, secrets_text) @@ -145,7 +145,7 @@ def test_containerapp_compose_create_with_secrets_and_existing_environment_confl 'environment': env_id, 'compose': compose_file_name, }) - + command_string = 'containerapp compose create' command_string += ' --compose-file-path {compose}' command_string += ' --resource-group {rg}' diff --git a/src/containerapp/azext_containerapp/tests/latest/test_containerapp_compose_transport_overrides.py b/src/containerapp/azext_containerapp/tests/latest/test_containerapp_compose_transport_overrides.py index bc2e3dbc182..339c3b46e48 100644 --- a/src/containerapp/azext_containerapp/tests/latest/test_containerapp_compose_transport_overrides.py +++ b/src/containerapp/azext_containerapp/tests/latest/test_containerapp_compose_transport_overrides.py @@ -29,14 +29,14 @@ def test_containerapp_compose_create_with_transport_arg(self, resource_group): compose_file_name = f"{self._testMethodName}_compose.yml" write_test_file(compose_file_name, compose_text) env_id = prepare_containerapp_env_for_app_e2e_tests(self) - + self.kwargs.update({ 'environment': env_id, 'compose': compose_file_name, 'transport': f"foo=http2 bar=auto", 'second_transport': "baz=http", }) - + command_string = 'containerapp compose create' command_string += ' --compose-file-path {compose}' command_string += ' --resource-group {rg}' diff --git a/src/containerapp/azext_containerapp/tests/latest/test_containerapp_create_update_with_source.py b/src/containerapp/azext_containerapp/tests/latest/test_containerapp_create_update_with_source.py index 557e7c8ec7f..c697d132bc9 100644 --- a/src/containerapp/azext_containerapp/tests/latest/test_containerapp_create_update_with_source.py +++ b/src/containerapp/azext_containerapp/tests/latest/test_containerapp_create_update_with_source.py @@ -1,101 +1,101 @@ -# -------------------------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. 
-# -------------------------------------------------------------------------------------------- - -import os -import unittest - -from azure.cli.testsdk import (ScenarioTest, ResourceGroupPreparer, live_only) - -from azext_containerapp.tests.latest.utils import create_and_verify_containerapp_create_and_update, verify_containerapp_create_exception, create_and_verify_containerapp_create_and_update_env_vars - -TEST_DIR = os.path.abspath(os.path.join(os.path.abspath(__file__), '..')) - - -class ContainerAppCreateTest(ScenarioTest): - # These tests should have the `@live_only`attribute because they - # require a docker push operation to push the image built as part of the test to the container registry - # and would not execute from the CI pipeline since docker is not installed in the CI. - @live_only() - @ResourceGroupPreparer(location="eastus") - def test_containerapp_create_source_with_Dockerfile_e2e(self, resource_group): - source_path = os.path.join(TEST_DIR, os.path.join("data", "source_built_using_dockerfile")) - ingress = 'external' - target_port = '80' - create_and_verify_containerapp_create_and_update(self, resource_group=resource_group, source_path=source_path, ingress=ingress, target_port=target_port) - - @live_only() - @ResourceGroupPreparer(location="eastus") - def test_containerapp_create_source_with_buildpack_e2e(self, resource_group): - source_path = os.path.join(TEST_DIR, os.path.join("data", "source_built_using_bullseye_buildpack_net7")) - ingress = 'external' - target_port = '8080' - create_and_verify_containerapp_create_and_update(self, resource_group=resource_group, source_path=source_path, ingress=ingress, target_port=target_port) - - @live_only() - @ResourceGroupPreparer(location="eastus") - def test_containerapp_create_source_and_image_e2e(self, resource_group): - image = "mcr.microsoft.com/dotnet/runtime:7.0" - source_path = os.path.join(TEST_DIR, os.path.join("data", "source_built_using_dockerfile")) - create_and_verify_containerapp_create_and_update(self, resource_group=resource_group, image=image, source_path=source_path) - - @live_only() - @ResourceGroupPreparer(location="eastus") - def test_containerapp_create_source_with_acr_task_e2e(self, resource_group): - source_path = os.path.join(TEST_DIR, os.path.join("data", "source_built_using_acr_task")) - ingress = 'external' - target_port = '8080' - create_and_verify_containerapp_create_and_update(self, resource_group=resource_group, source_path=source_path, ingress=ingress, target_port=target_port) - - @ResourceGroupPreparer(location="eastus") - def test_containerapp_create_source_and_repo_e2e(self, resource_group): - source_path = os.path.join(TEST_DIR, os.path.join("data", "source_built_using_dockerfile")) - repo = "https://github.com/test/repo" - err = ("Usage error: --source and --repo cannot be used together. Can either deploy from a local directory or a GitHub repository") - verify_containerapp_create_exception(self, resource_group=resource_group, err= err, source_path=source_path, repo=repo) - - @ResourceGroupPreparer(location="eastus") - def test_containerapp_create_source_and_yaml_e2e(self, resource_group): - source_path = os.path.join(TEST_DIR, os.path.join("data", "source_built_using_dockerfile")) - yaml = "./test.yaml" - err = ("Usage error: --source or --repo cannot be used with --yaml together. 
Can either deploy from a local directory or provide a yaml file") - verify_containerapp_create_exception(self, resource_group=resource_group, err=err, source_path=source_path, yaml=yaml) - - @ResourceGroupPreparer(location="eastus") - def test_containerapp_create_repo_and_yaml_e2e(self, resource_group): - repo = "https://github.com/test/repo" - yaml = "./test.yaml" - err = ("Usage error: --source or --repo cannot be used with --yaml together. Can either deploy from a local directory or provide a yaml file") - verify_containerapp_create_exception(self, resource_group=resource_group, err=err, repo = repo, yaml=yaml) - - @ResourceGroupPreparer(location="eastus") - def test_containerapp_create_repo_and_connected_environment_e2e(self, resource_group): - repo = "https://github.com/test/repo" - err = ("Usage error: --source or --repo cannot be used with --environment-type connectedEnvironment together. Please use --environment-type managedEnvironment") - verify_containerapp_create_exception(self, resource_group=resource_group, err=err, repo = repo, environment_type="connected") - - @ResourceGroupPreparer(location="eastus") - def test_containerapp_create_source_and_connected_environment_e2e(self, resource_group): - source_path = os.path.join(TEST_DIR, os.path.join("data", "source_built_using_dockerfile")) - err = ("Usage error: --source or --repo cannot be used with --environment-type connectedEnvironment together. Please use --environment-type managedEnvironment") - verify_containerapp_create_exception(self, resource_group=resource_group, err=err, source_path=source_path, environment_type="connected") - - @ResourceGroupPreparer(location="eastus") - def test_containerapp_create_repo_with_non_ACR_registry_server_e2e(self, resource_group): - repo = "https://github.com/test/repo" - registry_server = "docker.io" - registry_user = "test" - registry_pass = "test" - err = ("Usage error: --registry-server: expected an ACR registry (*.azurecr.io) for --repo") - verify_containerapp_create_exception(self, resource_group, err=err, repo=repo, registry_server=registry_server, registry_user=registry_user, registry_pass=registry_pass) - - # We have to use @live_only() here as cloud builder and build resource name is generated randomly - # and no matched request could be found for all builder/build ARM requests - @live_only() - @ResourceGroupPreparer() - def test_containerapp_create_and_update_with_env_vars_e2e(self, resource_group): - containerapp_name = self.create_random_name(prefix='aca', length=24) - source_path = os.path.join(TEST_DIR, os.path.join("data", "source_built_using_source_to_cloud_dotnet")) - create_and_verify_containerapp_create_and_update_env_vars(self, resource_group=resource_group, name=containerapp_name, source_path=source_path) - +# -------------------------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. 
+# -------------------------------------------------------------------------------------------- + +import os +import unittest + +from azure.cli.testsdk import (ScenarioTest, ResourceGroupPreparer, live_only) + +from azext_containerapp.tests.latest.utils import create_and_verify_containerapp_create_and_update, verify_containerapp_create_exception, create_and_verify_containerapp_create_and_update_env_vars + +TEST_DIR = os.path.abspath(os.path.join(os.path.abspath(__file__), '..')) + + +class ContainerAppCreateTest(ScenarioTest): + # These tests should have the `@live_only`attribute because they + # require a docker push operation to push the image built as part of the test to the container registry + # and would not execute from the CI pipeline since docker is not installed in the CI. + @live_only() + @ResourceGroupPreparer(location="eastus") + def test_containerapp_create_source_with_Dockerfile_e2e(self, resource_group): + source_path = os.path.join(TEST_DIR, os.path.join("data", "source_built_using_dockerfile")) + ingress = 'external' + target_port = '80' + create_and_verify_containerapp_create_and_update(self, resource_group=resource_group, source_path=source_path, ingress=ingress, target_port=target_port) + + @live_only() + @ResourceGroupPreparer(location="eastus") + def test_containerapp_create_source_with_buildpack_e2e(self, resource_group): + source_path = os.path.join(TEST_DIR, os.path.join("data", "source_built_using_bullseye_buildpack_net7")) + ingress = 'external' + target_port = '8080' + create_and_verify_containerapp_create_and_update(self, resource_group=resource_group, source_path=source_path, ingress=ingress, target_port=target_port) + + @live_only() + @ResourceGroupPreparer(location="eastus") + def test_containerapp_create_source_and_image_e2e(self, resource_group): + image = "mcr.microsoft.com/dotnet/runtime:7.0" + source_path = os.path.join(TEST_DIR, os.path.join("data", "source_built_using_dockerfile")) + create_and_verify_containerapp_create_and_update(self, resource_group=resource_group, image=image, source_path=source_path) + + @live_only() + @ResourceGroupPreparer(location="eastus") + def test_containerapp_create_source_with_acr_task_e2e(self, resource_group): + source_path = os.path.join(TEST_DIR, os.path.join("data", "source_built_using_acr_task")) + ingress = 'external' + target_port = '8080' + create_and_verify_containerapp_create_and_update(self, resource_group=resource_group, source_path=source_path, ingress=ingress, target_port=target_port) + + @ResourceGroupPreparer(location="eastus") + def test_containerapp_create_source_and_repo_e2e(self, resource_group): + source_path = os.path.join(TEST_DIR, os.path.join("data", "source_built_using_dockerfile")) + repo = "https://github.com/test/repo" + err = ("Usage error: --source and --repo cannot be used together. Can either deploy from a local directory or a GitHub repository") + verify_containerapp_create_exception(self, resource_group=resource_group, err= err, source_path=source_path, repo=repo) + + @ResourceGroupPreparer(location="eastus") + def test_containerapp_create_source_and_yaml_e2e(self, resource_group): + source_path = os.path.join(TEST_DIR, os.path.join("data", "source_built_using_dockerfile")) + yaml = "./test.yaml" + err = ("Usage error: --source or --repo cannot be used with --yaml together. 
Can either deploy from a local directory or provide a yaml file") + verify_containerapp_create_exception(self, resource_group=resource_group, err=err, source_path=source_path, yaml=yaml) + + @ResourceGroupPreparer(location="eastus") + def test_containerapp_create_repo_and_yaml_e2e(self, resource_group): + repo = "https://github.com/test/repo" + yaml = "./test.yaml" + err = ("Usage error: --source or --repo cannot be used with --yaml together. Can either deploy from a local directory or provide a yaml file") + verify_containerapp_create_exception(self, resource_group=resource_group, err=err, repo = repo, yaml=yaml) + + @ResourceGroupPreparer(location="eastus") + def test_containerapp_create_repo_and_connected_environment_e2e(self, resource_group): + repo = "https://github.com/test/repo" + err = ("Usage error: --source or --repo cannot be used with --environment-type connectedEnvironment together. Please use --environment-type managedEnvironment") + verify_containerapp_create_exception(self, resource_group=resource_group, err=err, repo = repo, environment_type="connected") + + @ResourceGroupPreparer(location="eastus") + def test_containerapp_create_source_and_connected_environment_e2e(self, resource_group): + source_path = os.path.join(TEST_DIR, os.path.join("data", "source_built_using_dockerfile")) + err = ("Usage error: --source or --repo cannot be used with --environment-type connectedEnvironment together. Please use --environment-type managedEnvironment") + verify_containerapp_create_exception(self, resource_group=resource_group, err=err, source_path=source_path, environment_type="connected") + + @ResourceGroupPreparer(location="eastus") + def test_containerapp_create_repo_with_non_ACR_registry_server_e2e(self, resource_group): + repo = "https://github.com/test/repo" + registry_server = "docker.io" + registry_user = "test" + registry_pass = "test" + err = ("Usage error: --registry-server: expected an ACR registry (*.azurecr.io) for --repo") + verify_containerapp_create_exception(self, resource_group, err=err, repo=repo, registry_server=registry_server, registry_user=registry_user, registry_pass=registry_pass) + + # We have to use @live_only() here as cloud builder and build resource name is generated randomly + # and no matched request could be found for all builder/build ARM requests + @live_only() + @ResourceGroupPreparer() + def test_containerapp_create_and_update_with_env_vars_e2e(self, resource_group): + containerapp_name = self.create_random_name(prefix='aca', length=24) + source_path = os.path.join(TEST_DIR, os.path.join("data", "source_built_using_source_to_cloud_dotnet")) + create_and_verify_containerapp_create_and_update_env_vars(self, resource_group=resource_group, name=containerapp_name, source_path=source_path) + diff --git a/src/containerapp/azext_containerapp/tests/latest/test_containerapp_dapr_resiliency.py b/src/containerapp/azext_containerapp/tests/latest/test_containerapp_dapr_resiliency.py index 16d1739ccc7..12bc5b09f29 100644 --- a/src/containerapp/azext_containerapp/tests/latest/test_containerapp_dapr_resiliency.py +++ b/src/containerapp/azext_containerapp/tests/latest/test_containerapp_dapr_resiliency.py @@ -30,7 +30,7 @@ def test_containerapp_resiliency(self, resource_group): bad_rg = "bad-rg" bad_capp = "bad-capp" resil_policy_count = 1 - + create_containerapp_env(self, env_name, resource_group) self.cmd('containerapp create -g {} -n {} --environment {}'.format(resource_group, ca_name, env_name)) @@ -225,7 +225,7 @@ def test_dapr_component_resiliency(self, 
resource_group, subnet_id, vnet_name, s with open(dapr_file, 'w') as outfile: yaml.dump(daprloaded, outfile, default_flow_style=False) - + self.cmd('containerapp env dapr-component set -n {} -g {} --dapr-component-name {} --yaml {}'.format(env_name, resource_group, dapr_comp_name, dapr_file.replace(os.sep, os.sep + os.sep)), checks=[ JMESPathCheck('name', dapr_comp_name), ]) diff --git a/src/containerapp/azext_containerapp/tests/latest/test_containerapp_env_commands.py b/src/containerapp/azext_containerapp/tests/latest/test_containerapp_env_commands.py index ce7fb7f2fbe..1543a139fb3 100644 --- a/src/containerapp/azext_containerapp/tests/latest/test_containerapp_env_commands.py +++ b/src/containerapp/azext_containerapp/tests/latest/test_containerapp_env_commands.py @@ -39,7 +39,7 @@ def test_containerapp_env_identity_e2e(self, resource_group): env_name = self.create_random_name(prefix='containerapp-e2e-env', length=24) self.cmd('containerapp env create -g {} -n {} --mi-system-assigned --mi-user-assigned {} {} --logs-destination none'.format(resource_group, env_name, user_identity_id1, user_identity_id2)) - + containerapp_env = self.cmd('containerapp env show -g {} -n {}'.format(resource_group, env_name)).get_output_in_json() while containerapp_env["properties"]["provisioningState"].lower() == "waiting": time.sleep(5) @@ -90,7 +90,7 @@ def test_containerapp_env_identity_system(self, resource_group): self.cmd('configure --defaults location={}'.format(location)) env_name = self.create_random_name(prefix='containerapp-e2e-env', length=24) - + self.cmd('containerapp env create -g {} -n {} --mi-system-assigned --logs-destination none'.format(resource_group, env_name)) containerapp_env = self.cmd('containerapp env show -g {} -n {}'.format(resource_group, env_name)).get_output_in_json() @@ -150,12 +150,12 @@ def test_containerapp_env_identity_user(self, resource_group): self.cmd('containerapp env identity assign --system-assigned -g {} -n {}'.format(resource_group, env_name), checks=[ JMESPathCheck('type', 'SystemAssigned, UserAssigned'), ]) - + self.cmd('containerapp env identity remove --user-assigned {} -g {} -n {}'.format(user_identity_name1, resource_group, env_name), checks=[ JMESPathCheck('type', 'SystemAssigned, UserAssigned'), JMESPathCheckExists(f'userAssignedIdentities."{user_identity_id2}"') ]) - + self.cmd('containerapp env identity remove --user-assigned {} -g {} -n {}'.format(user_identity_name2, resource_group, env_name), checks=[ JMESPathCheck('type', 'SystemAssigned'), ]) @@ -205,11 +205,11 @@ def test_containerapp_env_msi_custom_domains(self, resource_group): hostname_1 = "{}.{}".format(subdomain_1, zone_name) self.cmd("appservice domain create -g {} --hostname {} --contact-info=@'{}' --accept-terms".format(resource_group, zone_name, contacts)).get_output_in_json() self.cmd('network dns record-set txt add-record -g {} -z {} -n {} -v {}'.format(resource_group, zone_name, txt_name_1, verification_id)).get_output_in_json() - + defaultPolicy = self.cmd("keyvault certificate get-default-policy").get_output_in_json() defaultPolicy["x509CertificateProperties"]["subject"] = f"CN=*.{hostname_1}" defaultPolicy["secretProperties"]["contentType"] = "application/x-pem-file" - + temp = tempfile.NamedTemporaryFile(prefix='capp_', suffix='_tmp', mode="w+", delete=False) temp.write(json.dumps(defaultPolicy, default=lambda o: dict((key, value) for key, value in o.__dict__.items() if value), allow_nan=False)) temp.close() @@ -292,7 +292,7 @@ def test_containerapp_env_msi_certificate(self, 
resource_group): defaultPolicy = self.cmd("keyvault certificate get-default-policy").get_output_in_json() defaultPolicy["x509CertificateProperties"]["subject"] = f"CN=*.contoso.com" defaultPolicy["secretProperties"]["contentType"] = "application/x-pem-file" - + temp = tempfile.NamedTemporaryFile(prefix='capp_', suffix='_tmp', mode="w+", delete=False) temp.write(json.dumps(defaultPolicy, default=lambda o: dict((key, value) for key, value in o.__dict__.items() if value), allow_nan=False)) temp.close() @@ -303,7 +303,7 @@ def test_containerapp_env_msi_certificate(self, resource_group): # create an environment with custom domain and user assigned identity self.cmd('containerapp env create -g {} -n {} --mi-system-assigned --logs-destination none'.format( resource_group, env_name)) - + containerapp_env = self.cmd('containerapp env show -g {} -n {}'.format(resource_group, env_name)).get_output_in_json() while containerapp_env["properties"]["provisioningState"].lower() == "waiting": time.sleep(5) @@ -312,7 +312,7 @@ def test_containerapp_env_msi_certificate(self, resource_group): principal_id = containerapp_env["identity"]["principalId"] roleAssignmentName2 = self.create_guid() self.cmd(f'role assignment create --role "Key Vault Secrets User" --assignee {principal_id} --scope {kv["id"]} --name {roleAssignmentName2}') - + containerapp_cert_name = self.create_random_name(prefix='containerapp-cert', length=24) cert = self.cmd(f"containerapp env certificate upload -g {resource_group} -n {env_name} -c {containerapp_cert_name} --akv-url {akv_secret_url}", checks=[ JMESPathCheck('type', "Microsoft.App/managedEnvironments/certificates"), @@ -338,7 +338,7 @@ def test_containerapp_env_msi_certificate(self, resource_group): JMESPathCheck('type', "Microsoft.App/managedEnvironments/certificates"), JMESPathCheck('properties.certificateKeyVaultProperties', None), ]) - + containerapp_cert_name = self.create_random_name(prefix='containerapp-cert', length=24) self.cmd(f"containerapp env certificate upload -g {resource_group} -n {env_name} -c {containerapp_cert_name} --akv-url {akv_secret_url}", checks=[ JMESPathCheck('type', "Microsoft.App/managedEnvironments/certificates"), @@ -370,7 +370,7 @@ def test_containerapp_env_msi_certificate_random_name(self, resource_group): defaultPolicy = self.cmd("keyvault certificate get-default-policy").get_output_in_json() defaultPolicy["x509CertificateProperties"]["subject"] = f"CN=*.contoso.com" defaultPolicy["secretProperties"]["contentType"] = "application/x-pem-file" - + temp = tempfile.NamedTemporaryFile(prefix='capp_', suffix='_tmp', mode="w+", delete=False) temp.write(json.dumps(defaultPolicy, default=lambda o: dict((key, value) for key, value in o.__dict__.items() if value), allow_nan=False)) temp.close() @@ -392,12 +392,12 @@ def test_containerapp_env_msi_certificate_random_name(self, resource_group): # create an environment with custom domain and user assigned identity self.cmd('containerapp env create -g {} -n {} --mi-user-assigned {} --logs-destination none'.format( resource_group, env_name, user_identity_id)) - + containerapp_env = self.cmd('containerapp env show -g {} -n {}'.format(resource_group, env_name)).get_output_in_json() while containerapp_env["properties"]["provisioningState"].lower() == "waiting": time.sleep(5) containerapp_env = self.cmd('containerapp env show -g {} -n {}'.format(resource_group, env_name)).get_output_in_json() - + cert = self.cmd(f"containerapp env certificate upload -g {resource_group} -n {env_name} --akv-url {akv_secret_url} --identity 
{user_identity_id}", checks=[ JMESPathCheck('type', "Microsoft.App/managedEnvironments/certificates"), ]).get_output_in_json() @@ -415,7 +415,7 @@ def test_containerapp_env_msi_certificate_random_name(self, resource_group): JMESPathCheck('[0].name', containerapp_cert_name), JMESPathCheck('[0].id', containerapp_cert_id), ]) - + class ContainerappEnvScenarioTest(ScenarioTest): @AllowLargeResponse(8192) diff --git a/src/containerapp/azext_containerapp/tests/latest/test_containerapp_env_telemetry.py b/src/containerapp/azext_containerapp/tests/latest/test_containerapp_env_telemetry.py index f5ddad358a2..fbdc9044378 100644 --- a/src/containerapp/azext_containerapp/tests/latest/test_containerapp_env_telemetry.py +++ b/src/containerapp/azext_containerapp/tests/latest/test_containerapp_env_telemetry.py @@ -47,7 +47,7 @@ def test_containerapp_env_telemetry_data_dog_e2e(self, resource_group, subnet_id while containerapp_env["properties"]["provisioningState"].lower() == "waiting": time.sleep(5) containerapp_env = self.cmd('containerapp env show -g {} -n {}'.format(resource_group, env_name)).get_output_in_json() - + self.cmd('containerapp env show -n {} -g {}'.format(env_name, resource_group), checks=[ JMESPathCheck('name', env_name), JMESPathCheck('properties.provisioningState', "Succeeded"), @@ -100,7 +100,7 @@ def test_containerapp_env_telemetry_data_dog_e2e(self, resource_group, subnet_id ]) self.cmd(f'containerapp env telemetry data-dog show -g {resource_group} -n {env_name}', expect_failure=True) - + self.cmd(f'containerapp env delete -g {resource_group} -n {env_name} --yes --no-wait') @@ -132,7 +132,7 @@ def test_containerapp_env_telemetry_app_insights_e2e(self, resource_group, subne while containerapp_env["properties"]["provisioningState"].lower() == "waiting": time.sleep(5) containerapp_env = self.cmd('containerapp env show -g {} -n {}'.format(resource_group, env_name)).get_output_in_json() - + self.cmd('containerapp env show -n {} -g {}'.format(env_name, resource_group), checks=[ JMESPathCheck('name', env_name), JMESPathCheck('properties.provisioningState', "Succeeded"), @@ -145,7 +145,7 @@ def test_containerapp_env_telemetry_app_insights_e2e(self, resource_group, subne JMESPathCheck('enableOpenTelemetryTraces', True), JMESPathCheck('connectionString', None), ]) - + self.cmd(f'containerapp env telemetry app-insights set -g {resource_group} -n {env_name} --enable-open-telemetry-traces false') containerapp_env = self.cmd('containerapp env show -g {} -n {}'.format(resource_group, env_name)).get_output_in_json() @@ -211,7 +211,7 @@ def test_containerapp_env_telemetry_otlp_e2e(self, resource_group, subnet_id): otlp_insecure = False otlp_headers = "api-key=test" otlp_incorrect_headers="key" - + self.cmd(f'containerapp env telemetry otlp add -g {resource_group} -n {env_name} --otlp-name {otlp_name} --endpoint {otlp_endpoint} --insecure {otlp_insecure} --headers {otlp_incorrect_headers} --enable-open-telemetry-traces true --enable-open-telemetry-logs true --enable-open-telemetry-metrics true', expect_failure=True) self.cmd(f'containerapp env telemetry otlp add -g {resource_group} -n {env_name} --otlp-name {otlp_name} --endpoint {otlp_endpoint} --insecure {otlp_insecure} --headers {otlp_headers} --enable-open-telemetry-traces true --enable-open-telemetry-logs true --enable-open-telemetry-metrics true') @@ -220,7 +220,7 @@ def test_containerapp_env_telemetry_otlp_e2e(self, resource_group, subnet_id): while containerapp_env["properties"]["provisioningState"].lower() == "waiting": time.sleep(5) 
containerapp_env = self.cmd('containerapp env show -g {} -n {}'.format(resource_group, env_name)).get_output_in_json() - + self.cmd('containerapp env show -n {} -g {}'.format(env_name, resource_group), checks=[ JMESPathCheck('name', env_name), JMESPathCheck('properties.provisioningState', "Succeeded"), @@ -249,7 +249,7 @@ def test_containerapp_env_telemetry_otlp_e2e(self, resource_group, subnet_id): while containerapp_env["properties"]["provisioningState"].lower() == "waiting": time.sleep(5) containerapp_env = self.cmd('containerapp env show -g {} -n {}'.format(resource_group, env_name)).get_output_in_json() - + self.cmd('containerapp env show -n {} -g {}'.format(env_name, resource_group), checks=[ JMESPathCheck('name', env_name), JMESPathCheck('properties.provisioningState', "Succeeded"), diff --git a/src/containerapp/azext_containerapp/tests/latest/test_containerapp_ingress_sticky_session.py b/src/containerapp/azext_containerapp/tests/latest/test_containerapp_ingress_sticky_session.py index 93fcb304f35..cc07adcae76 100644 --- a/src/containerapp/azext_containerapp/tests/latest/test_containerapp_ingress_sticky_session.py +++ b/src/containerapp/azext_containerapp/tests/latest/test_containerapp_ingress_sticky_session.py @@ -32,7 +32,7 @@ def test_containerapp_ingress_sticky_sessions_e2e(self, resource_group): ]) self.cmd("az containerapp create -g {} --target-port 80 --ingress external --image mcr.microsoft.com/k8se/quickstart:latest --environment {} -n {} ".format(resource_group, env_id, app)) - self.cmd("az containerapp ingress sticky-sessions set -n {} -g {} --affinity sticky".format(app, resource_group)) + self.cmd("az containerapp ingress sticky-sessions set -n {} -g {} --affinity sticky".format(app, resource_group)) self.cmd('containerapp show -g {} -n {}'.format(resource_group, app), checks=[ - JMESPathCheck('properties.configuration.ingress.stickySessions.affinity', "sticky"), + JMESPathCheck('properties.configuration.ingress.stickySessions.affinity', "sticky"), ]) \ No newline at end of file diff --git a/src/containerapp/azext_containerapp/tests/latest/test_containerapp_java_component.py b/src/containerapp/azext_containerapp/tests/latest/test_containerapp_java_component.py index 135c10eb890..24cf2faaa96 100644 --- a/src/containerapp/azext_containerapp/tests/latest/test_containerapp_java_component.py +++ b/src/containerapp/azext_containerapp/tests/latest/test_containerapp_java_component.py @@ -57,7 +57,7 @@ def test_containerapp_java_component(self, resource_group): # List Java Components java_component_list = self.cmd("containerapp env java-component list -g {} --environment {}".format(resource_group, env_name)).get_output_in_json() self.assertTrue(len(java_component_list) == 2) - + # Create SBA and bind with eureka self.cmd('containerapp env java-component admin-for-spring create -g {} -n {} --environment {} --min-replicas 2 --max-replicas 2 --configuration'.format(resource_group, sba_name, env_name), checks=[ JMESPathCheck('name', sba_name), @@ -67,7 +67,7 @@ def test_containerapp_java_component(self, resource_group): JMESPathCheck('properties.scale.minReplicas', 2), JMESPathCheck('properties.scale.maxReplicas', 2) ]) - + # List Java Components java_component_list = self.cmd("containerapp env java-component list -g {} --environment {}".format(resource_group, env_name)).get_output_in_json() self.assertTrue(len(java_component_list) == 3) @@ -165,7 +165,7 @@ def test_containerapp_java_component(self, resource_group): # Delete Java Components self.cmd('containerapp env java-component 
config-server-for-spring delete -g {} -n {} --environment {} --yes'.format(resource_group, config_name, env_name), expect_failure=False) - self.cmd('containerapp env java-component eureka-server-for-spring delete -g {} -n {} --environment {} --yes'.format(resource_group, eureka_name, env_name), expect_failure=False) + self.cmd('containerapp env java-component eureka-server-for-spring delete -g {} -n {} --environment {} --yes'.format(resource_group, eureka_name, env_name), expect_failure=False) self.cmd('containerapp env java-component admin-for-spring delete -g {} -n {} --environment {} --yes'.format(resource_group, sba_name, env_name), expect_failure=False) # List Java Components @@ -306,7 +306,7 @@ def test_containerapp_java_component_configurations(self, resource_group): JMESPathCheck('properties.provisioningState', "Succeeded"), JMESPathCheck('length(properties.configurations)', 2), ]) - + self.cmd( 'containerapp env java-component eureka-server-for-spring update -g {} -n {} --environment {} --set-configurations eureka.server.renewal-percent-threshold=0.95 eureka.server.renewal-threshold-update-interval-ms=1000'.format( resource_group, eureka_name, env_name), checks=[ @@ -343,7 +343,7 @@ def test_containerapp_java_component_configurations(self, resource_group): JMESPathCheck('length(properties.configurations)', 0) ]) - self.cmd('containerapp env java-component eureka-server-for-spring delete -g {} -n {} --environment {} --yes'.format(resource_group, eureka_name, env_name), expect_failure=False) + self.cmd('containerapp env java-component eureka-server-for-spring delete -g {} -n {} --environment {} --yes'.format(resource_group, eureka_name, env_name), expect_failure=False) # List Java Components java_component_list = self.cmd("containerapp env java-component list -g {} --environment {}".format(resource_group, env_name)).get_output_in_json() diff --git a/src/containerapp/azext_containerapp/tests/latest/test_containerapp_maintenance_config.py b/src/containerapp/azext_containerapp/tests/latest/test_containerapp_maintenance_config.py index 6c12d02426c..ebaf0612a51 100644 --- a/src/containerapp/azext_containerapp/tests/latest/test_containerapp_maintenance_config.py +++ b/src/containerapp/azext_containerapp/tests/latest/test_containerapp_maintenance_config.py @@ -25,10 +25,10 @@ class ContainerAppMaintenanceConfigTest(ScenarioTest): @ResourceGroupPreparer(location="eastus") @SubnetPreparer(location="centralus", delegations='Microsoft.App/environments', service_endpoints="Microsoft.Storage.Global") def test_containerapp_maintenanceconfig_crudoperations_e2e(self, resource_group, subnet_id): - + self.cmd('configure --defaults location={}'.format(TEST_LOCATION)) - - + + env_name = self.create_random_name(prefix='aca-maintenance-config-env', length=30) self.cmd('containerapp env create -g {} -n {} --location {} --logs-destination none --enable-workload-profiles -s {}'.format(resource_group, env_name, TEST_LOCATION, subnet_id)) @@ -70,7 +70,7 @@ def test_containerapp_maintenanceconfig_crudoperations_e2e(self, resource_group, # delete the Container App Maintenance Config resource self.cmd("az containerapp env maintenance-config remove --resource-group {} --environment {} -y".format(resource_group, env_name)) - + self.cmd("az containerapp env maintenance-config list --resource-group {} --environment {}".format(resource_group, env_name), checks=[ JMESPathCheck('length(@)', 0), ]) diff --git a/src/containerapp/azext_containerapp/tests/latest/test_containerapp_mount_secret_volume.py 
b/src/containerapp/azext_containerapp/tests/latest/test_containerapp_mount_secret_volume.py index b5561976551..c6543413496 100644 --- a/src/containerapp/azext_containerapp/tests/latest/test_containerapp_mount_secret_volume.py +++ b/src/containerapp/azext_containerapp/tests/latest/test_containerapp_mount_secret_volume.py @@ -24,7 +24,7 @@ def test_container_app_mount_secret_e2e(self, resource_group): create_containerapp_env(self, env, resource_group) self.cmd('containerapp env show -n {} -g {}'.format(env, resource_group), checks=[ - JMESPathCheck('name', env) + JMESPathCheck('name', env) ]) secretRef1 = "mysecret" @@ -32,21 +32,21 @@ def test_container_app_mount_secret_e2e(self, resource_group): secretRef2 = "anothersecret" secretValue2 = "secretvalue2" - self.cmd(f'az containerapp create -g {resource_group} --environment {env} -n {app} --secrets {secretRef1}={secretValue1} {secretRef2}={secretValue2} --secret-volume-mount "mnt/secrets"') - + self.cmd(f'az containerapp create -g {resource_group} --environment {env} -n {app} --secrets {secretRef1}={secretValue1} {secretRef2}={secretValue2} --secret-volume-mount "mnt/secrets"') + self.cmd('containerapp show -g {} -n {}'.format(resource_group, app), checks=[ - JMESPathCheck('properties.template.volumes[0].storageType', 'Secret'), + JMESPathCheck('properties.template.volumes[0].storageType', 'Secret'), # --secret-volume-mount mounts all secrets, not specific secrets, therefore no secrets should be returned. JMESPathCheck('properties.template.volumes[0].secrets', None), - JMESPathCheck('properties.template.containers[0].volumeMounts[0].mountPath', 'mnt/secrets'), + JMESPathCheck('properties.template.containers[0].volumeMounts[0].mountPath', 'mnt/secrets'), ]) # test using update to update the secret volume mount path - self.cmd(f'az containerapp update -n {app} -g {resource_group} --secret-volume-mount "mnt/newpath"') - + self.cmd(f'az containerapp update -n {app} -g {resource_group} --secret-volume-mount "mnt/newpath"') + self.cmd('containerapp show -g {} -n {}'.format(resource_group, app), checks=[ - JMESPathCheck('properties.template.containers[0].volumeMounts[0].mountPath', 'mnt/newpath'), + JMESPathCheck('properties.template.containers[0].volumeMounts[0].mountPath', 'mnt/newpath'), ]) - + @AllowLargeResponse(8192) @ResourceGroupPreparer(location="northcentralus") def test_container_app_mount_secret_update_e2e(self, resource_group): @@ -64,21 +64,21 @@ def test_container_app_mount_secret_update_e2e(self, resource_group): secretValue2 = "secretvalue2" self.cmd('containerapp env show -n {} -g {}'.format(env, resource_group), checks=[ - JMESPathCheck('name', env) + JMESPathCheck('name', env) ]) self.cmd(f'az containerapp create -g {resource_group} --environment {env} -n {app} --secrets {secretRef1}={secretValue1} {secretRef2}={secretValue2}') - + self.cmd('containerapp show -g {} -n {}'.format(resource_group, app), checks=[ - JMESPathCheck('properties.template.volumes', None), + JMESPathCheck('properties.template.volumes', None), ]) self.cmd(f'az containerapp update -n {app} -g {resource_group} --secret-volume-mount "mnt/secrets"') - + self.cmd('containerapp show -g {} -n {}'.format(resource_group, app), checks=[ - JMESPathCheck('properties.template.volumes[0].storageType', 'Secret'), + JMESPathCheck('properties.template.volumes[0].storageType', 'Secret'), JMESPathCheck('properties.template.volumes[0].secrets', None), - JMESPathCheck('properties.template.containers[0].volumeMounts[0].mountPath', 'mnt/secrets'), + 
JMESPathCheck('properties.template.containers[0].volumeMounts[0].mountPath', 'mnt/secrets'), ]) diff --git a/src/containerapp/azext_containerapp/tests/latest/test_containerapp_session_code_interpreter.py b/src/containerapp/azext_containerapp/tests/latest/test_containerapp_session_code_interpreter.py index ddd95678003..a36d9a0c3fe 100644 --- a/src/containerapp/azext_containerapp/tests/latest/test_containerapp_session_code_interpreter.py +++ b/src/containerapp/azext_containerapp/tests/latest/test_containerapp_session_code_interpreter.py @@ -110,6 +110,6 @@ def test_containerapp_session_code_interpreter_e2e(self, resource_group): sessionpool_name_python, resource_group, )) - + sessionpool_list = self.cmd("containerapp sessionpool list -g {}".format(resource_group)).get_output_in_json() self.assertTrue(len(sessionpool_list) == 0) \ No newline at end of file diff --git a/src/containerapp/azext_containerapp/tests/latest/test_containerapp_session_code_interpreter_files.py b/src/containerapp/azext_containerapp/tests/latest/test_containerapp_session_code_interpreter_files.py index c50f9ce1979..f0e4a1ee031 100644 --- a/src/containerapp/azext_containerapp/tests/latest/test_containerapp_session_code_interpreter_files.py +++ b/src/containerapp/azext_containerapp/tests/latest/test_containerapp_session_code_interpreter_files.py @@ -117,6 +117,6 @@ def test_containerapp_session_code_interpreter_files_e2e(self, resource_group): sessionpool_name_python, resource_group, )) - + sessionpool_list = self.cmd("containerapp sessionpool list -g {}".format(resource_group)).get_output_in_json() self.assertTrue(len(sessionpool_list) == 0) \ No newline at end of file diff --git a/src/containerapp/azext_containerapp/tests/latest/test_containerapp_session_code_interpreter_nodelts.py b/src/containerapp/azext_containerapp/tests/latest/test_containerapp_session_code_interpreter_nodelts.py index 9198b75d28d..df0591ddbd4 100644 --- a/src/containerapp/azext_containerapp/tests/latest/test_containerapp_session_code_interpreter_nodelts.py +++ b/src/containerapp/azext_containerapp/tests/latest/test_containerapp_session_code_interpreter_nodelts.py @@ -110,6 +110,6 @@ def test_containerapp_session_code_interpreter_nodelts_e2e(self, resource_group) sessionpool_name_nodelts, resource_group, )) - + sessionpool_list = self.cmd("containerapp sessionpool list -g {}".format(resource_group)).get_output_in_json() self.assertTrue(len(sessionpool_list) == 0) \ No newline at end of file diff --git a/src/containerapp/azext_containerapp/tests/latest/test_containerapp_sessionpool.py b/src/containerapp/azext_containerapp/tests/latest/test_containerapp_sessionpool.py index 159b02e266c..51b7a1045d7 100644 --- a/src/containerapp/azext_containerapp/tests/latest/test_containerapp_sessionpool.py +++ b/src/containerapp/azext_containerapp/tests/latest/test_containerapp_sessionpool.py @@ -312,7 +312,7 @@ def test_containerapp_sessionpool_registry_identity(self, resource_group, subnet identity_json = self.cmd('identity create -g {} -n {} -l {}'.format(resource_group, user_identity_name, location)).get_output_in_json() user_identity_id = identity_json["id"] principal_id = identity_json["principalId"] - + env_name = self.create_random_name(prefix='aca-sp-env-registry', length=24) self.cmd('containerapp env create -g {} -n {} -l {} --logs-destination none -s {}'.format(resource_group, env_name, location, subnet_id), expect_failure=False) containerapp_env = self.cmd('containerapp env show -g {} -n {}'.format(resource_group, env_name)).get_output_in_json() @@ 
-328,10 +328,10 @@ def test_containerapp_sessionpool_registry_identity(self, resource_group, subnet self.cmd(f'acr import -n {acr} --source {image_source}') roleAssignmentName = self.create_guid() self.cmd(f'role assignment create --role acrpull --assignee {principal_id} --scope {acr_resource_id} --name {roleAssignmentName}') - + # wait for role assignment take effect time.sleep(30) - + # Create CustomContainer SessionPool sessionpool_name_custom = self.create_random_name(prefix='spcc', length=24) ready_instances = 2 @@ -371,7 +371,7 @@ def test_containerapp_sessionpool_registry_identity(self, resource_group, subnet JMESPathCheck("properties.managedIdentitySettings[0].identity", user_identity_id), JMESPathCheck("properties.managedIdentitySettings[0].lifecycle", "None"), ]) - + # Update session pool image self.cmd( f'containerapp sessionpool update -g {resource_group} -n {sessionpool_name_custom} -l {location} --image {image_name} --registry-server {acr}.azurecr.io --registry-identity {user_identity_id}', @@ -382,7 +382,7 @@ def test_containerapp_sessionpool_registry_identity(self, resource_group, subnet JMESPathCheck("properties.managedIdentitySettings[0].identity", user_identity_id), JMESPathCheck("properties.managedIdentitySettings[0].lifecycle", "None"), ]) - + self.cmd('containerapp sessionpool delete -g {} -n {} --yes'.format(resource_group, sessionpool_name_custom)) @@ -506,7 +506,7 @@ def test_containerapp_sessionpool_oncontainerexit(self, resource_group, subnet_i service_endpoints="Microsoft.Storage.Global") def test_containerapp_sessionpool_health_probe(self, resource_group, subnet_id, vnet_name, subnet_name): location = TEST_LOCATION - + self.cmd('configure --defaults location={}'.format(location)) env_name = self.create_random_name(prefix='aca-sp-env-probe', length=24) diff --git a/src/containerapp/azext_containerapp/tests/latest/test_containerappjob_crud.py b/src/containerapp/azext_containerapp/tests/latest/test_containerappjob_crud.py index 4323fa78821..63b45768959 100644 --- a/src/containerapp/azext_containerapp/tests/latest/test_containerappjob_crud.py +++ b/src/containerapp/azext_containerapp/tests/latest/test_containerappjob_crud.py @@ -22,7 +22,7 @@ class ContainerAppJobsCRUDOperationsTest(ScenarioTest): @ResourceGroupPreparer(location="northcentralus") # test for CRUD operations on Container App Job resource with trigger type as manual def test_containerapp_manualjob_crudoperations_e2e(self, resource_group): - + self.cmd('configure --defaults location={}'.format(TEST_LOCATION)) job = self.create_random_name(prefix='job1', length=24) @@ -80,7 +80,7 @@ def test_containerapp_manualjob_crudoperations_e2e(self, resource_group): # verify the Container App Job resource is deleted jobs_list = self.cmd("az containerapp job list --resource-group {}".format(resource_group)).get_output_in_json() self.assertTrue(len(jobs_list) == 0) - + ## test for CRUD operations on Container App Job resource with trigger type as schedule job2 = self.create_random_name(prefix='job2', length=24) @@ -197,7 +197,7 @@ def test_containerapp_manualjob_registry_acr_look_up_credentical(self, resource_ @AllowLargeResponse(8192) @ResourceGroupPreparer(location="northcentralus") # test for CRUD operations on Container App Job resource with trigger type as manual - def test_containerapp_eventjob_defaults_e2e(self, resource_group): + def test_containerapp_eventjob_defaults_e2e(self, resource_group): self.cmd('configure --defaults location={}'.format(TEST_LOCATION)) job = self.create_random_name(prefix='job3', 
length=24) @@ -232,7 +232,7 @@ def test_containerapp_eventjob_defaults_e2e(self, resource_group): @AllowLargeResponse(8192) @ResourceGroupPreparer(location="northcentralus") # test for CRUD operations on Container App Job resource with trigger type as manual - def test_containerapp_manualjob_defaults_e2e(self, resource_group): + def test_containerapp_manualjob_defaults_e2e(self, resource_group): self.cmd('configure --defaults location={}'.format(TEST_LOCATION)) job = self.create_random_name(prefix='job4', length=24) @@ -264,7 +264,7 @@ def test_containerapp_manualjob_defaults_e2e(self, resource_group): @AllowLargeResponse(8192) @ResourceGroupPreparer(location="northcentralus") # test for CRUD operations on Container App Job resource with trigger type as manual - def test_containerapp_cronjob_defaults_e2e(self, resource_group): + def test_containerapp_cronjob_defaults_e2e(self, resource_group): self.cmd('configure --defaults location={}'.format(TEST_LOCATION)) job = self.create_random_name(prefix='job5', length=24) diff --git a/src/containerapp/azext_containerapp/tests/latest/test_containerappjob_event_triggered_crud.py b/src/containerapp/azext_containerapp/tests/latest/test_containerappjob_event_triggered_crud.py index bdd0aff5017..546c3e29aaf 100644 --- a/src/containerapp/azext_containerapp/tests/latest/test_containerappjob_event_triggered_crud.py +++ b/src/containerapp/azext_containerapp/tests/latest/test_containerappjob_event_triggered_crud.py @@ -21,7 +21,7 @@ class ContainerAppJobsCRUDOperationsTest(ScenarioTest): @ResourceGroupPreparer(location="northcentralus") # test for CRUD operations on Container App Job resource with trigger type as event def test_containerapp_eventjob_crudoperations_e2e(self, resource_group): - + self.cmd('configure --defaults location={}'.format(TEST_LOCATION)) job = self.create_random_name(prefix='job1', length=24) diff --git a/src/containerapp/azext_containerapp/tests/latest/test_containerappjob_executions.py b/src/containerapp/azext_containerapp/tests/latest/test_containerappjob_executions.py index 2702194ddcb..372cc0f211b 100644 --- a/src/containerapp/azext_containerapp/tests/latest/test_containerappjob_executions.py +++ b/src/containerapp/azext_containerapp/tests/latest/test_containerappjob_executions.py @@ -22,7 +22,7 @@ class ContainerAppJobsExecutionsTest(ScenarioTest): @AllowLargeResponse(8192) @ResourceGroupPreparer(location="northcentralus") def test_containerapp_job_executionstest_e2e(self, resource_group): - + self.cmd('configure --defaults location={}'.format(TEST_LOCATION)) job = self.create_random_name(prefix='job2', length=24) @@ -59,11 +59,11 @@ def test_containerapp_job_executionstest_e2e(self, resource_group): # get list of all executions for the job executionList = self.cmd("az containerapp job execution list --resource-group {} --name {}".format(resource_group, job)).get_output_in_json() self.assertTrue(len(executionList) == 1) - + # get single execution for the job singleExecution = self.cmd("az containerapp job execution show --resource-group {} --name {} --job-execution-name {}".format(resource_group, job, execution['name'])).get_output_in_json() self.assertEqual(job in singleExecution['name'], True) - + # start a job execution and stop it execution = self.cmd("az containerapp job start --resource-group {} --name {}".format(resource_group, job)).get_output_in_json() if "id" in execution: @@ -75,7 +75,7 @@ def test_containerapp_job_executionstest_e2e(self, resource_group): # stop the most recently started execution self.cmd("az 
containerapp job stop --resource-group {} --name {} --job-execution-name {}".format(resource_group, job, execution['name'])).get_output_in_json() - + # get stopped execution for the job and check status singleExecution = self.cmd("az containerapp job execution show --resource-group {} --name {} --job-execution-name {}".format(resource_group, job, execution['name'])).get_output_in_json() self.assertEqual(job in singleExecution['name'], True) @@ -157,7 +157,7 @@ def test_containerapp_job_custom_executionstest_e2e(self, resource_group): # start job execution with yaml file execution = self.cmd("az containerapp job start --resource-group {} --name {} --yaml {}".format(resource_group, job, containerappjob_file_name)).get_output_in_json() self.assertEqual(job in execution['id'], True) - self.assertEqual(job in execution['name'], True) + self.assertEqual(job in execution['name'], True) # get the execution and check if the custom container information is present self.cmd("az containerapp job execution show --resource-group {} --name {} --job-execution-name {}".format(resource_group, job, execution['name']), checks=[ @@ -209,7 +209,7 @@ def test_containerappjob_logstream(self, resource_group): # get list of all executions for the job executionList = self.cmd("az containerapp job execution list --resource-group {} --name {}".format(resource_group, job)).get_output_in_json() self.assertTrue(len(executionList) == 1) - + replicas = self.cmd("az containerapp job replica list -g {} -n {} --execution {}".format(resource_group, job, execution['name'])).get_output_in_json() print(replicas) diff --git a/src/containerapp/azext_containerapp/tests/latest/test_containerappjob_with_identity.py b/src/containerapp/azext_containerapp/tests/latest/test_containerappjob_with_identity.py index 65e9418ce13..9c498f89a11 100644 --- a/src/containerapp/azext_containerapp/tests/latest/test_containerappjob_with_identity.py +++ b/src/containerapp/azext_containerapp/tests/latest/test_containerappjob_with_identity.py @@ -186,7 +186,7 @@ def test_containerappjob_identity_registry_env_msi(self, resource_group): user_identity_name = self.create_random_name(prefix='env-msi', length=24) identity_json = self.cmd('identity create -g {} -n {}'.format(resource_group, user_identity_name)).get_output_in_json() user_identity_id = identity_json["id"] - + self.cmd('containerapp env create -g {} -n {} --mi-system-assigned --mi-user-assigned {} --logs-destination none'.format(resource_group, env_name, user_identity_id)) containerapp_env = self.cmd('containerapp env show -g {} -n {}'.format(resource_group, env_name)).get_output_in_json() while containerapp_env["properties"]["provisioningState"].lower() == "waiting": diff --git a/src/containerapp/azext_containerapp/tests/latest/test_containerappjob_with_secrets.py b/src/containerapp/azext_containerapp/tests/latest/test_containerappjob_with_secrets.py index 5c34402867a..8adad6e9293 100644 --- a/src/containerapp/azext_containerapp/tests/latest/test_containerappjob_with_secrets.py +++ b/src/containerapp/azext_containerapp/tests/latest/test_containerappjob_with_secrets.py @@ -99,4 +99,3 @@ def test_containerapp_manualjob_withsecret_crudoperations_e2e(self, resource_gro JMESPathCheck('[0].value', "testsecretvaluev2"), ]).get_output_in_json() self.assertTrue(len(job_secret_list) == 1) - \ No newline at end of file diff --git a/src/containerapp/azext_containerapp/vendored_sdks/kubernetesconfiguration/v2022_03_01/aio/operations/_extensions_operations.py 
b/src/containerapp/azext_containerapp/vendored_sdks/kubernetesconfiguration/v2022_03_01/aio/operations/_extensions_operations.py index bba4b3105d5..852b089afc6 100644 --- a/src/containerapp/azext_containerapp/vendored_sdks/kubernetesconfiguration/v2022_03_01/aio/operations/_extensions_operations.py +++ b/src/containerapp/azext_containerapp/vendored_sdks/kubernetesconfiguration/v2022_03_01/aio/operations/_extensions_operations.py @@ -224,7 +224,7 @@ async def get( } error_map.update(kwargs.pop('error_map', {})) - + request = build_get_request( subscription_id=self._config.subscription_id, resource_group_name=resource_group_name, @@ -271,7 +271,7 @@ async def _delete_initial( } error_map.update(kwargs.pop('error_map', {})) - + request = build_delete_request_initial( subscription_id=self._config.subscription_id, resource_group_name=resource_group_name, @@ -550,7 +550,7 @@ def list( error_map.update(kwargs.pop('error_map', {})) def prepare_request(next_link=None): if not next_link: - + request = build_list_request( subscription_id=self._config.subscription_id, resource_group_name=resource_group_name, @@ -563,7 +563,7 @@ def prepare_request(next_link=None): request.url = self._client.format_url(request.url) else: - + request = build_list_request( subscription_id=self._config.subscription_id, resource_group_name=resource_group_name, diff --git a/src/containerapp/azext_containerapp/vendored_sdks/kubernetesconfiguration/v2022_03_01/aio/operations/_flux_config_operation_status_operations.py b/src/containerapp/azext_containerapp/vendored_sdks/kubernetesconfiguration/v2022_03_01/aio/operations/_flux_config_operation_status_operations.py index 53766735ba9..0df750bcec0 100644 --- a/src/containerapp/azext_containerapp/vendored_sdks/kubernetesconfiguration/v2022_03_01/aio/operations/_flux_config_operation_status_operations.py +++ b/src/containerapp/azext_containerapp/vendored_sdks/kubernetesconfiguration/v2022_03_01/aio/operations/_flux_config_operation_status_operations.py @@ -82,7 +82,7 @@ async def get( } error_map.update(kwargs.pop('error_map', {})) - + request = build_get_request( subscription_id=self._config.subscription_id, resource_group_name=resource_group_name, diff --git a/src/containerapp/azext_containerapp/vendored_sdks/kubernetesconfiguration/v2022_03_01/aio/operations/_flux_configurations_operations.py b/src/containerapp/azext_containerapp/vendored_sdks/kubernetesconfiguration/v2022_03_01/aio/operations/_flux_configurations_operations.py index 80b175ce087..a19975ef468 100644 --- a/src/containerapp/azext_containerapp/vendored_sdks/kubernetesconfiguration/v2022_03_01/aio/operations/_flux_configurations_operations.py +++ b/src/containerapp/azext_containerapp/vendored_sdks/kubernetesconfiguration/v2022_03_01/aio/operations/_flux_configurations_operations.py @@ -83,7 +83,7 @@ async def get( } error_map.update(kwargs.pop('error_map', {})) - + request = build_get_request( subscription_id=self._config.subscription_id, resource_group_name=resource_group_name, @@ -410,7 +410,7 @@ async def _delete_initial( } error_map.update(kwargs.pop('error_map', {})) - + request = build_delete_request_initial( subscription_id=self._config.subscription_id, resource_group_name=resource_group_name, @@ -553,7 +553,7 @@ def list( error_map.update(kwargs.pop('error_map', {})) def prepare_request(next_link=None): if not next_link: - + request = build_list_request( subscription_id=self._config.subscription_id, resource_group_name=resource_group_name, @@ -566,7 +566,7 @@ def prepare_request(next_link=None): request.url = 
self._client.format_url(request.url) else: - + request = build_list_request( subscription_id=self._config.subscription_id, resource_group_name=resource_group_name, diff --git a/src/containerapp/azext_containerapp/vendored_sdks/kubernetesconfiguration/v2022_03_01/aio/operations/_operation_status_operations.py b/src/containerapp/azext_containerapp/vendored_sdks/kubernetesconfiguration/v2022_03_01/aio/operations/_operation_status_operations.py index c190ec1e38b..f5372eca72b 100644 --- a/src/containerapp/azext_containerapp/vendored_sdks/kubernetesconfiguration/v2022_03_01/aio/operations/_operation_status_operations.py +++ b/src/containerapp/azext_containerapp/vendored_sdks/kubernetesconfiguration/v2022_03_01/aio/operations/_operation_status_operations.py @@ -84,7 +84,7 @@ async def get( } error_map.update(kwargs.pop('error_map', {})) - + request = build_get_request( subscription_id=self._config.subscription_id, resource_group_name=resource_group_name, @@ -150,7 +150,7 @@ def list( error_map.update(kwargs.pop('error_map', {})) def prepare_request(next_link=None): if not next_link: - + request = build_list_request( subscription_id=self._config.subscription_id, resource_group_name=resource_group_name, @@ -163,7 +163,7 @@ def prepare_request(next_link=None): request.url = self._client.format_url(request.url) else: - + request = build_list_request( subscription_id=self._config.subscription_id, resource_group_name=resource_group_name, diff --git a/src/containerapp/azext_containerapp/vendored_sdks/kubernetesconfiguration/v2022_03_01/aio/operations/_operations.py b/src/containerapp/azext_containerapp/vendored_sdks/kubernetesconfiguration/v2022_03_01/aio/operations/_operations.py index 3131058170a..cf46f54cbeb 100644 --- a/src/containerapp/azext_containerapp/vendored_sdks/kubernetesconfiguration/v2022_03_01/aio/operations/_operations.py +++ b/src/containerapp/azext_containerapp/vendored_sdks/kubernetesconfiguration/v2022_03_01/aio/operations/_operations.py @@ -67,7 +67,7 @@ def list( error_map.update(kwargs.pop('error_map', {})) def prepare_request(next_link=None): if not next_link: - + request = build_list_request( template_url=self.list.metadata['url'], ) @@ -75,7 +75,7 @@ def prepare_request(next_link=None): request.url = self._client.format_url(request.url) else: - + request = build_list_request( template_url=next_link, ) diff --git a/src/containerapp/azext_containerapp/vendored_sdks/kubernetesconfiguration/v2022_03_01/aio/operations/_source_control_configurations_operations.py b/src/containerapp/azext_containerapp/vendored_sdks/kubernetesconfiguration/v2022_03_01/aio/operations/_source_control_configurations_operations.py index 71375d427f2..a175d15423d 100644 --- a/src/containerapp/azext_containerapp/vendored_sdks/kubernetesconfiguration/v2022_03_01/aio/operations/_source_control_configurations_operations.py +++ b/src/containerapp/azext_containerapp/vendored_sdks/kubernetesconfiguration/v2022_03_01/aio/operations/_source_control_configurations_operations.py @@ -83,7 +83,7 @@ async def get( } error_map.update(kwargs.pop('error_map', {})) - + request = build_get_request( subscription_id=self._config.subscription_id, resource_group_name=resource_group_name, @@ -208,7 +208,7 @@ async def _delete_initial( } error_map.update(kwargs.pop('error_map', {})) - + request = build_delete_request_initial( subscription_id=self._config.subscription_id, resource_group_name=resource_group_name, @@ -345,7 +345,7 @@ def list( error_map.update(kwargs.pop('error_map', {})) def prepare_request(next_link=None): if not 
next_link: - + request = build_list_request( subscription_id=self._config.subscription_id, resource_group_name=resource_group_name, @@ -358,7 +358,7 @@ def prepare_request(next_link=None): request.url = self._client.format_url(request.url) else: - + request = build_list_request( subscription_id=self._config.subscription_id, resource_group_name=resource_group_name, diff --git a/src/containerapp/azext_containerapp/vendored_sdks/kubernetesconfiguration/v2022_03_01/operations/_extensions_operations.py b/src/containerapp/azext_containerapp/vendored_sdks/kubernetesconfiguration/v2022_03_01/operations/_extensions_operations.py index 5ebf5788b77..1964fa6edea 100644 --- a/src/containerapp/azext_containerapp/vendored_sdks/kubernetesconfiguration/v2022_03_01/operations/_extensions_operations.py +++ b/src/containerapp/azext_containerapp/vendored_sdks/kubernetesconfiguration/v2022_03_01/operations/_extensions_operations.py @@ -450,7 +450,7 @@ def get( } error_map.update(kwargs.pop('error_map', {})) - + request = build_get_request( subscription_id=self._config.subscription_id, resource_group_name=resource_group_name, @@ -497,7 +497,7 @@ def _delete_initial( } error_map.update(kwargs.pop('error_map', {})) - + request = build_delete_request_initial( subscription_id=self._config.subscription_id, resource_group_name=resource_group_name, @@ -775,7 +775,7 @@ def list( error_map.update(kwargs.pop('error_map', {})) def prepare_request(next_link=None): if not next_link: - + request = build_list_request( subscription_id=self._config.subscription_id, resource_group_name=resource_group_name, @@ -788,7 +788,7 @@ def prepare_request(next_link=None): request.url = self._client.format_url(request.url) else: - + request = build_list_request( subscription_id=self._config.subscription_id, resource_group_name=resource_group_name, diff --git a/src/containerapp/azext_containerapp/vendored_sdks/kubernetesconfiguration/v2022_03_01/operations/_flux_config_operation_status_operations.py b/src/containerapp/azext_containerapp/vendored_sdks/kubernetesconfiguration/v2022_03_01/operations/_flux_config_operation_status_operations.py index aa9a53eca61..11bad489514 100644 --- a/src/containerapp/azext_containerapp/vendored_sdks/kubernetesconfiguration/v2022_03_01/operations/_flux_config_operation_status_operations.py +++ b/src/containerapp/azext_containerapp/vendored_sdks/kubernetesconfiguration/v2022_03_01/operations/_flux_config_operation_status_operations.py @@ -127,7 +127,7 @@ def get( } error_map.update(kwargs.pop('error_map', {})) - + request = build_get_request( subscription_id=self._config.subscription_id, resource_group_name=resource_group_name, diff --git a/src/containerapp/azext_containerapp/vendored_sdks/kubernetesconfiguration/v2022_03_01/operations/_flux_configurations_operations.py b/src/containerapp/azext_containerapp/vendored_sdks/kubernetesconfiguration/v2022_03_01/operations/_flux_configurations_operations.py index cb2bddb55aa..e607ab27b52 100644 --- a/src/containerapp/azext_containerapp/vendored_sdks/kubernetesconfiguration/v2022_03_01/operations/_flux_configurations_operations.py +++ b/src/containerapp/azext_containerapp/vendored_sdks/kubernetesconfiguration/v2022_03_01/operations/_flux_configurations_operations.py @@ -310,7 +310,7 @@ def get( } error_map.update(kwargs.pop('error_map', {})) - + request = build_get_request( subscription_id=self._config.subscription_id, resource_group_name=resource_group_name, @@ -637,7 +637,7 @@ def _delete_initial( } error_map.update(kwargs.pop('error_map', {})) - + request = 
build_delete_request_initial( subscription_id=self._config.subscription_id, resource_group_name=resource_group_name, @@ -780,7 +780,7 @@ def list( error_map.update(kwargs.pop('error_map', {})) def prepare_request(next_link=None): if not next_link: - + request = build_list_request( subscription_id=self._config.subscription_id, resource_group_name=resource_group_name, @@ -793,7 +793,7 @@ def prepare_request(next_link=None): request.url = self._client.format_url(request.url) else: - + request = build_list_request( subscription_id=self._config.subscription_id, resource_group_name=resource_group_name, diff --git a/src/containerapp/azext_containerapp/vendored_sdks/kubernetesconfiguration/v2022_03_01/operations/_operation_status_operations.py b/src/containerapp/azext_containerapp/vendored_sdks/kubernetesconfiguration/v2022_03_01/operations/_operation_status_operations.py index 74483698fc0..5d4c1383d0e 100644 --- a/src/containerapp/azext_containerapp/vendored_sdks/kubernetesconfiguration/v2022_03_01/operations/_operation_status_operations.py +++ b/src/containerapp/azext_containerapp/vendored_sdks/kubernetesconfiguration/v2022_03_01/operations/_operation_status_operations.py @@ -167,7 +167,7 @@ def get( } error_map.update(kwargs.pop('error_map', {})) - + request = build_get_request( subscription_id=self._config.subscription_id, resource_group_name=resource_group_name, @@ -233,7 +233,7 @@ def list( error_map.update(kwargs.pop('error_map', {})) def prepare_request(next_link=None): if not next_link: - + request = build_list_request( subscription_id=self._config.subscription_id, resource_group_name=resource_group_name, @@ -246,7 +246,7 @@ def prepare_request(next_link=None): request.url = self._client.format_url(request.url) else: - + request = build_list_request( subscription_id=self._config.subscription_id, resource_group_name=resource_group_name, diff --git a/src/containerapp/azext_containerapp/vendored_sdks/kubernetesconfiguration/v2022_03_01/operations/_operations.py b/src/containerapp/azext_containerapp/vendored_sdks/kubernetesconfiguration/v2022_03_01/operations/_operations.py index 71758018e4f..724a6421388 100644 --- a/src/containerapp/azext_containerapp/vendored_sdks/kubernetesconfiguration/v2022_03_01/operations/_operations.py +++ b/src/containerapp/azext_containerapp/vendored_sdks/kubernetesconfiguration/v2022_03_01/operations/_operations.py @@ -93,7 +93,7 @@ def list( error_map.update(kwargs.pop('error_map', {})) def prepare_request(next_link=None): if not next_link: - + request = build_list_request( template_url=self.list.metadata['url'], ) @@ -101,7 +101,7 @@ def prepare_request(next_link=None): request.url = self._client.format_url(request.url) else: - + request = build_list_request( template_url=next_link, ) diff --git a/src/containerapp/azext_containerapp/vendored_sdks/kubernetesconfiguration/v2022_03_01/operations/_source_control_configurations_operations.py b/src/containerapp/azext_containerapp/vendored_sdks/kubernetesconfiguration/v2022_03_01/operations/_source_control_configurations_operations.py index 300824b2190..6895327a72e 100644 --- a/src/containerapp/azext_containerapp/vendored_sdks/kubernetesconfiguration/v2022_03_01/operations/_source_control_configurations_operations.py +++ b/src/containerapp/azext_containerapp/vendored_sdks/kubernetesconfiguration/v2022_03_01/operations/_source_control_configurations_operations.py @@ -256,7 +256,7 @@ def get( } error_map.update(kwargs.pop('error_map', {})) - + request = build_get_request( subscription_id=self._config.subscription_id, 
resource_group_name=resource_group_name, @@ -381,7 +381,7 @@ def _delete_initial( } error_map.update(kwargs.pop('error_map', {})) - + request = build_delete_request_initial( subscription_id=self._config.subscription_id, resource_group_name=resource_group_name, @@ -518,7 +518,7 @@ def list( error_map.update(kwargs.pop('error_map', {})) def prepare_request(next_link=None): if not next_link: - + request = build_list_request( subscription_id=self._config.subscription_id, resource_group_name=resource_group_name, @@ -531,7 +531,7 @@ def prepare_request(next_link=None): request.url = self._client.format_url(request.url) else: - + request = build_list_request( subscription_id=self._config.subscription_id, resource_group_name=resource_group_name, From 3337a0316a4e7b48c8424f8355be54ec0e1c6057 Mon Sep 17 00:00:00 2001 From: Simon Jakesch Date: Sat, 15 Nov 2025 14:02:43 -0600 Subject: [PATCH 05/10] added app logging and cleaup --- .../azext_containerapp/_compose_utils.py | 44 +++----------- src/containerapp/azext_containerapp/custom.py | 60 +++++++------------ 2 files changed, 28 insertions(+), 76 deletions(-) diff --git a/src/containerapp/azext_containerapp/_compose_utils.py b/src/containerapp/azext_containerapp/_compose_utils.py index a424daebeb6..446413dcfc0 100644 --- a/src/containerapp/azext_containerapp/_compose_utils.py +++ b/src/containerapp/azext_containerapp/_compose_utils.py @@ -847,8 +847,6 @@ def create_gpu_workload_profile_if_needed( logger = get_logger(__name__) # Check if environment already has a GPU profile that matches the request - logger.warning( - f"[GPU DEBUG] create_gpu_workload_profile_if_needed called with requested_gpu_profile_type: {requested_gpu_profile_type}") try: # Use class methods directly - no instantiation needed env = ManagedEnvironmentClient.show(cmd, resource_group_name, env_name) @@ -893,17 +891,11 @@ def create_gpu_workload_profile_if_needed( # The API returns profiles with 'name' field containing the workload # profile type available_types = [p.get('name') for p in available_gpu] - logger.warning( - f"[GPU DEBUG] Requested GPU profile: {requested_gpu_profile_type}") - logger.warning( - f"[GPU DEBUG] Available GPU profiles in {location}: {available_types}") # Pass through the requested type as-is - it's already the API value # (e.g., Consumption-GPU-NC8as-T4, Consumption-GPU-NC24-A100, Consumption, Flex) if requested_gpu_profile_type in available_types: gpu_profile_type = requested_gpu_profile_type - logger.warning( - f"[GPU DEBUG] ✅ Requested profile IS available, using: {gpu_profile_type}") logger.info( f"Using requested GPU profile type: {gpu_profile_type}") else: @@ -911,8 +903,6 @@ def create_gpu_workload_profile_if_needed( logger.error( f"Requested GPU profile '{requested_gpu_profile_type}' not available in {location}") logger.error(f"Available GPU profiles: {available_types}") - logger.warning( - "[GPU DEBUG] ❌ Requested profile NOT available - will raise error") raise ResourceNotFoundError( f"Requested workload profile type '{requested_gpu_profile_type}' is not available in region '{location}'. 
" f"Available types: {', '.join(available_types)}") @@ -921,23 +911,15 @@ def create_gpu_workload_profile_if_needed( available_types = [p.get('name') for p in available_gpu] if 'Consumption-GPU-NC8as-T4' in available_types: gpu_profile_type = 'Consumption-GPU-NC8as-T4' - logger.warning( - f"[GPU DEBUG] ⚠️ No specific GPU profile requested, defaulting to T4: {gpu_profile_type}") else: gpu_profile_type = available_gpu[0].get('name') - logger.warning( - f"[GPU DEBUG] ⚠️ No specific GPU profile requested, T4 not available, defaulting to first: {gpu_profile_type}") logger.info( f"No specific GPU profile requested, using: {gpu_profile_type}") # Check if it's a consumption-based GPU profile - logger.warning( - f"[GPU DEBUG] Selected gpu_profile_type: {gpu_profile_type}") if gpu_profile_type.startswith('Consumption-'): # Consumption profiles need to be added to the environment's # workloadProfiles array - logger.warning( - "[GPU DEBUG] Profile is consumption-based, will add to environment") logger.info( f"Adding consumption-based GPU profile to environment: {gpu_profile_type}") @@ -1122,13 +1104,6 @@ def create_models_container_app( logger = get_logger(__name__) app_name = 'models' - logger.info( - f"Creating models container app '{app_name}' with GPU profile '{gpu_profile_name}'") - - # MODEL_RUNNER_URL uses localhost since model-runner and model-runner-config - # are in the same container app (in-app communication) - model_runner_url = "http://localhost:12434" - logger.info(f"Model runner URL (localhost): {model_runner_url}") # Determine model-runner image based on x-azure-deployment.image or GPU # profile @@ -1140,8 +1115,6 @@ def create_models_container_app( azure_deployment = model_config.get('x-azure-deployment', {}) if 'image' in azure_deployment: model_runner_image = azure_deployment['image'] - logger.info( - f"Using custom model-runner image from x-azure-deployment: {model_runner_image}") break # Default to GPU-aware image selection if not specified @@ -1149,8 +1122,6 @@ def create_models_container_app( from ._constants import MODEL_RUNNER_IMAGE, MODEL_RUNNER_IMAGE_CUDA is_gpu_profile = 'GPU' in gpu_profile_name.upper() model_runner_image = MODEL_RUNNER_IMAGE_CUDA if is_gpu_profile else MODEL_RUNNER_IMAGE - logger.info( - f"Using default model-runner image: {model_runner_image} (GPU profile: {is_gpu_profile})") # Determine GPU-appropriate resources based on profile type # T4 GPU: 8 vCPUs, 56GB memory (NC8as_T4_v3) @@ -1158,14 +1129,10 @@ def create_models_container_app( if 'A100' in gpu_profile_name.upper(): gpu_cpu = 24.0 gpu_memory = '220Gi' - logger.info( - f"Detected A100 GPU profile - setting resources to {gpu_cpu} CPU / {gpu_memory}") else: # Default to T4 resources gpu_cpu = 8.0 gpu_memory = '56Gi' - logger.info( - f"Detected T4 GPU profile - setting resources to {gpu_cpu} CPU / {gpu_memory}") # Build model configuration for model-runner-config model_config = { @@ -1265,10 +1232,16 @@ def create_models_container_app( ingress_config['external'] = ingress_override['external'] if 'allowInsecure' in ingress_override: ingress_config['allowInsecure'] = ingress_override['allowInsecure'] - logger.info( - f"Applied ingress overrides from model '{model_name}': {ingress_override}") break # Use first model's ingress settings + # Log clean [models] deployment info + ingress_str = f"{'external' if ingress_config['external'] else 'internal'}, allowInsecure={ingress_config['allowInsecure']}" + models_list = ', '.join(models.keys()) + logger.info(f"[models] Deploying image: {model_runner_image}") + 
logger.info(f"[models] Ingress: {ingress_str}") + logger.info(f"[models] Configured models: {models_list}") + logger.info(f"[models] Workload profile: {gpu_profile_name}") + # Build container app definition container_app_def = { 'location': location, @@ -1312,7 +1285,6 @@ def create_models_container_app( request_url, body=json.dumps(container_app_def)) models_app = r.json() - logger.info(f"Successfully created models container app: {app_name}") return models_app except Exception as e: logger.error(f"Failed to create models container app: {str(e)}") diff --git a/src/containerapp/azext_containerapp/custom.py b/src/containerapp/azext_containerapp/custom.py index 209dae73fa3..9ed7de3af6b 100644 --- a/src/containerapp/azext_containerapp/custom.py +++ b/src/containerapp/azext_containerapp/custom.py @@ -2192,39 +2192,20 @@ def create_containerapps_from_compose(cmd, # pylint: disable=R0914 # strips out x-azure-deployment from the returned models dict requested_gpu_type = None - logger.warning( - "[GPU DEBUG] Inspecting RAW models YAML for GPU profile configuration") raw_models = compose_yaml_original.get('models', {}) - logger.warning( - f"[GPU DEBUG] Raw models keys: {list(raw_models.keys())}") for model_name, model_config in raw_models.items(): - logger.warning( - f"[GPU DEBUG] Raw model '{model_name}' config type: {type(model_config)}") if isinstance(model_config, dict): - logger.warning( - f"[GPU DEBUG] Raw model '{model_name}' has x-azure-deployment: {'x-azure-deployment' in model_config}") if 'x-azure-deployment' in model_config: azure_deployment = model_config.get( 'x-azure-deployment', {}) - logger.warning( - f"[GPU DEBUG] Model '{model_name}' azure_deployment: {azure_deployment}") workload_profiles = azure_deployment.get( 'workloadProfiles', {}) - logger.warning( - f"[GPU DEBUG] Model '{model_name}' workloadProfiles: {workload_profiles}") requested_gpu_type = workload_profiles.get( 'workloadProfileType') - logger.warning( - f"[GPU DEBUG] Model '{model_name}' requested GPU type: {requested_gpu_type}") if requested_gpu_type: - logger.info( - f"Found GPU profile in model '{model_name}': {requested_gpu_type}") break - logger.warning( - f"[GPU DEBUG] Final requested_gpu_type to be passed: {requested_gpu_type}") - try: gpu_profile_name = create_gpu_workload_profile_if_needed( cmd, @@ -2280,8 +2261,6 @@ def create_containerapps_from_compose(cmd, # pylint: disable=R0914 raise InvalidArgumentValueError(message) image = service.image warn_about_unsupported_elements(service) - logger.info( # pylint: disable=W1203 - f"Creating the Container Apps instance for {service_name} under {resource_group_name} in {location}.") ingress_type, target_port = resolve_ingress_and_target_port(service) # Enhanced ACA Compose Support: Override ingress for MCP gateway in ACA @@ -2295,8 +2274,6 @@ def create_containerapps_from_compose(cmd, # pylint: disable=R0914 mcp_gateway_config = get_mcp_gateway_configuration(service) ingress_type = mcp_gateway_config['ingress_type'] target_port = mcp_gateway_config['port'] - logger.info( - f"MCP gateway detected: {service_name} - setting internal ingress on port {target_port}") registry, registry_username, registry_password = resolve_registry_from_cli_args( registry_server, registry_user, registry_pass) # pylint: disable=C0301 @@ -2313,8 +2290,6 @@ def create_containerapps_from_compose(cmd, # pylint: disable=R0914 # All elements become args, no startup_command startup_command = None startup_args = service.command - logger.info( - f"Service '{service_name}' has command array - using 
as args: {startup_args}") # Get x-azure-deployment overrides for resources raw_service_yaml = compose_yaml_original.get( @@ -2478,9 +2453,6 @@ def create_containerapps_from_compose(cmd, # pylint: disable=R0914 # Phase 4.5: Inject MCP Gateway Environment Variables if is_mcp_gateway: - logger.info( - f"Injecting required MCP gateway environment variables for {service_name}") - # Ensure environment list exists if environment is None: environment = [] @@ -2515,19 +2487,10 @@ def create_containerapps_from_compose(cmd, # pylint: disable=R0914 'name': var_name, 'value': var_value }) - logger.info( - f" Set {var_name}={var_value if var_name not in ['AZURE_SUBSCRIPTION_ID', 'AZURE_TENANT_ID'] else var_value[:8] + '...'}") - else: - logger.info( - f" {var_name} already exists in environment, skipping") # Override CPU and memory for MCP gateway - logger.info( - "Setting MCP gateway resources to 2.0 CPU / 4Gi memory") cpu = 2.0 - memory = "4Gi" - - # Phase 3: Inject environment variables for services with models declarations + memory = "4Gi" # Phase 3: Inject environment variables for services with models declarations # Services with "models:" section and endpoint_var/model_var get those specific variables injected # Note: pycomposefile sets models=None if not present, so we just check # if service.models is truthy @@ -2730,11 +2693,28 @@ def create_containerapps_from_compose(cmd, # pylint: disable=R0914 # Ensure MCP gateway always has at least 1 replica if is_mcp_gateway and final_min_replicas < 1: - logger.info( - f"MCP gateway {service_name}: forcing min replicas to 1") final_min_replicas = 1 final_max_replicas = max(1, final_max_replicas) + # Add clean logging for mcp-gateway + if is_mcp_gateway: + from azure.cli.core._profile import Profile + profile = Profile(cli_ctx=cmd.cli_ctx) + subscription_id = profile.get_subscription_id() + ingress_str = f"{'external' if final_ingress_type == 'external' else 'internal'}, allowInsecure={allow_insecure}" + args_str = ', '.join(startup_args) if startup_args else 'none' + logger.info(f"[mcp-gateway] Deploying image: {final_image}") + logger.info(f"[mcp-gateway] Ingress: {ingress_str}") + logger.info(f"[mcp-gateway] Args: {args_str}") + logger.info(f"[mcp-gateway] Subscription for mcp-tooling: {subscription_id}") + # Add clean logging for other services (skip models as it's created separately) + elif service_name != 'models': + ingress_str = f"{'external' if final_ingress_type == 'external' else 'internal'}, allowInsecure={allow_insecure}" if final_ingress_type else 'disabled' + env_count = len(env_vars_cli_format) if env_vars_cli_format else 0 + logger.info(f"[{service_name}] Deploying image: {final_image}") + logger.info(f"[{service_name}] Ingress: {ingress_str}") + logger.info(f"[{service_name}] Environment variables: {env_count} injected") + containerapps_from_compose.append( create_containerapp(cmd, service_name, From b72dde6f36f72314083770e7d06f98bf3880d676 Mon Sep 17 00:00:00 2001 From: Simon Jakesch Date: Sat, 15 Nov 2025 14:41:02 -0600 Subject: [PATCH 06/10] improving aca env logging --- .../azext_containerapp/_compose_utils.py | 89 ++++++++++++++++++- src/containerapp/azext_containerapp/custom.py | 13 ++- 2 files changed, 96 insertions(+), 6 deletions(-) diff --git a/src/containerapp/azext_containerapp/_compose_utils.py b/src/containerapp/azext_containerapp/_compose_utils.py index 446413dcfc0..cdc35ddcb2a 100644 --- a/src/containerapp/azext_containerapp/_compose_utils.py +++ b/src/containerapp/azext_containerapp/_compose_utils.py @@ -31,6 +31,76 @@ # 
============================================================================ +def log_environment_status(cmd, resource_group_name, env_name, env): + """ + Log comprehensive environment status with [env: ] prefix. + + Args: + cmd: Azure CLI command context + resource_group_name: Resource group name + env_name: Environment name + env: Environment resource object + """ + logger = get_logger(__name__) + + # Log environment status + provisioning_state = env.get('properties', {}).get('provisioningState', 'Unknown') + location = env.get('location', 'Unknown') + logger.info(f"[env: {env_name}] Status: {provisioning_state}") + logger.info(f"[env: {env_name}] Location: {location}") + + # Get and log workload profiles + workload_profiles = env.get('properties', {}).get('workloadProfiles', []) + if workload_profiles: + logger.info(f"[env: {env_name}] Workload profiles ({len(workload_profiles)}):") + for profile in workload_profiles: + profile_name = profile.get('name', 'Unknown') + profile_type = profile.get('workloadProfileType', 'Unknown') + logger.info(f"[env: {env_name}] - {profile_name}: {profile_type}") + else: + logger.info(f"[env: {env_name}] Workload profiles: None (Consumption only)") + + # Get and log running container apps + try: + from azure.cli.core.util import send_raw_request + from azure.cli.core.commands.client_factory import get_subscription_id + + management_hostname = cmd.cli_ctx.cloud.endpoints.resource_manager + sub_id = get_subscription_id(cmd.cli_ctx) + api_version = "2024-03-01" + + # List container apps in the environment + url_fmt = "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.App/containerApps?api-version={}" + request_url = url_fmt.format( + management_hostname.strip('/'), + sub_id, + resource_group_name, + api_version + ) + + r = send_raw_request(cmd.cli_ctx, "GET", request_url) + apps_response = r.json() + + # Filter apps by environment + env_id = env.get('id', '') + apps_in_env = [] + for app in apps_response.get('value', []): + app_env_id = app.get('properties', {}).get('environmentId', '') + if app_env_id == env_id: + apps_in_env.append(app) + + if apps_in_env: + logger.info(f"[env: {env_name}] Running apps ({len(apps_in_env)}):") + for app in apps_in_env: + app_name = app.get('name', 'Unknown') + replica_count = app.get('properties', {}).get('template', {}).get('scale', {}).get('minReplicas', 'Unknown') + logger.info(f"[env: {env_name}] - {app_name} (min replicas: {replica_count})") + else: + logger.info(f"[env: {env_name}] Running apps: None") + except Exception as e: + logger.warning(f"[env: {env_name}] Could not retrieve running apps: {str(e)}") + + def build_containerapp_from_compose_service(cmd, name, source, @@ -938,13 +1008,13 @@ def create_gpu_workload_profile_if_needed( for profile in existing_profiles: if profile.get('workloadProfileType') == gpu_profile_type: profile_exists = True - logger.info(f"GPU profile already exists: {gpu_profile_type}") + logger.info(f"[env: {env_name}] GPU profile '{gpu_profile_type}' already exists") break # If profile already exists, skip PATCH and return early if profile_exists: logger.info( - f"Skipping environment update - profile '{gpu_profile_type}' already configured") + f"[env: {env_name}] Skipping environment update - profile '{gpu_profile_type}' already configured") # Wait for environment to be ready in case it's still provisioning # from a previous operation wait_for_environment_provisioning( @@ -953,7 +1023,10 @@ def create_gpu_workload_profile_if_needed( # Profile doesn't exist - add it to the 
environment logger.info( - f"Adding new GPU profile '{gpu_profile_type}' to environment") + f"[env: {env_name}] Adding new GPU profile '{gpu_profile_type}' to environment") + logger.info(f"[env: {env_name}] Existing profiles: {len(existing_profiles)}") + for profile in existing_profiles: + logger.info(f"[env: {env_name}] - {profile.get('name')}: {profile.get('workloadProfileType')}") new_profile = { "name": gpu_profile_type, @@ -981,6 +1054,12 @@ def create_gpu_workload_profile_if_needed( cleaned_profiles.append(new_profile) + # Log what will be sent in PATCH + logger.info(f"[env: {env_name}] PATCH operation: updating workload profiles") + logger.info(f"[env: {env_name}] Total profiles after update: {len(cleaned_profiles)}") + for profile in cleaned_profiles: + logger.info(f"[env: {env_name}] - {profile.get('name')}: {profile.get('workloadProfileType')}") + # Update the environment management_hostname = cmd.cli_ctx.cloud.endpoints.resource_manager sub_id = get_subscription_id(cmd.cli_ctx) @@ -1008,10 +1087,12 @@ def create_gpu_workload_profile_if_needed( "PATCH", request_url, body=json.dumps(env_update)) - logger.info(f"Added GPU profile to environment: {gpu_profile_type}") + logger.info(f"[env: {env_name}] Successfully added GPU profile: {gpu_profile_type}") # Wait for environment to be provisioned before continuing + logger.info(f"[env: {env_name}] Waiting for environment provisioning to complete...") wait_for_environment_provisioning(cmd, resource_group_name, env_name) + logger.info(f"[env: {env_name}] Environment is ready") return gpu_profile_type diff --git a/src/containerapp/azext_containerapp/custom.py b/src/containerapp/azext_containerapp/custom.py index 9ed7de3af6b..4bffd885811 100644 --- a/src/containerapp/azext_containerapp/custom.py +++ b/src/containerapp/azext_containerapp/custom.py @@ -2123,18 +2123,27 @@ def create_containerapps_from_compose(cmd, # pylint: disable=R0914 managed_env_name = parsed_managed_env['name'] env_rg = parsed_managed_env.get('resource_group', resource_group_name) + # Import helper for environment status logging + from ._compose_utils import log_environment_status + try: managed_environment = show_managed_environment( cmd=cmd, name=managed_env_name, resource_group_name=env_rg) + logger.info(f"[env: {managed_env_name}] Environment found") + # Log comprehensive environment status + log_environment_status(cmd, env_rg, managed_env_name, managed_environment) except CLIInternalError: # pylint: disable=W0702 - logger.info( # pylint: disable=W1203 - f"Creating the Container Apps managed environment {managed_env_name} under {env_rg} in {location}.") + logger.info(f"[env: {managed_env_name}] Environment does not exist, will create") + logger.info(f"[env: {managed_env_name}] Creating in resource group '{env_rg}', location '{location}'") managed_environment = create_managed_environment( cmd, name=managed_env_name, resource_group_name=env_rg, tags=tags, location=location) + logger.info(f"[env: {managed_env_name}] Environment created successfully") + # Log status of newly created environment + log_environment_status(cmd, env_rg, managed_env_name, managed_environment) compose_yaml = load_yaml_file(compose_file_path) # Make a deep copy to preserve original YAML for x-azure-deployment From d64880182d7e5c0d4f1296f074db045a3ae39709 Mon Sep 17 00:00:00 2001 From: Simon Jakesch Date: Sat, 15 Nov 2025 18:02:30 -0600 Subject: [PATCH 07/10] logging improvements --- .../azext_containerapp/_compose_utils.py | 12 +++++++++ .../azext_containerapp/_constants.py | 26 
++++++++++++------- 2 files changed, 29 insertions(+), 9 deletions(-) diff --git a/src/containerapp/azext_containerapp/_compose_utils.py b/src/containerapp/azext_containerapp/_compose_utils.py index cdc35ddcb2a..f47e55b7c65 100644 --- a/src/containerapp/azext_containerapp/_compose_utils.py +++ b/src/containerapp/azext_containerapp/_compose_utils.py @@ -1185,6 +1185,11 @@ def create_models_container_app( logger = get_logger(__name__) app_name = 'models' + logger.info(f"[models] Using model runner config image: {MODEL_RUNNER_CONFIG_IMAGE}") + + # MODEL_RUNNER_URL uses localhost since model-runner and model-runner-config + # are in the same container app (in-app communication) + model_runner_url = "http://localhost:12434" # Determine model-runner image based on x-azure-deployment.image or GPU # profile @@ -1203,6 +1208,7 @@ def create_models_container_app( from ._constants import MODEL_RUNNER_IMAGE, MODEL_RUNNER_IMAGE_CUDA is_gpu_profile = 'GPU' in gpu_profile_name.upper() model_runner_image = MODEL_RUNNER_IMAGE_CUDA if is_gpu_profile else MODEL_RUNNER_IMAGE + logger.info(f"[models] Using default {'GPU' if is_gpu_profile else 'CPU'} model runner image: {model_runner_image}") # Determine GPU-appropriate resources based on profile type # T4 GPU: 8 vCPUs, 56GB memory (NC8as_T4_v3) @@ -1506,6 +1512,12 @@ def get_mcp_gateway_configuration(service): 'ingress_type': 'internal', # MCP gateway should be internal-only 'image': service.image if hasattr(service, 'image') else MCP_GATEWAY_IMAGE } + + # Log which image is being used + if hasattr(service, 'image') and service.image: + logger.info(f"[mcp-gateway] Using custom image from compose: {service.image}") + else: + logger.info(f"[mcp-gateway] Using default image: {MCP_GATEWAY_IMAGE}") # Check for custom port in service ports if hasattr(service, 'ports') and service.ports: diff --git a/src/containerapp/azext_containerapp/_constants.py b/src/containerapp/azext_containerapp/_constants.py index d72831c9264..d6274d82406 100644 --- a/src/containerapp/azext_containerapp/_constants.py +++ b/src/containerapp/azext_containerapp/_constants.py @@ -124,15 +124,23 @@ HELLO_WORLD_IMAGE = "mcr.microsoft.com/k8se/quickstart:latest" -# Model runner images -MODEL_RUNNER_IMAGE = "docker/model-runner:latest" -MODEL_RUNNER_IMAGE_CUDA = "docker/model-runner:latest-cuda" -# replace with mcr image when available -MODEL_RUNNER_CONFIG_IMAGE = "simon.azurecr.io/model-runner-config:latest" - -# MCP gateway image -# default image, currently does not exist publicly -MCP_GATEWAY_IMAGE = "mcr.microsoft.com/mcp-gateway:latest" +# ============================================================================ +# Default Container Images for Azure Container Apps Compose +# ============================================================================ + +# Model Runner Images (for AI/ML model hosting) +# NOTE: These are placeholder images - update to your registry/versions as needed +MODEL_RUNNER_IMAGE = "docker/model-runner:latest" # CPU-only model runner +MODEL_RUNNER_IMAGE_CUDA = "docker/model-runner:latest-cuda" # GPU-enabled model runner +MODEL_RUNNER_CONFIG_IMAGE = "acateam.azurecr.io/preview-ai-compose/model-runner-config:latest" # Model configuration sidecar +# TODO: Replace with public MCR images when available + +# MCP Gateway Image (Model Context Protocol gateway) +# NOTE: This image does not exist publicly yet - update to your registry +MCP_GATEWAY_IMAGE = "acateam.azurecr.io/preview-ai-compose/mcp-gateway:latest" +# TODO: Replace with actual public MCR image when available + +# 
============================================================================ LOGS_STRING = '[{"category":"ContainerAppConsoleLogs","categoryGroup":null,"enabled":true,"retentionPolicy":{"days":0,"enabled":false}},{"category":"ContainerAppSystemLogs","categoryGroup":null,"enabled":true,"retentionPolicy":{"days":0,"enabled":false}}]' # pylint: disable=line-too-long From f08f8f7217c16d231c6d58c80aa12786d7cf1800 Mon Sep 17 00:00:00 2001 From: Simon Jakesch Date: Sat, 15 Nov 2025 22:06:49 -0600 Subject: [PATCH 08/10] adding secret parsing and respective logging --- .../azext_containerapp/_compose_utils.py | 93 +++++++++++++++---- src/containerapp/azext_containerapp/custom.py | 13 ++- src/containerapp/setup.py | 2 +- 3 files changed, 85 insertions(+), 23 deletions(-) diff --git a/src/containerapp/azext_containerapp/_compose_utils.py b/src/containerapp/azext_containerapp/_compose_utils.py index f47e55b7c65..7608aba4385 100644 --- a/src/containerapp/azext_containerapp/_compose_utils.py +++ b/src/containerapp/azext_containerapp/_compose_utils.py @@ -9,6 +9,7 @@ from knack.log import get_logger from knack.prompting import prompt, prompt_choice_list +from azure.cli.core.azclierror import InvalidArgumentValueError from ._compose_ported import ( calculate_model_runner_resources as _ported_calculate_model_runner_resources, @@ -368,32 +369,84 @@ def resolve_environment_from_service(service): return env_array -def resolve_secret_from_service(service, secrets_map): - secret_array = [] - secret_env_ref = [] +def _normalize_secret_store_name(raw_name): + """Normalize secret names to a Container Apps friendly format.""" + if raw_name is None: + return "secret" + sanitized = str(raw_name).strip() + if not sanitized: + sanitized = "secret" + sanitized = sanitized.replace(' ', '-').replace('_', '-').lower() + cleaned = [] + for char in sanitized: + if char.isalnum() or char == '-': + cleaned.append(char) + else: + cleaned.append('-') + normalized = ''.join(cleaned).strip('-') + return normalized or "secret" - if service.secrets is None: - return (None, None) - for secret in service.secrets: +def _normalize_env_var_name(raw_name): + """Keep the requested environment variable name, falling back to the secret name.""" + if raw_name is None: + return "SECRET_VALUE" + candidate = str(raw_name).strip() + return candidate or "SECRET_VALUE" - secret_config = secrets_map[secret.source] - if secret_config is not None and secret_config.file is not None: - value = secret_config.file.readFile() - if secret.target is None: - secret_name = secret.source.replace('_', '-') - else: - secret_name = secret.target.replace('_', '-') - secret_array.append(f"{secret_name}={value}") - secret_env_ref.append(f"{secret_name}=secretref:{secret_name}") - if len(secret_array) == 0: - return (None, None) +def resolve_secret_from_service(service, secrets_map, service_name=None): + secret_value_map = {} + secret_env_ref = [] + secret_usage = [] + + if not getattr(service, 'secrets', None): + return (None, None, None) - logger.warning( - "Note: Secrets will be mapped as secure environment variables in Azure Container Apps.") + if not secrets_map: + logger.warning( + "Service '%s' references secrets but none are defined in the compose file. 
Skipping secret injection.", + service_name or getattr(service, 'compose_path', 'unknown')) + return (None, None, None) - return (secret_array, secret_env_ref) + for secret in service.secrets: + source_name = getattr(secret, 'source', None) + if not source_name: + logger.warning("A secret entry without a source was found on service '%s' and will be ignored.", + service_name or getattr(service, 'compose_path', 'unknown')) + continue + + secret_config = secrets_map.get(source_name) + if secret_config is None: + raise InvalidArgumentValueError( + f"Secret '{source_name}' referenced by service '{service_name}' is not defined under compose secrets.") + + if getattr(secret_config, 'file', None) is None: + raise InvalidArgumentValueError( + f"Secret '{source_name}' must specify a file path to be used with Azure Container Apps.") + + value = secret_config.file.readFile() + aca_secret_name = _normalize_secret_store_name(source_name) + env_var_candidate = getattr(secret, 'target', None) or source_name + env_var_name = _normalize_env_var_name(env_var_candidate) + + # Deduplicate secret storage per service while allowing multiple env refs + secret_value_map[aca_secret_name] = value + secret_env_ref.append(f"{env_var_name}=secretref:{aca_secret_name}") + secret_usage.append({ + "compose_source": source_name, + "secret_name": aca_secret_name, + "env_var": env_var_name + }) + + if not secret_value_map: + return (None, None, None) + + logger.info("Secrets detected for service '%s'; values will be stored securely and referenced via env vars.", + service_name or getattr(service, 'compose_path', 'unknown')) + + secret_array = [f"{name}={value}" for name, value in secret_value_map.items()] + return (secret_array, secret_env_ref or None, secret_usage or None) def resolve_replicas_from_service(service): diff --git a/src/containerapp/azext_containerapp/custom.py b/src/containerapp/azext_containerapp/custom.py index 4bffd885811..500e07bf2c5 100644 --- a/src/containerapp/azext_containerapp/custom.py +++ b/src/containerapp/azext_containerapp/custom.py @@ -2320,13 +2320,22 @@ def create_containerapps_from_compose(cmd, # pylint: disable=R0914 ) replicas = resolve_replicas_from_service(service) environment = resolve_environment_from_service(service) - secret_vars, secret_env_ref = resolve_secret_from_service( - service, parsed_compose_file.secrets) + secret_vars, secret_env_ref, secret_usage = resolve_secret_from_service( + service, parsed_compose_file.secrets, service_name) if environment is not None and secret_env_ref is not None: environment.extend(secret_env_ref) elif secret_env_ref is not None: environment = secret_env_ref + if secret_usage: + for usage in secret_usage: + logger.info( + "[%s] Secret '%s' stored as '%s' and injected via env '%s'", + service_name, + usage.get('compose_source'), + usage.get('secret_name'), + usage.get('env_var')) + # Strip ports from URLs in environment variables for ACA (Envoy handles port mapping) # Also inject proper FQDNs for MCP_GATEWAY_URL references if environment is not None: diff --git a/src/containerapp/setup.py b/src/containerapp/setup.py index 6b7b5aa7b1f..a4750a64509 100644 --- a/src/containerapp/setup.py +++ b/src/containerapp/setup.py @@ -28,7 +28,7 @@ # TODO: Confirm this is the right version number you want and it matches your # HISTORY.rst entry. 
-VERSION = '1.2.0b5' +VERSION = '1.2.0b5+ai-compose' # The full list of classifiers is available at # https://pypi.python.org/pypi?%3Aaction=list_classifiers From 638b6b1b25f204b7f595ac948914da3364d35720 Mon Sep 17 00:00:00 2001 From: Simon Jakesch Date: Sun, 16 Nov 2025 11:06:43 -0600 Subject: [PATCH 09/10] introducting local build capability --- .../azext_containerapp/_compose_utils.py | 400 ++++++++++++++++-- .../containerapp_env_decorator.py | 107 +++-- src/containerapp/azext_containerapp/custom.py | 13 +- 3 files changed, 454 insertions(+), 66 deletions(-) diff --git a/src/containerapp/azext_containerapp/_compose_utils.py b/src/containerapp/azext_containerapp/_compose_utils.py index 7608aba4385..ba97edb3153 100644 --- a/src/containerapp/azext_containerapp/_compose_utils.py +++ b/src/containerapp/azext_containerapp/_compose_utils.py @@ -7,9 +7,14 @@ # too-many-locals, logging-fstring-interpolation, arguments-differ, # abstract-method, logging-format-interpolation, broad-except +import os +import shutil +import subprocess +from datetime import datetime + from knack.log import get_logger from knack.prompting import prompt, prompt_choice_list -from azure.cli.core.azclierror import InvalidArgumentValueError +from azure.cli.core.azclierror import CLIError, InvalidArgumentValueError from ._compose_ported import ( calculate_model_runner_resources as _ported_calculate_model_runner_resources, @@ -23,16 +28,326 @@ parse_service_models_config as _ported_parse_service_models_config, should_deploy_model_runner as _ported_should_deploy_model_runner, ) +from ._utils import is_docker_running logger = get_logger(__name__) + +def _docker_cli_available(): + return shutil.which('docker') is not None + + +def _ensure_local_docker_ready(): + if not _docker_cli_available(): + return False, "Docker CLI not found in PATH" + if not is_docker_running(): + return False, "Docker daemon is not running" + return True, None + + +def _sanitize_registry_server(registry_server): + if not registry_server: + return None + sanitized = registry_server.strip() + if sanitized.startswith('https://'): + sanitized = sanitized[len('https://'):] + elif sanitized.startswith('http://'): + sanitized = sanitized[len('http://'):] + return sanitized.rstrip('/') + + +def _extract_registry_from_image(image_name): + if not image_name: + return None + first_segment = image_name.split('/', 1)[0] + if '.' in first_segment or ':' in first_segment or first_segment == 'localhost': + return first_segment + return None + + +def _current_timestamp_tag(): + return datetime.utcnow().strftime('%Y%m%d%H%M%S') + + +def _resolve_build_context_path(context_value, compose_directory): + candidate = context_value or '.' + if isinstance(candidate, dict): + candidate = candidate.get('path') or candidate.get('context') or '.' 
+ base_dir = compose_directory or os.getcwd() + resolved = candidate if os.path.isabs(candidate) else os.path.abspath(os.path.join(base_dir, candidate)) + if not os.path.exists(resolved): + raise CLIError(f"Build context directory '{resolved}' does not exist.") + return resolved + + +def _resolve_dockerfile_path(dockerfile_value, context_path): + dockerfile_name = dockerfile_value or 'Dockerfile' + resolved = dockerfile_name if os.path.isabs(dockerfile_name) else os.path.abspath(os.path.join(context_path, dockerfile_name)) + if not os.path.exists(resolved): + raise CLIError(f"Dockerfile '{resolved}' does not exist.") + return resolved + + +def _ensure_iterable(obj): + if obj is None: + return [] + if isinstance(obj, (list, tuple, set)): + return list(obj) + return [obj] + + +def _iter_key_value_pairs(value): + if value is None: + return [] + if isinstance(value, dict): + return [f"{k}={v}" if v is not None else str(k) for k, v in value.items()] + + pairs = [] + for entry in _ensure_iterable(value): + if isinstance(entry, dict): + for k, v in entry.items(): + pairs.append(f"{k}={v}" if v is not None else str(k)) + else: + pairs.append(str(entry)) + return pairs + + +def _serialize_cache_entry(entry): + if isinstance(entry, dict): + serialized = [] + for key, value in entry.items(): + if value is None: + continue + serialized.append(f"{key}={value}") + return ','.join(serialized) + return str(entry) + + +def _normalize_extra_hosts(extra_hosts): + hosts = [] + for entry in _ensure_iterable(extra_hosts): + if isinstance(entry, dict): + for host, addr in entry.items(): + hosts.append(f"{host}:{addr}") + else: + hosts.append(str(entry)) + return hosts + + +def _normalize_ssh_entries(ssh_value): + entries = [] + for entry in _ensure_iterable(ssh_value): + if isinstance(entry, dict): + for key, val in entry.items(): + entries.append(f"{key}={val}") + else: + entries.append(str(entry)) + return entries + + +def _apply_registry_to_tag(tag, registry_server): + if not tag: + return None + registry = _extract_registry_from_image(tag) + normalized_registry = _sanitize_registry_server(registry_server) + candidate = tag + if not registry and normalized_registry: + candidate = f"{normalized_registry}/{tag.lstrip('/')}" + if ':' not in candidate.split('/')[-1]: + candidate = f"{candidate}:{_current_timestamp_tag()}" + return candidate + + +def _prepare_image_name(image_name, registry_server, default_repo): + candidate = image_name or default_repo + normalized_registry = _sanitize_registry_server(registry_server) + image_registry = _extract_registry_from_image(candidate) + if normalized_registry and not image_registry: + candidate = f"{normalized_registry}/{candidate.lstrip('/')}" + if ':' not in candidate.split('/')[-1]: + candidate = f"{candidate}:{_current_timestamp_tag()}" + return candidate + + +def _collect_docker_build_options(build_config, registry_server): + if build_config is None: + return [], [], [] + + flags = [] + extra_tags = [] + unsupported = [] + + if getattr(build_config, 'target', None): + flags.extend(['--target', build_config.target]) + + for arg in _iter_key_value_pairs(getattr(build_config, 'args', None)): + flags.extend(['--build-arg', arg]) + + for label in _iter_key_value_pairs(getattr(build_config, 'labels', None)): + flags.extend(['--label', label]) + + if getattr(build_config, 'no_cache', None): + flags.append('--no-cache') + + if getattr(build_config, 'pull', None): + flags.append('--pull') + + for cache_from in _ensure_iterable(getattr(build_config, 'cache_from', None)): + 
flags.extend(['--cache-from', _serialize_cache_entry(cache_from)]) + + for cache_to in _ensure_iterable(getattr(build_config, 'cache_to', None)): + flags.extend(['--cache-to', _serialize_cache_entry(cache_to)]) + + for host_entry in _normalize_extra_hosts(getattr(build_config, 'extra_hosts', None)): + flags.extend(['--add-host', host_entry]) + + for ssh_entry in _normalize_ssh_entries(getattr(build_config, 'ssh', None)): + flags.extend(['--ssh', ssh_entry]) + + if getattr(build_config, 'shm_size', None): + flags.extend(['--shm-size', str(build_config.shm_size)]) + + if getattr(build_config, 'isolation', None): + flags.extend(['--isolation', str(build_config.isolation)]) + + if getattr(build_config, 'tags', None): + for tag in _ensure_iterable(build_config.tags): + normalized_tag = _apply_registry_to_tag(str(tag), registry_server) + if normalized_tag: + extra_tags.append(normalized_tag) + + if getattr(build_config, 'secrets', None): + unsupported.append('services..build.secrets') + + return flags, extra_tags, unsupported + + +def _stream_command_output(command, env=None): + logger.debug("Running command: %s", ' '.join(command)) + process = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, text=True, env=env) + output_lines = [] + while True: + line = process.stdout.readline() + if not line: + break + output_lines.append(line.rstrip()) + logger.info(line.rstrip()) + process.wait() + if process.returncode != 0: + raise CLIError(f"Command '{' '.join(command)}' failed with exit code {process.returncode}.") + return output_lines + + +def _docker_login_with_credentials(registry_server, username, password): + if not username or not password: + raise CLIError(f"Credentials are required to push to registry '{registry_server}'.") + command = ['docker', 'login', registry_server, '--username', username, '--password-stdin'] + logger.debug("Logging into registry %s with docker CLI", registry_server) + process = subprocess.Popen(command, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True) + stdout, stderr = process.communicate(password) + if process.returncode != 0: + raise CLIError(f"Failed to login to registry '{registry_server}': {stderr.strip()}") + logger.info("Authenticated docker client for registry '%s'", registry_server) + + +def _ensure_acr_login(cmd, registry_server): + from azure.cli.command_modules.acr.custom import acr_login + from azure.cli.core.profiles import ResourceType + + registry_name = registry_server.split('.')[0] + task_command_kwargs = {"resource_type": ResourceType.MGMT_CONTAINERREGISTRY, 'operation_group': 'webhooks'} + old_command_kwargs = {} + for key in task_command_kwargs: + old_command_kwargs[key] = cmd.command_kwargs.get(key) + cmd.command_kwargs[key] = task_command_kwargs[key] + try: + acr_login(cmd, registry_name) + finally: + for k, v in old_command_kwargs.items(): + cmd.command_kwargs[k] = v + logger.info("Docker client authenticated with ACR '%s'", registry_name) + + +def _docker_push_image(cmd, image_name, registry_server, registry_user, registry_pass): + registry_host = _extract_registry_from_image(image_name) or _sanitize_registry_server(registry_server) + if registry_host and registry_host.endswith('.azurecr.io'): + _ensure_acr_login(cmd, registry_host) + elif registry_host and (registry_user or registry_pass): + _docker_login_with_credentials(registry_host, registry_user, registry_pass) + command = ['docker', 'push', image_name] + _stream_command_output(command) + logger.info("Pushed image '%s'", 
image_name) + + +def _build_with_local_docker(cmd, + app, + service_name, + image_name, + build_configuration, + context_value, + dockerfile_value, + compose_directory, + registry_server, + registry_user, + registry_pass): + build_context = _resolve_build_context_path(context_value, compose_directory) + dockerfile_path = _resolve_dockerfile_path(dockerfile_value, build_context) + + requested_registry = _sanitize_registry_server(registry_server) + image_registry = _extract_registry_from_image(image_name) + fallback_registry = _sanitize_registry_server(app.registry_server) + + target_registry = image_registry or requested_registry or fallback_registry + + if target_registry is None: + logger.warning("[%s] No registry information available; skipping local Docker push. Use --registry-server to enable local image publishing.", service_name) + return None + + if target_registry.endswith('.azurecr.io'): + from ._up_utils import _get_registry_details + app.registry_server = target_registry + _get_registry_details(cmd, app, False) + app.create_acr_if_needed() + registry_user = app.registry_user + registry_pass = app.registry_pass + + final_image = _prepare_image_name(image_name, target_registry, service_name) + tags_to_push = [final_image] + + default_registry_for_tags = _extract_registry_from_image(final_image) or target_registry + build_flags, additional_tags, unsupported = _collect_docker_build_options(build_configuration, default_registry_for_tags) + tags_to_push.extend(additional_tags) + + if unsupported: + logger.warning("These build configuration settings from the docker-compose file are not yet supported. See https://aka.ms/containerapp/compose/build_support for details.") + for item in unsupported: + logger.warning(" %s", item) + + build_env = os.environ.copy() + build_env.setdefault('DOCKER_BUILDKIT', '1') + + command = ['docker', 'build', '--file', dockerfile_path] + for tag in tags_to_push: + command.extend(['--tag', tag]) + command.extend(build_flags) + command.append(build_context) + + logger.info("[%s] Building image locally with Docker (context: %s, dockerfile: %s)", service_name, build_context, dockerfile_path) + _stream_command_output(command, env=build_env) + + for tag in tags_to_push: + _docker_push_image(cmd, tag, default_registry_for_tags or target_registry, registry_user, registry_pass) + + app.image = final_image + return final_image, default_registry_for_tags or target_registry, registry_user, registry_pass + # ============================================================================ # Functions copied from azure-cli core to make extension self-contained # These eliminate the dependency on azure-cli core's containerapp module # ============================================================================ -def log_environment_status(cmd, resource_group_name, env_name, env): +def log_environment_status(cmd, resource_group_name, env_name, env, log_level='info'): """ Log comprehensive environment status with [env: ] prefix. @@ -41,25 +356,27 @@ def log_environment_status(cmd, resource_group_name, env_name, env): resource_group_name: Resource group name env_name: Environment name env: Environment resource object + log_level: Logger method name ("info", "warning", etc.) 
used for status lines """ logger = get_logger(__name__) + log_method = getattr(logger, log_level, logger.info) # Log environment status provisioning_state = env.get('properties', {}).get('provisioningState', 'Unknown') location = env.get('location', 'Unknown') - logger.info(f"[env: {env_name}] Status: {provisioning_state}") - logger.info(f"[env: {env_name}] Location: {location}") + log_method(f"[env: {env_name}] Status: {provisioning_state}") + log_method(f"[env: {env_name}] Location: {location}") # Get and log workload profiles workload_profiles = env.get('properties', {}).get('workloadProfiles', []) if workload_profiles: - logger.info(f"[env: {env_name}] Workload profiles ({len(workload_profiles)}):") + log_method(f"[env: {env_name}] Workload profiles ({len(workload_profiles)}):") for profile in workload_profiles: profile_name = profile.get('name', 'Unknown') profile_type = profile.get('workloadProfileType', 'Unknown') - logger.info(f"[env: {env_name}] - {profile_name}: {profile_type}") + log_method(f"[env: {env_name}] - {profile_name}: {profile_type}") else: - logger.info(f"[env: {env_name}] Workload profiles: None (Consumption only)") + log_method(f"[env: {env_name}] Workload profiles: None (Consumption only)") # Get and log running container apps try: @@ -91,13 +408,13 @@ def log_environment_status(cmd, resource_group_name, env_name, env): apps_in_env.append(app) if apps_in_env: - logger.info(f"[env: {env_name}] Running apps ({len(apps_in_env)}):") + log_method(f"[env: {env_name}] Running apps ({len(apps_in_env)}):") for app in apps_in_env: app_name = app.get('name', 'Unknown') replica_count = app.get('properties', {}).get('template', {}).get('scale', {}).get('minReplicas', 'Unknown') - logger.info(f"[env: {env_name}] - {app_name} (min replicas: {replica_count})") + log_method(f"[env: {env_name}] - {app_name} (min replicas: {replica_count})") else: - logger.info(f"[env: {env_name}] Running apps: None") + log_method(f"[env: {env_name}] Running apps: None") except Exception as e: logger.warning(f"[env: {env_name}] Could not retrieve running apps: {str(e)}") @@ -117,7 +434,9 @@ def build_containerapp_from_compose_service(cmd, registry_pass, env_vars, logs_key=None, - logs_customer_id=None): + logs_customer_id=None, + compose_directory=None, + build_configuration=None): from .custom import create_managed_environment from ._up_utils import (ContainerApp, ContainerAppEnvironment, @@ -126,6 +445,7 @@ def build_containerapp_from_compose_service(cmd, _get_registry_details, _get_acr_from_image) + compose_directory = os.path.abspath(compose_directory) if compose_directory else None resource_group = ResourceGroup( cmd, name=resource_group_name, location=location) env = ContainerAppEnvironment(cmd, @@ -152,6 +472,26 @@ def build_containerapp_from_compose_service(cmd, if app.registry_server is None and app.image is not None: _get_acr_from_image(cmd, app) + + if build_configuration is not None: + docker_ready, docker_reason = _ensure_local_docker_ready() + if docker_ready: + local_result = _build_with_local_docker(cmd, + app, + name, + image, + build_configuration, + source, + dockerfile, + compose_directory, + registry_server, + registry_user, + registry_pass) + if local_result is not None: + return local_result + logger.warning("[%s] Local Docker build was skipped; falling back to Azure Container Registry build.", name) + logger.warning("[%s] %s. 
Falling back to Azure Container Registry build.", name, docker_reason) + _get_registry_details(cmd, app, True) app.create_acr_if_needed() @@ -179,24 +519,11 @@ def resolve_configuration_element_list( def warn_about_unsupported_build_configuration(compose_service): unsupported_configuration = [ - "args", - "ssh", - "cache_from", - "cache_to", - "extra_hosts", - "isolation", - "labels", - "no_cache", - "pull", - "shm_size", - "target", - "secrets", - "tags"] + "secrets"] if compose_service.build is not None: config_list = resolve_configuration_element_list( compose_service, unsupported_configuration, 'build') message = "These build configuration settings from the docker-compose file are not yet supported." - message += " Currently, we support supplying a build context and optionally target Dockerfile for a service." message += " See https://aka.ms/containerapp/compose/build_support for more information or to add feedback." if len(config_list) >= 1: logger.warning(message) @@ -974,6 +1301,14 @@ def create_gpu_workload_profile_if_needed( # Use class methods directly - no instantiation needed env = ManagedEnvironmentClient.show(cmd, resource_group_name, env_name) + # Always inspect/log current environment before mutating it + log_environment_status( + cmd, + resource_group_name, + env_name, + env, + log_level='warning') + workload_profiles = env.get( 'properties', {}).get( 'workloadProfiles', []) @@ -983,9 +1318,12 @@ def create_gpu_workload_profile_if_needed( for profile in workload_profiles: if profile.get( 'workloadProfileType') == requested_gpu_profile_type: + existing_name = profile.get('name', requested_gpu_profile_type) logger.info( f"Found requested GPU profile in environment: {requested_gpu_profile_type}") - return requested_gpu_profile_type + logger.warning( + f"[env: {env_name}] Reusing existing workload profile '{existing_name}'") + return existing_name else: # No specific request, return any GPU profile found for profile in workload_profiles: @@ -993,6 +1331,8 @@ def create_gpu_workload_profile_if_needed( if 'GPU' in profile_type.upper(): logger.info( f"Found existing GPU profile: {profile.get('name')}") + logger.warning( + f"[env: {env_name}] Reusing GPU workload profile '{profile.get('name')}'") return profile.get('name') except Exception as e: logger.warning(f"Failed to check existing profiles: {str(e)}") @@ -1058,21 +1398,23 @@ def create_gpu_workload_profile_if_needed( 'properties', {}).get( 'workloadProfiles', []) profile_exists = False + existing_profile_name = gpu_profile_type for profile in existing_profiles: if profile.get('workloadProfileType') == gpu_profile_type: profile_exists = True + existing_profile_name = profile.get('name', gpu_profile_type) logger.info(f"[env: {env_name}] GPU profile '{gpu_profile_type}' already exists") break # If profile already exists, skip PATCH and return early if profile_exists: - logger.info( - f"[env: {env_name}] Skipping environment update - profile '{gpu_profile_type}' already configured") + logger.warning( + f"[env: {env_name}] Skipping workload profile update; using '{existing_profile_name}'") # Wait for environment to be ready in case it's still provisioning # from a previous operation wait_for_environment_provisioning( cmd, resource_group_name, env_name) - return gpu_profile_type + return existing_profile_name # Profile doesn't exist - add it to the environment logger.info( diff --git a/src/containerapp/azext_containerapp/containerapp_env_decorator.py b/src/containerapp/azext_containerapp/containerapp_env_decorator.py index 
a750db4f43f..bc516e94296 100644 --- a/src/containerapp/azext_containerapp/containerapp_env_decorator.py +++ b/src/containerapp/azext_containerapp/containerapp_env_decorator.py @@ -110,44 +110,85 @@ def set_up_managed_identity(self): self.managed_env_def["identity"] = identity_def def set_up_workload_profiles(self): - if self.get_argument_enable_workload_profiles(): - # If the environment exists, infer the environment type - existing_environment = None - try: - existing_environment = self.client.show(cmd=self.cmd, resource_group_name=self.get_argument_resource_group_name(), name=self.get_argument_name()) - except Exception as e: - handle_non_404_status_code_exception(e) - - if existing_environment and safe_get(existing_environment, "properties", "workloadProfiles") is None: - # check if input params include -w/--enable-workload-profiles + if not self.get_argument_enable_workload_profiles(): + return + + def _profile_exists(profiles, profile_type): + return any(p.get("workloadProfileType") == profile_type for p in profiles) + + requested_profiles = [] + if self.get_argument_enable_dedicated_gpu(): + requested_profiles.append({ + "workloadProfileType": "NC24-A100", + "name": "gpu", + "minimumCount": 1, + "maximumCount": 1 + }) + + if self.is_env_for_azml_app() and not self.get_argument_enable_dedicated_gpu(): + wp_type = self.get_argument_workload_profile_type() + if wp_type is None or wp_type.lower() == "consumption-gpu-nc24-a100": + requested_profiles.append({ + "workloadProfileType": "Consumption-GPU-NC24-A100", + "name": self.get_argument_workload_profile_name() if self.get_argument_workload_profile_name() else "serverless-A100", + }) + else: + requested_profiles.append({ + "workloadProfileType": wp_type, + "name": self.get_argument_workload_profile_name() if self.get_argument_workload_profile_name() else "serverless-gpu", + }) + + # If the environment exists, infer the environment type and reuse profiles + existing_environment = None + try: + existing_environment = self.client.show(cmd=self.cmd, + resource_group_name=self.get_argument_resource_group_name(), + name=self.get_argument_name()) + except Exception as e: + handle_non_404_status_code_exception(e) + + if existing_environment: + existing_profiles = safe_get(existing_environment, "properties", "workloadProfiles") + if existing_profiles is None: if self.cmd.cli_ctx.data.get('safe_params') and ('-w' in self.cmd.cli_ctx.data.get('safe_params') or '--enable-workload-profiles' in self.cmd.cli_ctx.data.get('safe_params')): raise ValidationError(f"Existing environment {self.get_argument_name()} cannot enable workload profiles. 
If you want to use Consumption and Dedicated environment, please create a new one.") return - workload_profiles = get_default_workload_profiles(self.cmd, self.get_argument_location()) - if self.get_argument_enable_dedicated_gpu(): - gpu_profile = { - "workloadProfileType": "NC24-A100", - "name": "gpu", - "minimumCount": 1, - "maximumCount": 1 + cleaned_profiles = [] + for profile in existing_profiles: + cleaned = { + "name": profile.get("name"), + "workloadProfileType": profile.get("workloadProfileType") } - workload_profiles.append(gpu_profile) - if self.is_env_for_azml_app() and not self.get_argument_enable_dedicated_gpu(): - wp_type = self.get_argument_workload_profile_type() - if wp_type is None or wp_type.lower() == "consumption-gpu-nc24-a100": - serverless_a100_profile = { - "workloadProfileType": "Consumption-GPU-NC24-A100", - "name": self.get_argument_workload_profile_name() if self.get_argument_workload_profile_name() else "serverless-A100", - } - workload_profiles.append(serverless_a100_profile) - else: - serverless_gpu_profile = { - "workloadProfileType": wp_type, - "name": self.get_argument_workload_profile_name() if self.get_argument_workload_profile_name() else "serverless-gpu", - } - workload_profiles.append(serverless_gpu_profile) - self.managed_env_def["properties"]["workloadProfiles"] = workload_profiles + if profile.get("minimumCount") is not None: + cleaned["minimumCount"] = profile.get("minimumCount") + if profile.get("maximumCount") is not None: + cleaned["maximumCount"] = profile.get("maximumCount") + cleaned_profiles.append(cleaned) + + added_profile = False + for profile in requested_profiles: + if not _profile_exists(cleaned_profiles, profile.get("workloadProfileType")): + cleaned_profiles.append(profile) + added_profile = True + + if added_profile: + logger.warning("Environment %s already exists; appending %d workload profile(s) without removing existing ones", + self.get_argument_name(), len(cleaned_profiles) - len(existing_profiles)) + else: + logger.warning("Environment %s already has the requested workload profiles; reusing existing configuration", + self.get_argument_name()) + + if cleaned_profiles: + self.managed_env_def["properties"]["workloadProfiles"] = cleaned_profiles + return + + # No existing environment, fall back to defaults and append requested profiles + workload_profiles = get_default_workload_profiles(self.cmd, self.get_argument_location()) + for profile in requested_profiles: + if not _profile_exists(workload_profiles, profile.get("workloadProfileType")): + workload_profiles.append(profile) + self.managed_env_def["properties"]["workloadProfiles"] = workload_profiles def set_up_custom_domain_configuration(self): if self.get_argument_hostname(): diff --git a/src/containerapp/azext_containerapp/custom.py b/src/containerapp/azext_containerapp/custom.py index 500e07bf2c5..aa49b6dcb93 100644 --- a/src/containerapp/azext_containerapp/custom.py +++ b/src/containerapp/azext_containerapp/custom.py @@ -17,6 +17,7 @@ import json import requests import copy +import os import subprocess from concurrent.futures import ThreadPoolExecutor @@ -2131,7 +2132,7 @@ def create_containerapps_from_compose(cmd, # pylint: disable=R0914 cmd=cmd, name=managed_env_name, resource_group_name=env_rg) logger.info(f"[env: {managed_env_name}] Environment found") # Log comprehensive environment status - log_environment_status(cmd, env_rg, managed_env_name, managed_environment) + log_environment_status(cmd, env_rg, managed_env_name, managed_environment, log_level='warning') except 
CLIInternalError: # pylint: disable=W0702 logger.info(f"[env: {managed_env_name}] Environment does not exist, will create") logger.info(f"[env: {managed_env_name}] Creating in resource group '{env_rg}', location '{location}'") @@ -2143,8 +2144,10 @@ def create_containerapps_from_compose(cmd, # pylint: disable=R0914 location=location) logger.info(f"[env: {managed_env_name}] Environment created successfully") # Log status of newly created environment - log_environment_status(cmd, env_rg, managed_env_name, managed_environment) + log_environment_status(cmd, env_rg, managed_env_name, managed_environment, log_level='warning') + compose_file_path = os.path.abspath(compose_file_path) + compose_directory = os.path.dirname(compose_file_path) compose_yaml = load_yaml_file(compose_file_path) # Make a deep copy to preserve original YAML for x-azure-deployment # extraction @@ -2597,7 +2600,7 @@ def create_containerapps_from_compose(cmd, # pylint: disable=R0914 if service.build is not None: logger.warning("Build configuration defined for this service.") logger.warning( - "The build will be performed by Azure Container Registry.") + "Attempting local Docker build; will fall back to Azure Container Registry if needed.") context = service.build.context dockerfile = "Dockerfile" if service.build.dockerfile is not None: @@ -2616,7 +2619,9 @@ def create_containerapps_from_compose(cmd, # pylint: disable=R0914 registry, registry_username, registry_password, - environment) + environment, + compose_directory=compose_directory, + build_configuration=service.build) # Phase 7: Check if container app exists and detect changes (skip in # dry-run) From 80a128b5f43932023041e9624628ddfbb8611c65 Mon Sep 17 00:00:00 2001 From: Simon Jakesch Date: Sun, 16 Nov 2025 12:31:08 -0600 Subject: [PATCH 10/10] fixing error for secrets, suppressing wrong logging --- .../azext_containerapp/_compose_utils.py | 45 +++++++++++-------- .../containerapp_decorator.py | 16 ++++--- src/containerapp/azext_containerapp/custom.py | 6 ++- 3 files changed, 41 insertions(+), 26 deletions(-) diff --git a/src/containerapp/azext_containerapp/_compose_utils.py b/src/containerapp/azext_containerapp/_compose_utils.py index ba97edb3153..dec5bc3296f 100644 --- a/src/containerapp/azext_containerapp/_compose_utils.py +++ b/src/containerapp/azext_containerapp/_compose_utils.py @@ -1940,7 +1940,8 @@ def get_mcp_gateway_configuration(service): def enable_managed_identity(cmd, resource_group_name, app_name): """ - Enable system-assigned managed identity for a container app. + Enable system-assigned managed identity for a container app using a scoped PATCH + so that existing secrets (which are write-only) are not re-sent without values. 
Args: cmd: Azure CLI command context @@ -1950,36 +1951,42 @@ def enable_managed_identity(cmd, resource_group_name, app_name): Returns: Dictionary with identity information including principal_id """ + import json from knack.log import get_logger - from ._clients import ContainerAppClient - - logger = get_logger(__name__) + from azure.cli.core.commands.client_factory import get_subscription_id + from azure.cli.core.util import send_raw_request - logger.info(f"Enabling system-assigned managed identity for '{app_name}'") + from ._clients import PREVIEW_API_VERSION - try: - # Get current app using show classmethod - app = ContainerAppClient.show(cmd, resource_group_name, app_name) + logger = get_logger(__name__) - # Set identity type to SystemAssigned - if 'identity' not in app: - app['identity'] = {} + logger.info(f"Enabling system-assigned managed identity for '{app_name}' via PATCH") - app['identity']['type'] = 'SystemAssigned' + management_hostname = cmd.cli_ctx.cloud.endpoints.resource_manager + subscription_id = get_subscription_id(cmd.cli_ctx) + request_url = "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.App/containerApps/{}?api-version={}".format( + management_hostname.strip('/'), + subscription_id, + resource_group_name, + app_name, + PREVIEW_API_VERSION) - # Update the app using create_or_update classmethod - updated_app = ContainerAppClient.create_or_update( - cmd, resource_group_name, app_name, app) + payload = { + "identity": { + "type": "SystemAssigned" + } + } + try: + response = send_raw_request(cmd.cli_ctx, "PATCH", request_url, body=json.dumps(payload)) + updated_app = response.json() identity = updated_app.get('identity', {}) principal_id = identity.get('principalId') if principal_id: - logger.info( - f"Successfully enabled managed identity. Principal ID: {principal_id}") + logger.info(f"Successfully enabled managed identity. Principal ID: {principal_id}") else: - logger.warning( - "Managed identity enabled but principal ID not yet available") + logger.warning("Managed identity enabled but principal ID not yet available") return identity diff --git a/src/containerapp/azext_containerapp/containerapp_decorator.py b/src/containerapp/azext_containerapp/containerapp_decorator.py index 451cbc3968b..5713b3d3fb5 100644 --- a/src/containerapp/azext_containerapp/containerapp_decorator.py +++ b/src/containerapp/azext_containerapp/containerapp_decorator.py @@ -133,6 +133,9 @@ def get_argument_from_revision(self): def get_argument_target_label(self): return self.get_param("target_label") + def get_argument_suppress_ingress_warning(self): + return self.get_param("suppress_ingress_warning") + def validate_arguments(self): self.containerapp_def = None try: @@ -1089,15 +1092,18 @@ def post_process(self, r): if "properties" in r and "provisioningState" in r["properties"] and r["properties"]["provisioningState"].lower() == "waiting" and not self.get_argument_no_wait(): not self.get_argument_disable_warnings() and logger.warning('Containerapp creation in progress. Please monitor the creation using `az containerapp show -n {} -g {}`'.format(self.get_argument_name(), self.get_argument_resource_group_name())) + suppress_ingress_warning = bool(self.get_argument_suppress_ingress_warning()) + if "configuration" in r["properties"] and "ingress" in r["properties"]["configuration"] and \ r["properties"]["configuration"]["ingress"] and "fqdn" in r["properties"]["configuration"]["ingress"]: not self.get_argument_disable_warnings() and logger.warning("\nContainer app created. 
Access your app at https://{}/\n".format(r["properties"]["configuration"]["ingress"]["fqdn"])) else: - target_port = self.get_argument_target_port() or "" - not self.get_argument_disable_warnings() and logger.warning( - "\nContainer app created. To access it over HTTPS, enable ingress: " - "az containerapp ingress enable -n %s -g %s --type external --target-port %s" - " --transport auto\n", self.get_argument_name(), self.get_argument_resource_group_name(), target_port) + if not suppress_ingress_warning: + target_port = self.get_argument_target_port() or "" + not self.get_argument_disable_warnings() and logger.warning( + "\nContainer app created. To access it over HTTPS, enable ingress: " + "az containerapp ingress enable -n %s -g %s --type external --target-port %s" + " --transport auto\n", self.get_argument_name(), self.get_argument_resource_group_name(), target_port) if self.get_argument_service_connectors_def_list() is not None: linker_client = get_linker_client(self.cmd) diff --git a/src/containerapp/azext_containerapp/custom.py b/src/containerapp/azext_containerapp/custom.py index aa49b6dcb93..6791e2410c1 100644 --- a/src/containerapp/azext_containerapp/custom.py +++ b/src/containerapp/azext_containerapp/custom.py @@ -773,7 +773,8 @@ def create_containerapp(cmd, runtime=None, enable_java_metrics=None, enable_java_agent=None, - kind=None): + kind=None, + suppress_ingress_warning=False): raw_parameters = locals() containerapp_create_decorator = ContainerAppPreviewCreateDecorator( @@ -2759,7 +2760,8 @@ def create_containerapps_from_compose(cmd, # pylint: disable=R0914 env_vars=env_vars_cli_format, secrets=secret_vars, min_replicas=final_min_replicas, - max_replicas=final_max_replicas, ) + max_replicas=final_max_replicas, + suppress_ingress_warning=True) ) # Phase 5: Dry-run mode - collect service config instead of deploying
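
Illustrative sketch (not part of the diff above): how the new local-build path turns a
compose 'build' section into docker CLI flags. It assumes _collect_docker_build_options
is importable from azext_containerapp._compose_utils as added in this patch; the
SimpleNamespace stands in for the parsed compose build object, and the registry and
image names are placeholders.

    from types import SimpleNamespace

    from azext_containerapp._compose_utils import _collect_docker_build_options

    # Mimics a compose service's parsed build section; attribute names mirror the
    # fields the helper reads via getattr().
    build = SimpleNamespace(
        target="runtime",
        args={"GIT_SHA": "abc123"},
        labels=None,
        no_cache=False,
        pull=True,
        cache_from=["type=registry,ref=myacr.azurecr.io/web:cache"],
        cache_to=None,
        extra_hosts={"host.docker.internal": "host-gateway"},
        ssh=None,
        shm_size=None,
        isolation=None,
        tags=None,
        secrets=None,
    )

    flags, extra_tags, unsupported = _collect_docker_build_options(build, "myacr.azurecr.io")
    # flags == ['--target', 'runtime',
    #           '--build-arg', 'GIT_SHA=abc123',
    #           '--pull',
    #           '--cache-from', 'type=registry,ref=myacr.azurecr.io/web:cache',
    #           '--add-host', 'host.docker.internal:host-gateway']
    # extra_tags == [] and unsupported == [] because no 'tags' or 'secrets' are set.
    # _build_with_local_docker appends these flags to
    # 'docker build --file <dockerfile> --tag <image> ... <context>' and pushes the
    # resulting tags to the resolved registry.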