diff --git a/internal/gcs-sidecar/handlers.go b/internal/gcs-sidecar/handlers.go
index e3f423faf8..9b014a7bba 100644
--- a/internal/gcs-sidecar/handlers.go
+++ b/internal/gcs-sidecar/handlers.go
@@ -12,6 +12,7 @@ import (
+	"regexp"
 	"strings"
 	"time"
 
 	"github.com/Microsoft/hcsshim/hcn"
 	"github.com/Microsoft/hcsshim/internal/bridgeutils/commonutils"
 	"github.com/Microsoft/hcsshim/internal/fsformatter"
@@ -37,6 +38,9 @@ const (
 	UVMContainerID = "00000000-0000-0000-0000-000000000000"
 )
 
+// volumeGUIDRe matches a mounted CIM layer path of the form \\?\Volume{<GUID>}\Files and captures the GUID.
+var volumeGUIDRe = regexp.MustCompile(`^\\\\\?\\Volume\{([0-9A-Fa-f\-]+)\}\\Files$`)
+
 // - Handler functions handle the incoming message requests. It
 // also enforces security policy for confidential cwcow containers.
 // - These handler functions may do some additional processing before
@@ -544,6 +548,15 @@ func (b *Bridge) lifecycleNotification(req *request) (err error) {
 	return nil
 }
 
+// volumeGUIDFromLayerPath extracts the volume GUID from a mounted CIM layer path.
+func volumeGUIDFromLayerPath(p string) (string, bool) {
+	m := volumeGUIDRe.FindStringSubmatch(p)
+	if len(m) != 2 {
+		return "", false
+	}
+	return m[1], true
+}
+
 func (b *Bridge) modifySettings(req *request) (err error) {
 	ctx, span := oc.StartSpan(req.ctx, "sidecar::modifySettings")
 	defer span.End()
@@ -676,6 +689,20 @@ func (b *Bridge) modifySettings(req *request) (err error) {
 				return errors.Wrap(err, "CIM mount is denied by policy")
 			}
 
+			// Volume GUID from the request.
+			volGUID := wcowBlockCimMounts.VolumeGUID.String()
+
+			// Cache the verified layer hashes keyed by volume GUID.
+			b.hostState.blockCIMVolumeHashes[volGUID] = hashesToVerify
+
+			// Record the container ID under this volume GUID to mark that its hashes have been verified.
+			if _, ok := b.hostState.blockCIMVolumeContainers[volGUID]; !ok {
+				b.hostState.blockCIMVolumeContainers[volGUID] = make(map[string]struct{})
+			}
+			b.hostState.blockCIMVolumeContainers[volGUID][containerID] = struct{}{}
+
+			log.G(ctx).Tracef("Cached %d verified CIM layer hashes for volume %s (container %s)", len(hashesToVerify), volGUID, containerID)
+
 			if len(layerCIMs) > 1 {
 				_, err = cimfs.MountMergedVerifiedBlockCIMs(layerCIMs[0], layerCIMs[1:], wcowBlockCimMounts.MountFlags, wcowBlockCimMounts.VolumeGUID, layerDigests[0])
 				if err != nil {
@@ -707,9 +734,31 @@ func (b *Bridge) modifySettings(req *request) (err error) {
 			settings := modifyGuestSettingsRequest.Settings.(*guestresource.CWCOWCombinedLayers)
 			containerID := settings.ContainerID
-			log.G(ctx).Tracef("CWCOWCombinedLayers:: ContainerID: %v, ContainerRootPath: %v, Layers: %v, ScratchPath: %v",
+			log.G(ctx).Tracef("cwcowCombinedLayers:: ContainerID: %v, ContainerRootPath: %v, Layers: %v, ScratchPath: %v",
 				containerID, settings.CombinedLayers.ContainerRootPath, settings.CombinedLayers.Layers, settings.CombinedLayers.ScratchPath)
 
+			// CombinedLayers should carry exactly one layer (the mounted CIM volume); checking the length is purely defensive.
+			if len(settings.CombinedLayers.Layers) == 1 {
+				layerPath := settings.CombinedLayers.Layers[0].Path
+				if guidStr, ok := volumeGUIDFromLayerPath(layerPath); ok {
+					hashes, haveHashes := b.hostState.blockCIMVolumeHashes[guidStr]
+					// Hashes must always be present: they are recorded during the initial CIM mount in the ResourceTypeWCOWBlockCims request.
+					if haveHashes {
+						containers := b.hostState.blockCIMVolumeContainers[guidStr]
+						if _, seen := containers[containerID]; !seen {
+							// The CIMs are already mounted for another container; call EnforceVerifiedCIMsPolicy so this container is also recorded in the policy metadata.
+							log.G(ctx).Tracef("verified CIM hashes for reused mount volume %s (container %s)", guidStr, containerID)
+							if err := b.hostState.securityPolicyEnforcer.EnforceVerifiedCIMsPolicy(ctx, containerID, hashes); err != nil {
+								return fmt.Errorf("CIM mount is denied by policy for this container: %w", err)
+							}
+							containers[containerID] = struct{}{}
+						}
+					} else {
+						return fmt.Errorf("no CIM hashes found for container ID %s", containerID)
+					}
+				}
+			}
+
+			// Since unencrypted scratch is not an option, always pass true.
 			if err := b.hostState.securityPolicyEnforcer.EnforceScratchMountPolicy(ctx, settings.CombinedLayers.ContainerRootPath, true); err != nil {
 				return fmt.Errorf("scratch mounting denied by policy: %w", err)
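Reviewer note: for anyone checking the regex, here is a minimal standalone sketch of the new path-parsing helper. Only the regex and the helper mirror the diff above; the sample path and GUID are invented for illustration.

```go
package main

import (
	"fmt"
	"regexp"
)

// Same pattern as volumeGUIDRe in handlers.go: two literal backslashes, "?",
// a backslash, then Volume{<GUID>}\Files; the capture group is the GUID.
var volumeGUIDRe = regexp.MustCompile(`^\\\\\?\\Volume\{([0-9A-Fa-f\-]+)\}\\Files$`)

func volumeGUIDFromLayerPath(p string) (string, bool) {
	m := volumeGUIDRe.FindStringSubmatch(p)
	if len(m) != 2 {
		return "", false
	}
	return m[1], true
}

func main() {
	// Raw string literal, so the backslashes are literal: \\?\Volume{...}\Files.
	path := `\\?\Volume{1b6a9e2f-3e5d-4a7c-9b1d-0c2f4e6a8b0d}\Files`
	if guid, ok := volumeGUIDFromLayerPath(path); ok {
		fmt.Println("volume GUID:", guid)
	}
	// Paths that are not CIM layer mounts simply fail to match.
	if _, ok := volumeGUIDFromLayerPath(`C:\Windows\Temp`); !ok {
		fmt.Println("not a CIM layer mount path")
	}
}
```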
diff --git a/internal/gcs-sidecar/host.go b/internal/gcs-sidecar/host.go
index 82519ac414..57f5d8cb6d 100644
--- a/internal/gcs-sidecar/host.go
+++ b/internal/gcs-sidecar/host.go
@@ -32,6 +32,11 @@ type Host struct {
 	containersMutex sync.Mutex
 	containers      map[string]*Container
 
+	// blockCIMVolumeHashes maps a volume GUID to the verified CIM layer hashes for that volume
+	blockCIMVolumeHashes map[string][]string
+	// blockCIMVolumeContainers maps a volume GUID to the set of container IDs verified against it
+	blockCIMVolumeContainers map[string]map[string]struct{}
+
 	// state required for the security policy enforcement
 	policyMutex            sync.Mutex
 	securityPolicyEnforcer securitypolicy.SecurityPolicyEnforcer
@@ -58,6 +63,8 @@ type containerProcess struct {
 func NewHost(initialEnforcer securitypolicy.SecurityPolicyEnforcer) *Host {
 	return &Host{
 		containers:                make(map[string]*Container),
+		blockCIMVolumeHashes:      make(map[string][]string),
+		blockCIMVolumeContainers:  make(map[string]map[string]struct{}),
 		securityPolicyEnforcer:    initialEnforcer,
 		securityPolicyEnforcerSet: false,
 	}
diff --git a/internal/regopolicyinterpreter/regopolicyinterpreter.go b/internal/regopolicyinterpreter/regopolicyinterpreter.go
index 047a4a27b7..66f62c5114 100644
--- a/internal/regopolicyinterpreter/regopolicyinterpreter.go
+++ b/internal/regopolicyinterpreter/regopolicyinterpreter.go
@@ -585,6 +585,27 @@ func (r *RegoPolicyInterpreter) RawQuery(rule string, input map[string]interface
 	return resultSet, nil
 }
 
+// MetadataJSON returns the entire metadata object as a JSON string. The
+// result is a point-in-time snapshot taken under dataAndModulesMutex, so
+// callers cannot mutate the interpreter's internal state through it.
+func (r *RegoPolicyInterpreter) MetadataJSON() (string, error) {
+	r.dataAndModulesMutex.Lock()
+	defer r.dataAndModulesMutex.Unlock()
+
+	root, ok := r.data["metadata"].(regoMetadata)
+	if !ok {
+		return "", errors.New("incorrect interpreter state: invalid metadata object type")
+	}
+
+	// Marshaling copies the data, so the internal maps are never exposed to callers.
+	b, err := json.Marshal(root)
+	if err != nil {
+		return "", fmt.Errorf("unable to marshal metadata: %w", err)
+	}
+
+	return string(b), nil
+}
+
 // Query queries the policy with the given rule and input data and returns the result.
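Reviewer note: since MetadataJSON returns a serialized snapshot rather than live references, callers can post-process it freely. A small sketch of one such consumer; the snapshot shape below is invented, and only the string-in/string-out contract comes from the diff.

```go
package main

import (
	"bytes"
	"encoding/json"
	"fmt"
)

// prettyMetadata indents a MetadataJSON snapshot for readable debug output.
// The snapshot is an independent string, so reformatting it cannot race with
// or mutate the interpreter's internal maps.
func prettyMetadata(mdJSON string) string {
	var pretty bytes.Buffer
	if err := json.Indent(&pretty, []byte(mdJSON), "", "  "); err != nil {
		return mdJSON // fall back to the compact form if the input is malformed
	}
	return pretty.String()
}

func main() {
	// Stand-in for a real MetadataJSON() result; the field names are made up.
	snapshot := `{"matches":{"container-1":["rule-a"]},"started":["container-1"]}`
	fmt.Println(prettyMetadata(snapshot))
}
```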
func (r *RegoPolicyInterpreter) Query(rule string, input map[string]interface{}) (RegoQueryResult, error) {
 	// this mutex ensures no other threads modify the data and compiledModules fields during query execution
diff --git a/pkg/securitypolicy/framework.rego b/pkg/securitypolicy/framework.rego
index 8a28f3e312..bb6e86ea4d 100644
--- a/pkg/securitypolicy/framework.rego
+++ b/pkg/securitypolicy/framework.rego
@@ -1360,6 +1360,8 @@ env_rule_matches(rule) {
 }
 
 errors["missing required environment variable"] {
+    # only surface this error for Linux containers
+    is_linux
     input.rule == "create_container"
     not container_started
diff --git a/pkg/securitypolicy/regopolicy_windows_test.go b/pkg/securitypolicy/regopolicy_windows_test.go
index 1675410d33..64a410de83 100644
--- a/pkg/securitypolicy/regopolicy_windows_test.go
+++ b/pkg/securitypolicy/regopolicy_windows_test.go
@@ -8,6 +8,7 @@ import (
 	_ "embed"
 	"fmt"
 	"math/rand"
+	"strconv"
 	"strings"
 	"testing"
 	"testing/quick"
@@ -315,6 +316,44 @@ func Test_Rego_EnforceCreateContainer_Same_Container_Twice_Windows(t *testing.T)
 	}
 }
 
+func Test_Rego_EnforceVerifiedCIMsPolicy_Multiple_Instances_Same_Container(t *testing.T) {
+	for containersToCreate := 5; containersToCreate <= maxContainersInGeneratedConstraints; containersToCreate++ {
+		constraints := new(generatedWindowsConstraints)
+		constraints.ctx = context.Background()
+		constraints.externalProcesses = generateExternalProcesses(testRand)
+
+		for i := 1; i <= containersToCreate; i++ {
+			arg := "command " + strconv.Itoa(i)
+			c := &securityPolicyWindowsContainer{
+				Command: []string{arg},
+				Layers:  []string{"1", "2"},
+			}
+
+			constraints.containers = append(constraints.containers, c)
+		}
+
+		securityPolicy := constraints.toPolicy()
+		policy, err := newRegoPolicy(securityPolicy.marshalWindowsRego(), []oci.Mount{}, []oci.Mount{}, testOSType)
+		if err != nil {
+			t.Fatalf("failed to create enforcer: %v", err)
+		}
+
+		for _, container := range constraints.containers {
+			// Reverse container.Layers to satisfy the layerHashes_ok ordering.
+			layerHashes := make([]string, len(container.Layers))
+			for i, layer := range container.Layers {
+				layerHashes[len(container.Layers)-1-i] = layer
+			}
+
+			id := testDataGenerator.uniqueContainerID()
+			err = policy.EnforceVerifiedCIMsPolicy(constraints.ctx, id, layerHashes)
+			if err != nil {
+				t.Fatalf("failed with %d containers: %v", containersToCreate, err)
+			}
+		}
+	}
+}
+
 // -- Capabilities/Mount/Rego version tests are removed -- TODO: add back the Rego version tests.
 func Test_Rego_ExecInContainerPolicy_Windows(t *testing.T) {
 	f := func(p *generatedWindowsConstraints) bool {
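Reviewer note: the reversal loop is the subtle part of the new test. Per its comment, the generated constraints list layers in the opposite order from what the layerHashes_ok rule expects, so the digests are flipped before enforcement. An isolated sketch of that transform; the helper name is ours, and which end counts as "top" is an assumption taken from the test's comment.

```go
package main

import "fmt"

// reverseLayers mirrors the loop in the new test: it returns the layer
// digests in reverse order so they line up with the ordering that the
// layerHashes_ok rule checks against.
func reverseLayers(layers []string) []string {
	hashes := make([]string, len(layers))
	for i, layer := range layers {
		hashes[len(layers)-1-i] = layer
	}
	return hashes
}

func main() {
	fmt.Println(reverseLayers([]string{"1", "2", "3"})) // [3 2 1]
}
```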
diff --git a/pkg/securitypolicy/securitypolicyenforcer_rego.go b/pkg/securitypolicy/securitypolicyenforcer_rego.go
index f12d8d1fb1..61eb2bc768 100644
--- a/pkg/securitypolicy/securitypolicyenforcer_rego.go
+++ b/pkg/securitypolicy/securitypolicyenforcer_rego.go
@@ -729,6 +729,13 @@ func (policy *regoEnforcer) EnforceCreateContainerPolicyV2(
 			"seccompProfileSHA256": opts.SeccompProfileSHA256,
 		}
 	case "windows":
+		// Dump the full interpreter metadata at debug level for diagnostics.
+		if mdJSON, err := policy.rego.MetadataJSON(); err == nil {
+			log.G(ctx).Debugf("Current policy metadata: %s", mdJSON)
+		} else {
+			log.G(ctx).WithError(err).Warn("failed to obtain policy metadata snapshot")
+		}
+
 		input = inputData{
 			"containerID": containerID,
 			"argList":     argList,
@@ -792,18 +799,6 @@ func appendMountData(mountData []interface{}, mounts []oci.Mount) []interface{}
 	return mountData
 }
 
-func appendMountDataWindows(mountData []interface{}, mounts []oci.Mount) []interface{} {
-	for _, mount := range mounts {
-		mountData = append(mountData, inputData{
-			"destination": mount.Destination,
-			"source":      mount.Source,
-			"options":     mount.Options,
-		})
-	}
-
-	return mountData
-}
-
 func (policy *regoEnforcer) ExtendDefaultMounts(mounts []oci.Mount) error {
 	policy.defaultMounts = append(policy.defaultMounts, mounts...)
 	defaultMounts := appendMountData([]interface{}{}, policy.defaultMounts)
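Reviewer note: one possible refinement to the metadata dump in EnforceCreateContainerPolicyV2 is to skip the snapshot entirely when debug logging is disabled, since MetadataJSON marshals the whole metadata object on every call. A sketch of that gating using logrus (which hcsshim's log package wraps); dumpMetadataIfDebug, metadataSnapshotter, and fakeInterp are illustrative stand-ins, not part of the PR.

```go
package main

import (
	"github.com/sirupsen/logrus"
)

// metadataSnapshotter is the one interpreter method this sketch needs.
type metadataSnapshotter interface {
	MetadataJSON() (string, error)
}

// fakeInterp is a stand-in so the sketch runs without the real interpreter.
type fakeInterp struct{}

func (fakeInterp) MetadataJSON() (string, error) { return `{"started":[]}`, nil }

// dumpMetadataIfDebug pays the JSON-marshal cost only when debug logging is
// actually enabled, keeping the hot path cheap in production.
func dumpMetadataIfDebug(entry *logrus.Entry, r metadataSnapshotter) {
	if !entry.Logger.IsLevelEnabled(logrus.DebugLevel) {
		return
	}
	if mdJSON, err := r.MetadataJSON(); err == nil {
		entry.Debugf("Current policy metadata: %s", mdJSON)
	} else {
		entry.WithError(err).Warn("failed to obtain policy metadata snapshot")
	}
}

func main() {
	entry := logrus.NewEntry(logrus.StandardLogger())
	entry.Logger.SetLevel(logrus.DebugLevel)
	dumpMetadataIfDebug(entry, fakeInterp{})
}
```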