Skip to content

Terraform hotfix#1

Open
borbul2007 wants to merge 11 commits into terraform-05 from
terraform-hotfix
Open

Terraform hotfix#1
borbul2007 wants to merge 11 commits into terraform-05 from
terraform-hotfix

Conversation

@borbul2007
Copy link
Owner

TFLint:
[root@nt src]# docker run --rm -v $(pwd):/tflint -t ghcr.io/terraform-linters/tflint --config=/tflint/.tflint.hcl --chdir=/tflint
[root@nt src]#

Checkov:
terraform scan results:
Passed checks: 2, Failed checks: 0, Skipped checks: 0
Check: CKV_TF_2: "Ensure Terraform module sources use a tag with a version number"
PASSED for resource: analytics_vm
File: /main.tf:29-47
Guide: https://docs.prismacloud.io/en/enterprise-edition/policy-reference/supply-chain-policies/terraform-policies/ensure-terraform-module-sources-use-tag
Check: CKV_TF_2: "Ensure Terraform module sources use a tag with a version number"
PASSED for resource: marketing_vm
File: /main.tf:49-68
Guide: https://docs.prismacloud.io/en/enterprise-edition/policy-reference/supply-chain-policies/terraform-policies/ensure-terraform-module-sources-use-tag

[root@nt src]# terraform plan
data.template_file.cloudinit: Reading...
module.marketing_vm.data.yandex_compute_image.my_image: Reading...
module.analytics_vm.data.yandex_compute_image.my_image: Reading...
data.template_file.cloudinit: Read complete after 0s [id=c570666f4efd1577763e8af5d8fd7d948de7a69724a57e5ea3cc3a7fe7458e3c]
module.analytics_vm.data.yandex_compute_image.my_image: Read complete after 0s [id=fd892vjp5gajiqr0g1b3]
module.marketing_vm.data.yandex_compute_image.my_image: Read complete after 1s [id=fd892vjp5gajiqr0g1b3]

Terraform used the selected providers to generate the following execution plan. Resource actions are indicated with the following symbols:

  • create

Terraform will perform the following actions:

yandex_vpc_network.develop will be created

  • resource "yandex_vpc_network" "develop" {
    • created_at = (known after apply)
    • default_security_group_id = (known after apply)
    • folder_id = (known after apply)
    • id = (known after apply)
    • labels = (known after apply)
    • name = "develop"
    • subnet_ids = (known after apply)
      }

yandex_vpc_subnet.develop_a will be created

  • resource "yandex_vpc_subnet" "develop_a" {
    • created_at = (known after apply)
    • folder_id = (known after apply)
    • id = (known after apply)
    • labels = (known after apply)
    • name = "develop-ru-central1-a"
    • network_id = (known after apply)
    • v4_cidr_blocks = [
      • "10.0.1.0/24",
        ]
    • v6_cidr_blocks = (known after apply)
    • zone = "ru-central1-a"
      }

yandex_vpc_subnet.develop_b will be created

  • resource "yandex_vpc_subnet" "develop_b" {
    • created_at = (known after apply)
    • folder_id = (known after apply)
    • id = (known after apply)
    • labels = (known after apply)
    • name = "develop-ru-central1-b"
    • network_id = (known after apply)
    • v4_cidr_blocks = [
      • "10.0.2.0/24",
        ]
    • v6_cidr_blocks = (known after apply)
    • zone = "ru-central1-b"
      }

module.analytics_vm.yandex_compute_instance.vm[0] will be created

  • resource "yandex_compute_instance" "vm" {
    • allow_stopping_for_update = true

    • created_at = (known after apply)

    • description = "TODO: description; {{terraform managed}}"

    • folder_id = (known after apply)

    • fqdn = (known after apply)

    • gpu_cluster_id = (known after apply)

    • hostname = "develop-vm-0"

    • id = (known after apply)

    • labels = {

      • "project" = "acanalytics"
        }
    • maintenance_grace_period = (known after apply)

    • maintenance_policy = (known after apply)

    • metadata = {

      • "serial-port-enable" = "1"
      • "user-data" = <<-EOT
        #cloud-config
        users:
        - name: ubuntu
        groups: sudo
        shell: /bin/bash
        sudo: ["ALL=(ALL) NOPASSWD:ALL"]
        ssh_authorized_keys:
        - ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIJbFQwlXpyF5D6x8yiptgTG/Are3CfQ94MRINvltKRs2
        package_update: true
        package_upgrade: false
        packages:
        - vim
        - nginx
        EOT
        }
    • name = "develop-vm-0"

    • network_acceleration_type = "standard"

    • platform_id = "standard-v1"

    • service_account_id = (known after apply)

    • status = (known after apply)

    • zone = "ru-central1-a"

    • boot_disk {

      • auto_delete = true

      • device_name = (known after apply)

      • disk_id = (known after apply)

      • mode = (known after apply)

      • initialize_params {

        • block_size = (known after apply)
        • description = (known after apply)
        • image_id = "fd892vjp5gajiqr0g1b3"
        • name = (known after apply)
        • size = 10
        • snapshot_id = (known after apply)
        • type = "network-hdd"
          }
          }
    • network_interface {

      • index = (known after apply)
      • ip_address = (known after apply)
      • ipv4 = true
      • ipv6 = (known after apply)
      • ipv6_address = (known after apply)
      • mac_address = (known after apply)
      • nat = true
      • nat_ip_address = (known after apply)
      • nat_ip_version = (known after apply)
      • security_group_ids = (known after apply)
      • subnet_id = (known after apply)
        }
    • resources {

      • core_fraction = 5
      • cores = 2
      • memory = 1
        }
    • scheduling_policy {

      • preemptible = true
        }
        }

module.analytics_vm.yandex_compute_instance.vm[1] will be created

  • resource "yandex_compute_instance" "vm" {
    • allow_stopping_for_update = true

    • created_at = (known after apply)

    • description = "TODO: description; {{terraform managed}}"

    • folder_id = (known after apply)

    • fqdn = (known after apply)

    • gpu_cluster_id = (known after apply)

    • hostname = "develop-vm-1"

    • id = (known after apply)

    • labels = {

      • "project" = "acanalytics"
        }
    • maintenance_grace_period = (known after apply)

    • maintenance_policy = (known after apply)

    • metadata = {

      • "serial-port-enable" = "1"
      • "user-data" = <<-EOT
        #cloud-config
        users:
        - name: ubuntu
        groups: sudo
        shell: /bin/bash
        sudo: ["ALL=(ALL) NOPASSWD:ALL"]
        ssh_authorized_keys:
        - ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIJbFQwlXpyF5D6x8yiptgTG/Are3CfQ94MRINvltKRs2
        package_update: true
        package_upgrade: false
        packages:
        - vim
        - nginx
        EOT
        }
    • name = "develop-vm-1"

    • network_acceleration_type = "standard"

    • platform_id = "standard-v1"

    • service_account_id = (known after apply)

    • status = (known after apply)

    • zone = "ru-central1-a"

    • boot_disk {

      • auto_delete = true

      • device_name = (known after apply)

      • disk_id = (known after apply)

      • mode = (known after apply)

      • initialize_params {

        • block_size = (known after apply)
        • description = (known after apply)
        • image_id = "fd892vjp5gajiqr0g1b3"
        • name = (known after apply)
        • size = 10
        • snapshot_id = (known after apply)
        • type = "network-hdd"
          }
          }
    • network_interface {

      • index = (known after apply)
      • ip_address = (known after apply)
      • ipv4 = true
      • ipv6 = (known after apply)
      • ipv6_address = (known after apply)
      • mac_address = (known after apply)
      • nat = true
      • nat_ip_address = (known after apply)
      • nat_ip_version = (known after apply)
      • security_group_ids = (known after apply)
      • subnet_id = (known after apply)
        }
    • resources {

      • core_fraction = 5
      • cores = 2
      • memory = 1
        }
    • scheduling_policy {

      • preemptible = true
        }
        }

module.marketing_vm.yandex_compute_instance.vm[0] will be created

  • resource "yandex_compute_instance" "vm" {
    • allow_stopping_for_update = true

    • created_at = (known after apply)

    • description = "TODO: description; {{terraform managed}}"

    • folder_id = (known after apply)

    • fqdn = (known after apply)

    • gpu_cluster_id = (known after apply)

    • hostname = "stage-web-stage-0"

    • id = (known after apply)

    • labels = {

      • "project" = "marketing"
        }
    • maintenance_grace_period = (known after apply)

    • maintenance_policy = (known after apply)

    • metadata = {

      • "serial-port-enable" = "1"
      • "user-data" = <<-EOT
        #cloud-config
        users:
        - name: ubuntu
        groups: sudo
        shell: /bin/bash
        sudo: ["ALL=(ALL) NOPASSWD:ALL"]
        ssh_authorized_keys:
        - ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIJbFQwlXpyF5D6x8yiptgTG/Are3CfQ94MRINvltKRs2
        package_update: true
        package_upgrade: false
        packages:
        - vim
        - nginx
        EOT
        }
    • name = "stage-web-stage-0"

    • network_acceleration_type = "standard"

    • platform_id = "standard-v1"

    • service_account_id = (known after apply)

    • status = (known after apply)

    • zone = "ru-central1-a"

    • boot_disk {

      • auto_delete = true

      • device_name = (known after apply)

      • disk_id = (known after apply)

      • mode = (known after apply)

      • initialize_params {

        • block_size = (known after apply)
        • description = (known after apply)
        • image_id = "fd892vjp5gajiqr0g1b3"
        • name = (known after apply)
        • size = 10
        • snapshot_id = (known after apply)
        • type = "network-hdd"
          }
          }
    • network_interface {

      • index = (known after apply)
      • ip_address = (known after apply)
      • ipv4 = true
      • ipv6 = (known after apply)
      • ipv6_address = (known after apply)
      • mac_address = (known after apply)
      • nat = true
      • nat_ip_address = (known after apply)
      • nat_ip_version = (known after apply)
      • security_group_ids = (known after apply)
      • subnet_id = (known after apply)
        }
    • resources {

      • core_fraction = 5
      • cores = 2
      • memory = 1
        }
    • scheduling_policy {

      • preemptible = true
        }
        }

module.vpc_dev.yandex_vpc_network.vpc_dev will be created

  • resource "yandex_vpc_network" "vpc_dev" {
    • created_at = (known after apply)
    • default_security_group_id = (known after apply)
    • folder_id = (known after apply)
    • id = (known after apply)
    • labels = (known after apply)
    • name = "dev"
    • subnet_ids = (known after apply)
      }

module.vpc_dev.yandex_vpc_subnet.vpc_subnet_dev will be created

  • resource "yandex_vpc_subnet" "vpc_subnet_dev" {
    • created_at = (known after apply)
    • folder_id = (known after apply)
    • id = (known after apply)
    • labels = (known after apply)
    • name = "develop"
    • network_id = (known after apply)
    • v4_cidr_blocks = [
      • "10.0.1.0/24",
        ]
    • v6_cidr_blocks = (known after apply)
    • zone = "ru-central1-a"
      }

Plan: 8 to add, 0 to change, 0 to destroy.

Changes to Outputs:

  • vpc_subnet_dev = {
    • created_at = (known after apply)
    • description = null
    • dhcp_options = []
    • folder_id = (known after apply)
    • id = (known after apply)
    • labels = (known after apply)
    • name = "develop"
    • network_id = (known after apply)
    • route_table_id = null
    • timeouts = null
    • v4_cidr_blocks = [
      • "10.0.1.0/24",
        ]
    • v6_cidr_blocks = (known after apply)
    • zone = "ru-central1-a"
      }

@Jiraiya106
Copy link

Отлично

Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment

Labels

None yet

Projects

None yet

Development

Successfully merging this pull request may close these issues.

2 participants