diff --git a/.cursor/rules/common b/.cursor/rules/common index 402558c9..0048a591 160000 --- a/.cursor/rules/common +++ b/.cursor/rules/common @@ -1 +1 @@ -Subproject commit 402558c9424d32857338fd40a8c52f63050e424b +Subproject commit 0048a59175d3774a9ed517c1fe8ade681d8cea42 diff --git a/.dockerignore b/.dockerignore index 47691999..13b0a0ae 100644 --- a/.dockerignore +++ b/.dockerignore @@ -2,6 +2,10 @@ .git .gitignore +# Submodules (not needed in container) +.cursor/ +website-pages/ + # Node.js dependencies node_modules coverage diff --git a/.github/workflows/deploy_to_aws.yml b/.github/workflows/deploy_to_aws.yml index 6aa07f9f..78063326 100644 --- a/.github/workflows/deploy_to_aws.yml +++ b/.github/workflows/deploy_to_aws.yml @@ -3,7 +3,7 @@ name: 'Deploy to AWS' on: push: branches: - - origin/create-terraform-configuration + - create-terraform-configuration # - 'releases/**' workflow_dispatch: @@ -34,7 +34,7 @@ on: jobs: init-and-plan: runs-on: ubuntu-latest - environment: ${{ inputs.deploy-env || (github.ref_name == 'main' && 'Development') || (startsWith(github.ref_name, 'prod/') && 'Production') || (startsWith(github.ref_name, 'staging/') && 'Staging') || 'None' }} + environment: Development steps: - name: Get Environment Name for ${{ vars.ENV_NAME }} id: get_env_name @@ -47,507 +47,125 @@ jobs: - name: Checkout config repository uses: actions/checkout@v4 with: - repository: 'speedandfunction/websie-ci-secrets' + repository: 'speedandfunction/website-ci-secret' path: 'terraform-config' token: ${{ secrets.PAT }} - name: Copy tfvars for ${{ vars.ENV_NAME }} run: | - cat "terraform-config/environments/${{ env.TFVAR_NAME }}" "deployment/environments/${{env.TFVAR_NAME}}" > "deployment/terraform/${{ env.SETTINGS_BUCKET }}.tfvars" - env: - SETTINGS_BUCKET: oshub-settings-${{ steps.get_env_name.outputs.lowercase }} - TFVAR_NAME: terraform-${{steps.get_env_name.outputs.lowercase}}.tfvars + cat "terraform-config/${{ vars.ENV_NAME }}.tfvars" 
"deployment/environments/${{ vars.ENV_NAME }}.tfvars" > "deployment/terraform.tfvars" + - name: Setup Terraform uses: hashicorp/setup-terraform@v2 with: - terraform_version: 1.5.0 + terraform_version: 1.12.0 terraform_wrapper: false - - name: Terraform Plan for ${{ vars.ENV_NAME }} - run: ./deployment/infra plan + run: | + cd deployment + terraform init -backend-config="environments/backend-${{ vars.ENV_NAME }}.hcl" + terraform plan -out="${{ vars.ENV_NAME }}.tfplan" env: - AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }} - AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }} - AWS_DEFAULT_REGION: "eu-west-1" - SETTINGS_BUCKET: oshub-settings-${{ steps.get_env_name.outputs.lowercase }} - + AWS_ACCESS_KEY_ID: ${{ secrets.TF_AWS_ACCESS_KEY_ID }} + AWS_SECRET_ACCESS_KEY: ${{ secrets.TF_AWS_SECRET_ACCESS_KEY }} + AWS_DEFAULT_REGION: "us-east-1" + - name: Copy planfile to S3 bucket for ${{ vars.ENV_NAME }} - run: aws s3 cp "deployment/terraform/${{ env.SETTINGS_BUCKET }}.tfplan" "s3://${{ env.SETTINGS_BUCKET }}/terraform/${{ env.SETTINGS_BUCKET }}.tfplan" + run: aws s3 cp "deployment/${{ vars.ENV_NAME }}.tfplan" "s3://${{ env.SETTINGS_BUCKET }}/plans/${{ vars.ENV_NAME }}.tfplan" env: - AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }} - AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }} - SETTINGS_BUCKET: oshub-settings-${{ steps.get_env_name.outputs.lowercase }} - AWS_DEFAULT_REGION: "eu-west-1" - - # detach-waf-if-needed: - # needs: init-and-plan - # if: ${{ inputs.deploy-plan-only == false }} - # runs-on: ubuntu-latest - # environment: ${{ inputs.deploy-env || (github.ref_name == 'main' && 'Development') || (startsWith(github.ref_name, 'releases/') && 'Pre-prod') }} - # steps: - # - name: Get Environment Name for ${{ vars.ENV_NAME }} - # id: get_env_name - # uses: Entepotenz/change-string-case-action-min-dependencies@v1 - # with: - # string: ${{ vars.ENV_NAME }} - - # - name: Checkout repo - # uses: actions/checkout@v4 - - # - name: Checkout 
config repository - # uses: actions/checkout@v4 - # with: - # repository: 'opensupplyhub/ci-deployment' - # path: 'terraform-config' - # token: ${{ secrets.PAT }} - - # - name: Copy tfvars for ${{ vars.ENV_NAME }} - # run: | - # cat "terraform-config/environments/${{ env.TFVAR_NAME }}" "deployment/environments/${{ env.TFVAR_NAME }}" > "deployment/terraform/${{ env.SETTINGS_BUCKET }}.tfvars" - # env: - # SETTINGS_BUCKET: oshub-settings-${{ steps.get_env_name.outputs.lowercase }} - # TFVAR_NAME: terraform-${{ steps.get_env_name.outputs.lowercase }}.tfvars - - # - name: Check whether AWS WAF enabled and find CloudFront distribution id - # run: | - # CLOUDFRONT_DISTRIBUTION_ID=$(./scripts/find_cloudfront_distribution_id.sh "$CLOUDFRONT_DOMAIN") - # waf_enabled=$(grep -E '^waf_enabled\s*=' deployment/environments/${{ env.TFVAR_NAME }} | awk '{print $3}') - - # echo "WAF Enabled: $waf_enabled" - # echo "Distribution ID: $CLOUDFRONT_DISTRIBUTION_ID" - - # if [[ "$waf_enabled" == "false" && -n "$CLOUDFRONT_DISTRIBUTION_ID" ]]; then - # echo "Detaching WAF from CloudFront distribution $CLOUDFRONT_DISTRIBUTION_ID" - # config=$(aws cloudfront get-distribution-config --id "$CLOUDFRONT_DISTRIBUTION_ID") - # etag=$(echo "$config" | jq -r '.ETag') - # dist=$(echo "$config" | jq '.DistributionConfig | .WebACLId = ""') - - # echo "$dist" > updated_config.json - - # aws cloudfront update-distribution \ - # --id "$CLOUDFRONT_DISTRIBUTION_ID" \ - # --if-match "$etag" \ - # --distribution-config file://updated_config.json - # else - # echo "Skipping WAF detachment" - # fi - # env: - # AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }} - # AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }} - # AWS_DEFAULT_REGION: "eu-west-1" - # TFVAR_NAME: terraform-${{ steps.get_env_name.outputs.lowercase }}.tfvars - # SETTINGS_BUCKET: oshub-settings-${{ steps.get_env_name.outputs.lowercase }} - # CLOUDFRONT_DOMAIN: ${{ vars.CLOUDFRONT_DOMAIN }} - - # apply: - # needs: [init-and-plan, 
detach-waf-if-needed] - # runs-on: ubuntu-latest - # environment: ${{ inputs.deploy-env || (github.ref_name == 'main' && 'Development') || (startsWith(github.ref_name, 'releases/') && 'Pre-prod') }} - # if: ${{ inputs.deploy-plan-only == false }} - # steps: - # - name: Get Environment Name for ${{ vars.ENV_NAME }} - # id: get_env_name - # uses: Entepotenz/change-string-case-action-min-dependencies@v1 - # with: - # string: ${{ vars.ENV_NAME }} - - # - name: Setup Terraform - # uses: hashicorp/setup-terraform@v2 - # with: - # terraform_version: 1.5.0 - # terraform_wrapper: false - - # - name: Checkout - # uses: actions/checkout@v4 - - # - name: Get planfile from S3 bucket for ${{ vars.ENV_NAME }} - # run: aws s3 cp "s3://${{ env.SETTINGS_BUCKET }}/terraform/${{ env.SETTINGS_BUCKET }}.tfplan" "deployment/terraform/${{ env.SETTINGS_BUCKET }}.tfplan" - # env: - # AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }} - # AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }} - # SETTINGS_BUCKET: oshub-settings-${{ steps.get_env_name.outputs.lowercase }} - # AWS_DEFAULT_REGION: "eu-west-1" - - # - name: Terraform Apply for ${{ vars.ENV_NAME }} - # run: | - # ./deployment/infra apply - # env: - # AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }} - # AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }} - # SETTINGS_BUCKET: oshub-settings-${{ steps.get_env_name.outputs.lowercase }} - # AWS_DEFAULT_REGION: "eu-west-1" - - # build_and_push_react_app: - # needs: apply - # runs-on: ubuntu-latest - # environment: ${{ inputs.deploy-env || (github.ref_name == 'main' && 'Development') || (startsWith(github.ref_name, 'releases/') && 'Pre-prod') }} - # if: ${{ inputs.deploy-plan-only == false }} - # steps: - # - name: Get Environment Name for ${{ vars.ENV_NAME }} - # id: get_env_name - # uses: Entepotenz/change-string-case-action-min-dependencies@v1 - # with: - # string: ${{ vars.ENV_NAME }} - - # - name: Checkout repo - # uses: actions/checkout@v4 - - # - name: Setup 
Node.js - # uses: actions/setup-node@v2 - # with: - # node-version: '14' - - # - name: Cache dependencies - # uses: actions/cache@v4 - # with: - # path: | - # src/react/node_modules - # key: ${{ runner.os }}-node-${{ hashFiles('**/yarn.lock') }} - - # - name: Install dependencies - # working-directory: src/react - # run: yarn install - - # - name: Build static assets - # working-directory: src/react - # run: yarn run build - - # - id: project - # uses: Entepotenz/change-string-case-action-min-dependencies@v1 - # with: - # string: ${{ vars.PROJECT }} - - # - name: Move static - # env: - # AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }} - # AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }} - # FRONTEND_BUCKET: ${{ steps.project.outputs.lowercase }}-${{ steps.get_env_name.outputs.lowercase }}-frontend - # AWS_DEFAULT_REGION: "eu-west-1" - # CLOUDFRONT_DOMAIN: ${{ vars.CLOUDFRONT_DOMAIN }} - # run: | - # CLOUDFRONT_DISTRIBUTION_ID=$(./scripts/find_cloudfront_distribution_id.sh "$CLOUDFRONT_DOMAIN") - # if [ -z "$CLOUDFRONT_DISTRIBUTION_ID" ]; then - # echo "Error: No CloudFront distribution found for domain: $CLOUDFRONT_DOMAIN" - # exit 1 - # fi - # aws s3 sync src/react/build/ s3://$FRONTEND_BUCKET-$AWS_DEFAULT_REGION/ --delete - # aws cloudfront create-invalidation --distribution-id "$CLOUDFRONT_DISTRIBUTION_ID" --paths "/*" - - # build_and_push_docker_image: - # needs: build_and_push_react_app - # runs-on: ubuntu-latest - # environment: ${{ inputs.deploy-env || (github.ref_name == 'main' && 'Development') || (startsWith(github.ref_name, 'releases/') && 'Pre-prod') }} - # if: ${{ inputs.deploy-plan-only == false }} - # steps: - # - name: Get Environment Name for ${{ vars.ENV_NAME }} - # id: get_env_name - # uses: Entepotenz/change-string-case-action-min-dependencies@v1 - # with: - # string: ${{ vars.ENV_NAME }} - - # - name: Checkout repo - # uses: actions/checkout@v4 - - # - name: Configure AWS credentials for ${{ vars.ENV_NAME }} - # uses: 
aws-actions/configure-aws-credentials@v1 - # with: - # aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }} - # aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }} - # aws-region: "eu-west-1" - - # - name: Get GIT_COMMIT - # run: | - # export SHORT_SHA="$(git rev-parse --short HEAD)" - # export GIT_COMMIT_CI="${SHORT_SHA:0:7}" - # echo "GIT_COMMIT=$GIT_COMMIT_CI" >> $GITHUB_ENV - - # - name: Login to Amazon ECR - # uses: aws-actions/amazon-ecr-login@v1 + AWS_ACCESS_KEY_ID: ${{ secrets.TF_AWS_ACCESS_KEY_ID }} + AWS_SECRET_ACCESS_KEY: ${{ secrets.TF_AWS_SECRET_ACCESS_KEY }} + SETTINGS_BUCKET: sf-website-infrastructure + AWS_DEFAULT_REGION: "us-east-1" + - name: Copy lock file to S3 bucket for ${{ vars.ENV_NAME }} + run: aws s3 cp "deployment/.terraform.lock.hcl" "s3://${{ env.SETTINGS_BUCKET }}/plans/${{ vars.ENV_NAME }}.terraform.lock.hcl" + env: + AWS_ACCESS_KEY_ID: ${{ secrets.TF_AWS_ACCESS_KEY_ID }} + AWS_SECRET_ACCESS_KEY: ${{ secrets.TF_AWS_SECRET_ACCESS_KEY }} + SETTINGS_BUCKET: sf-website-infrastructure + AWS_DEFAULT_REGION: "us-east-1" + apply: + needs: [init-and-plan] + runs-on: ubuntu-latest + environment: Development + if: ${{ inputs.deploy-plan-only == false }} + steps: + - name: Get Environment Name for ${{ vars.ENV_NAME }} + id: get_env_name + uses: Entepotenz/change-string-case-action-min-dependencies@v1 + with: + string: ${{ vars.ENV_NAME }} - # - name: Build and push Kafka Tools Docker image to ECR for ${{ vars.ENV_NAME }} - # uses: docker/build-push-action@v2 - # with: - # context: src/kafka-tools/ - # file: src/kafka-tools/Dockerfile - # push: true - # tags: ${{ vars.ECR_REGISTRY }}/${{ vars.IMAGE_NAME }}-kafka-${{ steps.get_env_name.outputs.lowercase }}:${{ env.GIT_COMMIT }} - - # - name: Build and push Django Docker image to ECR for ${{ vars.ENV_NAME }} - # uses: docker/build-push-action@v2 - # with: - # context: src/django - # file: src/django/Dockerfile - # push: true - # tags: ${{ vars.ECR_REGISTRY }}/${{ vars.IMAGE_NAME }}-${{ 
steps.get_env_name.outputs.lowercase }}:${{ env.GIT_COMMIT }} - - # - name: Build and push Batch Docker image to ECR for ${{ vars.ENV_NAME }} - # uses: docker/build-push-action@v2 - # with: - # context: src/batch - # file: src/batch/Dockerfile - # push: true - # tags: ${{ vars.ECR_REGISTRY }}/${{ vars.IMAGE_NAME }}-batch-${{ steps.get_env_name.outputs.lowercase }}:${{ env.GIT_COMMIT }} - # build-args: | - # GIT_COMMIT=${{ env.GIT_COMMIT }} - # DOCKER_IMAGE=${{ vars.DOCKER_IMAGE }} - # ENVIRONMENT=${{ steps.get_env_name.outputs.lowercase }} - - # - name: Build and push Dedupe Hub Docker image to ECR for ${{ vars.ENV_NAME }} - # uses: docker/build-push-action@v2 - # with: - # context: src/dedupe-hub/api - # file: src/dedupe-hub/api/Dockerfile - # push: true - # tags: ${{ vars.ECR_REGISTRY }}/${{ vars.IMAGE_NAME }}-deduplicate-${{ steps.get_env_name.outputs.lowercase }}:${{ env.GIT_COMMIT }} + - name: Setup Terraform + uses: hashicorp/setup-terraform@v2 + with: + terraform_version: 1.12.0 + terraform_wrapper: false - # - name: Build and push Logstash Docker image to ECR for ${{ vars.ENV_NAME }} - # uses: docker/build-push-action@v2 - # with: - # context: src/logstash - # file: src/logstash/Dockerfile - # push: true - # tags: ${{ vars.ECR_REGISTRY }}/${{ vars.IMAGE_NAME }}-logstash-${{ steps.get_env_name.outputs.lowercase }}:${{ env.GIT_COMMIT }} + - name: Checkout + uses: actions/checkout@v4 - # - name: Build and push Database Anonymizer Docker image to ECR for ${{ vars.ENV_NAME }} - # uses: docker/build-push-action@v2 - # if: ${{ steps.get_env_name.outputs.lowercase == 'production' }} - # with: - # context: deployment/terraform/database_anonymizer_scheduled_task/docker - # file: deployment/terraform/database_anonymizer_scheduled_task/docker/Dockerfile - # push: true - # tags: ${{ vars.ECR_REGISTRY }}/${{ vars.IMAGE_NAME }}-database-anonymizer-${{ steps.get_env_name.outputs.lowercase }}:${{ env.GIT_COMMIT }} + - name: Get planfile from S3 bucket for ${{ vars.ENV_NAME 
}} + run: aws s3 cp "s3://${{ env.SETTINGS_BUCKET }}/plans/${{ vars.ENV_NAME }}.tfplan" "deployment/${{ vars.ENV_NAME }}.tfplan" + env: + AWS_ACCESS_KEY_ID: ${{ secrets.TF_AWS_ACCESS_KEY_ID }} + AWS_SECRET_ACCESS_KEY: ${{ secrets.TF_AWS_SECRET_ACCESS_KEY }} + SETTINGS_BUCKET: sf-website-infrastructure + AWS_DEFAULT_REGION: "us-east-1" - # - name: Build and push Anonymize Database Dump Docker image to ECR for ${{ vars.ENV_NAME }} - # uses: docker/build-push-action@v2 - # if: ${{ steps.get_env_name.outputs.lowercase == 'test' }} - # with: - # context: deployment/terraform/anonymized_database_dump_scheduled_task/docker - # file: deployment/terraform/anonymized_database_dump_scheduled_task/docker/Dockerfile - # push: true - # tags: ${{ vars.ECR_REGISTRY }}/${{ vars.IMAGE_NAME }}-anonymized-database-dump-${{ steps.get_env_name.outputs.lowercase }}:${{ env.GIT_COMMIT }} + - name: Get lock file from S3 bucket for ${{ vars.ENV_NAME }} + run: aws s3 cp "s3://${{ env.SETTINGS_BUCKET }}/plans/${{ vars.ENV_NAME }}.terraform.lock.hcl" "deployment/.terraform.lock.hcl" + env: + AWS_ACCESS_KEY_ID: ${{ secrets.TF_AWS_ACCESS_KEY_ID }} + AWS_SECRET_ACCESS_KEY: ${{ secrets.TF_AWS_SECRET_ACCESS_KEY }} + SETTINGS_BUCKET: sf-website-infrastructure + AWS_DEFAULT_REGION: "us-east-1" - # create_kafka_topic: - # needs: build_and_push_docker_image - # runs-on: ubuntu-latest - # environment: ${{ inputs.deploy-env || (github.ref_name == 'main' && 'Development') || (startsWith(github.ref_name, 'releases/') && 'Pre-prod') }} - # if: ${{ inputs.deploy-plan-only == false }} - # steps: - # - name: Get Environment Name for ${{ vars.ENV_NAME }} - # id: get_env_name - # uses: Entepotenz/change-string-case-action-min-dependencies@v1 - # with: - # string: ${{ vars.ENV_NAME }} - # - name: Checkout repo - # uses: actions/checkout@v4 + - name: Terraform Apply for ${{ vars.ENV_NAME }} + run: | + cd deployment + terraform init -backend-config="environments/backend-${{ vars.ENV_NAME }}.hcl" + terraform apply 
"${{ vars.ENV_NAME }}.tfplan" + env: + AWS_ACCESS_KEY_ID: ${{ secrets.TF_AWS_ACCESS_KEY_ID }} + AWS_SECRET_ACCESS_KEY: ${{ secrets.TF_AWS_SECRET_ACCESS_KEY }} + AWS_DEFAULT_REGION: "us-east-1" - # - name: Create or update kafka topics for ${{ vars.ENV_NAME }} - # run: | - # ./deployment/run_kafka_task ${{ vars.ENV_NAME }} - # env: - # AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }} - # AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }} - # AWS_DEFAULT_REGION: "eu-west-1" + + build_and_push_docker_image: + needs: [apply] + runs-on: ubuntu-latest + environment: Development + if: ${{ inputs.deploy-plan-only == false }} + steps: + - name: Get Environment Name for ${{ vars.ENV_NAME }} + id: get_env_name + uses: Entepotenz/change-string-case-action-min-dependencies@v1 + with: + string: ${{ vars.ENV_NAME }} - # stop_logstash: - # needs: create_kafka_topic - # runs-on: ubuntu-latest - # environment: ${{ inputs.deploy-env || (github.ref_name == 'main' && 'Development') || (startsWith(github.ref_name, 'releases/') && 'Pre-prod') }} - # if: ${{ inputs.deploy-plan-only == false }} - # steps: - # - name: Get Environment Name for ${{ vars.ENV_NAME }} - # id: get_env_name - # uses: Entepotenz/change-string-case-action-min-dependencies@v1 - # with: - # string: ${{ vars.ENV_NAME }} - # - name: Checkout repo - # uses: actions/checkout@v4 - # - name: Stop Logstash for ${{ vars.ENV_NAME }} - # if: ${{ inputs.restore-db == true || inputs.clear-opensearch == true}} - # env: - # AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }} - # AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }} - # AWS_DEFAULT_REGION: "eu-west-1" - # run: | - # aws \ - # ecs update-service --desired-count 0 --cluster=ecsOpenSupplyHub${{vars.ENV_NAME}}Cluster \ - # --service=OpenSupplyHub${{vars.ENV_NAME}}AppLogstash + - name: Checkout repo + uses: actions/checkout@v4 - # restore_database: - # needs: stop_logstash - # runs-on: self-hosted - # environment: ${{ inputs.deploy-env || 
(github.ref_name == 'main' && 'Development') || (startsWith(github.ref_name, 'releases/') && 'Pre-prod') }} - # if: ${{ inputs.deploy-plan-only == false }} - # steps: - # - name: Get Environment Name for ${{ vars.ENV_NAME }} - # id: get_env_name - # uses: Entepotenz/change-string-case-action-min-dependencies@v1 - # with: - # string: ${{ vars.ENV_NAME }} - # - name: Checkout repo - # uses: actions/checkout@v4 - # - name: Restore database for ${{ vars.ENV_NAME }} - # if: ${{ (vars.ENV_NAME == 'Preprod' || vars.ENV_NAME == 'Test') && inputs.restore-db == true}} - # run: | - # cd ./src/anon-tools - # mkdir -p ./keys - # echo "${{ secrets.KEY_FILE }}" > ./keys/key - # docker build -t restore -f Dockerfile.restore . - # docker run -v ./keys/key:/keys/key --shm-size=2gb --rm \ - # -e AWS_ACCESS_KEY_ID=${{ secrets.AWS_ACCESS_KEY_ID }} \ - # -e AWS_SECRET_ACCESS_KEY=${{ secrets.AWS_SECRET_ACCESS_KEY }} \ - # -e AWS_DEFAULT_REGION=eu-west-1 \ - # -e ENVIRONMENT=${{ vars.ENV_NAME }} \ - # -e DATABASE_NAME=opensupplyhub \ - # -e DATABASE_USERNAME=opensupplyhub \ - # -e DATABASE_PASSWORD=${{ secrets.DATABASE_PASSWORD }} \ - # restore - # - name: Reset database for ${{ vars.ENV_NAME }} - # if: ${{ vars.ENV_NAME == 'Development' && inputs.restore-db == true}} - # run: | - # echo "Creating an S3 folder with production location lists to reset DB if it doesn't exist." - # aws s3api put-object --bucket ${{ env.LIST_S3_BUCKET }} --key ${{ env.RESET_LISTS_FOLDER }} - # echo "Coping all production location files from the repo to S3 to ensure consistency between local and environment resets." - # aws s3 cp ./src/django/${{ env.RESET_LISTS_FOLDER }} s3://${{ env.LIST_S3_BUCKET }}/${{ env.RESET_LISTS_FOLDER }} --recursive - # echo "Triggering reset commands." 
- # ./deployment/run_cli_task ${{ vars.ENV_NAME }} "reset_database" - # ./deployment/run_cli_task ${{ vars.ENV_NAME }} "matchfixtures" - # env: - # AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }} - # AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }} - # AWS_DEFAULT_REGION: "eu-west-1" - # LIST_S3_BUCKET: opensupplyhub-${{ steps.get_env_name.outputs.lowercase }}-files-eu-west-1 - # RESET_LISTS_FOLDER: "api/fixtures/list_files/" + - name: Configure AWS credentials for ${{ vars.ENV_NAME }} + uses: aws-actions/configure-aws-credentials@v1 + with: + aws-access-key-id: ${{ secrets.TF_AWS_ACCESS_KEY_ID }} + aws-secret-access-key: ${{ secrets.TF_AWS_SECRET_ACCESS_KEY }} + aws-region: "us-east-1" - # update_services: - # needs: restore_database - # runs-on: ubuntu-latest - # environment: ${{ inputs.deploy-env || (github.ref_name == 'main' && 'Development') || (startsWith(github.ref_name, 'releases/') && 'Pre-prod') }} - # if: ${{ inputs.deploy-plan-only == false }} - # steps: - # - name: Get Environment Name for ${{ vars.ENV_NAME }} - # id: get_env_name - # uses: Entepotenz/change-string-case-action-min-dependencies@v1 - # with: - # string: ${{ vars.ENV_NAME }} - # - name: Update ECS Django Service with new Image for ${{ vars.ENV_NAME }} - # run: | - # aws ecs update-service --cluster ${{ vars.CLUSTER }} --service ${{ vars.SERVICE_NAME }} --force-new-deployment --region ${{env.AWS_DEFAULT_REGION}} - # env: - # AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }} - # AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }} - # AWS_DEFAULT_REGION: "eu-west-1" - # - name: Update ECS Dedupe Hub Service with new Image for ${{ vars.ENV_NAME }} - # run: | - # aws ecs update-service --cluster ${{ vars.CLUSTER }} --service ${{ vars.SERVICE_NAME }}DD --force-new-deployment --region ${{env.AWS_DEFAULT_REGION}} - # env: - # AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }} - # AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }} - # AWS_DEFAULT_REGION: 
"eu-west-1" + - name: Login to Amazon ECR + uses: aws-actions/amazon-ecr-login@v1 - # post_deploy: - # needs: update_services - # runs-on: ubuntu-latest - # environment: ${{ inputs.deploy-env || (github.ref_name == 'main' && 'Development') || (startsWith(github.ref_name, 'releases/') && 'Pre-prod') }} - # if: ${{ inputs.deploy-plan-only == false }} - # steps: - # - name: Get Environment Name for ${{ vars.ENV_NAME }} - # id: get_env_name - # uses: Entepotenz/change-string-case-action-min-dependencies@v1 - # with: - # string: ${{ vars.ENV_NAME }} - # - name: Checkout repo - # uses: actions/checkout@v4 - # - name: Run migrations and other post-deployment tasks for ${{ vars.ENV_NAME }} - # run: | - # ./deployment/run_cli_task ${{ vars.ENV_NAME }} "post_deployment" - # env: - # AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }} - # AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }} - # AWS_DEFAULT_REGION: "eu-west-1" - # clear_opensearch: - # needs: post_deploy - # runs-on: ubuntu-latest - # environment: ${{ inputs.deploy-env || (github.ref_name == 'main' && 'Development') || (startsWith(github.ref_name, 'releases/') && 'Pre-prod') }} - # if: ${{ inputs.deploy-plan-only == false }} - # steps: - # - name: Get Environment Name for ${{ vars.ENV_NAME }} - # id: get_env_name - # uses: Entepotenz/change-string-case-action-min-dependencies@v1 - # with: - # string: ${{ vars.ENV_NAME }} - # - name: Checkout repo - # uses: actions/checkout@v4 - # - name: Get OpenSearch domain, filesystem and access point IDs for ${{ vars.ENV_NAME }} - # if: ${{ inputs.restore-db == true || inputs.clear-opensearch == true}} - # id: export_variables - # env: - # AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }} - # AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }} - # AWS_DEFAULT_REGION: "eu-west-1" - # run: | - # OS_DOMAIN_NAME=$(echo "${{ vars.ENV_NAME }}-os-domain" | tr '[:upper:]' '[:lower:]') - # OPENSEARCH_DOMAIN=$(aws \ - # es describe-elasticsearch-domains 
--domain-names $OS_DOMAIN_NAME \ - # --query "DomainStatusList[].Endpoints.vpc" --output text) - # EFS_ID=$(aws \ - # efs describe-file-systems \ - # --query "FileSystems[?Tags[?Key=='Environment' && Value=='${{ vars.ENV_NAME }}']].FileSystemId" \ - # --output text) - # EFS_AP_ID=$(aws \ - # efs describe-access-points \ - # --query "AccessPoints[?FileSystemId=='$EFS_ID'].AccessPointId" \ - # --output text) - # echo "EFS_ID=$EFS_ID" >> $GITHUB_OUTPUT - # echo "EFS_AP_ID=$EFS_AP_ID" >> $GITHUB_OUTPUT - # echo "OPENSEARCH_DOMAIN=$OPENSEARCH_DOMAIN" >> $GITHUB_OUTPUT - # - name: Clear the custom OpenSearch indexes and templates for ${{ vars.ENV_NAME }} - # if: ${{ inputs.restore-db == true || inputs.clear-opensearch == true}} - # env: - # AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }} - # AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }} - # OPENSEARCH_DOMAIN: ${{ steps.export_variables.outputs.OPENSEARCH_DOMAIN }} - # EFS_AP_ID: ${{ steps.export_variables.outputs.EFS_AP_ID }} - # EFS_ID: ${{ steps.export_variables.outputs.EFS_ID }} - # BASTION_IP: ${{ vars.BASTION_IP }} - # run: | - # cd ./deployment/clear_opensearch - # mkdir -p script - # mkdir -p ssh - # echo -e "Host *\n\tStrictHostKeyChecking no\n\n" > ssh/config - # printf "%s\n" "${{ secrets.SSH_PRIVATE_KEY }}" > ssh/id_rsa - # echo "" >> ssh/id_rsa - # echo -n ${{ vars.BASTION_IP }} > script/.env - # envsubst < clear_opensearch.sh.tpl > script/clear_opensearch.sh - # envsubst < run.sh.tpl > script/run.sh - # docker run --rm \ - # -v ./script:/script \ - # -v ./ssh:/root/.ssh \ - # kroniak/ssh-client bash /script/run.sh + - name: Build and push Apostrophe Docker image to ECR for ${{ vars.ENV_NAME }} + run: | + docker build --platform linux/amd64 -t $ECR_REPOSITORY:$IMAGE_TAG . 
+ docker push $ECR_REPOSITORY:$IMAGE_TAG + env: + IMAGE_TAG: latest + ECR_REPOSITORY: 695912022152.dkr.ecr.us-east-1.amazonaws.com/sf-website-${{ vars.ENV_NAME }} - # start_logstash: - # needs: clear_opensearch - # runs-on: ubuntu-latest - # environment: ${{ inputs.deploy-env || (github.ref_name == 'main' && 'Development') || (startsWith(github.ref_name, 'releases/') && 'Pre-prod') }} - # if: ${{ inputs.deploy-plan-only == false }} - # steps: - # - name: Get Environment Name for ${{ vars.ENV_NAME }} - # id: get_env_name - # uses: Entepotenz/change-string-case-action-min-dependencies@v1 - # with: - # string: ${{ vars.ENV_NAME }} - # - name: Checkout repo - # uses: actions/checkout@v4 - # - name: Start Logstash for ${{ vars.ENV_NAME }} - # if: ${{ inputs.restore-db == true || inputs.clear-opensearch == true}} - # env: - # AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }} - # AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }} - # AWS_DEFAULT_REGION: "eu-west-1" - # run: | - # aws \ - # ecs update-service --desired-count 1 --cluster=ecsOpenSupplyHub${{vars.ENV_NAME}}Cluster \ - # --service=OpenSupplyHub${{vars.ENV_NAME}}AppLogstash diff --git a/.github/workflows/destroy_aws_environment.yml b/.github/workflows/destroy_aws_environment.yml index 4760343d..670affc3 100644 --- a/.github/workflows/destroy_aws_environment.yml +++ b/.github/workflows/destroy_aws_environment.yml @@ -22,30 +22,175 @@ permissions: jobs: destroy-plan: runs-on: ubuntu-latest - environment: ${{ inputs.environment }} + environment: Development steps: - - name: Placeholder - Destroy plan step - run: | - echo "This step is empty - destroy plan functionality not implemented on main branch" - echo "Environment: ${{ inputs.environment }}" - echo "Plan only: ${{ inputs.plan_only }}" + - name: Get Environment Name for ${{ vars.ENV_NAME }} + id: get_env_name + uses: Entepotenz/change-string-case-action-min-dependencies@v1 + with: + string: ${{ vars.ENV_NAME }} + + - name: Checkout repo + uses: 
actions/checkout@v4 + + - name: Checkout config repository + uses: actions/checkout@v4 + with: + repository: 'speedandfunction/website-ci-secret' + path: 'terraform-config' + token: ${{ secrets.PAT }} + + - name: Copy tfvars for ${{ vars.ENV_NAME }} + run: | + cat "terraform-config/${{ vars.ENV_NAME }}.tfvars" "deployment/environments/${{ vars.ENV_NAME }}.tfvars" > "deployment/terraform.tfvars" + + - name: Setup Terraform + uses: hashicorp/setup-terraform@v2 + with: + terraform_version: 1.12.0 + terraform_wrapper: false + + - name: Terraform Destroy Plan for ${{ vars.ENV_NAME }} + run: | + cd deployment + terraform init -backend-config="environments/backend-${{ vars.ENV_NAME }}.hcl" + terraform plan -destroy -out="${{ vars.ENV_NAME }}-destroy.tfplan" + env: + AWS_ACCESS_KEY_ID: ${{ secrets.TF_AWS_ACCESS_KEY_ID }} + AWS_SECRET_ACCESS_KEY: ${{ secrets.TF_AWS_SECRET_ACCESS_KEY }} + AWS_DEFAULT_REGION: "us-east-1" + + - name: Show Destroy Plan Summary + run: | + cd deployment + echo "## 🚨 DESTROY PLAN SUMMARY FOR ${{ vars.ENV_NAME }} 🚨" >> $GITHUB_STEP_SUMMARY + echo "" >> $GITHUB_STEP_SUMMARY + echo "The following resources will be **DESTROYED**:" >> $GITHUB_STEP_SUMMARY + echo "" >> $GITHUB_STEP_SUMMARY + terraform show -no-color "${{ vars.ENV_NAME }}-destroy.tfplan" | head -50 >> $GITHUB_STEP_SUMMARY + env: + AWS_ACCESS_KEY_ID: ${{ secrets.TF_AWS_ACCESS_KEY_ID }} + AWS_SECRET_ACCESS_KEY: ${{ secrets.TF_AWS_SECRET_ACCESS_KEY }} + AWS_DEFAULT_REGION: "us-east-1" + + - name: Copy destroy plan to S3 bucket for ${{ vars.ENV_NAME }} + if: ${{ inputs.plan_only == false }} + run: aws s3 cp "deployment/${{ vars.ENV_NAME }}-destroy.tfplan" "s3://${{ env.SETTINGS_BUCKET }}/destroy-plans/${{ vars.ENV_NAME }}-destroy.tfplan" + env: + AWS_ACCESS_KEY_ID: ${{ secrets.TF_AWS_ACCESS_KEY_ID }} + AWS_SECRET_ACCESS_KEY: ${{ secrets.TF_AWS_SECRET_ACCESS_KEY }} + SETTINGS_BUCKET: sf-website-infrastructure + AWS_DEFAULT_REGION: "us-east-1" + + - name: Copy lock file to S3 bucket for ${{ 
vars.ENV_NAME }} + if: ${{ !inputs.plan_only }} + run: aws s3 cp "deployment/.terraform.lock.hcl" "s3://${{ env.SETTINGS_BUCKET }}/destroy-plans/${{ vars.ENV_NAME }}.terraform.lock.hcl" + env: + AWS_ACCESS_KEY_ID: ${{ secrets.TF_AWS_ACCESS_KEY_ID }} + AWS_SECRET_ACCESS_KEY: ${{ secrets.TF_AWS_SECRET_ACCESS_KEY }} + SETTINGS_BUCKET: sf-website-infrastructure + AWS_DEFAULT_REGION: "us-east-1" manual-approval: needs: [destroy-plan] runs-on: ubuntu-latest + environment: Development if: ${{ !inputs.plan_only }} steps: - - name: Placeholder - Manual approval step - run: | - echo "This step is empty - manual approval functionality not implemented on main branch" - echo "Would wait for approval to destroy ${{ inputs.environment }}" + - name: Wait for approval to destroy ${{ vars.ENV_NAME }} + if: ${{ inputs.environment != 'Development' }} + uses: trstringer/manual-approval@v1 + with: + secret: ${{ github.TOKEN }} + approvers: killev + minimum-approvals: 1 + issue-title: "🚨 DESTROY ${{ vars.ENV_NAME }} AWS Environment 🚨" + issue-body: | + ## ⚠️ CRITICAL: Infrastructure Destruction Request ⚠️ + + **Environment**: ${{ vars.ENV_NAME }} + **Requested by**: @${{ github.actor }} + **Workflow run**: ${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }} + + ### 🚨 WARNING: This action will PERMANENTLY DESTROY the following infrastructure: + - VPC and all networking components + - ECS cluster and services + - DocumentDB cluster and all data + - ElastiCache Redis cluster and all data + - Application Load Balancer + - CloudFront distribution + - S3 buckets and all stored files + - ECR repository and all images + - IAM roles and policies + - CloudWatch logs and metrics + - Parameter Store secrets + + ### 📋 Before approving, please verify: + - [ ] This is the correct environment to destroy + - [ ] All important data has been backed up + - [ ] Team has been notified of the destruction + - [ ] No critical services depend on this infrastructure + + **To 
approve**: Comment "approve" or "approved" + **To deny**: Comment "deny" or "denied" + + **⚠️ THIS ACTION CANNOT BE UNDONE ⚠️** destroy-apply: needs: [manual-approval] runs-on: ubuntu-latest - if: ${{ !inputs.plan_only }} + environment: Development + if: ${{ inputs.plan_only == false && (inputs.environment == 'Development' || needs.manual-approval.result == 'success') }} steps: - - name: Placeholder - Destroy apply step - run: | - echo "This step is empty - destroy apply functionality not implemented on main branch" - echo "Would destroy infrastructure for ${{ inputs.environment }}" + - name: Get Environment Name for ${{ vars.ENV_NAME }} + id: get_env_name + uses: Entepotenz/change-string-case-action-min-dependencies@v1 + with: + string: ${{ vars.ENV_NAME }} + + - name: Setup Terraform + uses: hashicorp/setup-terraform@v2 + with: + terraform_version: 1.12.0 + terraform_wrapper: false + + - name: Checkout + uses: actions/checkout@v4 + + - name: Get destroy plan from S3 bucket for ${{ vars.ENV_NAME }} + run: aws s3 cp "s3://${{ env.SETTINGS_BUCKET }}/destroy-plans/${{ vars.ENV_NAME }}-destroy.tfplan" "deployment/${{ vars.ENV_NAME }}-destroy.tfplan" + env: + AWS_ACCESS_KEY_ID: ${{ secrets.TF_AWS_ACCESS_KEY_ID }} + AWS_SECRET_ACCESS_KEY: ${{ secrets.TF_AWS_SECRET_ACCESS_KEY }} + SETTINGS_BUCKET: sf-website-infrastructure + AWS_DEFAULT_REGION: "us-east-1" + + - name: Get lock file from S3 bucket for ${{ vars.ENV_NAME }} + run: aws s3 cp "s3://${{ env.SETTINGS_BUCKET }}/destroy-plans/${{ vars.ENV_NAME }}.terraform.lock.hcl" "deployment/.terraform.lock.hcl" + env: + AWS_ACCESS_KEY_ID: ${{ secrets.TF_AWS_ACCESS_KEY_ID }} + AWS_SECRET_ACCESS_KEY: ${{ secrets.TF_AWS_SECRET_ACCESS_KEY }} + SETTINGS_BUCKET: sf-website-infrastructure + AWS_DEFAULT_REGION: "us-east-1" + + - name: 🚨 DESTROY Infrastructure for ${{ vars.ENV_NAME }} 🚨 + run: | + cd deployment + terraform init -backend-config="environments/backend-${{ vars.ENV_NAME }}.hcl" + echo "🚨 DESTROYING INFRASTRUCTURE FOR ${{ 
vars.ENV_NAME }} - THIS CANNOT BE UNDONE! 🚨" + terraform apply "${{ vars.ENV_NAME }}-destroy.tfplan" + echo "✅ Infrastructure for ${{ vars.ENV_NAME }} has been destroyed" + env: + AWS_ACCESS_KEY_ID: ${{ secrets.TF_AWS_ACCESS_KEY_ID }} + AWS_SECRET_ACCESS_KEY: ${{ secrets.TF_AWS_SECRET_ACCESS_KEY }} + AWS_DEFAULT_REGION: "us-east-1" + + - name: Clean up destroy plan files + run: | + aws s3 rm "s3://${{ env.SETTINGS_BUCKET }}/destroy-plans/${{ vars.ENV_NAME }}-destroy.tfplan" || true + aws s3 rm "s3://${{ env.SETTINGS_BUCKET }}/destroy-plans/${{ vars.ENV_NAME }}.terraform.lock.hcl" || true + env: + AWS_ACCESS_KEY_ID: ${{ secrets.TF_AWS_ACCESS_KEY_ID }} + AWS_SECRET_ACCESS_KEY: ${{ secrets.TF_AWS_SECRET_ACCESS_KEY }} + SETTINGS_BUCKET: sf-website-infrastructure + AWS_DEFAULT_REGION: "us-east-1" diff --git a/.gitignore b/.gitignore index d8aacc0c..600e7477 100644 --- a/.gitignore +++ b/.gitignore @@ -144,3 +144,10 @@ website/public/apos-frontend dump.archive .prettierrc aposUsersSafe.json +terraform.tfvars +deployment/.terraform +*.tfplan +plan.out.txt +.cursor/tmp +.terraform.lock.hcl +*.pem diff --git a/Dockerfile b/Dockerfile index 3fb3b075..8ce66e4d 100644 --- a/Dockerfile +++ b/Dockerfile @@ -5,6 +5,9 @@ WORKDIR /app # Install dependencies needed for health checks with pinned version RUN apk add --no-cache wget=1.25.0-r1 + + RUN wget https://truststore.pki.rds.amazonaws.com/global/global-bundle.pem + RUN chmod 600 global-bundle.pem # Create a non-root user and group RUN addgroup -S appgroup && adduser -S appuser -G appgroup diff --git a/deploy-ecs.sh b/deploy-ecs.sh new file mode 100755 index 00000000..9c70becc --- /dev/null +++ b/deploy-ecs.sh @@ -0,0 +1,46 @@ +#!/bin/bash + +# ECS Deployment Script +# This script rebuilds the Docker image, pushes it to ECR, and restarts the ECS service + +set -e # Exit on any error + +# Configuration +AWS_PROFILE="tf-sf-website" +AWS_REGION="us-east-1" 
+ECR_REPOSITORY="695912022152.dkr.ecr.us-east-1.amazonaws.com/sf-website-development" +ECS_CLUSTER="sf-website-dev-cluster" +ECS_SERVICE="sf-website-dev-service" +IMAGE_TAG="latest" + +echo "🚀 Starting ECS deployment process..." + +# Step 1: Login to ECR +echo "📝 Logging into ECR..." +AWS_PROFILE=$AWS_PROFILE aws ecr get-login-password --region $AWS_REGION | docker login --username AWS --password-stdin $ECR_REPOSITORY + +# Step 2: Build the Docker image +echo "🔨 Building Docker image for linux/amd64 platform..." +docker build --platform linux/amd64 -t $ECR_REPOSITORY:$IMAGE_TAG . + +# Step 3: Push the image to ECR +echo "📤 Pushing image to ECR..." +docker push $ECR_REPOSITORY:$IMAGE_TAG + +# Step 4: Force ECS service to restart with new image +echo "🔄 Restarting ECS service..." +AWS_PROFILE=$AWS_PROFILE aws ecs update-service \ + --cluster $ECS_CLUSTER \ + --service $ECS_SERVICE \ + --force-new-deployment \ + --region $AWS_REGION + +# Step 5: Wait for deployment to complete +echo "⏳ Waiting for deployment to complete..." +AWS_PROFILE=$AWS_PROFILE aws ecs wait services-stable \ + --cluster $ECS_CLUSTER \ + --services $ECS_SERVICE \ + --region $AWS_REGION + +echo "✅ Deployment completed successfully!" +echo "🌐 Your application should be available at: https://sf-website-dev.sandbox-prettyclear.com" \ No newline at end of file diff --git a/deployment/README.md b/deployment/README.md new file mode 100644 index 00000000..4e112849 --- /dev/null +++ b/deployment/README.md @@ -0,0 +1,350 @@ +# SF Website Infrastructure - Terraform + +This Terraform configuration implements the complete infrastructure for the SF Website project as documented in `docs/Infrastructure.md`. 
+ +## 🏗️ **Architecture Overview** + +- **VPC**: Multi-AZ setup with public/private subnets +- **ECS Fargate**: Containerized ApostropheCMS application +- **DocumentDB**: MongoDB-compatible database +- **ElastiCache Redis**: Session storage and caching +- **S3**: Media attachments and logs storage +- **CloudFront**: CDN for media delivery +- **ALB**: Load balancer with SSL termination +- **CloudWatch**: Monitoring and alerting + +## 📋 **Prerequisites** + +1. **AWS CLI** configured with appropriate permissions +2. **Terraform** >= 1.0 installed +3. **S3 buckets** for Terraform state storage (see Backend Setup) +4. **SSL Certificate** in AWS Certificate Manager +5. **Route 53 hosted zone** (optional) + +## 🚀 **Quick Start** + +### 1. **Clone and Setup** +```bash +cd deployment +cp terraform.tfvars.example terraform.tfvars +``` + +### 2. **Configure Variables** +Edit `terraform.tfvars` with your specific values: +- Domain names +- Certificate ARN +- Database credentials +- GitHub repository +- Slack webhook URL + +### 3. **Setup Backend Storage** +Create S3 buckets and DynamoDB table for state management: +```bash +# Create S3 buckets for each environment +aws s3 mb s3://sf-website-terraform-state-development +aws s3 mb s3://sf-website-terraform-state-staging +aws s3 mb s3://sf-website-terraform-state-production + +# Enable versioning +aws s3api put-bucket-versioning \ + --bucket sf-website-terraform-state-development \ + --versioning-configuration Status=Enabled + +# Create DynamoDB table for state locking +aws dynamodb create-table \ + --table-name sf-website-terraform-locks \ + --attribute-definitions AttributeName=LockID,AttributeType=S \ + --key-schema AttributeName=LockID,KeyType=HASH \ + --billing-mode PAY_PER_REQUEST +``` + +### 4. 
**Initialize and Deploy** +```bash +# Initialize with backend +terraform init -backend-config=environments/backend-development.hcl + +# Plan deployment +terraform plan + +# Apply changes +terraform apply +``` + +## 🌍 **Environment Management** + +Deploy different environments using different backend configurations: + +```bash +# Development +terraform init -backend-config=environments/backend-development.hcl +terraform apply -var-file=environments/development.tfvars + +# Staging +terraform init -backend-config=environments/backend-staging.hcl +terraform apply -var-file=environments/staging.tfvars + +# Production +terraform init -backend-config=environments/backend-production.hcl +terraform apply -var-file=environments/production.tfvars +``` + +## 📁 **File Structure** + +``` +deployment/ +├── main.tf # Main configuration +├── variables.tf # Variable definitions +├── outputs.tf # Output values +├── terraform.tfvars.example # Example variables +├── environments/ # Backend configs and per-env tfvars +├── modules/ +│ ├── vpc/ # VPC and networking +│ ├── security_groups/ # Security groups +│ ├── s3/ # S3 buckets +│ ├── iam/ # IAM roles and policies +│ ├── ecr/ # Container registry +│ ├── parameter_store/ # Secrets management +│ ├── documentdb/ # Database cluster +│ ├── redis/ # Cache cluster +│ ├── alb/ # Load balancer +│ ├── cloudfront/ # CDN +│ ├── ecs/ # Container service +│ └── cloudwatch/ # Monitoring +└── README.md # This file +``` + +## 🔐 **Security Best Practices** + +### **Secrets Management** +- All sensitive values are stored in AWS Parameter Store +- Database credentials are provided via tfvars (never committed) +- Redis auth token is auto-generated + +### **Network Security** +- Private subnets for all data services +- Security groups with least-privilege access +- VPC Flow Logs enabled + +### **IAM Security** +- Separate roles for ECS tasks and execution +- GitHub Actions OIDC integration (no long-term keys) +- Scoped permissions for each service + +## 🏷️ **Resource Naming** + +All resources follow the naming convention: +``` 
+sf-website-{resource-type}-{environment} +``` + +Examples: +- `sf-website-vpc-development` +- `sf-website-ecs-cluster-production` +- `sf-website-documentdb-staging` + +## 📊 **Monitoring & Alerts** + +CloudWatch alarms are configured for: +- ECS CPU/Memory utilization +- DocumentDB connections and performance +- Redis memory usage and hit ratio +- ALB response times and error rates + +Alerts are sent to Slack via webhook URL. + +## 🔄 **CI/CD Integration** + +The configuration includes IAM roles for GitHub Actions: +- **Terraform Role**: Deploy infrastructure changes +- **ECR Push Role**: Build and push container images + +Example GitHub Actions workflow: +```yaml +name: Deploy Infrastructure +on: + push: + branches: [main] + paths: ['terraform/**'] + +jobs: + terraform: + runs-on: ubuntu-latest + permissions: + id-token: write + contents: read + + steps: + - uses: actions/checkout@v4 + + - name: Configure AWS Credentials + uses: aws-actions/configure-aws-credentials@v4 + with: + role-to-assume: ${{ secrets.AWS_ROLE_ARN }} + aws-region: us-east-1 + + - name: Setup Terraform + uses: hashicorp/setup-terraform@v3 + + - name: Terraform Init + run: terraform init -backend-config=backend-production.hcl + + - name: Terraform Apply + run: terraform apply -auto-approve +``` + +## 📋 **Common Commands** + +```bash +# Format code +terraform fmt -recursive + +# Validate configuration +terraform validate + +# Plan with specific var file +terraform plan -var-file=production.tfvars + +# Show current state +terraform show + +# List resources +terraform state list + +# Import existing resource +terraform import aws_s3_bucket.example bucket-name + +# Destroy environment (BE CAREFUL!) 
+terraform destroy +``` + +## 🔧 **Customization** + +### **Environment-Specific Variables** +Create separate tfvars files for each environment: +- `development.tfvars` +- `staging.tfvars` +- `production.tfvars` + +### **Scaling Configuration** +Adjust container resources and auto-scaling: +```hcl +# For production +container_cpu = 2048 # 2 vCPU +container_memory = 4096 # 4 GB +ecs_desired_count = 2 +ecs_max_capacity = 10 +``` + +### **Database Sizing** +Configure instance types per environment: +```hcl +# Development +documentdb_instance_class = "db.t3.medium" +redis_node_type = "cache.t3.micro" + +# Production +documentdb_instance_class = "db.r5.large" +redis_node_type = "cache.r6g.large" +``` + +## 🆘 **Troubleshooting** + +### **Common Issues** + +1. **Backend bucket doesn't exist** + ```bash + aws s3 mb s3://sf-website-terraform-state-development + ``` + +2. **Certificate ARN invalid** + - Verify certificate exists in correct region + - Ensure certificate covers required domains + +3. **State lock conflicts** + ```bash + terraform force-unlock LOCK_ID + ``` + +4. **Module source errors** + ```bash + terraform get -update + ``` + +### **Getting Help** +- Check AWS CloudTrail for API errors +- Review CloudWatch logs for application issues +- Validate Terraform syntax: `terraform validate` + +## 📈 **Cost Optimization** + +- Use `t3.micro` instances for development +- Schedule ECS tasks to scale down during off-hours +- Enable S3 lifecycle policies for log retention +- Consider Reserved Instances for production + +## 🔄 **Updates and Maintenance** + +1. **Provider Updates**: Regularly update AWS provider version +2. **Module Updates**: Test module changes in development first +3. **State Backup**: S3 versioning provides automatic backups +4. **Security Updates**: Monitor AWS security bulletins + +--- + +**Next Steps**: After deploying infrastructure, configure your CI/CD pipeline to build and deploy the ApostropheCMS application to the created ECS cluster. 
+ +# Terraform Infrastructure Setup + +This directory contains the Terraform configuration and initialization scripts for the SF Website infrastructure. + +## Quick Start + +### Initialize AWS Resources + +The `init-aws-for-terraform.sh` script manages the Terraform backend resources (S3 bucket and DynamoDB table). + +```bash +# Create resources +./init-aws-for-terraform.sh create --profile tf-sf-website + +# Check status +./init-aws-for-terraform.sh status --profile tf-sf-website + +# Delete resources (interactive) +./init-aws-for-terraform.sh delete --profile tf-sf-website + +# Delete resources (non-interactive for automation) +TERRAFORM_NON_INTERACTIVE=true ./init-aws-for-terraform.sh delete --profile tf-sf-website +``` + +### Non-Interactive Mode + +The script supports non-interactive mode for automation and CI/CD pipelines: + +- Set `TERRAFORM_NON_INTERACTIVE=true` environment variable +- The script will automatically skip confirmation prompts +- Useful for automated deployments and scripts + +### Error Handling + +The script includes robust error handling: + +- Proper AWS CLI profile support +- Graceful handling of existing resources +- Suppression of spurious AWS CLI configuration errors +- Clear status reporting and logging + +## Script Features + +- ✅ **Non-interactive support** - No user prompts when `TERRAFORM_NON_INTERACTIVE=true` +- ✅ **AWS Profile support** - Works with named AWS profiles +- ✅ **Error suppression** - Filters out spurious AWS CLI configuration errors +- ✅ **Resource validation** - Checks if resources exist before creating/deleting +- ✅ **Comprehensive logging** - Clear status messages and error reporting +- ✅ **Safe deletion** - Confirms before deleting resources (unless non-interactive) + +## Resources Managed + +- **S3 Bucket**: `sf-website-infrastructure` (with versioning and encryption) +- **DynamoDB Table**: `sf-website-terraform-locks` (for state locking) +- **Region**: `us-east-1` \ No newline at end of file diff --git 
a/deployment/environments/backend-development.hcl b/deployment/environments/backend-development.hcl new file mode 100644 index 00000000..7fdc94a0 --- /dev/null +++ b/deployment/environments/backend-development.hcl @@ -0,0 +1,5 @@ +# Backend configuration for Development environment +bucket = "sf-website-infrastructure" +key = "terraform/terraform-development.tfstate" +region = "us-east-1" +encrypt = true \ No newline at end of file diff --git a/deployment/environments/backend-production.hcl b/deployment/environments/backend-production.hcl new file mode 100644 index 00000000..64bf2d47 --- /dev/null +++ b/deployment/environments/backend-production.hcl @@ -0,0 +1,5 @@ +# Backend configuration for Production environment +bucket = "sf-website-infrastructure" +key = "terraform/terraform-production.tfstate" +region = "us-east-1" +encrypt = true \ No newline at end of file diff --git a/deployment/environments/backend-staging.hcl b/deployment/environments/backend-staging.hcl new file mode 100644 index 00000000..70025e3c --- /dev/null +++ b/deployment/environments/backend-staging.hcl @@ -0,0 +1,5 @@ +# Backend configuration for Staging environment +bucket = "sf-website-infrastructure" +key = "terraform/terraform-staging.tfstate" +region = "us-east-1" +encrypt = true \ No newline at end of file diff --git a/deployment/environments/development.tfvars b/deployment/environments/development.tfvars new file mode 100644 index 00000000..b5476dc0 --- /dev/null +++ b/deployment/environments/development.tfvars @@ -0,0 +1,44 @@ +# Example Terraform Variables for SF Website Infrastructure +# Copy this file to terraform.tfvars and customize the values + +# General Configuration +aws_region = "us-east-1" +environment = "development" # Can be any name: development, staging, production, qa, demo, sandbox, etc. 
+ +apos_release_id = "v1.0.0(0)" +# Domain Configuration +domain_name = "sf-website-development.sandbox-prettyclear.com" +media_domain_name = "sf-website-media-development.sandbox-prettyclear.com" +certificate_arn = "arn:aws:acm:us-east-1:695912022152:certificate/3dd0c51e-6749-485b-95f8-c92a4950ac93" + +# GitHub Actions Configuration +github_repository = "speedandfunction/website" # Format: owner/repo + +# Monitoring Configuration (SENSITIVE) +slack_webhook_url = "" + +# Optional: Route 53 Configuration +route53_zone_id = "Z031220720LW1I1AB9GUY" # Leave empty to skip DNS record creation + +# Container Configuration +container_image_tag = "latest" +container_cpu = 1024 # 1024 = 1 vCPU +container_memory = 2048 # 2048 MB = 2 GB +container_port = 3000 # Application port +log_retention_days = 7 # CloudWatch log retention + +# Scaling Configuration +ecs_desired_count = 1 +ecs_max_capacity = 3 + +# Environment-specific Instance Types +documentdb_instance_class = "db.t3.micro" +redis_node_type = "cache.t3.micro" # cache.t3.small for production + +# Tags +default_tags = { + Project = "Website" + Environment = "development" + CostCenter = "Website" + Owner = "peter.ovchyn" +} \ No newline at end of file diff --git a/deployment/main.tf b/deployment/main.tf new file mode 100644 index 00000000..460fd810 --- /dev/null +++ b/deployment/main.tf @@ -0,0 +1,303 @@ +# Terraform configuration for SF Website Infrastructure +terraform { + required_version = ">= 1.0" + + required_providers { + aws = { + source = "hashicorp/aws" + version = "~> 5.0" + } + } + + backend "s3" { + # Backend configuration will be provided via backend.hcl files + # Example: terraform init -backend-config=backend-development.hcl + } +} + +# AWS Provider Configuration +provider "aws" { + region = var.aws_region + + default_tags { + tags = var.default_tags + } +} + +# Data sources +data "aws_caller_identity" "current" {} +data "aws_region" "current" {} + +# Hardcoded availability zones for us-east-1 to avoid IAM 
permission issues +# Comment out if you have ec2:DescribeAvailabilityZones permission +# data "aws_availability_zones" "available" { +# state = "available" +# } + +# Local values for common naming +locals { + name_prefix = "sf-website" + environment = var.environment + + # Hardcoded availability zones for us-east-1 (first 2) + # Change this if using a different region + azs = ["us-east-1a", "us-east-1b"] + + # If you have ec2:DescribeAvailabilityZones permission, uncomment this line and comment the one above: + # azs = slice(data.aws_availability_zones.available.names, 0, 2) + + # Common tags for all resources + common_tags = merge(var.default_tags, { + Environment = var.environment + Project = "Website" + CostCenter = "Website" + Owner = "peter.ovchyn" + }) +} + +# VPC Module +module "vpc" { + source = "./modules/vpc" + + name_prefix = local.name_prefix + environment = local.environment + + vpc_cidr = var.vpc_cidr + azs = local.azs + + tags = local.common_tags +} + +# Bastion Host Module +module "bastion" { + source = "./modules/bastion" + + name_prefix = local.name_prefix + environment = local.environment + + vpc_id = module.vpc.vpc_id + subnet_id = module.vpc.public_subnet_ids[0] + instance_type = var.bastion_instance_type + key_pair_name = var.bastion_key_pair_name + allowed_cidr_blocks = var.bastion_allowed_cidr_blocks + + tags = local.common_tags +} + +# Security Groups Module +module "security_groups" { + source = "./modules/security_groups" + + name_prefix = local.name_prefix + environment = local.environment + + vpc_id = module.vpc.vpc_id + + # Container configuration + container_port = var.container_port + + tags = local.common_tags +} + +# S3 Module +module "s3" { + source = "./modules/s3" + + name_prefix = local.name_prefix + environment = local.environment + + tags = local.common_tags +} + +# IAM Module +module "iam" { + source = "./modules/iam" + + name_prefix = local.name_prefix + environment = local.environment + + s3_attachments_bucket_arn = 
module.s3.attachments_bucket_arn + ecr_repository_arn = module.ecr.repository_arn + + # GitHub Actions configuration + github_repo = var.github_repository + + tags = local.common_tags +} + +# ECR Module +module "ecr" { + source = "./modules/ecr" + + name_prefix = local.name_prefix + environment = local.environment + + tags = local.common_tags +} + +# Parameter Store Module +module "parameter_store" { + source = "./modules/parameter_store" + + name_prefix = local.name_prefix + environment = local.environment + + # Secrets from tfvars + documentdb_master_username = var.documentdb_master_username + documentdb_master_password = var.documentdb_master_password + session_secret = var.session_secret + + # Auto-generated Redis auth token + redis_auth_token = var.redis_auth_token + + # Optional Google Cloud Storage key + gcs_service_account_key = var.gcs_service_account_key + + tags = local.common_tags +} + +# DocumentDB Module +module "documentdb" { + source = "./modules/documentdb" + + name_prefix = local.name_prefix + environment = local.environment + + vpc_id = module.vpc.vpc_id + subnet_ids = module.vpc.private_subnet_ids + security_group_ids = [module.security_groups.documentdb_sg_id] + + master_username = var.documentdb_master_username + master_password = var.documentdb_master_password + + tags = local.common_tags +} + +# ElastiCache Redis Module +module "redis" { + source = "./modules/redis" + + name_prefix = local.name_prefix + environment = local.environment + + vpc_id = module.vpc.vpc_id + subnet_ids = module.vpc.private_subnet_ids + security_group_ids = [module.security_groups.redis_sg_id] + + auth_token = var.redis_auth_token + + tags = local.common_tags +} + +# ALB Module +module "alb" { + source = "./modules/alb" + + name_prefix = local.name_prefix + environment = local.environment + + vpc_id = module.vpc.vpc_id + subnet_ids = module.vpc.public_subnet_ids + security_group_ids = [module.security_groups.alb_sg_id] + + domain_name = var.domain_name + 
certificate_arn = var.certificate_arn + + # Container configuration + container_port = var.container_port + health_check_path = var.health_check_path + + tags = local.common_tags +} + +# CloudFront Module +module "cloudfront" { + source = "./modules/cloudfront" + + name_prefix = local.name_prefix + environment = local.environment + + s3_bucket_domain_name = module.s3.attachments_bucket_domain_name + s3_bucket_id = module.s3.attachments_bucket_id + + media_domain_name = var.media_domain_name + certificate_arn = var.certificate_arn + + tags = local.common_tags +} + +# ECS Module +module "ecs" { + source = "./modules/ecs" + + name_prefix = local.name_prefix + environment = local.environment + + vpc_id = module.vpc.vpc_id + subnet_ids = module.vpc.private_subnet_ids + security_group_ids = [module.security_groups.ecs_sg_id] + + # Service configuration + ecr_repository_url = module.ecr.repository_url + task_role_arn = module.iam.ecs_task_role_arn + execution_role_arn = module.iam.ecs_execution_role_arn + + # Target group for ALB + target_group_arn = module.alb.target_group_arn + + # Container configuration + container_cpu = var.container_cpu + container_memory = var.container_memory + container_port = var.container_port + log_retention_days = var.log_retention_days + ecs_desired_count = var.ecs_desired_count + ecs_max_capacity = var.ecs_max_capacity + + # Environment variables + environment_variables = { + NODE_ENV = "production" + DOCUMENTDB_HOST = module.documentdb.cluster_endpoint + DOCUMENTDB_PORT = "27017" + DOCUMENTDB_DATABASE = "apostrophe" + REDIS_URI = "redis://${module.redis.cluster_endpoint}:6379" + BASE_URL = "https://${var.domain_name}" + APOS_S3_BUCKET = module.s3.attachments_bucket_id + APOS_S3_REGION = var.aws_region + APOS_CDN_URL = "https://${var.media_domain_name}" + APOS_CDN_ENABLED = "true" + APOS_RELEASE_ID = var.apos_release_id + } + + # Secrets from Parameter Store - filter out empty values + secrets = { + SESSION_SECRET = 
module.parameter_store.session_secret_arn + SERVICE_ACCOUNT_PRIVATE_KEY = module.parameter_store.gcs_service_account_key_arn + DOCUMENTDB_USERNAME = module.parameter_store.documentdb_username_arn + DOCUMENTDB_PASSWORD = module.parameter_store.documentdb_password_arn + } + + tags = local.common_tags +} + +# CloudWatch Module +module "cloudwatch" { + source = "./modules/cloudwatch" + + name_prefix = local.name_prefix + environment = local.environment + + # Resources to monitor + ecs_cluster_name = module.ecs.cluster_name + ecs_service_name = module.ecs.service_name + alb_arn_suffix = module.alb.arn_suffix + documentdb_cluster_id = module.documentdb.cluster_identifier + redis_cluster_id = module.redis.cluster_id + + # Configuration + log_retention_days = var.log_retention_days + + # Slack webhook for notifications + slack_webhook_url = var.slack_webhook_url + + tags = local.common_tags +} \ No newline at end of file diff --git a/deployment/modules/alb/main.tf b/deployment/modules/alb/main.tf new file mode 100644 index 00000000..821b2f0e --- /dev/null +++ b/deployment/modules/alb/main.tf @@ -0,0 +1,84 @@ +# Application Load Balancer +resource "aws_lb" "main" { + name = "${var.name_prefix}-${var.environment}-alb" + internal = false + load_balancer_type = "application" + security_groups = var.security_group_ids + subnets = var.subnet_ids + + enable_deletion_protection = false + + tags = var.tags +} + +# Target Group +resource "aws_lb_target_group" "main" { + name = "${var.name_prefix}-${var.environment}-tg" + port = var.container_port + protocol = "HTTP" + vpc_id = var.vpc_id + target_type = "ip" + + health_check { + enabled = true + healthy_threshold = 2 + interval = 30 + matcher = "200" + path = var.health_check_path + port = "traffic-port" + protocol = "HTTP" + timeout = 5 + unhealthy_threshold = 2 + } + + tags = var.tags +} + +# HTTPS Listener +resource "aws_lb_listener" "https" { + load_balancer_arn = aws_lb.main.arn + port = "443" + protocol = "HTTPS" + ssl_policy 
= "ELBSecurityPolicy-TLS-1-2-2017-01" + certificate_arn = var.certificate_arn + + default_action { + type = "forward" + target_group_arn = aws_lb_target_group.main.arn + } + + # Ensure this listener is created after target group and destroyed before target group + depends_on = [aws_lb_target_group.main] + + lifecycle { + create_before_destroy = false + } + + tags = var.tags +} + +# HTTP Listener (redirect to HTTPS) +resource "aws_lb_listener" "http" { + load_balancer_arn = aws_lb.main.arn + port = "80" + protocol = "HTTP" + + default_action { + type = "redirect" + + redirect { + port = "443" + protocol = "HTTPS" + status_code = "HTTP_301" + } + } + + # Ensure this listener is destroyed before target group (even though it doesn't reference it) + depends_on = [aws_lb_target_group.main] + + lifecycle { + create_before_destroy = false + } + + tags = var.tags +} \ No newline at end of file diff --git a/deployment/modules/alb/outputs.tf b/deployment/modules/alb/outputs.tf new file mode 100644 index 00000000..fffb0216 --- /dev/null +++ b/deployment/modules/alb/outputs.tf @@ -0,0 +1,19 @@ +output "target_group_arn" { + description = "ARN of the target group" + value = aws_lb_target_group.main.arn +} + +output "alb_dns_name" { + description = "DNS name of the ALB" + value = aws_lb.main.dns_name +} + +output "alb_zone_id" { + description = "Zone ID of the ALB" + value = aws_lb.main.zone_id +} + +output "arn_suffix" { + description = "ARN suffix for CloudWatch monitoring" + value = aws_lb.main.arn_suffix +} \ No newline at end of file diff --git a/deployment/modules/alb/variables.tf b/deployment/modules/alb/variables.tf new file mode 100644 index 00000000..bec2026b --- /dev/null +++ b/deployment/modules/alb/variables.tf @@ -0,0 +1,52 @@ +variable "name_prefix" { + description = "Prefix for resource names" + type = string +} + +variable "environment" { + description = "Environment name" + type = string +} + +variable "vpc_id" { + description = "VPC ID where the ALB will be 
created" + type = string +} + +variable "subnet_ids" { + description = "List of subnet IDs for the ALB" + type = list(string) +} + +variable "security_group_ids" { + description = "List of security group IDs for the ALB" + type = list(string) +} + +variable "domain_name" { + description = "Domain name for the ALB" + type = string +} + +variable "certificate_arn" { + description = "ACM certificate ARN for HTTPS" + type = string +} + +variable "container_port" { + description = "Port that the container exposes" + type = number + default = 3000 +} + +variable "health_check_path" { + description = "Path for ALB health checks" + type = string + default = "/health" +} + +variable "tags" { + description = "Tags to apply to resources" + type = map(string) + default = {} +} \ No newline at end of file diff --git a/deployment/modules/bastion/main.tf b/deployment/modules/bastion/main.tf new file mode 100644 index 00000000..cf15f3b9 --- /dev/null +++ b/deployment/modules/bastion/main.tf @@ -0,0 +1,97 @@ +# Bastion Host Module + +# Data source for latest Amazon Linux 2023 AMI +data "aws_ami" "amazon_linux" { + most_recent = true + owners = ["amazon"] + + filter { + name = "name" + values = ["al2023-ami-*-x86_64"] + } + + filter { + name = "virtualization-type" + values = ["hvm"] + } +} + +# Security Group for Bastion Host +resource "aws_security_group" "bastion" { + name_prefix = "${var.name_prefix}-bastion-${var.environment}" + description = "Security group for bastion host" + vpc_id = var.vpc_id + + # SSH access from allowed CIDR blocks + ingress { + description = "SSH access" + from_port = 22 + to_port = 22 + protocol = "tcp" + cidr_blocks = var.allowed_cidr_blocks + } + + # All outbound traffic + egress { + description = "All outbound traffic" + from_port = 0 + to_port = 0 + protocol = "-1" + cidr_blocks = ["0.0.0.0/0"] + } + + tags = merge(var.tags, { + Name = "${var.name_prefix}-bastion-sg-${var.environment}" + }) +} + +# Bastion Host EC2 Instance +resource "aws_instance" 
"bastion" { + ami = data.aws_ami.amazon_linux.id + instance_type = var.instance_type + key_name = var.key_pair_name + subnet_id = var.subnet_id + vpc_security_group_ids = [aws_security_group.bastion.id] + associate_public_ip_address = true + + # User data script for basic setup + user_data = base64encode(<<-EOF + #!/bin/bash + yum update -y + yum install -y aws-cli + + # Start and enable SSM agent for Session Manager access + systemctl start amazon-ssm-agent + systemctl enable amazon-ssm-agent + + # Basic security hardening + sed -i 's/#PermitRootLogin yes/PermitRootLogin no/' /etc/ssh/sshd_config + systemctl restart sshd + + # Install additional useful tools + yum install -y htop vim wget curl git + + # Configure timezone + timedatectl set-timezone UTC + + # Create a log file to verify user data execution + echo "Bastion host user data script completed at $(date)" > /var/log/user-data.log + EOF + ) + + tags = merge(var.tags, { + Name = "${var.name_prefix}-bastion-${var.environment}" + }) +} + +# Elastic IP for Bastion Host +resource "aws_eip" "bastion" { + instance = aws_instance.bastion.id + domain = "vpc" + + tags = merge(var.tags, { + Name = "${var.name_prefix}-bastion-eip-${var.environment}" + }) + + depends_on = [aws_instance.bastion] +} \ No newline at end of file diff --git a/deployment/modules/bastion/outputs.tf b/deployment/modules/bastion/outputs.tf new file mode 100644 index 00000000..72e8015a --- /dev/null +++ b/deployment/modules/bastion/outputs.tf @@ -0,0 +1,26 @@ +# Outputs for Bastion Host Module + +output "instance_id" { + description = "ID of the bastion host instance" + value = aws_instance.bastion.id +} + +output "public_ip" { + description = "Public IP address of the bastion host" + value = aws_eip.bastion.public_ip +} + +output "private_ip" { + description = "Private IP address of the bastion host" + value = aws_instance.bastion.private_ip +} + +output "security_group_id" { + description = "ID of the bastion host security group" + value = 
aws_security_group.bastion.id +} + +output "ssh_command" { + description = "SSH command to connect to bastion host" + value = "ssh -i /path/to/${var.key_pair_name}.pem ec2-user@${aws_eip.bastion.public_ip}" +} \ No newline at end of file diff --git a/deployment/modules/bastion/variables.tf b/deployment/modules/bastion/variables.tf new file mode 100644 index 00000000..362f99c3 --- /dev/null +++ b/deployment/modules/bastion/variables.tf @@ -0,0 +1,44 @@ +# Variables for Bastion Host Module + +variable "name_prefix" { + description = "Prefix for resource names" + type = string +} + +variable "environment" { + description = "Environment name" + type = string +} + +variable "vpc_id" { + description = "VPC ID where bastion host will be deployed" + type = string +} + +variable "subnet_id" { + description = "Public subnet ID where bastion host will be deployed" + type = string +} + +variable "instance_type" { + description = "EC2 instance type for bastion host" + type = string + default = "t3.micro" +} + +variable "key_pair_name" { + description = "Name of the EC2 key pair for SSH access (optional - leave null for Session Manager only)" + type = string + default = null +} + +variable "allowed_cidr_blocks" { + description = "List of CIDR blocks allowed to SSH to bastion host" + type = list(string) +} + +variable "tags" { + description = "Tags to apply to all resources" + type = map(string) + default = {} +} \ No newline at end of file diff --git a/deployment/modules/cloudfront/main.tf b/deployment/modules/cloudfront/main.tf new file mode 100644 index 00000000..3434789a --- /dev/null +++ b/deployment/modules/cloudfront/main.tf @@ -0,0 +1,51 @@ +# CloudFront Distribution +resource "aws_cloudfront_distribution" "main" { + origin { + domain_name = var.s3_bucket_domain_name + origin_id = "S3-${var.s3_bucket_id}" + + s3_origin_config { + origin_access_identity = aws_cloudfront_origin_access_identity.main.cloudfront_access_identity_path + } + } + + enabled = true + comment = 
"CloudFront distribution for ${var.name_prefix} ${var.environment}" + + aliases = [var.media_domain_name] + + default_cache_behavior { + allowed_methods = ["DELETE", "GET", "HEAD", "OPTIONS", "PATCH", "POST", "PUT"] + cached_methods = ["GET", "HEAD"] + target_origin_id = "S3-${var.s3_bucket_id}" + compress = true + viewer_protocol_policy = "redirect-to-https" + + forwarded_values { + query_string = false + cookies { + forward = "none" + } + } + } + + price_class = "PriceClass_100" + + restrictions { + geo_restriction { + restriction_type = "none" + } + } + + viewer_certificate { + acm_certificate_arn = var.certificate_arn + ssl_support_method = "sni-only" + } + + tags = var.tags +} + +# Origin Access Identity +resource "aws_cloudfront_origin_access_identity" "main" { + comment = "OAI for ${var.name_prefix} ${var.environment}" +} \ No newline at end of file diff --git a/deployment/modules/cloudfront/outputs.tf b/deployment/modules/cloudfront/outputs.tf new file mode 100644 index 00000000..dfea10df --- /dev/null +++ b/deployment/modules/cloudfront/outputs.tf @@ -0,0 +1,9 @@ +output "distribution_id" { + description = "CloudFront distribution ID" + value = aws_cloudfront_distribution.main.id +} + +output "distribution_domain_name" { + description = "CloudFront distribution domain name" + value = aws_cloudfront_distribution.main.domain_name +} \ No newline at end of file diff --git a/deployment/modules/cloudfront/variables.tf b/deployment/modules/cloudfront/variables.tf new file mode 100644 index 00000000..2201a58b --- /dev/null +++ b/deployment/modules/cloudfront/variables.tf @@ -0,0 +1,35 @@ +variable "name_prefix" { + description = "Prefix for resource names" + type = string +} + +variable "environment" { + description = "Environment name" + type = string +} + +variable "s3_bucket_domain_name" { + description = "S3 bucket domain name" + type = string +} + +variable "s3_bucket_id" { + description = "S3 bucket ID" + type = string +} + +variable "media_domain_name" { + 
description = "Media domain name" + type = string +} + +variable "certificate_arn" { + description = "ACM certificate ARN" + type = string +} + +variable "tags" { + description = "Tags to apply to resources" + type = map(string) + default = {} +} \ No newline at end of file diff --git a/deployment/modules/cloudwatch/main.tf b/deployment/modules/cloudwatch/main.tf new file mode 100644 index 00000000..46160da7 --- /dev/null +++ b/deployment/modules/cloudwatch/main.tf @@ -0,0 +1,36 @@ +# CloudWatch Log Group +resource "aws_cloudwatch_log_group" "app_logs" { + name = "/aws/ecs/${var.name_prefix}-${var.environment}" + retention_in_days = var.log_retention_days + + tags = var.tags +} + +# CloudWatch Dashboard +resource "aws_cloudwatch_dashboard" "main" { + dashboard_name = "${var.name_prefix}-${var.environment}-dashboard" + + dashboard_body = jsonencode({ + widgets = [ + { + type = "metric" + x = 0 + y = 0 + width = 12 + height = 6 + + properties = { + metrics = [ + ["AWS/ECS", "CPUUtilization", "ServiceName", var.ecs_service_name, "ClusterName", var.ecs_cluster_name], + [".", "MemoryUtilization", ".", ".", ".", "."] + ] + view = "timeSeries" + stacked = false + region = "us-east-1" + title = "ECS Service Metrics" + period = 300 + } + } + ] + }) +} \ No newline at end of file diff --git a/deployment/modules/cloudwatch/outputs.tf b/deployment/modules/cloudwatch/outputs.tf new file mode 100644 index 00000000..8acb3019 --- /dev/null +++ b/deployment/modules/cloudwatch/outputs.tf @@ -0,0 +1,9 @@ +output "log_group_name" { + description = "CloudWatch log group name" + value = aws_cloudwatch_log_group.app_logs.name +} + +output "dashboard_name" { + description = "CloudWatch dashboard name" + value = aws_cloudwatch_dashboard.main.dashboard_name +} \ No newline at end of file diff --git a/deployment/modules/cloudwatch/variables.tf b/deployment/modules/cloudwatch/variables.tf new file mode 100644 index 00000000..795f4284 --- /dev/null +++ 
b/deployment/modules/cloudwatch/variables.tf @@ -0,0 +1,52 @@ +variable "name_prefix" { + description = "Prefix for resource names" + type = string +} + +variable "environment" { + description = "Environment name" + type = string +} + +variable "ecs_cluster_name" { + description = "ECS cluster name" + type = string +} + +variable "ecs_service_name" { + description = "ECS service name" + type = string +} + +variable "alb_arn_suffix" { + description = "ALB ARN suffix" + type = string +} + +variable "documentdb_cluster_id" { + description = "DocumentDB cluster ID" + type = string +} + +variable "redis_cluster_id" { + description = "Redis cluster ID" + type = string +} + +variable "slack_webhook_url" { + description = "Slack webhook URL for notifications" + type = string + default = "" +} + +variable "log_retention_days" { + description = "Number of days to retain CloudWatch logs" + type = number + default = 7 +} + +variable "tags" { + description = "Tags to apply to resources" + type = map(string) + default = {} +} \ No newline at end of file diff --git a/deployment/modules/documentdb/main.tf b/deployment/modules/documentdb/main.tf new file mode 100644 index 00000000..04e594e2 --- /dev/null +++ b/deployment/modules/documentdb/main.tf @@ -0,0 +1,54 @@ +# DocumentDB Subnet Group +resource "aws_docdb_subnet_group" "main" { + name = "${var.name_prefix}-${var.environment}-docdb-subnet-group" + subnet_ids = var.subnet_ids + + tags = var.tags +} + +# DocumentDB Parameter Group +resource "aws_docdb_cluster_parameter_group" "main" { + family = "docdb5.0" + name = "${var.name_prefix}-${var.environment}-docdb-parameter-group" + description = "DocumentDB cluster parameter group for ${var.name_prefix}-${var.environment}" + + parameter { + name = "tls" + value = "enabled" + } + + tags = var.tags +} + +# DocumentDB Cluster +resource "aws_docdb_cluster" "main" { + cluster_identifier = "${var.name_prefix}-${var.environment}-docdb-cluster" + engine = "docdb" + engine_version = "5.0.0" 
+ master_username = var.master_username + master_password = var.master_password + db_cluster_parameter_group_name = aws_docdb_cluster_parameter_group.main.name + db_subnet_group_name = aws_docdb_subnet_group.main.name + vpc_security_group_ids = var.security_group_ids + storage_encrypted = true + skip_final_snapshot = true + + # Enable audit logging for security compliance + enabled_cloudwatch_logs_exports = ["audit"] + + # Backup configuration + backup_retention_period = 7 + preferred_backup_window = "03:00-04:00" + preferred_maintenance_window = "sun:04:00-sun:05:00" + + tags = var.tags +} + +# DocumentDB Instance +resource "aws_docdb_cluster_instance" "cluster_instances" { + identifier = "${var.name_prefix}-${var.environment}-docdb" + cluster_identifier = aws_docdb_cluster.main.id + instance_class = "db.t3.medium" + + tags = var.tags +} \ No newline at end of file diff --git a/deployment/modules/documentdb/outputs.tf b/deployment/modules/documentdb/outputs.tf new file mode 100644 index 00000000..d1a6bb4b --- /dev/null +++ b/deployment/modules/documentdb/outputs.tf @@ -0,0 +1,14 @@ +output "cluster_endpoint" { + description = "DocumentDB cluster endpoint" + value = aws_docdb_cluster.main.endpoint +} + +output "cluster_identifier" { + description = "DocumentDB cluster identifier" + value = aws_docdb_cluster.main.cluster_identifier +} + +output "cluster_arn" { + description = "DocumentDB cluster ARN" + value = aws_docdb_cluster.main.arn +} \ No newline at end of file diff --git a/deployment/modules/documentdb/variables.tf b/deployment/modules/documentdb/variables.tf new file mode 100644 index 00000000..5b449ec6 --- /dev/null +++ b/deployment/modules/documentdb/variables.tf @@ -0,0 +1,41 @@ +variable "name_prefix" { + description = "Prefix for resource names" + type = string +} + +variable "environment" { + description = "Environment name" + type = string +} + +variable "vpc_id" { + description = "VPC ID" + type = string +} + +variable "subnet_ids" { + description = 
"List of subnet IDs" + type = list(string) +} + +variable "security_group_ids" { + description = "List of security group IDs" + type = list(string) +} + +variable "master_username" { + description = "Master username for DocumentDB" + type = string +} + +variable "master_password" { + description = "Master password for DocumentDB" + type = string + sensitive = true +} + +variable "tags" { + description = "Tags to apply to resources" + type = map(string) + default = {} +} \ No newline at end of file diff --git a/deployment/modules/ecr/main.tf b/deployment/modules/ecr/main.tf new file mode 100644 index 00000000..328b409b --- /dev/null +++ b/deployment/modules/ecr/main.tf @@ -0,0 +1,48 @@ +# ECR Repository +resource "aws_ecr_repository" "main" { + name = "${var.name_prefix}-${var.environment}" + image_tag_mutability = "MUTABLE" + force_delete = true + + image_scanning_configuration { + scan_on_push = true + } + + tags = var.tags +} + +# ECR Lifecycle Policy +resource "aws_ecr_lifecycle_policy" "main" { + repository = aws_ecr_repository.main.name + + policy = jsonencode({ + rules = [ + { + rulePriority = 1 + description = "Keep last 10 images" + selection = { + tagStatus = "tagged" + tagPrefixList = ["v"] + countType = "imageCountMoreThan" + countNumber = 10 + } + action = { + type = "expire" + } + }, + { + rulePriority = 2 + description = "Delete untagged images older than 1 day" + selection = { + tagStatus = "untagged" + countType = "sinceImagePushed" + countUnit = "days" + countNumber = 1 + } + action = { + type = "expire" + } + } + ] + }) +} \ No newline at end of file diff --git a/deployment/modules/ecr/outputs.tf b/deployment/modules/ecr/outputs.tf new file mode 100644 index 00000000..d2b4d9ba --- /dev/null +++ b/deployment/modules/ecr/outputs.tf @@ -0,0 +1,14 @@ +output "repository_url" { + description = "URL of the ECR repository" + value = aws_ecr_repository.main.repository_url +} + +output "repository_arn" { + description = "ARN of the ECR repository" + value 
= aws_ecr_repository.main.arn +} + +output "repository_name" { + description = "Name of the ECR repository" + value = aws_ecr_repository.main.name +} \ No newline at end of file diff --git a/deployment/modules/ecr/variables.tf b/deployment/modules/ecr/variables.tf new file mode 100644 index 00000000..9935f6a8 --- /dev/null +++ b/deployment/modules/ecr/variables.tf @@ -0,0 +1,15 @@ +variable "name_prefix" { + description = "Prefix for resource names" + type = string +} + +variable "environment" { + description = "Environment name" + type = string +} + +variable "tags" { + description = "Tags to apply to resources" + type = map(string) + default = {} +} \ No newline at end of file diff --git a/deployment/modules/ecs/main.tf b/deployment/modules/ecs/main.tf new file mode 100644 index 00000000..a5894247 --- /dev/null +++ b/deployment/modules/ecs/main.tf @@ -0,0 +1,143 @@ +# ECS Cluster +resource "aws_ecs_cluster" "main" { + name = "${var.name_prefix}-${var.environment}-cluster" + + setting { + name = "containerInsights" + value = "enabled" + } + + tags = var.tags +} + +# ECS Task Definition +resource "aws_ecs_task_definition" "main" { + family = "${var.name_prefix}-${var.environment}-app" + network_mode = "awsvpc" + requires_compatibilities = ["FARGATE"] + cpu = var.container_cpu + memory = var.container_memory + execution_role_arn = var.execution_role_arn + task_role_arn = var.task_role_arn + + container_definitions = jsonencode([ + { + name = "app" + image = "${var.ecr_repository_url}:latest" + + portMappings = [ + { + containerPort = var.container_port + protocol = "tcp" + } + ] + + environment = [ + for key, value in var.environment_variables : { + name = key + value = value + } + ] + + secrets = [ + for key, value in var.secrets : { + name = key + valueFrom = value + } + ] + + logConfiguration = { + logDriver = "awslogs" + options = { + awslogs-group = aws_cloudwatch_log_group.main.name + awslogs-region = data.aws_region.current.name + awslogs-stream-prefix = 
"ecs" + } + } + } + ]) + + tags = var.tags +} + +# CloudWatch Log Group +resource "aws_cloudwatch_log_group" "main" { + name = "/ecs/${var.name_prefix}-${var.environment}" + retention_in_days = var.log_retention_days + + tags = var.tags +} + +# Data source for current region +data "aws_region" "current" {} + +# ECS Service +resource "aws_ecs_service" "main" { + name = "${var.name_prefix}-${var.environment}-service" + cluster = aws_ecs_cluster.main.id + task_definition = aws_ecs_task_definition.main.arn + desired_count = var.ecs_desired_count + launch_type = "FARGATE" + + network_configuration { + subnets = var.subnet_ids + security_groups = var.security_group_ids + } + + load_balancer { + target_group_arn = var.target_group_arn + container_name = "app" + container_port = var.container_port + } + + depends_on = [var.target_group_arn] + + tags = var.tags +} + +# Application Auto Scaling Target +resource "aws_appautoscaling_target" "ecs_target" { + max_capacity = var.ecs_max_capacity + min_capacity = var.ecs_desired_count + resource_id = "service/${aws_ecs_cluster.main.name}/${aws_ecs_service.main.name}" + scalable_dimension = "ecs:service:DesiredCount" + service_namespace = "ecs" + + tags = var.tags +} + +# Auto Scaling Policy - Scale Up +resource "aws_appautoscaling_policy" "scale_up" { + name = "${var.name_prefix}-${var.environment}-scale-up" + policy_type = "TargetTrackingScaling" + resource_id = aws_appautoscaling_target.ecs_target.resource_id + scalable_dimension = aws_appautoscaling_target.ecs_target.scalable_dimension + service_namespace = aws_appautoscaling_target.ecs_target.service_namespace + + target_tracking_scaling_policy_configuration { + predefined_metric_specification { + predefined_metric_type = "ECSServiceAverageCPUUtilization" + } + target_value = 70.0 + scale_in_cooldown = 300 + scale_out_cooldown = 300 + } +} + +# Auto Scaling Policy - Memory Based +resource "aws_appautoscaling_policy" "scale_memory" { + name = 
"${var.name_prefix}-${var.environment}-scale-memory" + policy_type = "TargetTrackingScaling" + resource_id = aws_appautoscaling_target.ecs_target.resource_id + scalable_dimension = aws_appautoscaling_target.ecs_target.scalable_dimension + service_namespace = aws_appautoscaling_target.ecs_target.service_namespace + + target_tracking_scaling_policy_configuration { + predefined_metric_specification { + predefined_metric_type = "ECSServiceAverageMemoryUtilization" + } + target_value = 80.0 + scale_in_cooldown = 300 + scale_out_cooldown = 300 + } +} \ No newline at end of file diff --git a/deployment/modules/ecs/outputs.tf b/deployment/modules/ecs/outputs.tf new file mode 100644 index 00000000..109c4fbd --- /dev/null +++ b/deployment/modules/ecs/outputs.tf @@ -0,0 +1,29 @@ +output "cluster_name" { + description = "ECS cluster name" + value = aws_ecs_cluster.main.name +} + +output "service_name" { + description = "ECS service name" + value = aws_ecs_service.main.name +} + +output "cluster_arn" { + description = "ECS cluster ARN" + value = aws_ecs_cluster.main.arn +} + +output "autoscaling_target_arn" { + description = "Application Auto Scaling target ARN" + value = aws_appautoscaling_target.ecs_target.arn +} + +output "cpu_scaling_policy_arn" { + description = "CPU-based scaling policy ARN" + value = aws_appautoscaling_policy.scale_up.arn +} + +output "memory_scaling_policy_arn" { + description = "Memory-based scaling policy ARN" + value = aws_appautoscaling_policy.scale_memory.arn +} \ No newline at end of file diff --git a/deployment/modules/ecs/variables.tf b/deployment/modules/ecs/variables.tf new file mode 100644 index 00000000..27182087 --- /dev/null +++ b/deployment/modules/ecs/variables.tf @@ -0,0 +1,92 @@ +variable "name_prefix" { + description = "Prefix for resource names" + type = string +} + +variable "environment" { + description = "Environment name" + type = string +} + +variable "vpc_id" { + description = "VPC ID" + type = string +} + +variable 
"subnet_ids" { + description = "List of subnet IDs" + type = list(string) +} + +variable "security_group_ids" { + description = "List of security group IDs" + type = list(string) +} + +variable "ecr_repository_url" { + description = "ECR repository URL" + type = string +} + +variable "task_role_arn" { + description = "ECS task role ARN" + type = string +} + +variable "execution_role_arn" { + description = "ECS execution role ARN" + type = string +} + +variable "target_group_arn" { + description = "ALB target group ARN" + type = string +} + +variable "container_cpu" { + description = "CPU units for the container" + type = number +} + +variable "container_memory" { + description = "Memory in MB for the container" + type = number +} + +variable "container_port" { + description = "Port that the container exposes" + type = number +} + +variable "log_retention_days" { + description = "Number of days to retain CloudWatch logs" + type = number +} + +variable "ecs_desired_count" { + description = "Desired number of ECS tasks" + type = number +} + +variable "ecs_max_capacity" { + description = "Maximum number of ECS tasks for auto-scaling" + type = number +} + +variable "environment_variables" { + description = "Environment variables for the container" + type = map(string) + default = {} +} + +variable "secrets" { + description = "Secrets for the container" + type = map(string) + default = {} +} + +variable "tags" { + description = "Tags to apply to resources" + type = map(string) + default = {} +} \ No newline at end of file diff --git a/deployment/modules/iam/main.tf b/deployment/modules/iam/main.tf new file mode 100644 index 00000000..137cc16d --- /dev/null +++ b/deployment/modules/iam/main.tf @@ -0,0 +1,172 @@ +# ECS Task Execution Role +resource "aws_iam_role" "ecs_execution_role" { + name = "${var.name_prefix}-${var.environment}-ecs-execution-role" + + assume_role_policy = jsonencode({ + Version = "2012-10-17" + Statement = [ + { + Action = "sts:AssumeRole" + Effect = 
"Allow" + Principal = { + Service = "ecs-tasks.amazonaws.com" + } + } + ] + }) + + tags = var.tags +} + +# Attach AWS managed policy for ECS task execution +resource "aws_iam_role_policy_attachment" "ecs_execution_role_policy" { + role = aws_iam_role.ecs_execution_role.name + policy_arn = "arn:aws:iam::aws:policy/service-role/AmazonECSTaskExecutionRolePolicy" +} + +# SSM access policy for ECS execution role +resource "aws_iam_role_policy" "ecs_execution_ssm_policy" { + name = "${var.name_prefix}-${var.environment}-ecs-execution-ssm-policy" + role = aws_iam_role.ecs_execution_role.id + + policy = jsonencode({ + Version = "2012-10-17" + Statement = [ + { + Effect = "Allow" + Action = [ + "ssm:GetParameters", + "ssm:GetParameter" + ] + Resource = [ + "arn:aws:ssm:*:*:parameter/${var.name_prefix}/${var.environment}/*" + ] + } + ] + }) +} + +# ECS Task Role +resource "aws_iam_role" "ecs_task_role" { + name = "${var.name_prefix}-${var.environment}-ecs-task-role" + + assume_role_policy = jsonencode({ + Version = "2012-10-17" + Statement = [ + { + Action = "sts:AssumeRole" + Effect = "Allow" + Principal = { + Service = "ecs-tasks.amazonaws.com" + } + } + ] + }) + + tags = var.tags +} + +# S3 access policy for ECS task +resource "aws_iam_role_policy" "ecs_s3_policy" { + name = "${var.name_prefix}-${var.environment}-ecs-s3-policy" + role = aws_iam_role.ecs_task_role.id + + policy = jsonencode({ + Version = "2012-10-17" + Statement = [ + { + Effect = "Allow" + Action = [ + "s3:GetObject", + "s3:PutObject", + "s3:DeleteObject", + "s3:ListBucket" + ] + Resource = [ + var.s3_attachments_bucket_arn, + "${var.s3_attachments_bucket_arn}/*" + ] + } + ] + }) +} + +# GitHub Actions OIDC provider +resource "aws_iam_openid_connect_provider" "github_actions" { + url = "https://token.actions.githubusercontent.com" + + client_id_list = [ + "sts.amazonaws.com" + ] + + thumbprint_list = [ + "6938fd4d98bab03faadb97b34396831e3780aea1" + ] + + tags = var.tags +} + +# GitHub Actions Role 
+resource "aws_iam_role" "github_actions" { + name = "${var.name_prefix}-${var.environment}-github-actions-role" + + assume_role_policy = jsonencode({ + Version = "2012-10-17" + Statement = [ + { + Effect = "Allow" + Principal = { + Federated = aws_iam_openid_connect_provider.github_actions.arn + } + Action = "sts:AssumeRoleWithWebIdentity" + Condition = { + StringEquals = { + "token.actions.githubusercontent.com:aud" = "sts.amazonaws.com" + } + StringLike = { + "token.actions.githubusercontent.com:sub" = "repo:${var.github_repo}:*" + } + } + } + ] + }) + + tags = var.tags +} + +# GitHub Actions policy for ECR and ECS +resource "aws_iam_role_policy" "github_actions_policy" { + name = "${var.name_prefix}-${var.environment}-github-actions-policy" + role = aws_iam_role.github_actions.id + + policy = jsonencode({ + Version = "2012-10-17" + Statement = [ + { + Effect = "Allow" + Action = [ + "ecr:GetAuthorizationToken", + "ecr:BatchCheckLayerAvailability", + "ecr:GetDownloadUrlForLayer", + "ecr:BatchGetImage", + "ecr:InitiateLayerUpload", + "ecr:UploadLayerPart", + "ecr:CompleteLayerUpload", + "ecr:PutImage" + ] + Resource = [ + var.ecr_repository_arn, + "*" + ] + }, + { + Effect = "Allow" + Action = [ + "ecs:UpdateService", + "ecs:DescribeServices" + ] + Resource = "*" + } + ] + }) +} \ No newline at end of file diff --git a/deployment/modules/iam/outputs.tf b/deployment/modules/iam/outputs.tf new file mode 100644 index 00000000..b3b4ca41 --- /dev/null +++ b/deployment/modules/iam/outputs.tf @@ -0,0 +1,14 @@ +output "ecs_task_role_arn" { + description = "ARN of the ECS task role" + value = aws_iam_role.ecs_task_role.arn +} + +output "ecs_execution_role_arn" { + description = "ARN of the ECS execution role" + value = aws_iam_role.ecs_execution_role.arn +} + +output "github_actions_role_arn" { + description = "ARN of the GitHub Actions role" + value = aws_iam_role.github_actions.arn +} \ No newline at end of file diff --git a/deployment/modules/iam/variables.tf 
b/deployment/modules/iam/variables.tf new file mode 100644 index 00000000..d3e6bda2 --- /dev/null +++ b/deployment/modules/iam/variables.tf @@ -0,0 +1,30 @@ +variable "name_prefix" { + description = "Prefix for resource names" + type = string +} + +variable "environment" { + description = "Environment name" + type = string +} + +variable "s3_attachments_bucket_arn" { + description = "ARN of the S3 attachments bucket" + type = string +} + +variable "ecr_repository_arn" { + description = "ARN of the ECR repository" + type = string +} + +variable "github_repo" { + description = "GitHub repository in format owner/repo" + type = string +} + +variable "tags" { + description = "Tags to apply to resources" + type = map(string) + default = {} +} \ No newline at end of file diff --git a/deployment/modules/parameter_store/main.tf b/deployment/modules/parameter_store/main.tf new file mode 100644 index 00000000..ace82e88 --- /dev/null +++ b/deployment/modules/parameter_store/main.tf @@ -0,0 +1,46 @@ +# Session Secret Parameter +resource "aws_ssm_parameter" "session_secret" { + name = "/${var.name_prefix}/${var.environment}/session-secret" + type = "SecureString" + value = var.session_secret + + tags = var.tags +} + +# DocumentDB Master Username Parameter +resource "aws_ssm_parameter" "documentdb_username" { + name = "/${var.name_prefix}/${var.environment}/documentdb-username" + type = "String" + value = var.documentdb_master_username + + tags = var.tags +} + +# DocumentDB Master Password Parameter +resource "aws_ssm_parameter" "documentdb_password" { + name = "/${var.name_prefix}/${var.environment}/documentdb-password" + type = "SecureString" + value = var.documentdb_master_password + + tags = var.tags +} + +# Redis Auth Token Parameter +resource "aws_ssm_parameter" "redis_auth_token" { + name = "/${var.name_prefix}/${var.environment}/redis-auth-token" + type = "SecureString" + value = var.redis_auth_token + + tags = var.tags +} + +# Google Cloud Storage Service Account Key 
Parameter (optional) +resource "aws_ssm_parameter" "gcs_service_account_key" { + count = var.gcs_service_account_key != "" ? 1 : 0 + + name = "/${var.name_prefix}/${var.environment}/gcs-service-account-key" + type = "SecureString" + value = var.gcs_service_account_key + + tags = var.tags +} \ No newline at end of file diff --git a/deployment/modules/parameter_store/outputs.tf b/deployment/modules/parameter_store/outputs.tf new file mode 100644 index 00000000..acebd32b --- /dev/null +++ b/deployment/modules/parameter_store/outputs.tf @@ -0,0 +1,24 @@ +output "session_secret_arn" { + description = "ARN of the session secret parameter" + value = aws_ssm_parameter.session_secret.arn +} + +output "documentdb_username_arn" { + description = "ARN of the DocumentDB username parameter" + value = aws_ssm_parameter.documentdb_username.arn +} + +output "documentdb_password_arn" { + description = "ARN of the DocumentDB password parameter" + value = aws_ssm_parameter.documentdb_password.arn +} + +output "redis_auth_token_arn" { + description = "ARN of the Redis auth token parameter" + value = aws_ssm_parameter.redis_auth_token.arn +} + +output "gcs_service_account_key_arn" { + description = "ARN of the GCS service account key parameter" + value = var.gcs_service_account_key != "" ? 
aws_ssm_parameter.gcs_service_account_key[0].arn : "" +} \ No newline at end of file diff --git a/deployment/modules/parameter_store/variables.tf b/deployment/modules/parameter_store/variables.tf new file mode 100644 index 00000000..fdfbe606 --- /dev/null +++ b/deployment/modules/parameter_store/variables.tf @@ -0,0 +1,45 @@ +variable "name_prefix" { + description = "Prefix for resource names" + type = string +} + +variable "environment" { + description = "Environment name" + type = string +} + +variable "documentdb_master_username" { + description = "Master username for DocumentDB" + type = string +} + +variable "documentdb_master_password" { + description = "Master password for DocumentDB" + type = string + sensitive = true +} + +variable "session_secret" { + description = "Session secret for the application" + type = string + sensitive = true +} + +variable "redis_auth_token" { + description = "Auth token for Redis" + type = string + sensitive = true +} + +variable "gcs_service_account_key" { + description = "Google Cloud Storage service account key (JSON)" + type = string + default = "" + sensitive = true +} + +variable "tags" { + description = "Tags to apply to resources" + type = map(string) + default = {} +} \ No newline at end of file diff --git a/deployment/modules/redis/main.tf b/deployment/modules/redis/main.tf new file mode 100644 index 00000000..b5702ddc --- /dev/null +++ b/deployment/modules/redis/main.tf @@ -0,0 +1,26 @@ +# ElastiCache Subnet Group +resource "aws_elasticache_subnet_group" "main" { + name = "${var.name_prefix}-${var.environment}-redis-subnet-group" + subnet_ids = var.subnet_ids + + tags = var.tags +} + +# ElastiCache Redis Cluster +resource "aws_elasticache_replication_group" "main" { + replication_group_id = "${var.name_prefix}-${var.environment}-redis" + description = "Redis cluster for ${var.name_prefix} ${var.environment}" + node_type = "cache.t3.micro" + port = 6379 + parameter_group_name = "default.redis7" + num_cache_clusters = 
1 + + subnet_group_name = aws_elasticache_subnet_group.main.name + security_group_ids = var.security_group_ids + + auth_token = var.auth_token + transit_encryption_enabled = true + at_rest_encryption_enabled = true + + tags = var.tags +} \ No newline at end of file diff --git a/deployment/modules/redis/outputs.tf b/deployment/modules/redis/outputs.tf new file mode 100644 index 00000000..472deae1 --- /dev/null +++ b/deployment/modules/redis/outputs.tf @@ -0,0 +1,9 @@ +output "cluster_endpoint" { + description = "Redis cluster endpoint" + value = aws_elasticache_replication_group.main.primary_endpoint_address +} + +output "cluster_id" { + description = "Redis cluster ID" + value = aws_elasticache_replication_group.main.replication_group_id +} \ No newline at end of file diff --git a/deployment/modules/redis/variables.tf b/deployment/modules/redis/variables.tf new file mode 100644 index 00000000..c4c76d88 --- /dev/null +++ b/deployment/modules/redis/variables.tf @@ -0,0 +1,36 @@ +variable "name_prefix" { + description = "Prefix for resource names" + type = string +} + +variable "environment" { + description = "Environment name" + type = string +} + +variable "vpc_id" { + description = "VPC ID" + type = string +} + +variable "subnet_ids" { + description = "List of subnet IDs" + type = list(string) +} + +variable "security_group_ids" { + description = "List of security group IDs" + type = list(string) +} + +variable "auth_token" { + description = "Auth token for Redis" + type = string + sensitive = true +} + +variable "tags" { + description = "Tags to apply to resources" + type = map(string) + default = {} +} \ No newline at end of file diff --git a/deployment/modules/s3/main.tf b/deployment/modules/s3/main.tf new file mode 100644 index 00000000..0f3f1813 --- /dev/null +++ b/deployment/modules/s3/main.tf @@ -0,0 +1,219 @@ +# S3 Module for SF Website Infrastructure + +# S3 Bucket for Attachments +resource "aws_s3_bucket" "attachments" { + bucket = 
"${var.name_prefix}-s3-attachments-${var.environment}" + + tags = merge(var.tags, { + Name = "${var.name_prefix}-s3-attachments-${var.environment}" + Type = "Attachments" + }) +} + +# S3 Bucket for Logs +resource "aws_s3_bucket" "logs" { + bucket = "${var.name_prefix}-s3-logs-${var.environment}" + + tags = merge(var.tags, { + Name = "${var.name_prefix}-s3-logs-${var.environment}" + Type = "Logs" + }) +} + +# Attachments Bucket Configuration +resource "aws_s3_bucket_versioning" "attachments" { + bucket = aws_s3_bucket.attachments.id + versioning_configuration { + status = "Enabled" + } +} + +resource "aws_s3_bucket_server_side_encryption_configuration" "attachments" { + bucket = aws_s3_bucket.attachments.id + + rule { + apply_server_side_encryption_by_default { + sse_algorithm = "AES256" + } + } +} + +resource "aws_s3_bucket_public_access_block" "attachments" { + bucket = aws_s3_bucket.attachments.id + + block_public_acls = true + block_public_policy = true + ignore_public_acls = true + restrict_public_buckets = true +} + +resource "aws_s3_bucket_lifecycle_configuration" "attachments" { + bucket = aws_s3_bucket.attachments.id + + rule { + id = "transition_to_ia" + status = "Enabled" + + filter { + prefix = "" + } + + transition { + days = 30 + storage_class = "STANDARD_IA" + } + + transition { + days = 90 + storage_class = "GLACIER" + } + } + + rule { + id = "expire_non_current_versions" + status = "Enabled" + + filter { + prefix = "" + } + + noncurrent_version_expiration { + noncurrent_days = 30 + } + } +} + +resource "aws_s3_bucket_cors_configuration" "attachments" { + bucket = aws_s3_bucket.attachments.id + + cors_rule { + allowed_headers = ["*"] + allowed_methods = ["GET", "PUT", "POST", "DELETE"] + allowed_origins = [ + "https://${var.domain_name}", + "https://${var.media_domain_name}" + ] + expose_headers = ["ETag"] + max_age_seconds = 3000 + } +} + +# Logs Bucket Configuration +resource "aws_s3_bucket_versioning" "logs" { + bucket = aws_s3_bucket.logs.id + 
versioning_configuration { + status = "Enabled" + } +} + +resource "aws_s3_bucket_server_side_encryption_configuration" "logs" { + bucket = aws_s3_bucket.logs.id + + rule { + apply_server_side_encryption_by_default { + sse_algorithm = "AES256" + } + } +} + +resource "aws_s3_bucket_public_access_block" "logs" { + bucket = aws_s3_bucket.logs.id + + block_public_acls = true + block_public_policy = true + ignore_public_acls = true + restrict_public_buckets = true +} + +resource "aws_s3_bucket_lifecycle_configuration" "logs" { + bucket = aws_s3_bucket.logs.id + + rule { + id = "log_lifecycle" + status = "Enabled" + + filter { + prefix = "" + } + + transition { + days = 90 + storage_class = "GLACIER" + } + + expiration { + days = 365 + } + } +} + +# CloudFront Origin Access Control for Attachments Bucket +resource "aws_cloudfront_origin_access_control" "attachments" { + name = "${var.name_prefix}-attachments-oac-${var.environment}" + description = "OAC for attachments bucket" + origin_access_control_origin_type = "s3" + signing_behavior = "always" + signing_protocol = "sigv4" +} + +# Bucket Policy for CloudFront Access +resource "aws_s3_bucket_policy" "attachments" { + bucket = aws_s3_bucket.attachments.id + + policy = jsonencode({ + Version = "2012-10-17" + Statement = [ + { + Sid = "AllowCloudFrontServicePrincipal" + Effect = "Allow" + Principal = { + Service = "cloudfront.amazonaws.com" + } + Action = "s3:GetObject" + Resource = "${aws_s3_bucket.attachments.arn}/*" + Condition = { + StringEquals = { + "AWS:SourceArn" = var.cloudfront_distribution_arn + } + } + } + ] + }) + + depends_on = [aws_s3_bucket_public_access_block.attachments] +} + +# Bucket Policy for Application Access (via IAM role) +data "aws_iam_policy_document" "attachments_access" { + statement { + sid = "AllowECSTaskAccess" + effect = "Allow" + + principals { + type = "AWS" + identifiers = [var.ecs_task_role_arn] + } + + actions = [ + "s3:GetObject", + "s3:PutObject", + "s3:DeleteObject" + ] + + 
resources = ["${aws_s3_bucket.attachments.arn}/*"] + } + + statement { + sid = "AllowECSTaskListBucket" + effect = "Allow" + + principals { + type = "AWS" + identifiers = [var.ecs_task_role_arn] + } + + actions = ["s3:ListBucket"] + + resources = [aws_s3_bucket.attachments.arn] + } +} \ No newline at end of file diff --git a/deployment/modules/s3/outputs.tf b/deployment/modules/s3/outputs.tf new file mode 100644 index 00000000..97f9f777 --- /dev/null +++ b/deployment/modules/s3/outputs.tf @@ -0,0 +1,31 @@ +# Outputs for S3 Module + +output "attachments_bucket_id" { + description = "ID of the attachments bucket" + value = aws_s3_bucket.attachments.id +} + +output "attachments_bucket_arn" { + description = "ARN of the attachments bucket" + value = aws_s3_bucket.attachments.arn +} + +output "attachments_bucket_domain_name" { + description = "Domain name of the attachments bucket" + value = aws_s3_bucket.attachments.bucket_domain_name +} + +output "logs_bucket_id" { + description = "ID of the logs bucket" + value = aws_s3_bucket.logs.id +} + +output "logs_bucket_arn" { + description = "ARN of the logs bucket" + value = aws_s3_bucket.logs.arn +} + +output "cloudfront_oac_id" { + description = "ID of the CloudFront Origin Access Control" + value = aws_cloudfront_origin_access_control.attachments.id +} \ No newline at end of file diff --git a/deployment/modules/s3/variables.tf b/deployment/modules/s3/variables.tf new file mode 100644 index 00000000..151f4ec5 --- /dev/null +++ b/deployment/modules/s3/variables.tf @@ -0,0 +1,41 @@ +# Variables for S3 Module + +variable "name_prefix" { + description = "Name prefix for resources" + type = string +} + +variable "environment" { + description = "Environment name" + type = string +} + +variable "tags" { + description = "Tags to apply to resources" + type = map(string) + default = {} +} + +variable "domain_name" { + description = "Application domain name for CORS" + type = string + default = "" +} + +variable "media_domain_name" { 
+ description = "Media domain name for CORS" + type = string + default = "" +} + +variable "cloudfront_distribution_arn" { + description = "ARN of CloudFront distribution for bucket policy" + type = string + default = "" +} + +variable "ecs_task_role_arn" { + description = "ARN of ECS task role for bucket access" + type = string + default = "" +} \ No newline at end of file diff --git a/deployment/modules/security_groups/main.tf b/deployment/modules/security_groups/main.tf new file mode 100644 index 00000000..19125873 --- /dev/null +++ b/deployment/modules/security_groups/main.tf @@ -0,0 +1,117 @@ +# Security Groups Module for SF Website Infrastructure + +# ALB Security Group +resource "aws_security_group" "alb" { + name = "${var.name_prefix}-alb-sg-${var.environment}" + description = "Security group for Application Load Balancer" + vpc_id = var.vpc_id + + ingress { + description = "HTTP" + from_port = 80 + to_port = 80 + protocol = "tcp" + cidr_blocks = ["0.0.0.0/0"] + } + + ingress { + description = "HTTPS" + from_port = 443 + to_port = 443 + protocol = "tcp" + cidr_blocks = ["0.0.0.0/0"] + } + + egress { + description = "All outbound traffic" + from_port = 0 + to_port = 0 + protocol = "-1" + cidr_blocks = ["0.0.0.0/0"] + } + + tags = merge(var.tags, { + Name = "${var.name_prefix}-alb-sg-${var.environment}" + }) +} + +# ECS Security Group +resource "aws_security_group" "ecs" { + name = "${var.name_prefix}-ecs-sg-${var.environment}" + description = "Security group for ECS tasks" + vpc_id = var.vpc_id + + ingress { + description = "HTTP from ALB" + from_port = var.container_port + to_port = var.container_port + protocol = "tcp" + security_groups = [aws_security_group.alb.id] + } + + egress { + description = "All outbound traffic" + from_port = 0 + to_port = 0 + protocol = "-1" + cidr_blocks = ["0.0.0.0/0"] + } + + tags = merge(var.tags, { + Name = "${var.name_prefix}-ecs-sg-${var.environment}" + }) +} + +# DocumentDB Security Group +resource "aws_security_group" 
"documentdb" { + name = "${var.name_prefix}-documentdb-sg-${var.environment}" + description = "Security group for DocumentDB cluster" + vpc_id = var.vpc_id + + ingress { + description = "MongoDB from ECS" + from_port = 27017 + to_port = 27017 + protocol = "tcp" + security_groups = [aws_security_group.ecs.id] + } + + egress { + description = "All outbound traffic" + from_port = 0 + to_port = 0 + protocol = "-1" + cidr_blocks = ["0.0.0.0/0"] + } + + tags = merge(var.tags, { + Name = "${var.name_prefix}-documentdb-sg-${var.environment}" + }) +} + +# Redis Security Group +resource "aws_security_group" "redis" { + name = "${var.name_prefix}-redis-sg-${var.environment}" + description = "Security group for ElastiCache Redis cluster" + vpc_id = var.vpc_id + + ingress { + description = "Redis from ECS" + from_port = 6379 + to_port = 6379 + protocol = "tcp" + security_groups = [aws_security_group.ecs.id] + } + + egress { + description = "All outbound traffic" + from_port = 0 + to_port = 0 + protocol = "-1" + cidr_blocks = ["0.0.0.0/0"] + } + + tags = merge(var.tags, { + Name = "${var.name_prefix}-redis-sg-${var.environment}" + }) +} \ No newline at end of file diff --git a/deployment/modules/security_groups/outputs.tf b/deployment/modules/security_groups/outputs.tf new file mode 100644 index 00000000..47149ec9 --- /dev/null +++ b/deployment/modules/security_groups/outputs.tf @@ -0,0 +1,21 @@ +# Outputs for Security Groups Module + +output "alb_sg_id" { + description = "ID of the ALB security group" + value = aws_security_group.alb.id +} + +output "ecs_sg_id" { + description = "ID of the ECS security group" + value = aws_security_group.ecs.id +} + +output "documentdb_sg_id" { + description = "ID of the DocumentDB security group" + value = aws_security_group.documentdb.id +} + +output "redis_sg_id" { + description = "ID of the Redis security group" + value = aws_security_group.redis.id +} \ No newline at end of file diff --git a/deployment/modules/security_groups/variables.tf 
b/deployment/modules/security_groups/variables.tf new file mode 100644 index 00000000..d501264b --- /dev/null +++ b/deployment/modules/security_groups/variables.tf @@ -0,0 +1,28 @@ +# Variables for Security Groups Module + +variable "name_prefix" { + description = "Name prefix for resources" + type = string +} + +variable "environment" { + description = "Environment name" + type = string +} + +variable "vpc_id" { + description = "ID of the VPC" + type = string +} + +variable "container_port" { + description = "Port that the container exposes" + type = number + default = 3000 +} + +variable "tags" { + description = "Tags to apply to resources" + type = map(string) + default = {} +} \ No newline at end of file diff --git a/deployment/modules/vpc/main.tf b/deployment/modules/vpc/main.tf new file mode 100644 index 00000000..6c25cbf0 --- /dev/null +++ b/deployment/modules/vpc/main.tf @@ -0,0 +1,184 @@ +# VPC Module for SF Website Infrastructure + +# VPC +resource "aws_vpc" "main" { + cidr_block = var.vpc_cidr + enable_dns_hostnames = true + enable_dns_support = true + + tags = merge(var.tags, { + Name = "${var.name_prefix}-vpc-${var.environment}" + }) +} + +# Internet Gateway +resource "aws_internet_gateway" "main" { + vpc_id = aws_vpc.main.id + + tags = merge(var.tags, { + Name = "${var.name_prefix}-igw-${var.environment}" + }) +} + +# Public Subnets +resource "aws_subnet" "public" { + count = length(var.azs) + + vpc_id = aws_vpc.main.id + cidr_block = cidrsubnet(var.vpc_cidr, 8, count.index) + availability_zone = var.azs[count.index] + map_public_ip_on_launch = true + + tags = merge(var.tags, { + Name = "${var.name_prefix}-public-subnet-${count.index + 1}-${var.environment}" + Type = "Public" + }) +} + +# Private Subnets +resource "aws_subnet" "private" { + count = length(var.azs) + + vpc_id = aws_vpc.main.id + cidr_block = cidrsubnet(var.vpc_cidr, 8, count.index + 10) + availability_zone = var.azs[count.index] + + tags = merge(var.tags, { + Name = 
"${var.name_prefix}-private-subnet-${count.index + 1}-${var.environment}" + Type = "Private" + }) +} + +# Elastic IPs for NAT Gateways +resource "aws_eip" "nat" { + count = length(var.azs) + + domain = "vpc" + depends_on = [aws_internet_gateway.main] + + tags = merge(var.tags, { + Name = "${var.name_prefix}-eip-nat-${count.index + 1}-${var.environment}" + }) +} + +# NAT Gateways +resource "aws_nat_gateway" "main" { + count = length(var.azs) + + allocation_id = aws_eip.nat[count.index].id + subnet_id = aws_subnet.public[count.index].id + + tags = merge(var.tags, { + Name = "${var.name_prefix}-nat-${count.index + 1}-${var.environment}" + }) + + depends_on = [aws_internet_gateway.main] +} + +# Route Table for Public Subnets +resource "aws_route_table" "public" { + vpc_id = aws_vpc.main.id + + route { + cidr_block = "0.0.0.0/0" + gateway_id = aws_internet_gateway.main.id + } + + tags = merge(var.tags, { + Name = "${var.name_prefix}-public-rt-${var.environment}" + }) +} + +# Route Table Associations for Public Subnets +resource "aws_route_table_association" "public" { + count = length(aws_subnet.public) + + subnet_id = aws_subnet.public[count.index].id + route_table_id = aws_route_table.public.id +} + +# Route Tables for Private Subnets (one per AZ) +resource "aws_route_table" "private" { + count = length(var.azs) + + vpc_id = aws_vpc.main.id + + route { + cidr_block = "0.0.0.0/0" + nat_gateway_id = aws_nat_gateway.main[count.index].id + } + + tags = merge(var.tags, { + Name = "${var.name_prefix}-private-rt-${count.index + 1}-${var.environment}" + }) +} + +# Route Table Associations for Private Subnets +resource "aws_route_table_association" "private" { + count = length(aws_subnet.private) + + subnet_id = aws_subnet.private[count.index].id + route_table_id = aws_route_table.private[count.index].id +} + +# VPC Flow Logs +resource "aws_flow_log" "vpc" { + iam_role_arn = aws_iam_role.flow_log.arn + log_destination = aws_cloudwatch_log_group.vpc_flow_log.arn + traffic_type 
= "ALL" + vpc_id = aws_vpc.main.id +} + +# CloudWatch Log Group for VPC Flow Logs +resource "aws_cloudwatch_log_group" "vpc_flow_log" { + name = "/aws/vpc/flowlogs/${var.name_prefix}-${var.environment}" + retention_in_days = 30 + + tags = merge(var.tags, { + Name = "${var.name_prefix}-vpc-flowlogs-${var.environment}" + }) +} + +# IAM Role for VPC Flow Logs +resource "aws_iam_role" "flow_log" { + name = "${var.name_prefix}-vpc-flow-log-role-${var.environment}" + + assume_role_policy = jsonencode({ + Version = "2012-10-17" + Statement = [ + { + Action = "sts:AssumeRole" + Effect = "Allow" + Sid = "" + Principal = { + Service = "vpc-flow-logs.amazonaws.com" + } + }, + ] + }) + + tags = var.tags +} + +# IAM Policy for VPC Flow Logs +resource "aws_iam_role_policy" "flow_log" { + name = "${var.name_prefix}-vpc-flow-log-policy-${var.environment}" + role = aws_iam_role.flow_log.id + + policy = jsonencode({ + Version = "2012-10-17" + Statement = [ + { + Action = [ + "logs:CreateLogGroup", + "logs:CreateLogStream", + "logs:PutLogEvents", + "logs:DescribeLogGroups", + "logs:DescribeLogStreams", + ] + Effect = "Allow" + Resource = "*" + }, + ] + }) +} \ No newline at end of file diff --git a/deployment/modules/vpc/outputs.tf b/deployment/modules/vpc/outputs.tf new file mode 100644 index 00000000..8c2643a1 --- /dev/null +++ b/deployment/modules/vpc/outputs.tf @@ -0,0 +1,46 @@ +# Outputs for VPC Module + +output "vpc_id" { + description = "ID of the VPC" + value = aws_vpc.main.id +} + +output "vpc_cidr_block" { + description = "CIDR block of the VPC" + value = aws_vpc.main.cidr_block +} + +output "internet_gateway_id" { + description = "ID of the Internet Gateway" + value = aws_internet_gateway.main.id +} + +output "public_subnet_ids" { + description = "IDs of the public subnets" + value = aws_subnet.public[*].id +} + +output "private_subnet_ids" { + description = "IDs of the private subnets" + value = aws_subnet.private[*].id +} + +output "public_route_table_id" { + description 
= "ID of the public route table" + value = aws_route_table.public.id +} + +output "private_route_table_ids" { + description = "IDs of the private route tables" + value = aws_route_table.private[*].id +} + +output "nat_gateway_ids" { + description = "IDs of the NAT gateways" + value = aws_nat_gateway.main[*].id +} + +output "nat_gateway_ips" { + description = "Elastic IP addresses of the NAT gateways" + value = aws_eip.nat[*].public_ip +} \ No newline at end of file diff --git a/deployment/modules/vpc/variables.tf b/deployment/modules/vpc/variables.tf new file mode 100644 index 00000000..76d19bdd --- /dev/null +++ b/deployment/modules/vpc/variables.tf @@ -0,0 +1,27 @@ +# Variables for VPC Module + +variable "name_prefix" { + description = "Name prefix for resources" + type = string +} + +variable "environment" { + description = "Environment name" + type = string +} + +variable "vpc_cidr" { + description = "CIDR block for VPC" + type = string +} + +variable "azs" { + description = "List of availability zones" + type = list(string) +} + +variable "tags" { + description = "Tags to apply to resources" + type = map(string) + default = {} +} \ No newline at end of file diff --git a/deployment/outputs.tf b/deployment/outputs.tf new file mode 100644 index 00000000..5cc40b2d --- /dev/null +++ b/deployment/outputs.tf @@ -0,0 +1,171 @@ +# Outputs for SF Website Infrastructure + +# VPC Outputs +output "vpc_id" { + description = "ID of the VPC" + value = module.vpc.vpc_id +} + +output "vpc_cidr_block" { + description = "CIDR block of the VPC" + value = module.vpc.vpc_cidr_block +} + +output "public_subnet_ids" { + description = "IDs of the public subnets" + value = module.vpc.public_subnet_ids +} + +output "private_subnet_ids" { + description = "IDs of the private subnets" + value = module.vpc.private_subnet_ids +} + +# Bastion Host Outputs +output "bastion_instance_id" { + description = "ID of the bastion host instance" + value = module.bastion.instance_id +} + +output 
"bastion_public_ip" { + description = "Public IP address of the bastion host" + value = module.bastion.public_ip +} + +output "bastion_private_ip" { + description = "Private IP address of the bastion host" + value = module.bastion.private_ip +} + +output "bastion_ssh_command" { + description = "SSH command to connect to bastion host" + value = module.bastion.ssh_command +} + +# S3 Outputs +output "attachments_bucket_name" { + description = "Name of the S3 attachments bucket" + value = module.s3.attachments_bucket_id +} + +output "logs_bucket_name" { + description = "Name of the S3 logs bucket" + value = module.s3.logs_bucket_id +} + +# ECR Outputs +output "ecr_repository_url" { + description = "URL of the ECR repository" + value = module.ecr.repository_url +} + +output "ecr_repository_arn" { + description = "ARN of the ECR repository" + value = module.ecr.repository_arn +} + +# ALB Outputs +output "alb_dns_name" { + description = "DNS name of the Application Load Balancer" + value = module.alb.alb_dns_name +} + +output "alb_zone_id" { + description = "Zone ID of the Application Load Balancer" + value = module.alb.alb_zone_id +} + +output "domain_name" { + description = "Domain name for the application" + value = var.domain_name +} + +# CloudFront Outputs +output "cloudfront_distribution_id" { + description = "ID of the CloudFront distribution" + value = module.cloudfront.distribution_id +} + +output "cloudfront_domain_name" { + description = "Domain name of the CloudFront distribution" + value = module.cloudfront.distribution_domain_name +} + +output "media_domain_name" { + description = "Domain name for media CDN" + value = var.media_domain_name +} + +# Database Outputs +output "documentdb_cluster_endpoint" { + description = "DocumentDB cluster endpoint" + value = module.documentdb.cluster_endpoint + sensitive = true +} + +# Cache Outputs +output "redis_cluster_endpoint" { + description = "ElastiCache Redis cluster endpoint" + value = module.redis.cluster_endpoint 
+ sensitive = true +} + +# ECS Outputs +output "ecs_cluster_name" { + description = "Name of the ECS cluster" + value = module.ecs.cluster_name +} + +output "ecs_service_name" { + description = "Name of the ECS service" + value = module.ecs.service_name +} + +# IAM Outputs +output "ecs_task_role_arn" { + description = "ARN of the ECS task role" + value = module.iam.ecs_task_role_arn +} + +output "ecs_execution_role_arn" { + description = "ARN of the ECS execution role" + value = module.iam.ecs_execution_role_arn +} + +output "github_actions_role_arn" { + description = "ARN of the GitHub Actions IAM role" + value = module.iam.github_actions_role_arn +} + +# Security Group Outputs +output "security_groups" { + description = "Security group IDs" + value = { + alb = module.security_groups.alb_sg_id + ecs = module.security_groups.ecs_sg_id + documentdb = module.security_groups.documentdb_sg_id + redis = module.security_groups.redis_sg_id + } +} + +# Application URLs +output "application_url" { + description = "URL to access the application" + value = "https://${var.domain_name}" +} + +output "media_url" { + description = "URL for media assets" + value = "https://${var.media_domain_name}" +} + +# Environment Information +output "environment" { + description = "Environment name" + value = var.environment +} + +output "aws_region" { + description = "AWS region" + value = var.aws_region +} \ No newline at end of file diff --git a/deployment/terraform-core-management-policy.json b/deployment/terraform-core-management-policy.json new file mode 100644 index 00000000..d34084b1 --- /dev/null +++ b/deployment/terraform-core-management-policy.json @@ -0,0 +1,186 @@ +{ + "Version": "2012-10-17", + "Statement": [ + { + "Sid": "TerraformIAMPermissions", + "Effect": "Allow", + "Action": [ + "iam:CreateRole", + "iam:DeleteRole", + "iam:GetRole", + "iam:ListRoles", + "iam:UpdateRole", + "iam:PutRolePolicy", + "iam:GetRolePolicy", + "iam:DeleteRolePolicy", + "iam:ListRolePolicies", + 
"iam:CreatePolicy", + "iam:DeletePolicy", + "iam:GetPolicy", + "iam:GetPolicyVersion", + "iam:ListPolicyVersions", + "iam:AttachRolePolicy", + "iam:DetachRolePolicy", + "iam:ListAttachedRolePolicies", + "iam:PassRole", + "iam:CreateOpenIDConnectProvider", + "iam:DeleteOpenIDConnectProvider", + "iam:GetOpenIDConnectProvider", + "iam:ListOpenIDConnectProviders", + "iam:UpdateOpenIDConnectProviderThumbprint", + "iam:TagRole", + "iam:UntagRole", + "iam:ListRoleTags", + "iam:TagOpenIDConnectProvider", + "iam:UntagOpenIDConnectProvider", + "iam:ListOpenIDConnectProviderTags", + "iam:CreateServiceLinkedRole", + "iam:DeleteServiceLinkedRole", + "iam:GetServiceLinkedRoleDeletionStatus", + "iam:ListInstanceProfilesForRole" + ], + "Resource": "*" + }, + { + "Sid": "TerraformCloudWatchPermissions", + "Effect": "Allow", + "Action": [ + "logs:CreateLogGroup", + "logs:DeleteLogGroup", + "logs:DescribeLogGroups", + "logs:PutRetentionPolicy", + "logs:DeleteRetentionPolicy", + "logs:TagLogGroup", + "logs:UntagLogGroup", + "logs:ListTagsForResource", + "logs:ListTagsLogGroup", + "cloudwatch:PutDashboard", + "cloudwatch:GetDashboard", + "cloudwatch:DeleteDashboards", + "cloudwatch:ListDashboards", + "cloudwatch:PutMetricAlarm", + "cloudwatch:DeleteAlarms", + "cloudwatch:DescribeAlarms", + "cloudwatch:EnableAlarmActions", + "cloudwatch:DisableAlarmActions", + "cloudwatch:TagResource", + "cloudwatch:UntagResource", + "cloudwatch:ListTagsForResource" + ], + "Resource": "*" + }, + { + "Sid": "STSPermissions", + "Effect": "Allow", + "Action": [ + "sts:GetCallerIdentity" + ], + "Resource": "*" + }, + { + "Sid": "TerraformDocumentDBPermissions", + "Effect": "Allow", + "Action": [ + "rds:DescribeGlobalClusters", + "rds:CreateGlobalCluster", + "rds:DeleteGlobalCluster", + "rds:ModifyGlobalCluster", + "rds:CreateDBCluster", + "rds:DeleteDBCluster", + "rds:DescribeDBClusters", + "rds:ModifyDBCluster", + "rds:CreateDBSubnetGroup", + "rds:DeleteDBSubnetGroup", + "rds:DescribeDBSubnetGroups", + 
"rds:ModifyDBSubnetGroup", + "rds:CreateDBClusterParameterGroup", + "rds:DeleteDBClusterParameterGroup", + "rds:DescribeDBClusterParameterGroups", + "rds:DescribeDBClusterParameters", + "rds:ModifyDBClusterParameterGroup", + "rds:CreateDBInstance", + "rds:DeleteDBInstance", + "rds:DescribeDBInstances", + "rds:ModifyDBInstance", + "rds:AddTagsToResource", + "rds:RemoveTagsFromResource", + "rds:ListTagsForResource" + ], + "Resource": "*" + }, + { + "Sid": "TerraformALBPermissions", + "Effect": "Allow", + "Action": [ + "elasticloadbalancing:CreateLoadBalancer", + "elasticloadbalancing:DeleteLoadBalancer", + "elasticloadbalancing:DescribeLoadBalancers", + "elasticloadbalancing:DescribeLoadBalancerAttributes", + "elasticloadbalancing:ModifyLoadBalancerAttributes", + "elasticloadbalancing:CreateTargetGroup", + "elasticloadbalancing:DeleteTargetGroup", + "elasticloadbalancing:DescribeTargetGroups", + "elasticloadbalancing:DescribeTargetGroupAttributes", + "elasticloadbalancing:ModifyTargetGroup", + "elasticloadbalancing:ModifyTargetGroupAttributes", + "elasticloadbalancing:CreateListener", + "elasticloadbalancing:DeleteListener", + "elasticloadbalancing:DescribeListeners", + "elasticloadbalancing:DescribeListenerAttributes", + "elasticloadbalancing:ModifyListener", + "elasticloadbalancing:CreateRule", + "elasticloadbalancing:DeleteRule", + "elasticloadbalancing:DescribeRules", + "elasticloadbalancing:ModifyRule", + "elasticloadbalancing:SetRulePriorities", + "elasticloadbalancing:RegisterTargets", + "elasticloadbalancing:DeregisterTargets", + "elasticloadbalancing:DescribeTargetHealth", + "elasticloadbalancing:AddTags", + "elasticloadbalancing:RemoveTags", + "elasticloadbalancing:DescribeTags" + ], + "Resource": "*" + }, + { + "Sid": "TerraformCloudFrontPermissions", + "Effect": "Allow", + "Action": [ + "cloudfront:CreateDistribution", + "cloudfront:DeleteDistribution", + "cloudfront:GetDistribution", + "cloudfront:UpdateDistribution", + "cloudfront:ListDistributions", + 
"cloudfront:CreateOriginAccessControl", + "cloudfront:DeleteOriginAccessControl", + "cloudfront:GetOriginAccessControl", + "cloudfront:UpdateOriginAccessControl", + "cloudfront:ListOriginAccessControls", + "cloudfront:CreateCloudFrontOriginAccessIdentity", + "cloudfront:DeleteCloudFrontOriginAccessIdentity", + "cloudfront:GetCloudFrontOriginAccessIdentity", + "cloudfront:UpdateCloudFrontOriginAccessIdentity", + "cloudfront:ListCloudFrontOriginAccessIdentities", + "cloudfront:TagResource", + "cloudfront:UntagResource", + "cloudfront:ListTagsForResource" + ], + "Resource": "*" + }, + { + "Sid": "TerraformACMPermissions", + "Effect": "Allow", + "Action": [ + "acm:DescribeCertificate", + "acm:ListCertificates", + "acm:GetCertificate", + "acm:RequestCertificate", + "acm:DeleteCertificate", + "acm:AddTagsToCertificate", + "acm:RemoveTagsFromCertificate", + "acm:ListTagsForCertificate" + ], + "Resource": "*" + } + ] +} \ No newline at end of file diff --git a/deployment/terraform-infrastructure-management-policy.json b/deployment/terraform-infrastructure-management-policy.json new file mode 100644 index 00000000..ae57aeb3 --- /dev/null +++ b/deployment/terraform-infrastructure-management-policy.json @@ -0,0 +1,94 @@ +{ + "Version": "2012-10-17", + "Statement": [ + { + "Sid": "TerraformEC2VPCPermissions", + "Effect": "Allow", + "Action": [ + "ec2:CreateVpc", + "ec2:DeleteVpc", + "ec2:DescribeVpcs", + "ec2:DescribeVpcAttribute", + "ec2:ModifyVpcAttribute", + "ec2:CreateSubnet", + "ec2:DeleteSubnet", + "ec2:DescribeSubnets", + "ec2:ModifySubnetAttribute", + "ec2:CreateInternetGateway", + "ec2:DeleteInternetGateway", + "ec2:DescribeInternetGateways", + "ec2:AttachInternetGateway", + "ec2:DetachInternetGateway", + "ec2:CreateNatGateway", + "ec2:DeleteNatGateway", + "ec2:DescribeNatGateways", + "ec2:AllocateAddress", + "ec2:ReleaseAddress", + "ec2:DescribeAddresses", + "ec2:DescribeAddressesAttribute", + "ec2:DisassociateAddress", + "ec2:AssociateAddress", + 
"ec2:CreateRouteTable", + "ec2:DeleteRouteTable", + "ec2:DescribeRouteTables", + "ec2:CreateRoute", + "ec2:DeleteRoute", + "ec2:ReplaceRoute", + "ec2:AssociateRouteTable", + "ec2:DisassociateRouteTable", + "ec2:CreateSecurityGroup", + "ec2:DeleteSecurityGroup", + "ec2:DescribeSecurityGroups", + "ec2:DescribeSecurityGroupRules", + "ec2:AuthorizeSecurityGroupIngress", + "ec2:AuthorizeSecurityGroupEgress", + "ec2:RevokeSecurityGroupIngress", + "ec2:RevokeSecurityGroupEgress", + "ec2:CreateFlowLogs", + "ec2:DeleteFlowLogs", + "ec2:DescribeFlowLogs", + "ec2:DescribeAvailabilityZones", + "ec2:DescribeNetworkInterfaces", + "ec2:CreateTags", + "ec2:DeleteTags", + "ec2:DescribeTags", + "ec2:DescribeImages", + "ec2:DescribeInstanceTypes", + "ec2:DescribeInstanceCreditSpecifications", + "ec2:DescribeVolumes", + "ec2:RunInstances", + "ec2:TerminateInstances", + "ec2:DescribeInstances", + "ec2:DescribeInstanceAttribute", + "ec2:ModifyInstanceAttribute", + "ec2:StartInstances", + "ec2:StopInstances", + "ec2:RebootInstances", + "ec2:DescribeKeyPairs" + ], + "Resource": "*" + }, + { + "Sid": "TerraformElastiCachePermissions", + "Effect": "Allow", + "Action": [ + "elasticache:CreateCacheSubnetGroup", + "elasticache:DeleteCacheSubnetGroup", + "elasticache:DescribeCacheSubnetGroups", + "elasticache:ModifyCacheSubnetGroup", + "elasticache:CreateReplicationGroup", + "elasticache:DeleteReplicationGroup", + "elasticache:DescribeReplicationGroups", + "elasticache:ModifyReplicationGroup", + "elasticache:CreateCacheCluster", + "elasticache:DeleteCacheCluster", + "elasticache:DescribeCacheClusters", + "elasticache:ModifyCacheCluster", + "elasticache:AddTagsToResource", + "elasticache:RemoveTagsFromResource", + "elasticache:ListTagsForResource" + ], + "Resource": "*" + } + ] +} \ No newline at end of file diff --git a/deployment/terraform-s3-management-policy.json b/deployment/terraform-s3-management-policy.json new file mode 100644 index 00000000..0a0fc609 --- /dev/null +++ 
b/deployment/terraform-s3-management-policy.json @@ -0,0 +1,67 @@ +{ + "Version": "2012-10-17", + "Statement": [ + { + "Sid": "TerraformBackendS3Permissions", + "Effect": "Allow", + "Action": [ + "s3:CreateBucket", + "s3:DeleteBucket", + "s3:ListBucket", + "s3:ListBucketVersions", + "s3:GetBucketLocation", + "s3:GetBucketVersioning", + "s3:PutBucketVersioning", + "s3:PutEncryptionConfiguration", + "s3:GetEncryptionConfiguration", + "s3:PutBucketPublicAccessBlock", + "s3:GetBucketPublicAccessBlock", + "s3:PutBucketPolicy", + "s3:GetBucketPolicy", + "s3:DeleteBucketPolicy", + "s3:GetBucketAcl", + "s3:PutBucketAcl", + "s3:GetBucketCORS", + "s3:PutBucketCORS", + "s3:GetBucketWebsite", + "s3:PutBucketWebsite", + "s3:DeleteBucketWebsite", + "s3:GetAccelerateConfiguration", + "s3:PutAccelerateConfiguration", + "s3:GetBucketRequestPayment", + "s3:PutBucketRequestPayment", + "s3:GetBucketLogging", + "s3:PutBucketLogging", + "s3:GetLifecycleConfiguration", + "s3:PutLifecycleConfiguration", + "s3:GetReplicationConfiguration", + "s3:PutReplicationConfiguration", + "s3:GetBucketObjectLockConfiguration", + "s3:PutBucketObjectLockConfiguration", + "s3:GetBucketTagging", + "s3:PutBucketTagging" + ], + "Resource": [ + "arn:aws:s3:::sf-website-infrastructure", + "arn:aws:s3:::sf-website-infrastructure/*", + "arn:aws:s3:::sf-website-s3-*", + "arn:aws:s3:::sf-website-s3-*/*" + ] + }, + { + "Sid": "TerraformBackendS3ObjectPermissions", + "Effect": "Allow", + "Action": [ + "s3:GetObject", + "s3:PutObject", + "s3:DeleteObject", + "s3:GetObjectVersion", + "s3:DeleteObjectVersion" + ], + "Resource": [ + "arn:aws:s3:::sf-website-infrastructure/*", + "arn:aws:s3:::sf-website-s3-*/*" + ] + } + ] +} \ No newline at end of file diff --git a/deployment/terraform-service-management-policy.json b/deployment/terraform-service-management-policy.json new file mode 100644 index 00000000..be7e53bf --- /dev/null +++ b/deployment/terraform-service-management-policy.json @@ -0,0 +1,104 @@ +{ + "Version": 
"2012-10-17", + "Statement": [ + { + "Sid": "TerraformECRPermissions", + "Effect": "Allow", + "Action": [ + "ecr:CreateRepository", + "ecr:DeleteRepository", + "ecr:DescribeRepositories", + "ecr:PutLifecyclePolicy", + "ecr:GetLifecyclePolicy", + "ecr:DeleteLifecyclePolicy", + "ecr:PutImageScanningConfiguration", + "ecr:PutImageTagMutability", + "ecr:TagResource", + "ecr:UntagResource", + "ecr:ListTagsForResource", + "ecr:GetAuthorizationToken", + "ecr:BatchCheckLayerAvailability", + "ecr:GetDownloadUrlForLayer", + "ecr:BatchGetImage", + "ecr:InitiateLayerUpload", + "ecr:UploadLayerPart", + "ecr:CompleteLayerUpload", + "ecr:PutImage" + ], + "Resource": "*" + }, + { + "Sid": "TerraformECSPermissions", + "Effect": "Allow", + "Action": [ + "ecs:CreateCluster", + "ecs:DeleteCluster", + "ecs:DescribeClusters", + "ecs:ListClusters", + "ecs:CreateService", + "ecs:DeleteService", + "ecs:DescribeServices", + "ecs:UpdateService", + "ecs:RegisterTaskDefinition", + "ecs:DeregisterTaskDefinition", + "ecs:DescribeTaskDefinition", + "ecs:ListServices", + "ecs:DescribeTasks", + "ecs:ListTasks", + "ecs:TagResource", + "ecs:UntagResource", + "ecs:ListTagsForResource" + ], + "Resource": "*" + }, + { + "Sid": "TerraformApplicationAutoScalingPermissions", + "Effect": "Allow", + "Action": [ + "application-autoscaling:RegisterScalableTarget", + "application-autoscaling:DeregisterScalableTarget", + "application-autoscaling:DescribeScalableTargets", + "application-autoscaling:PutScalingPolicy", + "application-autoscaling:DeleteScalingPolicy", + "application-autoscaling:DescribeScalingPolicies", + "application-autoscaling:DescribeScalingActivities", + "application-autoscaling:TagResource", + "application-autoscaling:UntagResource", + "application-autoscaling:ListTagsForResource" + ], + "Resource": "*" + }, + { + "Sid": "TerraformSSMParameterStorePermissions", + "Effect": "Allow", + "Action": [ + "ssm:PutParameter", + "ssm:GetParameter", + "ssm:GetParameters", + "ssm:DeleteParameter", + 
"ssm:DescribeParameters", + "ssm:AddTagsToResource", + "ssm:RemoveTagsFromResource", + "ssm:ListTagsForResource" + ], + "Resource": "*" + }, + { + "Sid": "TerraformCloudWatchLogsPermissions", + "Effect": "Allow", + "Action": [ + "logs:CreateLogGroup", + "logs:DeleteLogGroup", + "logs:DescribeLogGroups", + "logs:DescribeLogStreams", + "logs:GetLogEvents", + "logs:FilterLogEvents", + "logs:PutRetentionPolicy", + "logs:TagResource", + "logs:UntagResource", + "logs:ListTagsForResource" + ], + "Resource": "*" + } + ] +} \ No newline at end of file diff --git a/deployment/terraform.tfvars.example b/deployment/terraform.tfvars.example new file mode 100644 index 00000000..883d8879 --- /dev/null +++ b/deployment/terraform.tfvars.example @@ -0,0 +1,65 @@ +# Example Terraform Variables for SF Website Infrastructure +# Copy this file to terraform.tfvars and customize the values + +# General Configuration +aws_region = "us-east-1" +environment = "development" # Can be any name: development, staging, production, qa, demo, sandbox, etc. 
+ +# Networking +vpc_cidr = "10.0.0.0/16" # Choose unique CIDR for each environment to avoid conflicts + +# Domain Configuration +domain_name = "sf-website-development.sandbox-prettyclear.com" +media_domain_name = "sf-website-media-development.sandbox-prettyclear.com" +certificate_arn = "arn:aws:acm:us-east-1:548271326349:certificate/7e11016f-f90e-4800-972d-622bf1a82948" + +# Database Configuration (SENSITIVE - Store securely) +documentdb_master_username = "apostrophe_admin" +documentdb_master_password = "your-secure-password-here" # Min 8 characters + +# Application Secrets (SENSITIVE - Store securely) +session_secret = "${SESSION_SECRET_KEY}" +redis_auth_token = "your-secure-redis-token-here" # 16-128 chars, alphanumeric and symbols (no @, ", /) + +# Optional: Google Cloud Storage Service Account (if using GCS) +gcs_service_account_key = "" # Leave empty if not using GCS + +# GitHub Actions Configuration +github_repository = "your-username/sf-website" # Format: owner/repo + +# Monitoring Configuration (SENSITIVE) +slack_webhook_url = "https://hooks.slack.com/services/YOUR/SLACK/WEBHOOK" + +# Optional: Route 53 Configuration +route53_zone_id = "" # Leave empty to skip DNS record creation + +# Container Configuration +container_image_tag = "latest" +apos_release_id = "v1.0.0" # Release ID for deployment tracking (required) +container_cpu = 1024 # 1024 = 1 vCPU +container_memory = 2048 # 2048 MB = 2 GB +container_port = 3000 # Application port +log_retention_days = 7 # CloudWatch log retention + +# Scaling Configuration +ecs_desired_count = 1 +ecs_max_capacity = 3 + +# Environment-specific Instance Types +documentdb_instance_class = "db.t3.medium" +redis_node_type = "cache.t3.micro" # cache.t3.small for production + +# Bastion Host Configuration +bastion_instance_type = "t3.micro" +bastion_key_pair_name = "your-ec2-key-pair-name" # Must exist in AWS +bastion_allowed_cidr_blocks = [ + "203.0.113.0/24", # Example: Your office IP range + "198.51.100.0/24" # Example: 
Your home IP range +] + +# Tags +default_tags = { + Project = "Website" + CostCenter = "Website" + Owner = "peter.ovchyn" +} \ No newline at end of file diff --git a/deployment/variables.tf b/deployment/variables.tf new file mode 100644 index 00000000..accc1624 --- /dev/null +++ b/deployment/variables.tf @@ -0,0 +1,249 @@ +# Variables for SF Website Infrastructure + +# General Configuration +variable "aws_region" { + description = "AWS region for resources" + type = string + default = "us-east-1" +} + +variable "environment" { + description = "Environment name (e.g., development, staging, production, qa, demo, sandbox, etc.)" + type = string +} + +variable "default_tags" { + description = "Default tags to apply to all resources" + type = map(string) + default = { + Project = "Website" + CostCenter = "Website" + Owner = "peter.ovchyn" + } +} + +# Networking Configuration +variable "vpc_cidr" { + description = "CIDR block for VPC" + type = string + default = "10.0.0.0/16" + + validation { + condition = can(cidrhost(var.vpc_cidr, 0)) + error_message = "VPC CIDR must be a valid IPv4 CIDR block." 
+ } +} + +# Domain Configuration +variable "domain_name" { + description = "Domain name for the application (e.g., sf-website-development.sandbox-prettyclear.com)" + type = string +} + +variable "media_domain_name" { + description = "Domain name for media CDN (e.g., sf-website-media-development.sandbox-prettyclear.com)" + type = string +} + +variable "certificate_arn" { + description = "ARN of the SSL certificate from ACM" + type = string +} + +# Database Configuration +variable "documentdb_master_username" { + description = "Master username for DocumentDB cluster" + type = string + sensitive = true +} + +variable "documentdb_master_password" { + description = "Master password for DocumentDB cluster" + type = string + sensitive = true + + validation { + condition = length(var.documentdb_master_password) >= 8 + error_message = "DocumentDB master password must be at least 8 characters long." + } +} + +# Application Secrets +variable "session_secret" { + description = "Secret key for session management" + type = string + sensitive = true + + validation { + condition = length(var.session_secret) >= 32 + error_message = "Session secret must be at least 32 characters long." + } +} + +variable "redis_auth_token" { + description = "Auth token for Redis cluster (alphanumeric and symbols excluding @, \", /)" + type = string + sensitive = true + + validation { + condition = length(var.redis_auth_token) >= 16 && length(var.redis_auth_token) <= 128 + error_message = "Redis auth token must be between 16 and 128 characters long." + } + + validation { + condition = can(regex("^[A-Za-z0-9!#$%&*()\\-_=\\+\\[\\]{}<>:;.,?~]+$", var.redis_auth_token)) + error_message = "Redis auth token can only contain alphanumeric characters and symbols (excluding @, \", and /)." 
+ } +} + +variable "gcs_service_account_key" { + description = "Google Cloud Storage service account private key (optional)" + type = string + default = "" + sensitive = true +} + +# GitHub Actions Configuration +variable "github_repository" { + description = "GitHub repository in the format 'owner/repo' for OIDC integration" + type = string +} + +# Monitoring Configuration +variable "slack_webhook_url" { + description = "Slack webhook URL for CloudWatch alerts" + type = string + sensitive = true +} + +# Route 53 Configuration (optional) +variable "route53_zone_id" { + description = "Route 53 hosted zone ID for DNS records" + type = string + default = "" +} + +# Container Configuration +variable "container_image_tag" { + description = "Container image tag to deploy" + type = string + default = "latest" +} + +variable "apos_release_id" { + description = "Release ID for the application deployment" + type = string +} + +variable "container_cpu" { + description = "CPU units for the container (1024 = 1 vCPU)" + type = number + default = 1024 + + validation { + condition = contains([256, 512, 1024, 2048, 4096], var.container_cpu) + error_message = "Container CPU must be one of: 256, 512, 1024, 2048, 4096." + } +} + +variable "container_memory" { + description = "Memory in MB for the container" + type = number + default = 2048 + + validation { + condition = var.container_memory >= 512 && var.container_memory <= 30720 + error_message = "Container memory must be between 512 and 30720 MB." + } +} + +variable "container_port" { + description = "Port that the container exposes" + type = number + default = 3000 + + validation { + condition = var.container_port > 0 && var.container_port <= 65535 + error_message = "Container port must be between 1 and 65535." 
+ } +} + +variable "health_check_path" { + description = "Path for ALB health checks" + type = string + default = "/health" + + validation { + condition = can(regex("^/", var.health_check_path)) + error_message = "Health check path must start with a forward slash." + } +} + +variable "log_retention_days" { + description = "Number of days to retain CloudWatch logs" + type = number + default = 7 + + validation { + condition = contains([1, 3, 5, 7, 14, 30, 60, 90, 120, 150, 180, 365, 400, 545, 731, 1827, 3653], var.log_retention_days) + error_message = "Log retention must be one of the valid CloudWatch retention periods." + } +} + +# Scaling Configuration +variable "ecs_desired_count" { + description = "Desired number of ECS tasks" + type = number + default = 1 + + validation { + condition = var.ecs_desired_count >= 1 && var.ecs_desired_count <= 10 + error_message = "ECS desired count must be between 1 and 10." + } +} + +variable "ecs_max_capacity" { + description = "Maximum number of ECS tasks for auto-scaling" + type = number + default = 3 + + validation { + condition = var.ecs_max_capacity >= var.ecs_desired_count + error_message = "ECS max capacity must be greater than or equal to desired count." 
+ } +} + +# Environment-specific Instance Types +variable "documentdb_instance_class" { + description = "DocumentDB instance class" + type = string + default = "db.t3.medium" +} + +variable "redis_node_type" { + description = "ElastiCache Redis node type" + type = string + default = "cache.t3.micro" +} + +# Bastion Host Configuration +variable "bastion_instance_type" { + description = "EC2 instance type for bastion host" + type = string + default = "t3.micro" +} + +variable "bastion_key_pair_name" { + description = "Name of the EC2 key pair for SSH access to bastion host" + type = string +} + +variable "bastion_allowed_cidr_blocks" { + description = "List of CIDR blocks allowed to SSH to bastion host" + type = list(string) + + validation { + condition = length(var.bastion_allowed_cidr_blocks) > 0 + error_message = "At least one CIDR block must be specified for bastion access." + } +} \ No newline at end of file diff --git a/destroy_workflow_requirements.md b/destroy_workflow_requirements.md new file mode 100644 index 00000000..461908e8 --- /dev/null +++ b/destroy_workflow_requirements.md @@ -0,0 +1,105 @@ +# AWS Environment Destroy Workflow Requirements + +## Overview +Create a GitHub Actions workflow to destroy AWS environments created by the "Deploy to AWS" workflow. 
+ +## Trigger Configuration +- **Manual trigger only**: `workflow_dispatch` +- **No automatic triggers**: Only humans can initiate the workflow +- **Environment selection**: Human selects environment when initiating workflow + +## Environment Support +- **Current scope**: Development environment only +- **Future scope**: Staging and Production environments are not ready yet +- **Environment source**: Match existing deploy workflow environment configuration + +## Safety Measures +### Approval Process +- **Method**: Use Manual Workflow Approval action (`trstringer/manual-approval@v1`) +- **Approver**: @killev (GitHub username) +- **Minimum approvals**: 1 approval required +- **Process**: Creates GitHub issue assigned to approver, requires "approve"/"deny" response + +### Confirmation Requirements +- **No manual confirmation typing**: User does not need to type "DESTROY" or environment name +- **Plan-only option**: Include "destroy plan only" option to preview what will be destroyed +- **Safety warnings**: Display warnings about what will be destroyed in approval issue + +## Workflow Structure +### Job 1: destroy-plan +- Run `terraform plan -destroy` +- Show what resources would be destroyed +- Store destroy plan file in S3 bucket +- Always runs when workflow is triggered + +### Job 2: manual-approval +- Use `trstringer/manual-approval@v1` action +- Create GitHub issue assigned to @killev +- Require 1 approval to proceed +- Only runs if `destroy-plan-only` is false + +### Job 3: destroy-apply +- Execute actual infrastructure destruction using stored plan +- Only runs after approval +- Only runs if `destroy-plan-only` is false + +## Technical Configuration +### AWS Configuration +- **Region**: us-east-1 +- **Credentials**: Same as deploy workflow (`TF_AWS_ACCESS_KEY_ID`, `TF_AWS_SECRET_ACCESS_KEY`) +- **S3 bucket**: sf-website-infrastructure (for storing plans) + +### Terraform Configuration +- **Version**: 1.12.0 +- **Backend**: Use same backend configuration as 
deploy workflow +- **Directory**: `deployment/` +- **Backend config**: `environments/backend-Development.hcl` + +### Workflow Inputs +```yaml +environment: + description: 'Environment to destroy' + required: true + type: choice + options: + - Development + default: Development + +destroy-plan-only: + description: 'Plan only (show what would be destroyed without destroying)' + required: false + type: boolean + default: false +``` + +## Infrastructure Scope +Based on existing Terraform configuration, the workflow will destroy: +- VPC and networking components +- ECS cluster and services +- DocumentDB cluster +- ElastiCache Redis cluster +- Application Load Balancer +- CloudFront distribution +- S3 buckets +- ECR repository +- IAM roles and policies +- CloudWatch resources +- Parameter Store secrets + +## Security Considerations +- Manual approval required before destruction +- Only authorized approver (@killev) can approve +- Plan-only option allows safe preview +- No automatic triggers prevent accidental execution +- Uses same secure credential management as deploy workflow +- Clear audit trail via GitHub issues +- Workflow will wait indefinitely for approval (subject to 35-day GitHub limit) + +## Setup Requirements +- **Permissions**: Workflow needs `issues: write` permission to create approval issues +- **Approver access**: @killev must have repository access to comment on issues +- **No additional environment setup required** + +## File Location +- **Path**: `.github/workflows/destroy_aws_environment.yml` +- **Repository**: Same repository as existing deploy workflow \ No newline at end of file diff --git a/docs/Infrastructure.md b/docs/Infrastructure.md index db89138c..6c2e71ac 100644 --- a/docs/Infrastructure.md +++ b/docs/Infrastructure.md @@ -4,7 +4,7 @@ * **AWS Region**: `us-east-1` * **Environments**: `dev`, `staging`, `prod` -* **Domain**: `sf-website-.prettyclear.com` +* **Domain**: `sf-website-.sandbox-prettyclear.com` * **Structure**: Modular Terraform 
setup for multi-environment support * **Resource Tags** (applied to all resources): @@ -158,7 +158,8 @@ * ECR (container image repository) * ALB (for ingress) * CloudWatch (for logs & metrics) - * MongoDB EC2 instance (database) + * DocumentDB Cluster (database) + * ElastiCache Redis (caching) * S3 Attachments Bucket * **Service Name**: `sf-website` * **Container Image**: Built from project Dockerfile and stored in ECR @@ -167,7 +168,8 @@ * **Auto-scaling**: Based on CPU usage (target: 70%) * **Environment Variables**: * `NODE_ENV=production` - * `APOS_MONGODB_URI=mongodb://:27017/apostrophe` + * `APOS_MONGODB_URI=mongodb://:27017/apostrophe` + * `REDIS_URI=redis://:6379` * `SESSION_SECRET=` * `APOS_S3_BUCKET=sf-website-s3-attachments-` * `APOS_S3_REGION=us-east-1` @@ -198,7 +200,7 @@ * ACM (for SSL certificates) * **Type**: HTTPS-only * **SSL**: Via AWS ACM - * **Domain**: `sf-website-.prettyclear.com` + * **Domain**: `sf-website-.sandbox-prettyclear.com` --- @@ -219,7 +221,7 @@ * ECS Cluster (via APOS_CDN_URL environment variable) * **Origin**: S3 bucket `sf-website-s3-attachments-` * **Access**: Origin access identity (OAI) to restrict direct S3 access - * **Custom domain**: `sf-website-media-.prettyclear.com` + * **Custom domain**: `sf-website-media-.sandbox-prettyclear.com` * **SSL Certificate**: Managed through AWS ACM * **Cache Behavior**: * Default TTL: 86400 seconds (1 day) @@ -247,51 +249,133 @@ * **Resource Integration**: * ECS Apostrophe Cluster * ALB + * DocumentDB Cluster + * ElastiCache Redis * Slack (for alerts) * **Features**: * ECS logs and detailed metrics * ALB metrics (e.g., 5xx, latency) + * DocumentDB cluster and instance metrics + * ElastiCache Redis performance metrics * CloudWatch alarms for key metrics * **Alerts**: Sent to Slack * **Log retention**: 90 days --- -### 📄 MongoDB on EC2 +### 🔴 Amazon ElastiCache (Redis) -* **MongoDB**: - * **Instance Name**: `sf-website-mongodb-` - * **Purpose**: Primary data store for ApostropheCMS +* 
**ElastiCache Redis Cluster**: + * **Cluster Name**: `sf-website-redis-` + * **Purpose**: Managed Redis service for session storage and application caching * **Resource Tags**: - * `Name: sf-website-mongodb-` + * `Name: sf-website-redis-` * `Project: Website` * `CostCenter: Website` * `Environment: ` * `Owner: peter.ovchyn` * **Resource Integration**: * ECS Apostrophe Cluster - * AWS Backup service * CloudWatch (for monitoring) - * Parameter Store (for credentials) - * **Instance Type**: t3.medium (2 vCPU, 4GB RAM) - * **Storage**: 100GB gp3 EBS volume with 3000 IOPS - * **AMI**: Amazon Linux 2 - * **Deployment**: Single EC2 instance in private subnet + * Cache Subnet Group (for networking) + * **Engine Version**: Redis 7.0 (latest stable) + * **Node Configuration**: + * **Node Type**: `cache.t3.micro` (1 vCPU, 0.5GB RAM) for dev/staging + * **Node Type**: `cache.t3.small` (2 vCPU, 1.5GB RAM) for production + * **Number of Nodes**: 1 (single node for simplicity) + * **Port**: 6379 (Redis standard) + * **Deployment**: + * Deployed in private subnets + * Cache Subnet Group spans both availability zones * **Security**: - * No public IP assigned - * Security group allows ingress only from ECS service security group on port 27017 - * SSH access via Session Manager (no direct SSH allowed) - * **Authentication**: Username/password authentication enabled - * Credentials stored in AWS Parameter Store + * VPC security group restricting access to ECS service only + * No public access + * Transit encryption enabled + * Auth token enabled for authentication + * **Authentication**: + * Auth token stored in AWS Parameter Store * Referenced in ECS task environment variables * **Backup Strategy**: - * Daily automated snapshots of EBS volume - * Retention period: 7 daily, 4 weekly - * Snapshot automation via AWS Backup service + * **Automatic Backups**: + * Daily snapshots enabled + * Retention period: 5 days + * Backup window: 02:00-03:00 UTC + * **Monitoring**: + * CloudWatch 
metrics for cluster performance + * CloudWatch alarms for: + * CPU utilization > 80% + * Memory usage > 80% + * Connection count thresholds + * Cache hit ratio < 80% + * **High Availability**: + * Automatic failover enabled + * Multi-AZ deployment for production environment + * Automatic minor version updates during maintenance window + * **Network Configuration**: + * **Cache Subnet Group**: `sf-website-redis-subnet-group-` + * **Security Group**: `sf-website-redis-sg-` + * **Endpoint**: Primary endpoint for read/write operations + +--- + +### 📄 Amazon DocumentDB + +* **DocumentDB Cluster**: + * **Cluster Name**: `sf-website-documentdb-` + * **Purpose**: Managed MongoDB-compatible database service for ApostropheCMS + * **Resource Tags**: + * `Name: sf-website-documentdb-` + * `Project: Website` + * `CostCenter: Website` + * `Environment: ` + * `Owner: peter.ovchyn` + * **Resource Integration**: + * ECS Apostrophe Cluster + * CloudWatch (for monitoring) + * Parameter Store (for credentials) + * DB Subnet Group (for networking) + * **Engine Version**: 4.0.0 (MongoDB compatible) + * **Cluster Configuration**: + * **Primary Instance**: `db.t3.medium` (2 vCPU, 4GB RAM) + * **Replica Instances**: 1 replica for high availability + * **Storage**: Encrypted with AWS managed keys + * **Port**: 27017 (MongoDB standard) + * **Deployment**: + * Multi-AZ deployment across private subnets + * DB Subnet Group spans both availability zones + * **Security**: + * VPC security group restricting access to ECS service only + * TLS encryption in transit required + * No public access + * Authentication required + * **Authentication**: + * Master username/password stored in AWS Parameter Store + * Referenced in ECS task environment variables via Parameter Store + * Database: `apostrophe` + * **Backup Strategy**: + * **Automated Backups**: + * Backup retention period: 7 days + * Backup window: 03:00-04:00 UTC + * Point-in-time recovery enabled + * **Manual Snapshots**: Available for major 
releases * **Monitoring**: - * CloudWatch agent for system metrics - * Custom MongoDB metrics published to CloudWatch - * Alerts for disk usage, connections, and query performance + * CloudWatch metrics for cluster and instance performance + * Enhanced monitoring enabled (60-second granularity) + * CloudWatch alarms for: + * CPU utilization > 80% + * Database connections > 80% of max + * Free storage < 20% + * Read/Write latency thresholds + * **Parameter Group**: + * Custom parameter group for performance optimization + * TLS enforcement enabled + * Audit logging enabled for security compliance * **High Availability**: - * Configured for future upgrade to a replica set - * Placeholder DNS record for future replica nodes + * Multi-AZ replica instance for automatic failover + * Cross-AZ backup replication + * Automatic minor version updates during maintenance window + * **Network Configuration**: + * **DB Subnet Group**: `sf-website-documentdb-subnet-group-` + * **Security Group**: `sf-website-documentdb-sg-` + * **Endpoint**: Cluster endpoint for write operations + * **Reader Endpoint**: Available for read-only operations diff --git a/docs/aws-architecture-diagram.md b/docs/aws-architecture-diagram.md new file mode 100644 index 00000000..d73e6545 --- /dev/null +++ b/docs/aws-architecture-diagram.md @@ -0,0 +1,115 @@ +# SF Website AWS Architecture + +## Principal AWS Architecture Diagram + +```mermaid +flowchart TB + %% External actors + Users[👥 Users] + GitHub[🐙 GitHub Actions CI/CD] + + %% Public facing components + ALB[⚖️ Application Load Balancer
sf-website-alb-env
HTTPS Only] + CF[🌐 CloudFront Distribution
sf-website-media-env
CDN for Media Assets] + + %% Compute layer + ECS[🚢 ECS Fargate Cluster
sf-website-ecs-cluster-env
Apostrophe CMS App] + ECR[🐳 ECR Repository
sf-website-ecr-env
Container Images] + + %% Storage layer + S3_Attachments[🪣 S3 Attachments Bucket
sf-website-s3-attachments-env
Media & Files] + S3_Logs[🪣 S3 Logs Bucket
sf-website-s3-logs-env
Centralized Logs] + MongoDB[📄 Amazon DocumentDB
sf-website-documentdb-env
db.t3.medium cluster + TLS enabled] + + %% Security & Identity + IAM_Task[👤 ECS Task Role
sf-website-ecs-task-env
S3 Access Permissions] + IAM_Exec[👤 ECS Execution Role
sf-website-ecs-execution-env
ECR & Parameter Store] + ParamStore[🔐 Parameter Store
Session Secrets & DB Credentials] + + %% Monitoring & Backup + CloudWatch[📊 CloudWatch
sf-website-cloudwatch-env
Logs & Metrics] + AWSBackup[💾 AWS Backup
Daily EBS Snapshots
7 daily, 4 weekly retention] + + %% User flows + Users -->|HTTPS requests| ALB + Users -->|Media requests| CF + + %% CI/CD flow + GitHub -->|Build & Push| ECR + GitHub -->|Deploy| ECS + + %% Load balancer to application + ALB -->|Route traffic| ECS + + %% CloudFront to storage + CF -->|Origin requests| S3_Attachments + + %% ECS relationships + ECS -->|Pull images| ECR + ECS -->|Read/Write media| S3_Attachments + ECS -->|Database operations| MongoDB + ECS -->|Get secrets| ParamStore + ECS -->|Send logs| CloudWatch + + %% IAM relationships + IAM_Task -.->|Assume role| ECS + IAM_Exec -.->|Assume role| ECS + IAM_Task -.->|S3 permissions| S3_Attachments + IAM_Exec -.->|ECR permissions| ECR + IAM_Exec -.->|Parameter Store| ParamStore + + %% Logging flows + ALB -->|Access logs| S3_Logs + CF -->|Access logs| S3_Logs + S3_Attachments -->|Server logs| S3_Logs + + %% Monitoring + ECS -->|Metrics & logs| CloudWatch + ALB -->|Metrics| CloudWatch + MongoDB -->|System metrics| CloudWatch + + %% Backup + AWSBackup -->|Snapshot| MongoDB + + %% Styling + classDef public fill:#e1f5fe + classDef compute fill:#f3e5f5 + classDef storage fill:#e8f5e8 + classDef security fill:#fff3e0 + classDef monitoring fill:#fce4ec + + class ALB,CF public + class ECS,ECR compute + class S3_Attachments,S3_Logs,MongoDB storage + class IAM_Task,IAM_Exec,ParamStore security + class CloudWatch,AWSBackup monitoring +``` + +## Key Architecture Components + +### 🌐 Public Layer +- **Application Load Balancer**: HTTPS-only entry point for web traffic +- **CloudFront**: Global CDN for media asset delivery from S3 + +### 🚢 Compute Layer +- **ECS Fargate**: Serverless container hosting for Apostrophe CMS +- **ECR**: Private container registry for application images + +### 🪣 Storage Layer +- **S3 Attachments**: Media files and uploads from CMS +- **S3 Logs**: Centralized logging for all services +- **MongoDB on EC2**: Primary database with automated backups + +### 👤 Security Layer +- **IAM Roles**: Least-privilege 
access for ECS tasks +- **Parameter Store**: Secure storage for secrets and configuration + +### 📊 Operations Layer +- **CloudWatch**: Monitoring, metrics, and alerting +- **AWS Backup**: Automated daily snapshots with retention policies + +## Environment Isolation +All resources are tagged and named with environment suffix: +- `dev`, `staging`, `prod` +- Complete isolation between environments +- Consistent naming: `sf-website--` \ No newline at end of file diff --git a/docs/infrastructureQNA.md b/docs/infrastructureQNA.md new file mode 100644 index 00000000..b82dcff3 --- /dev/null +++ b/docs/infrastructureQNA.md @@ -0,0 +1,66 @@ +# Infrastructure Q&A for Terraform Implementation + +## Questions and Answers + +### Q1: Certificate ARNs +**Question**: What are the actual ARN values for your existing SSL certificates? +- Main app certificates: `sf-website-{env}.sandbox-prettyclear.com` +- Media certificates: `sf-website-media-{env}.sandbox-prettyclear.com` + +**Answer**: Wildcard certificate `*.sandbox-prettyclear.com` covers all subdomains +**ARN**: `arn:aws:acm:us-east-1:548271326349:certificate/7e11016f-f90e-4800-972d-622bf1a82948` + +--- + +### Q2: Route 53 Hosted Zone ID +**Question**: What's the hosted zone ID for `sandbox-prettyclear.com`? + +**Answer**: [Skipped for now - will address later] + +--- + +### Q3: Parameter Store Secrets +**Question**: Should I generate these automatically or do you have specific values? +- DocumentDB master username/password +- SESSION_SECRET +- Any other app secrets? 
+ +**Answer**: +- **DocumentDB master username/password**: Store in tfvars files +- **SESSION_SECRET**: User will provide specific value in tfvars +- **Other secrets**: Based on docker-compose.yml: + - **REDIS_URI**: Will be auto-generated (ElastiCache endpoint) + - **BASE_URL**: Will be auto-generated from ALB domain + - **SERVICE_ACCOUNT_PRIVATE_KEY**: User will provide if using Google Cloud Storage + - **NODE_ENV**: Will be set to 'production' + +--- + +### Q4: Deployment Scope +**Question**: Should I create Terraform to deploy all three environments at once, or one environment at a time (which one first)? + +**Answer**: Terraform script should create 1 environment at a time. Environment should be specified via tfvars file. + +--- + +### Q5: Remote State +**Question**: Do you want S3 backend for Terraform state storage? + +**Answer**: Yes, use S3 bucket for Terraform state storage with DynamoDB for state locking. + +--- + +### Q6: CI/CD Integration +**Question**: Do you need IAM roles for GitHub Actions to deploy? + +**Answer**: Yes, include all 3: +- IAM role that GitHub Actions can assume +- Permissions for Terraform operations (creating/updating resources) +- ECR permissions for pushing Docker images + +--- + +### Q7: CloudWatch Alerts +**Question**: For notifications, do you have Slack webhook URLs, or should I create SNS topics instead? 
+ +**Answer**: Slack webhook URLs - should be provided in tfvars file \ No newline at end of file diff --git a/scripts/build_assets.sh b/scripts/build_assets.sh new file mode 100755 index 00000000..ed698fb6 --- /dev/null +++ b/scripts/build_assets.sh @@ -0,0 +1,3 @@ +export APOS_RELEASE_ID=`cat /dev/urandom |env LC_CTYPE=C tr -dc 'a-zA-Z0-9' | fold -w 32 | head -n 1` + +echo $APOS_RELEASE_ID > ./release-id \ No newline at end of file diff --git a/terraform-deploy.sh b/terraform-deploy.sh new file mode 100755 index 00000000..eb7c18ef --- /dev/null +++ b/terraform-deploy.sh @@ -0,0 +1,37 @@ +#!/bin/bash + +# Terraform Deployment Script +# This script runs terraform plan and terraform apply for the SF Website infrastructure +# Run this script from the project root directory + +set -e # Exit on any error + +# Configuration from existing setup +AWS_PROFILE="tf-sf-website" +AWS_REGION="us-east-1" +TERRAFORM_DIR="terraform" + +echo "🚀 Starting Terraform deployment process..." + +# Change to terraform directory +echo "📁 Changing to terraform directory..." +cd $TERRAFORM_DIR + +# Step 1: Initialize Terraform (if needed) +echo "🔧 Initializing Terraform..." +AWS_PROFILE=$AWS_PROFILE terraform init + +# Step 2: Run Terraform Plan +echo "📋 Running Terraform Plan..." +AWS_PROFILE=$AWS_PROFILE terraform plan -out=tfplan + +# Step 3: Apply the changes automatically +echo "🚀 Applying Terraform changes..." +AWS_PROFILE=$AWS_PROFILE terraform apply tfplan + +echo "✅ Terraform deployment completed successfully!" + +# Clean up plan file +rm -f tfplan + +echo "🎉 All done!" 
\ No newline at end of file diff --git a/website/app.js b/website/app.js index 4caa05bf..a4d8adcc 100644 --- a/website/app.js +++ b/website/app.js @@ -2,6 +2,33 @@ const apostrophe = require('apostrophe'); require('dotenv').config({ path: '../.env' }); const { getEnv } = require('./utils/env'); +/* + * Construct MongoDB URI from environment variables + * This must happen before Apostrophe initialization + */ +function constructMongoDbUri() { + const mongoUsername = getEnv('DOCUMENTDB_USERNAME'); + const mongoPassword = getEnv('DOCUMENTDB_PASSWORD'); + const mongoHost = getEnv('DOCUMENTDB_HOST'); + const mongoPort = getEnv('DOCUMENTDB_PORT'); + const mongoDatabase = getEnv('DOCUMENTDB_DATABASE'); + + /* + * AWS DocumentDB requires TLS/SSL encryption and SCRAM-SHA-1 authentication + * Build connection string with proper DocumentDB parameters + */ + const mongoUri = `mongodb://${encodeURIComponent(mongoUsername)}:${encodeURIComponent(mongoPassword)}@${mongoHost}:${mongoPort}/${mongoDatabase}?tls=true&tlsCAFile=global-bundle.pem&retryWrites=false`; + + // Log success (using simple logging since Apostrophe isn't initialized yet) + process.stdout.write('✅ MongoDB URI constructed successfully\n'); + process.stdout.write(` Host: ${mongoHost}:${mongoPort}\n`); + process.stdout.write(` Database: ${mongoDatabase}\n`); + process.stdout.write(` Username: ${mongoUsername}\n`); + process.stdout.write(` TLS: enabled with CA file validation\n`); + + return mongoUri; +} + function createAposConfig() { return { shortName: 'apostrophe-site', @@ -9,6 +36,14 @@ function createAposConfig() { // Session configuration modules: { + // Database configuration with direct URI + '@apostrophecms/db': { + options: { + uri: constructMongoDbUri(), + // Additional MongoDB connection options for DocumentDB + }, + }, + // Core modules configuration '@apostrophecms/express': { options: { diff --git a/website/modules/@apostrophecms/uploadfs/index.js b/website/modules/@apostrophecms/uploadfs/index.js 
index 38898a73..56350c14 100644 --- a/website/modules/@apostrophecms/uploadfs/index.js +++ b/website/modules/@apostrophecms/uploadfs/index.js @@ -1,19 +1,36 @@ const { getEnv } = require('../../../utils/env'); +const s3aws = { + bucket: getEnv('APOS_S3_BUCKET'), + region: getEnv('APOS_S3_REGION'), + https: true, +}; + +/* + * Const s3localstack = { + * bucket: getEnv('APOS_S3_BUCKET'), + * region: getEnv('APOS_S3_REGION'), + * endpoint: getEnv('APOS_S3_ENDPOINT'), + * style: 'path', + * https: false, + * }; + */ + const res = { options: { uploadfs: { storage: 's3', + ...s3aws, // Get your credentials at aws.amazon.com - secret: getEnv('APOS_S3_SECRET'), - key: getEnv('APOS_S3_KEY'), - // Bucket name created on aws.amazon.com - bucket: getEnv('APOS_S3_BUCKET'), - // Region name for endpoint - region: getEnv('APOS_S3_REGION'), - endpoint: getEnv('APOS_S3_ENDPOINT'), - style: getEnv('APOS_S3_STYLE'), - https: getEnv('APOS_S3_HTTPS'), + // Secret: getEnv('APOS_S3_SECRET'), + // Key: getEnv('APOS_S3_KEY'), + // // Bucket name created on aws.amazon.com + // Bucket: getEnv('APOS_S3_BUCKET'), + // // Region name for endpoint + // Region: getEnv('APOS_S3_REGION'), + // Endpoint: getEnv('APOS_S3_ENDPOINT'), + // Style: getEnv('APOS_S3_STYLE'), + // Https: getEnv('APOS_S3_HTTPS'), cdn: { enabled: true, url: getEnv('APOS_CDN_URL'),