# AWS provider configuration. Region is supplied by a variable; the empty
# profile string falls back to the default credential chain.
provider "aws" {
  profile = ""
  region  = var.aws_region
}

# Look up the account's default VPC so the security group can attach to it.
data "aws_vpc" "default" {
  default = true
}

# Random suffix used to make the security-group name unique per AMI.
resource "random_id" "server" {
  keepers = {
    # Generate a new id each time we switch to a new AMI id
    ami_id = var.ami_id
  }

  byte_length = 8
}

# Security group with port 22 (SSH) and port 80 (HTTP) open to the world,
# and unrestricted egress.
resource "aws_security_group" "github_actions" {
  name   = "${var.namespace}-${random_id.server.hex}"
  vpc_id = data.aws_vpc.default.id

  # SSH access from anywhere (needed for the remote-exec connection below).
  ingress {
    from_port   = 22
    to_port     = 22
    protocol    = "tcp"
    cidr_blocks = ["0.0.0.0/0"]
  }

  # HTTP traffic from anywhere.
  ingress {
    from_port   = 80
    to_port     = 80
    protocol    = "tcp"
    cidr_blocks = ["0.0.0.0/0"]
  }

  # Allow all outbound traffic.
  egress {
    from_port   = 0
    to_port     = 0
    protocol    = "-1"
    cidr_blocks = ["0.0.0.0/0"]
  }

  tags = {
    Name = "${var.namespace}-SG"
  }
}

# EC2 instance setup.
resource "aws_instance" "testing_vm" {
  ami                         = var.ami_id
  associate_public_ip_address = true
  key_name                    = var.ami_key_pair_name # This is the key as known in the ec2 key_pairs
  instance_type               = var.instance_type
  tags                        = var.instance_tags
  vpc_security_group_ids      = [aws_security_group.github_actions.id]

  root_block_device {
    delete_on_termination = true
  }

  # SSH into instance - will ensure server is up before next step in workflows
  connection {
    # Host name
    host = self.public_ip
    # The default username for our AMI
    user = var.ami_username
    # Private key for connection
    private_key = file(var.private_key)
    # Type of connection
    type = "ssh"
  }

  # FIX: the remote-exec provisioner has no "command" argument (that belongs
  # to local-exec); it takes "inline", "script", or "scripts". As originally
  # written this block failed validation.
  provisioner "remote-exec" {
    inline = ["echo 'hello world'"]
  }
}

# Generate an Ansible inventory file on the local machine.
# NOTE(review): the heredoc body for "content" is truncated in the source
# (it ends at "<"); the remainder of this resource is missing and has been
# left as found — restore the original heredoc before applying.
resource "local_file" "inventory" {
  filename             = "./hosts.yml"
  directory_permission = "0755"
  file_permission      = "0644"
  content = <