Launching a Web Server on AWS EFS using Terraform

Gyanesh Sharma · 5 min read · Dec 20, 2020

Tasks to perform

  1. Create a VPC and a subnet to connect the EFS file system to EC2.
  2. Create a security group that allows port 80 (HTTP) and the NFS port (2049).
  3. Launch an EC2 instance that uses the key pair and the security group created in step 2.
  4. Create a volume using the EFS service in the same VPC created in step 1, then mount it on /var/www/html.
  5. A developer has uploaded the code, which includes some images, to a GitHub repo.
  6. Copy the GitHub repo code into /var/www/html.
  7. Create an S3 bucket, copy the images from the GitHub repo into it, and make them publicly readable.
  8. Create a CloudFront distribution backed by the S3 bucket (which holds the images) and use the CloudFront URL in the code in /var/www/html.
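
All the Terraform below uses the 0.12+ syntax with the AWS provider as it stood in late 2020. It helps to pin the provider versions so later runs behave the same; a minimal sketch (the exact version constraints are my assumption, not part of the original setup):

terraform {
  required_version = ">= 0.13"

  required_providers {
    # hashicorp/aws and hashicorp/tls are the two providers the code below relies on
    aws = {
      source  = "hashicorp/aws"
      version = "~> 3.0"
    }
    tls = {
      source  = "hashicorp/tls"
      version = "~> 3.0"
    }
  }
}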

Step 1: Create a key pair

provider "aws" {
region = "ap-south-1"
}
resource "tls_private_key" "task2_key" {
algorithm = "RSA"
}
module "key_pair" {
source = "terraform-aws-modules/key-pair/aws"
key_name = "task2_key"
public_key = tls_private_key.task2_key.public_key_openssh
}
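
If you want to SSH into the instance manually later, you can also write the generated private key to disk. A small sketch using the hashicorp/local provider (the file name is my choice):

resource "local_file" "task2_key_file" {
  content         = tls_private_key.task2_key.private_key_pem
  filename        = "task2_key.pem"
  file_permission = "0400" # private keys must not be world-readable
}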

Step 2: Create a VPC, subnet, and internet gateway

resource "aws_vpc" "myvpc" {
cidr_block = "10.5.0.0/16"
enable_dns_hostnames = true
tags = {
Name = "task2"
}
}
resource "aws_subnet" "mysubnet" {
vpc_id = "${aws_vpc.myvpc.id}"
availability_zone = "ap-south-1a"
cidr_block = "10.5.1.0/24"
map_public_ip_on_launch = true
tags = {
Name = "task2-1a"
}
}
resource "aws_internet_gateway" "mygateway" {
vpc_id = "${aws_vpc.myvpc.id}"
tags = {
Name = "task2-1a"
}
}
resource "aws_route_table" "mytable" {
vpc_id = "${aws_vpc.myvpc.id}"
route {
cidr_block = "0.0.0.0/0"
gateway_id = "${aws_internet_gateway.mygateway.id}"
}
tags = {
Name = "task2-1a"
}
}
resource "aws_route_table_association" "myassociation" {
subnet_id = aws_subnet.mysubnet.id
route_table_id = aws_route_table.mytable.id
}


resource "aws_subnet" "alpha-1a" {
vpc_id = "${aws_vpc.myvpc.id}"
availability_zone = "ap-south-1a"
cidr_block = "10.5.1.0/24"
map_public_ip_on_launch = true
tags = {
Name = "main-1a"
}
}
resource "aws_internet_gateway" "gw" {
vpc_id = "${aws_vpc.myvpc.id}"
tags = {
Name = "main-1a"
}
}
resource "aws_route_table" "rtable" {
vpc_id = "${aws_vpc.myvpc.id}"
route {
cidr_block = "0.0.0.0/0"
gateway_id = "${aws_internet_gateway.gw.id}"
}
tags = {
Name = "main-1a"
}
}
resource "aws_route_table_association" "a" {
subnet_id = aws_subnet.alpha-1a.id
route_table_id = aws_route_table.rtable.id
}

Step 3: Create a security group that allows HTTP, SSH, and NFS connections

resource "aws_security_group" "allow_http" {
name = "allow_http"
description = "Allow HTTP inbound traffic"
vpc_id = "${aws_vpc.myvpc.id}"
ingress {
description = "Http from VPC"
from_port = 80
to_port = 80
protocol = "tcp"
cidr_blocks = [ "0.0.0.0/0" ]
}
ingress {
description = "SSH from VPC"
from_port = 22
to_port = 22
protocol = "tcp"
cidr_blocks = [ "0.0.0.0/0" ]
}
ingress {
description = "NFS"
from_port = 2049
to_port = 2049
protocol = "tcp"
cidr_blocks = [ "0.0.0.0/0" ]
}
egress {
from_port = 0
to_port = 0
protocol = "-1"
cidr_blocks = ["0.0.0.0/0"]
}


tags = {
Name = "task2securitygroup"
}
}
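
Opening NFS to 0.0.0.0/0 works for a demo, but the file system is only ever mounted from inside the VPC, so that rule can be tightened. A hedged variant of the NFS ingress block above, restricted to the VPC's CIDR:

  ingress {
    description = "NFS from inside the VPC only"
    from_port   = 2049
    to_port     = 2049
    protocol    = "tcp"
    cidr_blocks = [aws_vpc.myvpc.cidr_block]
  }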

Step 4: Create EFS file system

resource "aws_efs_file_system" "mypd" {
creation_token = "my-secure-pd"
tags = {
Name = "MyPersonalFileSystem"
}
}
resource "aws_efs_file_system_policy" "policy" {
file_system_id = "${aws_efs_file_system.mypd.id}"
policy = <<POLICY
{
"Version": "2012-10-17",
"Id": "efs-policy-wizard-c45981c9-af16-441d-aa48-0fbd69ffaf79",
"Statement": [
{
"Sid": "efs-statement-20e4323c-ca0e-418d-8490-3c3880f60788",
"Effect": "Allow",
"Principal": {
"AWS": "*"
},
"Resource": "${aws_efs_file_system.mypd.arn}",
"Action": [
"elasticfilesystem:ClientMount",
"elasticfilesystem:ClientWrite",
"elasticfilesystem:ClientRootAccess"
],
"Condition": {
"Bool": {
"aws:SecureTransport": "true"
}
}
}
]
}
POLICY
}


resource "aws_efs_mount_target" "mytarget" {
file_system_id = "${aws_efs_file_system.mypd.id}"
subnet_id = "${aws_subnet.mysubnet.id}"
security_groups = [ "${aws_security_group.allow_http.id}" ]
}
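
EFS file systems get a DNS name of the form <file-system-id>.efs.<region>.amazonaws.com, which is what the mount helper resolves behind the scenes. An output like the following (my addition, with the region hard-coded to match the provider) makes mount debugging easier:

output "efs_dns_name" {
  # Constructed from the documented EFS DNS format: <fs-id>.efs.<region>.amazonaws.com
  value = "${aws_efs_file_system.mypd.id}.efs.ap-south-1.amazonaws.com"
}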

Step 5: Launch the EC2 instance and mount the EFS file system

resource "aws_instance" "task2web" {
ami = "ami-00b494a3f139ba61f"
instance_type = "t2.micro"
key_name = "task2_key"
availability_zone = "ap-south-1a"
subnet_id = "${aws_subnet.mysubnet.id}"
security_groups = [ "${aws_security_group.allow_http.id}" ]
tags = {
Name = "MyWebServer"
}
}
resource "null_resource" "myattach" {
depends_on = [
aws_efs_mount_target.mytarget,
]
connection {
type = "ssh"
user = "ec2-user"
private_key = tls_private_key.task2_key.private_key_pem
host = aws_instance.task2web.public_ip
}
provisioner "remote-exec" {
inline = [
"sleep 30",
"sudo yum install -y httpd git php amazon-efs-utils nfs-utils",
"sudo systemctl start httpd",
"sudo systemctl enable httpd",
"sudo chmod ugo+rw /etc/fstab",
"sudo echo '${aws_efs_file_system.mypd.id}:/ /var/www/html efs tls,_netdev' >> /etc/fstab",
"sudo mount -a -t efs,nfs4 defaults",
"sudo rm -rf /var/www/html/*",
"sudo git clone https://github.com/gyaneshsharma/cloud_task2.git /var/www/html/"
]
}
}
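
The remote-exec provisioner requires SSH access from the machine running Terraform. An alternative (my sketch, not part of the original article) is to do the same setup through user_data, so the instance configures itself at boot without any inbound SSH:

resource "aws_instance" "task2web_alt" {
  # Hypothetical variant of task2web that needs no SSH connection
  ami                    = "ami-00b494a3f139ba61f"
  instance_type          = "t2.micro"
  subnet_id              = aws_subnet.mysubnet.id
  vpc_security_group_ids = [aws_security_group.allow_http.id]

  # The mount target must exist before the boot script tries to mount EFS
  depends_on = [aws_efs_mount_target.mytarget]

  user_data = <<-EOF
    #!/bin/bash
    yum install -y httpd git amazon-efs-utils
    systemctl enable --now httpd
    echo '${aws_efs_file_system.mypd.id}:/ /var/www/html efs tls,_netdev' >> /etc/fstab
    mount -a -t efs
    git clone https://github.com/gyaneshsharma/cloud_task2.git /var/www/html/
    EOF
}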

Step 6: Create an S3 bucket and copy the images into it

resource "aws_s3_bucket" "task2-bucket" {
bucket = "task2-bucket"
acl = "public-read"
force_destroy = true
cors_rule {
allowed_headers = ["*"]
allowed_methods = ["PUT", "POST"]
allowed_origins = ["https://task2-bucket"]
expose_headers = ["ETag"]
max_age_seconds = 3000
}
depends_on = [
null_resource.myattach,
]
}
resource "aws_s3_bucket_object" "task2obj" {
key = "test.png"
bucket = aws_s3_bucket.task2-bucket.id
source = "test.png"
acl="public-read"
}
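
On newer AWS accounts, S3 Block Public Access is enabled by default and silently overrides public-read ACLs. If the images come back as 403, the bucket-level block can be relaxed; a sketch, only appropriate for a throwaway demo bucket like this one:

resource "aws_s3_bucket_public_access_block" "task2_public" {
  bucket                  = aws_s3_bucket.task2-bucket.id
  block_public_acls       = false
  block_public_policy     = false
  ignore_public_acls      = false
  restrict_public_buckets = false
}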

Step 7: Create a CloudFront distribution

resource "aws_cloudfront_distribution" "distribution" {
origin {
domain_name = "${aws_s3_bucket.task2-bucket.bucket_regional_domain_name}"
origin_id = "S3-${aws_s3_bucket.task2-bucket.bucket}"
custom_origin_config {
http_port = 80
https_port = 443
origin_protocol_policy = "match-viewer"
origin_ssl_protocols = ["TLSv1", "TLSv1.1", "TLSv1.2"]
}
}
default_root_object = "test.png"
enabled = true
custom_error_response {
error_caching_min_ttl = 3000
error_code = 404
response_code = 200
response_page_path = "/test.png"
}
default_cache_behavior {
allowed_methods = ["DELETE", "GET", "HEAD", "OPTIONS", "PATCH", "POST", "PUT"]
cached_methods = ["GET", "HEAD"]
target_origin_id = "S3-${aws_s3_bucket.task2-bucket.bucket}"


#Not Forward all query strings, cookies and headers
forwarded_values {
query_string = false
cookies {
forward = "none"
}

}


viewer_protocol_policy = "redirect-to-https"
min_ttl = 0
default_ttl = 3600
max_ttl = 86400
}



restrictions {
geo_restriction {
# type of restriction, blacklist, whitelist or none
restriction_type = "none"
}
}



viewer_certificate {
cloudfront_default_certificate = true
}
}
resource "null_resource" "mypic" {
depends_on = [
null_resource.myattach,
aws_cloudfront_distribution.distribution,
]
connection {
type = "ssh"
user = "ec2-user"
private_key = tls_private_key.task2_key.private_key_pem
host = aws_instance.task2web.public_ip
}
provisioner "remote-exec" {
inline = [
"sudo chmod ugo+rw /var/www/html/index.php",
"sudo echo '<img src=http://${aws_cloudfront_distribution.distribution.domain_name}/test.png alt='ANSHUL' width='500' height='600'</a>' >> /var/www/html/index.php"
]
}
}


output "cloudfront_ip_addr" {
value = aws_cloudfront_distribution.distribution.domain_name
}
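
It is also handy to print the web server's own address alongside the CloudFront domain, so you can open the finished page directly (a small addition to the original outputs):

output "webserver_public_ip" {
  value = aws_instance.task2web.public_ip
}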

After writing the above configuration, run the following commands:

terraform init
terraform validate
terraform apply

After the task is complete, we can tear down the whole environment with a single command:

terraform destroy
