Master Amazon Web Services and become a certified cloud architect. Learn to design, deploy, and manage scalable cloud infrastructure.
Build production-ready applications using EC2, S3, Lambda, RDS, and more AWS services with industry best practices.
Prerequisites: basic Linux/Windows administration, networking fundamentals, and programming basics (Python or Node.js recommended)
Master EC2 instances, auto-scaling, load balancing, and compute optimization. Launch and manage virtual servers in the cloud.
# Launch EC2 instance using AWS CLI
aws ec2 run-instances \
    --image-id ami-0c55b159cbfafe1f0 \
    --instance-type t2.micro \
    --key-name MyKeyPair \
    --security-group-ids sg-903004f8 \
    --subnet-id subnet-6e7f829e \
    --tag-specifications 'ResourceType=instance,Tags=[{Key=Name,Value=WebServer}]'
# Create Auto Scaling Group (launch templates are now preferred over launch configurations)
aws autoscaling create-auto-scaling-group \
    --auto-scaling-group-name my-asg \
    --launch-configuration-name my-launch-config \
    --min-size 2 \
    --max-size 10 \
    --desired-capacity 4 \
    --availability-zones us-east-1a us-east-1b
# Python Boto3 - AWS SDK
import boto3
# Create EC2 resource
ec2 = boto3.resource('ec2', region_name='us-east-1')
# Launch instance
instances = ec2.create_instances(
    ImageId='ami-0c55b159cbfafe1f0',
    MinCount=1,
    MaxCount=1,
    InstanceType='t2.micro',
    KeyName='MyKeyPair',
    TagSpecifications=[{
        'ResourceType': 'instance',
        'Tags': [{'Key': 'Name', 'Value': 'WebServer'}]
    }]
)
print(f"Instance created: {instances[0].id}")
# Wait for instance to be running
instances[0].wait_until_running()
instances[0].reload()
print(f"Public IP: {instances[0].public_ip_address}")
Store and distribute files globally with S3 and CloudFront. Implement versioning, lifecycle policies, and secure access control.
import boto3
from botocore.exceptions import ClientError
# S3 client (region must match the bucket's LocationConstraint below)
s3 = boto3.client('s3', region_name='us-west-2')
# Create bucket
bucket_name = 'my-app-bucket-2024'
try:
    s3.create_bucket(
        Bucket=bucket_name,
        CreateBucketConfiguration={'LocationConstraint': 'us-west-2'}
    )
except ClientError as e:
    # e.g. BucketAlreadyOwnedByYou when re-running the script
    print(f"Bucket not created: {e.response['Error']['Code']}")
# Upload file (objects are private by default; ACLs are disabled on new buckets)
s3.upload_file(
    'local_file.txt',
    bucket_name,
    'remote_file.txt',
    ExtraArgs={'ContentType': 'text/plain'}
)
# Generate presigned URL (temporary access)
url = s3.generate_presigned_url(
    'get_object',
    Params={'Bucket': bucket_name, 'Key': 'remote_file.txt'},
    ExpiresIn=3600  # 1 hour
)
print(f"Temporary URL: {url}")
# Enable versioning
s3.put_bucket_versioning(
    Bucket=bucket_name,
    VersioningConfiguration={'Status': 'Enabled'}
)
# Set lifecycle policy (each rule needs a Filter; an empty prefix matches all objects)
lifecycle_policy = {
    'Rules': [{
        'Id': 'MoveToGlacier',
        'Status': 'Enabled',
        'Filter': {'Prefix': ''},
        'Transitions': [{
            'Days': 30,
            'StorageClass': 'GLACIER'
        }],
        'NoncurrentVersionExpiration': {'NoncurrentDays': 90}
    }]
}
s3.put_bucket_lifecycle_configuration(
    Bucket=bucket_name,
    LifecycleConfiguration=lifecycle_policy
)
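CloudFront, mentioned above, puts a CDN in front of the bucket. Here is a minimal boto3 sketch; it assumes the bucket contents are publicly readable, and a production setup would instead keep the bucket private behind Origin Access Control, which is omitted here:
# CloudFront distribution for the bucket (sketch; simplified origin access)
cloudfront = boto3.client('cloudfront')
distribution = cloudfront.create_distribution(
    DistributionConfig={
        'CallerReference': 'my-app-bucket-2024-cdn',  # any unique string
        'Comment': 'CDN for my-app-bucket-2024',
        'Enabled': True,
        'Origins': {
            'Quantity': 1,
            'Items': [{
                'Id': 'S3-my-app-bucket-2024',
                'DomainName': 'my-app-bucket-2024.s3.us-west-2.amazonaws.com',
                'S3OriginConfig': {'OriginAccessIdentity': ''}  # public origin; use OAC in production
            }]
        },
        'DefaultCacheBehavior': {
            'TargetOriginId': 'S3-my-app-bucket-2024',
            'ViewerProtocolPolicy': 'redirect-to-https',
            # Legacy forwarded-values settings; managed cache policies also work
            'ForwardedValues': {'QueryString': False, 'Cookies': {'Forward': 'none'}},
            'MinTTL': 0
        }
    }
)
print(f"CDN domain: {distribution['Distribution']['DomainName']}")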
Build serverless applications with AWS Lambda. Create event-driven architectures without managing servers.
# Lambda function handler (Python)
import json
import os
import boto3

dynamodb = boto3.resource('dynamodb')
# Table name comes from the SAM template's environment variable below
table = dynamodb.Table(os.environ.get('TABLE_NAME', 'Users'))

def lambda_handler(event, context):
    """
    Process API Gateway requests:
    CRUD operations on DynamoDB.
    """
    http_method = event['httpMethod']
    if http_method == 'GET':
        # Get user by ID
        user_id = event['pathParameters']['id']
        response = table.get_item(Key={'userId': user_id})
        if 'Item' in response:
            return {
                'statusCode': 200,
                'headers': {'Content-Type': 'application/json'},
                'body': json.dumps(response['Item'])
            }
        return {
            'statusCode': 404,
            'body': json.dumps({'error': 'User not found'})
        }
    elif http_method == 'POST':
        # Create new user
        body = json.loads(event['body'])
        table.put_item(Item=body)
        return {
            'statusCode': 201,
            'body': json.dumps({'message': 'User created', 'user': body})
        }
    elif http_method == 'DELETE':
        # Delete user
        user_id = event['pathParameters']['id']
        table.delete_item(Key={'userId': user_id})
        return {
            'statusCode': 200,
            'body': json.dumps({'message': 'User deleted'})
        }
    # Reject anything else
    return {
        'statusCode': 405,
        'body': json.dumps({'error': f'Method {http_method} not allowed'})
    }
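A quick way to exercise the handler before deploying is to hand it a fabricated API Gateway-style event. The event below is a simplified assumption (real events carry many more fields), and the call still needs AWS credentials plus an existing Users table:
# Local smoke test with a hand-built event (simplified; needs AWS credentials)
if __name__ == '__main__':
    fake_event = {'httpMethod': 'GET', 'pathParameters': {'id': '42'}}
    print(lambda_handler(fake_event, None))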
# Deploy Lambda with SAM template (template.yaml)
"""
AWSTemplateFormatVersion: '2010-09-09'
Transform: AWS::Serverless-2016-10-31
Resources:
  UserFunction:
    Type: AWS::Serverless::Function
    Properties:
      Handler: app.lambda_handler
      Runtime: python3.9
      MemorySize: 256
      Timeout: 30
      Environment:
        Variables:
          TABLE_NAME: Users
      Events:
        GetUser:
          Type: Api
          Properties:
            Path: /users/{id}
            Method: GET
        CreateUser:
          Type: Api
          Properties:
            Path: /users
            Method: POST
        DeleteUser:
          Type: Api
          Properties:
            Path: /users/{id}
            Method: DELETE
"""
Manage relational databases with RDS, NoSQL with DynamoDB, and caching with ElastiCache for optimal performance.
import boto3
import pymysql

# Create RDS instance (store real credentials in AWS Secrets Manager, not in code)
rds = boto3.client('rds')
rds.create_db_instance(
    DBInstanceIdentifier='mydb-instance',
    DBInstanceClass='db.t3.micro',
    Engine='mysql',
    MasterUsername='admin',
    MasterUserPassword='SecurePassword123!',
    AllocatedStorage=20,
    BackupRetentionPeriod=7,
    MultiAZ=True,
    PubliclyAccessible=False
)
# Provisioning takes several minutes; wait before connecting
rds.get_waiter('db_instance_available').wait(DBInstanceIdentifier='mydb-instance')
# Connect to RDS MySQL
connection = pymysql.connect(
    host='mydb-instance.abc123.us-east-1.rds.amazonaws.com',
    user='admin',
    password='SecurePassword123!',
    database='myapp'
)
# DynamoDB operations
from decimal import Decimal
from boto3.dynamodb.conditions import Key

dynamodb = boto3.resource('dynamodb')
# Create table (category as partition key so items can be queried by category below)
table = dynamodb.create_table(
    TableName='Products',
    KeySchema=[
        {'AttributeName': 'category', 'KeyType': 'HASH'},
        {'AttributeName': 'productId', 'KeyType': 'RANGE'}
    ],
    AttributeDefinitions=[
        {'AttributeName': 'category', 'AttributeType': 'S'},
        {'AttributeName': 'productId', 'AttributeType': 'S'}
    ],
    ProvisionedThroughput={
        'ReadCapacityUnits': 5,
        'WriteCapacityUnits': 5
    }
)
# Wait for table creation
table.meta.client.get_waiter('table_exists').wait(TableName='Products')
# Put item (DynamoDB numbers must be Decimal, not float)
products_table = dynamodb.Table('Products')
products_table.put_item(
    Item={
        'productId': 'PROD001',
        'category': 'Electronics',
        'name': 'Laptop',
        'price': Decimal('999.99'),
        'stock': 50
    }
)
# Query all items in a category (key conditions must start with the partition key)
response = products_table.query(
    KeyConditionExpression=Key('category').eq('Electronics')
)
print(f"Found {response['Count']} products")
Automate infrastructure deployment with CloudFormation and Terraform. Version control your cloud resources.
# CloudFormation template (YAML)
"""
AWSTemplateFormatVersion: '2010-09-09'
Description: 'VPC with a public subnet and a web server'
Parameters:
  EnvironmentName:
    Type: String
    Default: Production
Resources:
  VPC:
    Type: AWS::EC2::VPC
    Properties:
      CidrBlock: 10.0.0.0/16
      EnableDnsHostnames: true
      Tags:
        - Key: Name
          Value: !Ref EnvironmentName
  InternetGateway:
    Type: AWS::EC2::InternetGateway
  AttachGateway:
    Type: AWS::EC2::VPCGatewayAttachment
    Properties:
      VpcId: !Ref VPC
      InternetGatewayId: !Ref InternetGateway
  PublicSubnet:
    Type: AWS::EC2::Subnet
    Properties:
      VpcId: !Ref VPC
      CidrBlock: 10.0.1.0/24
      AvailabilityZone: !Select [0, !GetAZs '']
      MapPublicIpOnLaunch: true
  WebServerInstance:
    Type: AWS::EC2::Instance
    Properties:
      ImageId: ami-0c55b159cbfafe1f0
      InstanceType: t2.micro
      SubnetId: !Ref PublicSubnet
      SecurityGroupIds:
        - !Ref WebServerSecurityGroup
  WebServerSecurityGroup:
    Type: AWS::EC2::SecurityGroup
    Properties:
      GroupDescription: Allow HTTP and SSH
      VpcId: !Ref VPC
      SecurityGroupIngress:
        - IpProtocol: tcp
          FromPort: 80
          ToPort: 80
          CidrIp: 0.0.0.0/0
        - IpProtocol: tcp
          FromPort: 22
          ToPort: 22
          CidrIp: 0.0.0.0/0  # restrict SSH to your own IP range in production
Outputs:
  VPCId:
    Value: !Ref VPC
    Export:
      Name: !Sub ${EnvironmentName}-VPC
"""
# Terraform configuration
"""
provider "aws" {
  region = "us-east-1"
}

resource "aws_vpc" "main" {
  cidr_block = "10.0.0.0/16"
  tags = {
    Name = "MainVPC"
  }
}

resource "aws_subnet" "public" {
  vpc_id            = aws_vpc.main.id
  cidr_block        = "10.0.1.0/24"
  availability_zone = "us-east-1a"
  tags = {
    Name = "PublicSubnet"
  }
}

resource "aws_instance" "web" {
  ami           = "ami-0c55b159cbfafe1f0"
  instance_type = "t2.micro"
  subnet_id     = aws_subnet.public.id
  tags = {
    Name = "WebServer"
  }
}
"""
Architecture: ALB + Auto Scaling + EC2 + RDS Multi-AZ + S3 + CloudFront
Features: High availability, auto-scaling, database replication, CDN distribution
Architecture: API Gateway + Lambda + DynamoDB + Cognito
Features: RESTful API, authentication, NoSQL database, pay-per-use pricing
Architecture: S3 + Lambda + Kinesis + Athena + QuickSight
Features: Real-time data ingestion, serverless processing, analytics, visualization
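For the data pipeline project, ingestion usually starts with producers writing to Kinesis. Below is a minimal sketch; the stream name and the event shape are assumptions, and a high-volume producer would batch with put_records instead:
# Send one event to a Kinesis stream (sketch; stream name and event are assumptions)
import json
import boto3

kinesis = boto3.client('kinesis', region_name='us-east-1')
event = {'userId': 'u-123', 'action': 'page_view', 'page': '/products'}
kinesis.put_record(
    StreamName='clickstream',
    Data=json.dumps(event).encode('utf-8'),
    PartitionKey=event['userId']  # same user -> same shard, preserving order
)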
Test your AWS cloud knowledge with 20 random questions!