Docker

Docker Basics

# Run a container
docker run -it ubuntu:20.04
docker run -d -p 8080:80 nginx

# Container management
docker ps # List running containers
docker ps -a # List all containers
docker stop <container_id>
docker start <container_id>
docker restart <container_id>
docker rm <container_id>
docker rm -f <container_id> # Force remove

# Image management
docker images # List images
docker rmi <image_id>
docker pull ubuntu:20.04
docker push myrepo/myimage:tag

# Inspect containers
docker logs <container_id>
docker exec -it <container_id> bash
docker inspect <container_id>

Dockerfile & Docker Compose

# Sample Dockerfile
FROM ubuntu:20.04

RUN apt-get update && apt-get install -y \
    nginx \
    curl

COPY index.html /var/www/html/
COPY nginx.conf /etc/nginx/

EXPOSE 80

CMD ["nginx", "-g", "daemon off;"]

# Build image from Dockerfile
docker build -t myapp:1.0 .
docker build -f Dockerfile.dev -t myapp:dev .

# Docker Compose (docker-compose.yml)
version: '3.8'
services:
  web:
    image: nginx:alpine
    ports:
      - "80:80"
  app:
    build: .
    ports:
      - "3000:3000"
  db:
    image: postgres:13
    environment:
      POSTGRES_DB: mydb
      POSTGRES_USER: user
      POSTGRES_PASSWORD: pass

# Docker Compose commands
docker-compose up
docker-compose up -d
docker-compose down
docker-compose build
docker-compose logs

Kubernetes

Kubectl Basics

# Cluster information
kubectl cluster-info
kubectl config current-context
kubectl get nodes

# Basic commands
kubectl get pods
kubectl get pods -A
kubectl get pods -o wide
kubectl get deployments
kubectl get services
kubectl get ingress
kubectl get all

# Describe resources
kubectl describe pod <pod_name>
kubectl describe node <node_name>
kubectl describe service <service_name>

# Create resources
kubectl create deployment nginx --image=nginx
kubectl expose deployment nginx --port=80 --type=LoadBalancer
kubectl apply -f deployment.yaml
kubectl delete -f deployment.yaml

# Debugging
kubectl logs <pod_name>
kubectl exec -it <pod_name> -- bash
kubectl port-forward <pod_name> 8080:80

YAML Configurations

# Deployment example
apiVersion: apps/v1
kind: Deployment
metadata:
  name: nginx-deployment
  labels:
    app: nginx
spec:
  replicas: 3
  selector:
    matchLabels:
      app: nginx
  template:
    metadata:
      labels:
        app: nginx
    spec:
      containers:
      - name: nginx
        image: nginx:1.14.2
        ports:
        - containerPort: 80

# Service example
apiVersion: v1
kind: Service
metadata:
  name: nginx-service
spec:
  selector:
    app: nginx
  ports:
    - protocol: TCP
      port: 80
      targetPort: 80
  type: LoadBalancer

# ConfigMap example
apiVersion: v1
kind: ConfigMap
metadata:
  name: app-config
data:
  APP_COLOR: blue
  APP_MODE: prod

AWS Cloud

EC2 & S3

# EC2 instances
aws ec2 describe-instances
aws ec2 run-instances \
  --image-id ami-0abcdef1234567890 \
  --instance-type t2.micro \
  --key-name MyKeyPair

aws ec2 terminate-instances --instance-ids i-1234567890abcdef0

# S3 buckets
aws s3 ls
aws s3 mb s3://my-bucket
aws s3 cp file.txt s3://my-bucket/
aws s3 sync . s3://my-bucket/path/
aws s3 rm s3://my-bucket/file.txt
aws s3 rb s3://my-bucket

# IAM commands
aws iam list-users
aws iam create-user --user-name Bob
aws iam attach-user-policy \
  --user-name Bob \
  --policy-arn arn:aws:iam::aws:policy/AdministratorAccess

# Security groups
aws ec2 describe-security-groups
aws ec2 authorize-security-group-ingress \
  --group-id sg-903004f8 \
  --protocol tcp \
  --port 22 \
  --cidr 203.0.113.0/24

EKS & ECR

# EKS commands
aws eks list-clusters
aws eks describe-cluster --name my-cluster
aws eks update-kubeconfig --name my-cluster --region us-west-2

# ECR commands
aws ecr describe-repositories
aws ecr create-repository --repository-name my-repo

# Login to ECR
aws ecr get-login-password --region <region> | \
  docker login --username AWS --password-stdin \
  123456789012.dkr.ecr.<region>.amazonaws.com

# Push image to ECR
docker tag my-image:latest 123456789012.dkr.ecr.<region>.amazonaws.com/my-repo:latest
docker push 123456789012.dkr.ecr.<region>.amazonaws.com/my-repo:latest

# CloudWatch logs
aws logs describe-log-groups
aws logs filter-log-events \
  --log-group-name /aws/eks/my-cluster/cluster \
  --start-time 1500000000000

# RDS commands
aws rds describe-db-instances
aws rds create-db-instance \
  --db-instance-identifier mydbinstance \
  --db-instance-class db.t2.micro \
  --engine mysql

CI/CD Pipelines

Jenkins

# Jenkinsfile (Declarative Pipeline)
pipeline {
  agent any
  stages {
    stage('Build') {
      steps {
        sh 'mvn clean compile'
      }
    }
    stage('Test') {
      steps {
        sh 'mvn test'
      }
      post {
        always {
          junit '**/target/surefire-reports/TEST-*.xml'
        }
      }
    }
    stage('Deploy') {
      steps {
        sh 'kubectl apply -f k8s/'
      }
    }
  }
  post {
    always {
      emailext (
        subject: "Job '${env.JOB_NAME}' (${env.BUILD_NUMBER})",
        body: "Please check: ${env.BUILD_URL}",
        to: "devops@example.com"
      )
    }
  }
}

# Jenkins CLI
java -jar jenkins-cli.jar -s http://localhost:8080/ list-jobs
java -jar jenkins-cli.jar -s http://localhost:8080/ build MyJob

GitHub Actions

# .github/workflows/ci-cd.yml
name: CI/CD Pipeline

on:
  push:
    branches: [ main ]
  pull_request:
    branches: [ main ]

jobs:
  build:
    runs-on: ubuntu-latest
    steps:
    - uses: actions/checkout@v2
    - name: Set up Node.js
      uses: actions/setup-node@v2
      with:
        node-version: '14'
    - name: Install dependencies
      run: npm ci
    - name: Run tests
      run: npm test
    - name: Build
      run: npm run build
    - name: Upload artifact
      uses: actions/upload-artifact@v2
      with:
        name: build-output
        path: build/

  deploy:
    needs: build
    runs-on: ubuntu-latest
    if: github.ref == 'refs/heads/main'
    steps:
    - uses: actions/checkout@v2
    - name: Download artifact
      uses: actions/download-artifact@v2
      with:
        name: build-output
    - name: Deploy to production
      run: ./deploy.sh
      env:
        AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }}
        AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }}

Infrastructure as Code

Terraform

# main.tf
terraform {
  required_providers {
    aws = {
      source = "hashicorp/aws"
      version = "~> 3.0"
    }
  }
}

provider "aws" {
  region = "us-west-2"
}

resource "aws_instance" "web" {
  ami = "ami-0c55b159cbfafe1f0"
  instance_type = "t2.micro"
  tags = {
    Name = "HelloWorld"
  }
}

# Terraform commands
terraform init
terraform plan
terraform apply
terraform apply -auto-approve
terraform destroy
terraform state list
terraform output
terraform fmt
terraform validate

# Workspaces
terraform workspace new dev
terraform workspace select dev
terraform workspace list

Ansible

# playbook.yml
- hosts: webservers
  become: true
  vars:
    http_port: 80
    max_clients: 200
  tasks:
    - name: Ensure Apache is at the latest version
      yum:
        name: httpd
        state: latest
    - name: Write the Apache config file
      template:
        src: /srv/httpd.j2
        dest: /etc/httpd.conf
      notify:
        - Restart Apache
    - name: Ensure Apache is running
      service:
        name: httpd
        state: started
  handlers:
    - name: Restart Apache
      service:
        name: httpd
        state: restarted

# Ansible commands
ansible-playbook playbook.yml
ansible-playbook playbook.yml --limit webservers
ansible-playbook playbook.yml --check
ansible-playbook playbook.yml --tags configuration
ansible all -m ping
ansible webservers -a "uptime"
ansible webservers -m copy -a "src=/etc/hosts dest=/tmp/hosts"

Git & Linux

Git Commands

# Basic Git commands
git init
git clone https://github.com/user/repo.git
git status
git add .
git commit -m "Commit message"
git push
git pull

# Branching
git branch
git branch new-feature
git checkout new-feature
git checkout -b new-feature
git merge new-feature
git branch -d new-feature

# Remote repositories
git remote add origin https://github.com/user/repo.git
git remote -v
git fetch
git push -u origin main

# Undoing changes
git reset --hard HEAD
git revert <commit_id>
git checkout -- file.txt

# Stashing
git stash
git stash list
git stash apply
git stash drop

Linux Commands

# File operations
ls -la
cd /path/to/directory
pwd
mkdir new_directory
rm file.txt
rm -rf directory
cp file1.txt file2.txt
mv oldname.txt newname.txt
chmod 755 script.sh
chown user:group file.txt

# File viewing
cat file.txt
less file.txt
head -n 10 file.txt
tail -n 10 file.txt
tail -f /var/log/syslog

# Process management
ps aux
top
htop
kill 1234
kill -9 1234
pkill process_name

# System info
uname -a
df -h
free -h
uptime
whoami

# Networking
ifconfig # deprecated on modern Linux; prefer 'ip addr'
ping google.com
netstat -tulpn # deprecated; prefer 'ss -tulpn'
ssh user@host
scp file.txt user@host:/path/

Monitoring & Logging

Prometheus & Grafana

# prometheus.yml
global:
  scrape_interval: 15s

scrape_configs:
  - job_name: 'prometheus'
    static_configs:
      - targets: ['localhost:9090']
  - job_name: 'node'
    static_configs:
      - targets: ['node-exporter:9100']

# Prometheus queries
up
node_memory_MemAvailable_bytes
rate(node_cpu_seconds_total{mode="idle"}[1m])
histogram_quantile(0.95, rate(
  http_request_duration_seconds_bucket[5m]
))

# Alert rules
groups:
- name: example
  rules:
  - alert: HighMemoryUsage
    expr: node_memory_MemAvailable_bytes / node_memory_MemTotal_bytes * 100 < 10
    for: 5m
    labels:
      severity: warning
    annotations:
      summary: "High memory usage on {{ $labels.instance }}"

# Grafana dashboard import
curl -X POST \
  -H "Content-Type: application/json" \
  -d @dashboard.json \
  http://admin:admin@grafana:3000/api/dashboards/db

ELK Stack

# Filebeat configuration
filebeat.inputs:
- type: log
  enabled: true
  paths:
    - /var/log/*.log

output.elasticsearch:
  hosts: ["elasticsearch:9200"]

# Logstash configuration
input {
  beats {
    port => 5044
  }
}

filter {
  grok {
    match => { "message" => "%{COMBINEDAPACHELOG}" }
  }
  date {
    match => [ "timestamp", "dd/MMM/yyyy:HH:mm:ss Z" ]
  }
}

output {
  elasticsearch {
    hosts => ["elasticsearch:9200"]
    index => "logs-%{+YYYY.MM.dd}"
  }
}

# Elasticsearch queries
GET /_search
{
  "query": {
    "match": {
      "message": "error"
    }
  }
}

GET /logs-*/_search
{
  "query": {
    "range": {
      "@timestamp": {
        "gte": "now-1h"
      }
    }
  }
}

# Kibana Discover queries
response:200
message: "error"
@timestamp > now-1h