Vamos juntar tudo num projeto completo: uma API de "tarefas" (todo list) em Go, dockerizada, rodando em Kubernetes com ConfigMap e atualização sem downtime.
Estrutura do projeto
todo-app/
├── main.go
├── go.mod
├── Dockerfile
├── .dockerignore
└── k8s/
├── configmap.yaml
├── deployment.yaml
└── service.yaml
main.go
go
package main
import (
"encoding/json"
"fmt"
"log"
"net/http"
"os"
"os/signal"
"sync"
"syscall"
"time"
)
type Task struct {
ID int `json:"id"`
Text string `json:"text"`
Done bool `json:"done"`
}
var (
tasks = []Task{}
nextID = 1
mu sync.Mutex
)
func listTasks(w http.ResponseWriter, r *http.Request) {
mu.Lock()
defer mu.Unlock()
json.NewEncoder(w).Encode(tasks)
}
func createTask(w http.ResponseWriter, r *http.Request) {
var t Task
if err := json.NewDecoder(r.Body).Decode(&t); err != nil {
http.Error(w, err.Error(), http.StatusBadRequest)
return
}
mu.Lock()
t.ID = nextID
nextID++
tasks = append(tasks, t)
mu.Unlock()
w.WriteHeader(http.StatusCreated)
json.NewEncoder(w).Encode(t)
}
func tasksHandler(w http.ResponseWriter, r *http.Request) {
switch r.Method {
case http.MethodGet:
listTasks(w, r)
case http.MethodPost:
createTask(w, r)
default:
http.Error(w, "method not allowed", http.StatusMethodNotAllowed)
}
}
func health(w http.ResponseWriter, r *http.Request) {
fmt.Fprintln(w, "OK")
}
func info(w http.ResponseWriter, r *http.Request) {
hostname, _ := os.Hostname()
json.NewEncoder(w).Encode(map[string]string{
"hostname": hostname,
"version": os.Getenv("APP_VERSION"),
"env": os.Getenv("APP_ENV"),
})
}
func main() {
port := os.Getenv("PORT")
if port == "" {
port = "8080"
}
mux := http.NewServeMux()
mux.HandleFunc("/tasks", tasksHandler)
mux.HandleFunc("/health", health)
mux.HandleFunc("/info", info)
server := &http.Server{
Addr: ":" + port,
Handler: mux,
}
// Graceful shutdown
go func() {
sigs := make(chan os.Signal, 1)
signal.Notify(sigs, syscall.SIGINT, syscall.SIGTERM)
<-sigs
log.Println("Encerrando graciosamente...")
server.Shutdown(nil)
}()
log.Printf("Servidor na porta %s", port)
if err := server.ListenAndServe(); err != http.ErrServerClosed {
log.Fatal(err)
}
time.Sleep(2 * time.Second)
}Dockerfile
dockerfile
FROM golang:1.26-alpine AS builder
WORKDIR /build
COPY go.mod ./
RUN go mod download || true
COPY . .
RUN CGO_ENABLED=0 GOOS=linux go build -o server .
FROM alpine:3.19
RUN adduser -D -u 1000 appuser
USER appuser
WORKDIR /app
COPY --from=builder /build/server .
EXPOSE 8080
CMD ["./server"]k8s/configmap.yaml
yaml
apiVersion: v1
kind: ConfigMap
metadata:
name: todo-config
data:
PORT: "8080"
APP_ENV: "development"
APP_VERSION: "1.0.0"k8s/deployment.yaml
yaml
apiVersion: apps/v1
kind: Deployment
metadata:
name: todo-app
labels:
app: todo-app
spec:
replicas: 3
strategy:
type: RollingUpdate
rollingUpdate:
maxSurge: 1
maxUnavailable: 0
selector:
matchLabels:
app: todo-app
template:
metadata:
labels:
app: todo-app
spec:
containers:
- name: app
image: todo-app:v1
imagePullPolicy: IfNotPresent
ports:
- containerPort: 8080
envFrom:
- configMapRef:
name: todo-config
livenessProbe:
httpGet:
path: /health
port: 8080
initialDelaySeconds: 5
periodSeconds: 10
readinessProbe:
httpGet:
path: /health
port: 8080
initialDelaySeconds: 2
periodSeconds: 5
resources:
requests:
memory: "32Mi"
cpu: "50m"
limits:
memory: "128Mi"
cpu: "200m"k8s/service.yaml
yaml
apiVersion: v1
kind: Service
metadata:
name: todo-service
spec:
selector:
app: todo-app
ports:
- port: 80
targetPort: 8080
type: ClusterIPRoteiro de execução
bash
# 1. Subir minikube
minikube start
# 2. Configurar para usar docker do minikube
eval $(minikube docker-env)
# 3. Build
docker build -t todo-app:v1 .
# 4. Aplicar tudo
kubectl apply -f k8s/
# 5. Acompanhar
kubectl get pods -w
# (Ctrl+C quando os 3 estiverem Running)
# 6. Port-forward para testar
kubectl port-forward service/todo-service 8080:80 &
# 7. Testar a API
curl http://localhost:8080/health
curl http://localhost:8080/info
curl -X POST http://localhost:8080/tasks \
-H "Content-Type: application/json" \
-d '{"text":"estudar k8s","done":false}'
curl http://localhost:8080/tasks
# 8. Simular falha — matar um pod e ver o k8s recriar
kubectl delete pod -l app=todo-app --force
kubectl get pods -w
# 9. Atualizar configuração (mudar APP_VERSION)
# Edite k8s/configmap.yaml mudando para "1.1.0"
kubectl apply -f k8s/configmap.yaml
kubectl rollout restart deployment todo-app
kubectl rollout status deployment todo-app
# 10. Limpar tudo
kubectl delete -f k8s/