# Docker Hands-On Projects
This chapter works through several complete hands-on projects that apply the Docker skills from earlier chapters to real-world scenarios, covering web applications, microservices, data processing, and more.
## Project 1: Full-Stack Web Application
### Project Overview
Build a complete web application with a frontend, a backend, a database, and a cache:
- Frontend: React application
- Backend: Node.js API service
- Database: PostgreSQL
- Cache: Redis
- Reverse proxy: Nginx
### Project Structure

```
fullstack-app/
├── frontend/
│ ├── src/
│ ├── public/
│ ├── package.json
│ └── Dockerfile
├── backend/
│ ├── src/
│ ├── package.json
│ └── Dockerfile
├── nginx/
│ └── nginx.conf
├── docker-compose.yml
├── docker-compose.prod.yml
└── .env
```
### Frontend Dockerfile

```dockerfile
# frontend/Dockerfile
# Multi-stage build
FROM node:16-alpine AS builder
WORKDIR /app
COPY package*.json ./
# Install all dependencies (devDependencies are needed by the build step)
RUN npm ci
COPY . .
RUN npm run build
# Production stage
FROM nginx:alpine
# Copy the build artifacts
COPY --from=builder /app/build /usr/share/nginx/html
# Copy the nginx config
COPY nginx.conf /etc/nginx/conf.d/default.conf
EXPOSE 80
CMD ["nginx", "-g", "daemon off;"]后端 Dockerfile
### Backend Dockerfile

```dockerfile
# backend/Dockerfile
FROM node:16-alpine
# Create a dedicated app user
RUN addgroup -g 1001 -S nodejs && \
    adduser -S nodeapp -u 1001
WORKDIR /app
# Copy dependency manifests
COPY package*.json ./
# Install production dependencies
RUN npm ci --only=production && npm cache clean --force
# Copy the application code
COPY --chown=nodeapp:nodejs . .
# Switch to the non-root user
USER nodeapp
EXPOSE 3000
# Health check
HEALTHCHECK --interval=30s --timeout=3s --start-period=5s --retries=3 \
CMD node healthcheck.js
CMD ["npm", "start"]Docker Compose 配置
### Docker Compose Configuration

```yaml
# docker-compose.yml
version: '3.8'
services:
  # Nginx reverse proxy
nginx:
image: nginx:alpine
ports:
- "80:80"
volumes:
      - ./nginx/nginx.conf:/etc/nginx/conf.d/default.conf:ro
depends_on:
- frontend
- backend
networks:
- frontend
restart: unless-stopped
  # React frontend
frontend:
build:
context: ./frontend
dockerfile: Dockerfile
networks:
- frontend
restart: unless-stopped
  # Node.js backend
backend:
build:
context: ./backend
dockerfile: Dockerfile
environment:
- NODE_ENV=production
- DATABASE_URL=postgresql://postgres:${DB_PASSWORD}@db:5432/${DB_NAME}
- REDIS_URL=redis://redis:6379
- JWT_SECRET=${JWT_SECRET}
depends_on:
- db
- redis
networks:
- frontend
- backend
restart: unless-stopped
  # PostgreSQL database
db:
image: postgres:13-alpine
environment:
- POSTGRES_DB=${DB_NAME}
- POSTGRES_USER=postgres
- POSTGRES_PASSWORD=${DB_PASSWORD}
volumes:
- postgres_data:/var/lib/postgresql/data
- ./backend/init.sql:/docker-entrypoint-initdb.d/init.sql
networks:
- backend
restart: unless-stopped
  # Redis cache
redis:
image: redis:alpine
command: redis-server --appendonly yes
volumes:
- redis_data:/data
networks:
- backend
restart: unless-stopped
volumes:
postgres_data:
redis_data:
networks:
frontend:
driver: bridge
backend:
driver: bridge
    internal: true
```
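The project tree lists a `docker-compose.prod.yml` that is not shown above. A minimal sketch of what such a production override might contain; the resource limits and logging options are illustrative:

```yaml
# docker-compose.prod.yml — illustrative override
version: '3.8'
services:
  backend:
    deploy:
      resources:
        limits:
          cpus: '1.0'
          memory: 512M
    logging:
      driver: json-file
      options:
        max-size: "10m"
        max-file: "3"
```

Apply it on top of the base file with `docker-compose -f docker-compose.yml -f docker-compose.prod.yml up -d`.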
### Nginx Configuration

```nginx
# nginx/nginx.conf
upstream frontend {
server frontend:80;
}
upstream backend {
server backend:3000;
}
server {
listen 80;
server_name localhost;
    # Frontend routes
location / {
proxy_pass http://frontend;
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $scheme;
}
    # API routes
location /api/ {
proxy_pass http://backend/;
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $scheme;
}
    # Health check
location /health {
access_log off;
return 200 "healthy\n";
add_header Content-Type text/plain;
}
}
```
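After editing the proxy configuration, it can be validated and reloaded inside the running container without downtime:

```bash
# Validate the configuration as loaded by the proxy container
docker-compose exec nginx nginx -t
# Reload the configuration without restarting the container
docker-compose exec nginx nginx -s reload
```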
### Environment Variables

```bash
# .env
DB_NAME=fullstack_app
DB_PASSWORD=secure_password_123
JWT_SECRET=your_jwt_secret_key_here
# Not used by the compose file above unless Redis auth (redis-server --requirepass) is enabled
REDIS_PASSWORD=redis_password_123
```
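These values are placeholders. For anything beyond local experiments, generate random secrets instead, for example:

```bash
# Generate random secrets for the .env file
echo "DB_PASSWORD=$(openssl rand -base64 24)"
echo "JWT_SECRET=$(openssl rand -base64 32)"
```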
### Deployment Script

```bash
#!/bin/bash
# deploy.sh
set -e
echo "🚀 部署全栈应用..."
# Pull the latest code
git pull origin main
# Build and start the services
docker-compose build --no-cache
docker-compose up -d
# Wait for services to start
echo "⏳ Waiting for services to start..."
sleep 30
# Health check
echo "🔍 Checking service status..."
docker-compose ps
# Test the app
echo "🧪 Testing the app..."
curl -f http://localhost/health || exit 1
curl -f http://localhost/api/health || exit 1
echo "✅ 部署成功!"
echo "🌐 应用访问地址: http://localhost"项目二:微服务电商系统
### Project Overview
Build an e-commerce system on a microservice architecture:
- User service: registration, login, profile management
- Product service: product catalog management
- Order service: order processing
- Payment service: payment handling
- API gateway: single entry point
- Service discovery: Consul
- Message queue: RabbitMQ
- Monitoring: Prometheus + Grafana
### Project Structure

```
microservices-ecommerce/
├── services/
│ ├── user-service/
│ ├── product-service/
│ ├── order-service/
│ └── payment-service/
├── api-gateway/
├── monitoring/
│ ├── prometheus/
│ └── grafana/
├── docker-compose.yml
└── docker-compose.monitoring.yml
```
### Microservices Docker Compose

```yaml
# docker-compose.yml
version: '3.8'
services:
  # API gateway
api-gateway:
build: ./api-gateway
ports:
- "8080:8080"
environment:
- CONSUL_URL=http://consul:8500
depends_on:
- consul
networks:
- microservices
restart: unless-stopped
  # Service discovery
consul:
    image: hashicorp/consul:latest  # the unprefixed "consul" image is deprecated
ports:
- "8500:8500"
command: agent -server -bootstrap -ui -client=0.0.0.0
networks:
- microservices
restart: unless-stopped
  # User service
user-service:
build: ./services/user-service
environment:
- DATABASE_URL=postgresql://postgres:password@user-db:5432/users
- CONSUL_URL=http://consul:8500
      - RABBITMQ_URL=amqp://admin:password@rabbitmq:5672
depends_on:
- user-db
- consul
- rabbitmq
networks:
- microservices
deploy:
      replicas: 2  # honored by "docker compose" v2 and Swarm; ignored by legacy docker-compose v1
restart: unless-stopped
user-db:
image: postgres:13-alpine
environment:
- POSTGRES_DB=users
- POSTGRES_PASSWORD=password
volumes:
- user_db_data:/var/lib/postgresql/data
networks:
- microservices
restart: unless-stopped
  # Product service
product-service:
build: ./services/product-service
environment:
- DATABASE_URL=postgresql://postgres:password@product-db:5432/products
- CONSUL_URL=http://consul:8500
- REDIS_URL=redis://redis:6379
depends_on:
- product-db
- consul
- redis
networks:
- microservices
deploy:
replicas: 2
restart: unless-stopped
product-db:
image: postgres:13-alpine
environment:
- POSTGRES_DB=products
- POSTGRES_PASSWORD=password
volumes:
- product_db_data:/var/lib/postgresql/data
networks:
- microservices
restart: unless-stopped
  # Order service
order-service:
build: ./services/order-service
environment:
- DATABASE_URL=postgresql://postgres:password@order-db:5432/orders
- CONSUL_URL=http://consul:8500
      - RABBITMQ_URL=amqp://admin:password@rabbitmq:5672
depends_on:
- order-db
- consul
- rabbitmq
networks:
- microservices
deploy:
replicas: 2
restart: unless-stopped
order-db:
image: postgres:13-alpine
environment:
- POSTGRES_DB=orders
- POSTGRES_PASSWORD=password
volumes:
- order_db_data:/var/lib/postgresql/data
networks:
- microservices
restart: unless-stopped
  # Payment service
payment-service:
build: ./services/payment-service
environment:
- DATABASE_URL=postgresql://postgres:password@payment-db:5432/payments
- CONSUL_URL=http://consul:8500
      - RABBITMQ_URL=amqp://admin:password@rabbitmq:5672
depends_on:
- payment-db
- consul
- rabbitmq
networks:
- microservices
restart: unless-stopped
payment-db:
image: postgres:13-alpine
environment:
- POSTGRES_DB=payments
- POSTGRES_PASSWORD=password
volumes:
- payment_db_data:/var/lib/postgresql/data
networks:
- microservices
restart: unless-stopped
  # Redis cache
redis:
image: redis:alpine
networks:
- microservices
restart: unless-stopped
  # RabbitMQ message queue
rabbitmq:
image: rabbitmq:3-management-alpine
ports:
- "15672:15672" # 管理界面
environment:
- RABBITMQ_DEFAULT_USER=admin
- RABBITMQ_DEFAULT_PASS=password
networks:
- microservices
restart: unless-stopped
volumes:
user_db_data:
product_db_data:
order_db_data:
payment_db_data:
networks:
microservices:
    driver: bridge
```
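Each service receives a CONSUL_URL and is expected to register itself on startup. For reference, registration against Consul's HTTP API looks roughly like this; the service name, port, and health-check route are illustrative:

```bash
# Register a service instance with the Consul agent (illustrative payload)
curl -X PUT http://localhost:8500/v1/agent/service/register \
  -H 'Content-Type: application/json' \
  -d '{
        "Name": "user-service",
        "Port": 3000,
        "Check": { "HTTP": "http://user-service:3000/health", "Interval": "10s" }
      }'
```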
### Monitoring Configuration

```yaml
# docker-compose.monitoring.yml
version: '3.8'
services:
  # Prometheus
prometheus:
image: prom/prometheus:latest
ports:
- "9090:9090"
volumes:
- ./monitoring/prometheus/prometheus.yml:/etc/prometheus/prometheus.yml
- prometheus_data:/prometheus
command:
- '--config.file=/etc/prometheus/prometheus.yml'
- '--storage.tsdb.path=/prometheus'
- '--web.console.libraries=/etc/prometheus/console_libraries'
- '--web.console.templates=/etc/prometheus/consoles'
- '--web.enable-lifecycle'
networks:
- monitoring
restart: unless-stopped
  # Grafana dashboards
grafana:
image: grafana/grafana:latest
ports:
- "3000:3000"
environment:
- GF_SECURITY_ADMIN_PASSWORD=admin
volumes:
- grafana_data:/var/lib/grafana
- ./monitoring/grafana/dashboards:/etc/grafana/provisioning/dashboards
- ./monitoring/grafana/datasources:/etc/grafana/provisioning/datasources
networks:
- monitoring
restart: unless-stopped
# Node Exporter
node-exporter:
image: prom/node-exporter:latest
ports:
- "9100:9100"
volumes:
- /proc:/host/proc:ro
- /sys:/host/sys:ro
- /:/rootfs:ro
command:
- '--path.procfs=/host/proc'
- '--path.rootfs=/rootfs'
- '--path.sysfs=/host/sys'
- '--collector.filesystem.mount-points-exclude=^/(sys|proc|dev|host|etc)($$|/)'
networks:
- monitoring
restart: unless-stopped
  # cAdvisor container metrics
cadvisor:
image: gcr.io/cadvisor/cadvisor:latest
ports:
- "8081:8080"
volumes:
- /:/rootfs:ro
- /var/run:/var/run:ro
- /sys:/sys:ro
- /var/lib/docker/:/var/lib/docker:ro
- /dev/disk/:/dev/disk:ro
privileged: true
devices:
- /dev/kmsg
networks:
- monitoring
restart: unless-stopped
volumes:
prometheus_data:
grafana_data:
networks:
monitoring:
    driver: bridge
```
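The mounted `monitoring/prometheus/prometheus.yml` is not shown above; a minimal sketch that scrapes Prometheus itself plus the two exporters defined in this file:

```yaml
# monitoring/prometheus/prometheus.yml — minimal sketch
global:
  scrape_interval: 15s

scrape_configs:
  - job_name: 'prometheus'
    static_configs:
      - targets: ['localhost:9090']
  - job_name: 'node-exporter'
    static_configs:
      - targets: ['node-exporter:9100']
  - job_name: 'cadvisor'
    static_configs:
      - targets: ['cadvisor:8080']
```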
### Startup Script

```bash
#!/bin/bash
# start-microservices.sh
set -e
echo "🚀 启动微服务系统..."
# 启动基础设施服务
echo "📦 启动基础设施服务..."
docker-compose up -d consul rabbitmq redis
# Wait for infrastructure services
echo "⏳ Waiting for infrastructure services..."
sleep 30
# Start database services
echo "🗄️ Starting database services..."
docker-compose up -d user-db product-db order-db payment-db
# Wait for the databases
echo "⏳ Waiting for the databases..."
sleep 20
# Start the microservices
echo "🔧 Starting the microservices..."
docker-compose up -d user-service product-service order-service payment-service
# Start the API gateway
echo "🌐 Starting the API gateway..."
docker-compose up -d api-gateway
# Start the monitoring stack
echo "📊 Starting the monitoring stack..."
docker-compose -f docker-compose.monitoring.yml up -d
echo "✅ 微服务系统启动完成!"
echo "🌐 API 网关: http://localhost:8080"
echo "📊 Grafana: http://localhost:3000 (admin/admin)"
echo "🔍 Prometheus: http://localhost:9090"
echo "🐰 RabbitMQ: http://localhost:15672 (admin/password)"
echo "🔍 Consul: http://localhost:8500"项目三:数据处理管道
## Project 3: Data Processing Pipeline

### Project Overview
Build a data processing pipeline:
- Data ingestion: Kafka
- Data processing: Apache Spark
- Data storage: Elasticsearch
- Data visualization: Kibana
- Workflow orchestration: Apache Airflow
### Docker Compose Configuration

```yaml
# docker-compose.yml
version: '3.8'
services:
  # Zookeeper (required by Kafka)
zookeeper:
image: confluentinc/cp-zookeeper:latest
environment:
ZOOKEEPER_CLIENT_PORT: 2181
ZOOKEEPER_TICK_TIME: 2000
networks:
- data-pipeline
restart: unless-stopped
  # Kafka message broker
kafka:
image: confluentinc/cp-kafka:latest
depends_on:
- zookeeper
ports:
- "9092:9092"
environment:
KAFKA_BROKER_ID: 1
KAFKA_ZOOKEEPER_CONNECT: zookeeper:2181
      # Two listeners: one for containers on the Compose network, one for the host
      KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://kafka:29092,PLAINTEXT_HOST://localhost:9092
      KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: PLAINTEXT:PLAINTEXT,PLAINTEXT_HOST:PLAINTEXT
      KAFKA_INTER_BROKER_LISTENER_NAME: PLAINTEXT
KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: 1
networks:
- data-pipeline
restart: unless-stopped
# Spark Master
spark-master:
image: bitnami/spark:latest
environment:
- SPARK_MODE=master
- SPARK_RPC_AUTHENTICATION_ENABLED=no
- SPARK_RPC_ENCRYPTION_ENABLED=no
- SPARK_LOCAL_STORAGE_ENCRYPTION_ENABLED=no
- SPARK_SSL_ENABLED=no
ports:
- "8080:8080"
- "7077:7077"
networks:
- data-pipeline
restart: unless-stopped
# Spark Worker
spark-worker:
image: bitnami/spark:latest
environment:
- SPARK_MODE=worker
- SPARK_MASTER_URL=spark://spark-master:7077
- SPARK_WORKER_MEMORY=2G
- SPARK_WORKER_CORES=2
- SPARK_RPC_AUTHENTICATION_ENABLED=no
- SPARK_RPC_ENCRYPTION_ENABLED=no
- SPARK_LOCAL_STORAGE_ENCRYPTION_ENABLED=no
- SPARK_SSL_ENABLED=no
depends_on:
- spark-master
networks:
- data-pipeline
deploy:
replicas: 2
restart: unless-stopped
# Elasticsearch
elasticsearch:
image: docker.elastic.co/elasticsearch/elasticsearch:7.15.0
environment:
- discovery.type=single-node
- "ES_JAVA_OPTS=-Xms512m -Xmx512m"
ports:
- "9200:9200"
volumes:
- elasticsearch_data:/usr/share/elasticsearch/data
networks:
- data-pipeline
restart: unless-stopped
# Kibana
kibana:
image: docker.elastic.co/kibana/kibana:7.15.0
ports:
- "5601:5601"
environment:
ELASTICSEARCH_HOSTS: http://elasticsearch:9200
depends_on:
- elasticsearch
networks:
- data-pipeline
restart: unless-stopped
  # PostgreSQL (Airflow metadata)
postgres:
image: postgres:13-alpine
environment:
- POSTGRES_USER=airflow
- POSTGRES_PASSWORD=airflow
- POSTGRES_DB=airflow
volumes:
- postgres_data:/var/lib/postgresql/data
networks:
- data-pipeline
restart: unless-stopped
# Redis (Airflow Celery)
redis:
image: redis:alpine
networks:
- data-pipeline
restart: unless-stopped
# Airflow Webserver
airflow-webserver:
image: apache/airflow:2.5.0
depends_on:
- postgres
- redis
environment:
- AIRFLOW__CORE__EXECUTOR=CeleryExecutor
- AIRFLOW__DATABASE__SQL_ALCHEMY_CONN=postgresql+psycopg2://airflow:airflow@postgres/airflow
- AIRFLOW__CELERY__RESULT_BACKEND=db+postgresql://airflow:airflow@postgres/airflow
- AIRFLOW__CELERY__BROKER_URL=redis://:@redis:6379/0
- AIRFLOW__CORE__FERNET_KEY=''
- AIRFLOW__CORE__DAGS_ARE_PAUSED_AT_CREATION=true
- AIRFLOW__CORE__LOAD_EXAMPLES=false
- AIRFLOW__API__AUTH_BACKENDS=airflow.api.auth.backend.basic_auth
volumes:
- ./dags:/opt/airflow/dags
- ./logs:/opt/airflow/logs
- ./plugins:/opt/airflow/plugins
ports:
- "8081:8080"
command: webserver
networks:
- data-pipeline
restart: unless-stopped
# Airflow Scheduler
airflow-scheduler:
image: apache/airflow:2.5.0
depends_on:
- postgres
- redis
environment:
- AIRFLOW__CORE__EXECUTOR=CeleryExecutor
- AIRFLOW__DATABASE__SQL_ALCHEMY_CONN=postgresql+psycopg2://airflow:airflow@postgres/airflow
- AIRFLOW__CELERY__RESULT_BACKEND=db+postgresql://airflow:airflow@postgres/airflow
- AIRFLOW__CELERY__BROKER_URL=redis://:@redis:6379/0
- AIRFLOW__CORE__FERNET_KEY=''
- AIRFLOW__CORE__DAGS_ARE_PAUSED_AT_CREATION=true
- AIRFLOW__CORE__LOAD_EXAMPLES=false
volumes:
- ./dags:/opt/airflow/dags
- ./logs:/opt/airflow/logs
- ./plugins:/opt/airflow/plugins
command: scheduler
networks:
- data-pipeline
restart: unless-stopped
# Airflow Worker
airflow-worker:
image: apache/airflow:2.5.0
depends_on:
- postgres
- redis
environment:
- AIRFLOW__CORE__EXECUTOR=CeleryExecutor
- AIRFLOW__DATABASE__SQL_ALCHEMY_CONN=postgresql+psycopg2://airflow:airflow@postgres/airflow
- AIRFLOW__CELERY__RESULT_BACKEND=db+postgresql://airflow:airflow@postgres/airflow
- AIRFLOW__CELERY__BROKER_URL=redis://:@redis:6379/0
- AIRFLOW__CORE__FERNET_KEY=''
volumes:
- ./dags:/opt/airflow/dags
- ./logs:/opt/airflow/logs
- ./plugins:/opt/airflow/plugins
command: celery worker
networks:
- data-pipeline
restart: unless-stopped
volumes:
elasticsearch_data:
postgres_data:
networks:
data-pipeline:
    driver: bridge
```
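Airflow 2.x needs its metadata database initialized and an admin user created before the web UI is usable. One way to do that with the services above, once postgres and redis are running:

```bash
# Initialize the Airflow metadata database
docker-compose run --rm airflow-webserver airflow db init
# Create an admin user for the web UI
docker-compose run --rm airflow-webserver airflow users create \
  --username admin --password admin \
  --firstname Admin --lastname User \
  --role Admin --email admin@example.com
```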
### Sample Data Processing DAG

```python
# dags/data_processing_dag.py
from datetime import datetime, timedelta
from airflow import DAG
from airflow.operators.python import PythonOperator
from airflow.operators.bash import BashOperator
default_args = {
'owner': 'data-team',
'depends_on_past': False,
'start_date': datetime(2023, 1, 1),
'email_on_failure': False,
'email_on_retry': False,
'retries': 1,
'retry_delay': timedelta(minutes=5)
}
dag = DAG(
'data_processing_pipeline',
default_args=default_args,
    description='Data processing pipeline',
schedule_interval=timedelta(hours=1),
catchup=False
)
def extract_data():
    """Extract data."""
    print("Extracting data from Kafka...")
    # Real extraction logic goes here

def transform_data():
    """Transform data."""
    print("Transforming data with Spark...")
    # Real transformation logic goes here

def load_data():
    """Load data."""
    print("Loading data into Elasticsearch...")
    # Real loading logic goes here
# Define the tasks
extract_task = PythonOperator(
task_id='extract_data',
python_callable=extract_data,
dag=dag
)
transform_task = BashOperator(
task_id='transform_data',
bash_command='spark-submit --master spark://spark-master:7077 /opt/airflow/dags/transform_job.py',
dag=dag
)
load_task = PythonOperator(
task_id='load_data',
python_callable=load_data,
dag=dag
)
# Define task dependencies
extract_task >> transform_task >> load_task
```
## Project 4: DevOps Toolchain

### Project Overview
Build a complete DevOps toolchain:
- Version control: GitLab
- CI/CD: Jenkins
- Artifact repository: Nexus
- Container registry: Harbor
- Monitoring: Prometheus + Grafana
- Logging: ELK Stack
- Code quality and security scanning: SonarQube
### Docker Compose Configuration

```yaml
# docker-compose.devops.yml
version: '3.8'
services:
# GitLab
gitlab:
image: gitlab/gitlab-ce:latest
hostname: gitlab.local
ports:
- "80:80"
- "443:443"
- "22:22"
volumes:
- gitlab_config:/etc/gitlab
- gitlab_logs:/var/log/gitlab
- gitlab_data:/var/opt/gitlab
environment:
GITLAB_OMNIBUS_CONFIG: |
external_url 'http://gitlab.local'
        gitlab_rails['gitlab_shell_ssh_port'] = 2222
networks:
- devops
restart: unless-stopped
# Jenkins
jenkins:
image: jenkins/jenkins:lts
ports:
- "8080:8080"
- "50000:50000"
volumes:
- jenkins_home:/var/jenkins_home
- /var/run/docker.sock:/var/run/docker.sock
environment:
- JAVA_OPTS=-Djenkins.install.runSetupWizard=false
networks:
- devops
restart: unless-stopped
# Nexus Repository
nexus:
image: sonatype/nexus3:latest
ports:
- "8081:8081"
volumes:
- nexus_data:/nexus-data
networks:
- devops
restart: unless-stopped
# Harbor Registry
harbor-core:
image: goharbor/harbor-core:latest
depends_on:
- harbor-db
- redis
environment:
- CORE_SECRET=not-a-secure-secret
- JOBSERVICE_SECRET=not-a-secure-secret
volumes:
- harbor_config:/etc/core
networks:
- devops
restart: unless-stopped
harbor-db:
image: goharbor/harbor-db:latest
environment:
- POSTGRES_PASSWORD=root123
volumes:
- harbor_db:/var/lib/postgresql/data
networks:
- devops
    restart: unless-stopped
  # Redis (required by harbor-core above)
  redis:
    image: redis:alpine
    networks:
      - devops
    restart: unless-stopped
# SonarQube
sonarqube:
image: sonarqube:community
ports:
- "9000:9000"
environment:
- SONAR_JDBC_URL=jdbc:postgresql://sonar-db:5432/sonar
- SONAR_JDBC_USERNAME=sonar
- SONAR_JDBC_PASSWORD=sonar
volumes:
- sonarqube_data:/opt/sonarqube/data
- sonarqube_extensions:/opt/sonarqube/extensions
- sonarqube_logs:/opt/sonarqube/logs
depends_on:
- sonar-db
networks:
- devops
restart: unless-stopped
sonar-db:
image: postgres:13-alpine
environment:
- POSTGRES_USER=sonar
- POSTGRES_PASSWORD=sonar
- POSTGRES_DB=sonar
volumes:
- sonar_db:/var/lib/postgresql/data
networks:
- devops
restart: unless-stopped
volumes:
gitlab_config:
gitlab_logs:
gitlab_data:
jenkins_home:
nexus_data:
harbor_config:
harbor_db:
sonarqube_data:
sonarqube_extensions:
sonarqube_logs:
sonar_db:
networks:
devops:
    driver: bridge
```
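Several of these tools generate their initial admin credentials on first start; they can be read out of the containers like this:

```bash
# Nexus: initial admin password
docker-compose -f docker-compose.devops.yml exec nexus cat /nexus-data/admin.password
# GitLab: initial root password (kept for 24 hours after first boot)
docker-compose -f docker-compose.devops.yml exec gitlab grep 'Password:' /etc/gitlab/initial_root_password
```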
### Jenkins Pipeline Example

```groovy
// Jenkinsfile
pipeline {
agent any
environment {
DOCKER_REGISTRY = 'harbor.local'
IMAGE_NAME = 'myapp'
IMAGE_TAG = "${BUILD_NUMBER}"
}
stages {
stage('Checkout') {
steps {
checkout scm
}
}
stage('Test') {
steps {
script {
docker.image('node:16').inside {
sh 'npm install'
sh 'npm test'
}
}
}
}
stage('SonarQube Analysis') {
steps {
script {
def scannerHome = tool 'SonarQubeScanner'
withSonarQubeEnv('SonarQube') {
sh "${scannerHome}/bin/sonar-scanner"
}
}
}
}
stage('Build Image') {
steps {
script {
def image = docker.build("${DOCKER_REGISTRY}/${IMAGE_NAME}:${IMAGE_TAG}")
docker.withRegistry("https://${DOCKER_REGISTRY}", 'harbor-credentials') {
image.push()
image.push('latest')
}
}
}
}
stage('Security Scan') {
steps {
sh "docker run --rm -v /var/run/docker.sock:/var/run/docker.sock aquasec/trivy image ${DOCKER_REGISTRY}/${IMAGE_NAME}:${IMAGE_TAG}"
}
}
stage('Deploy to Staging') {
steps {
script {
sh """
docker-compose -f docker-compose.staging.yml down
docker-compose -f docker-compose.staging.yml up -d
"""
}
}
}
stage('Integration Tests') {
steps {
sh 'npm run test:integration'
}
}
stage('Deploy to Production') {
when {
branch 'main'
}
steps {
                input message: 'Deploy to production?', ok: 'Deploy'
script {
sh """
docker-compose -f docker-compose.prod.yml down
docker-compose -f docker-compose.prod.yml up -d
"""
}
}
}
}
post {
always {
cleanWs()
}
success {
slackSend channel: '#deployments',
color: 'good',
message: "✅ 部署成功: ${env.JOB_NAME} - ${env.BUILD_NUMBER}"
}
failure {
slackSend channel: '#deployments',
color: 'danger',
message: "❌ 部署失败: ${env.JOB_NAME} - ${env.BUILD_NUMBER}"
}
}
}
```
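The SonarQube stage runs sonar-scanner, which expects a `sonar-project.properties` at the repository root; a minimal sketch with placeholder values:

```properties
# sonar-project.properties — minimal sketch
sonar.projectKey=myapp
sonar.projectName=myapp
sonar.sources=src
```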
## Deployment and Operations Scripts

### Automated Deployment Script

```bash
#!/bin/bash
# auto-deploy.sh
set -e
PROJECT_NAME="$1"
ENVIRONMENT="$2"
if [ -z "$PROJECT_NAME" ] || [ -z "$ENVIRONMENT" ]; then
echo "用法: $0 <project-name> <environment>"
echo "示例: $0 fullstack-app production"
exit 1
fi
echo "🚀 开始部署 $PROJECT_NAME 到 $ENVIRONMENT 环境..."
# Check for Docker and Docker Compose
command -v docker >/dev/null 2>&1 || { echo "❌ Docker is not installed"; exit 1; }
command -v docker-compose >/dev/null 2>&1 || { echo "❌ Docker Compose is not installed"; exit 1; }
# Set environment variables
export ENVIRONMENT=$ENVIRONMENT
export PROJECT_NAME=$PROJECT_NAME
# Pull the latest code
if [ -d ".git" ]; then
echo "📥 拉取最新代码..."
git pull origin main
fi
# Build images
echo "🔨 Building images..."
docker-compose -f docker-compose.yml -f docker-compose.$ENVIRONMENT.yml build --no-cache
# Run tests
if [ "$ENVIRONMENT" != "production" ]; then
    echo "🧪 Running tests..."
docker-compose -f docker-compose.test.yml up --abort-on-container-exit
docker-compose -f docker-compose.test.yml down
fi
# Back up the current deployment
if [ "$ENVIRONMENT" = "production" ]; then
    echo "💾 Backing up the current deployment state..."
    mkdir -p backup
    docker-compose -f docker-compose.yml -f docker-compose.$ENVIRONMENT.yml ps > backup/deployment-$(date +%Y%m%d-%H%M%S).txt
fi
# Deploy the new version
echo "🚀 Deploying the new version..."
docker-compose -f docker-compose.yml -f docker-compose.$ENVIRONMENT.yml up -d
# Wait for services to start
echo "⏳ Waiting for services to start..."
sleep 30
# Health check
echo "🔍 Running health checks..."
for i in {1..10}; do
if curl -f http://localhost/health >/dev/null 2>&1; then
echo "✅ 健康检查通过"
break
fi
if [ $i -eq 10 ]; then
echo "❌ 健康检查失败"
exit 1
fi
sleep 10
done
# Clean up old images
echo "🧹 Cleaning up old images..."
docker image prune -f
echo "✅ 部署完成!"
echo "🌐 应用访问地址: http://localhost"监控脚本
bash
#!/bin/bash
# monitor.sh
PROJECT_NAME="$1"
if [ -z "$PROJECT_NAME" ]; then
echo "用法: $0 <project-name>"
exit 1
fi
echo "📊 监控 $PROJECT_NAME 项目状态..."
while true; do
clear
echo "=== Docker 容器状态 ==="
docker-compose ps
echo -e "\n=== 系统资源使用 ==="
docker stats --no-stream
echo -e "\n=== 磁盘使用情况 ==="
docker system df
echo -e "\n=== 网络连接 ==="
docker network ls
echo -e "\n=== 数据卷使用 ==="
docker volume ls
echo -e "\n=== 最近日志 ==="
docker-compose logs --tail=10
echo -e "\n按 Ctrl+C 退出监控..."
sleep 30
done
```

## Chapter Summary
This chapter walked through four complete hands-on projects that show Docker at work in different scenarios:

Project recap:
- Full-stack web application: containerizing a front-end/back-end split architecture
- Microservices system: orchestrating and managing a complex microservice architecture
- Data processing pipeline: deploying big-data tooling in containers
- DevOps toolchain: assembling a complete CI/CD pipeline

Key skills:
- Multi-service container orchestration
- Network and data management
- Security configuration and monitoring
- Automated deployment and operations
- Troubleshooting and performance tuning

Best practices:
- Use multi-stage builds to optimize images
- Implement health checks and monitoring
- Automate deployment and testing
- Manage security settings and permissions deliberately
- Document and standardize processes

These projects give you broad, practical Docker experience and can serve as references and templates for real-world work.