ocups-kafka/job_crawler/deploy.sh

#!/bin/bash
# Deployment script
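# Typical usage (run `./deploy.sh help` for the full command list):
#   ./deploy.sh build    # build the Docker image
#   ./deploy.sh up       # start all services via docker-compose
#   ./deploy.sh logs     # tail application logs
#   ./deploy.sh status   # show container and crawler status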
set -e
# Configuration
IMAGE_NAME="job-crawler"
IMAGE_TAG="latest"
CONTAINER_NAME="job-crawler"
REGISTRY="" # Private registry address, e.g. registry.example.com
# Colored output
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
NC='\033[0m'
log_info() { echo -e "${GREEN}[INFO]${NC} $1"; }
log_warn() { echo -e "${YELLOW}[WARN]${NC} $1"; }
log_error() { echo -e "${RED}[ERROR]${NC} $1"; }
# Show help
show_help() {
    echo "Usage: ./deploy.sh [command]"
    echo ""
    echo "Commands:"
    echo "  build        Build the image"
    echo "  push         Push the image to the registry"
    echo "  up           Start all services (docker-compose)"
    echo "  down         Stop all services"
    echo "  restart      Restart the app service"
    echo "  logs         Tail application logs"
    echo "  logs-kafka   Tail Kafka logs"
    echo "  status       Show service status"
    echo "  clean        Remove unused images"
    echo "  reset        Wipe data volumes and restart (use with caution)"
    echo "  help         Show this help"
}
# Build the image
build() {
    log_info "Building image: ${IMAGE_NAME}:${IMAGE_TAG}"
    docker build --no-cache -t "${IMAGE_NAME}:${IMAGE_TAG}" .
    log_info "Build complete"
}
# Push the image
push() {
    if [ -z "$REGISTRY" ]; then
        log_error "Please set the REGISTRY variable first"
        exit 1
    fi
    FULL_IMAGE="${REGISTRY}/${IMAGE_NAME}:${IMAGE_TAG}"
    log_info "Pushing image: ${FULL_IMAGE}"
    docker tag "${IMAGE_NAME}:${IMAGE_TAG}" "${FULL_IMAGE}"
    docker push "${FULL_IMAGE}"
    log_info "Push complete"
}
# Start services
up() {
    log_info "Starting services..."
    # Make sure the config file exists
    if [ ! -f "config/config.yml" ]; then
        log_warn "Config file not found; copying from template"
        cp config/config.yml.docker config/config.yml
        log_warn "Please edit config/config.yml with your credentials, then run again"
        exit 1
    fi
    docker-compose up -d
    log_info "Services started"
    log_info "API address: http://localhost:8000"
    log_info "API docs: http://localhost:8000/docs"
}
# Stop services
down() {
    log_info "Stopping services..."
    docker-compose down
    log_info "Services stopped"
}
# Restart the app service
restart() {
    log_info "Restarting app service..."
    docker-compose restart app
    log_info "Restart complete"
}
# Tail application logs
logs() {
    docker-compose logs -f --tail=100 app
}
# Tail Kafka logs
logs_kafka() {
    docker-compose logs -f --tail=100 kafka
}
# Show status
status() {
    echo "=== Container status ==="
    docker-compose ps
    echo ""
    echo "=== Crawler status ==="
    # Pretty-print the JSON from the app's /status endpoint (requires python on the host)
    curl -s http://localhost:8000/status 2>/dev/null | python -m json.tool || echo "Service not running"
}
# Clean up images
clean() {
    log_info "Removing unused images..."
    docker image prune -f
    log_info "Cleanup complete"
}
# Reset: wipe data volumes and restart
reset() {
    log_warn "This will delete all data volumes (Kafka data, crawl progress, etc.). Continue? [y/N]"
    read -r confirm
    if [ "$confirm" != "y" ] && [ "$confirm" != "Y" ]; then
        log_info "Cancelled"
        exit 0
    fi
    log_info "Stopping services..."
    docker-compose down -v
    log_info "Removing data volumes..."
    docker volume rm job_crawler_kafka_data job_crawler_zookeeper_data job_crawler_app_data 2>/dev/null || true
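    # Note: the volume names above assume the compose project is named "job_crawler"
    # (docker-compose defaults to the directory name); adjust them if your project
    # or volume names differ.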
    log_info "Rebuilding image..."
    docker build --no-cache -t "${IMAGE_NAME}:${IMAGE_TAG}" .
    log_info "Restarting..."
    docker-compose up -d
    log_info "Reset complete"
}
# Main logic
case "${1:-help}" in
    build) build ;;
    push) push ;;
    up) up ;;
    down) down ;;
    restart) restart ;;
    logs) logs ;;
    logs-kafka) logs_kafka ;;
    status) status ;;
    clean) clean ;;
    reset) reset ;;
    help) show_help ;;
    *) show_help ;;
esac