diff --git a/job_crawler/deploy.sh b/job_crawler/deploy.sh
index 7f0dbdb..8f8eb50 100644
--- a/job_crawler/deploy.sh
+++ b/job_crawler/deploy.sh
@@ -29,9 +29,11 @@ show_help() {
     echo "  up          Start all services (docker-compose)"
     echo "  down        Stop all services"
     echo "  restart     Restart the app service"
-    echo "  logs        View logs"
+    echo "  logs        View application logs"
+    echo "  logs-kafka  View Kafka logs"
     echo "  status      Show service status"
     echo "  clean       Remove unused images"
+    echo "  reset       Wipe data volumes and restart (use with caution)"
     echo "  help        Show this help"
 }
 
@@ -87,11 +89,16 @@ restart() {
     log_info "Restart complete"
 }
 
-# View logs
+# View application logs
 logs() {
     docker-compose logs -f --tail=100 app
 }
 
+# View Kafka logs
+logs_kafka() {
+    docker-compose logs -f --tail=100 kafka
+}
+
 # Show status
 status() {
     echo "=== Container status ==="
@@ -101,23 +108,46 @@ status() {
     curl -s http://localhost:8000/status 2>/dev/null | python -m json.tool || echo "Service is not running"
 }
 
-# Clean up
+# Clean up images
 clean() {
     log_info "Removing unused images..."
     docker image prune -f
     log_info "Cleanup complete"
 }
 
+# Reset: wipe data volumes and restart
+reset() {
+    log_warn "This will wipe all data volumes (Kafka data, crawl progress, etc.). Continue? [y/N]"
+    read -r confirm
+    if [ "$confirm" != "y" ] && [ "$confirm" != "Y" ]; then
+        log_info "Cancelled"
+        exit 0
+    fi
+
+    log_info "Stopping services..."
+    docker-compose down -v
+
+    log_info "Removing data volumes..."
+    docker volume rm job_crawler_kafka_data job_crawler_zookeeper_data job_crawler_app_data 2>/dev/null || true
+
+    log_info "Starting services again..."
+    docker-compose up -d
+
+    log_info "Reset complete"
+}
+
 # Main logic
 case "${1:-help}" in
-    build) build ;;
-    push) push ;;
-    up) up ;;
-    down) down ;;
-    restart) restart ;;
-    logs) logs ;;
-    status) status ;;
-    clean) clean ;;
-    help) show_help ;;
-    *) show_help ;;
+    build)      build ;;
+    push)       push ;;
+    up)         up ;;
+    down)       down ;;
+    restart)    restart ;;
+    logs)       logs ;;
+    logs-kafka) logs_kafka ;;
+    status)     status ;;
+    clean)      clean ;;
+    reset)      reset ;;
+    help)       show_help ;;
+    *)          show_help ;;
 esac
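
A minimal usage sketch for the new subcommands, assuming the script is run from the job_crawler directory and deploy.sh is executable:

    ./deploy.sh logs-kafka   # follow the Kafka container logs (last 100 lines, then tail)
    ./deploy.sh reset        # prompts for confirmation, then removes the data volumes and restarts the stack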