- Add technical documentation (技术方案.md) with system architecture and design details
- Create FastAPI application structure with modular organization (api, core, models, services, utils)
- Implement job data crawler service with incremental collection from a third-party API
- Add Kafka service integration with Docker Compose configuration for the message queue
- Create data models for job listings, progress tracking, and API responses
- Implement REST API endpoints for data consumption (/consume, /status) and task management
- Add progress persistence layer using SQLite for tracking collection offsets (see the sketch after the progress model file below)
- Implement date filtering logic to keep only data published within the last 7 days (a sketch follows this list)
- Create API client service for third-party data source integration
- Add configuration management with environment-based settings
- Include Docker support with Dockerfile and docker-compose.yml for containerized deployment
- Add logging configuration and utility functions for date parsing
- Include requirements.txt with all Python dependencies and README documentation
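The 7-day window check is the core of the date filtering mentioned above. Below is a minimal sketch of how it could work, assuming listings expose a `publish_time` string in `YYYY-MM-DD HH:MM:SS` format (the function name, field name, and format are illustrative assumptions, not confirmed by the commit):

```python
from datetime import datetime, timedelta


def is_within_last_7_days(publish_time: str, fmt: str = "%Y-%m-%d %H:%M:%S") -> bool:
    """Return True if publish_time falls within the last 7 days.

    Sketch only: the project's date-parsing utilities may accept other
    formats than the one assumed here.
    """
    try:
        published = datetime.strptime(publish_time, fmt)
    except ValueError:
        # Unparseable dates are treated as out of range and skipped.
        return False
    return datetime.now() - published <= timedelta(days=7)
```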
Python · 25 lines · 514 B
"""采集进度模型"""
|
|
from pydantic import BaseModel
|
|
|
|
|
|
class CrawlProgress(BaseModel):
|
|
"""采集进度"""
|
|
task_id: str
|
|
current_offset: int = 0
|
|
total: int = 0
|
|
last_update: str = ""
|
|
status: str = "idle" # idle, running, completed, error
|
|
|
|
|
|
class CrawlStatus(BaseModel):
|
|
"""采集状态响应"""
|
|
task_id: str
|
|
total: int
|
|
current_offset: int
|
|
progress: str
|
|
kafka_lag: int = 0
|
|
status: str
|
|
last_update: str
|
|
filtered_count: int = 0
|
|
produced_count: int = 0
|
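The commit summary describes a SQLite persistence layer for collection offsets; a minimal sketch of how `CrawlProgress` could be stored is below. The table layout, module path (`app.models.progress`), and function names are illustrative assumptions, not the project's actual implementation:

```python
import sqlite3
from datetime import datetime
from typing import Optional

# Assumed import path for the model shown above.
from app.models.progress import CrawlProgress

DB_PATH = "progress.db"  # assumed location; the real service would read this from settings


def init_db(path: str = DB_PATH) -> None:
    """Create the progress table if it does not exist."""
    with sqlite3.connect(path) as conn:
        conn.execute(
            """CREATE TABLE IF NOT EXISTS crawl_progress (
                   task_id TEXT PRIMARY KEY,
                   current_offset INTEGER,
                   total INTEGER,
                   last_update TEXT,
                   status TEXT
               )"""
        )


def save_progress(progress: CrawlProgress, path: str = DB_PATH) -> None:
    """Upsert the current offset so an interrupted crawl can resume."""
    progress.last_update = datetime.now().isoformat()
    with sqlite3.connect(path) as conn:
        conn.execute(
            "INSERT OR REPLACE INTO crawl_progress VALUES (?, ?, ?, ?, ?)",
            (progress.task_id, progress.current_offset, progress.total,
             progress.last_update, progress.status),
        )


def load_progress(task_id: str, path: str = DB_PATH) -> Optional[CrawlProgress]:
    """Load saved progress for a task, or None if the task is unknown."""
    with sqlite3.connect(path) as conn:
        row = conn.execute(
            "SELECT task_id, current_offset, total, last_update, status "
            "FROM crawl_progress WHERE task_id = ?",
            (task_id,),
        ).fetchone()
    if row is None:
        return None
    return CrawlProgress(
        task_id=row[0], current_offset=row[1], total=row[2],
        last_update=row[3], status=row[4],
    )
```

Keyed by `task_id`, a table like this would let a `/status` handler assemble a `CrawlStatus` response from the stored offsets plus Kafka metrics, though how the project actually wires this together is not shown here.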