- Add comprehensive sequence diagrams documenting container startup, task initialization, and the incremental crawling flow
- Implement reverse-order crawling logic (from latest to oldest) to optimize performance by processing new data first (see the sketch after this list)
- Add real-time Kafka message publishing after each batch is filtered, instead of waiting for task completion
- Update progress tracking to store last_start_offset for accurate incremental crawling across sessions
- Enhance the crawler service with improved offset calculation and batch processing logic
- Update configuration files to support the new crawling parameters and Kafka integration
- Extend the progress model to track crawling state and handle edge cases
- Improve main application initialization to properly handle lifespan events and task auto-start

This change enables efficient incremental data collection: new data is prioritized and published immediately, reducing latency and improving system responsiveness.
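To make the reverse-order flow concrete, here is a minimal sketch of one crawl pass, assuming a kafka-python producer and the CrawlProgress model shown further below. The fetch_batch and filter_batch callables, the crawl.items topic, the batch size, and the models.progress module path are hypothetical placeholders, not the actual service code.

```python
"""Minimal sketch of one reverse-order incremental crawl pass (illustrative only)."""
import json
from datetime import datetime, timezone
from typing import Callable

from kafka import KafkaProducer  # assumed dependency: kafka-python

from models.progress import CrawlProgress  # assumed import path for the model below


def crawl_pass(
    latest_offset: int,
    progress: CrawlProgress,
    fetch_batch: Callable[[int, int], list[dict]],    # hypothetical: items in [start, end)
    filter_batch: Callable[[list[dict]], list[dict]],  # hypothetical filtering step
    producer: KafkaProducer,
    topic: str = "crawl.items",  # assumed topic name
    batch_size: int = 100,       # assumed batch size
) -> CrawlProgress:
    # The previous run's start offset becomes this run's stop boundary,
    # so only data newer than the last session is collected.
    stop_offset = progress.last_start_offset or 0
    progress.status = "running"

    # Walk backwards from the newest data toward the stop boundary.
    end = latest_offset
    while end > stop_offset:
        start = max(end - batch_size, stop_offset)
        batch = fetch_batch(start, end)
        kept = filter_batch(batch)

        # Publish immediately after filtering each batch instead of
        # waiting for the whole task to finish.
        for item in kept:
            producer.send(topic, json.dumps(item).encode("utf-8"))
        producer.flush()

        progress.total += len(kept)
        progress.last_update = datetime.now(timezone.utc).isoformat()
        end = start

    # Record where this pass started so the next session stops here.
    progress.last_start_offset = latest_offset
    progress.status = "completed"
    return progress
```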
"""采集进度模型"""
|
|
from pydantic import BaseModel
|
|
from typing import Optional
|
|
|
|
|
|
class CrawlProgress(BaseModel):
|
|
"""采集进度"""
|
|
task_id: str
|
|
last_start_offset: Optional[int] = None # 上次采集的起始位置,作为下次的截止位置
|
|
total: int = 0
|
|
last_update: str = ""
|
|
status: str = "idle" # idle, running, completed, error
|
|
|
|
|
|
class CrawlStatus(BaseModel):
|
|
"""采集状态响应"""
|
|
task_id: str
|
|
total: int
|
|
last_start_offset: Optional[int] = None
|
|
progress: str
|
|
kafka_lag: int = 0
|
|
status: str
|
|
last_update: str
|
|
filtered_count: int = 0
|
|
produced_count: int = 0
|