- Add technical documentation (技术方案.md) with system architecture and design details
- Create FastAPI application structure with modular organization (api, core, models, services, utils)
- Implement job data crawler service with incremental collection from a third-party API
- Add Kafka service integration with Docker Compose configuration for the message queue
- Create data models for job listings, progress tracking, and API responses
- Implement REST API endpoints for data consumption (/consume, /status) and task management
- Add progress persistence layer using SQLite for tracking collection offsets
- Implement date filtering logic to extract data published within the last 7 days (see the sketch after this list)
- Create API client service for third-party data source integration
- Add configuration management with environment-based settings
- Include Docker support with Dockerfile and docker-compose.yml for containerized deployment
- Add logging configuration and utility functions for date parsing
- Include requirements.txt with all Python dependencies and README documentation
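The 7-day date filter is the core selection rule of the crawler. A minimal sketch of how it could work, assuming each record carries a `publish_time` string in `YYYY-MM-DD HH:MM:SS` format (both the field name and the format are assumptions, not the actual API contract):

```python
from datetime import datetime, timedelta

def within_filter_window(record: dict, filter_days: int = 7) -> bool:
    """Keep only records published within the last `filter_days` days.

    The `publish_time` field and its format are assumed here,
    not taken from the real third-party API contract.
    """
    raw = record.get("publish_time")
    if not raw:
        return False  # records without a publish date are dropped
    published = datetime.strptime(raw, "%Y-%m-%d %H:%M:%S")
    return published >= datetime.now() - timedelta(days=filter_days)

# Usage: filter one batch fetched from the third-party API
records = [
    {"title": "Python Developer", "publish_time": "2024-01-10 09:30:00"},
    {"title": "Data Engineer"},  # missing publish date -> filtered out
]
fresh = [r for r in records if within_filter_window(r)]
```

The `filter_days` default mirrors the `crawler.filter_days: 7` setting in the configuration file below.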
The service configuration file (YAML):
```yaml
# Job data collection service configuration

# Application settings
app:
  name: job-crawler
  version: 1.0.0
  debug: false

# Bazhuayu (Octoparse) API settings
api:
  base_url: https://openapi.bazhuayu.com
  username: "13051331101"
  password: "abc19910515"
  batch_size: 100
  # Multi-task configuration
  tasks:
    - id: "00f3b445-d8ec-44e8-88b2-4b971a228b1e"
      name: "Qingdao job data"
      enabled: true
    - id: "task-id-2"
      name: "Task 2"
      enabled: false
    - id: "task-id-3"
      name: "Task 3"
      enabled: false

# Kafka settings
kafka:
  bootstrap_servers: localhost:9092
  topic: job_data
  consumer_group: job_consumer_group

# Crawler settings
crawler:
  interval: 300     # collection interval (seconds)
  filter_days: 7    # keep only data published within this many days
  max_workers: 5    # maximum number of parallel tasks

# Database settings
database:
  path: data/crawl_progress.db
```
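The commit also lists configuration management with environment-based settings. A minimal sketch of how a loader could merge this YAML with environment overrides, assuming PyYAML is available; the file name `config.yaml` and the `CRAWLER_` prefix convention are assumptions, not necessarily what the service uses:

```python
import os
import yaml  # PyYAML, assumed to be listed in requirements.txt

def load_config(path: str = "config.yaml") -> dict:
    """Load the YAML config, then apply environment overrides.

    Hypothetical convention: CRAWLER_<SECTION>__<FIELD> overrides
    config[section][field], e.g.
    CRAWLER_KAFKA__BOOTSTRAP_SERVERS=broker:9092 -> kafka.bootstrap_servers.
    """
    with open(path, encoding="utf-8") as f:
        config = yaml.safe_load(f)

    prefix = "CRAWLER_"
    for key, value in os.environ.items():
        if not key.startswith(prefix):
            continue
        section, _, field = key[len(prefix):].lower().partition("__")
        if field and isinstance(config.get(section), dict):
            # Overrides stay strings in this sketch; a real loader
            # would coerce types to match the YAML values.
            config[section][field] = value
    return config

config = load_config()
print(config["kafka"]["bootstrap_servers"])
```

Keeping overrides at the environment level lets the same image run unchanged in Docker Compose, where `bootstrap_servers: localhost:9092` is typically replaced with the Kafka service name.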