mirror of https://github.com/NanmiCoder/MediaCrawler.git
synced 2026-03-06 06:00:45 +08:00

JSONL(JSON Lines)每行一个 JSON 对象,采用 append 模式写入,无需读取已有数据,大数据量下性能远优于 JSON 格式。
- 新增 AsyncFileWriter.write_to_jsonl() 核心方法
- 7 个平台新增 JsonlStoreImplement 类并注册到工厂
- 配置默认值从 json 改为 jsonl,CLI/API 枚举同步更新
- db_session.py 守卫条件加入 jsonl,避免误触 ValueError
- 词云生成支持读取 JSONL 文件,优先 jsonl 回退 json
- 原有 json 选项完全保留,向后兼容
- 更新相关文档和测试

100 lines
2.8 KiB
Python
# -*- coding: utf-8 -*-
# Copyright (c) 2025 relakkes@gmail.com
#
# This file is part of MediaCrawler project.
# Repository: https://github.com/NanmiCoder/MediaCrawler/blob/main/api/schemas/crawler.py
# GitHub: https://github.com/NanmiCoder
# Licensed under NON-COMMERCIAL LEARNING LICENSE 1.1
#
# 声明:本代码仅供学习和研究目的使用。使用者应遵守以下原则:
# 1. 不得用于任何商业用途。
# 2. 使用时应遵守目标平台的使用条款和robots.txt规则。
# 3. 不得进行大规模爬取或对平台造成运营干扰。
# 4. 应合理控制请求频率,避免给目标平台带来不必要的负担。
# 5. 不得用于任何非法或不当的用途。
#
# 详细许可条款请参阅项目根目录下的LICENSE文件。
# 使用本代码即表示您同意遵守上述原则和LICENSE中的所有条款。

from enum import Enum
from typing import Optional, Literal

from pydantic import BaseModel


class PlatformEnum(str, Enum):
    """Supported media platforms.

    Subclassing ``str`` lets members compare equal to their raw string
    values and serialize transparently in pydantic models.
    """

    XHS = "xhs"
    DOUYIN = "dy"
    KUAISHOU = "ks"
    BILIBILI = "bili"
    WEIBO = "wb"
    TIEBA = "tieba"
    ZHIHU = "zhihu"


class LoginTypeEnum(str, Enum):
    """Login method used when starting a crawl session."""

    QRCODE = "qrcode"
    PHONE = "phone"
    COOKIE = "cookie"


class CrawlerTypeEnum(str, Enum):
    """Crawler operating mode (keyword search, specific posts, or creators)."""

    SEARCH = "search"
    DETAIL = "detail"
    CREATOR = "creator"


class SaveDataOptionEnum(str, Enum):
    """Data save option — where crawled records are persisted."""

    CSV = "csv"
    DB = "db"
    JSON = "json"
    JSONL = "jsonl"
    SQLITE = "sqlite"
    MONGODB = "mongodb"
    EXCEL = "excel"


class CrawlerStartRequest(BaseModel):
    """Request payload for starting a crawler run.

    Only ``platform`` is required; every other field carries a default so
    a minimal request can launch a QR-code-login keyword search.
    """

    platform: PlatformEnum
    login_type: LoginTypeEnum = LoginTypeEnum.QRCODE
    crawler_type: CrawlerTypeEnum = CrawlerTypeEnum.SEARCH
    # Keywords for search mode.
    keywords: str = ""
    # Post/video ID list for detail mode, comma-separated.
    specified_ids: str = ""
    # Creator ID list for creator mode, comma-separated.
    creator_ids: str = ""
    start_page: int = 1
    enable_comments: bool = True
    enable_sub_comments: bool = False
    save_option: SaveDataOptionEnum = SaveDataOptionEnum.JSONL
    cookies: str = ""
    headless: bool = False


class CrawlerStatusResponse(BaseModel):
    """Crawler status response.

    Fields other than ``status`` are populated only when a run exists
    (or, for ``error_message``, when the status is an error).
    """

    status: Literal["idle", "running", "stopping", "error"]
    platform: Optional[str] = None
    crawler_type: Optional[str] = None
    started_at: Optional[str] = None
    error_message: Optional[str] = None


class LogEntry(BaseModel):
    """A single log entry streamed back to API clients."""

    id: int
    timestamp: str
    level: Literal["info", "warning", "error", "success", "debug"]
    message: str


class DataFileInfo(BaseModel):
    """Metadata describing one saved data file.

    ``record_count`` is optional — presumably it is filled in only when the
    file format allows cheap counting; confirm against the caller.
    """

    name: str
    path: str
    size: int
    modified_at: str
    record_count: Optional[int] = None