Files
MediaCrawler/config/base_config.py
程序员阿江(Relakkes) 5294b6d9b7 feat: 支持连接用户已有的 Chrome 浏览器进行爬取
新增 CDP_CONNECT_EXISTING 配置项,默认开启,通过 Chrome 远程调试功能
(chrome://inspect/#remote-debugging) 直接连接用户正在使用的浏览器,
复用真实的 Cookie、扩展和浏览历史,大幅降低平台风控检测风险。

主要变更:
- 新增 _connect_existing_browser 方法,通过 ws:// 直接连接已有浏览器
- 支持等待用户在浏览器端确认连接对话框(60秒超时)
- cleanup 时不关闭用户的浏览器进程
- 修复小红书在真实浏览器下 cookie 过多导致签名失败的问题
- 更新 README、CDP使用指南和常见问题文档
2026-04-15 10:54:29 +08:00

146 lines
6.1 KiB
Python
Raw Blame History

This file contains ambiguous Unicode characters
This file contains Unicode characters that might be confused with other characters. If you think that this is intentional, you can safely ignore this warning. Use the Escape button to reveal them.
# -*- coding: utf-8 -*-
# Copyright (c) 2025 relakkes@gmail.com
#
# This file is part of MediaCrawler project.
# Repository: https://github.com/NanmiCoder/MediaCrawler/blob/main/config/base_config.py
# GitHub: https://github.com/NanmiCoder
# Licensed under NON-COMMERCIAL LEARNING LICENSE 1.1
#
# 声明:本代码仅供学习和研究目的使用。使用者应遵守以下原则:
# 1. 不得用于任何商业用途。
# 2. 使用时应遵守目标平台的使用条款和robots.txt规则。
# 3. 不得进行大规模爬取或对平台造成运营干扰。
# 4. 应合理控制请求频率,避免给目标平台带来不必要的负担。
# 5. 不得用于任何非法或不当的用途。
#
# 详细许可条款请参阅项目根目录下的LICENSE文件。
# 使用本代码即表示您同意遵守上述原则和LICENSE中的所有条款。
# Basic configuration
PLATFORM = "xhs"  # Platform, xhs | dy | ks | bili | wb | tieba | zhihu
# Whether to use the international version of Xiaohongshu (rednote.com).
# When enabled, API requests go through webapi.rednote.com and the cookie domain is .rednote.com.
XHS_INTERNATIONAL = False
KEYWORDS = "编程副业,编程兼职"  # Keyword search configuration, separated by English commas
LOGIN_TYPE = "qrcode"  # qrcode or phone or cookie
COOKIES = ""
CRAWLER_TYPE = (
    "search"  # Crawling type, search (keyword search) | detail (post details) | creator (creator homepage data)
)
# Whether to enable IP proxy
ENABLE_IP_PROXY = False
# Number of proxy IP pools
IP_PROXY_POOL_COUNT = 2
# Proxy IP provider name
IP_PROXY_PROVIDER_NAME = "kuaidaili"  # kuaidaili | wandouhttp
# Setting to True will not open the browser (headless browser)
# Setting False will open a browser
# If Xiaohongshu keeps scanning the code to log in but fails, open the browser and manually pass the sliding verification code.
# If Douyin keeps prompting failure, open the browser and see if mobile phone number verification appears after scanning the QR code to log in. If it does, manually go through it and try again.
HEADLESS = False
# Whether to save login status
SAVE_LOGIN_STATE = True
# ==================== CDP (Chrome DevTools Protocol) configuration ====================
# Whether to enable CDP mode - crawl using the user's locally installed Chrome/Edge
# browser, which has better anti-detection capability.
# When enabled, the user's Chrome/Edge browser is auto-detected and launched, then
# controlled through the CDP protocol.
# This approach uses a real browser environment, including the user's extensions,
# cookies and settings, greatly reducing the risk of platform risk-control detection.
ENABLE_CDP_MODE = True
# CDP debug port used to communicate with the browser.
# If the port is occupied, the next available port is tried automatically.
CDP_DEBUG_PORT = 9222
# Custom browser path (optional).
# If empty, the Chrome/Edge installation path is detected automatically.
# Windows example: "C:\\Program Files\\Google\\Chrome\\Application\\chrome.exe"
# macOS example: "/Applications/Google Chrome.app/Contents/MacOS/Google Chrome"
CUSTOM_BROWSER_PATH = ""
# Whether to enable headless mode while in CDP mode.
# NOTE: even if set to True, some anti-detection features may not work properly in headless mode.
CDP_HEADLESS = False
# Browser launch timeout (seconds)
BROWSER_LAUNCH_TIMEOUT = 60
# Whether to connect to a browser the user has already opened instead of launching a new one.
# When enabled, the program connects to a browser that already has remote debugging enabled.
# The user needs to enable remote debugging in Chrome: chrome://inspect/#remote-debugging
# or start Chrome with the command-line flag: --remote-debugging-port=9222
# This approach has the best anti-detection effect, because it directly reuses all the
# cookies, extensions and browsing history of the user's real browser.
CDP_CONNECT_EXISTING = True
# Whether to close the browser automatically when the program exits.
# Set to False to keep the browser running, which is convenient for debugging.
AUTO_CLOSE_BROWSER = True
# Data saving type option configuration, supports: csv, db, json, jsonl, sqlite, excel, postgres. It is best to save to DB, with deduplication function.
SAVE_DATA_OPTION = "jsonl"  # csv or db or json or jsonl or sqlite or excel or postgres
# Data saving path, if not specified by default, it will be saved to the data folder.
SAVE_DATA_PATH = ""
# Browser file configuration cached by the user's browser
USER_DATA_DIR = "%s_user_data_dir"  # %s will be replaced by platform name
# The number of pages to start crawling starts from the first page by default
START_PAGE = 1
# Control the number of crawled videos/posts
CRAWLER_MAX_NOTES_COUNT = 15
# Controlling the number of concurrent crawlers
MAX_CONCURRENCY_NUM = 1
# Whether to enable crawling media mode (including image or video resources), crawling media is not enabled by default
# NOTE(review): "MEIDAS" is a typo for "MEDIAS"; the name is kept unchanged because
# other modules in the project read this constant by its current spelling.
ENABLE_GET_MEIDAS = False
# Whether to enable comment crawling mode. Comment crawling is enabled by default.
ENABLE_GET_COMMENTS = True
# Control the number of crawled first-level comments (single video/post)
CRAWLER_MAX_COMMENTS_COUNT_SINGLENOTES = 10
# Whether to enable the mode of crawling second-level comments. By default, crawling of second-level comments is not enabled.
# If the old version of the project uses db, you need to refer to schema/tables.sql line 287 to add table fields.
ENABLE_GET_SUB_COMMENTS = False
# word cloud related
# Whether to enable generating comment word clouds
ENABLE_GET_WORDCLOUD = False
# Custom words and their groups
# Add rule: xx:yy where xx is a custom-added phrase, and yy is the group name to which the phrase xx is assigned.
CUSTOM_WORDS = {
    "零几": "年份",  # Treat "零几" (the early-2000s "aught" years) as one token, grouped under "年份" (year)
    "高频词": "专业术语",  # Example custom word, grouped under "专业术语" (technical term)
}
# Deactivate (disabled) word file path
STOP_WORDS_FILE = "./docs/hit_stopwords.txt"
# Chinese font file path
FONT_PATH = "./docs/STZHONGS.TTF"
# Crawl interval (seconds of sleep between requests)
CRAWLER_MAX_SLEEP_SEC = 2
# Whether to disable SSL certificate verification. Set to True only when using a
# man-in-the-middle proxy that injects self-signed certificates, such as an enterprise
# proxy, Burp Suite or mitmproxy.
# WARNING: disabling SSL verification exposes all traffic to man-in-the-middle attacks;
# never enable it in production.
DISABLE_SSL_VERIFY = False
from .bilibili_config import *
from .xhs_config import *
from .dy_config import *
from .ks_config import *
from .weibo_config import *
from .tieba_config import *
from .zhihu_config import *