diff --git a/config/base_config.py b/config/base_config.py index af0592b..1722ae1 100644 --- a/config/base_config.py +++ b/config/base_config.py @@ -38,7 +38,7 @@ SAVE_LOGIN_STATE = True # 是否启用CDP模式 - 使用用户现有的Chrome/Edge浏览器进行爬取,提供更好的反检测能力 # 启用后将自动检测并启动用户的Chrome/Edge浏览器,通过CDP协议进行控制 # 这种方式使用真实的浏览器环境,包括用户的扩展、Cookie和设置,大大降低被检测的风险 -ENABLE_CDP_MODE = False +ENABLE_CDP_MODE = True # CDP调试端口,用于与浏览器通信 # 如果端口被占用,系统会自动尝试下一个可用端口 diff --git a/config/bilibili_config.py b/config/bilibili_config.py index 7882824..779ab75 100644 --- a/config/bilibili_config.py +++ b/config/bilibili_config.py @@ -13,16 +13,23 @@ # 每天爬取视频/帖子的数量控制 MAX_NOTES_PER_DAY = 1 -# 指定B站视频ID列表 +# 指定B站视频URL列表 (支持完整URL或BV号) +# 示例: +# - 完整URL: "https://www.bilibili.com/video/BV1dwuKzmE26/?spm_id_from=333.1387.homepage.video_card.click" +# - BV号: "BV1d54y1g7db" BILI_SPECIFIED_ID_LIST = [ - "BV1d54y1g7db", + "https://www.bilibili.com/video/BV1dwuKzmE26/?spm_id_from=333.1387.homepage.video_card.click", "BV1Sz4y1U77N", "BV14Q4y1n7jz", # ........................ ] -# 指定B站用户ID列表 +# 指定B站创作者URL列表 (支持完整URL或UID) +# 示例: +# - 完整URL: "https://space.bilibili.com/434377496?spm_id_from=333.1007.0.0" +# - UID: "20813884" BILI_CREATOR_ID_LIST = [ + "https://space.bilibili.com/434377496?spm_id_from=333.1007.0.0", "20813884", # ........................ ] diff --git a/config/dy_config.py b/config/dy_config.py index b974dca..cd36065 100644 --- a/config/dy_config.py +++ b/config/dy_config.py @@ -11,15 +11,27 @@ # 抖音平台配置 PUBLISH_TIME_TYPE = 0 -# 指定DY视频ID列表 +# 指定DY视频URL列表 (支持多种格式) +# 支持格式: +# 1. 完整视频URL: "https://www.douyin.com/video/7525538910311632128" +# 2. 带modal_id的URL: "https://www.douyin.com/user/xxx?modal_id=7525538910311632128" +# 3. 搜索页带modal_id: "https://www.douyin.com/root/search/python?modal_id=7525538910311632128" +# 4. 短链接: "https://v.douyin.com/drIPtQ_WPWY/" +# 5. 纯视频ID: "7280854932641664319" DY_SPECIFIED_ID_LIST = [ - "7280854932641664319", - "7202432992642387233", + "https://www.douyin.com/video/7525538910311632128", + "https://v.douyin.com/drIPtQ_WPWY/", + "https://www.douyin.com/user/MS4wLjABAAAATJPY7LAlaa5X-c8uNdWkvz0jUGgpw4eeXIwu_8BhvqE?from_tab_name=main&modal_id=7525538910311632128", + "7202432992642387233", # ........................ ] -# 指定DY用户ID列表 +# 指定DY创作者URL列表 (支持完整URL或sec_user_id) +# 支持格式: +# 1. 完整创作者主页URL: "https://www.douyin.com/user/MS4wLjABAAAATJPY7LAlaa5X-c8uNdWkvz0jUGgpw4eeXIwu_8BhvqE?from_tab_name=main" +# 2. sec_user_id: "MS4wLjABAAAATJPY7LAlaa5X-c8uNdWkvz0jUGgpw4eeXIwu_8BhvqE" DY_CREATOR_ID_LIST = [ - "MS4wLjABAAAATJPY7LAlaa5X-c8uNdWkvz0jUGgpw4eeXIwu_8BhvqE", + "https://www.douyin.com/user/MS4wLjABAAAATJPY7LAlaa5X-c8uNdWkvz0jUGgpw4eeXIwu_8BhvqE?from_tab_name=main", + "MS4wLjABAAAATJPY7LAlaa5X-c8uNdWkvz0jUGgpw4eeXIwu_8BhvqE" # ........................ ] diff --git a/config/ks_config.py b/config/ks_config.py index 962b457..d84d4a7 100644 --- a/config/ks_config.py +++ b/config/ks_config.py @@ -10,11 +10,22 @@ # 快手平台配置 -# 指定快手视频ID列表 -KS_SPECIFIED_ID_LIST = ["3xf8enb8dbj6uig", "3x6zz972bchmvqe"] +# 指定快手视频URL列表 (支持完整URL或纯ID) +# 支持格式: +# 1. 完整视频URL: "https://www.kuaishou.com/short-video/3x3zxz4mjrsc8ke?authorId=3x84qugg4ch9zhs&streamSource=search" +# 2. 纯视频ID: "3xf8enb8dbj6uig" +KS_SPECIFIED_ID_LIST = [ + "https://www.kuaishou.com/short-video/3x3zxz4mjrsc8ke?authorId=3x84qugg4ch9zhs&streamSource=search&area=searchxxnull&searchKey=python", + "3xf8enb8dbj6uig", + # ........................ +] -# 指定快手用户ID列表 +# 指定快手创作者URL列表 (支持完整URL或纯ID) +# 支持格式: +# 1. 创作者主页URL: "https://www.kuaishou.com/profile/3x84qugg4ch9zhs" +# 2. 
纯user_id: "3x4sm73aye7jq7i" KS_CREATOR_ID_LIST = [ + "https://www.kuaishou.com/profile/3x84qugg4ch9zhs", "3x4sm73aye7jq7i", # ........................ ] diff --git a/config/xhs_config.py b/config/xhs_config.py index 485277a..9296905 100644 --- a/config/xhs_config.py +++ b/config/xhs_config.py @@ -21,8 +21,12 @@ XHS_SPECIFIED_NOTE_URL_LIST = [ # ........................ ] -# 指定用户ID列表 +# 指定创作者URL列表 (支持完整URL或纯ID) +# 支持格式: +# 1. 完整创作者主页URL (带xsec_token和xsec_source参数): "https://www.xiaohongshu.com/user/profile/5eb8e1d400000000010075ae?xsec_token=AB1nWBKCo1vE2HEkfoJUOi5B6BE5n7wVrbdpHoWIj5xHw=&xsec_source=pc_feed" +# 2. 纯user_id: "63e36c9a000000002703502b" XHS_CREATOR_ID_LIST = [ - "63e36c9a000000002703502b", + "https://www.xiaohongshu.com/user/profile/5eb8e1d400000000010075ae?xsec_token=AB1nWBKCo1vE2HEkfoJUOi5B6BE5n7wVrbdpHoWIj5xHw=&xsec_source=pc_feed", + "63e36c9a000000002703502b", # ........................ ] diff --git a/media_platform/bilibili/core.py b/media_platform/bilibili/core.py index 1c9c175..39af14a 100644 --- a/media_platform/bilibili/core.py +++ b/media_platform/bilibili/core.py @@ -41,6 +41,7 @@ from var import crawler_type_var, source_keyword_var from .client import BilibiliClient from .exception import DataFetchError from .field import SearchOrderType +from .help import parse_video_info_from_url, parse_creator_info_from_url from .login import BilibiliLogin @@ -103,8 +104,14 @@ class BilibiliCrawler(AbstractCrawler): await self.get_specified_videos(config.BILI_SPECIFIED_ID_LIST) elif config.CRAWLER_TYPE == "creator": if config.CREATOR_MODE: - for creator_id in config.BILI_CREATOR_ID_LIST: - await self.get_creator_videos(int(creator_id)) + for creator_url in config.BILI_CREATOR_ID_LIST: + try: + creator_info = parse_creator_info_from_url(creator_url) + utils.logger.info(f"[BilibiliCrawler.start] Parsed creator ID: {creator_info.creator_id} from {creator_url}") + await self.get_creator_videos(int(creator_info.creator_id)) + except ValueError as e: + utils.logger.error(f"[BilibiliCrawler.start] Failed to parse creator URL: {e}") + continue else: await self.get_all_creator_details(config.BILI_CREATOR_ID_LIST) else: @@ -362,11 +369,23 @@ class BilibiliCrawler(AbstractCrawler): utils.logger.info(f"[BilibiliCrawler.get_creator_videos] Sleeping for {config.CRAWLER_MAX_SLEEP_SEC} seconds after page {pn}") pn += 1 - async def get_specified_videos(self, bvids_list: List[str]): + async def get_specified_videos(self, video_url_list: List[str]): """ - get specified videos info + get specified videos info from URLs or BV IDs + :param video_url_list: List of video URLs or BV IDs :return: """ + utils.logger.info("[BilibiliCrawler.get_specified_videos] Parsing video URLs...") + bvids_list = [] + for video_url in video_url_list: + try: + video_info = parse_video_info_from_url(video_url) + bvids_list.append(video_info.video_id) + utils.logger.info(f"[BilibiliCrawler.get_specified_videos] Parsed video ID: {video_info.video_id} from {video_url}") + except ValueError as e: + utils.logger.error(f"[BilibiliCrawler.get_specified_videos] Failed to parse video URL: {e}") + continue + semaphore = asyncio.Semaphore(config.MAX_CONCURRENCY_NUM) task_list = [self.get_video_info_task(aid=0, bvid=video_id, semaphore=semaphore) for video_id in bvids_list] video_details = await asyncio.gather(*task_list) @@ -568,18 +587,30 @@ class BilibiliCrawler(AbstractCrawler): extension_file_name = f"video.mp4" await bilibili_store.store_video(aid, content, extension_file_name) - async def get_all_creator_details(self, 
creator_id_list: List[int]): + async def get_all_creator_details(self, creator_url_list: List[str]): """ - creator_id_list: get details for creator from creator_id_list + creator_url_list: get details for creator from creator URL list """ - utils.logger.info(f"[BilibiliCrawler.get_creator_details] Crawling the detalis of creator") - utils.logger.info(f"[BilibiliCrawler.get_creator_details] creator ids:{creator_id_list}") + utils.logger.info(f"[BilibiliCrawler.get_all_creator_details] Crawling the details of creators") + utils.logger.info(f"[BilibiliCrawler.get_all_creator_details] Parsing creator URLs...") + + creator_id_list = [] + for creator_url in creator_url_list: + try: + creator_info = parse_creator_info_from_url(creator_url) + creator_id_list.append(int(creator_info.creator_id)) + utils.logger.info(f"[BilibiliCrawler.get_all_creator_details] Parsed creator ID: {creator_info.creator_id} from {creator_url}") + except ValueError as e: + utils.logger.error(f"[BilibiliCrawler.get_all_creator_details] Failed to parse creator URL: {e}") + continue + + utils.logger.info(f"[BilibiliCrawler.get_all_creator_details] creator ids:{creator_id_list}") semaphore = asyncio.Semaphore(config.MAX_CONCURRENCY_NUM) task_list: List[Task] = [] try: for creator_id in creator_id_list: - task = asyncio.create_task(self.get_creator_details(creator_id, semaphore), name=creator_id) + task = asyncio.create_task(self.get_creator_details(creator_id, semaphore), name=str(creator_id)) task_list.append(task) except Exception as e: utils.logger.warning(f"[BilibiliCrawler.get_all_creator_details] error in the task list. The creator will not be included. {e}") diff --git a/media_platform/bilibili/help.py b/media_platform/bilibili/help.py index b4e6221..614117a 100644 --- a/media_platform/bilibili/help.py +++ b/media_platform/bilibili/help.py @@ -9,15 +9,17 @@ # 使用本代码即表示您同意遵守上述原则和LICENSE中的所有条款。 - # -*- coding: utf-8 -*- +# -*- coding: utf-8 -*- # @Author : relakkes@gmail.com # @Time : 2023/12/2 23:26 # @Desc : bilibili 请求参数签名 # 逆向实现参考:https://socialsisteryi.github.io/bilibili-API-collect/docs/misc/sign/wbi.html#wbi%E7%AD%BE%E5%90%8D%E7%AE%97%E6%B3%95 +import re import urllib.parse from hashlib import md5 from typing import Dict +from model.m_bilibili import VideoUrlInfo, CreatorUrlInfo from tools import utils @@ -66,16 +68,71 @@ class BilibiliSign: return req_data +def parse_video_info_from_url(url: str) -> VideoUrlInfo: + """ + 从B站视频URL中解析出视频ID + Args: + url: B站视频链接 + - https://www.bilibili.com/video/BV1dwuKzmE26/?spm_id_from=333.1387.homepage.video_card.click + - https://www.bilibili.com/video/BV1d54y1g7db + - BV1d54y1g7db (直接传入BV号) + Returns: + VideoUrlInfo: 包含视频ID的对象 + """ + # 如果传入的已经是BV号,直接返回 + if url.startswith("BV"): + return VideoUrlInfo(video_id=url) + + # 使用正则表达式提取BV号 + # 匹配 /video/BV... 或 /video/av... 
格式 + bv_pattern = r'/video/(BV[a-zA-Z0-9]+)' + match = re.search(bv_pattern, url) + + if match: + video_id = match.group(1) + return VideoUrlInfo(video_id=video_id) + + raise ValueError(f"无法从URL中解析出视频ID: {url}") + + +def parse_creator_info_from_url(url: str) -> CreatorUrlInfo: + """ + 从B站创作者空间URL中解析出创作者ID + Args: + url: B站创作者空间链接 + - https://space.bilibili.com/434377496?spm_id_from=333.1007.0.0 + - https://space.bilibili.com/20813884 + - 434377496 (直接传入UID) + Returns: + CreatorUrlInfo: 包含创作者ID的对象 + """ + # 如果传入的已经是纯数字ID,直接返回 + if url.isdigit(): + return CreatorUrlInfo(creator_id=url) + + # 使用正则表达式提取UID + # 匹配 /space.bilibili.com/数字 格式 + uid_pattern = r'space\.bilibili\.com/(\d+)' + match = re.search(uid_pattern, url) + + if match: + creator_id = match.group(1) + return CreatorUrlInfo(creator_id=creator_id) + + raise ValueError(f"无法从URL中解析出创作者ID: {url}") + + if __name__ == '__main__': - _img_key = "7cd084941338484aae1ad9425b84077c" - _sub_key = "4932caff0ff746eab6f01bf08b70ac45" - _search_url = "__refresh__=true&_extra=&ad_resource=5654&category_id=&context=&dynamic_offset=0&from_source=&from_spmid=333.337&gaia_vtoken=&highlight=1&keyword=python&order=click&page=1&page_size=20&platform=pc&qv_id=OQ8f2qtgYdBV1UoEnqXUNUl8LEDAdzsD&search_type=video&single_column=0&source_tag=3&web_location=1430654" - _req_data = dict() - for params in _search_url.split("&"): - kvalues = params.split("=") - key = kvalues[0] - value = kvalues[1] - _req_data[key] = value - print("pre req_data", _req_data) - _req_data = BilibiliSign(img_key=_img_key, sub_key=_sub_key).sign(req_data={"aid":170001}) - print(_req_data) + # 测试视频URL解析 + video_url1 = "https://www.bilibili.com/video/BV1dwuKzmE26/?spm_id_from=333.1387.homepage.video_card.click" + video_url2 = "BV1d54y1g7db" + print("视频URL解析测试:") + print(f"URL1: {video_url1} -> {parse_video_info_from_url(video_url1)}") + print(f"URL2: {video_url2} -> {parse_video_info_from_url(video_url2)}") + + # 测试创作者URL解析 + creator_url1 = "https://space.bilibili.com/434377496?spm_id_from=333.1007.0.0" + creator_url2 = "20813884" + print("\n创作者URL解析测试:") + print(f"URL1: {creator_url1} -> {parse_creator_info_from_url(creator_url1)}") + print(f"URL2: {creator_url2} -> {parse_creator_info_from_url(creator_url2)}") diff --git a/media_platform/douyin/client.py b/media_platform/douyin/client.py index 46a3e8f..5d980ec 100644 --- a/media_platform/douyin/client.py +++ b/media_platform/douyin/client.py @@ -324,3 +324,28 @@ class DouYinClient(AbstractApiClient): except httpx.HTTPError as exc: # some wrong when call httpx.request method, such as connection error, client error, server error or response status code is not 2xx utils.logger.error(f"[DouYinClient.get_aweme_media] {exc.__class__.__name__} for {exc.request.url} - {exc}") # 保留原始异常类型名称,以便开发者调试 return None + + async def resolve_short_url(self, short_url: str) -> str: + """ + 解析抖音短链接,获取重定向后的真实URL + Args: + short_url: 短链接,如 https://v.douyin.com/iF12345ABC/ + Returns: + 重定向后的完整URL + """ + async with httpx.AsyncClient(proxy=self.proxy, follow_redirects=False) as client: + try: + utils.logger.info(f"[DouYinClient.resolve_short_url] Resolving short URL: {short_url}") + response = await client.get(short_url, timeout=10) + + # 短链接通常返回302重定向 + if response.status_code in [301, 302, 303, 307, 308]: + redirect_url = response.headers.get("Location", "") + utils.logger.info(f"[DouYinClient.resolve_short_url] Resolved to: {redirect_url}") + return redirect_url + else: + utils.logger.warning(f"[DouYinClient.resolve_short_url] Unexpected status code: 
{response.status_code}") + return "" + except Exception as e: + utils.logger.error(f"[DouYinClient.resolve_short_url] Failed to resolve short URL: {e}") + return "" diff --git a/media_platform/douyin/core.py b/media_platform/douyin/core.py index 191e6ab..92aebb3 100644 --- a/media_platform/douyin/core.py +++ b/media_platform/douyin/core.py @@ -33,6 +33,7 @@ from var import crawler_type_var, source_keyword_var from .client import DouYinClient from .exception import DataFetchError from .field import PublishTimeType +from .help import parse_video_info_from_url, parse_creator_info_from_url from .login import DouYinLogin @@ -154,15 +155,39 @@ class DouYinCrawler(AbstractCrawler): await self.batch_get_note_comments(aweme_list) async def get_specified_awemes(self): - """Get the information and comments of the specified post""" + """Get the information and comments of the specified post from URLs or IDs""" + utils.logger.info("[DouYinCrawler.get_specified_awemes] Parsing video URLs...") + aweme_id_list = [] + for video_url in config.DY_SPECIFIED_ID_LIST: + try: + video_info = parse_video_info_from_url(video_url) + + # 处理短链接 + if video_info.url_type == "short": + utils.logger.info(f"[DouYinCrawler.get_specified_awemes] Resolving short link: {video_url}") + resolved_url = await self.dy_client.resolve_short_url(video_url) + if resolved_url: + # 从解析后的URL中提取视频ID + video_info = parse_video_info_from_url(resolved_url) + utils.logger.info(f"[DouYinCrawler.get_specified_awemes] Short link resolved to aweme ID: {video_info.aweme_id}") + else: + utils.logger.error(f"[DouYinCrawler.get_specified_awemes] Failed to resolve short link: {video_url}") + continue + + aweme_id_list.append(video_info.aweme_id) + utils.logger.info(f"[DouYinCrawler.get_specified_awemes] Parsed aweme ID: {video_info.aweme_id} from {video_url}") + except ValueError as e: + utils.logger.error(f"[DouYinCrawler.get_specified_awemes] Failed to parse video URL: {e}") + continue + semaphore = asyncio.Semaphore(config.MAX_CONCURRENCY_NUM) - task_list = [self.get_aweme_detail(aweme_id=aweme_id, semaphore=semaphore) for aweme_id in config.DY_SPECIFIED_ID_LIST] + task_list = [self.get_aweme_detail(aweme_id=aweme_id, semaphore=semaphore) for aweme_id in aweme_id_list] aweme_details = await asyncio.gather(*task_list) for aweme_detail in aweme_details: if aweme_detail is not None: await douyin_store.update_douyin_aweme(aweme_item=aweme_detail) await self.get_aweme_media(aweme_item=aweme_detail) - await self.batch_get_note_comments(config.DY_SPECIFIED_ID_LIST) + await self.batch_get_note_comments(aweme_id_list) async def get_aweme_detail(self, aweme_id: str, semaphore: asyncio.Semaphore) -> Any: """Get note detail""" @@ -218,10 +243,20 @@ class DouYinCrawler(AbstractCrawler): async def get_creators_and_videos(self) -> None: """ - Get the information and videos of the specified creator + Get the information and videos of the specified creator from URLs or IDs """ utils.logger.info("[DouYinCrawler.get_creators_and_videos] Begin get douyin creators") - for user_id in config.DY_CREATOR_ID_LIST: + utils.logger.info("[DouYinCrawler.get_creators_and_videos] Parsing creator URLs...") + + for creator_url in config.DY_CREATOR_ID_LIST: + try: + creator_info_parsed = parse_creator_info_from_url(creator_url) + user_id = creator_info_parsed.sec_user_id + utils.logger.info(f"[DouYinCrawler.get_creators_and_videos] Parsed sec_user_id: {user_id} from {creator_url}") + except ValueError as e: + utils.logger.error(f"[DouYinCrawler.get_creators_and_videos] Failed to 
parse creator URL: {e}") + continue + creator_info: Dict = await self.dy_client.get_user_info(user_id) if creator_info: await douyin_store.save_creator(user_id, creator=creator_info) diff --git a/media_platform/douyin/help.py b/media_platform/douyin/help.py index 1ed3111..d4e245d 100644 --- a/media_platform/douyin/help.py +++ b/media_platform/douyin/help.py @@ -16,10 +16,15 @@ # @Desc : 获取 a_bogus 参数, 学习交流使用,请勿用作商业用途,侵权联系作者删除 import random +import re +from typing import Optional import execjs from playwright.async_api import Page +from model.m_douyin import VideoUrlInfo, CreatorUrlInfo +from tools.crawler_util import extract_url_params_to_dict + douyin_sign_obj = execjs.compile(open('libs/douyin.js', encoding='utf-8-sig').read()) def get_web_id(): @@ -83,3 +88,103 @@ async def get_a_bogus_from_playright(params: str, post_data: dict, user_agent: s return a_bogus + +def parse_video_info_from_url(url: str) -> VideoUrlInfo: + """ + 从抖音视频URL中解析出视频ID + 支持以下格式: + 1. 普通视频链接: https://www.douyin.com/video/7525082444551310602 + 2. 带modal_id参数的链接: + - https://www.douyin.com/user/MS4wLjABAAAATJPY7LAlaa5X-c8uNdWkvz0jUGgpw4eeXIwu_8BhvqE?modal_id=7525082444551310602 + - https://www.douyin.com/root/search/python?modal_id=7471165520058862848 + 3. 短链接: https://v.douyin.com/iF12345ABC/ (需要client解析) + 4. 纯ID: 7525082444551310602 + + Args: + url: 抖音视频链接或ID + Returns: + VideoUrlInfo: 包含视频ID的对象 + """ + # 如果是纯数字ID,直接返回 + if url.isdigit(): + return VideoUrlInfo(aweme_id=url, url_type="normal") + + # 检查是否是短链接 (v.douyin.com) + if "v.douyin.com" in url or url.startswith("http") and len(url) < 50 and "video" not in url: + return VideoUrlInfo(aweme_id="", url_type="short") # 需要通过client解析 + + # 尝试从URL参数中提取modal_id + params = extract_url_params_to_dict(url) + modal_id = params.get("modal_id") + if modal_id: + return VideoUrlInfo(aweme_id=modal_id, url_type="modal") + + # 从标准视频URL中提取ID: /video/数字 + video_pattern = r'/video/(\d+)' + match = re.search(video_pattern, url) + if match: + aweme_id = match.group(1) + return VideoUrlInfo(aweme_id=aweme_id, url_type="normal") + + raise ValueError(f"无法从URL中解析出视频ID: {url}") + + +def parse_creator_info_from_url(url: str) -> CreatorUrlInfo: + """ + 从抖音创作者主页URL中解析出创作者ID (sec_user_id) + 支持以下格式: + 1. 创作者主页: https://www.douyin.com/user/MS4wLjABAAAATJPY7LAlaa5X-c8uNdWkvz0jUGgpw4eeXIwu_8BhvqE?from_tab_name=main + 2. 
纯ID: MS4wLjABAAAATJPY7LAlaa5X-c8uNdWkvz0jUGgpw4eeXIwu_8BhvqE + + Args: + url: 抖音创作者主页链接或sec_user_id + Returns: + CreatorUrlInfo: 包含创作者ID的对象 + """ + # 如果是纯ID格式(通常以MS4wLjABAAAA开头),直接返回 + if url.startswith("MS4wLjABAAAA") or (not url.startswith("http") and "douyin.com" not in url): + return CreatorUrlInfo(sec_user_id=url) + + # 从创作者主页URL中提取sec_user_id: /user/xxx + user_pattern = r'/user/([^/?]+)' + match = re.search(user_pattern, url) + if match: + sec_user_id = match.group(1) + return CreatorUrlInfo(sec_user_id=sec_user_id) + + raise ValueError(f"无法从URL中解析出创作者ID: {url}") + + +if __name__ == '__main__': + # 测试视频URL解析 + print("=== 视频URL解析测试 ===") + test_urls = [ + "https://www.douyin.com/video/7525082444551310602", + "https://www.douyin.com/user/MS4wLjABAAAATJPY7LAlaa5X-c8uNdWkvz0jUGgpw4eeXIwu_8BhvqE?from_tab_name=main&modal_id=7525082444551310602", + "https://www.douyin.com/root/search/python?aid=b733a3b0-4662-4639-9a72-c2318fba9f3f&modal_id=7471165520058862848&type=general", + "7525082444551310602", + ] + for url in test_urls: + try: + result = parse_video_info_from_url(url) + print(f"✓ URL: {url[:80]}...") + print(f" 结果: {result}\n") + except Exception as e: + print(f"✗ URL: {url}") + print(f" 错误: {e}\n") + + # 测试创作者URL解析 + print("=== 创作者URL解析测试 ===") + test_creator_urls = [ + "https://www.douyin.com/user/MS4wLjABAAAATJPY7LAlaa5X-c8uNdWkvz0jUGgpw4eeXIwu_8BhvqE?from_tab_name=main", + "MS4wLjABAAAATJPY7LAlaa5X-c8uNdWkvz0jUGgpw4eeXIwu_8BhvqE", + ] + for url in test_creator_urls: + try: + result = parse_creator_info_from_url(url) + print(f"✓ URL: {url[:80]}...") + print(f" 结果: {result}\n") + except Exception as e: + print(f"✗ URL: {url}") + print(f" 错误: {e}\n") + diff --git a/media_platform/kuaishou/core.py b/media_platform/kuaishou/core.py index cdbe373..9e11a7f 100644 --- a/media_platform/kuaishou/core.py +++ b/media_platform/kuaishou/core.py @@ -26,6 +26,7 @@ from playwright.async_api import ( import config from base.base_crawler import AbstractCrawler +from model.m_kuaishou import VideoUrlInfo, CreatorUrlInfo from proxy.proxy_ip_pool import IpInfoModel, create_ip_pool from store import kuaishou as kuaishou_store from tools import utils @@ -34,6 +35,7 @@ from var import comment_tasks_var, crawler_type_var, source_keyword_var from .client import KuaiShouClient from .exception import DataFetchError +from .help import parse_video_info_from_url, parse_creator_info_from_url from .login import KuaishouLogin @@ -168,16 +170,27 @@ class KuaishouCrawler(AbstractCrawler): async def get_specified_videos(self): """Get the information and comments of the specified post""" + utils.logger.info("[KuaishouCrawler.get_specified_videos] Parsing video URLs...") + video_ids = [] + for video_url in config.KS_SPECIFIED_ID_LIST: + try: + video_info = parse_video_info_from_url(video_url) + video_ids.append(video_info.video_id) + utils.logger.info(f"Parsed video ID: {video_info.video_id} from {video_url}") + except ValueError as e: + utils.logger.error(f"Failed to parse video URL: {e}") + continue + semaphore = asyncio.Semaphore(config.MAX_CONCURRENCY_NUM) task_list = [ self.get_video_info_task(video_id=video_id, semaphore=semaphore) - for video_id in config.KS_SPECIFIED_ID_LIST + for video_id in video_ids ] video_details = await asyncio.gather(*task_list) for video_detail in video_details: if video_detail is not None: await kuaishou_store.update_kuaishou_video(video_detail) - await self.batch_get_video_comments(config.KS_SPECIFIED_ID_LIST) + await self.batch_get_video_comments(video_ids) async def 
get_video_info_task( self, video_id: str, semaphore: asyncio.Semaphore @@ -367,11 +380,20 @@ class KuaishouCrawler(AbstractCrawler): utils.logger.info( "[KuaiShouCrawler.get_creators_and_videos] Begin get kuaishou creators" ) - for user_id in config.KS_CREATOR_ID_LIST: - # get creator detail info from web html content - createor_info: Dict = await self.ks_client.get_creator_info(user_id=user_id) - if createor_info: - await kuaishou_store.save_creator(user_id, creator=createor_info) + for creator_url in config.KS_CREATOR_ID_LIST: + try: + # Parse creator URL to get user_id + creator_info: CreatorUrlInfo = parse_creator_info_from_url(creator_url) + utils.logger.info(f"[KuaiShouCrawler.get_creators_and_videos] Parse creator URL info: {creator_info}") + user_id = creator_info.user_id + + # get creator detail info from web html content + createor_info: Dict = await self.ks_client.get_creator_info(user_id=user_id) + if createor_info: + await kuaishou_store.save_creator(user_id, creator=createor_info) + except ValueError as e: + utils.logger.error(f"[KuaiShouCrawler.get_creators_and_videos] Failed to parse creator URL: {e}") + continue # Get all video information of the creator all_video_list = await self.ks_client.get_all_videos_by_creator( diff --git a/media_platform/kuaishou/help.py b/media_platform/kuaishou/help.py new file mode 100644 index 0000000..5015f2d --- /dev/null +++ b/media_platform/kuaishou/help.py @@ -0,0 +1,99 @@ +# 声明:本代码仅供学习和研究目的使用。使用者应遵守以下原则: +# 1. 不得用于任何商业用途。 +# 2. 使用时应遵守目标平台的使用条款和robots.txt规则。 +# 3. 不得进行大规模爬取或对平台造成运营干扰。 +# 4. 应合理控制请求频率,避免给目标平台带来不必要的负担。 +# 5. 不得用于任何非法或不当的用途。 +# +# 详细许可条款请参阅项目根目录下的LICENSE文件。 +# 使用本代码即表示您同意遵守上述原则和LICENSE中的所有条款。 + + +# -*- coding: utf-8 -*- + +import re +from model.m_kuaishou import VideoUrlInfo, CreatorUrlInfo + + +def parse_video_info_from_url(url: str) -> VideoUrlInfo: + """ + 从快手视频URL中解析出视频ID + 支持以下格式: + 1. 完整视频URL: "https://www.kuaishou.com/short-video/3x3zxz4mjrsc8ke?authorId=3x84qugg4ch9zhs&streamSource=search" + 2. 纯视频ID: "3x3zxz4mjrsc8ke" + + Args: + url: 快手视频链接或视频ID + Returns: + VideoUrlInfo: 包含视频ID的对象 + """ + # 如果不包含http且不包含kuaishou.com,认为是纯ID + if not url.startswith("http") and "kuaishou.com" not in url: + return VideoUrlInfo(video_id=url, url_type="normal") + + # 从标准视频URL中提取ID: /short-video/视频ID + video_pattern = r'/short-video/([a-zA-Z0-9_-]+)' + match = re.search(video_pattern, url) + if match: + video_id = match.group(1) + return VideoUrlInfo(video_id=video_id, url_type="normal") + + raise ValueError(f"无法从URL中解析出视频ID: {url}") + + +def parse_creator_info_from_url(url: str) -> CreatorUrlInfo: + """ + 从快手创作者主页URL中解析出创作者ID + 支持以下格式: + 1. 创作者主页: "https://www.kuaishou.com/profile/3x84qugg4ch9zhs" + 2. 
纯ID: "3x4sm73aye7jq7i" + + Args: + url: 快手创作者主页链接或user_id + Returns: + CreatorUrlInfo: 包含创作者ID的对象 + """ + # 如果不包含http且不包含kuaishou.com,认为是纯ID + if not url.startswith("http") and "kuaishou.com" not in url: + return CreatorUrlInfo(user_id=url) + + # 从创作者主页URL中提取user_id: /profile/xxx + user_pattern = r'/profile/([a-zA-Z0-9_-]+)' + match = re.search(user_pattern, url) + if match: + user_id = match.group(1) + return CreatorUrlInfo(user_id=user_id) + + raise ValueError(f"无法从URL中解析出创作者ID: {url}") + + +if __name__ == '__main__': + # 测试视频URL解析 + print("=== 视频URL解析测试 ===") + test_video_urls = [ + "https://www.kuaishou.com/short-video/3x3zxz4mjrsc8ke?authorId=3x84qugg4ch9zhs&streamSource=search&area=searchxxnull&searchKey=python", + "3xf8enb8dbj6uig", + ] + for url in test_video_urls: + try: + result = parse_video_info_from_url(url) + print(f"✓ URL: {url[:80]}...") + print(f" 结果: {result}\n") + except Exception as e: + print(f"✗ URL: {url}") + print(f" 错误: {e}\n") + + # 测试创作者URL解析 + print("=== 创作者URL解析测试 ===") + test_creator_urls = [ + "https://www.kuaishou.com/profile/3x84qugg4ch9zhs", + "3x4sm73aye7jq7i", + ] + for url in test_creator_urls: + try: + result = parse_creator_info_from_url(url) + print(f"✓ URL: {url[:80]}...") + print(f" 结果: {result}\n") + except Exception as e: + print(f"✗ URL: {url}") + print(f" 错误: {e}\n") diff --git a/media_platform/xhs/client.py b/media_platform/xhs/client.py index 9df39c1..c538874 100644 --- a/media_platform/xhs/client.py +++ b/media_platform/xhs/client.py @@ -451,13 +451,26 @@ class XiaoHongShuClient(AbstractApiClient): result.extend(comments) return result - async def get_creator_info(self, user_id: str) -> Dict: + async def get_creator_info( + self, user_id: str, xsec_token: str = "", xsec_source: str = "" + ) -> Dict: """ 通过解析网页版的用户主页HTML,获取用户个人简要信息 PC端用户主页的网页存在window.__INITIAL_STATE__这个变量上的,解析它即可 - eg: https://www.xiaohongshu.com/user/profile/59d8cb33de5fb4696bf17217 + + Args: + user_id: 用户ID + xsec_token: 验证token (可选,如果URL中包含此参数则传入) + xsec_source: 渠道来源 (可选,如果URL中包含此参数则传入) + + Returns: + Dict: 创作者信息 """ + # 构建URI,如果有xsec参数则添加到URL中 uri = f"/user/profile/{user_id}" + if xsec_token and xsec_source: + uri = f"{uri}?xsec_token={xsec_token}&xsec_source={xsec_source}" + html_content = await self.request( "GET", self._domain + uri, return_response=True, headers=self.headers ) diff --git a/media_platform/xhs/core.py b/media_platform/xhs/core.py index f228392..536c1ca 100644 --- a/media_platform/xhs/core.py +++ b/media_platform/xhs/core.py @@ -26,7 +26,7 @@ from tenacity import RetryError import config from base.base_crawler import AbstractCrawler from config import CRAWLER_MAX_COMMENTS_COUNT_SINGLENOTES -from model.m_xiaohongshu import NoteUrlInfo +from model.m_xiaohongshu import NoteUrlInfo, CreatorUrlInfo from proxy.proxy_ip_pool import IpInfoModel, create_ip_pool from store import xhs as xhs_store from tools import utils @@ -36,7 +36,7 @@ from var import crawler_type_var, source_keyword_var from .client import XiaoHongShuClient from .exception import DataFetchError from .field import SearchSortType -from .help import parse_note_info_from_note_url, get_search_id +from .help import parse_note_info_from_note_url, parse_creator_info_from_url, get_search_id from .login import XiaoHongShuLogin @@ -174,11 +174,24 @@ class XiaoHongShuCrawler(AbstractCrawler): async def get_creators_and_notes(self) -> None: """Get creator's notes and retrieve their comment information.""" utils.logger.info("[XiaoHongShuCrawler.get_creators_and_notes] Begin get xiaohongshu creators") - for 
user_id in config.XHS_CREATOR_ID_LIST: - # get creator detail info from web html content - createor_info: Dict = await self.xhs_client.get_creator_info(user_id=user_id) - if createor_info: - await xhs_store.save_creator(user_id, creator=createor_info) + for creator_url in config.XHS_CREATOR_ID_LIST: + try: + # Parse creator URL to get user_id and security tokens + creator_info: CreatorUrlInfo = parse_creator_info_from_url(creator_url) + utils.logger.info(f"[XiaoHongShuCrawler.get_creators_and_notes] Parse creator URL info: {creator_info}") + user_id = creator_info.user_id + + # get creator detail info from web html content + createor_info: Dict = await self.xhs_client.get_creator_info( + user_id=user_id, + xsec_token=creator_info.xsec_token, + xsec_source=creator_info.xsec_source + ) + if createor_info: + await xhs_store.save_creator(user_id, creator=createor_info) + except ValueError as e: + utils.logger.error(f"[XiaoHongShuCrawler.get_creators_and_notes] Failed to parse creator URL: {e}") + continue # Use fixed crawling interval crawl_interval = config.CRAWLER_MAX_SLEEP_SEC @@ -271,7 +284,7 @@ class XiaoHongShuCrawler(AbstractCrawler): try: note_detail = await self.xhs_client.get_note_by_id(note_id, xsec_source, xsec_token) - except RetryError as e: + except RetryError: pass if not note_detail: diff --git a/media_platform/xhs/help.py b/media_platform/xhs/help.py index 3d96811..2838b67 100644 --- a/media_platform/xhs/help.py +++ b/media_platform/xhs/help.py @@ -15,7 +15,7 @@ import random import time import urllib.parse -from model.m_xiaohongshu import NoteUrlInfo +from model.m_xiaohongshu import NoteUrlInfo, CreatorUrlInfo from tools.crawler_util import extract_url_params_to_dict @@ -306,6 +306,37 @@ def parse_note_info_from_note_url(url: str) -> NoteUrlInfo: return NoteUrlInfo(note_id=note_id, xsec_token=xsec_token, xsec_source=xsec_source) +def parse_creator_info_from_url(url: str) -> CreatorUrlInfo: + """ + 从小红书创作者主页URL中解析出创作者信息 + 支持以下格式: + 1. 完整URL: "https://www.xiaohongshu.com/user/profile/5eb8e1d400000000010075ae?xsec_token=AB1nWBKCo1vE2HEkfoJUOi5B6BE5n7wVrbdpHoWIj5xHw=&xsec_source=pc_feed" + 2. 
纯ID: "5eb8e1d400000000010075ae" + + Args: + url: 创作者主页URL或user_id + Returns: + CreatorUrlInfo: 包含user_id, xsec_token, xsec_source的对象 + """ + # 如果是纯ID格式(24位十六进制字符),直接返回 + if len(url) == 24 and all(c in "0123456789abcdef" for c in url): + return CreatorUrlInfo(user_id=url, xsec_token="", xsec_source="") + + # 从URL中提取user_id: /user/profile/xxx + import re + user_pattern = r'/user/profile/([^/?]+)' + match = re.search(user_pattern, url) + if match: + user_id = match.group(1) + # 提取xsec_token和xsec_source参数 + params = extract_url_params_to_dict(url) + xsec_token = params.get("xsec_token", "") + xsec_source = params.get("xsec_source", "") + return CreatorUrlInfo(user_id=user_id, xsec_token=xsec_token, xsec_source=xsec_source) + + raise ValueError(f"无法从URL中解析出创作者信息: {url}") + + if __name__ == '__main__': _img_url = "https://sns-img-bd.xhscdn.com/7a3abfaf-90c1-a828-5de7-022c80b92aa3" # 获取一个图片地址在多个cdn下的url地址 @@ -313,4 +344,19 @@ if __name__ == '__main__': final_img_url = get_img_url_by_trace_id(get_trace_id(_img_url)) print(final_img_url) + # 测试创作者URL解析 + print("\n=== 创作者URL解析测试 ===") + test_creator_urls = [ + "https://www.xiaohongshu.com/user/profile/5eb8e1d400000000010075ae?xsec_token=AB1nWBKCo1vE2HEkfoJUOi5B6BE5n7wVrbdpHoWIj5xHw=&xsec_source=pc_feed", + "5eb8e1d400000000010075ae", + ] + for url in test_creator_urls: + try: + result = parse_creator_info_from_url(url) + print(f"✓ URL: {url[:80]}...") + print(f" 结果: {result}\n") + except Exception as e: + print(f"✗ URL: {url}") + print(f" 错误: {e}\n") + diff --git a/model/m_bilibili.py b/model/m_bilibili.py new file mode 100644 index 0000000..d095add --- /dev/null +++ b/model/m_bilibili.py @@ -0,0 +1,25 @@ +# 声明:本代码仅供学习和研究目的使用。使用者应遵守以下原则: +# 1. 不得用于任何商业用途。 +# 2. 使用时应遵守目标平台的使用条款和robots.txt规则。 +# 3. 不得进行大规模爬取或对平台造成运营干扰。 +# 4. 应合理控制请求频率,避免给目标平台带来不必要的负担。 +# 5. 不得用于任何非法或不当的用途。 +# +# 详细许可条款请参阅项目根目录下的LICENSE文件。 +# 使用本代码即表示您同意遵守上述原则和LICENSE中的所有条款。 + + +# -*- coding: utf-8 -*- + +from pydantic import BaseModel, Field + + +class VideoUrlInfo(BaseModel): + """B站视频URL信息""" + video_id: str = Field(title="video id (BV id)") + video_type: str = Field(default="video", title="video type") + + +class CreatorUrlInfo(BaseModel): + """B站创作者URL信息""" + creator_id: str = Field(title="creator id (UID)") diff --git a/model/m_douyin.py b/model/m_douyin.py index e907b1d..523399b 100644 --- a/model/m_douyin.py +++ b/model/m_douyin.py @@ -1,12 +1,25 @@ -# 声明:本代码仅供学习和研究目的使用。使用者应遵守以下原则: -# 1. 不得用于任何商业用途。 -# 2. 使用时应遵守目标平台的使用条款和robots.txt规则。 -# 3. 不得进行大规模爬取或对平台造成运营干扰。 -# 4. 应合理控制请求频率,避免给目标平台带来不必要的负担。 +# 声明:本代码仅供学习和研究目的使用。使用者应遵守以下原则: +# 1. 不得用于任何商业用途。 +# 2. 使用时应遵守目标平台的使用条款和robots.txt规则。 +# 3. 不得进行大规模爬取或对平台造成运营干扰。 +# 4. 应合理控制请求频率,避免给目标平台带来不必要的负担。 # 5. 不得用于任何非法或不当的用途。 -# -# 详细许可条款请参阅项目根目录下的LICENSE文件。 -# 使用本代码即表示您同意遵守上述原则和LICENSE中的所有条款。 +# +# 详细许可条款请参阅项目根目录下的LICENSE文件。 +# 使用本代码即表示您同意遵守上述原则和LICENSE中的所有条款。 # -*- coding: utf-8 -*- + +from pydantic import BaseModel, Field + + +class VideoUrlInfo(BaseModel): + """抖音视频URL信息""" + aweme_id: str = Field(title="aweme id (video id)") + url_type: str = Field(default="normal", title="url type: normal, short, modal") + + +class CreatorUrlInfo(BaseModel): + """抖音创作者URL信息""" + sec_user_id: str = Field(title="sec_user_id (creator id)") diff --git a/model/m_kuaishou.py b/model/m_kuaishou.py index e907b1d..b7c2080 100644 --- a/model/m_kuaishou.py +++ b/model/m_kuaishou.py @@ -1,12 +1,25 @@ -# 声明:本代码仅供学习和研究目的使用。使用者应遵守以下原则: -# 1. 不得用于任何商业用途。 -# 2. 使用时应遵守目标平台的使用条款和robots.txt规则。 -# 3. 不得进行大规模爬取或对平台造成运营干扰。 -# 4. 
应合理控制请求频率,避免给目标平台带来不必要的负担。 +# 声明:本代码仅供学习和研究目的使用。使用者应遵守以下原则: +# 1. 不得用于任何商业用途。 +# 2. 使用时应遵守目标平台的使用条款和robots.txt规则。 +# 3. 不得进行大规模爬取或对平台造成运营干扰。 +# 4. 应合理控制请求频率,避免给目标平台带来不必要的负担。 # 5. 不得用于任何非法或不当的用途。 -# -# 详细许可条款请参阅项目根目录下的LICENSE文件。 -# 使用本代码即表示您同意遵守上述原则和LICENSE中的所有条款。 +# +# 详细许可条款请参阅项目根目录下的LICENSE文件。 +# 使用本代码即表示您同意遵守上述原则和LICENSE中的所有条款。 # -*- coding: utf-8 -*- + +from pydantic import BaseModel, Field + + +class VideoUrlInfo(BaseModel): + """快手视频URL信息""" + video_id: str = Field(title="video id (photo id)") + url_type: str = Field(default="normal", title="url type: normal") + + +class CreatorUrlInfo(BaseModel): + """快手创作者URL信息""" + user_id: str = Field(title="user id (creator id)") diff --git a/model/m_xiaohongshu.py b/model/m_xiaohongshu.py index 53294c6..abccb63 100644 --- a/model/m_xiaohongshu.py +++ b/model/m_xiaohongshu.py @@ -18,4 +18,11 @@ from pydantic import BaseModel, Field class NoteUrlInfo(BaseModel): note_id: str = Field(title="note id") xsec_token: str = Field(title="xsec token") - xsec_source: str = Field(title="xsec source") \ No newline at end of file + xsec_source: str = Field(title="xsec source") + + +class CreatorUrlInfo(BaseModel): + """小红书创作者URL信息""" + user_id: str = Field(title="user id (creator id)") + xsec_token: str = Field(default="", title="xsec token") + xsec_source: str = Field(default="", title="xsec source") \ No newline at end of file
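
A note on the ENABLE_CDP_MODE default flipped to True in config/base_config.py: CDP mode attaches to the user's own Chrome/Edge over the DevTools protocol instead of launching a fresh automation browser. A minimal sketch of that attach step with Playwright, assuming a browser already started with --remote-debugging-port=9222 (the port and launch mechanics here are illustrative; the project's own CDP launcher handles them):

import asyncio
from playwright.async_api import async_playwright

async def attach_over_cdp(port: int = 9222) -> None:
    # Attach to an already-running Chrome/Edge started with
    # --remote-debugging-port=<port>, reusing the real profile,
    # extensions and cookies rather than a clean automation context.
    async with async_playwright() as p:
        browser = await p.chromium.connect_over_cdp(f"http://127.0.0.1:{port}")
        context = browser.contexts[0] if browser.contexts else await browser.new_context()
        page = await context.new_page()
        await page.goto("https://www.bilibili.com")
        print(await page.title())
        await page.close()

if __name__ == "__main__":
    asyncio.run(attach_over_cdp())

Closing only the page (rather than the browser) leaves the user's real browser session running after the crawl.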
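
The douyin branch of get_specified_awemes chains the new parser with DouYinClient.resolve_short_url: a v.douyin.com entry is first classified as url_type == "short", resolved to its redirect target, then re-parsed for the numeric aweme id. Condensed into one helper (a sketch of the flow in media_platform/douyin/core.py; url_to_aweme_id is a hypothetical name and error handling is reduced to returning None):

from media_platform.douyin.help import parse_video_info_from_url

async def url_to_aweme_id(dy_client, entry: str):
    # Normalize any supported config entry (full URL, modal_id URL,
    # short link or bare ID) to a numeric aweme id, or None on failure.
    try:
        info = parse_video_info_from_url(entry)
        if info.url_type == "short":
            resolved = await dy_client.resolve_short_url(entry)
            if not resolved:
                return None
            info = parse_video_info_from_url(resolved)
        return info.aweme_id
    except ValueError:
        return None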
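
parse_creator_info_from_url for 小红书 relies on the project's extract_url_params_to_dict helper to pull xsec_token and xsec_source off the query string. For reference, the same step with only the standard library (an illustration of what that call does, not the helper's actual implementation):

from urllib.parse import parse_qs, urlparse

def query_params(url: str) -> dict:
    # Flatten ?k=v&k2=v2 into {"k": "v", ...}; each field is split on the
    # first '=' only, so xsec_token values ending in '=' survive intact.
    return {k: v[0] for k, v in parse_qs(urlparse(url).query).items()}

params = query_params(
    "https://www.xiaohongshu.com/user/profile/5eb8e1d400000000010075ae"
    "?xsec_token=AB1nWBKCo1vE2HEkfoJUOi5B6BE5n7wVrbdpHoWIj5xHw=&xsec_source=pc_feed"
)
print(params.get("xsec_token"), params.get("xsec_source"))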
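
Each get_specified_* method above feeds the parsed IDs into the existing gather-under-semaphore pattern (asyncio.Semaphore(config.MAX_CONCURRENCY_NUM) plus asyncio.gather). The shape of that pattern in isolation, with generic names (fetch_with_limit and worker are illustrative, not project functions):

import asyncio

async def fetch_with_limit(ids, worker, max_concurrency: int = 4):
    # Create one task per ID up front, but let the semaphore cap how many
    # requests hit the platform concurrently; results keep input order.
    sem = asyncio.Semaphore(max_concurrency)

    async def guarded(item_id):
        async with sem:
            return await worker(item_id)

    return await asyncio.gather(*(guarded(i) for i in ids))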