Mirror of https://github.com/NanmiCoder/MediaCrawler.git (synced 2026-02-27 10:40:45 +08:00)

Merge pull request #652 from gaoxiaobei/dev
feat(bilibili): Add flexible search modes and fix limit logic
@@ -10,4 +10,4 @@
 from .base_config import *
 from .db_config import *
@@ -10,28 +10,16 @@
 
 
 # Basic configuration
-PLATFORM = "xhs"
+PLATFORM = "xhs"  # platform: xhs | dy | ks | bili | wb | tieba | zhihu
 KEYWORDS = "编程副业,编程兼职"  # keywords to search, comma-separated
 LOGIN_TYPE = "qrcode"  # qrcode or phone or cookie
 COOKIES = ""
-# see the enum values under media_platform.xxx.field; currently XHS only
-SORT_TYPE = "popularity_descending"
-# see the enum values under media_platform.xxx.field; currently Douyin only
-PUBLISH_TIME_TYPE = 0
 CRAWLER_TYPE = (
     "search"  # crawl type: search (keyword search) | detail (post detail) | creator (creator homepage data)
 )
-# Weibo search type: default (comprehensive) | real_time (realtime) | popular (hot) | video (video)
-WEIBO_SEARCH_TYPE = "popular"
-# Custom User-Agent (currently XHS only)
-UA = "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/131.0.0.0 Safari/537.36 Edg/131.0.0.0"
 
 # Whether to enable IP proxy
 ENABLE_IP_PROXY = False
 
-# Max crawl interval in seconds when the proxy is disabled (currently XHS only)
-CRAWLER_MAX_SLEEP_SEC = 2
-
 # Proxy IP pool size
 IP_PROXY_POOL_COUNT = 2
@@ -102,101 +90,6 @@ CRAWLER_MAX_COMMENTS_COUNT_SINGLENOTES = 10
 # Projects that used db in older versions must add table fields per schema/tables.sql line 287
 ENABLE_GET_SUB_COMMENTS = False
 
-# Deprecated ⚠️⚠️⚠️ list of XHS note IDs to crawl
-# Deprecated ⚠️⚠️⚠️ crawling by note ID fails because the xsec_token and xsec_source parameters are missing
-# XHS_SPECIFIED_ID_LIST = [
-#     "66fad51c000000001b0224b8",
-#     # ........................
-# ]
-
-# List of XHS note URLs to crawl; currently they must carry the xsec_token and xsec_source parameters
-XHS_SPECIFIED_NOTE_URL_LIST = [
-    "https://www.xiaohongshu.com/explore/66fad51c000000001b0224b8?xsec_token=AB3rO-QopW5sgrJ41GwN01WCXh6yWPxjSoFI9D5JIMgKw=&xsec_source=pc_search"
-    # ........................
-]
-
-# List of Douyin IDs to crawl
-DY_SPECIFIED_ID_LIST = [
-    "7280854932641664319",
-    "7202432992642387233",
-    # ........................
-]
-
-# List of Kuaishou IDs to crawl
-KS_SPECIFIED_ID_LIST = ["3xf8enb8dbj6uig", "3x6zz972bchmvqe"]
-
-# List of Bilibili video bvids to crawl
-BILI_SPECIFIED_ID_LIST = [
-    "BV1d54y1g7db",
-    "BV1Sz4y1U77N",
-    "BV14Q4y1n7jz",
-    # ........................
-]
-
-# List of Weibo post IDs to crawl
-WEIBO_SPECIFIED_ID_LIST = [
-    "4982041758140155",
-    # ........................
-]
-
-# List of Weibo creator IDs
-WEIBO_CREATOR_ID_LIST = [
-    "5533390220",
-    # ........................
-]
-
-# List of Tieba post IDs to crawl
-TIEBA_SPECIFIED_ID_LIST = []
-
-# List of Tieba forum names; posts under these forums are crawled
-TIEBA_NAME_LIST = [
-    # "盗墓笔记"
-]
-
-# List of Tieba creator URLs
-TIEBA_CREATOR_URL_LIST = [
-    "https://tieba.baidu.com/home/main/?id=tb.1.7f139e2e.6CyEwxu3VJruH_-QqpCi6g&fr=frs",
-    # ........................
-]
-
-# List of XHS creator IDs
-XHS_CREATOR_ID_LIST = [
-    "63e36c9a000000002703502b",
-    # ........................
-]
-
-# List of Douyin creator IDs (sec_id)
-DY_CREATOR_ID_LIST = [
-    "MS4wLjABAAAATJPY7LAlaa5X-c8uNdWkvz0jUGgpw4eeXIwu_8BhvqE",
-    # ........................
-]
-
-# List of Bilibili creator IDs (sec_id)
-BILI_CREATOR_ID_LIST = [
-    "20813884",
-    # ........................
-]
-
-# List of Kuaishou creator IDs
-KS_CREATOR_ID_LIST = [
-    "3x4sm73aye7jq7i",
-    # ........................
-]
-
-# List of Zhihu creator homepage URLs
-ZHIHU_CREATOR_URL_LIST = [
-    "https://www.zhihu.com/people/yd1234567",
-    # ........................
-]
-
-# List of Zhihu post IDs to crawl
-ZHIHU_SPECIFIED_ID_LIST = [
-    "https://www.zhihu.com/question/826896610/answer/4885821440",  # answer
-    "https://zhuanlan.zhihu.com/p/673461588",  # article
-    "https://www.zhihu.com/zvideo/1539542068422144000",  # video
-]
-
 # Word cloud settings
 # Whether to generate a word cloud from comments
 ENABLE_GET_WORDCLOUD = False
@@ -212,27 +105,3 @@ STOP_WORDS_FILE = "./docs/hit_stopwords.txt"
 
 # Path to a Chinese font file
 FONT_PATH = "./docs/STZHONGS.TTF"
 
-# Crawl start date; Bilibili keyword search only; YYYY-MM-DD format; None means no time range, and a keyword then returns at most 1000 videos by default
-START_DAY = "2024-01-01"
-
-# Crawl end date; Bilibili keyword search only; YYYY-MM-DD format; None means no time range, and a keyword then returns at most 1000 videos by default
-END_DAY = "2024-01-01"
-
-# Whether to crawl day by day; Bilibili keyword search only
-# If False, the START_DAY and END_DAY values are ignored
-# If True, filter each day from START_DAY to END_DAY; this breaks the 1000-video cap and crawls as many of the keyword's videos as possible
-ALL_DAY = False
-
-#!!! The options below apply to Bilibili creator search only
-# Crawl the creator homepage comments, or the creator dynamics and contact lists (True for the former)
-CREATOR_MODE = True
-
-# Start page when crawling a creator's fan list
-START_CONTACTS_PAGE = 1
-
-# Cap on fans and followings crawled per creator
-CRAWLER_MAX_CONTACTS_COUNT_SINGLENOTES = 100
-
-# Cap on dynamics crawled per creator
-CRAWLER_MAX_DYNAMICS_COUNT_SINGLENOTES = 50
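Note: after this split, platform settings live beside their crawler instead of in the global config file. For example, pinning the Bilibili date window is now done in the platform module; the values below are illustrative, not this PR's defaults (which are "2024-01-01"). The new search modes are shown further down this diff.

```python
# media_platform/bilibili/config.py
START_DAY = "2024-06-01"
END_DAY = "2024-06-30"
BILI_SEARCH_MODE = "all_in_time_range"  # crawl day by day within the window
```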
main.py (27 lines changed)
@@ -45,25 +45,30 @@ class CrawlerFactory:
         return crawler_class()
 
 
 async def main():
-    # parse cmd
-    await cmd_arg.parse_cmd()
-
-    # init db
-    if config.SAVE_DATA_OPTION in ["db", "sqlite"]:
-        await db.init_db()
-
-    crawler = CrawlerFactory.create_crawler(platform=config.PLATFORM)
-    await crawler.start()
-
-    if config.SAVE_DATA_OPTION in ["db", "sqlite"]:
-        await db.close()
+    # Init crawler
+    crawler: Optional[AbstractCrawler] = None
+    try:
+        # parse cmd
+        await cmd_arg.parse_cmd()
+
+        # init db
+        if config.SAVE_DATA_OPTION in ["db", "sqlite"]:
+            await db.init_db()
+
+        crawler = CrawlerFactory.create_crawler(platform=config.PLATFORM)
+        await crawler.start()
+    finally:
+        if crawler:
+            await crawler.close()
+
+        if config.SAVE_DATA_OPTION in ["db", "sqlite"]:
+            await db.close()
 
 
 if __name__ == '__main__':
     try:
         # asyncio.run(main())
         asyncio.get_event_loop().run_until_complete(main())
     except KeyboardInterrupt:
         print("\n[main] Caught keyboard interrupt, exiting.")
         sys.exit()
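Note: the restructure above guarantees that the crawler and the database are released even when the crawl raises or is interrupted. A minimal sketch of the same pattern, with a hypothetical `Resource` class standing in for the crawler (not project code):

```python
import asyncio

class Resource:
    """Stand-in for the crawler; only illustrates the cleanup pattern."""
    async def start(self) -> None:
        raise RuntimeError("simulated mid-crawl failure")
    async def close(self) -> None:
        print("resource closed")  # runs even though start() raised

async def main() -> None:
    resource = None
    try:
        resource = Resource()
        await resource.start()
    finally:
        # finally runs on success, exception, and KeyboardInterrupt alike,
        # mirroring the crawler.close() call added in the diff above
        if resource:
            await resource.close()

try:
    asyncio.run(main())
except RuntimeError as exc:
    print(f"crawl failed but cleanup already ran: {exc}")
```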
@@ -15,6 +15,7 @@
 # @Desc  : bilibili request client
 import asyncio
 import json
+import random
 from typing import Any, Callable, Dict, List, Optional, Tuple, Union
 from urllib.parse import urlencode
@@ -53,7 +54,11 @@ class BilibiliClient(AbstractApiClient):
             method, url, timeout=self.timeout,
             **kwargs
         )
-        data: Dict = response.json()
+        try:
+            data: Dict = response.json()
+        except json.JSONDecodeError:
+            utils.logger.error(f"[BilibiliClient.request] Failed to decode JSON from response. status_code: {response.status_code}, response_text: {response.text}")
+            raise DataFetchError(f"Failed to decode JSON, content: {response.text}")
         if data.get("code") != 0:
             raise DataFetchError(data.get("message", "unkonw error"))
         else:
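Note: Bilibili sometimes answers with a non-JSON body (for example an HTML block page when rate-limited), and previously `response.json()` let a bare `json.JSONDecodeError` escape `request()`. A sketch of the guard as a standalone function, using only the standard library (names here are illustrative, not the project's):

```python
import json

class DataFetchError(Exception):
    pass

def parse_api_body(text: str, status_code: int) -> dict:
    # Mirrors the guard added above: convert malformed bodies into the
    # crawler's own error type so callers can catch one exception class.
    try:
        return json.loads(text)
    except json.JSONDecodeError:
        raise DataFetchError(f"Failed to decode JSON (status {status_code}): {text[:80]}")

print(parse_api_body('{"code": 0}', 200))        # {'code': 0}
# parse_api_body('<html>blocked</html>', 412)    # raises DataFetchError
```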
@@ -78,8 +83,12 @@ class BilibiliClient(AbstractApiClient):
         :return:
         """
         local_storage = await self.playwright_page.evaluate("() => window.localStorage")
-        wbi_img_urls = local_storage.get("wbi_img_urls", "") or local_storage.get(
-            "wbi_img_url") + "-" + local_storage.get("wbi_sub_url")
+        wbi_img_urls = local_storage.get("wbi_img_urls", "")
+        if not wbi_img_urls:
+            img_url_from_storage = local_storage.get("wbi_img_url")
+            sub_url_from_storage = local_storage.get("wbi_sub_url")
+            if img_url_from_storage and sub_url_from_storage:
+                wbi_img_urls = f"{img_url_from_storage}-{sub_url_from_storage}"
         if wbi_img_urls and "-" in wbi_img_urls:
             img_url, sub_url = wbi_img_urls.split("-")
         else:
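Note: the old expression concatenated `local_storage.get(...)` results directly, so a missing `wbi_img_url` or `wbi_sub_url` key produced `None + "-"` and crashed with `TypeError`. The new code checks both keys first. A sketch of the fallback as a pure function (the helper name is hypothetical):

```python
from typing import Optional

def resolve_wbi_img_urls(storage: dict) -> Optional[str]:
    """Replicates the fallback logic added above, outside Playwright."""
    urls = storage.get("wbi_img_urls", "")
    if not urls:
        img = storage.get("wbi_img_url")
        sub = storage.get("wbi_sub_url")
        if img and sub:  # the old code raised TypeError when either was None
            urls = f"{img}-{sub}"
    return urls or None

assert resolve_wbi_img_urls({"wbi_img_urls": "a-b"}) == "a-b"
assert resolve_wbi_img_urls({"wbi_img_url": "a", "wbi_sub_url": "b"}) == "a-b"
assert resolve_wbi_img_urls({"wbi_img_url": "a"}) is None  # previously a crash
```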
@@ -235,16 +244,50 @@ class BilibiliClient(AbstractApiClient):
 
         :return:
         """
 
         result = []
         is_end = False
         next_page = 0
+        max_retries = 3
         while not is_end and len(result) < max_count:
-            comments_res = await self.get_video_comments(video_id, CommentOrderType.DEFAULT, next_page)
+            comments_res = None
+            for attempt in range(max_retries):
+                try:
+                    comments_res = await self.get_video_comments(video_id, CommentOrderType.DEFAULT, next_page)
+                    break  # Success
+                except DataFetchError as e:
+                    if attempt < max_retries - 1:
+                        delay = 5 * (2 ** attempt) + random.uniform(0, 1)
+                        utils.logger.warning(
+                            f"[BilibiliClient.get_video_all_comments] Retrying video_id {video_id} in {delay:.2f}s... (Attempt {attempt + 1}/{max_retries})"
+                        )
+                        await asyncio.sleep(delay)
+                    else:
+                        utils.logger.error(
+                            f"[BilibiliClient.get_video_all_comments] Max retries reached for video_id: {video_id}. Skipping comments. Error: {e}"
+                        )
+                        is_end = True
+                        break
+            if not comments_res:
+                break
+
             cursor_info: Dict = comments_res.get("cursor")
+            if not cursor_info:
+                utils.logger.warning(f"[BilibiliClient.get_video_all_comments] Could not find 'cursor' in response for video_id: {video_id}. Skipping.")
+                break
+
             comment_list: List[Dict] = comments_res.get("replies", [])
-            is_end = cursor_info.get("is_end")
-            next_page = cursor_info.get("next")
+            # check that is_end and next exist
+            if "is_end" not in cursor_info or "next" not in cursor_info:
+                utils.logger.warning(f"[BilibiliClient.get_video_all_comments] 'is_end' or 'next' not in cursor for video_id: {video_id}. Assuming end of comments.")
+                is_end = True
+            else:
+                is_end = cursor_info.get("is_end")
+                next_page = cursor_info.get("next")
+
+            if not isinstance(is_end, bool):
+                utils.logger.warning(f"[BilibiliClient.get_video_all_comments] 'is_end' is not a boolean for video_id: {video_id}. Assuming end of comments.")
+                is_end = True
             if is_fetch_sub_comments:
                 for comment in comment_list:
                     comment_id = comment['rpid']
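Note: the retry delay follows `5 * 2**attempt` plus up to 1 s of jitter, so with `max_retries = 3` a failing page waits roughly 5-6 s, then 10-11 s, and the third attempt fails without sleeping. A quick check of the schedule (values per the diff above):

```python
import random

max_retries = 3
for attempt in range(max_retries - 1):  # the final attempt errors out instead of sleeping
    delay = 5 * (2 ** attempt) + random.uniform(0, 1)
    print(f"attempt {attempt + 1} failed, sleeping {delay:.2f}s")
# attempt 1 failed, sleeping ~5-6s
# attempt 2 failed, sleeping ~10-11s
# attempt 3 logs an error and sets is_end = True, skipping the video's comments
```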
media_platform/bilibili/config.py (new file, 34 lines)
@@ -0,0 +1,34 @@
# Disclaimer: this code is for learning and research purposes only. Users must observe the following principles:
# 1. It must not be used for any commercial purpose.
# 2. Usage must comply with the target platform's terms of service and robots.txt rules.
# 3. No large-scale crawling, and no operational disruption to the platform.
# 4. Request rates must be kept reasonable to avoid placing unnecessary load on the target platform.
# 5. It must not be used for any illegal or improper purpose.
#
# See the LICENSE file in the project root for the detailed license terms.
# By using this code you agree to the above principles and all terms of the LICENSE.


from config import *

# Cap on videos/posts crawled per day
MAX_NOTES_PER_DAY = 1

# Bilibili platform configuration
BILI_SPECIFIED_ID_LIST = [
    "BV1d54y1g7db",
    "BV1Sz4y1U77N",
    "BV14Q4y1n7jz",
    # ........................
]
START_DAY = "2024-01-01"
END_DAY = "2024-01-01"
BILI_SEARCH_MODE = "normal"
CREATOR_MODE = True
START_CONTACTS_PAGE = 1
CRAWLER_MAX_CONTACTS_COUNT_SINGLENOTES = 100
CRAWLER_MAX_DYNAMICS_COUNT_SINGLENOTES = 50
BILI_CREATOR_ID_LIST = [
    "20813884",
    # ........................
]
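Note: each per-platform module first executes `from config import *`, pulling every global default into its own namespace, and then rebinds the platform-specific names. Crawlers that switch from `import config` to `from . import config` therefore see the overrides, while untouched settings fall through to the global file. A toy reproduction of that shadowing (module names below are made up for illustration):

```python
import types

# Stand-ins for the global config package and a platform config module.
base = types.ModuleType("base_config")
base.LIMIT = 10            # global default
base.SHARED = "global"

platform = types.ModuleType("platform_config")
platform.__dict__.update(base.__dict__)  # roughly what `from config import *` does
platform.LIMIT = 99                      # platform override shadows the default

print(platform.LIMIT)    # 99       -> seen by modules doing `from . import config`
print(platform.SHARED)   # 'global' -> untouched settings fall through
```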
@@ -23,8 +23,9 @@ from datetime import datetime, timedelta
 import pandas as pd
 
 from playwright.async_api import (BrowserContext, BrowserType, Page, Playwright, async_playwright)
+from playwright._impl._errors import TargetClosedError
 
-import config
+from . import config
 from base.base_crawler import AbstractCrawler
 from proxy.proxy_ip_pool import IpInfoModel, create_ip_pool
 from store import bilibili as bilibili_store
@@ -95,7 +96,6 @@ class BilibiliCrawler(AbstractCrawler):
 
         crawler_type_var.set(config.CRAWLER_TYPE)
         if config.CRAWLER_TYPE == "search":
-            # Search for video and retrieve their comment information.
            await self.search()
         elif config.CRAWLER_TYPE == "detail":
             # Get the information and comments of the specified post
@@ -111,6 +111,20 @@ class BilibiliCrawler(AbstractCrawler):
             utils.logger.info(
                 "[BilibiliCrawler.start] Bilibili Crawler finished ...")
 
+    async def search(self):
+        """
+        search bilibili video
+        """
+        # Search for video and retrieve their comment information.
+        if config.BILI_SEARCH_MODE == "normal":
+            await self.search_by_keywords()
+        elif config.BILI_SEARCH_MODE == "all_in_time_range":
+            await self.search_by_keywords_in_time_range(daily_limit=False)
+        elif config.BILI_SEARCH_MODE == "daily_limit_in_time_range":
+            await self.search_by_keywords_in_time_range(daily_limit=True)
+        else:
+            utils.logger.warning(f"Unknown BILI_SEARCH_MODE: {config.BILI_SEARCH_MODE}")
+
     @staticmethod
     async def get_pubtime_datetime(start: str = config.START_DAY, end: str = config.END_DAY) -> Tuple[str, str]:
         """
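Note: the dispatcher keys off the new `BILI_SEARCH_MODE` setting introduced in `media_platform/bilibili/config.py`; any unrecognized value only logs a warning and crawls nothing. A sketch of the three accepted values (the trailing comments are my reading of this diff, not project docs):

```python
# media_platform/bilibili/config.py -- pick exactly one mode
BILI_SEARCH_MODE = "normal"                       # plain paged keyword search
# BILI_SEARCH_MODE = "all_in_time_range"          # day-by-day between START_DAY and END_DAY, capped only by CRAWLER_MAX_NOTES_COUNT
# BILI_SEARCH_MODE = "daily_limit_in_time_range"  # same walk, but also enforces MAX_NOTES_PER_DAY per calendar day
```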
@@ -141,101 +155,138 @@ class BilibiliCrawler(AbstractCrawler):
         # convert back into timestamps
         return str(int(start_day.timestamp())), str(int(end_day.timestamp()))
 
-    async def search(self):
+    async def search_by_keywords(self):
         """
-        search bilibili video with keywords
+        search bilibili video with keywords in normal mode
         :return:
         """
-        utils.logger.info("[BilibiliCrawler.search] Begin search bilibli keywords")
+        utils.logger.info("[BilibiliCrawler.search_by_keywords] Begin search bilibli keywords")
         bili_limit_count = 20  # bilibili limit page fixed value
         if config.CRAWLER_MAX_NOTES_COUNT < bili_limit_count:
             config.CRAWLER_MAX_NOTES_COUNT = bili_limit_count
         start_page = config.START_PAGE  # start page number
         for keyword in config.KEYWORDS.split(","):
             source_keyword_var.set(keyword)
-            utils.logger.info(f"[BilibiliCrawler.search] Current search keyword: {keyword}")
-            # each keyword returns at most 1000 results
-            if not config.ALL_DAY:
-                page = 1
-                while (page - start_page + 1) * bili_limit_count <= config.CRAWLER_MAX_NOTES_COUNT:
-                    if page < start_page:
-                        utils.logger.info(f"[BilibiliCrawler.search] Skip page: {page}")
-                        page += 1
-                        continue
-
-                    utils.logger.info(f"[BilibiliCrawler.search] search bilibili keyword: {keyword}, page: {page}")
-                    video_id_list: List[str] = []
-                    videos_res = await self.bili_client.search_video_by_keyword(
-                        keyword=keyword,
-                        page=page,
-                        page_size=bili_limit_count,
-                        order=SearchOrderType.DEFAULT,
-                        pubtime_begin_s=0,  # start timestamp of the publish date range
-                        pubtime_end_s=0  # end timestamp of the publish date range
-                    )
-                    video_list: List[Dict] = videos_res.get("result")
-
-                    semaphore = asyncio.Semaphore(config.MAX_CONCURRENCY_NUM)
-                    task_list = []
-                    try:
-                        task_list = [self.get_video_info_task(aid=video_item.get("aid"), bvid="", semaphore=semaphore) for video_item in video_list]
-                    except Exception as e:
-                        utils.logger.warning(f"[BilibiliCrawler.search] error in the task list. The video for this page will not be included. {e}")
-                    video_items = await asyncio.gather(*task_list)
-                    for video_item in video_items:
-                        if video_item:
-                            video_id_list.append(video_item.get("View").get("aid"))
-                            await bilibili_store.update_bilibili_video(video_item)
-                            await bilibili_store.update_up_info(video_item)
-                            await self.get_bilibili_video(video_item, semaphore)
-                    page += 1
-                    await self.batch_get_video_comments(video_id_list)
-            # filter day by day from START_DAY to END_DAY; this breaks the 1000-video cap and crawls as many of the keyword's videos per day as possible
-            else:
-                for day in pd.date_range(start=config.START_DAY, end=config.END_DAY, freq='D'):
-                    # per-day timestamp parameters
-                    pubtime_begin_s, pubtime_end_s = await self.get_pubtime_datetime(start=day.strftime('%Y-%m-%d'), end=day.strftime('%Y-%m-%d'))
-                    page = 1
-                    #! on an exception (usually when a day has no data) this while block jumps to the next day, crawling as many of the keyword's videos per day as possible
-                    #! keep only the existing try / except Exception; adding any other exception handling breaks this code so it only crawls a single day and never advances
-                    #! do not modify it unless the logic is refactored to provide the same behavior
-                    while (page - start_page + 1) * bili_limit_count <= config.CRAWLER_MAX_NOTES_COUNT:
-                        #! Catch any error if response return nothing, go to next day
-                        try:
-                            #! Don't skip any page, to make sure gather all video in one day
-                            # if page < start_page:
-                            #     utils.logger.info(f"[BilibiliCrawler.search] Skip page: {page}")
-                            #     page += 1
-                            #     continue
-
-                            utils.logger.info(f"[BilibiliCrawler.search] search bilibili keyword: {keyword}, date: {day.ctime()}, page: {page}")
-                            video_id_list: List[str] = []
-                            videos_res = await self.bili_client.search_video_by_keyword(
-                                keyword=keyword,
-                                page=page,
-                                page_size=bili_limit_count,
-                                order=SearchOrderType.DEFAULT,
-                                pubtime_begin_s=pubtime_begin_s,  # start timestamp of the publish date range
-                                pubtime_end_s=pubtime_end_s  # end timestamp of the publish date range
-                            )
-                            video_list: List[Dict] = videos_res.get("result")
-
-                            semaphore = asyncio.Semaphore(config.MAX_CONCURRENCY_NUM)
-                            task_list = [self.get_video_info_task(aid=video_item.get("aid"), bvid="", semaphore=semaphore) for video_item in video_list]
-                            video_items = await asyncio.gather(*task_list)
-                            for video_item in video_items:
-                                if video_item:
-                                    video_id_list.append(video_item.get("View").get("aid"))
-                                    await bilibili_store.update_bilibili_video(video_item)
-                                    await bilibili_store.update_up_info(video_item)
-                                    await self.get_bilibili_video(video_item, semaphore)
-                            page += 1
-                            await self.batch_get_video_comments(video_id_list)
-                        # go to next day
-                        except Exception as e:
-                            print(e)
+            utils.logger.info(f"[BilibiliCrawler.search_by_keywords] Current search keyword: {keyword}")
+            page = 1
+            while (page - start_page + 1) * bili_limit_count <= config.CRAWLER_MAX_NOTES_COUNT:
+                if page < start_page:
+                    utils.logger.info(f"[BilibiliCrawler.search_by_keywords] Skip page: {page}")
+                    page += 1
+                    continue
+
+                utils.logger.info(f"[BilibiliCrawler.search_by_keywords] search bilibili keyword: {keyword}, page: {page}")
+                video_id_list: List[str] = []
+                videos_res = await self.bili_client.search_video_by_keyword(
+                    keyword=keyword,
+                    page=page,
+                    page_size=bili_limit_count,
+                    order=SearchOrderType.DEFAULT,
+                    pubtime_begin_s=0,  # start timestamp of the publish date range
+                    pubtime_end_s=0  # end timestamp of the publish date range
+                )
+                video_list: List[Dict] = videos_res.get("result")
+
+                if not video_list:
+                    utils.logger.info(f"[BilibiliCrawler.search_by_keywords] No more videos for '{keyword}', moving to next keyword.")
+                    break
+
+                semaphore = asyncio.Semaphore(config.MAX_CONCURRENCY_NUM)
+                task_list = []
+                try:
+                    task_list = [self.get_video_info_task(aid=video_item.get("aid"), bvid="", semaphore=semaphore) for video_item in video_list]
+                except Exception as e:
+                    utils.logger.warning(f"[BilibiliCrawler.search_by_keywords] error in the task list. The video for this page will not be included. {e}")
+                video_items = await asyncio.gather(*task_list)
+                for video_item in video_items:
+                    if video_item:
+                        video_id_list.append(video_item.get("View").get("aid"))
+                        await bilibili_store.update_bilibili_video(video_item)
+                        await bilibili_store.update_up_info(video_item)
+                        await self.get_bilibili_video(video_item, semaphore)
+                page += 1
+                await self.batch_get_video_comments(video_id_list)
+
+    async def search_by_keywords_in_time_range(self, daily_limit: bool):
+        """
+        Search bilibili video with keywords in a given time range.
+        :param daily_limit: if True, strictly limit the number of notes per day and total.
+        """
+        utils.logger.info(f"[BilibiliCrawler.search_by_keywords_in_time_range] Begin search with daily_limit={daily_limit}")
+        bili_limit_count = 20
+        start_page = config.START_PAGE
+
+        for keyword in config.KEYWORDS.split(","):
+            source_keyword_var.set(keyword)
+            utils.logger.info(f"[BilibiliCrawler.search_by_keywords_in_time_range] Current search keyword: {keyword}")
+            total_notes_crawled_for_keyword = 0
+
+            for day in pd.date_range(start=config.START_DAY, end=config.END_DAY, freq='D'):
+                if daily_limit and total_notes_crawled_for_keyword >= config.CRAWLER_MAX_NOTES_COUNT:
+                    utils.logger.info(f"[BilibiliCrawler.search] Reached CRAWLER_MAX_NOTES_COUNT limit for keyword '{keyword}', skipping remaining days.")
+                    break
+
+                if not daily_limit and total_notes_crawled_for_keyword >= config.CRAWLER_MAX_NOTES_COUNT:
+                    utils.logger.info(f"[BilibiliCrawler.search] Reached CRAWLER_MAX_NOTES_COUNT limit for keyword '{keyword}', skipping remaining days.")
+                    break
+
+                pubtime_begin_s, pubtime_end_s = await self.get_pubtime_datetime(start=day.strftime('%Y-%m-%d'), end=day.strftime('%Y-%m-%d'))
+                page = 1
+                notes_count_this_day = 0
+
+                while True:
+                    if notes_count_this_day >= config.MAX_NOTES_PER_DAY:
+                        utils.logger.info(f"[BilibiliCrawler.search] Reached MAX_NOTES_PER_DAY limit for {day.ctime()}.")
+                        break
+                    if daily_limit and total_notes_crawled_for_keyword >= config.CRAWLER_MAX_NOTES_COUNT:
+                        utils.logger.info(f"[BilibiliCrawler.search] Reached CRAWLER_MAX_NOTES_COUNT limit for keyword '{keyword}'.")
+                        break
+                    if not daily_limit and total_notes_crawled_for_keyword >= config.CRAWLER_MAX_NOTES_COUNT:
+                        break
+
+                    try:
+                        utils.logger.info(f"[BilibiliCrawler.search] search bilibili keyword: {keyword}, date: {day.ctime()}, page: {page}")
+                        video_id_list: List[str] = []
+                        videos_res = await self.bili_client.search_video_by_keyword(
+                            keyword=keyword,
+                            page=page,
+                            page_size=bili_limit_count,
+                            order=SearchOrderType.DEFAULT,
+                            pubtime_begin_s=pubtime_begin_s,
+                            pubtime_end_s=pubtime_end_s
+                        )
+                        video_list: List[Dict] = videos_res.get("result")
+
+                        if not video_list:
+                            utils.logger.info(f"[BilibiliCrawler.search] No more videos for '{keyword}' on {day.ctime()}, moving to next day.")
+                            break
+
+                        semaphore = asyncio.Semaphore(config.MAX_CONCURRENCY_NUM)
+                        task_list = [self.get_video_info_task(aid=video_item.get("aid"), bvid="", semaphore=semaphore) for video_item in video_list]
+                        video_items = await asyncio.gather(*task_list)
+
+                        for video_item in video_items:
+                            if video_item:
+                                if daily_limit and total_notes_crawled_for_keyword >= config.CRAWLER_MAX_NOTES_COUNT:
+                                    break
+                                if not daily_limit and total_notes_crawled_for_keyword >= config.CRAWLER_MAX_NOTES_COUNT:
+                                    break
+                                if notes_count_this_day >= config.MAX_NOTES_PER_DAY:
+                                    break
+                                notes_count_this_day += 1
+                                total_notes_crawled_for_keyword += 1
+                                video_id_list.append(video_item.get("View").get("aid"))
+                                await bilibili_store.update_bilibili_video(video_item)
+                                await bilibili_store.update_up_info(video_item)
+                                await self.get_bilibili_video(video_item, semaphore)
+
+                        page += 1
+                        await self.batch_get_video_comments(video_id_list)
+
+                    except Exception as e:
+                        utils.logger.error(f"[BilibiliCrawler.search] Error searching on {day.ctime()}: {e}")
+                        break
 
     async def batch_get_video_comments(self, video_id_list: List[str]):
         """
         batch get video comments
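Note on the limit logic this PR fixes: Bilibili's search API returns a fixed `bili_limit_count = 20` results per page, so `search_by_keywords` pages while `(page - start_page + 1) * 20 <= CRAWLER_MAX_NOTES_COUNT`, and the time-range path additionally tracks per-day and per-keyword totals. A quick check of the page bound (example values, not project defaults):

```python
bili_limit_count = 20   # fixed Bilibili page size
start_page = 1
CRAWLER_MAX_NOTES_COUNT = 100

pages = [p for p in range(start_page, 1000)
         if (p - start_page + 1) * bili_limit_count <= CRAWLER_MAX_NOTES_COUNT]
print(pages)  # [1, 2, 3, 4, 5] -> 5 pages * 20 results = 100 notes

# In daily_limit_in_time_range mode, MAX_NOTES_PER_DAY also cuts each day
# short, and total_notes_crawled_for_keyword stops the whole keyword once
# CRAWLER_MAX_NOTES_COUNT is reached across days.
```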
@@ -268,6 +319,7 @@ class BilibiliCrawler(AbstractCrawler):
         try:
             utils.logger.info(
                 f"[BilibiliCrawler.get_comments] begin get video_id: {video_id} comments ...")
+            await asyncio.sleep(random.uniform(0.5, 1.5))
             await self.bili_client.get_video_all_comments(
                 video_id=video_id,
                 crawl_interval=random.random(),
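Note: the added pre-fetch sleep jitters request timing so comment fetches do not fire in lockstep across the semaphore's concurrent tasks. A simplified stand-in for `get_comments` showing the pacing effect (not project code):

```python
import asyncio
import random

async def paced_fetch(video_id: str) -> None:
    # Same jitter range as the diff above: 0.5-1.5 seconds before each fetch.
    await asyncio.sleep(random.uniform(0.5, 1.5))
    print(f"fetching comments for {video_id}")

async def demo() -> None:
    # Tasks start together but hit the API spread across roughly a second.
    await asyncio.gather(*(paced_fetch(v) for v in ["BV1d54y1g7db", "BV1Sz4y1U77N"]))

asyncio.run(demo())
```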
@@ -282,6 +334,8 @@ class BilibiliCrawler(AbstractCrawler):
         except Exception as e:
             utils.logger.error(
                 f"[BilibiliCrawler.get_comments] may be been blocked, err:{e}")
+            # Propagate the exception to be caught by the main loop
+            raise
 
     async def get_creator_videos(self, creator_id: int):
         """
@@ -472,13 +526,18 @@ class BilibiliCrawler(AbstractCrawler):
 
     async def close(self):
         """Close browser context"""
-        # CDP mode needs special handling
-        if self.cdp_manager:
-            await self.cdp_manager.cleanup()
-            self.cdp_manager = None
-        else:
-            await self.browser_context.close()
-        utils.logger.info("[BilibiliCrawler.close] Browser context closed ...")
+        try:
+            # CDP mode needs special handling
+            if self.cdp_manager:
+                await self.cdp_manager.cleanup()
+                self.cdp_manager = None
+            elif self.browser_context:
+                await self.browser_context.close()
+            utils.logger.info("[BilibiliCrawler.close] Browser context closed ...")
+        except TargetClosedError:
+            utils.logger.warning("[BilibiliCrawler.close] Browser context was already closed.")
+        except Exception as e:
+            utils.logger.error(f"[BilibiliCrawler.close] An error occurred during close: {e}")
 
     async def get_bilibili_video(self, video_item: Dict, semaphore: asyncio.Semaphore):
         """
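Note: `TargetClosedError` is what Playwright raises when operating on a context whose browser has already gone away, so catching it makes `close()` safe to call late or twice, which matters now that `main()` always calls it from `finally`. A standalone sketch of the same idempotent-close shape (no Playwright dependency; the exception class is a stand-in):

```python
class TargetClosedError(Exception):
    """Stand-in for playwright._impl._errors.TargetClosedError."""

class Context:
    def __init__(self) -> None:
        self.closed = False
    def close(self) -> None:
        if self.closed:
            raise TargetClosedError("already closed")
        self.closed = True

def safe_close(ctx: Context) -> None:
    try:
        ctx.close()
    except TargetClosedError:
        print("context was already closed")  # a warning, not a crash

ctx = Context()
safe_close(ctx)  # closes normally
safe_close(ctx)  # logs instead of raising, matching the diff above
```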
media_platform/douyin/config.py (new file, 23 lines)
@@ -0,0 +1,23 @@
# Disclaimer: this code is for learning and research purposes only. Users must observe the following principles:
# 1. It must not be used for any commercial purpose.
# 2. Usage must comply with the target platform's terms of service and robots.txt rules.
# 3. No large-scale crawling, and no operational disruption to the platform.
# 4. Request rates must be kept reasonable to avoid placing unnecessary load on the target platform.
# 5. It must not be used for any illegal or improper purpose.
#
# See the LICENSE file in the project root for the detailed license terms.
# By using this code you agree to the above principles and all terms of the LICENSE.

from config import *

# Douyin platform configuration
PUBLISH_TIME_TYPE = 0
DY_SPECIFIED_ID_LIST = [
    "7280854932641664319",
    "7202432992642387233",
    # ........................
]
DY_CREATOR_ID_LIST = [
    "MS4wLjABAAAATJPY7LAlaa5X-c8uNdWkvz0jUGgpw4eeXIwu_8BhvqE",
    # ........................
]
@@ -18,7 +18,7 @@ from typing import Any, Dict, List, Optional, Tuple
 from playwright.async_api import (BrowserContext, BrowserType, Page, Playwright,
                                   async_playwright)
 
-import config
+from . import config
 from base.base_crawler import AbstractCrawler
 from proxy.proxy_ip_pool import IpInfoModel, create_ip_pool
 from store import douyin as douyin_store
media_platform/kuaishou/config.py (new file, 18 lines)
@@ -0,0 +1,18 @@
# Disclaimer: this code is for learning and research purposes only. Users must observe the following principles:
# 1. It must not be used for any commercial purpose.
# 2. Usage must comply with the target platform's terms of service and robots.txt rules.
# 3. No large-scale crawling, and no operational disruption to the platform.
# 4. Request rates must be kept reasonable to avoid placing unnecessary load on the target platform.
# 5. It must not be used for any illegal or improper purpose.
#
# See the LICENSE file in the project root for the detailed license terms.
# By using this code you agree to the above principles and all terms of the LICENSE.

from config import *

# Kuaishou platform configuration
KS_SPECIFIED_ID_LIST = ["3xf8enb8dbj6uig", "3x6zz972bchmvqe"]
KS_CREATOR_ID_LIST = [
    "3x4sm73aye7jq7i",
    # ........................
]
@@ -18,7 +18,7 @@ from typing import Dict, List, Optional, Tuple
 
 from playwright.async_api import BrowserContext, BrowserType, Page, Playwright, async_playwright
 
-import config
+from . import config
 from base.base_crawler import AbstractCrawler
 from proxy.proxy_ip_pool import IpInfoModel, create_ip_pool
 from store import kuaishou as kuaishou_store
media_platform/tieba/config.py (new file, 21 lines)
@@ -0,0 +1,21 @@
# Disclaimer: this code is for learning and research purposes only. Users must observe the following principles:
# 1. It must not be used for any commercial purpose.
# 2. Usage must comply with the target platform's terms of service and robots.txt rules.
# 3. No large-scale crawling, and no operational disruption to the platform.
# 4. Request rates must be kept reasonable to avoid placing unnecessary load on the target platform.
# 5. It must not be used for any illegal or improper purpose.
#
# See the LICENSE file in the project root for the detailed license terms.
# By using this code you agree to the above principles and all terms of the LICENSE.

from config import *

# Tieba platform configuration
TIEBA_SPECIFIED_ID_LIST = []
TIEBA_NAME_LIST = [
    # "盗墓笔记"
]
TIEBA_CREATOR_URL_LIST = [
    "https://tieba.baidu.com/home/main/?id=tb.1.7f139e2e.6CyEwxu3VJruH_-QqpCi6g&fr=frs",
    # ........................
]
@@ -18,7 +18,7 @@ from typing import Dict, List, Optional, Tuple
 from playwright.async_api import (BrowserContext, BrowserType, Page, Playwright,
                                   async_playwright)
 
-import config
+from . import config
 from base.base_crawler import AbstractCrawler
 from model.m_baidu_tieba import TiebaCreator, TiebaNote
 from proxy.proxy_ip_pool import IpInfoModel, create_ip_pool
media_platform/weibo/config.py (new file, 22 lines)
@@ -0,0 +1,22 @@
# Disclaimer: this code is for learning and research purposes only. Users must observe the following principles:
# 1. It must not be used for any commercial purpose.
# 2. Usage must comply with the target platform's terms of service and robots.txt rules.
# 3. No large-scale crawling, and no operational disruption to the platform.
# 4. Request rates must be kept reasonable to avoid placing unnecessary load on the target platform.
# 5. It must not be used for any illegal or improper purpose.
#
# See the LICENSE file in the project root for the detailed license terms.
# By using this code you agree to the above principles and all terms of the LICENSE.

from config import *

# Weibo platform configuration
WEIBO_SEARCH_TYPE = "popular"
WEIBO_SPECIFIED_ID_LIST = [
    "4982041758140155",
    # ........................
]
WEIBO_CREATOR_ID_LIST = [
    "5533390220",
    # ........................
]
@@ -24,7 +24,7 @@ from typing import Dict, List, Optional, Tuple
 from playwright.async_api import (BrowserContext, BrowserType, Page, Playwright,
                                   async_playwright)
 
-import config
+from . import config
 from base.base_crawler import AbstractCrawler
 from proxy.proxy_ip_pool import IpInfoModel, create_ip_pool
 from store import weibo as weibo_store
media_platform/xhs/config.py (new file, 24 lines)
@@ -0,0 +1,24 @@
# Disclaimer: this code is for learning and research purposes only. Users must observe the following principles:
# 1. It must not be used for any commercial purpose.
# 2. Usage must comply with the target platform's terms of service and robots.txt rules.
# 3. No large-scale crawling, and no operational disruption to the platform.
# 4. Request rates must be kept reasonable to avoid placing unnecessary load on the target platform.
# 5. It must not be used for any illegal or improper purpose.
#
# See the LICENSE file in the project root for the detailed license terms.
# By using this code you agree to the above principles and all terms of the LICENSE.

from config import *

# XHS (Xiaohongshu) platform configuration
SORT_TYPE = "popularity_descending"
UA = "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/131.0.0.0 Safari/537.36 Edg/131.0.0.0"
CRAWLER_MAX_SLEEP_SEC = 2
XHS_SPECIFIED_NOTE_URL_LIST = [
    "https://www.xiaohongshu.com/explore/66fad51c000000001b0224b8?xsec_token=AB3rO-QopW5sgrJ41GwN01WCXh6yWPxjSoFI9D5JIMgKw=&xsec_source=pc_search"
    # ........................
]
XHS_CREATOR_ID_LIST = [
    "63e36c9a000000002703502b",
    # ........................
]
@@ -19,7 +19,7 @@ from typing import Dict, List, Optional, Tuple
 from playwright.async_api import BrowserContext, BrowserType, Page, Playwright, async_playwright
 from tenacity import RetryError
 
-import config
+from . import config
 from base.base_crawler import AbstractCrawler
 from config import CRAWLER_MAX_COMMENTS_COUNT_SINGLENOTES
 from model.m_xiaohongshu import NoteUrlInfo
media_platform/zhihu/config.py (new file, 22 lines)
@@ -0,0 +1,22 @@
# Disclaimer: this code is for learning and research purposes only. Users must observe the following principles:
# 1. It must not be used for any commercial purpose.
# 2. Usage must comply with the target platform's terms of service and robots.txt rules.
# 3. No large-scale crawling, and no operational disruption to the platform.
# 4. Request rates must be kept reasonable to avoid placing unnecessary load on the target platform.
# 5. It must not be used for any illegal or improper purpose.
#
# See the LICENSE file in the project root for the detailed license terms.
# By using this code you agree to the above principles and all terms of the LICENSE.

from config import *

# Zhihu platform configuration
ZHIHU_CREATOR_URL_LIST = [
    "https://www.zhihu.com/people/yd1234567",
    # ........................
]
ZHIHU_SPECIFIED_ID_LIST = [
    "https://www.zhihu.com/question/826896610/answer/4885821440",  # answer
    "https://zhuanlan.zhihu.com/p/673461588",  # article
    "https://www.zhihu.com/zvideo/1539542068422144000",  # video
]
@@ -19,7 +19,7 @@ from typing import Dict, List, Optional, Tuple, cast
 from playwright.async_api import (BrowserContext, BrowserType, Page, Playwright,
                                   async_playwright)
 
-import config
+from . import config
 from constant import zhihu as constant
 from base.base_crawler import AbstractCrawler
 from model.m_zhihu import ZhihuContent, ZhihuCreator