mirror of
https://github.com/NanmiCoder/MediaCrawler.git
synced 2026-04-21 11:17:38 +08:00
feat: 微博支持评论 & 指定帖子
This commit is contained in:
@@ -2,6 +2,6 @@
|
||||
# @Author : relakkes@gmail.com
|
||||
# @Time : 2023/12/23 15:40
|
||||
# @Desc :
|
||||
from .client import WeiboClient
|
||||
from .core import WeiboCrawler
|
||||
from .login import WeiboLogin
|
||||
from .client import WeiboClient
|
||||
@@ -4,7 +4,9 @@
|
||||
# @Desc : 微博爬虫 API 请求 client
|
||||
|
||||
import asyncio
|
||||
import copy
|
||||
import json
|
||||
import re
|
||||
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
|
||||
from urllib.parse import urlencode
|
||||
|
||||
@@ -47,12 +49,15 @@ class WeiboClient:
|
||||
else:
|
||||
return data.get("data", {})
|
||||
|
||||
async def get(self, uri: str, params=None, headers=None) -> Dict:
    """Send a GET request to the weibo API host and return the parsed data.

    :param uri: request path, e.g. "/comments/hotflow"
    :param params: optional dict of query parameters, urlencoded onto the URI
    :param headers: optional header dict; falls back to self.headers when None
    :return: response data dict produced by self.request
    """
    final_uri = uri
    if isinstance(params, dict):
        final_uri = f"{uri}?{urlencode(params)}"
    if headers is None:
        headers = self.headers
    return await self.request(method="GET", url=f"{self._host}{final_uri}", headers=headers)
|
||||
|
||||
async def post(self, uri: str, data: dict) -> Dict:
|
||||
json_str = json.dumps(data, separators=(',', ':'), ensure_ascii=False)
|
||||
@@ -96,3 +101,78 @@ class WeiboClient:
|
||||
"page": page,
|
||||
}
|
||||
return await self.get(uri, params)
|
||||
|
||||
async def get_note_comments(self, mid_id: str, max_id: int) -> Dict:
    """Fetch one page of hot-flow comments for a weibo note.

    :param mid_id: weibo note id
    :param max_id: pagination cursor; only sent when positive
    :return: response data dict from the comments endpoint
    """
    query: Dict = {
        "id": mid_id,
        "mid": mid_id,
        "max_id_type": 0,
    }
    if max_id > 0:
        query["max_id"] = max_id

    # The comments endpoint expects a Referer pointing at the note's detail page.
    page_headers = copy.copy(self.headers)
    page_headers["Referer"] = f"https://m.weibo.cn/detail/{mid_id}"

    return await self.get("/comments/hotflow", query, headers=page_headers)
|
||||
|
||||
async def get_note_all_comments(self, note_id: str, crawl_interval: float = 1.0, is_fetch_sub_comments=False,
                                callback: Optional[Callable] = None, ):
    """
    Get all top-level comments of a note, page by page.

    :param note_id: weibo note id
    :param crawl_interval: seconds to sleep between comment pages
    :param is_fetch_sub_comments: when True, top-level comments are NOT
        accumulated in the return value (sub-comment fetching is still a TODO)
    :param callback: optional async hook awaited with (note_id, comment_list)
        for every fetched page
    :return: list of comment dicts (empty when is_fetch_sub_comments is True)
    """
    result = []
    is_end = False
    max_id = -1
    while not is_end:
        comments_res = await self.get_note_comments(note_id, max_id)
        # Default to 0 so a missing or None "max_id" ends the loop instead of
        # spinning forever (is_end would otherwise never become True).
        max_id: int = comments_res.get("max_id") or 0
        comment_list: List[Dict] = comments_res.get("data", [])
        is_end = max_id == 0
        if callback:  # invoke the caller-supplied hook with each fetched page
            await callback(note_id, comment_list)
        await asyncio.sleep(crawl_interval)
        if not is_fetch_sub_comments:
            result.extend(comment_list)
            continue
        # TODO: handle fetching sub comments
    return result
|
||||
|
||||
async def get_note_info_by_id(self, note_id: str) -> Dict:
    """
    Fetch a note's detail by scraping the m.weibo.cn detail page.

    The detail data is embedded in the HTML as a `$render_data` JS variable,
    so it is extracted with a regex rather than a JSON API call.

    :param note_id: weibo note id
    :return: {"mblog": <note detail>} on success, empty dict when the
             $render_data variable is not found in the page
    :raises DataFetchError: when the page responds with a non-200 status
    """
    url = f"{self._host}/detail/{note_id}"
    async with httpx.AsyncClient(proxies=self.proxies) as client:
        response = await client.request(
            "GET", url, timeout=self.timeout, headers=self.headers
        )
        if response.status_code != 200:
            raise DataFetchError(f"get weibo detail err: {response.text}")
        # The page inlines `var $render_data = [...][0]`; capture the JSON array.
        # re.DOTALL lets `.` span newlines inside the embedded JSON.
        match = re.search(r'var \$render_data = (\[.*?\])\[0\]', response.text, re.DOTALL)
        if match:
            render_data_json = match.group(1)
            render_data_dict = json.loads(render_data_json)
            # NOTE(review): assumes the array's first element carries the
            # "status" payload — confirm against the live page structure.
            note_detail = render_data_dict[0].get("status")
            note_item = {
                "mblog": note_detail
            }
            return note_item
        else:
            utils.logger.info(f"[WeiboClient.get_note_info_by_id] 未找到$render_data的值")
            return dict()
|
||||
|
||||
@@ -23,9 +23,9 @@ from var import comment_tasks_var, crawler_type_var
|
||||
|
||||
from .client import WeiboClient
|
||||
from .exception import DataFetchError
|
||||
from .login import WeiboLogin
|
||||
from .field import SearchType
|
||||
from .help import filter_search_result_card
|
||||
from .login import WeiboLogin
|
||||
|
||||
|
||||
class WeiboCrawler(AbstractCrawler):
|
||||
@@ -38,7 +38,7 @@ class WeiboCrawler(AbstractCrawler):
|
||||
|
||||
def __init__(self):
    """Initialize the crawler entry URL and user agent."""
    self.index_url = "https://m.weibo.cn"
    # m.weibo.cn serves the mobile site, so a mobile UA is required; the span
    # contained both the superseded desktop-UA assignment and this replacement —
    # only the mobile one is kept.
    self.user_agent = utils.get_mobile_user_agent()
|
||||
|
||||
def init_config(self, platform: str, login_type: str, crawler_type: str):
|
||||
self.platform = platform
|
||||
@@ -85,7 +85,7 @@ class WeiboCrawler(AbstractCrawler):
|
||||
await self.search()
|
||||
elif self.crawler_type == "detail":
|
||||
# Get the information and comments of the specified post
|
||||
pass
|
||||
await self.get_specified_notes()
|
||||
else:
|
||||
pass
|
||||
utils.logger.info("[WeiboCrawler.start] Bilibili Crawler finished ...")
|
||||
@@ -109,12 +109,104 @@ class WeiboCrawler(AbstractCrawler):
|
||||
note_id_list: List[str] = []
|
||||
note_list = filter_search_result_card(search_res.get("cards"))
|
||||
for note_item in note_list:
|
||||
if note_item :
|
||||
if note_item:
|
||||
mblog: Dict = note_item.get("mblog")
|
||||
note_id_list.append(mblog.get("id"))
|
||||
await weibo.update_weibo_note(note_item)
|
||||
|
||||
page += 1
|
||||
await self.batch_get_notes_comments(note_id_list)
|
||||
|
||||
async def get_specified_notes(self):
    """Fetch detail for every configured weibo note id, store each one, then crawl their comments."""
    semaphore = asyncio.Semaphore(config.MAX_CONCURRENCY_NUM)
    fetch_tasks = [
        self.get_note_info_task(note_id=note_id, semaphore=semaphore)
        for note_id in config.WEIBO_SPECIFIED_ID_LIST
    ]
    note_details = await asyncio.gather(*fetch_tasks)
    for detail in note_details:
        if not detail:
            continue
        await weibo.update_weibo_note(detail)
    await self.batch_get_notes_comments(config.WEIBO_SPECIFIED_ID_LIST)
|
||||
|
||||
async def get_note_info_task(self, note_id: str, semaphore: asyncio.Semaphore) -> Optional[Dict]:
    """Fetch a single note's detail under the shared concurrency semaphore.

    :param note_id: weibo note id
    :param semaphore: limits concurrent detail requests
    :return: note detail dict, or None when the fetch fails
    """
    async with semaphore:
        try:
            return await self.wb_client.get_note_info_by_id(note_id)
        except DataFetchError as ex:
            utils.logger.error(f"[WeiboCrawler.get_note_info_task] Get note detail error: {ex}")
        except KeyError as ex:
            utils.logger.error(
                f"[WeiboCrawler.get_note_info_task] have not fund note detail note_id:{note_id}, err: {ex}")
        return None
|
||||
|
||||
async def batch_get_notes_comments(self, note_id_list: List[str]):
    """Concurrently crawl comments for every note id, bounded by a shared semaphore.

    :param note_id_list: weibo note ids whose comments should be crawled
    :return: None
    """
    utils.logger.info(f"[WeiboCrawler.batch_get_notes_comments] note ids:{note_id_list}")
    semaphore = asyncio.Semaphore(config.MAX_CONCURRENCY_NUM)
    comment_tasks: List[Task] = [
        asyncio.create_task(self.get_note_comments(note_id, semaphore), name=note_id)
        for note_id in note_id_list
    ]
    await asyncio.gather(*comment_tasks)
|
||||
|
||||
async def get_note_comments(self, note_id: str, semaphore: asyncio.Semaphore):
    """Crawl, filter and persist the comments of a single note.

    :param note_id: weibo note id
    :param semaphore: limits concurrent comment crawls
    :return: None; filtered comments are persisted via the weibo store module
    """
    async with semaphore:
        try:
            utils.logger.info(f"[WeiboCrawler.get_note_comments] begin get note_id: {note_id} comments ...")

            # Keyword filter and per-post cap come from the global config.
            keywords = config.COMMENT_KEYWORDS
            max_comments = config.MAX_COMMENTS_PER_POST

            # Pull every top-level comment page with a small random delay between pages.
            all_comments = await self.wb_client.get_note_all_comments(
                note_id=note_id,
                crawl_interval=random.random(),
            )

            # No keywords configured means keep everything; otherwise keep only
            # comments whose message mentions at least one keyword.
            if not keywords:
                filtered_comments = all_comments
            else:
                filtered_comments = [
                    item for item in all_comments
                    if any(word in item["content"]["message"] for word in keywords)
                ]

            # Apply the per-post cap when it is positive.
            if max_comments > 0:
                filtered_comments = filtered_comments[:max_comments]

            # Persist whatever survived the filters.
            await weibo.batch_update_weibo_note_comments(note_id, filtered_comments)

        except DataFetchError as ex:
            utils.logger.error(f"[WeiboCrawler.get_note_comments] get note_id: {note_id} comment error: {ex}")
        except Exception as e:
            utils.logger.error(f"[WeiboCrawler.get_note_comments] may be been blocked, err:{e}")
|
||||
|
||||
async def create_weibo_client(self, httpx_proxy: Optional[str]) -> WeiboClient:
|
||||
"""Create xhs client"""
|
||||
|
||||
@@ -3,7 +3,7 @@
|
||||
# @Time : 2023/12/24 17:37
|
||||
# @Desc :
|
||||
|
||||
from typing import List, Dict
|
||||
from typing import Dict, List
|
||||
|
||||
|
||||
def filter_search_result_card(card_list: List[Dict]) -> List[Dict]:
|
||||
|
||||
Reference in New Issue
Block a user