import { useState, useCallback, useEffect, useRef } from "react";
import { VideoSegmentEditUseCase } from "../usecase/ShotEditUsecase";
import { MatchedPerson, RoleRecognitionResponse } from "@/api/DTO/movieEdit";
import { VideoSegmentEntity } from "../domain/Entities";
import { LensType, SimpleCharacter } from "../domain/valueObject";
import { getUploadToken, uploadToQiniu } from "@/api/common";
import { SaveEditUseCase } from "../usecase/SaveEditUseCase";

/**
 * Video segment service hook interface.
 * Declares all state and operations exposed by the video segment service hook.
 */
export interface UseShotService {
  // Reactive state
  /** Loading flag */
  loading: boolean;
  /** Video segment list */
  videoSegments: VideoSegmentEntity[];
  /** Currently selected video segment */
  selectedSegment: VideoSegmentEntity | null;
  /** Recognized person information */
  matched_persons: MatchedPerson[];

  // Operations
  /** Fetch the video segment list */
  getVideoSegmentList: (projectId: string) => Promise<void>;
  /** Regenerate a video segment */
  regenerateVideoSegment: () => Promise<VideoSegmentEntity>;
  /** Optimize video content with AI */
  optimizeVideoContent: (
    shotId: string,
    userRequirement: string,
    lensData: LensType[]
  ) => Promise<LensType[]>;
  /** Abort the current operation */
  abortOperation: () => void;
  /** Set the selected video segment */
  setSelectedSegment: (segment: VideoSegmentEntity | null) => void;
  /** Add a new lens (shot) to the selected video segment */
  addNewLens: () => void;
  /** Delete the lens with the given name */
  deleteLens: (lensName: string) => void;
  /** Capture the current video frame, upload it to Qiniu, and run role recognition */
  filterRole: (
    video: HTMLVideoElement
  ) => Promise<RoleRecognitionResponse | undefined>;
  /** Set the simple character data */
  setSimpleCharacter: (characters: SimpleCharacter[]) => void;
  /** Compute the recognition boxes */
  calculateRecognitionBoxes: (
    containerElement: HTMLElement,
    matched_persons: MatchedPerson[]
  ) => Array<{
    left: number;
    top: number;
    width: number;
    height: number;
    person_id: string;
  }>;
}

/**
 * Video segment service hook.
 * Provides all state management and operations for video segments, including
 * fetching the segment list, regenerating videos, and AI optimization.
 */
export const useShotService = (): UseShotService => {
  // Reactive state
  const [loading, setLoading] = useState(false);
  const [videoSegments, setVideoSegments] = useState<VideoSegmentEntity[]>([]);
  const [selectedSegment, setSelectedSegment] =
    useState<VideoSegmentEntity | null>(null);
  const [projectId, setProjectId] = useState("");
  const [simpleCharacter, setSimpleCharacter] = useState<SimpleCharacter[]>([]);
  const [matched_persons, setMatched_persons] = useState<MatchedPerson[]>([]);
  // Polling timer id
  const [intervalId, setIntervalId] =
    useState<ReturnType<typeof setInterval> | null>(null);
  // UseCase instance (lazy initializer so it is constructed only once)
  const [vidoEditUseCase] = useState(() => new VideoSegmentEditUseCase());
  const [generateTaskIds, setGenerateTaskIds] = useState<Set<string>>(
    new Set()
  );

  // Mirror the selected segment in a ref so the polling callback below always
  // sees the latest selection rather than the value captured when the interval
  // was created.
  const selectedSegmentRef = useRef<VideoSegmentEntity | null>(null);
  useEffect(() => {
    selectedSegmentRef.current = selectedSegment;
  }, [selectedSegment]);

  /**
   * Fetch the video segment list.
   * @param projectId Project ID
   */
  const getVideoSegmentList = useCallback(
    async (projectId: string): Promise<void> => {
      try {
        setLoading(true);
        const segments = await vidoEditUseCase.getVideoSegmentList(projectId);
        setProjectId(projectId);
        setVideoSegments(segments);
        setIntervalIdHandler(projectId);
      } catch (error) {
        console.error("获取视频片段列表失败:", error);
      } finally {
        setLoading(false);
      }
    },
    [vidoEditUseCase]
  );

  const setIntervalIdHandler = async (projectId: string): Promise<void> => {
    // Clear any previous timer first so that only one timer exists at a time.
    if (intervalId) {
      clearInterval(intervalId);
      setIntervalId(null);
    }
    // Poll the segment list every 5 seconds.
    const newIntervalId = setInterval(async () => {
      try {
        const segments = await vidoEditUseCase.getVideoSegmentList(projectId);
        setVideoSegments((prevSegments) => {
          const existingSegmentsMap = new Map<string, VideoSegmentEntity>(
            prevSegments.map((segment) => [segment.id, segment])
          );
          // Skip the segment currently being edited to avoid overwriting local changes.
          const segmentsToUpdate = segments.filter(
            (segment) => segment.id !== selectedSegmentRef.current?.id
          );
          segmentsToUpdate.forEach((newSegment) => {
            const existingSegment = existingSegmentsMap.get(newSegment.id);
            if (existingSegment) {
              existingSegmentsMap.set(newSegment.id, {
                ...existingSegment,
                videoUrl: newSegment.videoUrl,
                status: newSegment.status,
                sketchUrl: newSegment.sketchUrl,
                lens: newSegment.lens,
                updatedAt: newSegment.updatedAt,
                loadingProgress: newSegment.loadingProgress,
              });
            } else {
              existingSegmentsMap.set(newSegment.id, newSegment);
            }
          });
          return Array.from(existingSegmentsMap.values());
        });
      } catch (error) {
        console.error("定时获取视频片段列表失败:", error);
      }
    }, 5000);
    setIntervalId(newIntervalId);
  };

  // Clear the timer when it is replaced and when the component unmounts.
  // Note: the cleanup must not reset the state, otherwise the reference to a
  // newly created timer would be lost before it could be cleared.
  useEffect(() => {
    return () => {
      if (intervalId) {
        clearInterval(intervalId);
      }
    };
  }, [intervalId]);

  /**
   * Regenerate the currently selected video segment.
   * @returns The currently selected segment; the API now returns task status
   *          information rather than the regenerated segment itself.
   */
  const regenerateVideoSegment =
    useCallback(async (): Promise<VideoSegmentEntity> => {
      try {
        setLoading(true);
        // Call the API to regenerate the segment; it returns task status information.
        const taskResult = await vidoEditUseCase.regenerateVideoSegment(
          projectId,
          selectedSegment!.lens,
          selectedSegment!.id
        );
        // Store the task ID for later status queries (copy the Set to keep the update immutable).
        setGenerateTaskIds((prev) => new Set(prev).add(taskResult.task_id));
        SaveEditUseCase.setVideoTasks([
          ...SaveEditUseCase.videoTasks,
          {
            task_id: taskResult.task_id,
            video_ids: [selectedSegment!.id],
          },
        ]);
        // When regenerating an existing segment, mark it as processing (0: video loading).
        if (selectedSegment) {
          setVideoSegments((prev) =>
            prev.map((segment) =>
              segment.id === selectedSegment.id
                ? {
                    ...segment,
                    status: 0, // video loading state
                    loadingProgress: 0, // reset loading progress
                  }
                : segment
            )
          );
        }
        // Return the selected segment, since the API returns task status rather than a full segment.
        return selectedSegment!;
      } catch (error) {
        console.error("重新生成视频片段失败:", error);
        throw error;
      } finally {
        setLoading(false);
      }
    }, [projectId, selectedSegment, vidoEditUseCase]);

  /**
   * Optimize video content with AI.
   * @param shotId Video segment ID
   * @param userRequirement The user's optimization requirement
   * @param lensData Lens data array
   * @returns The optimized lens data
   */
  const optimizeVideoContent = useCallback(
    async (
      shotId: string,
      userRequirement: string,
      lensData: LensType[]
    ): Promise<LensType[]> => {
      try {
        setLoading(true);
        const optimizedLensData = await vidoEditUseCase.optimizeVideoContent(
          shotId,
          userRequirement,
          lensData
        );
        // Note: videoSegments is not updated here because the result is LensType[]
        // rather than a VideoSegmentEntity; the caller handles the optimized lens data.
        return optimizedLensData;
      } catch (error) {
        console.error("AI优化视频内容失败:", error);
        throw error;
      } finally {
        setLoading(false);
      }
    },
    [vidoEditUseCase]
  );

  /**
   * Abort the current operation.
   */
  const abortOperation = useCallback((): void => {
    // vidoEditUseCase.abortOperation();
    setLoading(false);
  }, []);

  /**
   * Set the selected video segment.
   */
  const setSelectedSegmentHandler = useCallback(
    (segment: VideoSegmentEntity | null): void => {
      setSelectedSegment(segment);
    },
    []
  );

  /**
   * Add a new lens to the selected video segment.
   * @description Appends an empty lens to selectedSegment.lens, naming it sequentially.
   */
  const addNewLens = useCallback((): void => {
    if (!selectedSegment) {
      console.warn("没有选中的视频片段,无法添加镜头");
      return;
    }
    // Compute the next lens number.
    const currentLensCount = selectedSegment.lens.length;
    const newLensName = `镜头${currentLensCount + 1}`;
    // Create a new empty lens.
    const newLens = new LensType(newLensName, "", []);
    // Build the updated segment.
    const updatedSegment: VideoSegmentEntity = {
      ...selectedSegment,
      lens: [...selectedSegment.lens, newLens],
    };
    // Update both pieces of state together to avoid extra re-renders.
    setSelectedSegment(updatedSegment);
    setVideoSegments((prev) => {
      const segmentIndex = prev.findIndex(
        (segment) => segment.id === selectedSegment.id
      );
      if (segmentIndex === -1) return prev;
      const newSegments = [...prev];
      newSegments[segmentIndex] = updatedSegment;
      return newSegments;
    });
  }, [selectedSegment]);

  /**
   * Delete the lens with the given name.
   * @param lensName Name of the lens to delete
   */
  const deleteLens = useCallback(
    (lensName: string): void => {
      if (!selectedSegment) {
        console.warn("没有选中的视频片段,无法删除镜头");
        return;
      }
      // Remove the named lens and renumber the remaining ones.
      const updatedLens = selectedSegment.lens
        .filter((lens) => lens.name !== lensName)
        .map(
          (lens, index) =>
            new LensType(`镜头${index + 1}`, lens.script, lens.content)
        );
      // Build the updated segment.
      const updatedSegment: VideoSegmentEntity = {
        ...selectedSegment,
        lens: updatedLens,
      };
      // Update both pieces of state together to avoid extra re-renders.
      setSelectedSegment(updatedSegment);
      setVideoSegments((prev) => {
        const segmentIndex = prev.findIndex(
          (segment) => segment.id === selectedSegment.id
        );
        if (segmentIndex === -1) return prev;
        const newSegments = [...prev];
        newSegments[segmentIndex] = updatedSegment;
        return newSegments;
      });
    },
    [selectedSegment]
  );

  /**
   * Capture the current video frame, upload it to Qiniu, and run role
   * recognition on the uploaded image.
   * @param video HTML video element
   * @returns The role recognition result, or undefined when recognition fails
   *          even though the frame upload succeeded.
   */
  const filterRole = useCallback(
    async (
      video: HTMLVideoElement
    ): Promise<RoleRecognitionResponse | undefined> => {
      try {
        // Create a canvas to capture the current video frame.
        const canvas = document.createElement("canvas");
        const ctx = canvas.getContext("2d");
        console.log(video);
        video.crossOrigin = "anonymous";
        if (!ctx) {
          throw new Error("无法获取canvas上下文");
        }
        // Match the canvas size to the video size.
        canvas.width = video.videoWidth;
        canvas.height = video.videoHeight;
        // Draw the current video frame onto the canvas.
        ctx.drawImage(video, 0, 0, canvas.width, canvas.height);
        // Convert the canvas to a blob.
        const blob = await new Promise<Blob>((resolve, reject) => {
          canvas.toBlob((result) => {
            if (result) {
              resolve(result);
            } else {
              reject(new Error("无法将canvas转换为blob"));
            }
          }, "image/png");
        });
        // Wrap the blob in a File object.
        const file = new File([blob], `frame_${Date.now()}.png`, {
          type: "image/png",
        });
        // Fetch the upload token.
        const { token } = await getUploadToken();
        // Upload to Qiniu.
        const imageUrl = await uploadToQiniu(file, token);
        // Run role recognition through the use case.
        try {
          const recognitionResult =
            await vidoEditUseCase.recognizeRoleFromImage(
              projectId,
              selectedSegment!.id,
              imageUrl
            );
          console.log("角色识别结果:", recognitionResult);
          setMatched_persons(
            recognitionResult.recognition_result.data.matched_persons
          );
          return recognitionResult;
        } catch (recognitionError) {
          console.warn("角色识别失败,但图片上传成功:", recognitionError);
        }
      } catch (error) {
        console.error("获取视频帧失败:", error);
        throw error;
      }
    },
    [projectId, selectedSegment, vidoEditUseCase]
  );

  /**
   * Compute recognition box geometry.
   * @description Converts the normalized bbox of each matched person into
   *              pixel coordinates relative to the given container element.
   * @param containerElement DOM container element
   * @param matched_persons Matched person data
   * @returns The computed recognition box properties
   */
  const calculateRecognitionBoxes = (
    containerElement: HTMLElement,
    matched_persons: MatchedPerson[] = []
  ): Array<{
    /** Horizontal position */
    left: number;
    /** Vertical position */
    top: number;
    /** Width */
    width: number;
    /** Height */
    height: number;
    /** Person ID */
    person_id: string;
  }> => {
    // Measure the container element.
    const containerRect = containerElement.getBoundingClientRect();
    const containerWidth = containerRect.width;
    const containerHeight = containerRect.height;
    console.log(
      "recognitionBoxes-width-height",
      containerWidth,
      containerHeight
    );
    // Compute the recognition box properties.
    return matched_persons
      .map((person) => {
        // Read the bbox information.
        const bbox = (person as any).bbox;
        if (!bbox) return null;
        // Convert relative (percentage) coordinates into absolute pixels.
        const left = (bbox.x || 0) * containerWidth;
        const top = (bbox.y || 0) * containerHeight;
        const width = (bbox.width || 0) * containerWidth;
        const height = (bbox.height || 0) * containerHeight;
        return { left, top, width, height, person_id: person.person_id };
      })
      .filter(Boolean) as Array<{
      left: number;
      top: number;
      width: number;
      height: number;
      person_id: string;
    }>;
  };

  return {
    // Reactive state
    loading,
    videoSegments,
    selectedSegment,
    matched_persons,
    // Operations
    getVideoSegmentList,
    regenerateVideoSegment,
    optimizeVideoContent,
    abortOperation,
    setSelectedSegment: setSelectedSegmentHandler,
    addNewLens,
    deleteLens,
    filterRole,
    setSimpleCharacter,
    calculateRecognitionBoxes,
  };
};
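
/*
 * Usage sketch (illustrative only, not part of the hook's API): a component
 * might consume the hook roughly as below. `ShotEditorPanel`, `Spinner`, and
 * `SegmentList` are hypothetical names, and the JSX is kept inside this
 * comment so the example does not affect the module itself.
 *
 *   const ShotEditorPanel = ({ projectId }: { projectId: string }) => {
 *     const {
 *       loading,
 *       videoSegments,
 *       getVideoSegmentList,
 *       setSelectedSegment,
 *     } = useShotService();
 *
 *     useEffect(() => {
 *       // Load the segment list (and start polling) once the project id is known.
 *       void getVideoSegmentList(projectId);
 *     }, [projectId, getVideoSegmentList]);
 *
 *     if (loading) return <Spinner />;
 *     return (
 *       <SegmentList
 *         segments={videoSegments}
 *         onSelect={(segment) => setSelectedSegment(segment)}
 *       />
 *     );
 *   };
 */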