发表于 2025-2-21 11:05:27
显示全部楼层
reading images...
get key_landmark and face bounding boxes with the default value
start inference
pad talking image to original video
Traceback (most recent call last):
File "E:\MuseTalk-V2\deepface\lib\site-packages\gradio\queueing.py", line 625, in process_events
response = await route_utils.call_process_api(
File "E:\MuseTalk-V2\deepface\lib\site-packages\gradio\route_utils.py", line 322, in call_process_api
output = await app.get_blocks().process_api(
File "E:\MuseTalk-V2\deepface\lib\site-packages\gradio\blocks.py", line 2044, in process_api
result = await self.call_function(
File "E:\MuseTalk-V2\deepface\lib\site-packages\gradio\blocks.py", line 1591, in call_function
prediction = await anyio.to_thread.run_sync( # type: ignore
File "E:\MuseTalk-V2\deepface\lib\site-packages\anyio\to_thread.py", line 56, in run_sync
return await get_async_backend().run_sync_in_worker_thread(
File "E:\MuseTalk-V2\deepface\lib\site-packages\anyio\_backends\_asyncio.py", line 2461, in run_sync_in_worker_thread
return await future
File "E:\MuseTalk-V2\deepface\lib\site-packages\anyio\_backends\_asyncio.py", line 962, in run
result = context.run(func, *args)
File "E:\MuseTalk-V2\deepface\lib\site-packages\gradio\utils.py", line 883, in wrapper
response = f(*args, **kwargs)
File "E:\MuseTalk-V2\deepface\lib\site-packages\gradio\utils.py", line 883, in wrapper
response = f(*args, **kwargs)
File "E:\MuseTalk-V2\deepface\lib\site-packages\torch\utils\_contextlib.py", line 115, in decorate_context
return func(*args, **kwargs)
File "<frozen app>", line 259, in inference
FileNotFoundError: 未找到输入视频文件: ./temp.mp4