I am referring to this repo in order to adapt the mmaction2 Grad-CAM demo from offline inference on short videos to online inference on long videos. The script is shown below:
Note: to make this script easy to reproduce, I have commented out some code that requires a lot of dependencies.
import cv2
import numpy as np
import torchvision.transforms as transforms
import sys
from PIL import Image
# from mmaction.apis import init_recognizer
# from utils.gradcam_utils import GradCAM
import torch
import asyncio
from concurrent.futures import ProcessPoolExecutor
from functools import partial
# sys.path.append('./utils')


async def preprocess_img(arr):
    image = Image.fromarray(np.uint8(arr))
    mean = [0.485, 0.456, 0.406]
    std = [0.229, 0.224, 0.225]
    transform = transforms.Compose([
        transforms.Resize((model_input_height, model_input_width)),
        transforms.ToTensor(),
        transforms.Normalize(mean, std, inplace=False),
    ])
    normalized_img = transform(image)
    img_np = normalized_img.numpy()
    return img_np


async def inference(frame_buffer):
    print("starting inference")
    # inputs = {}
    # input_tensor = torch.from_numpy(frame_buffer).type(torch.FloatTensor)
    # input_cuda_tensor = input_tensor.cuda()
    # inputs['imgs'] = input_cuda_tensor
    # results = gradcam(inputs)
    # display_buffer = np.squeeze(results[0].cpu().detach().numpy(), axis=0)
    # return display_buffer


async def run_blocking_func(loop_, queue_, frame_buffer):
    with ProcessPoolExecutor() as pool:
        blocking_func = partial(inference, frame_buffer)
        frame = await loop_.run_in_executor(pool, blocking_func)
        print(frame)
        await queue_.put(frame)
        await asyncio.sleep(0.01)


async def get_frames(capture):
    capture.grab()
    ret, frame = capture.retrieve()
    if not ret:
        print("empty frame")
        return
    for i in range(32):
        img = await preprocess_img(frame)
        expandimg = np.expand_dims(img, axis=(0, 1, 3))
        print(f'expandimg.shape{expandimg.shape}')
        frame_buffer[:, :, :, i, :, :] = expandimg[:, :, :, 0, :, :]
    return frame_buffer


async def show_frame(queue_: asyncio.LifoQueue):
    display_buffer = await queue_.get()
    for i in range(32):
        blended_image = display_buffer[i, :, :, :]
        cv2.imshow('Grad-CAM VIS', blended_image)
        if cv2.waitKey(10) & 0xFF == ord('q'):
            cap.release()
            cv2.destroyAllWindows()
            break


async def produce(loop_, queue_, cap):
    while True:
        frame_buffer = await asyncio.create_task(get_frames(cap))
        # Apply Grad-CAM
        display_buffer = await asyncio.create_task(run_blocking_func(loop_, queue_, frame_buffer))
        await queue_.put(display_buffer)


async def consume(queue_):
    while True:
        if queue_.qsize():
            task1 = asyncio.create_task(show_frame(queue_))
            await asyncio.wait(task1)
            if cv2.waitKey(1) == 27:
                break
        else:
            await asyncio.sleep(0.01)


async def run(loop_, queue_, cap_):
    producer_task = asyncio.create_task(produce(loop_, queue_, cap_))
    consumer_task = asyncio.create_task(consume(queue_))
    await asyncio.gather(producer_task, consumer_task)


if __name__ == '__main__':
    # config = '/home/weidawang/Repo/mmaction2/configs/recognition/i3d/i3d_r50_video_inference_32x2x1_100e_kinetics400_rgb.py'
    # checkpoint = '/home/weidawang/Repo/mmaction2/checkpoints/i3d_r50_video_32x2x1_100e_kinetics400_rgb_20200826-e31c6f52.pth'
    # device = torch.device('cuda:0')
    # model = init_recognizer(config, checkpoint, device=device, use_frames=False)
    video_path = 'replace_with_your_video.mp4'
    model_input_height = 256
    model_input_width = 340
    # target_layer_name = 'backbone/layer4/1/relu'
    # gradcam = GradCAM(model, target_layer_name)

    cap = cv2.VideoCapture(video_path)
    width = cap.get(cv2.CAP_PROP_FRAME_WIDTH)    # float
    height = cap.get(cv2.CAP_PROP_FRAME_HEIGHT)  # float

    frame_buffer = np.zeros((1, 1, 3, 32, model_input_height, model_input_width))
    display_buffer = np.zeros((32, model_input_height, model_input_width, 3))  # (32, 256, 340, 3)

    loop = asyncio.get_event_loop()
    queue = asyncio.LifoQueue(maxsize=2)
    try:
        loop.run_until_complete(run(loop_=loop, queue_=queue, cap_=cap))
    finally:
        print("shutdown service")
        loop.close()
But when I run it, it reports the following error:
concurrent.futures.process._RemoteTraceback:
"""
Traceback (most recent call last):
File "/home/weidawang/miniconda3/lib/python3.7/concurrent/futures/process.py", line 205, in _sendback_result
exception=exception))
File "/home/weidawang/miniconda3/lib/python3.7/multiprocessing/queues.py", line 358, in put
obj = _ForkingPickler.dumps(obj)
File "/home/weidawang/miniconda3/lib/python3.7/multiprocessing/reduction.py", line 51, in dumps
cls(buf, protocol).dump(obj)
TypeError: can't pickle coroutine objects
"""
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "/home/weidawang/Repo/Python-AI-Action-Utils/temp2.py", line 120, in <module>
loop.run_until_complete(run(loop_=loop, queue_=queue, cap_=cap))
File "/home/weidawang/miniconda3/lib/python3.7/asyncio/base_events.py", line 587, in run_until_complete
return future.result()
File "/home/weidawang/Repo/Python-AI-Action-Utils/temp2.py", line 94, in run
await asyncio.gather(producer_task, consumer_task)
File "/home/weidawang/Repo/Python-AI-Action-Utils/temp2.py", line 76, in produce
display_buffer = await asyncio.create_task(run_blocking_func(loop_, queue_,frame_buffer))
File "/home/weidawang/Repo/Python-AI-Action-Utils/temp2.py", line 42, in run_blocking_func
frame = await loop_.run_in_executor(pool, blocking_func)
TypeError: can't pickle coroutine objects
Task was destroyed but it is pending!
task: <Task pending coro=<consume() running at /home/weidawang/Repo/Python-AI-Action-Utils/temp2.py:88> wait_for=<Future pending cb=[<TaskWakeupMethWrapper object at 0x7f7cf1418cd0>()]> cb=[gather.<locals>._done_callback() at /home/weidawang/miniconda3/lib/python3.7/asyncio/tasks.py:691]>
Process finished with exit code 1
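For reference, the error can be reproduced without any of the mmaction2 code. The following is a minimal, hypothetical sketch using only asyncio, functools.partial and ProcessPoolExecutor, and it fails in the same place:

import asyncio
from concurrent.futures import ProcessPoolExecutor
from functools import partial


async def inference(x):
    # async target function, as in the script above
    return x


async def main():
    loop = asyncio.get_event_loop()
    with ProcessPoolExecutor() as pool:
        # The worker process calls inference(42) and gets back a coroutine
        # object (never awaited); pickling that coroutine to return it to the
        # main process raises "TypeError: can't pickle coroutine objects".
        result = await loop.run_in_executor(pool, partial(inference, 42))
        print(result)


if __name__ == '__main__':
    asyncio.get_event_loop().run_until_complete(main())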
Best answer
If you use run_in_executor, the target function must not be async: remove the async keyword in front of def inference(). As written, the worker process calls inference() and gets back a coroutine object rather than a result, and that coroutine cannot be pickled to be sent back to the main process, which is exactly the TypeError shown in the traceback.
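As a sketch of the fix applied to the script above (assuming everything else stays unchanged and the model-specific lines remain commented out; the placeholder return value is illustrative only):

import asyncio
from concurrent.futures import ProcessPoolExecutor
from functools import partial
import numpy as np


# Plain (non-async) target function: safe to hand to a ProcessPoolExecutor.
def inference(frame_buffer):
    print("starting inference")
    # ... run the Grad-CAM forward/backward pass here (omitted as in the
    # original script) and build the display buffer ...
    return np.zeros((32, 256, 340, 3))  # placeholder result


async def run_blocking_func(loop_, queue_, frame_buffer):
    with ProcessPoolExecutor() as pool:
        blocking_func = partial(inference, frame_buffer)
        # run_in_executor now receives a regular callable, so the worker's
        # return value (a picklable numpy array) can be sent back.
        frame = await loop_.run_in_executor(pool, blocking_func)
        await queue_.put(frame)

Note that everything passed to or returned from the pool still has to be picklable, so the function should exchange plain numpy arrays with the main process, as the original code already does via .cpu().detach().numpy().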
Regarding python - TypeError: can't pickle coroutine objects when i am using asyncio loop.run_in_executor(), we found a similar question on Stack Overflow: https://stackoverflow.com/questions/65557258/