Exporting ONNX with dynamic resolution

PyTorch 1.4.0 is not picky about the CUDA version; MXNet and TensorFlow are.

The export below works for a face-detection model:
```python
import torch

# 'net' is the loaded face-detection model (nn.Module), already in eval mode.
output_onnx = 'abcd.onnx'

input_names = ["images"]
output_names = ["detects"]
inputs = torch.randn(1, 3, 360, 640)

# Give each dynamic axis a distinct symbolic name: batch, height and width all vary.
dynamic_axes = {'images': {0: 'batch', 2: 'height', 3: 'width'}, 'detects': {0: 'batch'}}

# torch.onnx._export is a private alias of torch.onnx.export that also returns
# the traced output; the public torch.onnx.export works the same way here.
torch_out = torch.onnx._export(net, inputs, output_onnx, export_params=True, verbose=False,
                               input_names=input_names, output_names=output_names,
                               opset_version=11, dynamic_axes=dynamic_axes)
print("==> Exporting model to ONNX format at '{}'".format(output_onnx))
```
The following code is taken from:
```python
def predict(self, image):
    # Preprocess: returns the CHW float image plus the scale and resize factors.
    img, scale, resize = self.pre_process(image)
    im_height, im_width = img.shape[2:]

    try:
        # Update the input tensor's dimensions for this image, then
        # pre-infer to (re)allocate memory before running the session.
        self.interpreter.resizeTensor(self.input_tensor, (1, 3, im_height, im_width))
        self.interpreter.resizeSession(self.session)
        tmp_input = MNN.Tensor((1, 3, im_height, im_width), MNN.Halide_Type_Float, img,
                               MNN.Tensor_DimensionType_Caffe)
        self.input_tensor.copyFrom(tmp_input)
        self.interpreter.runSession(self.session)
    except Exception as e:
        print(e)

    # Fetch the confidence and location outputs, then decode detections.
    conf = np.array(self.interpreter.getSessionOutput(self.session, self.conf_name).getData())
    loc = np.array(self.interpreter.getSessionOutput(self.session, self.loc_name).getData())
    dets = self.post_process(resize, im_height, im_width, scale, loc.reshape(-1, 4), conf.reshape(-1, 2))
    return dets
```
When the input tensor's dimensions are undetermined or need to change, call resizeTensor to update the dimension information. This typically happens when the input dimensions were never set, or when they vary from call to call. After the dimension information of all tensors has been updated, call resizeSession to run the pre-inference step, which allocates memory and sets up memory reuse.
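To see that flow in isolation, here is a minimal sketch of the same MNN Python calls outside the class context; the model path face_detector.mnn and the 320x320 shape are placeholders, not a real model:

```python
import numpy as np
import MNN

# Minimal sketch of the resizeTensor/resizeSession flow described above.
interpreter = MNN.Interpreter("face_detector.mnn")  # placeholder model path
session = interpreter.createSession()
input_tensor = interpreter.getSessionInput(session)

# Update the input dimensions, then pre-infer to (re)allocate memory.
interpreter.resizeTensor(input_tensor, (1, 3, 320, 320))
interpreter.resizeSession(session)

# Copy data in and run.
img = np.random.rand(1, 3, 320, 320).astype(np.float32)
tmp = MNN.Tensor((1, 3, 320, 320), MNN.Halide_Type_Float, img,
                 MNN.Tensor_DimensionType_Caffe)
input_tensor.copyFrom(tmp)
interpreter.runSession(session)
output = interpreter.getSessionOutput(session)
print(np.array(output.getData()).shape)
```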
Note that if the MNN model is to support dynamic image-size input, do not set the dynamic_axes parameter of torch.onnx.export when exporting the ONNX model; that is, set dynamic_image = False in the export code below.
First, the code:
```python
output_onnx = 'faceDetector.onnx'
print("==> Exporting model to ONNX format at '{}'".format(output_onnx))
input_names = ["input0"]
output_names = ["output0", "output1", "output2"]
dynamic_image = True
# ONNX dynamic input -> export with dynamic_axes
if dynamic_image:
    # The numbers 0, 2, 3 are tensor axes: these axes accept dynamic input sizes.
    dynamic_axes = {'input0': [0, 2, 3], 'output0': [0, 1], 'output1': [0, 1], 'output2': [0, 1]}
    inputs = torch.randn(1, 3, 120, args.long_side).to(device)
    torch_out = torch.onnx._export(net, inputs, output_onnx, export_params=True, verbose=False,
                                   input_names=input_names, output_names=output_names,
                                   dynamic_axes=dynamic_axes)
# MNN dynamic input -> export without dynamic_axes
else:
    inputs = torch.randn(1, 3, 120, args.long_side).to(device)
    torch.onnx.export(net, inputs, output_onnx, export_params=True, verbose=False,
                      input_names=input_names, output_names=output_names)
```
Evidently, the only difference between dynamic-size and fixed-size export is the dynamic_axes argument of torch.onnx.export. In the example, 'input0': [0, 2, 3] lists tensor axes: axes 0, 2 and 3 of input0 accept dynamic input sizes.
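One way to check which axes actually came out dynamic is to load the exported file and print the input shape: dynamic axes appear as symbolic dim_param names, fixed axes as integers. A small sketch, assuming the faceDetector.onnx produced above:

```python
import onnx

# Dynamic axes print as symbolic names; fixed axes as integers.
model = onnx.load("faceDetector.onnx")
for inp in model.graph.input:
    dims = [d.dim_param if d.dim_param else d.dim_value
            for d in inp.type.tensor_type.shape.dim]
    print(inp.name, dims)
# With 'input0': [0, 2, 3], axes 0, 2 and 3 print as generated names
# and axis 1 prints as the fixed value 3.
```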
What if one of the model's input dimensions keeps changing, for example the length of the third axis (index 2) is not always 300? How do we export then? We only need to mark that axis as dynamic, as follows:
```python
torch.onnx.export(model,               # model being run
                  x,                   # model input (or a tuple for multiple inputs)
                  "model2.onnx",       # where to save the model (can be a file or file-like object)
                  export_params=True,  # store the trained parameter weights inside the model file
                  opset_version=10,    # the ONNX version to export the model to
                  do_constant_folding=True,  # whether to execute constant folding for optimization
                  input_names=['input'],     # the model's input names
                  output_names=['output'],   # the model's output names
                  dynamic_axes={'input': {2: 'in_height'}})  # axis 2 of the input is dynamic
```
If several axes vary, just list each of them, and the outputs can be dynamic as well:
```python
torch.onnx.export(model,               # model being run
                  x,                   # model input (or a tuple for multiple inputs)
                  "model2.onnx",       # where to save the model (can be a file or file-like object)
                  export_params=True,  # store the trained parameter weights inside the model file
                  opset_version=10,    # the ONNX version to export the model to
                  do_constant_folding=True,  # whether to execute constant folding for optimization
                  input_names=['input'],     # the model's input names
                  output_names=['output'],   # the model's output names
                  dynamic_axes={
                      'input':  {0: 'batch_size', 2: 'in_height', 3: 'in_width'},
                      'output': {0: 'batch_size', 2: 'out_height', 3: 'out_width'}})
```
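A quick sanity check for such an export is to run it through onnxruntime at two different resolutions: if the axes are truly dynamic, both calls succeed without re-exporting. A sketch assuming the model2.onnx produced above, with its input named 'input':

```python
import numpy as np
import onnxruntime as ort

# Run the same exported graph at two input sizes.
sess = ort.InferenceSession("model2.onnx", providers=["CPUExecutionProvider"])
for h, w in [(320, 320), (480, 640)]:
    x = np.random.rand(1, 3, h, w).astype(np.float32)
    out = sess.run(None, {"input": x})
    print((h, w), "->", [o.shape for o in out])
```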
Original article: https://blog.csdn.net/weixin_43198122/article/details/124452172
Adapting yolov5-face to dynamic input failed:
```python
import argparse
import sys
import time

sys.path.append('./')  # to run '$ python *.py' files in subdirectories

import torch
import torch.nn as nn

import models
from models.experimental import attempt_load
from utils.activations import Hardswish, SiLU
from utils.general import set_logging, check_img_size
import onnx

if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--weights', type=str, default='../yolov5n-face.pt', help='weights path')  # from yolov5/models/
    parser.add_argument('--img-size', nargs='+', type=int, default=[320, 320], help='image size')  # height, width
    parser.add_argument('--batch-size', type=int, default=1, help='batch size')
    opt = parser.parse_args()
    opt.img_size *= 2 if len(opt.img_size) == 1 else 1  # expand
    print(opt)
    set_logging()
    t = time.time()

    # Load PyTorch model
    model = attempt_load(opt.weights, map_location=torch.device('cpu'))  # load FP32 model
    model.eval()
    labels = model.names

    # Checks
    gs = int(max(model.stride))  # grid size (max stride)
    opt.img_size = [check_img_size(x, gs) for x in opt.img_size]  # verify img_size are gs-multiples

    # Input
    img = torch.zeros(opt.batch_size, 3, *opt.img_size)  # image size(1,3,320,192) iDetection

    # Update model
    for k, m in model.named_modules():
        m._non_persistent_buffers_set = set()  # pytorch 1.6.0 compatibility
        if isinstance(m, models.common.Conv):  # assign export-friendly activations
            if isinstance(m.act, nn.Hardswish):
                m.act = Hardswish()
            elif isinstance(m.act, nn.SiLU):
                m.act = SiLU()
        # elif isinstance(m, models.yolo.Detect):
        #     m.forward = m.forward_export  # assign forward (optional)
        if isinstance(m, models.common.ShuffleV2Block):  # shufflenet block nn.SiLU
            for i in range(len(m.branch1)):
                if isinstance(m.branch1[i], nn.SiLU):
                    m.branch1[i] = SiLU()
            for i in range(len(m.branch2)):
                if isinstance(m.branch2[i], nn.SiLU):
                    m.branch2[i] = SiLU()
    model.model[-1].export = True  # set Detect() layer export=True
    y = model(img)  # dry run

    # ONNX export
    print('\nStarting ONNX export with onnx %s...' % onnx.__version__)
    output_onnx = opt.weights.replace('.pt', '.onnx')  # filename
    model.fuse()  # only for ONNX
    torch.onnx.export(model, img, output_onnx, verbose=False, opset_version=12, input_names=['data'],
                      output_names=['stride_' + str(int(x)) for x in model.stride])

    # Failed dynamic-axes attempt, kept for reference:
    # input_names = ["data"]
    # # output_names = ['stride_' + str(int(x)) for x in model.stride]
    # output_names = ['detects']
    # inputs = torch.randn(1, 3, 640, 640)
    # dynamic_axes = {'data': {0: 'batch', 2: 'batch', 3: 'batch'}, 'detects': {0: 'batch'}}
    # torch_out = torch.onnx._export(model, img, output_onnx, export_params=True, verbose=False,
    #                                input_names=input_names, output_names=output_names,
    #                                opset_version=12, dynamic_axes=dynamic_axes)

    # Checks
    onnx_model = onnx.load(output_onnx)  # load onnx model
    onnx.checker.check_model(onnx_model)  # check onnx model
    # print(onnx.helper.printable_graph(onnx_model.graph))  # print a human readable model
    print('ONNX export success, saved as %s' % output_onnx)

    # Finish
    print('\nExport complete (%.2fs). Visualize with https://github.com/lutzroeder/netron.' % (time.time() - t))
```
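A plausible reason for the failure, since the actual error message is not recorded here: the YOLOv5 Detect head reshapes each feature map with view(bs, na, no, ny, nx), and tracing bakes ny and nx in as constants, so a graph traced at 640x640 cannot run at other sizes even when dynamic_axes is set. One way to confirm is to feed the exported file a second resolution and watch the Reshape fail; a sketch assuming an ONNX file produced by the commented-out dynamic_axes attempt above:

```python
import numpy as np
import onnxruntime as ort

# If tracing baked fixed shapes into Reshape nodes, the second run fails
# even though dynamic_axes was passed at export time.
sess = ort.InferenceSession("yolov5n-face.onnx", providers=["CPUExecutionProvider"])
name = sess.get_inputs()[0].name
for h, w in [(640, 640), (320, 640)]:
    x = np.random.rand(1, 3, h, w).astype(np.float32)
    try:
        outs = sess.run(None, {name: x})
        print((h, w), "ok:", [o.shape for o in outs])
    except Exception as e:
        print((h, w), "failed:", type(e).__name__)
```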