Trying to convert the model to onnx model, but got error
File "to_onnx.py", line 72, in
export_model(model, img_input, export_model_name)
File "to_onnx.py", line 30, in export_model
torch.onnx.export(model, input, export_model_name, verbose=False, export_params=True, opset_version=11)
File "C:\Users\yyyy\Anaconda3\envs\torchreid\lib\site-packages\torch\onnx_init_.py", line 148, in export
strip_doc_string, dynamic_axes, keep_initializers_as_inputs)
File "C:\Users\yyyy\Anaconda3\envs\torchreid\lib\site-packages\torch\onnx\utils.py", line 66, in export
dynamic_axes=dynamic_axes, keep_initializers_as_inputs=keep_initializers_as_inputs)
File "C:\Users\yyyy\Anaconda3\envs\torchreid\lib\site-packages\torch\onnx\utils.py", line 416, in _export
fixed_batch_size=fixed_batch_size)
File "C:\Users\yyyy\Anaconda3\envs\torchreid\lib\site-packages\torch\onnx\utils.py", line 279, in _model_to_graph
graph, torch_out = _trace_and_get_graph_from_model(model, args, training)
File "C:\Users\yyyy\Anaconda3\envs\torchreid\lib\site-packages\torch\onnx\utils.py", line 236, in _trace_and_get_graph_from_model
trace_graph, torch_out, inputs_states = torch.jit._get_trace_graph(model, args, _force_outplace=True, return_inputs_states=True)
File "C:\Users\yyyy\Anaconda3\envs\torchreid\lib\site-packages\torch\jit_init.py", line 277, in _get_trace_graph
outs = ONNXTracedModule(f, _force_outplace, return_inputs, return_inputs_states)(*args, **kwargs)
File "C:\Users\yyyy\Anaconda3\envs\torchreid\lib\site-packages\torch\nn\modules\module.py", line 532, in call
result = self.forward(*input, **kwargs)
File "C:\Users\yyyy\Anaconda3\envs\torchreid\lib\site-packages\torch\jit_init.py", line 332, in forward
in_vars, in_desc = _flatten(args)
RuntimeError: Only tuples, lists and Variables supported as JIT inputs/outputs. Dictionaries and strings are also accepted but their usage is not recommended. But got unsupported type numpy.ndarray
to_onnx.py
import os
import glob
import torch
import utils
import cv2
from torchvision.transforms import Compose
from models.midas_net import MidasNet
from models.transforms import Resize, NormalizeImage, PrepareForNet
import onnx
import onnxruntime
def test_model_accuracy(export_model_name, raw_output, input):
ort_session = onnxruntime.InferenceSession(export_model_name)
def to_numpy(tensor):
return tensor.detach().cpu().numpy() if tensor.requires_grad else tensor.cpu().numpy()
# compute ONNX Runtime output prediction
ort_inputs = {ort_session.get_inputs()[0].name: to_numpy(input)}
ort_outs = ort_session.run(None, ort_inputs)
# compare ONNX Runtime and PyTorch results
np.testing.assert_allclose(to_numpy(raw_output), ort_outs[0], rtol=1e-03, atol=1e-05)
print("Exported model has been tested with ONNXRuntime, and the result looks good!")
def export_model(model, input, export_model_name):
torch.onnx.export(model, input, export_model_name, verbose=False, export_params=True, opset_version=11)
onnx_model = onnx.load(export_model_name)
onnx.checker.check_model(onnx_model)
graph_output = onnx.helper.printable_graph(onnx_model.graph)
with open("graph_output.txt", mode="w") as fout:
fout.write(graph_output)
device = torch.device("cpu")
# load network
model_path = "model.pt"
model = MidasNet(model_path, non_negative=True)
transform = Compose(
[
Resize(
384,
384,
resize_target=None,
keep_aspect_ratio=True,
ensure_multiple_of=32,
resize_method="lower_bound",
image_interpolation_method=cv2.INTER_CUBIC,
),
NormalizeImage(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
PrepareForNet(),
]
)
model.to(device)
model.eval()
img = utils.read_image("input/line_up_00.jpg")
img_input = transform({"image": img})["image"]
# compute
#with torch.no_grad():
sample = torch.from_numpy(img_input).to(device).unsqueeze(0)
print("sample type = ", type(sample), ", shape of sample = ", sample.shape)
print(sample)
prediction = model.forward(sample)
export_model_name = "midas.onnx"
export_model(model, img_input, export_model_name)
Environment:
pytorch 1.4.0(installed by anaconda)
os is windows 10 64bits