import pyxir
import pyxir.contrib.target.DPUCZDX8G
import tvm
from tvm import contrib
import tvm.relay as relay
from tvm.relay import transform
from tvm.contrib import utils, graph_executor as graph_runtime
from tvm.contrib.target import vitis_ai
from tvm.relay.build_module import bind_params_by_name
from tvm.relay.op.contrib.vitis_ai import partition_for_vitis_ai
import logging
# Enable INFO-level logging for pyxir so the Vitis-AI partitioning and
# on-the-fly quantization steps report their progress.
logging.basicConfig()
logging.getLogger('pyxir').setLevel(logging.INFO)
import os
import numpy as np
from typing import List, Any
from PIL import Image
from collections import namedtuple
import torch
from torch import nn
import torchvision
from torchvision import transforms
import cv2
def do_trace(model, in_size=100):
    """Return a TorchScript trace of *model* for a (1, 3, in_size, in_size) input.

    Args:
        model: ``torch.nn.Module`` to trace (switched to eval mode as a side
            effect — see note below).
        in_size: spatial height/width of the random tracing input.

    Returns:
        The traced ``torch.jit.ScriptModule`` in eval mode.
    """
    # BUGFIX: put the model in eval mode BEFORE tracing. Train/eval-dependent
    # ops (dropout, batch-norm) have their behavior baked into the trace, so
    # calling .eval() on the traced module afterwards (as the original did) is
    # too late to affect what was recorded.
    model.eval()
    model_trace = torch.jit.trace(
        model, torch.rand(1, 3, in_size, in_size), check_trace=False
    )
    model_trace.eval()
    return model_trace
# Directory containing this script.
FILE_DIR = os.path.dirname(os.path.abspath(__file__))
# User's home directory (None if the HOME environment variable is unset).
HOME_DIR = os.getenv('HOME')
# Calibration images for on-the-fly Vitis-AI quantization.
# NOTE(review): hard-coded container path (COCO val2017, presumably) — confirm
# it exists in the target Vitis-AI docker environment.
QUANT_DIR = os.path.join("/workspace", "my_workspace/pics/coco_val2017")
def transform_image_torchvision(image, in_size):
    """Center-crop *image* to ``in_size`` x ``in_size`` and return a CHW float tensor."""
    pipeline = transforms.Compose([
        # transforms.Resize(in_size),
        transforms.CenterCrop(in_size),
        transforms.ToTensor(),
        # transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
    ])
    return pipeline(image)
def transform_image_torchvision_NCHW(image, in_size):
    """Preprocess *image* and prepend a batch axis, yielding an NCHW tensor."""
    chw = transform_image_torchvision(image, in_size)
    return chw[np.newaxis, :]
class Dummy(nn.Module):
    """Minimal test model: a single 3->3 transposed convolution (stride 2).

    For an input of spatial size S, the output spatial size is
    (S - 1) * 2 + 3 (kernel 3, stride 2, padding 0).
    """

    def __init__(self):
        super().__init__()
        self.up2 = nn.Sequential(nn.ConvTranspose2d(3, 3, 3, stride=2, padding=0))

    def forward(self, x):
        upsampled = self.up2(x)
        return upsampled
# Build the toy model and sanity-check a forward pass before tracing.
model = Dummy()
in_size = 550
inp = torch.rand(1, 3, in_size, in_size)
with torch.no_grad():
    out = model(inp)
# TorchScript trace fed to the Relay PyTorch frontend.
script_module = do_trace(model, in_size=550)

# Relay import / compilation configuration.
input_name = "data"
shape_list = [(input_name, (1, 3, in_size, in_size))]
target = 'DPUCZDX8G-zcu104'            # Vitis-AI DPU target (ZCU104 board)
export_rt_mod_file = "vitis_ai.rtmod"  # re-assigned to an absolute path below
tvm_target = 'llvm'                    # host (x86) compilation target
lib_kwargs = {}

# Convert the traced module to Relay and bind the weights as constants so the
# layout-conversion / constant-folding passes below can see and fold them.
mod, params = relay.frontend.from_pytorch(script_module, shape_list)
mod = relay.transform.InferType()(mod)
mod["main"] = bind_params_by_name(mod["main"], params)
# --- Layout conversion around Vitis-AI partitioning -------------------------
# The DPU expects NHWC, so convert conv2d_transpose to NHWC before
# partitioning, then convert whatever remains on the host back to NCHW.
desired_layouts = {'nn.conv2d_transpose': ['NHWC', 'default']}
seq = tvm.transform.Sequential([relay.transform.RemoveUnusedFunctions(),
                                relay.transform.ConvertLayout(desired_layouts),
                                relay.transform.FoldConstant()])
# BUGFIX: the pass name was misspelled 'FoldSacleAxis'; PassContext does not
# validate pass names, so FoldScaleAxis was silently NOT disabled.
with tvm.transform.PassContext(opt_level=3, disabled_pass=['FoldScaleAxis']):
    mod = seq(mod)

# Offload the supported operators to the Vitis-AI DPU.
mod = partition_for_vitis_ai(mod, params, dpu=target)

# Convert the remaining (CPU-side) graph back to NCHW.
desired_layouts = {'nn.conv2d_transpose': ['NCHW', 'default']}
seq = tvm.transform.Sequential([relay.transform.RemoveUnusedFunctions(),
                                relay.transform.ConvertLayout(desired_layouts),
                                relay.transform.FoldConstant()])
with tvm.transform.PassContext(opt_level=3):
    mod = seq(mod)
# Compile for the host CPU first; PyXIR quantizes the DPU subgraph on-the-fly
# during the first px_quant_size inference runs below.
export_rt_mod_file = os.path.join(os.getcwd(), 'vitis_ai.rtmod')
build_options = {
    'dpu': target,
    # Where PyXIR serializes the runtime module after quantization, so the
    # edge (aarch64) build below can reload it without re-quantizing.
    'export_runtime_module': export_rt_mod_file
}
with tvm.transform.PassContext(opt_level=3, config={'relay.ext.vitis_ai.options': build_options}):
    lib = relay.build(mod, tvm_target, params=params)
InferenceSession = graph_runtime.GraphModule(lib["default"](tvm.cpu()))
# Number of calibration images used for on-the-fly quantization.
px_quant_size = 128
# Collect calibration image paths. Case-insensitive suffix match so files
# ending in .JPG / .jpeg / .PNG are also picked up — the original matched
# only the exact strings 'JPEG', 'jpg' and 'png' (a strict subset of this).
quant_files = [os.path.join(QUANT_DIR, f) for f in os.listdir(QUANT_DIR)
               if f.lower().endswith(('jpeg', 'jpg', 'png'))][:px_quant_size]
def inputs_func(img_files: List[str]):
    """Load and preprocess calibration images.

    Args:
        img_files: paths of the calibration images to load.

    Returns:
        List of (1, 3, 550, 550) NCHW tensors, one per image.
    """
    inputs = []
    for img_path in img_files:
        # BUGFIX: force 3-channel RGB. PNGs may be RGBA or grayscale, which
        # would yield 4- or 1-channel tensors after ToTensor() and break
        # set_input() against the (1, 3, 550, 550) graph input.
        frame = Image.open(img_path).convert('RGB')
        inputs.append(transform_image_torchvision_NCHW(frame, 550))
    return inputs
quant_images = inputs_func(quant_files)

# Run px_quant_size host inferences: PyXIR collects calibration data during
# these first runs and quantizes the DPU subgraph on-the-fly.
for index in range(px_quant_size):
    InferenceSession.set_input(input_name, quant_images[index])
    InferenceSession.run() # <- Breaks here on first attempt

# Export the host library (sanity check of the build artifact).
temp = utils.tempdir()
lib.export_library(temp.relpath("tvm_lib.so"))

# Rebuild for the aarch64 edge target, reloading the quantized runtime module
# exported above instead of re-quantizing on the board.
tvm_target = tvm.target.arm_cpu('ultra96')
lib_kwargs = {
    'fcompile': contrib.cc.create_shared,
    # NOTE(review): 'cc' normally points at a cross-compiler driver
    # (e.g. aarch64-linux-gnu-gcc), not the linker 'ld' — confirm this links.
    'cc': "/usr/aarch64-linux-gnu/bin/ld"
}
build_options = {
    'load_runtime_module': export_rt_mod_file
}
with tvm.transform.PassContext(opt_level=3, config={'relay.ext.vitis_ai.options': build_options}):
    lib_dpuczdx8g = relay.build(mod, tvm_target, params=params)
lib_dpuczdx8g.export_library('tvm_dpu.so', **lib_kwargs)
del InferenceSession
# Pasted error output from the failing run (kept for reference; commented out
# so this file remains valid Python — as plain text it is a SyntaxError):
#
# Traceback (most recent call last):
#   File "compile_dummy_deconvolution.py", line 124, in <module>
#     InferenceSession.run() # <- Breaks here on first attempt
#   File "/opt/vitis_ai/conda/envs/vitis-ai-tensorflow/lib/python3.6/site-packages/tvm-0.9.dev543+g0009a308d-py3.6-linux-x86_64.egg/tvm/contrib/graph_executor.py", line 208, in run
#     self._run()
#   File "/opt/vitis_ai/conda/envs/vitis-ai-tensorflow/lib/python3.6/site-packages/tvm-0.9.dev543+g0009a308d-py3.6-linux-x86_64.egg/tvm/_ffi/_ctypes/packed_func.py", line 237, in __call__
#     raise get_last_ffi_error()
# ValueError: ValueError: could not broadcast input array from shape (1,1100,1100,3) into shape (1,1101,1101,3)