Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Extend support for the resize operator #1125

Open
wants to merge 4 commits into
base: dev
Choose a base branch
from
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
27 changes: 26 additions & 1 deletion src/finn/transformation/fpgadataflow/convert_to_hw_layers.py
Original file line number Diff line number Diff line change
Expand Up @@ -289,7 +289,32 @@ def apply(self, model):
if n.op_type == "Upsample":
scales = model.get_initializer(n.input[1])
else:
scales = model.get_initializer(n.input[2])
if len(n.input) == 2:
# Resize version 10
scales = model.get_initializer(n.input[1])
elif len(n.input) == 3:
# Resize version 11 and up (no size input)
scales = model.get_initializer(n.input[2])
elif len(n.input) == 4:
# Resize version 11 and up
scales_exists = (model.get_initializer(n.input[2]) is not None) and (
len(model.get_initializer(n.input[2])) != 0
)
sizes_exists = (model.get_initializer(n.input[3]) is not None) and (
len(model.get_initializer(n.input[3])) != 0
)
assert scales_exists ^ sizes_exists, (
"%s: Either scales or the target output size must "
"be specified. Specifying both is prohibited." % n.name
)
if scales_exists:
# Scales input
scales = model.get_initializer(n.input[2])
else:
# Convert sizes to scales
sizes = model.get_initializer(n.input[3])
data_input_size = model.get_tensor_shape(n.input[0])
scales = sizes / data_input_size
in_shape = model.get_tensor_shape(n.input[0])

dt = model.get_tensor_datatype(n.input[0])
Expand Down
44 changes: 36 additions & 8 deletions src/finn/transformation/streamline/reorder.py
Original file line number Diff line number Diff line change
Expand Up @@ -769,9 +769,37 @@ def apply(self, model):
consumer = model.find_consumer(n.output[0])
producer = model.find_producer(n.input[0])
if n.op_type == "Upsample":
scales_ind = 1
transformation_ind = 1
d_type = "float32"
else:
scales_ind = 2
if len(n.input) == 2:
# Resize version 10
transformation_ind = 1
d_type = "float32"
elif len(n.input) == 3:
# Resize version 11 and up (no size input)
transformation_ind = 2
d_type = "float32"
elif len(n.input) == 4:
# Resize version 11 and up
scales_exists = (model.get_initializer(n.input[2]) is not None) and (
len(model.get_initializer(n.input[2])) != 0
)
sizes_exists = (model.get_initializer(n.input[3]) is not None) and (
len(model.get_initializer(n.input[3])) != 0
)
assert scales_exists ^ sizes_exists, (
"%s: Either scales or the target output size must "
"be specified. Specifying both is prohibited." % n.name
)
if scales_exists:
# Scales input
transformation_ind = 2
d_type = "float32"
else:
# Sizes input
transformation_ind = 3
d_type = "int64"
if producer is not None and producer.op_type == "Transpose":
perms = list(get_by_name(producer.attribute, "perm").ints)
if perms == [0, 3, 1, 2]:
Expand All @@ -781,12 +809,12 @@ def apply(self, model):
model = model.transform(MoveTransposePastFork())
# topology modified, "ask" ModelWrapper to apply this transform again
return (model, True)
old_value = model.get_initializer(n.input[scales_ind])
old_value = model.get_initializer(n.input[transformation_ind])
new_value = np.array(
[old_value[idx] for idx in (0, 2, 3, 1)],
dtype=np.dtype("float32"),
dtype=np.dtype(d_type),
)
model.set_initializer(n.input[scales_ind], new_value)
model.set_initializer(n.input[transformation_ind], new_value)
start_name = producer.input[0]
mid_name = n.input[0]
end_name = n.output[0]
Expand All @@ -803,12 +831,12 @@ def apply(self, model):
elif consumer is not None and consumer.op_type == "Transpose":
perms = list(get_by_name(consumer.attribute, "perm").ints)
if perms == [0, 2, 3, 1]:
old_value = model.get_initializer(n.input[scales_ind])
old_value = model.get_initializer(n.input[transformation_ind])
new_value = np.array(
[old_value[idx] for idx in (0, 2, 3, 1)],
dtype=np.dtype("float32"),
dtype=np.dtype(d_type),
)
model.set_initializer(n.input[scales_ind], new_value)
model.set_initializer(n.input[transformation_ind], new_value)
start_name = n.input[0]
mid_name = consumer.input[0]
end_name = consumer.output[0]
Expand Down
80 changes: 79 additions & 1 deletion tests/transformation/streamline/test_scale_resize_nhwc.py
Original file line number Diff line number Diff line change
Expand Up @@ -180,6 +180,60 @@ def create_transpose_resize_transpose(ifm_dim, ifm_ch, scales, mode, idt):
return model


def create_resize_transpose_sizes(ifm_dim, ifm_ch, sizes, mode, idt):
    """Build a Resize(sizes input) -> Transpose(NCHW -> NHWC) test model.

    Uses the opset-11+ four-input Resize interface where the target output
    shape is supplied through the ``sizes`` input and the ``scales`` input is
    declared but left empty (with four inputs, exactly one of scales/sizes may
    be populated). ``roi`` is unused and only present for interface compliance.

    Parameters (reviewer notes on assumed shapes — confirm against callers):
    - ifm_dim: (H, W) of the NCHW input feature map
    - ifm_ch: number of input channels
    - sizes: target output shape [N, C, H_out, W_out], int64
    - mode: Resize interpolation mode string, e.g. "nearest"
    - idt: QONNX DataType annotated on the model input/output

    Returns a shape-inferred ModelWrapper with the input layout set to NCHW.
    """
    # Output spatial dims come straight from the requested sizes tensor.
    ofm_dim_h = sizes[2]
    ofm_dim_w = sizes[3]
    inp = oh.make_tensor_value_info("inp", TensorProto.FLOAT, [1, ifm_ch, ifm_dim[0], ifm_dim[1]])

    # Empty scales: the caller sets an empty initializer so that only the
    # sizes input drives the Resize output shape.
    scales = oh.make_tensor_value_info("scales", TensorProto.FLOAT, [])

    # Not actually used, only needed for compliance with the Resize node interface
    roi = oh.make_tensor_value_info("roi", TensorProto.FLOAT, [4])

    param = oh.make_tensor_value_info("sizes", TensorProto.INT64, [4])

    outp_up = oh.make_tensor_value_info(
        "outp_up", TensorProto.FLOAT, [1, ifm_ch, ofm_dim_h, ofm_dim_w]
    )
    outp = oh.make_tensor_value_info("outp", TensorProto.FLOAT, [1, ofm_dim_h, ofm_dim_w, ifm_ch])

    resize_node = oh.make_node(
        "Resize",
        inputs=["inp", "roi", "scales", "sizes"],
        outputs=["outp_up"],
        name="Resize1",
        mode=mode,
    )

    # Consistency fix: use oh.make_node like every other node constructor in
    # this module (was onnx.helper.make_node; oh aliases the same helper).
    transpose_node = oh.make_node(
        "Transpose",
        inputs=["outp_up"],
        outputs=["outp"],
        name="Transpose1",
        perm=[0, 2, 3, 1],
    )

    graph = oh.make_graph(
        nodes=[resize_node, transpose_node],
        name="resize_graph",
        inputs=[inp],
        outputs=[outp],
        value_info=[outp_up, roi, scales, param],
    )

    model = qonnx_make_model(graph, producer_name="resize_model4")
    model = ModelWrapper(model)
    model.set_tensor_datatype("inp", idt)
    model.set_tensor_datatype("outp", idt)

    # Annotate NCHW layout on the input, then run shape/layout inference so
    # downstream transforms (e.g. MakeScaleResizeNHWC) see complete metadata.
    model.set_tensor_layout("inp", DataLayout.NCHW)
    model = model.transform(InferShapes())
    model = model.transform(InferDataLayouts())

    return model


def check_transform(model):
graph = model.graph
node_ind = 0
Expand All @@ -198,20 +252,27 @@ def check_transform(model):
@pytest.mark.parametrize("ifm_ch", [3])
# scales
@pytest.mark.parametrize("scales", [[1, 1, i, j] for i in range(2, 5) for j in range(2, 5)])
# sizes
@pytest.mark.parametrize(
"sizes", [[1, 3, 2**i, 2**j] for i in range(6, 7) for j in range(6, 7)]
)
# mode
@pytest.mark.parametrize("mode", ["nearest"])
# input datatype
@pytest.mark.parametrize("idt", [DataType["INT4"]])
def test_scale_resize_nhwc(ifm_dim, ifm_ch, scales, mode, idt):
def test_scale_resize_nhwc(ifm_dim, ifm_ch, sizes, scales, mode, idt):
# create models
resize_model1 = create_resize_transpose(ifm_dim, ifm_ch, scales, mode, idt)
resize_model2 = create_transpose_resize(ifm_dim, ifm_ch, scales, mode, idt)
resize_model3 = create_transpose_resize_transpose(ifm_dim, ifm_ch, scales, mode, idt)
resize_model4 = create_resize_transpose_sizes(ifm_dim, ifm_ch, sizes, mode, idt)

# set initializers
resize_model1.set_initializer("scales", np.array(scales, dtype=np.float32))
resize_model2.set_initializer("scales", np.array(scales, dtype=np.float32))
resize_model3.set_initializer("scales", np.array(scales, dtype=np.float32))
resize_model4.set_initializer("sizes", np.array(sizes, dtype=np.int64))
resize_model4.set_initializer("scales", np.array([], dtype=np.float32))

# generate input tensor for testing
input_tensor_nchw = gen_finn_dt_tensor(idt, [1, ifm_ch, ifm_dim[0], ifm_dim[1]])
Expand Down Expand Up @@ -269,3 +330,20 @@ def test_scale_resize_nhwc(ifm_dim, ifm_ch, scales, mode, idt):
# compare outputs
assert (expected3 == output3).all()
assert check_transform(resize_model3)

# execute fourth model
output_dict4 = oxe.execute_onnx(resize_model4, input_dict_nchw)
expected4 = output_dict4["outp"]

# transform Resize into ResizeNHWC
resize_model4 = resize_model4.transform(MakeScaleResizeNHWC())
resize_model4 = resize_model4.transform(InferDataLayouts())

# execute transformed model
output_node_name4 = resize_model4.graph.output[0].name
output_dict4 = oxe.execute_onnx(resize_model4, input_dict_nchw, return_full_exec_context=False)
output4 = output_dict4[output_node_name4]

# compare outputs
assert (expected4 == output4).all()
assert check_transform(resize_model4)