Skip to content

Commit

Permalink
[static-runtime] add schema checks to most of the ops where these checks are missing (pytorch#84163)
Browse files Browse the repository at this point in the history

Test Plan: existing unit tests; also fix some failing ones along the way

Differential Revision: D39074902

Pull Request resolved: pytorch#84163
Approved by: https://github.com/mikeiovine
  • Loading branch information
tenpercent authored and pytorchmergebot committed Sep 1, 2022
1 parent d648375 commit bf62ece
Show file tree
Hide file tree
Showing 5 changed files with 303 additions and 112 deletions.
32 changes: 16 additions & 16 deletions benchmarks/static_runtime/test_generated_ops.cc
Original file line number Diff line number Diff line change
Expand Up @@ -3666,8 +3666,8 @@ TEST(StaticRuntime, autogen_bitwise_left_shift_Tensor) {
return (%cloned)
)IR";

auto self0 = at::randint(1, 100, {6, 6, 6}, at::kInt);
auto other0 = at::randint(1, 100, {6, 6, 6}, at::kInt);
auto self0 = at::randint(1, 1 << 4, {6, 6, 6}, at::kInt);
auto other0 = at::randint(1, 26, {6, 6, 6}, at::kInt);
std::vector<IValue> args{self0, other0};
testStaticRuntime(
script,
Expand All @@ -3677,8 +3677,8 @@ TEST(StaticRuntime, autogen_bitwise_left_shift_Tensor) {
/*use_equalnan=*/false,
/*check_resize=*/true);

auto self1 = at::randint(1, 100, {22, 22, 22}, at::kInt);
auto other1 = at::randint(1, 100, {22, 22, 22}, at::kInt);
auto self1 = at::randint(1, 1 << 4, {22, 22, 22}, at::kInt);
auto other1 = at::randint(1, 26, {22, 22, 22}, at::kInt);
std::vector<IValue> args2{self1, other1};
testStaticRuntime(
script,
Expand All @@ -3698,8 +3698,8 @@ TEST(StaticRuntime, autogen_bitwise_right_shift_Tensor) {
return (%cloned)
)IR";

auto self0 = at::randint(1, 100, {6, 6, 6}, at::kInt);
auto other0 = at::randint(1, 100, {6, 6, 6}, at::kInt);
auto self0 = at::randint(1 << 21, 1 << 30, {6, 6, 6}, at::kInt);
auto other0 = at::randint(1, 22, {6, 6, 6}, at::kInt);
std::vector<IValue> args{self0, other0};
testStaticRuntime(
script,
Expand All @@ -3709,8 +3709,8 @@ TEST(StaticRuntime, autogen_bitwise_right_shift_Tensor) {
/*use_equalnan=*/false,
/*check_resize=*/true);

auto self1 = at::randint(1, 100, {22, 22, 22}, at::kInt);
auto other1 = at::randint(1, 100, {22, 22, 22}, at::kInt);
auto self1 = at::randint(1 << 21, 1 << 30, {22, 22, 22}, at::kInt);
auto other1 = at::randint(1, 22, {22, 22, 22}, at::kInt);
std::vector<IValue> args2{self1, other1};
testStaticRuntime(
script,
Expand Down Expand Up @@ -6230,8 +6230,8 @@ TEST(StaticRuntime, autogen_adaptive_max_pool2d_backward) {
return (%cloned)
)IR";

auto grad_output0 = at::randint(-3, 2, {2, 2, 2});
auto self0 = at::randint(-3, 2, {2, 2, 2});
auto grad_output0 = at::randint(-3, 2, {2, 2, 2}, at::kFloat);
auto self0 = at::randint(-3, 2, {2, 2, 2}, at::kFloat);
auto indices0 = at::randint(0, 1, {2, 2, 2}, at::kLong);
std::vector<IValue> args{grad_output0, self0, indices0};
testStaticRuntime(
Expand All @@ -6242,8 +6242,8 @@ TEST(StaticRuntime, autogen_adaptive_max_pool2d_backward) {
/*use_equalnan=*/false,
/*check_resize=*/true);

auto grad_output1 = at::randint(-3, 3, {3, 3, 3});
auto self1 = at::randint(-3, 2, {3, 3, 3});
auto grad_output1 = at::randint(-3, 3, {3, 3, 3}, at::kFloat);
auto self1 = at::randint(-3, 2, {3, 3, 3}, at::kFloat);
auto indices1 = at::randint(0, 1, {3, 3, 3}, at::kLong);
std::vector<IValue> args2{grad_output1, self1, indices1};
testStaticRuntime(
Expand All @@ -6264,8 +6264,8 @@ TEST(StaticRuntime, autogen_adaptive_max_pool3d_backward) {
return (%cloned)
)IR";

auto grad_output0 = at::randint(-3, 2, {2, 2, 2, 2});
auto self0 = at::randint(-3, 2, {2, 2, 2, 2});
auto grad_output0 = at::randint(-3, 2, {2, 2, 2, 2}, at::kFloat);
auto self0 = at::randint(-3, 2, {2, 2, 2, 2}, at::kFloat);
auto indices0 = at::randint(0, 1, {2, 2, 2, 2}, at::kLong);
std::vector<IValue> args{grad_output0, self0, indices0};
testStaticRuntime(
Expand All @@ -6276,8 +6276,8 @@ TEST(StaticRuntime, autogen_adaptive_max_pool3d_backward) {
/*use_equalnan=*/false,
/*check_resize=*/true);

auto grad_output1 = at::randint(-3, 3, {3, 3, 3, 3});
auto self1 = at::randint(-3, 2, {3, 3, 3, 3});
auto grad_output1 = at::randint(-3, 3, {3, 3, 3, 3}, at::kFloat);
auto self1 = at::randint(-3, 2, {3, 3, 3, 3}, at::kFloat);
auto indices1 = at::randint(0, 1, {3, 3, 3, 3}, at::kLong);
std::vector<IValue> args2{grad_output1, self1, indices1};
testStaticRuntime(
Expand Down
8 changes: 4 additions & 4 deletions benchmarks/static_runtime/test_static_runtime.cc
Original file line number Diff line number Diff line change
Expand Up @@ -325,8 +325,8 @@ TEST(StaticRuntime, ClampIntTensor) {
a = torch.clamp(inp, min, max).clone()
return (a)
)JIT";
auto a = at::randint(0, 20, {2, 3});
auto b = at::randint(0, 20, {4, 3, 2});
auto a = at::randint(0, 20, {2, 3}, at::kFloat);
auto b = at::randint(0, 20, {4, 3, 2}, at::kFloat);
auto min = 5.0f;
auto max = 5.0f;
testStaticRuntime(src, {a, min, max});
Expand Down Expand Up @@ -2845,9 +2845,9 @@ TEST(StaticRuntime, RemainderTensor) {
)JIT";

std::vector<IValue> args1 = {
at::randint(0, 10, {2, 2}), at::randint(0, 10, {2, 2})};
at::randint(0, 10, {2, 2}), at::randint(1, 10, {2, 2})};
std::vector<IValue> args2 = {
at::randint(0, 10, {3, 6}), at::randint(0, 10, {3, 6})};
at::randint(0, 10, {3, 6}), at::randint(1, 10, {3, 6})};

// Use allclose and equalnan since outputs may be NaN.
testStaticRuntime(
Expand Down
Loading

0 comments on commit bf62ece

Please sign in to comment.