[13/N] Fix clang-tidy warnings in jit (pytorch#132411)
Follows pytorch#132209

Pull Request resolved: pytorch#132411
Approved by: https://github.com/Skylion007
cyyever authored and pytorchmergebot committed Aug 2, 2024
1 parent 1250171 commit 07fe1dd
Showing 34 changed files with 251 additions and 382 deletions.
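
Most of the churn below follows a few recurring clang-tidy fixes: deleting NOLINTNEXTLINE suppressions that real fixes made obsolete, collapsing nested namespaces into the C++17 form, passing shared-pointer IR handles by const reference, switching container-sized loop indices to size_t, and initializing locals at declaration. As a reminder of how the suppression comments work, a minimal sketch with invented names (not code from this diff):

// Before: the warning is silenced for the next line only; 'x' really is
// left uninitialized by the constructor.
struct Before {
  // NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
  Before() {}
  int x;
};

// After: a default member initializer fixes the root cause, so the
// suppression can simply be deleted.
struct After {
  After() = default;
  int x = 0;
};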
7 changes: 2 additions & 5 deletions torch/csrc/jit/tensorexpr/analysis.h
@@ -53,7 +53,6 @@ class NodeFinder : public IRVisitor {
std::vector<NodePtr<Op>> nodes;
};

-// NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
class VarFinder : public IRVisitor {
public:
void visit(const VarPtr& v) override {
@@ -111,7 +110,6 @@ class BufFinder : public IRVisitor {
// Finds all kinds of write operations to the provided Buf.
class WritesToBuf : public IRVisitor {
public:
-// NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
WritesToBuf(BufPtr target) : target_(std::move(target)) {}

std::vector<StmtPtr> writes() {
@@ -143,7 +141,6 @@

class StmtsReadingBuf : public IRVisitor {
public:
-// NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
StmtsReadingBuf(BufPtr target) : target_(std::move(target)) {}

std::vector<StmtPtr> reads() {
@@ -282,8 +279,8 @@ class BufLiveRange : public IRVisitor {
public:
BufLiveRange(BufPtr b) : buf_(std::move(b)) {}

-static std::tuple<int32_t, int32_t> liveRange(StmtPtr s, BufPtr b) {
-BlockPtr block = to<Block>(std::move(s));
+static std::tuple<int32_t, int32_t> liveRange(const StmtPtr& s, BufPtr b) {
+BlockPtr block = to<Block>(s);
// We Only analyze buffer live ranges for block stmts.
if (!block) {
return std::make_tuple(0, 0);
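
The liveRange() signature change above is the pass-by-const-reference pattern (clang-tidy's performance-unnecessary-value-param): StmtPtr is a shared-pointer alias in this codebase, so taking it by value costs an atomic reference-count bump at every call site; the old std::move(s) avoided a second copy inside the function, but not the one at the call boundary. A minimal sketch of the difference, assuming a std::shared_ptr-style alias:

#include <memory>

struct Stmt {};
using StmtPtr = std::shared_ptr<Stmt>;  // assumed alias, mirroring the diff

// By value: the copy increments, and later decrements, the atomic refcount.
void byValue(StmtPtr s) { (void)s; }

// By const reference: the callee borrows the pointer; no refcount traffic.
void byConstRef(const StmtPtr& s) { (void)s; }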
1 change: 0 additions & 1 deletion torch/csrc/jit/tensorexpr/block_codegen.cpp
@@ -239,7 +239,6 @@ void BlockPrinter::PrintDistribution(const std::unordered_set<BufPtr>& bufs) {
for (auto& buf : bufs) {
emitIndent();
emitIndent();
-auto buf_name = buf->name_hint();
os() << block_analysis_->getFlatInputName(buf) << " = ";
os() << "{(0, 1, )}" << '\n';
}
8 changes: 2 additions & 6 deletions torch/csrc/jit/tensorexpr/block_codegen.h
@@ -14,9 +14,7 @@
#include <torch/csrc/jit/tensorexpr/ir_visitor.h>
#include <torch/csrc/jit/tensorexpr/unique_name_manager.h>

-namespace torch {
-namespace jit {
-namespace tensorexpr {
+namespace torch::jit::tensorexpr {

// A class that analyzes the given program relevant for Block backend.
class BlockAnalysis : public IRVisitor {
@@ -145,6 +143,4 @@ class TORCH_API BlockCodeGen : public CodeGen {

std::string GetUniqueFuncName(const std::string& func_prefix);
};
-} // namespace tensorexpr
-} // namespace jit
-} // namespace torch
+} // namespace torch::jit::tensorexpr
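
The namespace edit here (repeated in cpp_codegen.h and cuda_codegen.h below) is pure C++17 syntax: one nested namespace definition replaces three nested blocks and their three closing braces, with identical meaning. In isolation:

// Pre-C++17 spelling:
namespace torch { namespace jit { namespace tensorexpr {
void legacy_style();
}}} // torch::jit::tensorexpr

// C++17 spelling used by this commit; entities land in the same namespace:
namespace torch::jit::tensorexpr {
void new_style();
} // namespace torch::jit::tensorexpr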
5 changes: 0 additions & 5 deletions torch/csrc/jit/tensorexpr/codegen.h
@@ -155,20 +155,16 @@ class CodeGen::CallArg {
CallArg(const PaddedBuffer<T>& buffer);

template <typename T>
-// NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init,cppcoreguidelines-pro-type-const-cast)
CallArg(const std::vector<T>& buffer)
-// NOLINTNEXTLINE(cppcoreguidelines-pro-type-const-cast)
: data_(const_cast<T*>(buffer.data())) {}

-// NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
CallArg(void* ptr) : data_(ptr) {}

#define ARG_TYPE_CTOR(Type, Name) \
CallArg(Type v) { \
memcpy(buffer_, &v, sizeof(Type)); \
data_ = (void*)buffer_; \
}
-// NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
AT_FORALL_SCALAR_TYPES_AND3(Bool, Half, BFloat16, ARG_TYPE_CTOR);
#undef ARG_TYPE_CTOR

@@ -203,7 +199,6 @@ class CodeGen::CallArg {
TORCH_INTERNAL_ASSERT(data_ == (void*)buffer_); \
return (Type*)data_; \
}
-// NOLINTNEXTLINE(cppcoreguidelines-pro-type-const-cast)
AT_FORALL_SCALAR_TYPES_AND3(Bool, Half, BFloat16, ARG_PTR_DEFINE);
#undef ARG_PTR_DEFINE

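
The suppressions dropped from CallArg guarded constructors that leave one member unset on some path (data_ when only buffer_ is written, or vice versa). The underlying fix is not visible in this hunk, so presumably the members now carry default initializers or the warnings were judged spurious; the usual shape of such a fix, as an illustrative sketch only (names and sizes invented):

#include <cstring>

class CallArgSketch {
 public:
  explicit CallArgSketch(void* ptr) : data_(ptr) {}

  template <typename T>
  explicit CallArgSketch(T v) {
    static_assert(sizeof(T) <= sizeof(buffer_), "scalar must fit the buffer");
    std::memcpy(buffer_, &v, sizeof(T));
    data_ = buffer_;
  }

 private:
  // Default member initializers keep every constructor path fully
  // initialized, so no NOLINTNEXTLINE(...pro-type-member-init) is needed.
  void* data_ = nullptr;
  char buffer_[16] = {};
};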
22 changes: 11 additions & 11 deletions torch/csrc/jit/tensorexpr/cpp_codegen.cpp
@@ -79,52 +79,52 @@ void CppPrinter::printPrologue() {
template <typename T>
inline std::enable_if_t<!std::is_floating_point_v<T>, void> visit_mod(
std::ostream& os,
-const ExprPtr lhs,
-const ExprPtr rhs) {
+const ExprPtr& lhs,
+const ExprPtr& rhs) {
os << *lhs << " % " << *rhs;
}

template <typename T>
inline std::enable_if_t<std::is_floating_point_v<T>, void> visit_mod(
std::ostream& os,
-const ExprPtr lhs,
-const ExprPtr rhs) {
+const ExprPtr& lhs,
+const ExprPtr& rhs) {
os << "std::fmod(" << *lhs << ", " << *rhs << ")";
}

template <typename T>
inline std::
enable_if_t<std::is_floating_point_v<T> || std::is_integral_v<T>, void>
-visit_max(std::ostream& os, const ExprPtr lhs, const ExprPtr rhs) {
+visit_max(std::ostream& os, const ExprPtr& lhs, const ExprPtr& rhs) {
os << "std::max(" << *lhs << ", " << *rhs << ")";
}

template <typename T>
inline std::
enable_if_t<!std::is_floating_point_v<T> && !std::is_integral_v<T>, void>
-visit_max(std::ostream& os, const ExprPtr lhs, const ExprPtr rhs) {
+visit_max(std::ostream& os, const ExprPtr& lhs, const ExprPtr& rhs) {
os << "(" << *lhs << " < " << *rhs << ") ? " << *rhs << " : " << *lhs;
}

template <typename T>
inline std::
enable_if_t<std::is_floating_point_v<T> || std::is_integral_v<T>, void>
-visit_min(std::ostream& os, const ExprPtr lhs, const ExprPtr rhs) {
+visit_min(std::ostream& os, const ExprPtr& lhs, const ExprPtr& rhs) {
os << "std::min(" << *lhs << ", " << *rhs << ")";
}

template <typename T>
inline std::
enable_if_t<!std::is_floating_point_v<T> && !std::is_integral_v<T>, void>
-visit_min(std::ostream& os, const ExprPtr lhs, const ExprPtr rhs) {
+visit_min(std::ostream& os, const ExprPtr& lhs, const ExprPtr& rhs) {
os << *lhs << " < " << *rhs << " ? " << *lhs << " : " << *rhs;
}

template <typename T>
void visit_binary_op(
std::ostream& os,
-const ExprPtr lhs,
-const ExprPtr rhs,
+const ExprPtr& lhs,
+const ExprPtr& rhs,
IRNodeType op_type) {
switch (op_type) {
case IRNodeType::kMod:
@@ -242,7 +242,7 @@ void CppPrinter::visit(const IntrinsicsPtr& v) {
}

os() << "std::" << v->func_name() << "(";
-for (int i = 0; i < v->nparams(); i++) {
+for (size_t i = 0; i < v->nparams(); i++) {
if (i > 0) {
os() << ", ";
}
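
The int to size_t loop-index changes in this file assume nparams() returns an unsigned size type (it counts entries in a container), so comparing it against an int draws sign-compare diagnostics and would misbehave for counts above INT_MAX. The same pattern in a self-contained form:

#include <cstddef>
#include <iostream>
#include <vector>

int main() {
  std::vector<int> params{1, 2, 3};
  // With 'int i', the comparison below mixes signed and unsigned, which
  // -Wsign-compare and the matching clang-tidy checks flag.
  for (std::size_t i = 0; i < params.size(); i++) {
    if (i > 0) std::cout << ", ";
    std::cout << params[i];
  }
  std::cout << '\n';
}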
8 changes: 2 additions & 6 deletions torch/csrc/jit/tensorexpr/cpp_codegen.h
@@ -3,9 +3,7 @@
#include <torch/csrc/jit/tensorexpr/codegen.h>
#include <torch/csrc/jit/tensorexpr/ir_printer.h>

-namespace torch {
-namespace jit {
-namespace tensorexpr {
+namespace torch::jit::tensorexpr {

class CppVarNameRewriter;

@@ -97,6 +95,4 @@ class TORCH_API CppCodeGen : public CodeGen {
std::unique_ptr<CppVarNameRewriter> var_name_rewriter_;
};

-} // namespace tensorexpr
-} // namespace jit
-} // namespace torch
+} // namespace torch::jit::tensorexpr
5 changes: 3 additions & 2 deletions torch/csrc/jit/tensorexpr/cuda_codegen.cpp
@@ -249,7 +249,7 @@ void CudaPrinter::visit(const IntrinsicsPtr& v) {

// get type of resulting expression.
ScalarType returnType = v->param(0)->dtype().scalar_type();
-for (int i = 1; i < v->nparams(); ++i) {
+for (size_t i = 1; i < v->nparams(); ++i) {
returnType = promoteTypes(returnType, v->param(i)->dtype().scalar_type());
}

@@ -1303,7 +1303,8 @@ void CudaCodeGen::CompileToNVRTC(
"--std=c++17", compute.c_str(), "-default-device"};
#endif

-auto result = nvrtc().nvrtcCompileProgram(program, args.size(), args.data());
+auto result = nvrtc().nvrtcCompileProgram(
+program, static_cast<int>(args.size()), args.data());
if (result != NVRTC_SUCCESS) {
size_t logsize = 0;
AT_CUDA_NVRTC_CHECK(nvrtc().nvrtcGetProgramLogSize(program, &logsize));
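
NVRTC's nvrtcCompileProgram takes its option count as an int, while args.size() yields a size_t, so the old call narrowed implicitly; the new static_cast<int> makes the narrowing explicit and reviewable, which is what checks such as bugprone-narrowing-conversions ask for. The same shape with a stand-in for the NVRTC entry point:

#include <cstddef>
#include <vector>

// Stand-in mirroring nvrtcCompileProgram(nvrtcProgram, int numOptions,
// const char* const* options); only the parameter types matter here.
int compileProgram(int numOptions, const char* const* options) {
  (void)options;
  return numOptions;
}

void compile(const std::vector<const char*>& args) {
  // Explicit size_t -> int narrowing instead of a silent implicit one.
  compileProgram(static_cast<int>(args.size()), args.data());
}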
10 changes: 2 additions & 8 deletions torch/csrc/jit/tensorexpr/cuda_codegen.h
@@ -16,9 +16,7 @@
#include <torch/csrc/jit/tensorexpr/llvm_codegen.h>
#include <torch/csrc/jit/tensorexpr/unique_name_manager.h>

-namespace torch {
-namespace jit {
-namespace tensorexpr {
+namespace torch::jit::tensorexpr {

// A class that analyzes the given program relevant for Cuda backends.
class CudaAnalysis : public IRVisitor {
@@ -74,7 +72,6 @@ class CudaAnalysis : public IRVisitor {
// execution parameters, then if those params differ from the max mask each dim.
class GPUMetaVarRewriter : public IRMutator {
public:
-// NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
explicit GPUMetaVarRewriter(const CudaAnalysis* cuda_analysis)
: cuda_analysis_(cuda_analysis) {
gpu_block_vars_ = {
@@ -208,7 +205,6 @@ class TORCH_CUDA_CU_API CudaCodeGen : public CodeGen {
Initialize();
}

-// NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
CudaCodeGen(
StmtPtr stmt,
const std::vector<BufferArg>& buffer_args,
@@ -287,6 +283,4 @@ class TORCH_CUDA_CU_API CudaCodeGen : public CodeGen {
std::string GetUniqueFuncName(const std::string& func_prefix);
};

-} // namespace tensorexpr
-} // namespace jit
-} // namespace torch
+} // namespace torch::jit::tensorexpr
15 changes: 3 additions & 12 deletions torch/csrc/jit/tensorexpr/eval.cpp
@@ -64,10 +64,8 @@ inline c10::BFloat16 div_value(c10::BFloat16 lhs, c10::BFloat16 rhs) {
return lhs / rhs;
}

-// NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
class SimpleIREvaluatorImpl : public IRVisitor {
public:
-// NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
SimpleIREvaluatorImpl() = default;

~SimpleIREvaluatorImpl() override = default;
@@ -626,8 +624,7 @@ class SimpleIREvaluatorImpl : public IRVisitor {

TORCH_API void visit(const IfThenElsePtr& v) override {
v->condition()->accept(this);
-// NOLINTNEXTLINE(cppcoreguidelines-init-variables)
-bool cond_v;
+bool cond_v = false;
switch (value_.dtype().scalar_type()) {
#define TYPE_CASE(Type, Name) \
case ScalarType::Name: { \
@@ -762,9 +759,7 @@ class SimpleIREvaluatorImpl : public IRVisitor {
value_ = InterpValue(val); \
} break;
AT_FORALL_SCALAR_TYPES_AND3(Bool, Half, BFloat16, TYPE_CASE);
-// NOLINTNEXTLINE(facebook-hte-LocalUncheckedArrayBounds)
TYPE_CASE(c10::quint8, QUInt8);
-// NOLINTNEXTLINE(facebook-hte-LocalUncheckedArrayBounds)
TYPE_CASE(c10::qint8, QInt8);
#undef TYPE_CASE
default:
@@ -811,9 +806,7 @@
} \
} break;
AT_FORALL_SCALAR_TYPES_AND3(Bool, Half, BFloat16, TYPE_CASE);
-// NOLINTNEXTLINE(facebook-hte-LocalUncheckedArrayBounds)
TYPE_CASE(c10::quint8, QUInt8);
-// NOLINTNEXTLINE(facebook-hte-LocalUncheckedArrayBounds)
TYPE_CASE(c10::qint8, QInt8);
#undef TYPE_CASE
default:
@@ -862,8 +855,7 @@ class SimpleIREvaluatorImpl : public IRVisitor {
}
for (const ExprPtr& a : v->args()) {
a->accept(this);
-// NOLINTNEXTLINE(cppcoreguidelines-init-variables)
-int64_t val;
+int64_t val = 0;
if (value().dtype() == kLong) {
val = value().as<int64_t>();
} else if (value().dtype() == kInt) {
@@ -934,8 +926,7 @@ class SimpleIREvaluatorImpl : public IRVisitor {
}
for (const auto& a : v->args()) {
a->accept(this);
-// NOLINTNEXTLINE(cppcoreguidelines-init-variables)
-int64_t val;
+int64_t val = 0;
if (value().dtype() == kLong) {
val = value().as<int64_t>();
} else if (value().dtype() == kInt) {
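
The bool cond_v = false and int64_t val = 0 changes implement cppcoreguidelines-init-variables: the old locals were declared uninitialized and assigned only inside a switch or if/else chain, so an unhandled dtype would have read an indeterminate value. A minimal sketch of the pattern with invented names:

#include <cstdint>
#include <iostream>

enum class Dtype { kInt, kLong, kFloat };

int64_t extract(Dtype dtype, int64_t raw) {
  int64_t val = 0;  // defined fallback even if no branch below matches
  if (dtype == Dtype::kLong) {
    val = raw;
  } else if (dtype == Dtype::kInt) {
    val = static_cast<int32_t>(raw);
  }  // previously: 'int64_t val;' plus a NOLINTNEXTLINE suppression
  return val;
}

int main() {
  std::cout << extract(Dtype::kFloat, 42) << '\n';  // prints 0, not garbage
}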
5 changes: 0 additions & 5 deletions torch/csrc/jit/tensorexpr/eval.h
@@ -42,12 +42,10 @@ class InterpValue {
AT_FORALL_SCALAR_TYPES_AND3(Bool, Half, BFloat16, VALUE_CTOR);
#undef VALUE_CTOR

-// NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
explicit InterpValue(c10::quint8 v) : dtype_(kQUInt8) {
QUInt8values.emplace_back(v.val_);
}

-// NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
explicit InterpValue(c10::qint8 v) : dtype_(kQInt8) {
QInt8values.emplace_back(v.val_);
}
@@ -226,7 +224,6 @@ class ExprEval {
TYPE_CASE(c10::qint8, QInt8);
#undef TYPE_CASE
case ScalarType::Bool: {
-// NOLINTNEXTLINE(cppcoreguidelines-init-variables)
std::vector<unsigned char> ret_val_arg(1);
call_args_extended.emplace_back(ret_val_arg.data());
codegen_->call(call_args_extended);
@@ -238,7 +235,6 @@
}

void call_raw(const std::vector<void*>& args) {
-// NOLINTNEXTLINE(cppcoreguidelines-init-variables)
std::vector<void*> args_extended = args;
switch (dtype_.scalar_type()) {
#define TYPE_CASE(Type, Name) \
@@ -253,7 +249,6 @@
TYPE_CASE(c10::qint8, QInt8);
#undef TYPE_CASE
case ScalarType::Bool: {
-// NOLINTNEXTLINE(cppcoreguidelines-init-variables)
std::vector<unsigned char> ret_val_arg(1);
args_extended.push_back(ret_val_arg.data());
codegen_->call_raw(args_extended);