Commit 7900f86: Fix lint issues

preetha-intel committed Apr 22, 2024
1 parent 6c8073d
Showing 5 changed files with 12 additions and 9 deletions.
onnxruntime/core/providers/openvino/backends/basic_backend.cc
@@ -93,7 +93,7 @@ BasicBackend::BasicBackend(const ONNX_NAMESPACE::ModelProto& model_proto,
  } else if (!subgraph_context_.has_dynamic_input_shape &&
             global_context_.onnx_model_path_name.find(".onnx") != std::string ::npos) {
    // Inputs with static dimenstions
-   std::string prec_str = (global_context_.precision_str!="ACCURACY")? global_context_.precision_str : global_context_.model_precision;
+   std::string prec_str = (global_context_.precision_str != "ACCURACY") ? global_context_.precision_str : global_context_.model_precision;

[GitHub Actions / cpplint] warning on line 96 of onnxruntime/core/providers/openvino/backends/basic_backend.cc:
  Lines should be <= 120 characters long  [whitespace/line_length] [2]
    exe_network_ = global_context_.ie_core.CompileModel(global_context_.onnx_model_path_name,
                                                        hw_target,
                                                        prec_str,
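Note that even with the spacing fixed, the new ternary on line 96 remains longer than 120 characters, which is exactly what the cpplint annotation above flags. Below is a minimal runnable sketch, not part of the commit, of how the same ACCURACY fallback could be wrapped under the limit; GlobalContext and its values are hypothetical stand-ins for the fields of global_context_.

#include <iostream>
#include <string>

// Hypothetical stand-in for the global_context_ fields the hunk reads.
struct GlobalContext {
  std::string precision_str;
  std::string model_precision;
};

int main() {
  GlobalContext global_context{"ACCURACY", "FP16"};
  // Same selection as the hunk, wrapped so no line exceeds 120 characters:
  std::string prec_str = (global_context.precision_str != "ACCURACY")
                             ? global_context.precision_str
                             : global_context.model_precision;
  std::cout << prec_str << "\n";  // prints "FP16": ACCURACY defers to the model's precision
  return 0;
}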
@@ -105,7 +105,8 @@ OpenVINOExecutionProvider::GetCapability(const GraphViewer& graph_viewer,
return "";
} else {
auto input_type = graph_viewer.GetInputs()[0]->TypeAsProto()->tensor_type().elem_type();
if (global_context_->precision_str == "ACCURACY" && global_context_->device_type.find("GPU") != std::string::npos) {
if (global_context_->precision_str == "ACCURACY" &&
global_context_->device_type.find("GPU") != std::string::npos) {
if (input_type == ONNX_NAMESPACE::TensorProto_DataType::TensorProto_DataType_FLOAT) {
return "FP32";
} else if (input_type == ONNX_NAMESPACE::TensorProto_DataType::TensorProto_DataType_FLOAT16) {
Expand Down
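For context, the condition being wrapped here gates the ACCURACY-on-GPU path, where the returned precision string follows the element type of the graph's first input. A minimal sketch under that reading; the enum values mirror onnx.proto's TensorProto_DataType codes, and PrecisionForAccuracyMode is a hypothetical name.

#include <iostream>
#include <string>

// Element-type codes from onnx.proto: FLOAT = 1, FLOAT16 = 10.
enum TensorElemType { kFloat = 1, kFloat16 = 10 };

// Hypothetical helper mirroring the hunk: in ACCURACY mode on a GPU device,
// the precision string is derived from the model's first input type.
std::string PrecisionForAccuracyMode(int input_type) {
  if (input_type == kFloat) return "FP32";
  if (input_type == kFloat16) return "FP16";
  return "";  // other element types are handled by the surrounding code
}

int main() {
  std::cout << PrecisionForAccuracyMode(kFloat) << "\n";    // FP32
  std::cout << PrecisionForAccuracyMode(kFloat16) << "\n";  // FP16
  return 0;
}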
@@ -120,7 +120,7 @@ struct OpenVINOExecutionProviderInfo {
#endif
    } else if (ov_supported_device_types.find(dev_type) != ov_supported_device_types.end()) {
      device_type_ = dev_type;
-   } else if (dev_type.find("HETERO") == 0 || dev_type.find("MULTI") == 0 || dev_type.find("AUTO")==0) {
+   } else if (dev_type.find("HETERO") == 0 || dev_type.find("MULTI") == 0 || dev_type.find("AUTO") == 0) {
      std::vector<std::string> devices = parseDevices(dev_type);
      device_type_ = dev_type;
    } else {
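The fix here only adds spaces around ==, but the idiom is worth spelling out: std::string::find(s) returns 0 exactly when s occurs at the start of the string, so find(s) == 0 is a starts-with test for composite device strings such as HETERO:GPU,CPU. A small sketch; IsMetaDevice is a hypothetical name (C++20's starts_with would express the same intent).

#include <cassert>
#include <string>

// find(s) == 0 holds only when s is a prefix, making this a starts-with check.
bool IsMetaDevice(const std::string& dev_type) {
  return dev_type.find("HETERO") == 0 || dev_type.find("MULTI") == 0 ||
         dev_type.find("AUTO") == 0;
}

int main() {
  assert(IsMetaDevice("HETERO:GPU,CPU"));  // composite device strings match
  assert(IsMetaDevice("AUTO:GPU"));
  assert(!IsMetaDevice("GPU.0"));          // plain device names do not
  return 0;
}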
@@ -142,13 +142,13 @@ struct OpenVINO_Provider : Provider {
      } else if (precision != "ACCURACY" && precision != "FP16" && precision != "FP32") {
        ORT_THROW("[ERROR] [OpenVINO] Unsupported inference precision is selected. GPU only supports FP32 / FP16. \n");
      }
-   } else if (device_type.find("NPU") != std:: string::npos) {
+   } else if (device_type.find("NPU") != std::string::npos) {
      if (precision == "" || precision == "ACCURACY" || precision == "FP16") {
        precision = "FP16";
      } else {
        ORT_THROW("[ERROR] [OpenVINO] Unsupported inference precision is selected. NPU only supported FP16. \n");
      }
-   } else if (device_type.find("CPU")!=std::string::npos) {
+   } else if (device_type.find("CPU") != std::string::npos) {
      if (precision == "" || precision == "ACCURACY" || precision == "FP32") {
        precision = "FP32";
      } else {
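The surrounding logic normalizes an empty or ACCURACY precision to each device's default and rejects anything the device cannot run. A condensed sketch of those rules as the context lines state them; NormalizePrecision is a hypothetical name, and the real code reports errors through ORT_THROW rather than hand-built exceptions.

#include <stdexcept>
#include <string>

// Per-device precision rules from the hunk: NPU defaults to FP16, CPU to FP32;
// unsupported combinations are rejected outright.
std::string NormalizePrecision(const std::string& device_type, const std::string& precision) {
  if (device_type.find("NPU") != std::string::npos) {
    if (precision.empty() || precision == "ACCURACY" || precision == "FP16") return "FP16";
    throw std::invalid_argument("NPU only supports FP16");
  }
  if (device_type.find("CPU") != std::string::npos) {
    if (precision.empty() || precision == "ACCURACY" || precision == "FP32") return "FP32";
    throw std::invalid_argument("CPU only supports FP32");
  }
  return precision;  // GPU and other devices are validated elsewhere
}

int main() {
  return NormalizePrecision("NPU", "ACCURACY") == "FP16" ? 0 : 1;
}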
onnxruntime/test/perftest/ort_test_session.cc (10 changes: 6 additions & 4 deletions)
@@ -274,24 +274,26 @@ OnnxRuntimeTestSession::OnnxRuntimeTestSession(Ort::Env& env, std::random_device
          }
        } else if (key == "precision") {
          auto device_type = ov_options["device_type"];
-         if (device_type.find("GPU")!= std::string::npos) {
+         if (device_type.find("GPU") != std::string::npos) {
            if (value == "") {
              ov_options[key] = "FP16";
              continue;
            } else if (value == "ACCURACY" || value == "FP16" || value == "FP32") {
              ov_options[key] = value;
              continue;
            } else {
-             ORT_THROW("[ERROR] [OpenVINO] Unsupported inference precision is selected. GPU only supported FP32 / FP16. \n");
+             ORT_THROW(
+                 "[ERROR] [OpenVINO] Unsupported inference precision is selected. "
+                 "GPU only supported FP32 / FP16. \n");
            }
-         }else if (device_type.find("NPU")!= std::string::npos) {
+         } else if (device_type.find("NPU") != std::string::npos) {
            if (value == "" || value == "ACCURACY" || value == "FP16") {
              ov_options[key] = "FP16";
              continue;
            } else {
              ORT_THROW("[ERROR] [OpenVINO] Unsupported inference precision is selected. NPU only supported FP16. \n");
            }
-         } else if (device_type.find("CPU")!= std::string::npos) {
+         } else if (device_type.find("CPU") != std::string::npos) {
            if (value == "" || value == "ACCURACY" || value == "FP32") {
              ov_options[key] = "FP32";
              continue;
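One detail the wrapped ORT_THROW relies on: adjacent string literals concatenate at compile time, so splitting the message across lines yields the identical string. A small sketch of the perftest-style handling, using a plain std::map in place of ov_options:

#include <iostream>
#include <map>
#include <string>

int main() {
  std::map<std::string, std::string> ov_options{{"device_type", "GPU"}};
  std::string value = "INT8";  // not a supported GPU precision in this scheme
  if (ov_options["device_type"].find("GPU") != std::string::npos) {
    if (value == "" || value == "ACCURACY" || value == "FP16" || value == "FP32") {
      ov_options["precision"] = value.empty() ? "FP16" : value;  // empty defaults to FP16
    } else {
      // Adjacent literals merge into one string, matching the wrapped ORT_THROW:
      std::cerr << "[ERROR] [OpenVINO] Unsupported inference precision is selected. "
                   "GPU only supported FP32 / FP16. \n";
      return 1;
    }
  }
  return 0;
}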
