/*
* SPDX-License-Identifier: Apache-2.0
*/
#pragma once
#include "onnx/defs/shape_inference.h"
#include "onnx/defs/tensor_proto_util.h"
#include "onnx/onnx_pb.h"
namespace ONNX_NAMESPACE {
namespace defs {
namespace math {
namespace utils {
/// Reads the first element of a tensor and converts it to T.
/// Returns a value-initialized T when no tensor is supplied; for element
/// types other than the four handled below, shape inference is failed.
template <typename T>
T GetScalarValueFromTensor(const ONNX_NAMESPACE::TensorProto* t) {
  // Absent tensor: fall back to a default-constructed value.
  if (t == nullptr) {
    return T{};
  }
  const auto dtype = t->data_type();
  // Guard-style dispatch on the element type; each branch parses the raw
  // tensor payload and narrows/widens the first element to T.
  if (dtype == ONNX_NAMESPACE::TensorProto::FLOAT) {
    return static_cast<T>(ONNX_NAMESPACE::ParseData<float>(t).at(0));
  }
  if (dtype == ONNX_NAMESPACE::TensorProto::DOUBLE) {
    return static_cast<T>(ONNX_NAMESPACE::ParseData<double>(t).at(0));
  }
  if (dtype == ONNX_NAMESPACE::TensorProto::INT32) {
    return static_cast<T>(ONNX_NAMESPACE::ParseData<int32_t>(t).at(0));
  }
  if (dtype == ONNX_NAMESPACE::TensorProto::INT64) {
    return static_cast<T>(ONNX_NAMESPACE::ParseData<int64_t>(t).at(0));
  }
  // fail_shape_inference throws, so no return is needed past this point.
  fail_shape_inference("Unsupported input data type of ", dtype);
}
// Shared shape inference for MatMul-style ops; input1Idx/input2Idx select
// which of ctx's inputs hold the two operands. Defined elsewhere (not in
// this header) — presumably in the corresponding .cc file.
void MatMulShapeInference(ONNX_NAMESPACE::InferenceContext& ctx, int input1Idx, int input2Idx);
// Shape inference for QLinearMatMul; implementation not visible here.
void QLinearMatMulShapeInference(ONNX_NAMESPACE::InferenceContext& ctx);
// Returns the documentation string for the QLinearMatMul op schema.
const char* QLinearMatMulDoc();
} // namespace utils
} // namespace math
} // namespace defs
} // namespace ONNX_NAMESPACE