34 return (
f >= 0) ?
f : -
f;
76 float input[1*1*3*3] = {
77 0.1, 0.5, 0.75, -3, 2.5, 2, -2.1, 7.8, 100};
82 operands[0].
data = input;
83 operands[0].
dims[0] = 1;
84 operands[0].
dims[1] = 1;
85 operands[0].
dims[2] = 3;
86 operands[0].
dims[3] = 3;
92 output = operands[1].
data;
93 for (
int i = 0;
i <
sizeof(input) /
sizeof(
float); ++
i) {
95 int output_nan =
isnan(output[
i]);
96 int expected_nan =
isnan(expected_output);
97 if ((!output_nan && !expected_nan &&
fabs(output[
i] - expected_output) >
EPS) ||
98 (output_nan && !expected_nan) || (!output_nan && expected_nan)) {
99 printf(
"at index %d, output: %f, expected_output: %f\n",
i, output[
i], expected_output);
109 int main(
int agrc,
char **argv)
Simple assert() macros that are a bit more flexible than the ISO C assert().
#define av_assert0(cond)
assert() equivalent that is always enabled.
static __device__ float ceil(float a)
static __device__ float fabs(float a)
static __device__ float floor(float a)
int main(int argc, char **argv)
static int test(DNNMathUnaryOperation op)
static float get_expected(float f, DNNMathUnaryOperation op)
int ff_dnn_execute_layer_math_unary(DnnOperand *operands, const int32_t *input_operand_indexes, int32_t output_operand_index, const void *parameters, NativeContext *ctx)
DNN inference functions interface for native backend.
static int op(uint8_t **dst, const uint8_t *dst_end, GetByteContext *gb, int pixel, int count, int *x, int width, int linesize)
Perform decode operation.
static av_always_inline av_const double round(double x)
DNNMathUnaryOperation un_op
void * data
data pointer with data length in bytes.
int32_t dims[4]
There are two possible memory layouts, NHWC and NCHW, so we use a generic dims array; dims[0] is the batch size (Number).