R"(
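// Common OpenCL helper macros used by the Arm Compute Library CL kernels
// (include guard: ARM_COMPUTE_HELPER_H).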
#ifndef ARM_COMPUTE_HELPER_H
#define ARM_COMPUTE_HELPER_H
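// STORE_ROW_n: store rows 0..n-1 of a register block (BASENAME##0, BASENAME##1, ...),
// one VSTORE(N0) per row at PTR + row * STRIDE_Y (+ the per-row offset Z##row).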
#define STORE_ROW_1(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
    VSTORE(N0) \
    (BASENAME##0, 0, (__global DATA_TYPE *)(PTR + 0 * STRIDE_Y + Z##0));

#define STORE_ROW_2(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
    STORE_ROW_1(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
    VSTORE(N0) \
    (BASENAME##1, 0, (__global DATA_TYPE *)(PTR + 1 * STRIDE_Y + Z##1));

#define STORE_ROW_3(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
    STORE_ROW_2(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
    VSTORE(N0) \
    (BASENAME##2, 0, (__global DATA_TYPE *)(PTR + 2 * STRIDE_Y + Z##2));

#define STORE_ROW_4(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
    STORE_ROW_3(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
    VSTORE(N0) \
    (BASENAME##3, 0, (__global DATA_TYPE *)(PTR + 3 * STRIDE_Y + Z##3));

#define STORE_ROW_5(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
    STORE_ROW_4(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
    VSTORE(N0) \
    (BASENAME##4, 0, (__global DATA_TYPE *)(PTR + 4 * STRIDE_Y + Z##4));

#define STORE_ROW_6(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
    STORE_ROW_5(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
    VSTORE(N0) \
    (BASENAME##5, 0, (__global DATA_TYPE *)(PTR + 5 * STRIDE_Y + Z##5));

#define STORE_ROW_7(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
    STORE_ROW_6(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
    VSTORE(N0) \
    (BASENAME##6, 0, (__global DATA_TYPE *)(PTR + 6 * STRIDE_Y + Z##6));

#define STORE_ROW_8(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
    STORE_ROW_7(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
    VSTORE(N0) \
    (BASENAME##7, 0, (__global DATA_TYPE *)(PTR + 7 * STRIDE_Y + Z##7));

#define STORE_ROW_9(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
    STORE_ROW_8(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
    VSTORE(N0) \
    (BASENAME##8, 0, (__global DATA_TYPE *)(PTR + 8 * STRIDE_Y + Z##8));

#define STORE_ROW_10(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
    STORE_ROW_9(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
    VSTORE(N0) \
    (BASENAME##9, 0, (__global DATA_TYPE *)(PTR + 9 * STRIDE_Y + Z##9));

#define STORE_ROW_11(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
    STORE_ROW_10(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
    VSTORE(N0) \
    (BASENAME##A, 0, (__global DATA_TYPE *)(PTR + 10 * STRIDE_Y + Z##A));

#define STORE_ROW_12(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
    STORE_ROW_11(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
    VSTORE(N0) \
    (BASENAME##B, 0, (__global DATA_TYPE *)(PTR + 11 * STRIDE_Y + Z##B));

#define STORE_ROW_13(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
    STORE_ROW_12(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
    VSTORE(N0) \
    (BASENAME##C, 0, (__global DATA_TYPE *)(PTR + 12 * STRIDE_Y + Z##C));

#define STORE_ROW_14(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
    STORE_ROW_13(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
    VSTORE(N0) \
    (BASENAME##D, 0, (__global DATA_TYPE *)(PTR + 13 * STRIDE_Y + Z##D));

#define STORE_ROW_15(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
    STORE_ROW_14(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
    VSTORE(N0) \
    (BASENAME##E, 0, (__global DATA_TYPE *)(PTR + 14 * STRIDE_Y + Z##E));

#define STORE_ROW_16(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
    STORE_ROW_15(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
    VSTORE(N0) \
    (BASENAME##F, 0, (__global DATA_TYPE *)(PTR + 15 * STRIDE_Y + Z##F));
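// CONVERT_STORE_ROW_n: as STORE_ROW_n, but each row is first converted to DATA_TYPE
// with saturation (CONVERT_SAT) before being stored.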
#define CONVERT_STORE_ROW_1(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
    VSTORE(N0) \
    (CONVERT_SAT((BASENAME##0), VEC_DATA_TYPE(DATA_TYPE, N0)), 0, (__global DATA_TYPE *)(PTR + 0 * STRIDE_Y + Z##0));

#define CONVERT_STORE_ROW_2(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
    CONVERT_STORE_ROW_1(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
    VSTORE(N0) \
    (CONVERT_SAT((BASENAME##1), VEC_DATA_TYPE(DATA_TYPE, N0)), 0, (__global DATA_TYPE *)(PTR + 1 * STRIDE_Y + Z##1));

#define CONVERT_STORE_ROW_3(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
    CONVERT_STORE_ROW_2(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
    VSTORE(N0) \
    (CONVERT_SAT((BASENAME##2), VEC_DATA_TYPE(DATA_TYPE, N0)), 0, (__global DATA_TYPE *)(PTR + 2 * STRIDE_Y + Z##2));

#define CONVERT_STORE_ROW_4(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
    CONVERT_STORE_ROW_3(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
    VSTORE(N0) \
    (CONVERT_SAT((BASENAME##3), VEC_DATA_TYPE(DATA_TYPE, N0)), 0, (__global DATA_TYPE *)(PTR + 3 * STRIDE_Y + Z##3));

#define CONVERT_STORE_ROW_5(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
    CONVERT_STORE_ROW_4(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
    VSTORE(N0) \
    (CONVERT_SAT((BASENAME##4), VEC_DATA_TYPE(DATA_TYPE, N0)), 0, (__global DATA_TYPE *)(PTR + 4 * STRIDE_Y + Z##4));

#define CONVERT_STORE_ROW_6(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
    CONVERT_STORE_ROW_5(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
    VSTORE(N0) \
    (CONVERT_SAT((BASENAME##5), VEC_DATA_TYPE(DATA_TYPE, N0)), 0, (__global DATA_TYPE *)(PTR + 5 * STRIDE_Y + Z##5));

#define CONVERT_STORE_ROW_7(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
    CONVERT_STORE_ROW_6(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
    VSTORE(N0) \
    (CONVERT_SAT((BASENAME##6), VEC_DATA_TYPE(DATA_TYPE, N0)), 0, (__global DATA_TYPE *)(PTR + 6 * STRIDE_Y + Z##6));

#define CONVERT_STORE_ROW_8(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
    CONVERT_STORE_ROW_7(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
    VSTORE(N0) \
    (CONVERT_SAT((BASENAME##7), VEC_DATA_TYPE(DATA_TYPE, N0)), 0, (__global DATA_TYPE *)(PTR + 7 * STRIDE_Y + Z##7));

#define CONVERT_STORE_ROW_9(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
    CONVERT_STORE_ROW_8(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
    VSTORE(N0) \
    (CONVERT_SAT((BASENAME##8), VEC_DATA_TYPE(DATA_TYPE, N0)), 0, (__global DATA_TYPE *)(PTR + 8 * STRIDE_Y + Z##8));

#define CONVERT_STORE_ROW_10(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
    CONVERT_STORE_ROW_9(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
    VSTORE(N0) \
    (CONVERT_SAT((BASENAME##9), VEC_DATA_TYPE(DATA_TYPE, N0)), 0, (__global DATA_TYPE *)(PTR + 9 * STRIDE_Y + Z##9));

#define CONVERT_STORE_ROW_11(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
    CONVERT_STORE_ROW_10(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
    VSTORE(N0) \
    (CONVERT_SAT((BASENAME##A), VEC_DATA_TYPE(DATA_TYPE, N0)), 0, (__global DATA_TYPE *)(PTR + 10 * STRIDE_Y + Z##A));

#define CONVERT_STORE_ROW_12(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
    CONVERT_STORE_ROW_11(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
    VSTORE(N0) \
    (CONVERT_SAT((BASENAME##B), VEC_DATA_TYPE(DATA_TYPE, N0)), 0, (__global DATA_TYPE *)(PTR + 11 * STRIDE_Y + Z##B));

#define CONVERT_STORE_ROW_13(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
    CONVERT_STORE_ROW_12(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
    VSTORE(N0) \
    (CONVERT_SAT((BASENAME##C), VEC_DATA_TYPE(DATA_TYPE, N0)), 0, (__global DATA_TYPE *)(PTR + 12 * STRIDE_Y + Z##C));

#define CONVERT_STORE_ROW_14(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
    CONVERT_STORE_ROW_13(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
    VSTORE(N0) \
    (CONVERT_SAT((BASENAME##D), VEC_DATA_TYPE(DATA_TYPE, N0)), 0, (__global DATA_TYPE *)(PTR + 13 * STRIDE_Y + Z##D));

#define CONVERT_STORE_ROW_15(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
    CONVERT_STORE_ROW_14(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
    VSTORE(N0) \
    (CONVERT_SAT((BASENAME##E), VEC_DATA_TYPE(DATA_TYPE, N0)), 0, (__global DATA_TYPE *)(PTR + 14 * STRIDE_Y + Z##E));

#define CONVERT_STORE_ROW_16(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
    CONVERT_STORE_ROW_15(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
    VSTORE(N0) \
    (CONVERT_SAT((BASENAME##F), VEC_DATA_TYPE(DATA_TYPE, N0)), 0, (__global DATA_TYPE *)(PTR + 15 * STRIDE_Y + Z##F));

#define STORE_BLOCK_STR(M0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) STORE_ROW_##M0(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z)
#define STORE_BLOCK(M0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) STORE_BLOCK_STR(M0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z)

#define CONVERT_STORE_BLOCK_STR(M0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) CONVERT_STORE_ROW_##M0(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z)
#define CONVERT_STORE_BLOCK(M0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) CONVERT_STORE_BLOCK_STR(M0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z)
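// STORE_ROW_PARTIAL_n: as STORE_ROW_n, but only STORE_N0 of the N0 elements of each row
// are written, via VSTORE_PARTIAL. The STORE_BLOCK_PARTIAL* macros below select the right
// variant for boundary tiles.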
#define STORE_ROW_PARTIAL_1(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
    VSTORE_PARTIAL(N0, STORE_N0) \
    (BASENAME##0, 0, (__global DATA_TYPE *)(PTR + 0 * STRIDE_Y + Z##0));

#define STORE_ROW_PARTIAL_2(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
    STORE_ROW_PARTIAL_1(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
    VSTORE_PARTIAL(N0, STORE_N0) \
    (BASENAME##1, 0, (__global DATA_TYPE *)(PTR + 1 * STRIDE_Y + Z##1));

#define STORE_ROW_PARTIAL_3(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
    STORE_ROW_PARTIAL_2(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
    VSTORE_PARTIAL(N0, STORE_N0) \
    (BASENAME##2, 0, (__global DATA_TYPE *)(PTR + 2 * STRIDE_Y + Z##2));

#define STORE_ROW_PARTIAL_4(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
    STORE_ROW_PARTIAL_3(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
    VSTORE_PARTIAL(N0, STORE_N0) \
    (BASENAME##3, 0, (__global DATA_TYPE *)(PTR + 3 * STRIDE_Y + Z##3));

#define STORE_ROW_PARTIAL_5(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
    STORE_ROW_PARTIAL_4(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
    VSTORE_PARTIAL(N0, STORE_N0) \
    (BASENAME##4, 0, (__global DATA_TYPE *)(PTR + 4 * STRIDE_Y + Z##4));

#define STORE_ROW_PARTIAL_6(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
    STORE_ROW_PARTIAL_5(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
    VSTORE_PARTIAL(N0, STORE_N0) \
    (BASENAME##5, 0, (__global DATA_TYPE *)(PTR + 5 * STRIDE_Y + Z##5));

#define STORE_ROW_PARTIAL_7(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
    STORE_ROW_PARTIAL_6(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
    VSTORE_PARTIAL(N0, STORE_N0) \
    (BASENAME##6, 0, (__global DATA_TYPE *)(PTR + 6 * STRIDE_Y + Z##6));

#define STORE_ROW_PARTIAL_8(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
    STORE_ROW_PARTIAL_7(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
    VSTORE_PARTIAL(N0, STORE_N0) \
    (BASENAME##7, 0, (__global DATA_TYPE *)(PTR + 7 * STRIDE_Y + Z##7));

#define STORE_ROW_PARTIAL_9(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
    STORE_ROW_PARTIAL_8(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
    VSTORE_PARTIAL(N0, STORE_N0) \
    (BASENAME##8, 0, (__global DATA_TYPE *)(PTR + 8 * STRIDE_Y + Z##8));

#define STORE_ROW_PARTIAL_10(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
    STORE_ROW_PARTIAL_9(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
    VSTORE_PARTIAL(N0, STORE_N0) \
    (BASENAME##9, 0, (__global DATA_TYPE *)(PTR + 9 * STRIDE_Y + Z##9));

#define STORE_ROW_PARTIAL_11(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
    STORE_ROW_PARTIAL_10(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
    VSTORE_PARTIAL(N0, STORE_N0) \
    (BASENAME##A, 0, (__global DATA_TYPE *)(PTR + 10 * STRIDE_Y + Z##A));

#define STORE_ROW_PARTIAL_12(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
    STORE_ROW_PARTIAL_11(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
    VSTORE_PARTIAL(N0, STORE_N0) \
    (BASENAME##B, 0, (__global DATA_TYPE *)(PTR + 11 * STRIDE_Y + Z##B));

#define STORE_ROW_PARTIAL_13(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
    STORE_ROW_PARTIAL_12(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
    VSTORE_PARTIAL(N0, STORE_N0) \
    (BASENAME##C, 0, (__global DATA_TYPE *)(PTR + 12 * STRIDE_Y + Z##C));

#define STORE_ROW_PARTIAL_14(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
    STORE_ROW_PARTIAL_13(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
    VSTORE_PARTIAL(N0, STORE_N0) \
    (BASENAME##D, 0, (__global DATA_TYPE *)(PTR + 13 * STRIDE_Y + Z##D));

#define STORE_ROW_PARTIAL_15(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
    STORE_ROW_PARTIAL_14(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
    VSTORE_PARTIAL(N0, STORE_N0) \
    (BASENAME##E, 0, (__global DATA_TYPE *)(PTR + 14 * STRIDE_Y + Z##E));

#define STORE_ROW_PARTIAL_16(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
    STORE_ROW_PARTIAL_15(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
    VSTORE_PARTIAL(N0, STORE_N0) \
    (BASENAME##F, 0, (__global DATA_TYPE *)(PTR + 15 * STRIDE_Y + Z##F));

#define STORE_BLOCK_PARTIAL_STR(STORE_M0, STORE_N0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) STORE_ROW_PARTIAL_##STORE_M0(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z)
#define STORE_BLOCK_PARTIAL(STORE_M0, STORE_N0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) STORE_BLOCK_PARTIAL_STR(STORE_M0, STORE_N0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z)

#define STORE_BLOCK_PARTIAL_IN_X_AND_Y(M0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z, PARTIAL_STORE_M0, PARTIAL_STORE_N0, PARTIAL_COND_Y, PARTIAL_COND_X) \
    if(!(PARTIAL_COND_X) && !(PARTIAL_COND_Y)) \
    { \
        STORE_BLOCK_PARTIAL(M0, N0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z); \
    } \
    else if((PARTIAL_COND_Y) && !(PARTIAL_COND_X)) \
    { \
        STORE_BLOCK_PARTIAL(PARTIAL_STORE_M0, N0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z); \
    } \
    else if(!(PARTIAL_COND_Y) && (PARTIAL_COND_X)) \
    { \
        STORE_BLOCK_PARTIAL(M0, PARTIAL_STORE_N0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z); \
    } \
    else \
    { \
        STORE_BLOCK_PARTIAL(PARTIAL_STORE_M0, PARTIAL_STORE_N0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z); \
    }

#define STORE_BLOCK_PARTIAL_IN_X(M0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z, PARTIAL_STORE_N0, PARTIAL_COND_X) \
    if(!(PARTIAL_COND_X)) \
    { \
        STORE_BLOCK_PARTIAL(M0, N0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z); \
    } \
    else \
    { \
        STORE_BLOCK_PARTIAL(M0, PARTIAL_STORE_N0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z); \
    }

#define STORE_BLOCK_PARTIAL_IN_Y(M0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z, PARTIAL_STORE_M0, PARTIAL_COND_Y) \
    if(!(PARTIAL_COND_Y)) \
    { \
        STORE_BLOCK_PARTIAL(M0, N0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z); \
    } \
    else \
    { \
        STORE_BLOCK_PARTIAL(PARTIAL_STORE_M0, N0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z); \
    }
#if defined(PARTIAL_STORE_M0) && defined(PARTIAL_STORE_N0)

#if PARTIAL_STORE_M0 == 0 && PARTIAL_STORE_N0 == 0
#define STORE_BLOCK_BOUNDARY_AWARE(M0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z, PARTIAL_STORE_M0, PARTIAL_STORE_N0, PARTIAL_COND_Y, PARTIAL_COND_X) \
    STORE_BLOCK(M0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z)

#elif PARTIAL_STORE_M0 > 0 && PARTIAL_STORE_N0 == 0
#define STORE_BLOCK_BOUNDARY_AWARE(M0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z, PARTIAL_STORE_M0, PARTIAL_STORE_N0, PARTIAL_COND_Y, PARTIAL_COND_X) \
    STORE_BLOCK_PARTIAL_IN_Y(M0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z, PARTIAL_STORE_M0, PARTIAL_COND_Y)

#elif PARTIAL_STORE_M0 == 0 && PARTIAL_STORE_N0 > 0
#define STORE_BLOCK_BOUNDARY_AWARE(M0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z, PARTIAL_STORE_M0, PARTIAL_STORE_N0, PARTIAL_COND_Y, PARTIAL_COND_X) \
    STORE_BLOCK_PARTIAL_IN_X(M0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z, PARTIAL_STORE_N0, PARTIAL_COND_X)

#else
#define STORE_BLOCK_BOUNDARY_AWARE(M0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z, PARTIAL_STORE_M0, PARTIAL_STORE_N0, PARTIAL_COND_Y, PARTIAL_COND_X) \
    STORE_BLOCK_PARTIAL_IN_X_AND_Y(M0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z, PARTIAL_STORE_M0, PARTIAL_STORE_N0, PARTIAL_COND_Y, PARTIAL_COND_X)

#endif

#endif

#if defined(PARTIAL_STORE_M0)
#define COMPUTE_M0_START_ROW(y, M0, PARTIAL_STORE_M0) \
    ((uint)(max(0, (int)(y * M0) - (int)((M0 - PARTIAL_STORE_M0) % M0))))
#else
#define COMPUTE_M0_START_ROW(y, M0, PARTIAL_STORE_M0) \
    ((uint)(y * M0))
#endif

#define STORE_VECTOR_SELECT(basename, data_type, ptr, vec_size, leftover, cond) \
    STORE_BLOCK_PARTIAL_IN_X(1, vec_size, data_type, basename, ptr, 0, 0, leftover, cond)
#if defined(ARM_COMPUTE_OPENCL_FP16_ENABLED) && defined(cl_khr_fp16)
#pragma OPENCL EXTENSION cl_khr_fp16 : enable
#endif

#if defined(ARM_COMPUTE_OPENCL_DOT8_ENABLED) && defined(cl_arm_integer_dot_product_int8)
#pragma OPENCL EXTENSION cl_arm_integer_dot_product_int8 : enable
#endif

#if defined(ARM_COMPUTE_OPENCL_DOT8_ACC_ENABLED) && defined(cl_arm_integer_dot_product_accumulate_int8)
#pragma OPENCL EXTENSION cl_arm_integer_dot_product_accumulate_int8 : enable
#endif

#if defined(ARM_COMPUTE_DEBUG_ENABLED) && defined(cl_arm_printf)
#pragma OPENCL EXTENSION cl_arm_printf : enable
#endif

#define GPU_ARCH_MIDGARD 0x100
#define GPU_ARCH_BIFROST 0x200
#define GPU_ARCH_VALHALL 0x300

#define CONCAT(a, b) a##b

#define EXPAND(x) x

#define CLAMP(x, min_val, max_val) min(max(x, min_val), max_val)

#define REV1(x) ((x))
#define REV2(x) ((x).s10)
#define REV3(x) ((x).s210)
#define REV4(x) ((x).s3210)
#define REV8(x) ((x).s76543210)
#define REV16(x) ((x).sFEDCBA9876543210)

#define REVERSE_STR(x, s) REV##s((x))
#define REVERSE(x, s) REVERSE_STR(x, s)

#define ROT1_0(x) ((x))
#define ROT1_1(x) ((x))

#define ROT2_0(x) ((x))
#define ROT2_1(x) ((x).s10)
#define ROT2_2(x) ((x))

#define ROT3_0(x) ((x))
#define ROT3_1(x) ((x).s201)
#define ROT3_2(x) ((x).s120)
#define ROT3_3(x) ((x))

#define ROT4_0(x) ((x))
#define ROT4_1(x) ((x).s3012)
#define ROT4_2(x) ((x).s2301)
#define ROT4_3(x) ((x).s1230)
#define ROT4_4(x) ((x))

#define ROT8_0(x) ((x))
#define ROT8_1(x) ((x).s70123456)
#define ROT8_2(x) ((x).s67012345)
#define ROT8_3(x) ((x).s56701234)
#define ROT8_4(x) ((x).s45670123)
#define ROT8_5(x) ((x).s34567012)
#define ROT8_6(x) ((x).s23456701)
#define ROT8_7(x) ((x).s12345670)
#define ROT8_8(x) ((x))

#define ROT16_0(x) ((x))
#define ROT16_1(x) ((x).sF0123456789ABCDE)
#define ROT16_2(x) ((x).sEF0123456789ABCD)
#define ROT16_3(x) ((x).sDEF0123456789ABC)
#define ROT16_4(x) ((x).sCDEF0123456789AB)
#define ROT16_5(x) ((x).sBCDEF0123456789A)
#define ROT16_6(x) ((x).sABCDEF0123456789)
#define ROT16_7(x) ((x).s9ABCDEF012345678)
#define ROT16_8(x) ((x).s89ABCDEF01234567)
#define ROT16_9(x) ((x).s789ABCDEF0123456)
#define ROT16_10(x) ((x).s6789ABCDEF012345)
#define ROT16_11(x) ((x).s56789ABCDEF01234)
#define ROT16_12(x) ((x).s456789ABCDEF0123)
#define ROT16_13(x) ((x).s3456789ABCDEF012)
#define ROT16_14(x) ((x).s23456789ABCDEF01)
#define ROT16_15(x) ((x).s123456789ABCDEF0)
#define ROT16_16(x) ((x))

#define ROTATE_STR(x, s, n) ROT##s##_##n(x)
#define ROTATE(x, s, n) ROTATE_STR(x, s, n)

#define V_OFFS1(dt) (dt##1)(0)
#define V_OFFS2(dt) (dt##2)(0, 1)
#define V_OFFS3(dt) (dt##3)(0, 1, 2)
#define V_OFFS4(dt) (dt##4)(0, 1, 2, 3)
#define V_OFFS8(dt) (dt##8)(0, 1, 2, 3, 4, 5, 6, 7)
#define V_OFFS16(dt) (dt##16)(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15)

#define VEC_OFFS_STR(dt, s) V_OFFS##s(dt)
#define VEC_OFFS(dt, s) VEC_OFFS_STR(dt, s)

#define VLOAD_STR(size) vload##size
#define VLOAD(size) VLOAD_STR(size)

#define VLOAD_PARTIAL_STR(size, load_size) vload_partial_##size##_##load_size
#define VLOAD_PARTIAL(size, load_size) VLOAD_PARTIAL_STR(size, load_size)

#define NO_LOAD(data, offs, ptr) \
    { \
    }
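// vload_partial_<N0>_<load_size>: dispatch table mapping (vector width, number of elements
// to load) to a concrete partial load; unsupported combinations expand to NO_LOAD.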
#define vload_partial_1_0 NO_LOAD
#define vload_partial_1_1 vload1
#define vload_partial_1_2 NO_LOAD
#define vload_partial_1_3 NO_LOAD
#define vload_partial_1_4 NO_LOAD
#define vload_partial_1_5 NO_LOAD
#define vload_partial_1_6 NO_LOAD
#define vload_partial_1_7 NO_LOAD
#define vload_partial_1_8 NO_LOAD
#define vload_partial_1_9 NO_LOAD
#define vload_partial_1_10 NO_LOAD
#define vload_partial_1_11 NO_LOAD
#define vload_partial_1_12 NO_LOAD
#define vload_partial_1_13 NO_LOAD
#define vload_partial_1_14 NO_LOAD
#define vload_partial_1_15 NO_LOAD
#define vload_partial_1_16 NO_LOAD

#define vload_partial_2_0 NO_LOAD
#define vload_partial_2_1 vload_partial_1
#define vload_partial_2_2 vload_partial_2
#define vload_partial_2_3 NO_LOAD
#define vload_partial_2_4 NO_LOAD
#define vload_partial_2_5 NO_LOAD
#define vload_partial_2_6 NO_LOAD
#define vload_partial_2_7 NO_LOAD
#define vload_partial_2_8 NO_LOAD
#define vload_partial_2_9 NO_LOAD
#define vload_partial_2_10 NO_LOAD
#define vload_partial_2_11 NO_LOAD
#define vload_partial_2_12 NO_LOAD
#define vload_partial_2_13 NO_LOAD
#define vload_partial_2_14 NO_LOAD
#define vload_partial_2_15 NO_LOAD
#define vload_partial_2_16 NO_LOAD

#define vload_partial_3_0 NO_LOAD
#define vload_partial_3_1 vload_partial_1
#define vload_partial_3_2 vload_partial_2
#define vload_partial_3_3 vload_partial_3
#define vload_partial_3_4 NO_LOAD
#define vload_partial_3_5 NO_LOAD
#define vload_partial_3_6 NO_LOAD
#define vload_partial_3_7 NO_LOAD
#define vload_partial_3_8 NO_LOAD
#define vload_partial_3_9 NO_LOAD
#define vload_partial_3_10 NO_LOAD
#define vload_partial_3_11 NO_LOAD
#define vload_partial_3_12 NO_LOAD
#define vload_partial_3_13 NO_LOAD
#define vload_partial_3_14 NO_LOAD
#define vload_partial_3_15 NO_LOAD
#define vload_partial_3_16 NO_LOAD

#define vload_partial_4_0 NO_LOAD
#define vload_partial_4_1 vload_partial_1
#define vload_partial_4_2 vload_partial_2
#define vload_partial_4_3 vload_partial_3
#define vload_partial_4_4 vload_partial_4
#define vload_partial_4_5 NO_LOAD
#define vload_partial_4_6 NO_LOAD
#define vload_partial_4_7 NO_LOAD
#define vload_partial_4_8 NO_LOAD
#define vload_partial_4_9 NO_LOAD
#define vload_partial_4_10 NO_LOAD
#define vload_partial_4_11 NO_LOAD
#define vload_partial_4_12 NO_LOAD
#define vload_partial_4_13 NO_LOAD
#define vload_partial_4_14 NO_LOAD
#define vload_partial_4_15 NO_LOAD
#define vload_partial_4_16 NO_LOAD

#define vload_partial_8_0 NO_LOAD
#define vload_partial_8_1 vload_partial_1
#define vload_partial_8_2 vload_partial_2
#define vload_partial_8_3 vload_partial_3
#define vload_partial_8_4 vload_partial_4
#define vload_partial_8_5 vload_partial_5
#define vload_partial_8_6 vload_partial_6
#define vload_partial_8_7 vload_partial_7
#define vload_partial_8_8 vload_partial_8
#define vload_partial_8_9 NO_LOAD
#define vload_partial_8_10 NO_LOAD
#define vload_partial_8_11 NO_LOAD
#define vload_partial_8_12 NO_LOAD
#define vload_partial_8_13 NO_LOAD
#define vload_partial_8_14 NO_LOAD
#define vload_partial_8_15 NO_LOAD
#define vload_partial_8_16 NO_LOAD

#define vload_partial_16_0 NO_LOAD
#define vload_partial_16_1 vload_partial_1
#define vload_partial_16_2 vload_partial_2
#define vload_partial_16_3 vload_partial_3
#define vload_partial_16_4 vload_partial_4
#define vload_partial_16_5 vload_partial_5
#define vload_partial_16_6 vload_partial_6
#define vload_partial_16_7 vload_partial_7
#define vload_partial_16_8 vload_partial_8
#define vload_partial_16_9 vload_partial_9
#define vload_partial_16_10 vload_partial_10
#define vload_partial_16_11 vload_partial_11
#define vload_partial_16_12 vload_partial_12
#define vload_partial_16_13 vload_partial_13
#define vload_partial_16_14 vload_partial_14
#define vload_partial_16_15 vload_partial_15
#define vload_partial_16_16 vload_partial_16
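// vload_partial_n: load the first n elements of DATA, composed from power-of-two vloads.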
#define vload_partial_1(DATA, OFFSET, PTR) \
    DATA.s0 = vload1(OFFSET, PTR);

#define vload_partial_2(DATA, OFFSET, PTR) \
    DATA.s01 = vload2(OFFSET, PTR);

#define vload_partial_3(DATA, OFFSET, PTR) \
    DATA.s012 = vload3(OFFSET, PTR);

#define vload_partial_4(DATA, OFFSET, PTR) \
    DATA.s0123 = vload4(OFFSET, PTR);

#define vload_partial_5(DATA, OFFSET, PTR) \
    vload_partial_4(DATA.s0123, OFFSET, PTR); \
    DATA.s4 = vload1(OFFSET, PTR + 4);

#define vload_partial_6(DATA, OFFSET, PTR) \
    vload_partial_4(DATA.s0123, OFFSET, PTR); \
    vload_partial_2(DATA.s45, OFFSET, PTR + 4);

#define vload_partial_7(DATA, OFFSET, PTR) \
    vload_partial_4(DATA.s0123, OFFSET, PTR); \
    vload_partial_3(DATA.s456, OFFSET, PTR + 4);

#define vload_partial_8(DATA, OFFSET, PTR) \
    DATA.s01234567 = vload8(OFFSET, PTR);

#define vload_partial_9(DATA, OFFSET, PTR) \
    vload_partial_8(DATA.s01234567, OFFSET, PTR); \
    DATA.s8 = vload1(OFFSET, PTR + 8);

#define vload_partial_10(DATA, OFFSET, PTR) \
    vload_partial_8(DATA.s01234567, OFFSET, PTR); \
    vload_partial_2(DATA.s89, OFFSET, PTR + 8);

#define vload_partial_11(DATA, OFFSET, PTR) \
    vload_partial_8(DATA.s01234567, OFFSET, PTR); \
    vload_partial_3(DATA.s89A, OFFSET, PTR + 8);

#define vload_partial_12(DATA, OFFSET, PTR) \
    vload_partial_8(DATA.s01234567, OFFSET, PTR); \
    vload_partial_4(DATA.s89AB, OFFSET, PTR + 8);

#define vload_partial_13(DATA, OFFSET, PTR) \
    vload_partial_8(DATA.s01234567, OFFSET, PTR); \
    vload_partial_5(DATA.s89ABCDEF, OFFSET, PTR + 8);

#define vload_partial_14(DATA, OFFSET, PTR) \
    vload_partial_8(DATA.s01234567, OFFSET, PTR); \
    vload_partial_6(DATA.s89ABCDEF, OFFSET, PTR + 8);

#define vload_partial_15(DATA, OFFSET, PTR) \
    vload_partial_8(DATA.s01234567, OFFSET, PTR); \
    vload_partial_7(DATA.s89ABCDEF, OFFSET, PTR + 8);

#define vload_partial_16(DATA, OFFSET, PTR) \
    DATA = vload16(OFFSET, PTR);
#define PIXEL_UNIT4 1
#define PIXEL_UNIT8 2
#define PIXEL_UNIT16 4

#define CONVERT_VECTOR_SIZE_TO_PIXEL_UNIT_STR(vec_size) PIXEL_UNIT##vec_size
#define CONVERT_VECTOR_SIZE_TO_PIXEL_UNIT(vec_size) CONVERT_VECTOR_SIZE_TO_PIXEL_UNIT_STR(vec_size)

#define read_image2d_floatx1(img, x_coord, y_coord) (float4)(read_imagef(img, (int2)(x_coord, y_coord)));
#define read_image2d_floatx2(img, x_coord, y_coord) (float8)(read_imagef(img, (int2)(x_coord, y_coord)), read_imagef(img, (int2)(x_coord + 1, y_coord)));
#define read_image2d_floatx4(img, x_coord, y_coord) (float16)(read_imagef(img, (int2)(x_coord, y_coord)), read_imagef(img, (int2)(x_coord + 1, y_coord)), read_imagef(img, (int2)(x_coord + 2, y_coord)), read_imagef(img, (int2)(x_coord + 3, y_coord)));

#if defined(ARM_COMPUTE_OPENCL_FP16_ENABLED) && defined(cl_khr_fp16)
#define read_image2d_halfx1(img, x_coord, y_coord) (half4)(read_imageh(img, (int2)(x_coord, y_coord)));
#define read_image2d_halfx2(img, x_coord, y_coord) (half8)(read_imageh(img, (int2)(x_coord, y_coord)), read_imageh(img, (int2)(x_coord + 1, y_coord)));
#define read_image2d_halfx4(img, x_coord, y_coord) (half16)(read_imageh(img, (int2)(x_coord, y_coord)), read_imageh(img, (int2)(x_coord + 1, y_coord)), read_imageh(img, (int2)(x_coord + 2, y_coord)), read_imageh(img, (int2)(x_coord + 3, y_coord)));
#endif

#define write_image2d_floatx1(img, x_coord, y_coord, values) (write_imagef(img, (int2)(x_coord, y_coord), values));
#define write_image2d_floatx2(img, x_coord, y_coord, values) (write_imagef(img, (int2)(x_coord, y_coord), values.s0123), write_imagef(img, (int2)(x_coord + 1, y_coord), values.s4567));
#define write_image2d_floatx4(img, x_coord, y_coord, values) (write_imagef(img, (int2)(x_coord, y_coord), values.s0123), write_imagef(img, (int2)(x_coord + 1, y_coord), values.s4567), write_imagef(img, (int2)(x_coord + 2, y_coord), values.s89AB), write_imagef(img, (int2)(x_coord + 3, y_coord), values.sCDEF));

#if defined(ARM_COMPUTE_OPENCL_FP16_ENABLED) && defined(cl_khr_fp16)
#define write_image2d_halfx1(img, x_coord, y_coord, values) (write_imageh(img, (int2)(x_coord, y_coord), values));
#define write_image2d_halfx2(img, x_coord, y_coord, values) (write_imageh(img, (int2)(x_coord, y_coord), values.s0123), write_imageh(img, (int2)(x_coord + 1, y_coord), values.s4567));
#define write_image2d_halfx4(img, x_coord, y_coord, values) (write_imageh(img, (int2)(x_coord, y_coord), values.s0123), write_imageh(img, (int2)(x_coord + 1, y_coord), values.s4567), write_imageh(img, (int2)(x_coord + 2, y_coord), values.s89AB), write_imageh(img, (int2)(x_coord + 3, y_coord), values.sCDEF));
#endif

#define READ_IMAGE2D_STR(data_type, n0, img, x_coord, y_coord) read_image2d_##data_type##x##n0(img, x_coord, y_coord)
#define READ_IMAGE2D(data_type, n0, img, x_coord, y_coord) READ_IMAGE2D_STR(data_type, n0, img, x_coord, y_coord)

#define WRITE_IMAGE2D_STR(data_type, n0, img, x_coord, y_coord, values) write_image2d_##data_type##x##n0(img, x_coord, y_coord, values)
#define WRITE_IMAGE2D(data_type, n0, img, x_coord, y_coord, values) WRITE_IMAGE2D_STR(data_type, n0, img, x_coord, y_coord, values)

#define VSTORE_STR(size) vstore##size
#define VSTORE(size) VSTORE_STR(size)

#define float1 float
#define half1 half
#define char1 char
#define uchar1 uchar
#define short1 short
#define ushort1 ushort
#define int1 int
#define uint1 uint
#define long1 long
#define ulong1 ulong
#define double1 double

#define vload1(OFFSET, PTR) *(OFFSET + PTR)
#define vstore1(DATA, OFFSET, PTR) *(OFFSET + PTR) = DATA

#define VSTORE_PARTIAL_STR(size, store_size) vstore_partial_##size##_##store_size
#define VSTORE_PARTIAL(size, store_size) VSTORE_PARTIAL_STR(size, store_size)

#define NO_STORE(data, offs, ptr) \
    { \
    }
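// vstore_partial_<N0>_<store_size>: dispatch table for partial stores, mirroring the
// vload_partial_* table above; unsupported combinations expand to NO_STORE.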
#define vstore_partial_1_0 NO_STORE
#define vstore_partial_1_1 vstore1
#define vstore_partial_1_2 NO_STORE
#define vstore_partial_1_3 NO_STORE
#define vstore_partial_1_4 NO_STORE
#define vstore_partial_1_5 NO_STORE
#define vstore_partial_1_6 NO_STORE
#define vstore_partial_1_7 NO_STORE
#define vstore_partial_1_8 NO_STORE
#define vstore_partial_1_9 NO_STORE
#define vstore_partial_1_10 NO_STORE
#define vstore_partial_1_11 NO_STORE
#define vstore_partial_1_12 NO_STORE
#define vstore_partial_1_13 NO_STORE
#define vstore_partial_1_14 NO_STORE
#define vstore_partial_1_15 NO_STORE
#define vstore_partial_1_16 NO_STORE

#define vstore_partial_2_0 NO_STORE
#define vstore_partial_2_1 vstore_partial_1
#define vstore_partial_2_2 vstore_partial_2
#define vstore_partial_2_3 NO_STORE
#define vstore_partial_2_4 NO_STORE
#define vstore_partial_2_5 NO_STORE
#define vstore_partial_2_6 NO_STORE
#define vstore_partial_2_7 NO_STORE
#define vstore_partial_2_8 NO_STORE
#define vstore_partial_2_9 NO_STORE
#define vstore_partial_2_10 NO_STORE
#define vstore_partial_2_11 NO_STORE
#define vstore_partial_2_12 NO_STORE
#define vstore_partial_2_13 NO_STORE
#define vstore_partial_2_14 NO_STORE
#define vstore_partial_2_15 NO_STORE
#define vstore_partial_2_16 NO_STORE

#define vstore_partial_3_0 NO_STORE
#define vstore_partial_3_1 vstore_partial_1
#define vstore_partial_3_2 vstore_partial_2
#define vstore_partial_3_3 vstore_partial_3
#define vstore_partial_3_4 NO_STORE
#define vstore_partial_3_5 NO_STORE
#define vstore_partial_3_6 NO_STORE
#define vstore_partial_3_7 NO_STORE
#define vstore_partial_3_8 NO_STORE
#define vstore_partial_3_9 NO_STORE
#define vstore_partial_3_10 NO_STORE
#define vstore_partial_3_11 NO_STORE
#define vstore_partial_3_12 NO_STORE
#define vstore_partial_3_13 NO_STORE
#define vstore_partial_3_14 NO_STORE
#define vstore_partial_3_15 NO_STORE
#define vstore_partial_3_16 NO_STORE

#define vstore_partial_4_0 NO_STORE
#define vstore_partial_4_1 vstore_partial_1
#define vstore_partial_4_2 vstore_partial_2
#define vstore_partial_4_3 vstore_partial_3
#define vstore_partial_4_4 vstore_partial_4
#define vstore_partial_4_5 NO_STORE
#define vstore_partial_4_6 NO_STORE
#define vstore_partial_4_7 NO_STORE
#define vstore_partial_4_8 NO_STORE
#define vstore_partial_4_9 NO_STORE
#define vstore_partial_4_10 NO_STORE
#define vstore_partial_4_11 NO_STORE
#define vstore_partial_4_12 NO_STORE
#define vstore_partial_4_13 NO_STORE
#define vstore_partial_4_14 NO_STORE
#define vstore_partial_4_15 NO_STORE
#define vstore_partial_4_16 NO_STORE

#define vstore_partial_8_0 NO_STORE
#define vstore_partial_8_1 vstore_partial_1
#define vstore_partial_8_2 vstore_partial_2
#define vstore_partial_8_3 vstore_partial_3
#define vstore_partial_8_4 vstore_partial_4
#define vstore_partial_8_5 vstore_partial_5
#define vstore_partial_8_6 vstore_partial_6
#define vstore_partial_8_7 vstore_partial_7
#define vstore_partial_8_8 vstore_partial_8
#define vstore_partial_8_9 NO_STORE
#define vstore_partial_8_10 NO_STORE
#define vstore_partial_8_11 NO_STORE
#define vstore_partial_8_12 NO_STORE
#define vstore_partial_8_13 NO_STORE
#define vstore_partial_8_14 NO_STORE
#define vstore_partial_8_15 NO_STORE
#define vstore_partial_8_16 NO_STORE

#define vstore_partial_16_0 NO_STORE
#define vstore_partial_16_1 vstore_partial_1
#define vstore_partial_16_2 vstore_partial_2
#define vstore_partial_16_3 vstore_partial_3
#define vstore_partial_16_4 vstore_partial_4
#define vstore_partial_16_5 vstore_partial_5
#define vstore_partial_16_6 vstore_partial_6
#define vstore_partial_16_7 vstore_partial_7
#define vstore_partial_16_8 vstore_partial_8
#define vstore_partial_16_9 vstore_partial_9
#define vstore_partial_16_10 vstore_partial_10
#define vstore_partial_16_11 vstore_partial_11
#define vstore_partial_16_12 vstore_partial_12
#define vstore_partial_16_13 vstore_partial_13
#define vstore_partial_16_14 vstore_partial_14
#define vstore_partial_16_15 vstore_partial_15
#define vstore_partial_16_16 vstore_partial_16
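// vstore_partial_n: store the first n elements of DATA, composed from power-of-two vstores.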
#define vstore_partial_1(DATA, OFFSET, PTR) \
    vstore1(DATA.s0, OFFSET, PTR);

#define vstore_partial_2(DATA, OFFSET, PTR) \
    vstore2(DATA.s01, OFFSET, PTR);

#define vstore_partial_3(DATA, OFFSET, PTR) \
    vstore3(DATA.s012, OFFSET, PTR);

#define vstore_partial_4(DATA, OFFSET, PTR) \
    vstore4(DATA.s0123, OFFSET, PTR);

#define vstore_partial_5(DATA, OFFSET, PTR) \
    vstore_partial_4(DATA.s0123, OFFSET, PTR); \
    vstore1(DATA.s4, OFFSET, PTR + 4);

#define vstore_partial_6(DATA, OFFSET, PTR) \
    vstore_partial_4(DATA.s0123, OFFSET, PTR); \
    vstore_partial_2(DATA.s45, OFFSET, PTR + 4);

#define vstore_partial_7(DATA, OFFSET, PTR) \
    vstore_partial_4(DATA.s0123, OFFSET, PTR); \
    vstore_partial_3(DATA.s456, OFFSET, PTR + 4);

#define vstore_partial_8(DATA, OFFSET, PTR) \
    vstore8(DATA.s01234567, OFFSET, PTR);

#define vstore_partial_9(DATA, OFFSET, PTR) \
    vstore_partial_8(DATA.s01234567, OFFSET, PTR); \
    vstore1(DATA.s8, OFFSET, PTR + 8);

#define vstore_partial_10(DATA, OFFSET, PTR) \
    vstore_partial_8(DATA.s01234567, OFFSET, PTR); \
    vstore_partial_2(DATA.s89, OFFSET, PTR + 8);

#define vstore_partial_11(DATA, OFFSET, PTR) \
    vstore_partial_8(DATA.s01234567, OFFSET, PTR); \
    vstore_partial_3(DATA.s89a, OFFSET, PTR + 8);

#define vstore_partial_12(DATA, OFFSET, PTR) \
    vstore_partial_8(DATA.s01234567, OFFSET, PTR); \
    vstore_partial_4(DATA.s89ab, OFFSET, PTR + 8);

#define vstore_partial_13(DATA, OFFSET, PTR) \
    vstore_partial_8(DATA.s01234567, OFFSET, PTR); \
    vstore_partial_5(DATA.s89abcdef, OFFSET, PTR + 8);

#define vstore_partial_14(DATA, OFFSET, PTR) \
    vstore_partial_8(DATA.s01234567, OFFSET, PTR); \
    vstore_partial_6(DATA.s89abcdef, OFFSET, PTR + 8);

#define vstore_partial_15(DATA, OFFSET, PTR) \
    vstore_partial_8(DATA.s01234567, OFFSET, PTR); \
    vstore_partial_7(DATA.s89abcdef, OFFSET, PTR + 8);

#define vstore_partial_16(DATA, OFFSET, PTR) \
    vstore16(DATA, OFFSET, PTR);
#define convert_float_sat convert_float
#define convert_float1_sat convert_float
#define convert_float2_sat convert_float2
#define convert_float3_sat convert_float3
#define convert_float4_sat convert_float4
#define convert_float8_sat convert_float8
#define convert_float16_sat convert_float16
#define convert_half_sat convert_float
#define convert_half1_sat convert_half
#define convert_half2_sat convert_half2
#define convert_half3_sat convert_half3
#define convert_half4_sat convert_half4
#define convert_half8_sat convert_half8
#define convert_half16_sat convert_half16

#define convert_float1 convert_float
#define convert_half1 convert_half
#define convert_char1 convert_char
#define convert_uchar1 convert_uchar
#define convert_short1 convert_short
#define convert_ushort1 convert_ushort
#define convert_int1 convert_int
#define convert_uint1 convert_uint
#define convert_long1 convert_long
#define convert_ulong1 convert_ulong
#define convert_double1 convert_double

#define convert_char1_sat convert_char_sat
#define convert_uchar1_sat convert_uchar_sat
#define convert_uchar2_sat convert_uchar2_sat
#define convert_uchar3_sat convert_uchar3_sat
#define convert_uchar4_sat convert_uchar4_sat
#define convert_uchar8_sat convert_uchar8_sat
#define convert_uchar16_sat convert_uchar16_sat
#define convert_short1_sat convert_short_sat
#define convert_ushort1_sat convert_ushort_sat
#define convert_int1_sat convert_int_sat
#define convert_uint1_sat convert_uint_sat
#define convert_long1_sat convert_long_sat
#define convert_ulong1_sat convert_ulong_sat
#define convert_double1_sat convert_double_sat

#define VEC_DATA_TYPE_STR(type, size) type##size
#define VEC_DATA_TYPE(type, size) VEC_DATA_TYPE_STR(type, size)

#define CONVERT_STR(x, type) (convert_##type((x)))
#define CONVERT(x, type) CONVERT_STR(x, type)

#define CONVERT_SAT_STR(x, type) (convert_##type##_sat((x)))
#define CONVERT_SAT(x, type) CONVERT_SAT_STR(x, type)

#define CONVERT_SAT_ROUND_STR(x, type, round) (convert_##type##_sat_##round((x)))
#define CONVERT_SAT_ROUND(x, type, round) CONVERT_SAT_ROUND_STR(x, type, round)

#define select_vec_dt_uchar(size) uchar##size
#define select_vec_dt_char(size) char##size
#define select_vec_dt_ushort(size) ushort##size
#define select_vec_dt_short(size) short##size
#define select_vec_dt_half(size) short##size
#define select_vec_dt_uint(size) uint##size
#define select_vec_dt_int(size) int##size
#define select_vec_dt_float(size) int##size
#define select_vec_dt_ulong(size) ulong##size
#define select_vec_dt_long(size) long##size

#define SELECT_VEC_DATA_TYPE_STR(type, size) select_vec_dt_##type(size)
#define SELECT_VEC_DATA_TYPE(type, size) SELECT_VEC_DATA_TYPE_STR(type, size)
#define SELECT_DATA_TYPE(type) SELECT_VEC_DATA_TYPE_STR(type, 1)

#define signed_int_vec_dt_uchar(size) char##size
#define signed_int_vec_dt_char(size) char##size
#define signed_int_vec_dt_ushort(size) short##size
#define signed_int_vec_dt_short(size) short##size
#define signed_int_vec_dt_half(size) short##size
#define signed_int_vec_dt_uint(size) int##size
#define signed_int_vec_dt_int(size) int##size
#define signed_int_vec_dt_float(size) int##size
#define signed_int_vec_dt_ulong(size) long##size
#define signed_int_vec_dt_long(size) long##size

#define SIGNED_INT_VEC_DATA_TYPE_STR(type, size) signed_int_vec_dt_##type(size)
#define SIGNED_INT_VEC_DATA_TYPE(type, size) SIGNED_INT_VEC_DATA_TYPE_STR(type, size)
#define SIGNED_INT_DATA_TYPE(type) SIGNED_INT_VEC_DATA_TYPE_STR(type, 1)
#define sum_reduce_1(x) (x)
#define sum_reduce_2(x) ((x).s0) + ((x).s1)
#define sum_reduce_3(x) sum_reduce_2((x).s01) + ((x).s2)
#define sum_reduce_4(x) sum_reduce_2((x).s01) + sum_reduce_2((x).s23)
#define sum_reduce_8(x) sum_reduce_4((x).s0123) + sum_reduce_4((x).s4567)
#define sum_reduce_16(x) sum_reduce_8((x).s01234567) + sum_reduce_8((x).s89ABCDEF)

#define SUM_REDUCE_STR(x, size) sum_reduce_##size(x)
#define SUM_REDUCE(x, size) SUM_REDUCE_STR(x, size)

#define prod_reduce_1(x) (x)
#define prod_reduce_2(x) ((x).s0) * ((x).s1)
#define prod_reduce_3(x) prod_reduce_2((x).s01) * ((x).s2)
#define prod_reduce_4(x) prod_reduce_2((x).s01) * prod_reduce_2((x).s23)
#define prod_reduce_8(x) prod_reduce_4((x).s0123) * prod_reduce_4((x).s4567)
#define prod_reduce_16(x) prod_reduce_8((x).s01234567) * prod_reduce_8((x).s89ABCDEF)

#define PROD_REDUCE_STR(x, size) prod_reduce_##size(x)
#define PROD_REDUCE(x, size) PROD_REDUCE_STR(x, size)

#define max_reduce_1(x) (x)
#define max_reduce_2(x) max(((x).s0), ((x).s1))
#define max_reduce_3(x) max(max_reduce_2((x).s01), ((x).s2))
#define max_reduce_4(x) max(max_reduce_2((x).s01), max_reduce_2((x).s23))
#define max_reduce_8(x) max(max_reduce_4((x).s0123), max_reduce_4((x).s4567))
#define max_reduce_16(x) max(max_reduce_8((x).s01234567), max_reduce_8((x).s89ABCDEF))

#define MAX_REDUCE_STR(x, size) max_reduce_##size(x)
#define MAX_REDUCE(x, size) MAX_REDUCE_STR(x, size)

#define VECTOR_DECLARATION(name) \
    __global uchar *name##_ptr, \
    uint name##_stride_x, \
    uint name##_step_x, \
    uint name##_offset_first_element_in_bytes

#define IMAGE_DECLARATION(name) \
    __global uchar *name##_ptr, \
    uint name##_stride_x, \
    uint name##_step_x, \
    uint name##_stride_y, \
    uint name##_step_y, \
    uint name##_offset_first_element_in_bytes

#define TENSOR3D_DECLARATION(name) \
    __global uchar *name##_ptr, \
    uint name##_stride_x, \
    uint name##_step_x, \
    uint name##_stride_y, \
    uint name##_step_y, \
    uint name##_stride_z, \
    uint name##_step_z, \
    uint name##_offset_first_element_in_bytes

#define TENSOR4D_DECLARATION(name) \
    __global uchar *name##_ptr, \
    uint name##_stride_x, \
    uint name##_step_x, \
    uint name##_stride_y, \
    uint name##_step_y, \
    uint name##_stride_z, \
    uint name##_step_z, \
    uint name##_stride_w, \
    uint name##_step_w, \
    uint name##_offset_first_element_in_bytes

#define TENSOR5D_DECLARATION(name) \
    __global uchar *name##_ptr, \
    uint name##_stride_x, \
    uint name##_step_x, \
    uint name##_stride_y, \
    uint name##_step_y, \
    uint name##_stride_z, \
    uint name##_step_z, \
    uint name##_stride_w, \
    uint name##_step_w, \
    uint name##_stride_v, \
    uint name##_step_v, \
    uint name##_offset_first_element_in_bytes
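// CONVERT_TO_*_STRUCT: build a Vector/Image/Tensor3D/Tensor4D view from the kernel
// arguments declared by the *_DECLARATION macros, offsetting the pointer to the
// current work-item (the *_NO_STEP variants skip the per-work-item offset).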
#define CONVERT_TO_VECTOR_STRUCT(name) \
    update_vector_workitem_ptr(name##_ptr, name##_offset_first_element_in_bytes, name##_stride_x, name##_step_x)

#define CONVERT_TO_VECTOR_STRUCT_NO_STEP(name) \
    update_vector_workitem_ptr(name##_ptr, name##_offset_first_element_in_bytes, name##_stride_x, 0)

#define CONVERT_TO_IMAGE_STRUCT(name) \
    update_image_workitem_ptr(name##_ptr, name##_offset_first_element_in_bytes, name##_stride_x, name##_step_x, name##_stride_y, name##_step_y)

#define CONVERT_TO_IMAGE_STRUCT_NO_STEP(name) \
    update_image_workitem_ptr(name##_ptr, name##_offset_first_element_in_bytes, name##_stride_x, 0, name##_stride_y, 0)

#define CONVERT_TENSOR3D_TO_IMAGE_STRUCT(name) \
    update_image_from_tensor3D_workitem_ptr(name##_ptr, name##_offset_first_element_in_bytes, name##_stride_x, name##_step_x, name##_stride_y, name##_step_y, name##_stride_z, name##_step_z)

#define CONVERT_TENSOR3D_TO_IMAGE_STRUCT_NO_STEP(name) \
    update_image_from_tensor3D_workitem_ptr(name##_ptr, name##_offset_first_element_in_bytes, name##_stride_x, 0, name##_stride_y, 0, name##_stride_z, name##_step_z)

#define CONVERT_TO_TENSOR3D_STRUCT(name) \
    update_tensor3D_workitem_ptr(name##_ptr, name##_offset_first_element_in_bytes, name##_stride_x, name##_step_x, name##_stride_y, name##_step_y, \
                                 name##_stride_z, name##_step_z)

#define CONVERT_TO_TENSOR3D_STRUCT_NO_STEP(name) \
    update_tensor3D_workitem_ptr(name##_ptr, name##_offset_first_element_in_bytes, name##_stride_x, 0, name##_stride_y, 0, name##_stride_z, 0)

#define CONVERT_TO_TENSOR4D_STRUCT(name, mod_size) \
    update_tensor4D_workitem_ptr(name##_ptr, name##_offset_first_element_in_bytes, name##_stride_x, name##_step_x, name##_stride_y, name##_step_y, \
                                 name##_stride_z, name##_step_z, name##_stride_w, name##_step_w, mod_size)

#define CONVERT_TO_TENSOR4D_STRUCT_NO_STEP(name, mod_size) \
    update_tensor4D_workitem_ptr(name##_ptr, name##_offset_first_element_in_bytes, name##_stride_x, 0, name##_stride_y, 0, name##_stride_z, 0, name##_stride_w, 0, mod_size)

#define CONVERT_TO_TENSOR3D_STRUCT_NO_UPDATE_PTR(name) \
    tensor3D_ptr_no_update(name##_ptr, name##_offset_first_element_in_bytes, name##_stride_x, name##_step_x, name##_stride_y, name##_step_y, \
                           name##_stride_z, name##_step_z)
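// Lightweight buffer views: a global pointer plus byte strides for each dimension.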
typedef struct Vector
{
    __global uchar *ptr;
    int offset_first_element_in_bytes;
    int stride_x;
} Vector;

typedef struct Image
{
    __global uchar *ptr;
    int offset_first_element_in_bytes;
    int stride_x;
    int stride_y;
} Image;

typedef struct Tensor3D
{
    __global uchar *ptr;
    int offset_first_element_in_bytes;
    int stride_x;
    int stride_y;
    int stride_z;
} Tensor3D;

typedef struct Tensor4D
{
    __global uchar *ptr;
    int offset_first_element_in_bytes;
    int stride_x;
    int stride_y;
    int stride_z;
    int stride_w;
} Tensor4D;
inline Vector update_vector_workitem_ptr(__global uchar *ptr, uint offset_first_element_in_bytes, uint stride_x, uint step_x)
{
    Vector vector =
    {
        .ptr = ptr,
        .offset_first_element_in_bytes = offset_first_element_in_bytes,
        .stride_x = stride_x,
    };
    vector.ptr += vector.offset_first_element_in_bytes + get_global_id(0) * step_x;
    return vector;
}

inline Image update_image_workitem_ptr(__global uchar *ptr, uint offset_first_element_in_bytes, uint stride_x, uint step_x, uint stride_y, uint step_y)
{
    Image img =
    {
        .ptr = ptr,
        .offset_first_element_in_bytes = offset_first_element_in_bytes,
        .stride_x = stride_x,
        .stride_y = stride_y
    };
    img.ptr += img.offset_first_element_in_bytes + get_global_id(0) * step_x + get_global_id(1) * step_y;
    return img;
}

inline Image update_image_from_tensor3D_workitem_ptr(__global uchar *ptr, uint offset_first_element_in_bytes, uint stride_x, uint step_x, uint stride_y, uint step_y, uint stride_z, uint step_z)
{
    Image img =
    {
        .ptr = ptr,
        .offset_first_element_in_bytes = offset_first_element_in_bytes,
        .stride_x = stride_x,
        .stride_y = stride_y
    };
    img.ptr += img.offset_first_element_in_bytes + get_global_id(0) * step_x + get_global_id(1) * step_y + get_global_id(2) * step_z;
    return img;
}

inline Tensor3D update_tensor3D_workitem_ptr(__global uchar *ptr, uint offset_first_element_in_bytes, uint stride_x, uint step_x, uint stride_y, uint step_y, uint stride_z, uint step_z)
{
    Tensor3D tensor =
    {
        .ptr = ptr,
        .offset_first_element_in_bytes = offset_first_element_in_bytes,
        .stride_x = stride_x,
        .stride_y = stride_y,
        .stride_z = stride_z
    };
    tensor.ptr += tensor.offset_first_element_in_bytes + get_global_id(0) * step_x + get_global_id(1) * step_y + get_global_id(2) * step_z;
    return tensor;
}

inline Tensor3D tensor3D_ptr_no_update(__global uchar *ptr, uint offset_first_element_in_bytes, uint stride_x, uint step_x, uint stride_y, uint step_y, uint stride_z, uint step_z)
{
    Tensor3D tensor =
    {
        .ptr = ptr,
        .offset_first_element_in_bytes = offset_first_element_in_bytes,
        .stride_x = stride_x,
        .stride_y = stride_y,
        .stride_z = stride_z
    };
    return tensor;
}

inline Tensor4D update_tensor4D_workitem_ptr(__global uchar *ptr, uint offset_first_element_in_bytes, uint stride_x, uint step_x, uint stride_y, uint step_y, uint stride_z, uint step_z, uint stride_w, uint step_w, uint mod_size)
{
    Tensor4D tensor =
    {
        .ptr = ptr,
        .offset_first_element_in_bytes = offset_first_element_in_bytes,
        .stride_x = stride_x,
        .stride_y = stride_y,
        .stride_z = stride_z,
        .stride_w = stride_w
    };

    tensor.ptr += tensor.offset_first_element_in_bytes + get_global_id(0) * step_x + get_global_id(1) * step_y + (get_global_id(2) % mod_size) * step_z + (get_global_id(2) / mod_size) * step_w;
    return tensor;
}

inline __global const uchar *vector_offset(const Vector *vec, int x)
{
    return vec->ptr + x * vec->stride_x;
}

inline __global uchar *offset(const Image *img, int x, int y)
{
    return img->ptr + x * img->stride_x + y * img->stride_y;
}

inline __global const uchar *tensor3D_offset(const Tensor3D *tensor, int x, int y, int z)
{
    return tensor->ptr + x * tensor->stride_x + y * tensor->stride_y + z * tensor->stride_z;
}

inline __global const uchar *tensor4D_offset(const Tensor4D *tensor, int x, int y, int z, int w)
{
    return tensor->ptr + x * tensor->stride_x + y * tensor->stride_y + z * tensor->stride_z + w * tensor->stride_w;
}

inline __global const uchar *tensor3D_index2ptr(const Tensor3D *tensor, uint width, uint height, uint depth, uint index)
{
    uint num_elements = width * height;

    const uint z = index / num_elements;

    index %= num_elements;

    const uint y = index / width;

    index %= width;

    const uint x = index;

    return tensor->ptr + x * tensor->stride_x + y * tensor->stride_y + z * tensor->stride_z + tensor->offset_first_element_in_bytes;
}

#endif
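// MLA maps to fma() on Bifrost GPUs; the *_op macros below are the activation
// functions selected through ACTIVATION(op, DATA_TYPE, VEC_SIZE, x, A_VAL, B_VAL).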
#if GPU_ARCH == GPU_ARCH_BIFROST
#define MLA(a, b, c) (fma(c, b, a))
#else
#define MLA(a, b, c) ((b) * (c) + (a))
#endif

#define hard_swish_op(DATA_TYPE, VEC_SIZE, x, A_VAL, B_VAL) (x * ((min(max((x + (DATA_TYPE)3.0), (DATA_TYPE)0.0), (DATA_TYPE)6.0)) * (DATA_TYPE)0.166666667))

#define logistic_op(DATA_TYPE, VEC_SIZE, x, A_VAL, B_VAL) ((DATA_TYPE)1.0 / ((DATA_TYPE)1.0 + exp(-x)))

#define tanh_op(DATA_TYPE, VEC_SIZE, x, A_VAL, B_VAL) ((DATA_TYPE)A_VAL * tanh((DATA_TYPE)B_VAL * x))

#define relu_op(DATA_TYPE, VEC_SIZE, x, A_VAL, B_VAL) (max((DATA_TYPE)0.0, x))

#define brelu_op(DATA_TYPE, VEC_SIZE, x, A_VAL, B_VAL) (min((DATA_TYPE)A_VAL, max((DATA_TYPE)0.0, x)))

#define lu_brelu_op(DATA_TYPE, VEC_SIZE, x, A_VAL, B_VAL) (min(max(x, (DATA_TYPE)B_VAL), (DATA_TYPE)A_VAL))

#define lrelu_op(DATA_TYPE, VEC_SIZE, x, A_VAL, B_VAL) ((min(x, (DATA_TYPE)0.0) * (DATA_TYPE)A_VAL) + max(x, (DATA_TYPE)0.0))

#define srelu_op(DATA_TYPE, VEC_SIZE, x, A_VAL, B_VAL) (log((DATA_TYPE)1.0 + exp(x)))

#define elu_op(DATA_TYPE, VEC_SIZE, x, A_VAL, B_VAL) (select(((DATA_TYPE)A_VAL * (exp(x) - (DATA_TYPE)1.0)), x, (SELECT_VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE))isgreaterequal(x, (DATA_TYPE)0.0)))

#define abs_op(DATA_TYPE, VEC_SIZE, x, A_VAL, B_VAL) (fabs(x))

#define square_op(DATA_TYPE, VEC_SIZE, x, A_VAL, B_VAL) (x * x)

#define sqrt_op(DATA_TYPE, VEC_SIZE, x, A_VAL, B_VAL) (sqrt(x))

#define linear_op(DATA_TYPE, VEC_SIZE, x, A_VAL, B_VAL) (MLA((DATA_TYPE)B_VAL, (DATA_TYPE)A_VAL, x))

#define gelu_op(DATA_TYPE, VEC_SIZE, x, A_VAL, B_VAL) (x * (DATA_TYPE)0.5 * ((DATA_TYPE)1.0 + erf(x / (DATA_TYPE)1.41421356237)))

#define identity_op(DATA_TYPE, VEC_SIZE, x, A_VAL, B_VAL) (x)

#define ACT_OP(op, DATA_TYPE, VEC_SIZE, x, A_VAL, B_VAL) op##_op(DATA_TYPE, VEC_SIZE, x, A_VAL, B_VAL)

#define ACTIVATION(op, DATA_TYPE, VEC_SIZE, x, A_VAL, B_VAL) ACT_OP(op, DATA_TYPE, VEC_SIZE, x, A_VAL, B_VAL)
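// A second embedded copy of the helper header follows; the ARM_COMPUTE_HELPER_H guard
// defined above makes this repetition a no-op at compile time.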
#ifndef ARM_COMPUTE_HELPER_H
|
|
#define ARM_COMPUTE_HELPER_H
|
|
|
|
|
|
|
|
|
|
#define STORE_ROW_1(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
|
|
VSTORE(N0) \
|
|
(BASENAME##0, 0, (__global DATA_TYPE *)(PTR + 0 * STRIDE_Y + Z##0));
|
|
|
|
#define STORE_ROW_2(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
|
|
STORE_ROW_1(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
|
|
VSTORE(N0) \
|
|
(BASENAME##1, 0, (__global DATA_TYPE *)(PTR + 1 * STRIDE_Y + Z##1));
|
|
|
|
#define STORE_ROW_3(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
|
|
STORE_ROW_2(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
|
|
VSTORE(N0) \
|
|
(BASENAME##2, 0, (__global DATA_TYPE *)(PTR + 2 * STRIDE_Y + Z##2));
|
|
|
|
#define STORE_ROW_4(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
|
|
STORE_ROW_3(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
|
|
VSTORE(N0) \
|
|
(BASENAME##3, 0, (__global DATA_TYPE *)(PTR + 3 * STRIDE_Y + Z##3));
|
|
|
|
#define STORE_ROW_5(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
|
|
STORE_ROW_4(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
|
|
VSTORE(N0) \
|
|
(BASENAME##4, 0, (__global DATA_TYPE *)(PTR + 4 * STRIDE_Y + Z##4));
|
|
|
|
#define STORE_ROW_6(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
|
|
STORE_ROW_5(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
|
|
VSTORE(N0) \
|
|
(BASENAME##5, 0, (__global DATA_TYPE *)(PTR + 5 * STRIDE_Y + Z##5));
|
|
|
|
#define STORE_ROW_7(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
|
|
STORE_ROW_6(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
|
|
VSTORE(N0) \
|
|
(BASENAME##6, 0, (__global DATA_TYPE *)(PTR + 6 * STRIDE_Y + Z##6));
|
|
|
|
#define STORE_ROW_8(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
|
|
STORE_ROW_7(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
|
|
VSTORE(N0) \
|
|
(BASENAME##7, 0, (__global DATA_TYPE *)(PTR + 7 * STRIDE_Y + Z##7));
|
|
|
|
#define STORE_ROW_9(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
|
|
STORE_ROW_8(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
|
|
VSTORE(N0) \
|
|
(BASENAME##8, 0, (__global DATA_TYPE *)(PTR + 8 * STRIDE_Y + Z##8));
|
|
|
|
#define STORE_ROW_10(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
|
|
STORE_ROW_9(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
|
|
VSTORE(N0) \
|
|
(BASENAME##9, 0, (__global DATA_TYPE *)(PTR + 9 * STRIDE_Y + Z##9));
|
|
|
|
#define STORE_ROW_11(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
|
|
STORE_ROW_10(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
|
|
VSTORE(N0) \
|
|
(BASENAME##A, 0, (__global DATA_TYPE *)(PTR + 10 * STRIDE_Y + Z##A));
|
|
|
|
#define STORE_ROW_12(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
|
|
STORE_ROW_11(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
|
|
VSTORE(N0) \
|
|
(BASENAME##B, 0, (__global DATA_TYPE *)(PTR + 11 * STRIDE_Y + Z##B));
|
|
|
|
#define STORE_ROW_13(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
|
|
STORE_ROW_12(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
|
|
VSTORE(N0) \
|
|
(BASENAME##C, 0, (__global DATA_TYPE *)(PTR + 12 * STRIDE_Y + Z##C));
|
|
|
|
#define STORE_ROW_14(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
|
|
STORE_ROW_13(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
|
|
VSTORE(N0) \
|
|
(BASENAME##D, 0, (__global DATA_TYPE *)(PTR + 13 * STRIDE_Y + Z##D));
|
|
|
|
#define STORE_ROW_15(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
|
|
STORE_ROW_14(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
|
|
VSTORE(N0) \
|
|
(BASENAME##E, 0, (__global DATA_TYPE *)(PTR + 14 * STRIDE_Y + Z##E));
|
|
|
|
#define STORE_ROW_16(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
|
|
STORE_ROW_15(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
|
|
VSTORE(N0) \
|
|
(BASENAME##F, 0, (__global DATA_TYPE *)(PTR + 15 * STRIDE_Y + Z##F));
|
|
|
|
|
|
|
|
#define CONVERT_STORE_ROW_1(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
|
|
VSTORE(N0) \
|
|
(CONVERT_SAT((BASENAME##0), VEC_DATA_TYPE(DATA_TYPE, N0)), 0, (__global DATA_TYPE *)(PTR + 0 * STRIDE_Y + Z##0));
|
|
|
|
#define CONVERT_STORE_ROW_2(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
|
|
CONVERT_STORE_ROW_1(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
|
|
VSTORE(N0) \
|
|
(CONVERT_SAT((BASENAME##1), VEC_DATA_TYPE(DATA_TYPE, N0)), 0, (__global DATA_TYPE *)(PTR + 1 * STRIDE_Y + Z##1));
|
|
|
|
#define CONVERT_STORE_ROW_3(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
|
|
CONVERT_STORE_ROW_2(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
|
|
VSTORE(N0) \
|
|
(CONVERT_SAT((BASENAME##2), VEC_DATA_TYPE(DATA_TYPE, N0)), 0, (__global DATA_TYPE *)(PTR + 2 * STRIDE_Y + Z##2));
|
|
|
|
#define CONVERT_STORE_ROW_4(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
|
|
CONVERT_STORE_ROW_3(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
|
|
VSTORE(N0) \
|
|
(CONVERT_SAT((BASENAME##3), VEC_DATA_TYPE(DATA_TYPE, N0)), 0, (__global DATA_TYPE *)(PTR + 3 * STRIDE_Y + Z##3));
|
|
|
|
#define CONVERT_STORE_ROW_5(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
|
|
CONVERT_STORE_ROW_4(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
|
|
VSTORE(N0) \
|
|
(CONVERT_SAT((BASENAME##4), VEC_DATA_TYPE(DATA_TYPE, N0)), 0, (__global DATA_TYPE *)(PTR + 4 * STRIDE_Y + Z##4));
|
|
|
|
#define CONVERT_STORE_ROW_6(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
|
|
CONVERT_STORE_ROW_5(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
|
|
VSTORE(N0) \
|
|
(CONVERT_SAT((BASENAME##5), VEC_DATA_TYPE(DATA_TYPE, N0)), 0, (__global DATA_TYPE *)(PTR + 5 * STRIDE_Y + Z##5));
|
|
|
|
#define CONVERT_STORE_ROW_7(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
|
|
CONVERT_STORE_ROW_6(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
|
|
VSTORE(N0) \
|
|
(CONVERT_SAT((BASENAME##6), VEC_DATA_TYPE(DATA_TYPE, N0)), 0, (__global DATA_TYPE *)(PTR + 6 * STRIDE_Y + Z##6));
|
|
|
|
#define CONVERT_STORE_ROW_8(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
|
|
CONVERT_STORE_ROW_7(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
|
|
VSTORE(N0) \
|
|
(CONVERT_SAT((BASENAME##7), VEC_DATA_TYPE(DATA_TYPE, N0)), 0, (__global DATA_TYPE *)(PTR + 7 * STRIDE_Y + Z##7));
|
|
|
|
#define CONVERT_STORE_ROW_9(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
|
|
CONVERT_STORE_ROW_8(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
|
|
VSTORE(N0) \
|
|
(CONVERT_SAT((BASENAME##8), VEC_DATA_TYPE(DATA_TYPE, N0)), 0, (__global DATA_TYPE *)(PTR + 8 * STRIDE_Y + Z##8));
|
|
|
|
#define CONVERT_STORE_ROW_10(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
    CONVERT_STORE_ROW_9(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z)      \
    VSTORE(N0)                                                           \
    (CONVERT_SAT((BASENAME##9), VEC_DATA_TYPE(DATA_TYPE, N0)), 0, (__global DATA_TYPE *)(PTR + 9 * STRIDE_Y + Z##9));
|
|
|
|
#define CONVERT_STORE_ROW_11(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
|
|
CONVERT_STORE_ROW_10(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
|
|
VSTORE(N0) \
|
|
(CONVERT_SAT((BASENAME##A), VEC_DATA_TYPE(DATA_TYPE, N0)), 0, (__global DATA_TYPE *)(PTR + 10 * STRIDE_Y + Z##A));
|
|
|
|
#define CONVERT_STORE_ROW_12(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
|
|
CONVERT_STORE_ROW_11(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
|
|
VSTORE(N0) \
|
|
(CONVERT_SAT((BASENAME##B), VEC_DATA_TYPE(DATA_TYPE, N0)), 0, (__global DATA_TYPE *)(PTR + 11 * STRIDE_Y + Z##B));
|
|
|
|
#define CONVERT_STORE_ROW_13(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
|
|
CONVERT_STORE_ROW_12(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
|
|
VSTORE(N0) \
|
|
(CONVERT_SAT((BASENAME##C), VEC_DATA_TYPE(DATA_TYPE, N0)), 0, (__global DATA_TYPE *)(PTR + 12 * STRIDE_Y + Z##C));
|
|
|
|
#define CONVERT_STORE_ROW_14(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
|
|
CONVERT_STORE_ROW_13(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
|
|
VSTORE(N0) \
|
|
(CONVERT_SAT((BASENAME##D), VEC_DATA_TYPE(DATA_TYPE, N0)), 0, (__global DATA_TYPE *)(PTR + 13 * STRIDE_Y + Z##D));
|
|
|
|
#define CONVERT_STORE_ROW_15(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
|
|
CONVERT_STORE_ROW_14(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
|
|
VSTORE(N0) \
|
|
(CONVERT_SAT((BASENAME##E), VEC_DATA_TYPE(DATA_TYPE, N0)), 0, (__global DATA_TYPE *)(PTR + 14 * STRIDE_Y + Z##E));
|
|
|
|
#define CONVERT_STORE_ROW_16(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
|
|
CONVERT_STORE_ROW_15(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
|
|
VSTORE(N0) \
|
|
(CONVERT_SAT((BASENAME##F), VEC_DATA_TYPE(DATA_TYPE, N0)), 0, (__global DATA_TYPE *)(PTR + 15 * STRIDE_Y + Z##F));
|
|
|
|
|
|
|
|
|
|
#define STORE_BLOCK_STR(M0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) STORE_ROW_##M0(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z)
#define STORE_BLOCK(M0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) STORE_BLOCK_STR(M0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z)

#define CONVERT_STORE_BLOCK_STR(M0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) CONVERT_STORE_ROW_##M0(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z)
#define CONVERT_STORE_BLOCK(M0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) CONVERT_STORE_BLOCK_STR(M0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z)
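// Illustrative usage (assumption, not from the upstream header): with M0 = 2 rows of N0 = 4
// values named c0 and c1, STORE_BLOCK(2, 4, float, c, dst_addr, dst_stride_y, zout) expands
// through STORE_ROW_2 into two vstore4 calls:
//   vstore4(c0, 0, (__global float *)(dst_addr + 0 * dst_stride_y + zout0));
//   vstore4(c1, 0, (__global float *)(dst_addr + 1 * dst_stride_y + zout1));
// dst_addr, dst_stride_y and the zout offsets are hypothetical kernel-side variables.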
|
|
|
|
|
|
|
|
#define STORE_ROW_PARTIAL_1(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
|
|
VSTORE_PARTIAL(N0, STORE_N0) \
|
|
(BASENAME##0, 0, (__global DATA_TYPE *)(PTR + 0 * STRIDE_Y + Z##0));
|
|
|
|
#define STORE_ROW_PARTIAL_2(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
|
|
STORE_ROW_PARTIAL_1(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
|
|
VSTORE_PARTIAL(N0, STORE_N0) \
|
|
(BASENAME##1, 0, (__global DATA_TYPE *)(PTR + 1 * STRIDE_Y + Z##1));
|
|
|
|
#define STORE_ROW_PARTIAL_3(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
|
|
STORE_ROW_PARTIAL_2(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
|
|
VSTORE_PARTIAL(N0, STORE_N0) \
|
|
(BASENAME##2, 0, (__global DATA_TYPE *)(PTR + 2 * STRIDE_Y + Z##2));
|
|
|
|
#define STORE_ROW_PARTIAL_4(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
|
|
STORE_ROW_PARTIAL_3(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
|
|
VSTORE_PARTIAL(N0, STORE_N0) \
|
|
(BASENAME##3, 0, (__global DATA_TYPE *)(PTR + 3 * STRIDE_Y + Z##3));
|
|
|
|
#define STORE_ROW_PARTIAL_5(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
|
|
STORE_ROW_PARTIAL_4(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
|
|
VSTORE_PARTIAL(N0, STORE_N0) \
|
|
(BASENAME##4, 0, (__global DATA_TYPE *)(PTR + 4 * STRIDE_Y + Z##4));
|
|
|
|
#define STORE_ROW_PARTIAL_6(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
|
|
STORE_ROW_PARTIAL_5(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
|
|
VSTORE_PARTIAL(N0, STORE_N0) \
|
|
(BASENAME##5, 0, (__global DATA_TYPE *)(PTR + 5 * STRIDE_Y + Z##5));
|
|
|
|
#define STORE_ROW_PARTIAL_7(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
|
|
STORE_ROW_PARTIAL_6(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
|
|
VSTORE_PARTIAL(N0, STORE_N0) \
|
|
(BASENAME##6, 0, (__global DATA_TYPE *)(PTR + 6 * STRIDE_Y + Z##6));
|
|
|
|
#define STORE_ROW_PARTIAL_8(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
|
|
STORE_ROW_PARTIAL_7(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
|
|
VSTORE_PARTIAL(N0, STORE_N0) \
|
|
(BASENAME##7, 0, (__global DATA_TYPE *)(PTR + 7 * STRIDE_Y + Z##7));
|
|
|
|
#define STORE_ROW_PARTIAL_9(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
|
|
STORE_ROW_PARTIAL_8(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
|
|
VSTORE_PARTIAL(N0, STORE_N0) \
|
|
(BASENAME##8, 0, (__global DATA_TYPE *)(PTR + 8 * STRIDE_Y + Z##8));
|
|
|
|
#define STORE_ROW_PARTIAL_10(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
|
|
STORE_ROW_PARTIAL_9(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
|
|
VSTORE_PARTIAL(N0, STORE_N0) \
|
|
(BASENAME##9, 0, (__global DATA_TYPE *)(PTR + 9 * STRIDE_Y + Z##9));
|
|
|
|
#define STORE_ROW_PARTIAL_11(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
|
|
STORE_ROW_PARTIAL_10(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
|
|
VSTORE_PARTIAL(N0, STORE_N0) \
|
|
(BASENAME##A, 0, (__global DATA_TYPE *)(PTR + 10 * STRIDE_Y + Z##A));
|
|
|
|
#define STORE_ROW_PARTIAL_12(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
|
|
STORE_ROW_PARTIAL_11(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
|
|
VSTORE_PARTIAL(N0, STORE_N0) \
|
|
(BASENAME##B, 0, (__global DATA_TYPE *)(PTR + 11 * STRIDE_Y + Z##B));
|
|
|
|
#define STORE_ROW_PARTIAL_13(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
|
|
STORE_ROW_PARTIAL_12(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
|
|
VSTORE_PARTIAL(N0, STORE_N0) \
|
|
(BASENAME##C, 0, (__global DATA_TYPE *)(PTR + 12 * STRIDE_Y + Z##C));
|
|
|
|
#define STORE_ROW_PARTIAL_14(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
|
|
STORE_ROW_PARTIAL_13(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
|
|
VSTORE_PARTIAL(N0, STORE_N0) \
|
|
(BASENAME##D, 0, (__global DATA_TYPE *)(PTR + 13 * STRIDE_Y + Z##D));
|
|
|
|
#define STORE_ROW_PARTIAL_15(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
|
|
STORE_ROW_PARTIAL_14(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
|
|
VSTORE_PARTIAL(N0, STORE_N0) \
|
|
(BASENAME##E, 0, (__global DATA_TYPE *)(PTR + 14 * STRIDE_Y + Z##E));
|
|
|
|
#define STORE_ROW_PARTIAL_16(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
|
|
STORE_ROW_PARTIAL_15(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
|
|
VSTORE_PARTIAL(N0, STORE_N0) \
|
|
(BASENAME##F, 0, (__global DATA_TYPE *)(PTR + 15 * STRIDE_Y + Z##F));
|
|
|
|
|
|
|
|
#define STORE_BLOCK_PARTIAL_STR(STORE_M0, STORE_N0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) STORE_ROW_PARTIAL_##STORE_M0(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z)
#define STORE_BLOCK_PARTIAL(STORE_M0, STORE_N0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) STORE_BLOCK_PARTIAL_STR(STORE_M0, STORE_N0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z)

#define STORE_BLOCK_PARTIAL_IN_X_AND_Y(M0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z, PARTIAL_STORE_M0, PARTIAL_STORE_N0, PARTIAL_COND_Y, PARTIAL_COND_X) \
    if(!(PARTIAL_COND_X) && !(PARTIAL_COND_Y))                                                        \
    {                                                                                                 \
        STORE_BLOCK_PARTIAL(M0, N0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z);                       \
    }                                                                                                 \
    else if((PARTIAL_COND_Y) && !(PARTIAL_COND_X))                                                    \
    {                                                                                                 \
        STORE_BLOCK_PARTIAL(PARTIAL_STORE_M0, N0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z);         \
    }                                                                                                 \
    else if(!(PARTIAL_COND_Y) && (PARTIAL_COND_X))                                                    \
    {                                                                                                 \
        STORE_BLOCK_PARTIAL(M0, PARTIAL_STORE_N0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z);         \
    }                                                                                                 \
    else                                                                                              \
    {                                                                                                 \
        STORE_BLOCK_PARTIAL(PARTIAL_STORE_M0, PARTIAL_STORE_N0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z); \
    }

#define STORE_BLOCK_PARTIAL_IN_X(M0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z, PARTIAL_STORE_N0, PARTIAL_COND_X) \
    if(!(PARTIAL_COND_X))                                                                     \
    {                                                                                         \
        STORE_BLOCK_PARTIAL(M0, N0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z);               \
    }                                                                                         \
    else                                                                                      \
    {                                                                                         \
        STORE_BLOCK_PARTIAL(M0, PARTIAL_STORE_N0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z); \
    }

#define STORE_BLOCK_PARTIAL_IN_Y(M0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z, PARTIAL_STORE_M0, PARTIAL_COND_Y) \
    if(!(PARTIAL_COND_Y))                                                                     \
    {                                                                                         \
        STORE_BLOCK_PARTIAL(M0, N0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z);               \
    }                                                                                         \
    else                                                                                      \
    {                                                                                         \
        STORE_BLOCK_PARTIAL(PARTIAL_STORE_M0, N0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z); \
    }
|
|
|
|
|
|
#if defined(PARTIAL_STORE_M0) && defined(PARTIAL_STORE_N0)

#if PARTIAL_STORE_M0 == 0 && PARTIAL_STORE_N0 == 0

#define STORE_BLOCK_BOUNDARY_AWARE(M0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z, PARTIAL_STORE_M0, PARTIAL_STORE_N0, PARTIAL_COND_Y, PARTIAL_COND_X) \
    STORE_BLOCK(M0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z)

#elif PARTIAL_STORE_M0 > 0 && PARTIAL_STORE_N0 == 0

#define STORE_BLOCK_BOUNDARY_AWARE(M0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z, PARTIAL_STORE_M0, PARTIAL_STORE_N0, PARTIAL_COND_Y, PARTIAL_COND_X) \
    STORE_BLOCK_PARTIAL_IN_Y(M0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z, PARTIAL_STORE_M0, PARTIAL_COND_Y)

#elif PARTIAL_STORE_M0 == 0 && PARTIAL_STORE_N0 > 0

#define STORE_BLOCK_BOUNDARY_AWARE(M0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z, PARTIAL_STORE_M0, PARTIAL_STORE_N0, PARTIAL_COND_Y, PARTIAL_COND_X) \
    STORE_BLOCK_PARTIAL_IN_X(M0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z, PARTIAL_STORE_N0, PARTIAL_COND_X)

#else

#define STORE_BLOCK_BOUNDARY_AWARE(M0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z, PARTIAL_STORE_M0, PARTIAL_STORE_N0, PARTIAL_COND_Y, PARTIAL_COND_X) \
    STORE_BLOCK_PARTIAL_IN_X_AND_Y(M0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z, PARTIAL_STORE_M0, PARTIAL_STORE_N0, PARTIAL_COND_Y, PARTIAL_COND_X)

#endif

#endif
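// Illustrative note (assumption): PARTIAL_STORE_M0/PARTIAL_STORE_N0 are the compile-time
// leftovers (M % M0 and N % N0), and the #if ladder above keeps only the branches a given
// build can actually hit. A kernel typically computes the runtime conditions once, e.g.
//   const bool cond_y = ((get_global_id(1) + 1) * M0 >= M);
//   const bool cond_x = ((get_global_id(0) + 1) * N0 >= N);
//   STORE_BLOCK_BOUNDARY_AWARE(M0, N0, float, c, dst_addr, dst_stride_y, zout,
//                              PARTIAL_STORE_M0, PARTIAL_STORE_N0, cond_y, cond_x);
// where M, N, dst_addr, dst_stride_y and zout are hypothetical kernel-side names.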
|
|
|
|
|
|
#if defined(PARTIAL_STORE_M0)
#define COMPUTE_M0_START_ROW(y, M0, PARTIAL_STORE_M0) \
    ((uint)(max(0, (int)(y * M0) - (int)((M0 - PARTIAL_STORE_M0) % M0))))
#else
#define COMPUTE_M0_START_ROW(y, M0, PARTIAL_STORE_M0) \
    ((uint)(y * M0))
#endif

#define STORE_VECTOR_SELECT(basename, data_type, ptr, vec_size, leftover, cond) \
    STORE_BLOCK_PARTIAL_IN_X(1, vec_size, data_type, basename, ptr, 0, 0, leftover, cond)
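// Worked example (illustrative assumption): with M0 = 4 and PARTIAL_STORE_M0 = 1, the shift
// (M0 - PARTIAL_STORE_M0) % M0 equals 3, so COMPUTE_M0_START_ROW maps y = 0, 1, 2 to rows
// 0, max(0, 4 - 3) = 1 and max(0, 8 - 3) = 5: the 1-row partial block is handled once at the
// top of the output and every later work-item processes a full, shifted block of 4 rows.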
|
|
|
|
|
|
#if defined(ARM_COMPUTE_OPENCL_FP16_ENABLED) && defined(cl_khr_fp16)
#pragma OPENCL EXTENSION cl_khr_fp16 : enable
#endif

#if defined(ARM_COMPUTE_OPENCL_DOT8_ENABLED) && defined(cl_arm_integer_dot_product_int8)
#pragma OPENCL EXTENSION cl_arm_integer_dot_product_int8 : enable
#endif

#if defined(ARM_COMPUTE_OPENCL_DOT8_ACC_ENABLED) && defined(cl_arm_integer_dot_product_accumulate_int8)
#pragma OPENCL EXTENSION cl_arm_integer_dot_product_accumulate_int8 : enable
#endif

#if defined(ARM_COMPUTE_DEBUG_ENABLED) && defined(cl_arm_printf)
#pragma OPENCL EXTENSION cl_arm_printf : enable
#endif
|
|
|
|
#define GPU_ARCH_MIDGARD 0x100
#define GPU_ARCH_BIFROST 0x200
#define GPU_ARCH_VALHALL 0x300

#define CONCAT(a, b) a##b

#define EXPAND(x) x

#define CLAMP(x, min_val, max_val) min(max(x, min_val), max_val)

#define REV1(x) ((x))
#define REV2(x) ((x).s10)
#define REV3(x) ((x).s210)
#define REV4(x) ((x).s3210)
#define REV8(x) ((x).s76543210)
#define REV16(x) ((x).sFEDCBA9876543210)

#define REVERSE_STR(x, s) REV##s((x))
#define REVERSE(x, s) REVERSE_STR(x, s)
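// Illustrative usage (assumption): REVERSE(v, 4) expands through REVERSE_STR to REV4(v),
// i.e. the swizzle ((v).s3210); for a hypothetical uchar4 v = (uchar4)(1, 2, 3, 4) the
// result is (uchar4)(4, 3, 2, 1). The size argument must match the actual vector width.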
|
|
|
|
|
|
|
|
#define ROT1_0(x) ((x))
|
|
#define ROT1_1(x) ((x))
|
|
|
|
#define ROT2_0(x) ((x))
|
|
#define ROT2_1(x) ((x).s10)
|
|
#define ROT2_2(x) ((x))
|
|
|
|
#define ROT3_0(x) ((x))
|
|
#define ROT3_1(x) ((x).s201)
|
|
#define ROT3_2(x) ((x).s120)
|
|
#define ROT3_3(x) ((x))
|
|
|
|
#define ROT4_0(x) ((x))
|
|
#define ROT4_1(x) ((x).s3012)
|
|
#define ROT4_2(x) ((x).s2301)
|
|
#define ROT4_3(x) ((x).s1230)
|
|
#define ROT4_4(x) ((x))
|
|
|
|
#define ROT8_0(x) ((x))
|
|
#define ROT8_1(x) ((x).s70123456)
|
|
#define ROT8_2(x) ((x).s67012345)
|
|
#define ROT8_3(x) ((x).s56701234)
|
|
#define ROT8_4(x) ((x).s45670123)
|
|
#define ROT8_5(x) ((x).s34567012)
|
|
#define ROT8_6(x) ((x).s23456701)
|
|
#define ROT8_7(x) ((x).s12345670)
|
|
#define ROT8_8(x) ((x))
|
|
|
|
#define ROT16_0(x) ((x))
|
|
#define ROT16_1(x) ((x).sF0123456789ABCDE)
|
|
#define ROT16_2(x) ((x).sEF0123456789ABCD)
|
|
#define ROT16_3(x) ((x).sDEF0123456789ABC)
|
|
#define ROT16_4(x) ((x).sCDEF0123456789AB)
|
|
#define ROT16_5(x) ((x).sBCDEF0123456789A)
|
|
#define ROT16_6(x) ((x).sABCDEF0123456789)
|
|
#define ROT16_7(x) ((x).s9ABCDEF012345678)
|
|
#define ROT16_8(x) ((x).s89ABCDEF01234567)
|
|
#define ROT16_9(x) ((x).s789ABCDEF0123456)
|
|
#define ROT16_10(x) ((x).s6789ABCDEF012345)
|
|
#define ROT16_11(x) ((x).s56789ABCDEF01234)
|
|
#define ROT16_12(x) ((x).s456789ABCDEF0123)
|
|
#define ROT16_13(x) ((x).s3456789ABCDEF012)
|
|
#define ROT16_14(x) ((x).s23456789ABCDEF01)
|
|
#define ROT16_15(x) ((x).s123456789ABCDEF0)
|
|
#define ROT16_16(x) ((x))
|
|
|
|
|
|
|
|
#define ROTATE_STR(x, s, n) ROT##s##_##n(x)
#define ROTATE(x, s, n) ROTATE_STR(x, s, n)

#define V_OFFS1(dt) (dt##1)(0)
#define V_OFFS2(dt) (dt##2)(0, 1)
#define V_OFFS3(dt) (dt##3)(0, 1, 2)
#define V_OFFS4(dt) (dt##4)(0, 1, 2, 3)
#define V_OFFS8(dt) (dt##8)(0, 1, 2, 3, 4, 5, 6, 7)
#define V_OFFS16(dt) (dt##16)(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15)

#define VEC_OFFS_STR(dt, s) V_OFFS##s(dt)
#define VEC_OFFS(dt, s) VEC_OFFS_STR(dt, s)
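// Illustrative usage (assumption): VEC_OFFS(int, 4) expands to (int4)(0, 1, 2, 3); it is
// typically added to a scalar base index to build per-lane coordinates, e.g.
//   int4 x_coords = (int4)x_base + VEC_OFFS(int, 4);
// where x_base is a hypothetical scalar column index.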
|
|
|
|
|
|
#define VLOAD_STR(size) vload##size
#define VLOAD(size) VLOAD_STR(size)

#define VLOAD_PARTIAL_STR(size, load_size) vload_partial_##size##_##load_size
#define VLOAD_PARTIAL(size, load_size) VLOAD_PARTIAL_STR(size, load_size)

#define NO_LOAD(data, offs, ptr) \
    {                            \
    }
|
|
|
|
|
|
#define vload_partial_1_0 NO_LOAD
|
|
#define vload_partial_1_1 vload1
|
|
#define vload_partial_1_2 NO_LOAD
|
|
#define vload_partial_1_3 NO_LOAD
|
|
#define vload_partial_1_4 NO_LOAD
|
|
#define vload_partial_1_5 NO_LOAD
|
|
#define vload_partial_1_6 NO_LOAD
|
|
#define vload_partial_1_7 NO_LOAD
|
|
#define vload_partial_1_8 NO_LOAD
|
|
#define vload_partial_1_9 NO_LOAD
|
|
#define vload_partial_1_10 NO_LOAD
|
|
#define vload_partial_1_11 NO_LOAD
|
|
#define vload_partial_1_12 NO_LOAD
|
|
#define vload_partial_1_13 NO_LOAD
|
|
#define vload_partial_1_14 NO_LOAD
|
|
#define vload_partial_1_15 NO_LOAD
|
|
#define vload_partial_1_16 NO_LOAD
|
|
|
|
#define vload_partial_2_0 NO_LOAD
|
|
#define vload_partial_2_1 vload_partial_1
|
|
#define vload_partial_2_2 vload_partial_2
|
|
#define vload_partial_2_3 NO_LOAD
|
|
#define vload_partial_2_4 NO_LOAD
|
|
#define vload_partial_2_5 NO_LOAD
|
|
#define vload_partial_2_6 NO_LOAD
|
|
#define vload_partial_2_7 NO_LOAD
|
|
#define vload_partial_2_8 NO_LOAD
|
|
#define vload_partial_2_9 NO_LOAD
|
|
#define vload_partial_2_10 NO_LOAD
|
|
#define vload_partial_2_11 NO_LOAD
|
|
#define vload_partial_2_12 NO_LOAD
|
|
#define vload_partial_2_13 NO_LOAD
|
|
#define vload_partial_2_14 NO_LOAD
|
|
#define vload_partial_2_15 NO_LOAD
|
|
#define vload_partial_2_16 NO_LOAD
|
|
|
|
#define vload_partial_3_0 NO_LOAD
|
|
#define vload_partial_3_1 vload_partial_1
|
|
#define vload_partial_3_2 vload_partial_2
|
|
#define vload_partial_3_3 vload_partial_3
|
|
#define vload_partial_3_4 NO_LOAD
|
|
#define vload_partial_3_5 NO_LOAD
|
|
#define vload_partial_3_6 NO_LOAD
|
|
#define vload_partial_3_7 NO_LOAD
|
|
#define vload_partial_3_8 NO_LOAD
|
|
#define vload_partial_3_9 NO_LOAD
|
|
#define vload_partial_3_10 NO_LOAD
|
|
#define vload_partial_3_11 NO_LOAD
|
|
#define vload_partial_3_12 NO_LOAD
|
|
#define vload_partial_3_13 NO_LOAD
|
|
#define vload_partial_3_14 NO_LOAD
|
|
#define vload_partial_3_15 NO_LOAD
|
|
#define vload_partial_3_16 NO_LOAD
|
|
|
|
#define vload_partial_4_0 NO_LOAD
|
|
#define vload_partial_4_1 vload_partial_1
|
|
#define vload_partial_4_2 vload_partial_2
|
|
#define vload_partial_4_3 vload_partial_3
|
|
#define vload_partial_4_4 vload_partial_4
|
|
#define vload_partial_4_5 NO_LOAD
|
|
#define vload_partial_4_6 NO_LOAD
|
|
#define vload_partial_4_7 NO_LOAD
|
|
#define vload_partial_4_8 NO_LOAD
|
|
#define vload_partial_4_9 NO_LOAD
|
|
#define vload_partial_4_10 NO_LOAD
|
|
#define vload_partial_4_11 NO_LOAD
|
|
#define vload_partial_4_12 NO_LOAD
|
|
#define vload_partial_4_13 NO_LOAD
|
|
#define vload_partial_4_14 NO_LOAD
|
|
#define vload_partial_4_15 NO_LOAD
|
|
#define vload_partial_4_16 NO_LOAD
|
|
|
|
#define vload_partial_8_0 NO_LOAD
|
|
#define vload_partial_8_1 vload_partial_1
|
|
#define vload_partial_8_2 vload_partial_2
|
|
#define vload_partial_8_3 vload_partial_3
|
|
#define vload_partial_8_4 vload_partial_4
|
|
#define vload_partial_8_5 vload_partial_5
|
|
#define vload_partial_8_6 vload_partial_6
|
|
#define vload_partial_8_7 vload_partial_7
|
|
#define vload_partial_8_8 vload_partial_8
|
|
#define vload_partial_8_9 NO_LOAD
|
|
#define vload_partial_8_10 NO_LOAD
|
|
#define vload_partial_8_11 NO_LOAD
|
|
#define vload_partial_8_12 NO_LOAD
|
|
#define vload_partial_8_13 NO_LOAD
|
|
#define vload_partial_8_14 NO_LOAD
|
|
#define vload_partial_8_15 NO_LOAD
|
|
#define vload_partial_8_16 NO_LOAD
|
|
|
|
#define vload_partial_16_0 NO_LOAD
|
|
#define vload_partial_16_1 vload_partial_1
|
|
#define vload_partial_16_2 vload_partial_2
|
|
#define vload_partial_16_3 vload_partial_3
|
|
#define vload_partial_16_4 vload_partial_4
|
|
#define vload_partial_16_5 vload_partial_5
|
|
#define vload_partial_16_6 vload_partial_6
|
|
#define vload_partial_16_7 vload_partial_7
|
|
#define vload_partial_16_8 vload_partial_8
|
|
#define vload_partial_16_9 vload_partial_9
|
|
#define vload_partial_16_10 vload_partial_10
|
|
#define vload_partial_16_11 vload_partial_11
|
|
#define vload_partial_16_12 vload_partial_12
|
|
#define vload_partial_16_13 vload_partial_13
|
|
#define vload_partial_16_14 vload_partial_14
|
|
#define vload_partial_16_15 vload_partial_15
|
|
#define vload_partial_16_16 vload_partial_16
|
|
|
|
|
|
#define vload_partial_1(DATA, OFFSET, PTR) \
    DATA.s0 = vload1(OFFSET, PTR);

#define vload_partial_2(DATA, OFFSET, PTR) \
    DATA.s01 = vload2(OFFSET, PTR);

#define vload_partial_3(DATA, OFFSET, PTR) \
    DATA.s012 = vload3(OFFSET, PTR);

#define vload_partial_4(DATA, OFFSET, PTR) \
    DATA.s0123 = vload4(OFFSET, PTR);

#define vload_partial_5(DATA, OFFSET, PTR)    \
    vload_partial_4(DATA.s0123, OFFSET, PTR); \
    DATA.s4 = vload1(OFFSET, PTR + 4);

#define vload_partial_6(DATA, OFFSET, PTR)    \
    vload_partial_4(DATA.s0123, OFFSET, PTR); \
    vload_partial_2(DATA.s45, OFFSET, PTR + 4);

#define vload_partial_7(DATA, OFFSET, PTR)    \
    vload_partial_4(DATA.s0123, OFFSET, PTR); \
    vload_partial_3(DATA.s456, OFFSET, PTR + 4);

#define vload_partial_8(DATA, OFFSET, PTR) \
    DATA.s01234567 = vload8(OFFSET, PTR);

#define vload_partial_9(DATA, OFFSET, PTR)        \
    vload_partial_8(DATA.s01234567, OFFSET, PTR); \
    DATA.s8 = vload1(OFFSET, PTR + 8);

#define vload_partial_10(DATA, OFFSET, PTR)       \
    vload_partial_8(DATA.s01234567, OFFSET, PTR); \
    vload_partial_2(DATA.s89, OFFSET, PTR + 8);

#define vload_partial_11(DATA, OFFSET, PTR)       \
    vload_partial_8(DATA.s01234567, OFFSET, PTR); \
    vload_partial_3(DATA.s89A, OFFSET, PTR + 8);

#define vload_partial_12(DATA, OFFSET, PTR)       \
    vload_partial_8(DATA.s01234567, OFFSET, PTR); \
    vload_partial_4(DATA.s89AB, OFFSET, PTR + 8);

#define vload_partial_13(DATA, OFFSET, PTR)       \
    vload_partial_8(DATA.s01234567, OFFSET, PTR); \
    vload_partial_5(DATA.s89ABCDEF, OFFSET, PTR + 8);

#define vload_partial_14(DATA, OFFSET, PTR)       \
    vload_partial_8(DATA.s01234567, OFFSET, PTR); \
    vload_partial_6(DATA.s89ABCDEF, OFFSET, PTR + 8);

#define vload_partial_15(DATA, OFFSET, PTR)       \
    vload_partial_8(DATA.s01234567, OFFSET, PTR); \
    vload_partial_7(DATA.s89ABCDEF, OFFSET, PTR + 8);

#define vload_partial_16(DATA, OFFSET, PTR) \
    DATA = vload16(OFFSET, PTR);
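// Illustrative usage (assumption): VLOAD_PARTIAL(N0, LEFTOVER)(data, 0, src) resolves at
// compile time to vload_partial_<N0>_<LEFTOVER>. With N0 = 4 and LEFTOVER = 3 it picks
// vload_partial_3, which fills only data.s012 with a single vload3; with LEFTOVER = 0 it
// picks NO_LOAD and emits no code. "data" and "src" are hypothetical kernel-side names.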
|
|
|
|
|
|
|
|
#define PIXEL_UNIT4 1
#define PIXEL_UNIT8 2
#define PIXEL_UNIT16 4

#define CONVERT_VECTOR_SIZE_TO_PIXEL_UNIT_STR(vec_size) PIXEL_UNIT##vec_size
#define CONVERT_VECTOR_SIZE_TO_PIXEL_UNIT(vec_size) CONVERT_VECTOR_SIZE_TO_PIXEL_UNIT_STR(vec_size)
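// Illustrative note (assumption): CONVERT_VECTOR_SIZE_TO_PIXEL_UNIT(8) expands to PIXEL_UNIT8,
// i.e. 2, because each read_imagef/read_imageh call below returns one 4-channel pixel, so an
// 8-wide vector needs 2 image reads and a 16-wide vector needs 4.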
|
|
|
|
|
|
#define read_image2d_floatx1(img, x_coord, y_coord) (float4)(read_imagef(img, (int2)(x_coord, y_coord)));
|
|
#define read_image2d_floatx2(img, x_coord, y_coord) (float8)(read_imagef(img, (int2)(x_coord, y_coord)), read_imagef(img, (int2)(x_coord + 1, y_coord)));
|
|
#define read_image2d_floatx4(img, x_coord, y_coord) (float16)(read_imagef(img, (int2)(x_coord, y_coord)), read_imagef(img, (int2)(x_coord + 1, y_coord)), read_imagef(img, (int2)(x_coord + 2, y_coord)), read_imagef(img, (int2)(x_coord + 3, y_coord)));
|
|
|
|
#if defined(ARM_COMPUTE_OPENCL_FP16_ENABLED) && defined(cl_khr_fp16)
|
|
#define read_image2d_halfx1(img, x_coord, y_coord) (half4)(read_imageh(img, (int2)(x_coord, y_coord)));
|
|
#define read_image2d_halfx2(img, x_coord, y_coord) (half8)(read_imageh(img, (int2)(x_coord, y_coord)), read_imageh(img, (int2)(x_coord + 1, y_coord)));
|
|
#define read_image2d_halfx4(img, x_coord, y_coord) (half16)(read_imageh(img, (int2)(x_coord, y_coord)), read_imageh(img, (int2)(x_coord + 1, y_coord)), read_imageh(img, (int2)(x_coord + 2, y_coord)), read_imageh(img, (int2)(x_coord + 3, y_coord)));
|
|
#endif
|
|
|
|
#define write_image2d_floatx1(img, x_coord, y_coord, values) (write_imagef(img, (int2)(x_coord, y_coord), values));
|
|
#define write_image2d_floatx2(img, x_coord, y_coord, values) (write_imagef(img, (int2)(x_coord, y_coord), values.s0123), write_imagef(img, (int2)(x_coord + 1, y_coord), values.s4567));
|
|
#define write_image2d_floatx4(img, x_coord, y_coord, values) (write_imagef(img, (int2)(x_coord, y_coord), values.s0123), write_imagef(img, (int2)(x_coord + 1, y_coord), values.s4567), write_imagef(img, (int2)(x_coord + 2, y_coord), values.s89AB), write_imagef(img, (int2)(x_coord + 3, y_coord), values.sCDEF));
|
|
|
|
#if defined(ARM_COMPUTE_OPENCL_FP16_ENABLED) && defined(cl_khr_fp16)
|
|
#define write_image2d_halfx1(img, x_coord, y_coord, values) (write_imageh(img, (int2)(x_coord, y_coord), values));
|
|
#define write_image2d_halfx2(img, x_coord, y_coord, values) (write_imageh(img, (int2)(x_coord, y_coord), values.s0123), write_imageh(img, (int2)(x_coord + 1, y_coord), values.s4567));
|
|
#define write_image2d_halfx4(img, x_coord, y_coord, values) (write_imageh(img, (int2)(x_coord, y_coord), values.s0123), write_imageh(img, (int2)(x_coord + 1, y_coord), values.s4567), write_imageh(img, (int2)(x_coord + 2, y_coord), values.s89AB), write_imageh(img, (int2)(x_coord + 3, y_coord), values.sCDEF));
|
|
#endif
|
|
|
|
|
|
#define READ_IMAGE2D_STR(data_type, n0, img, x_coord, y_coord) read_image2d_##data_type##x##n0(img, x_coord, y_coord)
#define READ_IMAGE2D(data_type, n0, img, x_coord, y_coord) READ_IMAGE2D_STR(data_type, n0, img, x_coord, y_coord)

#define WRITE_IMAGE2D_STR(data_type, n0, img, x_coord, y_coord, values) write_image2d_##data_type##x##n0(img, x_coord, y_coord, values)
#define WRITE_IMAGE2D(data_type, n0, img, x_coord, y_coord, values) WRITE_IMAGE2D_STR(data_type, n0, img, x_coord, y_coord, values)

#define VSTORE_STR(size) vstore##size
#define VSTORE(size) VSTORE_STR(size)
|
|
|
|
#define float1 float
#define half1 half
#define char1 char
#define uchar1 uchar
#define short1 short
#define ushort1 ushort
#define int1 int
#define uint1 uint
#define long1 long
#define ulong1 ulong
#define double1 double

#define vload1(OFFSET, PTR) *(OFFSET + PTR)
#define vstore1(DATA, OFFSET, PTR) *(OFFSET + PTR) = DATA

#define VSTORE_PARTIAL_STR(size, store_size) vstore_partial_##size##_##store_size
#define VSTORE_PARTIAL(size, store_size) VSTORE_PARTIAL_STR(size, store_size)

#define NO_STORE(data, offs, ptr) \
    {                             \
    }
|
|
|
|
|
|
#define vstore_partial_1_0 NO_STORE
|
|
#define vstore_partial_1_1 vstore1
|
|
#define vstore_partial_1_2 NO_STORE
|
|
#define vstore_partial_1_3 NO_STORE
|
|
#define vstore_partial_1_4 NO_STORE
|
|
#define vstore_partial_1_5 NO_STORE
|
|
#define vstore_partial_1_6 NO_STORE
|
|
#define vstore_partial_1_7 NO_STORE
|
|
#define vstore_partial_1_8 NO_STORE
|
|
#define vstore_partial_1_9 NO_STORE
|
|
#define vstore_partial_1_10 NO_STORE
|
|
#define vstore_partial_1_11 NO_STORE
|
|
#define vstore_partial_1_12 NO_STORE
|
|
#define vstore_partial_1_13 NO_STORE
|
|
#define vstore_partial_1_14 NO_STORE
|
|
#define vstore_partial_1_15 NO_STORE
|
|
#define vstore_partial_1_16 NO_STORE
|
|
|
|
#define vstore_partial_2_0 NO_STORE
|
|
#define vstore_partial_2_1 vstore_partial_1
|
|
#define vstore_partial_2_2 vstore_partial_2
|
|
#define vstore_partial_2_3 NO_STORE
|
|
#define vstore_partial_2_4 NO_STORE
|
|
#define vstore_partial_2_5 NO_STORE
|
|
#define vstore_partial_2_6 NO_STORE
|
|
#define vstore_partial_2_7 NO_STORE
|
|
#define vstore_partial_2_8 NO_STORE
|
|
#define vstore_partial_2_9 NO_STORE
|
|
#define vstore_partial_2_10 NO_STORE
|
|
#define vstore_partial_2_11 NO_STORE
|
|
#define vstore_partial_2_12 NO_STORE
|
|
#define vstore_partial_2_13 NO_STORE
|
|
#define vstore_partial_2_14 NO_STORE
|
|
#define vstore_partial_2_15 NO_STORE
|
|
#define vstore_partial_2_16 NO_STORE
|
|
|
|
#define vstore_partial_3_0 NO_STORE
|
|
#define vstore_partial_3_1 vstore_partial_1
|
|
#define vstore_partial_3_2 vstore_partial_2
|
|
#define vstore_partial_3_3 vstore_partial_3
|
|
#define vstore_partial_3_4 NO_STORE
|
|
#define vstore_partial_3_5 NO_STORE
|
|
#define vstore_partial_3_6 NO_STORE
|
|
#define vstore_partial_3_7 NO_STORE
|
|
#define vstore_partial_3_8 NO_STORE
|
|
#define vstore_partial_3_9 NO_STORE
|
|
#define vstore_partial_3_10 NO_STORE
|
|
#define vstore_partial_3_11 NO_STORE
|
|
#define vstore_partial_3_12 NO_STORE
|
|
#define vstore_partial_3_13 NO_STORE
|
|
#define vstore_partial_3_14 NO_STORE
|
|
#define vstore_partial_3_15 NO_STORE
|
|
#define vstore_partial_3_16 NO_STORE
|
|
|
|
#define vstore_partial_4_0 NO_STORE
|
|
#define vstore_partial_4_1 vstore_partial_1
|
|
#define vstore_partial_4_2 vstore_partial_2
|
|
#define vstore_partial_4_3 vstore_partial_3
|
|
#define vstore_partial_4_4 vstore_partial_4
|
|
#define vstore_partial_4_5 NO_STORE
|
|
#define vstore_partial_4_6 NO_STORE
|
|
#define vstore_partial_4_7 NO_STORE
|
|
#define vstore_partial_4_8 NO_STORE
|
|
#define vstore_partial_4_9 NO_STORE
|
|
#define vstore_partial_4_10 NO_STORE
|
|
#define vstore_partial_4_11 NO_STORE
|
|
#define vstore_partial_4_12 NO_STORE
|
|
#define vstore_partial_4_13 NO_STORE
|
|
#define vstore_partial_4_14 NO_STORE
|
|
#define vstore_partial_4_15 NO_STORE
|
|
#define vstore_partial_4_16 NO_STORE
|
|
|
|
#define vstore_partial_8_0 NO_STORE
|
|
#define vstore_partial_8_1 vstore_partial_1
|
|
#define vstore_partial_8_2 vstore_partial_2
|
|
#define vstore_partial_8_3 vstore_partial_3
|
|
#define vstore_partial_8_4 vstore_partial_4
|
|
#define vstore_partial_8_5 vstore_partial_5
|
|
#define vstore_partial_8_6 vstore_partial_6
|
|
#define vstore_partial_8_7 vstore_partial_7
|
|
#define vstore_partial_8_8 vstore_partial_8
|
|
#define vstore_partial_8_9 NO_STORE
|
|
#define vstore_partial_8_10 NO_STORE
|
|
#define vstore_partial_8_11 NO_STORE
|
|
#define vstore_partial_8_12 NO_STORE
|
|
#define vstore_partial_8_13 NO_STORE
|
|
#define vstore_partial_8_14 NO_STORE
|
|
#define vstore_partial_8_15 NO_STORE
|
|
#define vstore_partial_8_16 NO_STORE
|
|
|
|
#define vstore_partial_16_0 NO_STORE
|
|
#define vstore_partial_16_1 vstore_partial_1
|
|
#define vstore_partial_16_2 vstore_partial_2
|
|
#define vstore_partial_16_3 vstore_partial_3
|
|
#define vstore_partial_16_4 vstore_partial_4
|
|
#define vstore_partial_16_5 vstore_partial_5
|
|
#define vstore_partial_16_6 vstore_partial_6
|
|
#define vstore_partial_16_7 vstore_partial_7
|
|
#define vstore_partial_16_8 vstore_partial_8
|
|
#define vstore_partial_16_9 vstore_partial_9
|
|
#define vstore_partial_16_10 vstore_partial_10
|
|
#define vstore_partial_16_11 vstore_partial_11
|
|
#define vstore_partial_16_12 vstore_partial_12
|
|
#define vstore_partial_16_13 vstore_partial_13
|
|
#define vstore_partial_16_14 vstore_partial_14
|
|
#define vstore_partial_16_15 vstore_partial_15
|
|
#define vstore_partial_16_16 vstore_partial_16
|
|
|
|
|
|
#define vstore_partial_1(DATA, OFFSET, PTR) \
    vstore1(DATA.s0, OFFSET, PTR);

#define vstore_partial_2(DATA, OFFSET, PTR) \
    vstore2(DATA.s01, OFFSET, PTR);

#define vstore_partial_3(DATA, OFFSET, PTR) \
    vstore3(DATA.s012, OFFSET, PTR);

#define vstore_partial_4(DATA, OFFSET, PTR) \
    vstore4(DATA.s0123, OFFSET, PTR);

#define vstore_partial_5(DATA, OFFSET, PTR)    \
    vstore_partial_4(DATA.s0123, OFFSET, PTR); \
    vstore1(DATA.s4, OFFSET, PTR + 4);

#define vstore_partial_6(DATA, OFFSET, PTR)    \
    vstore_partial_4(DATA.s0123, OFFSET, PTR); \
    vstore_partial_2(DATA.s45, OFFSET, PTR + 4);

#define vstore_partial_7(DATA, OFFSET, PTR)    \
    vstore_partial_4(DATA.s0123, OFFSET, PTR); \
    vstore_partial_3(DATA.s456, OFFSET, PTR + 4);

#define vstore_partial_8(DATA, OFFSET, PTR) \
    vstore8(DATA.s01234567, OFFSET, PTR);

#define vstore_partial_9(DATA, OFFSET, PTR)        \
    vstore_partial_8(DATA.s01234567, OFFSET, PTR); \
    vstore1(DATA.s8, OFFSET, PTR + 8);

#define vstore_partial_10(DATA, OFFSET, PTR)       \
    vstore_partial_8(DATA.s01234567, OFFSET, PTR); \
    vstore_partial_2(DATA.s89, OFFSET, PTR + 8);

#define vstore_partial_11(DATA, OFFSET, PTR)       \
    vstore_partial_8(DATA.s01234567, OFFSET, PTR); \
    vstore_partial_3(DATA.s89a, OFFSET, PTR + 8);

#define vstore_partial_12(DATA, OFFSET, PTR)       \
    vstore_partial_8(DATA.s01234567, OFFSET, PTR); \
    vstore_partial_4(DATA.s89ab, OFFSET, PTR + 8);

#define vstore_partial_13(DATA, OFFSET, PTR)       \
    vstore_partial_8(DATA.s01234567, OFFSET, PTR); \
    vstore_partial_5(DATA.s89abcdef, OFFSET, PTR + 8);

#define vstore_partial_14(DATA, OFFSET, PTR)       \
    vstore_partial_8(DATA.s01234567, OFFSET, PTR); \
    vstore_partial_6(DATA.s89abcdef, OFFSET, PTR + 8);

#define vstore_partial_15(DATA, OFFSET, PTR)       \
    vstore_partial_8(DATA.s01234567, OFFSET, PTR); \
    vstore_partial_7(DATA.s89abcdef, OFFSET, PTR + 8);

#define vstore_partial_16(DATA, OFFSET, PTR) \
    vstore16(DATA, OFFSET, PTR);
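// Illustrative usage (assumption): VSTORE_PARTIAL(4, 3)(res, 0, dst) resolves to
// vstore_partial_4_3 and then vstore_partial_3, storing only res.s012, while
// VSTORE_PARTIAL(4, 0) resolves to NO_STORE and emits no code; "res" and "dst" are
// hypothetical kernel-side names.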
|
|
|
|
|
|
|
|
|
|
|
|
#define convert_float_sat convert_float
|
|
#define convert_float1_sat convert_float
|
|
#define convert_float2_sat convert_float2
|
|
#define convert_float3_sat convert_float3
|
|
#define convert_float4_sat convert_float4
|
|
#define convert_float8_sat convert_float8
|
|
#define convert_float16_sat convert_float16
|
|
#define convert_half_sat convert_float
|
|
#define convert_half1_sat convert_half
|
|
#define convert_half2_sat convert_half2
|
|
#define convert_half3_sat convert_half3
|
|
#define convert_half4_sat convert_half4
|
|
#define convert_half8_sat convert_half8
|
|
#define convert_half16_sat convert_half16
|
|
|
|
#define convert_float1 convert_float
|
|
#define convert_half1 convert_half
|
|
#define convert_char1 convert_char
|
|
#define convert_uchar1 convert_uchar
|
|
#define convert_short1 convert_short
|
|
#define convert_ushort1 convert_ushort
|
|
#define convert_int1 convert_int
|
|
#define convert_uint1 convert_uint
|
|
#define convert_long1 convert_long
|
|
#define convert_ulong1 convert_ulong
|
|
#define convert_double1 convert_double
|
|
|
|
#define convert_char1_sat convert_char_sat
|
|
#define convert_uchar1_sat convert_uchar_sat
|
|
#define convert_uchar2_sat convert_uchar2_sat
|
|
#define convert_uchar3_sat convert_uchar3_sat
|
|
#define convert_uchar4_sat convert_uchar4_sat
|
|
#define convert_uchar8_sat convert_uchar8_sat
|
|
#define convert_uchar16_sat convert_uchar16_sat
|
|
#define convert_short1_sat convert_short_sat
|
|
#define convert_ushort1_sat convert_ushort_sat
|
|
#define convert_int1_sat convert_int_sat
|
|
#define convert_uint1_sat convert_uint_sat
|
|
#define convert_long1_sat convert_long_sat
|
|
#define convert_ulong1_sat convert_ulong_sat
|
|
#define convert_double1_sat convert_double_sat
|
|
|
|
#define VEC_DATA_TYPE_STR(type, size) type##size
#define VEC_DATA_TYPE(type, size) VEC_DATA_TYPE_STR(type, size)

#define CONVERT_STR(x, type) (convert_##type((x)))
#define CONVERT(x, type) CONVERT_STR(x, type)

#define CONVERT_SAT_STR(x, type) (convert_##type##_sat((x)))
#define CONVERT_SAT(x, type) CONVERT_SAT_STR(x, type)

#define CONVERT_SAT_ROUND_STR(x, type, round) (convert_##type##_sat_##round((x)))
#define CONVERT_SAT_ROUND(x, type, round) CONVERT_SAT_ROUND_STR(x, type, round)
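// Illustrative usage (assumption): CONVERT_SAT(acc, VEC_DATA_TYPE(uchar, 4)) expands to
// (convert_uchar4_sat((acc))), clamping a hypothetical int4 accumulator "acc" into the uchar
// range before it is stored; CONVERT_SAT_ROUND additionally appends a rounding-mode suffix,
// e.g. CONVERT_SAT_ROUND(acc, uchar4, rte) becomes (convert_uchar4_sat_rte((acc))).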
|
|
|
|
#define select_vec_dt_uchar(size) uchar##size
|
|
#define select_vec_dt_char(size) char##size
|
|
#define select_vec_dt_ushort(size) ushort##size
|
|
#define select_vec_dt_short(size) short##size
|
|
#define select_vec_dt_half(size) short##size
|
|
#define select_vec_dt_uint(size) uint##size
|
|
#define select_vec_dt_int(size) int##size
|
|
#define select_vec_dt_float(size) int##size
|
|
#define select_vec_dt_ulong(size) ulong##size
|
|
#define select_vec_dt_long(size) long##size
|
|
|
|
#define SELECT_VEC_DATA_TYPE_STR(type, size) select_vec_dt_##type(size)
|
|
#define SELECT_VEC_DATA_TYPE(type, size) SELECT_VEC_DATA_TYPE_STR(type, size)
|
|
#define SELECT_DATA_TYPE(type) SELECT_VEC_DATA_TYPE_STR(type, 1)
|
|
|
|
#define signed_int_vec_dt_uchar(size) char##size
|
|
#define signed_int_vec_dt_char(size) char##size
|
|
#define signed_int_vec_dt_ushort(size) short##size
|
|
#define signed_int_vec_dt_short(size) short##size
|
|
#define signed_int_vec_dt_half(size) short##size
|
|
#define signed_int_vec_dt_uint(size) int##size
|
|
#define signed_int_vec_dt_int(size) int##size
|
|
#define signed_int_vec_dt_float(size) int##size
|
|
#define signed_int_vec_dt_ulong(size) long##size
|
|
#define signed_int_vec_dt_long(size) long##size
|
|
|
|
#define SIGNED_INT_VEC_DATA_TYPE_STR(type, size) signed_int_vec_dt_##type(size)
|
|
#define SIGNED_INT_VEC_DATA_TYPE(type, size) SIGNED_INT_VEC_DATA_TYPE_STR(type, size)
|
|
#define SIGNED_INT_DATA_TYPE(type) SIGNED_INT_VEC_DATA_TYPE_STR(type, 1)
|
|
|
|
#define sum_reduce_1(x) (x)
#define sum_reduce_2(x) ((x).s0) + ((x).s1)
#define sum_reduce_3(x) sum_reduce_2((x).s01) + ((x).s2)
#define sum_reduce_4(x) sum_reduce_2((x).s01) + sum_reduce_2((x).s23)
#define sum_reduce_8(x) sum_reduce_4((x).s0123) + sum_reduce_4((x).s4567)
#define sum_reduce_16(x) sum_reduce_8((x).s01234567) + sum_reduce_8((x).s89ABCDEF)

#define SUM_REDUCE_STR(x, size) sum_reduce_##size(x)
#define SUM_REDUCE(x, size) SUM_REDUCE_STR(x, size)

#define prod_reduce_1(x) (x)
#define prod_reduce_2(x) ((x).s0) * ((x).s1)
#define prod_reduce_3(x) prod_reduce_2((x).s01) * ((x).s2)
#define prod_reduce_4(x) prod_reduce_2((x).s01) * prod_reduce_2((x).s23)
#define prod_reduce_8(x) prod_reduce_4((x).s0123) * prod_reduce_4((x).s4567)
#define prod_reduce_16(x) prod_reduce_8((x).s01234567) * prod_reduce_8((x).s89ABCDEF)

#define PROD_REDUCE_STR(x, size) prod_reduce_##size(x)
#define PROD_REDUCE(x, size) PROD_REDUCE_STR(x, size)

#define max_reduce_1(x) (x)
#define max_reduce_2(x) max(((x).s0), ((x).s1))
#define max_reduce_3(x) max(max_reduce_2((x).s01), ((x).s2))
#define max_reduce_4(x) max(max_reduce_2((x).s01), max_reduce_2((x).s23))
#define max_reduce_8(x) max(max_reduce_4((x).s0123), max_reduce_4((x).s4567))
#define max_reduce_16(x) max(max_reduce_8((x).s01234567), max_reduce_8((x).s89ABCDEF))

#define MAX_REDUCE_STR(x, size) max_reduce_##size(x)
#define MAX_REDUCE(x, size) MAX_REDUCE_STR(x, size)
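// Illustrative usage (assumption): SUM_REDUCE(v, 8) expands to sum_reduce_8(v), a tree of
// pairwise additions over the lanes of a hypothetical float8 v, which evaluates to
// (v.s0 + v.s1) + (v.s2 + v.s3) + (v.s4 + v.s5) + (v.s6 + v.s7); PROD_REDUCE and
// MAX_REDUCE follow the same pattern with multiplication and max().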
|
|
|
|
#define VECTOR_DECLARATION(name) \
|
|
__global uchar *name##_ptr, \
|
|
uint name##_stride_x, \
|
|
uint name##_step_x, \
|
|
uint name##_offset_first_element_in_bytes
|
|
|
|
#define IMAGE_DECLARATION(name) \
|
|
__global uchar *name##_ptr, \
|
|
uint name##_stride_x, \
|
|
uint name##_step_x, \
|
|
uint name##_stride_y, \
|
|
uint name##_step_y, \
|
|
uint name##_offset_first_element_in_bytes
|
|
|
|
#define TENSOR3D_DECLARATION(name) \
|
|
__global uchar *name##_ptr, \
|
|
uint name##_stride_x, \
|
|
uint name##_step_x, \
|
|
uint name##_stride_y, \
|
|
uint name##_step_y, \
|
|
uint name##_stride_z, \
|
|
uint name##_step_z, \
|
|
uint name##_offset_first_element_in_bytes
|
|
|
|
#define TENSOR4D_DECLARATION(name) \
|
|
__global uchar *name##_ptr, \
|
|
uint name##_stride_x, \
|
|
uint name##_step_x, \
|
|
uint name##_stride_y, \
|
|
uint name##_step_y, \
|
|
uint name##_stride_z, \
|
|
uint name##_step_z, \
|
|
uint name##_stride_w, \
|
|
uint name##_step_w, \
|
|
uint name##_offset_first_element_in_bytes
|
|
|
|
#define TENSOR5D_DECLARATION(name) \
|
|
__global uchar *name##_ptr, \
|
|
uint name##_stride_x, \
|
|
uint name##_step_x, \
|
|
uint name##_stride_y, \
|
|
uint name##_step_y, \
|
|
uint name##_stride_z, \
|
|
uint name##_step_z, \
|
|
uint name##_stride_w, \
|
|
uint name##_step_w, \
|
|
uint name##_stride_v, \
|
|
uint name##_step_v, \
|
|
uint name##_offset_first_element_in_bytes
|
|
|
|
#define CONVERT_TO_VECTOR_STRUCT(name) \
|
|
update_vector_workitem_ptr(name##_ptr, name##_offset_first_element_in_bytes, name##_stride_x, name##_step_x)
|
|
|
|
#define CONVERT_TO_VECTOR_STRUCT_NO_STEP(name) \
|
|
update_vector_workitem_ptr(name##_ptr, name##_offset_first_element_in_bytes, name##_stride_x, 0)
|
|
|
|
#define CONVERT_TO_IMAGE_STRUCT(name) \
|
|
update_image_workitem_ptr(name##_ptr, name##_offset_first_element_in_bytes, name##_stride_x, name##_step_x, name##_stride_y, name##_step_y)
|
|
|
|
#define CONVERT_TO_IMAGE_STRUCT_NO_STEP(name) \
|
|
update_image_workitem_ptr(name##_ptr, name##_offset_first_element_in_bytes, name##_stride_x, 0, name##_stride_y, 0)
|
|
|
|
#define CONVERT_TENSOR3D_TO_IMAGE_STRUCT(name) \
|
|
update_image_from_tensor3D_workitem_ptr(name##_ptr, name##_offset_first_element_in_bytes, name##_stride_x, name##_step_x, name##_stride_y, name##_step_y, name##_stride_z, name##_step_z)
|
|
|
|
#define CONVERT_TENSOR3D_TO_IMAGE_STRUCT_NO_STEP(name) \
|
|
update_image_from_tensor3D_workitem_ptr(name##_ptr, name##_offset_first_element_in_bytes, name##_stride_x, 0, name##_stride_y, 0, name##_stride_z, name##_step_z)
|
|
|
|
|
|
|
|
#define CONVERT_TO_TENSOR3D_STRUCT(name) \
|
|
update_tensor3D_workitem_ptr(name##_ptr, name##_offset_first_element_in_bytes, name##_stride_x, name##_step_x, name##_stride_y, name##_step_y, \
|
|
name##_stride_z, name##_step_z)
|
|
|
|
#define CONVERT_TO_TENSOR3D_STRUCT_NO_STEP(name) \
|
|
update_tensor3D_workitem_ptr(name##_ptr, name##_offset_first_element_in_bytes, name##_stride_x, 0, name##_stride_y, 0, name##_stride_z, 0)
|
|
|
|
#define CONVERT_TO_TENSOR4D_STRUCT(name, mod_size) \
|
|
update_tensor4D_workitem_ptr(name##_ptr, name##_offset_first_element_in_bytes, name##_stride_x, name##_step_x, name##_stride_y, name##_step_y, \
|
|
name##_stride_z, name##_step_z, name##_stride_w, name##_step_w, mod_size)
|
|
|
|
#define CONVERT_TO_TENSOR4D_STRUCT_NO_STEP(name, mod_size) \
|
|
update_tensor4D_workitem_ptr(name##_ptr, name##_offset_first_element_in_bytes, name##_stride_x, 0, name##_stride_y, 0, name##_stride_z, 0, name##_stride_w, 0, mod_size)
|
|
|
|
#define CONVERT_TO_TENSOR3D_STRUCT_NO_UPDATE_PTR(name) \
|
|
tensor3D_ptr_no_update(name##_ptr, name##_offset_first_element_in_bytes, name##_stride_x, name##_step_x, name##_stride_y, name##_step_y, \
|
|
name##_stride_z, name##_step_z)
|
|
|
|
|
|
typedef struct Vector
|
|
{
|
|
__global uchar *ptr;
|
|
int offset_first_element_in_bytes;
|
|
int stride_x;
|
|
} Vector;
|
|
|
|
|
|
typedef struct Image
|
|
{
|
|
__global uchar *ptr;
|
|
int offset_first_element_in_bytes;
|
|
int stride_x;
|
|
int stride_y;
|
|
} Image;
|
|
|
|
|
|
typedef struct Tensor3D
|
|
{
|
|
__global uchar *ptr;
|
|
int offset_first_element_in_bytes;
|
|
int stride_x;
|
|
int stride_y;
|
|
int stride_z;
|
|
} Tensor3D;
|
|
|
|
|
|
typedef struct Tensor4D
|
|
{
|
|
__global uchar *ptr;
|
|
int offset_first_element_in_bytes;
|
|
int stride_x;
|
|
int stride_y;
|
|
int stride_z;
|
|
int stride_w;
|
|
} Tensor4D;
|
|
|
|
|
|
inline Vector update_vector_workitem_ptr(__global uchar *ptr, uint offset_first_element_in_bytes, uint stride_x, uint step_x)
|
|
{
|
|
Vector vector =
|
|
{
|
|
.ptr = ptr,
|
|
.offset_first_element_in_bytes = offset_first_element_in_bytes,
|
|
.stride_x = stride_x,
|
|
};
|
|
vector.ptr += vector.offset_first_element_in_bytes + get_global_id(0) * step_x;
|
|
return vector;
|
|
}
|
|
|
|
|
|
inline Image update_image_workitem_ptr(__global uchar *ptr, uint offset_first_element_in_bytes, uint stride_x, uint step_x, uint stride_y, uint step_y)
|
|
{
|
|
Image img =
|
|
{
|
|
.ptr = ptr,
|
|
.offset_first_element_in_bytes = offset_first_element_in_bytes,
|
|
.stride_x = stride_x,
|
|
.stride_y = stride_y
|
|
};
|
|
img.ptr += img.offset_first_element_in_bytes + get_global_id(0) * step_x + get_global_id(1) * step_y;
|
|
return img;
|
|
}
|
|
|
|
|
|
inline Image update_image_from_tensor3D_workitem_ptr(__global uchar *ptr, uint offset_first_element_in_bytes, uint stride_x, uint step_x, uint stride_y, uint step_y, uint stride_z, uint step_z)
|
|
{
|
|
Image img =
|
|
{
|
|
.ptr = ptr,
|
|
.offset_first_element_in_bytes = offset_first_element_in_bytes,
|
|
.stride_x = stride_x,
|
|
.stride_y = stride_y
|
|
};
|
|
img.ptr += img.offset_first_element_in_bytes + get_global_id(0) * step_x + get_global_id(1) * step_y + get_global_id(2) * step_z;
|
|
return img;
|
|
}
|
|
|
|
|
|
inline Tensor3D update_tensor3D_workitem_ptr(__global uchar *ptr, uint offset_first_element_in_bytes, uint stride_x, uint step_x, uint stride_y, uint step_y, uint stride_z, uint step_z)
|
|
{
|
|
Tensor3D tensor =
|
|
{
|
|
.ptr = ptr,
|
|
.offset_first_element_in_bytes = offset_first_element_in_bytes,
|
|
.stride_x = stride_x,
|
|
.stride_y = stride_y,
|
|
.stride_z = stride_z
|
|
};
|
|
tensor.ptr += tensor.offset_first_element_in_bytes + get_global_id(0) * step_x + get_global_id(1) * step_y + get_global_id(2) * step_z;
|
|
return tensor;
|
|
}
|
|
|
|
|
|
inline Tensor3D tensor3D_ptr_no_update(__global uchar *ptr, uint offset_first_element_in_bytes, uint stride_x, uint step_x, uint stride_y, uint step_y, uint stride_z, uint step_z)
|
|
{
|
|
Tensor3D tensor =
|
|
{
|
|
.ptr = ptr,
|
|
.offset_first_element_in_bytes = offset_first_element_in_bytes,
|
|
.stride_x = stride_x,
|
|
.stride_y = stride_y,
|
|
.stride_z = stride_z
|
|
};
|
|
return tensor;
|
|
}
|
|
|
|
inline Tensor4D update_tensor4D_workitem_ptr(__global uchar *ptr, uint offset_first_element_in_bytes, uint stride_x, uint step_x, uint stride_y, uint step_y, uint stride_z, uint step_z, uint stride_w,
|
|
uint step_w,
|
|
uint mod_size)
|
|
{
|
|
Tensor4D tensor =
|
|
{
|
|
.ptr = ptr,
|
|
.offset_first_element_in_bytes = offset_first_element_in_bytes,
|
|
.stride_x = stride_x,
|
|
.stride_y = stride_y,
|
|
.stride_z = stride_z,
|
|
.stride_w = stride_w
|
|
};
|
|
|
|
tensor.ptr += tensor.offset_first_element_in_bytes + get_global_id(0) * step_x + get_global_id(1) * step_y + (get_global_id(2) % mod_size) * step_z + (get_global_id(2) / mod_size) * step_w;
|
|
return tensor;
|
|
}
|
|
|
|
|
|
inline __global const uchar *vector_offset(const Vector *vec, int x)
{
    return vec->ptr + x * vec->stride_x;
}

inline __global uchar *offset(const Image *img, int x, int y)
{
    return img->ptr + x * img->stride_x + y * img->stride_y;
}

inline __global const uchar *tensor3D_offset(const Tensor3D *tensor, int x, int y, int z)
{
    return tensor->ptr + x * tensor->stride_x + y * tensor->stride_y + z * tensor->stride_z;
}

inline __global const uchar *tensor4D_offset(const Tensor4D *tensor, int x, int y, int z, int w)
{
    return tensor->ptr + x * tensor->stride_x + y * tensor->stride_y + z * tensor->stride_z + w * tensor->stride_w;
}

inline __global const uchar *tensor3D_index2ptr(const Tensor3D *tensor, uint width, uint height, uint depth, uint index)
{
    uint num_elements = width * height;

    const uint z = index / num_elements;

    index %= num_elements;

    const uint y = index / width;

    index %= width;

    const uint x = index;

    return tensor->ptr + x * tensor->stride_x + y * tensor->stride_y + z * tensor->stride_z + tensor->offset_first_element_in_bytes;
}
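// Illustrative usage (assumption): with a Tensor3D "src" built via CONVERT_TO_TENSOR3D_STRUCT,
// tensor3D_offset(&src, 1, 2, 0) returns the byte address one element along x and two along y
// from the work-item's base pointer; the x/y/z arguments are element counts while the strides
// stored in the struct are expressed in bytes.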
|
|
|
|
#endif
|
|
|
|
#ifndef SRC_CORE_CL_CL_KERNELS_TILE_HELPERS
|
|
#define SRC_CORE_CL_CL_KERNELS_TILE_HELPERS
|
|
|
|
|
|
|
|
|
|
#define TILE_VECTOR_SIZE1 1
|
|
#define TILE_VECTOR_SIZE2 2
|
|
#define TILE_VECTOR_SIZE3 3
|
|
#define TILE_VECTOR_SIZE4 4
|
|
#define TILE_VECTOR_SIZE5 8
|
|
#define TILE_VECTOR_SIZE6 8
|
|
#define TILE_VECTOR_SIZE7 8
|
|
#define TILE_VECTOR_SIZE8 8
|
|
#define TILE_VECTOR_SIZE9 16
|
|
#define TILE_VECTOR_SIZE10 16
|
|
#define TILE_VECTOR_SIZE11 16
|
|
#define TILE_VECTOR_SIZE12 16
|
|
#define TILE_VECTOR_SIZE13 16
|
|
#define TILE_VECTOR_SIZE14 16
|
|
#define TILE_VECTOR_SIZE15 16
|
|
#define TILE_VECTOR_SIZE16 16
|
|
|
|
#define TILE_VECTOR_TYPE1(DATA_TYPE) DATA_TYPE##1
|
|
#define TILE_VECTOR_TYPE2(DATA_TYPE) DATA_TYPE##2
|
|
#define TILE_VECTOR_TYPE3(DATA_TYPE) DATA_TYPE##3
|
|
#define TILE_VECTOR_TYPE4(DATA_TYPE) DATA_TYPE##4
|
|
#define TILE_VECTOR_TYPE5(DATA_TYPE) DATA_TYPE##8
|
|
#define TILE_VECTOR_TYPE6(DATA_TYPE) DATA_TYPE##8
|
|
#define TILE_VECTOR_TYPE7(DATA_TYPE) DATA_TYPE##8
|
|
#define TILE_VECTOR_TYPE8(DATA_TYPE) DATA_TYPE##8
|
|
#define TILE_VECTOR_TYPE9(DATA_TYPE) DATA_TYPE##16
|
|
#define TILE_VECTOR_TYPE10(DATA_TYPE) DATA_TYPE##16
|
|
#define TILE_VECTOR_TYPE11(DATA_TYPE) DATA_TYPE##16
|
|
#define TILE_VECTOR_TYPE12(DATA_TYPE) DATA_TYPE##16
|
|
#define TILE_VECTOR_TYPE13(DATA_TYPE) DATA_TYPE##16
|
|
#define TILE_VECTOR_TYPE14(DATA_TYPE) DATA_TYPE##16
|
|
#define TILE_VECTOR_TYPE15(DATA_TYPE) DATA_TYPE##16
|
|
#define TILE_VECTOR_TYPE16(DATA_TYPE) DATA_TYPE##16
|
|
|
|
|
|
#define TILE(DATA_TYPE, H, W, BASENAME) TILE_STR(DATA_TYPE, H, W, BASENAME)
#define TILE_STR(DATA_TYPE, H, W, BASENAME) \
    union {                                 \
        DATA_TYPE s[TILE_VECTOR_SIZE##W];   \
        TILE_VECTOR_TYPE##W(DATA_TYPE) v;   \
    } BASENAME[H]
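// Illustrative usage (assumption): TILE(float, 4, 3, in) declares "in" as an array of 4 rows,
// each a union of a scalar view in[i].s[0..2] and a vector view in[i].v (a float3). Because
// the vector width is rounded up (TILE_VECTOR_SIZE5..8 all map to 8), a tile of width 5 still
// uses an 8-wide vector so that loads and stores operate on real OpenCL vector types.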
|
|
|
|
#define TENSOR4D_IMAGE(name) \
|
|
__read_only image2d_t name##_img, \
|
|
__global uchar *name##_ptr, \
|
|
uint name##_stride_x, \
|
|
uint name##_step_x, \
|
|
uint name##_stride_y, \
|
|
uint name##_step_y, \
|
|
uint name##_stride_z, \
|
|
uint name##_step_z, \
|
|
uint name##_stride_w, \
|
|
uint name##_step_w, \
|
|
uint name##_offset_first_element_in_bytes
|
|
|
|
#define TENSOR4D_BUFFER(name) \
|
|
__global uchar *name##_ptr, \
|
|
uint name##_stride_x, \
|
|
uint name##_step_x, \
|
|
uint name##_stride_y, \
|
|
uint name##_step_y, \
|
|
uint name##_stride_z, \
|
|
uint name##_step_z, \
|
|
uint name##_stride_w, \
|
|
uint name##_step_w, \
|
|
uint name##_offset_first_element_in_bytes
|
|
|
|
#define TENSOR4D_STR(name, type) TENSOR4D_##type(name)
|
|
#define TENSOR4D(name, type) TENSOR4D_STR(name, type)
|
|
|
|
#define TENSOR4D_T_IMAGE(name) \
|
|
__read_only image2d_t name##_img, \
|
|
__global uchar *name##_ptr, \
|
|
uint name##_stride_y, \
|
|
uint name##_stride_z, \
|
|
uint name##_stride_w, \
|
|
uint name##_c, \
|
|
uint name##_w, \
|
|
uint name##_h, \
|
|
uint name##_n, \
|
|
uint name##_offset_first_element_in_bytes
|
|
|
|
#define TENSOR4D_T_BUFFER(name) \
|
|
__global uchar *name##_ptr, \
|
|
uint name##_stride_y, \
|
|
uint name##_stride_z, \
|
|
uint name##_stride_w, \
|
|
uint name##_c, \
|
|
uint name##_w, \
|
|
uint name##_h, \
|
|
uint name##_n, \
|
|
uint name##_offset_first_element_in_bytes
|
|
|
|
#define TENSOR4D_T_STR(name, type) TENSOR4D_T_##type(name)
|
|
|
|
|
|
#define TENSOR4D_T(name, type) TENSOR4D_T_STR(name, type)
|
|
|
|
#define TENSOR4D_RO_T_IMAGE(name) \
|
|
__read_only image2d_t name##_img, \
|
|
TENSOR4D_T_BUFFER(name)
|
|
|
|
#define TENSOR4D_RO_T_BUFFER(name) TENSOR4D_T_BUFFER(name)
|
|
|
|
#define TENSOR4D_RO_T_STR(name, type) TENSOR4D_RO_T_##type(name)
|
|
|
|
|
|
#define TENSOR4D_RO_T(name, type) TENSOR4D_RO_T_STR(name, type)
|
|
|
|
#define TENSOR4D_WO_T_IMAGE(name) \
|
|
__write_only image2d_t name##_img, \
|
|
TENSOR4D_T_BUFFER(name)
|
|
|
|
#define TENSOR4D_WO_T_BUFFER(name) TENSOR4D_T_BUFFER(name)
|
|
|
|
#define TENSOR4D_WO_T_STR(name, type) TENSOR4D_WO_T_##type(name)
|
|
|
|
|
|
#define TENSOR4D_WO_T(name, type) TENSOR4D_WO_T_STR(name, type)
|
|
|
|
#define TENSOR3D_T_IMAGE(name) \
|
|
__read_only image2d_t name##_img, \
|
|
__global uchar *name##_ptr, \
|
|
uint name##_stride_y, \
|
|
uint name##_stride_z, \
|
|
uint name##_w, \
|
|
uint name##_h, \
|
|
uint name##_n, \
|
|
uint name##_offset_first_element_in_bytes
|
|
|
|
#define TENSOR3D_T_BUFFER(name) \
|
|
__global uchar *name##_ptr, \
|
|
uint name##_stride_y, \
|
|
uint name##_stride_z, \
|
|
uint name##_w, \
|
|
uint name##_h, \
|
|
uint name##_n, \
|
|
uint name##_offset_first_element_in_bytes
|
|
|
|
#define TENSOR3D_T_STR(name, type) TENSOR3D_T_##type(name)
|
|
#define TENSOR3D_T(name, type) TENSOR3D_T_STR(name, type)
|
|
|
|
#if !defined(UNROLL_WITH_PRAGMA)
|
|
#define UNROLL_INCR(idx, step, macro) idx += (step); (macro)
|
|
|
|
#define LOOP_UNROLLING_1(idx, step, macro) (macro)
|
|
#define LOOP_UNROLLING_2(idx, step, macro) LOOP_UNROLLING_1(idx, step, macro); UNROLL_INCR(idx, step, macro)
|
|
#define LOOP_UNROLLING_3(idx, step, macro) LOOP_UNROLLING_2(idx, step, macro); UNROLL_INCR(idx, step, macro)
|
|
#define LOOP_UNROLLING_4(idx, step, macro) LOOP_UNROLLING_3(idx, step, macro); UNROLL_INCR(idx, step, macro)
|
|
#define LOOP_UNROLLING_5(idx, step, macro) LOOP_UNROLLING_4(idx, step, macro); UNROLL_INCR(idx, step, macro)
|
|
#define LOOP_UNROLLING_6(idx, step, macro) LOOP_UNROLLING_5(idx, step, macro); UNROLL_INCR(idx, step, macro)
|
|
#define LOOP_UNROLLING_7(idx, step, macro) LOOP_UNROLLING_6(idx, step, macro); UNROLL_INCR(idx, step, macro)
|
|
#define LOOP_UNROLLING_8(idx, step, macro) LOOP_UNROLLING_7(idx, step, macro); UNROLL_INCR(idx, step, macro)
|
|
#define LOOP_UNROLLING_9(idx, step, macro) LOOP_UNROLLING_8(idx, step, macro); UNROLL_INCR(idx, step, macro)
|
|
#define LOOP_UNROLLING_10(idx, step, macro) LOOP_UNROLLING_9(idx, step, macro); UNROLL_INCR(idx, step, macro)
|
|
#define LOOP_UNROLLING_11(idx, step, macro) LOOP_UNROLLING_10(idx, step, macro); UNROLL_INCR(idx, step, macro)
|
|
#define LOOP_UNROLLING_12(idx, step, macro) LOOP_UNROLLING_11(idx, step, macro); UNROLL_INCR(idx, step, macro)
|
|
#define LOOP_UNROLLING_13(idx, step, macro) LOOP_UNROLLING_12(idx, step, macro); UNROLL_INCR(idx, step, macro)
|
|
#define LOOP_UNROLLING_14(idx, step, macro) LOOP_UNROLLING_13(idx, step, macro); UNROLL_INCR(idx, step, macro)
|
|
#define LOOP_UNROLLING_15(idx, step, macro) LOOP_UNROLLING_14(idx, step, macro); UNROLL_INCR(idx, step, macro)
|
|
#define LOOP_UNROLLING_16(idx, step, macro) LOOP_UNROLLING_15(idx, step, macro); UNROLL_INCR(idx, step, macro)
|
|
#define LOOP_UNROLLING_17(idx, step, macro) LOOP_UNROLLING_16(idx, step, macro); UNROLL_INCR(idx, step, macro)
|
|
#define LOOP_UNROLLING_18(idx, step, macro) LOOP_UNROLLING_17(idx, step, macro); UNROLL_INCR(idx, step, macro)
|
|
#define LOOP_UNROLLING_19(idx, step, macro) LOOP_UNROLLING_18(idx, step, macro); UNROLL_INCR(idx, step, macro)
|
|
#define LOOP_UNROLLING_20(idx, step, macro) LOOP_UNROLLING_19(idx, step, macro); UNROLL_INCR(idx, step, macro)
|
|
#define LOOP_UNROLLING_21(idx, step, macro) LOOP_UNROLLING_20(idx, step, macro); UNROLL_INCR(idx, step, macro)
|
|
#define LOOP_UNROLLING_22(idx, step, macro) LOOP_UNROLLING_21(idx, step, macro); UNROLL_INCR(idx, step, macro)
|
|
#define LOOP_UNROLLING_23(idx, step, macro) LOOP_UNROLLING_22(idx, step, macro); UNROLL_INCR(idx, step, macro)
|
|
#define LOOP_UNROLLING_24(idx, step, macro) LOOP_UNROLLING_23(idx, step, macro); UNROLL_INCR(idx, step, macro)
|
|
#define LOOP_UNROLLING_25(idx, step, macro) LOOP_UNROLLING_24(idx, step, macro); UNROLL_INCR(idx, step, macro)
|
|
#define LOOP_UNROLLING_26(idx, step, macro) LOOP_UNROLLING_25(idx, step, macro); UNROLL_INCR(idx, step, macro)
|
|
#define LOOP_UNROLLING_27(idx, step, macro) LOOP_UNROLLING_26(idx, step, macro); UNROLL_INCR(idx, step, macro)
|
|
#define LOOP_UNROLLING_28(idx, step, macro) LOOP_UNROLLING_27(idx, step, macro); UNROLL_INCR(idx, step, macro)
|
|
#define LOOP_UNROLLING_29(idx, step, macro) LOOP_UNROLLING_28(idx, step, macro); UNROLL_INCR(idx, step, macro)
|
|
#define LOOP_UNROLLING_30(idx, step, macro) LOOP_UNROLLING_29(idx, step, macro); UNROLL_INCR(idx, step, macro)
|
|
#define LOOP_UNROLLING_31(idx, step, macro) LOOP_UNROLLING_30(idx, step, macro); UNROLL_INCR(idx, step, macro)
|
|
#define LOOP_UNROLLING_32(idx, step, macro) LOOP_UNROLLING_31(idx, step, macro); UNROLL_INCR(idx, step, macro)
|
|
#define LOOP_UNROLLING_33(idx, step, macro) LOOP_UNROLLING_32(idx, step, macro); UNROLL_INCR(idx, step, macro)
|
|
#define LOOP_UNROLLING_34(idx, step, macro) LOOP_UNROLLING_33(idx, step, macro); UNROLL_INCR(idx, step, macro)
|
|
#define LOOP_UNROLLING_35(idx, step, macro) LOOP_UNROLLING_34(idx, step, macro); UNROLL_INCR(idx, step, macro)
|
|
#define LOOP_UNROLLING_36(idx, step, macro) LOOP_UNROLLING_35(idx, step, macro); UNROLL_INCR(idx, step, macro)
|
|
#define LOOP_UNROLLING_37(idx, step, macro) LOOP_UNROLLING_36(idx, step, macro); UNROLL_INCR(idx, step, macro)
|
|
#define LOOP_UNROLLING_38(idx, step, macro) LOOP_UNROLLING_37(idx, step, macro); UNROLL_INCR(idx, step, macro)
|
|
#define LOOP_UNROLLING_39(idx, step, macro) LOOP_UNROLLING_38(idx, step, macro); UNROLL_INCR(idx, step, macro)
|
|
#define LOOP_UNROLLING_40(idx, step, macro) LOOP_UNROLLING_39(idx, step, macro); UNROLL_INCR(idx, step, macro)
|
|
#define LOOP_UNROLLING_41(idx, step, macro) LOOP_UNROLLING_40(idx, step, macro); UNROLL_INCR(idx, step, macro)
|
|
#define LOOP_UNROLLING_42(idx, step, macro) LOOP_UNROLLING_41(idx, step, macro); UNROLL_INCR(idx, step, macro)
|
|
#define LOOP_UNROLLING_43(idx, step, macro) LOOP_UNROLLING_42(idx, step, macro); UNROLL_INCR(idx, step, macro)
|
|
#define LOOP_UNROLLING_44(idx, step, macro) LOOP_UNROLLING_43(idx, step, macro); UNROLL_INCR(idx, step, macro)
|
|
#define LOOP_UNROLLING_45(idx, step, macro) LOOP_UNROLLING_44(idx, step, macro); UNROLL_INCR(idx, step, macro)
|
|
#define LOOP_UNROLLING_46(idx, step, macro) LOOP_UNROLLING_45(idx, step, macro); UNROLL_INCR(idx, step, macro)
|
|
#define LOOP_UNROLLING_47(idx, step, macro) LOOP_UNROLLING_46(idx, step, macro); UNROLL_INCR(idx, step, macro)
|
|
#define LOOP_UNROLLING_48(idx, step, macro) LOOP_UNROLLING_47(idx, step, macro); UNROLL_INCR(idx, step, macro)
|
|
#define LOOP_UNROLLING_49(idx, step, macro) LOOP_UNROLLING_48(idx, step, macro); UNROLL_INCR(idx, step, macro)
|
|
#define LOOP_UNROLLING_50(idx, step, macro) LOOP_UNROLLING_49(idx, step, macro); UNROLL_INCR(idx, step, macro)
|
|
#define LOOP_UNROLLING_51(idx, step, macro) LOOP_UNROLLING_50(idx, step, macro); UNROLL_INCR(idx, step, macro)
|
|
#define LOOP_UNROLLING_52(idx, step, macro) LOOP_UNROLLING_51(idx, step, macro); UNROLL_INCR(idx, step, macro)
|
|
#define LOOP_UNROLLING_53(idx, step, macro) LOOP_UNROLLING_52(idx, step, macro); UNROLL_INCR(idx, step, macro)
|
|
#define LOOP_UNROLLING_54(idx, step, macro) LOOP_UNROLLING_53(idx, step, macro); UNROLL_INCR(idx, step, macro)
|
|
#define LOOP_UNROLLING_55(idx, step, macro) LOOP_UNROLLING_54(idx, step, macro); UNROLL_INCR(idx, step, macro)
|
|
#define LOOP_UNROLLING_56(idx, step, macro) LOOP_UNROLLING_55(idx, step, macro); UNROLL_INCR(idx, step, macro)
|
|
#define LOOP_UNROLLING_57(idx, step, macro) LOOP_UNROLLING_56(idx, step, macro); UNROLL_INCR(idx, step, macro)
|
|
#define LOOP_UNROLLING_58(idx, step, macro) LOOP_UNROLLING_57(idx, step, macro); UNROLL_INCR(idx, step, macro)
|
|
#define LOOP_UNROLLING_59(idx, step, macro) LOOP_UNROLLING_58(idx, step, macro); UNROLL_INCR(idx, step, macro)
|
|
#define LOOP_UNROLLING_60(idx, step, macro) LOOP_UNROLLING_59(idx, step, macro); UNROLL_INCR(idx, step, macro)
|
|
#define LOOP_UNROLLING_61(idx, step, macro) LOOP_UNROLLING_60(idx, step, macro); UNROLL_INCR(idx, step, macro)
|
|
#define LOOP_UNROLLING_62(idx, step, macro) LOOP_UNROLLING_61(idx, step, macro); UNROLL_INCR(idx, step, macro)
|
|
#define LOOP_UNROLLING_63(idx, step, macro) LOOP_UNROLLING_62(idx, step, macro); UNROLL_INCR(idx, step, macro)
|
|
#define LOOP_UNROLLING_64(idx, step, macro) LOOP_UNROLLING_63(idx, step, macro); UNROLL_INCR(idx, step, macro)
|
|
#define LOOP_UNROLLING_65(idx, step, macro) LOOP_UNROLLING_64(idx, step, macro); UNROLL_INCR(idx, step, macro)
|
|
#define LOOP_UNROLLING_66(idx, step, macro) LOOP_UNROLLING_65(idx, step, macro); UNROLL_INCR(idx, step, macro)
|
|
#define LOOP_UNROLLING_67(idx, step, macro) LOOP_UNROLLING_66(idx, step, macro); UNROLL_INCR(idx, step, macro)
|
|
#define LOOP_UNROLLING_68(idx, step, macro) LOOP_UNROLLING_67(idx, step, macro); UNROLL_INCR(idx, step, macro)
|
|
#define LOOP_UNROLLING_69(idx, step, macro) LOOP_UNROLLING_68(idx, step, macro); UNROLL_INCR(idx, step, macro)
|
|
#define LOOP_UNROLLING_70(idx, step, macro) LOOP_UNROLLING_69(idx, step, macro); UNROLL_INCR(idx, step, macro)
|
|
#define LOOP_UNROLLING_71(idx, step, macro) LOOP_UNROLLING_70(idx, step, macro); UNROLL_INCR(idx, step, macro)
|
|
#define LOOP_UNROLLING_72(idx, step, macro) LOOP_UNROLLING_71(idx, step, macro); UNROLL_INCR(idx, step, macro)
|
|
#define LOOP_UNROLLING_73(idx, step, macro) LOOP_UNROLLING_72(idx, step, macro); UNROLL_INCR(idx, step, macro)
|
|
#define LOOP_UNROLLING_74(idx, step, macro) LOOP_UNROLLING_73(idx, step, macro); UNROLL_INCR(idx, step, macro)
|
|
#define LOOP_UNROLLING_75(idx, step, macro) LOOP_UNROLLING_74(idx, step, macro); UNROLL_INCR(idx, step, macro)
|
|
#define LOOP_UNROLLING_76(idx, step, macro) LOOP_UNROLLING_75(idx, step, macro); UNROLL_INCR(idx, step, macro)
|
|
#define LOOP_UNROLLING_77(idx, step, macro) LOOP_UNROLLING_76(idx, step, macro); UNROLL_INCR(idx, step, macro)
|
|
#define LOOP_UNROLLING_78(idx, step, macro) LOOP_UNROLLING_77(idx, step, macro); UNROLL_INCR(idx, step, macro)
|
|
#define LOOP_UNROLLING_79(idx, step, macro) LOOP_UNROLLING_78(idx, step, macro); UNROLL_INCR(idx, step, macro)
|
|
#define LOOP_UNROLLING_80(idx, step, macro) LOOP_UNROLLING_79(idx, step, macro); UNROLL_INCR(idx, step, macro)
|
|
#define LOOP_UNROLLING_81(idx, step, macro) LOOP_UNROLLING_80(idx, step, macro); UNROLL_INCR(idx, step, macro)
|
|
#define LOOP_UNROLLING_82(idx, step, macro) LOOP_UNROLLING_81(idx, step, macro); UNROLL_INCR(idx, step, macro)
|
|
#define LOOP_UNROLLING_83(idx, step, macro) LOOP_UNROLLING_82(idx, step, macro); UNROLL_INCR(idx, step, macro)
|
|
#define LOOP_UNROLLING_84(idx, step, macro) LOOP_UNROLLING_83(idx, step, macro); UNROLL_INCR(idx, step, macro)
|
|
#define LOOP_UNROLLING_85(idx, step, macro) LOOP_UNROLLING_84(idx, step, macro); UNROLL_INCR(idx, step, macro)
|
|
#define LOOP_UNROLLING_86(idx, step, macro) LOOP_UNROLLING_85(idx, step, macro); UNROLL_INCR(idx, step, macro)
|
|
#define LOOP_UNROLLING_87(idx, step, macro) LOOP_UNROLLING_86(idx, step, macro); UNROLL_INCR(idx, step, macro)
|
|
#define LOOP_UNROLLING_88(idx, step, macro) LOOP_UNROLLING_87(idx, step, macro); UNROLL_INCR(idx, step, macro)
|
|
#define LOOP_UNROLLING_89(idx, step, macro) LOOP_UNROLLING_88(idx, step, macro); UNROLL_INCR(idx, step, macro)
|
|
#define LOOP_UNROLLING_90(idx, step, macro) LOOP_UNROLLING_89(idx, step, macro); UNROLL_INCR(idx, step, macro)
|
|
#define LOOP_UNROLLING_91(idx, step, macro) LOOP_UNROLLING_90(idx, step, macro); UNROLL_INCR(idx, step, macro)
|
|
#define LOOP_UNROLLING_92(idx, step, macro) LOOP_UNROLLING_91(idx, step, macro); UNROLL_INCR(idx, step, macro)
|
|
#define LOOP_UNROLLING_93(idx, step, macro) LOOP_UNROLLING_92(idx, step, macro); UNROLL_INCR(idx, step, macro)
|
|
#define LOOP_UNROLLING_94(idx, step, macro) LOOP_UNROLLING_93(idx, step, macro); UNROLL_INCR(idx, step, macro)
|
|
#define LOOP_UNROLLING_95(idx, step, macro) LOOP_UNROLLING_94(idx, step, macro); UNROLL_INCR(idx, step, macro)
|
|
#define LOOP_UNROLLING_96(idx, step, macro) LOOP_UNROLLING_95(idx, step, macro); UNROLL_INCR(idx, step, macro)
|
|
#define LOOP_UNROLLING_97(idx, step, macro) LOOP_UNROLLING_96(idx, step, macro); UNROLL_INCR(idx, step, macro)
|
|
#define LOOP_UNROLLING_98(idx, step, macro) LOOP_UNROLLING_97(idx, step, macro); UNROLL_INCR(idx, step, macro)
|
|
#define LOOP_UNROLLING_99(idx, step, macro) LOOP_UNROLLING_98(idx, step, macro); UNROLL_INCR(idx, step, macro)
|
|
#define LOOP_UNROLLING_100(idx, step, macro) LOOP_UNROLLING_99(idx, step, macro); UNROLL_INCR(idx, step, macro)
|
|
#define LOOP_UNROLLING_101(idx, step, macro) LOOP_UNROLLING_100(idx, step, macro); UNROLL_INCR(idx, step, macro)
|
|
#define LOOP_UNROLLING_102(idx, step, macro) LOOP_UNROLLING_101(idx, step, macro); UNROLL_INCR(idx, step, macro)
|
|
#define LOOP_UNROLLING_103(idx, step, macro) LOOP_UNROLLING_102(idx, step, macro); UNROLL_INCR(idx, step, macro)
|
|
#define LOOP_UNROLLING_104(idx, step, macro) LOOP_UNROLLING_103(idx, step, macro); UNROLL_INCR(idx, step, macro)
|
|
#define LOOP_UNROLLING_105(idx, step, macro) LOOP_UNROLLING_104(idx, step, macro); UNROLL_INCR(idx, step, macro)
|
|
#define LOOP_UNROLLING_106(idx, step, macro) LOOP_UNROLLING_105(idx, step, macro); UNROLL_INCR(idx, step, macro)
|
|
#define LOOP_UNROLLING_107(idx, step, macro) LOOP_UNROLLING_106(idx, step, macro); UNROLL_INCR(idx, step, macro)
|
|
#define LOOP_UNROLLING_108(idx, step, macro) LOOP_UNROLLING_107(idx, step, macro); UNROLL_INCR(idx, step, macro)
|
|
#define LOOP_UNROLLING_109(idx, step, macro) LOOP_UNROLLING_108(idx, step, macro); UNROLL_INCR(idx, step, macro)
|
|
#define LOOP_UNROLLING_110(idx, step, macro) LOOP_UNROLLING_109(idx, step, macro); UNROLL_INCR(idx, step, macro)
|
|
#define LOOP_UNROLLING_111(idx, step, macro) LOOP_UNROLLING_110(idx, step, macro); UNROLL_INCR(idx, step, macro)
|
|
#define LOOP_UNROLLING_112(idx, step, macro) LOOP_UNROLLING_111(idx, step, macro); UNROLL_INCR(idx, step, macro)
|
|
#define LOOP_UNROLLING_113(idx, step, macro) LOOP_UNROLLING_112(idx, step, macro); UNROLL_INCR(idx, step, macro)
|
|
#define LOOP_UNROLLING_114(idx, step, macro) LOOP_UNROLLING_113(idx, step, macro); UNROLL_INCR(idx, step, macro)
|
|
#define LOOP_UNROLLING_115(idx, step, macro) LOOP_UNROLLING_114(idx, step, macro); UNROLL_INCR(idx, step, macro)
|
|
#define LOOP_UNROLLING_116(idx, step, macro) LOOP_UNROLLING_115(idx, step, macro); UNROLL_INCR(idx, step, macro)
|
|
#define LOOP_UNROLLING_117(idx, step, macro) LOOP_UNROLLING_116(idx, step, macro); UNROLL_INCR(idx, step, macro)
|
|
#define LOOP_UNROLLING_118(idx, step, macro) LOOP_UNROLLING_117(idx, step, macro); UNROLL_INCR(idx, step, macro)
|
|
#define LOOP_UNROLLING_119(idx, step, macro) LOOP_UNROLLING_118(idx, step, macro); UNROLL_INCR(idx, step, macro)
|
|
#define LOOP_UNROLLING_120(idx, step, macro) LOOP_UNROLLING_119(idx, step, macro); UNROLL_INCR(idx, step, macro)
|
|
#define LOOP_UNROLLING_121(idx, step, macro) LOOP_UNROLLING_120(idx, step, macro); UNROLL_INCR(idx, step, macro)
|
|
#define LOOP_UNROLLING_122(idx, step, macro) LOOP_UNROLLING_121(idx, step, macro); UNROLL_INCR(idx, step, macro)
|
|
#define LOOP_UNROLLING_123(idx, step, macro) LOOP_UNROLLING_122(idx, step, macro); UNROLL_INCR(idx, step, macro)
|
|
#define LOOP_UNROLLING_124(idx, step, macro) LOOP_UNROLLING_123(idx, step, macro); UNROLL_INCR(idx, step, macro)
|
|
#define LOOP_UNROLLING_125(idx, step, macro) LOOP_UNROLLING_124(idx, step, macro); UNROLL_INCR(idx, step, macro)
|
|
#define LOOP_UNROLLING_126(idx, step, macro) LOOP_UNROLLING_125(idx, step, macro); UNROLL_INCR(idx, step, macro)
|
|
#define LOOP_UNROLLING_127(idx, step, macro) LOOP_UNROLLING_126(idx, step, macro); UNROLL_INCR(idx, step, macro)
|
|
#define LOOP_UNROLLING_128(idx, step, macro) LOOP_UNROLLING_127(idx, step, macro); UNROLL_INCR(idx, step, macro)
|
|
|
|
#define LOOP_UNROLLING_STR(type, idx, start, step, num, macro) \
|
|
{ \
|
|
type idx = start; \
|
|
LOOP_UNROLLING_##num(idx, step, macro); \
|
|
}
|
|
#else
|
|
#define LOOP_UNROLLING_STR(type, idx, start, step, num, macro) \
|
|
{ \
|
|
_Pragma("unroll") \
|
|
for(type idx = start; idx < (num * step); idx += step) \
|
|
{ \
|
|
(macro); \
|
|
} \
|
|
}
|
|
#endif
|
|
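// LOOP_UNROLLING(type, idx, start, step, num, macro) expands `macro` `num`
// times at compile time. On the path above the body is repeated through the
// LOOP_UNROLLING_1..128 chain (so `num` must be a literal between 1 and 128);
// on the #else path a _Pragma("unroll") for-loop is emitted instead.
//
// Illustrative usage (hypothetical `acc` and `buf`, not part of this file):
//   LOOP_UNROLLING(int, i, 0, 1, 4,
//   {
//       acc += buf[i];
//   })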
#define LOOP_UNROLLING(type, idx, start, step, num, macro) LOOP_UNROLLING_STR(type, idx, start, step, num, macro)
|
|
|
|
|
|
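// GET_SPATIAL_IDX returns the spatial coordinate processed by this work-item
// along dimension IDX, in steps of N0. When PARTIAL_N0 != 0 the result is
// shifted back by (N0 - PARTIAL_N0) and clamped to 0, so every access can be
// issued with the full N0 width without running past the tensor edge.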
#define GET_SPATIAL_IDX(IDX, N0, PARTIAL_N0) (max((int)(get_global_id(IDX) * N0 - (N0 - PARTIAL_N0) % N0), 0))
|
|
|
|
|
|
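// DOT_PRODUCT_INTEGER8 accumulates the K0-element integer dot product of `a`
// and `b` into `c`. For K0 = 2..4 it maps to dot()/arm_dot_acc()/arm_dot()
// when the corresponding OpenCL extension is enabled, otherwise to the scalar
// fallback below; larger K0 values are handled by splitting the vectors.
//
// Illustrative usage (hypothetical variables):
//   uchar4 a = ...; uchar4 b = ...; uint acc = 0;
//   DOT_PRODUCT_INTEGER8(uchar, uchar, uint, 4, a, b, acc);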
#define DOT_PRODUCT_INTEGER8(A_DATA_TYPE, B_DATA_TYPE, C_DATA_TYPE, K0, a, b, c) DOT_PRODUCT_INTEGER8_STR(A_DATA_TYPE, B_DATA_TYPE, C_DATA_TYPE, K0, a, b, c)
|
|
#define DOT_PRODUCT_INTEGER8_STR(A_DATA_TYPE, B_DATA_TYPE, C_DATA_TYPE, K0, a, b, c) DOT_PRODUCT##K0##_INTEGER8(A_DATA_TYPE, B_DATA_TYPE, C_DATA_TYPE, a, b, c)
|
|
#define DOT_PRODUCT1_INTEGER8(A_DATA_TYPE, B_DATA_TYPE, C_DATA_TYPE, a, b, c) \
|
|
({ \
|
|
c += (C_DATA_TYPE)(a) * (C_DATA_TYPE)(b); \
|
|
})
|
|
#if defined(ARM_COMPUTE_OPENCL_DOT8_ENABLED) && defined(cl_khr_integer_dot_product)
|
|
#define DOT_PRODUCT2_INTEGER8(A_DATA_TYPE, B_DATA_TYPE, C_DATA_TYPE, a, b, c) c += dot((A_DATA_TYPE##4)((a).s01, (A_DATA_TYPE##2)(0)), (B_DATA_TYPE##4)(((b).s01), (B_DATA_TYPE##2)(0)));
|
|
#define DOT_PRODUCT3_INTEGER8(A_DATA_TYPE, B_DATA_TYPE, C_DATA_TYPE, a, b, c) c += dot((A_DATA_TYPE##4)((a).s012, (A_DATA_TYPE)0), (B_DATA_TYPE##4)(((b).s012), (B_DATA_TYPE)0));
|
|
#define DOT_PRODUCT4_INTEGER8(A_DATA_TYPE, B_DATA_TYPE, C_DATA_TYPE, a, b, c) c += dot((a), (b));
|
|
#elif defined(ARM_COMPUTE_OPENCL_DOT8_ACC_ENABLED) && defined(cl_arm_integer_dot_product_accumulate_int8)
|
|
#define DOT_PRODUCT2_INTEGER8(A_DATA_TYPE, B_DATA_TYPE, C_DATA_TYPE, a, b, c) c = arm_dot_acc((A_DATA_TYPE##4)((a).s01, (A_DATA_TYPE##2)(0)), (B_DATA_TYPE##4)(((b).s01), (B_DATA_TYPE##2)(0)), (c));
|
|
#define DOT_PRODUCT3_INTEGER8(A_DATA_TYPE, B_DATA_TYPE, C_DATA_TYPE, a, b, c) c = arm_dot_acc((A_DATA_TYPE##4)((a).s012, (A_DATA_TYPE)0), (B_DATA_TYPE##4)(((b).s012), (B_DATA_TYPE)0), (c));
|
|
#define DOT_PRODUCT4_INTEGER8(A_DATA_TYPE, B_DATA_TYPE, C_DATA_TYPE, a, b, c) c = arm_dot_acc((a), (b), (c));
|
|
#elif defined(ARM_COMPUTE_OPENCL_DOT8_ENABLED) && defined(cl_arm_integer_dot_product_int8)
|
|
#define DOT_PRODUCT2_INTEGER8(A_DATA_TYPE, B_DATA_TYPE, C_DATA_TYPE, a, b, c) c += arm_dot((A_DATA_TYPE##4)((a).s01, (A_DATA_TYPE##2)(0)), (B_DATA_TYPE##4)(((b).s01), (B_DATA_TYPE##2)(0)));
|
|
#define DOT_PRODUCT3_INTEGER8(A_DATA_TYPE, B_DATA_TYPE, C_DATA_TYPE, a, b, c) c += arm_dot((A_DATA_TYPE##4)((a).s012, (A_DATA_TYPE)0), (B_DATA_TYPE##4)(((b).s012), (B_DATA_TYPE)0));
|
|
#define DOT_PRODUCT4_INTEGER8(A_DATA_TYPE, B_DATA_TYPE, C_DATA_TYPE, a, b, c) c += arm_dot((a), (b));
|
|
#else
|
|
#define DOT_PRODUCT2_INTEGER8(A_DATA_TYPE, B_DATA_TYPE, C_DATA_TYPE, a, b, c) \
|
|
({ \
|
|
c += (C_DATA_TYPE)(a).s0 * (C_DATA_TYPE)(b).s0; \
|
|
c += (C_DATA_TYPE)(a).s1 * (C_DATA_TYPE)(b).s1; \
|
|
})
|
|
#define DOT_PRODUCT3_INTEGER8(A_DATA_TYPE, B_DATA_TYPE, C_DATA_TYPE, a, b, c) \
|
|
({ \
|
|
DOT_PRODUCT2_INTEGER8(A_DATA_TYPE, B_DATA_TYPE, C_DATA_TYPE, a, b, c); \
|
|
c += (C_DATA_TYPE)(a).s2 * (C_DATA_TYPE)(b).s2; \
|
|
})
|
|
#define DOT_PRODUCT4_INTEGER8(A_DATA_TYPE, B_DATA_TYPE, C_DATA_TYPE, x, y, val) \
|
|
({ \
|
|
val += (C_DATA_TYPE)(x).s0 * (C_DATA_TYPE)(y).s0; \
|
|
val += (C_DATA_TYPE)(x).s1 * (C_DATA_TYPE)(y).s1; \
|
|
val += (C_DATA_TYPE)(x).s2 * (C_DATA_TYPE)(y).s2; \
|
|
val += (C_DATA_TYPE)(x).s3 * (C_DATA_TYPE)(y).s3; \
|
|
})
|
|
#endif
|
|
#define DOT_PRODUCT5_INTEGER8(A_DATA_TYPE, B_DATA_TYPE, C_DATA_TYPE, a, b, c) \
|
|
({ \
|
|
DOT_PRODUCT4_INTEGER8(A_DATA_TYPE, B_DATA_TYPE, C_DATA_TYPE, ((a).s0123), ((b).s0123), c); \
|
|
DOT_PRODUCT1_INTEGER8(A_DATA_TYPE, B_DATA_TYPE, C_DATA_TYPE, ((a).s4), ((b).s4), c); \
|
|
})
|
|
#define DOT_PRODUCT6_INTEGER8(A_DATA_TYPE, B_DATA_TYPE, C_DATA_TYPE, a, b, c) \
|
|
({ \
|
|
DOT_PRODUCT4_INTEGER8(A_DATA_TYPE, B_DATA_TYPE, C_DATA_TYPE, ((a).s0123), ((b).s0123), c); \
|
|
DOT_PRODUCT2_INTEGER8(A_DATA_TYPE, B_DATA_TYPE, C_DATA_TYPE, ((a).s45), ((b).s45), c); \
|
|
})
|
|
#define DOT_PRODUCT7_INTEGER8(A_DATA_TYPE, B_DATA_TYPE, C_DATA_TYPE, a, b, c) \
|
|
({ \
|
|
DOT_PRODUCT4_INTEGER8(A_DATA_TYPE, B_DATA_TYPE, C_DATA_TYPE, ((a).s0123), ((b).s0123), c); \
|
|
DOT_PRODUCT3_INTEGER8(A_DATA_TYPE, B_DATA_TYPE, C_DATA_TYPE, ((a).s456), ((b).s456), c); \
|
|
})
|
|
#define DOT_PRODUCT8_INTEGER8(A_DATA_TYPE, B_DATA_TYPE, C_DATA_TYPE, a, b, c) \
|
|
({ \
|
|
DOT_PRODUCT4_INTEGER8(A_DATA_TYPE, B_DATA_TYPE, C_DATA_TYPE, ((a).lo), ((b).lo), c); \
|
|
DOT_PRODUCT4_INTEGER8(A_DATA_TYPE, B_DATA_TYPE, C_DATA_TYPE, ((a).hi), ((b).hi), c); \
|
|
})
|
|
#define DOT_PRODUCT9_INTEGER8(A_DATA_TYPE, B_DATA_TYPE, C_DATA_TYPE, a, b, c) \
|
|
({ \
|
|
DOT_PRODUCT8_INTEGER8(A_DATA_TYPE, B_DATA_TYPE, C_DATA_TYPE, ((a).s01234567), ((b).s01234567), c); \
|
|
DOT_PRODUCT1_INTEGER8(A_DATA_TYPE, B_DATA_TYPE, C_DATA_TYPE, ((a).s8), ((b).s8), c); \
|
|
})
|
|
#define DOT_PRODUCT10_INTEGER8(A_DATA_TYPE, B_DATA_TYPE, C_DATA_TYPE, a, b, c) \
|
|
({ \
|
|
DOT_PRODUCT8_INTEGER8(A_DATA_TYPE, B_DATA_TYPE, C_DATA_TYPE, ((a).s01234567), ((b).s01234567), c); \
|
|
DOT_PRODUCT2_INTEGER8(A_DATA_TYPE, B_DATA_TYPE, C_DATA_TYPE, ((a).s89), ((b).s89), c); \
|
|
})
|
|
#define DOT_PRODUCT11_INTEGER8(A_DATA_TYPE, B_DATA_TYPE, C_DATA_TYPE, a, b, c) \
|
|
({ \
|
|
DOT_PRODUCT8_INTEGER8(A_DATA_TYPE, B_DATA_TYPE, C_DATA_TYPE, ((a).s01234567), ((b).s01234567), c); \
|
|
DOT_PRODUCT3_INTEGER8(A_DATA_TYPE, B_DATA_TYPE, C_DATA_TYPE, ((a).s89A), ((b).s89A), c); \
|
|
})
|
|
#define DOT_PRODUCT12_INTEGER8(A_DATA_TYPE, B_DATA_TYPE, C_DATA_TYPE, a, b, c) \
|
|
({ \
|
|
DOT_PRODUCT8_INTEGER8(A_DATA_TYPE, B_DATA_TYPE, C_DATA_TYPE, ((a).s01234567), ((b).s01234567), c); \
|
|
DOT_PRODUCT4_INTEGER8(A_DATA_TYPE, B_DATA_TYPE, C_DATA_TYPE, ((a).s89AB), ((b).s89AB), c); \
|
|
})
|
|
#define DOT_PRODUCT13_INTEGER8(A_DATA_TYPE, B_DATA_TYPE, C_DATA_TYPE, a, b, c) \
|
|
({ \
|
|
DOT_PRODUCT8_INTEGER8(A_DATA_TYPE, B_DATA_TYPE, C_DATA_TYPE, ((a).s01234567), ((b).s01234567), c); \
|
|
DOT_PRODUCT5_INTEGER8(A_DATA_TYPE, B_DATA_TYPE, C_DATA_TYPE, ((a).s89ABC), ((b).s89ABC), c); \
|
|
})
|
|
#define DOT_PRODUCT14_INTEGER8(A_DATA_TYPE, B_DATA_TYPE, C_DATA_TYPE, a, b, c) \
|
|
({ \
|
|
DOT_PRODUCT8_INTEGER8(A_DATA_TYPE, B_DATA_TYPE, C_DATA_TYPE, ((a).s01234567), ((b).s01234567), c); \
|
|
DOT_PRODUCT6_INTEGER8(A_DATA_TYPE, B_DATA_TYPE, C_DATA_TYPE, ((a).s89ABCD), ((b).s89ABCD), c); \
|
|
})
|
|
#define DOT_PRODUCT15_INTEGER8(A_DATA_TYPE, B_DATA_TYPE, C_DATA_TYPE, a, b, c) \
|
|
({ \
|
|
DOT_PRODUCT8_INTEGER8(A_DATA_TYPE, B_DATA_TYPE, C_DATA_TYPE, ((a).s01234567), ((b).s01234567), c); \
|
|
DOT_PRODUCT7_INTEGER8(A_DATA_TYPE, B_DATA_TYPE, C_DATA_TYPE, ((a).s89ABCDE), ((b).s89ABCDE), c); \
|
|
})
|
|
#define DOT_PRODUCT16_INTEGER8(A_DATA_TYPE, B_DATA_TYPE, C_DATA_TYPE, a, b, c) \
|
|
({ \
|
|
DOT_PRODUCT8_INTEGER8(A_DATA_TYPE, B_DATA_TYPE, C_DATA_TYPE, ((a).lo), ((b).lo), c); \
|
|
DOT_PRODUCT8_INTEGER8(A_DATA_TYPE, B_DATA_TYPE, C_DATA_TYPE, ((a).hi), ((b).hi), c); \
|
|
})
|
|
|
|
|
|
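// REDUCE_INTEGER8 sums the K0 elements of `a` into `c` by taking the dot
// product of `a` with a vector of ones of type TILE_VECTOR_TYPE##K0.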
#define REDUCE_INTEGER8(A_DATA_TYPE, B_DATA_TYPE, C_DATA_TYPE, K0, a, c) REDUCE_INTEGER8_STR(A_DATA_TYPE, B_DATA_TYPE, C_DATA_TYPE, K0, a, c)
|
|
#define REDUCE_INTEGER8_STR(A_DATA_TYPE, B_DATA_TYPE, C_DATA_TYPE, K0, a, c) DOT_PRODUCT_INTEGER8(A_DATA_TYPE, B_DATA_TYPE, C_DATA_TYPE, K0, a, (TILE_VECTOR_TYPE##K0(B_DATA_TYPE))1, c)
|
|
|
|
|
|
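// V_LOAD loads WIDTH elements of DATA_TYPE starting at column X of row Y.
// The BUFFER variant computes the address from the tensor pointer and
// STRIDE_Y; the IMAGE variant reads from the tensor's image2d object, where X
// is given in elements and converted to pixel units (4 elements per pixel).
// V_STORE below is the symmetric store operation.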
#define V_LOAD(DATA_TYPE, WIDTH, TENSOR_TYPE, TENSOR, X, Y, STRIDE_Y) V_LOAD_STR(DATA_TYPE, WIDTH, TENSOR_TYPE, TENSOR, X, Y, STRIDE_Y)
|
|
#define V_LOAD_STR(DATA_TYPE, WIDTH, TENSOR_TYPE, TENSOR, X, Y, STRIDE_Y) V_LOAD_##TENSOR_TYPE(DATA_TYPE, WIDTH, TENSOR, X, Y, STRIDE_Y)
|
|
#define V_LOAD_BUFFER(DATA_TYPE, WIDTH, TENSOR, X, Y, STRIDE_Y) \
|
|
VLOAD(WIDTH) \
|
|
(0, (__global DATA_TYPE *)(TENSOR##_ptr + TENSOR##_offset_first_element_in_bytes + (X) * sizeof(DATA_TYPE) + (Y) * (STRIDE_Y)))
|
|
#define V_LOAD_IMAGE(DATA_TYPE, WIDTH, TENSOR, X, Y, STRIDE_Y) READ_IMAGE2D(DATA_TYPE, CONVERT_VECTOR_SIZE_TO_PIXEL_UNIT(WIDTH), TENSOR##_img, (X) / 4, (Y))
|
|
|
|
|
|
#define V_STORE(DATA_TYPE, WIDTH, TENSOR_TYPE, TENSOR, X, Y, STRIDE_Y, VALUES) V_STORE_STR(DATA_TYPE, WIDTH, TENSOR_TYPE, TENSOR, X, Y, STRIDE_Y, VALUES)
|
|
#define V_STORE_STR(DATA_TYPE, WIDTH, TENSOR_TYPE, TENSOR, X, Y, STRIDE_Y, VALUES) V_STORE_##TENSOR_TYPE(DATA_TYPE, WIDTH, TENSOR, X, Y, STRIDE_Y, VALUES)
|
|
#define V_STORE_BUFFER(DATA_TYPE, WIDTH, TENSOR, X, Y, STRIDE_Y, VALUES) \
|
|
VSTORE(WIDTH) \
|
|
(VALUES, 0, (__global DATA_TYPE *)(TENSOR##_ptr + TENSOR##_offset_first_element_in_bytes + (X) * sizeof(DATA_TYPE) + (Y) * (STRIDE_Y)))
|
|
#define V_STORE_IMAGE(DATA_TYPE, WIDTH, TENSOR, X, Y, STRIDE_Y, VALUES) WRITE_IMAGE2D(DATA_TYPE, CONVERT_VECTOR_SIZE_TO_PIXEL_UNIT(WIDTH), TENSOR##_img, (X) / 4, (Y), VALUES)
|
|
|
|
|
|
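// T_LOAD fills the HEIGHT x WIDTH tile `dst` with one V_LOAD per row,
// advancing the Y coordinate by YI_MULTIPLIER rows between iterations.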
#define T_LOAD(DATA_TYPE, HEIGHT, WIDTH, TENSOR_TYPE, TENSOR, X, Y, YI_MULTIPLIER, STRIDE_Y, dst) \
|
|
({ \
|
|
LOOP_UNROLLING(int, _i, 0, 1, HEIGHT, \
|
|
{ \
|
|
dst[_i].v = V_LOAD(DATA_TYPE, WIDTH, TENSOR_TYPE, TENSOR, X, ((Y) + _i * (int)(YI_MULTIPLIER)), STRIDE_Y); \
|
|
}) \
|
|
})
|
|
|
|
|
|
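// T_LOAD_INDIRECT behaves like T_LOAD, but the Y coordinate of each row is
// taken from the indirect_y tile instead of being computed incrementally.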
#define T_LOAD_INDIRECT(DATA_TYPE, HEIGHT, WIDTH, TENSOR_TYPE, TENSOR, X, STRIDE_Y, indirect_y, dst) \
|
|
({ \
|
|
LOOP_UNROLLING(int, _i, 0, 1, HEIGHT, \
|
|
{ \
|
|
dst[_i].v = V_LOAD(DATA_TYPE, WIDTH, TENSOR_TYPE, TENSOR, X, (indirect_y[_i].v), STRIDE_Y); \
|
|
}) \
|
|
})
|
|
|
|
|
|
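// T_LOAD_INDIRECT_WIDTH_SELECT loads WIDTH0 elements per row through the
// indirect Y offsets, or only the WIDTH1 leftover elements (via
// VLOAD_PARTIAL) when WIDTH1_CONDITION is true, e.g. for the last tile along
// the X dimension. Rows are processed from the bottom of the tile upwards.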
#define T_LOAD_INDIRECT_WIDTH_SELECT(DATA_TYPE, HEIGHT, WIDTH0, WIDTH1, TENSOR_TYPE, TENSOR, X, STRIDE_Y, WIDTH1_CONDITION, dst, indirect_y) \
|
|
({ \
|
|
if(WIDTH1_CONDITION) \
|
|
{ \
|
|
LOOP_UNROLLING(int, _i, 0, 1, HEIGHT, \
|
|
{ \
|
|
VLOAD_PARTIAL(WIDTH0, WIDTH1) \
|
|
(dst[HEIGHT - 1 - _i].v, 0, (__global DATA_TYPE *)(TENSOR##_ptr + TENSOR##_offset_first_element_in_bytes + (X) * sizeof(DATA_TYPE) + (indirect_y[HEIGHT - 1 - _i].v) * STRIDE_Y)); \
|
|
}) \
|
|
} \
|
|
else \
|
|
{ \
|
|
LOOP_UNROLLING(int, _i, 0, 1, HEIGHT, \
|
|
{ \
|
|
dst[HEIGHT - 1 - _i].v = V_LOAD(DATA_TYPE, WIDTH0, TENSOR_TYPE, TENSOR, X, (indirect_y[HEIGHT - 1 - _i].v), STRIDE_Y); \
|
|
}) \
|
|
} \
|
|
})
|
|
|
|
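// T_LOAD_NHWC gathers a TILE_HEIGHT x TILE_WIDTH spatial patch of
// TILE_CHANNELS channels from an NHWC tensor into `dst`, flattening (B, Y, X)
// into a single row index. Out-of-bounds positions are skipped, so the caller
// is expected to pre-initialise `dst` (e.g. with zeroes) for padded elements.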
#define T_LOAD_NHWC(DATA_TYPE, TILE_HEIGHT, TILE_WIDTH, TILE_CHANNELS, TENSOR_TYPE, TENSOR, B, Y, X, C, TENSOR_WIDTH, TENSOR_HEIGHT, STRIDE_Y, dst) \
|
|
({ \
|
|
LOOP_UNROLLING(int, _yk, 0, 1, TILE_HEIGHT, \
|
|
{ \
|
|
LOOP_UNROLLING(int, _xk, 0, 1, TILE_WIDTH, \
|
|
{ \
|
|
int _src_y = (X) + _xk + ((Y) + _yk) * (TENSOR_WIDTH); \
|
|
_src_y += (B) * (int)(TENSOR_WIDTH) * (int)(TENSOR_HEIGHT); \
|
|
int _src_valid_y = (((X) + _xk) >= 0 && ((X) + _xk) < (int)(TENSOR_WIDTH) && ((Y) + _yk) >= 0 && ((Y) + _yk) < (int)(TENSOR_HEIGHT)); \
|
|
if(_src_valid_y != 0) \
|
|
{ \
|
|
dst[_xk + _yk * (TILE_WIDTH)].v = V_LOAD(DATA_TYPE, TILE_CHANNELS, TENSOR_TYPE, TENSOR, C, _src_y, STRIDE_Y); \
|
|
} \
|
|
}) \
|
|
}) \
|
|
})
|
|
|
|
|
|
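// T_LOAD_NHWC_WITH_DILATION is the dilated variant of T_LOAD_NHWC: samples
// are taken every DILATION_X / DILATION_Y elements and addressed through the
// tensor strides directly. When BOUNDARY_CHECK is false the bounds test is
// skipped entirely.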
#define T_LOAD_NHWC_WITH_DILATION(DATA_TYPE, TILE_HEIGHT, TILE_WIDTH, TILE_CHANNELS, TENSOR_TYPE, TENSOR, B, Y, X, C, TENSOR_WIDTH, TENSOR_HEIGHT, DILATION_X, DILATION_Y, BOUNDARY_CHECK, dst) \
|
|
({ \
|
|
LOOP_UNROLLING(int, _yk, 0, 1, TILE_HEIGHT, \
|
|
{ \
|
|
LOOP_UNROLLING(int, _xk, 0, 1, TILE_WIDTH, \
|
|
{ \
|
|
int _src_y = (X) + _xk * (DILATION_X); \
|
|
int _src_z = ((Y) + _yk * (DILATION_Y)); \
|
|
int _src_w = (B); \
|
|
bool _src_valid_y = (((X) + _xk * (DILATION_X)) >= 0) && (((X) + _xk * (DILATION_X)) < (int)(TENSOR_WIDTH)) && (((Y) + _yk * (DILATION_Y)) >= 0) && (((Y) + _yk * (DILATION_Y)) < (int)(TENSOR_HEIGHT)); \
|
|
if(!(BOUNDARY_CHECK)) \
|
|
{ \
|
|
dst[_xk + _yk * (TILE_WIDTH)].v = VLOAD(TILE_CHANNELS) \
|
|
(0, (__global DATA_TYPE *)(TENSOR##_ptr + TENSOR##_offset_first_element_in_bytes + (C) * sizeof(DATA_TYPE) + (_src_y) * (TENSOR##_stride_y) + (_src_z) * (TENSOR##_stride_z) + (_src_w) * (TENSOR##_stride_w))); \
|
|
} \
|
|
else \
|
|
{ \
|
|
if(_src_valid_y) \
|
|
{ \
|
|
dst[_xk + _yk * (TILE_WIDTH)].v = VLOAD(TILE_CHANNELS) \
|
|
(0, (__global DATA_TYPE *)(TENSOR##_ptr + TENSOR##_offset_first_element_in_bytes + (C) * sizeof(DATA_TYPE) + (_src_y) * (TENSOR##_stride_y) + (_src_z) * (TENSOR##_stride_z) + (_src_w) * (TENSOR##_stride_w))); \
|
|
} \
|
|
} \
|
|
}) \
|
|
}) \
|
|
})
|
|
|
|
|
|
#define T_LOAD_NHWC_INDIRECT(DATA_TYPE, TILE_AREA, TILE_CHANNELS, TENSOR_TYPE, TENSOR, B, Y, X, C, TENSOR_WIDTH, TENSOR_HEIGHT, STRIDE_Y, xi, yi, dst) \
|
|
({ \
|
|
LOOP_UNROLLING(int, _i, 0, 1, TILE_AREA, \
|
|
{ \
|
|
int _src_y = (X) + xi[_i].v + ((Y) + yi[_i].v) * (TENSOR_WIDTH); \
|
|
_src_y += (B) * (int)(TENSOR_WIDTH) * (int)(TENSOR_HEIGHT); \
|
|
int _src_valid_y = (((X) + xi[_i].v) >= 0 && ((X) + xi[_i].v) < (int)(TENSOR_WIDTH) && ((Y) + yi[_i].v) >= 0 && ((Y) + yi[_i].v) < (int)(TENSOR_HEIGHT)); \
|
|
if(_src_valid_y != 0) \
|
|
{ \
|
|
dst[_i].v = V_LOAD(DATA_TYPE, TILE_CHANNELS, TENSOR_TYPE, TENSOR, C, _src_y, STRIDE_Y); \
|
|
} \
|
|
}) \
|
|
})
|
|
|
|
|
|
#define T_LOAD2D_INDIRECT(DATA_TYPE, TILE_AREA, TILE_CHANNELS, TENSOR_TYPE, TENSOR, C, STRIDE_Y, yi, dst) T_LOAD2D_INDIRECT_STR(DATA_TYPE, TILE_AREA, TILE_CHANNELS, TENSOR_TYPE, TENSOR, C, STRIDE_Y, yi, dst)
|
|
#define T_LOAD2D_INDIRECT_STR(DATA_TYPE, TILE_AREA, TILE_CHANNELS, TENSOR_TYPE, TENSOR, C, STRIDE_Y, yi, dst) T_LOAD2D_INDIRECT_##TENSOR_TYPE(DATA_TYPE, TILE_AREA, TILE_CHANNELS, TENSOR_TYPE, TENSOR, C, STRIDE_Y, yi, dst)
|
|
#define T_LOAD2D_INDIRECT_BUFFER(DATA_TYPE, TILE_AREA, TILE_CHANNELS, TENSOR_TYPE, TENSOR, C, STRIDE_Y, yi, dst) \
|
|
({ \
|
|
LOOP_UNROLLING(int, _i, 0, 1, TILE_AREA, \
|
|
{ \
|
|
if(yi[0].s[_i] >= 0) \
|
|
{ \
|
|
dst[_i].v = V_LOAD(DATA_TYPE, TILE_CHANNELS, TENSOR_TYPE, TENSOR, C, yi[0].s[_i], STRIDE_Y); \
|
|
} \
|
|
}) \
|
|
})
|
|
|
|
#define T_LOAD2D_INDIRECT_IMAGE(DATA_TYPE, TILE_AREA, TILE_CHANNELS, TENSOR_TYPE, TENSOR, C, STRIDE_Y, yi, dst) \
|
|
({ \
|
|
LOOP_UNROLLING(int, _i, 0, 1, TILE_AREA, \
|
|
{ \
|
|
dst[_i].v = V_LOAD(DATA_TYPE, TILE_CHANNELS, TENSOR_TYPE, TENSOR, C, yi[0].s[_i], STRIDE_Y); \
|
|
}) \
|
|
})
|
|
|
|
|
|
#define T_LOAD_NDHWC_INDIRECT(DATA_TYPE, TILE_AREA, TILE_CHANNELS, TENSOR_TYPE, TENSOR, B, Z, Y, X, C, TENSOR_WIDTH, TENSOR_HEIGHT, TENSOR_DEPTH, STRIDE_Y, xi, yi, zi, dst) \
|
|
({ \
|
|
LOOP_UNROLLING(int, _i, 0, 1, TILE_AREA, \
|
|
{ \
|
|
int _src_y = (X) + xi[_i].v + ((Y) + yi[_i].v) * (TENSOR_WIDTH) + ((Z) + zi[_i].v) * (TENSOR_WIDTH * TENSOR_HEIGHT); \
|
|
_src_y += (B) * (int)(TENSOR_WIDTH) * (int)(TENSOR_HEIGHT) * (int)(TENSOR_DEPTH); \
|
|
int _src_valid_y = (((X) + xi[_i].v) >= 0 && ((X) + xi[_i].v) < (int)(TENSOR_WIDTH) && ((Y) + yi[_i].v) >= 0 && ((Y) + yi[_i].v) < (int)(TENSOR_HEIGHT) \
|
|
&& ((Z) + zi[_i].v) >= 0 && ((Z) + zi[_i].v) < (int)(TENSOR_DEPTH)); \
|
|
if(_src_valid_y != 0) \
|
|
{ \
|
|
dst[_i].v = V_LOAD(DATA_TYPE, TILE_CHANNELS, TENSOR_TYPE, TENSOR, C, _src_y, STRIDE_Y); \
|
|
} \
|
|
}) \
|
|
})
|
|
|
|
|
|
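// T_STORE_INDIRECT_WIDTH_SELECT is the store counterpart of
// T_LOAD_INDIRECT_WIDTH_SELECT: it writes WIDTH0 elements per row, or only
// the WIDTH1 leftover elements when WIDTH1_CONDITION is true, converting the
// source tile to DATA_TYPE and using indirect_y for the row offsets.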
#define T_STORE_INDIRECT_WIDTH_SELECT(DATA_TYPE, HEIGHT, WIDTH0, WIDTH1, TENSOR_TYPE, TENSOR, X, STRIDE_Y, WIDTH1_CONDITION, src, indirect_y) \
|
|
({ \
|
|
if(WIDTH1_CONDITION) \
|
|
{ \
|
|
LOOP_UNROLLING(int, _i, 0, 1, HEIGHT, \
|
|
{ \
|
|
VSTORE_PARTIAL(WIDTH0, WIDTH1) \
|
|
(CONVERT(src[HEIGHT - 1 - _i].v, VEC_DATA_TYPE(DATA_TYPE, WIDTH0)), 0, (__global DATA_TYPE *)(TENSOR##_ptr + TENSOR##_offset_first_element_in_bytes + (X) * sizeof(DATA_TYPE) + (indirect_y[HEIGHT - 1 - _i].v) * STRIDE_Y)); \
|
|
}) \
|
|
} \
|
|
else \
|
|
{ \
|
|
LOOP_UNROLLING(int, _i, 0, 1, HEIGHT, \
|
|
{ \
|
|
VSTORE(WIDTH0) \
|
|
(CONVERT(src[HEIGHT - 1 - _i].v, VEC_DATA_TYPE(DATA_TYPE, WIDTH0)), 0, (__global DATA_TYPE *)(TENSOR##_ptr + TENSOR##_offset_first_element_in_bytes + (X) * sizeof(DATA_TYPE) + (indirect_y[HEIGHT - 1 - _i].v) * STRIDE_Y)); \
|
|
}) \
|
|
} \
|
|
})
|
|
|
|
|
|
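// T_OFFSET_CORRECTION adds the asymmetric-quantization offset terms to an
// integer GEMM accumulator tile: for every output element it accumulates
// WEI_OFFSET * sum_k(lhs row) and SRC_OFFSET * sum_k(rhs row) over the K0
// values processed in this step.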
#define T_OFFSET_CORRECTION(ACC_DATA_TYPE, M0, N0, K0, SRC_OFFSET, WEI_OFFSET, lhs, rhs, dst) \
|
|
({ \
|
|
LOOP_UNROLLING(int, _m0, 0, 1, M0, \
|
|
{ \
|
|
ACC_DATA_TYPE _tm = 0; \
|
|
LOOP_UNROLLING(int, _k0, 0, 1, K0, \
|
|
{ \
|
|
_tm += ((ACC_DATA_TYPE)lhs[_m0].s[_k0] * (ACC_DATA_TYPE)WEI_OFFSET); \
|
|
}) \
|
|
LOOP_UNROLLING(int, _n0, 0, 1, N0, \
|
|
{ \
|
|
dst[_m0].s[_n0] += _tm; \
|
|
LOOP_UNROLLING(int, _k0, 0, 1, K0, \
|
|
{ \
|
|
dst[_m0].s[_n0] += ((ACC_DATA_TYPE)rhs[_n0].s[_k0] * (ACC_DATA_TYPE)SRC_OFFSET); \
|
|
}) \
|
|
}) \
|
|
}) \
|
|
})
|
|
|
|
|
|
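// T_QUANTIZE8 requantizes an int32 accumulator tile to 8 bits using a
// fixed-point multiplier and shift (rounding the high 32 bits of the 64-bit
// product), then adds DST_OFFSET and saturates to DST_DATA_TYPE. PER_TENSOR
// uses the single DST_MULTIPLIER / DST_SHIFT pair, PER_CHANNEL reads a
// multiplier and shift per output channel from dst_multipliers / dst_shifts,
// and T_QUANTIZE8_ASYMMETRIC performs the same per-tensor computation but
// takes no multiplier/shift tiles in its parameter list.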
#define T_QUANTIZE8(SRC_DATA_TYPE, DST_DATA_TYPE, QUANTIZATION_TYPE, M0, N0, DST_OFFSET, DST_SHIFT, DST_MULTIPLIER, src, dst_multipliers, dst_shifts, dst) T_QUANTIZE8_STR(SRC_DATA_TYPE, DST_DATA_TYPE, QUANTIZATION_TYPE, M0, N0, DST_OFFSET, DST_SHIFT, DST_MULTIPLIER, src, dst_multipliers, dst_shifts, dst)
|
|
#define T_QUANTIZE8_STR(SRC_DATA_TYPE, DST_DATA_TYPE, QUANTIZATION_TYPE, M0, N0, DST_OFFSET, DST_SHIFT, DST_MULTIPLIER, src, dst_multipliers, dst_shifts, dst) T_QUANTIZE8_##QUANTIZATION_TYPE(SRC_DATA_TYPE, DST_DATA_TYPE, M0, N0, DST_OFFSET, DST_SHIFT, DST_MULTIPLIER, src, dst_multipliers, dst_shifts, dst)
|
|
|
|
|
|
#define T_QUANTIZE8_PER_TENSOR(SRC_DATA_TYPE, DST_DATA_TYPE, M0, N0, DST_OFFSET, DST_SHIFT, DST_MULTIPLIER, src, dst_multipliers, dst_shifts, dst) \
|
|
({ \
|
|
LOOP_UNROLLING(int, _m0, 0, 1, M0, \
|
|
{ \
|
|
LOOP_UNROLLING(int, _n0, 0, 1, N0, \
|
|
{ \
|
|
SRC_DATA_TYPE _tmp = 0; \
|
|
SRC_DATA_TYPE _src = src[_m0].s[_n0]; \
|
|
_src *= select((SRC_DATA_TYPE)1, ((SRC_DATA_TYPE)1 << (SRC_DATA_TYPE)(-DST_SHIFT)), ((SRC_DATA_TYPE)DST_SHIFT < (SRC_DATA_TYPE)0)); \
|
|
SRC_DATA_TYPE overflow = _src == DST_MULTIPLIER && _src == INT_MIN; \
|
|
long a_64 = (long)(_src); \
|
|
long b_64 = (long)(DST_MULTIPLIER); \
|
|
long ab_64 = a_64 * b_64; \
|
|
long mask1 = 1 << 30; \
|
|
long mask2 = 1 - (1 << 30); \
|
|
long is_positive_or_zero = ab_64 >= 0; \
|
|
long nudge = select(mask2, mask1, is_positive_or_zero); \
|
|
SRC_DATA_TYPE ab_x2_high32 = CONVERT((ab_64 + nudge) / (long)(1ll << 31), SRC_DATA_TYPE); \
|
|
_tmp = select(ab_x2_high32, (SRC_DATA_TYPE)INT_MAX, overflow); \
|
|
if(DST_SHIFT >= 0) \
|
|
{ \
|
|
long mask = ((((int)1) << DST_SHIFT) - (long)1); \
|
|
long threshold = _tmp < (int)0 ? (mask >> 1) + (long)1 : (mask >> 1) + 0; \
|
|
_tmp = (_tmp & mask) > threshold ? (_tmp >> DST_SHIFT) + (int)1 : (_tmp >> DST_SHIFT); \
|
|
} \
|
|
_tmp += DST_OFFSET; \
|
|
dst[_m0].s[_n0] = CONVERT_SAT(_tmp, DST_DATA_TYPE); \
|
|
}) \
|
|
}) \
|
|
})
|
|
|
|
|
|
#define T_QUANTIZE8_PER_CHANNEL(SRC_DATA_TYPE, DST_DATA_TYPE, M0, N0, DST_OFFSET, DST_SHIFT, DST_MULTIPLIER, src, dst_multipliers, dst_shifts, dst) \
|
|
({ \
|
|
LOOP_UNROLLING(int, _m0, 0, 1, M0, \
|
|
{ \
|
|
LOOP_UNROLLING(int, _n0, 0, 1, N0, \
|
|
{ \
|
|
SRC_DATA_TYPE _tmp = 0; \
|
|
SRC_DATA_TYPE _tmp2 = 0; \
|
|
SRC_DATA_TYPE _src = src[_m0].s[_n0]; \
|
|
SRC_DATA_TYPE _dst_multiplier = dst_multipliers[0].s[_n0]; \
|
|
SRC_DATA_TYPE _dst_shift = dst_shifts[0].s[_n0]; \
|
|
_src *= select((SRC_DATA_TYPE)1, ((SRC_DATA_TYPE)1 << (SRC_DATA_TYPE)(-_dst_shift)), ((SRC_DATA_TYPE)_dst_shift < (SRC_DATA_TYPE)0)); \
|
|
SRC_DATA_TYPE overflow = _src == _dst_multiplier && _src == INT_MIN; \
|
|
long a_64 = (long)(_src); \
|
|
long b_64 = (long)(_dst_multiplier); \
|
|
long ab_64 = a_64 * b_64; \
|
|
long mask1 = 1 << 30; \
|
|
long mask2 = 1 - (1 << 30); \
|
|
long is_positive_or_zero = ab_64 >= 0; \
|
|
long nudge = select(mask2, mask1, is_positive_or_zero); \
|
|
SRC_DATA_TYPE ab_x2_high32 = CONVERT((ab_64 + nudge) / (long)(1ll << 31), SRC_DATA_TYPE); \
|
|
_tmp = select(ab_x2_high32, (SRC_DATA_TYPE)INT_MAX, overflow); \
|
|
long mask = ((((int)1) << _dst_shift) - (int)1); \
|
|
long threshold = (mask >> 1) + any(_tmp); \
|
|
_tmp2 = _tmp >> _dst_shift; \
|
|
_tmp2 += select(0, 1, (_tmp & mask) > threshold); \
|
|
_tmp = select(_tmp, _tmp2, _dst_shift >= 0); \
|
|
_tmp += DST_OFFSET; \
|
|
dst[_m0].s[_n0] = CONVERT_SAT(_tmp, DST_DATA_TYPE); \
|
|
}) \
|
|
}) \
|
|
})
|
|
|
|
|
|
#define T_QUANTIZE8_ASYMMETRIC(SRC_DATA_TYPE, DST_DATA_TYPE, M0, N0, DST_OFFSET, DST_SHIFT, DST_MULTIPLIER, src, dst) \
|
|
({ \
|
|
LOOP_UNROLLING(int, _m0, 0, 1, M0, \
|
|
{ \
|
|
LOOP_UNROLLING(int, _n0, 0, 1, N0, \
|
|
{ \
|
|
SRC_DATA_TYPE _tmp = 0; \
|
|
SRC_DATA_TYPE _src = src[_m0].s[_n0]; \
|
|
_src *= select((SRC_DATA_TYPE)1, ((SRC_DATA_TYPE)1 << (SRC_DATA_TYPE)(-DST_SHIFT)), ((SRC_DATA_TYPE)DST_SHIFT < (SRC_DATA_TYPE)0)); \
|
|
SRC_DATA_TYPE overflow = _src == DST_MULTIPLIER && _src == INT_MIN; \
|
|
long a_64 = (long)(_src); \
|
|
long b_64 = (long)(DST_MULTIPLIER); \
|
|
long ab_64 = a_64 * b_64; \
|
|
long mask1 = 1 << 30; \
|
|
long mask2 = 1 - (1 << 30); \
|
|
long is_positive_or_zero = ab_64 >= 0; \
|
|
long nudge = select(mask2, mask1, is_positive_or_zero); \
|
|
SRC_DATA_TYPE ab_x2_high32 = CONVERT((ab_64 + nudge) / (long)(1ll << 31), SRC_DATA_TYPE); \
|
|
_tmp = select(ab_x2_high32, (SRC_DATA_TYPE)INT_MAX, overflow); \
|
|
if(DST_SHIFT >= 0) \
|
|
{ \
|
|
long mask = ((((int)1) << DST_SHIFT) - (int)1); \
|
|
long threshold = _tmp < (int)0 ? (mask >> 1) + (long)1 : (mask >> 1) + 0; \
|
|
_tmp = (_tmp & mask) > threshold ? (_tmp >> DST_SHIFT) + (int)1 : (_tmp >> DST_SHIFT); \
|
|
} \
|
|
_tmp += DST_OFFSET; \
|
|
dst[_m0].s[_n0] = CONVERT_SAT(_tmp, DST_DATA_TYPE); \
|
|
}) \
|
|
}) \
|
|
})
|
|
|
|
|
|
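// T_ROWSET_MASK overwrites the elements of each tile row with VALUE_TO_SET
// wherever the corresponding element of the mask row compares equal to 0.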
#define T_ROWSET_MASK(DATA_TYPE, M0, N0, VALUE_TO_SET, a, mask) \
|
|
({ \
|
|
LOOP_UNROLLING(int, _m0, 0, 1, M0, \
|
|
{ \
|
|
LOOP_UNROLLING(int, _n0, 0, 1, N0, \
|
|
{ \
|
|
a[_m0].s[_n0] = select((DATA_TYPE)(a[_m0].s[_n0]), (DATA_TYPE)(VALUE_TO_SET), (SELECT_DATA_TYPE(DATA_TYPE))(mask[_m0].v == (DATA_TYPE)0)); \
|
|
}) \
|
|
}) \
|
|
})
|
|
|
|
|
|
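// T_ACTIVATION applies the ACTIVATION() operator row by row to the src tile
// and writes the result to dst; A_VAL and B_VAL are the activation parameters
// (e.g. the clamp bounds for the bounded ReLU variants).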
#define T_ACTIVATION(DATA_TYPE, M0, N0, ACTIVATION_TYPE, A_VAL, B_VAL, src, dst) \
|
|
({ \
|
|
LOOP_UNROLLING(int, _m0, 0, 1, M0, \
|
|
{ \
|
|
dst[_m0].v = ACTIVATION(ACTIVATION_TYPE, DATA_TYPE, N0, src[_m0].v, A_VAL, B_VAL); \
|
|
}) \
|
|
})
|
|
|
|
|
|
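// Quantized activation operators used by T_ACTIVATION_QUANTIZED below:
// ZERO_VALUE is the quantized representation of zero and A_VAL / B_VAL are
// the (already quantized) clamp bounds.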
#define relu_op_quantized(DATA_TYPE, VEC_SIZE, ZERO_VALUE, A_VAL, B_VAL, x) (max((DATA_TYPE)ZERO_VALUE, x))
|
|
|
|
#define brelu_op_quantized(DATA_TYPE, VEC_SIZE, ZERO_VALUE, A_VAL, B_VAL, x) (min((DATA_TYPE)A_VAL, max((DATA_TYPE)ZERO_VALUE, x)))
|
|
|
|
#define lu_brelu_op_quantized(DATA_TYPE, VEC_SIZE, ZERO_VALUE, A_VAL, B_VAL, x) (min(max(x, (DATA_TYPE)B_VAL), (DATA_TYPE)A_VAL))
|
|
|
|
#define hard_swish_op_quantized(DATA_TYPE, VEC_SIZE, ZERO_VALUE, A_VAL, B_VAL, x) (x * ((min(max((DATA_TYPE)(x + (DATA_TYPE)3.f), (DATA_TYPE)0.f), (DATA_TYPE)6.f)) * (DATA_TYPE)0.166666667f))
|
|
|
|
#define identity_op_quantized(DATA_TYPE, VEC_SIZE, ZERO_VALUE, A_VAL, B_VAL, x) (x)
|
|
|
|
#define ACT_OP_QUANTIZED(op, DATA_TYPE, VEC_SIZE, ZERO_VALUE, A_VAL, B_VAL, x) op##_op_quantized(DATA_TYPE, VEC_SIZE, ZERO_VALUE, A_VAL, B_VAL, x)
|
|
#define ACTIVATION_QUANTIZED(op, DATA_TYPE, VEC_SIZE, ZERO_VALUE, A_VAL, B_VAL, x) ACT_OP_QUANTIZED(op, DATA_TYPE, VEC_SIZE, ZERO_VALUE, A_VAL, B_VAL, x)
|
|
|
|
#define V_ADD(A_VAL, B_VAL) ((A_VAL) + (B_VAL))
|
|
#define V_SUB(A_VAL, B_VAL) ((A_VAL) - (B_VAL))
|
|
#define V_DIV(A_VAL, B_VAL) ((A_VAL) / (B_VAL))
|
|
#define V_MUL(A_VAL, B_VAL) ((A_VAL) * (B_VAL))
|
|
|
|
|
|
#define T_ACTIVATION_QUANTIZED(DATA_TYPE, M0, N0, ACTIVATION_TYPE, ZERO_VALUE, A_VAL, B_VAL, src, dst) \
|
|
({ \
|
|
LOOP_UNROLLING(int, _m0, 0, 1, M0, \
|
|
{ \
|
|
dst[_m0].v = ACTIVATION_QUANTIZED(ACTIVATION_TYPE, DATA_TYPE, N0, ZERO_VALUE, A_VAL, B_VAL, src[_m0].v); \
|
|
}) \
|
|
})
|
|
|
|
|
|
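// Element-wise tile operators: T_ADD / T_ADD_CONSTANT / T_SCALE_CONSTANT work
// row by row on same-shaped tiles, the T_ELTWISE_BROADCAST_* variants
// broadcast a single row (rhs[0], or lhs[0] for the LHS_X forms) across the
// other operand, and T_ELTWISE applies V_ADD / V_SUB / V_DIV / V_MUL after
// converting both operands to DST_DATA_TYPE.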
#define T_ADD(DATA_TYPE, M0, N0, lhs, rhs, dst) \
|
|
({ \
|
|
LOOP_UNROLLING(int, _m0, 0, 1, M0, \
|
|
{ \
|
|
dst[_m0].v = lhs[_m0].v + rhs[_m0].v; \
|
|
}) \
|
|
})
|
|
|
|
|
|
#define T_ADD_CONSTANT(DATA_TYPE, M0, N0, lhs, rhs_constant, dst) \
|
|
({ \
|
|
LOOP_UNROLLING(int, _m0, 0, 1, M0, \
|
|
{ \
|
|
dst[_m0].v = lhs[_m0].v + (DATA_TYPE)rhs_constant; \
|
|
}) \
|
|
})
|
|
|
|
#define T_ELTWISE_BROADCAST_ADD_X(DST_DATA_TYPE, M0, N0, lhs, rhs, dst) T_ELTWISE_BROADCAST_X(V_ADD, DST_DATA_TYPE, M0, N0, lhs, rhs, dst)
|
|
#define T_ELTWISE_BROADCAST_LHS_X_ADD(DST_DATA_TYPE, M0, N0, lhs, rhs, dst) T_ELTWISE_BROADCAST_LHS_X(V_ADD, DST_DATA_TYPE, M0, N0, lhs, rhs, dst)
|
|
#define T_ELTWISE_BROADCAST_RHS_X_ADD(DST_DATA_TYPE, M0, N0, lhs, rhs, dst) T_ELTWISE_BROADCAST_X(V_ADD, DST_DATA_TYPE, M0, N0, lhs, rhs, dst)
|
|
|
|
#define T_ELTWISE_BROADCAST_LHS_X_SUB(DST_DATA_TYPE, M0, N0, lhs, rhs, dst) T_ELTWISE_BROADCAST_LHS_X(V_SUB, DST_DATA_TYPE, M0, N0, lhs, rhs, dst)
|
|
#define T_ELTWISE_BROADCAST_RHS_X_SUB(DST_DATA_TYPE, M0, N0, lhs, rhs, dst) T_ELTWISE_BROADCAST_X(V_SUB, DST_DATA_TYPE, M0, N0, lhs, rhs, dst)
|
|
|
|
#define T_ELTWISE_BROADCAST_DIV_X(DST_DATA_TYPE, M0, N0, lhs, rhs, dst) T_ELTWISE_BROADCAST_X(V_DIV, DST_DATA_TYPE, M0, N0, lhs, rhs, dst)
|
|
|
|
#define T_ELTWISE_BROADCAST_LHS_X_MUL(DST_DATA_TYPE, M0, N0, lhs, rhs, dst) T_ELTWISE_BROADCAST_LHS_X(V_MUL, DST_DATA_TYPE, M0, N0, lhs, rhs, dst)
|
|
#define T_ELTWISE_BROADCAST_RHS_X_MUL(DST_DATA_TYPE, M0, N0, lhs, rhs, dst) T_ELTWISE_BROADCAST_X(V_MUL, DST_DATA_TYPE, M0, N0, lhs, rhs, dst)
|
|
|
|
|
|
#define T_SCALE_CONSTANT(DATA_TYPE, M0, N0, lhs, rhs_constant, dst) \
|
|
({ \
|
|
LOOP_UNROLLING(int, _m0, 0, 1, M0, \
|
|
{ \
|
|
dst[_m0].v = lhs[_m0].v * (DATA_TYPE)rhs_constant; \
|
|
}) \
|
|
})
|
|
|
|
|
|
#define T_ELTWISE_BROADCAST_X(T_ELWISE_OP, DST_DATA_TYPE, M0, N0, lhs, rhs, dst) \
|
|
({ \
|
|
LOOP_UNROLLING(int, _m0, 0, 1, M0, \
|
|
{ \
|
|
dst[_m0].v = T_ELWISE_OP(CONVERT(lhs[_m0].v, VEC_DATA_TYPE(DST_DATA_TYPE, N0)), CONVERT(rhs[0].v, VEC_DATA_TYPE(DST_DATA_TYPE, N0))); \
|
|
}) \
|
|
})
|
|
|
|
|
|
#define T_ELTWISE_BROADCAST_LHS_X(T_ELWISE_OP, DST_DATA_TYPE, M0, N0, lhs, rhs, dst) \
|
|
({ \
|
|
LOOP_UNROLLING(int, _m0, 0, 1, M0, \
|
|
{ \
|
|
dst[_m0].v = T_ELWISE_OP(CONVERT(lhs[0].v, VEC_DATA_TYPE(DST_DATA_TYPE, N0)), CONVERT(rhs[_m0].v, VEC_DATA_TYPE(DST_DATA_TYPE, N0))); \
|
|
}) \
|
|
})
|
|
|
|
#define T_ELTWISE_ADD(DST_DATA_TYPE, M0, N0, lhs, rhs, dst) T_ELTWISE(V_ADD, DST_DATA_TYPE, M0, N0, lhs, rhs, dst)
|
|
#define T_ELTWISE_SUB(DST_DATA_TYPE, M0, N0, lhs, rhs, dst) T_ELTWISE(V_SUB, DST_DATA_TYPE, M0, N0, lhs, rhs, dst)
|
|
#define T_ELTWISE_DIV(DST_DATA_TYPE, M0, N0, lhs, rhs, dst) T_ELTWISE(V_DIV, DST_DATA_TYPE, M0, N0, lhs, rhs, dst)
|
|
#define T_ELTWISE_MUL(DST_DATA_TYPE, M0, N0, lhs, rhs, dst) T_ELTWISE(V_MUL, DST_DATA_TYPE, M0, N0, lhs, rhs, dst)
|
|
|
|
|
|
#define T_ELTWISE(T_ELWISE_OP, DST_DATA_TYPE, M0, N0, lhs, rhs, dst) \
|
|
({ \
|
|
LOOP_UNROLLING(int, _m0, 0, 1, M0, \
|
|
{ \
|
|
dst[_m0].v = T_ELWISE_OP(CONVERT(lhs[_m0].v, VEC_DATA_TYPE(DST_DATA_TYPE, N0)), CONVERT(rhs[_m0].v, VEC_DATA_TYPE(DST_DATA_TYPE, N0))); \
|
|
}) \
|
|
})
|
|
|
|
|
|
#define T_FLOOR(DST_DATA_TYPE, M0, N0, src, dst) \
|
|
({ \
|
|
LOOP_UNROLLING(int, _m0, 0, 1, M0, \
|
|
{ \
|
|
dst[_m0].v = floor(CONVERT(src[_m0].v, VEC_DATA_TYPE(DST_DATA_TYPE, N0))); \
|
|
}) \
|
|
})
|
|
|
|
|
|
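// T_MMUL accumulates dst += lhs * rhs^T for an M0 x K0 LHS tile and an
// N0 x K0 RHS tile (NT_T layout: LHS non-transposed, RHS transposed). The
// float/half specialisations use fma(); the 8-bit integer specialisations
// use DOT_PRODUCT_INTEGER8.
//
// Illustrative usage (hypothetical tiles):
//   T_MMUL(half, half, float, M0, N0, K0, NT, T, lhs_tile, rhs_tile, acc_tile);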
#define T_MMUL(LHS_DATA_TYPE, RHS_DATA_TYPE, DST_DATA_TYPE, M0, N0, K0, LHS_LAYOUT, RHS_LAYOUT, lhs, rhs, dst) T_MMUL_##LHS_LAYOUT##_##RHS_LAYOUT(LHS_DATA_TYPE, RHS_DATA_TYPE, DST_DATA_TYPE, M0, N0, K0, lhs, rhs, dst)
|
|
#define T_MMUL_NT_T(LHS_DATA_TYPE, RHS_DATA_TYPE, DST_DATA_TYPE, M0, N0, K0, lhs, rhs, dst) T_MMUL_NT_T_##LHS_DATA_TYPE##_##RHS_DATA_TYPE##_##DST_DATA_TYPE(LHS_DATA_TYPE, RHS_DATA_TYPE, DST_DATA_TYPE, M0, N0, K0, lhs, rhs, dst)
|
|
#define T_MMUL_NT_T_float_float_float(LHS_DATA_TYPE, RHS_DATA_TYPE, DST_DATA_TYPE, M0, N0, K0, lhs, rhs, dst) T_MMUL_NT_T_FLOAT(LHS_DATA_TYPE, RHS_DATA_TYPE, DST_DATA_TYPE, M0, N0, K0, lhs, rhs, dst)
|
|
#define T_MMUL_NT_T_half_half_float(LHS_DATA_TYPE, RHS_DATA_TYPE, DST_DATA_TYPE, M0, N0, K0, lhs, rhs, dst) T_MMUL_NT_T_FLOAT(LHS_DATA_TYPE, RHS_DATA_TYPE, DST_DATA_TYPE, M0, N0, K0, lhs, rhs, dst)
|
|
#define T_MMUL_NT_T_half_half_half(LHS_DATA_TYPE, RHS_DATA_TYPE, DST_DATA_TYPE, M0, N0, K0, lhs, rhs, dst) T_MMUL_NT_T_FLOAT(LHS_DATA_TYPE, RHS_DATA_TYPE, DST_DATA_TYPE, M0, N0, K0, lhs, rhs, dst)
|
|
#define T_MMUL_NT_T_char_char_int(LHS_DATA_TYPE, RHS_DATA_TYPE, DST_DATA_TYPE, M0, N0, K0, lhs, rhs, dst) T_MMUL_NT_T_INTEGER8(LHS_DATA_TYPE, RHS_DATA_TYPE, DST_DATA_TYPE, M0, N0, K0, lhs, rhs, dst)
|
|
#define T_MMUL_NT_T_uchar_uchar_uint(LHS_DATA_TYPE, RHS_DATA_TYPE, DST_DATA_TYPE, M0, N0, K0, lhs, rhs, dst) T_MMUL_NT_T_INTEGER8(LHS_DATA_TYPE, RHS_DATA_TYPE, DST_DATA_TYPE, M0, N0, K0, lhs, rhs, dst)
|
|
#define T_MMUL_NT_T_uchar_uchar_int(LHS_DATA_TYPE, RHS_DATA_TYPE, DST_DATA_TYPE, M0, N0, K0, lhs, rhs, dst) T_MMUL_NT_T_INTEGER8(LHS_DATA_TYPE, RHS_DATA_TYPE, DST_DATA_TYPE, M0, N0, K0, lhs, rhs, dst)
|
|
#define T_MMUL_NT_T_FLOAT(LHS_DATA_TYPE, RHS_DATA_TYPE, DST_DATA_TYPE, M0, N0, K0, lhs, rhs, dst) \
|
|
{ \
|
|
LOOP_UNROLLING(int, _m, 0, 1, M0, \
|
|
{ \
|
|
LOOP_UNROLLING(int, _n, 0, 1, N0, \
|
|
{ \
|
|
LOOP_UNROLLING(int, _k, 0, 1, K0, \
|
|
{ \
|
|
dst[_m].s[_n] = fma((DST_DATA_TYPE)(lhs[_m].s[_k]), (DST_DATA_TYPE)(rhs[_n].s[_k]), dst[_m].s[_n]); \
|
|
}) \
|
|
}) \
|
|
}) \
|
|
}
|
|
|
|
#define T_MMUL_NT_T_INTEGER8(LHS_DATA_TYPE, RHS_DATA_TYPE, DST_DATA_TYPE, M0, N0, K0, lhs, rhs, dst) \
|
|
({ \
|
|
LOOP_UNROLLING(int, _m, 0, 1, M0, \
|
|
{ \
|
|
LOOP_UNROLLING(int, _n, 0, 1, N0, \
|
|
{ \
|
|
DOT_PRODUCT_INTEGER8(LHS_DATA_TYPE, RHS_DATA_TYPE, DST_DATA_TYPE, K0, (lhs[_m].v), (rhs[_n].v), dst[_m].s[_n]); \
|
|
}) \
|
|
}) \
|
|
})
|
|
|
|
#endif
|
|
|
|
#if defined(NUM_TILES_X) && defined(OUTPUT_TILE_W) && defined(OUTPUT_TILE_H)
|
|
#if defined(VEC_SIZE) && VEC_SIZE == 2
|
|
|
|
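// Winograd output transform for a 2x2 output tile and 3x3 kernel, NCHW
// layout. Each work-item reads one transformed tile (4x4, or 1x4 for the 1D
// horizontal/vertical cases), applies the inverse transform, adds the
// optional bias and activation, and writes the 2x2 (or 1x2) output block.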
__kernel void winograd_output_transform_2x2_3x3_nchw(
|
|
TENSOR4D_DECLARATION(src),
|
|
TENSOR4D_DECLARATION(dst)
|
|
#if defined(HAS_BIAS)
|
|
,
|
|
VECTOR_DECLARATION(bias)
|
|
#endif
|
|
)
|
|
{
|
|
|
|
#if defined(SRC_DEPTH)
|
|
Tensor4D src = CONVERT_TO_TENSOR4D_STRUCT(src, SRC_DEPTH);
|
|
const __global uchar *src_addr = tensor4D_offset(&src, 0, 0, 0, 0);
|
|
#else
|
|
Tensor3D src = CONVERT_TO_TENSOR3D_STRUCT(src);
|
|
const __global uchar *src_addr = tensor3D_offset(&src, 0, 0, 0);
|
|
#endif
|
|
|
|
|
|
DATA_TYPE d00 = *((__global DATA_TYPE *)(src_addr + 0 * src_stride_z));
|
|
DATA_TYPE d01 = *((__global DATA_TYPE *)(src_addr + 1 * src_stride_z));
|
|
DATA_TYPE d02 = *((__global DATA_TYPE *)(src_addr + 2 * src_stride_z));
|
|
DATA_TYPE d03 = *((__global DATA_TYPE *)(src_addr + 3 * src_stride_z));
|
|
|
|
#if defined(WINOGRAD_OUTPUT_TRANSFORM_HORIZONTAL) || defined(WINOGRAD_OUTPUT_TRANSFORM_VERTICAL)
|
|
|
|
|
|
|
|
|
|
float out00 = d00 + d01 + d02;
|
|
float out01 = d01 - d02 - d03;
|
|
#else
|
|
|
|
DATA_TYPE d10 = *((__global DATA_TYPE *)(src_addr + 4 * src_stride_z));
|
|
DATA_TYPE d11 = *((__global DATA_TYPE *)(src_addr + 5 * src_stride_z));
|
|
DATA_TYPE d12 = *((__global DATA_TYPE *)(src_addr + 6 * src_stride_z));
|
|
DATA_TYPE d13 = *((__global DATA_TYPE *)(src_addr + 7 * src_stride_z));
|
|
|
|
DATA_TYPE d20 = *((__global DATA_TYPE *)(src_addr + 8 * src_stride_z));
|
|
DATA_TYPE d21 = *((__global DATA_TYPE *)(src_addr + 9 * src_stride_z));
|
|
DATA_TYPE d22 = *((__global DATA_TYPE *)(src_addr + 10 * src_stride_z));
|
|
DATA_TYPE d23 = *((__global DATA_TYPE *)(src_addr + 11 * src_stride_z));
|
|
|
|
DATA_TYPE d30 = *((__global DATA_TYPE *)(src_addr + 12 * src_stride_z));
|
|
DATA_TYPE d31 = *((__global DATA_TYPE *)(src_addr + 13 * src_stride_z));
|
|
DATA_TYPE d32 = *((__global DATA_TYPE *)(src_addr + 14 * src_stride_z));
|
|
DATA_TYPE d33 = *((__global DATA_TYPE *)(src_addr + 15 * src_stride_z));
|
|
|
|
|
|
float k0 = d01 + d11 + d21;
|
|
float k1 = d02 + d12 + d22;
|
|
float k2 = d11 - d21 - d31;
|
|
float k3 = d12 - d22 - d32;
|
|
|
|
|
|
|
|
|
|
|
|
|
|
float out00 = d10;
|
|
float out01 = -d13;
|
|
float out10 = d10;
|
|
float out11 = -d13;
|
|
|
|
out00 += d00 + d20 + k0 + k1;
|
|
out01 += k0 - k1 - (d03 + d23);
|
|
out10 += -d20 - d30 + k2 + k3;
|
|
out11 += k2 - k3 + d23 + d33;
|
|
#endif
|
|
|
|
int y_in = get_global_id(1);
|
|
int x_out = (y_in % NUM_TILES_X) * OUTPUT_TILE_W;
|
|
int y_out = (y_in / NUM_TILES_X) * OUTPUT_TILE_H;
|
|
int z_out = get_global_id(0);
|
|
#if defined(SRC_DEPTH)
|
|
int batch = get_global_id(2) / SRC_DEPTH;
|
|
#endif
|
|
|
|
#if defined(HAS_BIAS)
|
|
|
|
Vector bias = CONVERT_TO_VECTOR_STRUCT_NO_STEP(bias);
|
|
|
|
float b = (float) * ((__global DATA_TYPE *)(vector_offset(&bias, z_out)));
|
|
|
|
out00 += (float)b;
|
|
out01 += (float)b;
|
|
#endif
|
|
|
|
|
|
#if defined(SRC_DEPTH)
|
|
__global uchar *dst_addr = dst_ptr + dst_offset_first_element_in_bytes + x_out * sizeof(DATA_TYPE) + y_out * dst_stride_y + z_out * dst_stride_z + batch * dst_stride_w;
|
|
#else
|
|
__global uchar *dst_addr = dst_ptr + dst_offset_first_element_in_bytes + x_out * sizeof(DATA_TYPE) + y_out * dst_stride_y + z_out * dst_stride_z;
|
|
#endif
|
|
|
|
|
|
#if defined(WINOGRAD_OUTPUT_TRANSFORM_VERTICAL)
|
|
const VEC_DATA_TYPE(DATA_TYPE, 2)
|
|
out0_dt = ACTIVATION(ACTIVATION_TYPE, DATA_TYPE, VEC_SIZE, CONVERT((VEC_DATA_TYPE(float, 2))(out00, out01), VEC_DATA_TYPE(DATA_TYPE, 2)), A_VAL, B_VAL);
|
|
*((__global DATA_TYPE *)(dst_addr + 0 * dst_stride_y)) = out0_dt.s0;
|
|
*((__global DATA_TYPE *)(dst_addr + 1 * dst_stride_y)) = out0_dt.s1;
|
|
#else
|
|
vstore2(ACTIVATION(ACTIVATION_TYPE, DATA_TYPE, VEC_SIZE, CONVERT((VEC_DATA_TYPE(float, 2))(out00, out01), VEC_DATA_TYPE(DATA_TYPE, 2)), A_VAL, B_VAL), 0,
|
|
(__global DATA_TYPE *)(dst_addr + 0 * dst_stride_y));
|
|
#endif
|
|
|
|
#if !defined(WINOGRAD_OUTPUT_TRANSFORM_HORIZONTAL) && !defined(WINOGRAD_OUTPUT_TRANSFORM_VERTICAL)
|
|
#if defined(HAS_BIAS)
|
|
|
|
 out10 += (float)b;
|
|
 out11 += (float)b;
|
|
#endif
|
|
vstore2(ACTIVATION(ACTIVATION_TYPE, DATA_TYPE, VEC_SIZE, CONVERT((VEC_DATA_TYPE(float, 2))(out10, out11), VEC_DATA_TYPE(DATA_TYPE, 2)), A_VAL, B_VAL), 0,
|
|
(__global DATA_TYPE *)(dst_addr + 1 * dst_stride_y));
|
|
#endif
|
|
}
|
|
#endif
|
|
|
|
#if defined(VEC_SIZE) && VEC_SIZE == 4
|
|
|
|
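// Winograd output transform for a 4x4 output tile and 3x3 kernel, NCHW
// layout: reads the 6x6 transformed tile (1x6 in the 1D cases), applies the
// inverse transform, adds the optional bias and activation, and writes the
// 4x4 (1x4) output block.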
__kernel void winograd_output_transform_4x4_3x3_nchw(
|
|
TENSOR4D_DECLARATION(src),
|
|
TENSOR4D_DECLARATION(dst)
|
|
#if defined(HAS_BIAS)
|
|
,
|
|
VECTOR_DECLARATION(bias)
|
|
#endif
|
|
)
|
|
{
|
|
|
|
#if defined(SRC_DEPTH)
|
|
Tensor4D src = CONVERT_TO_TENSOR4D_STRUCT(src, SRC_DEPTH);
|
|
const __global uchar *src_addr = tensor4D_offset(&src, 0, 0, 0, 0);
|
|
#else
|
|
Tensor3D src = CONVERT_TO_TENSOR3D_STRUCT(src);
|
|
const __global uchar *src_addr = tensor3D_offset(&src, 0, 0, 0);
|
|
#endif
|
|
|
|
|
|
DATA_TYPE d00 = *((__global DATA_TYPE *)(src_addr + 0 * src_stride_z));
|
|
DATA_TYPE d01 = *((__global DATA_TYPE *)(src_addr + 1 * src_stride_z));
|
|
DATA_TYPE d02 = *((__global DATA_TYPE *)(src_addr + 2 * src_stride_z));
|
|
DATA_TYPE d03 = *((__global DATA_TYPE *)(src_addr + 3 * src_stride_z));
|
|
DATA_TYPE d04 = *((__global DATA_TYPE *)(src_addr + 4 * src_stride_z));
|
|
DATA_TYPE d05 = *((__global DATA_TYPE *)(src_addr + 5 * src_stride_z));
|
|
|
|
#if defined(WINOGRAD_OUTPUT_TRANSFORM_HORIZONTAL) || defined(WINOGRAD_OUTPUT_TRANSFORM_VERTICAL)
|
|
|
|
float out00 = d00 + d01 + d02 + d03 + d04;
|
|
float out01 = d01 - d02 + 2.0f * d03 - 2.0f * d04;
|
|
float out02 = d01 + d02 + 4.0f * d03 + 4.0f * d04;
|
|
float out03 = d01 - d02 + 8.0f * d03 - 8.0f * d04 + d05;
|
|
#else
|
|
|
|
DATA_TYPE d10 = *((__global DATA_TYPE *)(src_addr + 6 * src_stride_z));
|
|
DATA_TYPE d11 = *((__global DATA_TYPE *)(src_addr + 7 * src_stride_z));
|
|
DATA_TYPE d12 = *((__global DATA_TYPE *)(src_addr + 8 * src_stride_z));
|
|
DATA_TYPE d13 = *((__global DATA_TYPE *)(src_addr + 9 * src_stride_z));
|
|
DATA_TYPE d14 = *((__global DATA_TYPE *)(src_addr + 10 * src_stride_z));
|
|
DATA_TYPE d15 = *((__global DATA_TYPE *)(src_addr + 11 * src_stride_z));
|
|
|
|
DATA_TYPE d20 = *((__global DATA_TYPE *)(src_addr + 12 * src_stride_z));
|
|
DATA_TYPE d21 = *((__global DATA_TYPE *)(src_addr + 13 * src_stride_z));
|
|
DATA_TYPE d22 = *((__global DATA_TYPE *)(src_addr + 14 * src_stride_z));
|
|
DATA_TYPE d23 = *((__global DATA_TYPE *)(src_addr + 15 * src_stride_z));
|
|
DATA_TYPE d24 = *((__global DATA_TYPE *)(src_addr + 16 * src_stride_z));
|
|
DATA_TYPE d25 = *((__global DATA_TYPE *)(src_addr + 17 * src_stride_z));
|
|
|
|
DATA_TYPE d30 = *((__global DATA_TYPE *)(src_addr + 18 * src_stride_z));
|
|
DATA_TYPE d31 = *((__global DATA_TYPE *)(src_addr + 19 * src_stride_z));
|
|
DATA_TYPE d32 = *((__global DATA_TYPE *)(src_addr + 20 * src_stride_z));
|
|
DATA_TYPE d33 = *((__global DATA_TYPE *)(src_addr + 21 * src_stride_z));
|
|
DATA_TYPE d34 = *((__global DATA_TYPE *)(src_addr + 22 * src_stride_z));
|
|
DATA_TYPE d35 = *((__global DATA_TYPE *)(src_addr + 23 * src_stride_z));
|
|
|
|
DATA_TYPE d40 = *((__global DATA_TYPE *)(src_addr + 24 * src_stride_z));
|
|
DATA_TYPE d41 = *((__global DATA_TYPE *)(src_addr + 25 * src_stride_z));
|
|
DATA_TYPE d42 = *((__global DATA_TYPE *)(src_addr + 26 * src_stride_z));
|
|
DATA_TYPE d43 = *((__global DATA_TYPE *)(src_addr + 27 * src_stride_z));
|
|
DATA_TYPE d44 = *((__global DATA_TYPE *)(src_addr + 28 * src_stride_z));
|
|
DATA_TYPE d45 = *((__global DATA_TYPE *)(src_addr + 29 * src_stride_z));
|
|
|
|
DATA_TYPE d50 = *((__global DATA_TYPE *)(src_addr + 30 * src_stride_z));
|
|
DATA_TYPE d51 = *((__global DATA_TYPE *)(src_addr + 31 * src_stride_z));
|
|
DATA_TYPE d52 = *((__global DATA_TYPE *)(src_addr + 32 * src_stride_z));
|
|
DATA_TYPE d53 = *((__global DATA_TYPE *)(src_addr + 33 * src_stride_z));
|
|
DATA_TYPE d54 = *((__global DATA_TYPE *)(src_addr + 34 * src_stride_z));
|
|
DATA_TYPE d55 = *((__global DATA_TYPE *)(src_addr + 35 * src_stride_z));
|
|
|
|
|
|
float out00 = (float)d01 + (float)d21 + (float)d41 + (float)d11 + (float)d31;
|
|
float out01 = (float)d01 + (float)d21 + (float)d41 + (float)d11 + (float)d31;
|
|
float out02 = (float)d01 + (float)d21 + (float)d41 + (float)d11 + (float)d31;
|
|
 float out03 = (float)d01 + (float)d21 + (float)d41 + (float)d11 + (float)d31;
|
|
|
|
float k0 = d03 + d04 + d13 + d14 + d23 + d24 + d33 + d34 + d43 + d44;
|
|
float k1 = 2.0f * d03 - 2.0f * d04 + 2.0f * d13 - 2.0f * d14 + 2.0f * d23 - 2.0f * d24 + 2.0f * d33 - 2.0f * d34 + 2.0f * d43 - 2.0f * d44;
|
|
|
|
out00 += k0 + d00 + d02 + d10 + d12 + d20 + d22 + d30 + d32 + d40 + d42;
|
|
out01 += k1 - d02 - d12 - d22 - d32 - d42;
|
|
out02 += 4.0f * k0 + d02 + d12 + d22 + d32 + d42;
|
|
out03 += 4.0f * k1 - d02 - d12 - d22 - d32 - d42 + d05 + d15 + d25 + d35 + d45;
|
|
|
|
|
|
float out10 = d11 - d21 + 2.0f * d31 - 2.0f * d41;
|
|
float out11 = d11 - d21 + 2.0f * d31 - 2.0f * d41;
|
|
float out12 = d11 - d21 + 2.0f * d31 - 2.0f * d41;
|
|
float out13 = d11 - d21 + 2.0f * d31 - 2.0f * d41;
|
|
|
|
k0 = d13 + d14 - d23 - d24 + 2.0f * d33 + 2.0f * d34 - 2.0f * d43 - 2.0f * d44;
|
|
k1 = 2.0f * d13 - 2.0f * d14 - 2.0f * d23 + 2.0f * d24 + 4.0f * d33 - 4.0f * d34 - 4.0f * d43 + 4.0f * d44;
|
|
|
|
out10 += k0 + d10 + d12 - d20 - d22 + 2.0f * d30 + 2.0f * d32 - 2.0f * d40 - 2.0f * d42;
|
|
out11 += k1 - d12 + d22 - 2.0f * d32 + 2.0f * d42;
|
|
out12 += 4.0f * k0 + d12 - d22 + 2.0f * d32 - 2.0f * d42;
|
|
out13 += 4.0f * k1 - d12 + d15 + d22 - d25 - 2.0f * d32 + 2.0f * d35 + 2.0f * d42 - 2.0f * d45;
|
|
|
|
|
|
float out20 = d11 + d21 + 4.0f * d31 + 4.0f * d41;
|
|
float out21 = d11 + d21 + 4.0f * d31 + 4.0f * d41;
|
|
float out22 = d11 + d21 + 4.0f * d31 + 4.0f * d41;
|
|
float out23 = d11 + d21 + 4.0f * d31 + 4.0f * d41;
|
|
|
|
k0 = d13 + d14 + d23 + d24 + 4.0f * d33 + 4.0f * d34 + 4.0f * d43 + 4.0f * d44;
|
|
k1 = 2.0f * d13 - 2.0f * d14 + 2.0f * d23 - 2.0f * d24 + 8.0f * d33 - 8.0f * d34 + 8.0f * d43 - 8.0f * d44;
|
|
|
|
out20 += k0 + d10 + d12 + d20 + d22 + 4.0f * d30 + 4.0f * d32 + 4.0f * d40 + 4.0f * d42;
|
|
out21 += k1 - d12 - d22 - 4.0f * d32 - 4.0f * d42;
|
|
out22 += 4.0f * k0 + d12 + d22 + 4.0f * d32 + 4.0f * d42;
|
|
out23 += 4.0f * k1 - d12 + d15 - d22 + d25 - 4.0f * d32 + 4.0f * d35 - 4.0f * d42 + 4.0f * d45;
|
|
|
|
|
|
float out30 = d11 - d21 + 8.0f * d31 - 8.0f * d41 + d51;
|
|
float out31 = d11 - d21 + 8.0f * d31 - 8.0f * d41 + d51;
|
|
float out32 = d11 - d21 + 8.0f * d31 - 8.0f * d41 + d51;
|
|
float out33 = d11 - d21 + 8.0f * d31 - 8.0f * d41 + d51;
|
|
|
|
k0 = d13 + d14 - d23 - d24 + 8.0f * d33 + 8.0f * d34 - 8.0f * d43 - 8.0f * d44 + d53 + d54;
|
|
k1 = 2.0f * d13 - 2.0f * d14 - 2.0f * d23 + 2.0f * d24 + 16.0f * d33 - 16.0f * d34 - 16.0f * d43 + 16.0f * d44 + 2.0f * d53 - 2.0f * d54;
|
|
|
|
out30 += k0 + d10 + d12 - d20 - d22 + 8.0f * d30 + 8.0f * d32 - 8.0f * d40 - 8.0f * d42 + d50 + d52;
|
|
out31 += k1 - d12 + d22 - 8.0f * d32 + 8.0f * d42 - d52;
|
|
out32 += 4.0f * k0 + d12 - d22 + 8.0f * d32 - 8.0f * d42 + d52;
|
|
out33 += 4.0f * k1 - d12 + d15 + d22 - d25 - 8.0f * d32 + 8.0f * d35 + 8.0f * d42 - 8.0f * d45 - d52 + d55;
|
|
#endif
|
|
|
|
int y_in = get_global_id(1);
|
|
int x_out = (y_in % NUM_TILES_X) * OUTPUT_TILE_W;
|
|
int y_out = (y_in / NUM_TILES_X) * OUTPUT_TILE_H;
|
|
int z_out = get_global_id(0);
|
|
#if defined(SRC_DEPTH)
|
|
int batch = get_global_id(2) / SRC_DEPTH;
|
|
#endif
|
|
|
|
#if defined(HAS_BIAS)
|
|
|
|
Vector bias = CONVERT_TO_VECTOR_STRUCT_NO_STEP(bias);
|
|
|
|
float b = (float) * ((__global DATA_TYPE *)(vector_offset(&bias, z_out)));
|
|
|
|
out00 += (float)b;
|
|
out01 += (float)b;
|
|
out02 += (float)b;
|
|
out03 += (float)b;
|
|
#endif
|
|
|
|
|
|
#if defined(SRC_DEPTH)
|
|
__global uchar *dst_addr = dst_ptr + dst_offset_first_element_in_bytes + x_out * sizeof(DATA_TYPE) + y_out * dst_stride_y + z_out * dst_stride_z + batch * dst_stride_w;
|
|
#else
|
|
__global uchar *dst_addr = dst_ptr + dst_offset_first_element_in_bytes + x_out * sizeof(DATA_TYPE) + y_out * dst_stride_y + z_out * dst_stride_z;
|
|
#endif
|
|
|
|
|
|
const VEC_DATA_TYPE(DATA_TYPE, 4)
|
|
out0_dt = CONVERT(ACTIVATION(ACTIVATION_TYPE, float, VEC_SIZE, (VEC_DATA_TYPE(float, 4))(out00, out01, out02, out03), A_VAL, B_VAL), VEC_DATA_TYPE(DATA_TYPE, 4));
|
|
|
|
#if defined(WINOGRAD_OUTPUT_TRANSFORM_VERTICAL)
|
|
*((__global DATA_TYPE *)(dst_addr + 0 * dst_stride_y)) = out0_dt.s0;
|
|
*((__global DATA_TYPE *)(dst_addr + 1 * dst_stride_y)) = out0_dt.s1;
|
|
*((__global DATA_TYPE *)(dst_addr + 2 * dst_stride_y)) = out0_dt.s2;
|
|
*((__global DATA_TYPE *)(dst_addr + 3 * dst_stride_y)) = out0_dt.s3;
|
|
#else
|
|
vstore4(out0_dt, 0, (__global DATA_TYPE *)(dst_addr + 0 * dst_stride_y));
|
|
#endif
|
|
|
|
#if !defined(WINOGRAD_OUTPUT_TRANSFORM_HORIZONTAL) && !defined(WINOGRAD_OUTPUT_TRANSFORM_VERTICAL)
|
|
#if defined(HAS_BIAS)
|
|
|
|
out10 += (float)b;
|
|
out11 += (float)b;
|
|
out12 += (float)b;
|
|
out13 += (float)b;
|
|
|
|
out20 += (float)b;
|
|
out21 += (float)b;
|
|
out22 += (float)b;
|
|
out23 += (float)b;
|
|
|
|
out30 += (float)b;
|
|
out31 += (float)b;
|
|
out32 += (float)b;
|
|
out33 += (float)b;
|
|
#endif
|
|
vstore4(CONVERT(ACTIVATION(ACTIVATION_TYPE, float, VEC_SIZE, (VEC_DATA_TYPE(float, 4))(out10, out11, out12, out13), A_VAL, B_VAL), VEC_DATA_TYPE(DATA_TYPE, 4)), 0,
|
|
(__global DATA_TYPE *)(dst_addr + 1 * dst_stride_y));
|
|
vstore4(CONVERT(ACTIVATION(ACTIVATION_TYPE, float, VEC_SIZE, (VEC_DATA_TYPE(float, 4))(out20, out21, out22, out23), A_VAL, B_VAL), VEC_DATA_TYPE(DATA_TYPE, 4)), 0,
|
|
(__global DATA_TYPE *)(dst_addr + 2 * dst_stride_y));
|
|
vstore4(CONVERT(ACTIVATION(ACTIVATION_TYPE, float, VEC_SIZE, (VEC_DATA_TYPE(float, 4))(out30, out31, out32, out33), A_VAL, B_VAL), VEC_DATA_TYPE(DATA_TYPE, 4)), 0,
|
|
(__global DATA_TYPE *)(dst_addr + 3 * dst_stride_y));
|
|
#endif
|
|
}
|
|
|
|
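// COMPUTE_TMP_COL applies the inverse Winograd transform to one 8-element
// column (d0..d7) of the 4x4/5x5 case, producing the four output values in
// `col` and reusing `comm_fact` for the shared partial sums.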
#define COMPUTE_TMP_COL(col, d0, d1, d2, d3, d4, d5, d6, d7, comm_fact) \
|
|
({ \
|
|
comm_fact.s0 = d1 + d2; \
|
|
comm_fact.s1 = d3 + d4; \
|
|
comm_fact.s2 = d5 + d6; \
|
|
\
|
|
col.s0 = comm_fact.s0 + comm_fact.s1 + 8.f * comm_fact.s2 + d0; \
|
|
col.s2 = comm_fact.s0 + 4.f * comm_fact.s1 + 2.f * comm_fact.s2; \
|
|
\
|
|
comm_fact.s0 = d1 - d2; \
|
|
comm_fact.s1 = d3 - d4; \
|
|
comm_fact.s2 = d5 - d6; \
|
|
\
|
|
col.s1 = comm_fact.s0 + 2.f * comm_fact.s1 + 4.f * comm_fact.s2; \
|
|
col.s3 = comm_fact.s0 + 8.f * comm_fact.s1 + comm_fact.s2 + d7; \
|
|
})
|
|
|
|
|
|
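// Winograd output transform for a 4x4 output tile and 5x5 kernel, NCHW
// layout: reads the 8x8 transformed tile (1x8 in the 1D cases) and writes the
// 4x4 (1x4) output block, with optional bias and activation.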
__kernel void winograd_output_transform_4x4_5x5_nchw(
    TENSOR4D_DECLARATION(src),
    TENSOR4D_DECLARATION(dst)
#if defined(HAS_BIAS)
    ,
    VECTOR_DECLARATION(bias)
#endif
)
{
#if defined(SRC_DEPTH)
    Tensor4D src = CONVERT_TO_TENSOR4D_STRUCT(src, SRC_DEPTH);
    const __global uchar *src_addr = tensor4D_offset(&src, 0, 0, 0, 0);
#else
    Tensor3D src = CONVERT_TO_TENSOR3D_STRUCT(src);
    const __global uchar *src_addr = tensor3D_offset(&src, 0, 0, 0);
#endif

    int y_in  = get_global_id(1);
    int x_out = (y_in % NUM_TILES_X) * OUTPUT_TILE_W;
    int y_out = (y_in / NUM_TILES_X) * OUTPUT_TILE_H;
    int z_out = get_global_id(0);
#if defined(SRC_DEPTH)
    int batch = get_global_id(2) / SRC_DEPTH;
#endif

#if defined(SRC_DEPTH)
    __global uchar *dst_addr = dst_ptr + dst_offset_first_element_in_bytes + x_out * sizeof(DATA_TYPE) + y_out * dst_stride_y + z_out * dst_stride_z + batch * dst_stride_w;
#else
    __global uchar *dst_addr = dst_ptr + dst_offset_first_element_in_bytes + x_out * sizeof(DATA_TYPE) + y_out * dst_stride_y + z_out * dst_stride_z;
#endif

    DATA_TYPE d00 = *((__global DATA_TYPE *)(src_addr + 0 * src_stride_z));
    DATA_TYPE d01 = *((__global DATA_TYPE *)(src_addr + 1 * src_stride_z));
    DATA_TYPE d02 = *((__global DATA_TYPE *)(src_addr + 2 * src_stride_z));
    DATA_TYPE d03 = *((__global DATA_TYPE *)(src_addr + 3 * src_stride_z));
    DATA_TYPE d04 = *((__global DATA_TYPE *)(src_addr + 4 * src_stride_z));
    DATA_TYPE d05 = *((__global DATA_TYPE *)(src_addr + 5 * src_stride_z));
    DATA_TYPE d06 = *((__global DATA_TYPE *)(src_addr + 6 * src_stride_z));
    DATA_TYPE d07 = *((__global DATA_TYPE *)(src_addr + 7 * src_stride_z));

#if defined(WINOGRAD_OUTPUT_TRANSFORM_HORIZONTAL) || defined(WINOGRAD_OUTPUT_TRANSFORM_VERTICAL)
    float out00 = d00 + d01 + d02 + d03 + d04 + 8.0f * d05 + 8.0f * d06;
    float out01 = d01 - d02 + 2.0f * d03 - 2.0f * d04 + 4.0f * d05 - 4.0f * d06;
    float out02 = d01 + d02 + 4.0f * d03 + 4.0f * d04 + 2.0f * d05 + 2.0f * d06;
    float out03 = d01 - d02 + 8.0f * d03 - 8.0f * d04 + d05 - d06 + d07;

#if defined(HAS_BIAS)
    Vector bias = CONVERT_TO_VECTOR_STRUCT_NO_STEP(bias);

    float b = (float) * ((__global DATA_TYPE *)(vector_offset(&bias, z_out)));

    out00 += (DATA_TYPE)b;
    out01 += (DATA_TYPE)b;
    out02 += (DATA_TYPE)b;
    out03 += (DATA_TYPE)b;
#endif

#if defined(WINOGRAD_OUTPUT_TRANSFORM_VERTICAL)
    VEC_DATA_TYPE(DATA_TYPE, 4)
    out0_dt = CONVERT(ACTIVATION(ACTIVATION_TYPE, float, VEC_SIZE, (VEC_DATA_TYPE(float, 4))(out00, out01, out02, out03), A_VAL, B_VAL),
                      VEC_DATA_TYPE(DATA_TYPE, 4));
    *((__global DATA_TYPE *)(dst_addr + 0 * dst_stride_y)) = out0_dt.s0;
    *((__global DATA_TYPE *)(dst_addr + 1 * dst_stride_y)) = out0_dt.s1;
    *((__global DATA_TYPE *)(dst_addr + 2 * dst_stride_y)) = out0_dt.s2;
    *((__global DATA_TYPE *)(dst_addr + 3 * dst_stride_y)) = out0_dt.s3;
#else
    vstore4(CONVERT(ACTIVATION(ACTIVATION_TYPE, float, VEC_SIZE, (VEC_DATA_TYPE(float, 4))(out00, out01, out02, out03), A_VAL, B_VAL), VEC_DATA_TYPE(DATA_TYPE, 4)),
            0, (__global DATA_TYPE *)(dst_addr));
#endif

#else

    DATA_TYPE d10 = *((__global DATA_TYPE *)(src_addr + 8 * src_stride_z));
    DATA_TYPE d11 = *((__global DATA_TYPE *)(src_addr + 9 * src_stride_z));
    DATA_TYPE d12 = *((__global DATA_TYPE *)(src_addr + 10 * src_stride_z));
    DATA_TYPE d13 = *((__global DATA_TYPE *)(src_addr + 11 * src_stride_z));
    DATA_TYPE d14 = *((__global DATA_TYPE *)(src_addr + 12 * src_stride_z));
    DATA_TYPE d15 = *((__global DATA_TYPE *)(src_addr + 13 * src_stride_z));
    DATA_TYPE d16 = *((__global DATA_TYPE *)(src_addr + 14 * src_stride_z));
    DATA_TYPE d17 = *((__global DATA_TYPE *)(src_addr + 15 * src_stride_z));

    DATA_TYPE d20 = *((__global DATA_TYPE *)(src_addr + 16 * src_stride_z));
    DATA_TYPE d21 = *((__global DATA_TYPE *)(src_addr + 17 * src_stride_z));
    DATA_TYPE d22 = *((__global DATA_TYPE *)(src_addr + 18 * src_stride_z));
    DATA_TYPE d23 = *((__global DATA_TYPE *)(src_addr + 19 * src_stride_z));
    DATA_TYPE d24 = *((__global DATA_TYPE *)(src_addr + 20 * src_stride_z));
    DATA_TYPE d25 = *((__global DATA_TYPE *)(src_addr + 21 * src_stride_z));
    DATA_TYPE d26 = *((__global DATA_TYPE *)(src_addr + 22 * src_stride_z));
    DATA_TYPE d27 = *((__global DATA_TYPE *)(src_addr + 23 * src_stride_z));

    DATA_TYPE d30 = *((__global DATA_TYPE *)(src_addr + 24 * src_stride_z));
    DATA_TYPE d31 = *((__global DATA_TYPE *)(src_addr + 25 * src_stride_z));
    DATA_TYPE d32 = *((__global DATA_TYPE *)(src_addr + 26 * src_stride_z));
    DATA_TYPE d33 = *((__global DATA_TYPE *)(src_addr + 27 * src_stride_z));
    DATA_TYPE d34 = *((__global DATA_TYPE *)(src_addr + 28 * src_stride_z));
    DATA_TYPE d35 = *((__global DATA_TYPE *)(src_addr + 29 * src_stride_z));
    DATA_TYPE d36 = *((__global DATA_TYPE *)(src_addr + 30 * src_stride_z));
    DATA_TYPE d37 = *((__global DATA_TYPE *)(src_addr + 31 * src_stride_z));

    DATA_TYPE d40 = *((__global DATA_TYPE *)(src_addr + 32 * src_stride_z));
    DATA_TYPE d41 = *((__global DATA_TYPE *)(src_addr + 33 * src_stride_z));
    DATA_TYPE d42 = *((__global DATA_TYPE *)(src_addr + 34 * src_stride_z));
    DATA_TYPE d43 = *((__global DATA_TYPE *)(src_addr + 35 * src_stride_z));
    DATA_TYPE d44 = *((__global DATA_TYPE *)(src_addr + 36 * src_stride_z));
    DATA_TYPE d45 = *((__global DATA_TYPE *)(src_addr + 37 * src_stride_z));
    DATA_TYPE d46 = *((__global DATA_TYPE *)(src_addr + 38 * src_stride_z));
    DATA_TYPE d47 = *((__global DATA_TYPE *)(src_addr + 39 * src_stride_z));

    DATA_TYPE d50 = *((__global DATA_TYPE *)(src_addr + 40 * src_stride_z));
    DATA_TYPE d51 = *((__global DATA_TYPE *)(src_addr + 41 * src_stride_z));
    DATA_TYPE d52 = *((__global DATA_TYPE *)(src_addr + 42 * src_stride_z));
    DATA_TYPE d53 = *((__global DATA_TYPE *)(src_addr + 43 * src_stride_z));
    DATA_TYPE d54 = *((__global DATA_TYPE *)(src_addr + 44 * src_stride_z));
    DATA_TYPE d55 = *((__global DATA_TYPE *)(src_addr + 45 * src_stride_z));
    DATA_TYPE d56 = *((__global DATA_TYPE *)(src_addr + 46 * src_stride_z));
    DATA_TYPE d57 = *((__global DATA_TYPE *)(src_addr + 47 * src_stride_z));

    DATA_TYPE d60 = *((__global DATA_TYPE *)(src_addr + 48 * src_stride_z));
    DATA_TYPE d61 = *((__global DATA_TYPE *)(src_addr + 49 * src_stride_z));
    DATA_TYPE d62 = *((__global DATA_TYPE *)(src_addr + 50 * src_stride_z));
    DATA_TYPE d63 = *((__global DATA_TYPE *)(src_addr + 51 * src_stride_z));
    DATA_TYPE d64 = *((__global DATA_TYPE *)(src_addr + 52 * src_stride_z));
    DATA_TYPE d65 = *((__global DATA_TYPE *)(src_addr + 53 * src_stride_z));
    DATA_TYPE d66 = *((__global DATA_TYPE *)(src_addr + 54 * src_stride_z));
    DATA_TYPE d67 = *((__global DATA_TYPE *)(src_addr + 55 * src_stride_z));

    DATA_TYPE d70 = *((__global DATA_TYPE *)(src_addr + 56 * src_stride_z));
    DATA_TYPE d71 = *((__global DATA_TYPE *)(src_addr + 57 * src_stride_z));
    DATA_TYPE d72 = *((__global DATA_TYPE *)(src_addr + 58 * src_stride_z));
    DATA_TYPE d73 = *((__global DATA_TYPE *)(src_addr + 59 * src_stride_z));
    DATA_TYPE d74 = *((__global DATA_TYPE *)(src_addr + 60 * src_stride_z));
    DATA_TYPE d75 = *((__global DATA_TYPE *)(src_addr + 61 * src_stride_z));
    DATA_TYPE d76 = *((__global DATA_TYPE *)(src_addr + 62 * src_stride_z));
    DATA_TYPE d77 = *((__global DATA_TYPE *)(src_addr + 63 * src_stride_z));

    VEC_DATA_TYPE(float, 4)
    comm_fact0, comm_fact1, comm_fact2;
    VEC_DATA_TYPE(float, 4)
    tmp_col0, tmp_col1, tmp_col2, tmp_col3, tmp_col4, tmp_col5, tmp_col6, tmp_col7;

    COMPUTE_TMP_COL(tmp_col0, d00, d10, d20, d30, d40, d50, d60, d70, comm_fact0);
    COMPUTE_TMP_COL(tmp_col1, d01, d11, d21, d31, d41, d51, d61, d71, comm_fact0);
    COMPUTE_TMP_COL(tmp_col2, d02, d12, d22, d32, d42, d52, d62, d72, comm_fact0);
    COMPUTE_TMP_COL(tmp_col3, d03, d13, d23, d33, d43, d53, d63, d73, comm_fact0);
    COMPUTE_TMP_COL(tmp_col4, d04, d14, d24, d34, d44, d54, d64, d74, comm_fact0);
    COMPUTE_TMP_COL(tmp_col5, d05, d15, d25, d35, d45, d55, d65, d75, comm_fact0);
    COMPUTE_TMP_COL(tmp_col6, d06, d16, d26, d36, d46, d56, d66, d76, comm_fact0);
    COMPUTE_TMP_COL(tmp_col7, d07, d17, d27, d37, d47, d57, d67, d77, comm_fact0);

    comm_fact0 = tmp_col1 + tmp_col2;
    comm_fact1 = tmp_col3 + tmp_col4;
    comm_fact2 = tmp_col5 + tmp_col6;

    VEC_DATA_TYPE(float, 4)
    out_col0 = comm_fact0 + comm_fact1 + (float)8.f * comm_fact2 + tmp_col0;
    VEC_DATA_TYPE(float, 4)
    out_col2 = comm_fact0 + (float)4.f * comm_fact1 + (float)2.f * comm_fact2;

    comm_fact0 = tmp_col1 - tmp_col2;
    comm_fact1 = tmp_col3 - tmp_col4;
    comm_fact2 = tmp_col5 - tmp_col6;

    VEC_DATA_TYPE(float, 4)
    out_col1 = comm_fact0 + (float)2.f * comm_fact1 + (float)4.f * comm_fact2;
    VEC_DATA_TYPE(float, 4)
    out_col3 = comm_fact0 + (float)8.f * comm_fact1 + comm_fact2 + tmp_col7;

#if defined(HAS_BIAS)
    Vector bias = CONVERT_TO_VECTOR_STRUCT_NO_STEP(bias);

    float b = (float) * ((__global DATA_TYPE *)(vector_offset(&bias, z_out)));

    out_col0 += (VEC_DATA_TYPE(float, 4))b;
    out_col1 += (VEC_DATA_TYPE(float, 4))b;
    out_col2 += (VEC_DATA_TYPE(float, 4))b;
    out_col3 += (VEC_DATA_TYPE(float, 4))b;
#endif

    vstore4(CONVERT(ACTIVATION(ACTIVATION_TYPE, float, VEC_SIZE, (VEC_DATA_TYPE(float, 4))(out_col0.s0, out_col1.s0, out_col2.s0, out_col3.s0), A_VAL, B_VAL),
                    VEC_DATA_TYPE(DATA_TYPE, 4)),
            0, (__global DATA_TYPE *)(dst_addr + 0 * dst_stride_y));
    vstore4(CONVERT(ACTIVATION(ACTIVATION_TYPE, float, VEC_SIZE, (VEC_DATA_TYPE(float, 4))(out_col0.s1, out_col1.s1, out_col2.s1, out_col3.s1), A_VAL, B_VAL),
                    VEC_DATA_TYPE(DATA_TYPE, 4)),
            0, (__global DATA_TYPE *)(dst_addr + 1 * dst_stride_y));
    vstore4(CONVERT(ACTIVATION(ACTIVATION_TYPE, float, VEC_SIZE, (VEC_DATA_TYPE(float, 4))(out_col0.s2, out_col1.s2, out_col2.s2, out_col3.s2), A_VAL, B_VAL),
                    VEC_DATA_TYPE(DATA_TYPE, 4)),
            0, (__global DATA_TYPE *)(dst_addr + 2 * dst_stride_y));
    vstore4(CONVERT(ACTIVATION(ACTIVATION_TYPE, float, VEC_SIZE, (VEC_DATA_TYPE(float, 4))(out_col0.s3, out_col1.s3, out_col2.s3, out_col3.s3), A_VAL, B_VAL),
                    VEC_DATA_TYPE(DATA_TYPE, 4)),
            0, (__global DATA_TYPE *)(dst_addr + 3 * dst_stride_y));
#endif
}
#endif

#if defined(WINOGRAD_OUTPUT_TRANSFORM_HORIZONTAL)
#if defined(VEC_SIZE) && VEC_SIZE == 2

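// 1-D horizontal output transforms (Nx1 output tile, Nx1 kernel). These kernels simply
// forward all tensor and bias arguments to the corresponding 2-D NCHW kernels, which
// perform only the horizontal pass because WINOGRAD_OUTPUT_TRANSFORM_HORIZONTAL is defined.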
__kernel void winograd_output_transform_2x1_3x1_nchw(
    TENSOR4D_DECLARATION(src),
    TENSOR4D_DECLARATION(dst)
#if defined(HAS_BIAS)
    ,
    VECTOR_DECLARATION(bias)
#endif
)
{
    winograd_output_transform_2x2_3x3_nchw(src_ptr,
                                           src_stride_x,
                                           src_step_x,
                                           src_stride_y,
                                           src_step_y,
                                           src_stride_z,
                                           src_step_z,
                                           src_stride_w,
                                           src_step_w,
                                           src_offset_first_element_in_bytes,
                                           dst_ptr,
                                           dst_stride_x,
                                           dst_step_x,
                                           dst_stride_y,
                                           dst_step_y,
                                           dst_stride_z,
                                           dst_step_z,
                                           dst_stride_w,
                                           dst_step_w,
                                           dst_offset_first_element_in_bytes
#if defined(HAS_BIAS)
                                           ,
                                           bias_ptr,
                                           bias_stride_x,
                                           bias_step_x,
                                           bias_offset_first_element_in_bytes
#endif
                                          );
}

#endif

#if defined(VEC_SIZE) && VEC_SIZE == 4

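// 4x1 horizontal output transforms for 3x1 and 5x1 kernels: thin wrappers around the
// winograd_output_transform_4x4_3x3_nchw and winograd_output_transform_4x4_5x5_nchw
// kernels above.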
__kernel void winograd_output_transform_4x1_3x1_nchw(
    TENSOR4D_DECLARATION(src),
    TENSOR4D_DECLARATION(dst)
#if defined(HAS_BIAS)
    ,
    VECTOR_DECLARATION(bias)
#endif
)
{
    winograd_output_transform_4x4_3x3_nchw(src_ptr,
                                           src_stride_x,
                                           src_step_x,
                                           src_stride_y,
                                           src_step_y,
                                           src_stride_z,
                                           src_step_z,
                                           src_stride_w,
                                           src_step_w,
                                           src_offset_first_element_in_bytes,
                                           dst_ptr,
                                           dst_stride_x,
                                           dst_step_x,
                                           dst_stride_y,
                                           dst_step_y,
                                           dst_stride_z,
                                           dst_step_z,
                                           dst_stride_w,
                                           dst_step_w,
                                           dst_offset_first_element_in_bytes
#if defined(HAS_BIAS)
                                           ,
                                           bias_ptr,
                                           bias_stride_x,
                                           bias_step_x,
                                           bias_offset_first_element_in_bytes
#endif
                                          );
}

__kernel void winograd_output_transform_4x1_5x1_nchw(
    TENSOR4D_DECLARATION(src),
    TENSOR4D_DECLARATION(dst)
#if defined(HAS_BIAS)
    ,
    VECTOR_DECLARATION(bias)
#endif
)
{
    winograd_output_transform_4x4_5x5_nchw(src_ptr,
                                           src_stride_x,
                                           src_step_x,
                                           src_stride_y,
                                           src_step_y,
                                           src_stride_z,
                                           src_step_z,
                                           src_stride_w,
                                           src_step_w,
                                           src_offset_first_element_in_bytes,
                                           dst_ptr,
                                           dst_stride_x,
                                           dst_step_x,
                                           dst_stride_y,
                                           dst_step_y,
                                           dst_stride_z,
                                           dst_step_z,
                                           dst_stride_w,
                                           dst_step_w,
                                           dst_offset_first_element_in_bytes
#if defined(HAS_BIAS)
                                           ,
                                           bias_ptr,
                                           bias_stride_x,
                                           bias_step_x,
                                           bias_offset_first_element_in_bytes
#endif
                                          );
}

#endif
#endif

#if defined(WINOGRAD_OUTPUT_TRANSFORM_VERTICAL)
#if defined(VEC_SIZE) && VEC_SIZE == 2

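// 1-D vertical output transforms (1xN output tile, 1xN kernel). As with the horizontal
// variants, these wrappers forward to the 2-D kernels, which perform only the vertical
// pass because WINOGRAD_OUTPUT_TRANSFORM_VERTICAL is defined.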
__kernel void winograd_output_transform_1x2_1x3_nchw(
    TENSOR4D_DECLARATION(src),
    TENSOR4D_DECLARATION(dst)
#if defined(HAS_BIAS)
    ,
    VECTOR_DECLARATION(bias)
#endif
)
{
    winograd_output_transform_2x2_3x3_nchw(src_ptr,
                                           src_stride_x,
                                           src_step_x,
                                           src_stride_y,
                                           src_step_y,
                                           src_stride_z,
                                           src_step_z,
                                           src_stride_w,
                                           src_step_w,
                                           src_offset_first_element_in_bytes,
                                           dst_ptr,
                                           dst_stride_x,
                                           dst_step_x,
                                           dst_stride_y,
                                           dst_step_y,
                                           dst_stride_z,
                                           dst_step_z,
                                           dst_stride_w,
                                           dst_step_w,
                                           dst_offset_first_element_in_bytes
#if defined(HAS_BIAS)
                                           ,
                                           bias_ptr,
                                           bias_stride_x,
                                           bias_step_x,
                                           bias_offset_first_element_in_bytes
#endif
                                          );
}

#endif

#if defined(VEC_SIZE) && VEC_SIZE == 4

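// 1x4 vertical output transforms for 1x3 and 1x5 kernels: thin wrappers around the
// winograd_output_transform_4x4_3x3_nchw and winograd_output_transform_4x4_5x5_nchw
// kernels above.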
__kernel void winograd_output_transform_1x4_1x3_nchw(
    TENSOR4D_DECLARATION(src),
    TENSOR4D_DECLARATION(dst)
#if defined(HAS_BIAS)
    ,
    VECTOR_DECLARATION(bias)
#endif
)
{
    winograd_output_transform_4x4_3x3_nchw(src_ptr,
                                           src_stride_x,
                                           src_step_x,
                                           src_stride_y,
                                           src_step_y,
                                           src_stride_z,
                                           src_step_z,
                                           src_stride_w,
                                           src_step_w,
                                           src_offset_first_element_in_bytes,
                                           dst_ptr,
                                           dst_stride_x,
                                           dst_step_x,
                                           dst_stride_y,
                                           dst_step_y,
                                           dst_stride_z,
                                           dst_step_z,
                                           dst_stride_w,
                                           dst_step_w,
                                           dst_offset_first_element_in_bytes
#if defined(HAS_BIAS)
                                           ,
                                           bias_ptr,
                                           bias_stride_x,
                                           bias_step_x,
                                           bias_offset_first_element_in_bytes
#endif
                                          );
}

__kernel void winograd_output_transform_1x4_1x5_nchw(
    TENSOR4D_DECLARATION(src),
    TENSOR4D_DECLARATION(dst)
#if defined(HAS_BIAS)
    ,
    VECTOR_DECLARATION(bias)
#endif
)
{
    winograd_output_transform_4x4_5x5_nchw(src_ptr,
                                           src_stride_x,
                                           src_step_x,
                                           src_stride_y,
                                           src_step_y,
                                           src_stride_z,
                                           src_step_z,
                                           src_stride_w,
                                           src_step_w,
                                           src_offset_first_element_in_bytes,
                                           dst_ptr,
                                           dst_stride_x,
                                           dst_step_x,
                                           dst_stride_y,
                                           dst_step_y,
                                           dst_stride_z,
                                           dst_step_z,
                                           dst_stride_w,
                                           dst_step_w,
                                           dst_offset_first_element_in_bytes
#if defined(HAS_BIAS)
                                           ,
                                           bias_ptr,
                                           bias_stride_x,
                                           bias_step_x,
                                           bias_offset_first_element_in_bytes
#endif
                                          );
}

#endif
#endif
#endif )"