tcg: Add choose_vector_size

This unifies 5 copies of checks for supported vector size,
and in the process fixes a missing check in tcg_gen_gvec_2s.

That missing check led to an assertion failure for 64-bit vector multiply,
which is not available in the AVX instruction set.

Suggested-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
branch: master
Richard Henderson 2018-02-17 08:30:16 -08:00
parent 7f34ed4bcd
commit adb196cbd5
1 changed file with 259 additions and 179 deletions
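
For context on the fix: before this patch, tcg_gen_gvec_2s chose a vector
type from size support alone and never consulted the backend about the
operation itself.  A minimal sketch of the failure mode, reusing names from
the diff below (the MO_64 multiply is inferred from the commit message, not
shown in the diff):

    /* Old tcg_gen_gvec_2s selection: size support only, no op check.  */
    if (TCG_TARGET_HAS_v128 && check_size_impl(oprsz, 16)) {
        type = TCG_TYPE_V128;
    }
    /* On an AVX host, later expanding e.g. a multiply with vece == MO_64
     * trips an assertion, because AVX has no 64-bit vector multiply.
     * choose_vector_type() folds tcg_can_emit_vec_op() into the selection,
     * so such an op falls back to the out-of-line helper instead.  */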


@@ -351,6 +351,42 @@ static void gen_dup_i64(unsigned vece, TCGv_i64 out, TCGv_i64 in)
     }
 }
 
+/* Select a supported vector type for implementing an operation on SIZE
+ * bytes.  If OP is 0, assume that the real operation to be performed is
+ * required by all backends.  Otherwise, make sure that OP can be performed
+ * on elements of size VECE in the selected type.  Do not select V64 if
+ * PREFER_I64 is true.  Return 0 if no vector type is selected.
+ */
+static TCGType choose_vector_type(TCGOpcode op, unsigned vece, uint32_t size,
+                                  bool prefer_i64)
+{
+    if (TCG_TARGET_HAS_v256 && check_size_impl(size, 32)) {
+        if (op == 0) {
+            return TCG_TYPE_V256;
+        }
+        /* Recall that ARM SVE allows vector sizes that are not a
+         * power of 2, but always a multiple of 16.  The intent is
+         * that e.g. size == 80 would be expanded with 2x32 + 1x16.
+         * It is hard to imagine a case in which v256 is supported
+         * but v128 is not, but check anyway.
+         */
+        if (tcg_can_emit_vec_op(op, TCG_TYPE_V256, vece)
+            && (size % 32 == 0
+                || tcg_can_emit_vec_op(op, TCG_TYPE_V128, vece))) {
+            return TCG_TYPE_V256;
+        }
+    }
+    if (TCG_TARGET_HAS_v128 && check_size_impl(size, 16)
+        && (op == 0 || tcg_can_emit_vec_op(op, TCG_TYPE_V128, vece))) {
+        return TCG_TYPE_V128;
+    }
+    if (TCG_TARGET_HAS_v64 && !prefer_i64 && check_size_impl(size, 8)
+        && (op == 0 || tcg_can_emit_vec_op(op, TCG_TYPE_V64, vece))) {
+        return TCG_TYPE_V64;
+    }
+    return 0;
+}
+
 /* Set OPRSZ bytes at DOFS to replications of IN_32, IN_64 or IN_C.
  * Only one of IN_32 or IN_64 may be set;
  * IN_C is used if IN_32 and IN_64 are unset.
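
As a worked example of the "2x32 + 1x16" note above (a hypothetical host
supporting both V256 and V128, with oprsz == 80):

    /* choose_vector_type(op, vece, 80, false) returns TCG_TYPE_V256
     * only if the op also works on V128, since 80 % 32 != 0.
     * The expanders below then split the work:
     *   some = QEMU_ALIGN_DOWN(80, 32) = 64;  -> two 32-byte V256 steps
     *   80 - 64 = 16 remaining bytes          -> one 16-byte V128 step
     */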
@@ -376,19 +412,12 @@ static void do_dup(unsigned vece, uint32_t dofs, uint32_t oprsz,
         }
     }
 
-    type = 0;
-    if (TCG_TARGET_HAS_v256 && check_size_impl(oprsz, 32)) {
-        type = TCG_TYPE_V256;
-    } else if (TCG_TARGET_HAS_v128 && check_size_impl(oprsz, 16)) {
-        type = TCG_TYPE_V128;
-    } else if (TCG_TARGET_HAS_v64 && check_size_impl(oprsz, 8)
-               /* Prefer integer when 64-bit host and no variable dup.  */
-               && !(TCG_TARGET_REG_BITS == 64 && in_32 == NULL
-                    && (in_64 == NULL || vece == MO_64))) {
-        type = TCG_TYPE_V64;
-    }
-
-    /* Implement inline with a vector type, if possible.  */
+    /* Implement inline with a vector type, if possible.
+     * Prefer integer when 64-bit host and no variable dup.
+     */
+    type = choose_vector_type(0, vece, oprsz,
+                              (TCG_TARGET_REG_BITS == 64 && in_32 == NULL
+                               && (in_64 == NULL || vece == MO_64)));
     if (type != 0) {
         TCGv_vec t_vec = tcg_temp_new_vec(type);
@@ -414,21 +443,30 @@ static void do_dup(unsigned vece, uint32_t dofs, uint32_t oprsz,
         }
 
         i = 0;
-        if (TCG_TARGET_HAS_v256) {
+        switch (type) {
+        case TCG_TYPE_V256:
+            /* Recall that ARM SVE allows vector sizes that are not a
+             * power of 2, but always a multiple of 16.  The intent is
+             * that e.g. size == 80 would be expanded with 2x32 + 1x16.
+             */
             for (; i + 32 <= oprsz; i += 32) {
                 tcg_gen_stl_vec(t_vec, cpu_env, dofs + i, TCG_TYPE_V256);
             }
-        }
-        if (TCG_TARGET_HAS_v128) {
+            /* fallthru */
+        case TCG_TYPE_V128:
             for (; i + 16 <= oprsz; i += 16) {
                 tcg_gen_stl_vec(t_vec, cpu_env, dofs + i, TCG_TYPE_V128);
             }
-        }
-        if (TCG_TARGET_HAS_v64) {
+            break;
+        case TCG_TYPE_V64:
             for (; i < oprsz; i += 8) {
                 tcg_gen_stl_vec(t_vec, cpu_env, dofs + i, TCG_TYPE_V64);
             }
+            break;
+        default:
+            g_assert_not_reached();
         }
         tcg_temp_free_vec(t_vec);
         goto done;
     }
@@ -484,7 +522,7 @@ static void do_dup(unsigned vece, uint32_t dofs, uint32_t oprsz,
             }
         }
         tcg_temp_free_i64(t_64);
         goto done;
     }
 
     /* Otherwise implement out of line.  */
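
The dup path above is normally reached through the public dup helpers; a
minimal, hypothetical call (constant and sizes chosen arbitrarily, helper
name as in this era's tcg-op-gvec.h):

    /* Broadcast the byte 0x01 across 16 bytes at dofs, clearing the
     * tail up to maxsz; this reaches do_dup() with the IN_C operand.  */
    tcg_gen_gvec_dup8i(dofs, 16, maxsz, 0x01);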
@@ -866,49 +904,55 @@ static void expand_4_vec(unsigned vece, uint32_t dofs, uint32_t aofs,
 void tcg_gen_gvec_2(uint32_t dofs, uint32_t aofs,
                     uint32_t oprsz, uint32_t maxsz, const GVecGen2 *g)
 {
+    TCGType type;
+    uint32_t some;
+
     check_size_align(oprsz, maxsz, dofs | aofs);
     check_overlap_2(dofs, aofs, maxsz);
 
-    /* Recall that ARM SVE allows vector sizes that are not a power of 2.
-       Expand with successively smaller host vector sizes.  The intent is
-       that e.g. oprsz == 80 would be expanded with 2x32 + 1x16.  */
-    /* ??? For maxsz > oprsz, the host may be able to use an opr-sized
-       operation, zeroing the balance of the register.  We can then
-       use a max-sized store to implement the clearing without an extra
-       store operation.  This is true for aarch64 and x86_64 hosts.  */
-
-    if (TCG_TARGET_HAS_v256 && g->fniv && check_size_impl(oprsz, 32)
-        && (!g->opc || tcg_can_emit_vec_op(g->opc, TCG_TYPE_V256, g->vece))) {
-        uint32_t some = QEMU_ALIGN_DOWN(oprsz, 32);
+    type = 0;
+    if (g->fniv) {
+        type = choose_vector_type(g->opc, g->vece, oprsz, g->prefer_i64);
+    }
+    switch (type) {
+    case TCG_TYPE_V256:
+        /* Recall that ARM SVE allows vector sizes that are not a
+         * power of 2, but always a multiple of 16.  The intent is
+         * that e.g. size == 80 would be expanded with 2x32 + 1x16.
+         */
+        some = QEMU_ALIGN_DOWN(oprsz, 32);
         expand_2_vec(g->vece, dofs, aofs, some, 32, TCG_TYPE_V256, g->fniv);
         if (some == oprsz) {
-            goto done;
+            break;
         }
         dofs += some;
         aofs += some;
         oprsz -= some;
         maxsz -= some;
-    }
-
-    if (TCG_TARGET_HAS_v128 && g->fniv && check_size_impl(oprsz, 16)
-        && (!g->opc || tcg_can_emit_vec_op(g->opc, TCG_TYPE_V128, g->vece))) {
+        /* fallthru */
+    case TCG_TYPE_V128:
         expand_2_vec(g->vece, dofs, aofs, oprsz, 16, TCG_TYPE_V128, g->fniv);
-    } else if (TCG_TARGET_HAS_v64 && !g->prefer_i64
-               && g->fniv && check_size_impl(oprsz, 8)
-               && (!g->opc
-                   || tcg_can_emit_vec_op(g->opc, TCG_TYPE_V64, g->vece))) {
+        break;
+    case TCG_TYPE_V64:
         expand_2_vec(g->vece, dofs, aofs, oprsz, 8, TCG_TYPE_V64, g->fniv);
-    } else if (g->fni8 && check_size_impl(oprsz, 8)) {
-        expand_2_i64(dofs, aofs, oprsz, g->fni8);
-    } else if (g->fni4 && check_size_impl(oprsz, 4)) {
-        expand_2_i32(dofs, aofs, oprsz, g->fni4);
-    } else {
-        assert(g->fno != NULL);
-        tcg_gen_gvec_2_ool(dofs, aofs, oprsz, maxsz, g->data, g->fno);
-        return;
+        break;
+    case 0:
+        if (g->fni8 && check_size_impl(oprsz, 8)) {
+            expand_2_i64(dofs, aofs, oprsz, g->fni8);
+        } else if (g->fni4 && check_size_impl(oprsz, 4)) {
+            expand_2_i32(dofs, aofs, oprsz, g->fni4);
+        } else {
+            assert(g->fno != NULL);
+            tcg_gen_gvec_2_ool(dofs, aofs, oprsz, maxsz, g->data, g->fno);
+            return;
+        }
+        break;
+    default:
+        g_assert_not_reached();
     }
 
- done:
     if (oprsz < maxsz) {
         expand_clr(dofs + oprsz, maxsz - oprsz);
     }
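
To see how the GVecGen2 fields drive this switch, here is a sketch modeled
on QEMU's own logical-not expansion (field values assumed from the
tcg-op-gvec.c of this era; not part of the patch):

    static const GVecGen2 g = {
        .fni8 = tcg_gen_not_i64,      /* 64-bit integer fallback (case 0)  */
        .fniv = tcg_gen_not_vec,      /* enables the vector cases above    */
        .fno = gen_helper_gvec_not,   /* out-of-line helper of last resort */
        .opc = INDEX_op_not_vec,      /* validated via choose_vector_type  */
        .prefer_i64 = TCG_TARGET_REG_BITS == 64,
    };
    tcg_gen_gvec_2(dofs, aofs, oprsz, maxsz, &g);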
@@ -918,53 +962,64 @@ void tcg_gen_gvec_2(uint32_t dofs, uint32_t aofs,
 void tcg_gen_gvec_2i(uint32_t dofs, uint32_t aofs, uint32_t oprsz,
                      uint32_t maxsz, int64_t c, const GVecGen2i *g)
 {
+    TCGType type;
+    uint32_t some;
+
     check_size_align(oprsz, maxsz, dofs | aofs);
     check_overlap_2(dofs, aofs, maxsz);
 
-    /* Recall that ARM SVE allows vector sizes that are not a power of 2.
-       Expand with successively smaller host vector sizes.  The intent is
-       that e.g. oprsz == 80 would be expanded with 2x32 + 1x16.  */
-
-    if (TCG_TARGET_HAS_v256 && g->fniv && check_size_impl(oprsz, 32)
-        && (!g->opc || tcg_can_emit_vec_op(g->opc, TCG_TYPE_V256, g->vece))) {
-        uint32_t some = QEMU_ALIGN_DOWN(oprsz, 32);
+    type = 0;
+    if (g->fniv) {
+        type = choose_vector_type(g->opc, g->vece, oprsz, g->prefer_i64);
+    }
+    switch (type) {
+    case TCG_TYPE_V256:
+        /* Recall that ARM SVE allows vector sizes that are not a
+         * power of 2, but always a multiple of 16.  The intent is
+         * that e.g. size == 80 would be expanded with 2x32 + 1x16.
+         */
+        some = QEMU_ALIGN_DOWN(oprsz, 32);
         expand_2i_vec(g->vece, dofs, aofs, some, 32, TCG_TYPE_V256,
                       c, g->load_dest, g->fniv);
         if (some == oprsz) {
-            goto done;
+            break;
         }
         dofs += some;
         aofs += some;
         oprsz -= some;
         maxsz -= some;
-    }
-
-    if (TCG_TARGET_HAS_v128 && g->fniv && check_size_impl(oprsz, 16)
-        && (!g->opc || tcg_can_emit_vec_op(g->opc, TCG_TYPE_V128, g->vece))) {
+        /* fallthru */
+    case TCG_TYPE_V128:
         expand_2i_vec(g->vece, dofs, aofs, oprsz, 16, TCG_TYPE_V128,
                       c, g->load_dest, g->fniv);
-    } else if (TCG_TARGET_HAS_v64 && !g->prefer_i64
-               && g->fniv && check_size_impl(oprsz, 8)
-               && (!g->opc
-                   || tcg_can_emit_vec_op(g->opc, TCG_TYPE_V64, g->vece))) {
+        break;
+    case TCG_TYPE_V64:
         expand_2i_vec(g->vece, dofs, aofs, oprsz, 8, TCG_TYPE_V64,
                       c, g->load_dest, g->fniv);
-    } else if (g->fni8 && check_size_impl(oprsz, 8)) {
-        expand_2i_i64(dofs, aofs, oprsz, c, g->load_dest, g->fni8);
-    } else if (g->fni4 && check_size_impl(oprsz, 4)) {
-        expand_2i_i32(dofs, aofs, oprsz, c, g->load_dest, g->fni4);
-    } else {
-        if (g->fno) {
-            tcg_gen_gvec_2_ool(dofs, aofs, oprsz, maxsz, c, g->fno);
-        } else {
-            TCGv_i64 tcg_c = tcg_const_i64(c);
-            tcg_gen_gvec_2i_ool(dofs, aofs, tcg_c, oprsz, maxsz, c, g->fnoi);
-            tcg_temp_free_i64(tcg_c);
+        break;
+    case 0:
+        if (g->fni8 && check_size_impl(oprsz, 8)) {
+            expand_2i_i64(dofs, aofs, oprsz, c, g->load_dest, g->fni8);
+        } else if (g->fni4 && check_size_impl(oprsz, 4)) {
+            expand_2i_i32(dofs, aofs, oprsz, c, g->load_dest, g->fni4);
+        } else {
+            if (g->fno) {
+                tcg_gen_gvec_2_ool(dofs, aofs, oprsz, maxsz, c, g->fno);
+            } else {
+                TCGv_i64 tcg_c = tcg_const_i64(c);
+                tcg_gen_gvec_2i_ool(dofs, aofs, tcg_c, oprsz,
                                    maxsz, c, g->fnoi);
+                tcg_temp_free_i64(tcg_c);
+            }
+            return;
         }
-        return;
+        break;
+    default:
+        g_assert_not_reached();
     }
 
- done:
     if (oprsz < maxsz) {
         expand_clr(dofs + oprsz, maxsz - oprsz);
     }
@@ -981,37 +1036,30 @@ void tcg_gen_gvec_2s(uint32_t dofs, uint32_t aofs, uint32_t oprsz,
     type = 0;
     if (g->fniv) {
-        if (TCG_TARGET_HAS_v256 && check_size_impl(oprsz, 32)) {
-            type = TCG_TYPE_V256;
-        } else if (TCG_TARGET_HAS_v128 && check_size_impl(oprsz, 16)) {
-            type = TCG_TYPE_V128;
-        } else if (TCG_TARGET_HAS_v64 && !g->prefer_i64
-                   && check_size_impl(oprsz, 8)) {
-            type = TCG_TYPE_V64;
-        }
+        type = choose_vector_type(g->opc, g->vece, oprsz, g->prefer_i64);
     }
     if (type != 0) {
         TCGv_vec t_vec = tcg_temp_new_vec(type);
+        uint32_t some;
 
         tcg_gen_dup_i64_vec(g->vece, t_vec, c);
 
-        /* Recall that ARM SVE allows vector sizes that are not a power of 2.
-           Expand with successively smaller host vector sizes.  The intent is
-           that e.g. oprsz == 80 would be expanded with 2x32 + 1x16.  */
         switch (type) {
         case TCG_TYPE_V256:
-            {
-                uint32_t some = QEMU_ALIGN_DOWN(oprsz, 32);
-                expand_2s_vec(g->vece, dofs, aofs, some, 32, TCG_TYPE_V256,
-                              t_vec, g->scalar_first, g->fniv);
-                if (some == oprsz) {
-                    break;
-                }
-                dofs += some;
-                aofs += some;
-                oprsz -= some;
-                maxsz -= some;
-            }
+            /* Recall that ARM SVE allows vector sizes that are not a
+             * power of 2, but always a multiple of 16.  The intent is
+             * that e.g. size == 80 would be expanded with 2x32 + 1x16.
+             */
+            some = QEMU_ALIGN_DOWN(oprsz, 32);
+            expand_2s_vec(g->vece, dofs, aofs, some, 32, TCG_TYPE_V256,
+                          t_vec, g->scalar_first, g->fniv);
+            if (some == oprsz) {
+                break;
+            }
+            dofs += some;
+            aofs += some;
+            oprsz -= some;
+            maxsz -= some;
             /* fallthru */
 
         case TCG_TYPE_V128:
@@ -1055,48 +1103,60 @@ void tcg_gen_gvec_2s(uint32_t dofs, uint32_t aofs, uint32_t oprsz,
 void tcg_gen_gvec_3(uint32_t dofs, uint32_t aofs, uint32_t bofs,
                     uint32_t oprsz, uint32_t maxsz, const GVecGen3 *g)
 {
+    TCGType type;
+    uint32_t some;
+
     check_size_align(oprsz, maxsz, dofs | aofs | bofs);
     check_overlap_3(dofs, aofs, bofs, maxsz);
 
-    /* Recall that ARM SVE allows vector sizes that are not a power of 2.
-       Expand with successively smaller host vector sizes.  The intent is
-       that e.g. oprsz == 80 would be expanded with 2x32 + 1x16.  */
-
-    if (TCG_TARGET_HAS_v256 && g->fniv && check_size_impl(oprsz, 32)
-        && (!g->opc || tcg_can_emit_vec_op(g->opc, TCG_TYPE_V256, g->vece))) {
-        uint32_t some = QEMU_ALIGN_DOWN(oprsz, 32);
+    type = 0;
+    if (g->fniv) {
+        type = choose_vector_type(g->opc, g->vece, oprsz, g->prefer_i64);
+    }
+    switch (type) {
+    case TCG_TYPE_V256:
+        /* Recall that ARM SVE allows vector sizes that are not a
+         * power of 2, but always a multiple of 16.  The intent is
+         * that e.g. size == 80 would be expanded with 2x32 + 1x16.
+         */
+        some = QEMU_ALIGN_DOWN(oprsz, 32);
         expand_3_vec(g->vece, dofs, aofs, bofs, some, 32, TCG_TYPE_V256,
                      g->load_dest, g->fniv);
         if (some == oprsz) {
-            goto done;
+            break;
         }
         dofs += some;
         aofs += some;
         bofs += some;
         oprsz -= some;
         maxsz -= some;
-    }
-
-    if (TCG_TARGET_HAS_v128 && g->fniv && check_size_impl(oprsz, 16)
-        && (!g->opc || tcg_can_emit_vec_op(g->opc, TCG_TYPE_V128, g->vece))) {
+        /* fallthru */
+    case TCG_TYPE_V128:
         expand_3_vec(g->vece, dofs, aofs, bofs, oprsz, 16, TCG_TYPE_V128,
                      g->load_dest, g->fniv);
-    } else if (TCG_TARGET_HAS_v64 && !g->prefer_i64
-               && g->fniv && check_size_impl(oprsz, 8)
-               && (!g->opc
-                   || tcg_can_emit_vec_op(g->opc, TCG_TYPE_V64, g->vece))) {
+        break;
+    case TCG_TYPE_V64:
         expand_3_vec(g->vece, dofs, aofs, bofs, oprsz, 8, TCG_TYPE_V64,
                      g->load_dest, g->fniv);
-    } else if (g->fni8 && check_size_impl(oprsz, 8)) {
-        expand_3_i64(dofs, aofs, bofs, oprsz, g->load_dest, g->fni8);
-    } else if (g->fni4 && check_size_impl(oprsz, 4)) {
-        expand_3_i32(dofs, aofs, bofs, oprsz, g->load_dest, g->fni4);
-    } else {
-        assert(g->fno != NULL);
-        tcg_gen_gvec_3_ool(dofs, aofs, bofs, oprsz, maxsz, g->data, g->fno);
+        break;
+    case 0:
+        if (g->fni8 && check_size_impl(oprsz, 8)) {
+            expand_3_i64(dofs, aofs, bofs, oprsz, g->load_dest, g->fni8);
+        } else if (g->fni4 && check_size_impl(oprsz, 4)) {
+            expand_3_i32(dofs, aofs, bofs, oprsz, g->load_dest, g->fni4);
+        } else {
+            assert(g->fno != NULL);
+            tcg_gen_gvec_3_ool(dofs, aofs, bofs, oprsz,
+                               maxsz, g->data, g->fno);
+            return;
+        }
+        break;
+    default:
+        g_assert_not_reached();
     }
 
- done:
     if (oprsz < maxsz) {
         expand_clr(dofs + oprsz, maxsz - oprsz);
     }
@@ -1106,20 +1166,27 @@ void tcg_gen_gvec_3(uint32_t dofs, uint32_t aofs, uint32_t bofs,
 void tcg_gen_gvec_4(uint32_t dofs, uint32_t aofs, uint32_t bofs, uint32_t cofs,
                     uint32_t oprsz, uint32_t maxsz, const GVecGen4 *g)
 {
+    TCGType type;
+    uint32_t some;
+
     check_size_align(oprsz, maxsz, dofs | aofs | bofs | cofs);
     check_overlap_4(dofs, aofs, bofs, cofs, maxsz);
 
-    /* Recall that ARM SVE allows vector sizes that are not a power of 2.
-       Expand with successively smaller host vector sizes.  The intent is
-       that e.g. oprsz == 80 would be expanded with 2x32 + 1x16.  */
-
-    if (TCG_TARGET_HAS_v256 && g->fniv && check_size_impl(oprsz, 32)
-        && (!g->opc || tcg_can_emit_vec_op(g->opc, TCG_TYPE_V256, g->vece))) {
-        uint32_t some = QEMU_ALIGN_DOWN(oprsz, 32);
+    type = 0;
+    if (g->fniv) {
+        type = choose_vector_type(g->opc, g->vece, oprsz, g->prefer_i64);
+    }
+    switch (type) {
+    case TCG_TYPE_V256:
+        /* Recall that ARM SVE allows vector sizes that are not a
+         * power of 2, but always a multiple of 16.  The intent is
+         * that e.g. size == 80 would be expanded with 2x32 + 1x16.
+         */
+        some = QEMU_ALIGN_DOWN(oprsz, 32);
         expand_4_vec(g->vece, dofs, aofs, bofs, cofs, some,
                      32, TCG_TYPE_V256, g->fniv);
         if (some == oprsz) {
-            goto done;
+            break;
         }
         dofs += some;
         aofs += some;
@@ -1127,30 +1194,33 @@ void tcg_gen_gvec_4(uint32_t dofs, uint32_t aofs, uint32_t bofs, uint32_t cofs,
         cofs += some;
         oprsz -= some;
         maxsz -= some;
-    }
-
-    if (TCG_TARGET_HAS_v128 && g->fniv && check_size_impl(oprsz, 16)
-        && (!g->opc || tcg_can_emit_vec_op(g->opc, TCG_TYPE_V128, g->vece))) {
+        /* fallthru */
+    case TCG_TYPE_V128:
         expand_4_vec(g->vece, dofs, aofs, bofs, cofs, oprsz,
                      16, TCG_TYPE_V128, g->fniv);
-    } else if (TCG_TARGET_HAS_v64 && !g->prefer_i64
-               && g->fniv && check_size_impl(oprsz, 8)
-               && (!g->opc
-                   || tcg_can_emit_vec_op(g->opc, TCG_TYPE_V64, g->vece))) {
+        break;
+    case TCG_TYPE_V64:
         expand_4_vec(g->vece, dofs, aofs, bofs, cofs, oprsz,
                      8, TCG_TYPE_V64, g->fniv);
-    } else if (g->fni8 && check_size_impl(oprsz, 8)) {
-        expand_4_i64(dofs, aofs, bofs, cofs, oprsz, g->fni8);
-    } else if (g->fni4 && check_size_impl(oprsz, 4)) {
-        expand_4_i32(dofs, aofs, bofs, cofs, oprsz, g->fni4);
-    } else {
-        assert(g->fno != NULL);
-        tcg_gen_gvec_4_ool(dofs, aofs, bofs, cofs,
-                           oprsz, maxsz, g->data, g->fno);
-        return;
+        break;
+    case 0:
+        if (g->fni8 && check_size_impl(oprsz, 8)) {
+            expand_4_i64(dofs, aofs, bofs, cofs, oprsz, g->fni8);
+        } else if (g->fni4 && check_size_impl(oprsz, 4)) {
+            expand_4_i32(dofs, aofs, bofs, cofs, oprsz, g->fni4);
+        } else {
+            assert(g->fno != NULL);
+            tcg_gen_gvec_4_ool(dofs, aofs, bofs, cofs,
+                               oprsz, maxsz, g->data, g->fno);
+            return;
+        }
+        break;
+    default:
+        g_assert_not_reached();
     }
 
- done:
     if (oprsz < maxsz) {
         expand_clr(dofs + oprsz, maxsz - oprsz);
     }
@@ -2155,6 +2225,8 @@ void tcg_gen_gvec_cmp(TCGCond cond, unsigned vece, uint32_t dofs,
         [TCG_COND_LTU] = ltu_fn,
         [TCG_COND_LEU] = leu_fn,
     };
+    TCGType type;
+    uint32_t some;
 
     check_size_align(oprsz, maxsz, dofs | aofs | bofs);
     check_overlap_3(dofs, aofs, bofs, maxsz);
@@ -2165,51 +2237,59 @@ void tcg_gen_gvec_cmp(TCGCond cond, unsigned vece, uint32_t dofs,
         return;
     }
 
-    /* Recall that ARM SVE allows vector sizes that are not a power of 2.
-       Expand with successively smaller host vector sizes.  The intent is
-       that e.g. oprsz == 80 would be expanded with 2x32 + 1x16.  */
-
-    if (TCG_TARGET_HAS_v256 && check_size_impl(oprsz, 32)
-        && tcg_can_emit_vec_op(INDEX_op_cmp_vec, TCG_TYPE_V256, vece)) {
-        uint32_t some = QEMU_ALIGN_DOWN(oprsz, 32);
+    /* Implement inline with a vector type, if possible.
+     * Prefer integer when 64-bit host and 64-bit comparison.
+     */
+    type = choose_vector_type(INDEX_op_cmp_vec, vece, oprsz,
+                              TCG_TARGET_REG_BITS == 64 && vece == MO_64);
+    switch (type) {
+    case TCG_TYPE_V256:
+        /* Recall that ARM SVE allows vector sizes that are not a
+         * power of 2, but always a multiple of 16.  The intent is
+         * that e.g. size == 80 would be expanded with 2x32 + 1x16.
+         */
+        some = QEMU_ALIGN_DOWN(oprsz, 32);
         expand_cmp_vec(vece, dofs, aofs, bofs, some, 32, TCG_TYPE_V256, cond);
         if (some == oprsz) {
-            goto done;
+            break;
         }
         dofs += some;
         aofs += some;
         bofs += some;
         oprsz -= some;
         maxsz -= some;
-    }
-
-    if (TCG_TARGET_HAS_v128 && check_size_impl(oprsz, 16)
-        && tcg_can_emit_vec_op(INDEX_op_cmp_vec, TCG_TYPE_V128, vece)) {
+        /* fallthru */
+    case TCG_TYPE_V128:
         expand_cmp_vec(vece, dofs, aofs, bofs, oprsz, 16, TCG_TYPE_V128, cond);
-    } else if (TCG_TARGET_HAS_v64
-               && check_size_impl(oprsz, 8)
-               && (TCG_TARGET_REG_BITS == 32 || vece != MO_64)
-               && tcg_can_emit_vec_op(INDEX_op_cmp_vec, TCG_TYPE_V64, vece)) {
+        break;
+    case TCG_TYPE_V64:
         expand_cmp_vec(vece, dofs, aofs, bofs, oprsz, 8, TCG_TYPE_V64, cond);
-    } else if (vece == MO_64 && check_size_impl(oprsz, 8)) {
-        expand_cmp_i64(dofs, aofs, bofs, oprsz, cond);
-    } else if (vece == MO_32 && check_size_impl(oprsz, 4)) {
-        expand_cmp_i32(dofs, aofs, bofs, oprsz, cond);
-    } else {
-        gen_helper_gvec_3 * const *fn = fns[cond];
-
-        if (fn == NULL) {
-            uint32_t tmp;
-            tmp = aofs, aofs = bofs, bofs = tmp;
-            cond = tcg_swap_cond(cond);
-            fn = fns[cond];
-            assert(fn != NULL);
+        break;
+    case 0:
+        if (vece == MO_64 && check_size_impl(oprsz, 8)) {
+            expand_cmp_i64(dofs, aofs, bofs, oprsz, cond);
+        } else if (vece == MO_32 && check_size_impl(oprsz, 4)) {
+            expand_cmp_i32(dofs, aofs, bofs, oprsz, cond);
+        } else {
+            gen_helper_gvec_3 * const *fn = fns[cond];
+
+            if (fn == NULL) {
+                uint32_t tmp;
+                tmp = aofs, aofs = bofs, bofs = tmp;
+                cond = tcg_swap_cond(cond);
+                fn = fns[cond];
+                assert(fn != NULL);
+            }
+            tcg_gen_gvec_3_ool(dofs, aofs, bofs, oprsz, maxsz, 0, fn[vece]);
+            return;
         }
-        tcg_gen_gvec_3_ool(dofs, aofs, bofs, oprsz, maxsz, 0, fn[vece]);
-        return;
+        break;
+    default:
+        g_assert_not_reached();
     }
 
- done:
     if (oprsz < maxsz) {
         expand_clr(dofs + oprsz, maxsz - oprsz);
     }
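
Finally, a hedged usage sketch for the cmp path (signature as in this era's
tcg-op-gvec.h): an equality compare of 32-bit lanes, which on a host
without vector support takes the expand_cmp_i32 route in case 0:

    /* Compare oprsz/4 pairs of 32-bit elements, writing all-ones or
     * all-zeros per lane at dofs, then clearing up to maxsz.  */
    tcg_gen_gvec_cmp(TCG_COND_EQ, MO_32, dofs, aofs, bofs, oprsz, maxsz);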