target/arm: Avoid tcg_const_i64 in handle_vec_simd_sqshrn

It is easy enough to use mov instead of or-with-zero, rather than
relying on the optimizer to fold away the or.

Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Richard Henderson 2023-02-25 12:24:07 -10:00
parent a2c4fb8cae
commit 1b7bc9b5c8
1 changed file with 6 additions and 2 deletions

@@ -8459,7 +8459,7 @@ static void handle_vec_simd_sqshrn(DisasContext *s, bool is_scalar, bool is_q,
     tcg_rn = tcg_temp_new_i64();
     tcg_rd = tcg_temp_new_i64();
     tcg_rd_narrowed = tcg_temp_new_i32();
-    tcg_final = tcg_const_i64(0);
+    tcg_final = tcg_temp_new_i64();
 
     if (round) {
         tcg_round = tcg_constant_i64(1ULL << (shift - 1));
@@ -8473,7 +8473,11 @@ static void handle_vec_simd_sqshrn(DisasContext *s, bool is_scalar, bool is_q,
                                 false, is_u_shift, size+1, shift);
         narrowfn(tcg_rd_narrowed, cpu_env, tcg_rd);
         tcg_gen_extu_i32_i64(tcg_rd, tcg_rd_narrowed);
-        tcg_gen_deposit_i64(tcg_final, tcg_final, tcg_rd, esize * i, esize);
+        if (i == 0) {
+            tcg_gen_mov_i64(tcg_final, tcg_rd);
+        } else {
+            tcg_gen_deposit_i64(tcg_final, tcg_final, tcg_rd, esize * i, esize);
+        }
     }
 
     if (!is_q) {
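
For illustration only (not part of the patch): a stand-alone C model of the
two approaches. The helper deposit64() and the element values below are
hypothetical stand-ins for tcg_gen_deposit_i64() and the narrowed results;
the point is that depositing element 0 into an all-zero value produces the
same result as a plain move, so the zero-initialized temporary can be dropped.

#include <assert.h>
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/*
 * Hypothetical helper modelling the semantics of
 * tcg_gen_deposit_i64(ret, arg1, arg2, ofs, len): insert the low
 * 'len' bits of arg2 into arg1 at bit offset 'ofs'.
 */
static uint64_t deposit64(uint64_t arg1, uint64_t arg2, int ofs, int len)
{
    uint64_t mask = (len == 64 ? ~0ull : (1ull << len) - 1) << ofs;

    return (arg1 & ~mask) | ((arg2 << ofs) & mask);
}

int main(void)
{
    /* Made-up narrowed element values; esize = 16 bits, 4 elements. */
    uint64_t elt[4] = { 0x1234, 0xabcd, 0x00ff, 0x8000 };
    int esize = 16, elements = 4, i;
    uint64_t old_final = 0;   /* old code: deposit each element into zero */
    uint64_t new_final = 0;   /* new code: plain mov on iteration 0       */

    for (i = 0; i < elements; i++) {
        old_final = deposit64(old_final, elt[i], esize * i, esize);
        if (i == 0) {
            new_final = elt[i];                                   /* mov */
        } else {
            new_final = deposit64(new_final, elt[i], esize * i, esize);
        }
    }

    assert(old_final == new_final);
    printf("both approaches yield 0x%016" PRIx64 "\n", old_final);
    return 0;
}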