diff --git a/tcg/README b/tcg/README
index ce8bebab37..1d48aa963f 100644
--- a/tcg/README
+++ b/tcg/README
@@ -402,6 +402,23 @@ double-word product T0. The later is returned in two single-word outputs.
 
 Similar to mulu2, except the two inputs T1 and T2 are signed.
 
+********* Memory Barrier support
+
+* mb <$arg>
+
+Generate a target memory barrier instruction to ensure the memory ordering
+enforced by a corresponding guest memory barrier instruction. The ordering
+enforced by the backend may be stricter than the ordering required by the
+guest; it cannot be weaker. This opcode takes a constant argument which is
+required to generate the appropriate barrier instruction. The backend should
+take care to emit the target barrier instruction only when necessary, i.e.,
+for SMP guests and when MTTCG is enabled.
+
+The guest translators should generate this opcode for all guest instructions
+which have ordering side effects.
+
+Please see docs/atomics.txt for more information on memory barriers.
+
 ********* 64-bit guest on 32-bit host support
 
 The following opcodes are internal to TCG. Thus they are to be implemented by
diff --git a/tcg/tcg-op.c b/tcg/tcg-op.c
index 0243c99094..291d50bb7d 100644
--- a/tcg/tcg-op.c
+++ b/tcg/tcg-op.c
@@ -148,6 +148,23 @@ void tcg_gen_op6(TCGContext *ctx, TCGOpcode opc, TCGArg a1, TCGArg a2,
     tcg_emit_op(ctx, opc, pi);
 }
 
+void tcg_gen_mb(TCGBar mb_type)
+{
+    bool emit_barriers = true;
+
+#ifndef CONFIG_USER_ONLY
+    /* TODO: When MTTCG is available for system mode, we will check
+     * the following condition and enable emit_barriers
+     * (qemu_tcg_mttcg_enabled() && smp_cpus > 1)
+     */
+    emit_barriers = false;
+#endif
+
+    if (emit_barriers) {
+        tcg_gen_op1(&tcg_ctx, INDEX_op_mb, mb_type);
+    }
+}
+
 /* 32 bit ops */
 
 void tcg_gen_addi_i32(TCGv_i32 ret, TCGv_i32 arg1, int32_t arg2)
diff --git a/tcg/tcg-op.h b/tcg/tcg-op.h
index f217e80747..02cb376681 100644
--- a/tcg/tcg-op.h
+++ b/tcg/tcg-op.h
@@ -261,6 +261,8 @@ static inline void tcg_gen_br(TCGLabel *l)
     tcg_gen_op1(&tcg_ctx, INDEX_op_br, label_arg(l));
 }
 
+void tcg_gen_mb(TCGBar);
+
 /* Helper calls. */
 
 /* 32 bit ops */
diff --git a/tcg/tcg-opc.h b/tcg/tcg-opc.h
index 6d0410c4b9..45528d2192 100644
--- a/tcg/tcg-opc.h
+++ b/tcg/tcg-opc.h
@@ -42,6 +42,8 @@ DEF(br, 0, 0, 1, TCG_OPF_BB_END)
 # define IMPL64 TCG_OPF_64BIT
 #endif
 
+DEF(mb, 0, 0, 1, 0)
+
 DEF(mov_i32, 1, 1, 0, TCG_OPF_NOT_PRESENT)
 DEF(movi_i32, 1, 0, 1, TCG_OPF_NOT_PRESENT)
 DEF(setcond_i32, 1, 2, 1, 0)
diff --git a/tcg/tcg.h b/tcg/tcg.h
index 0384d7adf0..c9949aa79b 100644
--- a/tcg/tcg.h
+++ b/tcg/tcg.h
@@ -465,6 +465,23 @@ static inline intptr_t QEMU_ARTIFICIAL GET_TCGV_PTR(TCGv_ptr t)
 #define TCG_CALL_DUMMY_TCGV MAKE_TCGV_I32(-1)
 #define TCG_CALL_DUMMY_ARG ((TCGArg)(-1))
 
+typedef enum {
+    /* Used to indicate the type of accesses on which ordering
+       is to be ensured.  Modeled after SPARC barriers. */
+    TCG_MO_LD_LD  = 0x01,
+    TCG_MO_ST_LD  = 0x02,
+    TCG_MO_LD_ST  = 0x04,
+    TCG_MO_ST_ST  = 0x08,
+    TCG_MO_ALL    = 0x0F,  /* OR of the above */
+
+    /* Used to indicate the kind of ordering which is to be ensured by the
+       instruction.  These types are derived from x86/aarch64 instructions.
+       It should be noted that these are different from C11 semantics. */
+    TCG_BAR_LDAQ  = 0x10,  /* Following ops will not come forward */
+    TCG_BAR_STRL  = 0x20,  /* Previous ops will not be delayed */
+    TCG_BAR_SC    = 0x30,  /* No ops cross barrier; OR of the above */
+} TCGBar;
+
 /* Conditions. Note that these are laid out for easy manipulation by
    the functions below:
      bit 0 is used for inverting;
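
For illustration, a guest front end would use the new opcode roughly as
follows once barrier emission is enabled. This is a minimal sketch: the
gen_full_fence() wrapper and its placement in a target translator are
hypothetical, while tcg_gen_mb() and the TCG_MO_*/TCG_BAR_* flags are the
ones introduced by this patch.

    #include "tcg-op.h"   /* declares tcg_gen_mb() */

    /* Translate a guest full-fence instruction (e.g. an ARM "dmb sy" or an
     * x86 "mfence") into the new opcode: order all earlier loads and stores
     * against all later ones, with sequentially consistent semantics.  The
     * backend may emit a stronger host barrier, never a weaker one. */
    static void gen_full_fence(void)
    {
        tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
    }

A guest instruction with acquire or release semantics would instead pass a
narrower combination of the TCG_MO_* access flags together with TCG_BAR_LDAQ
or TCG_BAR_STRL.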