[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]
[PATCH v3 08/11] target/s390x: vxeh2: vector {load, store} byte reversed
From: |
Richard Henderson |
Subject: |
[PATCH v3 08/11] target/s390x: vxeh2: vector {load, store} byte reversed element |
Date: |
Mon, 7 Mar 2022 15:53:55 -1000 |
From: David Miller <dmiller423@gmail.com>
This includes VLEBR* and VSTEBR* (single element);
VLBRREP (load single element and replicate); and
VLLEBRZ (load single element and zero).
Signed-off-by: David Miller <dmiller423@gmail.com>
Message-Id: <20220307020327.3003-6-dmiller423@gmail.com>
[rth: Split out elements (plural) from element (scalar),
Use tcg little-endian memory operations.]
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
target/s390x/tcg/translate_vx.c.inc | 85 +++++++++++++++++++++++++++++
target/s390x/tcg/insn-data.def | 12 ++++
2 files changed, 97 insertions(+)
diff --git a/target/s390x/tcg/translate_vx.c.inc
b/target/s390x/tcg/translate_vx.c.inc
index 9a82401d71..ce77578325 100644
--- a/target/s390x/tcg/translate_vx.c.inc
+++ b/target/s390x/tcg/translate_vx.c.inc
@@ -457,6 +457,73 @@ static DisasJumpType op_vlrep(DisasContext *s, DisasOps *o)
return DISAS_NEXT;
}
+/*
+ * VLEBRH/VLEBRF/VLEBRG (VECTOR LOAD BYTE REVERSED ELEMENT):
+ * load one element (halfword/word/doubleword) from memory in byte-reversed
+ * order into element number m3 of vector register v1.
+ * The element size (ES_16/ES_32/ES_64) is encoded in the insn-data table
+ * entry and arrives via s->insn->data.
+ */
+static DisasJumpType op_vlebr(DisasContext *s, DisasOps *o)
+{
+ const uint8_t es = s->insn->data;
+ const uint8_t enr = get_field(s, m3);
+ TCGv_i64 tmp;
+
+ /* An element number out of range for this element size is a
+ specification exception. */
+ if (!valid_vec_element(enr, es)) {
+ gen_program_exception(s, PGM_SPECIFICATION);
+ return DISAS_NORETURN;
+ }
+
+ tmp = tcg_temp_new_i64();
+ /* A little-endian load of 2^es bytes performs the byte reversal. */
+ tcg_gen_qemu_ld_i64(tmp, o->addr1, get_mem_index(s), MO_LE | es);
+ write_vec_element_i64(tmp, get_field(s, v1), enr, es);
+ tcg_temp_free_i64(tmp);
+ return DISAS_NEXT;
+}
+
+/*
+ * VLBRREP (VECTOR LOAD BYTE REVERSED ELEMENT AND REPLICATE):
+ * load one element from memory in byte-reversed order and replicate it
+ * into every element of vector register v1.  The element size comes
+ * from the m3 field.
+ */
+static DisasJumpType op_vlbrrep(DisasContext *s, DisasOps *o)
+{
+ const uint8_t es = get_field(s, m3);
+ TCGv_i64 tmp;
+
+ /* Only halfword, word and doubleword elements are defined; byte
+ reversal is meaningless for single bytes. */
+ if (es < ES_16 || es > ES_64) {
+ gen_program_exception(s, PGM_SPECIFICATION);
+ return DISAS_NORETURN;
+ }
+
+ tmp = tcg_temp_new_i64();
+ /* Little-endian load performs the byte reversal. */
+ tcg_gen_qemu_ld_i64(tmp, o->addr1, get_mem_index(s), MO_LE | es);
+ gen_gvec_dup_i64(es, get_field(s, v1), tmp);
+ tcg_temp_free_i64(tmp);
+ return DISAS_NEXT;
+}
+
+/*
+ * VLLEBRZ (VECTOR LOAD BYTE REVERSED ELEMENT AND ZERO):
+ * load one element from memory in byte-reversed order into the leftmost
+ * part of vector register v1 and zero the remaining bytes.
+ * m3 = 1/2/3 selects halfword/word/doubleword placed right-aligned in
+ * doubleword element 0; m3 = 6 is the "word left-aligned" form, which
+ * loads a word and shifts it into the high half of doubleword 0.
+ */
+static DisasJumpType op_vllebrz(DisasContext *s, DisasOps *o)
+{
+ const uint8_t m3 = get_field(s, m3);
+ TCGv_i64 tmp;
+ int es, lshift;
+
+ switch (m3) {
+ case ES_16:
+ case ES_32:
+ case ES_64:
+ es = m3;
+ lshift = 0;
+ break;
+ case 6:
+ /* Word, left-aligned within the first doubleword. */
+ es = ES_32;
+ lshift = 32;
+ break;
+ default:
+ /* Any other m3 value is a specification exception. */
+ gen_program_exception(s, PGM_SPECIFICATION);
+ return DISAS_NORETURN;
+ }
+
+ tmp = tcg_temp_new_i64();
+ /* Little-endian load performs the byte reversal. */
+ tcg_gen_qemu_ld_i64(tmp, o->addr1, get_mem_index(s), MO_LE | es);
+ tcg_gen_shli_i64(tmp, tmp, lshift);
+
+ /* Write the (possibly shifted) value to doubleword 0 and zero
+ doubleword 1; the upper bytes of doubleword 0 are already zero
+ because the load was narrower than 64 bits (or lshift == 0). */
+ write_vec_element_i64(tmp, get_field(s, v1), 0, ES_64);
+ write_vec_element_i64(tcg_constant_i64(0), get_field(s, v1), 1, ES_64);
+ tcg_temp_free_i64(tmp);
+ return DISAS_NEXT;
+}
+
static DisasJumpType op_vlbr(DisasContext *s, DisasOps *o)
{
const uint8_t es = get_field(s, m3);
@@ -1048,6 +1115,24 @@ static DisasJumpType op_vst(DisasContext *s, DisasOps *o)
return DISAS_NEXT;
}
+/*
+ * VSTEBRH/VSTEBRF/VSTEBRG (VECTOR STORE BYTE REVERSED ELEMENT):
+ * store element number m3 of vector register v1 to memory in
+ * byte-reversed order.  The element size (ES_16/ES_32/ES_64) is encoded
+ * in the insn-data table entry and arrives via s->insn->data.
+ */
+static DisasJumpType op_vstebr(DisasContext *s, DisasOps *o)
+{
+ const uint8_t es = s->insn->data;
+ const uint8_t enr = get_field(s, m3);
+ TCGv_i64 tmp;
+
+ /* An element number out of range for this element size is a
+ specification exception. */
+ if (!valid_vec_element(enr, es)) {
+ gen_program_exception(s, PGM_SPECIFICATION);
+ return DISAS_NORETURN;
+ }
+
+ tmp = tcg_temp_new_i64();
+ read_vec_element_i64(tmp, get_field(s, v1), enr, es);
+ /* A little-endian store of 2^es bytes performs the byte reversal. */
+ tcg_gen_qemu_st_i64(tmp, o->addr1, get_mem_index(s), MO_LE | es);
+ tcg_temp_free_i64(tmp);
+ return DISAS_NEXT;
+}
+
static DisasJumpType op_vstbr(DisasContext *s, DisasOps *o)
{
const uint8_t es = get_field(s, m3);
diff --git a/target/s390x/tcg/insn-data.def b/target/s390x/tcg/insn-data.def
index ee6e1dc9e5..b80f989002 100644
--- a/target/s390x/tcg/insn-data.def
+++ b/target/s390x/tcg/insn-data.def
@@ -1027,6 +1027,14 @@
F(0xe756, VLR, VRR_a, V, 0, 0, 0, 0, vlr, 0, IF_VEC)
/* VECTOR LOAD AND REPLICATE */
F(0xe705, VLREP, VRX, V, la2, 0, 0, 0, vlrep, 0, IF_VEC)
+/* VECTOR LOAD BYTE REVERSED ELEMENT */
+ E(0xe601, VLEBRH, VRX, VE2, la2, 0, 0, 0, vlebr, 0, ES_16, IF_VEC)
+ E(0xe603, VLEBRF, VRX, VE2, la2, 0, 0, 0, vlebr, 0, ES_32, IF_VEC)
+ E(0xe602, VLEBRG, VRX, VE2, la2, 0, 0, 0, vlebr, 0, ES_64, IF_VEC)
+/* VECTOR LOAD BYTE REVERSED ELEMENT AND REPLICATE */
+ F(0xe605, VLBRREP, VRX, VE2, la2, 0, 0, 0, vlbrrep, 0, IF_VEC)
+/* VECTOR LOAD BYTE REVERSED ELEMENT AND ZERO */
+ F(0xe604, VLLEBRZ, VRX, VE2, la2, 0, 0, 0, vllebrz, 0, IF_VEC)
/* VECTOR LOAD BYTE REVERSED ELEMENTS */
F(0xe606, VLBR, VRX, VE2, la2, 0, 0, 0, vlbr, 0, IF_VEC)
/* VECTOR LOAD ELEMENT */
@@ -1081,6 +1089,10 @@
F(0xe75f, VSEG, VRR_a, V, 0, 0, 0, 0, vseg, 0, IF_VEC)
/* VECTOR STORE */
F(0xe70e, VST, VRX, V, la2, 0, 0, 0, vst, 0, IF_VEC)
+/* VECTOR STORE BYTE REVERSED ELEMENT */
+ E(0xe609, VSTEBRH, VRX, VE2, la2, 0, 0, 0, vstebr, 0, ES_16, IF_VEC)
+ E(0xe60b, VSTEBRF, VRX, VE2, la2, 0, 0, 0, vstebr, 0, ES_32, IF_VEC)
+ E(0xe60a, VSTEBRG, VRX, VE2, la2, 0, 0, 0, vstebr, 0, ES_64, IF_VEC)
/* VECTOR STORE BYTE REVERSED ELEMENTS */
F(0xe60e, VSTBR, VRX, VE2, la2, 0, 0, 0, vstbr, 0, IF_VEC)
/* VECTOR STORE ELEMENT */
--
2.25.1
- [PATCH v3 04/11] target/s390x: vxeh2: Update for changes to vector shifts, (continued)
- [PATCH v3 04/11] target/s390x: vxeh2: Update for changes to vector shifts, Richard Henderson, 2022/03/07
- [PATCH v3 05/11] target/s390x: vxeh2: vector shift double by bit, Richard Henderson, 2022/03/07
- [PATCH v3 07/11] target/s390x: vxeh2: vector {load, store} byte reversed elements, Richard Henderson, 2022/03/07
- [PATCH v3 06/11] target/s390x: vxeh2: vector {load, store} elements reversed, Richard Henderson, 2022/03/07
- [PATCH v3 08/11] target/s390x: vxeh2: vector {load, store} byte reversed element,
Richard Henderson <=
- [PATCH v3 09/11] target/s390x: add S390_FEAT_VECTOR_ENH2 to cpu max, Richard Henderson, 2022/03/07
- [PATCH v3 10/11] tests/tcg/s390x: Tests for Vector Enhancements Facility 2, Richard Henderson, 2022/03/07
- [PATCH v3 11/11] target/s390x: Fix writeback to v1 in helper_vstl, Richard Henderson, 2022/03/07
- Re: [PATCH v3 00/11] s390x/tcg: Implement Vector-Enhancements Facility 2, David Miller, 2022/03/07