@@ -8257,6 +8257,14 @@ void Assembler::vmaxsh(XMMRegister dst, XMMRegister nds, XMMRegister src) {
8257
8257
emit_int16(0x5F, (0xC0 | encode));
8258
8258
}
8259
8259
8260
+ void Assembler::eminmaxsh(XMMRegister dst, XMMRegister nds, XMMRegister src, int imm8) {
8261
+ assert(VM_Version::supports_avx10_2(), "");
8262
+ InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
8263
+ attributes.set_is_evex_instruction();
8264
+ int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_3A, &attributes);
8265
+ emit_int24(0x53, (0xC0 | encode), imm8);
8266
+ }
8267
+
8260
8268
void Assembler::vminsh(XMMRegister dst, XMMRegister nds, XMMRegister src) {
8261
8269
assert(VM_Version::supports_avx512_fp16(), "requires AVX512-FP16");
8262
8270
InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
@@ -8771,12 +8779,68 @@ void Assembler::vmaxps(XMMRegister dst, XMMRegister nds, XMMRegister src, int ve
8771
8779
emit_int16(0x5F, (0xC0 | encode));
8772
8780
}
8773
8781
8782
+ void Assembler::evminmaxps(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int imm8, int vector_len) {
8783
+ assert(VM_Version::supports_avx10_2(), "");
8784
+ InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
8785
+ attributes.set_is_evex_instruction();
8786
+ attributes.set_embedded_opmask_register_specifier(mask);
8787
+ if (merge) {
8788
+ attributes.reset_is_clear_context();
8789
+ }
8790
+ int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
8791
+ emit_int24(0x52, (0xC0 | encode), imm8);
8792
+ }
8793
+
8794
+ void Assembler::evminmaxps(XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int imm8, int vector_len) {
8795
+ assert(VM_Version::supports_avx10_2(), "");
8796
+ InstructionMark im(this);
8797
+ InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
8798
+ attributes.set_is_evex_instruction();
8799
+ attributes.set_embedded_opmask_register_specifier(mask);
8800
+ attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_NObit);
8801
+ if (merge) {
8802
+ attributes.reset_is_clear_context();
8803
+ }
8804
+ vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
8805
+ emit_int8(0x52);
8806
+ emit_operand(dst, src, 0);
8807
+ emit_int8(imm8);
8808
+ }
8809
+
8774
8810
// MAXPD xmm, xmm: legacy SSE2 packed double-precision maximum
// (66 0F 5F /r), register-register form only.
void Assembler::maxpd(XMMRegister dst, XMMRegister src) {
  InstructionAttr attrs(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  int enc = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F, &attrs);
  emit_int16(0x5F, 0xC0 | enc);
}
8779
8815
8816
+ void Assembler::evminmaxpd(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int imm8, int vector_len) {
8817
+ assert(VM_Version::supports_avx10_2(), "");
8818
+ InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false,/* uses_vl */ true);
8819
+ attributes.set_is_evex_instruction();
8820
+ attributes.set_embedded_opmask_register_specifier(mask);
8821
+ if (merge) {
8822
+ attributes.reset_is_clear_context();
8823
+ }
8824
+ int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
8825
+ emit_int24(0x52, (0xC0 | encode), imm8);
8826
+ }
8827
+
8828
+ void Assembler::evminmaxpd(XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int imm8, int vector_len) {
8829
+ assert(VM_Version::supports_avx10_2(), "");
8830
+ InstructionMark im(this);
8831
+ InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
8832
+ attributes.set_is_evex_instruction();
8833
+ attributes.set_embedded_opmask_register_specifier(mask);
8834
+ attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_NObit);
8835
+ if (merge) {
8836
+ attributes.reset_is_clear_context();
8837
+ }
8838
+ vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
8839
+ emit_int8(0x52);
8840
+ emit_operand(dst, src, 0);
8841
+ emit_int8(imm8);
8842
+ }
8843
+
8780
8844
void Assembler::vmaxpd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
8781
8845
assert(vector_len >= AVX_512bit ? VM_Version::supports_evex() : VM_Version::supports_avx(), "");
8782
8846
InstructionAttr attributes(vector_len, /* vex_w */true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
@@ -13119,6 +13183,14 @@ void Assembler::vminss(XMMRegister dst, XMMRegister nds, XMMRegister src) {
13119
13183
emit_int16(0x5D, (0xC0 | encode));
13120
13184
}
13121
13185
13186
+ void Assembler::eminmaxss(XMMRegister dst, XMMRegister nds, XMMRegister src, int imm8) {
13187
+ assert(VM_Version::supports_avx10_2(), "");
13188
+ InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
13189
+ attributes.set_is_evex_instruction();
13190
+ int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
13191
+ emit_int24(0x53, (0xC0 | encode), imm8);
13192
+ }
13193
+
13122
13194
void Assembler::vminsd(XMMRegister dst, XMMRegister nds, XMMRegister src) {
13123
13195
assert(VM_Version::supports_avx(), "");
13124
13196
InstructionAttr attributes(AVX_128bit, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
@@ -13127,6 +13199,14 @@ void Assembler::vminsd(XMMRegister dst, XMMRegister nds, XMMRegister src) {
13127
13199
emit_int16(0x5D, (0xC0 | encode));
13128
13200
}
13129
13201
13202
+ void Assembler::eminmaxsd(XMMRegister dst, XMMRegister nds, XMMRegister src, int imm8) {
13203
+ assert(VM_Version::supports_avx10_2(), "");
13204
+ InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
13205
+ attributes.set_is_evex_instruction();
13206
+ int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
13207
+ emit_int24(0x53, (0xC0 | encode), imm8);
13208
+ }
13209
+
13130
13210
void Assembler::vcmppd(XMMRegister dst, XMMRegister nds, XMMRegister src, int cop, int vector_len) {
13131
13211
assert(VM_Version::supports_avx(), "");
13132
13212
assert(vector_len <= AVX_256bit, "");
@@ -16526,6 +16606,34 @@ void Assembler::evminph(XMMRegister dst, XMMRegister nds, Address src, int vecto
16526
16606
emit_operand(dst, src, 0);
16527
16607
}
16528
16608
16609
+ void Assembler::evminmaxph(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int imm8, int vector_len) {
16610
+ assert(VM_Version::supports_avx10_2(), "");
16611
+ InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false,/* uses_vl */ true);
16612
+ attributes.set_is_evex_instruction();
16613
+ attributes.set_embedded_opmask_register_specifier(mask);
16614
+ if (merge) {
16615
+ attributes.reset_is_clear_context();
16616
+ }
16617
+ int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_3A, &attributes);
16618
+ emit_int24(0x52, (0xC0 | encode), imm8);
16619
+ }
16620
+
16621
+ void Assembler::evminmaxph(XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int imm8, int vector_len) {
16622
+ assert(VM_Version::supports_avx10_2(), "");
16623
+ InstructionMark im(this);
16624
+ InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
16625
+ attributes.set_is_evex_instruction();
16626
+ attributes.set_embedded_opmask_register_specifier(mask);
16627
+ if (merge) {
16628
+ attributes.reset_is_clear_context();
16629
+ }
16630
+ attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_NObit);
16631
+ vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_3A, &attributes);
16632
+ emit_int8(0x52);
16633
+ emit_operand(dst, src, 0);
16634
+ emit_int8(imm8);
16635
+ }
16636
+
16529
16637
void Assembler::evmaxph(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
16530
16638
assert(VM_Version::supports_avx512_fp16(), "requires AVX512-FP16");
16531
16639
assert(vector_len == Assembler::AVX_512bit || VM_Version::supports_avx512vl(), "");
0 commit comments