@@ -1598,67 +1598,52 @@ void C2_MacroAssembler::vinsert(BasicType typ, XMMRegister dst, XMMRegister src,
   }
 }
 
-void C2_MacroAssembler::vgather8b_masked_offset(BasicType elem_bt,
-                                                XMMRegister dst, Register base,
-                                                Register idx_base,
-                                                Register offset, Register mask,
-                                                Register mask_idx, Register rtmp,
-                                                int vlen_enc) {
+void C2_MacroAssembler::vgather8b_masked(BasicType elem_bt, XMMRegister dst,
+                                         Register base, Register idx_base,
+                                         Register mask, Register mask_idx,
+                                         Register rtmp, int vlen_enc) {
   vpxor(dst, dst, dst, vlen_enc);
   if (elem_bt == T_SHORT) {
     for (int i = 0; i < 4; i++) {
-      // dst[i] = mask[i] ? src[offset + idx_base[i]] : 0
+      // dst[i] = mask[i] ? src[idx_base[i]] : 0
       Label skip_load;
       btq(mask, mask_idx);
       jccb(Assembler::carryClear, skip_load);
       movl(rtmp, Address(idx_base, i * 4));
-      if (offset != noreg) {
-        addl(rtmp, offset);
-      }
       pinsrw(dst, Address(base, rtmp, Address::times_2), i);
       bind(skip_load);
       incq(mask_idx);
     }
   } else {
     assert(elem_bt == T_BYTE, "");
     for (int i = 0; i < 8; i++) {
-      // dst[i] = mask[i] ? src[offset + idx_base[i]] : 0
+      // dst[i] = mask[i] ? src[idx_base[i]] : 0
       Label skip_load;
       btq(mask, mask_idx);
       jccb(Assembler::carryClear, skip_load);
       movl(rtmp, Address(idx_base, i * 4));
-      if (offset != noreg) {
-        addl(rtmp, offset);
-      }
       pinsrb(dst, Address(base, rtmp), i);
       bind(skip_load);
       incq(mask_idx);
     }
   }
 }
 
-void C2_MacroAssembler::vgather8b_offset(BasicType elem_bt, XMMRegister dst,
-                                         Register base, Register idx_base,
-                                         Register offset, Register rtmp,
-                                         int vlen_enc) {
+void C2_MacroAssembler::vgather8b(BasicType elem_bt, XMMRegister dst,
+                                  Register base, Register idx_base,
+                                  Register rtmp, int vlen_enc) {
   vpxor(dst, dst, dst, vlen_enc);
   if (elem_bt == T_SHORT) {
     for (int i = 0; i < 4; i++) {
-      // dst[i] = src[offset + idx_base[i]]
+      // dst[i] = src[idx_base[i]]
       movl(rtmp, Address(idx_base, i * 4));
-      if (offset != noreg) {
-        addl(rtmp, offset);
-      }
       pinsrw(dst, Address(base, rtmp, Address::times_2), i);
     }
   } else {
     assert(elem_bt == T_BYTE, "");
     for (int i = 0; i < 8; i++) {
-      // dst[i] = src[offset + idx_base[i]]
+      // dst[i] = src[idx_base[i]]
       movl(rtmp, Address(idx_base, i * 4));
-      if (offset != noreg) {
-        addl(rtmp, offset);
-      }
       pinsrb(dst, Address(base, rtmp), i);
     }
   }
@@ -1687,11 +1672,10 @@ void C2_MacroAssembler::vgather8b_offset(BasicType elem_bt, XMMRegister dst,
 */
 void C2_MacroAssembler::vgather_subword(BasicType elem_ty, XMMRegister dst,
                                         Register base, Register idx_base,
-                                        Register offset, Register mask,
-                                        XMMRegister xtmp1, XMMRegister xtmp2,
-                                        XMMRegister temp_dst, Register rtmp,
-                                        Register mask_idx, Register length,
-                                        int vector_len, int vlen_enc) {
+                                        Register mask, XMMRegister xtmp1,
+                                        XMMRegister xtmp2, XMMRegister temp_dst,
+                                        Register rtmp, Register mask_idx,
+                                        Register length, int vector_len, int vlen_enc) {
   Label GATHER8_LOOP;
   assert(is_subword_type(elem_ty), "");
   movl(length, vector_len);
@@ -1705,9 +1689,9 @@ void C2_MacroAssembler::vgather_subword(BasicType elem_ty, XMMRegister dst,
   bind(GATHER8_LOOP);
     // TMP_VEC_64(temp_dst) = PICK_SUB_WORDS_FROM_GATHER_INDICES
     if (mask == noreg) {
-      vgather8b_offset(elem_ty, temp_dst, base, idx_base, offset, rtmp, vlen_enc);
+      vgather8b(elem_ty, temp_dst, base, idx_base, rtmp, vlen_enc);
     } else {
-      vgather8b_masked_offset(elem_ty, temp_dst, base, idx_base, offset, mask, mask_idx, rtmp, vlen_enc);
+      vgather8b_masked(elem_ty, temp_dst, base, idx_base, mask, mask_idx, rtmp, vlen_enc);
     }
     // TEMP_PERM_VEC(temp_dst) = PERMUTE TMP_VEC_64(temp_dst) PERM_INDEX(xtmp1)
     vpermd(temp_dst, xtmp1, temp_dst, vlen_enc == Assembler::AVX_512bit ? vlen_enc : Assembler::AVX_256bit);