@@ -32,6 +32,15 @@ pub(crate) fn codegen_aarch64_llvm_intrinsic_call<'tcx>(
             });
         }
 
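+        // NEON RBIT: reverse the order of bits within each lane.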
+ _ if intrinsic. starts_with ( "llvm.aarch64.neon.rbit.v" ) => {
36
+ intrinsic_args ! ( fx, args => ( a) ; intrinsic) ;
37
+
38
+ simd_for_each_lane ( fx, a, ret, & |fx, _lane_ty, _res_lane_ty, lane| {
39
+ fx. bcx . ins ( ) . bitrev ( lane)
40
+ } ) ;
41
+ }
42
+
         _ if intrinsic.starts_with("llvm.aarch64.neon.sqadd.v") => {
             intrinsic_args!(fx, args => (x, y); intrinsic);
 
@@ -48,6 +56,86 @@ pub(crate) fn codegen_aarch64_llvm_intrinsic_call<'tcx>(
             });
         }
 
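+        // NEON SMAX: lane-wise signed maximum.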
+ _ if intrinsic. starts_with ( "llvm.aarch64.neon.smax.v" ) => {
60
+ intrinsic_args ! ( fx, args => ( x, y) ; intrinsic) ;
61
+
62
+ simd_pair_for_each_lane ( fx, x, y, ret, & |fx, _lane_ty, _res_lane_ty, x_lane, y_lane| {
63
+ let gt = fx. bcx . ins ( ) . icmp ( IntCC :: SignedGreaterThan , x_lane, y_lane) ;
64
+ fx. bcx . ins ( ) . select ( gt, x_lane, y_lane)
65
+ } ) ;
66
+ }
67
+
68
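+        // NEON UMAX: lane-wise unsigned maximum.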
+ _ if intrinsic. starts_with ( "llvm.aarch64.neon.umax.v" ) => {
69
+ intrinsic_args ! ( fx, args => ( x, y) ; intrinsic) ;
70
+
71
+ simd_pair_for_each_lane ( fx, x, y, ret, & |fx, _lane_ty, _res_lane_ty, x_lane, y_lane| {
72
+ let gt = fx. bcx . ins ( ) . icmp ( IntCC :: UnsignedGreaterThan , x_lane, y_lane) ;
73
+ fx. bcx . ins ( ) . select ( gt, x_lane, y_lane)
74
+ } ) ;
75
+ }
76
+
77
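+        // NEON SMAXV: horizontal signed maximum across all lanes, yielding a scalar.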
+ _ if intrinsic. starts_with ( "llvm.aarch64.neon.smaxv.i" ) => {
78
+ intrinsic_args ! ( fx, args => ( v) ; intrinsic) ;
79
+
80
+ simd_reduce ( fx, v, None , ret, & |fx, _ty, a, b| {
81
+ let gt = fx. bcx . ins ( ) . icmp ( IntCC :: SignedGreaterThan , a, b) ;
82
+ fx. bcx . ins ( ) . select ( gt, a, b)
83
+ } ) ;
84
+ }
85
+
86
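+        // NEON UMAXV: horizontal unsigned maximum across all lanes, yielding a scalar.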
+ _ if intrinsic. starts_with ( "llvm.aarch64.neon.umaxv.i" ) => {
87
+ intrinsic_args ! ( fx, args => ( v) ; intrinsic) ;
88
+
89
+ simd_reduce ( fx, v, None , ret, & |fx, _ty, a, b| {
90
+ let gt = fx. bcx . ins ( ) . icmp ( IntCC :: UnsignedGreaterThan , a, b) ;
91
+ fx. bcx . ins ( ) . select ( gt, a, b)
92
+ } ) ;
93
+ }
94
+
95
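+        // NEON SMIN: lane-wise signed minimum.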
+ _ if intrinsic. starts_with ( "llvm.aarch64.neon.smin.v" ) => {
96
+ intrinsic_args ! ( fx, args => ( x, y) ; intrinsic) ;
97
+
98
+ simd_pair_for_each_lane ( fx, x, y, ret, & |fx, _lane_ty, _res_lane_ty, x_lane, y_lane| {
99
+ let gt = fx. bcx . ins ( ) . icmp ( IntCC :: SignedLessThan , x_lane, y_lane) ;
100
+ fx. bcx . ins ( ) . select ( gt, x_lane, y_lane)
101
+ } ) ;
102
+ }
103
+
104
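+        // NEON UMIN: lane-wise unsigned minimum.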
+ _ if intrinsic. starts_with ( "llvm.aarch64.neon.umin.v" ) => {
105
+ intrinsic_args ! ( fx, args => ( x, y) ; intrinsic) ;
106
+
107
+ simd_pair_for_each_lane ( fx, x, y, ret, & |fx, _lane_ty, _res_lane_ty, x_lane, y_lane| {
108
+ let gt = fx. bcx . ins ( ) . icmp ( IntCC :: UnsignedLessThan , x_lane, y_lane) ;
109
+ fx. bcx . ins ( ) . select ( gt, x_lane, y_lane)
110
+ } ) ;
111
+ }
112
+
113
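+        // NEON SMINV: horizontal signed minimum across all lanes, yielding a scalar.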
+ _ if intrinsic. starts_with ( "llvm.aarch64.neon.sminv.i" ) => {
114
+ intrinsic_args ! ( fx, args => ( v) ; intrinsic) ;
115
+
116
+ simd_reduce ( fx, v, None , ret, & |fx, _ty, a, b| {
117
+ let gt = fx. bcx . ins ( ) . icmp ( IntCC :: SignedLessThan , a, b) ;
118
+ fx. bcx . ins ( ) . select ( gt, a, b)
119
+ } ) ;
120
+ }
121
+
122
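+        // NEON UMINV: horizontal unsigned minimum across all lanes, yielding a scalar.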
+ _ if intrinsic. starts_with ( "llvm.aarch64.neon.uminv.i" ) => {
123
+ intrinsic_args ! ( fx, args => ( v) ; intrinsic) ;
124
+
125
+ simd_reduce ( fx, v, None , ret, & |fx, _ty, a, b| {
126
+ let gt = fx. bcx . ins ( ) . icmp ( IntCC :: UnsignedLessThan , a, b) ;
127
+ fx. bcx . ins ( ) . select ( gt, a, b)
128
+ } ) ;
129
+ }
130
+
         /*
         _ if intrinsic.starts_with("llvm.aarch64.neon.sshl.v")
             || intrinsic.starts_with("llvm.aarch64.neon.sqshl.v")