@@ -1248,3 +1248,71 @@ define <9 x i16> @PR172010(<4 x i64> %a0) {
12481248 %result = shufflevector <16 x i16 > zeroinitializer , <16 x i16 > %trunc , <9 x i32 > <i32 31 , i32 18 , i32 28 , i32 20 , i32 7 , i32 5 , i32 8 , i32 4 , i32 7 >
12491249 ret <9 x i16 > %result
12501250}
1251+
; PR173030: three (x, x+1) scalar pairs (i8, i16, i32) are built with
; insertelement, sign-extended to float via sitofp, concatenated by
; shufflevector, and the top two lanes are blended with a constant 1.0
; splat. Checks the lowering keeps the inserts/converts per element width
; (vpinsrb/vpinsrw/vpinsrd + vpmovsxbd/vcvtdq2ps) and blends the splat.
define <8 x float> @PR173030(i8 %a0, i16 %a1, i32 %a2) {
; X86-LABEL: PR173030:
; X86:       # %bb.0:
; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    vmovd %ecx, %xmm0
; X86-NEXT:    # kill: def $cl killed $cl killed $ecx def $ecx
; X86-NEXT:    incb %cl
; X86-NEXT:    vpinsrb $1, %ecx, %xmm0, %xmm0
; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT:    vmovd %ecx, %xmm1
; X86-NEXT:    incl %ecx
; X86-NEXT:    vpinsrw $1, %ecx, %xmm1, %xmm1
; X86-NEXT:    vpunpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; X86-NEXT:    vmovd %eax, %xmm1
; X86-NEXT:    incl %eax
; X86-NEXT:    vpinsrd $1, %eax, %xmm1, %xmm1
; X86-NEXT:    vcvtdq2ps %xmm1, %xmm1
; X86-NEXT:    vpmovsxbd %xmm0, %ymm0
; X86-NEXT:    vcvtdq2ps %ymm0, %ymm0
; X86-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm1
; X86-NEXT:    vextractf128 $1, %ymm0, %xmm0
; X86-NEXT:    vunpcklpd {{.*#+}} ymm0 = ymm1[0],ymm0[0],ymm1[2],ymm0[2]
; X86-NEXT:    vbroadcastss {{.*#+}} ymm1 = [1.0E+0,1.0E+0,1.0E+0,1.0E+0,1.0E+0,1.0E+0,1.0E+0,1.0E+0]
; X86-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm1[6,7]
; X86-NEXT:    retl
;
; X64-LABEL: PR173030:
; X64:       # %bb.0:
; X64-NEXT:    # kill: def $edi killed $edi def $rdi
; X64-NEXT:    leal 1(%rdi), %eax
; X64-NEXT:    vmovd %edi, %xmm0
; X64-NEXT:    vpinsrb $1, %eax, %xmm0, %xmm0
; X64-NEXT:    vmovd %esi, %xmm1
; X64-NEXT:    incl %esi
; X64-NEXT:    vpinsrw $1, %esi, %xmm1, %xmm1
; X64-NEXT:    vpunpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; X64-NEXT:    vmovd %edx, %xmm1
; X64-NEXT:    incl %edx
; X64-NEXT:    vpinsrd $1, %edx, %xmm1, %xmm1
; X64-NEXT:    vcvtdq2ps %xmm1, %xmm1
; X64-NEXT:    vpmovsxbd %xmm0, %ymm0
; X64-NEXT:    vcvtdq2ps %ymm0, %ymm0
; X64-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm1
; X64-NEXT:    vextractf128 $1, %ymm0, %xmm0
; X64-NEXT:    vunpcklpd {{.*#+}} ymm0 = ymm1[0],ymm0[0],ymm1[2],ymm0[2]
; X64-NEXT:    vbroadcastss {{.*#+}} ymm1 = [1.0E+0,1.0E+0,1.0E+0,1.0E+0,1.0E+0,1.0E+0,1.0E+0,1.0E+0]
; X64-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm1[6,7]
; X64-NEXT:    retq
  ; <2 x i8> = <a0, a0+1>, converted to <2 x float>
  %i = add i8 %a0, 1
  %i1 = insertelement <2 x i8> poison, i8 %a0, i64 0
  %i2 = insertelement <2 x i8> %i1, i8 %i, i64 1
  %i3 = sitofp <2 x i8> %i2 to <2 x float>
  ; <2 x i16> = <a1, a1+1>, converted to <2 x float>
  %i4 = add i16 %a1, 1
  %i5 = insertelement <2 x i16> poison, i16 %a1, i64 0
  %i6 = insertelement <2 x i16> %i5, i16 %i4, i64 1
  %i7 = sitofp <2 x i16> %i6 to <2 x float>
  ; <2 x i32> = <a2, a2+1>, converted to <2 x float>
  %i8 = add i32 %a2, 1
  %i9 = insertelement <2 x i32> poison, i32 %a2, i64 0
  %i10 = insertelement <2 x i32> %i9, i32 %i8, i64 1
  %i11 = sitofp <2 x i32> %i10 to <2 x float>
  ; concat the three pairs into lanes 0-5, then blend 1.0 into lanes 6-7
  %i12 = shufflevector <2 x float> %i3, <2 x float> %i7, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 poison, i32 poison, i32 poison, i32 poison>
  %i13 = shufflevector <2 x float> %i11, <2 x float> poison, <8 x i32> <i32 0, i32 1, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison>
  %i14 = shufflevector <8 x float> %i12, <8 x float> %i13, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 8, i32 9, i32 poison, i32 poison>
  %i15 = shufflevector <8 x float> %i14, <8 x float> <float poison, float poison, float poison, float poison, float poison, float poison, float 1.000000e+00, float 1.000000e+00>, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 14, i32 15>
  ret <8 x float> %i15
}
0 commit comments