; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 2
; RUN: opt -S -passes=slp-vectorizer < %s -mtriple=riscv64-unknown-linux -mattr=+v | FileCheck %s

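; Eight i8 loads from %a at a constant stride of 2, each taken through
; llvm.abs.i8, sign-extended to i32, and accumulated into a single sum. The
; SLP vectorizer should collapse the chain into one vp.strided.load followed
; by a vector abs, a sext, and an add reduction, as the CHECK lines assert.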
define i32 @sum_of_abs_stride_2(ptr noalias %a, ptr noalias %b) {
; CHECK-LABEL: define i32 @sum_of_abs_stride_2
; CHECK-SAME: (ptr noalias [[A:%.*]], ptr noalias [[B:%.*]]) #[[ATTR0:[0-9]+]] {
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[TMP0:%.*]] = call <8 x i8> @llvm.experimental.vp.strided.load.v8i8.p0.i64(ptr align 1 [[A]], i64 2, <8 x i1> splat (i1 true), i32 8)
; CHECK-NEXT:    [[TMP1:%.*]] = call <8 x i8> @llvm.abs.v8i8(<8 x i8> [[TMP0]], i1 false)
; CHECK-NEXT:    [[TMP2:%.*]] = sext <8 x i8> [[TMP1]] to <8 x i32>
; CHECK-NEXT:    [[TMP3:%.*]] = call i32 @llvm.vector.reduce.add.v8i32(<8 x i32> [[TMP2]])
; CHECK-NEXT:    ret i32 [[TMP3]]
;
entry:
  %0 = load i8, ptr %a, align 1
  %spec.select.i = tail call i8 @llvm.abs.i8(i8 %0, i1 false)
  %conv = sext i8 %spec.select.i to i32
  %arrayidx.1 = getelementptr inbounds i8, ptr %a, i64 2
  %1 = load i8, ptr %arrayidx.1, align 1
  %spec.select.i.1 = tail call i8 @llvm.abs.i8(i8 %1, i1 false)
  %conv.1 = sext i8 %spec.select.i.1 to i32
  %add.1 = add nsw i32 %conv, %conv.1
  %arrayidx.2 = getelementptr inbounds i8, ptr %a, i64 4
  %2 = load i8, ptr %arrayidx.2, align 1
  %spec.select.i.2 = tail call i8 @llvm.abs.i8(i8 %2, i1 false)
  %conv.2 = sext i8 %spec.select.i.2 to i32
  %add.2 = add nsw i32 %add.1, %conv.2
  %arrayidx.3 = getelementptr inbounds i8, ptr %a, i64 6
  %3 = load i8, ptr %arrayidx.3, align 1
  %spec.select.i.3 = tail call i8 @llvm.abs.i8(i8 %3, i1 false)
  %conv.3 = sext i8 %spec.select.i.3 to i32
  %add.3 = add nsw i32 %add.2, %conv.3
  %arrayidx.4 = getelementptr inbounds i8, ptr %a, i64 8
  %4 = load i8, ptr %arrayidx.4, align 1
  %spec.select.i.4 = tail call i8 @llvm.abs.i8(i8 %4, i1 false)
  %conv.4 = sext i8 %spec.select.i.4 to i32
  %add.4 = add nsw i32 %add.3, %conv.4
  %arrayidx.5 = getelementptr inbounds i8, ptr %a, i64 10
  %5 = load i8, ptr %arrayidx.5, align 1
  %spec.select.i.5 = tail call i8 @llvm.abs.i8(i8 %5, i1 false)
  %conv.5 = sext i8 %spec.select.i.5 to i32
  %add.5 = add nsw i32 %add.4, %conv.5
  %arrayidx.6 = getelementptr inbounds i8, ptr %a, i64 12
  %6 = load i8, ptr %arrayidx.6, align 1
  %spec.select.i.6 = tail call i8 @llvm.abs.i8(i8 %6, i1 false)
  %conv.6 = sext i8 %spec.select.i.6 to i32
  %add.6 = add nsw i32 %add.5, %conv.6
  %arrayidx.7 = getelementptr inbounds i8, ptr %a, i64 14
  %7 = load i8, ptr %arrayidx.7, align 1
  %spec.select.i.7 = tail call i8 @llvm.abs.i8(i8 %7, i1 false)
  %conv.7 = sext i8 %spec.select.i.7 to i32
  %add.7 = add nsw i32 %add.6, %conv.7
  ret i32 %add.7
}

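; Same pattern as above but with a constant stride of 3; only the stride
; operand of the expected vp.strided.load changes.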
define i32 @sum_of_abs_stride_3(ptr noalias %a, ptr noalias %b) {
; CHECK-LABEL: define i32 @sum_of_abs_stride_3
; CHECK-SAME: (ptr noalias [[A:%.*]], ptr noalias [[B:%.*]]) #[[ATTR0]] {
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[TMP0:%.*]] = call <8 x i8> @llvm.experimental.vp.strided.load.v8i8.p0.i64(ptr align 1 [[A]], i64 3, <8 x i1> splat (i1 true), i32 8)
; CHECK-NEXT:    [[TMP1:%.*]] = call <8 x i8> @llvm.abs.v8i8(<8 x i8> [[TMP0]], i1 false)
; CHECK-NEXT:    [[TMP2:%.*]] = sext <8 x i8> [[TMP1]] to <8 x i32>
; CHECK-NEXT:    [[TMP3:%.*]] = call i32 @llvm.vector.reduce.add.v8i32(<8 x i32> [[TMP2]])
; CHECK-NEXT:    ret i32 [[TMP3]]
;
entry:
  %0 = load i8, ptr %a, align 1
  %spec.select.i = tail call i8 @llvm.abs.i8(i8 %0, i1 false)
  %conv = sext i8 %spec.select.i to i32
  %arrayidx.1 = getelementptr inbounds i8, ptr %a, i64 3
  %1 = load i8, ptr %arrayidx.1, align 1
  %spec.select.i.1 = tail call i8 @llvm.abs.i8(i8 %1, i1 false)
  %conv.1 = sext i8 %spec.select.i.1 to i32
  %add.1 = add nsw i32 %conv, %conv.1
  %arrayidx.2 = getelementptr inbounds i8, ptr %a, i64 6
  %2 = load i8, ptr %arrayidx.2, align 1
  %spec.select.i.2 = tail call i8 @llvm.abs.i8(i8 %2, i1 false)
  %conv.2 = sext i8 %spec.select.i.2 to i32
  %add.2 = add nsw i32 %add.1, %conv.2
  %arrayidx.3 = getelementptr inbounds i8, ptr %a, i64 9
  %3 = load i8, ptr %arrayidx.3, align 1
  %spec.select.i.3 = tail call i8 @llvm.abs.i8(i8 %3, i1 false)
  %conv.3 = sext i8 %spec.select.i.3 to i32
  %add.3 = add nsw i32 %add.2, %conv.3
  %arrayidx.4 = getelementptr inbounds i8, ptr %a, i64 12
  %4 = load i8, ptr %arrayidx.4, align 1
  %spec.select.i.4 = tail call i8 @llvm.abs.i8(i8 %4, i1 false)
  %conv.4 = sext i8 %spec.select.i.4 to i32
  %add.4 = add nsw i32 %add.3, %conv.4
  %arrayidx.5 = getelementptr inbounds i8, ptr %a, i64 15
  %5 = load i8, ptr %arrayidx.5, align 1
  %spec.select.i.5 = tail call i8 @llvm.abs.i8(i8 %5, i1 false)
  %conv.5 = sext i8 %spec.select.i.5 to i32
  %add.5 = add nsw i32 %add.4, %conv.5
  %arrayidx.6 = getelementptr inbounds i8, ptr %a, i64 18
  %6 = load i8, ptr %arrayidx.6, align 1
  %spec.select.i.6 = tail call i8 @llvm.abs.i8(i8 %6, i1 false)
  %conv.6 = sext i8 %spec.select.i.6 to i32
  %add.6 = add nsw i32 %add.5, %conv.6
  %arrayidx.7 = getelementptr inbounds i8, ptr %a, i64 21
  %7 = load i8, ptr %arrayidx.7, align 1
  %spec.select.i.7 = tail call i8 @llvm.abs.i8(i8 %7, i1 false)
  %conv.7 = sext i8 %spec.select.i.7 to i32
  %add.7 = add nsw i32 %add.6, %conv.7
  ret i32 %add.7
}

declare i8 @llvm.abs.i8(i8, i1 immarg)