From fe042904829b83a61c1f4bc904f8f9e5b6da891e Mon Sep 17 00:00:00 2001
From: David Green
Date: Fri, 29 Nov 2024 17:41:05 +0000
Subject: [PATCH] [AArch64] Change the default vscale-for-tuning to 1. (#117174)

Most AArch64 CPUs outside of Neoverse V1 (256) and A64FX (512) have an SVE
vector length of 128, and in environments like Android (where building
without an -mcpu option is common) we would expect all CPUs to match. This
patch changes the default vscale-for-tuning to 1 (a 128-bit tuning vector
length) with -mcpu=generic, to match the most common case.
---
 llvm/lib/Target/AArch64/AArch64Subtarget.h    |   2 +-
 .../Analysis/CostModel/AArch64/sve-gather.ll  |  82 +-
 .../CostModel/AArch64/sve-intrinsics.ll       | 838 ++++++++++++------
 .../Analysis/CostModel/AArch64/sve-scatter.ll |  76 +-
 .../AArch64/clamped-trip-count.ll             |  84 +-
 .../AArch64/conditional-branches-cost.ll      | 158 ++--
 .../AArch64/induction-costs-sve.ll            |  95 +-
 .../AArch64/low_trip_count_predicates.ll      | 356 +++++---
 .../AArch64/reduction-recurrence-costs-sve.ll | 471 ++++++----
 .../scalable-vectorization-cost-tuning.ll     |  37 +-
 .../LoopVectorize/AArch64/scalable-vf-hint.ll |   4 +-
 .../LoopVectorize/AArch64/store-costs-sve.ll  |  10 +-
 .../AArch64/sve-strict-fadd-cost.ll           |  37 +-
 .../AArch64/type-shrinkage-zext-costs.ll      |  64 +-
 .../AArch64/veclib-function-calls.ll          |   8 +-
 15 files changed, 1387 insertions(+), 935 deletions(-)

diff --git a/llvm/lib/Target/AArch64/AArch64Subtarget.h b/llvm/lib/Target/AArch64/AArch64Subtarget.h
index d860c29e2291ae..7b1f316d048e53 100644
--- a/llvm/lib/Target/AArch64/AArch64Subtarget.h
+++ b/llvm/lib/Target/AArch64/AArch64Subtarget.h
@@ -88,7 +88,7 @@ class AArch64Subtarget final : public AArch64GenSubtargetInfo {
   unsigned StreamingHazardSize;
   unsigned MinSVEVectorSizeInBits;
   unsigned MaxSVEVectorSizeInBits;
-  unsigned VScaleForTuning = 2;
+  unsigned VScaleForTuning = 1;
   TailFoldingOpts DefaultSVETFOpts = TailFoldingOpts::Disabled;
   bool EnableSubregLiveness;
diff --git a/llvm/test/Analysis/CostModel/AArch64/sve-gather.ll b/llvm/test/Analysis/CostModel/AArch64/sve-gather.ll
index 98d40a0f69d2f3..35b987532e3941 100644
--- a/llvm/test/Analysis/CostModel/AArch64/sve-gather.ll
+++ b/llvm/test/Analysis/CostModel/AArch64/sve-gather.ll
@@ -1,5 +1,5 @@
 ; NOTE: Assertions have been autogenerated by utils/update_analyze_test_checks.py
-; RUN: opt -passes="print" 2>&1 -disable-output < %s | FileCheck %s
+; RUN: opt -passes="print" 2>&1 -disable-output < %s | FileCheck %s --check-prefix=CHECK-VSCALE-1
 ; RUN: opt -passes="print" 2>&1 -disable-output -mcpu=neoverse-v1 < %s | FileCheck %s --check-prefix=CHECK-VSCALE-2
 ; RUN: opt -passes="print" 2>&1 -disable-output -mcpu=neoverse-n2 < %s | FileCheck %s --check-prefix=CHECK-VSCALE-1
 ; RUN: opt -passes="print" 2>&1 -disable-output -mcpu=cortex-a510 < %s | FileCheck %s --check-prefix=CHECK-VSCALE-1
@@ -9,12 +9,12 @@ target datalayout = "e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128"
 target triple="aarch64--linux-gnu"

 define void @masked_gathers( %nxv4i1mask, %nxv8i1mask, <4 x i1> %v4i1mask, <1 x i1> %v1i1mask, %nxv1i1mask) #0 {
-; CHECK-LABEL: 'masked_gathers'
-; CHECK-NEXT: Cost Model: Found an estimated cost of 80 for instruction: %res.nxv4i32 = call @llvm.masked.gather.nxv4i32.nxv4p0( undef, i32 0, %nxv4i1mask, zeroinitializer)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 160 for instruction: %res.nxv8i32 = call @llvm.masked.gather.nxv8i32.nxv8p0( undef, i32 0, %nxv8i1mask, zeroinitializer)
-; CHECK-NEXT: Cost Model: Invalid cost for instruction: %res.nxv1i64 = call @llvm.masked.gather.nxv1i64.nxv1p0( undef, i32 0,
%nxv1i1mask, zeroinitializer) -; CHECK-NEXT: Cost Model: Invalid cost for instruction: %res.nxv4i1 = call @llvm.masked.gather.nxv4i1.nxv4p0( undef, i32 0, %nxv4i1mask, zeroinitializer) -; CHECK-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void +; CHECK-VSCALE-1-LABEL: 'masked_gathers' +; CHECK-VSCALE-1-NEXT: Cost Model: Found an estimated cost of 40 for instruction: %res.nxv4i32 = call @llvm.masked.gather.nxv4i32.nxv4p0( undef, i32 0, %nxv4i1mask, zeroinitializer) +; CHECK-VSCALE-1-NEXT: Cost Model: Found an estimated cost of 80 for instruction: %res.nxv8i32 = call @llvm.masked.gather.nxv8i32.nxv8p0( undef, i32 0, %nxv8i1mask, zeroinitializer) +; CHECK-VSCALE-1-NEXT: Cost Model: Invalid cost for instruction: %res.nxv1i64 = call @llvm.masked.gather.nxv1i64.nxv1p0( undef, i32 0, %nxv1i1mask, zeroinitializer) +; CHECK-VSCALE-1-NEXT: Cost Model: Invalid cost for instruction: %res.nxv4i1 = call @llvm.masked.gather.nxv4i1.nxv4p0( undef, i32 0, %nxv4i1mask, zeroinitializer) +; CHECK-VSCALE-1-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void ; ; CHECK-VSCALE-2-LABEL: 'masked_gathers' ; CHECK-VSCALE-2-NEXT: Cost Model: Found an estimated cost of 80 for instruction: %res.nxv4i32 = call @llvm.masked.gather.nxv4i32.nxv4p0( undef, i32 0, %nxv4i1mask, zeroinitializer) @@ -22,13 +22,6 @@ define void @masked_gathers( %nxv4i1mask, %nx ; CHECK-VSCALE-2-NEXT: Cost Model: Invalid cost for instruction: %res.nxv1i64 = call @llvm.masked.gather.nxv1i64.nxv1p0( undef, i32 0, %nxv1i1mask, zeroinitializer) ; CHECK-VSCALE-2-NEXT: Cost Model: Invalid cost for instruction: %res.nxv4i1 = call @llvm.masked.gather.nxv4i1.nxv4p0( undef, i32 0, %nxv4i1mask, zeroinitializer) ; CHECK-VSCALE-2-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void -; -; CHECK-VSCALE-1-LABEL: 'masked_gathers' -; CHECK-VSCALE-1-NEXT: Cost Model: Found an estimated cost of 40 for instruction: %res.nxv4i32 = call @llvm.masked.gather.nxv4i32.nxv4p0( undef, i32 0, %nxv4i1mask, zeroinitializer) -; CHECK-VSCALE-1-NEXT: Cost Model: Found an estimated cost of 80 for instruction: %res.nxv8i32 = call @llvm.masked.gather.nxv8i32.nxv8p0( undef, i32 0, %nxv8i1mask, zeroinitializer) -; CHECK-VSCALE-1-NEXT: Cost Model: Invalid cost for instruction: %res.nxv1i64 = call @llvm.masked.gather.nxv1i64.nxv1p0( undef, i32 0, %nxv1i1mask, zeroinitializer) -; CHECK-VSCALE-1-NEXT: Cost Model: Invalid cost for instruction: %res.nxv4i1 = call @llvm.masked.gather.nxv4i1.nxv4p0( undef, i32 0, %nxv4i1mask, zeroinitializer) -; CHECK-VSCALE-1-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void ; %res.nxv4i32 = call @llvm.masked.gather.nxv4i32( undef, i32 0, %nxv4i1mask, zeroinitializer) %res.nxv8i32 = call @llvm.masked.gather.nxv8i32( undef, i32 0, %nxv8i1mask, zeroinitializer) @@ -38,23 +31,17 @@ define void @masked_gathers( %nxv4i1mask, %nx } define void @masked_gathers_tune_generic( %nxv4i1mask, %nxv8i1mask, <4 x i1> %v4i1mask, <1 x i1> %v1i1mask, %nxv1i1mask) #1 { -; CHECK-LABEL: 'masked_gathers_tune_generic' -; CHECK-NEXT: Cost Model: Found an estimated cost of 80 for instruction: %res.nxv4i32 = call @llvm.masked.gather.nxv4i32.nxv4p0( undef, i32 0, %nxv4i1mask, zeroinitializer) -; CHECK-NEXT: Cost Model: Found an estimated cost of 160 for instruction: %res.nxv8i32 = call @llvm.masked.gather.nxv8i32.nxv8p0( undef, i32 0, %nxv8i1mask, zeroinitializer) -; CHECK-NEXT: Cost Model: Invalid cost for instruction: %res.nxv1i64 = call @llvm.masked.gather.nxv1i64.nxv1p0( undef, i32 0, %nxv1i1mask, 
zeroinitializer) -; CHECK-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void +; CHECK-VSCALE-1-LABEL: 'masked_gathers_tune_generic' +; CHECK-VSCALE-1-NEXT: Cost Model: Found an estimated cost of 40 for instruction: %res.nxv4i32 = call @llvm.masked.gather.nxv4i32.nxv4p0( undef, i32 0, %nxv4i1mask, zeroinitializer) +; CHECK-VSCALE-1-NEXT: Cost Model: Found an estimated cost of 80 for instruction: %res.nxv8i32 = call @llvm.masked.gather.nxv8i32.nxv8p0( undef, i32 0, %nxv8i1mask, zeroinitializer) +; CHECK-VSCALE-1-NEXT: Cost Model: Invalid cost for instruction: %res.nxv1i64 = call @llvm.masked.gather.nxv1i64.nxv1p0( undef, i32 0, %nxv1i1mask, zeroinitializer) +; CHECK-VSCALE-1-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void ; ; CHECK-VSCALE-2-LABEL: 'masked_gathers_tune_generic' -; CHECK-VSCALE-2-NEXT: Cost Model: Found an estimated cost of 80 for instruction: %res.nxv4i32 = call @llvm.masked.gather.nxv4i32.nxv4p0( undef, i32 0, %nxv4i1mask, zeroinitializer) -; CHECK-VSCALE-2-NEXT: Cost Model: Found an estimated cost of 160 for instruction: %res.nxv8i32 = call @llvm.masked.gather.nxv8i32.nxv8p0( undef, i32 0, %nxv8i1mask, zeroinitializer) +; CHECK-VSCALE-2-NEXT: Cost Model: Found an estimated cost of 40 for instruction: %res.nxv4i32 = call @llvm.masked.gather.nxv4i32.nxv4p0( undef, i32 0, %nxv4i1mask, zeroinitializer) +; CHECK-VSCALE-2-NEXT: Cost Model: Found an estimated cost of 80 for instruction: %res.nxv8i32 = call @llvm.masked.gather.nxv8i32.nxv8p0( undef, i32 0, %nxv8i1mask, zeroinitializer) ; CHECK-VSCALE-2-NEXT: Cost Model: Invalid cost for instruction: %res.nxv1i64 = call @llvm.masked.gather.nxv1i64.nxv1p0( undef, i32 0, %nxv1i1mask, zeroinitializer) ; CHECK-VSCALE-2-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void -; -; CHECK-VSCALE-1-LABEL: 'masked_gathers_tune_generic' -; CHECK-VSCALE-1-NEXT: Cost Model: Found an estimated cost of 80 for instruction: %res.nxv4i32 = call @llvm.masked.gather.nxv4i32.nxv4p0( undef, i32 0, %nxv4i1mask, zeroinitializer) -; CHECK-VSCALE-1-NEXT: Cost Model: Found an estimated cost of 160 for instruction: %res.nxv8i32 = call @llvm.masked.gather.nxv8i32.nxv8p0( undef, i32 0, %nxv8i1mask, zeroinitializer) -; CHECK-VSCALE-1-NEXT: Cost Model: Invalid cost for instruction: %res.nxv1i64 = call @llvm.masked.gather.nxv1i64.nxv1p0( undef, i32 0, %nxv1i1mask, zeroinitializer) -; CHECK-VSCALE-1-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void ; %res.nxv4i32 = call @llvm.masked.gather.nxv4i32( undef, i32 0, %nxv4i1mask, zeroinitializer) %res.nxv8i32 = call @llvm.masked.gather.nxv8i32( undef, i32 0, %nxv8i1mask, zeroinitializer) @@ -63,16 +50,16 @@ define void @masked_gathers_tune_generic( %nxv4i1mask, @llvm.masked.gather.nxv4f64.nxv4p0( undef, i32 1, undef, undef) -; CHECK-NEXT: Cost Model: Found an estimated cost of 40 for instruction: %res.nxv2f64 = call @llvm.masked.gather.nxv2f64.nxv2p0( undef, i32 1, undef, undef) -; CHECK-NEXT: Cost Model: Found an estimated cost of 160 for instruction: %res.nxv8f32 = call @llvm.masked.gather.nxv8f32.nxv8p0( undef, i32 1, undef, undef) -; CHECK-NEXT: Cost Model: Found an estimated cost of 80 for instruction: %res.nxv4f32 = call @llvm.masked.gather.nxv4f32.nxv4p0( undef, i32 1, undef, undef) -; CHECK-NEXT: Cost Model: Found an estimated cost of 40 for instruction: %res.nxv2f32 = call @llvm.masked.gather.nxv2f32.nxv2p0( undef, i32 1, undef, undef) -; CHECK-NEXT: Cost Model: Found an estimated cost of 320 for instruction: %res.nxv16i16 = 
call @llvm.masked.gather.nxv16i16.nxv16p0( undef, i32 1, undef, undef) -; CHECK-NEXT: Cost Model: Found an estimated cost of 160 for instruction: %res.nxv8i16 = call @llvm.masked.gather.nxv8i16.nxv8p0( undef, i32 1, undef, undef) -; CHECK-NEXT: Cost Model: Found an estimated cost of 80 for instruction: %res.nxv4i16 = call @llvm.masked.gather.nxv4i16.nxv4p0( undef, i32 1, undef, undef) -; CHECK-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void +; CHECK-VSCALE-1-LABEL: 'masked_gathers_no_vscale_range' +; CHECK-VSCALE-1-NEXT: Cost Model: Found an estimated cost of 40 for instruction: %res.nxv4f64 = call @llvm.masked.gather.nxv4f64.nxv4p0( undef, i32 1, undef, undef) +; CHECK-VSCALE-1-NEXT: Cost Model: Found an estimated cost of 20 for instruction: %res.nxv2f64 = call @llvm.masked.gather.nxv2f64.nxv2p0( undef, i32 1, undef, undef) +; CHECK-VSCALE-1-NEXT: Cost Model: Found an estimated cost of 80 for instruction: %res.nxv8f32 = call @llvm.masked.gather.nxv8f32.nxv8p0( undef, i32 1, undef, undef) +; CHECK-VSCALE-1-NEXT: Cost Model: Found an estimated cost of 40 for instruction: %res.nxv4f32 = call @llvm.masked.gather.nxv4f32.nxv4p0( undef, i32 1, undef, undef) +; CHECK-VSCALE-1-NEXT: Cost Model: Found an estimated cost of 20 for instruction: %res.nxv2f32 = call @llvm.masked.gather.nxv2f32.nxv2p0( undef, i32 1, undef, undef) +; CHECK-VSCALE-1-NEXT: Cost Model: Found an estimated cost of 160 for instruction: %res.nxv16i16 = call @llvm.masked.gather.nxv16i16.nxv16p0( undef, i32 1, undef, undef) +; CHECK-VSCALE-1-NEXT: Cost Model: Found an estimated cost of 80 for instruction: %res.nxv8i16 = call @llvm.masked.gather.nxv8i16.nxv8p0( undef, i32 1, undef, undef) +; CHECK-VSCALE-1-NEXT: Cost Model: Found an estimated cost of 40 for instruction: %res.nxv4i16 = call @llvm.masked.gather.nxv4i16.nxv4p0( undef, i32 1, undef, undef) +; CHECK-VSCALE-1-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void ; ; CHECK-VSCALE-2-LABEL: 'masked_gathers_no_vscale_range' ; CHECK-VSCALE-2-NEXT: Cost Model: Found an estimated cost of 80 for instruction: %res.nxv4f64 = call @llvm.masked.gather.nxv4f64.nxv4p0( undef, i32 1, undef, undef) @@ -84,17 +71,6 @@ define void @masked_gathers_no_vscale_range() #2 { ; CHECK-VSCALE-2-NEXT: Cost Model: Found an estimated cost of 160 for instruction: %res.nxv8i16 = call @llvm.masked.gather.nxv8i16.nxv8p0( undef, i32 1, undef, undef) ; CHECK-VSCALE-2-NEXT: Cost Model: Found an estimated cost of 80 for instruction: %res.nxv4i16 = call @llvm.masked.gather.nxv4i16.nxv4p0( undef, i32 1, undef, undef) ; CHECK-VSCALE-2-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void -; -; CHECK-VSCALE-1-LABEL: 'masked_gathers_no_vscale_range' -; CHECK-VSCALE-1-NEXT: Cost Model: Found an estimated cost of 40 for instruction: %res.nxv4f64 = call @llvm.masked.gather.nxv4f64.nxv4p0( undef, i32 1, undef, undef) -; CHECK-VSCALE-1-NEXT: Cost Model: Found an estimated cost of 20 for instruction: %res.nxv2f64 = call @llvm.masked.gather.nxv2f64.nxv2p0( undef, i32 1, undef, undef) -; CHECK-VSCALE-1-NEXT: Cost Model: Found an estimated cost of 80 for instruction: %res.nxv8f32 = call @llvm.masked.gather.nxv8f32.nxv8p0( undef, i32 1, undef, undef) -; CHECK-VSCALE-1-NEXT: Cost Model: Found an estimated cost of 40 for instruction: %res.nxv4f32 = call @llvm.masked.gather.nxv4f32.nxv4p0( undef, i32 1, undef, undef) -; CHECK-VSCALE-1-NEXT: Cost Model: Found an estimated cost of 20 for instruction: %res.nxv2f32 = call @llvm.masked.gather.nxv2f32.nxv2p0( undef, 
i32 1, undef, undef) -; CHECK-VSCALE-1-NEXT: Cost Model: Found an estimated cost of 160 for instruction: %res.nxv16i16 = call @llvm.masked.gather.nxv16i16.nxv16p0( undef, i32 1, undef, undef) -; CHECK-VSCALE-1-NEXT: Cost Model: Found an estimated cost of 80 for instruction: %res.nxv8i16 = call @llvm.masked.gather.nxv8i16.nxv8p0( undef, i32 1, undef, undef) -; CHECK-VSCALE-1-NEXT: Cost Model: Found an estimated cost of 40 for instruction: %res.nxv4i16 = call @llvm.masked.gather.nxv4i16.nxv4p0( undef, i32 1, undef, undef) -; CHECK-VSCALE-1-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void ; %res.nxv4f64 = call @llvm.masked.gather.nxv4f64( undef, i32 1, undef, undef) %res.nxv2f64 = call @llvm.masked.gather.nxv2f64( undef, i32 1, undef, undef) @@ -111,17 +87,13 @@ define void @masked_gathers_no_vscale_range() #2 { } define <2 x i128> @masked_gather_v1i128(<2 x ptr> %ld, <2 x i1> %masks, <2 x i128> %passthru) #3 { -; CHECK-LABEL: 'masked_gather_v1i128' -; CHECK-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %res = call <2 x i128> @llvm.masked.gather.v2i128.v2p0(<2 x ptr> %ld, i32 0, <2 x i1> %masks, <2 x i128> %passthru) -; CHECK-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret <2 x i128> %res +; CHECK-VSCALE-1-LABEL: 'masked_gather_v1i128' +; CHECK-VSCALE-1-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %res = call <2 x i128> @llvm.masked.gather.v2i128.v2p0(<2 x ptr> %ld, i32 0, <2 x i1> %masks, <2 x i128> %passthru) +; CHECK-VSCALE-1-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret <2 x i128> %res ; ; CHECK-VSCALE-2-LABEL: 'masked_gather_v1i128' ; CHECK-VSCALE-2-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %res = call <2 x i128> @llvm.masked.gather.v2i128.v2p0(<2 x ptr> %ld, i32 0, <2 x i1> %masks, <2 x i128> %passthru) ; CHECK-VSCALE-2-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret <2 x i128> %res -; -; CHECK-VSCALE-1-LABEL: 'masked_gather_v1i128' -; CHECK-VSCALE-1-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %res = call <2 x i128> @llvm.masked.gather.v2i128.v2p0(<2 x ptr> %ld, i32 0, <2 x i1> %masks, <2 x i128> %passthru) -; CHECK-VSCALE-1-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret <2 x i128> %res ; %res = call <2 x i128> @llvm.masked.gather.v2i128.v2p0(<2 x ptr> %ld, i32 0, <2 x i1> %masks, <2 x i128> %passthru) ret <2 x i128> %res diff --git a/llvm/test/Analysis/CostModel/AArch64/sve-intrinsics.ll b/llvm/test/Analysis/CostModel/AArch64/sve-intrinsics.ll index 1ecd02e5c124a6..cca4ea73ee6628 100644 --- a/llvm/test/Analysis/CostModel/AArch64/sve-intrinsics.ll +++ b/llvm/test/Analysis/CostModel/AArch64/sve-intrinsics.ll @@ -1,16 +1,24 @@ ; NOTE: Assertions have been autogenerated by utils/update_analyze_test_checks.py -; RUN: opt < %s -passes="print" 2>&1 -disable-output -S -mtriple=aarch64--linux-gnu -mattr=+sve | FileCheck %s +; RUN: opt < %s -passes="print" 2>&1 -disable-output -S -mtriple=aarch64--linux-gnu -mattr=+sve | FileCheck %s --check-prefix=CHECK-VSCALE-1 +; RUN: opt < %s -passes="print" 2>&1 -mcpu=neoverse-v1 -disable-output -S -mtriple=aarch64--linux-gnu -mattr=+sve | FileCheck %s --check-prefix=CHECK-VSCALE-2 ; RUN: opt < %s -passes="print" 2>&1 -type-based-intrinsic-cost -disable-output -S -mtriple=aarch64--linux-gnu -mattr=+sve | FileCheck %s --check-prefix=TYPE_BASED_ONLY target datalayout = "e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128" define void @vector_insert_extract( %v0, %v1, <16 x 
i32> %v2) { -; CHECK-LABEL: 'vector_insert_extract' -; CHECK-NEXT: Cost Model: Found an estimated cost of 54 for instruction: %extract_fixed_from_scalable = call <16 x i32> @llvm.vector.extract.v16i32.nxv4i32( %v0, i64 0) -; CHECK-NEXT: Cost Model: Found an estimated cost of 54 for instruction: %insert_fixed_into_scalable = call @llvm.vector.insert.nxv4i32.v16i32( %v0, <16 x i32> %v2, i64 0) -; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %extract_scalable_from_scalable = call @llvm.vector.extract.nxv4i32.nxv16i32( %v1, i64 0) -; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %insert_scalable_into_scalable = call @llvm.vector.insert.nxv16i32.nxv4i32( %v1, %v0, i64 0) -; CHECK-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void +; CHECK-VSCALE-1-LABEL: 'vector_insert_extract' +; CHECK-VSCALE-1-NEXT: Cost Model: Found an estimated cost of 54 for instruction: %extract_fixed_from_scalable = call <16 x i32> @llvm.vector.extract.v16i32.nxv4i32( %v0, i64 0) +; CHECK-VSCALE-1-NEXT: Cost Model: Found an estimated cost of 54 for instruction: %insert_fixed_into_scalable = call @llvm.vector.insert.nxv4i32.v16i32( %v0, <16 x i32> %v2, i64 0) +; CHECK-VSCALE-1-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %extract_scalable_from_scalable = call @llvm.vector.extract.nxv4i32.nxv16i32( %v1, i64 0) +; CHECK-VSCALE-1-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %insert_scalable_into_scalable = call @llvm.vector.insert.nxv16i32.nxv4i32( %v1, %v0, i64 0) +; CHECK-VSCALE-1-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void +; +; CHECK-VSCALE-2-LABEL: 'vector_insert_extract' +; CHECK-VSCALE-2-NEXT: Cost Model: Found an estimated cost of 54 for instruction: %extract_fixed_from_scalable = call <16 x i32> @llvm.vector.extract.v16i32.nxv4i32( %v0, i64 0) +; CHECK-VSCALE-2-NEXT: Cost Model: Found an estimated cost of 54 for instruction: %insert_fixed_into_scalable = call @llvm.vector.insert.nxv4i32.v16i32( %v0, <16 x i32> %v2, i64 0) +; CHECK-VSCALE-2-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %extract_scalable_from_scalable = call @llvm.vector.extract.nxv4i32.nxv16i32( %v1, i64 0) +; CHECK-VSCALE-2-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %insert_scalable_into_scalable = call @llvm.vector.insert.nxv16i32.nxv4i32( %v1, %v0, i64 0) +; CHECK-VSCALE-2-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void ; ; TYPE_BASED_ONLY-LABEL: 'vector_insert_extract' ; TYPE_BASED_ONLY-NEXT: Cost Model: Invalid cost for instruction: %extract_fixed_from_scalable = call <16 x i32> @llvm.vector.extract.v16i32.nxv4i32( %v0, i64 0) @@ -31,17 +39,29 @@ declare @llvm.vector.extract.nxv4i32.nxv16i32( @llvm.vector.insert.nxv16i32.nxv4i32(, , i64) define void @vector_insert_extract_idxzero_128b() #1 { -; CHECK-LABEL: 'vector_insert_extract_idxzero_128b' -; CHECK-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %insert_legal_fixed_into_scalable = call @llvm.vector.insert.nxv4f32.v4f32( undef, <4 x float> undef, i64 0) -; CHECK-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %extract_legal_fixed_from_scalable = call <2 x double> @llvm.vector.extract.v2f64.nxv2f64( undef, i64 0) -; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %insert_nxv16i1_nxv2i1 = call @llvm.vector.insert.nxv16i1.nxv2i1( undef, undef, i64 0) -; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: 
%extract_nxv4i1_nxv16i1 = call @llvm.vector.extract.nxv4i1.nxv16i1( undef, i64 0) -; CHECK-NEXT: Cost Model: Found an estimated cost of 42 for instruction: %extract_v8i1_nxv8i1 = call <8 x i1> @llvm.vector.extract.v8i1.nxv8i1( undef, i64 0) -; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %insert_v2f32_nxv2f32 = call @llvm.vector.insert.nxv2f32.v2f32( undef, <2 x float> undef, i64 0) -; CHECK-NEXT: Cost Model: Found an estimated cost of 12 for instruction: %extract_v4f16_nxv4f16 = call <4 x half> @llvm.vector.extract.v4f16.nxv4f16( undef, i64 0) -; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %insert_nxv2f32_nxv4f32 = call @llvm.vector.insert.nxv4f32.nxv2f32( undef, undef, i64 0) -; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %extract_nxv4f32_nxv8f32 = call @llvm.vector.extract.nxv4f32.nxv8f32( undef, i64 0) -; CHECK-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void +; CHECK-VSCALE-1-LABEL: 'vector_insert_extract_idxzero_128b' +; CHECK-VSCALE-1-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %insert_legal_fixed_into_scalable = call @llvm.vector.insert.nxv4f32.v4f32( undef, <4 x float> undef, i64 0) +; CHECK-VSCALE-1-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %extract_legal_fixed_from_scalable = call <2 x double> @llvm.vector.extract.v2f64.nxv2f64( undef, i64 0) +; CHECK-VSCALE-1-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %insert_nxv16i1_nxv2i1 = call @llvm.vector.insert.nxv16i1.nxv2i1( undef, undef, i64 0) +; CHECK-VSCALE-1-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %extract_nxv4i1_nxv16i1 = call @llvm.vector.extract.nxv4i1.nxv16i1( undef, i64 0) +; CHECK-VSCALE-1-NEXT: Cost Model: Found an estimated cost of 42 for instruction: %extract_v8i1_nxv8i1 = call <8 x i1> @llvm.vector.extract.v8i1.nxv8i1( undef, i64 0) +; CHECK-VSCALE-1-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %insert_v2f32_nxv2f32 = call @llvm.vector.insert.nxv2f32.v2f32( undef, <2 x float> undef, i64 0) +; CHECK-VSCALE-1-NEXT: Cost Model: Found an estimated cost of 12 for instruction: %extract_v4f16_nxv4f16 = call <4 x half> @llvm.vector.extract.v4f16.nxv4f16( undef, i64 0) +; CHECK-VSCALE-1-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %insert_nxv2f32_nxv4f32 = call @llvm.vector.insert.nxv4f32.nxv2f32( undef, undef, i64 0) +; CHECK-VSCALE-1-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %extract_nxv4f32_nxv8f32 = call @llvm.vector.extract.nxv4f32.nxv8f32( undef, i64 0) +; CHECK-VSCALE-1-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void +; +; CHECK-VSCALE-2-LABEL: 'vector_insert_extract_idxzero_128b' +; CHECK-VSCALE-2-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %insert_legal_fixed_into_scalable = call @llvm.vector.insert.nxv4f32.v4f32( undef, <4 x float> undef, i64 0) +; CHECK-VSCALE-2-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %extract_legal_fixed_from_scalable = call <2 x double> @llvm.vector.extract.v2f64.nxv2f64( undef, i64 0) +; CHECK-VSCALE-2-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %insert_nxv16i1_nxv2i1 = call @llvm.vector.insert.nxv16i1.nxv2i1( undef, undef, i64 0) +; CHECK-VSCALE-2-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %extract_nxv4i1_nxv16i1 = call @llvm.vector.extract.nxv4i1.nxv16i1( undef, i64 0) +; CHECK-VSCALE-2-NEXT: Cost Model: Found an estimated cost of 42 for 
instruction: %extract_v8i1_nxv8i1 = call <8 x i1> @llvm.vector.extract.v8i1.nxv8i1( undef, i64 0) +; CHECK-VSCALE-2-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %insert_v2f32_nxv2f32 = call @llvm.vector.insert.nxv2f32.v2f32( undef, <2 x float> undef, i64 0) +; CHECK-VSCALE-2-NEXT: Cost Model: Found an estimated cost of 12 for instruction: %extract_v4f16_nxv4f16 = call <4 x half> @llvm.vector.extract.v4f16.nxv4f16( undef, i64 0) +; CHECK-VSCALE-2-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %insert_nxv2f32_nxv4f32 = call @llvm.vector.insert.nxv4f32.nxv2f32( undef, undef, i64 0) +; CHECK-VSCALE-2-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %extract_nxv4f32_nxv8f32 = call @llvm.vector.extract.nxv4f32.nxv8f32( undef, i64 0) +; CHECK-VSCALE-2-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void ; ; TYPE_BASED_ONLY-LABEL: 'vector_insert_extract_idxzero_128b' ; TYPE_BASED_ONLY-NEXT: Cost Model: Invalid cost for instruction: %insert_legal_fixed_into_scalable = call @llvm.vector.insert.nxv4f32.v4f32( undef, <4 x float> undef, i64 0) @@ -76,17 +96,29 @@ declare @llvm.vector.insert.nxv4f32.nxv2f32( @llvm.vector.extract.nxv4f32.nxv8f32(, i64) define void @vector_insert_extract_idxzero_256b() #2 { -; CHECK-LABEL: 'vector_insert_extract_idxzero_256b' -; CHECK-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %insert_legal_fixed_into_scalable = call @llvm.vector.insert.nxv8i16.v16i16( undef, <16 x i16> undef, i64 0) -; CHECK-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %extract_legal_fixed_from_scalable = call <8 x float> @llvm.vector.extract.v8f32.nxv4f32( undef, i64 0) -; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %insert_nxv16i1_nxv2i1 = call @llvm.vector.insert.nxv16i1.nxv2i1( undef, undef, i64 0) -; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %extract_nxv4i1_nxv16i1 = call @llvm.vector.extract.nxv4i1.nxv16i1( undef, i64 0) -; CHECK-NEXT: Cost Model: Found an estimated cost of 42 for instruction: %extract_v8i1_nxv8i1 = call <8 x i1> @llvm.vector.extract.v8i1.nxv8i1( undef, i64 0) -; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %insert_v2f32_nxv2f32 = call @llvm.vector.insert.nxv2f32.v2f32( undef, <2 x float> undef, i64 0) -; CHECK-NEXT: Cost Model: Found an estimated cost of 12 for instruction: %extract_v4f16_nxv4f16 = call <4 x half> @llvm.vector.extract.v4f16.nxv4f16( undef, i64 0) -; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %insert_nxv2f32_nxv4f32 = call @llvm.vector.insert.nxv4f32.nxv2f32( undef, undef, i64 0) -; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %extract_nxv4f32_nxv8f32 = call @llvm.vector.extract.nxv4f32.nxv8f32( undef, i64 0) -; CHECK-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void +; CHECK-VSCALE-1-LABEL: 'vector_insert_extract_idxzero_256b' +; CHECK-VSCALE-1-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %insert_legal_fixed_into_scalable = call @llvm.vector.insert.nxv8i16.v16i16( undef, <16 x i16> undef, i64 0) +; CHECK-VSCALE-1-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %extract_legal_fixed_from_scalable = call <8 x float> @llvm.vector.extract.v8f32.nxv4f32( undef, i64 0) +; CHECK-VSCALE-1-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %insert_nxv16i1_nxv2i1 = call @llvm.vector.insert.nxv16i1.nxv2i1( undef, undef, i64 0) +; CHECK-VSCALE-1-NEXT: Cost Model: Found 
an estimated cost of 1 for instruction: %extract_nxv4i1_nxv16i1 = call @llvm.vector.extract.nxv4i1.nxv16i1( undef, i64 0) +; CHECK-VSCALE-1-NEXT: Cost Model: Found an estimated cost of 42 for instruction: %extract_v8i1_nxv8i1 = call <8 x i1> @llvm.vector.extract.v8i1.nxv8i1( undef, i64 0) +; CHECK-VSCALE-1-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %insert_v2f32_nxv2f32 = call @llvm.vector.insert.nxv2f32.v2f32( undef, <2 x float> undef, i64 0) +; CHECK-VSCALE-1-NEXT: Cost Model: Found an estimated cost of 12 for instruction: %extract_v4f16_nxv4f16 = call <4 x half> @llvm.vector.extract.v4f16.nxv4f16( undef, i64 0) +; CHECK-VSCALE-1-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %insert_nxv2f32_nxv4f32 = call @llvm.vector.insert.nxv4f32.nxv2f32( undef, undef, i64 0) +; CHECK-VSCALE-1-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %extract_nxv4f32_nxv8f32 = call @llvm.vector.extract.nxv4f32.nxv8f32( undef, i64 0) +; CHECK-VSCALE-1-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void +; +; CHECK-VSCALE-2-LABEL: 'vector_insert_extract_idxzero_256b' +; CHECK-VSCALE-2-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %insert_legal_fixed_into_scalable = call @llvm.vector.insert.nxv8i16.v16i16( undef, <16 x i16> undef, i64 0) +; CHECK-VSCALE-2-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %extract_legal_fixed_from_scalable = call <8 x float> @llvm.vector.extract.v8f32.nxv4f32( undef, i64 0) +; CHECK-VSCALE-2-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %insert_nxv16i1_nxv2i1 = call @llvm.vector.insert.nxv16i1.nxv2i1( undef, undef, i64 0) +; CHECK-VSCALE-2-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %extract_nxv4i1_nxv16i1 = call @llvm.vector.extract.nxv4i1.nxv16i1( undef, i64 0) +; CHECK-VSCALE-2-NEXT: Cost Model: Found an estimated cost of 42 for instruction: %extract_v8i1_nxv8i1 = call <8 x i1> @llvm.vector.extract.v8i1.nxv8i1( undef, i64 0) +; CHECK-VSCALE-2-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %insert_v2f32_nxv2f32 = call @llvm.vector.insert.nxv2f32.v2f32( undef, <2 x float> undef, i64 0) +; CHECK-VSCALE-2-NEXT: Cost Model: Found an estimated cost of 12 for instruction: %extract_v4f16_nxv4f16 = call <4 x half> @llvm.vector.extract.v4f16.nxv4f16( undef, i64 0) +; CHECK-VSCALE-2-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %insert_nxv2f32_nxv4f32 = call @llvm.vector.insert.nxv4f32.nxv2f32( undef, undef, i64 0) +; CHECK-VSCALE-2-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %extract_nxv4f32_nxv8f32 = call @llvm.vector.extract.nxv4f32.nxv8f32( undef, i64 0) +; CHECK-VSCALE-2-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void ; ; TYPE_BASED_ONLY-LABEL: 'vector_insert_extract_idxzero_256b' ; TYPE_BASED_ONLY-NEXT: Cost Model: Invalid cost for instruction: %insert_legal_fixed_into_scalable = call @llvm.vector.insert.nxv8i16.v16i16( undef, <16 x i16> undef, i64 0) @@ -115,44 +147,83 @@ declare @llvm.vector.insert.nxv8i16.v16i16( declare <8 x float> @llvm.vector.extract.v8f32.nxv4f32(, i64) define void @reductions( %v0, %v1, %v2, %v3) { -; CHECK-LABEL: 'reductions' -; CHECK-NEXT: Cost Model: Invalid cost for instruction: %add_nxv1i32 = call i32 @llvm.vector.reduce.add.nxv1i32( undef) -; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %add_nxv4i32 = call i32 @llvm.vector.reduce.add.nxv4i32( %v0) -; CHECK-NEXT: Cost Model: Found an estimated cost of 3 for 
instruction: %add_nxv4i64 = call i64 @llvm.vector.reduce.add.nxv4i64( %v1) -; CHECK-NEXT: Cost Model: Invalid cost for instruction: %mul_nxv1i32 = call i32 @llvm.vector.reduce.mul.nxv1i32( undef) -; CHECK-NEXT: Cost Model: Invalid cost for instruction: %mul_nxv4i32 = call i32 @llvm.vector.reduce.mul.nxv4i32( %v0) -; CHECK-NEXT: Cost Model: Invalid cost for instruction: %mul_nxv4i64 = call i64 @llvm.vector.reduce.mul.nxv4i64( %v1) -; CHECK-NEXT: Cost Model: Invalid cost for instruction: %and_nxv1i32 = call i32 @llvm.vector.reduce.and.nxv1i32( undef) -; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %and_nxv4i32 = call i32 @llvm.vector.reduce.and.nxv4i32( %v0) -; CHECK-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %and_nxv4i64 = call i64 @llvm.vector.reduce.and.nxv4i64( %v1) -; CHECK-NEXT: Cost Model: Invalid cost for instruction: %or_nxv1i32 = call i32 @llvm.vector.reduce.or.nxv1i32( undef) -; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %or_nxv4i32 = call i32 @llvm.vector.reduce.or.nxv4i32( %v0) -; CHECK-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %or_nxv4i64 = call i64 @llvm.vector.reduce.or.nxv4i64( %v1) -; CHECK-NEXT: Cost Model: Invalid cost for instruction: %xor_nxv1i32 = call i32 @llvm.vector.reduce.xor.nxv1i32( undef) -; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %xor_nxv4i32 = call i32 @llvm.vector.reduce.xor.nxv4i32( %v0) -; CHECK-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %xor_nxv4i64 = call i64 @llvm.vector.reduce.xor.nxv4i64( %v1) -; CHECK-NEXT: Cost Model: Invalid cost for instruction: %umin_nxv1i64 = call i64 @llvm.vector.reduce.umin.nxv1i64( undef) -; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %umin_nxv4i32 = call i32 @llvm.vector.reduce.umin.nxv4i32( %v0) -; CHECK-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %umin_nxv4i64 = call i64 @llvm.vector.reduce.umin.nxv4i64( %v1) -; CHECK-NEXT: Cost Model: Invalid cost for instruction: %smin_nxv1i64 = call i64 @llvm.vector.reduce.smin.nxv1i64( undef) -; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %smin_nxv4i32 = call i32 @llvm.vector.reduce.smin.nxv4i32( %v0) -; CHECK-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %smin_nxv4i64 = call i64 @llvm.vector.reduce.smin.nxv4i64( %v1) -; CHECK-NEXT: Cost Model: Invalid cost for instruction: %umax_nxv1i64 = call i64 @llvm.vector.reduce.umax.nxv1i64( undef) -; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %umax_nxv4i32 = call i32 @llvm.vector.reduce.umax.nxv4i32( %v0) -; CHECK-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %umax_nxv4i64 = call i64 @llvm.vector.reduce.umax.nxv4i64( %v1) -; CHECK-NEXT: Cost Model: Invalid cost for instruction: %smax_nxv1i64 = call i64 @llvm.vector.reduce.smax.nxv1i64( undef) -; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %smax_nxv4i32 = call i32 @llvm.vector.reduce.smax.nxv4i32( %v0) -; CHECK-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %smax_nxv4i64 = call i64 @llvm.vector.reduce.smax.nxv4i64( %v1) -; CHECK-NEXT: Cost Model: Invalid cost for instruction: %fadd_nxv1f32 = call fast float @llvm.vector.reduce.fadd.nxv1f32(float 0.000000e+00, undef) -; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %fadd_nxv4f32 = call fast float @llvm.vector.reduce.fadd.nxv4f32(float 0.000000e+00, %v2) -; CHECK-NEXT: Cost Model: Found an estimated cost of 
3 for instruction: %fadd_nxv4f64 = call fast double @llvm.vector.reduce.fadd.nxv4f64(double 0.000000e+00, %v3) -; CHECK-NEXT: Cost Model: Invalid cost for instruction: %fmin_nxv1f32 = call fast float @llvm.vector.reduce.fmin.nxv1f32( undef) -; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %fmin_nxv4f32 = call fast float @llvm.vector.reduce.fmin.nxv4f32( %v2) -; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %fmin_nxv4f64 = call fast double @llvm.vector.reduce.fmin.nxv4f64( %v3) -; CHECK-NEXT: Cost Model: Invalid cost for instruction: %fmax_nxv1f32 = call fast float @llvm.vector.reduce.fmax.nxv1f32( undef) -; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %fmax_nxv4f32 = call fast float @llvm.vector.reduce.fmax.nxv4f32( %v2) -; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %fmax_nxv4f64 = call fast double @llvm.vector.reduce.fmax.nxv4f64( %v3) -; CHECK-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void +; CHECK-VSCALE-1-LABEL: 'reductions' +; CHECK-VSCALE-1-NEXT: Cost Model: Invalid cost for instruction: %add_nxv1i32 = call i32 @llvm.vector.reduce.add.nxv1i32( undef) +; CHECK-VSCALE-1-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %add_nxv4i32 = call i32 @llvm.vector.reduce.add.nxv4i32( %v0) +; CHECK-VSCALE-1-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %add_nxv4i64 = call i64 @llvm.vector.reduce.add.nxv4i64( %v1) +; CHECK-VSCALE-1-NEXT: Cost Model: Invalid cost for instruction: %mul_nxv1i32 = call i32 @llvm.vector.reduce.mul.nxv1i32( undef) +; CHECK-VSCALE-1-NEXT: Cost Model: Invalid cost for instruction: %mul_nxv4i32 = call i32 @llvm.vector.reduce.mul.nxv4i32( %v0) +; CHECK-VSCALE-1-NEXT: Cost Model: Invalid cost for instruction: %mul_nxv4i64 = call i64 @llvm.vector.reduce.mul.nxv4i64( %v1) +; CHECK-VSCALE-1-NEXT: Cost Model: Invalid cost for instruction: %and_nxv1i32 = call i32 @llvm.vector.reduce.and.nxv1i32( undef) +; CHECK-VSCALE-1-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %and_nxv4i32 = call i32 @llvm.vector.reduce.and.nxv4i32( %v0) +; CHECK-VSCALE-1-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %and_nxv4i64 = call i64 @llvm.vector.reduce.and.nxv4i64( %v1) +; CHECK-VSCALE-1-NEXT: Cost Model: Invalid cost for instruction: %or_nxv1i32 = call i32 @llvm.vector.reduce.or.nxv1i32( undef) +; CHECK-VSCALE-1-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %or_nxv4i32 = call i32 @llvm.vector.reduce.or.nxv4i32( %v0) +; CHECK-VSCALE-1-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %or_nxv4i64 = call i64 @llvm.vector.reduce.or.nxv4i64( %v1) +; CHECK-VSCALE-1-NEXT: Cost Model: Invalid cost for instruction: %xor_nxv1i32 = call i32 @llvm.vector.reduce.xor.nxv1i32( undef) +; CHECK-VSCALE-1-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %xor_nxv4i32 = call i32 @llvm.vector.reduce.xor.nxv4i32( %v0) +; CHECK-VSCALE-1-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %xor_nxv4i64 = call i64 @llvm.vector.reduce.xor.nxv4i64( %v1) +; CHECK-VSCALE-1-NEXT: Cost Model: Invalid cost for instruction: %umin_nxv1i64 = call i64 @llvm.vector.reduce.umin.nxv1i64( undef) +; CHECK-VSCALE-1-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %umin_nxv4i32 = call i32 @llvm.vector.reduce.umin.nxv4i32( %v0) +; CHECK-VSCALE-1-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %umin_nxv4i64 = call i64 @llvm.vector.reduce.umin.nxv4i64( %v1) +; 
CHECK-VSCALE-1-NEXT: Cost Model: Invalid cost for instruction: %smin_nxv1i64 = call i64 @llvm.vector.reduce.smin.nxv1i64( undef) +; CHECK-VSCALE-1-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %smin_nxv4i32 = call i32 @llvm.vector.reduce.smin.nxv4i32( %v0) +; CHECK-VSCALE-1-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %smin_nxv4i64 = call i64 @llvm.vector.reduce.smin.nxv4i64( %v1) +; CHECK-VSCALE-1-NEXT: Cost Model: Invalid cost for instruction: %umax_nxv1i64 = call i64 @llvm.vector.reduce.umax.nxv1i64( undef) +; CHECK-VSCALE-1-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %umax_nxv4i32 = call i32 @llvm.vector.reduce.umax.nxv4i32( %v0) +; CHECK-VSCALE-1-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %umax_nxv4i64 = call i64 @llvm.vector.reduce.umax.nxv4i64( %v1) +; CHECK-VSCALE-1-NEXT: Cost Model: Invalid cost for instruction: %smax_nxv1i64 = call i64 @llvm.vector.reduce.smax.nxv1i64( undef) +; CHECK-VSCALE-1-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %smax_nxv4i32 = call i32 @llvm.vector.reduce.smax.nxv4i32( %v0) +; CHECK-VSCALE-1-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %smax_nxv4i64 = call i64 @llvm.vector.reduce.smax.nxv4i64( %v1) +; CHECK-VSCALE-1-NEXT: Cost Model: Invalid cost for instruction: %fadd_nxv1f32 = call fast float @llvm.vector.reduce.fadd.nxv1f32(float 0.000000e+00, undef) +; CHECK-VSCALE-1-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %fadd_nxv4f32 = call fast float @llvm.vector.reduce.fadd.nxv4f32(float 0.000000e+00, %v2) +; CHECK-VSCALE-1-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %fadd_nxv4f64 = call fast double @llvm.vector.reduce.fadd.nxv4f64(double 0.000000e+00, %v3) +; CHECK-VSCALE-1-NEXT: Cost Model: Invalid cost for instruction: %fmin_nxv1f32 = call fast float @llvm.vector.reduce.fmin.nxv1f32( undef) +; CHECK-VSCALE-1-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %fmin_nxv4f32 = call fast float @llvm.vector.reduce.fmin.nxv4f32( %v2) +; CHECK-VSCALE-1-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %fmin_nxv4f64 = call fast double @llvm.vector.reduce.fmin.nxv4f64( %v3) +; CHECK-VSCALE-1-NEXT: Cost Model: Invalid cost for instruction: %fmax_nxv1f32 = call fast float @llvm.vector.reduce.fmax.nxv1f32( undef) +; CHECK-VSCALE-1-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %fmax_nxv4f32 = call fast float @llvm.vector.reduce.fmax.nxv4f32( %v2) +; CHECK-VSCALE-1-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %fmax_nxv4f64 = call fast double @llvm.vector.reduce.fmax.nxv4f64( %v3) +; CHECK-VSCALE-1-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void +; +; CHECK-VSCALE-2-LABEL: 'reductions' +; CHECK-VSCALE-2-NEXT: Cost Model: Invalid cost for instruction: %add_nxv1i32 = call i32 @llvm.vector.reduce.add.nxv1i32( undef) +; CHECK-VSCALE-2-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %add_nxv4i32 = call i32 @llvm.vector.reduce.add.nxv4i32( %v0) +; CHECK-VSCALE-2-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %add_nxv4i64 = call i64 @llvm.vector.reduce.add.nxv4i64( %v1) +; CHECK-VSCALE-2-NEXT: Cost Model: Invalid cost for instruction: %mul_nxv1i32 = call i32 @llvm.vector.reduce.mul.nxv1i32( undef) +; CHECK-VSCALE-2-NEXT: Cost Model: Invalid cost for instruction: %mul_nxv4i32 = call i32 @llvm.vector.reduce.mul.nxv4i32( %v0) +; CHECK-VSCALE-2-NEXT: Cost Model: Invalid cost for instruction: %mul_nxv4i64 = 
call i64 @llvm.vector.reduce.mul.nxv4i64( %v1) +; CHECK-VSCALE-2-NEXT: Cost Model: Invalid cost for instruction: %and_nxv1i32 = call i32 @llvm.vector.reduce.and.nxv1i32( undef) +; CHECK-VSCALE-2-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %and_nxv4i32 = call i32 @llvm.vector.reduce.and.nxv4i32( %v0) +; CHECK-VSCALE-2-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %and_nxv4i64 = call i64 @llvm.vector.reduce.and.nxv4i64( %v1) +; CHECK-VSCALE-2-NEXT: Cost Model: Invalid cost for instruction: %or_nxv1i32 = call i32 @llvm.vector.reduce.or.nxv1i32( undef) +; CHECK-VSCALE-2-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %or_nxv4i32 = call i32 @llvm.vector.reduce.or.nxv4i32( %v0) +; CHECK-VSCALE-2-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %or_nxv4i64 = call i64 @llvm.vector.reduce.or.nxv4i64( %v1) +; CHECK-VSCALE-2-NEXT: Cost Model: Invalid cost for instruction: %xor_nxv1i32 = call i32 @llvm.vector.reduce.xor.nxv1i32( undef) +; CHECK-VSCALE-2-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %xor_nxv4i32 = call i32 @llvm.vector.reduce.xor.nxv4i32( %v0) +; CHECK-VSCALE-2-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %xor_nxv4i64 = call i64 @llvm.vector.reduce.xor.nxv4i64( %v1) +; CHECK-VSCALE-2-NEXT: Cost Model: Invalid cost for instruction: %umin_nxv1i64 = call i64 @llvm.vector.reduce.umin.nxv1i64( undef) +; CHECK-VSCALE-2-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %umin_nxv4i32 = call i32 @llvm.vector.reduce.umin.nxv4i32( %v0) +; CHECK-VSCALE-2-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %umin_nxv4i64 = call i64 @llvm.vector.reduce.umin.nxv4i64( %v1) +; CHECK-VSCALE-2-NEXT: Cost Model: Invalid cost for instruction: %smin_nxv1i64 = call i64 @llvm.vector.reduce.smin.nxv1i64( undef) +; CHECK-VSCALE-2-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %smin_nxv4i32 = call i32 @llvm.vector.reduce.smin.nxv4i32( %v0) +; CHECK-VSCALE-2-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %smin_nxv4i64 = call i64 @llvm.vector.reduce.smin.nxv4i64( %v1) +; CHECK-VSCALE-2-NEXT: Cost Model: Invalid cost for instruction: %umax_nxv1i64 = call i64 @llvm.vector.reduce.umax.nxv1i64( undef) +; CHECK-VSCALE-2-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %umax_nxv4i32 = call i32 @llvm.vector.reduce.umax.nxv4i32( %v0) +; CHECK-VSCALE-2-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %umax_nxv4i64 = call i64 @llvm.vector.reduce.umax.nxv4i64( %v1) +; CHECK-VSCALE-2-NEXT: Cost Model: Invalid cost for instruction: %smax_nxv1i64 = call i64 @llvm.vector.reduce.smax.nxv1i64( undef) +; CHECK-VSCALE-2-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %smax_nxv4i32 = call i32 @llvm.vector.reduce.smax.nxv4i32( %v0) +; CHECK-VSCALE-2-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %smax_nxv4i64 = call i64 @llvm.vector.reduce.smax.nxv4i64( %v1) +; CHECK-VSCALE-2-NEXT: Cost Model: Invalid cost for instruction: %fadd_nxv1f32 = call fast float @llvm.vector.reduce.fadd.nxv1f32(float 0.000000e+00, undef) +; CHECK-VSCALE-2-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %fadd_nxv4f32 = call fast float @llvm.vector.reduce.fadd.nxv4f32(float 0.000000e+00, %v2) +; CHECK-VSCALE-2-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %fadd_nxv4f64 = call fast double @llvm.vector.reduce.fadd.nxv4f64(double 0.000000e+00, %v3) +; CHECK-VSCALE-2-NEXT: Cost Model: Invalid cost for 
instruction: %fmin_nxv1f32 = call fast float @llvm.vector.reduce.fmin.nxv1f32( undef) +; CHECK-VSCALE-2-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %fmin_nxv4f32 = call fast float @llvm.vector.reduce.fmin.nxv4f32( %v2) +; CHECK-VSCALE-2-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %fmin_nxv4f64 = call fast double @llvm.vector.reduce.fmin.nxv4f64( %v3) +; CHECK-VSCALE-2-NEXT: Cost Model: Invalid cost for instruction: %fmax_nxv1f32 = call fast float @llvm.vector.reduce.fmax.nxv1f32( undef) +; CHECK-VSCALE-2-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %fmax_nxv4f32 = call fast float @llvm.vector.reduce.fmax.nxv4f32( %v2) +; CHECK-VSCALE-2-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %fmax_nxv4f64 = call fast double @llvm.vector.reduce.fmax.nxv4f64( %v3) +; CHECK-VSCALE-2-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void ; ; TYPE_BASED_ONLY-LABEL: 'reductions' ; TYPE_BASED_ONLY-NEXT: Cost Model: Invalid cost for instruction: %add_nxv1i32 = call i32 @llvm.vector.reduce.add.nxv1i32( undef) @@ -235,16 +306,23 @@ define void @reductions( %v0, %v1, %v0, %v1) { -; CHECK-LABEL: 'strict_fp_reductions' -; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %fadd_nxv4f32 = call float @llvm.vector.reduce.fadd.nxv4f32(float 0.000000e+00, %v0) -; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %fadd_nxv4f64 = call double @llvm.vector.reduce.fadd.nxv4f64(double 0.000000e+00, %v1) -; CHECK-NEXT: Cost Model: Invalid cost for instruction: %fmul_nxv4f32 = call float @llvm.vector.reduce.fmul.nxv4f32(float 0.000000e+00, %v0) -; CHECK-NEXT: Cost Model: Invalid cost for instruction: %fmul_nxv4f64 = call double @llvm.vector.reduce.fmul.nxv4f64(double 0.000000e+00, %v1) -; CHECK-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void +; CHECK-VSCALE-1-LABEL: 'strict_fp_reductions' +; CHECK-VSCALE-1-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %fadd_nxv4f32 = call float @llvm.vector.reduce.fadd.nxv4f32(float 0.000000e+00, %v0) +; CHECK-VSCALE-1-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %fadd_nxv4f64 = call double @llvm.vector.reduce.fadd.nxv4f64(double 0.000000e+00, %v1) +; CHECK-VSCALE-1-NEXT: Cost Model: Invalid cost for instruction: %fmul_nxv4f32 = call float @llvm.vector.reduce.fmul.nxv4f32(float 0.000000e+00, %v0) +; CHECK-VSCALE-1-NEXT: Cost Model: Invalid cost for instruction: %fmul_nxv4f64 = call double @llvm.vector.reduce.fmul.nxv4f64(double 0.000000e+00, %v1) +; CHECK-VSCALE-1-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void +; +; CHECK-VSCALE-2-LABEL: 'strict_fp_reductions' +; CHECK-VSCALE-2-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %fadd_nxv4f32 = call float @llvm.vector.reduce.fadd.nxv4f32(float 0.000000e+00, %v0) +; CHECK-VSCALE-2-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %fadd_nxv4f64 = call double @llvm.vector.reduce.fadd.nxv4f64(double 0.000000e+00, %v1) +; CHECK-VSCALE-2-NEXT: Cost Model: Invalid cost for instruction: %fmul_nxv4f32 = call float @llvm.vector.reduce.fmul.nxv4f32(float 0.000000e+00, %v0) +; CHECK-VSCALE-2-NEXT: Cost Model: Invalid cost for instruction: %fmul_nxv4f64 = call double @llvm.vector.reduce.fmul.nxv4f64(double 0.000000e+00, %v1) +; CHECK-VSCALE-2-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void ; ; TYPE_BASED_ONLY-LABEL: 'strict_fp_reductions' -; TYPE_BASED_ONLY-NEXT: Cost Model: Found an 
estimated cost of 8 for instruction: %fadd_nxv4f32 = call float @llvm.vector.reduce.fadd.nxv4f32(float 0.000000e+00, %v0) -; TYPE_BASED_ONLY-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %fadd_nxv4f64 = call double @llvm.vector.reduce.fadd.nxv4f64(double 0.000000e+00, %v1) +; TYPE_BASED_ONLY-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %fadd_nxv4f32 = call float @llvm.vector.reduce.fadd.nxv4f32(float 0.000000e+00, %v0) +; TYPE_BASED_ONLY-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %fadd_nxv4f64 = call double @llvm.vector.reduce.fadd.nxv4f64(double 0.000000e+00, %v1) ; TYPE_BASED_ONLY-NEXT: Cost Model: Invalid cost for instruction: %fmul_nxv4f32 = call float @llvm.vector.reduce.fmul.nxv4f32(float 0.000000e+00, %v0) ; TYPE_BASED_ONLY-NEXT: Cost Model: Invalid cost for instruction: %fmul_nxv4f64 = call double @llvm.vector.reduce.fmul.nxv4f64(double 0.000000e+00, %v1) ; TYPE_BASED_ONLY-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void @@ -286,10 +364,15 @@ declare double @llvm.vector.reduce.fmax.nxv4f64() define void @count_zeroes( %A) { -; CHECK-LABEL: 'count_zeroes' -; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %ctlz = call @llvm.ctlz.nxv4i32( %A, i1 true) -; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %cttz = call @llvm.cttz.nxv4i32( %A, i1 true) -; CHECK-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void +; CHECK-VSCALE-1-LABEL: 'count_zeroes' +; CHECK-VSCALE-1-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %ctlz = call @llvm.ctlz.nxv4i32( %A, i1 true) +; CHECK-VSCALE-1-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %cttz = call @llvm.cttz.nxv4i32( %A, i1 true) +; CHECK-VSCALE-1-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void +; +; CHECK-VSCALE-2-LABEL: 'count_zeroes' +; CHECK-VSCALE-2-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %ctlz = call @llvm.ctlz.nxv4i32( %A, i1 true) +; CHECK-VSCALE-2-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %cttz = call @llvm.cttz.nxv4i32( %A, i1 true) +; CHECK-VSCALE-2-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void ; ; TYPE_BASED_ONLY-LABEL: 'count_zeroes' ; TYPE_BASED_ONLY-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %ctlz = call @llvm.ctlz.nxv4i32( %A, i1 true) @@ -305,35 +388,65 @@ declare @llvm.cttz.nxv4i32(, i1) define void @vector_reverse() #0 { -; CHECK-LABEL: 'vector_reverse' -; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %reverse_nxv16i8 = call @llvm.vector.reverse.nxv16i8( undef) -; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %reverse_nxv32i8 = call @llvm.vector.reverse.nxv32i8( undef) -; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %reverse_nxv2i16 = call @llvm.vector.reverse.nxv2i16( undef) -; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %reverse_nxv4i16 = call @llvm.vector.reverse.nxv4i16( undef) -; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %reverse_nxv8i16 = call @llvm.vector.reverse.nxv8i16( undef) -; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %reverse_nxv16i16 = call @llvm.vector.reverse.nxv16i16( undef) -; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %reverse_nxv4i32 = call @llvm.vector.reverse.nxv4i32( undef) -; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for 
instruction: %reverse_nxv8i32 = call @llvm.vector.reverse.nxv8i32( undef) -; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %reverse_nxv2i64 = call @llvm.vector.reverse.nxv2i64( undef) -; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %reverse_nxv4i64 = call @llvm.vector.reverse.nxv4i64( undef) -; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %reverse_nxv2f16 = call @llvm.vector.reverse.nxv2f16( undef) -; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %reverse_nxv4f16 = call @llvm.vector.reverse.nxv4f16( undef) -; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %reverse_nxv8f16 = call @llvm.vector.reverse.nxv8f16( undef) -; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %reverse_nxv16f16 = call @llvm.vector.reverse.nxv16f16( undef) -; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %reverse_nxv2f32 = call @llvm.vector.reverse.nxv2f32( undef) -; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %reverse_nxv4f32 = call @llvm.vector.reverse.nxv4f32( undef) -; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %reverse_nxv8f32 = call @llvm.vector.reverse.nxv8f32( undef) -; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %reverse_nxv2f64 = call @llvm.vector.reverse.nxv2f64( undef) -; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %reverse_nxv4f64 = call @llvm.vector.reverse.nxv4f64( undef) -; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %reverse_nxv2bf16 = call @llvm.vector.reverse.nxv2bf16( undef) -; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %reverse_nxv4bf16 = call @llvm.vector.reverse.nxv4bf16( undef) -; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %reverse_nxv8bf16 = call @llvm.vector.reverse.nxv8bf16( undef) -; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %reverse_nxv16bf16 = call @llvm.vector.reverse.nxv16bf16( undef) -; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %reverse_nxv16i1 = call @llvm.vector.reverse.nxv16i1( undef) -; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %reverse_nxv8i1 = call @llvm.vector.reverse.nxv8i1( undef) -; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %reverse_nxv4i1 = call @llvm.vector.reverse.nxv4i1( undef) -; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %reverse_nxv2i1 = call @llvm.vector.reverse.nxv2i1( undef) -; CHECK-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void +; CHECK-VSCALE-1-LABEL: 'vector_reverse' +; CHECK-VSCALE-1-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %reverse_nxv16i8 = call @llvm.vector.reverse.nxv16i8( undef) +; CHECK-VSCALE-1-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %reverse_nxv32i8 = call @llvm.vector.reverse.nxv32i8( undef) +; CHECK-VSCALE-1-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %reverse_nxv2i16 = call @llvm.vector.reverse.nxv2i16( undef) +; CHECK-VSCALE-1-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %reverse_nxv4i16 = call @llvm.vector.reverse.nxv4i16( undef) +; CHECK-VSCALE-1-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %reverse_nxv8i16 = call @llvm.vector.reverse.nxv8i16( undef) +; CHECK-VSCALE-1-NEXT: Cost Model: Found an estimated cost of 2 for instruction: 
%reverse_nxv16i16 = call @llvm.vector.reverse.nxv16i16( undef) +; CHECK-VSCALE-1-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %reverse_nxv4i32 = call @llvm.vector.reverse.nxv4i32( undef) +; CHECK-VSCALE-1-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %reverse_nxv8i32 = call @llvm.vector.reverse.nxv8i32( undef) +; CHECK-VSCALE-1-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %reverse_nxv2i64 = call @llvm.vector.reverse.nxv2i64( undef) +; CHECK-VSCALE-1-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %reverse_nxv4i64 = call @llvm.vector.reverse.nxv4i64( undef) +; CHECK-VSCALE-1-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %reverse_nxv2f16 = call @llvm.vector.reverse.nxv2f16( undef) +; CHECK-VSCALE-1-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %reverse_nxv4f16 = call @llvm.vector.reverse.nxv4f16( undef) +; CHECK-VSCALE-1-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %reverse_nxv8f16 = call @llvm.vector.reverse.nxv8f16( undef) +; CHECK-VSCALE-1-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %reverse_nxv16f16 = call @llvm.vector.reverse.nxv16f16( undef) +; CHECK-VSCALE-1-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %reverse_nxv2f32 = call @llvm.vector.reverse.nxv2f32( undef) +; CHECK-VSCALE-1-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %reverse_nxv4f32 = call @llvm.vector.reverse.nxv4f32( undef) +; CHECK-VSCALE-1-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %reverse_nxv8f32 = call @llvm.vector.reverse.nxv8f32( undef) +; CHECK-VSCALE-1-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %reverse_nxv2f64 = call @llvm.vector.reverse.nxv2f64( undef) +; CHECK-VSCALE-1-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %reverse_nxv4f64 = call @llvm.vector.reverse.nxv4f64( undef) +; CHECK-VSCALE-1-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %reverse_nxv2bf16 = call @llvm.vector.reverse.nxv2bf16( undef) +; CHECK-VSCALE-1-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %reverse_nxv4bf16 = call @llvm.vector.reverse.nxv4bf16( undef) +; CHECK-VSCALE-1-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %reverse_nxv8bf16 = call @llvm.vector.reverse.nxv8bf16( undef) +; CHECK-VSCALE-1-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %reverse_nxv16bf16 = call @llvm.vector.reverse.nxv16bf16( undef) +; CHECK-VSCALE-1-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %reverse_nxv16i1 = call @llvm.vector.reverse.nxv16i1( undef) +; CHECK-VSCALE-1-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %reverse_nxv8i1 = call @llvm.vector.reverse.nxv8i1( undef) +; CHECK-VSCALE-1-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %reverse_nxv4i1 = call @llvm.vector.reverse.nxv4i1( undef) +; CHECK-VSCALE-1-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %reverse_nxv2i1 = call @llvm.vector.reverse.nxv2i1( undef) +; CHECK-VSCALE-1-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void +; +; CHECK-VSCALE-2-LABEL: 'vector_reverse' +; CHECK-VSCALE-2-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %reverse_nxv16i8 = call @llvm.vector.reverse.nxv16i8( undef) +; CHECK-VSCALE-2-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %reverse_nxv32i8 = call @llvm.vector.reverse.nxv32i8( undef) +; CHECK-VSCALE-2-NEXT: Cost Model: Found an estimated cost 
of 1 for instruction: %reverse_nxv2i16 = call @llvm.vector.reverse.nxv2i16( undef) +; CHECK-VSCALE-2-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %reverse_nxv4i16 = call @llvm.vector.reverse.nxv4i16( undef) +; CHECK-VSCALE-2-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %reverse_nxv8i16 = call @llvm.vector.reverse.nxv8i16( undef) +; CHECK-VSCALE-2-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %reverse_nxv16i16 = call @llvm.vector.reverse.nxv16i16( undef) +; CHECK-VSCALE-2-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %reverse_nxv4i32 = call @llvm.vector.reverse.nxv4i32( undef) +; CHECK-VSCALE-2-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %reverse_nxv8i32 = call @llvm.vector.reverse.nxv8i32( undef) +; CHECK-VSCALE-2-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %reverse_nxv2i64 = call @llvm.vector.reverse.nxv2i64( undef) +; CHECK-VSCALE-2-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %reverse_nxv4i64 = call @llvm.vector.reverse.nxv4i64( undef) +; CHECK-VSCALE-2-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %reverse_nxv2f16 = call @llvm.vector.reverse.nxv2f16( undef) +; CHECK-VSCALE-2-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %reverse_nxv4f16 = call @llvm.vector.reverse.nxv4f16( undef) +; CHECK-VSCALE-2-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %reverse_nxv8f16 = call @llvm.vector.reverse.nxv8f16( undef) +; CHECK-VSCALE-2-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %reverse_nxv16f16 = call @llvm.vector.reverse.nxv16f16( undef) +; CHECK-VSCALE-2-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %reverse_nxv2f32 = call @llvm.vector.reverse.nxv2f32( undef) +; CHECK-VSCALE-2-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %reverse_nxv4f32 = call @llvm.vector.reverse.nxv4f32( undef) +; CHECK-VSCALE-2-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %reverse_nxv8f32 = call @llvm.vector.reverse.nxv8f32( undef) +; CHECK-VSCALE-2-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %reverse_nxv2f64 = call @llvm.vector.reverse.nxv2f64( undef) +; CHECK-VSCALE-2-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %reverse_nxv4f64 = call @llvm.vector.reverse.nxv4f64( undef) +; CHECK-VSCALE-2-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %reverse_nxv2bf16 = call @llvm.vector.reverse.nxv2bf16( undef) +; CHECK-VSCALE-2-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %reverse_nxv4bf16 = call @llvm.vector.reverse.nxv4bf16( undef) +; CHECK-VSCALE-2-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %reverse_nxv8bf16 = call @llvm.vector.reverse.nxv8bf16( undef) +; CHECK-VSCALE-2-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %reverse_nxv16bf16 = call @llvm.vector.reverse.nxv16bf16( undef) +; CHECK-VSCALE-2-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %reverse_nxv16i1 = call @llvm.vector.reverse.nxv16i1( undef) +; CHECK-VSCALE-2-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %reverse_nxv8i1 = call @llvm.vector.reverse.nxv8i1( undef) +; CHECK-VSCALE-2-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %reverse_nxv4i1 = call @llvm.vector.reverse.nxv4i1( undef) +; CHECK-VSCALE-2-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %reverse_nxv2i1 = call @llvm.vector.reverse.nxv2i1( undef) +; CHECK-VSCALE-2-NEXT: Cost 
Model: Found an estimated cost of 0 for instruction: ret void ; ; TYPE_BASED_ONLY-LABEL: 'vector_reverse' ; TYPE_BASED_ONLY-NEXT: Cost Model: Invalid cost for instruction: %reverse_nxv16i8 = call @llvm.vector.reverse.nxv16i8( undef) @@ -424,17 +537,29 @@ declare @llvm.vector.reverse.nxv4i1() declare @llvm.vector.reverse.nxv2i1() define void @unsupported_fp_ops( %vec, i32 %extraarg) { -; CHECK-LABEL: 'unsupported_fp_ops' -; CHECK-NEXT: Cost Model: Invalid cost for instruction: %sin = call @llvm.sin.nxv4f32( %vec) -; CHECK-NEXT: Cost Model: Invalid cost for instruction: %cos = call @llvm.cos.nxv4f32( %vec) -; CHECK-NEXT: Cost Model: Invalid cost for instruction: %pow = call @llvm.pow.nxv4f32( %vec, %vec) -; CHECK-NEXT: Cost Model: Invalid cost for instruction: %powi = call @llvm.powi.nxv4f32.i32( %vec, i32 %extraarg) -; CHECK-NEXT: Cost Model: Invalid cost for instruction: %exp = call @llvm.exp.nxv4f32( %vec) -; CHECK-NEXT: Cost Model: Invalid cost for instruction: %exp2 = call @llvm.exp2.nxv4f32( %vec) -; CHECK-NEXT: Cost Model: Invalid cost for instruction: %log = call @llvm.log.nxv4f32( %vec) -; CHECK-NEXT: Cost Model: Invalid cost for instruction: %log2 = call @llvm.log2.nxv4f32( %vec) -; CHECK-NEXT: Cost Model: Invalid cost for instruction: %log10 = call @llvm.log10.nxv4f32( %vec) -; CHECK-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void +; CHECK-VSCALE-1-LABEL: 'unsupported_fp_ops' +; CHECK-VSCALE-1-NEXT: Cost Model: Invalid cost for instruction: %sin = call @llvm.sin.nxv4f32( %vec) +; CHECK-VSCALE-1-NEXT: Cost Model: Invalid cost for instruction: %cos = call @llvm.cos.nxv4f32( %vec) +; CHECK-VSCALE-1-NEXT: Cost Model: Invalid cost for instruction: %pow = call @llvm.pow.nxv4f32( %vec, %vec) +; CHECK-VSCALE-1-NEXT: Cost Model: Invalid cost for instruction: %powi = call @llvm.powi.nxv4f32.i32( %vec, i32 %extraarg) +; CHECK-VSCALE-1-NEXT: Cost Model: Invalid cost for instruction: %exp = call @llvm.exp.nxv4f32( %vec) +; CHECK-VSCALE-1-NEXT: Cost Model: Invalid cost for instruction: %exp2 = call @llvm.exp2.nxv4f32( %vec) +; CHECK-VSCALE-1-NEXT: Cost Model: Invalid cost for instruction: %log = call @llvm.log.nxv4f32( %vec) +; CHECK-VSCALE-1-NEXT: Cost Model: Invalid cost for instruction: %log2 = call @llvm.log2.nxv4f32( %vec) +; CHECK-VSCALE-1-NEXT: Cost Model: Invalid cost for instruction: %log10 = call @llvm.log10.nxv4f32( %vec) +; CHECK-VSCALE-1-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void +; +; CHECK-VSCALE-2-LABEL: 'unsupported_fp_ops' +; CHECK-VSCALE-2-NEXT: Cost Model: Invalid cost for instruction: %sin = call @llvm.sin.nxv4f32( %vec) +; CHECK-VSCALE-2-NEXT: Cost Model: Invalid cost for instruction: %cos = call @llvm.cos.nxv4f32( %vec) +; CHECK-VSCALE-2-NEXT: Cost Model: Invalid cost for instruction: %pow = call @llvm.pow.nxv4f32( %vec, %vec) +; CHECK-VSCALE-2-NEXT: Cost Model: Invalid cost for instruction: %powi = call @llvm.powi.nxv4f32.i32( %vec, i32 %extraarg) +; CHECK-VSCALE-2-NEXT: Cost Model: Invalid cost for instruction: %exp = call @llvm.exp.nxv4f32( %vec) +; CHECK-VSCALE-2-NEXT: Cost Model: Invalid cost for instruction: %exp2 = call @llvm.exp2.nxv4f32( %vec) +; CHECK-VSCALE-2-NEXT: Cost Model: Invalid cost for instruction: %log = call @llvm.log.nxv4f32( %vec) +; CHECK-VSCALE-2-NEXT: Cost Model: Invalid cost for instruction: %log2 = call @llvm.log2.nxv4f32( %vec) +; CHECK-VSCALE-2-NEXT: Cost Model: Invalid cost for instruction: %log10 = call @llvm.log10.nxv4f32( %vec) +; CHECK-VSCALE-2-NEXT: Cost Model: Found an estimated 
cost of 0 for instruction: ret void ; ; TYPE_BASED_ONLY-LABEL: 'unsupported_fp_ops' ; TYPE_BASED_ONLY-NEXT: Cost Model: Invalid cost for instruction: %sin = call @llvm.sin.nxv4f32( %vec) @@ -462,9 +587,13 @@ define void @unsupported_fp_ops( %vec, i32 %extraarg) { } define void @powi( %vec) { -; CHECK-LABEL: 'powi' -; CHECK-NEXT: Cost Model: Found an estimated cost of 14 for instruction: %powi = call @llvm.powi.nxv4f32.i32( %vec, i32 42) -; CHECK-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void +; CHECK-VSCALE-1-LABEL: 'powi' +; CHECK-VSCALE-1-NEXT: Cost Model: Found an estimated cost of 14 for instruction: %powi = call @llvm.powi.nxv4f32.i32( %vec, i32 42) +; CHECK-VSCALE-1-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void +; +; CHECK-VSCALE-2-LABEL: 'powi' +; CHECK-VSCALE-2-NEXT: Cost Model: Found an estimated cost of 14 for instruction: %powi = call @llvm.powi.nxv4f32.i32( %vec, i32 42) +; CHECK-VSCALE-2-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void ; ; TYPE_BASED_ONLY-LABEL: 'powi' ; TYPE_BASED_ONLY-NEXT: Cost Model: Invalid cost for instruction: %powi = call @llvm.powi.nxv4f32.i32( %vec, i32 42) @@ -485,69 +614,133 @@ declare @llvm.log2.nxv4f32() declare @llvm.log10.nxv4f32() define void @vector_splice() #0 { -; CHECK-LABEL: 'vector_splice' -; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %splice_nxv16i8 = call @llvm.vector.splice.nxv16i8( zeroinitializer, zeroinitializer, i32 1) -; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %splice_nxv32i8 = call @llvm.vector.splice.nxv32i8( zeroinitializer, zeroinitializer, i32 1) -; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %splice_nxv2i16 = call @llvm.vector.splice.nxv2i16( zeroinitializer, zeroinitializer, i32 1) -; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %splice_nxv4i16 = call @llvm.vector.splice.nxv4i16( zeroinitializer, zeroinitializer, i32 1) -; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %splice_nxv8i16 = call @llvm.vector.splice.nxv8i16( zeroinitializer, zeroinitializer, i32 1) -; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %splice_nxv16i16 = call @llvm.vector.splice.nxv16i16( zeroinitializer, zeroinitializer, i32 1) -; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %splice_nxv4i32 = call @llvm.vector.splice.nxv4i32( zeroinitializer, zeroinitializer, i32 1) -; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %splice_nxv8i32 = call @llvm.vector.splice.nxv8i32( zeroinitializer, zeroinitializer, i32 1) -; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %splice_nxv2i64 = call @llvm.vector.splice.nxv2i64( zeroinitializer, zeroinitializer, i32 1) -; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %splice_nxv4i64 = call @llvm.vector.splice.nxv4i64( zeroinitializer, zeroinitializer, i32 1) -; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %splice_nxv2f16 = call @llvm.vector.splice.nxv2f16( zeroinitializer, zeroinitializer, i32 1) -; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %splice_nxv4f16 = call @llvm.vector.splice.nxv4f16( zeroinitializer, zeroinitializer, i32 1) -; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %splice_nxv8f16 = call @llvm.vector.splice.nxv8f16( zeroinitializer, zeroinitializer, i32 1) -; CHECK-NEXT: Cost Model: Found an estimated cost of 2 
for instruction: %splice_nxv16f16 = call @llvm.vector.splice.nxv16f16( zeroinitializer, zeroinitializer, i32 1) -; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %splice_nxv2f32 = call @llvm.vector.splice.nxv2f32( zeroinitializer, zeroinitializer, i32 1) -; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %splice_nxv4f32 = call @llvm.vector.splice.nxv4f32( zeroinitializer, zeroinitializer, i32 1) -; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %splice_nxv8f32 = call @llvm.vector.splice.nxv8f32( zeroinitializer, zeroinitializer, i32 1) -; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %splice_nxv2f64 = call @llvm.vector.splice.nxv2f64( zeroinitializer, zeroinitializer, i32 1) -; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %splice_nxv4f64 = call @llvm.vector.splice.nxv4f64( zeroinitializer, zeroinitializer, i32 1) -; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %splice_nxv2bf16 = call @llvm.vector.splice.nxv2bf16( zeroinitializer, zeroinitializer, i32 1) -; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %splice_nxv4bf16 = call @llvm.vector.splice.nxv4bf16( zeroinitializer, zeroinitializer, i32 1) -; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %splice_nxv8bf16 = call @llvm.vector.splice.nxv8bf16( zeroinitializer, zeroinitializer, i32 1) -; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %splice_nxv16bf16 = call @llvm.vector.splice.nxv16bf16( zeroinitializer, zeroinitializer, i32 1) -; CHECK-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %splice_nxv16i1 = call @llvm.vector.splice.nxv16i1( zeroinitializer, zeroinitializer, i32 1) -; CHECK-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %splice_nxv8i1 = call @llvm.vector.splice.nxv8i1( zeroinitializer, zeroinitializer, i32 1) -; CHECK-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %splice_nxv4i1 = call @llvm.vector.splice.nxv4i1( zeroinitializer, zeroinitializer, i32 1) -; CHECK-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %splice_nxv2i1 = call @llvm.vector.splice.nxv2i1( zeroinitializer, zeroinitializer, i32 1) -; CHECK-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %splice_nxv16i8_neg = call @llvm.vector.splice.nxv16i8( zeroinitializer, zeroinitializer, i32 -1) -; CHECK-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %splice_nxv32i8_neg = call @llvm.vector.splice.nxv32i8( zeroinitializer, zeroinitializer, i32 -1) -; CHECK-NEXT: Cost Model: Invalid cost for instruction: %splice_nxv1i16_neg = call @llvm.vector.splice.nxv1i16( zeroinitializer, zeroinitializer, i32 -1) -; CHECK-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %splice_nxv2i16_neg = call @llvm.vector.splice.nxv2i16( zeroinitializer, zeroinitializer, i32 -1) -; CHECK-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %splice_nxv4i16_neg = call @llvm.vector.splice.nxv4i16( zeroinitializer, zeroinitializer, i32 -1) -; CHECK-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %splice_nxv8i16_neg = call @llvm.vector.splice.nxv8i16( zeroinitializer, zeroinitializer, i32 -1) -; CHECK-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %splice_nxv16i16_neg = call @llvm.vector.splice.nxv16i16( zeroinitializer, zeroinitializer, i32 -1) -; CHECK-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %splice_nxv4i32_neg = 
call @llvm.vector.splice.nxv4i32( zeroinitializer, zeroinitializer, i32 -1) -; CHECK-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %splice_nxv8i32_neg = call @llvm.vector.splice.nxv8i32( zeroinitializer, zeroinitializer, i32 -1) -; CHECK-NEXT: Cost Model: Invalid cost for instruction: %splice_nxv1i64_neg = call @llvm.vector.splice.nxv1i64( zeroinitializer, zeroinitializer, i32 -1) -; CHECK-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %splice_nxv2i64_neg = call @llvm.vector.splice.nxv2i64( zeroinitializer, zeroinitializer, i32 -1) -; CHECK-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %splice_nxv4i64_neg = call @llvm.vector.splice.nxv4i64( zeroinitializer, zeroinitializer, i32 -1) -; CHECK-NEXT: Cost Model: Invalid cost for instruction: %splice_nxv1f16_neg = call @llvm.vector.splice.nxv1f16( zeroinitializer, zeroinitializer, i32 -1) -; CHECK-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %splice_nxv2f16_neg = call @llvm.vector.splice.nxv2f16( zeroinitializer, zeroinitializer, i32 -1) -; CHECK-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %splice_nxv4f16_neg = call @llvm.vector.splice.nxv4f16( zeroinitializer, zeroinitializer, i32 -1) -; CHECK-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %splice_nxv8f16_neg = call @llvm.vector.splice.nxv8f16( zeroinitializer, zeroinitializer, i32 -1) -; CHECK-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %splice_nxv16f16_neg = call @llvm.vector.splice.nxv16f16( zeroinitializer, zeroinitializer, i32 -1) -; CHECK-NEXT: Cost Model: Invalid cost for instruction: %splice_nxv1f32_neg = call @llvm.vector.splice.nxv1f32( zeroinitializer, zeroinitializer, i32 -1) -; CHECK-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %splice_nxv2f32_neg = call @llvm.vector.splice.nxv2f32( zeroinitializer, zeroinitializer, i32 -1) -; CHECK-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %splice_nxv4f32_neg = call @llvm.vector.splice.nxv4f32( zeroinitializer, zeroinitializer, i32 -1) -; CHECK-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %splice_nxv8f32_neg = call @llvm.vector.splice.nxv8f32( zeroinitializer, zeroinitializer, i32 -1) -; CHECK-NEXT: Cost Model: Invalid cost for instruction: %splice_nxv1f64_neg = call @llvm.vector.splice.nxv1f64( zeroinitializer, zeroinitializer, i32 -1) -; CHECK-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %splice_nxv2f64_neg = call @llvm.vector.splice.nxv2f64( zeroinitializer, zeroinitializer, i32 -1) -; CHECK-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %splice_nxv4f64_neg = call @llvm.vector.splice.nxv4f64( zeroinitializer, zeroinitializer, i32 -1) -; CHECK-NEXT: Cost Model: Invalid cost for instruction: %splice_nxv1bf16_neg = call @llvm.vector.splice.nxv1bf16( zeroinitializer, zeroinitializer, i32 -1) -; CHECK-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %splice_nxv2bf16_neg = call @llvm.vector.splice.nxv2bf16( zeroinitializer, zeroinitializer, i32 -1) -; CHECK-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %splice_nxv4bf16_neg = call @llvm.vector.splice.nxv4bf16( zeroinitializer, zeroinitializer, i32 -1) -; CHECK-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %splice_nxv8bf16_neg = call @llvm.vector.splice.nxv8bf16( zeroinitializer, zeroinitializer, i32 -1) -; CHECK-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %splice_nxv16bf16_neg = call 
@llvm.vector.splice.nxv16bf16( zeroinitializer, zeroinitializer, i32 -1) -; CHECK-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %splice_nxv16i1_neg = call @llvm.vector.splice.nxv16i1( zeroinitializer, zeroinitializer, i32 -1) -; CHECK-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %splice_nxv8i1_neg = call @llvm.vector.splice.nxv8i1( zeroinitializer, zeroinitializer, i32 -1) -; CHECK-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %splice_nxv4i1_neg = call @llvm.vector.splice.nxv4i1( zeroinitializer, zeroinitializer, i32 -1) -; CHECK-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %splice_nxv2i1_neg = call @llvm.vector.splice.nxv2i1( zeroinitializer, zeroinitializer, i32 -1) -; CHECK-NEXT: Cost Model: Invalid cost for instruction: %splice_nxv1i1_neg = call @llvm.vector.splice.nxv1i1( zeroinitializer, zeroinitializer, i32 -1) -; CHECK-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void +; CHECK-VSCALE-1-LABEL: 'vector_splice' +; CHECK-VSCALE-1-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %splice_nxv16i8 = call @llvm.vector.splice.nxv16i8( zeroinitializer, zeroinitializer, i32 1) +; CHECK-VSCALE-1-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %splice_nxv32i8 = call @llvm.vector.splice.nxv32i8( zeroinitializer, zeroinitializer, i32 1) +; CHECK-VSCALE-1-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %splice_nxv2i16 = call @llvm.vector.splice.nxv2i16( zeroinitializer, zeroinitializer, i32 1) +; CHECK-VSCALE-1-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %splice_nxv4i16 = call @llvm.vector.splice.nxv4i16( zeroinitializer, zeroinitializer, i32 1) +; CHECK-VSCALE-1-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %splice_nxv8i16 = call @llvm.vector.splice.nxv8i16( zeroinitializer, zeroinitializer, i32 1) +; CHECK-VSCALE-1-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %splice_nxv16i16 = call @llvm.vector.splice.nxv16i16( zeroinitializer, zeroinitializer, i32 1) +; CHECK-VSCALE-1-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %splice_nxv4i32 = call @llvm.vector.splice.nxv4i32( zeroinitializer, zeroinitializer, i32 1) +; CHECK-VSCALE-1-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %splice_nxv8i32 = call @llvm.vector.splice.nxv8i32( zeroinitializer, zeroinitializer, i32 1) +; CHECK-VSCALE-1-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %splice_nxv2i64 = call @llvm.vector.splice.nxv2i64( zeroinitializer, zeroinitializer, i32 1) +; CHECK-VSCALE-1-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %splice_nxv4i64 = call @llvm.vector.splice.nxv4i64( zeroinitializer, zeroinitializer, i32 1) +; CHECK-VSCALE-1-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %splice_nxv2f16 = call @llvm.vector.splice.nxv2f16( zeroinitializer, zeroinitializer, i32 1) +; CHECK-VSCALE-1-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %splice_nxv4f16 = call @llvm.vector.splice.nxv4f16( zeroinitializer, zeroinitializer, i32 1) +; CHECK-VSCALE-1-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %splice_nxv8f16 = call @llvm.vector.splice.nxv8f16( zeroinitializer, zeroinitializer, i32 1) +; CHECK-VSCALE-1-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %splice_nxv16f16 = call @llvm.vector.splice.nxv16f16( zeroinitializer, zeroinitializer, i32 1) +; CHECK-VSCALE-1-NEXT: Cost Model: Found an estimated cost of 1 for 
instruction: %splice_nxv2f32 = call @llvm.vector.splice.nxv2f32( zeroinitializer, zeroinitializer, i32 1) +; CHECK-VSCALE-1-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %splice_nxv4f32 = call @llvm.vector.splice.nxv4f32( zeroinitializer, zeroinitializer, i32 1) +; CHECK-VSCALE-1-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %splice_nxv8f32 = call @llvm.vector.splice.nxv8f32( zeroinitializer, zeroinitializer, i32 1) +; CHECK-VSCALE-1-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %splice_nxv2f64 = call @llvm.vector.splice.nxv2f64( zeroinitializer, zeroinitializer, i32 1) +; CHECK-VSCALE-1-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %splice_nxv4f64 = call @llvm.vector.splice.nxv4f64( zeroinitializer, zeroinitializer, i32 1) +; CHECK-VSCALE-1-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %splice_nxv2bf16 = call @llvm.vector.splice.nxv2bf16( zeroinitializer, zeroinitializer, i32 1) +; CHECK-VSCALE-1-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %splice_nxv4bf16 = call @llvm.vector.splice.nxv4bf16( zeroinitializer, zeroinitializer, i32 1) +; CHECK-VSCALE-1-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %splice_nxv8bf16 = call @llvm.vector.splice.nxv8bf16( zeroinitializer, zeroinitializer, i32 1) +; CHECK-VSCALE-1-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %splice_nxv16bf16 = call @llvm.vector.splice.nxv16bf16( zeroinitializer, zeroinitializer, i32 1) +; CHECK-VSCALE-1-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %splice_nxv16i1 = call @llvm.vector.splice.nxv16i1( zeroinitializer, zeroinitializer, i32 1) +; CHECK-VSCALE-1-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %splice_nxv8i1 = call @llvm.vector.splice.nxv8i1( zeroinitializer, zeroinitializer, i32 1) +; CHECK-VSCALE-1-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %splice_nxv4i1 = call @llvm.vector.splice.nxv4i1( zeroinitializer, zeroinitializer, i32 1) +; CHECK-VSCALE-1-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %splice_nxv2i1 = call @llvm.vector.splice.nxv2i1( zeroinitializer, zeroinitializer, i32 1) +; CHECK-VSCALE-1-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %splice_nxv16i8_neg = call @llvm.vector.splice.nxv16i8( zeroinitializer, zeroinitializer, i32 -1) +; CHECK-VSCALE-1-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %splice_nxv32i8_neg = call @llvm.vector.splice.nxv32i8( zeroinitializer, zeroinitializer, i32 -1) +; CHECK-VSCALE-1-NEXT: Cost Model: Invalid cost for instruction: %splice_nxv1i16_neg = call @llvm.vector.splice.nxv1i16( zeroinitializer, zeroinitializer, i32 -1) +; CHECK-VSCALE-1-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %splice_nxv2i16_neg = call @llvm.vector.splice.nxv2i16( zeroinitializer, zeroinitializer, i32 -1) +; CHECK-VSCALE-1-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %splice_nxv4i16_neg = call @llvm.vector.splice.nxv4i16( zeroinitializer, zeroinitializer, i32 -1) +; CHECK-VSCALE-1-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %splice_nxv8i16_neg = call @llvm.vector.splice.nxv8i16( zeroinitializer, zeroinitializer, i32 -1) +; CHECK-VSCALE-1-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %splice_nxv16i16_neg = call @llvm.vector.splice.nxv16i16( zeroinitializer, zeroinitializer, i32 -1) +; CHECK-VSCALE-1-NEXT: Cost Model: Found an estimated cost of 3 for instruction: 
%splice_nxv4i32_neg = call @llvm.vector.splice.nxv4i32( zeroinitializer, zeroinitializer, i32 -1) +; CHECK-VSCALE-1-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %splice_nxv8i32_neg = call @llvm.vector.splice.nxv8i32( zeroinitializer, zeroinitializer, i32 -1) +; CHECK-VSCALE-1-NEXT: Cost Model: Invalid cost for instruction: %splice_nxv1i64_neg = call @llvm.vector.splice.nxv1i64( zeroinitializer, zeroinitializer, i32 -1) +; CHECK-VSCALE-1-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %splice_nxv2i64_neg = call @llvm.vector.splice.nxv2i64( zeroinitializer, zeroinitializer, i32 -1) +; CHECK-VSCALE-1-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %splice_nxv4i64_neg = call @llvm.vector.splice.nxv4i64( zeroinitializer, zeroinitializer, i32 -1) +; CHECK-VSCALE-1-NEXT: Cost Model: Invalid cost for instruction: %splice_nxv1f16_neg = call @llvm.vector.splice.nxv1f16( zeroinitializer, zeroinitializer, i32 -1) +; CHECK-VSCALE-1-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %splice_nxv2f16_neg = call @llvm.vector.splice.nxv2f16( zeroinitializer, zeroinitializer, i32 -1) +; CHECK-VSCALE-1-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %splice_nxv4f16_neg = call @llvm.vector.splice.nxv4f16( zeroinitializer, zeroinitializer, i32 -1) +; CHECK-VSCALE-1-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %splice_nxv8f16_neg = call @llvm.vector.splice.nxv8f16( zeroinitializer, zeroinitializer, i32 -1) +; CHECK-VSCALE-1-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %splice_nxv16f16_neg = call @llvm.vector.splice.nxv16f16( zeroinitializer, zeroinitializer, i32 -1) +; CHECK-VSCALE-1-NEXT: Cost Model: Invalid cost for instruction: %splice_nxv1f32_neg = call @llvm.vector.splice.nxv1f32( zeroinitializer, zeroinitializer, i32 -1) +; CHECK-VSCALE-1-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %splice_nxv2f32_neg = call @llvm.vector.splice.nxv2f32( zeroinitializer, zeroinitializer, i32 -1) +; CHECK-VSCALE-1-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %splice_nxv4f32_neg = call @llvm.vector.splice.nxv4f32( zeroinitializer, zeroinitializer, i32 -1) +; CHECK-VSCALE-1-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %splice_nxv8f32_neg = call @llvm.vector.splice.nxv8f32( zeroinitializer, zeroinitializer, i32 -1) +; CHECK-VSCALE-1-NEXT: Cost Model: Invalid cost for instruction: %splice_nxv1f64_neg = call @llvm.vector.splice.nxv1f64( zeroinitializer, zeroinitializer, i32 -1) +; CHECK-VSCALE-1-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %splice_nxv2f64_neg = call @llvm.vector.splice.nxv2f64( zeroinitializer, zeroinitializer, i32 -1) +; CHECK-VSCALE-1-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %splice_nxv4f64_neg = call @llvm.vector.splice.nxv4f64( zeroinitializer, zeroinitializer, i32 -1) +; CHECK-VSCALE-1-NEXT: Cost Model: Invalid cost for instruction: %splice_nxv1bf16_neg = call @llvm.vector.splice.nxv1bf16( zeroinitializer, zeroinitializer, i32 -1) +; CHECK-VSCALE-1-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %splice_nxv2bf16_neg = call @llvm.vector.splice.nxv2bf16( zeroinitializer, zeroinitializer, i32 -1) +; CHECK-VSCALE-1-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %splice_nxv4bf16_neg = call @llvm.vector.splice.nxv4bf16( zeroinitializer, zeroinitializer, i32 -1) +; CHECK-VSCALE-1-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %splice_nxv8bf16_neg = 
call @llvm.vector.splice.nxv8bf16( zeroinitializer, zeroinitializer, i32 -1) +; CHECK-VSCALE-1-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %splice_nxv16bf16_neg = call @llvm.vector.splice.nxv16bf16( zeroinitializer, zeroinitializer, i32 -1) +; CHECK-VSCALE-1-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %splice_nxv16i1_neg = call @llvm.vector.splice.nxv16i1( zeroinitializer, zeroinitializer, i32 -1) +; CHECK-VSCALE-1-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %splice_nxv8i1_neg = call @llvm.vector.splice.nxv8i1( zeroinitializer, zeroinitializer, i32 -1) +; CHECK-VSCALE-1-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %splice_nxv4i1_neg = call @llvm.vector.splice.nxv4i1( zeroinitializer, zeroinitializer, i32 -1) +; CHECK-VSCALE-1-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %splice_nxv2i1_neg = call @llvm.vector.splice.nxv2i1( zeroinitializer, zeroinitializer, i32 -1) +; CHECK-VSCALE-1-NEXT: Cost Model: Invalid cost for instruction: %splice_nxv1i1_neg = call @llvm.vector.splice.nxv1i1( zeroinitializer, zeroinitializer, i32 -1) +; CHECK-VSCALE-1-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void +; +; CHECK-VSCALE-2-LABEL: 'vector_splice' +; CHECK-VSCALE-2-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %splice_nxv16i8 = call @llvm.vector.splice.nxv16i8( zeroinitializer, zeroinitializer, i32 1) +; CHECK-VSCALE-2-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %splice_nxv32i8 = call @llvm.vector.splice.nxv32i8( zeroinitializer, zeroinitializer, i32 1) +; CHECK-VSCALE-2-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %splice_nxv2i16 = call @llvm.vector.splice.nxv2i16( zeroinitializer, zeroinitializer, i32 1) +; CHECK-VSCALE-2-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %splice_nxv4i16 = call @llvm.vector.splice.nxv4i16( zeroinitializer, zeroinitializer, i32 1) +; CHECK-VSCALE-2-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %splice_nxv8i16 = call @llvm.vector.splice.nxv8i16( zeroinitializer, zeroinitializer, i32 1) +; CHECK-VSCALE-2-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %splice_nxv16i16 = call @llvm.vector.splice.nxv16i16( zeroinitializer, zeroinitializer, i32 1) +; CHECK-VSCALE-2-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %splice_nxv4i32 = call @llvm.vector.splice.nxv4i32( zeroinitializer, zeroinitializer, i32 1) +; CHECK-VSCALE-2-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %splice_nxv8i32 = call @llvm.vector.splice.nxv8i32( zeroinitializer, zeroinitializer, i32 1) +; CHECK-VSCALE-2-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %splice_nxv2i64 = call @llvm.vector.splice.nxv2i64( zeroinitializer, zeroinitializer, i32 1) +; CHECK-VSCALE-2-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %splice_nxv4i64 = call @llvm.vector.splice.nxv4i64( zeroinitializer, zeroinitializer, i32 1) +; CHECK-VSCALE-2-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %splice_nxv2f16 = call @llvm.vector.splice.nxv2f16( zeroinitializer, zeroinitializer, i32 1) +; CHECK-VSCALE-2-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %splice_nxv4f16 = call @llvm.vector.splice.nxv4f16( zeroinitializer, zeroinitializer, i32 1) +; CHECK-VSCALE-2-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %splice_nxv8f16 = call @llvm.vector.splice.nxv8f16( zeroinitializer, zeroinitializer, i32 1) +; 
CHECK-VSCALE-2-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %splice_nxv16f16 = call @llvm.vector.splice.nxv16f16( zeroinitializer, zeroinitializer, i32 1) +; CHECK-VSCALE-2-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %splice_nxv2f32 = call @llvm.vector.splice.nxv2f32( zeroinitializer, zeroinitializer, i32 1) +; CHECK-VSCALE-2-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %splice_nxv4f32 = call @llvm.vector.splice.nxv4f32( zeroinitializer, zeroinitializer, i32 1) +; CHECK-VSCALE-2-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %splice_nxv8f32 = call @llvm.vector.splice.nxv8f32( zeroinitializer, zeroinitializer, i32 1) +; CHECK-VSCALE-2-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %splice_nxv2f64 = call @llvm.vector.splice.nxv2f64( zeroinitializer, zeroinitializer, i32 1) +; CHECK-VSCALE-2-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %splice_nxv4f64 = call @llvm.vector.splice.nxv4f64( zeroinitializer, zeroinitializer, i32 1) +; CHECK-VSCALE-2-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %splice_nxv2bf16 = call @llvm.vector.splice.nxv2bf16( zeroinitializer, zeroinitializer, i32 1) +; CHECK-VSCALE-2-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %splice_nxv4bf16 = call @llvm.vector.splice.nxv4bf16( zeroinitializer, zeroinitializer, i32 1) +; CHECK-VSCALE-2-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %splice_nxv8bf16 = call @llvm.vector.splice.nxv8bf16( zeroinitializer, zeroinitializer, i32 1) +; CHECK-VSCALE-2-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %splice_nxv16bf16 = call @llvm.vector.splice.nxv16bf16( zeroinitializer, zeroinitializer, i32 1) +; CHECK-VSCALE-2-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %splice_nxv16i1 = call @llvm.vector.splice.nxv16i1( zeroinitializer, zeroinitializer, i32 1) +; CHECK-VSCALE-2-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %splice_nxv8i1 = call @llvm.vector.splice.nxv8i1( zeroinitializer, zeroinitializer, i32 1) +; CHECK-VSCALE-2-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %splice_nxv4i1 = call @llvm.vector.splice.nxv4i1( zeroinitializer, zeroinitializer, i32 1) +; CHECK-VSCALE-2-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %splice_nxv2i1 = call @llvm.vector.splice.nxv2i1( zeroinitializer, zeroinitializer, i32 1) +; CHECK-VSCALE-2-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %splice_nxv16i8_neg = call @llvm.vector.splice.nxv16i8( zeroinitializer, zeroinitializer, i32 -1) +; CHECK-VSCALE-2-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %splice_nxv32i8_neg = call @llvm.vector.splice.nxv32i8( zeroinitializer, zeroinitializer, i32 -1) +; CHECK-VSCALE-2-NEXT: Cost Model: Invalid cost for instruction: %splice_nxv1i16_neg = call @llvm.vector.splice.nxv1i16( zeroinitializer, zeroinitializer, i32 -1) +; CHECK-VSCALE-2-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %splice_nxv2i16_neg = call @llvm.vector.splice.nxv2i16( zeroinitializer, zeroinitializer, i32 -1) +; CHECK-VSCALE-2-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %splice_nxv4i16_neg = call @llvm.vector.splice.nxv4i16( zeroinitializer, zeroinitializer, i32 -1) +; CHECK-VSCALE-2-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %splice_nxv8i16_neg = call @llvm.vector.splice.nxv8i16( zeroinitializer, zeroinitializer, i32 -1) +; CHECK-VSCALE-2-NEXT: Cost Model: 
Found an estimated cost of 6 for instruction: %splice_nxv16i16_neg = call @llvm.vector.splice.nxv16i16( zeroinitializer, zeroinitializer, i32 -1) +; CHECK-VSCALE-2-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %splice_nxv4i32_neg = call @llvm.vector.splice.nxv4i32( zeroinitializer, zeroinitializer, i32 -1) +; CHECK-VSCALE-2-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %splice_nxv8i32_neg = call @llvm.vector.splice.nxv8i32( zeroinitializer, zeroinitializer, i32 -1) +; CHECK-VSCALE-2-NEXT: Cost Model: Invalid cost for instruction: %splice_nxv1i64_neg = call @llvm.vector.splice.nxv1i64( zeroinitializer, zeroinitializer, i32 -1) +; CHECK-VSCALE-2-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %splice_nxv2i64_neg = call @llvm.vector.splice.nxv2i64( zeroinitializer, zeroinitializer, i32 -1) +; CHECK-VSCALE-2-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %splice_nxv4i64_neg = call @llvm.vector.splice.nxv4i64( zeroinitializer, zeroinitializer, i32 -1) +; CHECK-VSCALE-2-NEXT: Cost Model: Invalid cost for instruction: %splice_nxv1f16_neg = call @llvm.vector.splice.nxv1f16( zeroinitializer, zeroinitializer, i32 -1) +; CHECK-VSCALE-2-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %splice_nxv2f16_neg = call @llvm.vector.splice.nxv2f16( zeroinitializer, zeroinitializer, i32 -1) +; CHECK-VSCALE-2-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %splice_nxv4f16_neg = call @llvm.vector.splice.nxv4f16( zeroinitializer, zeroinitializer, i32 -1) +; CHECK-VSCALE-2-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %splice_nxv8f16_neg = call @llvm.vector.splice.nxv8f16( zeroinitializer, zeroinitializer, i32 -1) +; CHECK-VSCALE-2-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %splice_nxv16f16_neg = call @llvm.vector.splice.nxv16f16( zeroinitializer, zeroinitializer, i32 -1) +; CHECK-VSCALE-2-NEXT: Cost Model: Invalid cost for instruction: %splice_nxv1f32_neg = call @llvm.vector.splice.nxv1f32( zeroinitializer, zeroinitializer, i32 -1) +; CHECK-VSCALE-2-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %splice_nxv2f32_neg = call @llvm.vector.splice.nxv2f32( zeroinitializer, zeroinitializer, i32 -1) +; CHECK-VSCALE-2-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %splice_nxv4f32_neg = call @llvm.vector.splice.nxv4f32( zeroinitializer, zeroinitializer, i32 -1) +; CHECK-VSCALE-2-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %splice_nxv8f32_neg = call @llvm.vector.splice.nxv8f32( zeroinitializer, zeroinitializer, i32 -1) +; CHECK-VSCALE-2-NEXT: Cost Model: Invalid cost for instruction: %splice_nxv1f64_neg = call @llvm.vector.splice.nxv1f64( zeroinitializer, zeroinitializer, i32 -1) +; CHECK-VSCALE-2-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %splice_nxv2f64_neg = call @llvm.vector.splice.nxv2f64( zeroinitializer, zeroinitializer, i32 -1) +; CHECK-VSCALE-2-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %splice_nxv4f64_neg = call @llvm.vector.splice.nxv4f64( zeroinitializer, zeroinitializer, i32 -1) +; CHECK-VSCALE-2-NEXT: Cost Model: Invalid cost for instruction: %splice_nxv1bf16_neg = call @llvm.vector.splice.nxv1bf16( zeroinitializer, zeroinitializer, i32 -1) +; CHECK-VSCALE-2-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %splice_nxv2bf16_neg = call @llvm.vector.splice.nxv2bf16( zeroinitializer, zeroinitializer, i32 -1) +; CHECK-VSCALE-2-NEXT: Cost Model: Found an estimated cost of 
3 for instruction: %splice_nxv4bf16_neg = call @llvm.vector.splice.nxv4bf16( zeroinitializer, zeroinitializer, i32 -1) +; CHECK-VSCALE-2-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %splice_nxv8bf16_neg = call @llvm.vector.splice.nxv8bf16( zeroinitializer, zeroinitializer, i32 -1) +; CHECK-VSCALE-2-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %splice_nxv16bf16_neg = call @llvm.vector.splice.nxv16bf16( zeroinitializer, zeroinitializer, i32 -1) +; CHECK-VSCALE-2-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %splice_nxv16i1_neg = call @llvm.vector.splice.nxv16i1( zeroinitializer, zeroinitializer, i32 -1) +; CHECK-VSCALE-2-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %splice_nxv8i1_neg = call @llvm.vector.splice.nxv8i1( zeroinitializer, zeroinitializer, i32 -1) +; CHECK-VSCALE-2-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %splice_nxv4i1_neg = call @llvm.vector.splice.nxv4i1( zeroinitializer, zeroinitializer, i32 -1) +; CHECK-VSCALE-2-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %splice_nxv2i1_neg = call @llvm.vector.splice.nxv2i1( zeroinitializer, zeroinitializer, i32 -1) +; CHECK-VSCALE-2-NEXT: Cost Model: Invalid cost for instruction: %splice_nxv1i1_neg = call @llvm.vector.splice.nxv1i1( zeroinitializer, zeroinitializer, i32 -1) +; CHECK-VSCALE-2-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void ; ; TYPE_BASED_ONLY-LABEL: 'vector_splice' ; TYPE_BASED_ONLY-NEXT: Cost Model: Invalid cost for instruction: %splice_nxv16i8 = call @llvm.vector.splice.nxv16i8( zeroinitializer, zeroinitializer, i32 1) @@ -718,28 +911,51 @@ declare @llvm.vector.splice.nxv2f64(, declare @llvm.vector.splice.nxv4f64(, , i32) define void @get_lane_mask() #0 { -; CHECK-LABEL: 'get_lane_mask' -; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %mask_nxv16i1_i64 = call @llvm.get.active.lane.mask.nxv16i1.i64(i64 undef, i64 undef) -; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %mask_nxv8i1_i64 = call @llvm.get.active.lane.mask.nxv8i1.i64(i64 undef, i64 undef) -; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %mask_nxv4i1_i64 = call @llvm.get.active.lane.mask.nxv4i1.i64(i64 undef, i64 undef) -; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %mask_nxv2i1_i64 = call @llvm.get.active.lane.mask.nxv2i1.i64(i64 undef, i64 undef) -; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %mask_nxv16i1_i32 = call @llvm.get.active.lane.mask.nxv16i1.i32(i32 undef, i32 undef) -; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %mask_nxv8i1_i32 = call @llvm.get.active.lane.mask.nxv8i1.i32(i32 undef, i32 undef) -; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %mask_nxv4i1_i32 = call @llvm.get.active.lane.mask.nxv4i1.i32(i32 undef, i32 undef) -; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %mask_nxv2i1_i32 = call @llvm.get.active.lane.mask.nxv2i1.i32(i32 undef, i32 undef) -; CHECK-NEXT: Cost Model: Found an estimated cost of 48 for instruction: %mask_nxv32i1_i64 = call @llvm.get.active.lane.mask.nxv32i1.i64(i64 undef, i64 undef) -; CHECK-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %mask_nxv16i1_i16 = call @llvm.get.active.lane.mask.nxv16i1.i16(i16 undef, i16 undef) -; CHECK-NEXT: Cost Model: Found an estimated cost of 32 for instruction: %mask_v16i1_i64 = call <16 x i1> @llvm.get.active.lane.mask.v16i1.i64(i64 
undef, i64 undef) -; CHECK-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %mask_v8i1_i64 = call <8 x i1> @llvm.get.active.lane.mask.v8i1.i64(i64 undef, i64 undef) -; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %mask_v4i1_i64 = call <4 x i1> @llvm.get.active.lane.mask.v4i1.i64(i64 undef, i64 undef) -; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %mask_v2i1_i64 = call <2 x i1> @llvm.get.active.lane.mask.v2i1.i64(i64 undef, i64 undef) -; CHECK-NEXT: Cost Model: Found an estimated cost of 32 for instruction: %mask_v16i1_i32 = call <16 x i1> @llvm.get.active.lane.mask.v16i1.i32(i32 undef, i32 undef) -; CHECK-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %mask_v8i1_i32 = call <8 x i1> @llvm.get.active.lane.mask.v8i1.i32(i32 undef, i32 undef) -; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %mask_v4i1_i32 = call <4 x i1> @llvm.get.active.lane.mask.v4i1.i32(i32 undef, i32 undef) -; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %mask_v2i1_i32 = call <2 x i1> @llvm.get.active.lane.mask.v2i1.i32(i32 undef, i32 undef) -; CHECK-NEXT: Cost Model: Found an estimated cost of 48 for instruction: %mask_v32i1_i64 = call <32 x i1> @llvm.get.active.lane.mask.v32i1.i64(i64 undef, i64 undef) -; CHECK-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %mask_v16i1_i16 = call <16 x i1> @llvm.get.active.lane.mask.v16i1.i16(i16 undef, i16 undef) -; CHECK-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void +; CHECK-VSCALE-1-LABEL: 'get_lane_mask' +; CHECK-VSCALE-1-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %mask_nxv16i1_i64 = call @llvm.get.active.lane.mask.nxv16i1.i64(i64 undef, i64 undef) +; CHECK-VSCALE-1-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %mask_nxv8i1_i64 = call @llvm.get.active.lane.mask.nxv8i1.i64(i64 undef, i64 undef) +; CHECK-VSCALE-1-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %mask_nxv4i1_i64 = call @llvm.get.active.lane.mask.nxv4i1.i64(i64 undef, i64 undef) +; CHECK-VSCALE-1-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %mask_nxv2i1_i64 = call @llvm.get.active.lane.mask.nxv2i1.i64(i64 undef, i64 undef) +; CHECK-VSCALE-1-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %mask_nxv16i1_i32 = call @llvm.get.active.lane.mask.nxv16i1.i32(i32 undef, i32 undef) +; CHECK-VSCALE-1-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %mask_nxv8i1_i32 = call @llvm.get.active.lane.mask.nxv8i1.i32(i32 undef, i32 undef) +; CHECK-VSCALE-1-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %mask_nxv4i1_i32 = call @llvm.get.active.lane.mask.nxv4i1.i32(i32 undef, i32 undef) +; CHECK-VSCALE-1-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %mask_nxv2i1_i32 = call @llvm.get.active.lane.mask.nxv2i1.i32(i32 undef, i32 undef) +; CHECK-VSCALE-1-NEXT: Cost Model: Found an estimated cost of 48 for instruction: %mask_nxv32i1_i64 = call @llvm.get.active.lane.mask.nxv32i1.i64(i64 undef, i64 undef) +; CHECK-VSCALE-1-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %mask_nxv16i1_i16 = call @llvm.get.active.lane.mask.nxv16i1.i16(i16 undef, i16 undef) +; CHECK-VSCALE-1-NEXT: Cost Model: Found an estimated cost of 32 for instruction: %mask_v16i1_i64 = call <16 x i1> @llvm.get.active.lane.mask.v16i1.i64(i64 undef, i64 undef) +; CHECK-VSCALE-1-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %mask_v8i1_i64 
= call <8 x i1> @llvm.get.active.lane.mask.v8i1.i64(i64 undef, i64 undef) +; CHECK-VSCALE-1-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %mask_v4i1_i64 = call <4 x i1> @llvm.get.active.lane.mask.v4i1.i64(i64 undef, i64 undef) +; CHECK-VSCALE-1-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %mask_v2i1_i64 = call <2 x i1> @llvm.get.active.lane.mask.v2i1.i64(i64 undef, i64 undef) +; CHECK-VSCALE-1-NEXT: Cost Model: Found an estimated cost of 32 for instruction: %mask_v16i1_i32 = call <16 x i1> @llvm.get.active.lane.mask.v16i1.i32(i32 undef, i32 undef) +; CHECK-VSCALE-1-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %mask_v8i1_i32 = call <8 x i1> @llvm.get.active.lane.mask.v8i1.i32(i32 undef, i32 undef) +; CHECK-VSCALE-1-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %mask_v4i1_i32 = call <4 x i1> @llvm.get.active.lane.mask.v4i1.i32(i32 undef, i32 undef) +; CHECK-VSCALE-1-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %mask_v2i1_i32 = call <2 x i1> @llvm.get.active.lane.mask.v2i1.i32(i32 undef, i32 undef) +; CHECK-VSCALE-1-NEXT: Cost Model: Found an estimated cost of 48 for instruction: %mask_v32i1_i64 = call <32 x i1> @llvm.get.active.lane.mask.v32i1.i64(i64 undef, i64 undef) +; CHECK-VSCALE-1-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %mask_v16i1_i16 = call <16 x i1> @llvm.get.active.lane.mask.v16i1.i16(i16 undef, i16 undef) +; CHECK-VSCALE-1-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void +; +; CHECK-VSCALE-2-LABEL: 'get_lane_mask' +; CHECK-VSCALE-2-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %mask_nxv16i1_i64 = call @llvm.get.active.lane.mask.nxv16i1.i64(i64 undef, i64 undef) +; CHECK-VSCALE-2-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %mask_nxv8i1_i64 = call @llvm.get.active.lane.mask.nxv8i1.i64(i64 undef, i64 undef) +; CHECK-VSCALE-2-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %mask_nxv4i1_i64 = call @llvm.get.active.lane.mask.nxv4i1.i64(i64 undef, i64 undef) +; CHECK-VSCALE-2-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %mask_nxv2i1_i64 = call @llvm.get.active.lane.mask.nxv2i1.i64(i64 undef, i64 undef) +; CHECK-VSCALE-2-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %mask_nxv16i1_i32 = call @llvm.get.active.lane.mask.nxv16i1.i32(i32 undef, i32 undef) +; CHECK-VSCALE-2-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %mask_nxv8i1_i32 = call @llvm.get.active.lane.mask.nxv8i1.i32(i32 undef, i32 undef) +; CHECK-VSCALE-2-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %mask_nxv4i1_i32 = call @llvm.get.active.lane.mask.nxv4i1.i32(i32 undef, i32 undef) +; CHECK-VSCALE-2-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %mask_nxv2i1_i32 = call @llvm.get.active.lane.mask.nxv2i1.i32(i32 undef, i32 undef) +; CHECK-VSCALE-2-NEXT: Cost Model: Found an estimated cost of 48 for instruction: %mask_nxv32i1_i64 = call @llvm.get.active.lane.mask.nxv32i1.i64(i64 undef, i64 undef) +; CHECK-VSCALE-2-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %mask_nxv16i1_i16 = call @llvm.get.active.lane.mask.nxv16i1.i16(i16 undef, i16 undef) +; CHECK-VSCALE-2-NEXT: Cost Model: Found an estimated cost of 32 for instruction: %mask_v16i1_i64 = call <16 x i1> @llvm.get.active.lane.mask.v16i1.i64(i64 undef, i64 undef) +; CHECK-VSCALE-2-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %mask_v8i1_i64 = call <8 x i1> 
@llvm.get.active.lane.mask.v8i1.i64(i64 undef, i64 undef) +; CHECK-VSCALE-2-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %mask_v4i1_i64 = call <4 x i1> @llvm.get.active.lane.mask.v4i1.i64(i64 undef, i64 undef) +; CHECK-VSCALE-2-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %mask_v2i1_i64 = call <2 x i1> @llvm.get.active.lane.mask.v2i1.i64(i64 undef, i64 undef) +; CHECK-VSCALE-2-NEXT: Cost Model: Found an estimated cost of 32 for instruction: %mask_v16i1_i32 = call <16 x i1> @llvm.get.active.lane.mask.v16i1.i32(i32 undef, i32 undef) +; CHECK-VSCALE-2-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %mask_v8i1_i32 = call <8 x i1> @llvm.get.active.lane.mask.v8i1.i32(i32 undef, i32 undef) +; CHECK-VSCALE-2-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %mask_v4i1_i32 = call <4 x i1> @llvm.get.active.lane.mask.v4i1.i32(i32 undef, i32 undef) +; CHECK-VSCALE-2-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %mask_v2i1_i32 = call <2 x i1> @llvm.get.active.lane.mask.v2i1.i32(i32 undef, i32 undef) +; CHECK-VSCALE-2-NEXT: Cost Model: Found an estimated cost of 48 for instruction: %mask_v32i1_i64 = call <32 x i1> @llvm.get.active.lane.mask.v32i1.i64(i64 undef, i64 undef) +; CHECK-VSCALE-2-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %mask_v16i1_i16 = call <16 x i1> @llvm.get.active.lane.mask.v16i1.i16(i16 undef, i16 undef) +; CHECK-VSCALE-2-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void ; ; TYPE_BASED_ONLY-LABEL: 'get_lane_mask' ; TYPE_BASED_ONLY-NEXT: Cost Model: Invalid cost for instruction: %mask_nxv16i1_i64 = call @llvm.get.active.lane.mask.nxv16i1.i64(i64 undef, i64 undef) @@ -794,12 +1010,19 @@ define void @get_lane_mask() #0 { } define void @fshr() #0 { -; CHECK-LABEL: 'fshr' -; CHECK-NEXT: Cost Model: Found an estimated cost of 13 for instruction: %1 = call @llvm.fshr.nxv16i8( undef, undef, undef) -; CHECK-NEXT: Cost Model: Found an estimated cost of 13 for instruction: %2 = call @llvm.fshr.nxv8i16( undef, undef, undef) -; CHECK-NEXT: Cost Model: Found an estimated cost of 13 for instruction: %3 = call @llvm.fshr.nxv4i32( undef, undef, undef) -; CHECK-NEXT: Cost Model: Found an estimated cost of 13 for instruction: %4 = call @llvm.fshr.nxv2i64( undef, undef, undef) -; CHECK-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void +; CHECK-VSCALE-1-LABEL: 'fshr' +; CHECK-VSCALE-1-NEXT: Cost Model: Found an estimated cost of 13 for instruction: %1 = call @llvm.fshr.nxv16i8( undef, undef, undef) +; CHECK-VSCALE-1-NEXT: Cost Model: Found an estimated cost of 13 for instruction: %2 = call @llvm.fshr.nxv8i16( undef, undef, undef) +; CHECK-VSCALE-1-NEXT: Cost Model: Found an estimated cost of 13 for instruction: %3 = call @llvm.fshr.nxv4i32( undef, undef, undef) +; CHECK-VSCALE-1-NEXT: Cost Model: Found an estimated cost of 13 for instruction: %4 = call @llvm.fshr.nxv2i64( undef, undef, undef) +; CHECK-VSCALE-1-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void +; +; CHECK-VSCALE-2-LABEL: 'fshr' +; CHECK-VSCALE-2-NEXT: Cost Model: Found an estimated cost of 13 for instruction: %1 = call @llvm.fshr.nxv16i8( undef, undef, undef) +; CHECK-VSCALE-2-NEXT: Cost Model: Found an estimated cost of 13 for instruction: %2 = call @llvm.fshr.nxv8i16( undef, undef, undef) +; CHECK-VSCALE-2-NEXT: Cost Model: Found an estimated cost of 13 for instruction: %3 = call @llvm.fshr.nxv4i32( undef, undef, undef) +; CHECK-VSCALE-2-NEXT: Cost Model: Found an 
estimated cost of 13 for instruction: %4 = call @llvm.fshr.nxv2i64( undef, undef, undef) +; CHECK-VSCALE-2-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void ; ; TYPE_BASED_ONLY-LABEL: 'fshr' ; TYPE_BASED_ONLY-NEXT: Cost Model: Invalid cost for instruction: %1 = call @llvm.fshr.nxv16i8( undef, undef, undef) @@ -816,12 +1039,19 @@ define void @fshr() #0 { } define void @fshl() #0 { -; CHECK-LABEL: 'fshl' -; CHECK-NEXT: Cost Model: Found an estimated cost of 13 for instruction: %1 = call @llvm.fshl.nxv16i8( undef, undef, undef) -; CHECK-NEXT: Cost Model: Found an estimated cost of 13 for instruction: %2 = call @llvm.fshl.nxv8i16( undef, undef, undef) -; CHECK-NEXT: Cost Model: Found an estimated cost of 13 for instruction: %3 = call @llvm.fshl.nxv4i32( undef, undef, undef) -; CHECK-NEXT: Cost Model: Found an estimated cost of 13 for instruction: %4 = call @llvm.fshl.nxv2i64( undef, undef, undef) -; CHECK-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void +; CHECK-VSCALE-1-LABEL: 'fshl' +; CHECK-VSCALE-1-NEXT: Cost Model: Found an estimated cost of 13 for instruction: %1 = call @llvm.fshl.nxv16i8( undef, undef, undef) +; CHECK-VSCALE-1-NEXT: Cost Model: Found an estimated cost of 13 for instruction: %2 = call @llvm.fshl.nxv8i16( undef, undef, undef) +; CHECK-VSCALE-1-NEXT: Cost Model: Found an estimated cost of 13 for instruction: %3 = call @llvm.fshl.nxv4i32( undef, undef, undef) +; CHECK-VSCALE-1-NEXT: Cost Model: Found an estimated cost of 13 for instruction: %4 = call @llvm.fshl.nxv2i64( undef, undef, undef) +; CHECK-VSCALE-1-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void +; +; CHECK-VSCALE-2-LABEL: 'fshl' +; CHECK-VSCALE-2-NEXT: Cost Model: Found an estimated cost of 13 for instruction: %1 = call @llvm.fshl.nxv16i8( undef, undef, undef) +; CHECK-VSCALE-2-NEXT: Cost Model: Found an estimated cost of 13 for instruction: %2 = call @llvm.fshl.nxv8i16( undef, undef, undef) +; CHECK-VSCALE-2-NEXT: Cost Model: Found an estimated cost of 13 for instruction: %3 = call @llvm.fshl.nxv4i32( undef, undef, undef) +; CHECK-VSCALE-2-NEXT: Cost Model: Found an estimated cost of 13 for instruction: %4 = call @llvm.fshl.nxv2i64( undef, undef, undef) +; CHECK-VSCALE-2-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void ; ; TYPE_BASED_ONLY-LABEL: 'fshl' ; TYPE_BASED_ONLY-NEXT: Cost Model: Invalid cost for instruction: %1 = call @llvm.fshl.nxv16i8( undef, undef, undef) @@ -838,9 +1068,13 @@ define void @fshl() #0 { } define @masked_gather_nxv4i32( %ld, %masks, %passthru) { -; CHECK-LABEL: 'masked_gather_nxv4i32' -; CHECK-NEXT: Cost Model: Found an estimated cost of 80 for instruction: %res = call @llvm.masked.gather.nxv4i32.nxv4p0( %ld, i32 0, %masks, %passthru) -; CHECK-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret %res +; CHECK-VSCALE-1-LABEL: 'masked_gather_nxv4i32' +; CHECK-VSCALE-1-NEXT: Cost Model: Found an estimated cost of 40 for instruction: %res = call @llvm.masked.gather.nxv4i32.nxv4p0( %ld, i32 0, %masks, %passthru) +; CHECK-VSCALE-1-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret %res +; +; CHECK-VSCALE-2-LABEL: 'masked_gather_nxv4i32' +; CHECK-VSCALE-2-NEXT: Cost Model: Found an estimated cost of 80 for instruction: %res = call @llvm.masked.gather.nxv4i32.nxv4p0( %ld, i32 0, %masks, %passthru) +; CHECK-VSCALE-2-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret %res ; ; TYPE_BASED_ONLY-LABEL: 'masked_gather_nxv4i32' ; TYPE_BASED_ONLY-NEXT: Cost 
Model: Invalid cost for instruction: %res = call @llvm.masked.gather.nxv4i32.nxv4p0( %ld, i32 0, %masks, %passthru) @@ -851,9 +1085,13 @@ define @masked_gather_nxv4i32( %ld, @masked_gather_nxv8i32( %ld, %masks, %passthru) { -; CHECK-LABEL: 'masked_gather_nxv8i32' -; CHECK-NEXT: Cost Model: Found an estimated cost of 160 for instruction: %res = call @llvm.masked.gather.nxv8i32.nxv8p0( %ld, i32 0, %masks, %passthru) -; CHECK-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret %res +; CHECK-VSCALE-1-LABEL: 'masked_gather_nxv8i32' +; CHECK-VSCALE-1-NEXT: Cost Model: Found an estimated cost of 80 for instruction: %res = call @llvm.masked.gather.nxv8i32.nxv8p0( %ld, i32 0, %masks, %passthru) +; CHECK-VSCALE-1-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret %res +; +; CHECK-VSCALE-2-LABEL: 'masked_gather_nxv8i32' +; CHECK-VSCALE-2-NEXT: Cost Model: Found an estimated cost of 160 for instruction: %res = call @llvm.masked.gather.nxv8i32.nxv8p0( %ld, i32 0, %masks, %passthru) +; CHECK-VSCALE-2-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret %res ; ; TYPE_BASED_ONLY-LABEL: 'masked_gather_nxv8i32' ; TYPE_BASED_ONLY-NEXT: Cost Model: Invalid cost for instruction: %res = call @llvm.masked.gather.nxv8i32.nxv8p0( %ld, i32 0, %masks, %passthru) @@ -864,9 +1102,13 @@ define @masked_gather_nxv8i32( %ld, @masked_gather_v4i32(<4 x ptr> %ld, <4 x i1> %masks, <4 x i32> %passthru) { -; CHECK-LABEL: 'masked_gather_v4i32' -; CHECK-NEXT: Cost Model: Found an estimated cost of 28 for instruction: %res = call <4 x i32> @llvm.masked.gather.v4i32.v4p0(<4 x ptr> %ld, i32 0, <4 x i1> %masks, <4 x i32> %passthru) -; CHECK-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret <4 x i32> %res +; CHECK-VSCALE-1-LABEL: 'masked_gather_v4i32' +; CHECK-VSCALE-1-NEXT: Cost Model: Found an estimated cost of 28 for instruction: %res = call <4 x i32> @llvm.masked.gather.v4i32.v4p0(<4 x ptr> %ld, i32 0, <4 x i1> %masks, <4 x i32> %passthru) +; CHECK-VSCALE-1-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret <4 x i32> %res +; +; CHECK-VSCALE-2-LABEL: 'masked_gather_v4i32' +; CHECK-VSCALE-2-NEXT: Cost Model: Found an estimated cost of 28 for instruction: %res = call <4 x i32> @llvm.masked.gather.v4i32.v4p0(<4 x ptr> %ld, i32 0, <4 x i1> %masks, <4 x i32> %passthru) +; CHECK-VSCALE-2-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret <4 x i32> %res ; ; TYPE_BASED_ONLY-LABEL: 'masked_gather_v4i32' ; TYPE_BASED_ONLY-NEXT: Cost Model: Found an estimated cost of 36 for instruction: %res = call <4 x i32> @llvm.masked.gather.v4i32.v4p0(<4 x ptr> %ld, i32 0, <4 x i1> %masks, <4 x i32> %passthru) @@ -877,9 +1119,13 @@ define <4 x i32> @masked_gather_v4i32(<4 x ptr> %ld, <4 x i1> %masks, <4 x i32> } define <1 x i128> @masked_gather_v1i128(<1 x ptr> %ld, <1 x i1> %masks, <1 x i128> %passthru) { -; CHECK-LABEL: 'masked_gather_v1i128' -; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %res = call <1 x i128> @llvm.masked.gather.v1i128.v1p0(<1 x ptr> %ld, i32 0, <1 x i1> %masks, <1 x i128> %passthru) -; CHECK-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret <1 x i128> %res +; CHECK-VSCALE-1-LABEL: 'masked_gather_v1i128' +; CHECK-VSCALE-1-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %res = call <1 x i128> @llvm.masked.gather.v1i128.v1p0(<1 x ptr> %ld, i32 0, <1 x i1> %masks, <1 x i128> %passthru) +; CHECK-VSCALE-1-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret <1 x i128> 
%res +; +; CHECK-VSCALE-2-LABEL: 'masked_gather_v1i128' +; CHECK-VSCALE-2-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %res = call <1 x i128> @llvm.masked.gather.v1i128.v1p0(<1 x ptr> %ld, i32 0, <1 x i1> %masks, <1 x i128> %passthru) +; CHECK-VSCALE-2-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret <1 x i128> %res ; ; TYPE_BASED_ONLY-LABEL: 'masked_gather_v1i128' ; TYPE_BASED_ONLY-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %res = call <1 x i128> @llvm.masked.gather.v1i128.v1p0(<1 x ptr> %ld, i32 0, <1 x i1> %masks, <1 x i128> %passthru) @@ -890,9 +1136,13 @@ define <1 x i128> @masked_gather_v1i128(<1 x ptr> %ld, <1 x i1> %masks, <1 x i12 } define void @masked_scatter_nxv4i32( %data, %ptrs, %masks) { -; CHECK-LABEL: 'masked_scatter_nxv4i32' -; CHECK-NEXT: Cost Model: Found an estimated cost of 80 for instruction: call void @llvm.masked.scatter.nxv4i32.nxv4p0( %data, %ptrs, i32 0, %masks) -; CHECK-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void +; CHECK-VSCALE-1-LABEL: 'masked_scatter_nxv4i32' +; CHECK-VSCALE-1-NEXT: Cost Model: Found an estimated cost of 40 for instruction: call void @llvm.masked.scatter.nxv4i32.nxv4p0( %data, %ptrs, i32 0, %masks) +; CHECK-VSCALE-1-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void +; +; CHECK-VSCALE-2-LABEL: 'masked_scatter_nxv4i32' +; CHECK-VSCALE-2-NEXT: Cost Model: Found an estimated cost of 80 for instruction: call void @llvm.masked.scatter.nxv4i32.nxv4p0( %data, %ptrs, i32 0, %masks) +; CHECK-VSCALE-2-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void ; ; TYPE_BASED_ONLY-LABEL: 'masked_scatter_nxv4i32' ; TYPE_BASED_ONLY-NEXT: Cost Model: Invalid cost for instruction: call void @llvm.masked.scatter.nxv4i32.nxv4p0( %data, %ptrs, i32 0, %masks) @@ -904,9 +1154,13 @@ define void @masked_scatter_nxv4i32( %data, } define void @masked_scatter_nxv8i32( %data, %ptrs, %masks) { -; CHECK-LABEL: 'masked_scatter_nxv8i32' -; CHECK-NEXT: Cost Model: Found an estimated cost of 160 for instruction: call void @llvm.masked.scatter.nxv8i32.nxv8p0( %data, %ptrs, i32 0, %masks) -; CHECK-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void +; CHECK-VSCALE-1-LABEL: 'masked_scatter_nxv8i32' +; CHECK-VSCALE-1-NEXT: Cost Model: Found an estimated cost of 80 for instruction: call void @llvm.masked.scatter.nxv8i32.nxv8p0( %data, %ptrs, i32 0, %masks) +; CHECK-VSCALE-1-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void +; +; CHECK-VSCALE-2-LABEL: 'masked_scatter_nxv8i32' +; CHECK-VSCALE-2-NEXT: Cost Model: Found an estimated cost of 160 for instruction: call void @llvm.masked.scatter.nxv8i32.nxv8p0( %data, %ptrs, i32 0, %masks) +; CHECK-VSCALE-2-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void ; ; TYPE_BASED_ONLY-LABEL: 'masked_scatter_nxv8i32' ; TYPE_BASED_ONLY-NEXT: Cost Model: Invalid cost for instruction: call void @llvm.masked.scatter.nxv8i32.nxv8p0( %data, %ptrs, i32 0, %masks) @@ -918,9 +1172,13 @@ define void @masked_scatter_nxv8i32( %data, } define void @masked_scatter_v4i32(<4 x i32> %data, <4 x ptr> %ptrs, <4 x i1> %masks) { -; CHECK-LABEL: 'masked_scatter_v4i32' -; CHECK-NEXT: Cost Model: Found an estimated cost of 28 for instruction: call void @llvm.masked.scatter.v4i32.v4p0(<4 x i32> %data, <4 x ptr> %ptrs, i32 0, <4 x i1> %masks) -; CHECK-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void +; CHECK-VSCALE-1-LABEL: 'masked_scatter_v4i32' +; 
CHECK-VSCALE-1-NEXT: Cost Model: Found an estimated cost of 28 for instruction: call void @llvm.masked.scatter.v4i32.v4p0(<4 x i32> %data, <4 x ptr> %ptrs, i32 0, <4 x i1> %masks) +; CHECK-VSCALE-1-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void +; +; CHECK-VSCALE-2-LABEL: 'masked_scatter_v4i32' +; CHECK-VSCALE-2-NEXT: Cost Model: Found an estimated cost of 28 for instruction: call void @llvm.masked.scatter.v4i32.v4p0(<4 x i32> %data, <4 x ptr> %ptrs, i32 0, <4 x i1> %masks) +; CHECK-VSCALE-2-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void ; ; TYPE_BASED_ONLY-LABEL: 'masked_scatter_v4i32' ; TYPE_BASED_ONLY-NEXT: Cost Model: Found an estimated cost of 28 for instruction: call void @llvm.masked.scatter.v4i32.v4p0(<4 x i32> %data, <4 x ptr> %ptrs, i32 0, <4 x i1> %masks) @@ -932,9 +1190,13 @@ define void @masked_scatter_v4i32(<4 x i32> %data, <4 x ptr> %ptrs, <4 x i1> %ma } define void @masked_scatter_v1i128(<1 x i128> %data, <1 x ptr> %ptrs, <1 x i1> %masks) { -; CHECK-LABEL: 'masked_scatter_v1i128' -; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: call void @llvm.masked.scatter.v1i128.v1p0(<1 x i128> %data, <1 x ptr> %ptrs, i32 0, <1 x i1> %masks) -; CHECK-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void +; CHECK-VSCALE-1-LABEL: 'masked_scatter_v1i128' +; CHECK-VSCALE-1-NEXT: Cost Model: Found an estimated cost of 8 for instruction: call void @llvm.masked.scatter.v1i128.v1p0(<1 x i128> %data, <1 x ptr> %ptrs, i32 0, <1 x i1> %masks) +; CHECK-VSCALE-1-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void +; +; CHECK-VSCALE-2-LABEL: 'masked_scatter_v1i128' +; CHECK-VSCALE-2-NEXT: Cost Model: Found an estimated cost of 8 for instruction: call void @llvm.masked.scatter.v1i128.v1p0(<1 x i128> %data, <1 x ptr> %ptrs, i32 0, <1 x i1> %masks) +; CHECK-VSCALE-2-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void ; ; TYPE_BASED_ONLY-LABEL: 'masked_scatter_v1i128' ; TYPE_BASED_ONLY-NEXT: Cost Model: Found an estimated cost of 1 for instruction: call void @llvm.masked.scatter.v1i128.v1p0(<1 x i128> %data, <1 x ptr> %ptrs, i32 0, <1 x i1> %masks) @@ -946,9 +1208,13 @@ define void @masked_scatter_v1i128(<1 x i128> %data, <1 x ptr> %ptrs, <1 x i1> % } define void @histogram_nxv2i64( %buckets, %mask) #3 { -; CHECK-LABEL: 'histogram_nxv2i64' -; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: call void @llvm.experimental.vector.histogram.add.nxv2p0.i64( %buckets, i64 1, %mask) -; CHECK-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void +; CHECK-VSCALE-1-LABEL: 'histogram_nxv2i64' +; CHECK-VSCALE-1-NEXT: Cost Model: Found an estimated cost of 8 for instruction: call void @llvm.experimental.vector.histogram.add.nxv2p0.i64( %buckets, i64 1, %mask) +; CHECK-VSCALE-1-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void +; +; CHECK-VSCALE-2-LABEL: 'histogram_nxv2i64' +; CHECK-VSCALE-2-NEXT: Cost Model: Found an estimated cost of 8 for instruction: call void @llvm.experimental.vector.histogram.add.nxv2p0.i64( %buckets, i64 1, %mask) +; CHECK-VSCALE-2-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void ; ; TYPE_BASED_ONLY-LABEL: 'histogram_nxv2i64' ; TYPE_BASED_ONLY-NEXT: Cost Model: Found an estimated cost of 8 for instruction: call void @llvm.experimental.vector.histogram.add.nxv2p0.i64( %buckets, i64 1, %mask) @@ -959,9 +1225,13 @@ define void @histogram_nxv2i64( %buckets, %m } define void 
@histogram_nxv4i32( %buckets, %mask) #3 { -; CHECK-LABEL: 'histogram_nxv4i32' -; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: call void @llvm.experimental.vector.histogram.add.nxv4p0.i32( %buckets, i32 1, %mask) -; CHECK-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void +; CHECK-VSCALE-1-LABEL: 'histogram_nxv4i32' +; CHECK-VSCALE-1-NEXT: Cost Model: Found an estimated cost of 8 for instruction: call void @llvm.experimental.vector.histogram.add.nxv4p0.i32( %buckets, i32 1, %mask) +; CHECK-VSCALE-1-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void +; +; CHECK-VSCALE-2-LABEL: 'histogram_nxv4i32' +; CHECK-VSCALE-2-NEXT: Cost Model: Found an estimated cost of 8 for instruction: call void @llvm.experimental.vector.histogram.add.nxv4p0.i32( %buckets, i32 1, %mask) +; CHECK-VSCALE-2-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void ; ; TYPE_BASED_ONLY-LABEL: 'histogram_nxv4i32' ; TYPE_BASED_ONLY-NEXT: Cost Model: Found an estimated cost of 8 for instruction: call void @llvm.experimental.vector.histogram.add.nxv4p0.i32( %buckets, i32 1, %mask) @@ -972,9 +1242,13 @@ define void @histogram_nxv4i32( %buckets, %m } define void @histogram_nxv8i16( %buckets, %mask) #3 { -; CHECK-LABEL: 'histogram_nxv8i16' -; CHECK-NEXT: Cost Model: Found an estimated cost of 16 for instruction: call void @llvm.experimental.vector.histogram.add.nxv8p0.i16( %buckets, i16 1, %mask) -; CHECK-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void +; CHECK-VSCALE-1-LABEL: 'histogram_nxv8i16' +; CHECK-VSCALE-1-NEXT: Cost Model: Found an estimated cost of 16 for instruction: call void @llvm.experimental.vector.histogram.add.nxv8p0.i16( %buckets, i16 1, %mask) +; CHECK-VSCALE-1-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void +; +; CHECK-VSCALE-2-LABEL: 'histogram_nxv8i16' +; CHECK-VSCALE-2-NEXT: Cost Model: Found an estimated cost of 16 for instruction: call void @llvm.experimental.vector.histogram.add.nxv8p0.i16( %buckets, i16 1, %mask) +; CHECK-VSCALE-2-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void ; ; TYPE_BASED_ONLY-LABEL: 'histogram_nxv8i16' ; TYPE_BASED_ONLY-NEXT: Cost Model: Found an estimated cost of 16 for instruction: call void @llvm.experimental.vector.histogram.add.nxv8p0.i16( %buckets, i16 1, %mask) @@ -985,9 +1259,13 @@ define void @histogram_nxv8i16( %buckets, %m } define void @histogram_nxv16i8( %buckets, %mask) #3 { -; CHECK-LABEL: 'histogram_nxv16i8' -; CHECK-NEXT: Cost Model: Found an estimated cost of 32 for instruction: call void @llvm.experimental.vector.histogram.add.nxv16p0.i8( %buckets, i8 1, %mask) -; CHECK-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void +; CHECK-VSCALE-1-LABEL: 'histogram_nxv16i8' +; CHECK-VSCALE-1-NEXT: Cost Model: Found an estimated cost of 32 for instruction: call void @llvm.experimental.vector.histogram.add.nxv16p0.i8( %buckets, i8 1, %mask) +; CHECK-VSCALE-1-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void +; +; CHECK-VSCALE-2-LABEL: 'histogram_nxv16i8' +; CHECK-VSCALE-2-NEXT: Cost Model: Found an estimated cost of 32 for instruction: call void @llvm.experimental.vector.histogram.add.nxv16p0.i8( %buckets, i8 1, %mask) +; CHECK-VSCALE-2-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void ; ; TYPE_BASED_ONLY-LABEL: 'histogram_nxv16i8' ; TYPE_BASED_ONLY-NEXT: Cost Model: Found an estimated cost of 32 for instruction: call void 
@llvm.experimental.vector.histogram.add.nxv16p0.i8( %buckets, i8 1, %mask) @@ -998,9 +1276,13 @@ define void @histogram_nxv16i8( %buckets, } define void @histogram_v2i64(<2 x ptr> %buckets, <2 x i1> %mask) { -; CHECK-LABEL: 'histogram_v2i64' -; CHECK-NEXT: Cost Model: Invalid cost for instruction: call void @llvm.experimental.vector.histogram.add.v2p0.i64(<2 x ptr> %buckets, i64 1, <2 x i1> %mask) -; CHECK-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void +; CHECK-VSCALE-1-LABEL: 'histogram_v2i64' +; CHECK-VSCALE-1-NEXT: Cost Model: Invalid cost for instruction: call void @llvm.experimental.vector.histogram.add.v2p0.i64(<2 x ptr> %buckets, i64 1, <2 x i1> %mask) +; CHECK-VSCALE-1-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void +; +; CHECK-VSCALE-2-LABEL: 'histogram_v2i64' +; CHECK-VSCALE-2-NEXT: Cost Model: Invalid cost for instruction: call void @llvm.experimental.vector.histogram.add.v2p0.i64(<2 x ptr> %buckets, i64 1, <2 x i1> %mask) +; CHECK-VSCALE-2-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void ; ; TYPE_BASED_ONLY-LABEL: 'histogram_v2i64' ; TYPE_BASED_ONLY-NEXT: Cost Model: Invalid cost for instruction: call void @llvm.experimental.vector.histogram.add.v2p0.i64(<2 x ptr> %buckets, i64 1, <2 x i1> %mask) @@ -1011,9 +1293,13 @@ define void @histogram_v2i64(<2 x ptr> %buckets, <2 x i1> %mask) { } define void @histogram_v4i32(<4 x ptr> %buckets, <4 x i1> %mask) { -; CHECK-LABEL: 'histogram_v4i32' -; CHECK-NEXT: Cost Model: Invalid cost for instruction: call void @llvm.experimental.vector.histogram.add.v4p0.i32(<4 x ptr> %buckets, i32 1, <4 x i1> %mask) -; CHECK-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void +; CHECK-VSCALE-1-LABEL: 'histogram_v4i32' +; CHECK-VSCALE-1-NEXT: Cost Model: Invalid cost for instruction: call void @llvm.experimental.vector.histogram.add.v4p0.i32(<4 x ptr> %buckets, i32 1, <4 x i1> %mask) +; CHECK-VSCALE-1-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void +; +; CHECK-VSCALE-2-LABEL: 'histogram_v4i32' +; CHECK-VSCALE-2-NEXT: Cost Model: Invalid cost for instruction: call void @llvm.experimental.vector.histogram.add.v4p0.i32(<4 x ptr> %buckets, i32 1, <4 x i1> %mask) +; CHECK-VSCALE-2-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void ; ; TYPE_BASED_ONLY-LABEL: 'histogram_v4i32' ; TYPE_BASED_ONLY-NEXT: Cost Model: Invalid cost for instruction: call void @llvm.experimental.vector.histogram.add.v4p0.i32(<4 x ptr> %buckets, i32 1, <4 x i1> %mask) @@ -1024,9 +1310,13 @@ define void @histogram_v4i32(<4 x ptr> %buckets, <4 x i1> %mask) { } define void @histogram_v8i16(<8 x ptr> %buckets, <8 x i1> %mask) { -; CHECK-LABEL: 'histogram_v8i16' -; CHECK-NEXT: Cost Model: Invalid cost for instruction: call void @llvm.experimental.vector.histogram.add.v8p0.i16(<8 x ptr> %buckets, i16 1, <8 x i1> %mask) -; CHECK-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void +; CHECK-VSCALE-1-LABEL: 'histogram_v8i16' +; CHECK-VSCALE-1-NEXT: Cost Model: Invalid cost for instruction: call void @llvm.experimental.vector.histogram.add.v8p0.i16(<8 x ptr> %buckets, i16 1, <8 x i1> %mask) +; CHECK-VSCALE-1-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void +; +; CHECK-VSCALE-2-LABEL: 'histogram_v8i16' +; CHECK-VSCALE-2-NEXT: Cost Model: Invalid cost for instruction: call void @llvm.experimental.vector.histogram.add.v8p0.i16(<8 x ptr> %buckets, i16 1, <8 x i1> %mask) +; CHECK-VSCALE-2-NEXT: Cost Model: Found an 
estimated cost of 0 for instruction: ret void ; ; TYPE_BASED_ONLY-LABEL: 'histogram_v8i16' ; TYPE_BASED_ONLY-NEXT: Cost Model: Invalid cost for instruction: call void @llvm.experimental.vector.histogram.add.v8p0.i16(<8 x ptr> %buckets, i16 1, <8 x i1> %mask) @@ -1037,9 +1327,13 @@ define void @histogram_v8i16(<8 x ptr> %buckets, <8 x i1> %mask) { } define void @histogram_v16i8(<16 x ptr> %buckets, <16 x i1> %mask) { -; CHECK-LABEL: 'histogram_v16i8' -; CHECK-NEXT: Cost Model: Invalid cost for instruction: call void @llvm.experimental.vector.histogram.add.v16p0.i8(<16 x ptr> %buckets, i8 1, <16 x i1> %mask) -; CHECK-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void +; CHECK-VSCALE-1-LABEL: 'histogram_v16i8' +; CHECK-VSCALE-1-NEXT: Cost Model: Invalid cost for instruction: call void @llvm.experimental.vector.histogram.add.v16p0.i8(<16 x ptr> %buckets, i8 1, <16 x i1> %mask) +; CHECK-VSCALE-1-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void +; +; CHECK-VSCALE-2-LABEL: 'histogram_v16i8' +; CHECK-VSCALE-2-NEXT: Cost Model: Invalid cost for instruction: call void @llvm.experimental.vector.histogram.add.v16p0.i8(<16 x ptr> %buckets, i8 1, <16 x i1> %mask) +; CHECK-VSCALE-2-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void ; ; TYPE_BASED_ONLY-LABEL: 'histogram_v16i8' ; TYPE_BASED_ONLY-NEXT: Cost Model: Invalid cost for instruction: call void @llvm.experimental.vector.histogram.add.v16p0.i8(<16 x ptr> %buckets, i8 1, <16 x i1> %mask) @@ -1050,9 +1344,13 @@ define void @histogram_v16i8(<16 x ptr> %buckets, <16 x i1> %mask) { } define void @histogram_nxv4i64( %buckets, %mask) #3 { -; CHECK-LABEL: 'histogram_nxv4i64' -; CHECK-NEXT: Cost Model: Found an estimated cost of 16 for instruction: call void @llvm.experimental.vector.histogram.add.nxv4p0.i64( %buckets, i64 1, %mask) -; CHECK-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void +; CHECK-VSCALE-1-LABEL: 'histogram_nxv4i64' +; CHECK-VSCALE-1-NEXT: Cost Model: Found an estimated cost of 16 for instruction: call void @llvm.experimental.vector.histogram.add.nxv4p0.i64( %buckets, i64 1, %mask) +; CHECK-VSCALE-1-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void +; +; CHECK-VSCALE-2-LABEL: 'histogram_nxv4i64' +; CHECK-VSCALE-2-NEXT: Cost Model: Found an estimated cost of 16 for instruction: call void @llvm.experimental.vector.histogram.add.nxv4p0.i64( %buckets, i64 1, %mask) +; CHECK-VSCALE-2-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void ; ; TYPE_BASED_ONLY-LABEL: 'histogram_nxv4i64' ; TYPE_BASED_ONLY-NEXT: Cost Model: Found an estimated cost of 16 for instruction: call void @llvm.experimental.vector.histogram.add.nxv4p0.i64( %buckets, i64 1, %mask) diff --git a/llvm/test/Analysis/CostModel/AArch64/sve-scatter.ll b/llvm/test/Analysis/CostModel/AArch64/sve-scatter.ll index 66e6bd4b307ea4..b9defdf8be2c72 100644 --- a/llvm/test/Analysis/CostModel/AArch64/sve-scatter.ll +++ b/llvm/test/Analysis/CostModel/AArch64/sve-scatter.ll @@ -1,5 +1,5 @@ ; NOTE: Assertions have been autogenerated by utils/update_analyze_test_checks.py -; RUN: opt -passes="print" 2>&1 -disable-output < %s | FileCheck %s +; RUN: opt -passes="print" 2>&1 -disable-output < %s | FileCheck %s --check-prefix=CHECK-VSCALE-1 ; RUN: opt -passes="print" 2>&1 -disable-output -mcpu=neoverse-v1 < %s | FileCheck %s --check-prefix=CHECK-VSCALE-2 ; RUN: opt -passes="print" 2>&1 -disable-output -mcpu=neoverse-n2 < %s | FileCheck %s --check-prefix=CHECK-VSCALE-1 ; 
RUN: opt -passes="print" 2>&1 -disable-output -mcpu=cortex-a510 < %s | FileCheck %s --check-prefix=CHECK-VSCALE-1 @@ -9,12 +9,12 @@ target triple="aarch64--linux-gnu" target datalayout = "e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128" define void @masked_scatters( %nxv4i1mask, %nxv8i1mask, <4 x i1> %v4i1mask, <1 x i1> %v1i1mask, %nxv1i1mask) #0 { -; CHECK-LABEL: 'masked_scatters' -; CHECK-NEXT: Cost Model: Found an estimated cost of 80 for instruction: call void @llvm.masked.scatter.nxv4i32.nxv4p0( undef, undef, i32 0, %nxv4i1mask) -; CHECK-NEXT: Cost Model: Found an estimated cost of 160 for instruction: call void @llvm.masked.scatter.nxv8i32.nxv8p0( undef, undef, i32 0, %nxv8i1mask) -; CHECK-NEXT: Cost Model: Invalid cost for instruction: call void @llvm.masked.scatter.nxv1i64.nxv1p0( undef, undef, i32 0, %nxv1i1mask) -; CHECK-NEXT: Cost Model: Invalid cost for instruction: call void @llvm.masked.scatter.nxv4i1.nxv4p0( undef, undef, i32 0, %nxv4i1mask) -; CHECK-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void +; CHECK-VSCALE-1-LABEL: 'masked_scatters' +; CHECK-VSCALE-1-NEXT: Cost Model: Found an estimated cost of 40 for instruction: call void @llvm.masked.scatter.nxv4i32.nxv4p0( undef, undef, i32 0, %nxv4i1mask) +; CHECK-VSCALE-1-NEXT: Cost Model: Found an estimated cost of 80 for instruction: call void @llvm.masked.scatter.nxv8i32.nxv8p0( undef, undef, i32 0, %nxv8i1mask) +; CHECK-VSCALE-1-NEXT: Cost Model: Invalid cost for instruction: call void @llvm.masked.scatter.nxv1i64.nxv1p0( undef, undef, i32 0, %nxv1i1mask) +; CHECK-VSCALE-1-NEXT: Cost Model: Invalid cost for instruction: call void @llvm.masked.scatter.nxv4i1.nxv4p0( undef, undef, i32 0, %nxv4i1mask) +; CHECK-VSCALE-1-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void ; ; CHECK-VSCALE-2-LABEL: 'masked_scatters' ; CHECK-VSCALE-2-NEXT: Cost Model: Found an estimated cost of 80 for instruction: call void @llvm.masked.scatter.nxv4i32.nxv4p0( undef, undef, i32 0, %nxv4i1mask) @@ -23,13 +23,6 @@ define void @masked_scatters( %nxv4i1mask, %n ; CHECK-VSCALE-2-NEXT: Cost Model: Invalid cost for instruction: call void @llvm.masked.scatter.nxv4i1.nxv4p0( undef, undef, i32 0, %nxv4i1mask) ; CHECK-VSCALE-2-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void ; -; CHECK-VSCALE-1-LABEL: 'masked_scatters' -; CHECK-VSCALE-1-NEXT: Cost Model: Found an estimated cost of 40 for instruction: call void @llvm.masked.scatter.nxv4i32.nxv4p0( undef, undef, i32 0, %nxv4i1mask) -; CHECK-VSCALE-1-NEXT: Cost Model: Found an estimated cost of 80 for instruction: call void @llvm.masked.scatter.nxv8i32.nxv8p0( undef, undef, i32 0, %nxv8i1mask) -; CHECK-VSCALE-1-NEXT: Cost Model: Invalid cost for instruction: call void @llvm.masked.scatter.nxv1i64.nxv1p0( undef, undef, i32 0, %nxv1i1mask) -; CHECK-VSCALE-1-NEXT: Cost Model: Invalid cost for instruction: call void @llvm.masked.scatter.nxv4i1.nxv4p0( undef, undef, i32 0, %nxv4i1mask) -; CHECK-VSCALE-1-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void -; ; CHECK-V2-LABEL: 'masked_scatters' ; CHECK-V2-NEXT: Cost Model: Found an estimated cost of 52 for instruction: call void @llvm.masked.scatter.nxv4i32.nxv4p0( undef, undef, i32 0, %nxv4i1mask) ; CHECK-V2-NEXT: Cost Model: Found an estimated cost of 104 for instruction: call void @llvm.masked.scatter.nxv8i32.nxv8p0( undef, undef, i32 0, %nxv8i1mask) @@ -45,27 +38,21 @@ define void @masked_scatters( %nxv4i1mask, %n } define void @masked_scatters_tune_generic( %nxv4i1mask, 
%nxv8i1mask, <4 x i1> %v4i1mask, <1 x i1> %v1i1mask, %nxv1i1mask) #1 { -; CHECK-LABEL: 'masked_scatters_tune_generic' -; CHECK-NEXT: Cost Model: Found an estimated cost of 80 for instruction: call void @llvm.masked.scatter.nxv4i32.nxv4p0( undef, undef, i32 0, %nxv4i1mask) -; CHECK-NEXT: Cost Model: Found an estimated cost of 160 for instruction: call void @llvm.masked.scatter.nxv8i32.nxv8p0( undef, undef, i32 0, %nxv8i1mask) -; CHECK-NEXT: Cost Model: Invalid cost for instruction: call void @llvm.masked.scatter.nxv1i64.nxv1p0( undef, undef, i32 0, %nxv1i1mask) -; CHECK-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void +; CHECK-VSCALE-1-LABEL: 'masked_scatters_tune_generic' +; CHECK-VSCALE-1-NEXT: Cost Model: Found an estimated cost of 40 for instruction: call void @llvm.masked.scatter.nxv4i32.nxv4p0( undef, undef, i32 0, %nxv4i1mask) +; CHECK-VSCALE-1-NEXT: Cost Model: Found an estimated cost of 80 for instruction: call void @llvm.masked.scatter.nxv8i32.nxv8p0( undef, undef, i32 0, %nxv8i1mask) +; CHECK-VSCALE-1-NEXT: Cost Model: Invalid cost for instruction: call void @llvm.masked.scatter.nxv1i64.nxv1p0( undef, undef, i32 0, %nxv1i1mask) +; CHECK-VSCALE-1-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void ; ; CHECK-VSCALE-2-LABEL: 'masked_scatters_tune_generic' -; CHECK-VSCALE-2-NEXT: Cost Model: Found an estimated cost of 80 for instruction: call void @llvm.masked.scatter.nxv4i32.nxv4p0( undef, undef, i32 0, %nxv4i1mask) -; CHECK-VSCALE-2-NEXT: Cost Model: Found an estimated cost of 160 for instruction: call void @llvm.masked.scatter.nxv8i32.nxv8p0( undef, undef, i32 0, %nxv8i1mask) +; CHECK-VSCALE-2-NEXT: Cost Model: Found an estimated cost of 40 for instruction: call void @llvm.masked.scatter.nxv4i32.nxv4p0( undef, undef, i32 0, %nxv4i1mask) +; CHECK-VSCALE-2-NEXT: Cost Model: Found an estimated cost of 80 for instruction: call void @llvm.masked.scatter.nxv8i32.nxv8p0( undef, undef, i32 0, %nxv8i1mask) ; CHECK-VSCALE-2-NEXT: Cost Model: Invalid cost for instruction: call void @llvm.masked.scatter.nxv1i64.nxv1p0( undef, undef, i32 0, %nxv1i1mask) ; CHECK-VSCALE-2-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void ; -; CHECK-VSCALE-1-LABEL: 'masked_scatters_tune_generic' -; CHECK-VSCALE-1-NEXT: Cost Model: Found an estimated cost of 80 for instruction: call void @llvm.masked.scatter.nxv4i32.nxv4p0( undef, undef, i32 0, %nxv4i1mask) -; CHECK-VSCALE-1-NEXT: Cost Model: Found an estimated cost of 160 for instruction: call void @llvm.masked.scatter.nxv8i32.nxv8p0( undef, undef, i32 0, %nxv8i1mask) -; CHECK-VSCALE-1-NEXT: Cost Model: Invalid cost for instruction: call void @llvm.masked.scatter.nxv1i64.nxv1p0( undef, undef, i32 0, %nxv1i1mask) -; CHECK-VSCALE-1-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void -; ; CHECK-V2-LABEL: 'masked_scatters_tune_generic' -; CHECK-V2-NEXT: Cost Model: Found an estimated cost of 80 for instruction: call void @llvm.masked.scatter.nxv4i32.nxv4p0( undef, undef, i32 0, %nxv4i1mask) -; CHECK-V2-NEXT: Cost Model: Found an estimated cost of 160 for instruction: call void @llvm.masked.scatter.nxv8i32.nxv8p0( undef, undef, i32 0, %nxv8i1mask) +; CHECK-V2-NEXT: Cost Model: Found an estimated cost of 40 for instruction: call void @llvm.masked.scatter.nxv4i32.nxv4p0( undef, undef, i32 0, %nxv4i1mask) +; CHECK-V2-NEXT: Cost Model: Found an estimated cost of 80 for instruction: call void @llvm.masked.scatter.nxv8i32.nxv8p0( undef, undef, i32 0, %nxv8i1mask) ; CHECK-V2-NEXT: Cost 
Model: Invalid cost for instruction: call void @llvm.masked.scatter.nxv1i64.nxv1p0( undef, undef, i32 0, %nxv1i1mask) ; CHECK-V2-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void ; @@ -76,16 +63,16 @@ define void @masked_scatters_tune_generic( %nxv4i1mask, undef, undef, i32 1, undef) -; CHECK-NEXT: Cost Model: Found an estimated cost of 40 for instruction: call void @llvm.masked.scatter.nxv2f64.nxv2p0( undef, undef, i32 1, undef) -; CHECK-NEXT: Cost Model: Found an estimated cost of 160 for instruction: call void @llvm.masked.scatter.nxv8f32.nxv8p0( undef, undef, i32 1, undef) -; CHECK-NEXT: Cost Model: Found an estimated cost of 80 for instruction: call void @llvm.masked.scatter.nxv4f32.nxv4p0( undef, undef, i32 1, undef) -; CHECK-NEXT: Cost Model: Found an estimated cost of 40 for instruction: call void @llvm.masked.scatter.nxv2f32.nxv2p0( undef, undef, i32 1, undef) -; CHECK-NEXT: Cost Model: Found an estimated cost of 320 for instruction: call void @llvm.masked.scatter.nxv16i16.nxv16p0( undef, undef, i32 1, undef) -; CHECK-NEXT: Cost Model: Found an estimated cost of 160 for instruction: call void @llvm.masked.scatter.nxv8i16.nxv8p0( undef, undef, i32 1, undef) -; CHECK-NEXT: Cost Model: Found an estimated cost of 80 for instruction: call void @llvm.masked.scatter.nxv4i16.nxv4p0( undef, undef, i32 1, undef) -; CHECK-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void +; CHECK-VSCALE-1-LABEL: 'masked_scatters_no_vscale_range' +; CHECK-VSCALE-1-NEXT: Cost Model: Found an estimated cost of 40 for instruction: call void @llvm.masked.scatter.nxv4f64.nxv4p0( undef, undef, i32 1, undef) +; CHECK-VSCALE-1-NEXT: Cost Model: Found an estimated cost of 20 for instruction: call void @llvm.masked.scatter.nxv2f64.nxv2p0( undef, undef, i32 1, undef) +; CHECK-VSCALE-1-NEXT: Cost Model: Found an estimated cost of 80 for instruction: call void @llvm.masked.scatter.nxv8f32.nxv8p0( undef, undef, i32 1, undef) +; CHECK-VSCALE-1-NEXT: Cost Model: Found an estimated cost of 40 for instruction: call void @llvm.masked.scatter.nxv4f32.nxv4p0( undef, undef, i32 1, undef) +; CHECK-VSCALE-1-NEXT: Cost Model: Found an estimated cost of 20 for instruction: call void @llvm.masked.scatter.nxv2f32.nxv2p0( undef, undef, i32 1, undef) +; CHECK-VSCALE-1-NEXT: Cost Model: Found an estimated cost of 160 for instruction: call void @llvm.masked.scatter.nxv16i16.nxv16p0( undef, undef, i32 1, undef) +; CHECK-VSCALE-1-NEXT: Cost Model: Found an estimated cost of 80 for instruction: call void @llvm.masked.scatter.nxv8i16.nxv8p0( undef, undef, i32 1, undef) +; CHECK-VSCALE-1-NEXT: Cost Model: Found an estimated cost of 40 for instruction: call void @llvm.masked.scatter.nxv4i16.nxv4p0( undef, undef, i32 1, undef) +; CHECK-VSCALE-1-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void ; ; CHECK-VSCALE-2-LABEL: 'masked_scatters_no_vscale_range' ; CHECK-VSCALE-2-NEXT: Cost Model: Found an estimated cost of 80 for instruction: call void @llvm.masked.scatter.nxv4f64.nxv4p0( undef, undef, i32 1, undef) @@ -98,17 +85,6 @@ define void @masked_scatters_no_vscale_range() #2 { ; CHECK-VSCALE-2-NEXT: Cost Model: Found an estimated cost of 80 for instruction: call void @llvm.masked.scatter.nxv4i16.nxv4p0( undef, undef, i32 1, undef) ; CHECK-VSCALE-2-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void ; -; CHECK-VSCALE-1-LABEL: 'masked_scatters_no_vscale_range' -; CHECK-VSCALE-1-NEXT: Cost Model: Found an estimated cost of 40 for instruction: call void 
@llvm.masked.scatter.nxv4f64.nxv4p0( undef, undef, i32 1, undef) -; CHECK-VSCALE-1-NEXT: Cost Model: Found an estimated cost of 20 for instruction: call void @llvm.masked.scatter.nxv2f64.nxv2p0( undef, undef, i32 1, undef) -; CHECK-VSCALE-1-NEXT: Cost Model: Found an estimated cost of 80 for instruction: call void @llvm.masked.scatter.nxv8f32.nxv8p0( undef, undef, i32 1, undef) -; CHECK-VSCALE-1-NEXT: Cost Model: Found an estimated cost of 40 for instruction: call void @llvm.masked.scatter.nxv4f32.nxv4p0( undef, undef, i32 1, undef) -; CHECK-VSCALE-1-NEXT: Cost Model: Found an estimated cost of 20 for instruction: call void @llvm.masked.scatter.nxv2f32.nxv2p0( undef, undef, i32 1, undef) -; CHECK-VSCALE-1-NEXT: Cost Model: Found an estimated cost of 160 for instruction: call void @llvm.masked.scatter.nxv16i16.nxv16p0( undef, undef, i32 1, undef) -; CHECK-VSCALE-1-NEXT: Cost Model: Found an estimated cost of 80 for instruction: call void @llvm.masked.scatter.nxv8i16.nxv8p0( undef, undef, i32 1, undef) -; CHECK-VSCALE-1-NEXT: Cost Model: Found an estimated cost of 40 for instruction: call void @llvm.masked.scatter.nxv4i16.nxv4p0( undef, undef, i32 1, undef) -; CHECK-VSCALE-1-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void -; ; CHECK-V2-LABEL: 'masked_scatters_no_vscale_range' ; CHECK-V2-NEXT: Cost Model: Found an estimated cost of 52 for instruction: call void @llvm.masked.scatter.nxv4f64.nxv4p0( undef, undef, i32 1, undef) ; CHECK-V2-NEXT: Cost Model: Found an estimated cost of 26 for instruction: call void @llvm.masked.scatter.nxv2f64.nxv2p0( undef, undef, i32 1, undef) diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/clamped-trip-count.ll b/llvm/test/Transforms/LoopVectorize/AArch64/clamped-trip-count.ll index c4559dc070e451..1948211858d446 100644 --- a/llvm/test/Transforms/LoopVectorize/AArch64/clamped-trip-count.ll +++ b/llvm/test/Transforms/LoopVectorize/AArch64/clamped-trip-count.ll @@ -8,42 +8,40 @@ define void @clamped_tc_8(ptr nocapture %dst, i32 %n, i64 %val) vscale_range(1,1 ; CHECK-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]] ; CHECK: vector.ph: ; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-NEXT: [[TMP1:%.*]] = mul i64 [[TMP0]], 4 +; CHECK-NEXT: [[TMP1:%.*]] = mul i64 [[TMP0]], 8 ; CHECK-NEXT: [[TMP4:%.*]] = sub i64 [[TMP1]], 1 ; CHECK-NEXT: [[N_RND_UP:%.*]] = add i64 8, [[TMP4]] ; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP1]] ; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]] ; CHECK-NEXT: [[IND_END:%.*]] = getelementptr i8, ptr [[DST]], i64 [[N_VEC]] ; CHECK-NEXT: [[TMP5:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-NEXT: [[TMP6:%.*]] = mul i64 [[TMP5]], 4 -; CHECK-NEXT: [[ACTIVE_LANE_MASK_ENTRY:%.*]] = call @llvm.get.active.lane.mask.nxv4i1.i64(i64 0, i64 8) -; CHECK-NEXT: [[TMP7:%.*]] = call @llvm.stepvector.nxv4i64() -; CHECK-NEXT: [[TMP8:%.*]] = add [[TMP7]], zeroinitializer -; CHECK-NEXT: [[TMP9:%.*]] = mul [[TMP8]], splat (i64 1) -; CHECK-NEXT: [[INDUCTION:%.*]] = add zeroinitializer, [[TMP9]] +; CHECK-NEXT: [[TMP6:%.*]] = mul i64 [[TMP5]], 8 +; CHECK-NEXT: [[ACTIVE_LANE_MASK_ENTRY:%.*]] = call @llvm.get.active.lane.mask.nxv8i1.i64(i64 0, i64 8) +; CHECK-NEXT: [[TMP8:%.*]] = call @llvm.stepvector.nxv8i64() +; CHECK-NEXT: [[TMP9:%.*]] = add [[TMP8]], zeroinitializer +; CHECK-NEXT: [[TMP7:%.*]] = mul [[TMP9]], splat (i64 1) +; CHECK-NEXT: [[INDUCTION:%.*]] = add zeroinitializer, [[TMP7]] ; CHECK-NEXT: [[TMP12:%.*]] = mul i64 1, [[TMP6]] -; CHECK-NEXT: 
[[DOTSPLATINSERT:%.*]] = insertelement poison, i64 [[TMP12]], i64 0 -; CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector [[DOTSPLATINSERT]], poison, zeroinitializer -; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement poison, i64 [[VAL]], i64 0 -; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector [[BROADCAST_SPLATINSERT]], poison, zeroinitializer +; CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement poison, i64 [[TMP12]], i64 0 +; CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector [[DOTSPLATINSERT]], poison, zeroinitializer +; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement poison, i64 [[VAL]], i64 0 +; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector [[BROADCAST_SPLATINSERT]], poison, zeroinitializer ; CHECK-NEXT: br label [[VECTOR_BODY:%.*]] ; CHECK: vector.body: ; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] -; CHECK-NEXT: [[ACTIVE_LANE_MASK:%.*]] = phi [ [[ACTIVE_LANE_MASK_ENTRY]], [[VECTOR_PH]] ], [ [[ACTIVE_LANE_MASK_NEXT:%.*]], [[VECTOR_BODY]] ] -; CHECK-NEXT: [[VEC_IND:%.*]] = phi [ [[INDUCTION]], [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY]] ] +; CHECK-NEXT: [[ACTIVE_LANE_MASK:%.*]] = phi [ [[ACTIVE_LANE_MASK_ENTRY]], [[VECTOR_PH]] ], [ [[ACTIVE_LANE_MASK_NEXT:%.*]], [[VECTOR_BODY]] ] +; CHECK-NEXT: [[VEC_IND:%.*]] = phi [ [[INDUCTION]], [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY]] ] ; CHECK-NEXT: [[TMP13:%.*]] = add i64 [[INDEX]], 0 ; CHECK-NEXT: [[NEXT_GEP:%.*]] = getelementptr i8, ptr [[DST]], i64 [[TMP13]] -; CHECK-NEXT: [[TMP14:%.*]] = shl nuw nsw [[VEC_IND]], splat (i64 3) -; CHECK-NEXT: [[TMP15:%.*]] = lshr [[BROADCAST_SPLAT]], [[TMP14]] -; CHECK-NEXT: [[TMP16:%.*]] = trunc [[TMP15]] to +; CHECK-NEXT: [[TMP10:%.*]] = shl nuw nsw [[VEC_IND]], splat (i64 3) +; CHECK-NEXT: [[TMP11:%.*]] = lshr [[BROADCAST_SPLAT]], [[TMP10]] +; CHECK-NEXT: [[TMP14:%.*]] = trunc [[TMP11]] to ; CHECK-NEXT: [[TMP17:%.*]] = getelementptr i8, ptr [[NEXT_GEP]], i32 0 -; CHECK-NEXT: call void @llvm.masked.store.nxv4i8.p0( [[TMP16]], ptr [[TMP17]], i32 1, [[ACTIVE_LANE_MASK]]) +; CHECK-NEXT: call void @llvm.masked.store.nxv8i8.p0( [[TMP14]], ptr [[TMP17]], i32 1, [[ACTIVE_LANE_MASK]]) ; CHECK-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], [[TMP6]] -; CHECK-NEXT: [[ACTIVE_LANE_MASK_NEXT]] = call @llvm.get.active.lane.mask.nxv4i1.i64(i64 [[INDEX_NEXT]], i64 8) -; CHECK-NEXT: [[TMP18:%.*]] = xor [[ACTIVE_LANE_MASK_NEXT]], splat (i1 true) -; CHECK-NEXT: [[VEC_IND_NEXT]] = add [[VEC_IND]], [[DOTSPLAT]] -; CHECK-NEXT: [[TMP20:%.*]] = extractelement [[TMP18]], i32 0 -; CHECK-NEXT: br i1 [[TMP20]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] +; CHECK-NEXT: [[VEC_IND_NEXT]] = add [[VEC_IND]], [[DOTSPLAT]] +; CHECK-NEXT: [[ACTIVE_LANE_MASK_NEXT]] = call @llvm.get.active.lane.mask.nxv8i1.i64(i64 [[INDEX_NEXT]], i64 8) +; CHECK-NEXT: br i1 true, label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; CHECK: middle.block: ; CHECK-NEXT: br i1 true, label [[FOR_COND_CLEANUP:%.*]], label [[SCALAR_PH]] ; CHECK: scalar.ph: @@ -97,42 +95,40 @@ define void @clamped_tc_max_8(ptr nocapture %dst, i32 %n, i64 %val) vscale_range ; CHECK-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]] ; CHECK: vector.ph: ; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-NEXT: [[TMP1:%.*]] = mul i64 [[TMP0]], 4 +; CHECK-NEXT: [[TMP1:%.*]] = mul i64 [[TMP0]], 8 ; CHECK-NEXT: [[TMP4:%.*]] = sub i64 [[TMP1]], 1 ; CHECK-NEXT: [[N_RND_UP:%.*]] = add i64 
[[WIDE_TRIP_COUNT]], [[TMP4]] ; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP1]] ; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]] ; CHECK-NEXT: [[IND_END:%.*]] = getelementptr i8, ptr [[DST]], i64 [[N_VEC]] ; CHECK-NEXT: [[TMP5:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-NEXT: [[TMP6:%.*]] = mul i64 [[TMP5]], 4 -; CHECK-NEXT: [[ACTIVE_LANE_MASK_ENTRY:%.*]] = call @llvm.get.active.lane.mask.nxv4i1.i64(i64 0, i64 [[WIDE_TRIP_COUNT]]) -; CHECK-NEXT: [[TMP7:%.*]] = call @llvm.stepvector.nxv4i64() -; CHECK-NEXT: [[TMP8:%.*]] = add [[TMP7]], zeroinitializer -; CHECK-NEXT: [[TMP9:%.*]] = mul [[TMP8]], splat (i64 1) -; CHECK-NEXT: [[INDUCTION:%.*]] = add zeroinitializer, [[TMP9]] +; CHECK-NEXT: [[TMP6:%.*]] = mul i64 [[TMP5]], 8 +; CHECK-NEXT: [[ACTIVE_LANE_MASK_ENTRY:%.*]] = call @llvm.get.active.lane.mask.nxv8i1.i64(i64 0, i64 [[WIDE_TRIP_COUNT]]) +; CHECK-NEXT: [[TMP8:%.*]] = call @llvm.stepvector.nxv8i64() +; CHECK-NEXT: [[TMP9:%.*]] = add [[TMP8]], zeroinitializer +; CHECK-NEXT: [[TMP7:%.*]] = mul [[TMP9]], splat (i64 1) +; CHECK-NEXT: [[INDUCTION:%.*]] = add zeroinitializer, [[TMP7]] ; CHECK-NEXT: [[TMP12:%.*]] = mul i64 1, [[TMP6]] -; CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement poison, i64 [[TMP12]], i64 0 -; CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector [[DOTSPLATINSERT]], poison, zeroinitializer -; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement poison, i64 [[VAL]], i64 0 -; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector [[BROADCAST_SPLATINSERT]], poison, zeroinitializer +; CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement poison, i64 [[TMP12]], i64 0 +; CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector [[DOTSPLATINSERT]], poison, zeroinitializer +; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement poison, i64 [[VAL]], i64 0 +; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector [[BROADCAST_SPLATINSERT]], poison, zeroinitializer ; CHECK-NEXT: br label [[VECTOR_BODY:%.*]] ; CHECK: vector.body: ; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] -; CHECK-NEXT: [[ACTIVE_LANE_MASK:%.*]] = phi [ [[ACTIVE_LANE_MASK_ENTRY]], [[VECTOR_PH]] ], [ [[ACTIVE_LANE_MASK_NEXT:%.*]], [[VECTOR_BODY]] ] -; CHECK-NEXT: [[VEC_IND:%.*]] = phi [ [[INDUCTION]], [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY]] ] +; CHECK-NEXT: [[ACTIVE_LANE_MASK:%.*]] = phi [ [[ACTIVE_LANE_MASK_ENTRY]], [[VECTOR_PH]] ], [ [[ACTIVE_LANE_MASK_NEXT:%.*]], [[VECTOR_BODY]] ] +; CHECK-NEXT: [[VEC_IND:%.*]] = phi [ [[INDUCTION]], [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY]] ] ; CHECK-NEXT: [[TMP13:%.*]] = add i64 [[INDEX]], 0 ; CHECK-NEXT: [[NEXT_GEP:%.*]] = getelementptr i8, ptr [[DST]], i64 [[TMP13]] -; CHECK-NEXT: [[TMP14:%.*]] = shl nuw nsw [[VEC_IND]], splat (i64 3) -; CHECK-NEXT: [[TMP15:%.*]] = lshr [[BROADCAST_SPLAT]], [[TMP14]] -; CHECK-NEXT: [[TMP16:%.*]] = trunc [[TMP15]] to +; CHECK-NEXT: [[TMP10:%.*]] = shl nuw nsw [[VEC_IND]], splat (i64 3) +; CHECK-NEXT: [[TMP11:%.*]] = lshr [[BROADCAST_SPLAT]], [[TMP10]] +; CHECK-NEXT: [[TMP14:%.*]] = trunc [[TMP11]] to ; CHECK-NEXT: [[TMP17:%.*]] = getelementptr i8, ptr [[NEXT_GEP]], i32 0 -; CHECK-NEXT: call void @llvm.masked.store.nxv4i8.p0( [[TMP16]], ptr [[TMP17]], i32 1, [[ACTIVE_LANE_MASK]]) +; CHECK-NEXT: call void @llvm.masked.store.nxv8i8.p0( [[TMP14]], ptr [[TMP17]], i32 1, [[ACTIVE_LANE_MASK]]) ; CHECK-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], [[TMP6]] -; CHECK-NEXT: [[ACTIVE_LANE_MASK_NEXT]] = call @llvm.get.active.lane.mask.nxv4i1.i64(i64 
[[INDEX_NEXT]], i64 [[WIDE_TRIP_COUNT]]) -; CHECK-NEXT: [[TMP18:%.*]] = xor [[ACTIVE_LANE_MASK_NEXT]], splat (i1 true) -; CHECK-NEXT: [[VEC_IND_NEXT]] = add [[VEC_IND]], [[DOTSPLAT]] -; CHECK-NEXT: [[TMP20:%.*]] = extractelement [[TMP18]], i32 0 -; CHECK-NEXT: br i1 [[TMP20]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] +; CHECK-NEXT: [[VEC_IND_NEXT]] = add [[VEC_IND]], [[DOTSPLAT]] +; CHECK-NEXT: [[ACTIVE_LANE_MASK_NEXT]] = call @llvm.get.active.lane.mask.nxv8i1.i64(i64 [[INDEX_NEXT]], i64 [[WIDE_TRIP_COUNT]]) +; CHECK-NEXT: br i1 true, label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] ; CHECK: middle.block: ; CHECK-NEXT: br i1 true, label [[FOR_COND_CLEANUP_LOOPEXIT:%.*]], label [[SCALAR_PH]] ; CHECK: scalar.ph: diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/conditional-branches-cost.ll b/llvm/test/Transforms/LoopVectorize/AArch64/conditional-branches-cost.ll index dff0bbe59a7065..69560305706364 100644 --- a/llvm/test/Transforms/LoopVectorize/AArch64/conditional-branches-cost.ll +++ b/llvm/test/Transforms/LoopVectorize/AArch64/conditional-branches-cost.ll @@ -494,10 +494,7 @@ define i32 @header_mask_and_invariant_compare(ptr %A, ptr %B, ptr %C, ptr %D, pt ; DEFAULT-SAME: ptr [[A:%.*]], ptr [[B:%.*]], ptr [[C:%.*]], ptr [[D:%.*]], ptr [[E:%.*]], i64 [[N:%.*]]) #[[ATTR1:[0-9]+]] { ; DEFAULT-NEXT: entry: ; DEFAULT-NEXT: [[TMP0:%.*]] = add i64 [[N]], 1 -; DEFAULT-NEXT: [[TMP1:%.*]] = call i64 @llvm.vscale.i64() -; DEFAULT-NEXT: [[TMP2:%.*]] = mul i64 [[TMP1]], 4 -; DEFAULT-NEXT: [[TMP3:%.*]] = call i64 @llvm.umax.i64(i64 64, i64 [[TMP2]]) -; DEFAULT-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[TMP0]], [[TMP3]] +; DEFAULT-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[TMP0]], 60 ; DEFAULT-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_MEMCHECK:%.*]] ; DEFAULT: vector.memcheck: ; DEFAULT-NEXT: [[SCEVGEP:%.*]] = getelementptr i8, ptr [[E]], i64 4 @@ -536,34 +533,55 @@ define i32 @header_mask_and_invariant_compare(ptr %A, ptr %B, ptr %C, ptr %D, pt ; DEFAULT-NEXT: [[CONFLICT_RDX27:%.*]] = or i1 [[CONFLICT_RDX23]], [[FOUND_CONFLICT26]] ; DEFAULT-NEXT: br i1 [[CONFLICT_RDX27]], label [[SCALAR_PH]], label [[VECTOR_PH:%.*]] ; DEFAULT: vector.ph: -; DEFAULT-NEXT: [[TMP6:%.*]] = call i64 @llvm.vscale.i64() -; DEFAULT-NEXT: [[TMP7:%.*]] = mul i64 [[TMP6]], 4 -; DEFAULT-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[TMP0]], [[TMP7]] +; DEFAULT-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[TMP0]], 4 ; DEFAULT-NEXT: [[N_VEC:%.*]] = sub i64 [[TMP0]], [[N_MOD_VF]] -; DEFAULT-NEXT: [[TMP8:%.*]] = call i64 @llvm.vscale.i64() -; DEFAULT-NEXT: [[TMP9:%.*]] = mul i64 [[TMP8]], 4 -; DEFAULT-NEXT: [[BROADCAST_SPLATINSERT32:%.*]] = insertelement poison, ptr [[E]], i64 0 -; DEFAULT-NEXT: [[BROADCAST_SPLAT33:%.*]] = shufflevector [[BROADCAST_SPLATINSERT32]], poison, zeroinitializer ; DEFAULT-NEXT: br label [[VECTOR_BODY:%.*]] ; DEFAULT: vector.body: -; DEFAULT-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] +; DEFAULT-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[PRED_STORE_CONTINUE37:%.*]] ] ; DEFAULT-NEXT: [[TMP10:%.*]] = add i64 [[INDEX]], 0 -; DEFAULT-NEXT: [[TMP11:%.*]] = load i32, ptr [[A]], align 4, !alias.scope [[META7:![0-9]+]] -; DEFAULT-NEXT: [[BROADCAST_SPLATINSERT28:%.*]] = insertelement poison, i32 [[TMP11]], i64 0 -; DEFAULT-NEXT: [[BROADCAST_SPLAT29:%.*]] = shufflevector [[BROADCAST_SPLATINSERT28]], poison, zeroinitializer -; DEFAULT-NEXT: 
[[TMP12:%.*]] = load i32, ptr [[B]], align 4, !alias.scope [[META10:![0-9]+]] -; DEFAULT-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement poison, i32 [[TMP12]], i64 0 -; DEFAULT-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector [[BROADCAST_SPLATINSERT]], poison, zeroinitializer -; DEFAULT-NEXT: [[TMP13:%.*]] = or [[BROADCAST_SPLAT]], [[BROADCAST_SPLAT29]] -; DEFAULT-NEXT: [[TMP14:%.*]] = load i32, ptr [[C]], align 4, !alias.scope [[META12:![0-9]+]] -; DEFAULT-NEXT: [[BROADCAST_SPLATINSERT30:%.*]] = insertelement poison, i32 [[TMP14]], i64 0 -; DEFAULT-NEXT: [[BROADCAST_SPLAT31:%.*]] = shufflevector [[BROADCAST_SPLATINSERT30]], poison, zeroinitializer -; DEFAULT-NEXT: [[TMP15:%.*]] = icmp ugt [[BROADCAST_SPLAT31]], [[TMP13]] +; DEFAULT-NEXT: [[TMP9:%.*]] = load i32, ptr [[A]], align 4, !alias.scope [[META7:![0-9]+]] +; DEFAULT-NEXT: [[BROADCAST_SPLATINSERT28:%.*]] = insertelement <4 x i32> poison, i32 [[TMP9]], i64 0 +; DEFAULT-NEXT: [[BROADCAST_SPLAT29:%.*]] = shufflevector <4 x i32> [[BROADCAST_SPLATINSERT28]], <4 x i32> poison, <4 x i32> zeroinitializer +; DEFAULT-NEXT: [[TMP19:%.*]] = load i32, ptr [[B]], align 4, !alias.scope [[META10:![0-9]+]] +; DEFAULT-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <4 x i32> poison, i32 [[TMP19]], i64 0 +; DEFAULT-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <4 x i32> [[BROADCAST_SPLATINSERT]], <4 x i32> poison, <4 x i32> zeroinitializer +; DEFAULT-NEXT: [[TMP6:%.*]] = or <4 x i32> [[BROADCAST_SPLAT]], [[BROADCAST_SPLAT29]] +; DEFAULT-NEXT: [[TMP7:%.*]] = load i32, ptr [[C]], align 4, !alias.scope [[META12:![0-9]+]] +; DEFAULT-NEXT: [[BROADCAST_SPLATINSERT30:%.*]] = insertelement <4 x i32> poison, i32 [[TMP7]], i64 0 +; DEFAULT-NEXT: [[BROADCAST_SPLAT31:%.*]] = shufflevector <4 x i32> [[BROADCAST_SPLATINSERT30]], <4 x i32> poison, <4 x i32> zeroinitializer +; DEFAULT-NEXT: [[TMP8:%.*]] = icmp ugt <4 x i32> [[BROADCAST_SPLAT31]], [[TMP6]] ; DEFAULT-NEXT: [[TMP16:%.*]] = getelementptr i32, ptr [[D]], i64 [[TMP10]] -; DEFAULT-NEXT: call void @llvm.masked.scatter.nxv4i32.nxv4p0( [[TMP13]], [[BROADCAST_SPLAT33]], i32 4, [[TMP15]]), !alias.scope [[META14:![0-9]+]], !noalias [[META16:![0-9]+]] +; DEFAULT-NEXT: [[TMP20:%.*]] = extractelement <4 x i1> [[TMP8]], i32 0 +; DEFAULT-NEXT: br i1 [[TMP20]], label [[PRED_STORE_IF:%.*]], label [[PRED_STORE_CONTINUE:%.*]] +; DEFAULT: pred.store.if: +; DEFAULT-NEXT: [[TMP11:%.*]] = extractelement <4 x i32> [[TMP6]], i32 0 +; DEFAULT-NEXT: store i32 [[TMP11]], ptr [[E]], align 4, !alias.scope [[META14:![0-9]+]], !noalias [[META16:![0-9]+]] +; DEFAULT-NEXT: br label [[PRED_STORE_CONTINUE]] +; DEFAULT: pred.store.continue: +; DEFAULT-NEXT: [[TMP12:%.*]] = extractelement <4 x i1> [[TMP8]], i32 1 +; DEFAULT-NEXT: br i1 [[TMP12]], label [[PRED_STORE_IF32:%.*]], label [[PRED_STORE_CONTINUE33:%.*]] +; DEFAULT: pred.store.if32: +; DEFAULT-NEXT: [[TMP13:%.*]] = extractelement <4 x i32> [[TMP6]], i32 1 +; DEFAULT-NEXT: store i32 [[TMP13]], ptr [[E]], align 4, !alias.scope [[META14]], !noalias [[META16]] +; DEFAULT-NEXT: br label [[PRED_STORE_CONTINUE33]] +; DEFAULT: pred.store.continue33: +; DEFAULT-NEXT: [[TMP14:%.*]] = extractelement <4 x i1> [[TMP8]], i32 2 +; DEFAULT-NEXT: br i1 [[TMP14]], label [[PRED_STORE_IF34:%.*]], label [[PRED_STORE_CONTINUE35:%.*]] +; DEFAULT: pred.store.if34: +; DEFAULT-NEXT: [[TMP15:%.*]] = extractelement <4 x i32> [[TMP6]], i32 2 +; DEFAULT-NEXT: store i32 [[TMP15]], ptr [[E]], align 4, !alias.scope [[META14]], !noalias [[META16]] +; DEFAULT-NEXT: br label [[PRED_STORE_CONTINUE35]] +; DEFAULT: 
pred.store.continue35: +; DEFAULT-NEXT: [[TMP21:%.*]] = extractelement <4 x i1> [[TMP8]], i32 3 +; DEFAULT-NEXT: br i1 [[TMP21]], label [[PRED_STORE_IF36:%.*]], label [[PRED_STORE_CONTINUE37]] +; DEFAULT: pred.store.if36: +; DEFAULT-NEXT: [[TMP22:%.*]] = extractelement <4 x i32> [[TMP6]], i32 3 +; DEFAULT-NEXT: store i32 [[TMP22]], ptr [[E]], align 4, !alias.scope [[META14]], !noalias [[META16]] +; DEFAULT-NEXT: br label [[PRED_STORE_CONTINUE37]] +; DEFAULT: pred.store.continue37: ; DEFAULT-NEXT: [[TMP17:%.*]] = getelementptr i32, ptr [[TMP16]], i32 0 -; DEFAULT-NEXT: call void @llvm.masked.store.nxv4i32.p0( zeroinitializer, ptr [[TMP17]], i32 4, [[TMP15]]), !alias.scope [[META18:![0-9]+]], !noalias [[META19:![0-9]+]] -; DEFAULT-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP9]] +; DEFAULT-NEXT: call void @llvm.masked.store.v4i32.p0(<4 x i32> zeroinitializer, ptr [[TMP17]], i32 4, <4 x i1> [[TMP8]]), !alias.scope [[META18:![0-9]+]], !noalias [[META19:![0-9]+]] +; DEFAULT-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4 ; DEFAULT-NEXT: [[TMP18:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] ; DEFAULT-NEXT: br i1 [[TMP18]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP20:![0-9]+]] ; DEFAULT: middle.block: @@ -634,48 +652,66 @@ define i32 @header_mask_and_invariant_compare(ptr %A, ptr %B, ptr %C, ptr %D, pt ; PRED-NEXT: [[CONFLICT_RDX27:%.*]] = or i1 [[CONFLICT_RDX23]], [[FOUND_CONFLICT26]] ; PRED-NEXT: br i1 [[CONFLICT_RDX27]], label [[SCALAR_PH]], label [[VECTOR_PH:%.*]] ; PRED: vector.ph: -; PRED-NEXT: [[TMP3:%.*]] = call i64 @llvm.vscale.i64() -; PRED-NEXT: [[TMP4:%.*]] = mul i64 [[TMP3]], 4 -; PRED-NEXT: [[TMP7:%.*]] = sub i64 [[TMP4]], 1 -; PRED-NEXT: [[N_RND_UP:%.*]] = add i64 [[TMP0]], [[TMP7]] -; PRED-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP4]] +; PRED-NEXT: [[N_RND_UP:%.*]] = add i64 [[TMP0]], 3 +; PRED-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], 4 ; PRED-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]] -; PRED-NEXT: [[TMP8:%.*]] = call i64 @llvm.vscale.i64() -; PRED-NEXT: [[TMP9:%.*]] = mul i64 [[TMP8]], 4 -; PRED-NEXT: [[TMP10:%.*]] = call i64 @llvm.vscale.i64() -; PRED-NEXT: [[TMP11:%.*]] = mul i64 [[TMP10]], 4 -; PRED-NEXT: [[TMP12:%.*]] = sub i64 [[TMP0]], [[TMP11]] -; PRED-NEXT: [[TMP13:%.*]] = icmp ugt i64 [[TMP0]], [[TMP11]] +; PRED-NEXT: [[TMP12:%.*]] = sub i64 [[TMP0]], 4 +; PRED-NEXT: [[TMP13:%.*]] = icmp ugt i64 [[TMP0]], 4 ; PRED-NEXT: [[TMP14:%.*]] = select i1 [[TMP13]], i64 [[TMP12]], i64 0 -; PRED-NEXT: [[ACTIVE_LANE_MASK_ENTRY:%.*]] = call @llvm.get.active.lane.mask.nxv4i1.i64(i64 0, i64 [[TMP0]]) -; PRED-NEXT: [[BROADCAST_SPLATINSERT32:%.*]] = insertelement poison, ptr [[E]], i64 0 -; PRED-NEXT: [[BROADCAST_SPLAT33:%.*]] = shufflevector [[BROADCAST_SPLATINSERT32]], poison, zeroinitializer +; PRED-NEXT: [[ACTIVE_LANE_MASK_ENTRY:%.*]] = call <4 x i1> @llvm.get.active.lane.mask.v4i1.i64(i64 0, i64 [[TMP0]]) ; PRED-NEXT: br label [[VECTOR_BODY:%.*]] ; PRED: vector.body: -; PRED-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] -; PRED-NEXT: [[ACTIVE_LANE_MASK:%.*]] = phi [ [[ACTIVE_LANE_MASK_ENTRY]], [[VECTOR_PH]] ], [ [[ACTIVE_LANE_MASK_NEXT:%.*]], [[VECTOR_BODY]] ] +; PRED-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[PRED_STORE_CONTINUE37:%.*]] ] +; PRED-NEXT: [[ACTIVE_LANE_MASK:%.*]] = phi <4 x i1> [ [[ACTIVE_LANE_MASK_ENTRY]], [[VECTOR_PH]] ], [ [[ACTIVE_LANE_MASK_NEXT:%.*]], [[PRED_STORE_CONTINUE37]] ] ; PRED-NEXT: [[TMP15:%.*]] = add i64 
[[INDEX]], 0 -; PRED-NEXT: [[TMP16:%.*]] = load i32, ptr [[A]], align 4, !alias.scope [[META4:![0-9]+]] -; PRED-NEXT: [[BROADCAST_SPLATINSERT28:%.*]] = insertelement poison, i32 [[TMP16]], i64 0 -; PRED-NEXT: [[BROADCAST_SPLAT29:%.*]] = shufflevector [[BROADCAST_SPLATINSERT28]], poison, zeroinitializer -; PRED-NEXT: [[TMP17:%.*]] = load i32, ptr [[B]], align 4, !alias.scope [[META7:![0-9]+]] -; PRED-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement poison, i32 [[TMP17]], i64 0 -; PRED-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector [[BROADCAST_SPLATINSERT]], poison, zeroinitializer -; PRED-NEXT: [[TMP18:%.*]] = or [[BROADCAST_SPLAT]], [[BROADCAST_SPLAT29]] -; PRED-NEXT: [[TMP19:%.*]] = load i32, ptr [[C]], align 4, !alias.scope [[META9:![0-9]+]] -; PRED-NEXT: [[BROADCAST_SPLATINSERT30:%.*]] = insertelement poison, i32 [[TMP19]], i64 0 -; PRED-NEXT: [[BROADCAST_SPLAT31:%.*]] = shufflevector [[BROADCAST_SPLATINSERT30]], poison, zeroinitializer -; PRED-NEXT: [[TMP20:%.*]] = icmp ugt [[BROADCAST_SPLAT31]], [[TMP18]] -; PRED-NEXT: [[TMP21:%.*]] = select [[ACTIVE_LANE_MASK]], [[TMP20]], zeroinitializer +; PRED-NEXT: [[TMP7:%.*]] = load i32, ptr [[A]], align 4, !alias.scope [[META4:![0-9]+]] +; PRED-NEXT: [[BROADCAST_SPLATINSERT28:%.*]] = insertelement <4 x i32> poison, i32 [[TMP7]], i64 0 +; PRED-NEXT: [[BROADCAST_SPLAT29:%.*]] = shufflevector <4 x i32> [[BROADCAST_SPLATINSERT28]], <4 x i32> poison, <4 x i32> zeroinitializer +; PRED-NEXT: [[TMP8:%.*]] = load i32, ptr [[B]], align 4, !alias.scope [[META7:![0-9]+]] +; PRED-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <4 x i32> poison, i32 [[TMP8]], i64 0 +; PRED-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <4 x i32> [[BROADCAST_SPLATINSERT]], <4 x i32> poison, <4 x i32> zeroinitializer +; PRED-NEXT: [[TMP9:%.*]] = or <4 x i32> [[BROADCAST_SPLAT]], [[BROADCAST_SPLAT29]] +; PRED-NEXT: [[TMP10:%.*]] = load i32, ptr [[C]], align 4, !alias.scope [[META9:![0-9]+]] +; PRED-NEXT: [[BROADCAST_SPLATINSERT30:%.*]] = insertelement <4 x i32> poison, i32 [[TMP10]], i64 0 +; PRED-NEXT: [[BROADCAST_SPLAT31:%.*]] = shufflevector <4 x i32> [[BROADCAST_SPLATINSERT30]], <4 x i32> poison, <4 x i32> zeroinitializer +; PRED-NEXT: [[TMP11:%.*]] = icmp ugt <4 x i32> [[BROADCAST_SPLAT31]], [[TMP9]] +; PRED-NEXT: [[TMP25:%.*]] = select <4 x i1> [[ACTIVE_LANE_MASK]], <4 x i1> [[TMP11]], <4 x i1> zeroinitializer ; PRED-NEXT: [[TMP22:%.*]] = getelementptr i32, ptr [[D]], i64 [[TMP15]] -; PRED-NEXT: call void @llvm.masked.scatter.nxv4i32.nxv4p0( [[TMP18]], [[BROADCAST_SPLAT33]], i32 4, [[TMP21]]), !alias.scope [[META11:![0-9]+]], !noalias [[META13:![0-9]+]] +; PRED-NEXT: [[TMP26:%.*]] = extractelement <4 x i1> [[TMP25]], i32 0 +; PRED-NEXT: br i1 [[TMP26]], label [[PRED_STORE_IF:%.*]], label [[PRED_STORE_CONTINUE:%.*]] +; PRED: pred.store.if: +; PRED-NEXT: [[TMP27:%.*]] = extractelement <4 x i32> [[TMP9]], i32 0 +; PRED-NEXT: store i32 [[TMP27]], ptr [[E]], align 4, !alias.scope [[META11:![0-9]+]], !noalias [[META13:![0-9]+]] +; PRED-NEXT: br label [[PRED_STORE_CONTINUE]] +; PRED: pred.store.continue: +; PRED-NEXT: [[TMP16:%.*]] = extractelement <4 x i1> [[TMP25]], i32 1 +; PRED-NEXT: br i1 [[TMP16]], label [[PRED_STORE_IF32:%.*]], label [[PRED_STORE_CONTINUE33:%.*]] +; PRED: pred.store.if32: +; PRED-NEXT: [[TMP17:%.*]] = extractelement <4 x i32> [[TMP9]], i32 1 +; PRED-NEXT: store i32 [[TMP17]], ptr [[E]], align 4, !alias.scope [[META11]], !noalias [[META13]] +; PRED-NEXT: br label [[PRED_STORE_CONTINUE33]] +; PRED: pred.store.continue33: +; PRED-NEXT: [[TMP18:%.*]] = 
extractelement <4 x i1> [[TMP25]], i32 2 +; PRED-NEXT: br i1 [[TMP18]], label [[PRED_STORE_IF34:%.*]], label [[PRED_STORE_CONTINUE35:%.*]] +; PRED: pred.store.if34: +; PRED-NEXT: [[TMP19:%.*]] = extractelement <4 x i32> [[TMP9]], i32 2 +; PRED-NEXT: store i32 [[TMP19]], ptr [[E]], align 4, !alias.scope [[META11]], !noalias [[META13]] +; PRED-NEXT: br label [[PRED_STORE_CONTINUE35]] +; PRED: pred.store.continue35: +; PRED-NEXT: [[TMP20:%.*]] = extractelement <4 x i1> [[TMP25]], i32 3 +; PRED-NEXT: br i1 [[TMP20]], label [[PRED_STORE_IF36:%.*]], label [[PRED_STORE_CONTINUE37]] +; PRED: pred.store.if36: +; PRED-NEXT: [[TMP21:%.*]] = extractelement <4 x i32> [[TMP9]], i32 3 +; PRED-NEXT: store i32 [[TMP21]], ptr [[E]], align 4, !alias.scope [[META11]], !noalias [[META13]] +; PRED-NEXT: br label [[PRED_STORE_CONTINUE37]] +; PRED: pred.store.continue37: ; PRED-NEXT: [[TMP23:%.*]] = getelementptr i32, ptr [[TMP22]], i32 0 -; PRED-NEXT: call void @llvm.masked.store.nxv4i32.p0( zeroinitializer, ptr [[TMP23]], i32 4, [[TMP21]]), !alias.scope [[META15:![0-9]+]], !noalias [[META16:![0-9]+]] -; PRED-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], [[TMP9]] -; PRED-NEXT: [[ACTIVE_LANE_MASK_NEXT]] = call @llvm.get.active.lane.mask.nxv4i1.i64(i64 [[INDEX]], i64 [[TMP14]]) -; PRED-NEXT: [[TMP24:%.*]] = xor [[ACTIVE_LANE_MASK_NEXT]], splat (i1 true) -; PRED-NEXT: [[TMP25:%.*]] = extractelement [[TMP24]], i32 0 -; PRED-NEXT: br i1 [[TMP25]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP17:![0-9]+]] +; PRED-NEXT: call void @llvm.masked.store.v4i32.p0(<4 x i32> zeroinitializer, ptr [[TMP23]], i32 4, <4 x i1> [[TMP25]]), !alias.scope [[META15:![0-9]+]], !noalias [[META16:![0-9]+]] +; PRED-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], 4 +; PRED-NEXT: [[ACTIVE_LANE_MASK_NEXT]] = call <4 x i1> @llvm.get.active.lane.mask.v4i1.i64(i64 [[INDEX]], i64 [[TMP14]]) +; PRED-NEXT: [[TMP28:%.*]] = xor <4 x i1> [[ACTIVE_LANE_MASK_NEXT]], splat (i1 true) +; PRED-NEXT: [[TMP24:%.*]] = extractelement <4 x i1> [[TMP28]], i32 0 +; PRED-NEXT: br i1 [[TMP24]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP17:![0-9]+]] ; PRED: middle.block: ; PRED-NEXT: br i1 true, label [[EXIT:%.*]], label [[SCALAR_PH]] ; PRED: scalar.ph: diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/induction-costs-sve.ll b/llvm/test/Transforms/LoopVectorize/AArch64/induction-costs-sve.ll index 4faed427b4e175..e4c23999919456 100644 --- a/llvm/test/Transforms/LoopVectorize/AArch64/induction-costs-sve.ll +++ b/llvm/test/Transforms/LoopVectorize/AArch64/induction-costs-sve.ll @@ -8,12 +8,12 @@ target triple = "arm64-apple-macosx14.0.0" define void @iv_casts(ptr %dst, ptr %src, i32 %x, i64 %N) #0 { ; DEFAULT-LABEL: define void @iv_casts( ; DEFAULT-SAME: ptr [[DST:%.*]], ptr [[SRC:%.*]], i32 [[X:%.*]], i64 [[N:%.*]]) #[[ATTR0:[0-9]+]] { -; DEFAULT-NEXT: iter.check: +; DEFAULT-NEXT: entry: ; DEFAULT-NEXT: [[SRC2:%.*]] = ptrtoint ptr [[SRC]] to i64 ; DEFAULT-NEXT: [[DST1:%.*]] = ptrtoint ptr [[DST]] to i64 ; DEFAULT-NEXT: [[TMP0:%.*]] = add i64 [[N]], 1 ; DEFAULT-NEXT: [[TMP1:%.*]] = call i64 @llvm.vscale.i64() -; DEFAULT-NEXT: [[TMP2:%.*]] = mul i64 [[TMP1]], 4 +; DEFAULT-NEXT: [[TMP2:%.*]] = mul i64 [[TMP1]], 16 ; DEFAULT-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[TMP0]], [[TMP2]] ; DEFAULT-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[VEC_EPILOG_SCALAR_PH:%.*]], label [[VECTOR_MEMCHECK:%.*]] ; DEFAULT: vector.memcheck: @@ -22,12 +22,7 @@ define void @iv_casts(ptr %dst, ptr %src, i32 %x, i64 %N) #0 { ; DEFAULT-NEXT: 
[[TMP5:%.*]] = mul i64 [[TMP4]], 2 ; DEFAULT-NEXT: [[TMP6:%.*]] = sub i64 [[DST1]], [[SRC2]] ; DEFAULT-NEXT: [[DIFF_CHECK:%.*]] = icmp ult i64 [[TMP6]], [[TMP5]] -; DEFAULT-NEXT: br i1 [[DIFF_CHECK]], label [[VEC_EPILOG_SCALAR_PH]], label [[VECTOR_MAIN_LOOP_ITER_CHECK:%.*]] -; DEFAULT: vector.main.loop.iter.check: -; DEFAULT-NEXT: [[TMP7:%.*]] = call i64 @llvm.vscale.i64() -; DEFAULT-NEXT: [[TMP8:%.*]] = mul i64 [[TMP7]], 16 -; DEFAULT-NEXT: [[MIN_ITERS_CHECK3:%.*]] = icmp ult i64 [[TMP0]], [[TMP8]] -; DEFAULT-NEXT: br i1 [[MIN_ITERS_CHECK3]], label [[VEC_EPILOG_PH:%.*]], label [[VECTOR_PH:%.*]] +; DEFAULT-NEXT: br i1 [[DIFF_CHECK]], label [[VEC_EPILOG_SCALAR_PH]], label [[VECTOR_PH:%.*]] ; DEFAULT: vector.ph: ; DEFAULT-NEXT: [[TMP9:%.*]] = call i64 @llvm.vscale.i64() ; DEFAULT-NEXT: [[TMP10:%.*]] = mul i64 [[TMP9]], 16 @@ -72,49 +67,10 @@ define void @iv_casts(ptr %dst, ptr %src, i32 %x, i64 %N) #0 { ; DEFAULT-NEXT: [[TMP44:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] ; DEFAULT-NEXT: br i1 [[TMP44]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; DEFAULT: middle.block: -; DEFAULT-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[TMP0]], [[N_VEC]] -; DEFAULT-NEXT: br i1 [[CMP_N]], label [[EXIT:%.*]], label [[VEC_EPILOG_ITER_CHECK:%.*]] -; DEFAULT: vec.epilog.iter.check: -; DEFAULT-NEXT: [[N_VEC_REMAINING:%.*]] = sub i64 [[TMP0]], [[N_VEC]] -; DEFAULT-NEXT: [[TMP45:%.*]] = call i64 @llvm.vscale.i64() -; DEFAULT-NEXT: [[TMP46:%.*]] = mul i64 [[TMP45]], 4 -; DEFAULT-NEXT: [[MIN_EPILOG_ITERS_CHECK:%.*]] = icmp ult i64 [[N_VEC_REMAINING]], [[TMP46]] -; DEFAULT-NEXT: br i1 [[MIN_EPILOG_ITERS_CHECK]], label [[VEC_EPILOG_SCALAR_PH]], label [[VEC_EPILOG_PH]] -; DEFAULT: vec.epilog.ph: -; DEFAULT-NEXT: [[VEC_EPILOG_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[VEC_EPILOG_ITER_CHECK]] ], [ 0, [[VECTOR_MAIN_LOOP_ITER_CHECK]] ] -; DEFAULT-NEXT: [[TMP47:%.*]] = call i64 @llvm.vscale.i64() -; DEFAULT-NEXT: [[TMP48:%.*]] = mul i64 [[TMP47]], 4 -; DEFAULT-NEXT: [[N_MOD_VF5:%.*]] = urem i64 [[TMP0]], [[TMP48]] -; DEFAULT-NEXT: [[N_VEC6:%.*]] = sub i64 [[TMP0]], [[N_MOD_VF5]] -; DEFAULT-NEXT: [[TMP49:%.*]] = call i64 @llvm.vscale.i64() -; DEFAULT-NEXT: [[TMP50:%.*]] = mul i64 [[TMP49]], 4 -; DEFAULT-NEXT: [[BROADCAST_SPLATINSERT8:%.*]] = insertelement poison, i32 [[X]], i64 0 -; DEFAULT-NEXT: [[BROADCAST_SPLAT9:%.*]] = shufflevector [[BROADCAST_SPLATINSERT8]], poison, zeroinitializer -; DEFAULT-NEXT: [[TMP51:%.*]] = trunc [[BROADCAST_SPLAT9]] to -; DEFAULT-NEXT: br label [[VEC_EPILOG_VECTOR_BODY:%.*]] -; DEFAULT: vec.epilog.vector.body: -; DEFAULT-NEXT: [[INDEX10:%.*]] = phi i64 [ [[VEC_EPILOG_RESUME_VAL]], [[VEC_EPILOG_PH]] ], [ [[INDEX_NEXT12:%.*]], [[VEC_EPILOG_VECTOR_BODY]] ] -; DEFAULT-NEXT: [[TMP52:%.*]] = add i64 [[INDEX10]], 0 -; DEFAULT-NEXT: [[TMP53:%.*]] = getelementptr i8, ptr [[SRC]], i64 [[TMP52]] -; DEFAULT-NEXT: [[TMP54:%.*]] = getelementptr i8, ptr [[TMP53]], i32 0 -; DEFAULT-NEXT: [[WIDE_LOAD11:%.*]] = load , ptr [[TMP54]], align 1 -; DEFAULT-NEXT: [[TMP55:%.*]] = zext [[WIDE_LOAD11]] to -; DEFAULT-NEXT: [[TMP56:%.*]] = mul [[TMP55]], [[TMP51]] -; DEFAULT-NEXT: [[TMP57:%.*]] = zext [[WIDE_LOAD11]] to -; DEFAULT-NEXT: [[TMP58:%.*]] = or [[TMP56]], [[TMP57]] -; DEFAULT-NEXT: [[TMP59:%.*]] = lshr [[TMP58]], trunc ( splat (i32 1) to ) -; DEFAULT-NEXT: [[TMP60:%.*]] = trunc [[TMP59]] to -; DEFAULT-NEXT: [[TMP61:%.*]] = getelementptr i8, ptr [[DST]], i64 [[TMP52]] -; DEFAULT-NEXT: [[TMP62:%.*]] = getelementptr i8, ptr [[TMP61]], i32 0 -; DEFAULT-NEXT: store [[TMP60]], ptr 
[[TMP62]], align 1 -; DEFAULT-NEXT: [[INDEX_NEXT12]] = add nuw i64 [[INDEX10]], [[TMP50]] -; DEFAULT-NEXT: [[TMP63:%.*]] = icmp eq i64 [[INDEX_NEXT12]], [[N_VEC6]] -; DEFAULT-NEXT: br i1 [[TMP63]], label [[VEC_EPILOG_MIDDLE_BLOCK:%.*]], label [[VEC_EPILOG_VECTOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]] -; DEFAULT: vec.epilog.middle.block: -; DEFAULT-NEXT: [[CMP_N7:%.*]] = icmp eq i64 [[TMP0]], [[N_VEC6]] -; DEFAULT-NEXT: br i1 [[CMP_N7]], label [[EXIT]], label [[VEC_EPILOG_SCALAR_PH]] -; DEFAULT: vec.epilog.scalar.ph: -; DEFAULT-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC6]], [[VEC_EPILOG_MIDDLE_BLOCK]] ], [ [[N_VEC]], [[VEC_EPILOG_ITER_CHECK]] ], [ 0, [[VECTOR_MEMCHECK]] ], [ 0, [[ITER_CHECK:%.*]] ] +; DEFAULT-NEXT: [[CMP_N7:%.*]] = icmp eq i64 [[TMP0]], [[N_VEC]] +; DEFAULT-NEXT: br i1 [[CMP_N7]], label [[EXIT:%.*]], label [[VEC_EPILOG_SCALAR_PH]] +; DEFAULT: scalar.ph: +; DEFAULT-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ], [ 0, [[VECTOR_MEMCHECK]] ] ; DEFAULT-NEXT: br label [[LOOP:%.*]] ; DEFAULT: loop: ; DEFAULT-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[VEC_EPILOG_SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[LOOP]] ] @@ -130,7 +86,7 @@ define void @iv_casts(ptr %dst, ptr %src, i32 %x, i64 %N) #0 { ; DEFAULT-NEXT: [[GEP_DST:%.*]] = getelementptr i8, ptr [[DST]], i64 [[IV]] ; DEFAULT-NEXT: store i8 [[CONV36_US]], ptr [[GEP_DST]], align 1 ; DEFAULT-NEXT: [[EC:%.*]] = icmp eq i64 [[IV]], [[N]] -; DEFAULT-NEXT: br i1 [[EC]], label [[EXIT]], label [[LOOP]], !llvm.loop [[LOOP4:![0-9]+]] +; DEFAULT-NEXT: br i1 [[EC]], label [[EXIT]], label [[LOOP]], !llvm.loop [[LOOP3:![0-9]+]] ; DEFAULT: exit: ; DEFAULT-NEXT: ret void ; @@ -277,7 +233,7 @@ define void @iv_trunc(i32 %x, ptr %dst, i64 %N) #0 { ; DEFAULT-NEXT: store i32 1, ptr [[TMP21]], align 4 ; DEFAULT-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 2 ; DEFAULT-NEXT: [[TMP22:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; DEFAULT-NEXT: br i1 [[TMP22]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]] +; DEFAULT-NEXT: br i1 [[TMP22]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] ; DEFAULT: middle.block: ; DEFAULT-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[TMP0]], [[N_VEC]] ; DEFAULT-NEXT: br i1 [[CMP_N]], label [[EXIT:%.*]], label [[SCALAR_PH]] @@ -293,7 +249,7 @@ define void @iv_trunc(i32 %x, ptr %dst, i64 %N) #0 { ; DEFAULT-NEXT: store i32 1, ptr [[GEP]], align 4 ; DEFAULT-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1 ; DEFAULT-NEXT: [[EC:%.*]] = icmp eq i64 [[IV]], [[N]] -; DEFAULT-NEXT: br i1 [[EC]], label [[EXIT]], label [[FOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]] +; DEFAULT-NEXT: br i1 [[EC]], label [[EXIT]], label [[FOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]] ; DEFAULT: exit: ; DEFAULT-NEXT: ret void ; @@ -445,7 +401,7 @@ define void @trunc_ivs_and_store(i32 %x, ptr %dst, i64 %N) #0 { ; DEFAULT-NEXT: store i32 [[TMP15]], ptr [[TMP24]], align 4 ; DEFAULT-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 2 ; DEFAULT-NEXT: [[TMP25:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; DEFAULT-NEXT: br i1 [[TMP25]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]] +; DEFAULT-NEXT: br i1 [[TMP25]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]] ; DEFAULT: middle.block: ; DEFAULT-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[TMP0]], [[N_VEC]] ; DEFAULT-NEXT: br i1 [[CMP_N]], label [[EXIT:%.*]], label [[SCALAR_PH]] @@ -464,7 +420,7 @@ define void @trunc_ivs_and_store(i32 %x, ptr %dst, i64 %N) #0 { ; 
DEFAULT-NEXT: store i32 [[IV_2]], ptr [[GEP]], align 4 ; DEFAULT-NEXT: [[IV_1_NEXT]] = add i64 [[IV_1]], 1 ; DEFAULT-NEXT: [[EXITCOND_3_NOT:%.*]] = icmp eq i64 [[IV_1]], [[N]] -; DEFAULT-NEXT: br i1 [[EXITCOND_3_NOT]], label [[EXIT]], label [[LOOP]], !llvm.loop [[LOOP8:![0-9]+]] +; DEFAULT-NEXT: br i1 [[EXITCOND_3_NOT]], label [[EXIT]], label [[LOOP]], !llvm.loop [[LOOP7:![0-9]+]] ; DEFAULT: exit: ; DEFAULT-NEXT: ret void ; @@ -643,7 +599,7 @@ define void @ivs_trunc_and_ext(i32 %x, ptr %dst, i64 %N) #0 { ; DEFAULT-NEXT: store i32 [[TMP14]], ptr [[TMP23]], align 4 ; DEFAULT-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 2 ; DEFAULT-NEXT: [[TMP24:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; DEFAULT-NEXT: br i1 [[TMP24]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]] +; DEFAULT-NEXT: br i1 [[TMP24]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]] ; DEFAULT: middle.block: ; DEFAULT-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[TMP0]], [[N_VEC]] ; DEFAULT-NEXT: br i1 [[CMP_N]], label [[EXIT:%.*]], label [[SCALAR_PH]] @@ -662,7 +618,7 @@ define void @ivs_trunc_and_ext(i32 %x, ptr %dst, i64 %N) #0 { ; DEFAULT-NEXT: store i32 [[IV_2]], ptr [[GEP]], align 4 ; DEFAULT-NEXT: [[IV_1_NEXT]] = add i64 [[IV_1]], 1 ; DEFAULT-NEXT: [[EC:%.*]] = icmp eq i64 [[IV_1]], [[N]] -; DEFAULT-NEXT: br i1 [[EC]], label [[EXIT]], label [[LOOP]], !llvm.loop [[LOOP10:![0-9]+]] +; DEFAULT-NEXT: br i1 [[EC]], label [[EXIT]], label [[LOOP]], !llvm.loop [[LOOP9:![0-9]+]] ; DEFAULT: exit: ; DEFAULT-NEXT: ret void ; @@ -824,7 +780,7 @@ define void @exit_cond_zext_iv(ptr %dst, i64 %N) { ; DEFAULT-NEXT: store i32 0, ptr [[TMP10]], align 8 ; DEFAULT-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 2 ; DEFAULT-NEXT: [[TMP11:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; DEFAULT-NEXT: br i1 [[TMP11]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP11:![0-9]+]] +; DEFAULT-NEXT: br i1 [[TMP11]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]] ; DEFAULT: middle.block: ; DEFAULT-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[UMAX1]], [[N_VEC]] ; DEFAULT-NEXT: br i1 [[CMP_N]], label [[EXIT:%.*]], label [[SCALAR_PH]] @@ -840,7 +796,7 @@ define void @exit_cond_zext_iv(ptr %dst, i64 %N) { ; DEFAULT-NEXT: [[IV_1_NEXT]] = add i32 [[IV_1]], 1 ; DEFAULT-NEXT: [[IV_EXT]] = zext i32 [[IV_1_NEXT]] to i64 ; DEFAULT-NEXT: [[C:%.*]] = icmp ult i64 [[IV_EXT]], [[N]] -; DEFAULT-NEXT: br i1 [[C]], label [[LOOP]], label [[EXIT]], !llvm.loop [[LOOP12:![0-9]+]] +; DEFAULT-NEXT: br i1 [[C]], label [[LOOP]], label [[EXIT]], !llvm.loop [[LOOP11:![0-9]+]] ; DEFAULT: exit: ; DEFAULT-NEXT: ret void ; @@ -934,16 +890,15 @@ attributes #0 = { "target-features"="+sve" } ; DEFAULT: [[LOOP0]] = distinct !{[[LOOP0]], [[META1:![0-9]+]], [[META2:![0-9]+]]} ; DEFAULT: [[META1]] = !{!"llvm.loop.isvectorized", i32 1} ; DEFAULT: [[META2]] = !{!"llvm.loop.unroll.runtime.disable"} -; DEFAULT: [[LOOP3]] = distinct !{[[LOOP3]], [[META1]], [[META2]]} -; DEFAULT: [[LOOP4]] = distinct !{[[LOOP4]], [[META1]]} -; DEFAULT: [[LOOP5]] = distinct !{[[LOOP5]], [[META1]], [[META2]]} -; DEFAULT: [[LOOP6]] = distinct !{[[LOOP6]], [[META1]]} -; DEFAULT: [[LOOP7]] = distinct !{[[LOOP7]], [[META1]], [[META2]]} -; DEFAULT: [[LOOP8]] = distinct !{[[LOOP8]], [[META1]]} -; DEFAULT: [[LOOP9]] = distinct !{[[LOOP9]], [[META1]], [[META2]]} -; DEFAULT: [[LOOP10]] = distinct !{[[LOOP10]], [[META1]]} -; DEFAULT: [[LOOP11]] = distinct !{[[LOOP11]], [[META1]], [[META2]]} -; DEFAULT: [[LOOP12]] = distinct 
!{[[LOOP12]], [[META1]]} +; DEFAULT: [[LOOP3]] = distinct !{[[LOOP3]], [[META1]]} +; DEFAULT: [[LOOP4]] = distinct !{[[LOOP4]], [[META1]], [[META2]]} +; DEFAULT: [[LOOP5]] = distinct !{[[LOOP5]], [[META1]]} +; DEFAULT: [[LOOP6]] = distinct !{[[LOOP6]], [[META1]], [[META2]]} +; DEFAULT: [[LOOP7]] = distinct !{[[LOOP7]], [[META1]]} +; DEFAULT: [[LOOP8]] = distinct !{[[LOOP8]], [[META1]], [[META2]]} +; DEFAULT: [[LOOP9]] = distinct !{[[LOOP9]], [[META1]]} +; DEFAULT: [[LOOP10]] = distinct !{[[LOOP10]], [[META1]], [[META2]]} +; DEFAULT: [[LOOP11]] = distinct !{[[LOOP11]], [[META1]]} ;. ; PRED: [[LOOP0]] = distinct !{[[LOOP0]], [[META1:![0-9]+]], [[META2:![0-9]+]]} ; PRED: [[META1]] = !{!"llvm.loop.isvectorized", i32 1} diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/low_trip_count_predicates.ll b/llvm/test/Transforms/LoopVectorize/AArch64/low_trip_count_predicates.ll index 59b879e0867dcc..b9dc3ec2fb1c9a 100644 --- a/llvm/test/Transforms/LoopVectorize/AArch64/low_trip_count_predicates.ll +++ b/llvm/test/Transforms/LoopVectorize/AArch64/low_trip_count_predicates.ll @@ -1,7 +1,9 @@ ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5 ; REQUIRES: asserts -; RUN: opt -S < %s -p loop-vectorize -debug-only=loop-vectorize -mattr=+sve 2>%t | FileCheck %s -; RUN: cat %t | FileCheck %s --check-prefix=DEBUG +; RUN: opt -S < %s -p loop-vectorize -debug-only=loop-vectorize -mattr=+sve 2>%t | FileCheck %s --check-prefixes=CHECK,CHECK-VS1 +; RUN: cat %t | FileCheck %s --check-prefixes=DEBUG,DEBUG-VS1 +; RUN: opt -S < %s -p loop-vectorize -debug-only=loop-vectorize -mcpu=neoverse-v1 -sve-tail-folding=disabled 2>%t | FileCheck %s --check-prefixes=CHECK,CHECK-VS2 +; RUN: cat %t | FileCheck %s --check-prefixes=DEBUG,DEBUG-VS2 target triple = "aarch64-unknown-linux-gnu" @@ -9,8 +11,10 @@ target triple = "aarch64-unknown-linux-gnu" ; DEBUG: LV: Found trip count: 0 ; DEBUG: LV: Found maximum trip count: 19 ; DEBUG: LV: IC is 1 -; DEBUG: LV: VF is vscale x 8 -; DEBUG: Main Loop VF:vscale x 8, Main Loop UF:1, Epilogue Loop VF:vscale x 4, Epilogue Loop UF:1 +; DEBUG-VS1: LV: VF is vscale x 16 +; DEBUG-VS1: Main Loop VF:vscale x 16, Main Loop UF:1, Epilogue Loop VF:vscale x 8, Epilogue Loop UF:1 +; DEBUG-VS2: LV: VF is vscale x 8 +; DEBUG-VS2: Main Loop VF:vscale x 8, Main Loop UF:1, Epilogue Loop VF:vscale x 4, Epilogue Loop UF:1 ; DEBUG-LABEL: LV: Checking a loop in 'trip_count_too_small' ; DEBUG: LV: Found a loop with a very small trip count. This loop is worth vectorizing only if no scalar iteration overheads are incurred. 
@@ -32,113 +36,221 @@ target triple = "aarch64-unknown-linux-gnu" ; DEBUG: Executing best plan with VF=vscale x 16, UF=1 define void @low_vf_ic_is_better(ptr nocapture noundef %p, i32 %tc, i16 noundef %val) { -; CHECK-LABEL: define void @low_vf_ic_is_better( -; CHECK-SAME: ptr nocapture noundef [[P:%.*]], i32 [[TC:%.*]], i16 noundef [[VAL:%.*]]) #[[ATTR0:[0-9]+]] { -; CHECK-NEXT: [[ENTRY:.*:]] -; CHECK-NEXT: [[CMP7:%.*]] = icmp ult i32 [[TC]], 19 -; CHECK-NEXT: br i1 [[CMP7]], label %[[ITER_CHECK:.*]], label %[[WHILE_END:.*]] -; CHECK: [[ITER_CHECK]]: -; CHECK-NEXT: [[CONV:%.*]] = trunc i16 [[VAL]] to i8 -; CHECK-NEXT: [[V:%.*]] = getelementptr inbounds nuw i8, ptr [[P]], i64 4 -; CHECK-NEXT: [[TMP0:%.*]] = zext nneg i32 [[TC]] to i64 -; CHECK-NEXT: [[TMP1:%.*]] = add i32 [[TC]], 1 -; CHECK-NEXT: [[TMP2:%.*]] = zext i32 [[TMP1]] to i64 -; CHECK-NEXT: [[TMP3:%.*]] = sub i64 20, [[TMP2]] -; CHECK-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-NEXT: [[TMP5:%.*]] = mul i64 [[TMP4]], 4 -; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[TMP3]], [[TMP5]] -; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[VEC_EPILOG_SCALAR_PH:.*]], label %[[VECTOR_SCEVCHECK:.*]] -; CHECK: [[VECTOR_SCEVCHECK]]: -; CHECK-NEXT: [[TMP6:%.*]] = add i32 [[TC]], 1 -; CHECK-NEXT: [[TMP7:%.*]] = zext i32 [[TMP6]] to i64 -; CHECK-NEXT: [[TMP8:%.*]] = sub i64 19, [[TMP7]] -; CHECK-NEXT: [[TMP9:%.*]] = trunc i64 [[TMP8]] to i32 -; CHECK-NEXT: [[TMP10:%.*]] = add i32 [[TMP6]], [[TMP9]] -; CHECK-NEXT: [[TMP11:%.*]] = icmp ult i32 [[TMP10]], [[TMP6]] -; CHECK-NEXT: [[TMP12:%.*]] = icmp ugt i64 [[TMP8]], 4294967295 -; CHECK-NEXT: [[TMP13:%.*]] = or i1 [[TMP11]], [[TMP12]] -; CHECK-NEXT: br i1 [[TMP13]], label %[[VEC_EPILOG_SCALAR_PH]], label %[[VECTOR_MAIN_LOOP_ITER_CHECK:.*]] -; CHECK: [[VECTOR_MAIN_LOOP_ITER_CHECK]]: -; CHECK-NEXT: [[TMP14:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-NEXT: [[TMP15:%.*]] = mul i64 [[TMP14]], 8 -; CHECK-NEXT: [[MIN_ITERS_CHECK1:%.*]] = icmp ult i64 [[TMP3]], [[TMP15]] -; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK1]], label %[[VEC_EPILOG_PH:.*]], label %[[VECTOR_PH:.*]] -; CHECK: [[VECTOR_PH]]: -; CHECK-NEXT: [[TMP16:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-NEXT: [[TMP17:%.*]] = mul i64 [[TMP16]], 8 -; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[TMP3]], [[TMP17]] -; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[TMP3]], [[N_MOD_VF]] -; CHECK-NEXT: [[TMP18:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-NEXT: [[TMP19:%.*]] = mul i64 [[TMP18]], 8 -; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement poison, i8 [[CONV]], i64 0 -; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector [[BROADCAST_SPLATINSERT]], poison, zeroinitializer -; CHECK-NEXT: br label %[[VECTOR_BODY:.*]] -; CHECK: [[VECTOR_BODY]]: -; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] -; CHECK-NEXT: [[OFFSET_IDX:%.*]] = add i64 [[TMP0]], [[INDEX]] -; CHECK-NEXT: [[TMP20:%.*]] = add i64 [[OFFSET_IDX]], 0 -; CHECK-NEXT: [[TMP21:%.*]] = getelementptr inbounds i8, ptr [[V]], i64 [[TMP20]] -; CHECK-NEXT: [[TMP22:%.*]] = getelementptr inbounds i8, ptr [[TMP21]], i32 0 -; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load , ptr [[TMP22]], align 1 -; CHECK-NEXT: [[TMP23:%.*]] = add [[WIDE_LOAD]], [[BROADCAST_SPLAT]] -; CHECK-NEXT: store [[TMP23]], ptr [[TMP22]], align 1 -; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP19]] -; CHECK-NEXT: [[TMP31:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; CHECK-NEXT: br i1 [[TMP31]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop 
[[LOOP0:![0-9]+]] -; CHECK: [[MIDDLE_BLOCK]]: -; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[TMP3]], [[N_VEC]] -; CHECK-NEXT: br i1 [[CMP_N]], label %[[WHILE_END_LOOPEXIT:.*]], label %[[VEC_EPILOG_ITER_CHECK:.*]] -; CHECK: [[VEC_EPILOG_ITER_CHECK]]: -; CHECK-NEXT: [[IND_END5:%.*]] = add i64 [[TMP0]], [[N_VEC]] -; CHECK-NEXT: [[N_VEC_REMAINING:%.*]] = sub i64 [[TMP3]], [[N_VEC]] -; CHECK-NEXT: [[TMP32:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-NEXT: [[TMP33:%.*]] = mul i64 [[TMP32]], 4 -; CHECK-NEXT: [[MIN_EPILOG_ITERS_CHECK:%.*]] = icmp ult i64 [[N_VEC_REMAINING]], [[TMP33]] -; CHECK-NEXT: br i1 [[MIN_EPILOG_ITERS_CHECK]], label %[[VEC_EPILOG_SCALAR_PH]], label %[[VEC_EPILOG_PH]] -; CHECK: [[VEC_EPILOG_PH]]: -; CHECK-NEXT: [[VEC_EPILOG_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[VEC_EPILOG_ITER_CHECK]] ], [ 0, %[[VECTOR_MAIN_LOOP_ITER_CHECK]] ] -; CHECK-NEXT: [[TMP34:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-NEXT: [[TMP35:%.*]] = mul i64 [[TMP34]], 4 -; CHECK-NEXT: [[N_MOD_VF3:%.*]] = urem i64 [[TMP3]], [[TMP35]] -; CHECK-NEXT: [[N_VEC4:%.*]] = sub i64 [[TMP3]], [[N_MOD_VF3]] -; CHECK-NEXT: [[IND_END:%.*]] = add i64 [[TMP0]], [[N_VEC4]] -; CHECK-NEXT: [[TMP36:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-NEXT: [[TMP37:%.*]] = mul i64 [[TMP36]], 4 -; CHECK-NEXT: [[BROADCAST_SPLATINSERT8:%.*]] = insertelement poison, i8 [[CONV]], i64 0 -; CHECK-NEXT: [[BROADCAST_SPLAT9:%.*]] = shufflevector [[BROADCAST_SPLATINSERT8]], poison, zeroinitializer -; CHECK-NEXT: br label %[[VEC_EPILOG_VECTOR_BODY:.*]] -; CHECK: [[VEC_EPILOG_VECTOR_BODY]]: -; CHECK-NEXT: [[INDEX6:%.*]] = phi i64 [ [[VEC_EPILOG_RESUME_VAL]], %[[VEC_EPILOG_PH]] ], [ [[INDEX_NEXT11:%.*]], %[[VEC_EPILOG_VECTOR_BODY]] ] -; CHECK-NEXT: [[OFFSET_IDX7:%.*]] = add i64 [[TMP0]], [[INDEX6]] -; CHECK-NEXT: [[TMP38:%.*]] = add i64 [[OFFSET_IDX7]], 0 -; CHECK-NEXT: [[TMP39:%.*]] = getelementptr inbounds i8, ptr [[V]], i64 [[TMP38]] -; CHECK-NEXT: [[TMP40:%.*]] = getelementptr inbounds i8, ptr [[TMP39]], i32 0 -; CHECK-NEXT: [[WIDE_LOAD7:%.*]] = load , ptr [[TMP40]], align 1 -; CHECK-NEXT: [[TMP41:%.*]] = add [[WIDE_LOAD7]], [[BROADCAST_SPLAT9]] -; CHECK-NEXT: store [[TMP41]], ptr [[TMP40]], align 1 -; CHECK-NEXT: [[INDEX_NEXT11]] = add nuw i64 [[INDEX6]], [[TMP37]] -; CHECK-NEXT: [[TMP42:%.*]] = icmp eq i64 [[INDEX_NEXT11]], [[N_VEC4]] -; CHECK-NEXT: br i1 [[TMP42]], label %[[VEC_EPILOG_MIDDLE_BLOCK:.*]], label %[[VEC_EPILOG_VECTOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]] -; CHECK: [[VEC_EPILOG_MIDDLE_BLOCK]]: -; CHECK-NEXT: [[CMP_N12:%.*]] = icmp eq i64 [[TMP3]], [[N_VEC4]] -; CHECK-NEXT: br i1 [[CMP_N12]], label %[[WHILE_END_LOOPEXIT]], label %[[VEC_EPILOG_SCALAR_PH]] -; CHECK: [[VEC_EPILOG_SCALAR_PH]]: -; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[IND_END]], %[[VEC_EPILOG_MIDDLE_BLOCK]] ], [ [[IND_END5]], %[[VEC_EPILOG_ITER_CHECK]] ], [ [[TMP0]], %[[VECTOR_SCEVCHECK]] ], [ [[TMP0]], %[[ITER_CHECK]] ] -; CHECK-NEXT: br label %[[WHILE_BODY:.*]] -; CHECK: [[WHILE_BODY]]: -; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[VEC_EPILOG_SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], %[[WHILE_BODY]] ] -; CHECK-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1 -; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw i8, ptr [[V]], i64 [[INDVARS_IV]] -; CHECK-NEXT: [[TMP43:%.*]] = load i8, ptr [[ARRAYIDX]], align 1 -; CHECK-NEXT: [[ADD:%.*]] = add i8 [[TMP43]], [[CONV]] -; CHECK-NEXT: store i8 [[ADD]], ptr [[ARRAYIDX]], align 1 -; CHECK-NEXT: [[TMP44:%.*]] = and i64 [[INDVARS_IV_NEXT]], 4294967295 -; CHECK-NEXT: 
[[EXITCOND_NOT:%.*]] = icmp eq i64 [[TMP44]], 19 -; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[WHILE_END_LOOPEXIT]], label %[[WHILE_BODY]], !llvm.loop [[LOOP4:![0-9]+]] -; CHECK: [[WHILE_END_LOOPEXIT]]: -; CHECK-NEXT: br label %[[WHILE_END]] -; CHECK: [[WHILE_END]]: -; CHECK-NEXT: ret void +; CHECK-VS1-LABEL: define void @low_vf_ic_is_better( +; CHECK-VS1-SAME: ptr nocapture noundef [[P:%.*]], i32 [[TC:%.*]], i16 noundef [[VAL:%.*]]) #[[ATTR0:[0-9]+]] { +; CHECK-VS1-NEXT: [[ENTRY:.*:]] +; CHECK-VS1-NEXT: [[CMP7:%.*]] = icmp ult i32 [[TC]], 19 +; CHECK-VS1-NEXT: br i1 [[CMP7]], label %[[ITER_CHECK:.*]], label %[[WHILE_END:.*]] +; CHECK-VS1: [[ITER_CHECK]]: +; CHECK-VS1-NEXT: [[CONV:%.*]] = trunc i16 [[VAL]] to i8 +; CHECK-VS1-NEXT: [[V:%.*]] = getelementptr inbounds nuw i8, ptr [[P]], i64 4 +; CHECK-VS1-NEXT: [[TMP0:%.*]] = zext nneg i32 [[TC]] to i64 +; CHECK-VS1-NEXT: [[TMP1:%.*]] = add i32 [[TC]], 1 +; CHECK-VS1-NEXT: [[TMP2:%.*]] = zext i32 [[TMP1]] to i64 +; CHECK-VS1-NEXT: [[TMP3:%.*]] = sub i64 20, [[TMP2]] +; CHECK-VS1-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64() +; CHECK-VS1-NEXT: [[TMP5:%.*]] = mul i64 [[TMP4]], 8 +; CHECK-VS1-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[TMP3]], [[TMP5]] +; CHECK-VS1-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[VEC_EPILOG_SCALAR_PH:.*]], label %[[VECTOR_SCEVCHECK:.*]] +; CHECK-VS1: [[VECTOR_SCEVCHECK]]: +; CHECK-VS1-NEXT: [[TMP6:%.*]] = add i32 [[TC]], 1 +; CHECK-VS1-NEXT: [[TMP7:%.*]] = zext i32 [[TMP6]] to i64 +; CHECK-VS1-NEXT: [[TMP8:%.*]] = sub i64 19, [[TMP7]] +; CHECK-VS1-NEXT: [[TMP9:%.*]] = trunc i64 [[TMP8]] to i32 +; CHECK-VS1-NEXT: [[TMP10:%.*]] = add i32 [[TMP6]], [[TMP9]] +; CHECK-VS1-NEXT: [[TMP11:%.*]] = icmp ult i32 [[TMP10]], [[TMP6]] +; CHECK-VS1-NEXT: [[TMP12:%.*]] = icmp ugt i64 [[TMP8]], 4294967295 +; CHECK-VS1-NEXT: [[TMP13:%.*]] = or i1 [[TMP11]], [[TMP12]] +; CHECK-VS1-NEXT: br i1 [[TMP13]], label %[[VEC_EPILOG_SCALAR_PH]], label %[[VECTOR_MAIN_LOOP_ITER_CHECK:.*]] +; CHECK-VS1: [[VECTOR_MAIN_LOOP_ITER_CHECK]]: +; CHECK-VS1-NEXT: [[TMP14:%.*]] = call i64 @llvm.vscale.i64() +; CHECK-VS1-NEXT: [[TMP15:%.*]] = mul i64 [[TMP14]], 16 +; CHECK-VS1-NEXT: [[MIN_ITERS_CHECK1:%.*]] = icmp ult i64 [[TMP3]], [[TMP15]] +; CHECK-VS1-NEXT: br i1 [[MIN_ITERS_CHECK1]], label %[[VEC_EPILOG_PH:.*]], label %[[VECTOR_PH:.*]] +; CHECK-VS1: [[VECTOR_PH]]: +; CHECK-VS1-NEXT: [[TMP16:%.*]] = call i64 @llvm.vscale.i64() +; CHECK-VS1-NEXT: [[TMP17:%.*]] = mul i64 [[TMP16]], 16 +; CHECK-VS1-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[TMP3]], [[TMP17]] +; CHECK-VS1-NEXT: [[N_VEC:%.*]] = sub i64 [[TMP3]], [[N_MOD_VF]] +; CHECK-VS1-NEXT: [[TMP18:%.*]] = call i64 @llvm.vscale.i64() +; CHECK-VS1-NEXT: [[TMP19:%.*]] = mul i64 [[TMP18]], 16 +; CHECK-VS1-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement poison, i8 [[CONV]], i64 0 +; CHECK-VS1-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector [[BROADCAST_SPLATINSERT]], poison, zeroinitializer +; CHECK-VS1-NEXT: br label %[[VECTOR_BODY:.*]] +; CHECK-VS1: [[VECTOR_BODY]]: +; CHECK-VS1-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] +; CHECK-VS1-NEXT: [[TMP20:%.*]] = add i64 [[TMP0]], [[INDEX]] +; CHECK-VS1-NEXT: [[TMP21:%.*]] = add i64 [[TMP20]], 0 +; CHECK-VS1-NEXT: [[TMP22:%.*]] = getelementptr inbounds i8, ptr [[V]], i64 [[TMP21]] +; CHECK-VS1-NEXT: [[TMP23:%.*]] = getelementptr inbounds i8, ptr [[TMP22]], i32 0 +; CHECK-VS1-NEXT: [[WIDE_LOAD:%.*]] = load , ptr [[TMP23]], align 1 +; CHECK-VS1-NEXT: [[TMP24:%.*]] = add [[WIDE_LOAD]], [[BROADCAST_SPLAT]] +; 
CHECK-VS1-NEXT: store [[TMP24]], ptr [[TMP23]], align 1 +; CHECK-VS1-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP19]] +; CHECK-VS1-NEXT: [[TMP25:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] +; CHECK-VS1-NEXT: br i1 [[TMP25]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] +; CHECK-VS1: [[MIDDLE_BLOCK]]: +; CHECK-VS1-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[TMP3]], [[N_VEC]] +; CHECK-VS1-NEXT: br i1 [[CMP_N]], label %[[WHILE_END_LOOPEXIT:.*]], label %[[VEC_EPILOG_ITER_CHECK:.*]] +; CHECK-VS1: [[VEC_EPILOG_ITER_CHECK]]: +; CHECK-VS1-NEXT: [[IND_END4:%.*]] = add i64 [[TMP0]], [[N_VEC]] +; CHECK-VS1-NEXT: [[N_VEC_REMAINING:%.*]] = sub i64 [[TMP3]], [[N_VEC]] +; CHECK-VS1-NEXT: [[TMP26:%.*]] = call i64 @llvm.vscale.i64() +; CHECK-VS1-NEXT: [[TMP27:%.*]] = mul i64 [[TMP26]], 8 +; CHECK-VS1-NEXT: [[MIN_EPILOG_ITERS_CHECK:%.*]] = icmp ult i64 [[N_VEC_REMAINING]], [[TMP27]] +; CHECK-VS1-NEXT: br i1 [[MIN_EPILOG_ITERS_CHECK]], label %[[VEC_EPILOG_SCALAR_PH]], label %[[VEC_EPILOG_PH]] +; CHECK-VS1: [[VEC_EPILOG_PH]]: +; CHECK-VS1-NEXT: [[VEC_EPILOG_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[VEC_EPILOG_ITER_CHECK]] ], [ 0, %[[VECTOR_MAIN_LOOP_ITER_CHECK]] ] +; CHECK-VS1-NEXT: [[TMP28:%.*]] = call i64 @llvm.vscale.i64() +; CHECK-VS1-NEXT: [[TMP29:%.*]] = mul i64 [[TMP28]], 8 +; CHECK-VS1-NEXT: [[N_MOD_VF2:%.*]] = urem i64 [[TMP3]], [[TMP29]] +; CHECK-VS1-NEXT: [[N_VEC3:%.*]] = sub i64 [[TMP3]], [[N_MOD_VF2]] +; CHECK-VS1-NEXT: [[IND_END:%.*]] = add i64 [[TMP0]], [[N_VEC3]] +; CHECK-VS1-NEXT: [[TMP30:%.*]] = call i64 @llvm.vscale.i64() +; CHECK-VS1-NEXT: [[TMP31:%.*]] = mul i64 [[TMP30]], 8 +; CHECK-VS1-NEXT: [[BROADCAST_SPLATINSERT7:%.*]] = insertelement poison, i8 [[CONV]], i64 0 +; CHECK-VS1-NEXT: [[BROADCAST_SPLAT8:%.*]] = shufflevector [[BROADCAST_SPLATINSERT7]], poison, zeroinitializer +; CHECK-VS1-NEXT: br label %[[VEC_EPILOG_VECTOR_BODY:.*]] +; CHECK-VS1: [[VEC_EPILOG_VECTOR_BODY]]: +; CHECK-VS1-NEXT: [[INDEX5:%.*]] = phi i64 [ [[VEC_EPILOG_RESUME_VAL]], %[[VEC_EPILOG_PH]] ], [ [[INDEX_NEXT9:%.*]], %[[VEC_EPILOG_VECTOR_BODY]] ] +; CHECK-VS1-NEXT: [[OFFSET_IDX:%.*]] = add i64 [[TMP0]], [[INDEX5]] +; CHECK-VS1-NEXT: [[TMP32:%.*]] = add i64 [[OFFSET_IDX]], 0 +; CHECK-VS1-NEXT: [[TMP33:%.*]] = getelementptr inbounds i8, ptr [[V]], i64 [[TMP32]] +; CHECK-VS1-NEXT: [[TMP34:%.*]] = getelementptr inbounds i8, ptr [[TMP33]], i32 0 +; CHECK-VS1-NEXT: [[WIDE_LOAD6:%.*]] = load , ptr [[TMP34]], align 1 +; CHECK-VS1-NEXT: [[TMP35:%.*]] = add [[WIDE_LOAD6]], [[BROADCAST_SPLAT8]] +; CHECK-VS1-NEXT: store [[TMP35]], ptr [[TMP34]], align 1 +; CHECK-VS1-NEXT: [[INDEX_NEXT9]] = add nuw i64 [[INDEX5]], [[TMP31]] +; CHECK-VS1-NEXT: [[TMP36:%.*]] = icmp eq i64 [[INDEX_NEXT9]], [[N_VEC3]] +; CHECK-VS1-NEXT: br i1 [[TMP36]], label %[[VEC_EPILOG_MIDDLE_BLOCK:.*]], label %[[VEC_EPILOG_VECTOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]] +; CHECK-VS1: [[VEC_EPILOG_MIDDLE_BLOCK]]: +; CHECK-VS1-NEXT: [[CMP_N10:%.*]] = icmp eq i64 [[TMP3]], [[N_VEC3]] +; CHECK-VS1-NEXT: br i1 [[CMP_N10]], label %[[WHILE_END_LOOPEXIT]], label %[[VEC_EPILOG_SCALAR_PH]] +; CHECK-VS1: [[VEC_EPILOG_SCALAR_PH]]: +; CHECK-VS1-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[IND_END]], %[[VEC_EPILOG_MIDDLE_BLOCK]] ], [ [[IND_END4]], %[[VEC_EPILOG_ITER_CHECK]] ], [ [[TMP0]], %[[VECTOR_SCEVCHECK]] ], [ [[TMP0]], %[[ITER_CHECK]] ] +; CHECK-VS1-NEXT: br label %[[WHILE_BODY:.*]] +; CHECK-VS1: [[WHILE_BODY]]: +; CHECK-VS1-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[VEC_EPILOG_SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[WHILE_BODY]] ] 
+; CHECK-VS1-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 +; CHECK-VS1-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw i8, ptr [[V]], i64 [[IV]] +; CHECK-VS1-NEXT: [[TMP37:%.*]] = load i8, ptr [[ARRAYIDX]], align 1 +; CHECK-VS1-NEXT: [[ADD:%.*]] = add i8 [[TMP37]], [[CONV]] +; CHECK-VS1-NEXT: store i8 [[ADD]], ptr [[ARRAYIDX]], align 1 +; CHECK-VS1-NEXT: [[TMP38:%.*]] = and i64 [[IV_NEXT]], 4294967295 +; CHECK-VS1-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[TMP38]], 19 +; CHECK-VS1-NEXT: br i1 [[EXITCOND_NOT]], label %[[WHILE_END_LOOPEXIT]], label %[[WHILE_BODY]], !llvm.loop [[LOOP4:![0-9]+]] +; CHECK-VS1: [[WHILE_END_LOOPEXIT]]: +; CHECK-VS1-NEXT: br label %[[WHILE_END]] +; CHECK-VS1: [[WHILE_END]]: +; CHECK-VS1-NEXT: ret void +; +; CHECK-VS2-LABEL: define void @low_vf_ic_is_better( +; CHECK-VS2-SAME: ptr nocapture noundef [[P:%.*]], i32 [[TC:%.*]], i16 noundef [[VAL:%.*]]) #[[ATTR0:[0-9]+]] { +; CHECK-VS2-NEXT: [[ENTRY:.*:]] +; CHECK-VS2-NEXT: [[CMP7:%.*]] = icmp ult i32 [[TC]], 19 +; CHECK-VS2-NEXT: br i1 [[CMP7]], label %[[ITER_CHECK:.*]], label %[[WHILE_END:.*]] +; CHECK-VS2: [[ITER_CHECK]]: +; CHECK-VS2-NEXT: [[CONV:%.*]] = trunc i16 [[VAL]] to i8 +; CHECK-VS2-NEXT: [[V:%.*]] = getelementptr inbounds nuw i8, ptr [[P]], i64 4 +; CHECK-VS2-NEXT: [[TMP0:%.*]] = zext nneg i32 [[TC]] to i64 +; CHECK-VS2-NEXT: [[TMP1:%.*]] = add i32 [[TC]], 1 +; CHECK-VS2-NEXT: [[TMP2:%.*]] = zext i32 [[TMP1]] to i64 +; CHECK-VS2-NEXT: [[TMP3:%.*]] = sub i64 20, [[TMP2]] +; CHECK-VS2-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64() +; CHECK-VS2-NEXT: [[TMP5:%.*]] = mul i64 [[TMP4]], 4 +; CHECK-VS2-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[TMP3]], [[TMP5]] +; CHECK-VS2-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[VEC_EPILOG_SCALAR_PH:.*]], label %[[VECTOR_SCEVCHECK:.*]] +; CHECK-VS2: [[VECTOR_SCEVCHECK]]: +; CHECK-VS2-NEXT: [[TMP6:%.*]] = add i32 [[TC]], 1 +; CHECK-VS2-NEXT: [[TMP7:%.*]] = zext i32 [[TMP6]] to i64 +; CHECK-VS2-NEXT: [[TMP8:%.*]] = sub i64 19, [[TMP7]] +; CHECK-VS2-NEXT: [[TMP9:%.*]] = trunc i64 [[TMP8]] to i32 +; CHECK-VS2-NEXT: [[TMP10:%.*]] = add i32 [[TMP6]], [[TMP9]] +; CHECK-VS2-NEXT: [[TMP11:%.*]] = icmp ult i32 [[TMP10]], [[TMP6]] +; CHECK-VS2-NEXT: [[TMP12:%.*]] = icmp ugt i64 [[TMP8]], 4294967295 +; CHECK-VS2-NEXT: [[TMP13:%.*]] = or i1 [[TMP11]], [[TMP12]] +; CHECK-VS2-NEXT: br i1 [[TMP13]], label %[[VEC_EPILOG_SCALAR_PH]], label %[[VECTOR_MAIN_LOOP_ITER_CHECK:.*]] +; CHECK-VS2: [[VECTOR_MAIN_LOOP_ITER_CHECK]]: +; CHECK-VS2-NEXT: [[TMP14:%.*]] = call i64 @llvm.vscale.i64() +; CHECK-VS2-NEXT: [[TMP15:%.*]] = mul i64 [[TMP14]], 8 +; CHECK-VS2-NEXT: [[MIN_ITERS_CHECK1:%.*]] = icmp ult i64 [[TMP3]], [[TMP15]] +; CHECK-VS2-NEXT: br i1 [[MIN_ITERS_CHECK1]], label %[[VEC_EPILOG_PH:.*]], label %[[VECTOR_PH:.*]] +; CHECK-VS2: [[VECTOR_PH]]: +; CHECK-VS2-NEXT: [[TMP16:%.*]] = call i64 @llvm.vscale.i64() +; CHECK-VS2-NEXT: [[TMP17:%.*]] = mul i64 [[TMP16]], 8 +; CHECK-VS2-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[TMP3]], [[TMP17]] +; CHECK-VS2-NEXT: [[N_VEC:%.*]] = sub i64 [[TMP3]], [[N_MOD_VF]] +; CHECK-VS2-NEXT: [[TMP18:%.*]] = call i64 @llvm.vscale.i64() +; CHECK-VS2-NEXT: [[TMP19:%.*]] = mul i64 [[TMP18]], 8 +; CHECK-VS2-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement poison, i8 [[CONV]], i64 0 +; CHECK-VS2-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector [[BROADCAST_SPLATINSERT]], poison, zeroinitializer +; CHECK-VS2-NEXT: br label %[[VECTOR_BODY:.*]] +; CHECK-VS2: [[VECTOR_BODY]]: +; CHECK-VS2-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] 
] +; CHECK-VS2-NEXT: [[TMP20:%.*]] = add i64 [[TMP0]], [[INDEX]] +; CHECK-VS2-NEXT: [[TMP21:%.*]] = add i64 [[TMP20]], 0 +; CHECK-VS2-NEXT: [[TMP22:%.*]] = getelementptr inbounds i8, ptr [[V]], i64 [[TMP21]] +; CHECK-VS2-NEXT: [[TMP23:%.*]] = getelementptr inbounds i8, ptr [[TMP22]], i32 0 +; CHECK-VS2-NEXT: [[WIDE_LOAD:%.*]] = load , ptr [[TMP23]], align 1 +; CHECK-VS2-NEXT: [[TMP24:%.*]] = add [[WIDE_LOAD]], [[BROADCAST_SPLAT]] +; CHECK-VS2-NEXT: store [[TMP24]], ptr [[TMP23]], align 1 +; CHECK-VS2-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP19]] +; CHECK-VS2-NEXT: [[TMP25:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] +; CHECK-VS2-NEXT: br i1 [[TMP25]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] +; CHECK-VS2: [[MIDDLE_BLOCK]]: +; CHECK-VS2-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[TMP3]], [[N_VEC]] +; CHECK-VS2-NEXT: br i1 [[CMP_N]], label %[[WHILE_END_LOOPEXIT:.*]], label %[[VEC_EPILOG_ITER_CHECK:.*]] +; CHECK-VS2: [[VEC_EPILOG_ITER_CHECK]]: +; CHECK-VS2-NEXT: [[IND_END4:%.*]] = add i64 [[TMP0]], [[N_VEC]] +; CHECK-VS2-NEXT: [[N_VEC_REMAINING:%.*]] = sub i64 [[TMP3]], [[N_VEC]] +; CHECK-VS2-NEXT: [[TMP26:%.*]] = call i64 @llvm.vscale.i64() +; CHECK-VS2-NEXT: [[TMP27:%.*]] = mul i64 [[TMP26]], 4 +; CHECK-VS2-NEXT: [[MIN_EPILOG_ITERS_CHECK:%.*]] = icmp ult i64 [[N_VEC_REMAINING]], [[TMP27]] +; CHECK-VS2-NEXT: br i1 [[MIN_EPILOG_ITERS_CHECK]], label %[[VEC_EPILOG_SCALAR_PH]], label %[[VEC_EPILOG_PH]] +; CHECK-VS2: [[VEC_EPILOG_PH]]: +; CHECK-VS2-NEXT: [[VEC_EPILOG_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[VEC_EPILOG_ITER_CHECK]] ], [ 0, %[[VECTOR_MAIN_LOOP_ITER_CHECK]] ] +; CHECK-VS2-NEXT: [[TMP28:%.*]] = call i64 @llvm.vscale.i64() +; CHECK-VS2-NEXT: [[TMP29:%.*]] = mul i64 [[TMP28]], 4 +; CHECK-VS2-NEXT: [[N_MOD_VF2:%.*]] = urem i64 [[TMP3]], [[TMP29]] +; CHECK-VS2-NEXT: [[N_VEC3:%.*]] = sub i64 [[TMP3]], [[N_MOD_VF2]] +; CHECK-VS2-NEXT: [[IND_END:%.*]] = add i64 [[TMP0]], [[N_VEC3]] +; CHECK-VS2-NEXT: [[TMP30:%.*]] = call i64 @llvm.vscale.i64() +; CHECK-VS2-NEXT: [[TMP31:%.*]] = mul i64 [[TMP30]], 4 +; CHECK-VS2-NEXT: [[BROADCAST_SPLATINSERT7:%.*]] = insertelement poison, i8 [[CONV]], i64 0 +; CHECK-VS2-NEXT: [[BROADCAST_SPLAT8:%.*]] = shufflevector [[BROADCAST_SPLATINSERT7]], poison, zeroinitializer +; CHECK-VS2-NEXT: br label %[[VEC_EPILOG_VECTOR_BODY:.*]] +; CHECK-VS2: [[VEC_EPILOG_VECTOR_BODY]]: +; CHECK-VS2-NEXT: [[INDEX5:%.*]] = phi i64 [ [[VEC_EPILOG_RESUME_VAL]], %[[VEC_EPILOG_PH]] ], [ [[INDEX_NEXT9:%.*]], %[[VEC_EPILOG_VECTOR_BODY]] ] +; CHECK-VS2-NEXT: [[OFFSET_IDX:%.*]] = add i64 [[TMP0]], [[INDEX5]] +; CHECK-VS2-NEXT: [[TMP32:%.*]] = add i64 [[OFFSET_IDX]], 0 +; CHECK-VS2-NEXT: [[TMP33:%.*]] = getelementptr inbounds i8, ptr [[V]], i64 [[TMP32]] +; CHECK-VS2-NEXT: [[TMP34:%.*]] = getelementptr inbounds i8, ptr [[TMP33]], i32 0 +; CHECK-VS2-NEXT: [[WIDE_LOAD6:%.*]] = load , ptr [[TMP34]], align 1 +; CHECK-VS2-NEXT: [[TMP35:%.*]] = add [[WIDE_LOAD6]], [[BROADCAST_SPLAT8]] +; CHECK-VS2-NEXT: store [[TMP35]], ptr [[TMP34]], align 1 +; CHECK-VS2-NEXT: [[INDEX_NEXT9]] = add nuw i64 [[INDEX5]], [[TMP31]] +; CHECK-VS2-NEXT: [[TMP36:%.*]] = icmp eq i64 [[INDEX_NEXT9]], [[N_VEC3]] +; CHECK-VS2-NEXT: br i1 [[TMP36]], label %[[VEC_EPILOG_MIDDLE_BLOCK:.*]], label %[[VEC_EPILOG_VECTOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]] +; CHECK-VS2: [[VEC_EPILOG_MIDDLE_BLOCK]]: +; CHECK-VS2-NEXT: [[CMP_N10:%.*]] = icmp eq i64 [[TMP3]], [[N_VEC3]] +; CHECK-VS2-NEXT: br i1 [[CMP_N10]], label %[[WHILE_END_LOOPEXIT]], label %[[VEC_EPILOG_SCALAR_PH]] +; CHECK-VS2: 
[[VEC_EPILOG_SCALAR_PH]]: +; CHECK-VS2-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[IND_END]], %[[VEC_EPILOG_MIDDLE_BLOCK]] ], [ [[IND_END4]], %[[VEC_EPILOG_ITER_CHECK]] ], [ [[TMP0]], %[[VECTOR_SCEVCHECK]] ], [ [[TMP0]], %[[ITER_CHECK]] ] +; CHECK-VS2-NEXT: br label %[[WHILE_BODY:.*]] +; CHECK-VS2: [[WHILE_BODY]]: +; CHECK-VS2-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[VEC_EPILOG_SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[WHILE_BODY]] ] +; CHECK-VS2-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 +; CHECK-VS2-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw i8, ptr [[V]], i64 [[IV]] +; CHECK-VS2-NEXT: [[TMP37:%.*]] = load i8, ptr [[ARRAYIDX]], align 1 +; CHECK-VS2-NEXT: [[ADD:%.*]] = add i8 [[TMP37]], [[CONV]] +; CHECK-VS2-NEXT: store i8 [[ADD]], ptr [[ARRAYIDX]], align 1 +; CHECK-VS2-NEXT: [[TMP38:%.*]] = and i64 [[IV_NEXT]], 4294967295 +; CHECK-VS2-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[TMP38]], 19 +; CHECK-VS2-NEXT: br i1 [[EXITCOND_NOT]], label %[[WHILE_END_LOOPEXIT]], label %[[WHILE_BODY]], !llvm.loop [[LOOP4:![0-9]+]] +; CHECK-VS2: [[WHILE_END_LOOPEXIT]]: +; CHECK-VS2-NEXT: br label %[[WHILE_END]] +; CHECK-VS2: [[WHILE_END]]: +; CHECK-VS2-NEXT: ret void ; entry: %cmp7 = icmp ult i32 %tc, 19 @@ -387,11 +499,19 @@ while.end: !0 = distinct !{!0, !1} !1 = !{!"llvm.loop.vectorize.predicate.enable", i1 true} ;. -; CHECK: [[LOOP0]] = distinct !{[[LOOP0]], [[META1:![0-9]+]], [[META2:![0-9]+]]} -; CHECK: [[META1]] = !{!"llvm.loop.isvectorized", i32 1} -; CHECK: [[META2]] = !{!"llvm.loop.unroll.runtime.disable"} -; CHECK: [[LOOP3]] = distinct !{[[LOOP3]], [[META1]], [[META2]]} -; CHECK: [[LOOP4]] = distinct !{[[LOOP4]], [[META1]]} -; CHECK: [[LOOP5]] = distinct !{[[LOOP5]], [[META1]], [[META2]]} -; CHECK: [[LOOP6]] = distinct !{[[LOOP6]], [[META1]]} +; CHECK-VS1: [[LOOP0]] = distinct !{[[LOOP0]], [[META1:![0-9]+]], [[META2:![0-9]+]]} +; CHECK-VS1: [[META1]] = !{!"llvm.loop.isvectorized", i32 1} +; CHECK-VS1: [[META2]] = !{!"llvm.loop.unroll.runtime.disable"} +; CHECK-VS1: [[LOOP3]] = distinct !{[[LOOP3]], [[META1]], [[META2]]} +; CHECK-VS1: [[LOOP4]] = distinct !{[[LOOP4]], [[META1]]} +; CHECK-VS1: [[LOOP5]] = distinct !{[[LOOP5]], [[META1]], [[META2]]} +; CHECK-VS1: [[LOOP6]] = distinct !{[[LOOP6]], [[META1]]} +;. +; CHECK-VS2: [[LOOP0]] = distinct !{[[LOOP0]], [[META1:![0-9]+]], [[META2:![0-9]+]]} +; CHECK-VS2: [[META1]] = !{!"llvm.loop.isvectorized", i32 1} +; CHECK-VS2: [[META2]] = !{!"llvm.loop.unroll.runtime.disable"} +; CHECK-VS2: [[LOOP3]] = distinct !{[[LOOP3]], [[META1]], [[META2]]} +; CHECK-VS2: [[LOOP4]] = distinct !{[[LOOP4]], [[META1]]} +; CHECK-VS2: [[LOOP5]] = distinct !{[[LOOP5]], [[META1]], [[META2]]} +; CHECK-VS2: [[LOOP6]] = distinct !{[[LOOP6]], [[META1]]} ;. 
diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/reduction-recurrence-costs-sve.ll b/llvm/test/Transforms/LoopVectorize/AArch64/reduction-recurrence-costs-sve.ll index 307b47c42a4233..0cea16d103678b 100644 --- a/llvm/test/Transforms/LoopVectorize/AArch64/reduction-recurrence-costs-sve.ll +++ b/llvm/test/Transforms/LoopVectorize/AArch64/reduction-recurrence-costs-sve.ll @@ -1,6 +1,7 @@ ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 4 ; RUN: opt -p loop-vectorize -S %s | FileCheck --check-prefixes=DEFAULT %s -; RUN: opt -p loop-vectorize -prefer-predicate-over-epilogue=predicate-else-scalar-epilogue -S %s | FileCheck --check-prefixes=PRED %s +; RUN: opt -p loop-vectorize -mcpu=neoverse-v1 -S %s | FileCheck --check-prefixes=VSCALEFORTUNING2 %s +; RUN: opt -p loop-vectorize -mcpu=neoverse-v1 -prefer-predicate-over-epilogue=predicate-else-scalar-epilogue -S %s | FileCheck --check-prefixes=PRED %s target datalayout = "e-m:o-i64:64-i128:128-n32:64-S128" target triple = "arm64-apple-macosx14.0.0" @@ -9,104 +10,12 @@ define i32 @chained_recurrences(i32 %x, i64 %y, ptr %src.1, i32 %z, ptr %src.2) ; DEFAULT-LABEL: define i32 @chained_recurrences( ; DEFAULT-SAME: i32 [[X:%.*]], i64 [[Y:%.*]], ptr [[SRC_1:%.*]], i32 [[Z:%.*]], ptr [[SRC_2:%.*]]) #[[ATTR0:[0-9]+]] { ; DEFAULT-NEXT: entry: -; DEFAULT-NEXT: [[TMP0:%.*]] = add i64 [[Y]], 1 -; DEFAULT-NEXT: [[TMP1:%.*]] = call i64 @llvm.vscale.i64() -; DEFAULT-NEXT: [[TMP2:%.*]] = mul i64 [[TMP1]], 8 -; DEFAULT-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[TMP0]], [[TMP2]] -; DEFAULT-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]] -; DEFAULT: vector.ph: -; DEFAULT-NEXT: [[TMP3:%.*]] = call i64 @llvm.vscale.i64() -; DEFAULT-NEXT: [[TMP4:%.*]] = mul i64 [[TMP3]], 8 -; DEFAULT-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[TMP0]], [[TMP4]] -; DEFAULT-NEXT: [[N_VEC:%.*]] = sub i64 [[TMP0]], [[N_MOD_VF]] -; DEFAULT-NEXT: [[TMP5:%.*]] = call i64 @llvm.vscale.i64() -; DEFAULT-NEXT: [[TMP6:%.*]] = mul i64 [[TMP5]], 8 -; DEFAULT-NEXT: [[TMP13:%.*]] = add i64 [[Y]], 1 -; DEFAULT-NEXT: [[TMP15:%.*]] = getelementptr i32, ptr [[SRC_1]], i64 [[TMP13]] -; DEFAULT-NEXT: [[BROADCAST_SPLATINSERT6:%.*]] = insertelement poison, i32 [[X]], i64 0 -; DEFAULT-NEXT: [[BROADCAST_SPLAT7:%.*]] = shufflevector [[BROADCAST_SPLATINSERT6]], poison, zeroinitializer -; DEFAULT-NEXT: [[TMP25:%.*]] = lshr [[BROADCAST_SPLAT7]], splat (i32 1) -; DEFAULT-NEXT: [[TMP31:%.*]] = shl [[BROADCAST_SPLAT7]], splat (i32 1) -; DEFAULT-NEXT: [[TMP33:%.*]] = or [[TMP25]], [[TMP31]] -; DEFAULT-NEXT: [[BROADCAST_SPLATINSERT8:%.*]] = insertelement poison, i32 [[Z]], i64 0 -; DEFAULT-NEXT: [[BROADCAST_SPLAT9:%.*]] = shufflevector [[BROADCAST_SPLATINSERT8]], poison, zeroinitializer -; DEFAULT-NEXT: [[TMP39:%.*]] = or [[BROADCAST_SPLAT9]], [[BROADCAST_SPLAT7]] -; DEFAULT-NEXT: [[TMP14:%.*]] = and [[TMP39]], splat (i32 1) -; DEFAULT-NEXT: [[TMP43:%.*]] = xor [[TMP14]], splat (i32 1) -; DEFAULT-NEXT: [[TMP45:%.*]] = zext [[TMP43]] to -; DEFAULT-NEXT: [[TMP61:%.*]] = extractelement [[TMP45]], i32 0 -; DEFAULT-NEXT: [[TMP62:%.*]] = getelementptr i32, ptr [[SRC_2]], i64 [[TMP61]] -; DEFAULT-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement poison, ptr [[TMP62]], i64 0 -; DEFAULT-NEXT: [[TMP47:%.*]] = shufflevector [[DOTSPLATINSERT]], poison, zeroinitializer -; DEFAULT-NEXT: [[TMP7:%.*]] = call i32 @llvm.vscale.i32() -; DEFAULT-NEXT: [[TMP8:%.*]] = mul i32 [[TMP7]], 4 -; DEFAULT-NEXT: [[TMP9:%.*]] = sub i32 [[TMP8]], 1 -; DEFAULT-NEXT: 
[[VECTOR_RECUR_INIT:%.*]] = insertelement poison, i32 0, i32 [[TMP9]] -; DEFAULT-NEXT: [[TMP10:%.*]] = call i32 @llvm.vscale.i32() -; DEFAULT-NEXT: [[TMP11:%.*]] = mul i32 [[TMP10]], 4 -; DEFAULT-NEXT: [[TMP12:%.*]] = sub i32 [[TMP11]], 1 -; DEFAULT-NEXT: [[VECTOR_RECUR_INIT1:%.*]] = insertelement poison, i32 0, i32 [[TMP12]] -; DEFAULT-NEXT: br label [[VECTOR_BODY:%.*]] -; DEFAULT: vector.body: -; DEFAULT-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] -; DEFAULT-NEXT: [[VECTOR_RECUR:%.*]] = phi [ [[VECTOR_RECUR_INIT]], [[VECTOR_PH]] ], [ [[BROADCAST_SPLAT5:%.*]], [[VECTOR_BODY]] ] -; DEFAULT-NEXT: [[VECTOR_RECUR2:%.*]] = phi [ [[VECTOR_RECUR_INIT1]], [[VECTOR_PH]] ], [ [[TMP20:%.*]], [[VECTOR_BODY]] ] -; DEFAULT-NEXT: [[VEC_PHI:%.*]] = phi [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP57:%.*]], [[VECTOR_BODY]] ] -; DEFAULT-NEXT: [[VEC_PHI3:%.*]] = phi [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP58:%.*]], [[VECTOR_BODY]] ] -; DEFAULT-NEXT: [[TMP18:%.*]] = load i32, ptr [[TMP15]], align 4 -; DEFAULT-NEXT: [[BROADCAST_SPLATINSERT4:%.*]] = insertelement poison, i32 [[TMP18]], i64 0 -; DEFAULT-NEXT: [[BROADCAST_SPLAT5]] = shufflevector [[BROADCAST_SPLATINSERT4]], poison, zeroinitializer -; DEFAULT-NEXT: [[TMP19:%.*]] = call @llvm.vector.splice.nxv4i32( [[VECTOR_RECUR]], [[BROADCAST_SPLAT5]], i32 -1) -; DEFAULT-NEXT: [[TMP20]] = call @llvm.vector.splice.nxv4i32( [[BROADCAST_SPLAT5]], [[BROADCAST_SPLAT5]], i32 -1) -; DEFAULT-NEXT: [[TMP21:%.*]] = call @llvm.vector.splice.nxv4i32( [[VECTOR_RECUR2]], [[TMP19]], i32 -1) -; DEFAULT-NEXT: [[TMP22:%.*]] = call @llvm.vector.splice.nxv4i32( [[TMP19]], [[TMP20]], i32 -1) -; DEFAULT-NEXT: [[TMP23:%.*]] = or [[TMP21]], [[BROADCAST_SPLAT7]] -; DEFAULT-NEXT: [[TMP24:%.*]] = or [[TMP22]], [[BROADCAST_SPLAT7]] -; DEFAULT-NEXT: [[TMP34:%.*]] = shl [[TMP23]], splat (i32 1) -; DEFAULT-NEXT: [[TMP32:%.*]] = shl [[TMP24]], splat (i32 1) -; DEFAULT-NEXT: [[TMP29:%.*]] = or [[TMP34]], splat (i32 2) -; DEFAULT-NEXT: [[TMP30:%.*]] = or [[TMP32]], splat (i32 2) -; DEFAULT-NEXT: [[TMP35:%.*]] = or [[TMP33]], [[TMP29]] -; DEFAULT-NEXT: [[TMP36:%.*]] = or [[TMP33]], [[TMP30]] -; DEFAULT-NEXT: [[TMP37:%.*]] = or [[TMP35]], [[BROADCAST_SPLAT7]] -; DEFAULT-NEXT: [[TMP38:%.*]] = or [[TMP36]], [[BROADCAST_SPLAT7]] -; DEFAULT-NEXT: [[WIDE_MASKED_GATHER:%.*]] = call @llvm.masked.gather.nxv4i32.nxv4p0( [[TMP47]], i32 4, splat (i1 true), poison) -; DEFAULT-NEXT: [[WIDE_MASKED_GATHER10:%.*]] = call @llvm.masked.gather.nxv4i32.nxv4p0( [[TMP47]], i32 4, splat (i1 true), poison) -; DEFAULT-NEXT: [[TMP49:%.*]] = lshr [[TMP37]], splat (i32 1) -; DEFAULT-NEXT: [[TMP50:%.*]] = lshr [[TMP38]], splat (i32 1) -; DEFAULT-NEXT: [[TMP51:%.*]] = zext [[TMP49]] to -; DEFAULT-NEXT: [[TMP52:%.*]] = zext [[TMP50]] to -; DEFAULT-NEXT: [[TMP53:%.*]] = getelementptr i32, ptr [[SRC_2]], [[TMP51]] -; DEFAULT-NEXT: [[TMP54:%.*]] = getelementptr i32, ptr [[SRC_2]], [[TMP52]] -; DEFAULT-NEXT: [[WIDE_MASKED_GATHER11:%.*]] = call @llvm.masked.gather.nxv4i32.nxv4p0( [[TMP53]], i32 4, splat (i1 true), poison) -; DEFAULT-NEXT: [[WIDE_MASKED_GATHER12:%.*]] = call @llvm.masked.gather.nxv4i32.nxv4p0( [[TMP54]], i32 4, splat (i1 true), poison) -; DEFAULT-NEXT: [[TMP55:%.*]] = or [[WIDE_MASKED_GATHER]], [[VEC_PHI]] -; DEFAULT-NEXT: [[TMP56:%.*]] = or [[WIDE_MASKED_GATHER10]], [[VEC_PHI3]] -; DEFAULT-NEXT: [[TMP57]] = or [[TMP55]], [[WIDE_MASKED_GATHER11]] -; DEFAULT-NEXT: [[TMP58]] = or [[TMP56]], [[WIDE_MASKED_GATHER12]] -; DEFAULT-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 
[[TMP6]] -; DEFAULT-NEXT: [[TMP59:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; DEFAULT-NEXT: br i1 [[TMP59]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] -; DEFAULT: middle.block: -; DEFAULT-NEXT: [[BIN_RDX:%.*]] = or [[TMP58]], [[TMP57]] -; DEFAULT-NEXT: [[TMP60:%.*]] = call i32 @llvm.vector.reduce.or.nxv4i32( [[BIN_RDX]]) -; DEFAULT-NEXT: [[TMP64:%.*]] = call i32 @llvm.vscale.i32() -; DEFAULT-NEXT: [[TMP65:%.*]] = mul i32 [[TMP64]], 4 -; DEFAULT-NEXT: [[TMP66:%.*]] = sub i32 [[TMP65]], 1 -; DEFAULT-NEXT: [[VECTOR_RECUR_EXTRACT13:%.*]] = extractelement [[TMP20]], i32 [[TMP66]] -; DEFAULT-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[TMP0]], [[N_VEC]] -; DEFAULT-NEXT: br i1 [[CMP_N]], label [[EXIT:%.*]], label [[SCALAR_PH]] -; DEFAULT: scalar.ph: -; DEFAULT-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ] -; DEFAULT-NEXT: [[SCALAR_RECUR_INIT:%.*]] = phi i32 [ [[TMP18]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY]] ] -; DEFAULT-NEXT: [[SCALAR_RECUR_INIT14:%.*]] = phi i32 [ [[VECTOR_RECUR_EXTRACT13]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY]] ] -; DEFAULT-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ [[TMP60]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY]] ] ; DEFAULT-NEXT: br label [[LOOP:%.*]] ; DEFAULT: loop: -; DEFAULT-NEXT: [[SCALAR_RECUR:%.*]] = phi i32 [ [[SCALAR_RECUR_INIT]], [[SCALAR_PH]] ], [ [[TMP68:%.*]], [[LOOP]] ] -; DEFAULT-NEXT: [[SCALAR_RECUR15:%.*]] = phi i32 [ [[SCALAR_RECUR_INIT14]], [[SCALAR_PH]] ], [ [[SCALAR_RECUR]], [[LOOP]] ] -; DEFAULT-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[LOOP]] ] -; DEFAULT-NEXT: [[SUM_RED:%.*]] = phi i32 [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ], [ [[RED_2:%.*]], [[LOOP]] ] +; DEFAULT-NEXT: [[TMP0:%.*]] = phi i32 [ 0, [[ENTRY:%.*]] ], [ [[TMP68:%.*]], [[LOOP]] ] +; DEFAULT-NEXT: [[SCALAR_RECUR15:%.*]] = phi i32 [ 0, [[ENTRY]] ], [ [[TMP0]], [[LOOP]] ] +; DEFAULT-NEXT: [[IV:%.*]] = phi i64 [ 0, [[ENTRY]] ], [ [[IV_NEXT:%.*]], [[LOOP]] ] +; DEFAULT-NEXT: [[SUM_RED:%.*]] = phi i32 [ 0, [[ENTRY]] ], [ [[RED_2:%.*]], [[LOOP]] ] ; DEFAULT-NEXT: [[TMP67:%.*]] = add i64 [[Y]], 1 ; DEFAULT-NEXT: [[GEP_1:%.*]] = getelementptr i32, ptr [[SRC_1]], i64 [[TMP67]] ; DEFAULT-NEXT: [[TMP68]] = load i32, ptr [[GEP_1]], align 4 @@ -132,109 +41,240 @@ define i32 @chained_recurrences(i32 %x, i64 %y, ptr %src.1, i32 %z, ptr %src.2) ; DEFAULT-NEXT: [[RED_1:%.*]] = or i32 [[TMP74]], [[SUM_RED]] ; DEFAULT-NEXT: [[RED_2]] = or i32 [[RED_1]], [[TMP75]] ; DEFAULT-NEXT: [[EC:%.*]] = icmp eq i64 [[IV]], [[Y]] -; DEFAULT-NEXT: br i1 [[EC]], label [[EXIT]], label [[LOOP]], !llvm.loop [[LOOP3:![0-9]+]] +; DEFAULT-NEXT: br i1 [[EC]], label [[EXIT:%.*]], label [[LOOP]] ; DEFAULT: exit: -; DEFAULT-NEXT: [[RED_2_LCSSA:%.*]] = phi i32 [ [[RED_2]], [[LOOP]] ], [ [[TMP60]], [[MIDDLE_BLOCK]] ] +; DEFAULT-NEXT: [[RED_2_LCSSA:%.*]] = phi i32 [ [[RED_2]], [[LOOP]] ] ; DEFAULT-NEXT: ret i32 [[RED_2_LCSSA]] ; +; VSCALEFORTUNING2-LABEL: define i32 @chained_recurrences( +; VSCALEFORTUNING2-SAME: i32 [[X:%.*]], i64 [[Y:%.*]], ptr [[SRC_1:%.*]], i32 [[Z:%.*]], ptr [[SRC_2:%.*]]) #[[ATTR0:[0-9]+]] { +; VSCALEFORTUNING2-NEXT: entry: +; VSCALEFORTUNING2-NEXT: [[TMP0:%.*]] = add i64 [[Y]], 1 +; VSCALEFORTUNING2-NEXT: [[TMP1:%.*]] = call i64 @llvm.vscale.i64() +; VSCALEFORTUNING2-NEXT: [[TMP2:%.*]] = mul i64 [[TMP1]], 8 +; VSCALEFORTUNING2-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[TMP0]], [[TMP2]] +; VSCALEFORTUNING2-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]] +; 
VSCALEFORTUNING2: vector.ph: +; VSCALEFORTUNING2-NEXT: [[TMP3:%.*]] = call i64 @llvm.vscale.i64() +; VSCALEFORTUNING2-NEXT: [[TMP4:%.*]] = mul i64 [[TMP3]], 8 +; VSCALEFORTUNING2-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[TMP0]], [[TMP4]] +; VSCALEFORTUNING2-NEXT: [[N_VEC:%.*]] = sub i64 [[TMP0]], [[N_MOD_VF]] +; VSCALEFORTUNING2-NEXT: [[TMP5:%.*]] = call i64 @llvm.vscale.i64() +; VSCALEFORTUNING2-NEXT: [[TMP6:%.*]] = mul i64 [[TMP5]], 8 +; VSCALEFORTUNING2-NEXT: [[TMP7:%.*]] = add i64 [[Y]], 1 +; VSCALEFORTUNING2-NEXT: [[TMP8:%.*]] = getelementptr i32, ptr [[SRC_1]], i64 [[TMP7]] +; VSCALEFORTUNING2-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement poison, i32 [[X]], i64 0 +; VSCALEFORTUNING2-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector [[BROADCAST_SPLATINSERT]], poison, zeroinitializer +; VSCALEFORTUNING2-NEXT: [[TMP9:%.*]] = lshr [[BROADCAST_SPLAT]], splat (i32 1) +; VSCALEFORTUNING2-NEXT: [[TMP10:%.*]] = shl [[BROADCAST_SPLAT]], splat (i32 1) +; VSCALEFORTUNING2-NEXT: [[TMP11:%.*]] = or [[TMP9]], [[TMP10]] +; VSCALEFORTUNING2-NEXT: [[BROADCAST_SPLATINSERT1:%.*]] = insertelement poison, i32 [[Z]], i64 0 +; VSCALEFORTUNING2-NEXT: [[BROADCAST_SPLAT2:%.*]] = shufflevector [[BROADCAST_SPLATINSERT1]], poison, zeroinitializer +; VSCALEFORTUNING2-NEXT: [[TMP12:%.*]] = or [[BROADCAST_SPLAT2]], [[BROADCAST_SPLAT]] +; VSCALEFORTUNING2-NEXT: [[TMP13:%.*]] = and [[TMP12]], splat (i32 1) +; VSCALEFORTUNING2-NEXT: [[TMP14:%.*]] = xor [[TMP13]], splat (i32 1) +; VSCALEFORTUNING2-NEXT: [[TMP15:%.*]] = zext [[TMP14]] to +; VSCALEFORTUNING2-NEXT: [[TMP16:%.*]] = extractelement [[TMP15]], i32 0 +; VSCALEFORTUNING2-NEXT: [[TMP17:%.*]] = getelementptr i32, ptr [[SRC_2]], i64 [[TMP16]] +; VSCALEFORTUNING2-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement poison, ptr [[TMP17]], i64 0 +; VSCALEFORTUNING2-NEXT: [[DOTSPLAT:%.*]] = shufflevector [[DOTSPLATINSERT]], poison, zeroinitializer +; VSCALEFORTUNING2-NEXT: [[TMP18:%.*]] = call i32 @llvm.vscale.i32() +; VSCALEFORTUNING2-NEXT: [[TMP19:%.*]] = mul i32 [[TMP18]], 4 +; VSCALEFORTUNING2-NEXT: [[TMP20:%.*]] = sub i32 [[TMP19]], 1 +; VSCALEFORTUNING2-NEXT: [[VECTOR_RECUR_INIT:%.*]] = insertelement poison, i32 0, i32 [[TMP20]] +; VSCALEFORTUNING2-NEXT: [[TMP21:%.*]] = call i32 @llvm.vscale.i32() +; VSCALEFORTUNING2-NEXT: [[TMP22:%.*]] = mul i32 [[TMP21]], 4 +; VSCALEFORTUNING2-NEXT: [[TMP23:%.*]] = sub i32 [[TMP22]], 1 +; VSCALEFORTUNING2-NEXT: [[VECTOR_RECUR_INIT3:%.*]] = insertelement poison, i32 0, i32 [[TMP23]] +; VSCALEFORTUNING2-NEXT: br label [[VECTOR_BODY:%.*]] +; VSCALEFORTUNING2: vector.body: +; VSCALEFORTUNING2-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] +; VSCALEFORTUNING2-NEXT: [[VECTOR_RECUR:%.*]] = phi [ [[VECTOR_RECUR_INIT]], [[VECTOR_PH]] ], [ [[BROADCAST_SPLAT7:%.*]], [[VECTOR_BODY]] ] +; VSCALEFORTUNING2-NEXT: [[VECTOR_RECUR4:%.*]] = phi [ [[VECTOR_RECUR_INIT3]], [[VECTOR_PH]] ], [ [[TMP26:%.*]], [[VECTOR_BODY]] ] +; VSCALEFORTUNING2-NEXT: [[VEC_PHI:%.*]] = phi [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP47:%.*]], [[VECTOR_BODY]] ] +; VSCALEFORTUNING2-NEXT: [[VEC_PHI5:%.*]] = phi [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP48:%.*]], [[VECTOR_BODY]] ] +; VSCALEFORTUNING2-NEXT: [[TMP24:%.*]] = load i32, ptr [[TMP8]], align 4 +; VSCALEFORTUNING2-NEXT: [[BROADCAST_SPLATINSERT6:%.*]] = insertelement poison, i32 [[TMP24]], i64 0 +; VSCALEFORTUNING2-NEXT: [[BROADCAST_SPLAT7]] = shufflevector [[BROADCAST_SPLATINSERT6]], poison, zeroinitializer +; VSCALEFORTUNING2-NEXT: [[TMP25:%.*]] = call 
@llvm.vector.splice.nxv4i32( [[VECTOR_RECUR]], [[BROADCAST_SPLAT7]], i32 -1) +; VSCALEFORTUNING2-NEXT: [[TMP26]] = call @llvm.vector.splice.nxv4i32( [[BROADCAST_SPLAT7]], [[BROADCAST_SPLAT7]], i32 -1) +; VSCALEFORTUNING2-NEXT: [[TMP27:%.*]] = call @llvm.vector.splice.nxv4i32( [[VECTOR_RECUR4]], [[TMP25]], i32 -1) +; VSCALEFORTUNING2-NEXT: [[TMP28:%.*]] = call @llvm.vector.splice.nxv4i32( [[TMP25]], [[TMP26]], i32 -1) +; VSCALEFORTUNING2-NEXT: [[TMP29:%.*]] = or [[TMP27]], [[BROADCAST_SPLAT]] +; VSCALEFORTUNING2-NEXT: [[TMP30:%.*]] = or [[TMP28]], [[BROADCAST_SPLAT]] +; VSCALEFORTUNING2-NEXT: [[TMP31:%.*]] = shl [[TMP29]], splat (i32 1) +; VSCALEFORTUNING2-NEXT: [[TMP32:%.*]] = shl [[TMP30]], splat (i32 1) +; VSCALEFORTUNING2-NEXT: [[TMP33:%.*]] = or [[TMP31]], splat (i32 2) +; VSCALEFORTUNING2-NEXT: [[TMP34:%.*]] = or [[TMP32]], splat (i32 2) +; VSCALEFORTUNING2-NEXT: [[TMP35:%.*]] = or [[TMP11]], [[TMP33]] +; VSCALEFORTUNING2-NEXT: [[TMP36:%.*]] = or [[TMP11]], [[TMP34]] +; VSCALEFORTUNING2-NEXT: [[TMP37:%.*]] = or [[TMP35]], [[BROADCAST_SPLAT]] +; VSCALEFORTUNING2-NEXT: [[TMP38:%.*]] = or [[TMP36]], [[BROADCAST_SPLAT]] +; VSCALEFORTUNING2-NEXT: [[WIDE_MASKED_GATHER:%.*]] = call @llvm.masked.gather.nxv4i32.nxv4p0( [[DOTSPLAT]], i32 4, splat (i1 true), poison) +; VSCALEFORTUNING2-NEXT: [[WIDE_MASKED_GATHER8:%.*]] = call @llvm.masked.gather.nxv4i32.nxv4p0( [[DOTSPLAT]], i32 4, splat (i1 true), poison) +; VSCALEFORTUNING2-NEXT: [[TMP39:%.*]] = lshr [[TMP37]], splat (i32 1) +; VSCALEFORTUNING2-NEXT: [[TMP40:%.*]] = lshr [[TMP38]], splat (i32 1) +; VSCALEFORTUNING2-NEXT: [[TMP41:%.*]] = zext [[TMP39]] to +; VSCALEFORTUNING2-NEXT: [[TMP42:%.*]] = zext [[TMP40]] to +; VSCALEFORTUNING2-NEXT: [[TMP43:%.*]] = getelementptr i32, ptr [[SRC_2]], [[TMP41]] +; VSCALEFORTUNING2-NEXT: [[TMP44:%.*]] = getelementptr i32, ptr [[SRC_2]], [[TMP42]] +; VSCALEFORTUNING2-NEXT: [[WIDE_MASKED_GATHER9:%.*]] = call @llvm.masked.gather.nxv4i32.nxv4p0( [[TMP43]], i32 4, splat (i1 true), poison) +; VSCALEFORTUNING2-NEXT: [[WIDE_MASKED_GATHER10:%.*]] = call @llvm.masked.gather.nxv4i32.nxv4p0( [[TMP44]], i32 4, splat (i1 true), poison) +; VSCALEFORTUNING2-NEXT: [[TMP45:%.*]] = or [[WIDE_MASKED_GATHER]], [[VEC_PHI]] +; VSCALEFORTUNING2-NEXT: [[TMP46:%.*]] = or [[WIDE_MASKED_GATHER8]], [[VEC_PHI5]] +; VSCALEFORTUNING2-NEXT: [[TMP47]] = or [[TMP45]], [[WIDE_MASKED_GATHER9]] +; VSCALEFORTUNING2-NEXT: [[TMP48]] = or [[TMP46]], [[WIDE_MASKED_GATHER10]] +; VSCALEFORTUNING2-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP6]] +; VSCALEFORTUNING2-NEXT: [[TMP49:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] +; VSCALEFORTUNING2-NEXT: br i1 [[TMP49]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] +; VSCALEFORTUNING2: middle.block: +; VSCALEFORTUNING2-NEXT: [[BIN_RDX:%.*]] = or [[TMP48]], [[TMP47]] +; VSCALEFORTUNING2-NEXT: [[TMP50:%.*]] = call i32 @llvm.vector.reduce.or.nxv4i32( [[BIN_RDX]]) +; VSCALEFORTUNING2-NEXT: [[TMP51:%.*]] = call i32 @llvm.vscale.i32() +; VSCALEFORTUNING2-NEXT: [[TMP52:%.*]] = mul i32 [[TMP51]], 4 +; VSCALEFORTUNING2-NEXT: [[TMP53:%.*]] = sub i32 [[TMP52]], 1 +; VSCALEFORTUNING2-NEXT: [[VECTOR_RECUR_EXTRACT:%.*]] = extractelement [[TMP26]], i32 [[TMP53]] +; VSCALEFORTUNING2-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[TMP0]], [[N_VEC]] +; VSCALEFORTUNING2-NEXT: br i1 [[CMP_N]], label [[EXIT:%.*]], label [[SCALAR_PH]] +; VSCALEFORTUNING2: scalar.ph: +; VSCALEFORTUNING2-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ] +; VSCALEFORTUNING2-NEXT: 
[[SCALAR_RECUR_INIT:%.*]] = phi i32 [ [[TMP24]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY]] ] +; VSCALEFORTUNING2-NEXT: [[SCALAR_RECUR_INIT11:%.*]] = phi i32 [ [[VECTOR_RECUR_EXTRACT]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY]] ] +; VSCALEFORTUNING2-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ [[TMP50]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY]] ] +; VSCALEFORTUNING2-NEXT: br label [[LOOP:%.*]] +; VSCALEFORTUNING2: loop: +; VSCALEFORTUNING2-NEXT: [[TMP54:%.*]] = phi i32 [ [[SCALAR_RECUR_INIT]], [[SCALAR_PH]] ], [ [[TMP57:%.*]], [[LOOP]] ] +; VSCALEFORTUNING2-NEXT: [[TMP55:%.*]] = phi i32 [ [[SCALAR_RECUR_INIT11]], [[SCALAR_PH]] ], [ [[TMP54]], [[LOOP]] ] +; VSCALEFORTUNING2-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[LOOP]] ] +; VSCALEFORTUNING2-NEXT: [[SUM_RED:%.*]] = phi i32 [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ], [ [[RED_2:%.*]], [[LOOP]] ] +; VSCALEFORTUNING2-NEXT: [[TMP56:%.*]] = add i64 [[Y]], 1 +; VSCALEFORTUNING2-NEXT: [[GEP_1:%.*]] = getelementptr i32, ptr [[SRC_1]], i64 [[TMP56]] +; VSCALEFORTUNING2-NEXT: [[TMP57]] = load i32, ptr [[GEP_1]], align 4 +; VSCALEFORTUNING2-NEXT: [[OR3:%.*]] = or i32 [[TMP55]], [[X]] +; VSCALEFORTUNING2-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1 +; VSCALEFORTUNING2-NEXT: [[SHR:%.*]] = lshr i32 [[X]], 1 +; VSCALEFORTUNING2-NEXT: [[TMP58:%.*]] = shl i32 [[OR3]], 1 +; VSCALEFORTUNING2-NEXT: [[TMP59:%.*]] = or i32 [[TMP58]], 2 +; VSCALEFORTUNING2-NEXT: [[SHL19:%.*]] = shl i32 [[X]], 1 +; VSCALEFORTUNING2-NEXT: [[TMP60:%.*]] = or i32 [[SHR]], [[SHL19]] +; VSCALEFORTUNING2-NEXT: [[TMP61:%.*]] = or i32 [[TMP60]], [[TMP59]] +; VSCALEFORTUNING2-NEXT: [[TMP62:%.*]] = or i32 [[TMP61]], [[X]] +; VSCALEFORTUNING2-NEXT: [[OR20:%.*]] = or i32 [[Z]], [[X]] +; VSCALEFORTUNING2-NEXT: [[NOT:%.*]] = and i32 [[OR20]], 1 +; VSCALEFORTUNING2-NEXT: [[AND:%.*]] = xor i32 [[NOT]], 1 +; VSCALEFORTUNING2-NEXT: [[IDX_EXT_1:%.*]] = zext i32 [[AND]] to i64 +; VSCALEFORTUNING2-NEXT: [[GEP_2:%.*]] = getelementptr i32, ptr [[SRC_2]], i64 [[IDX_EXT_1]] +; VSCALEFORTUNING2-NEXT: [[TMP63:%.*]] = load i32, ptr [[GEP_2]], align 4 +; VSCALEFORTUNING2-NEXT: [[SHR24:%.*]] = lshr i32 [[TMP62]], 1 +; VSCALEFORTUNING2-NEXT: [[IDX_EXT_2:%.*]] = zext i32 [[SHR24]] to i64 +; VSCALEFORTUNING2-NEXT: [[GEP_3:%.*]] = getelementptr i32, ptr [[SRC_2]], i64 [[IDX_EXT_2]] +; VSCALEFORTUNING2-NEXT: [[TMP64:%.*]] = load i32, ptr [[GEP_3]], align 4 +; VSCALEFORTUNING2-NEXT: [[RED_1:%.*]] = or i32 [[TMP63]], [[SUM_RED]] +; VSCALEFORTUNING2-NEXT: [[RED_2]] = or i32 [[RED_1]], [[TMP64]] +; VSCALEFORTUNING2-NEXT: [[EC:%.*]] = icmp eq i64 [[IV]], [[Y]] +; VSCALEFORTUNING2-NEXT: br i1 [[EC]], label [[EXIT]], label [[LOOP]], !llvm.loop [[LOOP3:![0-9]+]] +; VSCALEFORTUNING2: exit: +; VSCALEFORTUNING2-NEXT: [[RED_2_LCSSA:%.*]] = phi i32 [ [[RED_2]], [[LOOP]] ], [ [[TMP50]], [[MIDDLE_BLOCK]] ] +; VSCALEFORTUNING2-NEXT: ret i32 [[RED_2_LCSSA]] +; ; PRED-LABEL: define i32 @chained_recurrences( ; PRED-SAME: i32 [[X:%.*]], i64 [[Y:%.*]], ptr [[SRC_1:%.*]], i32 [[Z:%.*]], ptr [[SRC_2:%.*]]) #[[ATTR0:[0-9]+]] { ; PRED-NEXT: entry: ; PRED-NEXT: [[TMP0:%.*]] = add i64 [[Y]], 1 -; PRED-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]] +; PRED-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[ENTRY:%.*]] ; PRED: vector.ph: ; PRED-NEXT: [[TMP1:%.*]] = call i64 @llvm.vscale.i64() ; PRED-NEXT: [[TMP2:%.*]] = mul i64 [[TMP1]], 4 -; PRED-NEXT: [[TMP5:%.*]] = sub i64 [[TMP2]], 1 -; PRED-NEXT: [[N_RND_UP:%.*]] = add i64 [[TMP0]], [[TMP5]] +; PRED-NEXT: [[TMP3:%.*]] = sub i64 [[TMP2]], 1 +; PRED-NEXT: 
[[N_RND_UP:%.*]] = add i64 [[TMP0]], [[TMP3]] ; PRED-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP2]] ; PRED-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]] +; PRED-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64() +; PRED-NEXT: [[TMP5:%.*]] = mul i64 [[TMP4]], 4 ; PRED-NEXT: [[TMP6:%.*]] = call i64 @llvm.vscale.i64() ; PRED-NEXT: [[TMP7:%.*]] = mul i64 [[TMP6]], 4 -; PRED-NEXT: [[TMP8:%.*]] = call i64 @llvm.vscale.i64() -; PRED-NEXT: [[TMP9:%.*]] = mul i64 [[TMP8]], 4 -; PRED-NEXT: [[TMP10:%.*]] = sub i64 [[TMP0]], [[TMP9]] -; PRED-NEXT: [[TMP11:%.*]] = icmp ugt i64 [[TMP0]], [[TMP9]] -; PRED-NEXT: [[TMP12:%.*]] = select i1 [[TMP11]], i64 [[TMP10]], i64 0 +; PRED-NEXT: [[TMP8:%.*]] = sub i64 [[TMP0]], [[TMP7]] +; PRED-NEXT: [[TMP9:%.*]] = icmp ugt i64 [[TMP0]], [[TMP7]] +; PRED-NEXT: [[TMP10:%.*]] = select i1 [[TMP9]], i64 [[TMP8]], i64 0 ; PRED-NEXT: [[ACTIVE_LANE_MASK_ENTRY:%.*]] = call @llvm.get.active.lane.mask.nxv4i1.i64(i64 0, i64 [[TMP0]]) -; PRED-NEXT: [[TMP19:%.*]] = add i64 [[Y]], 1 -; PRED-NEXT: [[TMP20:%.*]] = getelementptr i32, ptr [[SRC_1]], i64 [[TMP19]] -; PRED-NEXT: [[BROADCAST_SPLATINSERT3:%.*]] = insertelement poison, i32 [[X]], i64 0 -; PRED-NEXT: [[BROADCAST_SPLAT4:%.*]] = shufflevector [[BROADCAST_SPLATINSERT3]], poison, zeroinitializer -; PRED-NEXT: [[TMP25:%.*]] = lshr [[BROADCAST_SPLAT4]], splat (i32 1) -; PRED-NEXT: [[TMP28:%.*]] = shl [[BROADCAST_SPLAT4]], splat (i32 1) -; PRED-NEXT: [[TMP29:%.*]] = or [[TMP25]], [[TMP28]] -; PRED-NEXT: [[BROADCAST_SPLATINSERT5:%.*]] = insertelement poison, i32 [[Z]], i64 0 -; PRED-NEXT: [[BROADCAST_SPLAT6:%.*]] = shufflevector [[BROADCAST_SPLATINSERT5]], poison, zeroinitializer -; PRED-NEXT: [[TMP32:%.*]] = or [[BROADCAST_SPLAT6]], [[BROADCAST_SPLAT4]] -; PRED-NEXT: [[TMP26:%.*]] = and [[TMP32]], splat (i32 1) -; PRED-NEXT: [[TMP34:%.*]] = xor [[TMP26]], splat (i32 1) -; PRED-NEXT: [[TMP35:%.*]] = zext [[TMP34]] to -; PRED-NEXT: [[TMP46:%.*]] = extractelement [[TMP35]], i32 0 -; PRED-NEXT: [[TMP47:%.*]] = getelementptr i32, ptr [[SRC_2]], i64 [[TMP46]] -; PRED-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement poison, ptr [[TMP47]], i64 0 -; PRED-NEXT: [[TMP36:%.*]] = shufflevector [[DOTSPLATINSERT]], poison, zeroinitializer -; PRED-NEXT: [[TMP13:%.*]] = call i32 @llvm.vscale.i32() -; PRED-NEXT: [[TMP14:%.*]] = mul i32 [[TMP13]], 4 -; PRED-NEXT: [[TMP15:%.*]] = sub i32 [[TMP14]], 1 -; PRED-NEXT: [[VECTOR_RECUR_INIT:%.*]] = insertelement poison, i32 0, i32 [[TMP15]] -; PRED-NEXT: [[TMP16:%.*]] = call i32 @llvm.vscale.i32() -; PRED-NEXT: [[TMP17:%.*]] = mul i32 [[TMP16]], 4 -; PRED-NEXT: [[TMP18:%.*]] = sub i32 [[TMP17]], 1 -; PRED-NEXT: [[VECTOR_RECUR_INIT1:%.*]] = insertelement poison, i32 0, i32 [[TMP18]] -; PRED-NEXT: br label [[VECTOR_BODY:%.*]] +; PRED-NEXT: [[TMP11:%.*]] = add i64 [[Y]], 1 +; PRED-NEXT: [[TMP12:%.*]] = getelementptr i32, ptr [[SRC_1]], i64 [[TMP11]] +; PRED-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement poison, i32 [[X]], i64 0 +; PRED-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector [[BROADCAST_SPLATINSERT]], poison, zeroinitializer +; PRED-NEXT: [[TMP13:%.*]] = lshr [[BROADCAST_SPLAT]], splat (i32 1) +; PRED-NEXT: [[TMP14:%.*]] = shl [[BROADCAST_SPLAT]], splat (i32 1) +; PRED-NEXT: [[TMP15:%.*]] = or [[TMP13]], [[TMP14]] +; PRED-NEXT: [[BROADCAST_SPLATINSERT1:%.*]] = insertelement poison, i32 [[Z]], i64 0 +; PRED-NEXT: [[BROADCAST_SPLAT2:%.*]] = shufflevector [[BROADCAST_SPLATINSERT1]], poison, zeroinitializer +; PRED-NEXT: [[TMP16:%.*]] = or [[BROADCAST_SPLAT2]], [[BROADCAST_SPLAT]] +; 
PRED-NEXT: [[TMP17:%.*]] = and [[TMP16]], splat (i32 1) +; PRED-NEXT: [[TMP18:%.*]] = xor [[TMP17]], splat (i32 1) +; PRED-NEXT: [[TMP19:%.*]] = zext [[TMP18]] to +; PRED-NEXT: [[TMP20:%.*]] = extractelement [[TMP19]], i32 0 +; PRED-NEXT: [[TMP21:%.*]] = getelementptr i32, ptr [[SRC_2]], i64 [[TMP20]] +; PRED-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement poison, ptr [[TMP21]], i64 0 +; PRED-NEXT: [[DOTSPLAT:%.*]] = shufflevector [[DOTSPLATINSERT]], poison, zeroinitializer +; PRED-NEXT: [[TMP22:%.*]] = call i32 @llvm.vscale.i32() +; PRED-NEXT: [[TMP23:%.*]] = mul i32 [[TMP22]], 4 +; PRED-NEXT: [[TMP24:%.*]] = sub i32 [[TMP23]], 1 +; PRED-NEXT: [[VECTOR_RECUR_INIT:%.*]] = insertelement poison, i32 0, i32 [[TMP24]] +; PRED-NEXT: [[TMP25:%.*]] = call i32 @llvm.vscale.i32() +; PRED-NEXT: [[TMP26:%.*]] = mul i32 [[TMP25]], 4 +; PRED-NEXT: [[TMP27:%.*]] = sub i32 [[TMP26]], 1 +; PRED-NEXT: [[VECTOR_RECUR_INIT3:%.*]] = insertelement poison, i32 0, i32 [[TMP27]] +; PRED-NEXT: br label [[LOOP:%.*]] ; PRED: vector.body: -; PRED-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] -; PRED-NEXT: [[ACTIVE_LANE_MASK:%.*]] = phi [ [[ACTIVE_LANE_MASK_ENTRY]], [[VECTOR_PH]] ], [ [[ACTIVE_LANE_MASK_NEXT:%.*]], [[VECTOR_BODY]] ] -; PRED-NEXT: [[VECTOR_RECUR:%.*]] = phi [ [[VECTOR_RECUR_INIT]], [[VECTOR_PH]] ], [ [[BROADCAST_SPLAT:%.*]], [[VECTOR_BODY]] ] -; PRED-NEXT: [[VECTOR_RECUR2:%.*]] = phi [ [[VECTOR_RECUR_INIT1]], [[VECTOR_PH]] ], [ [[TMP22:%.*]], [[VECTOR_BODY]] ] -; PRED-NEXT: [[VEC_PHI:%.*]] = phi [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP42:%.*]], [[VECTOR_BODY]] ] -; PRED-NEXT: [[TMP21:%.*]] = load i32, ptr [[TMP20]], align 4 -; PRED-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement poison, i32 [[TMP21]], i64 0 -; PRED-NEXT: [[BROADCAST_SPLAT]] = shufflevector [[BROADCAST_SPLATINSERT]], poison, zeroinitializer -; PRED-NEXT: [[TMP22]] = call @llvm.vector.splice.nxv4i32( [[VECTOR_RECUR]], [[BROADCAST_SPLAT]], i32 -1) -; PRED-NEXT: [[TMP23:%.*]] = call @llvm.vector.splice.nxv4i32( [[VECTOR_RECUR2]], [[TMP22]], i32 -1) -; PRED-NEXT: [[TMP24:%.*]] = or [[TMP23]], [[BROADCAST_SPLAT4]] -; PRED-NEXT: [[TMP33:%.*]] = shl [[TMP24]], splat (i32 1) -; PRED-NEXT: [[TMP27:%.*]] = or [[TMP33]], splat (i32 2) -; PRED-NEXT: [[TMP30:%.*]] = or [[TMP29]], [[TMP27]] -; PRED-NEXT: [[TMP31:%.*]] = or [[TMP30]], [[BROADCAST_SPLAT4]] -; PRED-NEXT: [[WIDE_MASKED_GATHER:%.*]] = call @llvm.masked.gather.nxv4i32.nxv4p0( [[TMP36]], i32 4, [[ACTIVE_LANE_MASK]], poison) -; PRED-NEXT: [[TMP37:%.*]] = lshr [[TMP31]], splat (i32 1) -; PRED-NEXT: [[TMP38:%.*]] = zext [[TMP37]] to -; PRED-NEXT: [[TMP39:%.*]] = getelementptr i32, ptr [[SRC_2]], [[TMP38]] -; PRED-NEXT: [[WIDE_MASKED_GATHER7:%.*]] = call @llvm.masked.gather.nxv4i32.nxv4p0( [[TMP39]], i32 4, [[ACTIVE_LANE_MASK]], poison) -; PRED-NEXT: [[TMP40:%.*]] = or [[WIDE_MASKED_GATHER]], [[VEC_PHI]] -; PRED-NEXT: [[TMP41:%.*]] = or [[TMP40]], [[WIDE_MASKED_GATHER7]] -; PRED-NEXT: [[TMP42]] = select [[ACTIVE_LANE_MASK]], [[TMP41]], [[VEC_PHI]] -; PRED-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], [[TMP7]] -; PRED-NEXT: [[ACTIVE_LANE_MASK_NEXT]] = call @llvm.get.active.lane.mask.nxv4i1.i64(i64 [[INDEX]], i64 [[TMP12]]) -; PRED-NEXT: [[TMP43:%.*]] = xor [[ACTIVE_LANE_MASK_NEXT]], splat (i1 true) -; PRED-NEXT: [[TMP44:%.*]] = extractelement [[TMP43]], i32 0 -; PRED-NEXT: br i1 [[TMP44]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] +; PRED-NEXT: [[IV:%.*]] = phi i64 [ 0, [[ENTRY]] ], [ [[IV_NEXT:%.*]], [[LOOP]] ] 
+; PRED-NEXT: [[ACTIVE_LANE_MASK:%.*]] = phi [ [[ACTIVE_LANE_MASK_ENTRY]], [[ENTRY]] ], [ [[ACTIVE_LANE_MASK_NEXT:%.*]], [[LOOP]] ] +; PRED-NEXT: [[VECTOR_RECUR:%.*]] = phi [ [[VECTOR_RECUR_INIT]], [[ENTRY]] ], [ [[BROADCAST_SPLAT6:%.*]], [[LOOP]] ] +; PRED-NEXT: [[VECTOR_RECUR4:%.*]] = phi [ [[VECTOR_RECUR_INIT3]], [[ENTRY]] ], [ [[TMP29:%.*]], [[LOOP]] ] +; PRED-NEXT: [[VEC_PHI:%.*]] = phi [ zeroinitializer, [[ENTRY]] ], [ [[TMP41:%.*]], [[LOOP]] ] +; PRED-NEXT: [[TMP28:%.*]] = load i32, ptr [[TMP12]], align 4 +; PRED-NEXT: [[BROADCAST_SPLATINSERT5:%.*]] = insertelement poison, i32 [[TMP28]], i64 0 +; PRED-NEXT: [[BROADCAST_SPLAT6]] = shufflevector [[BROADCAST_SPLATINSERT5]], poison, zeroinitializer +; PRED-NEXT: [[TMP29]] = call @llvm.vector.splice.nxv4i32( [[VECTOR_RECUR]], [[BROADCAST_SPLAT6]], i32 -1) +; PRED-NEXT: [[TMP30:%.*]] = call @llvm.vector.splice.nxv4i32( [[VECTOR_RECUR4]], [[TMP29]], i32 -1) +; PRED-NEXT: [[TMP31:%.*]] = or [[TMP30]], [[BROADCAST_SPLAT]] +; PRED-NEXT: [[TMP32:%.*]] = shl [[TMP31]], splat (i32 1) +; PRED-NEXT: [[TMP33:%.*]] = or [[TMP32]], splat (i32 2) +; PRED-NEXT: [[TMP34:%.*]] = or [[TMP15]], [[TMP33]] +; PRED-NEXT: [[TMP35:%.*]] = or [[TMP34]], [[BROADCAST_SPLAT]] +; PRED-NEXT: [[WIDE_MASKED_GATHER:%.*]] = call @llvm.masked.gather.nxv4i32.nxv4p0( [[DOTSPLAT]], i32 4, [[ACTIVE_LANE_MASK]], poison) +; PRED-NEXT: [[TMP36:%.*]] = lshr [[TMP35]], splat (i32 1) +; PRED-NEXT: [[TMP37:%.*]] = zext [[TMP36]] to +; PRED-NEXT: [[TMP38:%.*]] = getelementptr i32, ptr [[SRC_2]], [[TMP37]] +; PRED-NEXT: [[WIDE_MASKED_GATHER7:%.*]] = call @llvm.masked.gather.nxv4i32.nxv4p0( [[TMP38]], i32 4, [[ACTIVE_LANE_MASK]], poison) +; PRED-NEXT: [[TMP39:%.*]] = or [[WIDE_MASKED_GATHER]], [[VEC_PHI]] +; PRED-NEXT: [[TMP40:%.*]] = or [[TMP39]], [[WIDE_MASKED_GATHER7]] +; PRED-NEXT: [[TMP41]] = select [[ACTIVE_LANE_MASK]], [[TMP40]], [[VEC_PHI]] +; PRED-NEXT: [[IV_NEXT]] = add i64 [[IV]], [[TMP5]] +; PRED-NEXT: [[ACTIVE_LANE_MASK_NEXT]] = call @llvm.get.active.lane.mask.nxv4i1.i64(i64 [[IV]], i64 [[TMP10]]) +; PRED-NEXT: [[TMP42:%.*]] = xor [[ACTIVE_LANE_MASK_NEXT]], splat (i1 true) +; PRED-NEXT: [[TMP43:%.*]] = extractelement [[TMP42]], i32 0 +; PRED-NEXT: br i1 [[TMP43]], label [[MIDDLE_BLOCK:%.*]], label [[LOOP]], !llvm.loop [[LOOP0:![0-9]+]] ; PRED: middle.block: -; PRED-NEXT: [[TMP45:%.*]] = call i32 @llvm.vector.reduce.or.nxv4i32( [[TMP42]]) -; PRED-NEXT: [[TMP49:%.*]] = call i32 @llvm.vscale.i32() -; PRED-NEXT: [[TMP50:%.*]] = mul i32 [[TMP49]], 4 -; PRED-NEXT: [[TMP51:%.*]] = sub i32 [[TMP50]], 1 -; PRED-NEXT: [[VECTOR_RECUR_EXTRACT8:%.*]] = extractelement [[TMP22]], i32 [[TMP51]] +; PRED-NEXT: [[TMP44:%.*]] = call i32 @llvm.vector.reduce.or.nxv4i32( [[TMP41]]) +; PRED-NEXT: [[TMP45:%.*]] = call i32 @llvm.vscale.i32() +; PRED-NEXT: [[TMP46:%.*]] = mul i32 [[TMP45]], 4 +; PRED-NEXT: [[TMP47:%.*]] = sub i32 [[TMP46]], 1 +; PRED-NEXT: [[VECTOR_RECUR_EXTRACT:%.*]] = extractelement [[TMP29]], i32 [[TMP47]] ; PRED-NEXT: br i1 true, label [[EXIT:%.*]], label [[SCALAR_PH]] ; PRED: scalar.ph: -; PRED-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ] -; PRED-NEXT: [[SCALAR_RECUR_INIT:%.*]] = phi i32 [ [[TMP21]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY]] ] -; PRED-NEXT: [[SCALAR_RECUR_INIT9:%.*]] = phi i32 [ [[VECTOR_RECUR_EXTRACT8]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY]] ] -; PRED-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ [[TMP45]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY]] ] -; PRED-NEXT: br label [[LOOP:%.*]] +; PRED-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 
[[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY1:%.*]] ] +; PRED-NEXT: [[SCALAR_RECUR_INIT:%.*]] = phi i32 [ [[TMP28]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY1]] ] +; PRED-NEXT: [[SCALAR_RECUR_INIT8:%.*]] = phi i32 [ [[VECTOR_RECUR_EXTRACT]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY1]] ] +; PRED-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ [[TMP44]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY1]] ] +; PRED-NEXT: br label [[LOOP1:%.*]] ; PRED: loop: -; PRED-NEXT: [[SCALAR_RECUR:%.*]] = phi i32 [ [[SCALAR_RECUR_INIT]], [[SCALAR_PH]] ], [ [[TMP53:%.*]], [[LOOP]] ] -; PRED-NEXT: [[SCALAR_RECUR10:%.*]] = phi i32 [ [[SCALAR_RECUR_INIT9]], [[SCALAR_PH]] ], [ [[SCALAR_RECUR]], [[LOOP]] ] -; PRED-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[LOOP]] ] -; PRED-NEXT: [[SUM_RED:%.*]] = phi i32 [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ], [ [[RED_2:%.*]], [[LOOP]] ] +; PRED-NEXT: [[TMP48:%.*]] = phi i32 [ [[SCALAR_RECUR_INIT]], [[SCALAR_PH]] ], [ [[TMP53:%.*]], [[LOOP1]] ] +; PRED-NEXT: [[SCALAR_RECUR10:%.*]] = phi i32 [ [[SCALAR_RECUR_INIT8]], [[SCALAR_PH]] ], [ [[TMP48]], [[LOOP1]] ] +; PRED-NEXT: [[IV1:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT1:%.*]], [[LOOP1]] ] +; PRED-NEXT: [[SUM_RED:%.*]] = phi i32 [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ], [ [[RED_2:%.*]], [[LOOP1]] ] ; PRED-NEXT: [[TMP52:%.*]] = add i64 [[Y]], 1 ; PRED-NEXT: [[GEP_1:%.*]] = getelementptr i32, ptr [[SRC_1]], i64 [[TMP52]] ; PRED-NEXT: [[TMP53]] = load i32, ptr [[GEP_1]], align 4 ; PRED-NEXT: [[OR3:%.*]] = or i32 [[SCALAR_RECUR10]], [[X]] -; PRED-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1 +; PRED-NEXT: [[IV_NEXT1]] = add i64 [[IV1]], 1 ; PRED-NEXT: [[SHR:%.*]] = lshr i32 [[X]], 1 ; PRED-NEXT: [[TMP54:%.*]] = shl i32 [[OR3]], 1 ; PRED-NEXT: [[TMP55:%.*]] = or i32 [[TMP54]], 2 @@ -254,10 +294,10 @@ define i32 @chained_recurrences(i32 %x, i64 %y, ptr %src.1, i32 %z, ptr %src.2) ; PRED-NEXT: [[TMP60:%.*]] = load i32, ptr [[GEP_3]], align 4 ; PRED-NEXT: [[RED_1:%.*]] = or i32 [[TMP59]], [[SUM_RED]] ; PRED-NEXT: [[RED_2]] = or i32 [[RED_1]], [[TMP60]] -; PRED-NEXT: [[EC:%.*]] = icmp eq i64 [[IV]], [[Y]] -; PRED-NEXT: br i1 [[EC]], label [[EXIT]], label [[LOOP]], !llvm.loop [[LOOP3:![0-9]+]] +; PRED-NEXT: [[EC:%.*]] = icmp eq i64 [[IV1]], [[Y]] +; PRED-NEXT: br i1 [[EC]], label [[EXIT]], label [[LOOP1]], !llvm.loop [[LOOP3:![0-9]+]] ; PRED: exit: -; PRED-NEXT: [[RED_2_LCSSA:%.*]] = phi i32 [ [[RED_2]], [[LOOP]] ], [ [[TMP45]], [[MIDDLE_BLOCK]] ] +; PRED-NEXT: [[RED_2_LCSSA:%.*]] = phi i32 [ [[RED_2]], [[LOOP1]] ], [ [[TMP44]], [[MIDDLE_BLOCK]] ] ; PRED-NEXT: ret i32 [[RED_2_LCSSA]] ; entry: @@ -336,7 +376,7 @@ define i16 @reduce_udiv(ptr %src, i16 %x, i64 %N) #0 { ; DEFAULT-NEXT: [[TMP22]] = or [[TMP20]], [[VEC_PHI1]] ; DEFAULT-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP6]] ; DEFAULT-NEXT: [[TMP23:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; DEFAULT-NEXT: br i1 [[TMP23]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] +; DEFAULT-NEXT: br i1 [[TMP23]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; DEFAULT: middle.block: ; DEFAULT-NEXT: [[BIN_RDX:%.*]] = or [[TMP22]], [[TMP21]] ; DEFAULT-NEXT: [[TMP24:%.*]] = call i16 @llvm.vector.reduce.or.nxv4i16( [[BIN_RDX]]) @@ -355,11 +395,71 @@ define i16 @reduce_udiv(ptr %src, i16 %x, i64 %N) #0 { ; DEFAULT-NEXT: [[RED_NEXT]] = or i16 [[DIV]], [[RED]] ; DEFAULT-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1 ; DEFAULT-NEXT: [[EC:%.*]] = icmp eq i64 [[IV]], [[N]] -; DEFAULT-NEXT: br i1 [[EC]], label [[EXIT]], label 
[[LOOP]], !llvm.loop [[LOOP5:![0-9]+]] +; DEFAULT-NEXT: br i1 [[EC]], label [[EXIT]], label [[LOOP]], !llvm.loop [[LOOP3:![0-9]+]] ; DEFAULT: exit: ; DEFAULT-NEXT: [[RED_NEXT_LCSSA:%.*]] = phi i16 [ [[RED_NEXT]], [[LOOP]] ], [ [[TMP24]], [[MIDDLE_BLOCK]] ] ; DEFAULT-NEXT: ret i16 [[RED_NEXT_LCSSA]] ; +; VSCALEFORTUNING2-LABEL: define i16 @reduce_udiv( +; VSCALEFORTUNING2-SAME: ptr [[SRC:%.*]], i16 [[X:%.*]], i64 [[N:%.*]]) #[[ATTR0]] { +; VSCALEFORTUNING2-NEXT: entry: +; VSCALEFORTUNING2-NEXT: [[TMP0:%.*]] = add i64 [[N]], 1 +; VSCALEFORTUNING2-NEXT: [[TMP1:%.*]] = call i64 @llvm.vscale.i64() +; VSCALEFORTUNING2-NEXT: [[TMP2:%.*]] = mul i64 [[TMP1]], 8 +; VSCALEFORTUNING2-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[TMP0]], [[TMP2]] +; VSCALEFORTUNING2-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]] +; VSCALEFORTUNING2: vector.ph: +; VSCALEFORTUNING2-NEXT: [[TMP3:%.*]] = call i64 @llvm.vscale.i64() +; VSCALEFORTUNING2-NEXT: [[TMP4:%.*]] = mul i64 [[TMP3]], 8 +; VSCALEFORTUNING2-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[TMP0]], [[TMP4]] +; VSCALEFORTUNING2-NEXT: [[N_VEC:%.*]] = sub i64 [[TMP0]], [[N_MOD_VF]] +; VSCALEFORTUNING2-NEXT: [[TMP5:%.*]] = call i64 @llvm.vscale.i64() +; VSCALEFORTUNING2-NEXT: [[TMP6:%.*]] = mul i64 [[TMP5]], 8 +; VSCALEFORTUNING2-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement poison, i16 [[X]], i64 0 +; VSCALEFORTUNING2-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector [[BROADCAST_SPLATINSERT]], poison, zeroinitializer +; VSCALEFORTUNING2-NEXT: br label [[VECTOR_BODY:%.*]] +; VSCALEFORTUNING2: vector.body: +; VSCALEFORTUNING2-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] +; VSCALEFORTUNING2-NEXT: [[VEC_PHI:%.*]] = phi [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP15:%.*]], [[VECTOR_BODY]] ] +; VSCALEFORTUNING2-NEXT: [[VEC_PHI1:%.*]] = phi [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP16:%.*]], [[VECTOR_BODY]] ] +; VSCALEFORTUNING2-NEXT: [[TMP7:%.*]] = add i64 [[INDEX]], 0 +; VSCALEFORTUNING2-NEXT: [[TMP8:%.*]] = getelementptr i16, ptr [[SRC]], i64 [[TMP7]] +; VSCALEFORTUNING2-NEXT: [[TMP9:%.*]] = getelementptr i16, ptr [[TMP8]], i32 0 +; VSCALEFORTUNING2-NEXT: [[TMP10:%.*]] = call i64 @llvm.vscale.i64() +; VSCALEFORTUNING2-NEXT: [[TMP11:%.*]] = mul i64 [[TMP10]], 4 +; VSCALEFORTUNING2-NEXT: [[TMP12:%.*]] = getelementptr i16, ptr [[TMP8]], i64 [[TMP11]] +; VSCALEFORTUNING2-NEXT: [[WIDE_LOAD:%.*]] = load , ptr [[TMP9]], align 2 +; VSCALEFORTUNING2-NEXT: [[WIDE_LOAD2:%.*]] = load , ptr [[TMP12]], align 2 +; VSCALEFORTUNING2-NEXT: [[TMP13:%.*]] = udiv [[WIDE_LOAD]], [[BROADCAST_SPLAT]] +; VSCALEFORTUNING2-NEXT: [[TMP14:%.*]] = udiv [[WIDE_LOAD2]], [[BROADCAST_SPLAT]] +; VSCALEFORTUNING2-NEXT: [[TMP15]] = or [[TMP13]], [[VEC_PHI]] +; VSCALEFORTUNING2-NEXT: [[TMP16]] = or [[TMP14]], [[VEC_PHI1]] +; VSCALEFORTUNING2-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP6]] +; VSCALEFORTUNING2-NEXT: [[TMP17:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] +; VSCALEFORTUNING2-NEXT: br i1 [[TMP17]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] +; VSCALEFORTUNING2: middle.block: +; VSCALEFORTUNING2-NEXT: [[BIN_RDX:%.*]] = or [[TMP16]], [[TMP15]] +; VSCALEFORTUNING2-NEXT: [[TMP18:%.*]] = call i16 @llvm.vector.reduce.or.nxv4i16( [[BIN_RDX]]) +; VSCALEFORTUNING2-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[TMP0]], [[N_VEC]] +; VSCALEFORTUNING2-NEXT: br i1 [[CMP_N]], label [[EXIT:%.*]], label [[SCALAR_PH]] +; VSCALEFORTUNING2: scalar.ph: +; VSCALEFORTUNING2-NEXT: [[BC_RESUME_VAL:%.*]] = phi 
i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ] +; VSCALEFORTUNING2-NEXT: [[BC_MERGE_RDX:%.*]] = phi i16 [ [[TMP18]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY]] ] +; VSCALEFORTUNING2-NEXT: br label [[LOOP:%.*]] +; VSCALEFORTUNING2: loop: +; VSCALEFORTUNING2-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[LOOP]] ] +; VSCALEFORTUNING2-NEXT: [[RED:%.*]] = phi i16 [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ], [ [[RED_NEXT:%.*]], [[LOOP]] ] +; VSCALEFORTUNING2-NEXT: [[GEP:%.*]] = getelementptr i16, ptr [[SRC]], i64 [[IV]] +; VSCALEFORTUNING2-NEXT: [[L:%.*]] = load i16, ptr [[GEP]], align 2 +; VSCALEFORTUNING2-NEXT: [[DIV:%.*]] = udiv i16 [[L]], [[X]] +; VSCALEFORTUNING2-NEXT: [[RED_NEXT]] = or i16 [[DIV]], [[RED]] +; VSCALEFORTUNING2-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1 +; VSCALEFORTUNING2-NEXT: [[EC:%.*]] = icmp eq i64 [[IV]], [[N]] +; VSCALEFORTUNING2-NEXT: br i1 [[EC]], label [[EXIT]], label [[LOOP]], !llvm.loop [[LOOP5:![0-9]+]] +; VSCALEFORTUNING2: exit: +; VSCALEFORTUNING2-NEXT: [[RED_NEXT_LCSSA:%.*]] = phi i16 [ [[RED_NEXT]], [[LOOP]] ], [ [[TMP18]], [[MIDDLE_BLOCK]] ] +; VSCALEFORTUNING2-NEXT: ret i16 [[RED_NEXT_LCSSA]] +; ; PRED-LABEL: define i16 @reduce_udiv( ; PRED-SAME: ptr [[SRC:%.*]], i16 [[X:%.*]], i64 [[N:%.*]]) #[[ATTR0]] { ; PRED-NEXT: entry: @@ -444,8 +544,13 @@ attributes #0 = { "target-features"="+sve" } ; DEFAULT: [[META1]] = !{!"llvm.loop.isvectorized", i32 1} ; DEFAULT: [[META2]] = !{!"llvm.loop.unroll.runtime.disable"} ; DEFAULT: [[LOOP3]] = distinct !{[[LOOP3]], [[META2]], [[META1]]} -; DEFAULT: [[LOOP4]] = distinct !{[[LOOP4]], [[META1]], [[META2]]} -; DEFAULT: [[LOOP5]] = distinct !{[[LOOP5]], [[META2]], [[META1]]} +;. +; VSCALEFORTUNING2: [[LOOP0]] = distinct !{[[LOOP0]], [[META1:![0-9]+]], [[META2:![0-9]+]]} +; VSCALEFORTUNING2: [[META1]] = !{!"llvm.loop.isvectorized", i32 1} +; VSCALEFORTUNING2: [[META2]] = !{!"llvm.loop.unroll.runtime.disable"} +; VSCALEFORTUNING2: [[LOOP3]] = distinct !{[[LOOP3]], [[META2]], [[META1]]} +; VSCALEFORTUNING2: [[LOOP4]] = distinct !{[[LOOP4]], [[META1]], [[META2]]} +; VSCALEFORTUNING2: [[LOOP5]] = distinct !{[[LOOP5]], [[META2]], [[META1]]} ;. 
; PRED: [[LOOP0]] = distinct !{[[LOOP0]], [[META1:![0-9]+]], [[META2:![0-9]+]]} ; PRED: [[META1]] = !{!"llvm.loop.isvectorized", i32 1} diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/scalable-vectorization-cost-tuning.ll b/llvm/test/Transforms/LoopVectorize/AArch64/scalable-vectorization-cost-tuning.ll index 225108fe89de0c..1565d1ce982e3d 100644 --- a/llvm/test/Transforms/LoopVectorize/AArch64/scalable-vectorization-cost-tuning.ll +++ b/llvm/test/Transforms/LoopVectorize/AArch64/scalable-vectorization-cost-tuning.ll @@ -1,42 +1,39 @@ ; REQUIRES: asserts ; RUN: opt -mtriple=aarch64 -mattr=+sve \ ; RUN: -force-target-instruction-cost=1 -passes=loop-vectorize -S -debug-only=loop-vectorize < %s 2>&1 \ -; RUN: | FileCheck %s --check-prefixes=GENERIC,VF-VSCALE16 +; RUN: | FileCheck %s --check-prefixes=VSCALEFORTUNING1 ; RUN: opt -mtriple=aarch64 -mattr=+sve -mcpu=generic \ ; RUN: -force-target-instruction-cost=1 -passes=loop-vectorize -S -debug-only=loop-vectorize < %s 2>&1 \ -; RUN: | FileCheck %s --check-prefixes=GENERIC,VF-VSCALE16 +; RUN: | FileCheck %s --check-prefixes=VSCALEFORTUNING1 ; RUN: opt -mtriple=aarch64 -mcpu=neoverse-v1 \ ; RUN: -force-target-instruction-cost=1 -passes=loop-vectorize -S -debug-only=loop-vectorize < %s 2>&1 \ -; RUN: | FileCheck %s --check-prefixes=NEOVERSE-V1,VF-VSCALE16 +; RUN: | FileCheck %s --check-prefixes=VSCALEFORTUNING2 ; RUN: opt -mtriple=aarch64 -mcpu=neoverse-n2 \ ; RUN: -force-target-instruction-cost=1 -passes=loop-vectorize -S -debug-only=loop-vectorize < %s 2>&1 \ -; RUN: | FileCheck %s --check-prefixes=NEOVERSE-N2,VF-VSCALE16 +; RUN: | FileCheck %s --check-prefixes=VSCALEFORTUNING1 ; RUN: opt -mtriple=aarch64 -mcpu=neoverse-v2 \ ; RUN: -force-target-instruction-cost=1 -passes=loop-vectorize -S -debug-only=loop-vectorize < %s 2>&1 \ -; RUN: | FileCheck %s --check-prefixes=NEOVERSE-V2,VF-16 +; RUN: | FileCheck %s --check-prefixes=NEOVERSEV2 -; GENERIC: Cost for VF vscale x 2: 11 (Estimated cost per lane: 2. -; GENERIC: Cost for VF vscale x 4: 11 (Estimated cost per lane: 1. -; GENERIC: LV: Selecting VF: vscale x 16 +; VSCALEFORTUNING1: Cost for VF vscale x 2: 11 (Estimated cost per lane: 5. +; VSCALEFORTUNING1: Cost for VF vscale x 4: 11 (Estimated cost per lane: 2. +; VSCALEFORTUNING1: LV: Selecting VF: vscale x 16 -; NEOVERSE-V1: Cost for VF vscale x 2: 11 (Estimated cost per lane: 2. -; NEOVERSE-V1: Cost for VF vscale x 4: 11 (Estimated cost per lane: 1. -; NEOVERSE-V1: LV: Selecting VF: vscale x 16 +; VSCALEFORTUNING2: Cost for VF vscale x 2: 11 (Estimated cost per lane: 2. +; VSCALEFORTUNING2: Cost for VF vscale x 4: 11 (Estimated cost per lane: 1. +; VSCALEFORTUNING2: LV: Selecting VF: vscale x 16 -; NEOVERSE-N2: Cost for VF vscale x 2: 11 (Estimated cost per lane: 5. -; NEOVERSE-N2: Cost for VF vscale x 4: 11 (Estimated cost per lane: 2. -; NEOVERSE-N2: LV: Selecting VF: vscale x 16 +; NEOVERSEV2: Cost for VF vscale x 2: 11 (Estimated cost per lane: 5. +; NEOVERSEV2: Cost for VF vscale x 4: 11 (Estimated cost per lane: 2. +; NEOVERSEV2: LV: Selecting VF: 16 -; NEOVERSE-V2: Cost for VF vscale x 2: 11 (Estimated cost per lane: 5. -; NEOVERSE-V2: Cost for VF vscale x 4: 11 (Estimated cost per lane: 2. 
-; NEOVERSE-V2: LV: Selecting VF: 16 - -; VF-16: <16 x i8> -; VF-VSCALE16: +; VSCALEFORTUNING1: +; VSCALEFORTUNING2: +; NEOVERSEV2: <16 x i8> define void @test0(ptr %a, ptr %b, ptr %c) #0 { entry: br label %loop diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/scalable-vf-hint.ll b/llvm/test/Transforms/LoopVectorize/AArch64/scalable-vf-hint.ll index 47b159b1553b80..7cb56bb10809ae 100644 --- a/llvm/test/Transforms/LoopVectorize/AArch64/scalable-vf-hint.ll +++ b/llvm/test/Transforms/LoopVectorize/AArch64/scalable-vf-hint.ll @@ -187,9 +187,9 @@ exit: ; CHECK-DBG: LV: User VF=vscale x 4 is unsafe. Ignoring scalable UserVF. ; CHECK-DBG: remark: :0:0: User-specified vectorization factor vscale x 4 is unsafe. Ignoring the hint to let the compiler pick a more suitable value. ; CHECK-DBG: Found feasible scalable VF = vscale x 2 -; CHECK-DBG: LV: Selecting VF: vscale x 2. +; CHECK-DBG: LV: Selecting VF: 4. ; CHECK-LABEL: @test4 -; CHECK: +; CHECK-NOT: define void @test4(ptr %a, ptr %b) #0 { entry: br label %loop diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/store-costs-sve.ll b/llvm/test/Transforms/LoopVectorize/AArch64/store-costs-sve.ll index 6f5b6f48e1fc63..b9a4afe8af4516 100644 --- a/llvm/test/Transforms/LoopVectorize/AArch64/store-costs-sve.ll +++ b/llvm/test/Transforms/LoopVectorize/AArch64/store-costs-sve.ll @@ -15,23 +15,27 @@ define void @cost_store_i8(ptr %dst) #0 { ; DEFAULT-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[VEC_EPILOG_SCALAR_PH:%.*]], label [[VECTOR_MAIN_LOOP_ITER_CHECK:%.*]] ; DEFAULT: vector.main.loop.iter.check: ; DEFAULT-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64() -; DEFAULT-NEXT: [[TMP3:%.*]] = mul i64 [[TMP2]], 16 +; DEFAULT-NEXT: [[TMP3:%.*]] = mul i64 [[TMP2]], 32 ; DEFAULT-NEXT: [[MIN_ITERS_CHECK1:%.*]] = icmp ult i64 101, [[TMP3]] ; DEFAULT-NEXT: br i1 [[MIN_ITERS_CHECK1]], label [[VEC_EPILOG_PH:%.*]], label [[VECTOR_PH:%.*]] ; DEFAULT: vector.ph: ; DEFAULT-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64() -; DEFAULT-NEXT: [[TMP5:%.*]] = mul i64 [[TMP4]], 16 +; DEFAULT-NEXT: [[TMP5:%.*]] = mul i64 [[TMP4]], 32 ; DEFAULT-NEXT: [[N_MOD_VF:%.*]] = urem i64 101, [[TMP5]] ; DEFAULT-NEXT: [[N_VEC:%.*]] = sub i64 101, [[N_MOD_VF]] ; DEFAULT-NEXT: [[TMP6:%.*]] = call i64 @llvm.vscale.i64() -; DEFAULT-NEXT: [[TMP7:%.*]] = mul i64 [[TMP6]], 16 +; DEFAULT-NEXT: [[TMP7:%.*]] = mul i64 [[TMP6]], 32 ; DEFAULT-NEXT: br label [[VECTOR_BODY:%.*]] ; DEFAULT: vector.body: ; DEFAULT-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] ; DEFAULT-NEXT: [[TMP8:%.*]] = add i64 [[INDEX]], 0 ; DEFAULT-NEXT: [[TMP9:%.*]] = getelementptr i8, ptr [[DST]], i64 [[TMP8]] ; DEFAULT-NEXT: [[TMP10:%.*]] = getelementptr i8, ptr [[TMP9]], i32 0 +; DEFAULT-NEXT: [[TMP22:%.*]] = call i64 @llvm.vscale.i64() +; DEFAULT-NEXT: [[TMP23:%.*]] = mul i64 [[TMP22]], 16 +; DEFAULT-NEXT: [[TMP24:%.*]] = getelementptr i8, ptr [[TMP9]], i64 [[TMP23]] ; DEFAULT-NEXT: store zeroinitializer, ptr [[TMP10]], align 1 +; DEFAULT-NEXT: store zeroinitializer, ptr [[TMP24]], align 1 ; DEFAULT-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP7]] ; DEFAULT-NEXT: [[TMP11:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] ; DEFAULT-NEXT: br i1 [[TMP11]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/sve-strict-fadd-cost.ll b/llvm/test/Transforms/LoopVectorize/AArch64/sve-strict-fadd-cost.ll index b84e8de678140f..0efdf077dca669 100644 --- 
a/llvm/test/Transforms/LoopVectorize/AArch64/sve-strict-fadd-cost.ll +++ b/llvm/test/Transforms/LoopVectorize/AArch64/sve-strict-fadd-cost.ll @@ -1,21 +1,25 @@ ; REQUIRES: asserts ; RUN: opt < %s -passes=loop-vectorize -debug -disable-output -force-ordered-reductions=true -hints-allow-reordering=false \ -; RUN: -prefer-predicate-over-epilogue=scalar-epilogue -force-vector-interleave=1 -S 2>&1 | FileCheck %s +; RUN: -prefer-predicate-over-epilogue=scalar-epilogue -force-vector-interleave=1 -S 2>&1 | FileCheck %s --check-prefix=CHECK-VSCALE1 ; RUN: opt < %s -passes=loop-vectorize -debug -disable-output -force-ordered-reductions=true -hints-allow-reordering=false \ ; RUN: -prefer-predicate-over-epilogue=scalar-epilogue -force-vector-interleave=1 \ -; RUN: -mcpu=neoverse-n2 -S 2>&1 | FileCheck %s --check-prefix=CHECK-CPU-NEOVERSE-N2 +; RUN: -mcpu=neoverse-v1 -S 2>&1 | FileCheck %s --check-prefix=CHECK-VSCALE2 +; RUN: opt < %s -passes=loop-vectorize -debug -disable-output -force-ordered-reductions=true -hints-allow-reordering=false \ +; RUN: -prefer-predicate-over-epilogue=scalar-epilogue -force-vector-interleave=1 \ +; RUN: -mcpu=neoverse-n2 -S 2>&1 | FileCheck %s --check-prefix=CHECK-VSCALE1 target triple="aarch64-unknown-linux-gnu" -; CHECK-LABEL: LV: Checking a loop in 'fadd_strict32' -; CHECK: Cost of 4 for VF vscale x 2: -; CHECK: in-loop reduction %add = fadd float %0, %sum.07 -; CHECK: Cost of 8 for VF vscale x 4: -; CHECK: in-loop reduction %add = fadd float %0, %sum.07 -; CHECK-CPU-NEOVERSE-N2: Cost of 2 for VF vscale x 2: -; CHECK-CPU-NEOVERSE-N2: in-loop reduction %add = fadd float %0, %sum.07 -; CHECK-CPU-NEOVERSE-N2: Cost of 4 for VF vscale x 4: -; CHECK-CPU-NEOVERSE-N2: in-loop reduction %add = fadd float %0, %sum.07 +; CHECK-VSCALE2-LABEL: LV: Checking a loop in 'fadd_strict32' +; CHECK-VSCALE2: Cost of 4 for VF vscale x 2: +; CHECK-VSCALE2: in-loop reduction %add = fadd float %0, %sum.07 +; CHECK-VSCALE2: Cost of 8 for VF vscale x 4: +; CHECK-VSCALE2: in-loop reduction %add = fadd float %0, %sum.07 +; CHECK-VSCALE1-LABEL: LV: Checking a loop in 'fadd_strict32' +; CHECK-VSCALE1: Cost of 2 for VF vscale x 2: +; CHECK-VSCALE1: in-loop reduction %add = fadd float %0, %sum.07 +; CHECK-VSCALE1: Cost of 4 for VF vscale x 4: +; CHECK-VSCALE1: in-loop reduction %add = fadd float %0, %sum.07 define float @fadd_strict32(ptr noalias nocapture readonly %a, i64 %n) #0 { entry: @@ -36,11 +40,12 @@ for.end: } -; CHECK-LABEL: LV: Checking a loop in 'fadd_strict64' -; CHECK: Cost of 4 for VF vscale x 2: -; CHECK: in-loop reduction %add = fadd double %0, %sum.07 -; CHECK-CPU-NEOVERSE-N2: Cost of 2 for VF vscale x 2: -; CHECK-CPU-NEOVERSE-N2: in-loop reduction %add = fadd double %0, %sum.07 +; CHECK-VSCALE2-LABEL: LV: Checking a loop in 'fadd_strict64' +; CHECK-VSCALE2: Cost of 4 for VF vscale x 2: +; CHECK-VSCALE2: in-loop reduction %add = fadd double %0, %sum.07 +; CHECK-VSCALE1-LABEL: LV: Checking a loop in 'fadd_strict64' +; CHECK-VSCALE1: Cost of 2 for VF vscale x 2: +; CHECK-VSCALE1: in-loop reduction %add = fadd double %0, %sum.07 define double @fadd_strict64(ptr noalias nocapture readonly %a, i64 %n) #0 { entry: diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/type-shrinkage-zext-costs.ll b/llvm/test/Transforms/LoopVectorize/AArch64/type-shrinkage-zext-costs.ll index 68c8e4dd93fd78..bd2e5dcb3dba48 100644 --- a/llvm/test/Transforms/LoopVectorize/AArch64/type-shrinkage-zext-costs.ll +++ b/llvm/test/Transforms/LoopVectorize/AArch64/type-shrinkage-zext-costs.ll @@ -23,29 +23,23 @@ 
define void @zext_i8_i16(ptr noalias nocapture readonly %p, ptr noalias nocaptur ; CHECK-NEXT: [[TMP0:%.*]] = add i32 [[LEN]], -1 ; CHECK-NEXT: [[TMP1:%.*]] = zext i32 [[TMP0]] to i64 ; CHECK-NEXT: [[TMP2:%.*]] = add nuw nsw i64 [[TMP1]], 1 -; CHECK-NEXT: [[TMP3:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-NEXT: [[TMP4:%.*]] = mul i64 [[TMP3]], 8 -; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[TMP2]], [[TMP4]] +; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[TMP2]], 16 ; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]] ; CHECK: vector.ph: -; CHECK-NEXT: [[TMP5:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-NEXT: [[TMP6:%.*]] = mul i64 [[TMP5]], 8 -; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[TMP2]], [[TMP6]] +; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[TMP2]], 16 ; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[TMP2]], [[N_MOD_VF]] -; CHECK-NEXT: [[TMP7:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-NEXT: [[TMP8:%.*]] = mul i64 [[TMP7]], 8 ; CHECK-NEXT: br label [[VECTOR_BODY:%.*]] ; CHECK: vector.body: ; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] -; CHECK-NEXT: [[TMP9:%.*]] = getelementptr inbounds i8, ptr [[P]], i64 [[INDEX]] -; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load , ptr [[TMP9]], align 1 -; CHECK-NEXT: [[TMP10:%.*]] = zext [[WIDE_LOAD]] to -; CHECK-NEXT: [[TMP11:%.*]] = add [[TMP10]], trunc ( splat (i32 2) to ) -; CHECK-NEXT: [[TMP12:%.*]] = getelementptr inbounds i16, ptr [[Q]], i64 [[INDEX]] -; CHECK-NEXT: store [[TMP11]], ptr [[TMP12]], align 2 -; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP8]] -; CHECK-NEXT: [[TMP13:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; CHECK-NEXT: br i1 [[TMP13]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] +; CHECK-NEXT: [[TMP3:%.*]] = getelementptr inbounds i8, ptr [[P]], i64 [[INDEX]] +; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <16 x i8>, ptr [[TMP3]], align 1 +; CHECK-NEXT: [[TMP4:%.*]] = zext <16 x i8> [[WIDE_LOAD]] to <16 x i16> +; CHECK-NEXT: [[TMP5:%.*]] = add <16 x i16> [[TMP4]], splat (i16 2) +; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds i16, ptr [[Q]], i64 [[INDEX]] +; CHECK-NEXT: store <16 x i16> [[TMP5]], ptr [[TMP6]], align 2 +; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 16 +; CHECK-NEXT: [[TMP7:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] +; CHECK-NEXT: br i1 [[TMP7]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; CHECK: middle.block: ; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[TMP2]], [[N_VEC]] ; CHECK-NEXT: br i1 [[CMP_N]], label [[EXIT:%.*]], label [[SCALAR_PH]] @@ -55,8 +49,8 @@ define void @zext_i8_i16(ptr noalias nocapture readonly %p, ptr noalias nocaptur ; CHECK: for.body: ; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ] ; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i8, ptr [[P]], i64 [[INDVARS_IV]] -; CHECK-NEXT: [[TMP14:%.*]] = load i8, ptr [[ARRAYIDX]], align 1 -; CHECK-NEXT: [[CONV:%.*]] = zext i8 [[TMP14]] to i32 +; CHECK-NEXT: [[TMP8:%.*]] = load i8, ptr [[ARRAYIDX]], align 1 +; CHECK-NEXT: [[CONV:%.*]] = zext i8 [[TMP8]] to i32 ; CHECK-NEXT: [[ADD:%.*]] = add nuw nsw i32 [[CONV]], 2 ; CHECK-NEXT: [[CONV1:%.*]] = trunc i32 [[ADD]] to i16 ; CHECK-NEXT: [[ARRAYIDX3:%.*]] = getelementptr inbounds i16, ptr [[Q]], i64 [[INDVARS_IV]] @@ -105,29 +99,23 @@ define void @sext_i8_i16(ptr noalias nocapture readonly %p, ptr noalias nocaptur ; CHECK-NEXT: [[TMP0:%.*]] 
= add i32 [[LEN]], -1 ; CHECK-NEXT: [[TMP1:%.*]] = zext i32 [[TMP0]] to i64 ; CHECK-NEXT: [[TMP2:%.*]] = add nuw nsw i64 [[TMP1]], 1 -; CHECK-NEXT: [[TMP3:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-NEXT: [[TMP4:%.*]] = mul i64 [[TMP3]], 8 -; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[TMP2]], [[TMP4]] +; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[TMP2]], 16 ; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]] ; CHECK: vector.ph: -; CHECK-NEXT: [[TMP5:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-NEXT: [[TMP6:%.*]] = mul i64 [[TMP5]], 8 -; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[TMP2]], [[TMP6]] +; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[TMP2]], 16 ; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[TMP2]], [[N_MOD_VF]] -; CHECK-NEXT: [[TMP7:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-NEXT: [[TMP8:%.*]] = mul i64 [[TMP7]], 8 ; CHECK-NEXT: br label [[VECTOR_BODY:%.*]] ; CHECK: vector.body: ; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] -; CHECK-NEXT: [[TMP9:%.*]] = getelementptr inbounds i8, ptr [[P]], i64 [[INDEX]] -; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load , ptr [[TMP9]], align 1 -; CHECK-NEXT: [[TMP10:%.*]] = sext [[WIDE_LOAD]] to -; CHECK-NEXT: [[TMP11:%.*]] = add [[TMP10]], trunc ( splat (i32 2) to ) -; CHECK-NEXT: [[TMP12:%.*]] = getelementptr inbounds i16, ptr [[Q]], i64 [[INDEX]] -; CHECK-NEXT: store [[TMP11]], ptr [[TMP12]], align 2 -; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP8]] -; CHECK-NEXT: [[TMP13:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; CHECK-NEXT: br i1 [[TMP13]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] +; CHECK-NEXT: [[TMP3:%.*]] = getelementptr inbounds i8, ptr [[P]], i64 [[INDEX]] +; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <16 x i8>, ptr [[TMP3]], align 1 +; CHECK-NEXT: [[TMP4:%.*]] = sext <16 x i8> [[WIDE_LOAD]] to <16 x i16> +; CHECK-NEXT: [[TMP5:%.*]] = add <16 x i16> [[TMP4]], splat (i16 2) +; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds i16, ptr [[Q]], i64 [[INDEX]] +; CHECK-NEXT: store <16 x i16> [[TMP5]], ptr [[TMP6]], align 2 +; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 16 +; CHECK-NEXT: [[TMP7:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] +; CHECK-NEXT: br i1 [[TMP7]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] ; CHECK: middle.block: ; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[TMP2]], [[N_VEC]] ; CHECK-NEXT: br i1 [[CMP_N]], label [[EXIT:%.*]], label [[SCALAR_PH]] @@ -137,8 +125,8 @@ define void @sext_i8_i16(ptr noalias nocapture readonly %p, ptr noalias nocaptur ; CHECK: for.body: ; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ] ; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i8, ptr [[P]], i64 [[INDVARS_IV]] -; CHECK-NEXT: [[TMP14:%.*]] = load i8, ptr [[ARRAYIDX]], align 1 -; CHECK-NEXT: [[CONV:%.*]] = sext i8 [[TMP14]] to i32 +; CHECK-NEXT: [[TMP8:%.*]] = load i8, ptr [[ARRAYIDX]], align 1 +; CHECK-NEXT: [[CONV:%.*]] = sext i8 [[TMP8]] to i32 ; CHECK-NEXT: [[ADD:%.*]] = add nuw nsw i32 [[CONV]], 2 ; CHECK-NEXT: [[CONV1:%.*]] = trunc i32 [[ADD]] to i16 ; CHECK-NEXT: [[ARRAYIDX3:%.*]] = getelementptr inbounds i16, ptr [[Q]], i64 [[INDVARS_IV]] diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/veclib-function-calls.ll b/llvm/test/Transforms/LoopVectorize/AArch64/veclib-function-calls.ll index b2695cbf6e9682..4ad9987b090699 100644 --- 
a/llvm/test/Transforms/LoopVectorize/AArch64/veclib-function-calls.ll +++ b/llvm/test/Transforms/LoopVectorize/AArch64/veclib-function-calls.ll @@ -1,10 +1,10 @@ ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --scrub-attributes --filter "call.*(cos|sin|tan|cbrt|erf|exp[^e]|gamma|log|sqrt|copysign|dim|min|mod|hypot|nextafter|pow|fma)" --version 2 ; RUN: opt -mattr=+neon -vector-library=sleefgnuabi -passes=inject-tli-mappings,loop-vectorize,simplifycfg -force-vector-interleave=1 -S < %s | FileCheck %s -check-prefix=SLEEF-NEON -; RUN: opt -mattr=+sve -vector-library=sleefgnuabi -passes=inject-tli-mappings,loop-vectorize,simplifycfg -force-vector-interleave=1 -prefer-predicate-over-epilogue=predicate-dont-vectorize -S < %s | FileCheck %s -check-prefix=SLEEF-SVE -; RUN: opt -mattr=+sve -vector-library=sleefgnuabi -passes=inject-tli-mappings,loop-vectorize,simplifycfg -force-vector-interleave=1 -S < %s | FileCheck %s -check-prefixes=SLEEF-SVE-NOPRED +; RUN: opt -mattr=+sve -vector-library=sleefgnuabi -passes=inject-tli-mappings,loop-vectorize,simplifycfg -mcpu=neoverse-v1 -force-vector-interleave=1 -prefer-predicate-over-epilogue=predicate-dont-vectorize -S < %s | FileCheck %s -check-prefix=SLEEF-SVE +; RUN: opt -mattr=+sve -vector-library=sleefgnuabi -passes=inject-tli-mappings,loop-vectorize,simplifycfg -mcpu=neoverse-v1 -force-vector-interleave=1 -S < %s | FileCheck %s -check-prefixes=SLEEF-SVE-NOPRED ; RUN: opt -mattr=+neon -vector-library=ArmPL -passes=inject-tli-mappings,loop-vectorize,simplifycfg -force-vector-interleave=1 -S < %s | FileCheck %s -check-prefix=ARMPL-NEON -; RUN: opt -mattr=+sve -vector-library=ArmPL -passes=inject-tli-mappings,loop-vectorize,simplifycfg -force-vector-interleave=1 -prefer-predicate-over-epilogue=predicate-dont-vectorize -S < %s | FileCheck %s -check-prefix=ARMPL-SVE -; RUN: opt -mattr=+sve -vector-library=ArmPL -passes=inject-tli-mappings,loop-vectorize,simplifycfg -force-vector-interleave=1 -S < %s | FileCheck %s -check-prefixes=ARMPL-SVE-NOPRED +; RUN: opt -mattr=+sve -vector-library=ArmPL -passes=inject-tli-mappings,loop-vectorize,simplifycfg -mcpu=neoverse-v1 -force-vector-interleave=1 -prefer-predicate-over-epilogue=predicate-dont-vectorize -S < %s | FileCheck %s -check-prefix=ARMPL-SVE +; RUN: opt -mattr=+sve -vector-library=ArmPL -passes=inject-tli-mappings,loop-vectorize,simplifycfg -mcpu=neoverse-v1 -force-vector-interleave=1 -S < %s | FileCheck %s -check-prefixes=ARMPL-SVE-NOPRED