15
15
#include " AArch64CallLowering.h"
16
16
#include " AArch64ISelLowering.h"
17
17
#include " AArch64MachineFunctionInfo.h"
18
+ #include " AArch64RegisterInfo.h"
18
19
#include " AArch64Subtarget.h"
19
20
#include " llvm/ADT/ArrayRef.h"
20
21
#include " llvm/ADT/SmallVector.h"
@@ -546,13 +547,98 @@ bool AArch64CallLowering::fallBackToDAGISel(const MachineFunction &MF) const {
546
547
return false ;
547
548
}
548
549
550
/// Spill the argument registers that were *not* consumed by the named
/// (formal) parameters of a variadic function into stack save areas, so a
/// later va_arg implementation can walk them. Records the frame indices and
/// sizes of the GPR/FPR save areas in AArch64FunctionInfo.
///
/// \param MIRBuilder  builder positioned where the register copies/stores
///                    should be emitted.
/// \param Handler     incoming-value handler used to copy each physical
///                    argument register into a fresh virtual register.
/// \param CCInfo      calling-convention state after the formal arguments
///                    have been assigned; getFirstUnallocated() tells us
///                    which argument registers are still free (i.e. variadic).
void AArch64CallLowering::saveVarArgRegisters(
    MachineIRBuilder &MIRBuilder, CallLowering::IncomingValueHandler &Handler,
    CCState &CCInfo) const {
  auto GPRArgRegs = AArch64::getGPRArgRegs();
  auto FPRArgRegs = AArch64::getFPRArgRegs();

  MachineFunction &MF = MIRBuilder.getMF();
  MachineRegisterInfo &MRI = MF.getRegInfo();
  MachineFrameInfo &MFI = MF.getFrameInfo();
  AArch64FunctionInfo *FuncInfo = MF.getInfo<AArch64FunctionInfo>();
  auto &Subtarget = MF.getSubtarget<AArch64Subtarget>();
  bool IsWin64CC =
      Subtarget.isCallingConvWin64(CCInfo.getCallingConv());
  // Pointer and 64-bit scalar LLTs used for the save-area address arithmetic
  // and for the GPR copies below.
  const LLT p0 = LLT::pointer(0, 64);
  const LLT s64 = LLT::scalar(64);

  unsigned FirstVariadicGPR = CCInfo.getFirstUnallocated(GPRArgRegs);
  // Only used further down to offset the FPR CCValAssign value numbers past
  // the GPR ones, so the +1 merely keeps the ranges disjoint.
  unsigned NumVariadicGPRArgRegs = GPRArgRegs.size() - FirstVariadicGPR + 1;

  // ---- GPR save area: one 8-byte slot per unallocated GPR arg register. ----
  unsigned GPRSaveSize = 8 * (GPRArgRegs.size() - FirstVariadicGPR);
  int GPRIdx = 0;
  if (GPRSaveSize != 0) {
    if (IsWin64CC) {
      // Win64: the save area is a fixed object at a negative offset, i.e.
      // placed immediately below the incoming stack arguments so register
      // and stack varargs form one contiguous run.
      GPRIdx = MFI.CreateFixedObject(GPRSaveSize,
                                     -static_cast<int>(GPRSaveSize), false);
    } else
      GPRIdx = MFI.CreateStackObject(GPRSaveSize, Align(8), false);

    auto FIN = MIRBuilder.buildFrameIndex(p0, GPRIdx);
    // Constant stride (8 bytes) reused by every ptr-add in the loop.
    auto Offset =
        MIRBuilder.buildConstant(MRI.createGenericVirtualRegister(s64), 8);

    for (unsigned i = FirstVariadicGPR; i < GPRArgRegs.size(); ++i) {
      Register Val = MRI.createGenericVirtualRegister(s64);
      // Copy the physical arg register into Val. The value number
      // (i + getNumOperands()) appears chosen only to avoid colliding with
      // the formal arguments' assignments — TODO confirm nothing reads it
      // back as a real operand index.
      Handler.assignValueToReg(
          Val, GPRArgRegs[i],
          CCValAssign::getReg(i + MF.getFunction().getNumOperands(), MVT::i64,
                              GPRArgRegs[i], MVT::i64, CCValAssign::Full));
      // Win64 addresses the slot relative to the fixed object; otherwise a
      // plain stack pointer-info with a byte offset is used.
      auto MPO = IsWin64CC ? MachinePointerInfo::getFixedStack(
                                 MF, GPRIdx, (i - FirstVariadicGPR) * 8)
                           : MachinePointerInfo::getStack(MF, i * 8);
      MIRBuilder.buildStore(Val, FIN, MPO, inferAlignFromPtrInfo(MF, MPO));

      // Advance the save-area pointer to the next 8-byte slot.
      FIN = MIRBuilder.buildPtrAdd(MRI.createGenericVirtualRegister(p0),
                                   FIN.getReg(0), Offset);
    }
  }
  // Record the (possibly empty) GPR save area for the va_arg lowering.
  FuncInfo->setVarArgsGPRIndex(GPRIdx);
  FuncInfo->setVarArgsGPRSize(GPRSaveSize);

  // ---- FPR save area: one 16-byte slot per unallocated FPR arg register.
  // Skipped on Win64, where variadic float arguments travel in integer
  // registers and thus were already handled above.
  if (Subtarget.hasFPARMv8() && !IsWin64CC) {
    unsigned FirstVariadicFPR = CCInfo.getFirstUnallocated(FPRArgRegs);

    unsigned FPRSaveSize = 16 * (FPRArgRegs.size() - FirstVariadicFPR);
    int FPRIdx = 0;
    if (FPRSaveSize != 0) {
      FPRIdx = MFI.CreateStackObject(FPRSaveSize, Align(16), false);

      auto FIN = MIRBuilder.buildFrameIndex(p0, FPRIdx);
      // 16-byte stride between q-register slots.
      auto Offset =
          MIRBuilder.buildConstant(MRI.createGenericVirtualRegister(s64), 16);

      for (unsigned i = FirstVariadicFPR; i < FPRArgRegs.size(); ++i) {
        // Each FPR is saved as a full 128-bit value (f128/q-register width).
        Register Val = MRI.createGenericVirtualRegister(LLT::scalar(128));
        // Value numbers continue after the GPR block (see
        // NumVariadicGPRArgRegs above) so the two ranges stay disjoint.
        Handler.assignValueToReg(
            Val, FPRArgRegs[i],
            CCValAssign::getReg(
                i + MF.getFunction().getNumOperands() + NumVariadicGPRArgRegs,
                MVT::f128, FPRArgRegs[i], MVT::f128, CCValAssign::Full));

        auto MPO = MachinePointerInfo::getStack(MF, i * 16);
        MIRBuilder.buildStore(Val, FIN, MPO, inferAlignFromPtrInfo(MF, MPO));

        // Advance to the next 16-byte slot.
        FIN = MIRBuilder.buildPtrAdd(MRI.createGenericVirtualRegister(p0),
                                     FIN.getReg(0), Offset);
      }
    }
    // Record the (possibly empty) FPR save area for the va_arg lowering.
    FuncInfo->setVarArgsFPRIndex(FPRIdx);
    FuncInfo->setVarArgsFPRSize(FPRSaveSize);
  }
}
631
+
549
632
bool AArch64CallLowering::lowerFormalArguments (
550
633
MachineIRBuilder &MIRBuilder, const Function &F,
551
634
ArrayRef<ArrayRef<Register>> VRegs, FunctionLoweringInfo &FLI) const {
552
635
MachineFunction &MF = MIRBuilder.getMF ();
553
636
MachineBasicBlock &MBB = MIRBuilder.getMBB ();
554
637
MachineRegisterInfo &MRI = MF.getRegInfo ();
555
638
auto &DL = F.getParent ()->getDataLayout ();
639
+ auto &Subtarget = MF.getSubtarget <AArch64Subtarget>();
640
+ // TODO: Support Arm64EC
641
+ bool IsWin64 = Subtarget.isCallingConvWin64 (F.getCallingConv ()) && !Subtarget.isWindowsArm64EC ();
556
642
557
643
SmallVector<ArgInfo, 8 > SplitArgs;
558
644
SmallVector<std::pair<Register, Register>> BoolArgs;
@@ -598,13 +684,14 @@ bool AArch64CallLowering::lowerFormalArguments(
598
684
MIRBuilder.setInstr (*MBB.begin ());
599
685
600
686
const AArch64TargetLowering &TLI = *getTLI<AArch64TargetLowering>();
601
- CCAssignFn *AssignFn =
602
- TLI.CCAssignFnForCall (F.getCallingConv (), /* IsVarArg=*/ false );
687
+ CCAssignFn *AssignFn = TLI.CCAssignFnForCall (F.getCallingConv (), IsWin64);
603
688
604
689
AArch64IncomingValueAssigner Assigner (AssignFn, AssignFn);
605
690
FormalArgHandler Handler (MIRBuilder, MRI);
606
- if (!determineAndHandleAssignments (Handler, Assigner, SplitArgs, MIRBuilder,
607
- F.getCallingConv (), F.isVarArg ()))
691
+ SmallVector<CCValAssign, 16 > ArgLocs;
692
+ CCState CCInfo (F.getCallingConv (), F.isVarArg (), MF, ArgLocs, F.getContext ());
693
+ if (!determineAssignments (Assigner, SplitArgs, CCInfo) ||
694
+ !handleAssignments (Handler, SplitArgs, CCInfo, ArgLocs, MIRBuilder))
608
695
return false ;
609
696
610
697
if (!BoolArgs.empty ()) {
@@ -622,10 +709,14 @@ bool AArch64CallLowering::lowerFormalArguments(
622
709
AArch64FunctionInfo *FuncInfo = MF.getInfo <AArch64FunctionInfo>();
623
710
uint64_t StackOffset = Assigner.StackOffset ;
624
711
if (F.isVarArg ()) {
625
- auto &Subtarget = MF.getSubtarget <AArch64Subtarget>();
626
- if (!Subtarget.isTargetDarwin ()) {
627
- // FIXME: we need to reimplement saveVarArgsRegisters from
628
- // AArch64ISelLowering.
712
+ if ((!Subtarget.isTargetDarwin () && !Subtarget.isWindowsArm64EC ()) || IsWin64) {
713
+ // The AAPCS variadic function ABI is identical to the non-variadic
714
+ // one. As a result there may be more arguments in registers and we should
715
+ // save them for future reference.
716
+ // Win64 variadic functions also pass arguments in registers, but all
717
+ // float arguments are passed in integer registers.
718
+ saveVarArgRegisters (MIRBuilder, Handler, CCInfo);
719
+ } else if (Subtarget.isWindowsArm64EC ()) {
629
720
return false ;
630
721
}
631
722
@@ -657,7 +748,6 @@ bool AArch64CallLowering::lowerFormalArguments(
657
748
// in this function later.
658
749
FuncInfo->setBytesInStackArgArea (StackOffset);
659
750
660
- auto &Subtarget = MF.getSubtarget <AArch64Subtarget>();
661
751
if (Subtarget.hasCustomCallingConv ())
662
752
Subtarget.getRegisterInfo ()->UpdateCustomCalleeSavedRegs (MF);
663
753
0 commit comments