/* switch_aarch64_gcc.h */
/*
 * this is the internal transfer function.
 *
 * HISTORY
 * 07-Sep-16 Add clang support using x register naming. Fredrik Fornwall
 * 13-Apr-13 Add support for strange GCC caller-save decisions
 * 08-Apr-13 File creation. Michael Matz
 *
 * NOTES
 *
 * Simply save all callee saved registers
 *
 */
  14. #define STACK_REFPLUS 1
  15. #ifdef SLP_EVAL
  16. #define STACK_MAGIC 0
  17. #define REGS_TO_SAVE "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", \
  18. "x27", "x28", "x30" /* aka lr */, \
  19. "v8", "v9", "v10", "v11", \
  20. "v12", "v13", "v14", "v15"
/*
 * Recall:
   asm asm-qualifiers ( AssemblerTemplate
                        : OutputOperands
                        [ : InputOperands
                        [ : Clobbers ] ])
   or (if asm-qualifiers contains 'goto')
   asm asm-qualifiers ( AssemblerTemplate
                        : OutputOperands
                        : InputOperands
                        : Clobbers
                        : GotoLabels)
   and OutputOperands are
     [ [asmSymbolicName] ] constraint (cvariablename)
   When a name is given, refer to it as ``%[the name]``.
   When not given, ``%i`` where ``i`` is the zero-based index.
   constraints starting with ``=`` means only writing; ``+`` means
   reading and writing.
   This is followed by ``r`` (must be register) or ``m`` (must be memory)
   and these can be combined.
   The ``cvariablename`` is actually an lvalue expression.
   In AArch64, there are 31 general purpose registers. If named X0... they
   are 64-bit. If named W0... they are the bottom 32 bits of the
   corresponding 64 bit register.
   XZR and WZR are hardcoded to 0, and ignore writes.
   Arguments are in X0..X7. C++ uses X0 for ``this``. X0 holds simple return
   values (?)
   Whenever a W register is written, the top half of the X register is zeroed.
 */
  50. static int
  51. slp_switch(void)
  52. {
  53. int err;
  54. void *fp;
  55. /* Windowz uses a 32-bit long on a 64-bit platform, unlike the rest of
  56. the world, and in theory we can be compiled with GCC/llvm on 64-bit
  57. windows. So we need a fixed-width type.
  58. */
  59. int64_t *stackref, stsizediff;
  60. __asm__ volatile ("" : : : REGS_TO_SAVE);
  61. __asm__ volatile ("str x29, %0" : "=m"(fp) : : );
  62. __asm__ ("mov %0, sp" : "=r" (stackref));
  63. {
  64. SLP_SAVE_STATE(stackref, stsizediff);
  65. __asm__ volatile (
  66. "add sp,sp,%0\n"
  67. "add x29,x29,%0\n"
  68. :
  69. : "r" (stsizediff)
  70. );
  71. SLP_RESTORE_STATE();
  72. /* SLP_SAVE_STATE macro contains some return statements
  73. (of -1 and 1). It falls through only when
  74. the return value of slp_save_state() is zero, which
  75. is placed in x0.
  76. In that case we (slp_switch) also want to return zero
  77. (also in x0 of course).
  78. Now, some GCC versions (seen with 4.8) think it's a
  79. good idea to save/restore x0 around the call to
  80. slp_restore_state(), instead of simply zeroing it
  81. at the return below. But slp_restore_state
  82. writes random values to the stack slot used for this
  83. save/restore (from when it once was saved above in
  84. SLP_SAVE_STATE, when it was still uninitialized), so
  85. "restoring" that precious zero actually makes us
  86. return random values. There are some ways to make
  87. GCC not use that zero value in the normal return path
  88. (e.g. making err volatile, but that costs a little
  89. stack space), and the simplest is to call a function
  90. that returns an unknown value (which happens to be zero),
  91. so the saved/restored value is unused.
  92. Thus, this line stores a 0 into the ``err`` variable
  93. (which must be held in a register for this instruction,
  94. of course). The ``w`` qualifier causes the instruction
  95. to use W0 instead of X0, otherwise we get a warning
  96. about a value size mismatch (because err is an int,
  97. and aarch64 platforms are LP64: 32-bit int, 64 bit long
  98. and pointer).
  99. */
  100. __asm__ volatile ("mov %w0, #0" : "=r" (err));
  101. }
  102. __asm__ volatile ("ldr x29, %0" : : "m" (fp) :);
  103. __asm__ volatile ("" : : : REGS_TO_SAVE);
  104. return err;
  105. }
  106. #endif