switch_amd64_unix.h

/*
 * this is the internal transfer function.
 *
 * HISTORY
 * 3-May-13  Ralf Schmitt  <ralf@systemexit.de>
 *      Add support for strange GCC caller-save decisions
 *      (ported from switch_aarch64_gcc.h)
 * 18-Aug-11  Alexey Borzenkov  <snaury@gmail.com>
 *      Correctly save rbp, csr and cw
 * 01-Apr-04  Hye-Shik Chang  <perky@FreeBSD.org>
 *      Ported from i386 to amd64.
 * 24-Nov-02  Christian Tismer  <tismer@tismer.com>
 *      needed to add another magic constant to ensure
 *      that f in slp_eval_frame(PyFrameObject *f)
 *      gets included into the saved stack area.
 *      STACK_REFPLUS will probably be 1 in most cases.
 * 17-Sep-02  Christian Tismer  <tismer@tismer.com>
 *      after virtualizing stack save/restore, the
 *      stack size shrunk a bit. Needed to introduce
 *      an adjustment STACK_MAGIC per platform.
 * 15-Sep-02  Gerd Woetzel  <gerd.woetzel@GMD.DE>
 *      slightly changed framework for spark
 * 31-Avr-02  Armin Rigo  <arigo@ulb.ac.be>
 *      Added ebx, esi and edi register-saves.
 * 01-Mar-02  Samual M. Rushing  <rushing@ironport.com>
 *      Ported from i386.
 */

#define STACK_REFPLUS 1

#ifdef SLP_EVAL
/* #define STACK_MAGIC 3 */
/* the above works fine with gcc 2.96, but 2.95.3 wants this */
#define STACK_MAGIC 0

#define REGS_TO_SAVE "r12", "r13", "r14", "r15"
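/* r12-r15 are the System V AMD64 callee-saved registers that are not
   saved by hand below (rbp and rbx are); naming them as clobbers in
   the otherwise empty asm statements forces the compiler to preserve
   their values across the switch. */
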
static int
slp_switch(void)
{
    int err;
    void* rbp;
    void* rbx;
    unsigned int csr;
    unsigned short cw;
    /* This used to be declared 'register', but that does nothing in
       modern compilers and is explicitly forbidden in some new
       standards. */
    long *stackref, stsizediff;
    /* Clobber r12-r15 so the compiler spills any values it keeps
       there, then save the x87 control word, the MXCSR register,
       and rbp/rbx by hand. */
    __asm__ volatile ("" : : : REGS_TO_SAVE);
    __asm__ volatile ("fstcw %0" : "=m" (cw));
    __asm__ volatile ("stmxcsr %0" : "=m" (csr));
    __asm__ volatile ("movq %%rbp, %0" : "=m" (rbp));
    __asm__ volatile ("movq %%rbx, %0" : "=m" (rbx));
    __asm__ ("movq %%rsp, %0" : "=g" (stackref));
    {
        SLP_SAVE_STATE(stackref, stsizediff);
        /* Shift the stack and frame pointers onto the target stack. */
        __asm__ volatile (
            "addq %0, %%rsp\n"
            "addq %0, %%rbp\n"
            :
            : "r" (stsizediff)
            );
        SLP_RESTORE_STATE();
        /* Zero rax so err reports success. */
        __asm__ volatile ("xorq %%rax, %%rax" : "=a" (err));
    }
    /* Restore everything saved above, in reverse order. */
    __asm__ volatile ("movq %0, %%rbx" : : "m" (rbx));
    __asm__ volatile ("movq %0, %%rbp" : : "m" (rbp));
    __asm__ volatile ("ldmxcsr %0" : : "m" (csr));
    __asm__ volatile ("fldcw %0" : : "m" (cw));
    __asm__ volatile ("" : : : REGS_TO_SAVE);
    return err;
}
#endif
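
/*
 * Illustrative only: a minimal sketch of a hypothetical including
 * file, showing the pieces this header expects its includer to
 * provide (SLP_EVAL, SLP_SAVE_STATE, SLP_RESTORE_STATE).  The macro
 * names come from the code above; the slp_demo_* helpers, the
 * memcpy-based stack copy, and the zero stack shift are assumptions
 * for demonstration, not greenlet's actual implementation.  It is
 * kept under #if 0 so it never takes part in a build.
 */
#if 0
#include <string.h>

static char *slp_demo_base;              /* stack address recorded by the caller */
static char  slp_demo_copy[64 * 1024];   /* off-stack copy of the preserved slice */
static long  slp_demo_size;              /* assumes the slice fits in the buffer  */

/* Copy the slice between the switcher's stack pointer and the recorded
   base out of the way, and report how far rsp/rbp must move.  In this
   demo we "switch" back onto the same stack, so the shift is zero. */
#define SLP_SAVE_STATE(stackref, stsizediff)                    \
    stackref += STACK_MAGIC;                                    \
    slp_demo_size = slp_demo_base - (char *)(stackref);         \
    memcpy(slp_demo_copy, (char *)(stackref), slp_demo_size);   \
    stsizediff = 0

/* Write the preserved slice back to its (unmoved) location. */
#define SLP_RESTORE_STATE() \
    memcpy(slp_demo_base - slp_demo_size, slp_demo_copy, slp_demo_size)

#define SLP_EVAL
#include "switch_amd64_unix.h"

/* Usage: record a base address high on the stack, then switch. */
int slp_demo(void)
{
    char base_marker;
    slp_demo_base = &base_marker;
    return slp_switch();
}
#endif /* illustrative sketch */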
/*
 * further self-processing support
 */

/*
 * if you want to add self-inspection tools, place them
 * here. See the x86_msvc for the necessary defines.
 * These features are highly experimental and not
 * essential yet.
 */