/* Copyright (C) 1992,1993,1995-2000,2002-2005,2006
   Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Ulrich Drepper, <drepper@gnu.org>, August 1995.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, write to the Free
   Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
   02111-1307 USA.  */

/* Copied from: glibc-2.5/sysdeps/unix/sysv/linux/i386/sysdep.h */

#ifndef _LINUX_I386_SYSDEP_H
#define _LINUX_I386_SYSDEP_H 1

// DMTCP doesn't need this.
// /* There is some commonality.  */
// #include <sysdeps/unix/i386/sysdep.h>
// #include <bp-sym.h>
// #include <bp-asm.h>
// /* Defines RTLD_PRIVATE_ERRNO and USE_DL_SYSINFO.  */
// #include <dl-sysdep.h>
// #include <tls.h>


/* For Linux we can use the system call table in the kernel header file
   /usr/include/asm/unistd.h.  But those symbols do not follow the SYS_*
   naming convention, so we have to redefine the `SYS_ify' macro here.  */
#undef SYS_ify
#define SYS_ify(syscall_name)  __NR_##syscall_name
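
/* Illustration (not part of the original header): SYS_ify pastes the bare
   name onto __NR_, so SYS_ify (getpid) becomes __NR_getpid.  A minimal,
   hypothetical sketch, assuming <asm/unistd.h> provides the __NR_* values:  */
#if 0
#include <asm/unistd.h>
static inline long
sys_ify_example (void)
{
  return SYS_ify (getpid);   /* same constant as __NR_getpid */
}
#endif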

#if defined USE_DL_SYSINFO \
    && (!defined NOT_IN_libc || defined IS_IN_libpthread)
# define I386_USE_SYSENTER 1
#else
# undef I386_USE_SYSENTER
#endif

#ifdef __ASSEMBLER__

/* Linux uses a negative return value to indicate syscall errors,
   unlike most Unices, which use the condition codes' carry flag.

   Since version 2.1 the return value of a system call might be
   negative even if the call succeeded.  E.g., the `lseek' system call
   might return a large offset.  Therefore we can no longer simply test
   for < 0; instead we must check whether the value in %eax is a real
   error number.  Linus said he will make sure that no syscall returns
   a value in -1 .. -4095 as a valid result, so we can safely test
   against -4095.  */
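
/* Worked example (illustration, not in the original header): as the comment
   above notes, a successful `lseek' might return a large offset such as
   0xffffe000, which looks negative when %eax is read as a signed value, yet
   it lies below the error cutoff -4095 (0xfffff001) and therefore is not an
   error.  A failing call returning -EBADF = -9 = 0xfffffff7 is at or above
   the cutoff.  Hence the unsigned comparison used below:

        cmpl $-4095, %eax
        jae  SYSCALL_ERROR_LABEL

   branches exactly for the error encodings -1 .. -4095.  */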

/* We don't want the label for the error handler to be global when we define
   it here.  */
#ifdef PIC
# define SYSCALL_ERROR_LABEL 0f
#else
# define SYSCALL_ERROR_LABEL syscall_error
#endif

#undef PSEUDO
#define PSEUDO(name, syscall_name, args) \
  .text; \
  ENTRY (name) \
    DO_CALL (syscall_name, args); \
    cmpl $-4095, %eax; \
    jae SYSCALL_ERROR_LABEL; \
  L(pseudo_end):

#undef PSEUDO_END
#define PSEUDO_END(name) \
  SYSCALL_ERROR_HANDLER \
  END (name)

#undef PSEUDO_NOERRNO
#define PSEUDO_NOERRNO(name, syscall_name, args) \
  .text; \
  ENTRY (name) \
    DO_CALL (syscall_name, args)

#undef PSEUDO_END_NOERRNO
#define PSEUDO_END_NOERRNO(name) \
  END (name)

#define ret_NOERRNO ret

/* The function has to return the error code.  */
#undef PSEUDO_ERRVAL
#define PSEUDO_ERRVAL(name, syscall_name, args) \
  .text; \
  ENTRY (name) \
    DO_CALL (syscall_name, args); \
    negl %eax

#undef PSEUDO_END_ERRVAL
#define PSEUDO_END_ERRVAL(name) \
  END (name)

#define ret_ERRVAL ret

#ifndef PIC
# define SYSCALL_ERROR_HANDLER  /* Nothing here; code in sysdep.S is used.  */
#else

# if RTLD_PRIVATE_ERRNO
#  define SYSCALL_ERROR_HANDLER \
0:SETUP_PIC_REG(cx); \
  addl $_GLOBAL_OFFSET_TABLE_, %ecx; \
  xorl %edx, %edx; \
  subl %eax, %edx; \
  movl %edx, rtld_errno@GOTOFF(%ecx); \
  orl $-1, %eax; \
  jmp L(pseudo_end);

# elif defined _LIBC_REENTRANT

#  if USE___THREAD
#   ifndef NOT_IN_libc
#    define SYSCALL_ERROR_ERRNO __libc_errno
#   else
#    define SYSCALL_ERROR_ERRNO errno
#   endif
#   define SYSCALL_ERROR_HANDLER \
0:SETUP_PIC_REG (cx); \
  addl $_GLOBAL_OFFSET_TABLE_, %ecx; \
  movl SYSCALL_ERROR_ERRNO@GOTNTPOFF(%ecx), %ecx; \
  xorl %edx, %edx; \
  subl %eax, %edx; \
  SYSCALL_ERROR_HANDLER_TLS_STORE (%edx, %ecx); \
  orl $-1, %eax; \
  jmp L(pseudo_end);
#   ifndef NO_TLS_DIRECT_SEG_REFS
#    define SYSCALL_ERROR_HANDLER_TLS_STORE(src, destoff) \
  movl src, %gs:(destoff)
#   else
#    define SYSCALL_ERROR_HANDLER_TLS_STORE(src, destoff) \
  addl %gs:0, destoff; \
  movl src, (destoff)
#   endif
#  else
#   define SYSCALL_ERROR_HANDLER \
0:pushl %ebx; \
  cfi_adjust_cfa_offset (4); \
  cfi_rel_offset (ebx, 0); \
  SETUP_PIC_REG (bx); \
  addl $_GLOBAL_OFFSET_TABLE_, %ebx; \
  xorl %edx, %edx; \
  subl %eax, %edx; \
  pushl %edx; \
  cfi_adjust_cfa_offset (4); \
  PUSH_ERRNO_LOCATION_RETURN; \
  call BP_SYM (__errno_location)@PLT; \
  POP_ERRNO_LOCATION_RETURN; \
  popl %ecx; \
  cfi_adjust_cfa_offset (-4); \
  popl %ebx; \
  cfi_adjust_cfa_offset (-4); \
  cfi_restore (ebx); \
  movl %ecx, (%eax); \
  orl $-1, %eax; \
  jmp L(pseudo_end);
/* A quick note: it is assumed that the call to `__errno_location' does
   not modify the stack!  */
#  endif
# else
/* Store (- %eax) into errno through the GOT.  */
#  define SYSCALL_ERROR_HANDLER \
0:SETUP_PIC_REG(cx); \
  addl $_GLOBAL_OFFSET_TABLE_, %ecx; \
  xorl %edx, %edx; \
  subl %eax, %edx; \
  movl errno@GOT(%ecx), %ecx; \
  movl %edx, (%ecx); \
  orl $-1, %eax; \
  jmp L(pseudo_end);
# endif  /* _LIBC_REENTRANT */
#endif  /* PIC */


/* The original calling convention for system calls on Linux/i386 is
   to use int $0x80.  */
#ifdef I386_USE_SYSENTER
# ifdef SHARED
#  define ENTER_KERNEL call *%gs:SYSINFO_OFFSET
# else
#  define ENTER_KERNEL call *_dl_sysinfo
# endif
#else
# define ENTER_KERNEL int $0x80
#endif

/* Linux takes system call arguments in registers:

        syscall number   %eax        call-clobbered
        arg 1            %ebx        call-saved
        arg 2            %ecx        call-clobbered
        arg 3            %edx        call-clobbered
        arg 4            %esi        call-saved
        arg 5            %edi        call-saved

   The stack layout upon entering the function is:

        20(%esp)         Arg# 5
        16(%esp)         Arg# 4
        12(%esp)         Arg# 3
         8(%esp)         Arg# 2
         4(%esp)         Arg# 1
          (%esp)         Return address

   (Of course a function with say 3 arguments does not have entries for
   arguments 4 and 5.)

   The following code tries hard to be optimal.  A general assumption
   (which is true according to the data books I have) is that

        2 * xchg   is more expensive than   pushl + movl + popl

   Besides this, a neat trick is used.  The calling conventions for Linux
   say that among the registers used for parameters, %ecx and %edx need
   not be saved.  Moreover, we may clobber these registers even when they
   are not used for parameter passing.

   As a result one can see below that we save the content of the %ebx
   register in the %edx register when we have fewer than 3 arguments
   (2 * movl is less expensive than pushl + popl).

   Second, unlike for the other registers, we don't save the contents of
   %ecx and %edx when we have more than 1 and 2 arguments, respectively.

   The code below might look a bit long, but we have to take care of
   pipelined processors (i586).  There the `pushl' and `popl'
   instructions are marked as NP (not pairable), with the exception of
   two consecutive such instructions.  This gives no penalty on other
   processors though.  */

#undef DO_CALL
#define DO_CALL(syscall_name, args) \
    PUSHARGS_##args \
    DOARGS_##args \
    movl $SYS_ify (syscall_name), %eax; \
    ENTER_KERNEL \
    POPARGS_##args
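
/* Illustration (not part of the original header): for a one-argument
   syscall such as close, with ENTER_KERNEL being `int $0x80' (i.e. no
   sysenter/vDSO path), DO_CALL (close, 1) expands to roughly the
   following sequence (labels omitted); this is a hand expansion for
   exposition only:

        movl %ebx, %edx          # PUSHARGS_1: stash call-saved %ebx in %edx
        movl 4(%esp), %ebx       # DOARGS_1:   load argument 1 into %ebx
        movl $__NR_close, %eax   # SYS_ify (close): the syscall number
        int $0x80                # ENTER_KERNEL
        movl %edx, %ebx          # POPARGS_1:  restore %ebx
*/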

#define PUSHARGS_0      /* No arguments to push.  */
#define DOARGS_0        /* No arguments to frob.  */
#define POPARGS_0       /* No arguments to pop.  */
#define _PUSHARGS_0     /* No arguments to push.  */
#define _DOARGS_0(n)    /* No arguments to frob.  */
#define _POPARGS_0      /* No arguments to pop.  */

#define PUSHARGS_1      movl %ebx, %edx; L(SAVEBX1): PUSHARGS_0
#define DOARGS_1        _DOARGS_1 (4)
#define POPARGS_1       POPARGS_0; movl %edx, %ebx; L(RESTBX1):
#define _PUSHARGS_1     pushl %ebx; cfi_adjust_cfa_offset (4); \
                        cfi_rel_offset (ebx, 0); L(PUSHBX1): _PUSHARGS_0
#define _DOARGS_1(n)    movl n(%esp), %ebx; _DOARGS_0(n-4)
#define _POPARGS_1      _POPARGS_0; popl %ebx; cfi_adjust_cfa_offset (-4); \
                        cfi_restore (ebx); L(POPBX1):

#define PUSHARGS_2      PUSHARGS_1
#define DOARGS_2        _DOARGS_2 (8)
#define POPARGS_2       POPARGS_1
#define _PUSHARGS_2     _PUSHARGS_1
#define _DOARGS_2(n)    movl n(%esp), %ecx; _DOARGS_1 (n-4)
#define _POPARGS_2      _POPARGS_1

#define PUSHARGS_3      _PUSHARGS_2
#define DOARGS_3        _DOARGS_3 (16)
#define POPARGS_3       _POPARGS_3
#define _PUSHARGS_3     _PUSHARGS_2
#define _DOARGS_3(n)    movl n(%esp), %edx; _DOARGS_2 (n-4)
#define _POPARGS_3      _POPARGS_2

#define PUSHARGS_4      _PUSHARGS_4
#define DOARGS_4        _DOARGS_4 (24)
#define POPARGS_4       _POPARGS_4
#define _PUSHARGS_4     pushl %esi; cfi_adjust_cfa_offset (4); \
                        cfi_rel_offset (esi, 0); L(PUSHSI1): _PUSHARGS_3
#define _DOARGS_4(n)    movl n(%esp), %esi; _DOARGS_3 (n-4)
#define _POPARGS_4      _POPARGS_3; popl %esi; cfi_adjust_cfa_offset (-4); \
                        cfi_restore (esi); L(POPSI1):

#define PUSHARGS_5      _PUSHARGS_5
#define DOARGS_5        _DOARGS_5 (32)
#define POPARGS_5       _POPARGS_5
#define _PUSHARGS_5     pushl %edi; cfi_adjust_cfa_offset (4); \
                        cfi_rel_offset (edi, 0); L(PUSHDI1): _PUSHARGS_4
#define _DOARGS_5(n)    movl n(%esp), %edi; _DOARGS_4 (n-4)
#define _POPARGS_5      _POPARGS_4; popl %edi; cfi_adjust_cfa_offset (-4); \
                        cfi_restore (edi); L(POPDI1):

#define PUSHARGS_6      _PUSHARGS_6
#define DOARGS_6        _DOARGS_6 (36)
#define POPARGS_6       _POPARGS_6
#define _PUSHARGS_6     pushl %ebp; cfi_adjust_cfa_offset (4); \
                        cfi_rel_offset (ebp, 0); L(PUSHBP1): _PUSHARGS_5
#define _DOARGS_6(n)    movl n(%esp), %ebp; _DOARGS_5 (n-4)
#define _POPARGS_6      _POPARGS_5; popl %ebp; cfi_adjust_cfa_offset (-4); \
                        cfi_restore (ebp); L(POPBP1):

#else  /* !__ASSEMBLER__ */

/* We need some help from the assembler to generate optimal code.  We
   define some macros here which will be used later.  */
asm (".L__X'%ebx = 1\n\t"
     ".L__X'%ecx = 2\n\t"
     ".L__X'%edx = 2\n\t"
     ".L__X'%eax = 3\n\t"
     ".L__X'%esi = 3\n\t"
     ".L__X'%edi = 3\n\t"
     ".L__X'%ebp = 3\n\t"
     ".L__X'%esp = 3\n\t"
     ".macro bpushl name reg\n\t"
     ".if 1 - \\name\n\t"
     ".if 2 - \\name\n\t"
     "error\n\t"
     ".else\n\t"
     "xchgl \\reg, %ebx\n\t"
     ".endif\n\t"
     ".endif\n\t"
     ".endm\n\t"
     ".macro bpopl name reg\n\t"
     ".if 1 - \\name\n\t"
     ".if 2 - \\name\n\t"
     "error\n\t"
     ".else\n\t"
     "xchgl \\reg, %ebx\n\t"
     ".endif\n\t"
     ".endif\n\t"
     ".endm\n\t");
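
/* Illustration (not in the original header): the .L__X'%reg constants
   classify registers for bpushl/bpopl.  Class 1 is %ebx itself (nothing to
   do), class 2 is a register that may simply be exchanged with %ebx
   (%ecx, %edx), and class 3 provokes an assemble-time error.  So when
   LOADARGS_1 below emits, say,

        bpushl .L__X'%ecx, %ecx

   it expands to `xchgl %ecx, %ebx': the argument moves into %ebx while the
   old %ebx (the PIC/GOT register) is parked in %ecx, and the matching
   bpopl in RESTOREARGS_1 swaps them back after the kernel returns.  */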

/* Define a macro which expands inline into the wrapper code for a system
   call.  */
#undef INLINE_SYSCALL
#define INLINE_SYSCALL(name, nr, args...) \
  ({ \
    unsigned int resultvar = INTERNAL_SYSCALL (name, , nr, args); \
    if (__builtin_expect (INTERNAL_SYSCALL_ERROR_P (resultvar, ), 0)) \
      { \
        __set_errno (INTERNAL_SYSCALL_ERRNO (resultvar, )); \
        resultvar = 0xffffffff; \
      } \
    (int) resultvar; })
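
/* Usage sketch (illustrative only, not part of the original header):
   INLINE_SYSCALL behaves like a libc wrapper, i.e. it sets errno and
   evaluates to -1 on failure.  A hypothetical close() replacement:  */
#if 0
static int
my_close (int fd)
{
  /* Expands to the inline int $0x80 / sysenter sequence defined above;
     on error errno is set and -1 is returned.  */
  return INLINE_SYSCALL (close, 1, fd);
}
#endif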

/* Define a macro which expands inline into the wrapper code for a system
   call.  This use is for internal calls that do not need to handle errors
   normally.  It will never touch errno.  This returns just what the kernel
   gave back.

   The _NCS variant allows non-constant syscall numbers but it is not
   possible to use more than four parameters.  */
#undef INTERNAL_SYSCALL
#ifdef I386_USE_SYSENTER
# ifdef SHARED
#  define INTERNAL_SYSCALL(name, err, nr, args...) \
  ({ \
    register unsigned int resultvar; \
    EXTRAVAR_##nr \
    asm volatile ( \
    LOADARGS_##nr \
    "movl %1, %%eax\n\t" \
    "call *%%gs:%P2\n\t" \
    RESTOREARGS_##nr \
    : "=a" (resultvar) \
    : "i" (__NR_##name), "i" (offsetof (tcbhead_t, sysinfo)) \
      ASMFMT_##nr(args) : "memory", "cc"); \
    (int) resultvar; })
#  define INTERNAL_SYSCALL_NCS(name, err, nr, args...) \
  ({ \
    register unsigned int resultvar; \
    EXTRAVAR_##nr \
    asm volatile ( \
    LOADARGS_##nr \
    "call *%%gs:%P2\n\t" \
    RESTOREARGS_##nr \
    : "=a" (resultvar) \
    : "0" (name), "i" (offsetof (tcbhead_t, sysinfo)) \
      ASMFMT_##nr(args) : "memory", "cc"); \
    (int) resultvar; })
# else
#  define INTERNAL_SYSCALL(name, err, nr, args...) \
  ({ \
    register unsigned int resultvar; \
    EXTRAVAR_##nr \
    asm volatile ( \
    LOADARGS_##nr \
    "movl %1, %%eax\n\t" \
    "call *_dl_sysinfo\n\t" \
    RESTOREARGS_##nr \
    : "=a" (resultvar) \
    : "i" (__NR_##name) ASMFMT_##nr(args) : "memory", "cc"); \
    (int) resultvar; })
#  define INTERNAL_SYSCALL_NCS(name, err, nr, args...) \
  ({ \
    register unsigned int resultvar; \
    EXTRAVAR_##nr \
    asm volatile ( \
    LOADARGS_##nr \
    "call *_dl_sysinfo\n\t" \
    RESTOREARGS_##nr \
    : "=a" (resultvar) \
    : "0" (name) ASMFMT_##nr(args) : "memory", "cc"); \
    (int) resultvar; })
# endif
#else
# define INTERNAL_SYSCALL(name, err, nr, args...) \
  ({ \
    register unsigned int resultvar; \
    EXTRAVAR_##nr \
    asm volatile ( \
    LOADARGS_##nr \
    "movl %1, %%eax\n\t" \
    "int $0x80\n\t" \
    RESTOREARGS_##nr \
    : "=a" (resultvar) \
    : "i" (__NR_##name) ASMFMT_##nr(args) : "memory", "cc"); \
    (int) resultvar; })
# define INTERNAL_SYSCALL_NCS(name, err, nr, args...) \
  ({ \
    register unsigned int resultvar; \
    EXTRAVAR_##nr \
    asm volatile ( \
    LOADARGS_##nr \
    "int $0x80\n\t" \
    RESTOREARGS_##nr \
    : "=a" (resultvar) \
    : "0" (name) ASMFMT_##nr(args) : "memory", "cc"); \
    (int) resultvar; })
#endif

#undef INTERNAL_SYSCALL_DECL
#define INTERNAL_SYSCALL_DECL(err) do { } while (0)

#undef INTERNAL_SYSCALL_ERROR_P
#define INTERNAL_SYSCALL_ERROR_P(val, err) \
  ((unsigned int) (val) >= 0xfffff001u)

#undef INTERNAL_SYSCALL_ERRNO
#define INTERNAL_SYSCALL_ERRNO(val, err) (-(val))
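
/* Usage sketch (illustrative only, not part of the original header): the
   INTERNAL_* family never touches errno and hands back the raw kernel
   value, so the caller decodes errors explicitly.  Hypothetical example:  */
#if 0
static int
my_raw_getpid (void)
{
  INTERNAL_SYSCALL_DECL (err);
  int res = INTERNAL_SYSCALL (getpid, err, 0);
  if (INTERNAL_SYSCALL_ERROR_P (res, err))
    return -INTERNAL_SYSCALL_ERRNO (res, err);   /* negative errno value */
  return res;
}
#endif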

#define LOADARGS_0
#ifdef __PIC__
# if defined I386_USE_SYSENTER && defined SHARED
#  define LOADARGS_1 \
    "bpushl .L__X'%k3, %k3\n\t"
#  define LOADARGS_5 \
    "movl %%ebx, %4\n\t" \
    "movl %3, %%ebx\n\t"
# else
#  define LOADARGS_1 \
    "bpushl .L__X'%k2, %k2\n\t"
#  define LOADARGS_5 \
    "movl %%ebx, %3\n\t" \
    "movl %2, %%ebx\n\t"
# endif
# define LOADARGS_2     LOADARGS_1
# define LOADARGS_3 \
    "xchgl %%ebx, %%edi\n\t"
# define LOADARGS_4     LOADARGS_3
#else
# define LOADARGS_1
# define LOADARGS_2
# define LOADARGS_3
# define LOADARGS_4
# define LOADARGS_5
#endif

#define RESTOREARGS_0
#ifdef __PIC__
# if defined I386_USE_SYSENTER && defined SHARED
#  define RESTOREARGS_1 \
    "bpopl .L__X'%k3, %k3\n\t"
#  define RESTOREARGS_5 \
    "movl %4, %%ebx"
# else
#  define RESTOREARGS_1 \
    "bpopl .L__X'%k2, %k2\n\t"
#  define RESTOREARGS_5 \
    "movl %3, %%ebx"
# endif
# define RESTOREARGS_2  RESTOREARGS_1
# define RESTOREARGS_3 \
    "xchgl %%edi, %%ebx\n\t"
# define RESTOREARGS_4  RESTOREARGS_3
#else
# define RESTOREARGS_1
# define RESTOREARGS_2
# define RESTOREARGS_3
# define RESTOREARGS_4
# define RESTOREARGS_5
#endif

#define ASMFMT_0()
#ifdef __PIC__
# define ASMFMT_1(arg1) \
  , "cd" (arg1)
# define ASMFMT_2(arg1, arg2) \
  , "d" (arg1), "c" (arg2)
# define ASMFMT_3(arg1, arg2, arg3) \
  , "D" (arg1), "c" (arg2), "d" (arg3)
# define ASMFMT_4(arg1, arg2, arg3, arg4) \
  , "D" (arg1), "c" (arg2), "d" (arg3), "S" (arg4)
# define ASMFMT_5(arg1, arg2, arg3, arg4, arg5) \
  , "0" (arg1), "m" (_xv), "c" (arg2), "d" (arg3), "S" (arg4), "D" (arg5)
#else
# define ASMFMT_1(arg1) \
  , "b" (arg1)
# define ASMFMT_2(arg1, arg2) \
  , "b" (arg1), "c" (arg2)
# define ASMFMT_3(arg1, arg2, arg3) \
  , "b" (arg1), "c" (arg2), "d" (arg3)
# define ASMFMT_4(arg1, arg2, arg3, arg4) \
  , "b" (arg1), "c" (arg2), "d" (arg3), "S" (arg4)
# define ASMFMT_5(arg1, arg2, arg3, arg4, arg5) \
  , "b" (arg1), "c" (arg2), "d" (arg3), "S" (arg4), "D" (arg5)
#endif

#define EXTRAVAR_0
#define EXTRAVAR_1
#define EXTRAVAR_2
#define EXTRAVAR_3
#define EXTRAVAR_4
#ifdef __PIC__
# define EXTRAVAR_5 int _xv;
#else
# define EXTRAVAR_5
#endif

/* Consistency check for position-independent code.  */
#ifdef __PIC__
# define check_consistency() \
  ({ int __res; \
     __asm__ __volatile__ \
       ("call __i686.get_pc_thunk.cx;" \
        "addl $_GLOBAL_OFFSET_TABLE_, %%ecx;" \
        "subl %%ebx, %%ecx;" \
        "je 1f;" \
        "ud2;" \
        "1:\n" \
        ".section .gnu.linkonce.t.__i686.get_pc_thunk.cx,\"ax\",@progbits;" \
        ".globl __i686.get_pc_thunk.cx;" \
        ".hidden __i686.get_pc_thunk.cx;" \
        ".type __i686.get_pc_thunk.cx,@function;" \
        "__i686.get_pc_thunk.cx:" \
        "movl (%%esp), %%ecx;" \
        "ret;" \
        ".previous" \
        : "=c" (__res)); \
     __res; })
#endif
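
/* Sketch (illustrative only, not part of the original header):
   check_consistency() recomputes the GOT address from the current PC and
   traps via `ud2' if it disagrees with the value held in the PIC register
   %ebx; on success the expression evaluates to 0.  Hypothetical use:  */
#if 0
static void
assert_pic_register_ok (void)
{
  /* Traps with an invalid-opcode exception if %ebx does not point at
     the GOT; otherwise this is effectively a no-op yielding 0.  */
  (void) check_consistency ();
}
#endif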

#endif  /* __ASSEMBLER__ */


/* Pointer mangling support.  */
#if defined NOT_IN_libc && defined IS_IN_rtld
/* We cannot use the thread descriptor because in ld.so we use setjmp
   before the descriptor is initialized.  Using a global variable
   is too complicated here since we have no PC-relative addressing mode.  */
#else
# ifdef __ASSEMBLER__
#  define PTR_MANGLE(reg)   xorl %gs:POINTER_GUARD, reg
#  define PTR_DEMANGLE(reg) PTR_MANGLE (reg)
# else
#  define PTR_MANGLE(var)   asm ("xorl %%gs:%c2, %0" \
                                 : "=r" (var) \
                                 : "0" (var), \
                                   "i" (offsetof (tcbhead_t, \
                                                  pointer_guard)))
#  define PTR_DEMANGLE(var) PTR_MANGLE (var)
# endif
#endif
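
/* Usage sketch (illustrative only, not part of the original header):
   PTR_MANGLE XORs a pointer with the per-thread pointer_guard value in the
   TCB and PTR_DEMANGLE undoes it, so stored code pointers are not directly
   usable by an attacker.  Hypothetical C-side example, assuming <stdint.h>
   for uintptr_t:  */
#if 0
#include <stdint.h>

static void (*saved_handler) (void);

static void
save_handler (void (*fn) (void))
{
  uintptr_t p = (uintptr_t) fn;
  PTR_MANGLE (p);                        /* obscure before storing */
  saved_handler = (void (*) (void)) p;
}

static void
call_handler (void)
{
  uintptr_t p = (uintptr_t) saved_handler;
  PTR_DEMANGLE (p);                      /* recover the real address */
  ((void (*) (void)) p) ();
}
#endif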

#endif  /* linux/i386/sysdep.h */