/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT.  See the GNU General Public License for
 * more details.
 *
 * These routines make two important assumptions:
 *
 * 1. atomic_t is really an int and can be freely cast back and forth
 *    (validated in __init_atomic_per_cpu).
 *
 * 2. userspace uses sys_cmpxchg() for all atomic operations, thus using
 *    the same locking convention that all the kernel atomic routines use.
 */
#ifndef _ASM_TILE_FUTEX_H
#define _ASM_TILE_FUTEX_H

#ifndef __ASSEMBLY__

#include <linux/futex.h>
#include <linux/uaccess.h>
#include <linux/errno.h>
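
/*
 * Each of the low-level helpers below returns a "struct __get_user",
 * which packs both an error code (.err) and the word read from
 * userspace (.val), so callers can check for a fault and inspect the
 * old value in a single step.
 */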
extern struct __get_user futex_set(int __user *v, int i);
extern struct __get_user futex_add(int __user *v, int n);
extern struct __get_user futex_or(int __user *v, int n);
extern struct __get_user futex_andn(int __user *v, int n);
extern struct __get_user futex_cmpxchg(int __user *v, int o, int n);
#ifndef __tilegx__
extern struct __get_user futex_xor(int __user *v, int n);
#else
/* Emulate xor with a read/cmpxchg retry loop. */
static inline struct __get_user futex_xor(int __user *uaddr, int n)
{
	struct __get_user asm_ret = __get_user_4(uaddr);
	if (!asm_ret.err) {
		int oldval, newval;
		do {
			oldval = asm_ret.val;
			newval = oldval ^ n;
			asm_ret = futex_cmpxchg(uaddr, oldval, newval);
		} while (asm_ret.err == 0 && oldval != asm_ret.val);
	}
	return asm_ret;
}
#endif
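
/*
 * Decode the operation packed into "encoded_op" by the futex(2)
 * syscall: the opcode sits in bits 30-28 (bit 31 flags that oparg is
 * a shift count), the comparison opcode in bits 27-24, and the
 * sign-extended oparg and cmparg in bits 23-12 and 11-0 respectively.
 */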
static inline int futex_atomic_op_inuser(int encoded_op, int __user *uaddr)
{
	int op = (encoded_op >> 28) & 7;
	int cmp = (encoded_op >> 24) & 15;
	int oparg = (encoded_op << 8) >> 20;
	int cmparg = (encoded_op << 20) >> 20;
	int ret;
	struct __get_user asm_ret;

	if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
		oparg = 1 << oparg;

	if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int)))
		return -EFAULT;

	pagefault_disable();
	switch (op) {
	case FUTEX_OP_SET:
		asm_ret = futex_set(uaddr, oparg);
		break;
	case FUTEX_OP_ADD:
		asm_ret = futex_add(uaddr, oparg);
		break;
	case FUTEX_OP_OR:
		asm_ret = futex_or(uaddr, oparg);
		break;
	case FUTEX_OP_ANDN:
		asm_ret = futex_andn(uaddr, oparg);
		break;
	case FUTEX_OP_XOR:
		asm_ret = futex_xor(uaddr, oparg);
		break;
	default:
		asm_ret.err = -ENOSYS;
	}
	pagefault_enable();
	ret = asm_ret.err;
	if (!ret) {
		switch (cmp) {
		case FUTEX_OP_CMP_EQ:
			ret = (asm_ret.val == cmparg);
			break;
		case FUTEX_OP_CMP_NE:
			ret = (asm_ret.val != cmparg);
			break;
		case FUTEX_OP_CMP_LT:
			ret = (asm_ret.val < cmparg);
			break;
		case FUTEX_OP_CMP_GE:
			ret = (asm_ret.val >= cmparg);
			break;
		case FUTEX_OP_CMP_LE:
			ret = (asm_ret.val <= cmparg);
			break;
		case FUTEX_OP_CMP_GT:
			ret = (asm_ret.val > cmparg);
			break;
		default:
			ret = -ENOSYS;
		}
	}
	return ret;
}
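
/*
 * Atomically compare-and-exchange the user word at "uaddr".  On
 * success this returns the value that was previously in memory (so
 * the caller can tell whether the exchange took place); on a fault
 * it returns the negative error code from the helper.
 */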
static inline int futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval,
						int newval)
{
	struct __get_user asm_ret;

	if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int)))
		return -EFAULT;

	asm_ret = futex_cmpxchg(uaddr, oldval, newval);
	return asm_ret.err ? asm_ret.err : asm_ret.val;
}
/* Return failure from the atomic wrappers. */
struct __get_user __atomic_bad_address(int __user *addr);
#endif /* !__ASSEMBLY__ */

#endif /* _ASM_TILE_FUTEX_H */