4 #include <linux/list.h>
5 #include <linux/workqueue.h>
6 #include <linux/aio_abi.h>
8 #include <linux/rcupdate.h>
10 #include <linux/atomic.h>
/* Reserved ki_key value marking a kiocb as synchronous (not true AIO). */
14 #define KIOCB_SYNC_KEY (~0U)
/* Bit number in kiocb->ki_flags recording that the iocb was cancelled. */
17 #define KIF_CANCELLED 2
/* ki_flags accessors built on the kernel's set_bit/clear_bit/test_bit. */
19 #define kiocbSetCancelled(iocb) set_bit(KIF_CANCELLED, &(iocb)->ki_flags)
21 #define kiocbClearCancelled(iocb) clear_bit(KIF_CANCELLED, &(iocb)->ki_flags)
23 #define kiocbIsCancelled(iocb) test_bit(KIF_CANCELLED, &(iocb)->ki_flags)
25 /* is there a better place to document function pointer methods? */
27 * ki_retry - iocb forward progress callback
28 * @kiocb: The kiocb struct to advance by performing an operation.
30 * This callback is called when the AIO core wants a given AIO operation
31 * to make forward progress. The kiocb argument describes the operation
32 * that is to be performed. As the operation proceeds, perhaps partially,
33 * ki_retry is expected to update the kiocb with progress made. Typically
34 * ki_retry is set in the AIO core and it itself calls file_operations
37 * ki_retry's return value determines when the AIO operation is completed
38 * and an event is generated in the AIO event ring. Except for the special
39 * return values described below, the value that is returned from ki_retry
40 * is transferred directly into the completion ring as the operation's
41 * resulting status. Once this has happened ki_retry *MUST NOT* reference
42 * the kiocb pointer again.
44 * If ki_retry returns -EIOCBQUEUED it has made a promise that aio_complete()
45 * will be called on the kiocb pointer in the future. The AIO core will
46 * not ask the method again -- ki_retry must ensure forward progress.
47 * aio_complete() must be called once and only once in the future, multiple
48 * calls may result in undefined behaviour.
/*
 * struct kiocb field block.
 * NOTE(review): the struct's opening and closing lines fall outside this
 * chunk; only comments are changed here.
 */
51 unsigned long ki_flags;	/* KIF_* bit flags, e.g. KIF_CANCELLED */
53 unsigned ki_key; /* id of this request */
56 struct kioctx *ki_ctx; /* may be NULL for sync ops */
/* Method hooks invoked by the aio core: */
57 int (*ki_cancel)(struct kiocb *, struct io_event *);	/* cancel this request */
58 ssize_t (*ki_retry)(struct kiocb *);	/* make progress; see ki_retry doc above */
59 void (*ki_dtor)(struct kiocb *);	/* presumably a release hook — confirm in fs/aio.c */
/* For sync kiocbs: the waiting task (set to current by init_sync_kiocb()). */
63 struct task_struct *tsk;
66 __u64 ki_user_data; /* user's data for completion */
70 /* State that we remember to be able to restart/retry */
71 unsigned short ki_opcode;	/* operation code copied from the iocb */
72 size_t ki_nbytes; /* copy of iocb->aio_nbytes */
73 char __user *ki_buf; /* remaining iocb->aio_buf */
74 size_t ki_left; /* remaining bytes */
75 struct iovec ki_inline_vec; /* inline vector */
76 struct iovec *ki_iovec;	/* vectored-I/O segments (may point at ki_inline_vec — confirm) */
77 unsigned long ki_nr_segs;	/* total segments in ki_iovec */
78 unsigned long ki_cur_seg;	/* segment currently being processed */
80 struct list_head ki_list; /* the aio core uses this — rest of comment lost in extraction */
82 struct list_head ki_batch; /* batch allocation */
/*
 * If the aio_resfd field of the userspace iocb is not zero,
 * this is the underlying eventfd context to deliver events to.
 */
88 struct eventfd_ctx *ki_eventfd;
91 static inline bool is_sync_kiocb(struct kiocb *kiocb)
93 return kiocb->ki_key == KIOCB_SYNC_KEY;
96 static inline void init_sync_kiocb(struct kiocb *kiocb, struct file *filp)
98 *kiocb = (struct kiocb) {
99 .ki_users = ATOMIC_INIT(1),
100 .ki_key = KIOCB_SYNC_KEY,
102 .ki_obj.tsk = current,
/* CONFIG_AIO=y — real implementations live elsewhere (fs/aio.c in mainline). */
108 extern ssize_t wait_on_sync_kiocb(struct kiocb *iocb);	/* wait for a sync kiocb, return its result */
109 extern void aio_put_req(struct kiocb *iocb);	/* drop a request reference — name-based, confirm */
/* Complete an iocb with results @res/@res2; per the ki_retry contract
 * above, must be called exactly once after returning -EIOCBQUEUED. */
110 extern void aio_complete(struct kiocb *iocb, long res, long res2);
112 extern void exit_aio(struct mm_struct *mm);	/* presumably tears down @mm's aio state at exit — confirm */
/* Common io_submit() backend; @compat presumably selects 32-bit pointer
 * decoding for the compat syscall path — confirm. */
113 extern long do_io_submit(aio_context_t ctx_id, long nr,
114 struct iocb __user *__user *iocbpp, bool compat);
/*
 * CONFIG_AIO=n — no-op stubs so callers compile without #ifdefs.
 * NOTE(review): the line-number gap shows one stub between aio_complete
 * and exit_aio was lost in extraction; not reconstructed here.
 */
116 static inline ssize_t wait_on_sync_kiocb(struct kiocb *iocb) { return 0; }
117 static inline void aio_put_req(struct kiocb *iocb) { }
118 static inline void aio_complete(struct kiocb *iocb, long res, long res2) { }
120 static inline void exit_aio(struct mm_struct *mm) { }
121 static inline long do_io_submit(aio_context_t ctx_id, long nr,
122 struct iocb __user * __user *iocbpp,
123 bool compat) { return 0; }
124 #endif /* CONFIG_AIO */
126 static inline struct kiocb *list_kiocb(struct list_head *h)
128 return list_entry(h, struct kiocb, ki_list);
/* System-wide aio request accounting: current count and its ceiling
 * (presumably sysctl tunables — confirm where they are registered). */
132 extern unsigned long aio_nr;
133 extern unsigned long aio_max_nr;
135 #endif /* __LINUX__AIO_H */