4 #include <linux/list.h>
5 #include <linux/workqueue.h>
6 #include <linux/aio_abi.h>
8 #include <linux/rcupdate.h>
10 #include <linux/atomic.h>
/* ki_key value reserved for synchronous kiocbs set up by init_sync_kiocb() */
#define KIOCB_SYNC_KEY (~0U)

/* All-ones pointer sentinel — NOTE(review): presumably stored in ki_cancel
 * to mark a request whose cancellation is in progress; the code consuming
 * it is not visible in this chunk, confirm against fs/aio.c. */
#define KIOCB_CANCELLED ((void *) (~0ULL))

/* Cancellation method attached to a request via kiocb_set_cancel_fn().
 * NOTE(review): the return-value convention and how the io_event is
 * filled in are not visible in this chunk — confirm with the caller. */
typedef int (kiocb_cancel_fn)(struct kiocb *, struct io_event *);
/* is there a better place to document function pointer methods? */
/*
 * ki_retry - iocb forward progress callback
 * @kiocb: The kiocb struct to advance by performing an operation.
 *
 * This callback is called when the AIO core wants a given AIO operation
 * to make forward progress. The kiocb argument describes the operation
 * that is to be performed. As the operation proceeds, perhaps partially,
 * ki_retry is expected to update the kiocb with progress made. Typically
 * ki_retry is set in the AIO core and it itself calls file_operations
 * methods.
 *
 * ki_retry's return value determines when the AIO operation is completed
 * and an event is generated in the AIO event ring. Except the special
 * return values described below, the value that is returned from ki_retry
 * is transferred directly into the completion ring as the operation's
 * resulting status. Once this has happened ki_retry *MUST NOT* reference
 * the kiocb pointer again.
 *
 * If ki_retry returns -EIOCBQUEUED it has made a promise that aio_complete()
 * will be called on the kiocb pointer in the future. The AIO core will
 * not ask the method again -- ki_retry must ensure forward progress.
 * aio_complete() must be called once and only once in the future, multiple
 * calls may result in undefined behaviour.
 */
	/*
	 * NOTE(review): this chunk is truncated — the enclosing
	 * "struct kiocb {" header and the closing "};" are not visible
	 * here, and some field lines of the original appear to be missing.
	 */
	unsigned		ki_key;		/* id of this request;
						 * KIOCB_SYNC_KEY for sync
						 * kiocbs (see is_sync_kiocb()) */
	struct kioctx		*ki_ctx;	/* may be NULL for sync ops */
	kiocb_cancel_fn		*ki_cancel;	/* cancel method, if any */
	ssize_t			(*ki_retry)(struct kiocb *);	/* forward-progress
						 * method documented above */
	void			(*ki_dtor)(struct kiocb *);	/* NOTE(review): presumably a
						 * teardown hook — confirm */

	/* NOTE(review): init_sync_kiocb() sets this via ".ki_obj.tsk", so
	 * tsk presumably sits inside a union (ki_obj) whose header is not
	 * visible in this chunk — confirm against the full file. */
	struct task_struct	*tsk;

	__u64			ki_user_data;	/* user's data for completion */

	/* State that we remember to be able to restart/retry */
	unsigned short		ki_opcode;
	size_t			ki_nbytes;	/* copy of iocb->aio_nbytes */
	char			__user *ki_buf;	/* remaining iocb->aio_buf */
	size_t			ki_left;	/* remaining bytes */
	struct iovec		ki_inline_vec;	/* inline vector */
	struct iovec		*ki_iovec;	/* NOTE(review): presumably points
						 * at ki_inline_vec for single-
						 * segment ops — confirm */
	unsigned long		ki_nr_segs;
	unsigned long		ki_cur_seg;

	struct list_head	ki_list;	/* the aio core uses this
						 * for cancellation */
	struct list_head	ki_batch;	/* batch allocation */

	/*
	 * If the aio_resfd field of the userspace iocb is not zero,
	 * this is the underlying eventfd context to deliver events to.
	 */
	struct eventfd_ctx	*ki_eventfd;
86 static inline bool is_sync_kiocb(struct kiocb *kiocb)
88 return kiocb->ki_key == KIOCB_SYNC_KEY;
/*
 * init_sync_kiocb - set up a kiocb for a synchronous operation
 * @kiocb: caller-provided (typically on-stack) request to initialise
 * @filp: file the operation targets
 *
 * Zero-initialises *kiocb (via the compound literal) and tags it with
 * KIOCB_SYNC_KEY so is_sync_kiocb() recognises it.
 *
 * NOTE(review): this chunk appears truncated — the function braces and
 * the end of the compound literal are missing, and @filp is unused in
 * the visible lines (the original presumably also sets .ki_filp = filp).
 * Confirm against the full file before relying on this text.
 */
static inline void init_sync_kiocb(struct kiocb *kiocb, struct file *filp)
	*kiocb = (struct kiocb) {
		.ki_users = ATOMIC_INIT(1),	/* one reference, held by the caller */
		.ki_key = KIOCB_SYNC_KEY,	/* marks the kiocb as synchronous */
		.ki_obj.tsk = current,		/* NOTE(review): presumably the task
						 * to wake on completion — confirm */
/*
 * AIO core entry points, implemented elsewhere (fs/aio.c in the kernel
 * tree). NOTE(review): presumably the CONFIG_AIO=y declarations — the
 * guarding #ifdef is not visible in this chunk, but matching static
 * inline stubs for the disabled case follow below.
 */
extern ssize_t wait_on_sync_kiocb(struct kiocb *iocb);
extern void aio_put_req(struct kiocb *iocb);
extern void aio_complete(struct kiocb *iocb, long res, long res2);
extern void exit_aio(struct mm_struct *mm);
extern long do_io_submit(aio_context_t ctx_id, long nr,
			struct iocb __user *__user *iocbpp, bool compat);
void kiocb_set_cancel_fn(struct kiocb *req, kiocb_cancel_fn *cancel);
/* Stub (presumably the !CONFIG_AIO branch — guard not visible in this
 * chunk): nothing to wait for, report zero bytes transferred. */
static inline ssize_t wait_on_sync_kiocb(struct kiocb *iocb)
{
	return 0;
}
/* Stub (presumably the !CONFIG_AIO branch — guard not visible in this
 * chunk): no request accounting, dropping a reference is a no-op. */
static inline void aio_put_req(struct kiocb *iocb)
{
}
/* Stub (presumably the !CONFIG_AIO branch — guard not visible in this
 * chunk): no event ring exists, so completion delivery is a no-op. */
static inline void aio_complete(struct kiocb *iocb, long res, long res2)
{
}
/* Stub (presumably the !CONFIG_AIO branch — guard not visible in this
 * chunk): an exiting mm has no AIO contexts to tear down. */
static inline void exit_aio(struct mm_struct *mm)
{
}
117 static inline long do_io_submit(aio_context_t ctx_id, long nr,
118 struct iocb __user * __user *iocbpp,
119 bool compat) { return 0; }
120 static inline void kiocb_set_cancel_fn(struct kiocb *req,
121 kiocb_cancel_fn *cancel) { }
122 #endif /* CONFIG_AIO */
124 static inline struct kiocb *list_kiocb(struct list_head *h)
126 return list_entry(h, struct kiocb, ki_list);
/* System-wide AIO request accounting — NOTE(review): the definitions and
 * the code enforcing the limit are not visible in this chunk; presumably
 * aio_nr counts outstanding requests and aio_max_nr caps them (the
 * fs.aio-max-nr sysctl in the kernel) — confirm against fs/aio.c. */
extern unsigned long aio_nr;
extern unsigned long aio_max_nr;
133 #endif /* __LINUX__AIO_H */