1
2 #include <linux/ceph/ceph_debug.h>
3
4 #include <linux/module.h>
5 #include <linux/err.h>
6 #include <linux/highmem.h>
7 #include <linux/mm.h>
8 #include <linux/pagemap.h>
9 #include <linux/slab.h>
10 #include <linux/uaccess.h>
11 #ifdef CONFIG_BLOCK
12 #include <linux/bio.h>
13 #endif
14
15 #include <linux/ceph/libceph.h>
16 #include <linux/ceph/osd_client.h>
17 #include <linux/ceph/messenger.h>
18 #include <linux/ceph/decode.h>
19 #include <linux/ceph/auth.h>
20 #include <linux/ceph/pagelist.h>
21
22 #define OSD_OPREPLY_FRONT_LEN   512
23
24 static struct kmem_cache        *ceph_osd_request_cache;
25
26 static const struct ceph_connection_operations osd_con_ops;
27
28 /*
29  * Implement client access to a distributed object storage cluster.
30  *
31  * All data objects are stored within a cluster/cloud of OSDs, or
32  * "object storage devices."  (Note that Ceph OSDs have _nothing_ to
33  * do with the T10 OSD extensions to SCSI.)  Ceph OSDs are simply
34  * remote daemons serving up and coordinating consistent and safe
35  * access to storage.
36  *
37  * Cluster membership and the mapping of data objects onto storage devices
38  * are described by the osd map.
39  *
40  * We keep track of pending OSD requests (read, write), resubmit
41  * requests to different OSDs when the cluster topology/data layout
42  * change, or retry the affected requests when the communications
43  * channel with an OSD is reset.
44  */
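/*
 * A rough sketch of the usual request lifecycle, for orientation only
 * (not a caller from this file; ceph_osdc_start_request() and
 * ceph_osdc_wait_request() are declared in <linux/ceph/osd_client.h>):
 *
 *	req = ceph_osdc_new_request(osdc, layout, vino, off, &len, 0, 1,
 *				    CEPH_OSD_OP_READ, CEPH_OSD_FLAG_READ,
 *				    NULL, truncate_seq, truncate_size, false);
 *	if (IS_ERR(req))
 *		return PTR_ERR(req);
 *	osd_req_op_extent_osd_data_pages(req, 0, pages, len, 0, false, false);
 *	ceph_osdc_start_request(osdc, req, false);
 *	err = ceph_osdc_wait_request(osdc, req);
 *	ceph_osdc_put_request(req);
 */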
45
46 static void link_request(struct ceph_osd *osd, struct ceph_osd_request *req);
47 static void unlink_request(struct ceph_osd *osd, struct ceph_osd_request *req);
48 static void link_linger(struct ceph_osd *osd,
49                         struct ceph_osd_linger_request *lreq);
50 static void unlink_linger(struct ceph_osd *osd,
51                           struct ceph_osd_linger_request *lreq);
52
53 #if 1
54 static inline bool rwsem_is_wrlocked(struct rw_semaphore *sem)
55 {
56         bool wrlocked = true;
57
58         if (unlikely(down_read_trylock(sem))) {
59                 wrlocked = false;
60                 up_read(sem);
61         }
62
63         return wrlocked;
64 }
65 static inline void verify_osdc_locked(struct ceph_osd_client *osdc)
66 {
67         WARN_ON(!rwsem_is_locked(&osdc->lock));
68 }
69 static inline void verify_osdc_wrlocked(struct ceph_osd_client *osdc)
70 {
71         WARN_ON(!rwsem_is_wrlocked(&osdc->lock));
72 }
73 static inline void verify_osd_locked(struct ceph_osd *osd)
74 {
75         struct ceph_osd_client *osdc = osd->o_osdc;
76
77         WARN_ON(!(mutex_is_locked(&osd->lock) &&
78                   rwsem_is_locked(&osdc->lock)) &&
79                 !rwsem_is_wrlocked(&osdc->lock));
80 }
81 static inline void verify_lreq_locked(struct ceph_osd_linger_request *lreq)
82 {
83         WARN_ON(!mutex_is_locked(&lreq->lock));
84 }
85 #else
86 static inline void verify_osdc_locked(struct ceph_osd_client *osdc) { }
87 static inline void verify_osdc_wrlocked(struct ceph_osd_client *osdc) { }
88 static inline void verify_osd_locked(struct ceph_osd *osd) { }
89 static inline void verify_lreq_locked(struct ceph_osd_linger_request *lreq) { }
90 #endif
91
92 /*
93  * Calculate the mapping of a file extent onto an object: shorten the
94  * extent as necessary if it crosses an object boundary and return the
95  * resulting object number, offset and length within that object.
96  */
99 static int calc_layout(struct ceph_file_layout *layout, u64 off, u64 *plen,
100                         u64 *objnum, u64 *objoff, u64 *objlen)
101 {
102         u64 orig_len = *plen;
103         int r;
104
105         /* object extent? */
106         r = ceph_calc_file_object_mapping(layout, off, orig_len, objnum,
107                                           objoff, objlen);
108         if (r < 0)
109                 return r;
110         if (*objlen < orig_len) {
111                 *plen = *objlen;
112                 dout(" skipping last %llu, final file extent %llu~%llu\n",
113                      orig_len - *plen, off, *plen);
114         }
115
116         dout("calc_layout objnum=%llx %llu~%llu\n", *objnum, *objoff, *objlen);
117
118         return 0;
119 }
120
121 static void ceph_osd_data_init(struct ceph_osd_data *osd_data)
122 {
123         memset(osd_data, 0, sizeof (*osd_data));
124         osd_data->type = CEPH_OSD_DATA_TYPE_NONE;
125 }
126
127 static void ceph_osd_data_pages_init(struct ceph_osd_data *osd_data,
128                         struct page **pages, u64 length, u32 alignment,
129                         bool pages_from_pool, bool own_pages)
130 {
131         osd_data->type = CEPH_OSD_DATA_TYPE_PAGES;
132         osd_data->pages = pages;
133         osd_data->length = length;
134         osd_data->alignment = alignment;
135         osd_data->pages_from_pool = pages_from_pool;
136         osd_data->own_pages = own_pages;
137 }
138
139 static void ceph_osd_data_pagelist_init(struct ceph_osd_data *osd_data,
140                         struct ceph_pagelist *pagelist)
141 {
142         osd_data->type = CEPH_OSD_DATA_TYPE_PAGELIST;
143         osd_data->pagelist = pagelist;
144 }
145
146 #ifdef CONFIG_BLOCK
147 static void ceph_osd_data_bio_init(struct ceph_osd_data *osd_data,
148                         struct bio *bio, size_t bio_length)
149 {
150         osd_data->type = CEPH_OSD_DATA_TYPE_BIO;
151         osd_data->bio = bio;
152         osd_data->bio_length = bio_length;
153 }
154 #endif /* CONFIG_BLOCK */
155
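/*
 * Return a pointer to the ceph_osd_data field @fld inside the @typ member
 * of op @whch of @oreq, e.g. osd_req_op_data(req, 0, extent, osd_data).
 */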
156 #define osd_req_op_data(oreq, whch, typ, fld)                           \
157 ({                                                                      \
158         struct ceph_osd_request *__oreq = (oreq);                       \
159         unsigned int __whch = (whch);                                   \
160         BUG_ON(__whch >= __oreq->r_num_ops);                            \
161         &__oreq->r_ops[__whch].typ.fld;                                 \
162 })
163
164 static struct ceph_osd_data *
165 osd_req_op_raw_data_in(struct ceph_osd_request *osd_req, unsigned int which)
166 {
167         BUG_ON(which >= osd_req->r_num_ops);
168
169         return &osd_req->r_ops[which].raw_data_in;
170 }
171
172 struct ceph_osd_data *
173 osd_req_op_extent_osd_data(struct ceph_osd_request *osd_req,
174                         unsigned int which)
175 {
176         return osd_req_op_data(osd_req, which, extent, osd_data);
177 }
178 EXPORT_SYMBOL(osd_req_op_extent_osd_data);
179
180 void osd_req_op_raw_data_in_pages(struct ceph_osd_request *osd_req,
181                         unsigned int which, struct page **pages,
182                         u64 length, u32 alignment,
183                         bool pages_from_pool, bool own_pages)
184 {
185         struct ceph_osd_data *osd_data;
186
187         osd_data = osd_req_op_raw_data_in(osd_req, which);
188         ceph_osd_data_pages_init(osd_data, pages, length, alignment,
189                                 pages_from_pool, own_pages);
190 }
191 EXPORT_SYMBOL(osd_req_op_raw_data_in_pages);
192
193 void osd_req_op_extent_osd_data_pages(struct ceph_osd_request *osd_req,
194                         unsigned int which, struct page **pages,
195                         u64 length, u32 alignment,
196                         bool pages_from_pool, bool own_pages)
197 {
198         struct ceph_osd_data *osd_data;
199
200         osd_data = osd_req_op_data(osd_req, which, extent, osd_data);
201         ceph_osd_data_pages_init(osd_data, pages, length, alignment,
202                                 pages_from_pool, own_pages);
203 }
204 EXPORT_SYMBOL(osd_req_op_extent_osd_data_pages);
205
206 void osd_req_op_extent_osd_data_pagelist(struct ceph_osd_request *osd_req,
207                         unsigned int which, struct ceph_pagelist *pagelist)
208 {
209         struct ceph_osd_data *osd_data;
210
211         osd_data = osd_req_op_data(osd_req, which, extent, osd_data);
212         ceph_osd_data_pagelist_init(osd_data, pagelist);
213 }
214 EXPORT_SYMBOL(osd_req_op_extent_osd_data_pagelist);
215
216 #ifdef CONFIG_BLOCK
217 void osd_req_op_extent_osd_data_bio(struct ceph_osd_request *osd_req,
218                         unsigned int which, struct bio *bio, size_t bio_length)
219 {
220         struct ceph_osd_data *osd_data;
221
222         osd_data = osd_req_op_data(osd_req, which, extent, osd_data);
223         ceph_osd_data_bio_init(osd_data, bio, bio_length);
224 }
225 EXPORT_SYMBOL(osd_req_op_extent_osd_data_bio);
226 #endif /* CONFIG_BLOCK */
227
228 static void osd_req_op_cls_request_info_pagelist(
229                         struct ceph_osd_request *osd_req,
230                         unsigned int which, struct ceph_pagelist *pagelist)
231 {
232         struct ceph_osd_data *osd_data;
233
234         osd_data = osd_req_op_data(osd_req, which, cls, request_info);
235         ceph_osd_data_pagelist_init(osd_data, pagelist);
236 }
237
238 void osd_req_op_cls_request_data_pagelist(
239                         struct ceph_osd_request *osd_req,
240                         unsigned int which, struct ceph_pagelist *pagelist)
241 {
242         struct ceph_osd_data *osd_data;
243
244         osd_data = osd_req_op_data(osd_req, which, cls, request_data);
245         ceph_osd_data_pagelist_init(osd_data, pagelist);
246         osd_req->r_ops[which].cls.indata_len += pagelist->length;
247         osd_req->r_ops[which].indata_len += pagelist->length;
248 }
249 EXPORT_SYMBOL(osd_req_op_cls_request_data_pagelist);
250
251 void osd_req_op_cls_request_data_pages(struct ceph_osd_request *osd_req,
252                         unsigned int which, struct page **pages, u64 length,
253                         u32 alignment, bool pages_from_pool, bool own_pages)
254 {
255         struct ceph_osd_data *osd_data;
256
257         osd_data = osd_req_op_data(osd_req, which, cls, request_data);
258         ceph_osd_data_pages_init(osd_data, pages, length, alignment,
259                                 pages_from_pool, own_pages);
260         osd_req->r_ops[which].cls.indata_len += length;
261         osd_req->r_ops[which].indata_len += length;
262 }
263 EXPORT_SYMBOL(osd_req_op_cls_request_data_pages);
264
265 void osd_req_op_cls_response_data_pages(struct ceph_osd_request *osd_req,
266                         unsigned int which, struct page **pages, u64 length,
267                         u32 alignment, bool pages_from_pool, bool own_pages)
268 {
269         struct ceph_osd_data *osd_data;
270
271         osd_data = osd_req_op_data(osd_req, which, cls, response_data);
272         ceph_osd_data_pages_init(osd_data, pages, length, alignment,
273                                 pages_from_pool, own_pages);
274 }
275 EXPORT_SYMBOL(osd_req_op_cls_response_data_pages);
276
277 static u64 ceph_osd_data_length(struct ceph_osd_data *osd_data)
278 {
279         switch (osd_data->type) {
280         case CEPH_OSD_DATA_TYPE_NONE:
281                 return 0;
282         case CEPH_OSD_DATA_TYPE_PAGES:
283                 return osd_data->length;
284         case CEPH_OSD_DATA_TYPE_PAGELIST:
285                 return (u64)osd_data->pagelist->length;
286 #ifdef CONFIG_BLOCK
287         case CEPH_OSD_DATA_TYPE_BIO:
288                 return (u64)osd_data->bio_length;
289 #endif /* CONFIG_BLOCK */
290         default:
291                 WARN(true, "unrecognized data type %d\n", (int)osd_data->type);
292                 return 0;
293         }
294 }
295
296 static void ceph_osd_data_release(struct ceph_osd_data *osd_data)
297 {
298         if (osd_data->type == CEPH_OSD_DATA_TYPE_PAGES && osd_data->own_pages) {
299                 int num_pages;
300
301                 num_pages = calc_pages_for((u64)osd_data->alignment,
302                                                 (u64)osd_data->length);
303                 ceph_release_page_vector(osd_data->pages, num_pages);
304         }
305         ceph_osd_data_init(osd_data);
306 }
307
308 static void osd_req_op_data_release(struct ceph_osd_request *osd_req,
309                         unsigned int which)
310 {
311         struct ceph_osd_req_op *op;
312
313         BUG_ON(which >= osd_req->r_num_ops);
314         op = &osd_req->r_ops[which];
315
316         switch (op->op) {
317         case CEPH_OSD_OP_READ:
318         case CEPH_OSD_OP_WRITE:
319         case CEPH_OSD_OP_WRITEFULL:
320                 ceph_osd_data_release(&op->extent.osd_data);
321                 break;
322         case CEPH_OSD_OP_CALL:
323                 ceph_osd_data_release(&op->cls.request_info);
324                 ceph_osd_data_release(&op->cls.request_data);
325                 ceph_osd_data_release(&op->cls.response_data);
326                 break;
327         case CEPH_OSD_OP_SETXATTR:
328         case CEPH_OSD_OP_CMPXATTR:
329                 ceph_osd_data_release(&op->xattr.osd_data);
330                 break;
331         case CEPH_OSD_OP_STAT:
332                 ceph_osd_data_release(&op->raw_data_in);
333                 break;
334         case CEPH_OSD_OP_NOTIFY_ACK:
335                 ceph_osd_data_release(&op->notify_ack.request_data);
336                 break;
337         case CEPH_OSD_OP_NOTIFY:
338                 ceph_osd_data_release(&op->notify.request_data);
339                 ceph_osd_data_release(&op->notify.response_data);
340                 break;
341         case CEPH_OSD_OP_LIST_WATCHERS:
342                 ceph_osd_data_release(&op->list_watchers.response_data);
343                 break;
344         default:
345                 break;
346         }
347 }
348
349 /*
350  * Assumes @t is zero-initialized.
351  */
352 static void target_init(struct ceph_osd_request_target *t)
353 {
354         ceph_oid_init(&t->base_oid);
355         ceph_oloc_init(&t->base_oloc);
356         ceph_oid_init(&t->target_oid);
357         ceph_oloc_init(&t->target_oloc);
358
359         ceph_osds_init(&t->acting);
360         ceph_osds_init(&t->up);
361         t->size = -1;
362         t->min_size = -1;
363
364         t->osd = CEPH_HOMELESS_OSD;
365 }
366
367 static void target_copy(struct ceph_osd_request_target *dest,
368                         const struct ceph_osd_request_target *src)
369 {
370         ceph_oid_copy(&dest->base_oid, &src->base_oid);
371         ceph_oloc_copy(&dest->base_oloc, &src->base_oloc);
372         ceph_oid_copy(&dest->target_oid, &src->target_oid);
373         ceph_oloc_copy(&dest->target_oloc, &src->target_oloc);
374
375         dest->pgid = src->pgid; /* struct */
376         dest->pg_num = src->pg_num;
377         dest->pg_num_mask = src->pg_num_mask;
378         ceph_osds_copy(&dest->acting, &src->acting);
379         ceph_osds_copy(&dest->up, &src->up);
380         dest->size = src->size;
381         dest->min_size = src->min_size;
382         dest->sort_bitwise = src->sort_bitwise;
383
384         dest->flags = src->flags;
385         dest->paused = src->paused;
386
387         dest->osd = src->osd;
388 }
389
390 static void target_destroy(struct ceph_osd_request_target *t)
391 {
392         ceph_oid_destroy(&t->base_oid);
393         ceph_oloc_destroy(&t->base_oloc);
394         ceph_oid_destroy(&t->target_oid);
395         ceph_oloc_destroy(&t->target_oloc);
396 }
397
398 /*
399  * requests
400  */
401 static void request_release_checks(struct ceph_osd_request *req)
402 {
403         WARN_ON(!RB_EMPTY_NODE(&req->r_node));
404         WARN_ON(!RB_EMPTY_NODE(&req->r_mc_node));
405         WARN_ON(!list_empty(&req->r_unsafe_item));
406         WARN_ON(req->r_osd);
407 }
408
409 static void ceph_osdc_release_request(struct kref *kref)
410 {
411         struct ceph_osd_request *req = container_of(kref,
412                                             struct ceph_osd_request, r_kref);
413         unsigned int which;
414
415         dout("%s %p (r_request %p r_reply %p)\n", __func__, req,
416              req->r_request, req->r_reply);
417         request_release_checks(req);
418
419         if (req->r_request)
420                 ceph_msg_put(req->r_request);
421         if (req->r_reply)
422                 ceph_msg_put(req->r_reply);
423
424         for (which = 0; which < req->r_num_ops; which++)
425                 osd_req_op_data_release(req, which);
426
427         target_destroy(&req->r_t);
428         ceph_put_snap_context(req->r_snapc);
429
430         if (req->r_mempool)
431                 mempool_free(req, req->r_osdc->req_mempool);
432         else if (req->r_num_ops <= CEPH_OSD_SLAB_OPS)
433                 kmem_cache_free(ceph_osd_request_cache, req);
434         else
435                 kfree(req);
436 }
437
438 void ceph_osdc_get_request(struct ceph_osd_request *req)
439 {
440         dout("%s %p (was %d)\n", __func__, req,
441              kref_read(&req->r_kref));
442         kref_get(&req->r_kref);
443 }
444 EXPORT_SYMBOL(ceph_osdc_get_request);
445
446 void ceph_osdc_put_request(struct ceph_osd_request *req)
447 {
448         if (req) {
449                 dout("%s %p (was %d)\n", __func__, req,
450                      kref_read(&req->r_kref));
451                 kref_put(&req->r_kref, ceph_osdc_release_request);
452         }
453 }
454 EXPORT_SYMBOL(ceph_osdc_put_request);
455
456 static void request_init(struct ceph_osd_request *req)
457 {
458         /* req only, each op is zeroed in _osd_req_op_init() */
459         memset(req, 0, sizeof(*req));
460
461         kref_init(&req->r_kref);
462         init_completion(&req->r_completion);
463         RB_CLEAR_NODE(&req->r_node);
464         RB_CLEAR_NODE(&req->r_mc_node);
465         INIT_LIST_HEAD(&req->r_unsafe_item);
466
467         target_init(&req->r_t);
468 }
469
470 /*
471  * This is ugly, but it allows us to reuse linger registration and ping
472  * requests, keeping the structure of the code around send_linger{_ping}()
473  * reasonable.  Setting up a min_nr=2 mempool for each linger request
474  * and dealing with copying ops (this blasts req only, watch op remains
475  * intact) isn't any better.
476  */
477 static void request_reinit(struct ceph_osd_request *req)
478 {
479         struct ceph_osd_client *osdc = req->r_osdc;
480         bool mempool = req->r_mempool;
481         unsigned int num_ops = req->r_num_ops;
482         u64 snapid = req->r_snapid;
483         struct ceph_snap_context *snapc = req->r_snapc;
484         bool linger = req->r_linger;
485         struct ceph_msg *request_msg = req->r_request;
486         struct ceph_msg *reply_msg = req->r_reply;
487
488         dout("%s req %p\n", __func__, req);
489         WARN_ON(kref_read(&req->r_kref) != 1);
490         request_release_checks(req);
491
492         WARN_ON(kref_read(&request_msg->kref) != 1);
493         WARN_ON(kref_read(&reply_msg->kref) != 1);
494         target_destroy(&req->r_t);
495
496         request_init(req);
497         req->r_osdc = osdc;
498         req->r_mempool = mempool;
499         req->r_num_ops = num_ops;
500         req->r_snapid = snapid;
501         req->r_snapc = snapc;
502         req->r_linger = linger;
503         req->r_request = request_msg;
504         req->r_reply = reply_msg;
505 }
506
507 struct ceph_osd_request *ceph_osdc_alloc_request(struct ceph_osd_client *osdc,
508                                                struct ceph_snap_context *snapc,
509                                                unsigned int num_ops,
510                                                bool use_mempool,
511                                                gfp_t gfp_flags)
512 {
513         struct ceph_osd_request *req;
514
515         if (use_mempool) {
516                 BUG_ON(num_ops > CEPH_OSD_SLAB_OPS);
517                 req = mempool_alloc(osdc->req_mempool, gfp_flags);
518         } else if (num_ops <= CEPH_OSD_SLAB_OPS) {
519                 req = kmem_cache_alloc(ceph_osd_request_cache, gfp_flags);
520         } else {
521                 BUG_ON(num_ops > CEPH_OSD_MAX_OPS);
522                 req = kmalloc(sizeof(*req) + num_ops * sizeof(req->r_ops[0]),
523                               gfp_flags);
524         }
525         if (unlikely(!req))
526                 return NULL;
527
528         request_init(req);
529         req->r_osdc = osdc;
530         req->r_mempool = use_mempool;
531         req->r_num_ops = num_ops;
532         req->r_snapid = CEPH_NOSNAP;
533         req->r_snapc = ceph_get_snap_context(snapc);
534
535         dout("%s req %p\n", __func__, req);
536         return req;
537 }
538 EXPORT_SYMBOL(ceph_osdc_alloc_request);
539
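/*
 * pool (8) + preferred (4) + key len (4) + namespace len (4) + namespace
 * bytes; must match the oloc encoding done in encode_request().
 */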
540 static int ceph_oloc_encoding_size(struct ceph_object_locator *oloc)
541 {
542         return 8 + 4 + 4 + 4 + (oloc->pool_ns ? oloc->pool_ns->len : 0);
543 }
544
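/*
 * Allocate r_request and r_reply for @req, sizing the request front to
 * match the encoding done in encode_request().  Mempool-backed requests
 * take their messages from the osdc message pools instead.
 */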
545 int ceph_osdc_alloc_messages(struct ceph_osd_request *req, gfp_t gfp)
546 {
547         struct ceph_osd_client *osdc = req->r_osdc;
548         struct ceph_msg *msg;
549         int msg_size;
550
551         WARN_ON(ceph_oid_empty(&req->r_base_oid));
552         WARN_ON(ceph_oloc_empty(&req->r_base_oloc));
553
554         /* create request message */
555         msg_size = 4 + 4 + 4; /* client_inc, osdmap_epoch, flags */
556         msg_size += 4 + 4 + 4 + 8; /* mtime, reassert_version */
557         msg_size += CEPH_ENCODING_START_BLK_LEN +
558                         ceph_oloc_encoding_size(&req->r_base_oloc); /* oloc */
559         msg_size += 1 + 8 + 4 + 4; /* pgid */
560         msg_size += 4 + req->r_base_oid.name_len; /* oid */
561         msg_size += 2 + req->r_num_ops * sizeof(struct ceph_osd_op);
562         msg_size += 8; /* snapid */
563         msg_size += 8; /* snap_seq */
564         msg_size += 4 + 8 * (req->r_snapc ? req->r_snapc->num_snaps : 0);
565         msg_size += 4; /* retry_attempt */
566
567         if (req->r_mempool)
568                 msg = ceph_msgpool_get(&osdc->msgpool_op, 0);
569         else
570                 msg = ceph_msg_new(CEPH_MSG_OSD_OP, msg_size, gfp, true);
571         if (!msg)
572                 return -ENOMEM;
573
574         memset(msg->front.iov_base, 0, msg->front.iov_len);
575         req->r_request = msg;
576
577         /* create reply message */
578         msg_size = OSD_OPREPLY_FRONT_LEN;
579         msg_size += req->r_base_oid.name_len;
580         msg_size += req->r_num_ops * sizeof(struct ceph_osd_op);
581
582         if (req->r_mempool)
583                 msg = ceph_msgpool_get(&osdc->msgpool_op_reply, 0);
584         else
585                 msg = ceph_msg_new(CEPH_MSG_OSD_OPREPLY, msg_size, gfp, true);
586         if (!msg)
587                 return -ENOMEM;
588
589         req->r_reply = msg;
590
591         return 0;
592 }
593 EXPORT_SYMBOL(ceph_osdc_alloc_messages);
594
595 static bool osd_req_opcode_valid(u16 opcode)
596 {
597         switch (opcode) {
598 #define GENERATE_CASE(op, opcode, str)  case CEPH_OSD_OP_##op: return true;
599 __CEPH_FORALL_OSD_OPS(GENERATE_CASE)
600 #undef GENERATE_CASE
601         default:
602                 return false;
603         }
604 }
605
606 /*
607  * This is an osd op init function for opcodes that have no data or
608  * other information associated with them.  It also serves as a
609  * common init routine for all the other init functions, below.
610  */
611 static struct ceph_osd_req_op *
612 _osd_req_op_init(struct ceph_osd_request *osd_req, unsigned int which,
613                  u16 opcode, u32 flags)
614 {
615         struct ceph_osd_req_op *op;
616
617         BUG_ON(which >= osd_req->r_num_ops);
618         BUG_ON(!osd_req_opcode_valid(opcode));
619
620         op = &osd_req->r_ops[which];
621         memset(op, 0, sizeof (*op));
622         op->op = opcode;
623         op->flags = flags;
624
625         return op;
626 }
627
628 void osd_req_op_init(struct ceph_osd_request *osd_req,
629                      unsigned int which, u16 opcode, u32 flags)
630 {
631         (void)_osd_req_op_init(osd_req, which, opcode, flags);
632 }
633 EXPORT_SYMBOL(osd_req_op_init);
634
635 void osd_req_op_extent_init(struct ceph_osd_request *osd_req,
636                                 unsigned int which, u16 opcode,
637                                 u64 offset, u64 length,
638                                 u64 truncate_size, u32 truncate_seq)
639 {
640         struct ceph_osd_req_op *op = _osd_req_op_init(osd_req, which,
641                                                       opcode, 0);
642         size_t payload_len = 0;
643
644         BUG_ON(opcode != CEPH_OSD_OP_READ && opcode != CEPH_OSD_OP_WRITE &&
645                opcode != CEPH_OSD_OP_WRITEFULL && opcode != CEPH_OSD_OP_ZERO &&
646                opcode != CEPH_OSD_OP_TRUNCATE);
647
648         op->extent.offset = offset;
649         op->extent.length = length;
650         op->extent.truncate_size = truncate_size;
651         op->extent.truncate_seq = truncate_seq;
652         if (opcode == CEPH_OSD_OP_WRITE || opcode == CEPH_OSD_OP_WRITEFULL)
653                 payload_len += length;
654
655         op->indata_len = payload_len;
656 }
657 EXPORT_SYMBOL(osd_req_op_extent_init);
658
659 void osd_req_op_extent_update(struct ceph_osd_request *osd_req,
660                                 unsigned int which, u64 length)
661 {
662         struct ceph_osd_req_op *op;
663         u64 previous;
664
665         BUG_ON(which >= osd_req->r_num_ops);
666         op = &osd_req->r_ops[which];
667         previous = op->extent.length;
668
669         if (length == previous)
670                 return;         /* Nothing to do */
671         BUG_ON(length > previous);
672
673         op->extent.length = length;
674         if (op->op == CEPH_OSD_OP_WRITE || op->op == CEPH_OSD_OP_WRITEFULL)
675                 op->indata_len -= previous - length;
676 }
677 EXPORT_SYMBOL(osd_req_op_extent_update);
678
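/*
 * Duplicate the op at @which into slot @which + 1, then advance the copy's
 * extent offset by @offset_inc and shrink its length (and, for writes, its
 * data payload) by the same amount.
 */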
679 void osd_req_op_extent_dup_last(struct ceph_osd_request *osd_req,
680                                 unsigned int which, u64 offset_inc)
681 {
682         struct ceph_osd_req_op *op, *prev_op;
683
684         BUG_ON(which + 1 >= osd_req->r_num_ops);
685
686         prev_op = &osd_req->r_ops[which];
687         op = _osd_req_op_init(osd_req, which + 1, prev_op->op, prev_op->flags);
688         /* dup previous one */
689         op->indata_len = prev_op->indata_len;
690         op->outdata_len = prev_op->outdata_len;
691         op->extent = prev_op->extent;
692         /* adjust offset */
693         op->extent.offset += offset_inc;
694         op->extent.length -= offset_inc;
695
696         if (op->op == CEPH_OSD_OP_WRITE || op->op == CEPH_OSD_OP_WRITEFULL)
697                 op->indata_len -= offset_inc;
698 }
699 EXPORT_SYMBOL(osd_req_op_extent_dup_last);
700
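/*
 * Set up a CEPH_OSD_OP_CALL op: the class and method names are appended
 * to a newly allocated pagelist which becomes the op's request_info.
 * Call input data, if any, is added separately with
 * osd_req_op_cls_request_data_*().
 */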
701 void osd_req_op_cls_init(struct ceph_osd_request *osd_req, unsigned int which,
702                         u16 opcode, const char *class, const char *method)
703 {
704         struct ceph_osd_req_op *op = _osd_req_op_init(osd_req, which,
705                                                       opcode, 0);
706         struct ceph_pagelist *pagelist;
707         size_t payload_len = 0;
708         size_t size;
709
710         BUG_ON(opcode != CEPH_OSD_OP_CALL);
711
712         pagelist = kmalloc(sizeof (*pagelist), GFP_NOFS);
713         BUG_ON(!pagelist);
714         ceph_pagelist_init(pagelist);
715
716         op->cls.class_name = class;
717         size = strlen(class);
718         BUG_ON(size > (size_t) U8_MAX);
719         op->cls.class_len = size;
720         ceph_pagelist_append(pagelist, class, size);
721         payload_len += size;
722
723         op->cls.method_name = method;
724         size = strlen(method);
725         BUG_ON(size > (size_t) U8_MAX);
726         op->cls.method_len = size;
727         ceph_pagelist_append(pagelist, method, size);
728         payload_len += size;
729
730         osd_req_op_cls_request_info_pagelist(osd_req, which, pagelist);
731
732         op->indata_len = payload_len;
733 }
734 EXPORT_SYMBOL(osd_req_op_cls_init);
735
736 int osd_req_op_xattr_init(struct ceph_osd_request *osd_req, unsigned int which,
737                           u16 opcode, const char *name, const void *value,
738                           size_t size, u8 cmp_op, u8 cmp_mode)
739 {
740         struct ceph_osd_req_op *op = _osd_req_op_init(osd_req, which,
741                                                       opcode, 0);
742         struct ceph_pagelist *pagelist;
743         size_t payload_len;
744
745         BUG_ON(opcode != CEPH_OSD_OP_SETXATTR && opcode != CEPH_OSD_OP_CMPXATTR);
746
747         pagelist = kmalloc(sizeof(*pagelist), GFP_NOFS);
748         if (!pagelist)
749                 return -ENOMEM;
750
751         ceph_pagelist_init(pagelist);
752
753         payload_len = strlen(name);
754         op->xattr.name_len = payload_len;
755         ceph_pagelist_append(pagelist, name, payload_len);
756
757         op->xattr.value_len = size;
758         ceph_pagelist_append(pagelist, value, size);
759         payload_len += size;
760
761         op->xattr.cmp_op = cmp_op;
762         op->xattr.cmp_mode = cmp_mode;
763
764         ceph_osd_data_pagelist_init(&op->xattr.osd_data, pagelist);
765         op->indata_len = payload_len;
766         return 0;
767 }
768 EXPORT_SYMBOL(osd_req_op_xattr_init);
769
770 /*
771  * @watch_opcode: CEPH_OSD_WATCH_OP_*
772  */
773 static void osd_req_op_watch_init(struct ceph_osd_request *req, int which,
774                                   u64 cookie, u8 watch_opcode)
775 {
776         struct ceph_osd_req_op *op;
777
778         op = _osd_req_op_init(req, which, CEPH_OSD_OP_WATCH, 0);
779         op->watch.cookie = cookie;
780         op->watch.op = watch_opcode;
781         op->watch.gen = 0;
782 }
783
784 void osd_req_op_alloc_hint_init(struct ceph_osd_request *osd_req,
785                                 unsigned int which,
786                                 u64 expected_object_size,
787                                 u64 expected_write_size)
788 {
789         struct ceph_osd_req_op *op = _osd_req_op_init(osd_req, which,
790                                                       CEPH_OSD_OP_SETALLOCHINT,
791                                                       0);
792
793         op->alloc_hint.expected_object_size = expected_object_size;
794         op->alloc_hint.expected_write_size = expected_write_size;
795
796         /*
797          * CEPH_OSD_OP_SETALLOCHINT op is advisory and therefore deemed
798          * not worth a feature bit.  Set FAILOK per-op flag to make
799          * sure older osds don't trip over an unsupported opcode.
800          */
801         op->flags |= CEPH_OSD_OP_FLAG_FAILOK;
802 }
803 EXPORT_SYMBOL(osd_req_op_alloc_hint_init);
804
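/*
 * Add @osd_data to @msg as message data, dispatching on its type.
 * CEPH_OSD_DATA_TYPE_NONE is silently ignored.
 */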
805 static void ceph_osdc_msg_data_add(struct ceph_msg *msg,
806                                 struct ceph_osd_data *osd_data)
807 {
808         u64 length = ceph_osd_data_length(osd_data);
809
810         if (osd_data->type == CEPH_OSD_DATA_TYPE_PAGES) {
811                 BUG_ON(length > (u64) SIZE_MAX);
812                 if (length)
813                         ceph_msg_data_add_pages(msg, osd_data->pages,
814                                         length, osd_data->alignment);
815         } else if (osd_data->type == CEPH_OSD_DATA_TYPE_PAGELIST) {
816                 BUG_ON(!length);
817                 ceph_msg_data_add_pagelist(msg, osd_data->pagelist);
818 #ifdef CONFIG_BLOCK
819         } else if (osd_data->type == CEPH_OSD_DATA_TYPE_BIO) {
820                 ceph_msg_data_add_bio(msg, osd_data->bio, length);
821 #endif
822         } else {
823                 BUG_ON(osd_data->type != CEPH_OSD_DATA_TYPE_NONE);
824         }
825 }
826
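/*
 * Convert @src into wire (little-endian) format in @dst and return the
 * number of request data bytes (indata_len) the op carries; returns 0
 * for unrecognized or unsupported opcodes.
 */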
827 static u32 osd_req_encode_op(struct ceph_osd_op *dst,
828                              const struct ceph_osd_req_op *src)
829 {
830         if (WARN_ON(!osd_req_opcode_valid(src->op))) {
831                 pr_err("unrecognized osd opcode %d\n", src->op);
832
833                 return 0;
834         }
835
836         switch (src->op) {
837         case CEPH_OSD_OP_STAT:
838                 break;
839         case CEPH_OSD_OP_READ:
840         case CEPH_OSD_OP_WRITE:
841         case CEPH_OSD_OP_WRITEFULL:
842         case CEPH_OSD_OP_ZERO:
843         case CEPH_OSD_OP_TRUNCATE:
844                 dst->extent.offset = cpu_to_le64(src->extent.offset);
845                 dst->extent.length = cpu_to_le64(src->extent.length);
846                 dst->extent.truncate_size =
847                         cpu_to_le64(src->extent.truncate_size);
848                 dst->extent.truncate_seq =
849                         cpu_to_le32(src->extent.truncate_seq);
850                 break;
851         case CEPH_OSD_OP_CALL:
852                 dst->cls.class_len = src->cls.class_len;
853                 dst->cls.method_len = src->cls.method_len;
854                 dst->cls.indata_len = cpu_to_le32(src->cls.indata_len);
855                 break;
856         case CEPH_OSD_OP_STARTSYNC:
857                 break;
858         case CEPH_OSD_OP_WATCH:
859                 dst->watch.cookie = cpu_to_le64(src->watch.cookie);
860                 dst->watch.ver = cpu_to_le64(0);
861                 dst->watch.op = src->watch.op;
862                 dst->watch.gen = cpu_to_le32(src->watch.gen);
863                 break;
864         case CEPH_OSD_OP_NOTIFY_ACK:
865                 break;
866         case CEPH_OSD_OP_NOTIFY:
867                 dst->notify.cookie = cpu_to_le64(src->notify.cookie);
868                 break;
869         case CEPH_OSD_OP_LIST_WATCHERS:
870                 break;
871         case CEPH_OSD_OP_SETALLOCHINT:
872                 dst->alloc_hint.expected_object_size =
873                     cpu_to_le64(src->alloc_hint.expected_object_size);
874                 dst->alloc_hint.expected_write_size =
875                     cpu_to_le64(src->alloc_hint.expected_write_size);
876                 break;
877         case CEPH_OSD_OP_SETXATTR:
878         case CEPH_OSD_OP_CMPXATTR:
879                 dst->xattr.name_len = cpu_to_le32(src->xattr.name_len);
880                 dst->xattr.value_len = cpu_to_le32(src->xattr.value_len);
881                 dst->xattr.cmp_op = src->xattr.cmp_op;
882                 dst->xattr.cmp_mode = src->xattr.cmp_mode;
883                 break;
884         case CEPH_OSD_OP_CREATE:
885         case CEPH_OSD_OP_DELETE:
886                 break;
887         default:
888                 pr_err("unsupported osd opcode %s\n",
889                         ceph_osd_op_name(src->op));
890                 WARN_ON(1);
891
892                 return 0;
893         }
894
895         dst->op = cpu_to_le16(src->op);
896         dst->flags = cpu_to_le32(src->flags);
897         dst->payload_len = cpu_to_le32(src->indata_len);
898
899         return src->indata_len;
900 }
901
902 /*
903  * build new request AND message, calculate layout, and adjust file
904  * extent as needed.
905  *
906  * if the file was recently truncated, we include information about its
907  * old and new size so that the object can be updated appropriately.  (we
908  * avoid synchronously deleting truncated objects because it's slow.)
909  */
913 struct ceph_osd_request *ceph_osdc_new_request(struct ceph_osd_client *osdc,
914                                                struct ceph_file_layout *layout,
915                                                struct ceph_vino vino,
916                                                u64 off, u64 *plen,
917                                                unsigned int which, int num_ops,
918                                                int opcode, int flags,
919                                                struct ceph_snap_context *snapc,
920                                                u32 truncate_seq,
921                                                u64 truncate_size,
922                                                bool use_mempool)
923 {
924         struct ceph_osd_request *req;
925         u64 objnum = 0;
926         u64 objoff = 0;
927         u64 objlen = 0;
928         int r;
929
930         BUG_ON(opcode != CEPH_OSD_OP_READ && opcode != CEPH_OSD_OP_WRITE &&
931                opcode != CEPH_OSD_OP_ZERO && opcode != CEPH_OSD_OP_TRUNCATE &&
932                opcode != CEPH_OSD_OP_CREATE && opcode != CEPH_OSD_OP_DELETE);
933
934         req = ceph_osdc_alloc_request(osdc, snapc, num_ops, use_mempool,
935                                         GFP_NOFS);
936         if (!req) {
937                 r = -ENOMEM;
938                 goto fail;
939         }
940
941         /* calculate max write size */
942         r = calc_layout(layout, off, plen, &objnum, &objoff, &objlen);
943         if (r)
944                 goto fail;
945
946         if (opcode == CEPH_OSD_OP_CREATE || opcode == CEPH_OSD_OP_DELETE) {
947                 osd_req_op_init(req, which, opcode, 0);
948         } else {
949                 u32 object_size = layout->object_size;
950                 u32 object_base = off - objoff;
951                 if (!(truncate_seq == 1 && truncate_size == -1ULL)) {
952                         if (truncate_size <= object_base) {
953                                 truncate_size = 0;
954                         } else {
955                                 truncate_size -= object_base;
956                                 if (truncate_size > object_size)
957                                         truncate_size = object_size;
958                         }
959                 }
960                 osd_req_op_extent_init(req, which, opcode, objoff, objlen,
961                                        truncate_size, truncate_seq);
962         }
963
964         req->r_abort_on_full = true;
965         req->r_flags = flags;
966         req->r_base_oloc.pool = layout->pool_id;
967         req->r_base_oloc.pool_ns = ceph_try_get_string(layout->pool_ns);
968         ceph_oid_printf(&req->r_base_oid, "%llx.%08llx", vino.ino, objnum);
969
970         req->r_snapid = vino.snap;
971         if (flags & CEPH_OSD_FLAG_WRITE)
972                 req->r_data_offset = off;
973
974         r = ceph_osdc_alloc_messages(req, GFP_NOFS);
975         if (r)
976                 goto fail;
977
978         return req;
979
980 fail:
981         ceph_osdc_put_request(req);
982         return ERR_PTR(r);
983 }
984 EXPORT_SYMBOL(ceph_osdc_new_request);
985
986 /*
987  * We keep osd requests in an rbtree, sorted by ->r_tid.
988  */
989 DEFINE_RB_FUNCS(request, struct ceph_osd_request, r_tid, r_node)
990 DEFINE_RB_FUNCS(request_mc, struct ceph_osd_request, r_tid, r_mc_node)
991
992 static bool osd_homeless(struct ceph_osd *osd)
993 {
994         return osd->o_osd == CEPH_HOMELESS_OSD;
995 }
996
997 static bool osd_registered(struct ceph_osd *osd)
998 {
999         verify_osdc_locked(osd->o_osdc);
1000
1001         return !RB_EMPTY_NODE(&osd->o_node);
1002 }
1003
1004 /*
1005  * Assumes @osd is zero-initialized.
1006  */
1007 static void osd_init(struct ceph_osd *osd)
1008 {
1009         refcount_set(&osd->o_ref, 1);
1010         RB_CLEAR_NODE(&osd->o_node);
1011         osd->o_requests = RB_ROOT;
1012         osd->o_linger_requests = RB_ROOT;
1013         INIT_LIST_HEAD(&osd->o_osd_lru);
1014         INIT_LIST_HEAD(&osd->o_keepalive_item);
1015         osd->o_incarnation = 1;
1016         mutex_init(&osd->lock);
1017 }
1018
1019 static void osd_cleanup(struct ceph_osd *osd)
1020 {
1021         WARN_ON(!RB_EMPTY_NODE(&osd->o_node));
1022         WARN_ON(!RB_EMPTY_ROOT(&osd->o_requests));
1023         WARN_ON(!RB_EMPTY_ROOT(&osd->o_linger_requests));
1024         WARN_ON(!list_empty(&osd->o_osd_lru));
1025         WARN_ON(!list_empty(&osd->o_keepalive_item));
1026
1027         if (osd->o_auth.authorizer) {
1028                 WARN_ON(osd_homeless(osd));
1029                 ceph_auth_destroy_authorizer(osd->o_auth.authorizer);
1030         }
1031 }
1032
1033 /*
1034  * Track open sessions with osds.
1035  */
1036 static struct ceph_osd *create_osd(struct ceph_osd_client *osdc, int onum)
1037 {
1038         struct ceph_osd *osd;
1039
1040         WARN_ON(onum == CEPH_HOMELESS_OSD);
1041
1042         osd = kzalloc(sizeof(*osd), GFP_NOIO | __GFP_NOFAIL);
1043         osd_init(osd);
1044         osd->o_osdc = osdc;
1045         osd->o_osd = onum;
1046
1047         ceph_con_init(&osd->o_con, osd, &osd_con_ops, &osdc->client->msgr);
1048
1049         return osd;
1050 }
1051
1052 static struct ceph_osd *get_osd(struct ceph_osd *osd)
1053 {
1054         if (refcount_inc_not_zero(&osd->o_ref)) {
1055                 dout("get_osd %p %d -> %d\n", osd, refcount_read(&osd->o_ref)-1,
1056                      refcount_read(&osd->o_ref));
1057                 return osd;
1058         } else {
1059                 dout("get_osd %p FAIL\n", osd);
1060                 return NULL;
1061         }
1062 }
1063
1064 static void put_osd(struct ceph_osd *osd)
1065 {
1066         dout("put_osd %p %d -> %d\n", osd, refcount_read(&osd->o_ref),
1067              refcount_read(&osd->o_ref) - 1);
1068         if (refcount_dec_and_test(&osd->o_ref)) {
1069                 osd_cleanup(osd);
1070                 kfree(osd);
1071         }
1072 }
1073
1074 DEFINE_RB_FUNCS(osd, struct ceph_osd, o_osd, o_node)
1075
1076 static void __move_osd_to_lru(struct ceph_osd *osd)
1077 {
1078         struct ceph_osd_client *osdc = osd->o_osdc;
1079
1080         dout("%s osd %p osd%d\n", __func__, osd, osd->o_osd);
1081         BUG_ON(!list_empty(&osd->o_osd_lru));
1082
1083         spin_lock(&osdc->osd_lru_lock);
1084         list_add_tail(&osd->o_osd_lru, &osdc->osd_lru);
1085         spin_unlock(&osdc->osd_lru_lock);
1086
1087         osd->lru_ttl = jiffies + osdc->client->options->osd_idle_ttl;
1088 }
1089
1090 static void maybe_move_osd_to_lru(struct ceph_osd *osd)
1091 {
1092         if (RB_EMPTY_ROOT(&osd->o_requests) &&
1093             RB_EMPTY_ROOT(&osd->o_linger_requests))
1094                 __move_osd_to_lru(osd);
1095 }
1096
1097 static void __remove_osd_from_lru(struct ceph_osd *osd)
1098 {
1099         struct ceph_osd_client *osdc = osd->o_osdc;
1100
1101         dout("%s osd %p osd%d\n", __func__, osd, osd->o_osd);
1102
1103         spin_lock(&osdc->osd_lru_lock);
1104         if (!list_empty(&osd->o_osd_lru))
1105                 list_del_init(&osd->o_osd_lru);
1106         spin_unlock(&osdc->osd_lru_lock);
1107 }
1108
1109 /*
1110  * Close the connection and assign any leftover requests to the
1111  * homeless session.
1112  */
1113 static void close_osd(struct ceph_osd *osd)
1114 {
1115         struct ceph_osd_client *osdc = osd->o_osdc;
1116         struct rb_node *n;
1117
1118         verify_osdc_wrlocked(osdc);
1119         dout("%s osd %p osd%d\n", __func__, osd, osd->o_osd);
1120
1121         ceph_con_close(&osd->o_con);
1122
1123         for (n = rb_first(&osd->o_requests); n; ) {
1124                 struct ceph_osd_request *req =
1125                     rb_entry(n, struct ceph_osd_request, r_node);
1126
1127                 n = rb_next(n); /* unlink_request() */
1128
1129                 dout(" reassigning req %p tid %llu\n", req, req->r_tid);
1130                 unlink_request(osd, req);
1131                 link_request(&osdc->homeless_osd, req);
1132         }
1133         for (n = rb_first(&osd->o_linger_requests); n; ) {
1134                 struct ceph_osd_linger_request *lreq =
1135                     rb_entry(n, struct ceph_osd_linger_request, node);
1136
1137                 n = rb_next(n); /* unlink_linger() */
1138
1139                 dout(" reassigning lreq %p linger_id %llu\n", lreq,
1140                      lreq->linger_id);
1141                 unlink_linger(osd, lreq);
1142                 link_linger(&osdc->homeless_osd, lreq);
1143         }
1144
1145         __remove_osd_from_lru(osd);
1146         erase_osd(&osdc->osds, osd);
1147         put_osd(osd);
1148 }
1149
1150 /*
1151  * Re-open the connection to this osd (or close the session entirely if it
1152  * has no requests left).
1153  */
1153 static int reopen_osd(struct ceph_osd *osd)
1154 {
1155         struct ceph_entity_addr *peer_addr;
1156
1157         dout("%s osd %p osd%d\n", __func__, osd, osd->o_osd);
1158
1159         if (RB_EMPTY_ROOT(&osd->o_requests) &&
1160             RB_EMPTY_ROOT(&osd->o_linger_requests)) {
1161                 close_osd(osd);
1162                 return -ENODEV;
1163         }
1164
1165         peer_addr = &osd->o_osdc->osdmap->osd_addr[osd->o_osd];
1166         if (!memcmp(peer_addr, &osd->o_con.peer_addr, sizeof (*peer_addr)) &&
1167                         !ceph_con_opened(&osd->o_con)) {
1168                 struct rb_node *n;
1169
1170                 dout("osd addr hasn't changed and connection never opened, "
1171                      "letting msgr retry\n");
1172                 /* touch each r_stamp for handle_timeout()'s benefit */
1173                 for (n = rb_first(&osd->o_requests); n; n = rb_next(n)) {
1174                         struct ceph_osd_request *req =
1175                             rb_entry(n, struct ceph_osd_request, r_node);
1176                         req->r_stamp = jiffies;
1177                 }
1178
1179                 return -EAGAIN;
1180         }
1181
1182         ceph_con_close(&osd->o_con);
1183         ceph_con_open(&osd->o_con, CEPH_ENTITY_TYPE_OSD, osd->o_osd, peer_addr);
1184         osd->o_incarnation++;
1185
1186         return 0;
1187 }
1188
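/*
 * Return the session for osd @o (or the special homeless session for
 * CEPH_HOMELESS_OSD), creating and opening it if necessary.  Creating a
 * session requires the osdc lock held for write, so ERR_PTR(-EAGAIN) is
 * returned if @o is unknown and only the read lock is held.
 */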
1189 static struct ceph_osd *lookup_create_osd(struct ceph_osd_client *osdc, int o,
1190                                           bool wrlocked)
1191 {
1192         struct ceph_osd *osd;
1193
1194         if (wrlocked)
1195                 verify_osdc_wrlocked(osdc);
1196         else
1197                 verify_osdc_locked(osdc);
1198
1199         if (o != CEPH_HOMELESS_OSD)
1200                 osd = lookup_osd(&osdc->osds, o);
1201         else
1202                 osd = &osdc->homeless_osd;
1203         if (!osd) {
1204                 if (!wrlocked)
1205                         return ERR_PTR(-EAGAIN);
1206
1207                 osd = create_osd(osdc, o);
1208                 insert_osd(&osdc->osds, osd);
1209                 ceph_con_open(&osd->o_con, CEPH_ENTITY_TYPE_OSD, osd->o_osd,
1210                               &osdc->osdmap->osd_addr[osd->o_osd]);
1211         }
1212
1213         dout("%s osdc %p osd%d -> osd %p\n", __func__, osdc, o, osd);
1214         return osd;
1215 }
1216
1217 /*
1218  * Create request <-> OSD session relation.
1219  *
1220  * @req has to be assigned a tid, @osd may be homeless.
1221  */
1222 static void link_request(struct ceph_osd *osd, struct ceph_osd_request *req)
1223 {
1224         verify_osd_locked(osd);
1225         WARN_ON(!req->r_tid || req->r_osd);
1226         dout("%s osd %p osd%d req %p tid %llu\n", __func__, osd, osd->o_osd,
1227              req, req->r_tid);
1228
1229         if (!osd_homeless(osd))
1230                 __remove_osd_from_lru(osd);
1231         else
1232                 atomic_inc(&osd->o_osdc->num_homeless);
1233
1234         get_osd(osd);
1235         insert_request(&osd->o_requests, req);
1236         req->r_osd = osd;
1237 }
1238
1239 static void unlink_request(struct ceph_osd *osd, struct ceph_osd_request *req)
1240 {
1241         verify_osd_locked(osd);
1242         WARN_ON(req->r_osd != osd);
1243         dout("%s osd %p osd%d req %p tid %llu\n", __func__, osd, osd->o_osd,
1244              req, req->r_tid);
1245
1246         req->r_osd = NULL;
1247         erase_request(&osd->o_requests, req);
1248         put_osd(osd);
1249
1250         if (!osd_homeless(osd))
1251                 maybe_move_osd_to_lru(osd);
1252         else
1253                 atomic_dec(&osd->o_osdc->num_homeless);
1254 }
1255
1256 static bool __pool_full(struct ceph_pg_pool_info *pi)
1257 {
1258         return pi->flags & CEPH_POOL_FLAG_FULL;
1259 }
1260
1261 static bool have_pool_full(struct ceph_osd_client *osdc)
1262 {
1263         struct rb_node *n;
1264
1265         for (n = rb_first(&osdc->osdmap->pg_pools); n; n = rb_next(n)) {
1266                 struct ceph_pg_pool_info *pi =
1267                     rb_entry(n, struct ceph_pg_pool_info, node);
1268
1269                 if (__pool_full(pi))
1270                         return true;
1271         }
1272
1273         return false;
1274 }
1275
1276 static bool pool_full(struct ceph_osd_client *osdc, s64 pool_id)
1277 {
1278         struct ceph_pg_pool_info *pi;
1279
1280         pi = ceph_pg_pool_by_id(osdc->osdmap, pool_id);
1281         if (!pi)
1282                 return false;
1283
1284         return __pool_full(pi);
1285 }
1286
1287 /*
1288  * Returns whether a request should be blocked from being sent
1289  * based on the current osdmap and osd_client settings.
1290  */
1291 static bool target_should_be_paused(struct ceph_osd_client *osdc,
1292                                     const struct ceph_osd_request_target *t,
1293                                     struct ceph_pg_pool_info *pi)
1294 {
1295         bool pauserd = ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSERD);
1296         bool pausewr = ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSEWR) ||
1297                        ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL) ||
1298                        __pool_full(pi);
1299
1300         WARN_ON(pi->id != t->base_oloc.pool);
1301         return ((t->flags & CEPH_OSD_FLAG_READ) && pauserd) ||
1302                ((t->flags & CEPH_OSD_FLAG_WRITE) && pausewr) ||
1303                (osdc->osdmap->epoch < osdc->epoch_barrier);
1304 }
1305
1306 enum calc_target_result {
1307         CALC_TARGET_NO_ACTION = 0,
1308         CALC_TARGET_NEED_RESEND,
1309         CALC_TARGET_POOL_DNE,
1310 };
1311
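/*
 * Map request target @t onto the current osdmap: apply cache tiering
 * overrides, compute the pgid and up/acting sets, and report whether
 * nothing changed, the request needs to be (re)sent, or the pool no
 * longer exists.
 */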
1312 static enum calc_target_result calc_target(struct ceph_osd_client *osdc,
1313                                            struct ceph_osd_request_target *t,
1314                                            u32 *last_force_resend,
1315                                            bool any_change)
1316 {
1317         struct ceph_pg_pool_info *pi;
1318         struct ceph_pg pgid, last_pgid;
1319         struct ceph_osds up, acting;
1320         bool force_resend = false;
1321         bool need_check_tiering = false;
1322         bool need_resend = false;
1323         bool sort_bitwise = ceph_osdmap_flag(osdc, CEPH_OSDMAP_SORTBITWISE);
1324         enum calc_target_result ct_res;
1325         int ret;
1326
1327         pi = ceph_pg_pool_by_id(osdc->osdmap, t->base_oloc.pool);
1328         if (!pi) {
1329                 t->osd = CEPH_HOMELESS_OSD;
1330                 ct_res = CALC_TARGET_POOL_DNE;
1331                 goto out;
1332         }
1333
1334         if (osdc->osdmap->epoch == pi->last_force_request_resend) {
1335                 if (last_force_resend &&
1336                     *last_force_resend < pi->last_force_request_resend) {
1337                         *last_force_resend = pi->last_force_request_resend;
1338                         force_resend = true;
1339                 } else if (!last_force_resend) {
1340                         force_resend = true;
1341                 }
1342         }
1343         if (ceph_oid_empty(&t->target_oid) || force_resend) {
1344                 ceph_oid_copy(&t->target_oid, &t->base_oid);
1345                 need_check_tiering = true;
1346         }
1347         if (ceph_oloc_empty(&t->target_oloc) || force_resend) {
1348                 ceph_oloc_copy(&t->target_oloc, &t->base_oloc);
1349                 need_check_tiering = true;
1350         }
1351
1352         if (need_check_tiering &&
1353             (t->flags & CEPH_OSD_FLAG_IGNORE_OVERLAY) == 0) {
1354                 if (t->flags & CEPH_OSD_FLAG_READ && pi->read_tier >= 0)
1355                         t->target_oloc.pool = pi->read_tier;
1356                 if (t->flags & CEPH_OSD_FLAG_WRITE && pi->write_tier >= 0)
1357                         t->target_oloc.pool = pi->write_tier;
1358         }
1359
1360         ret = ceph_object_locator_to_pg(osdc->osdmap, &t->target_oid,
1361                                         &t->target_oloc, &pgid);
1362         if (ret) {
1363                 WARN_ON(ret != -ENOENT);
1364                 t->osd = CEPH_HOMELESS_OSD;
1365                 ct_res = CALC_TARGET_POOL_DNE;
1366                 goto out;
1367         }
1368         last_pgid.pool = pgid.pool;
1369         last_pgid.seed = ceph_stable_mod(pgid.seed, t->pg_num, t->pg_num_mask);
1370
1371         ceph_pg_to_up_acting_osds(osdc->osdmap, &pgid, &up, &acting);
1372         if (any_change &&
1373             ceph_is_new_interval(&t->acting,
1374                                  &acting,
1375                                  &t->up,
1376                                  &up,
1377                                  t->size,
1378                                  pi->size,
1379                                  t->min_size,
1380                                  pi->min_size,
1381                                  t->pg_num,
1382                                  pi->pg_num,
1383                                  t->sort_bitwise,
1384                                  sort_bitwise,
1385                                  &last_pgid))
1386                 force_resend = true;
1387
1388         if (t->paused && !target_should_be_paused(osdc, t, pi)) {
1389                 t->paused = false;
1390                 need_resend = true;
1391         }
1392
1393         if (ceph_pg_compare(&t->pgid, &pgid) ||
1394             ceph_osds_changed(&t->acting, &acting, any_change) ||
1395             force_resend) {
1396                 t->pgid = pgid; /* struct */
1397                 ceph_osds_copy(&t->acting, &acting);
1398                 ceph_osds_copy(&t->up, &up);
1399                 t->size = pi->size;
1400                 t->min_size = pi->min_size;
1401                 t->pg_num = pi->pg_num;
1402                 t->pg_num_mask = pi->pg_num_mask;
1403                 t->sort_bitwise = sort_bitwise;
1404
1405                 t->osd = acting.primary;
1406                 need_resend = true;
1407         }
1408
1409         ct_res = need_resend ? CALC_TARGET_NEED_RESEND : CALC_TARGET_NO_ACTION;
1410 out:
1411         dout("%s t %p -> ct_res %d osd %d\n", __func__, t, ct_res, t->osd);
1412         return ct_res;
1413 }
1414
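/*
 * Attach each op's data items to the request message (outgoing data) or
 * to r_reply (incoming data).  A no-op if data has already been attached
 * to the message.
 */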
1415 static void setup_request_data(struct ceph_osd_request *req,
1416                                struct ceph_msg *msg)
1417 {
1418         u32 data_len = 0;
1419         int i;
1420
1421         if (!list_empty(&msg->data))
1422                 return;
1423
1424         WARN_ON(msg->data_length);
1425         for (i = 0; i < req->r_num_ops; i++) {
1426                 struct ceph_osd_req_op *op = &req->r_ops[i];
1427
1428                 switch (op->op) {
1429                 /* request */
1430                 case CEPH_OSD_OP_WRITE:
1431                 case CEPH_OSD_OP_WRITEFULL:
1432                         WARN_ON(op->indata_len != op->extent.length);
1433                         ceph_osdc_msg_data_add(msg, &op->extent.osd_data);
1434                         break;
1435                 case CEPH_OSD_OP_SETXATTR:
1436                 case CEPH_OSD_OP_CMPXATTR:
1437                         WARN_ON(op->indata_len != op->xattr.name_len +
1438                                                   op->xattr.value_len);
1439                         ceph_osdc_msg_data_add(msg, &op->xattr.osd_data);
1440                         break;
1441                 case CEPH_OSD_OP_NOTIFY_ACK:
1442                         ceph_osdc_msg_data_add(msg,
1443                                                &op->notify_ack.request_data);
1444                         break;
1445
1446                 /* reply */
1447                 case CEPH_OSD_OP_STAT:
1448                         ceph_osdc_msg_data_add(req->r_reply,
1449                                                &op->raw_data_in);
1450                         break;
1451                 case CEPH_OSD_OP_READ:
1452                         ceph_osdc_msg_data_add(req->r_reply,
1453                                                &op->extent.osd_data);
1454                         break;
1455                 case CEPH_OSD_OP_LIST_WATCHERS:
1456                         ceph_osdc_msg_data_add(req->r_reply,
1457                                                &op->list_watchers.response_data);
1458                         break;
1459
1460                 /* both */
1461                 case CEPH_OSD_OP_CALL:
1462                         WARN_ON(op->indata_len != op->cls.class_len +
1463                                                   op->cls.method_len +
1464                                                   op->cls.indata_len);
1465                         ceph_osdc_msg_data_add(msg, &op->cls.request_info);
1466                         /* optional, can be NONE */
1467                         ceph_osdc_msg_data_add(msg, &op->cls.request_data);
1468                         /* optional, can be NONE */
1469                         ceph_osdc_msg_data_add(req->r_reply,
1470                                                &op->cls.response_data);
1471                         break;
1472                 case CEPH_OSD_OP_NOTIFY:
1473                         ceph_osdc_msg_data_add(msg,
1474                                                &op->notify.request_data);
1475                         ceph_osdc_msg_data_add(req->r_reply,
1476                                                &op->notify.response_data);
1477                         break;
1478                 }
1479
1480                 data_len += op->indata_len;
1481         }
1482
1483         WARN_ON(data_len != msg->data_length);
1484 }
1485
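/*
 * Encode the front of an MOSDOp v4 message: client_inc, osdmap epoch,
 * flags, mtime, reassert_version, object locator, pgid, object name,
 * the op vector, the snap context and the retry attempt count.  Data
 * items were already attached by setup_request_data().
 */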
1486 static void encode_request(struct ceph_osd_request *req, struct ceph_msg *msg)
1487 {
1488         void *p = msg->front.iov_base;
1489         void *const end = p + msg->front_alloc_len;
1490         u32 data_len = 0;
1491         int i;
1492
1493         if (req->r_flags & CEPH_OSD_FLAG_WRITE) {
1494                 /* snapshots aren't writeable */
1495                 WARN_ON(req->r_snapid != CEPH_NOSNAP);
1496         } else {
1497                 WARN_ON(req->r_mtime.tv_sec || req->r_mtime.tv_nsec ||
1498                         req->r_data_offset || req->r_snapc);
1499         }
1500
1501         setup_request_data(req, msg);
1502
1503         ceph_encode_32(&p, 1); /* client_inc, always 1 */
1504         ceph_encode_32(&p, req->r_osdc->osdmap->epoch);
1505         ceph_encode_32(&p, req->r_flags);
1506         ceph_encode_timespec(p, &req->r_mtime);
1507         p += sizeof(struct ceph_timespec);
1508
1509         /* reassert_version */
1510         memset(p, 0, sizeof(struct ceph_eversion));
1511         p += sizeof(struct ceph_eversion);
1512
1513         /* oloc */
1514         ceph_start_encoding(&p, 5, 4,
1515                             ceph_oloc_encoding_size(&req->r_t.target_oloc));
1516         ceph_encode_64(&p, req->r_t.target_oloc.pool);
1517         ceph_encode_32(&p, -1); /* preferred */
1518         ceph_encode_32(&p, 0); /* key len */
1519         if (req->r_t.target_oloc.pool_ns)
1520                 ceph_encode_string(&p, end, req->r_t.target_oloc.pool_ns->str,
1521                                    req->r_t.target_oloc.pool_ns->len);
1522         else
1523                 ceph_encode_32(&p, 0);
1524
1525         /* pgid */
1526         ceph_encode_8(&p, 1);
1527         ceph_encode_64(&p, req->r_t.pgid.pool);
1528         ceph_encode_32(&p, req->r_t.pgid.seed);
1529         ceph_encode_32(&p, -1); /* preferred */
1530
1531         /* oid */
1532         ceph_encode_32(&p, req->r_t.target_oid.name_len);
1533         memcpy(p, req->r_t.target_oid.name, req->r_t.target_oid.name_len);
1534         p += req->r_t.target_oid.name_len;
1535
1536         /* ops, can imply data */
1537         ceph_encode_16(&p, req->r_num_ops);
1538         for (i = 0; i < req->r_num_ops; i++) {
1539                 data_len += osd_req_encode_op(p, &req->r_ops[i]);
1540                 p += sizeof(struct ceph_osd_op);
1541         }
1542
1543         ceph_encode_64(&p, req->r_snapid); /* snapid */
1544         if (req->r_snapc) {
1545                 ceph_encode_64(&p, req->r_snapc->seq);
1546                 ceph_encode_32(&p, req->r_snapc->num_snaps);
1547                 for (i = 0; i < req->r_snapc->num_snaps; i++)
1548                         ceph_encode_64(&p, req->r_snapc->snaps[i]);
1549         } else {
1550                 ceph_encode_64(&p, 0); /* snap_seq */
1551                 ceph_encode_32(&p, 0); /* snaps len */
1552         }
1553
1554         ceph_encode_32(&p, req->r_attempts); /* retry_attempt */
1555
1556         BUG_ON(p > end);
1557         msg->front.iov_len = p - msg->front.iov_base;
1558         msg->hdr.version = cpu_to_le16(4); /* MOSDOp v4 */
1559         msg->hdr.front_len = cpu_to_le32(msg->front.iov_len);
1560         msg->hdr.data_len = cpu_to_le32(data_len);
1561         /*
1562          * The header "data_off" is a hint to the receiver allowing it
1563          * to align received data into its buffers such that there's no
1564          * need to re-copy it before writing it to disk (direct I/O).
1565          */
1566         msg->hdr.data_off = cpu_to_le16(req->r_data_offset);
1567
1568         dout("%s req %p oid %s oid_len %d front %zu data %u\n", __func__,
1569              req, req->r_t.target_oid.name, req->r_t.target_oid.name_len,
1570              msg->front.iov_len, data_len);
1571 }
1572
1573 /*
1574  * @req has to be assigned a tid and registered.
1575  */
1576 static void send_request(struct ceph_osd_request *req)
1577 {
1578         struct ceph_osd *osd = req->r_osd;
1579
1580         verify_osd_locked(osd);
1581         WARN_ON(osd->o_osd != req->r_t.osd);
1582
1583         /*
1584          * We may have a previously queued request message hanging
1585          * around.  Cancel it to avoid corrupting the msgr.
1586          */
1587         if (req->r_sent)
1588                 ceph_msg_revoke(req->r_request);
1589
1590         req->r_flags |= CEPH_OSD_FLAG_KNOWN_REDIR;
1591         if (req->r_attempts)
1592                 req->r_flags |= CEPH_OSD_FLAG_RETRY;
1593         else
1594                 WARN_ON(req->r_flags & CEPH_OSD_FLAG_RETRY);
1595
1596         encode_request(req, req->r_request);
1597
1598         dout("%s req %p tid %llu to pg %llu.%x osd%d flags 0x%x attempt %d\n",
1599              __func__, req, req->r_tid, req->r_t.pgid.pool, req->r_t.pgid.seed,
1600              req->r_t.osd, req->r_flags, req->r_attempts);
1601
1602         req->r_t.paused = false;
1603         req->r_stamp = jiffies;
1604         req->r_attempts++;
1605
1606         req->r_sent = osd->o_incarnation;
1607         req->r_request->hdr.tid = cpu_to_le64(req->r_tid);
1608         ceph_con_send(&osd->o_con, ceph_msg_get(req->r_request));
1609 }
1610
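/*
 * Ask the monitor for an osdmap update: a continuous subscription while
 * the cluster is full or paused, so that we notice when the flag clears;
 * otherwise a one-shot request for the next epoch.
 */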
1611 static void maybe_request_map(struct ceph_osd_client *osdc)
1612 {
1613         bool continuous = false;
1614
1615         verify_osdc_locked(osdc);
1616         WARN_ON(!osdc->osdmap->epoch);
1617
1618         if (ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL) ||
1619             ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSERD) ||
1620             ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSEWR)) {
1621                 dout("%s osdc %p continuous\n", __func__, osdc);
1622                 continuous = true;
1623         } else {
1624                 dout("%s osdc %p onetime\n", __func__, osdc);
1625         }
1626
1627         if (ceph_monc_want_map(&osdc->client->monc, CEPH_SUB_OSDMAP,
1628                                osdc->osdmap->epoch + 1, continuous))
1629                 ceph_monc_renew_subs(&osdc->client->monc);
1630 }
1631
1632 static void complete_request(struct ceph_osd_request *req, int err);
1633 static void send_map_check(struct ceph_osd_request *req);
1634
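/*
 * Map the request onto a PG and OSD and either send it or leave it
 * linked on the (possibly homeless) OSD session: sending is deferred
 * while the osdmap is behind the epoch barrier, while reads or writes
 * are paused, or while the cluster/pool is full.  Called with
 * osdc->lock held for read or write (per @wrlocked); a read lock is
 * promoted to a write lock if the target pool doesn't exist or a new
 * OSD session has to be created.
 */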
1635 static void __submit_request(struct ceph_osd_request *req, bool wrlocked)
1636 {
1637         struct ceph_osd_client *osdc = req->r_osdc;
1638         struct ceph_osd *osd;
1639         enum calc_target_result ct_res;
1640         bool need_send = false;
1641         bool promoted = false;
1642         bool need_abort = false;
1643
1644         WARN_ON(req->r_tid);
1645         dout("%s req %p wrlocked %d\n", __func__, req, wrlocked);
1646
1647 again:
1648         ct_res = calc_target(osdc, &req->r_t, &req->r_last_force_resend, false);
1649         if (ct_res == CALC_TARGET_POOL_DNE && !wrlocked)
1650                 goto promote;
1651
1652         osd = lookup_create_osd(osdc, req->r_t.osd, wrlocked);
1653         if (IS_ERR(osd)) {
1654                 WARN_ON(PTR_ERR(osd) != -EAGAIN || wrlocked);
1655                 goto promote;
1656         }
1657
1658         if (osdc->osdmap->epoch < osdc->epoch_barrier) {
1659                 dout("req %p epoch %u barrier %u\n", req, osdc->osdmap->epoch,
1660                      osdc->epoch_barrier);
1661                 req->r_t.paused = true;
1662                 maybe_request_map(osdc);
1663         } else if ((req->r_flags & CEPH_OSD_FLAG_WRITE) &&
1664                    ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSEWR)) {
1665                 dout("req %p pausewr\n", req);
1666                 req->r_t.paused = true;
1667                 maybe_request_map(osdc);
1668         } else if ((req->r_flags & CEPH_OSD_FLAG_READ) &&
1669                    ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSERD)) {
1670                 dout("req %p pauserd\n", req);
1671                 req->r_t.paused = true;
1672                 maybe_request_map(osdc);
1673         } else if ((req->r_flags & CEPH_OSD_FLAG_WRITE) &&
1674                    !(req->r_flags & (CEPH_OSD_FLAG_FULL_TRY |
1675                                      CEPH_OSD_FLAG_FULL_FORCE)) &&
1676                    (ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL) ||
1677                     pool_full(osdc, req->r_t.base_oloc.pool))) {
1678                 dout("req %p full/pool_full\n", req);
1679                 pr_warn_ratelimited("FULL or reached pool quota\n");
1680                 req->r_t.paused = true;
1681                 maybe_request_map(osdc);
1682                 if (req->r_abort_on_full)
1683                         need_abort = true;
1684         } else if (!osd_homeless(osd)) {
1685                 need_send = true;
1686         } else {
1687                 maybe_request_map(osdc);
1688         }
1689
1690         mutex_lock(&osd->lock);
1691         /*
1692          * Assign the tid atomically with send_request() to protect
1693          * multiple writes to the same object from racing with each
1694          * other, resulting in out of order ops on the OSDs.
1695          */
1696         req->r_tid = atomic64_inc_return(&osdc->last_tid);
1697         link_request(osd, req);
1698         if (need_send)
1699                 send_request(req);
1700         else if (need_abort)
1701                 complete_request(req, -ENOSPC);
1702         mutex_unlock(&osd->lock);
1703
1704         if (ct_res == CALC_TARGET_POOL_DNE)
1705                 send_map_check(req);
1706
1707         if (promoted)
1708                 downgrade_write(&osdc->lock);
1709         return;
1710
1711 promote:
1712         up_read(&osdc->lock);
1713         down_write(&osdc->lock);
1714         wrlocked = true;
1715         promoted = true;
1716         goto again;
1717 }
1718
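/*
 * All requests are sent as commit-to-disk: mark the request ONDISK and
 * count it against the client's in-flight total.
 */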
1719 static void account_request(struct ceph_osd_request *req)
1720 {
1721         WARN_ON(req->r_flags & (CEPH_OSD_FLAG_ACK | CEPH_OSD_FLAG_ONDISK));
1722         WARN_ON(!(req->r_flags & (CEPH_OSD_FLAG_READ | CEPH_OSD_FLAG_WRITE)));
1723
1724         req->r_flags |= CEPH_OSD_FLAG_ONDISK;
1725         atomic_inc(&req->r_osdc->num_requests);
1726
1727         req->r_start_stamp = jiffies;
1728 }
1729
1730 static void submit_request(struct ceph_osd_request *req, bool wrlocked)
1731 {
1732         ceph_osdc_get_request(req);
1733         account_request(req);
1734         __submit_request(req, wrlocked);
1735 }
1736
1737 static void finish_request(struct ceph_osd_request *req)
1738 {
1739         struct ceph_osd_client *osdc = req->r_osdc;
1740         struct ceph_osd *osd = req->r_osd;
1741
1742         verify_osd_locked(osd);
1743         dout("%s req %p tid %llu\n", __func__, req, req->r_tid);
1744
1745         WARN_ON(lookup_request_mc(&osdc->map_checks, req->r_tid));
1746         unlink_request(osd, req);
1747         atomic_dec(&osdc->num_requests);
1748
1749         /*
1750          * If an OSD has failed or come back up and a request has been sent
1751          * twice, it's possible to get a reply and end up here while the
1752          * request message is queued for delivery.  We will ignore the
1753          * reply, so not a big deal, but better to try and catch it.
1754          */
1755         ceph_msg_revoke(req->r_request);
1756         ceph_msg_revoke_incoming(req->r_reply);
1757 }
1758
1759 static void __complete_request(struct ceph_osd_request *req)
1760 {
1761         if (req->r_callback) {
1762                 dout("%s req %p tid %llu cb %pf result %d\n", __func__, req,
1763                      req->r_tid, req->r_callback, req->r_result);
1764                 req->r_callback(req);
1765         }
1766 }
1767
1768 /*
1769  * This is open-coded in handle_reply().
1770  */
1771 static void complete_request(struct ceph_osd_request *req, int err)
1772 {
1773         dout("%s req %p tid %llu err %d\n", __func__, req, req->r_tid, err);
1774
1775         req->r_result = err;
1776         finish_request(req);
1777         __complete_request(req);
1778         complete_all(&req->r_completion);
1779         ceph_osdc_put_request(req);
1780 }
1781
1782 static void cancel_map_check(struct ceph_osd_request *req)
1783 {
1784         struct ceph_osd_client *osdc = req->r_osdc;
1785         struct ceph_osd_request *lookup_req;
1786
1787         verify_osdc_wrlocked(osdc);
1788
1789         lookup_req = lookup_request_mc(&osdc->map_checks, req->r_tid);
1790         if (!lookup_req)
1791                 return;
1792
1793         WARN_ON(lookup_req != req);
1794         erase_request_mc(&osdc->map_checks, req);
1795         ceph_osdc_put_request(req);
1796 }
1797
1798 static void cancel_request(struct ceph_osd_request *req)
1799 {
1800         dout("%s req %p tid %llu\n", __func__, req, req->r_tid);
1801
1802         cancel_map_check(req);
1803         finish_request(req);
1804         complete_all(&req->r_completion);
1805         ceph_osdc_put_request(req);
1806 }
1807
1808 static void abort_request(struct ceph_osd_request *req, int err)
1809 {
1810         dout("%s req %p tid %llu err %d\n", __func__, req, req->r_tid, err);
1811
1812         cancel_map_check(req);
1813         complete_request(req, err);
1814 }
1815
1816 static void update_epoch_barrier(struct ceph_osd_client *osdc, u32 eb)
1817 {
1818         if (likely(eb > osdc->epoch_barrier)) {
1819                 dout("updating epoch_barrier from %u to %u\n",
1820                                 osdc->epoch_barrier, eb);
1821                 osdc->epoch_barrier = eb;
1822                 /* Request a map if we're not at the barrier yet */
1823                 if (eb > osdc->osdmap->epoch)
1824                         maybe_request_map(osdc);
1825         }
1826 }
1827
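/*
 * Raise the epoch barrier on behalf of an external caller.  Take
 * osdc->lock for read first and upgrade to a write lock only if the
 * barrier actually needs to move.
 */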
1828 void ceph_osdc_update_epoch_barrier(struct ceph_osd_client *osdc, u32 eb)
1829 {
1830         down_read(&osdc->lock);
1831         if (unlikely(eb > osdc->epoch_barrier)) {
1832                 up_read(&osdc->lock);
1833                 down_write(&osdc->lock);
1834                 update_epoch_barrier(osdc, eb);
1835                 up_write(&osdc->lock);
1836         } else {
1837                 up_read(&osdc->lock);
1838         }
1839 }
1840 EXPORT_SYMBOL(ceph_osdc_update_epoch_barrier);
1841
1842 /*
1843  * Drop all pending requests that are stalled waiting on a full condition to
1844  * clear, and complete them with ENOSPC as the return code. Set the
1845  * osdc->epoch_barrier to the latest map epoch that we've seen if any were
1846  * cancelled.
1847  */
1848 static void ceph_osdc_abort_on_full(struct ceph_osd_client *osdc)
1849 {
1850         struct rb_node *n;
1851         bool victims = false;
1852
1853         dout("enter abort_on_full\n");
1854
1855         if (!ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL) && !have_pool_full(osdc))
1856                 goto out;
1857
1858         /* Scan list and see if there is anything to abort */
1859         for (n = rb_first(&osdc->osds); n; n = rb_next(n)) {
1860                 struct ceph_osd *osd = rb_entry(n, struct ceph_osd, o_node);
1861                 struct rb_node *m;
1862
1863                 m = rb_first(&osd->o_requests);
1864                 while (m) {
1865                         struct ceph_osd_request *req = rb_entry(m,
1866                                         struct ceph_osd_request, r_node);
1867                         m = rb_next(m);
1868
1869                         if (req->r_abort_on_full) {
1870                                 victims = true;
1871                                 break;
1872                         }
1873                 }
1874                 if (victims)
1875                         break;
1876         }
1877
1878         if (!victims)
1879                 goto out;
1880
1881         /*
1882          * Update the barrier to the current epoch if it's behind that point,
1883          * since we know there are requests in the tree that need to be aborted.
1884          */
1885         update_epoch_barrier(osdc, osdc->osdmap->epoch);
1886
1887         for (n = rb_first(&osdc->osds); n; n = rb_next(n)) {
1888                 struct ceph_osd *osd = rb_entry(n, struct ceph_osd, o_node);
1889                 struct rb_node *m;
1890
1891                 m = rb_first(&osd->o_requests);
1892                 while (m) {
1893                         struct ceph_osd_request *req = rb_entry(m,
1894                                         struct ceph_osd_request, r_node);
1895                         m = rb_next(m);
1896
1897                         if (req->r_abort_on_full &&
1898                             (ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL) ||
1899                              pool_full(osdc, req->r_t.target_oloc.pool)))
1900                                 abort_request(req, -ENOSPC);
1901                 }
1902         }
1903 out:
1904         dout("return abort_on_full barrier=%u\n", osdc->epoch_barrier);
1905 }
1906
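/*
 * Decide whether the target pool really does not exist.  If the request
 * was ever sent, the pool must have existed and has since been deleted.
 * Otherwise we can't tell from our (possibly stale) map alone: fetch the
 * newest osdmap epoch from the monitor and only fail the request once
 * our map is at least that new.
 */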
1907 static void check_pool_dne(struct ceph_osd_request *req)
1908 {
1909         struct ceph_osd_client *osdc = req->r_osdc;
1910         struct ceph_osdmap *map = osdc->osdmap;
1911
1912         verify_osdc_wrlocked(osdc);
1913         WARN_ON(!map->epoch);
1914
1915         if (req->r_attempts) {
1916                 /*
1917                  * We sent a request earlier, which means that
1918                  * previously the pool existed, and now it does not
1919                  * (i.e., it was deleted).
1920                  */
1921                 req->r_map_dne_bound = map->epoch;
1922                 dout("%s req %p tid %llu pool disappeared\n", __func__, req,
1923                      req->r_tid);
1924         } else {
1925                 dout("%s req %p tid %llu map_dne_bound %u have %u\n", __func__,
1926                      req, req->r_tid, req->r_map_dne_bound, map->epoch);
1927         }
1928
1929         if (req->r_map_dne_bound) {
1930                 if (map->epoch >= req->r_map_dne_bound) {
1931                         /* we had a new enough map */
1932                         pr_info_ratelimited("tid %llu pool does not exist\n",
1933                                             req->r_tid);
1934                         complete_request(req, -ENOENT);
1935                 }
1936         } else {
1937                 send_map_check(req);
1938         }
1939 }
1940
1941 static void map_check_cb(struct ceph_mon_generic_request *greq)
1942 {
1943         struct ceph_osd_client *osdc = &greq->monc->client->osdc;
1944         struct ceph_osd_request *req;
1945         u64 tid = greq->private_data;
1946
1947         WARN_ON(greq->result || !greq->u.newest);
1948
1949         down_write(&osdc->lock);
1950         req = lookup_request_mc(&osdc->map_checks, tid);
1951         if (!req) {
1952                 dout("%s tid %llu dne\n", __func__, tid);
1953                 goto out_unlock;
1954         }
1955
1956         dout("%s req %p tid %llu map_dne_bound %u newest %llu\n", __func__,
1957              req, req->r_tid, req->r_map_dne_bound, greq->u.newest);
1958         if (!req->r_map_dne_bound)
1959                 req->r_map_dne_bound = greq->u.newest;
1960         erase_request_mc(&osdc->map_checks, req);
1961         check_pool_dne(req);
1962
1963         ceph_osdc_put_request(req);
1964 out_unlock:
1965         up_write(&osdc->lock);
1966 }
1967
1968 static void send_map_check(struct ceph_osd_request *req)
1969 {
1970         struct ceph_osd_client *osdc = req->r_osdc;
1971         struct ceph_osd_request *lookup_req;
1972         int ret;
1973
1974         verify_osdc_wrlocked(osdc);
1975
1976         lookup_req = lookup_request_mc(&osdc->map_checks, req->r_tid);
1977         if (lookup_req) {
1978                 WARN_ON(lookup_req != req);
1979                 return;
1980         }
1981
1982         ceph_osdc_get_request(req);
1983         insert_request_mc(&osdc->map_checks, req);
1984         ret = ceph_monc_get_version_async(&osdc->client->monc, "osdmap",
1985                                           map_check_cb, req->r_tid);
1986         WARN_ON(ret);
1987 }
1988
1989 /*
1990  * lingering requests, watch/notify v2 infrastructure
1991  */
1992 static void linger_release(struct kref *kref)
1993 {
1994         struct ceph_osd_linger_request *lreq =
1995             container_of(kref, struct ceph_osd_linger_request, kref);
1996
1997         dout("%s lreq %p reg_req %p ping_req %p\n", __func__, lreq,
1998              lreq->reg_req, lreq->ping_req);
1999         WARN_ON(!RB_EMPTY_NODE(&lreq->node));
2000         WARN_ON(!RB_EMPTY_NODE(&lreq->osdc_node));
2001         WARN_ON(!RB_EMPTY_NODE(&lreq->mc_node));
2002         WARN_ON(!list_empty(&lreq->scan_item));
2003         WARN_ON(!list_empty(&lreq->pending_lworks));
2004         WARN_ON(lreq->osd);
2005
2006         if (lreq->reg_req)
2007                 ceph_osdc_put_request(lreq->reg_req);
2008         if (lreq->ping_req)
2009                 ceph_osdc_put_request(lreq->ping_req);
2010         target_destroy(&lreq->t);
2011         kfree(lreq);
2012 }
2013
2014 static void linger_put(struct ceph_osd_linger_request *lreq)
2015 {
2016         if (lreq)
2017                 kref_put(&lreq->kref, linger_release);
2018 }
2019
2020 static struct ceph_osd_linger_request *
2021 linger_get(struct ceph_osd_linger_request *lreq)
2022 {
2023         kref_get(&lreq->kref);
2024         return lreq;
2025 }
2026
2027 static struct ceph_osd_linger_request *
2028 linger_alloc(struct ceph_osd_client *osdc)
2029 {
2030         struct ceph_osd_linger_request *lreq;
2031
2032         lreq = kzalloc(sizeof(*lreq), GFP_NOIO);
2033         if (!lreq)
2034                 return NULL;
2035
2036         kref_init(&lreq->kref);
2037         mutex_init(&lreq->lock);
2038         RB_CLEAR_NODE(&lreq->node);
2039         RB_CLEAR_NODE(&lreq->osdc_node);
2040         RB_CLEAR_NODE(&lreq->mc_node);
2041         INIT_LIST_HEAD(&lreq->scan_item);
2042         INIT_LIST_HEAD(&lreq->pending_lworks);
2043         init_completion(&lreq->reg_commit_wait);
2044         init_completion(&lreq->notify_finish_wait);
2045
2046         lreq->osdc = osdc;
2047         target_init(&lreq->t);
2048
2049         dout("%s lreq %p\n", __func__, lreq);
2050         return lreq;
2051 }
2052
2053 DEFINE_RB_INSDEL_FUNCS(linger, struct ceph_osd_linger_request, linger_id, node)
2054 DEFINE_RB_FUNCS(linger_osdc, struct ceph_osd_linger_request, linger_id, osdc_node)
2055 DEFINE_RB_FUNCS(linger_mc, struct ceph_osd_linger_request, linger_id, mc_node)
2056
2057 /*
2058  * Create linger request <-> OSD session relation.
2059  *
2060  * @lreq has to be registered, @osd may be homeless.
2061  */
2062 static void link_linger(struct ceph_osd *osd,
2063                         struct ceph_osd_linger_request *lreq)
2064 {
2065         verify_osd_locked(osd);
2066         WARN_ON(!lreq->linger_id || lreq->osd);
2067         dout("%s osd %p osd%d lreq %p linger_id %llu\n", __func__, osd,
2068              osd->o_osd, lreq, lreq->linger_id);
2069
2070         if (!osd_homeless(osd))
2071                 __remove_osd_from_lru(osd);
2072         else
2073                 atomic_inc(&osd->o_osdc->num_homeless);
2074
2075         get_osd(osd);
2076         insert_linger(&osd->o_linger_requests, lreq);
2077         lreq->osd = osd;
2078 }
2079
2080 static void unlink_linger(struct ceph_osd *osd,
2081                           struct ceph_osd_linger_request *lreq)
2082 {
2083         verify_osd_locked(osd);
2084         WARN_ON(lreq->osd != osd);
2085         dout("%s osd %p osd%d lreq %p linger_id %llu\n", __func__, osd,
2086              osd->o_osd, lreq, lreq->linger_id);
2087
2088         lreq->osd = NULL;
2089         erase_linger(&osd->o_linger_requests, lreq);
2090         put_osd(osd);
2091
2092         if (!osd_homeless(osd))
2093                 maybe_move_osd_to_lru(osd);
2094         else
2095                 atomic_dec(&osd->o_osdc->num_homeless);
2096 }
2097
2098 static bool __linger_registered(struct ceph_osd_linger_request *lreq)
2099 {
2100         verify_osdc_locked(lreq->osdc);
2101
2102         return !RB_EMPTY_NODE(&lreq->osdc_node);
2103 }
2104
2105 static bool linger_registered(struct ceph_osd_linger_request *lreq)
2106 {
2107         struct ceph_osd_client *osdc = lreq->osdc;
2108         bool registered;
2109
2110         down_read(&osdc->lock);
2111         registered = __linger_registered(lreq);
2112         up_read(&osdc->lock);
2113
2114         return registered;
2115 }
2116
2117 static void linger_register(struct ceph_osd_linger_request *lreq)
2118 {
2119         struct ceph_osd_client *osdc = lreq->osdc;
2120
2121         verify_osdc_wrlocked(osdc);
2122         WARN_ON(lreq->linger_id);
2123
2124         linger_get(lreq);
2125         lreq->linger_id = ++osdc->last_linger_id;
2126         insert_linger_osdc(&osdc->linger_requests, lreq);
2127 }
2128
2129 static void linger_unregister(struct ceph_osd_linger_request *lreq)
2130 {
2131         struct ceph_osd_client *osdc = lreq->osdc;
2132
2133         verify_osdc_wrlocked(osdc);
2134
2135         erase_linger_osdc(&osdc->linger_requests, lreq);
2136         linger_put(lreq);
2137 }
2138
2139 static void cancel_linger_request(struct ceph_osd_request *req)
2140 {
2141         struct ceph_osd_linger_request *lreq = req->r_priv;
2142
2143         WARN_ON(!req->r_linger);
2144         cancel_request(req);
2145         linger_put(lreq);
2146 }
2147
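/*
 * Deferred delivery of watch/notify events and watch errors to the
 * user's callbacks, run from osdc->notify_wq in process context.
 */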
2148 struct linger_work {
2149         struct work_struct work;
2150         struct ceph_osd_linger_request *lreq;
2151         struct list_head pending_item;
2152         unsigned long queued_stamp;
2153
2154         union {
2155                 struct {
2156                         u64 notify_id;
2157                         u64 notifier_id;
2158                         void *payload; /* points into @msg front */
2159                         size_t payload_len;
2160
2161                         struct ceph_msg *msg; /* for ceph_msg_put() */
2162                 } notify;
2163                 struct {
2164                         int err;
2165                 } error;
2166         };
2167 };
2168
2169 static struct linger_work *lwork_alloc(struct ceph_osd_linger_request *lreq,
2170                                        work_func_t workfn)
2171 {
2172         struct linger_work *lwork;
2173
2174         lwork = kzalloc(sizeof(*lwork), GFP_NOIO);
2175         if (!lwork)
2176                 return NULL;
2177
2178         INIT_WORK(&lwork->work, workfn);
2179         INIT_LIST_HEAD(&lwork->pending_item);
2180         lwork->lreq = linger_get(lreq);
2181
2182         return lwork;
2183 }
2184
2185 static void lwork_free(struct linger_work *lwork)
2186 {
2187         struct ceph_osd_linger_request *lreq = lwork->lreq;
2188
2189         mutex_lock(&lreq->lock);
2190         list_del(&lwork->pending_item);
2191         mutex_unlock(&lreq->lock);
2192
2193         linger_put(lreq);
2194         kfree(lwork);
2195 }
2196
2197 static void lwork_queue(struct linger_work *lwork)
2198 {
2199         struct ceph_osd_linger_request *lreq = lwork->lreq;
2200         struct ceph_osd_client *osdc = lreq->osdc;
2201
2202         verify_lreq_locked(lreq);
2203         WARN_ON(!list_empty(&lwork->pending_item));
2204
2205         lwork->queued_stamp = jiffies;
2206         list_add_tail(&lwork->pending_item, &lreq->pending_lworks);
2207         queue_work(osdc->notify_wq, &lwork->work);
2208 }
2209
2210 static void do_watch_notify(struct work_struct *w)
2211 {
2212         struct linger_work *lwork = container_of(w, struct linger_work, work);
2213         struct ceph_osd_linger_request *lreq = lwork->lreq;
2214
2215         if (!linger_registered(lreq)) {
2216                 dout("%s lreq %p not registered\n", __func__, lreq);
2217                 goto out;
2218         }
2219
2220         WARN_ON(!lreq->is_watch);
2221         dout("%s lreq %p notify_id %llu notifier_id %llu payload_len %zu\n",
2222              __func__, lreq, lwork->notify.notify_id, lwork->notify.notifier_id,
2223              lwork->notify.payload_len);
2224         lreq->wcb(lreq->data, lwork->notify.notify_id, lreq->linger_id,
2225                   lwork->notify.notifier_id, lwork->notify.payload,
2226                   lwork->notify.payload_len);
2227
2228 out:
2229         ceph_msg_put(lwork->notify.msg);
2230         lwork_free(lwork);
2231 }
2232
2233 static void do_watch_error(struct work_struct *w)
2234 {
2235         struct linger_work *lwork = container_of(w, struct linger_work, work);
2236         struct ceph_osd_linger_request *lreq = lwork->lreq;
2237
2238         if (!linger_registered(lreq)) {
2239                 dout("%s lreq %p not registered\n", __func__, lreq);
2240                 goto out;
2241         }
2242
2243         dout("%s lreq %p err %d\n", __func__, lreq, lwork->error.err);
2244         lreq->errcb(lreq->data, lreq->linger_id, lwork->error.err);
2245
2246 out:
2247         lwork_free(lwork);
2248 }
2249
2250 static void queue_watch_error(struct ceph_osd_linger_request *lreq)
2251 {
2252         struct linger_work *lwork;
2253
2254         lwork = lwork_alloc(lreq, do_watch_error);
2255         if (!lwork) {
2256                 pr_err("failed to allocate error-lwork\n");
2257                 return;
2258         }
2259
2260         lwork->error.err = lreq->last_error;
2261         lwork_queue(lwork);
2262 }
2263
2264 static void linger_reg_commit_complete(struct ceph_osd_linger_request *lreq,
2265                                        int result)
2266 {
2267         if (!completion_done(&lreq->reg_commit_wait)) {
2268                 lreq->reg_commit_error = (result <= 0 ? result : 0);
2269                 complete_all(&lreq->reg_commit_wait);
2270         }
2271 }
2272
2273 static void linger_commit_cb(struct ceph_osd_request *req)
2274 {
2275         struct ceph_osd_linger_request *lreq = req->r_priv;
2276
2277         mutex_lock(&lreq->lock);
2278         dout("%s lreq %p linger_id %llu result %d\n", __func__, lreq,
2279              lreq->linger_id, req->r_result);
2280         linger_reg_commit_complete(lreq, req->r_result);
2281         lreq->committed = true;
2282
2283         if (!lreq->is_watch) {
2284                 struct ceph_osd_data *osd_data =
2285                     osd_req_op_data(req, 0, notify, response_data);
2286                 void *p = page_address(osd_data->pages[0]);
2287
2288                 WARN_ON(req->r_ops[0].op != CEPH_OSD_OP_NOTIFY ||
2289                         osd_data->type != CEPH_OSD_DATA_TYPE_PAGES);
2290
2291                 /* make note of the notify_id */
2292                 if (req->r_ops[0].outdata_len >= sizeof(u64)) {
2293                         lreq->notify_id = ceph_decode_64(&p);
2294                         dout("lreq %p notify_id %llu\n", lreq,
2295                              lreq->notify_id);
2296                 } else {
2297                         dout("lreq %p no notify_id\n", lreq);
2298                 }
2299         }
2300
2301         mutex_unlock(&lreq->lock);
2302         linger_put(lreq);
2303 }
2304
2305 static int normalize_watch_error(int err)
2306 {
2307         /*
2308          * Translate ENOENT -> ENOTCONN so that a delete->disconnection
2309          * notification and a failure to reconnect because we raced with
2310          * the delete appear the same to the user.
2311          */
2312         if (err == -ENOENT)
2313                 err = -ENOTCONN;
2314
2315         return err;
2316 }
2317
2318 static void linger_reconnect_cb(struct ceph_osd_request *req)
2319 {
2320         struct ceph_osd_linger_request *lreq = req->r_priv;
2321
2322         mutex_lock(&lreq->lock);
2323         dout("%s lreq %p linger_id %llu result %d last_error %d\n", __func__,
2324              lreq, lreq->linger_id, req->r_result, lreq->last_error);
2325         if (req->r_result < 0) {
2326                 if (!lreq->last_error) {
2327                         lreq->last_error = normalize_watch_error(req->r_result);
2328                         queue_watch_error(lreq);
2329                 }
2330         }
2331
2332         mutex_unlock(&lreq->lock);
2333         linger_put(lreq);
2334 }
2335
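/*
 * (Re)send the registration request for a linger request: a fresh
 * WATCH/NOTIFY registration the first time, or a WATCH reconnect (with
 * a bumped register_gen) once a watch has been committed.  Any
 * previously queued instance of reg_req is cancelled and the request is
 * reinitialized before resubmission.
 */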
2336 static void send_linger(struct ceph_osd_linger_request *lreq)
2337 {
2338         struct ceph_osd_request *req = lreq->reg_req;
2339         struct ceph_osd_req_op *op = &req->r_ops[0];
2340
2341         verify_osdc_wrlocked(req->r_osdc);
2342         dout("%s lreq %p linger_id %llu\n", __func__, lreq, lreq->linger_id);
2343
2344         if (req->r_osd)
2345                 cancel_linger_request(req);
2346
2347         request_reinit(req);
2348         ceph_oid_copy(&req->r_base_oid, &lreq->t.base_oid);
2349         ceph_oloc_copy(&req->r_base_oloc, &lreq->t.base_oloc);
2350         req->r_flags = lreq->t.flags;
2351         req->r_mtime = lreq->mtime;
2352
2353         mutex_lock(&lreq->lock);
2354         if (lreq->is_watch && lreq->committed) {
2355                 WARN_ON(op->op != CEPH_OSD_OP_WATCH ||
2356                         op->watch.cookie != lreq->linger_id);
2357                 op->watch.op = CEPH_OSD_WATCH_OP_RECONNECT;
2358                 op->watch.gen = ++lreq->register_gen;
2359                 dout("lreq %p reconnect register_gen %u\n", lreq,
2360                      op->watch.gen);
2361                 req->r_callback = linger_reconnect_cb;
2362         } else {
2363                 if (!lreq->is_watch)
2364                         lreq->notify_id = 0;
2365                 else
2366                         WARN_ON(op->watch.op != CEPH_OSD_WATCH_OP_WATCH);
2367                 dout("lreq %p register\n", lreq);
2368                 req->r_callback = linger_commit_cb;
2369         }
2370         mutex_unlock(&lreq->lock);
2371
2372         req->r_priv = linger_get(lreq);
2373         req->r_linger = true;
2374
2375         submit_request(req, true);
2376 }
2377
2378 static void linger_ping_cb(struct ceph_osd_request *req)
2379 {
2380         struct ceph_osd_linger_request *lreq = req->r_priv;
2381
2382         mutex_lock(&lreq->lock);
2383         dout("%s lreq %p linger_id %llu result %d ping_sent %lu last_error %d\n",
2384              __func__, lreq, lreq->linger_id, req->r_result, lreq->ping_sent,
2385              lreq->last_error);
2386         if (lreq->register_gen == req->r_ops[0].watch.gen) {
2387                 if (!req->r_result) {
2388                         lreq->watch_valid_thru = lreq->ping_sent;
2389                 } else if (!lreq->last_error) {
2390                         lreq->last_error = normalize_watch_error(req->r_result);
2391                         queue_watch_error(lreq);
2392                 }
2393         } else {
2394                 dout("lreq %p register_gen %u ignoring old pong %u\n", lreq,
2395                      lreq->register_gen, req->r_ops[0].watch.gen);
2396         }
2397
2398         mutex_unlock(&lreq->lock);
2399         linger_put(lreq);
2400 }
2401
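/*
 * Ping an established watch to verify the connection to its OSD is
 * still good.  The ping reuses lreq->ping_req and is linked and sent
 * directly on lreq->osd, bypassing the normal target calculation.  No
 * ping is sent while reads are paused.
 */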
2402 static void send_linger_ping(struct ceph_osd_linger_request *lreq)
2403 {
2404         struct ceph_osd_client *osdc = lreq->osdc;
2405         struct ceph_osd_request *req = lreq->ping_req;
2406         struct ceph_osd_req_op *op = &req->r_ops[0];
2407
2408         if (ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSERD)) {
2409                 dout("%s PAUSERD\n", __func__);
2410                 return;
2411         }
2412
2413         lreq->ping_sent = jiffies;
2414         dout("%s lreq %p linger_id %llu ping_sent %lu register_gen %u\n",
2415              __func__, lreq, lreq->linger_id, lreq->ping_sent,
2416              lreq->register_gen);
2417
2418         if (req->r_osd)
2419                 cancel_linger_request(req);
2420
2421         request_reinit(req);
2422         target_copy(&req->r_t, &lreq->t);
2423
2424         WARN_ON(op->op != CEPH_OSD_OP_WATCH ||
2425                 op->watch.cookie != lreq->linger_id ||
2426                 op->watch.op != CEPH_OSD_WATCH_OP_PING);
2427         op->watch.gen = lreq->register_gen;
2428         req->r_callback = linger_ping_cb;
2429         req->r_priv = linger_get(lreq);
2430         req->r_linger = true;
2431
2432         ceph_osdc_get_request(req);
2433         account_request(req);
2434         req->r_tid = atomic64_inc_return(&osdc->last_tid);
2435         link_request(lreq->osd, req);
2436         send_request(req);
2437 }
2438
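/*
 * Pick an initial target and OSD session for a freshly registered
 * linger request and send its registration.  Called with osdc->lock
 * held for write.
 */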
2439 static void linger_submit(struct ceph_osd_linger_request *lreq)
2440 {
2441         struct ceph_osd_client *osdc = lreq->osdc;
2442         struct ceph_osd *osd;
2443
2444         calc_target(osdc, &lreq->t, &lreq->last_force_resend, false);
2445         osd = lookup_create_osd(osdc, lreq->t.osd, true);
2446         link_linger(osd, lreq);
2447
2448         send_linger(lreq);
2449 }
2450
2451 static void cancel_linger_map_check(struct ceph_osd_linger_request *lreq)
2452 {
2453         struct ceph_osd_client *osdc = lreq->osdc;
2454         struct ceph_osd_linger_request *lookup_lreq;
2455
2456         verify_osdc_wrlocked(osdc);
2457
2458         lookup_lreq = lookup_linger_mc(&osdc->linger_map_checks,
2459                                        lreq->linger_id);
2460         if (!lookup_lreq)
2461                 return;
2462
2463         WARN_ON(lookup_lreq != lreq);
2464         erase_linger_mc(&osdc->linger_map_checks, lreq);
2465         linger_put(lreq);
2466 }
2467
2468 /*
2469  * @lreq has to be both registered and linked.
2470  */
2471 static void __linger_cancel(struct ceph_osd_linger_request *lreq)
2472 {
2473         if (lreq->is_watch && lreq->ping_req->r_osd)
2474                 cancel_linger_request(lreq->ping_req);
2475         if (lreq->reg_req->r_osd)
2476                 cancel_linger_request(lreq->reg_req);
2477         cancel_linger_map_check(lreq);
2478         unlink_linger(lreq->osd, lreq);
2479         linger_unregister(lreq);
2480 }
2481
2482 static void linger_cancel(struct ceph_osd_linger_request *lreq)
2483 {
2484         struct ceph_osd_client *osdc = lreq->osdc;
2485
2486         down_write(&osdc->lock);
2487         if (__linger_registered(lreq))
2488                 __linger_cancel(lreq);
2489         up_write(&osdc->lock);
2490 }
2491
2492 static void send_linger_map_check(struct ceph_osd_linger_request *lreq);
2493
2494 static void check_linger_pool_dne(struct ceph_osd_linger_request *lreq)
2495 {
2496         struct ceph_osd_client *osdc = lreq->osdc;
2497         struct ceph_osdmap *map = osdc->osdmap;
2498
2499         verify_osdc_wrlocked(osdc);
2500         WARN_ON(!map->epoch);
2501
2502         if (lreq->register_gen) {
2503                 lreq->map_dne_bound = map->epoch;
2504                 dout("%s lreq %p linger_id %llu pool disappeared\n", __func__,
2505                      lreq, lreq->linger_id);
2506         } else {
2507                 dout("%s lreq %p linger_id %llu map_dne_bound %u have %u\n",
2508                      __func__, lreq, lreq->linger_id, lreq->map_dne_bound,
2509                      map->epoch);
2510         }
2511
2512         if (lreq->map_dne_bound) {
2513                 if (map->epoch >= lreq->map_dne_bound) {
2514                         /* we had a new enough map */
2515                         pr_info("linger_id %llu pool does not exist\n",
2516                                 lreq->linger_id);
2517                         linger_reg_commit_complete(lreq, -ENOENT);
2518                         __linger_cancel(lreq);
2519                 }
2520         } else {
2521                 send_linger_map_check(lreq);
2522         }
2523 }
2524
2525 static void linger_map_check_cb(struct ceph_mon_generic_request *greq)
2526 {
2527         struct ceph_osd_client *osdc = &greq->monc->client->osdc;
2528         struct ceph_osd_linger_request *lreq;
2529         u64 linger_id = greq->private_data;
2530
2531         WARN_ON(greq->result || !greq->u.newest);
2532
2533         down_write(&osdc->lock);
2534         lreq = lookup_linger_mc(&osdc->linger_map_checks, linger_id);
2535         if (!lreq) {
2536                 dout("%s linger_id %llu dne\n", __func__, linger_id);
2537                 goto out_unlock;
2538         }
2539
2540         dout("%s lreq %p linger_id %llu map_dne_bound %u newest %llu\n",
2541              __func__, lreq, lreq->linger_id, lreq->map_dne_bound,
2542              greq->u.newest);
2543         if (!lreq->map_dne_bound)
2544                 lreq->map_dne_bound = greq->u.newest;
2545         erase_linger_mc(&osdc->linger_map_checks, lreq);
2546         check_linger_pool_dne(lreq);
2547
2548         linger_put(lreq);
2549 out_unlock:
2550         up_write(&osdc->lock);
2551 }
2552
2553 static void send_linger_map_check(struct ceph_osd_linger_request *lreq)
2554 {
2555         struct ceph_osd_client *osdc = lreq->osdc;
2556         struct ceph_osd_linger_request *lookup_lreq;
2557         int ret;
2558
2559         verify_osdc_wrlocked(osdc);
2560
2561         lookup_lreq = lookup_linger_mc(&osdc->linger_map_checks,
2562                                        lreq->linger_id);
2563         if (lookup_lreq) {
2564                 WARN_ON(lookup_lreq != lreq);
2565                 return;
2566         }
2567
2568         linger_get(lreq);
2569         insert_linger_mc(&osdc->linger_map_checks, lreq);
2570         ret = ceph_monc_get_version_async(&osdc->client->monc, "osdmap",
2571                                           linger_map_check_cb, lreq->linger_id);
2572         WARN_ON(ret);
2573 }
2574
2575 static int linger_reg_commit_wait(struct ceph_osd_linger_request *lreq)
2576 {
2577         int ret;
2578
2579         dout("%s lreq %p linger_id %llu\n", __func__, lreq, lreq->linger_id);
2580         ret = wait_for_completion_interruptible(&lreq->reg_commit_wait);
2581         return ret ?: lreq->reg_commit_error;
2582 }
2583
2584 static int linger_notify_finish_wait(struct ceph_osd_linger_request *lreq)
2585 {
2586         int ret;
2587
2588         dout("%s lreq %p linger_id %llu\n", __func__, lreq, lreq->linger_id);
2589         ret = wait_for_completion_interruptible(&lreq->notify_finish_wait);
2590         return ret ?: lreq->notify_finish_error;
2591 }
2592
2593 /*
2594  * Timeout callback, called every N seconds.  When 1 or more OSD
2595  * requests have been active for more than N seconds, we send a keepalive
2596  * (tag + timestamp) to that OSD to ensure any communications channel
2597  * reset is detected.
2598  */
2599 static void handle_timeout(struct work_struct *work)
2600 {
2601         struct ceph_osd_client *osdc =
2602                 container_of(work, struct ceph_osd_client, timeout_work.work);
2603         struct ceph_options *opts = osdc->client->options;
2604         unsigned long cutoff = jiffies - opts->osd_keepalive_timeout;
2605         unsigned long expiry_cutoff = jiffies - opts->osd_request_timeout;
2606         LIST_HEAD(slow_osds);
2607         struct rb_node *n, *p;
2608
2609         dout("%s osdc %p\n", __func__, osdc);
2610         down_write(&osdc->lock);
2611
2612         /*
2613          * ping osds that are a bit slow.  this ensures that if there
2614          * is a break in the TCP connection we will notice, and reopen
2615          * a connection with that osd (from the fault callback).
2616          */
2617         for (n = rb_first(&osdc->osds); n; n = rb_next(n)) {
2618                 struct ceph_osd *osd = rb_entry(n, struct ceph_osd, o_node);
2619                 bool found = false;
2620
2621                 for (p = rb_first(&osd->o_requests); p; ) {
2622                         struct ceph_osd_request *req =
2623                             rb_entry(p, struct ceph_osd_request, r_node);
2624
2625                         p = rb_next(p); /* abort_request() */
2626
2627                         if (time_before(req->r_stamp, cutoff)) {
2628                                 dout(" req %p tid %llu on osd%d is laggy\n",
2629                                      req, req->r_tid, osd->o_osd);
2630                                 found = true;
2631                         }
2632                         if (opts->osd_request_timeout &&
2633                             time_before(req->r_start_stamp, expiry_cutoff)) {
2634                                 pr_err_ratelimited("tid %llu on osd%d timeout\n",
2635                                        req->r_tid, osd->o_osd);
2636                                 abort_request(req, -ETIMEDOUT);
2637                         }
2638                 }
2639                 for (p = rb_first(&osd->o_linger_requests); p; p = rb_next(p)) {
2640                         struct ceph_osd_linger_request *lreq =
2641                             rb_entry(p, struct ceph_osd_linger_request, node);
2642
2643                         dout(" lreq %p linger_id %llu is served by osd%d\n",
2644                              lreq, lreq->linger_id, osd->o_osd);
2645                         found = true;
2646
2647                         mutex_lock(&lreq->lock);
2648                         if (lreq->is_watch && lreq->committed && !lreq->last_error)
2649                                 send_linger_ping(lreq);
2650                         mutex_unlock(&lreq->lock);
2651                 }
2652
2653                 if (found)
2654                         list_move_tail(&osd->o_keepalive_item, &slow_osds);
2655         }
2656
2657         if (opts->osd_request_timeout) {
2658                 for (p = rb_first(&osdc->homeless_osd.o_requests); p; ) {
2659                         struct ceph_osd_request *req =
2660                             rb_entry(p, struct ceph_osd_request, r_node);
2661
2662                         p = rb_next(p); /* abort_request() */
2663
2664                         if (time_before(req->r_start_stamp, expiry_cutoff)) {
2665                                 pr_err_ratelimited("tid %llu on osd%d timeout\n",
2666                                        req->r_tid, osdc->homeless_osd.o_osd);
2667                                 abort_request(req, -ETIMEDOUT);
2668                         }
2669                 }
2670         }
2671
2672         if (atomic_read(&osdc->num_homeless) || !list_empty(&slow_osds))
2673                 maybe_request_map(osdc);
2674
2675         while (!list_empty(&slow_osds)) {
2676                 struct ceph_osd *osd = list_first_entry(&slow_osds,
2677                                                         struct ceph_osd,
2678                                                         o_keepalive_item);
2679                 list_del_init(&osd->o_keepalive_item);
2680                 ceph_con_keepalive(&osd->o_con);
2681         }
2682
2683         up_write(&osdc->lock);
2684         schedule_delayed_work(&osdc->timeout_work,
2685                               osdc->client->options->osd_keepalive_timeout);
2686 }
2687
2688 static void handle_osds_timeout(struct work_struct *work)
2689 {
2690         struct ceph_osd_client *osdc =
2691                 container_of(work, struct ceph_osd_client,
2692                              osds_timeout_work.work);
2693         unsigned long delay = osdc->client->options->osd_idle_ttl / 4;
2694         struct ceph_osd *osd, *nosd;
2695
2696         dout("%s osdc %p\n", __func__, osdc);
2697         down_write(&osdc->lock);
2698         list_for_each_entry_safe(osd, nosd, &osdc->osd_lru, o_osd_lru) {
2699                 if (time_before(jiffies, osd->lru_ttl))
2700                         break;
2701
2702                 WARN_ON(!RB_EMPTY_ROOT(&osd->o_requests));
2703                 WARN_ON(!RB_EMPTY_ROOT(&osd->o_linger_requests));
2704                 close_osd(osd);
2705         }
2706
2707         up_write(&osdc->lock);
2708         schedule_delayed_work(&osdc->osds_timeout_work,
2709                               round_jiffies_relative(delay));
2710 }
2711
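/*
 * Decode a ceph_object_locator.  The kernel client only supports pool +
 * (optional, unchanged) namespace: a locator with a key, a changed
 * namespace or an explicit hash is rejected with -EINVAL.
 */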
2712 static int ceph_oloc_decode(void **p, void *end,
2713                             struct ceph_object_locator *oloc)
2714 {
2715         u8 struct_v, struct_cv;
2716         u32 len;
2717         void *struct_end;
2718         int ret = 0;
2719
2720         ceph_decode_need(p, end, 1 + 1 + 4, e_inval);
2721         struct_v = ceph_decode_8(p);
2722         struct_cv = ceph_decode_8(p);
2723         if (struct_v < 3) {
2724                 pr_warn("got v %d < 3 cv %d of ceph_object_locator\n",
2725                         struct_v, struct_cv);
2726                 goto e_inval;
2727         }
2728         if (struct_cv > 6) {
2729                 pr_warn("got v %d cv %d > 6 of ceph_object_locator\n",
2730                         struct_v, struct_cv);
2731                 goto e_inval;
2732         }
2733         len = ceph_decode_32(p);
2734         ceph_decode_need(p, end, len, e_inval);
2735         struct_end = *p + len;
2736
2737         oloc->pool = ceph_decode_64(p);
2738         *p += 4; /* skip preferred */
2739
2740         len = ceph_decode_32(p);
2741         if (len > 0) {
2742                 pr_warn("ceph_object_locator::key is set\n");
2743                 goto e_inval;
2744         }
2745
2746         if (struct_v >= 5) {
2747                 bool changed = false;
2748
2749                 len = ceph_decode_32(p);
2750                 if (len > 0) {
2751                         ceph_decode_need(p, end, len, e_inval);
2752                         if (!oloc->pool_ns ||
2753                             ceph_compare_string(oloc->pool_ns, *p, len))
2754                                 changed = true;
2755                         *p += len;
2756                 } else {
2757                         if (oloc->pool_ns)
2758                                 changed = true;
2759                 }
2760                 if (changed) {
2761                         /* redirect changes namespace */
2762                         pr_warn("ceph_object_locator::nspace is changed\n");
2763                         goto e_inval;
2764                 }
2765         }
2766
2767         if (struct_v >= 6) {
2768                 s64 hash = ceph_decode_64(p);
2769                 if (hash != -1) {
2770                         pr_warn("ceph_object_locator::hash is set\n");
2771                         goto e_inval;
2772                 }
2773         }
2774
2775         /* skip the rest */
2776         *p = struct_end;
2777 out:
2778         return ret;
2779
2780 e_inval:
2781         ret = -EINVAL;
2782         goto out;
2783 }
2784
2785 static int ceph_redirect_decode(void **p, void *end,
2786                                 struct ceph_request_redirect *redir)
2787 {
2788         u8 struct_v, struct_cv;
2789         u32 len;
2790         void *struct_end;
2791         int ret;
2792
2793         ceph_decode_need(p, end, 1 + 1 + 4, e_inval);
2794         struct_v = ceph_decode_8(p);
2795         struct_cv = ceph_decode_8(p);
2796         if (struct_cv > 1) {
2797                 pr_warn("got v %d cv %d > 1 of ceph_request_redirect\n",
2798                         struct_v, struct_cv);
2799                 goto e_inval;
2800         }
2801         len = ceph_decode_32(p);
2802         ceph_decode_need(p, end, len, e_inval);
2803         struct_end = *p + len;
2804
2805         ret = ceph_oloc_decode(p, end, &redir->oloc);
2806         if (ret)
2807                 goto out;
2808
2809         len = ceph_decode_32(p);
2810         if (len > 0) {
2811                 pr_warn("ceph_request_redirect::object_name is set\n");
2812                 goto e_inval;
2813         }
2814
2815         len = ceph_decode_32(p);
2816         *p += len; /* skip osd_instructions */
2817
2818         /* skip the rest */
2819         *p = struct_end;
2820 out:
2821         return ret;
2822
2823 e_inval:
2824         ret = -EINVAL;
2825         goto out;
2826 }
2827
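/* Decoded fields of an MOSDOpReply message, filled in by decode_MOSDOpReply(). */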
2828 struct MOSDOpReply {
2829         struct ceph_pg pgid;
2830         u64 flags;
2831         int result;
2832         u32 epoch;
2833         int num_ops;
2834         u32 outdata_len[CEPH_OSD_MAX_OPS];
2835         s32 rval[CEPH_OSD_MAX_OPS];
2836         int retry_attempt;
2837         struct ceph_eversion replay_version;
2838         u64 user_version;
2839         struct ceph_request_redirect redirect;
2840 };
2841
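/*
 * Decode the front of an MOSDOpReply: skip the oid, then pull out pgid,
 * flags, result, epoch, per-op output lengths and return values, the
 * replay/user versions (v5+) and an optional redirect (v6+, guarded by
 * a flag byte in v7+).
 */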
2842 static int decode_MOSDOpReply(const struct ceph_msg *msg, struct MOSDOpReply *m)
2843 {
2844         void *p = msg->front.iov_base;
2845         void *const end = p + msg->front.iov_len;
2846         u16 version = le16_to_cpu(msg->hdr.version);
2847         struct ceph_eversion bad_replay_version;
2848         u8 decode_redir;
2849         u32 len;
2850         int ret;
2851         int i;
2852
2853         ceph_decode_32_safe(&p, end, len, e_inval);
2854         ceph_decode_need(&p, end, len, e_inval);
2855         p += len; /* skip oid */
2856
2857         ret = ceph_decode_pgid(&p, end, &m->pgid);
2858         if (ret)
2859                 return ret;
2860
2861         ceph_decode_64_safe(&p, end, m->flags, e_inval);
2862         ceph_decode_32_safe(&p, end, m->result, e_inval);
2863         ceph_decode_need(&p, end, sizeof(bad_replay_version), e_inval);
2864         memcpy(&bad_replay_version, p, sizeof(bad_replay_version));
2865         p += sizeof(bad_replay_version);
2866         ceph_decode_32_safe(&p, end, m->epoch, e_inval);
2867
2868         ceph_decode_32_safe(&p, end, m->num_ops, e_inval);
2869         if (m->num_ops > ARRAY_SIZE(m->outdata_len))
2870                 goto e_inval;
2871
2872         ceph_decode_need(&p, end, m->num_ops * sizeof(struct ceph_osd_op),
2873                          e_inval);
2874         for (i = 0; i < m->num_ops; i++) {
2875                 struct ceph_osd_op *op = p;
2876
2877                 m->outdata_len[i] = le32_to_cpu(op->payload_len);
2878                 p += sizeof(*op);
2879         }
2880
2881         ceph_decode_32_safe(&p, end, m->retry_attempt, e_inval);
2882         for (i = 0; i < m->num_ops; i++)
2883                 ceph_decode_32_safe(&p, end, m->rval[i], e_inval);
2884
2885         if (version >= 5) {
2886                 ceph_decode_need(&p, end, sizeof(m->replay_version), e_inval);
2887                 memcpy(&m->replay_version, p, sizeof(m->replay_version));
2888                 p += sizeof(m->replay_version);
2889                 ceph_decode_64_safe(&p, end, m->user_version, e_inval);
2890         } else {
2891                 m->replay_version = bad_replay_version; /* struct */
2892                 m->user_version = le64_to_cpu(m->replay_version.version);
2893         }
2894
2895         if (version >= 6) {
2896                 if (version >= 7)
2897                         ceph_decode_8_safe(&p, end, decode_redir, e_inval);
2898                 else
2899                         decode_redir = 1;
2900         } else {
2901                 decode_redir = 0;
2902         }
2903
2904         if (decode_redir) {
2905                 ret = ceph_redirect_decode(&p, end, &m->redirect);
2906                 if (ret)
2907                         return ret;
2908         } else {
2909                 ceph_oloc_init(&m->redirect.oloc);
2910         }
2911
2912         return 0;
2913
2914 e_inval:
2915         return -EINVAL;
2916 }
2917
2918 /*
2919  * Handle MOSDOpReply.  Set ->r_result and call the callback if it is
2920  * specified.
2921  */
2922 static void handle_reply(struct ceph_osd *osd, struct ceph_msg *msg)
2923 {
2924         struct ceph_osd_client *osdc = osd->o_osdc;
2925         struct ceph_osd_request *req;
2926         struct MOSDOpReply m;
2927         u64 tid = le64_to_cpu(msg->hdr.tid);
2928         u32 data_len = 0;
2929         int ret;
2930         int i;
2931
2932         dout("%s msg %p tid %llu\n", __func__, msg, tid);
2933
2934         down_read(&osdc->lock);
2935         if (!osd_registered(osd)) {
2936                 dout("%s osd%d unknown\n", __func__, osd->o_osd);
2937                 goto out_unlock_osdc;
2938         }
2939         WARN_ON(osd->o_osd != le64_to_cpu(msg->hdr.src.num));
2940
2941         mutex_lock(&osd->lock);
2942         req = lookup_request(&osd->o_requests, tid);
2943         if (!req) {
2944                 dout("%s osd%d tid %llu unknown\n", __func__, osd->o_osd, tid);
2945                 goto out_unlock_session;
2946         }
2947
2948         m.redirect.oloc.pool_ns = req->r_t.target_oloc.pool_ns;
2949         ret = decode_MOSDOpReply(msg, &m);
2950         m.redirect.oloc.pool_ns = NULL;
2951         if (ret) {
2952                 pr_err("failed to decode MOSDOpReply for tid %llu: %d\n",
2953                        req->r_tid, ret);
2954                 ceph_msg_dump(msg);
2955                 goto fail_request;
2956         }
2957         dout("%s req %p tid %llu flags 0x%llx pgid %llu.%x epoch %u attempt %d v %u'%llu uv %llu\n",
2958              __func__, req, req->r_tid, m.flags, m.pgid.pool, m.pgid.seed,
2959              m.epoch, m.retry_attempt, le32_to_cpu(m.replay_version.epoch),
2960              le64_to_cpu(m.replay_version.version), m.user_version);
2961
2962         if (m.retry_attempt >= 0) {
2963                 if (m.retry_attempt != req->r_attempts - 1) {
2964                         dout("req %p tid %llu retry_attempt %d != %d, ignoring\n",
2965                              req, req->r_tid, m.retry_attempt,
2966                              req->r_attempts - 1);
2967                         goto out_unlock_session;
2968                 }
2969         } else {
2970                 WARN_ON(1); /* MOSDOpReply v4 is assumed */
2971         }
2972
2973         if (!ceph_oloc_empty(&m.redirect.oloc)) {
2974                 dout("req %p tid %llu redirect pool %lld\n", req, req->r_tid,
2975                      m.redirect.oloc.pool);
2976                 unlink_request(osd, req);
2977                 mutex_unlock(&osd->lock);
2978
2979                 /*
2980                  * Not ceph_oloc_copy() - changing pool_ns is not
2981                  * supported.
2982                  */
2983                 req->r_t.target_oloc.pool = m.redirect.oloc.pool;
2984                 req->r_flags |= CEPH_OSD_FLAG_REDIRECTED;
2985                 req->r_tid = 0;
2986                 __submit_request(req, false);
2987                 goto out_unlock_osdc;
2988         }
2989
2990         if (m.num_ops != req->r_num_ops) {
2991                 pr_err("num_ops %d != %d for tid %llu\n", m.num_ops,
2992                        req->r_num_ops, req->r_tid);
2993                 goto fail_request;
2994         }
2995         for (i = 0; i < req->r_num_ops; i++) {
2996                 dout(" req %p tid %llu op %d rval %d len %u\n", req,
2997                      req->r_tid, i, m.rval[i], m.outdata_len[i]);
2998                 req->r_ops[i].rval = m.rval[i];
2999                 req->r_ops[i].outdata_len = m.outdata_len[i];
3000                 data_len += m.outdata_len[i];
3001         }
3002         if (data_len != le32_to_cpu(msg->hdr.data_len)) {
3003                 pr_err("sum of lens %u != %u for tid %llu\n", data_len,
3004                        le32_to_cpu(msg->hdr.data_len), req->r_tid);
3005                 goto fail_request;
3006         }
3007         dout("%s req %p tid %llu result %d data_len %u\n", __func__,
3008              req, req->r_tid, m.result, data_len);
3009
3010         /*
3011          * Since we only ever request ONDISK, we should only ever get
3012          * one (type of) reply back.
3013          */
3014         WARN_ON(!(m.flags & CEPH_OSD_FLAG_ONDISK));
3015         req->r_result = m.result ?: data_len;
3016         finish_request(req);
3017         mutex_unlock(&osd->lock);
3018         up_read(&osdc->lock);
3019
3020         __complete_request(req);
3021         complete_all(&req->r_completion);
3022         ceph_osdc_put_request(req);
3023         return;
3024
3025 fail_request:
3026         complete_request(req, -EIO);
3027 out_unlock_session:
3028         mutex_unlock(&osd->lock);
3029 out_unlock_osdc:
3030         up_read(&osdc->lock);
3031 }
3032
3033 static void set_pool_was_full(struct ceph_osd_client *osdc)
3034 {
3035         struct rb_node *n;
3036
3037         for (n = rb_first(&osdc->osdmap->pg_pools); n; n = rb_next(n)) {
3038                 struct ceph_pg_pool_info *pi =
3039                     rb_entry(n, struct ceph_pg_pool_info, node);
3040
3041                 pi->was_full = __pool_full(pi);
3042         }
3043 }
3044
3045 static bool pool_cleared_full(struct ceph_osd_client *osdc, s64 pool_id)
3046 {
3047         struct ceph_pg_pool_info *pi;
3048
3049         pi = ceph_pg_pool_by_id(osdc->osdmap, pool_id);
3050         if (!pi)
3051                 return false;
3052
3053         return pi->was_full && !__pool_full(pi);
3054 }
3055
3056 static enum calc_target_result
3057 recalc_linger_target(struct ceph_osd_linger_request *lreq)
3058 {
3059         struct ceph_osd_client *osdc = lreq->osdc;
3060         enum calc_target_result ct_res;
3061
3062         ct_res = calc_target(osdc, &lreq->t, &lreq->last_force_resend, true);
3063         if (ct_res == CALC_TARGET_NEED_RESEND) {
3064                 struct ceph_osd *osd;
3065
3066                 osd = lookup_create_osd(osdc, lreq->t.osd, true);
3067                 if (osd != lreq->osd) {
3068                         unlink_linger(lreq->osd, lreq);
3069                         link_linger(osd, lreq);
3070                 }
3071         }
3072
3073         return ct_res;
3074 }
3075
3076 /*
3077  * Requeue requests whose mapping to an OSD has changed.
3078  */
3079 static void scan_requests(struct ceph_osd *osd,
3080                           bool force_resend,
3081                           bool cleared_full,
3082                           bool check_pool_cleared_full,
3083                           struct rb_root *need_resend,
3084                           struct list_head *need_resend_linger)
3085 {
3086         struct ceph_osd_client *osdc = osd->o_osdc;
3087         struct rb_node *n;
3088         bool force_resend_writes;
3089
3090         for (n = rb_first(&osd->o_linger_requests); n; ) {
3091                 struct ceph_osd_linger_request *lreq =
3092                     rb_entry(n, struct ceph_osd_linger_request, node);
3093                 enum calc_target_result ct_res;
3094
3095                 n = rb_next(n); /* recalc_linger_target() */
3096
3097                 dout("%s lreq %p linger_id %llu\n", __func__, lreq,
3098                      lreq->linger_id);
3099                 ct_res = recalc_linger_target(lreq);
3100                 switch (ct_res) {
3101                 case CALC_TARGET_NO_ACTION:
3102                         force_resend_writes = cleared_full ||
3103                             (check_pool_cleared_full &&
3104                              pool_cleared_full(osdc, lreq->t.base_oloc.pool));
3105                         if (!force_resend && !force_resend_writes)
3106                                 break;
3107
3108                         /* fall through */
3109                 case CALC_TARGET_NEED_RESEND:
3110                         cancel_linger_map_check(lreq);
3111                         /*
3112                          * scan_requests() for the previous epoch(s)
3113                          * may have already added it to the list, since
3114                          * it's not unlinked here.
3115                          */
3116                         if (list_empty(&lreq->scan_item))
3117                                 list_add_tail(&lreq->scan_item, need_resend_linger);
3118                         break;
3119                 case CALC_TARGET_POOL_DNE:
3120                         check_linger_pool_dne(lreq);
3121                         break;
3122                 }
3123         }
3124
3125         for (n = rb_first(&osd->o_requests); n; ) {
3126                 struct ceph_osd_request *req =
3127                     rb_entry(n, struct ceph_osd_request, r_node);
3128                 enum calc_target_result ct_res;
3129
3130                 n = rb_next(n); /* unlink_request(), check_pool_dne() */
3131
3132                 dout("%s req %p tid %llu\n", __func__, req, req->r_tid);
3133                 ct_res = calc_target(osdc, &req->r_t,
3134                                      &req->r_last_force_resend, false);
3135                 switch (ct_res) {
3136                 case CALC_TARGET_NO_ACTION:
3137                         force_resend_writes = cleared_full ||
3138                             (check_pool_cleared_full &&
3139                              pool_cleared_full(osdc, req->r_t.base_oloc.pool));
3140                         if (!force_resend &&
3141                             (!(req->r_flags & CEPH_OSD_FLAG_WRITE) ||
3142                              !force_resend_writes))
3143                                 break;
3144
3145                         /* fall through */
3146                 case CALC_TARGET_NEED_RESEND:
3147                         cancel_map_check(req);
3148                         unlink_request(osd, req);
3149                         insert_request(need_resend, req);
3150                         break;
3151                 case CALC_TARGET_POOL_DNE:
3152                         check_pool_dne(req);
3153                         break;
3154                 }
3155         }
3156 }
3157
3158 static int handle_one_map(struct ceph_osd_client *osdc,
3159                           void *p, void *end, bool incremental,
3160                           struct rb_root *need_resend,
3161                           struct list_head *need_resend_linger)
3162 {
3163         struct ceph_osdmap *newmap;
3164         struct rb_node *n;
3165         bool skipped_map = false;
3166         bool was_full;
3167
3168         was_full = ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL);
3169         set_pool_was_full(osdc);
3170
3171         if (incremental)
3172                 newmap = osdmap_apply_incremental(&p, end, osdc->osdmap);
3173         else
3174                 newmap = ceph_osdmap_decode(&p, end);
3175         if (IS_ERR(newmap))
3176                 return PTR_ERR(newmap);
3177
3178         if (newmap != osdc->osdmap) {
3179                 /*
3180                  * Preserve ->was_full before destroying the old map.
3181                  * For pools that weren't in the old map, ->was_full
3182                  * should be false.
3183                  */
3184                 for (n = rb_first(&newmap->pg_pools); n; n = rb_next(n)) {
3185                         struct ceph_pg_pool_info *pi =
3186                             rb_entry(n, struct ceph_pg_pool_info, node);
3187                         struct ceph_pg_pool_info *old_pi;
3188
3189                         old_pi = ceph_pg_pool_by_id(osdc->osdmap, pi->id);
3190                         if (old_pi)
3191                                 pi->was_full = old_pi->was_full;
3192                         else
3193                                 WARN_ON(pi->was_full);
3194                 }
3195
3196                 if (osdc->osdmap->epoch &&
3197                     osdc->osdmap->epoch + 1 < newmap->epoch) {
3198                         WARN_ON(incremental);
3199                         skipped_map = true;
3200                 }
3201
3202                 ceph_osdmap_destroy(osdc->osdmap);
3203                 osdc->osdmap = newmap;
3204         }
3205
3206         was_full &= !ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL);
3207         scan_requests(&osdc->homeless_osd, skipped_map, was_full, true,
3208                       need_resend, need_resend_linger);
3209
3210         for (n = rb_first(&osdc->osds); n; ) {
3211                 struct ceph_osd *osd = rb_entry(n, struct ceph_osd, o_node);
3212
3213                 n = rb_next(n); /* close_osd() */
3214
3215                 scan_requests(osd, skipped_map, was_full, true, need_resend,
3216                               need_resend_linger);
3217                 if (!ceph_osd_is_up(osdc->osdmap, osd->o_osd) ||
3218                     memcmp(&osd->o_con.peer_addr,
3219                            ceph_osd_addr(osdc->osdmap, osd->o_osd),
3220                            sizeof(struct ceph_entity_addr)))
3221                         close_osd(osd);
3222         }
3223
3224         return 0;
3225 }
3226
3227 static void kick_requests(struct ceph_osd_client *osdc,
3228                           struct rb_root *need_resend,
3229                           struct list_head *need_resend_linger)
3230 {
3231         struct ceph_osd_linger_request *lreq, *nlreq;
3232         struct rb_node *n;
3233
3234         for (n = rb_first(need_resend); n; ) {
3235                 struct ceph_osd_request *req =
3236                     rb_entry(n, struct ceph_osd_request, r_node);
3237                 struct ceph_osd *osd;
3238
3239                 n = rb_next(n);
3240                 erase_request(need_resend, req); /* before link_request() */
3241
3242                 WARN_ON(req->r_osd);
3243                 calc_target(osdc, &req->r_t, NULL, false);
3244                 osd = lookup_create_osd(osdc, req->r_t.osd, true);
3245                 link_request(osd, req);
3246                 if (!req->r_linger) {
3247                         if (!osd_homeless(osd) && !req->r_t.paused)
3248                                 send_request(req);
3249                 } else {
3250                         cancel_linger_request(req);
3251                 }
3252         }
3253
3254         list_for_each_entry_safe(lreq, nlreq, need_resend_linger, scan_item) {
3255                 if (!osd_homeless(lreq->osd))
3256                         send_linger(lreq);
3257
3258                 list_del_init(&lreq->scan_item);
3259         }
3260 }
3261
3262 /*
3263  * Process updated osd map.
3264  *
3265  * The message contains any number of incremental and full maps, normally
3266  * indicating some sort of topology change in the cluster.  Kick requests
3267  * off to different OSDs as needed.
3268  */
3269 void ceph_osdc_handle_map(struct ceph_osd_client *osdc, struct ceph_msg *msg)
3270 {
3271         void *p = msg->front.iov_base;
3272         void *const end = p + msg->front.iov_len;
3273         u32 nr_maps, maplen;
3274         u32 epoch;
3275         struct ceph_fsid fsid;
3276         struct rb_root need_resend = RB_ROOT;
3277         LIST_HEAD(need_resend_linger);
3278         bool handled_incremental = false;
3279         bool was_pauserd, was_pausewr;
3280         bool pauserd, pausewr;
3281         int err;
3282
3283         dout("%s have %u\n", __func__, osdc->osdmap->epoch);
3284         down_write(&osdc->lock);
3285
3286         /* verify fsid */
3287         ceph_decode_need(&p, end, sizeof(fsid), bad);
3288         ceph_decode_copy(&p, &fsid, sizeof(fsid));
3289         if (ceph_check_fsid(osdc->client, &fsid) < 0)
3290                 goto bad;
3291
3292         was_pauserd = ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSERD);
3293         was_pausewr = ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSEWR) ||
3294                       ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL) ||
3295                       have_pool_full(osdc);
3296
3297         /* incremental maps */
3298         ceph_decode_32_safe(&p, end, nr_maps, bad);
3299         dout(" %d inc maps\n", nr_maps);
3300         while (nr_maps > 0) {
3301                 ceph_decode_need(&p, end, 2*sizeof(u32), bad);
3302                 epoch = ceph_decode_32(&p);
3303                 maplen = ceph_decode_32(&p);
3304                 ceph_decode_need(&p, end, maplen, bad);
3305                 if (osdc->osdmap->epoch &&
3306                     osdc->osdmap->epoch + 1 == epoch) {
3307                         dout("applying incremental map %u len %d\n",
3308                              epoch, maplen);
3309                         err = handle_one_map(osdc, p, p + maplen, true,
3310                                              &need_resend, &need_resend_linger);
3311                         if (err)
3312                                 goto bad;
3313                         handled_incremental = true;
3314                 } else {
3315                         dout("ignoring incremental map %u len %d\n",
3316                              epoch, maplen);
3317                 }
3318                 p += maplen;
3319                 nr_maps--;
3320         }
3321         if (handled_incremental)
3322                 goto done;
3323
3324         /* full maps */
3325         ceph_decode_32_safe(&p, end, nr_maps, bad);
3326         dout(" %d full maps\n", nr_maps);
3327         while (nr_maps) {
3328                 ceph_decode_need(&p, end, 2*sizeof(u32), bad);
3329                 epoch = ceph_decode_32(&p);
3330                 maplen = ceph_decode_32(&p);
3331                 ceph_decode_need(&p, end, maplen, bad);
3332                 if (nr_maps > 1) {
3333                         dout("skipping non-latest full map %u len %d\n",
3334                              epoch, maplen);
3335                 } else if (osdc->osdmap->epoch >= epoch) {
3336                         dout("skipping full map %u len %d, "
3337                              "older than our %u\n", epoch, maplen,
3338                              osdc->osdmap->epoch);
3339                 } else {
3340                         dout("taking full map %u len %d\n", epoch, maplen);
3341                         err = handle_one_map(osdc, p, p + maplen, false,
3342                                              &need_resend, &need_resend_linger);
3343                         if (err)
3344                                 goto bad;
3345                 }
3346                 p += maplen;
3347                 nr_maps--;
3348         }
3349
3350 done:
3351         /*
3352          * Subscribe to subsequent osdmap updates if we are paused or
3353          * full, so we find out when requests can resume and we stop
3354          * returning ENOSPC.
3355          */
3356         pauserd = ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSERD);
3357         pausewr = ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSEWR) ||
3358                   ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL) ||
3359                   have_pool_full(osdc);
3360         if (was_pauserd || was_pausewr || pauserd || pausewr ||
3361             osdc->osdmap->epoch < osdc->epoch_barrier)
3362                 maybe_request_map(osdc);
3363
3364         kick_requests(osdc, &need_resend, &need_resend_linger);
3365
3366         ceph_osdc_abort_on_full(osdc);
3367         ceph_monc_got_map(&osdc->client->monc, CEPH_SUB_OSDMAP,
3368                           osdc->osdmap->epoch);
3369         up_write(&osdc->lock);
3370         wake_up_all(&osdc->client->auth_wq);
3371         return;
3372
3373 bad:
3374         pr_err("osdc handle_map corrupt msg\n");
3375         ceph_msg_dump(msg);
3376         up_write(&osdc->lock);
3377 }
3378
3379 /*
3380  * Resubmit requests pending on the given osd.
3381  */
3382 static void kick_osd_requests(struct ceph_osd *osd)
3383 {
3384         struct rb_node *n;
3385
3386         for (n = rb_first(&osd->o_requests); n; ) {
3387                 struct ceph_osd_request *req =
3388                     rb_entry(n, struct ceph_osd_request, r_node);
3389
3390                 n = rb_next(n); /* cancel_linger_request() */
3391
3392                 if (!req->r_linger) {
3393                         if (!req->r_t.paused)
3394                                 send_request(req);
3395                 } else {
3396                         cancel_linger_request(req);
3397                 }
3398         }
3399         for (n = rb_first(&osd->o_linger_requests); n; n = rb_next(n)) {
3400                 struct ceph_osd_linger_request *lreq =
3401                     rb_entry(n, struct ceph_osd_linger_request, node);
3402
3403                 send_linger(lreq);
3404         }
3405 }
3406
3407 /*
3408  * If the osd connection drops, we need to resubmit all requests.
3409  */
3410 static void osd_fault(struct ceph_connection *con)
3411 {
3412         struct ceph_osd *osd = con->private;
3413         struct ceph_osd_client *osdc = osd->o_osdc;
3414
3415         dout("%s osd %p osd%d\n", __func__, osd, osd->o_osd);
3416
3417         down_write(&osdc->lock);
3418         if (!osd_registered(osd)) {
3419                 dout("%s osd%d unknown\n", __func__, osd->o_osd);
3420                 goto out_unlock;
3421         }
3422
3423         if (!reopen_osd(osd))
3424                 kick_osd_requests(osd);
3425         maybe_request_map(osdc);
3426
3427 out_unlock:
3428         up_write(&osdc->lock);
3429 }
3430
3431 /*
3432  * Process osd watch notifications
3433  */
3434 static void handle_watch_notify(struct ceph_osd_client *osdc,
3435                                 struct ceph_msg *msg)
3436 {
3437         void *p = msg->front.iov_base;
3438         void *const end = p + msg->front.iov_len;
3439         struct ceph_osd_linger_request *lreq;
3440         struct linger_work *lwork;
3441         u8 proto_ver, opcode;
3442         u64 cookie, notify_id;
3443         u64 notifier_id = 0;
3444         s32 return_code = 0;
3445         void *payload = NULL;
3446         u32 payload_len = 0;
3447
3448         ceph_decode_8_safe(&p, end, proto_ver, bad);
3449         ceph_decode_8_safe(&p, end, opcode, bad);
3450         ceph_decode_64_safe(&p, end, cookie, bad);
3451         p += 8; /* skip ver */
3452         ceph_decode_64_safe(&p, end, notify_id, bad);
3453
3454         if (proto_ver >= 1) {
3455                 ceph_decode_32_safe(&p, end, payload_len, bad);
3456                 ceph_decode_need(&p, end, payload_len, bad);
3457                 payload = p;
3458                 p += payload_len;
3459         }
3460
3461         if (le16_to_cpu(msg->hdr.version) >= 2)
3462                 ceph_decode_32_safe(&p, end, return_code, bad);
3463
3464         if (le16_to_cpu(msg->hdr.version) >= 3)
3465                 ceph_decode_64_safe(&p, end, notifier_id, bad);
3466
3467         down_read(&osdc->lock);
3468         lreq = lookup_linger_osdc(&osdc->linger_requests, cookie);
3469         if (!lreq) {
3470                 dout("%s opcode %d cookie %llu dne\n", __func__, opcode,
3471                      cookie);
3472                 goto out_unlock_osdc;
3473         }
3474
3475         mutex_lock(&lreq->lock);
3476         dout("%s opcode %d cookie %llu lreq %p is_watch %d\n", __func__,
3477              opcode, cookie, lreq, lreq->is_watch);
3478         if (opcode == CEPH_WATCH_EVENT_DISCONNECT) {
3479                 if (!lreq->last_error) {
3480                         lreq->last_error = -ENOTCONN;
3481                         queue_watch_error(lreq);
3482                 }
3483         } else if (!lreq->is_watch) {
3484                 /* CEPH_WATCH_EVENT_NOTIFY_COMPLETE */
3485                 if (lreq->notify_id && lreq->notify_id != notify_id) {
3486                         dout("lreq %p notify_id %llu != %llu, ignoring\n", lreq,
3487                              lreq->notify_id, notify_id);
3488                 } else if (!completion_done(&lreq->notify_finish_wait)) {
3489                         struct ceph_msg_data *data =
3490                             list_first_entry_or_null(&msg->data,
3491                                                      struct ceph_msg_data,
3492                                                      links);
3493
3494                         if (data) {
3495                                 if (lreq->preply_pages) {
3496                                         WARN_ON(data->type !=
3497                                                         CEPH_MSG_DATA_PAGES);
3498                                         *lreq->preply_pages = data->pages;
3499                                         *lreq->preply_len = data->length;
3500                                 } else {
3501                                         ceph_release_page_vector(data->pages,
3502                                                calc_pages_for(0, data->length));
3503                                 }
3504                         }
3505                         lreq->notify_finish_error = return_code;
3506                         complete_all(&lreq->notify_finish_wait);
3507                 }
3508         } else {
3509                 /* CEPH_WATCH_EVENT_NOTIFY */
3510                 lwork = lwork_alloc(lreq, do_watch_notify);
3511                 if (!lwork) {
3512                         pr_err("failed to allocate notify-lwork\n");
3513                         goto out_unlock_lreq;
3514                 }
3515
3516                 lwork->notify.notify_id = notify_id;
3517                 lwork->notify.notifier_id = notifier_id;
3518                 lwork->notify.payload = payload;
3519                 lwork->notify.payload_len = payload_len;
3520                 lwork->notify.msg = ceph_msg_get(msg);
3521                 lwork_queue(lwork);
3522         }
3523
3524 out_unlock_lreq:
3525         mutex_unlock(&lreq->lock);
3526 out_unlock_osdc:
3527         up_read(&osdc->lock);
3528         return;
3529
3530 bad:
3531         pr_err("osdc handle_watch_notify corrupt msg\n");
3532 }
3533
3534 /*
3535  * Register request, send initial attempt.
3536  */
3537 int ceph_osdc_start_request(struct ceph_osd_client *osdc,
3538                             struct ceph_osd_request *req,
3539                             bool nofail)
3540 {
3541         down_read(&osdc->lock);
3542         submit_request(req, false);
3543         up_read(&osdc->lock);
3544
3545         return 0;
3546 }
3547 EXPORT_SYMBOL(ceph_osdc_start_request);
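
/*
 * A minimal sketch of asynchronous use, assuming the caller has already
 * built a request: set ->r_callback before starting, and the request is
 * completed from __complete_request() once ->r_result is set.  The
 * example_* names are hypothetical.
 *
 *    static void example_done(struct ceph_osd_request *req)
 *    {
 *            pr_debug("tid %llu result %d\n", req->r_tid, req->r_result);
 *            ceph_osdc_put_request(req);
 *    }
 *
 *    static void example_submit(struct ceph_osd_client *osdc,
 *                               struct ceph_osd_request *req)
 *    {
 *            req->r_callback = example_done;
 *            ceph_osdc_start_request(osdc, req, false);
 *    }
 */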
3548
3549 /*
3550  * Unregister a registered request.  The request is not completed:
3551  * ->r_result isn't set and __complete_request() isn't called.
3552  */
3553 void ceph_osdc_cancel_request(struct ceph_osd_request *req)
3554 {
3555         struct ceph_osd_client *osdc = req->r_osdc;
3556
3557         down_write(&osdc->lock);
3558         if (req->r_osd)
3559                 cancel_request(req);
3560         up_write(&osdc->lock);
3561 }
3562 EXPORT_SYMBOL(ceph_osdc_cancel_request);
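
/*
 * A minimal sketch of a caller that gives up on a request (e.g. after an
 * interrupted wait, cf. wait_request_timeout() below) before dropping
 * its reference:
 *
 *    if (wait_for_completion_killable(&req->r_completion))
 *            ceph_osdc_cancel_request(req);
 *    ceph_osdc_put_request(req);
 */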
3563
3564 /*
3565  * @timeout: in jiffies, 0 means "wait forever"
3566  */
3567 static int wait_request_timeout(struct ceph_osd_request *req,
3568                                 unsigned long timeout)
3569 {
3570         long left;
3571
3572         dout("%s req %p tid %llu\n", __func__, req, req->r_tid);
3573         left = wait_for_completion_killable_timeout(&req->r_completion,
3574                                                 ceph_timeout_jiffies(timeout));
3575         if (left <= 0) {
3576                 left = left ?: -ETIMEDOUT;
3577                 ceph_osdc_cancel_request(req);
3578         } else {
3579                 left = req->r_result; /* completed */
3580         }
3581
3582         return left;
3583 }
3584
3585 /*
3586  * wait for a request to complete
3587  */
3588 int ceph_osdc_wait_request(struct ceph_osd_client *osdc,
3589                            struct ceph_osd_request *req)
3590 {
3591         return wait_request_timeout(req, 0);
3592 }
3593 EXPORT_SYMBOL(ceph_osdc_wait_request);
3594
3595 /*
3596  * sync - wait for all in-flight write requests to complete; avoid starvation.
3597  */
3598 void ceph_osdc_sync(struct ceph_osd_client *osdc)
3599 {
3600         struct rb_node *n, *p;
3601         u64 last_tid = atomic64_read(&osdc->last_tid);
3602
3603 again:
3604         down_read(&osdc->lock);
3605         for (n = rb_first(&osdc->osds); n; n = rb_next(n)) {
3606                 struct ceph_osd *osd = rb_entry(n, struct ceph_osd, o_node);
3607
3608                 mutex_lock(&osd->lock);
3609                 for (p = rb_first(&osd->o_requests); p; p = rb_next(p)) {
3610                         struct ceph_osd_request *req =
3611                             rb_entry(p, struct ceph_osd_request, r_node);
3612
3613                         if (req->r_tid > last_tid)
3614                                 break;
3615
3616                         if (!(req->r_flags & CEPH_OSD_FLAG_WRITE))
3617                                 continue;
3618
3619                         ceph_osdc_get_request(req);
3620                         mutex_unlock(&osd->lock);
3621                         up_read(&osdc->lock);
3622                         dout("%s waiting on req %p tid %llu last_tid %llu\n",
3623                              __func__, req, req->r_tid, last_tid);
3624                         wait_for_completion(&req->r_completion);
3625                         ceph_osdc_put_request(req);
3626                         goto again;
3627                 }
3628
3629                 mutex_unlock(&osd->lock);
3630         }
3631
3632         up_read(&osdc->lock);
3633         dout("%s done last_tid %llu\n", __func__, last_tid);
3634 }
3635 EXPORT_SYMBOL(ceph_osdc_sync);
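
/*
 * A minimal usage sketch, assuming a hypothetical filesystem sync path:
 * wait for all writes submitted so far to complete before reporting the
 * sync as done:
 *
 *    ceph_osdc_sync(osdc);
 */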
3636
3637 static struct ceph_osd_request *
3638 alloc_linger_request(struct ceph_osd_linger_request *lreq)
3639 {
3640         struct ceph_osd_request *req;
3641
3642         req = ceph_osdc_alloc_request(lreq->osdc, NULL, 1, false, GFP_NOIO);
3643         if (!req)
3644                 return NULL;
3645
3646         ceph_oid_copy(&req->r_base_oid, &lreq->t.base_oid);
3647         ceph_oloc_copy(&req->r_base_oloc, &lreq->t.base_oloc);
3648
3649         if (ceph_osdc_alloc_messages(req, GFP_NOIO)) {
3650                 ceph_osdc_put_request(req);
3651                 return NULL;
3652         }
3653
3654         return req;
3655 }
3656
3657 /*
3658  * Returns a handle, caller owns a ref.
3659  */
3660 struct ceph_osd_linger_request *
3661 ceph_osdc_watch(struct ceph_osd_client *osdc,
3662                 struct ceph_object_id *oid,
3663                 struct ceph_object_locator *oloc,
3664                 rados_watchcb2_t wcb,
3665                 rados_watcherrcb_t errcb,
3666                 void *data)
3667 {
3668         struct ceph_osd_linger_request *lreq;
3669         int ret;
3670
3671         lreq = linger_alloc(osdc);
3672         if (!lreq)
3673                 return ERR_PTR(-ENOMEM);
3674
3675         lreq->is_watch = true;
3676         lreq->wcb = wcb;
3677         lreq->errcb = errcb;
3678         lreq->data = data;
3679         lreq->watch_valid_thru = jiffies;
3680
3681         ceph_oid_copy(&lreq->t.base_oid, oid);
3682         ceph_oloc_copy(&lreq->t.base_oloc, oloc);
3683         lreq->t.flags = CEPH_OSD_FLAG_WRITE;
3684         ktime_get_real_ts(&lreq->mtime);
3685
3686         lreq->reg_req = alloc_linger_request(lreq);
3687         if (!lreq->reg_req) {
3688                 ret = -ENOMEM;
3689                 goto err_put_lreq;
3690         }
3691
3692         lreq->ping_req = alloc_linger_request(lreq);
3693         if (!lreq->ping_req) {
3694                 ret = -ENOMEM;
3695                 goto err_put_lreq;
3696         }
3697
3698         down_write(&osdc->lock);
3699         linger_register(lreq); /* before osd_req_op_* */
3700         osd_req_op_watch_init(lreq->reg_req, 0, lreq->linger_id,
3701                               CEPH_OSD_WATCH_OP_WATCH);
3702         osd_req_op_watch_init(lreq->ping_req, 0, lreq->linger_id,
3703                               CEPH_OSD_WATCH_OP_PING);
3704         linger_submit(lreq);
3705         up_write(&osdc->lock);
3706
3707         ret = linger_reg_commit_wait(lreq);
3708         if (ret) {
3709                 linger_cancel(lreq);
3710                 goto err_put_lreq;
3711         }
3712
3713         return lreq;
3714
3715 err_put_lreq:
3716         linger_put(lreq);
3717         return ERR_PTR(ret);
3718 }
3719 EXPORT_SYMBOL(ceph_osdc_watch);
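
/*
 * A minimal sketch of establishing a watch, assuming a hypothetical
 * example_dev structure that carries the watched object; the callback
 * signatures match rados_watchcb2_t and rados_watcherrcb_t:
 *
 *    static void example_watch_cb(void *arg, u64 notify_id, u64 cookie,
 *                                 u64 notifier_id, void *data,
 *                                 size_t data_len)
 *    {
 *            struct example_dev *dev = arg;
 *
 *            ceph_osdc_notify_ack(dev->osdc, &dev->oid, &dev->oloc,
 *                                 notify_id, cookie, NULL, 0);
 *    }
 *
 *    static void example_watch_errcb(void *arg, u64 cookie, int err)
 *    {
 *            pr_err("example: watch error %d\n", err);
 *    }
 *
 *    dev->watch_handle = ceph_osdc_watch(dev->osdc, &dev->oid,
 *                                        &dev->oloc, example_watch_cb,
 *                                        example_watch_errcb, dev);
 *    if (IS_ERR(dev->watch_handle))
 *            return PTR_ERR(dev->watch_handle);
 */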
3720
3721 /*
3722  * Releases a ref.
3723  *
3724  * Times out after mount_timeout to preserve rbd unmap behaviour
3725  * introduced in 2894e1d76974 ("rbd: timeout watch teardown on unmap
3726  * with mount_timeout").
3727  */
3728 int ceph_osdc_unwatch(struct ceph_osd_client *osdc,
3729                       struct ceph_osd_linger_request *lreq)
3730 {
3731         struct ceph_options *opts = osdc->client->options;
3732         struct ceph_osd_request *req;
3733         int ret;
3734
3735         req = ceph_osdc_alloc_request(osdc, NULL, 1, false, GFP_NOIO);
3736         if (!req)
3737                 return -ENOMEM;
3738
3739         ceph_oid_copy(&req->r_base_oid, &lreq->t.base_oid);
3740         ceph_oloc_copy(&req->r_base_oloc, &lreq->t.base_oloc);
3741         req->r_flags = CEPH_OSD_FLAG_WRITE;
3742         ktime_get_real_ts(&req->r_mtime);
3743         osd_req_op_watch_init(req, 0, lreq->linger_id,
3744                               CEPH_OSD_WATCH_OP_UNWATCH);
3745
3746         ret = ceph_osdc_alloc_messages(req, GFP_NOIO);
3747         if (ret)
3748                 goto out_put_req;
3749
3750         ceph_osdc_start_request(osdc, req, false);
3751         linger_cancel(lreq);
3752         linger_put(lreq);
3753         ret = wait_request_timeout(req, opts->mount_timeout);
3754
3755 out_put_req:
3756         ceph_osdc_put_request(req);
3757         return ret;
3758 }
3759 EXPORT_SYMBOL(ceph_osdc_unwatch);
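
/*
 * A minimal teardown sketch, continuing the hypothetical watch example
 * above: drop the watch, then drain any notify callbacks still queued:
 *
 *    ceph_osdc_unwatch(dev->osdc, dev->watch_handle);
 *    ceph_osdc_flush_notifies(dev->osdc);
 */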
3760
3761 static int osd_req_op_notify_ack_init(struct ceph_osd_request *req, int which,
3762                                       u64 notify_id, u64 cookie, void *payload,
3763                                       size_t payload_len)
3764 {
3765         struct ceph_osd_req_op *op;
3766         struct ceph_pagelist *pl;
3767         int ret;
3768
3769         op = _osd_req_op_init(req, which, CEPH_OSD_OP_NOTIFY_ACK, 0);
3770
3771         pl = kmalloc(sizeof(*pl), GFP_NOIO);
3772         if (!pl)
3773                 return -ENOMEM;
3774
3775         ceph_pagelist_init(pl);
3776         ret = ceph_pagelist_encode_64(pl, notify_id);
3777         ret |= ceph_pagelist_encode_64(pl, cookie);
3778         if (payload) {
3779                 ret |= ceph_pagelist_encode_32(pl, payload_len);
3780                 ret |= ceph_pagelist_append(pl, payload, payload_len);
3781         } else {
3782                 ret |= ceph_pagelist_encode_32(pl, 0);
3783         }
3784         if (ret) {
3785                 ceph_pagelist_release(pl);
3786                 return -ENOMEM;
3787         }
3788
3789         ceph_osd_data_pagelist_init(&op->notify_ack.request_data, pl);
3790         op->indata_len = pl->length;
3791         return 0;
3792 }
3793
3794 int ceph_osdc_notify_ack(struct ceph_osd_client *osdc,
3795                          struct ceph_object_id *oid,
3796                          struct ceph_object_locator *oloc,
3797                          u64 notify_id,
3798                          u64 cookie,
3799                          void *payload,
3800                          size_t payload_len)
3801 {
3802         struct ceph_osd_request *req;
3803         int ret;
3804
3805         req = ceph_osdc_alloc_request(osdc, NULL, 1, false, GFP_NOIO);
3806         if (!req)
3807                 return -ENOMEM;
3808
3809         ceph_oid_copy(&req->r_base_oid, oid);
3810         ceph_oloc_copy(&req->r_base_oloc, oloc);
3811         req->r_flags = CEPH_OSD_FLAG_READ;
3812
3813         ret = ceph_osdc_alloc_messages(req, GFP_NOIO);
3814         if (ret)
3815                 goto out_put_req;
3816
3817         ret = osd_req_op_notify_ack_init(req, 0, notify_id, cookie, payload,
3818                                          payload_len);
3819         if (ret)
3820                 goto out_put_req;
3821
3822         ceph_osdc_start_request(osdc, req, false);
3823         ret = ceph_osdc_wait_request(osdc, req);
3824
3825 out_put_req:
3826         ceph_osdc_put_request(req);
3827         return ret;
3828 }
3829 EXPORT_SYMBOL(ceph_osdc_notify_ack);
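
/*
 * A minimal sketch of acking a notify with an optional reply payload
 * (example_ack and example_ack_len are hypothetical); passing NULL/0,
 * as in the watch example above, is also valid:
 *
 *    ret = ceph_osdc_notify_ack(osdc, &oid, &oloc, notify_id, cookie,
 *                               example_ack, example_ack_len);
 */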
3830
3831 static int osd_req_op_notify_init(struct ceph_osd_request *req, int which,
3832                                   u64 cookie, u32 prot_ver, u32 timeout,
3833                                   void *payload, size_t payload_len)
3834 {
3835         struct ceph_osd_req_op *op;
3836         struct ceph_pagelist *pl;
3837         int ret;
3838
3839         op = _osd_req_op_init(req, which, CEPH_OSD_OP_NOTIFY, 0);
3840         op->notify.cookie = cookie;
3841
3842         pl = kmalloc(sizeof(*pl), GFP_NOIO);
3843         if (!pl)
3844                 return -ENOMEM;
3845
3846         ceph_pagelist_init(pl);
3847         ret = ceph_pagelist_encode_32(pl, 1); /* prot_ver */
3848         ret |= ceph_pagelist_encode_32(pl, timeout);
3849         ret |= ceph_pagelist_encode_32(pl, payload_len);
3850         ret |= ceph_pagelist_append(pl, payload, payload_len);
3851         if (ret) {
3852                 ceph_pagelist_release(pl);
3853                 return -ENOMEM;
3854         }
3855
3856         ceph_osd_data_pagelist_init(&op->notify.request_data, pl);
3857         op->indata_len = pl->length;
3858         return 0;
3859 }
3860
3861 /*
3862  * @timeout: in seconds
3863  *
3864  * @preply_{pages,len} are initialized both on success and error.
3865  * The caller is responsible for:
3866  *
3867  *     ceph_release_page_vector(reply_pages, calc_pages_for(0, reply_len))
3868  */
3869 int ceph_osdc_notify(struct ceph_osd_client *osdc,
3870                      struct ceph_object_id *oid,
3871                      struct ceph_object_locator *oloc,
3872                      void *payload,
3873                      size_t payload_len,
3874                      u32 timeout,
3875                      struct page ***preply_pages,
3876                      size_t *preply_len)
3877 {
3878         struct ceph_osd_linger_request *lreq;
3879         struct page **pages;
3880         int ret;
3881
3882         WARN_ON(!timeout);
3883         if (preply_pages) {
3884                 *preply_pages = NULL;
3885                 *preply_len = 0;
3886         }
3887
3888         lreq = linger_alloc(osdc);
3889         if (!lreq)
3890                 return -ENOMEM;
3891
3892         lreq->preply_pages = preply_pages;
3893         lreq->preply_len = preply_len;
3894
3895         ceph_oid_copy(&lreq->t.base_oid, oid);
3896         ceph_oloc_copy(&lreq->t.base_oloc, oloc);
3897         lreq->t.flags = CEPH_OSD_FLAG_READ;
3898
3899         lreq->reg_req = alloc_linger_request(lreq);
3900         if (!lreq->reg_req) {
3901                 ret = -ENOMEM;
3902                 goto out_put_lreq;
3903         }
3904
3905         /* for notify_id */
3906         pages = ceph_alloc_page_vector(1, GFP_NOIO);
3907         if (IS_ERR(pages)) {
3908                 ret = PTR_ERR(pages);
3909                 goto out_put_lreq;
3910         }
3911
3912         down_write(&osdc->lock);
3913         linger_register(lreq); /* before osd_req_op_* */
3914         ret = osd_req_op_notify_init(lreq->reg_req, 0, lreq->linger_id, 1,
3915                                      timeout, payload, payload_len);
3916         if (ret) {
3917                 linger_unregister(lreq);
3918                 up_write(&osdc->lock);
3919                 ceph_release_page_vector(pages, 1);
3920                 goto out_put_lreq;
3921         }
3922         ceph_osd_data_pages_init(osd_req_op_data(lreq->reg_req, 0, notify,
3923                                                  response_data),
3924                                  pages, PAGE_SIZE, 0, false, true);
3925         linger_submit(lreq);
3926         up_write(&osdc->lock);
3927
3928         ret = linger_reg_commit_wait(lreq);
3929         if (!ret)
3930                 ret = linger_notify_finish_wait(lreq);
3931         else
3932                 dout("lreq %p failed to initiate notify %d\n", lreq, ret);
3933
3934         linger_cancel(lreq);
3935 out_put_lreq:
3936         linger_put(lreq);
3937         return ret;
3938 }
3939 EXPORT_SYMBOL(ceph_osdc_notify);
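
/*
 * A minimal sketch of sending a notify and releasing the reply as
 * described above (example_payload and the 5 second timeout are
 * hypothetical):
 *
 *    struct page **reply_pages;
 *    size_t reply_len;
 *    int ret;
 *
 *    ret = ceph_osdc_notify(osdc, &oid, &oloc, example_payload,
 *                           example_payload_len, 5, &reply_pages,
 *                           &reply_len);
 *    if (reply_pages)
 *            ceph_release_page_vector(reply_pages,
 *                                     calc_pages_for(0, reply_len));
 *    return ret;
 */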
3940
3941 /*
3942  * Return the number of milliseconds since the watch was last
3943  * confirmed, or an error.  If there is an error, the watch is no
3944  * longer valid, and should be destroyed with ceph_osdc_unwatch().
3945  */
3946 int ceph_osdc_watch_check(struct ceph_osd_client *osdc,
3947                           struct ceph_osd_linger_request *lreq)
3948 {
3949         unsigned long stamp, age;
3950         int ret;
3951
3952         down_read(&osdc->lock);
3953         mutex_lock(&lreq->lock);
3954         stamp = lreq->watch_valid_thru;
3955         if (!list_empty(&lreq->pending_lworks)) {
3956                 struct linger_work *lwork =
3957                     list_first_entry(&lreq->pending_lworks,
3958                                      struct linger_work,
3959                                      pending_item);
3960
3961                 if (time_before(lwork->queued_stamp, stamp))
3962                         stamp = lwork->queued_stamp;
3963         }
3964         age = jiffies - stamp;
3965         dout("%s lreq %p linger_id %llu age %lu last_error %d\n", __func__,
3966              lreq, lreq->linger_id, age, lreq->last_error);
3967         /* we are truncating to msecs, so return a safe upper bound */
3968         ret = lreq->last_error ?: 1 + jiffies_to_msecs(age);
3969
3970         mutex_unlock(&lreq->lock);
3971         up_read(&osdc->lock);
3972         return ret;
3973 }
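
/*
 * A minimal sketch of a periodic validity check, assuming the
 * hypothetical example_dev from the watch example above; a negative
 * return means the watch must be torn down and re-established:
 *
 *    int ret = ceph_osdc_watch_check(osdc, dev->watch_handle);
 *
 *    if (ret < 0)
 *            example_reestablish_watch(dev);
 *    else
 *            pr_debug("watch last confirmed no more than %d ms ago\n", ret);
 */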
3974
3975 static int decode_watcher(void **p, void *end, struct ceph_watch_item *item)
3976 {
3977         u8 struct_v;
3978         u32 struct_len;
3979         int ret;
3980
3981         ret = ceph_start_decoding(p, end, 2, "watch_item_t",
3982                                   &struct_v, &struct_len);
3983         if (ret)
3984                 return ret;
3985
3986         ceph_decode_copy(p, &item->name, sizeof(item->name));
3987         item->cookie = ceph_decode_64(p);
3988         *p += 4; /* skip timeout_seconds */
3989         if (struct_v >= 2) {
3990                 ceph_decode_copy(p, &item->addr, sizeof(item->addr));
3991                 ceph_decode_addr(&item->addr);
3992         }
3993
3994         dout("%s %s%llu cookie %llu addr %s\n", __func__,
3995              ENTITY_NAME(item->name), item->cookie,
3996              ceph_pr_addr(&item->addr.in_addr));
3997         return 0;
3998 }
3999
4000 static int decode_watchers(void **p, void *end,
4001                            struct ceph_watch_item **watchers,
4002                            u32 *num_watchers)
4003 {
4004         u8 struct_v;
4005         u32 struct_len;
4006         int i;
4007         int ret;
4008
4009         ret = ceph_start_decoding(p, end, 1, "obj_list_watch_response_t",
4010                                   &struct_v, &struct_len);
4011         if (ret)
4012                 return ret;
4013
4014         *num_watchers = ceph_decode_32(p);
4015         *watchers = kcalloc(*num_watchers, sizeof(**watchers), GFP_NOIO);
4016         if (!*watchers)
4017                 return -ENOMEM;
4018
4019         for (i = 0; i < *num_watchers; i++) {
4020                 ret = decode_watcher(p, end, *watchers + i);
4021                 if (ret) {
4022                         kfree(*watchers);
4023                         return ret;
4024                 }
4025         }
4026
4027         return 0;
4028 }
4029
4030 /*
4031  * On success, the caller is responsible for:
4032  *
4033  *     kfree(watchers);
4034  */
4035 int ceph_osdc_list_watchers(struct ceph_osd_client *osdc,
4036                             struct ceph_object_id *oid,
4037                             struct ceph_object_locator *oloc,
4038                             struct ceph_watch_item **watchers,
4039                             u32 *num_watchers)
4040 {
4041         struct ceph_osd_request *req;
4042         struct page **pages;
4043         int ret;
4044
4045         req = ceph_osdc_alloc_request(osdc, NULL, 1, false, GFP_NOIO);
4046         if (!req)
4047                 return -ENOMEM;
4048
4049         ceph_oid_copy(&req->r_base_oid, oid);
4050         ceph_oloc_copy(&req->r_base_oloc, oloc);
4051         req->r_flags = CEPH_OSD_FLAG_READ;
4052
4053         ret = ceph_osdc_alloc_messages(req, GFP_NOIO);
4054         if (ret)
4055                 goto out_put_req;
4056
4057         pages = ceph_alloc_page_vector(1, GFP_NOIO);
4058         if (IS_ERR(pages)) {
4059                 ret = PTR_ERR(pages);
4060                 goto out_put_req;
4061         }
4062
4063         osd_req_op_init(req, 0, CEPH_OSD_OP_LIST_WATCHERS, 0);
4064         ceph_osd_data_pages_init(osd_req_op_data(req, 0, list_watchers,
4065                                                  response_data),
4066                                  pages, PAGE_SIZE, 0, false, true);
4067
4068         ceph_osdc_start_request(osdc, req, false);
4069         ret = ceph_osdc_wait_request(osdc, req);
4070         if (ret >= 0) {
4071                 void *p = page_address(pages[0]);
4072                 void *const end = p + req->r_ops[0].outdata_len;
4073
4074                 ret = decode_watchers(&p, end, watchers, num_watchers);
4075         }
4076
4077 out_put_req:
4078         ceph_osdc_put_request(req);
4079         return ret;
4080 }
4081 EXPORT_SYMBOL(ceph_osdc_list_watchers);
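
/*
 * A minimal sketch of a hypothetical caller, including the kfree()
 * noted above:
 *
 *    struct ceph_watch_item *watchers;
 *    u32 num_watchers, i;
 *    int ret;
 *
 *    ret = ceph_osdc_list_watchers(osdc, &oid, &oloc, &watchers,
 *                                  &num_watchers);
 *    if (ret)
 *            return ret;
 *
 *    for (i = 0; i < num_watchers; i++)
 *            pr_info("watcher %s%llu cookie %llu\n",
 *                    ENTITY_NAME(watchers[i].name), watchers[i].cookie);
 *    kfree(watchers);
 */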
4082
4083 /*
4084  * Call all pending notify callbacks - for use after a watch is
4085  * unregistered, to make sure no more callbacks for it will be invoked.
4086  */
4087 void ceph_osdc_flush_notifies(struct ceph_osd_client *osdc)
4088 {
4089         dout("%s osdc %p\n", __func__, osdc);
4090         flush_workqueue(osdc->notify_wq);
4091 }
4092 EXPORT_SYMBOL(ceph_osdc_flush_notifies);
4093
4094 void ceph_osdc_maybe_request_map(struct ceph_osd_client *osdc)
4095 {
4096         down_read(&osdc->lock);
4097         maybe_request_map(osdc);
4098         up_read(&osdc->lock);
4099 }
4100 EXPORT_SYMBOL(ceph_osdc_maybe_request_map);
4101
4102 /*
4103  * Execute an OSD class method on an object.
4104  *
4105  * @flags: CEPH_OSD_FLAG_*
4106  * @resp_len: in/out param for reply length
4107  */
4108 int ceph_osdc_call(struct ceph_osd_client *osdc,
4109                    struct ceph_object_id *oid,
4110                    struct ceph_object_locator *oloc,
4111                    const char *class, const char *method,
4112                    unsigned int flags,
4113                    struct page *req_page, size_t req_len,
4114                    struct page *resp_page, size_t *resp_len)
4115 {
4116         struct ceph_osd_request *req;
4117         int ret;
4118
4119         if (req_len > PAGE_SIZE || (resp_page && *resp_len > PAGE_SIZE))
4120                 return -E2BIG;
4121
4122         req = ceph_osdc_alloc_request(osdc, NULL, 1, false, GFP_NOIO);
4123         if (!req)
4124                 return -ENOMEM;
4125
4126         ceph_oid_copy(&req->r_base_oid, oid);
4127         ceph_oloc_copy(&req->r_base_oloc, oloc);
4128         req->r_flags = flags;
4129
4130         ret = ceph_osdc_alloc_messages(req, GFP_NOIO);
4131         if (ret)
4132                 goto out_put_req;
4133
4134         osd_req_op_cls_init(req, 0, CEPH_OSD_OP_CALL, class, method);
4135         if (req_page)
4136                 osd_req_op_cls_request_data_pages(req, 0, &req_page, req_len,
4137                                                   0, false, false);
4138         if (resp_page)
4139                 osd_req_op_cls_response_data_pages(req, 0, &resp_page,
4140                                                    *resp_len, 0, false, false);
4141
4142         ceph_osdc_start_request(osdc, req, false);
4143         ret = ceph_osdc_wait_request(osdc, req);
4144         if (ret >= 0) {
4145                 ret = req->r_ops[0].rval;
4146                 if (resp_page)
4147                         *resp_len = req->r_ops[0].outdata_len;
4148         }
4149
4150 out_put_req:
4151         ceph_osdc_put_request(req);
4152         return ret;
4153 }
4154 EXPORT_SYMBOL(ceph_osdc_call);
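
/*
 * A minimal sketch of invoking a hypothetical "example"/"get_info"
 * class method with no request payload and a one-page reply
 * (example_parse is hypothetical):
 *
 *    struct page *resp_page;
 *    size_t resp_len = PAGE_SIZE;
 *    int ret;
 *
 *    resp_page = alloc_page(GFP_NOIO);
 *    if (!resp_page)
 *            return -ENOMEM;
 *
 *    ret = ceph_osdc_call(osdc, &oid, &oloc, "example", "get_info",
 *                         CEPH_OSD_FLAG_READ, NULL, 0,
 *                         resp_page, &resp_len);
 *    if (ret >= 0)
 *            ret = example_parse(page_address(resp_page), resp_len);
 *    __free_page(resp_page);
 *    return ret;
 */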
4155
4156 /*
4157  * init, shutdown
4158  */
4159 int ceph_osdc_init(struct ceph_osd_client *osdc, struct ceph_client *client)
4160 {
4161         int err;
4162
4163         dout("init\n");
4164         osdc->client = client;
4165         init_rwsem(&osdc->lock);
4166         osdc->osds = RB_ROOT;
4167         INIT_LIST_HEAD(&osdc->osd_lru);
4168         spin_lock_init(&osdc->osd_lru_lock);
4169         osd_init(&osdc->homeless_osd);
4170         osdc->homeless_osd.o_osdc = osdc;
4171         osdc->homeless_osd.o_osd = CEPH_HOMELESS_OSD;
4172         osdc->last_linger_id = CEPH_LINGER_ID_START;
4173         osdc->linger_requests = RB_ROOT;
4174         osdc->map_checks = RB_ROOT;
4175         osdc->linger_map_checks = RB_ROOT;
4176         INIT_DELAYED_WORK(&osdc->timeout_work, handle_timeout);
4177         INIT_DELAYED_WORK(&osdc->osds_timeout_work, handle_osds_timeout);
4178
4179         err = -ENOMEM;
4180         osdc->osdmap = ceph_osdmap_alloc();
4181         if (!osdc->osdmap)
4182                 goto out;
4183
4184         osdc->req_mempool = mempool_create_slab_pool(10,
4185                                                      ceph_osd_request_cache);
4186         if (!osdc->req_mempool)
4187                 goto out_map;
4188
4189         err = ceph_msgpool_init(&osdc->msgpool_op, CEPH_MSG_OSD_OP,
4190                                 PAGE_SIZE, 10, true, "osd_op");
4191         if (err < 0)
4192                 goto out_mempool;
4193         err = ceph_msgpool_init(&osdc->msgpool_op_reply, CEPH_MSG_OSD_OPREPLY,
4194                                 PAGE_SIZE, 10, true, "osd_op_reply");
4195         if (err < 0)
4196                 goto out_msgpool;
4197
4198         err = -ENOMEM;
4199         osdc->notify_wq = create_singlethread_workqueue("ceph-watch-notify");
4200         if (!osdc->notify_wq)
4201                 goto out_msgpool_reply;
4202
4203         schedule_delayed_work(&osdc->timeout_work,
4204                               osdc->client->options->osd_keepalive_timeout);
4205         schedule_delayed_work(&osdc->osds_timeout_work,
4206             round_jiffies_relative(osdc->client->options->osd_idle_ttl));
4207
4208         return 0;
4209
4210 out_msgpool_reply:
4211         ceph_msgpool_destroy(&osdc->msgpool_op_reply);
4212 out_msgpool:
4213         ceph_msgpool_destroy(&osdc->msgpool_op);
4214 out_mempool:
4215         mempool_destroy(osdc->req_mempool);
4216 out_map:
4217         ceph_osdmap_destroy(osdc->osdmap);
4218 out:
4219         return err;
4220 }
4221
4222 void ceph_osdc_stop(struct ceph_osd_client *osdc)
4223 {
4224         flush_workqueue(osdc->notify_wq);
4225         destroy_workqueue(osdc->notify_wq);
4226         cancel_delayed_work_sync(&osdc->timeout_work);
4227         cancel_delayed_work_sync(&osdc->osds_timeout_work);
4228
4229         down_write(&osdc->lock);
4230         while (!RB_EMPTY_ROOT(&osdc->osds)) {
4231                 struct ceph_osd *osd = rb_entry(rb_first(&osdc->osds),
4232                                                 struct ceph_osd, o_node);
4233                 close_osd(osd);
4234         }
4235         up_write(&osdc->lock);
4236         WARN_ON(refcount_read(&osdc->homeless_osd.o_ref) != 1);
4237         osd_cleanup(&osdc->homeless_osd);
4238
4239         WARN_ON(!list_empty(&osdc->osd_lru));
4240         WARN_ON(!RB_EMPTY_ROOT(&osdc->linger_requests));
4241         WARN_ON(!RB_EMPTY_ROOT(&osdc->map_checks));
4242         WARN_ON(!RB_EMPTY_ROOT(&osdc->linger_map_checks));
4243         WARN_ON(atomic_read(&osdc->num_requests));
4244         WARN_ON(atomic_read(&osdc->num_homeless));
4245
4246         ceph_osdmap_destroy(osdc->osdmap);
4247         mempool_destroy(osdc->req_mempool);
4248         ceph_msgpool_destroy(&osdc->msgpool_op);
4249         ceph_msgpool_destroy(&osdc->msgpool_op_reply);
4250 }
4251
4252 /*
4253  * Read some contiguous pages.  If we cross a stripe boundary, shorten
4254  * *plen.  Return number of bytes read, or error.
4255  */
4256 int ceph_osdc_readpages(struct ceph_osd_client *osdc,
4257                         struct ceph_vino vino, struct ceph_file_layout *layout,
4258                         u64 off, u64 *plen,
4259                         u32 truncate_seq, u64 truncate_size,
4260                         struct page **pages, int num_pages, int page_align)
4261 {
4262         struct ceph_osd_request *req;
4263         int rc = 0;
4264
4265         dout("readpages on ino %llx.%llx on %llu~%llu\n", vino.ino,
4266              vino.snap, off, *plen);
4267         req = ceph_osdc_new_request(osdc, layout, vino, off, plen, 0, 1,
4268                                     CEPH_OSD_OP_READ, CEPH_OSD_FLAG_READ,
4269                                     NULL, truncate_seq, truncate_size,
4270                                     false);
4271         if (IS_ERR(req))
4272                 return PTR_ERR(req);
4273
4274         /* it may be a short read due to an object boundary */
4275         osd_req_op_extent_osd_data_pages(req, 0,
4276                                 pages, *plen, page_align, false, false);
4277
4278         dout("readpages  final extent is %llu~%llu (%llu bytes align %d)\n",
4279              off, *plen, *plen, page_align);
4280
4281         rc = ceph_osdc_start_request(osdc, req, false);
4282         if (!rc)
4283                 rc = ceph_osdc_wait_request(osdc, req);
4284
4285         ceph_osdc_put_request(req);
4286         dout("readpages result %d\n", rc);
4287         return rc;
4288 }
4289 EXPORT_SYMBOL(ceph_osdc_readpages);
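
/*
 * A minimal sketch of a hypothetical caller (assuming off is
 * page-aligned): because *plen may be shortened at an object boundary,
 * use the updated length and the return value rather than the length
 * originally requested:
 *
 *    u64 len = example_len;
 *    int ret;
 *
 *    ret = ceph_osdc_readpages(osdc, vino, layout, off, &len,
 *                              truncate_seq, truncate_size,
 *                              pages, calc_pages_for(0, len), 0);
 *    if (ret >= 0)
 *            example_consume(pages, ret);
 */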
4290
4291 /*
4292  * do a synchronous write on N pages
4293  */
4294 int ceph_osdc_writepages(struct ceph_osd_client *osdc, struct ceph_vino vino,
4295                          struct ceph_file_layout *layout,
4296                          struct ceph_snap_context *snapc,
4297                          u64 off, u64 len,
4298                          u32 truncate_seq, u64 truncate_size,
4299                          struct timespec *mtime,
4300                          struct page **pages, int num_pages)
4301 {
4302         struct ceph_osd_request *req;
4303         int rc = 0;
4304         int page_align = off & ~PAGE_MASK;
4305
4306         req = ceph_osdc_new_request(osdc, layout, vino, off, &len, 0, 1,
4307                                     CEPH_OSD_OP_WRITE, CEPH_OSD_FLAG_WRITE,
4308                                     snapc, truncate_seq, truncate_size,
4309                                     true);
4310         if (IS_ERR(req))
4311                 return PTR_ERR(req);
4312
4313         /* it may be a short write due to an object boundary */
4314         osd_req_op_extent_osd_data_pages(req, 0, pages, len, page_align,
4315                                 false, false);
4316         dout("writepages %llu~%llu (%llu bytes)\n", off, len, len);
4317
4318         req->r_mtime = *mtime;
4319         rc = ceph_osdc_start_request(osdc, req, true);
4320         if (!rc)
4321                 rc = ceph_osdc_wait_request(osdc, req);
4322
4323         ceph_osdc_put_request(req);
4324         if (rc == 0)
4325                 rc = len;
4326         dout("writepages result %d\n", rc);
4327         return rc;
4328 }
4329 EXPORT_SYMBOL(ceph_osdc_writepages);
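
/*
 * A minimal sketch of a hypothetical caller; the mtime recorded on the
 * object would normally come from the caller's inode, here simply the
 * current time:
 *
 *    struct timespec mtime;
 *    int ret;
 *
 *    ktime_get_real_ts(&mtime);
 *    ret = ceph_osdc_writepages(osdc, vino, layout, snapc, off, len,
 *                               truncate_seq, truncate_size, &mtime,
 *                               pages, calc_pages_for(0, len));
 */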
4330
4331 int ceph_osdc_setup(void)
4332 {
4333         size_t size = sizeof(struct ceph_osd_request) +
4334             CEPH_OSD_SLAB_OPS * sizeof(struct ceph_osd_req_op);
4335
4336         BUG_ON(ceph_osd_request_cache);
4337         ceph_osd_request_cache = kmem_cache_create("ceph_osd_request", size,
4338                                                    0, 0, NULL);
4339
4340         return ceph_osd_request_cache ? 0 : -ENOMEM;
4341 }
4342 EXPORT_SYMBOL(ceph_osdc_setup);
4343
4344 void ceph_osdc_cleanup(void)
4345 {
4346         BUG_ON(!ceph_osd_request_cache);
4347         kmem_cache_destroy(ceph_osd_request_cache);
4348         ceph_osd_request_cache = NULL;
4349 }
4350 EXPORT_SYMBOL(ceph_osdc_cleanup);
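
/*
 * A minimal sketch of the expected pairing: the library initialization
 * path calls ceph_osdc_setup() once before any requests are allocated,
 * and ceph_osdc_cleanup() on module exit:
 *
 *    ret = ceph_osdc_setup();
 *    if (ret)
 *            return ret;
 *
 * and, on module exit:
 *
 *    ceph_osdc_cleanup();
 */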
4351
4352 /*
4353  * handle incoming message
4354  */
4355 static void dispatch(struct ceph_connection *con, struct ceph_msg *msg)
4356 {
4357         struct ceph_osd *osd = con->private;
4358         struct ceph_osd_client *osdc = osd->o_osdc;
4359         int type = le16_to_cpu(msg->hdr.type);
4360
4361         switch (type) {
4362         case CEPH_MSG_OSD_MAP:
4363                 ceph_osdc_handle_map(osdc, msg);
4364                 break;
4365         case CEPH_MSG_OSD_OPREPLY:
4366                 handle_reply(osd, msg);
4367                 break;
4368         case CEPH_MSG_WATCH_NOTIFY:
4369                 handle_watch_notify(osdc, msg);
4370                 break;
4371
4372         default:
4373                 pr_err("received unknown message type %d %s\n", type,
4374                        ceph_msg_type_name(type));
4375         }
4376
4377         ceph_msg_put(msg);
4378 }
4379
4380 /*
4381  * Look up and return the message for an incoming reply.  We don't try
4382  * to handle a data portion larger than what was preallocated - for now,
4383  * such messages are simply skipped.
4384  */
4385 static struct ceph_msg *get_reply(struct ceph_connection *con,
4386                                   struct ceph_msg_header *hdr,
4387                                   int *skip)
4388 {
4389         struct ceph_osd *osd = con->private;
4390         struct ceph_osd_client *osdc = osd->o_osdc;
4391         struct ceph_msg *m = NULL;
4392         struct ceph_osd_request *req;
4393         int front_len = le32_to_cpu(hdr->front_len);
4394         int data_len = le32_to_cpu(hdr->data_len);
4395         u64 tid = le64_to_cpu(hdr->tid);
4396
4397         down_read(&osdc->lock);
4398         if (!osd_registered(osd)) {
4399                 dout("%s osd%d unknown, skipping\n", __func__, osd->o_osd);
4400                 *skip = 1;
4401                 goto out_unlock_osdc;
4402         }
4403         WARN_ON(osd->o_osd != le64_to_cpu(hdr->src.num));
4404
4405         mutex_lock(&osd->lock);
4406         req = lookup_request(&osd->o_requests, tid);
4407         if (!req) {
4408                 dout("%s osd%d tid %llu unknown, skipping\n", __func__,
4409                      osd->o_osd, tid);
4410                 *skip = 1;
4411                 goto out_unlock_session;
4412         }
4413
4414         ceph_msg_revoke_incoming(req->r_reply);
4415
4416         if (front_len > req->r_reply->front_alloc_len) {
4417                 pr_warn("%s osd%d tid %llu front %d > preallocated %d\n",
4418                         __func__, osd->o_osd, req->r_tid, front_len,
4419                         req->r_reply->front_alloc_len);
4420                 m = ceph_msg_new(CEPH_MSG_OSD_OPREPLY, front_len, GFP_NOFS,
4421                                  false);
4422                 if (!m)
4423                         goto out_unlock_session;
4424                 ceph_msg_put(req->r_reply);
4425                 req->r_reply = m;
4426         }
4427
4428         if (data_len > req->r_reply->data_length) {
4429                 pr_warn("%s osd%d tid %llu data %d > preallocated %zu, skipping\n",
4430                         __func__, osd->o_osd, req->r_tid, data_len,
4431                         req->r_reply->data_length);
4432                 m = NULL;
4433                 *skip = 1;
4434                 goto out_unlock_session;
4435         }
4436
4437         m = ceph_msg_get(req->r_reply);
4438         dout("get_reply tid %lld %p\n", tid, m);
4439
4440 out_unlock_session:
4441         mutex_unlock(&osd->lock);
4442 out_unlock_osdc:
4443         up_read(&osdc->lock);
4444         return m;
4445 }
4446
4447 /*
4448  * TODO: switch to a msg-owned pagelist
4449  */
4450 static struct ceph_msg *alloc_msg_with_page_vector(struct ceph_msg_header *hdr)
4451 {
4452         struct ceph_msg *m;
4453         int type = le16_to_cpu(hdr->type);
4454         u32 front_len = le32_to_cpu(hdr->front_len);
4455         u32 data_len = le32_to_cpu(hdr->data_len);
4456
4457         m = ceph_msg_new(type, front_len, GFP_NOIO, false);
4458         if (!m)
4459                 return NULL;
4460
4461         if (data_len) {
4462                 struct page **pages;
4463                 struct ceph_osd_data osd_data;
4464
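                /*
                 * calc_pages_for(off, len) returns the number of pages the
                 * byte range spans; with off == 0 and 4 KiB pages, e.g.
                 * calc_pages_for(0, 10000) == 3.
                 */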
4465                 pages = ceph_alloc_page_vector(calc_pages_for(0, data_len),
4466                                                GFP_NOIO);
4467                 if (IS_ERR(pages)) {
4468                         ceph_msg_put(m);
4469                         return NULL;
4470                 }
4471
4472                 ceph_osd_data_pages_init(&osd_data, pages, data_len, 0, false,
4473                                          false);
4474                 ceph_osdc_msg_data_add(m, &osd_data);
4475         }
4476
4477         return m;
4478 }
4479
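/*
 * Messenger callback: supply a message to receive an incoming frame into,
 * based on its header.  Setting *skip makes the messenger read and discard
 * the message instead of delivering it.
 */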
4480 static struct ceph_msg *alloc_msg(struct ceph_connection *con,
4481                                   struct ceph_msg_header *hdr,
4482                                   int *skip)
4483 {
4484         struct ceph_osd *osd = con->private;
4485         int type = le16_to_cpu(hdr->type);
4486
4487         *skip = 0;
4488         switch (type) {
4489         case CEPH_MSG_OSD_MAP:
4490         case CEPH_MSG_WATCH_NOTIFY:
4491                 return alloc_msg_with_page_vector(hdr);
4492         case CEPH_MSG_OSD_OPREPLY:
4493                 return get_reply(con, hdr, skip);
4494         default:
4495                 pr_warn("%s osd%d unknown msg type %d, skipping\n", __func__,
4496                         osd->o_osd, type);
4497                 *skip = 1;
4498                 return NULL;
4499         }
4500 }
4501
4502 /*
4503  * Wrappers to take/drop a reference on the containing ceph_osd struct
4504  */
4505 static struct ceph_connection *get_osd_con(struct ceph_connection *con)
4506 {
4507         struct ceph_osd *osd = con->private;
4508         if (get_osd(osd))
4509                 return con;
4510         return NULL;
4511 }
4512
4513 static void put_osd_con(struct ceph_connection *con)
4514 {
4515         struct ceph_osd *osd = con->private;
4516         put_osd(osd);
4517 }
4518
4519 /*
4520  * authentication
4521  */
4522 /*
4523  * Note: returned pointer is the address of a structure that's
4524  * managed separately.  Caller must *not* attempt to free it.
4525  */
4526 static struct ceph_auth_handshake *get_authorizer(struct ceph_connection *con,
4527                                         int *proto, int force_new)
4528 {
4529         struct ceph_osd *o = con->private;
4530         struct ceph_osd_client *osdc = o->o_osdc;
4531         struct ceph_auth_client *ac = osdc->client->monc.auth;
4532         struct ceph_auth_handshake *auth = &o->o_auth;
4533
4534         if (force_new && auth->authorizer) {
4535                 ceph_auth_destroy_authorizer(auth->authorizer);
4536                 auth->authorizer = NULL;
4537         }
4538         if (!auth->authorizer) {
4539                 int ret = ceph_auth_create_authorizer(ac, CEPH_ENTITY_TYPE_OSD,
4540                                                       auth);
4541                 if (ret)
4542                         return ERR_PTR(ret);
4543         } else {
4544                 int ret = ceph_auth_update_authorizer(ac, CEPH_ENTITY_TYPE_OSD,
4545                                                      auth);
4546                 if (ret)
4547                         return ERR_PTR(ret);
4548         }
4549         *proto = ac->protocol;
4550
4551         return auth;
4552 }
4553
4554
4555 static int verify_authorizer_reply(struct ceph_connection *con)
4556 {
4557         struct ceph_osd *o = con->private;
4558         struct ceph_osd_client *osdc = o->o_osdc;
4559         struct ceph_auth_client *ac = osdc->client->monc.auth;
4560
4561         return ceph_auth_verify_authorizer_reply(ac, o->o_auth.authorizer);
4562 }
4563
4564 static int invalidate_authorizer(struct ceph_connection *con)
4565 {
4566         struct ceph_osd *o = con->private;
4567         struct ceph_osd_client *osdc = o->o_osdc;
4568         struct ceph_auth_client *ac = osdc->client->monc.auth;
4569
4570         ceph_auth_invalidate_authorizer(ac, CEPH_ENTITY_TYPE_OSD);
4571         return ceph_monc_validate_auth(&osdc->client->monc);
4572 }
4573
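/*
 * Message signing hooks: sign outgoing messages and check the signature on
 * incoming ones using this session's auth handshake (effectively a no-op
 * unless the auth method implements message signing).
 */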
4574 static int osd_sign_message(struct ceph_msg *msg)
4575 {
4576         struct ceph_osd *o = msg->con->private;
4577         struct ceph_auth_handshake *auth = &o->o_auth;
4578
4579         return ceph_auth_sign_message(auth, msg);
4580 }
4581
4582 static int osd_check_message_signature(struct ceph_msg *msg)
4583 {
4584         struct ceph_osd *o = msg->con->private;
4585         struct ceph_auth_handshake *auth = &o->o_auth;
4586
4587         return ceph_auth_check_message_signature(auth, msg);
4588 }
4589
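/*
 * Connection callbacks the messenger invokes on behalf of OSD sessions.
 */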
4590 static const struct ceph_connection_operations osd_con_ops = {
4591         .get = get_osd_con,
4592         .put = put_osd_con,
4593         .dispatch = dispatch,
4594         .get_authorizer = get_authorizer,
4595         .verify_authorizer_reply = verify_authorizer_reply,
4596         .invalidate_authorizer = invalidate_authorizer,
4597         .alloc_msg = alloc_msg,
4598         .sign_message = osd_sign_message,
4599         .check_message_signature = osd_check_message_signature,
4600         .fault = osd_fault,
4601 };