/* FS-Cache worker operation management routines
 *
 * Copyright (C) 2008 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * See Documentation/filesystems/caching/operations.txt
 */

#define FSCACHE_DEBUG_LEVEL OPERATION
#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include "internal.h"

atomic_t fscache_op_debug_id;
EXPORT_SYMBOL(fscache_op_debug_id);

/**
 * fscache_enqueue_operation - Enqueue an operation for processing
 * @op: The operation to enqueue
 *
 * Enqueue an operation for processing by the FS-Cache thread pool.
 *
 * This will get its own ref on the object.
 */
void fscache_enqueue_operation(struct fscache_operation *op)
{
        _enter("{OBJ%x OP%x,%u}",
               op->object->debug_id, op->debug_id, atomic_read(&op->usage));

        fscache_set_op_state(op, "EnQ");

        ASSERT(list_empty(&op->pend_link));
        ASSERT(op->processor != NULL);
        ASSERTCMP(op->object->state, >=, FSCACHE_OBJECT_AVAILABLE);
        ASSERTCMP(atomic_read(&op->usage), >, 0);

        fscache_stat(&fscache_n_op_enqueue);
        switch (op->flags & FSCACHE_OP_TYPE) {
        case FSCACHE_OP_ASYNC:
                _debug("queue async");
                atomic_inc(&op->usage);
                if (!queue_work(fscache_op_wq, &op->work))
                        fscache_put_operation(op);
                break;
        case FSCACHE_OP_MYTHREAD:
                _debug("queue for caller's attention");
                break;
        default:
                printk(KERN_ERR "FS-Cache: Unexpected op type %lx",
                       op->flags);
                BUG();
                break;
        }
}
EXPORT_SYMBOL(fscache_enqueue_operation);

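/*
 * Example (sketch): how a caller might set up an asynchronous operation that
 * ends up here.  Assumes the fscache_operation_init() helper from
 * fscache-cache.h; my_processor and my_release are hypothetical callbacks:
 *
 *	op = kzalloc(sizeof(*op), GFP_KERNEL);
 *	if (!op)
 *		return -ENOMEM;
 *	fscache_operation_init(op, my_processor, my_release);
 *	op->flags = FSCACHE_OP_ASYNC;
 *
 *	if (fscache_submit_op(object, op) < 0) {
 *		fscache_put_operation(op);
 *		return -ENOBUFS;
 *	}
 *	fscache_put_operation(op);
 *
 * The final put drops only the caller's initial ref: once submitted, the
 * pending queue and the thread pool take refs of their own, and
 * fscache_run_op() feeds the op back through fscache_enqueue_operation(),
 * which queues op->work on fscache_op_wq.
 */
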
/*
 * start an op running
 */
static void fscache_run_op(struct fscache_object *object,
                           struct fscache_operation *op)
{
        fscache_set_op_state(op, "Run");

        object->n_in_progress++;
        if (test_and_clear_bit(FSCACHE_OP_WAITING, &op->flags))
                wake_up_bit(&op->flags, FSCACHE_OP_WAITING);
        if (op->processor)
                fscache_enqueue_operation(op);
        fscache_stat(&fscache_n_op_run);
}

/*
 * submit an exclusive operation for an object
 * - other ops are excluded from running simultaneously with this one
 * - this gets any extra refs it needs on an op
 */
int fscache_submit_exclusive_op(struct fscache_object *object,
                                struct fscache_operation *op)
{
        int ret;

        _enter("{OBJ%x OP%x},", object->debug_id, op->debug_id);

        fscache_set_op_state(op, "SubmitX");

        spin_lock(&object->lock);
        ASSERTCMP(object->n_ops, >=, object->n_in_progress);
        ASSERTCMP(object->n_ops, >=, object->n_exclusive);
        ASSERT(list_empty(&op->pend_link));

        ret = -ENOBUFS;
        if (fscache_object_is_active(object)) {
                op->object = object;
                object->n_ops++;
                object->n_exclusive++;  /* reads and writes must wait */

                if (object->n_ops > 1) {
                        atomic_inc(&op->usage);
                        list_add_tail(&op->pend_link, &object->pending_ops);
                        fscache_stat(&fscache_n_op_pend);
                } else if (!list_empty(&object->pending_ops)) {
                        atomic_inc(&op->usage);
                        list_add_tail(&op->pend_link, &object->pending_ops);
                        fscache_stat(&fscache_n_op_pend);
                        fscache_start_operations(object);
                } else {
                        ASSERTCMP(object->n_in_progress, ==, 0);
                        fscache_run_op(object, op);
                }

                /* need to issue a new write op after this */
                clear_bit(FSCACHE_OBJECT_PENDING_WRITE, &object->flags);
                ret = 0;
        } else if (object->state == FSCACHE_OBJECT_CREATING) {
                op->object = object;
                object->n_ops++;
                object->n_exclusive++;  /* reads and writes must wait */
                atomic_inc(&op->usage);
                list_add_tail(&op->pend_link, &object->pending_ops);
                fscache_stat(&fscache_n_op_pend);
                ret = 0;
        } else {
                /* not allowed to submit ops in any other state */
                BUG();
        }

        spin_unlock(&object->lock);
        return ret;
}

/*
 * report an unexpected submission
 */
static void fscache_report_unexpected_submission(struct fscache_object *object,
                                                 struct fscache_operation *op,
                                                 unsigned long ostate)
{
        static bool once_only;
        struct fscache_operation *p;
        unsigned n;

        if (once_only)
                return;
        once_only = true;

        kdebug("unexpected submission OP%x [OBJ%x %s]",
               op->debug_id, object->debug_id,
               fscache_object_states[object->state]);
        kdebug("objstate=%s [%s]",
               fscache_object_states[object->state],
               fscache_object_states[ostate]);
        kdebug("objflags=%lx", object->flags);
        kdebug("objevent=%lx [%lx]", object->events, object->event_mask);
        kdebug("ops=%u inp=%u exc=%u",
               object->n_ops, object->n_in_progress, object->n_exclusive);

        if (!list_empty(&object->pending_ops)) {
                n = 0;
                list_for_each_entry(p, &object->pending_ops, pend_link) {
                        ASSERTCMP(p->object, ==, object);
                        kdebug("%p %p", p->processor, p->release);
                        n++;
                }

                kdebug("n=%u", n);
        }

        dump_stack();
}

/*
 * submit an operation for an object
 * - objects may be submitted only in the following states:
 *   - during object creation (write ops may be submitted)
 *   - whilst the object is active
 *   - after an I/O error incurred in one of the two above states (op rejected)
 * - this gets any extra refs it needs on an op
 */
int fscache_submit_op(struct fscache_object *object,
                      struct fscache_operation *op)
{
        unsigned long ostate;
        int ret;

        _enter("{OBJ%x OP%x},{%u}",
               object->debug_id, op->debug_id, atomic_read(&op->usage));

        ASSERTCMP(atomic_read(&op->usage), >, 0);

        fscache_set_op_state(op, "Submit");

        spin_lock(&object->lock);
        ASSERTCMP(object->n_ops, >=, object->n_in_progress);
        ASSERTCMP(object->n_ops, >=, object->n_exclusive);
        ASSERT(list_empty(&op->pend_link));

        ostate = object->state;
        smp_rmb();

        if (fscache_object_is_active(object)) {
                op->object = object;
                object->n_ops++;

                if (object->n_exclusive > 0) {
                        atomic_inc(&op->usage);
                        list_add_tail(&op->pend_link, &object->pending_ops);
                        fscache_stat(&fscache_n_op_pend);
                } else if (!list_empty(&object->pending_ops)) {
                        atomic_inc(&op->usage);
                        list_add_tail(&op->pend_link, &object->pending_ops);
                        fscache_stat(&fscache_n_op_pend);
                        fscache_start_operations(object);
                } else {
                        ASSERTCMP(object->n_exclusive, ==, 0);
                        fscache_run_op(object, op);
                }
                ret = 0;
        } else if (object->state == FSCACHE_OBJECT_CREATING) {
                op->object = object;
                object->n_ops++;
                atomic_inc(&op->usage);
                list_add_tail(&op->pend_link, &object->pending_ops);
                fscache_stat(&fscache_n_op_pend);
                ret = 0;
        } else if (object->state == FSCACHE_OBJECT_DYING ||
                   object->state == FSCACHE_OBJECT_LC_DYING ||
                   object->state == FSCACHE_OBJECT_WITHDRAWING) {
                fscache_stat(&fscache_n_op_rejected);
                ret = -ENOBUFS;
        } else if (!test_bit(FSCACHE_IOERROR, &object->cache->flags)) {
                fscache_report_unexpected_submission(object, op, ostate);
                ASSERT(!fscache_object_is_active(object));
                ret = -ENOBUFS;
        } else {
                ret = -ENOBUFS;
        }

        spin_unlock(&object->lock);
        return ret;
}

/*
 * queue an object for withdrawal on error, aborting all following
 * asynchronous operations
 */
void fscache_abort_object(struct fscache_object *object)
{
        _enter("{OBJ%x}", object->debug_id);

        fscache_raise_event(object, FSCACHE_OBJECT_EV_ERROR);
}

/*
 * jump start the operation processing on an object
 * - caller must hold object->lock
 */
void fscache_start_operations(struct fscache_object *object)
{
        struct fscache_operation *op;
        bool stop = false;

        while (!list_empty(&object->pending_ops) && !stop) {
                op = list_entry(object->pending_ops.next,
                                struct fscache_operation, pend_link);

                if (test_bit(FSCACHE_OP_EXCLUSIVE, &op->flags)) {
                        if (object->n_in_progress > 0)
                                break;
                        stop = true;
                }

                list_del_init(&op->pend_link);
                fscache_run_op(object, op);

                /* the pending queue was holding a ref on the object */
                fscache_put_operation(op);
        }

        ASSERTCMP(object->n_in_progress, <=, object->n_ops);

        _debug("woke %d ops on OBJ%x",
               object->n_in_progress, object->debug_id);
}

/*
 * cancel an operation that's pending on an object
 */
int fscache_cancel_op(struct fscache_operation *op)
{
        struct fscache_object *object = op->object;
        int ret;

        _enter("{OBJ%x OP%x}", op->object->debug_id, op->debug_id);

        spin_lock(&object->lock);

        ret = -EBUSY;
        if (!list_empty(&op->pend_link)) {
                fscache_stat(&fscache_n_op_cancelled);
                list_del_init(&op->pend_link);
                object->n_ops--;
                if (test_bit(FSCACHE_OP_EXCLUSIVE, &op->flags))
                        object->n_exclusive--;
                if (test_and_clear_bit(FSCACHE_OP_WAITING, &op->flags))
                        wake_up_bit(&op->flags, FSCACHE_OP_WAITING);
                fscache_put_operation(op);
                ret = 0;
        }

        spin_unlock(&object->lock);
        _leave(" = %d", ret);
        return ret;
}

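/*
 * Example (sketch): the wait-then-cancel pattern a submitter might use with
 * FSCACHE_OP_WAITING, assuming wait_on_bit() action helpers like the
 * fscache_wait_bit*() functions used elsewhere in FS-Cache:
 *
 *	if (wait_on_bit(&op->flags, FSCACHE_OP_WAITING,
 *			fscache_wait_bit_interruptible,
 *			TASK_INTERRUPTIBLE) < 0) {
 *		if (fscache_cancel_op(op) == 0)
 *			return -ERESTARTSYS;
 *		wait_on_bit(&op->flags, FSCACHE_OP_WAITING,
 *			    fscache_wait_bit, TASK_UNINTERRUPTIBLE);
 *	}
 *
 * If the op is still on the pending queue, cancellation succeeds and returns
 * 0; if it has already been started, -EBUSY forces the caller to wait for it
 * uninterruptibly instead.
 */
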
/*
 * release an operation
 * - queues pending ops if this is the last in-progress op
 */
void fscache_put_operation(struct fscache_operation *op)
{
        struct fscache_object *object;
        struct fscache_cache *cache;

        _enter("{OBJ%x OP%x,%d}",
               op->object->debug_id, op->debug_id, atomic_read(&op->usage));

        ASSERTCMP(atomic_read(&op->usage), >, 0);

        if (!atomic_dec_and_test(&op->usage))
                return;

        fscache_set_op_state(op, "Put");

        _debug("PUT OP");
        if (test_and_set_bit(FSCACHE_OP_DEAD, &op->flags))
                BUG();

        fscache_stat(&fscache_n_op_release);

        if (op->release) {
                op->release(op);
                op->release = NULL;
        }

        object = op->object;

        if (test_bit(FSCACHE_OP_DEC_READ_CNT, &op->flags))
                atomic_dec(&object->n_reads);

        /* now... we may get called with the object spinlock held, so we
         * complete the cleanup here only if we can immediately acquire the
         * lock, and defer it otherwise */
        if (!spin_trylock(&object->lock)) {
                _debug("defer put");
                fscache_stat(&fscache_n_op_deferred_release);

                cache = object->cache;
                spin_lock(&cache->op_gc_list_lock);
                list_add_tail(&op->pend_link, &cache->op_gc_list);
                spin_unlock(&cache->op_gc_list_lock);
                schedule_work(&cache->op_gc);
                _leave(" [defer]");
                return;
        }

        if (test_bit(FSCACHE_OP_EXCLUSIVE, &op->flags)) {
                ASSERTCMP(object->n_exclusive, >, 0);
                object->n_exclusive--;
        }

        ASSERTCMP(object->n_in_progress, >, 0);
        object->n_in_progress--;
        if (object->n_in_progress == 0)
                fscache_start_operations(object);

        ASSERTCMP(object->n_ops, >, 0);
        object->n_ops--;
        if (object->n_ops == 0)
                fscache_raise_event(object, FSCACHE_OBJECT_EV_CLEARED);

        spin_unlock(&object->lock);

        kfree(op);
        _leave(" [done]");
}
EXPORT_SYMBOL(fscache_put_operation);

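/*
 * Example (sketch): a release callback as passed to fscache_operation_init().
 * It runs exactly once, when the last ref is put (FSCACHE_OP_DEAD guards
 * against a double release); the op itself is then freed by the kfree()
 * above, so the callback should only release resources the op points to.
 * struct my_op and its page field are hypothetical:
 *
 *	struct my_op {
 *		struct fscache_operation op;
 *		struct page *page;
 *	};
 *
 *	static void my_op_release(struct fscache_operation *_op)
 *	{
 *		struct my_op *op = container_of(_op, struct my_op, op);
 *
 *		page_cache_release(op->page);
 *	}
 */
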
/*
 * garbage collect operations that have had their release deferred
 */
void fscache_operation_gc(struct work_struct *work)
{
        struct fscache_operation *op;
        struct fscache_object *object;
        struct fscache_cache *cache =
                container_of(work, struct fscache_cache, op_gc);
        int count = 0;

        _enter("");

        do {
                spin_lock(&cache->op_gc_list_lock);
                if (list_empty(&cache->op_gc_list)) {
                        spin_unlock(&cache->op_gc_list_lock);
                        break;
                }

                op = list_entry(cache->op_gc_list.next,
                                struct fscache_operation, pend_link);
                list_del(&op->pend_link);
                spin_unlock(&cache->op_gc_list_lock);

                object = op->object;

                _debug("GC DEFERRED REL OBJ%x OP%x",
                       object->debug_id, op->debug_id);
                fscache_stat(&fscache_n_op_gc);

                ASSERTCMP(atomic_read(&op->usage), ==, 0);

                spin_lock(&object->lock);
                if (test_bit(FSCACHE_OP_EXCLUSIVE, &op->flags)) {
                        ASSERTCMP(object->n_exclusive, >, 0);
                        object->n_exclusive--;
                }

                ASSERTCMP(object->n_in_progress, >, 0);
                object->n_in_progress--;
                if (object->n_in_progress == 0)
                        fscache_start_operations(object);

                ASSERTCMP(object->n_ops, >, 0);
                object->n_ops--;
                if (object->n_ops == 0)
                        fscache_raise_event(object, FSCACHE_OBJECT_EV_CLEARED);

                spin_unlock(&object->lock);
                kfree(op);

        } while (count++ < 20);

        if (!list_empty(&cache->op_gc_list))
                schedule_work(&cache->op_gc);

        _leave("");
}

/*
 * execute an operation using fscache_op_wq to provide processing context -
 * the caller holds a ref to this object, so we don't need to hold one
 */
void fscache_op_work_func(struct work_struct *work)
{
        struct fscache_operation *op =
                container_of(work, struct fscache_operation, work);
        unsigned long start;

        _enter("{OBJ%x OP%x,%d}",
               op->object->debug_id, op->debug_id, atomic_read(&op->usage));

        ASSERT(op->processor != NULL);
        start = jiffies;
        op->processor(op);
        fscache_hist(fscache_ops_histogram, start);
        fscache_put_operation(op);

        _leave("");
}

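/*
 * Example (sketch): a processor callback as invoked above.  It runs in
 * process context on fscache_op_wq, and the workqueue's ref on the op is
 * dropped by fscache_op_work_func() after it returns.  my_op_processor,
 * do_the_work and struct my_op (as in the sketch following
 * fscache_put_operation()) are hypothetical:
 *
 *	static void my_op_processor(struct fscache_operation *_op)
 *	{
 *		struct my_op *op = container_of(_op, struct my_op, op);
 *
 *		do_the_work(op->page);
 *	}
 */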