int);
#define drbd_free_peer_req(m,e) __drbd_free_peer_req(m, e, 0)
#define drbd_free_net_peer_req(m,e) __drbd_free_peer_req(m, e, 1)
+extern struct page *drbd_alloc_pages(struct drbd_conf *, unsigned int, bool);
extern void drbd_set_recv_tcq(struct drbd_conf *mdev, int tcq_enabled);
extern void _drbd_clear_done_ee(struct drbd_conf *mdev, struct list_head *to_be_freed);
extern void conn_flush_workqueue(struct drbd_tconn *tconn);
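/*
 * Sketch, not part of this patch: with drbd_alloc_pages() now exported,
 * a caller could obtain and release a page chain as below. The free
 * helper drbd_free_pages() and its is_net flag are assumptions here;
 * this hunk only introduces the allocator's declaration.
 */
	struct page *page = drbd_alloc_pages(mdev, nr_pages, true /* retry */);

	if (page) {
		/* ... consume the chain, linked via page->private ... */
		drbd_free_pages(mdev, page, 0 /* !is_net */);
	}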
return page;
/* Not enough pages immediately available this time.
- * No need to jump around here, drbd_pp_alloc will retry this
+ * No need to jump around here, drbd_alloc_pages will retry this
* function "soon". */
if (page) {
tmp = page_chain_tail(page, NULL);
}
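/*
 * Sketch, not part of this patch, of what page_chain_tail() above does
 * under the page->private chaining convention: walk to the last page of
 * the chain, optionally reporting its length. The signature is assumed
 * from the call site page_chain_tail(page, NULL).
 */
static struct page *example_chain_tail(struct page *page, int *len)
{
	int i = 1;
	struct page *next;

	while ((next = (struct page *)page_private(page))) {
		page = next;
		i++;
	}
	if (len)
		*len = i;
	return page;	/* last page; its ->private is 0 */
}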
/**
- * drbd_pp_alloc() - Returns @number pages, retries forever (or until signalled)
+ * drbd_alloc_pages() - Returns @number pages, retries forever (or until signalled) if @retry is set
* @mdev: DRBD device.
* @number: number of pages requested
* @retry: whether to retry, if not enough pages are available right now
*
* Returns a page chain linked via page->private.
*/
-static struct page *drbd_pp_alloc(struct drbd_conf *mdev, unsigned number, bool retry)
+struct page *drbd_alloc_pages(struct drbd_conf *mdev, unsigned int number,
+ bool retry)
{
struct page *page = NULL;
DEFINE_WAIT(wait);
break;
if (signal_pending(current)) {
- dev_warn(DEV, "drbd_pp_alloc interrupted!\n");
+ dev_warn(DEV, "drbd_alloc_pages interrupted!\n");
break;
}
return page;
}
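/*
 * Sketch, not part of this patch: consuming the return value. Each
 * page's ->private holds the pointer to the next page, so the chain can
 * be walked with page_private() until it yields NULL.
 */
	struct page *p;

	for (p = page; p; p = (struct page *)page_private(p)) {
		void *data = kmap(p);
		/* ... up to PAGE_SIZE bytes per link ... */
		kunmap(p);
	}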
-/* Must not be used from irq, as that may deadlock: see drbd_pp_alloc.
+/* Must not be used from irq, as that may deadlock: see drbd_alloc_pages.
 * Is also used from inside another spin_lock_irq(&mdev->tconn->req_lock);
* Either links the page chain back to the global pool,
* or returns all pages to the system. */
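/*
 * Sketch, not part of this patch, of the two paths the comment above
 * describes. The pool globals (drbd_pp_lock, drbd_pp_pool,
 * drbd_pp_vacant, drbd_pp_wait) are the ones drbd_alloc_pages() draws
 * from; the "pool is full enough" test is an assumption.
 */
static void example_free_chain(struct page *page, bool pool_full)
{
	if (pool_full) {
		/* return the whole chain to the system */
		while (page) {
			struct page *next = (struct page *)page_private(page);
			put_page(page);
			page = next;
		}
	} else {
		/* link the chain back onto the global pool, wake waiters */
		int count;
		struct page *tail = page_chain_tail(page, &count);

		spin_lock(&drbd_pp_lock);
		set_page_private(tail, (unsigned long)drbd_pp_pool);
		drbd_pp_pool = page;
		drbd_pp_vacant += count;
		spin_unlock(&drbd_pp_lock);
		wake_up(&drbd_pp_wait);
	}
}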
return NULL;
}
- page = drbd_pp_alloc(mdev, nr_pages, (gfp_mask & __GFP_WAIT));
+ page = drbd_alloc_pages(mdev, nr_pages, (gfp_mask & __GFP_WAIT));
if (!page)
goto fail;
if (!data_size)
return 0;
- page = drbd_pp_alloc(mdev, 1, 1);
+ page = drbd_alloc_pages(mdev, 1, true);
data = kmap(page);
while (data_size) {
* In case we block on congestion, we could otherwise run into
* some distributed deadlock, if the other side blocks on
* congestion as well, because our receiver blocks in
- * drbd_pp_alloc due to pp_in_use > max_buffers. */
+ * drbd_alloc_pages due to pp_in_use > max_buffers. */
drbd_free_peer_req(mdev, peer_req);
peer_req = NULL;
inc_rs_pending(mdev);
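/*
 * Sketch, not part of this patch: the ordering these call sites depend
 * on. Pages go back to the pool *before* the potentially blocking send;
 * were both peers to hold their pages while blocked on a congested
 * socket, each receiver would wait in drbd_alloc_pages() for pp_in_use
 * to drop below max_buffers, and neither could proceed.
 * drbd_send_drequest_csum() stands in for whichever send follows at the
 * real call site.
 */
	drbd_free_peer_req(mdev, peer_req);	/* 1: pages back to the pool */
	peer_req = NULL;			/* 2: must not be dereferenced again */
	inc_rs_pending(mdev);			/* 3: account the pending request */
	err = drbd_send_drequest_csum(mdev, sector, size, digest, digest_size,
				      P_CSUM_RS_REQUEST);	/* 4: may block */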
* In case we block on congestion, we could otherwise run into
* some distributed deadlock, if the other side blocks on
* congestion as well, because our receiver blocks in
- * drbd_pp_alloc due to pp_in_use > max_buffers. */
+ * drbd_alloc_pages due to pp_in_use > max_buffers. */
drbd_free_peer_req(mdev, peer_req);
peer_req = NULL;
inc_rs_pending(mdev);
* In case we block on congestion, we could otherwise run into
* some distributed deadlock, if the other side blocks on
* congestion as well, because our receiver blocks in
- * drbd_pp_alloc due to pp_in_use > max_buffers. */
+ * drbd_alloc_pages due to pp_in_use > max_buffers. */
drbd_free_peer_req(mdev, peer_req);
if (!eq)
drbd_ov_out_of_sync_found(mdev, sector, size);